The size of tensor a (640) must match the size of tensor b (320) at non-singleton · Issue #43 · ant-research/MagicQuill · GitHub
[go: up one dir, main page]

Skip to content

The size of tensor a (640) must match the size of tensor b (320) at non-singleton #43

@BlaiseRodrigues

Description

@BlaiseRodrigues

Hi, on Windows I am getting the following error when I use the add brush:

loading in lowvram mode 64.0 0%| | 0/20 [00:00<?, ?it/s]BrushNet inference, step = 0: image batch = 1, got 1 latents, starting from 0 BrushNet inference: sample torch.Size([1, 4, 112, 64]) , CL torch.Size([1, 5, 112, 64]) dtype torch.float16 C:\Users\Blaise\.conda\envs\MagicQuill\lib\site-packages\diffusers\models\resnet.py:323: FutureWarning: scaleis deprecated and will be removed in version 1.0.0. Thescaleargument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future.scaleshould directly be passed while calling the underlying pipeline component i.e., viacross_attention_kwargs. deprecate("scale", "1.0.0", deprecation_message) BrushNet can't find <class 'comfy.ops.disable_weight_init.Conv2d'> layer in 0 input block: None shape: 56, 112, 32, 64 0%| | 0/20 [00:04<?, ?it/s] Traceback (most recent call last): File "C:\Users\Blaise\.conda\envs\MagicQuill\lib\site-packages\gradio\queueing.py", line 624, in process_events response = await route_utils.call_process_api( File "C:\Users\Blaise\.conda\envs\MagicQuill\lib\site-packages\gradio\route_utils.py", line 323, in call_process_api output = await app.get_blocks().process_api( File "C:\Users\Blaise\.conda\envs\MagicQuill\lib\site-packages\gradio\blocks.py", line 2018, in process_api result = await self.call_function( File "C:\Users\Blaise\.conda\envs\MagicQuill\lib\site-packages\gradio\blocks.py", line 1567, in call_function prediction = await anyio.to_thread.run_sync( # type: ignore File "C:\Users\Blaise\.conda\envs\MagicQuill\lib\site-packages\anyio\to_thread.py", line 56, in run_sync return await get_async_backend().run_sync_in_worker_thread( File "C:\Users\Blaise\.conda\envs\MagicQuill\lib\site-packages\anyio\_backends\_asyncio.py", line 2441, in run_sync_in_worker_thread return await future File "C:\Users\Blaise\.conda\envs\MagicQuill\lib\site-packages\anyio\_backends\_asyncio.py", line 943, in run result = context.run(func, *args) File 
"C:\Users\Blaise\.conda\envs\MagicQuill\lib\site-packages\gradio\utils.py", line 846, in wrapper response = f(*args, **kwargs) File "I:\MagicQuill\gradio_run.py", line 152, in generate_image_handler res = generate( File "I:\MagicQuill\gradio_run.py", line 120, in generate latent_samples, final_image, lineart_output, color_output = scribbleColorEditModel.process( File "I:\MagicQuill\MagicQuill\scribble_color_edit.py", line 110, in process latent_samples = self.ksampler.sample( File "I:\MagicQuill\MagicQuill\comfyui_utils.py", line 154, in sample return self.common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise) File "I:\MagicQuill\MagicQuill\comfyui_utils.py", line 146, in common_ksampler samples = comfy.sample.sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, File "I:\MagicQuill\MagicQuill\comfy\sample.py", line 43, in sample samples = sampler.sample(noise, positive, negative, cfg=cfg, latent_image=latent_image, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise, denoise_mask=noise_mask, sigmas=sigmas, callback=callback, disable_pbar=disable_pbar, seed=seed) File "I:\MagicQuill\MagicQuill\comfy\samplers.py", line 794, in sample return sample(self.model, noise, positive, negative, cfg, self.device, sampler, sigmas, self.model_options, latent_image=latent_image, denoise_mask=denoise_mask, callback=callback, disable_pbar=disable_pbar, seed=seed) File "I:\MagicQuill\MagicQuill\model_patch.py", line 120, in modified_sample return cfg_guider.sample(noise, latent_image, sampler, sigmas, denoise_mask, callback, disable_pbar, seed) File "I:\MagicQuill\MagicQuill\comfy\samplers.py", line 683, in sample output = self.inner_sample(noise, latent_image, device, sampler, sigmas, denoise_mask, callback, disable_pbar, seed) File "I:\MagicQuill\MagicQuill\comfy\samplers.py", line 662, in inner_sample samples = sampler.sample(self, sigmas, extra_args, 
callback, noise, latent_image, denoise_mask, disable_pbar) File "I:\MagicQuill\MagicQuill\comfy\samplers.py", line 567, in sample samples = self.sampler_function(model_k, noise, sigmas, extra_args=extra_args, callback=k_callback, disable=disable_pbar, **self.extra_options) File "C:\Users\Blaise\.conda\envs\MagicQuill\lib\site-packages\torch\utils\_contextlib.py", line 115, in decorate_context return func(*args, **kwargs) File "I:\MagicQuill\MagicQuill\comfy\k_diffusion\sampling.py", line 159, in sample_euler_ancestral denoised = model(x, sigmas[i] * s_in, **extra_args) File "I:\MagicQuill\MagicQuill\comfy\samplers.py", line 291, in __call__ out = self.inner_model(x, sigma, model_options=model_options, seed=seed) File "I:\MagicQuill\MagicQuill\comfy\samplers.py", line 649, in __call__ return self.predict_noise(*args, **kwargs) File "I:\MagicQuill\MagicQuill\comfy\samplers.py", line 652, in predict_noise return sampling_function(self.inner_model, x, timestep, self.conds.get("negative", None), self.conds.get("positive", None), self.cfg, model_options=model_options, seed=seed) File "I:\MagicQuill\MagicQuill\comfy\samplers.py", line 277, in sampling_function out = calc_cond_batch(model, conds, x, timestep, model_options) File "I:\MagicQuill\MagicQuill\comfy\samplers.py", line 224, in calc_cond_batch output = model_options['model_function_wrapper'](model.apply_model, {"input": input_x, "timestep": timestep_, "c": c, "cond_or_uncond": cond_or_uncond}).chunk(batch_chunks) File "I:\MagicQuill\MagicQuill\model_patch.py", line 52, in brushnet_model_function_wrapper return apply_model_method(x, timestep, **options_dict['c']) File "I:\MagicQuill\MagicQuill\comfy\model_base.py", line 113, in apply_model model_output = self.diffusion_model(xc, t, context=context, control=control, transformer_options=transformer_options, **extra_conds).float() File "C:\Users\Blaise\.conda\envs\MagicQuill\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl return 
self._call_impl(*args, **kwargs) File "C:\Users\Blaise\.conda\envs\MagicQuill\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl return forward_call(*args, **kwargs) File "I:\MagicQuill\MagicQuill\comfy\ldm\modules\diffusionmodules\openaimodel.py", line 852, in forward h = forward_timestep_embed(module, h, emb, context, transformer_options, time_context=time_context, num_video_frames=num_video_frames, image_only_indicator=image_only_indicator) File "I:\MagicQuill\MagicQuill\comfy\ldm\modules\diffusionmodules\openaimodel.py", line 44, in forward_timestep_embed x = layer(x, context, transformer_options) File "C:\Users\Blaise\.conda\envs\MagicQuill\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File "C:\Users\Blaise\.conda\envs\MagicQuill\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl return forward_call(*args, **kwargs) File "I:\MagicQuill\MagicQuill\brushnet_nodes.py", line 1071, in forward_patched_by_brushnet h += to_add.to(h.dtype).to(h.device) RuntimeError: The size of tensor a (640) must match the size of tensor b (320) at non-singleton dimension 1

I would appreciate any feedback.

Metadata

Metadata

Assignees

No one assigned

    Labels

    No labels
    No labels

    Type

    No type

    Projects

    No projects

    Milestone

    No milestone

    Relationships

    None yet

    Development

    No branches or pull requests

    Issue actions

      0