This repository has been archived by the owner on Dec 25, 2023. It is now read-only.

When I use the IP adapter node with the T2I Style Model, a black image is generated. #4

Open
toyxyz opened this issue Aug 19, 2023 · 1 comment

Comments


toyxyz commented Aug 19, 2023

I tried to use the IP adapter node together with the T2I style adapter, but only a black, empty image was generated. There is no problem when each is used on its own, and there is also no problem when the IP adapter is used together with the Shuffle ControlNet.

Workflow screenshots attached for each configuration:

- IP adapter: workflow (8)
- IP adapter + T2I adapter Style: workflow (7)
- T2I adapter Style: workflow (6)
- IP adapter + Shuffle ControlNet: workflow (9)

```
Error occurred when executing KSampler:

step must be nonzero

File "C:\ComfyUI_windows_portable\ComfyUI\execution.py", line 151, in recursive_execute
output_data, output_ui = get_output_data(obj, input_data_all)
File "C:\ComfyUI_windows_portable\ComfyUI\execution.py", line 81, in get_output_data
return_values = map_node_over_list(obj, input_data_all, obj.FUNCTION, allow_interrupt=True)
File "C:\ComfyUI_windows_portable\ComfyUI\execution.py", line 74, in map_node_over_list
results.append(getattr(obj, func)(**slice_dict(input_data_all, i)))
File "C:\ComfyUI_windows_portable\ComfyUI\nodes.py", line 1206, in sample
return common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise)
File "C:\ComfyUI_windows_portable\ComfyUI\nodes.py", line 1176, in common_ksampler
samples = comfy.sample.sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image,
File "C:\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI-Impact-Pack\modules\impact\hacky.py", line 22, in informative_sample
raise e
File "C:\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI-Impact-Pack\modules\impact\hacky.py", line 9, in informative_sample
return original_sample(*args, **kwargs)
File "C:\ComfyUI_windows_portable\ComfyUI\comfy\sample.py", line 93, in sample
samples = sampler.sample(noise, positive_copy, negative_copy, cfg=cfg, latent_image=latent_image, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise, denoise_mask=noise_mask, sigmas=sigmas, callback=callback, disable_pbar=disable_pbar, seed=seed)
File "C:\ComfyUI_windows_portable\ComfyUI\comfy\samplers.py", line 733, in sample
samples = getattr(k_diffusion_sampling, "sample_{}".format(self.sampler))(self.model_k, noise, sigmas, extra_args=extra_args, callback=k_callback, disable=disable_pbar)
File "C:\ComfyUI_windows_portable\python_embeded\lib\site-packages\torch\utils_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "C:\ComfyUI_windows_portable\ComfyUI\comfy\k_diffusion\sampling.py", line 580, in sample_dpmpp_2m
denoised = model(x, sigmas[i] * s_in, **extra_args)
File "C:\ComfyUI_windows_portable\python_embeded\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "C:\ComfyUI_windows_portable\ComfyUI\comfy\samplers.py", line 323, in forward
out = self.inner_model(x, sigma, cond=cond, uncond=uncond, cond_scale=cond_scale, cond_concat=cond_concat, model_options=model_options, seed=seed)
File "C:\ComfyUI_windows_portable\python_embeded\lib\site-packages\torch\nn\modules\module.py", line 1501, in call_impl
return forward_call(*args, **kwargs)
File "C:\ComfyUI_windows_portable\ComfyUI\comfy\k_diffusion\external.py", line 125, in forward
eps = self.get_eps(input * c_in, self.sigma_to_t(sigma), **kwargs)
File "C:\ComfyUI_windows_portable\ComfyUI\comfy\k_diffusion\external.py", line 151, in get_eps
return self.inner_model.apply_model(*args, **kwargs)
File "C:\ComfyUI_windows_portable\ComfyUI\comfy\samplers.py", line 311, in apply_model
out = sampling_function(self.inner_model.apply_model, x, timestep, uncond, cond, cond_scale, cond_concat, model_options=model_options, seed=seed)
File "C:\ComfyUI_windows_portable\ComfyUI\comfy\samplers.py", line 289, in sampling_function
cond, uncond = calc_cond_uncond_batch(model_function, cond, uncond, x, timestep, max_total_area, cond_concat, model_options)
File "C:\ComfyUI_windows_portable\ComfyUI\comfy\samplers.py", line 263, in calc_cond_uncond_batch
output = model_function(input_x, timestep_, **c).chunk(batch_chunks)
File "C:\ComfyUI_windows_portable\ComfyUI\comfy\model_base.py", line 61, in apply_model
return self.diffusion_model(xc, t, context=context, y=c_adm, control=control, transformer_options=transformer_options).float()
File "C:\ComfyUI_windows_portable\python_embeded\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "C:\ComfyUI_windows_portable\ComfyUI\comfy\ldm\modules\diffusionmodules\openaimodel.py", line 626, in forward
h = forward_timestep_embed(module, h, emb, context, transformer_options)
File "C:\ComfyUI_windows_portable\ComfyUI\comfy\ldm\modules\diffusionmodules\openaimodel.py", line 56, in forward_timestep_embed
x = layer(x, context, transformer_options)
File "C:\ComfyUI_windows_portable\python_embeded\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "C:\ComfyUI_windows_portable\ComfyUI\comfy\ldm\modules\attention.py", line 696, in forward
x = block(x, context=context[i], transformer_options=transformer_options)
File "C:\ComfyUI_windows_portable\python_embeded\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "C:\ComfyUI_windows_portable\ComfyUI\comfy\ldm\modules\attention.py", line 528, in forward
return checkpoint(self._forward, (x, context, transformer_options), self.parameters(), self.checkpoint)
File "C:\ComfyUI_windows_portable\ComfyUI\comfy\ldm\modules\diffusionmodules\util.py", line 123, in checkpoint
return func(*inputs)
File "C:\ComfyUI_windows_portable\ComfyUI\comfy\ldm\modules\attention.py", line 625, in _forward
n = attn2_replace_patch[block_attn2](n, context_attn2, value_attn2, extra_options)
File "C:\ComfyUI_windows_portable\ComfyUI\custom_nodes\IPAdapter-ComfyUI\ip_adapter.py", line 166, in forward
ip_out = torch.nn.functional.scaled_dot_product_attention(q, ip_k, ip_v, attn_mask=None, dropout_p=0.0, is_causal=False)
```

@testFaze

I think I have found a workaround: apply a T2I style model to your negative prompt conditioning as well. You can set its weight to zero so it does not actually affect the result. I assume this just reshapes an array so the two conditionings match, and there is no doubt a more elegant fix possible in the code, but it works.
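For illustration, here is a minimal, hypothetical sketch in plain PyTorch (not actual ComfyUI or IPAdapter-ComfyUI code) of the kind of mismatch this workaround seems to compensate for: if a style model appends extra tokens to the positive conditioning only, the positive and negative contexts end up with different lengths; applying the style model to the negative conditioning too, even at zero weight, makes the shapes line up again. The token counts and embedding dimension below are assumptions.

```python
import torch

# Hypothetical sizes: 77 CLIP prompt tokens, 8 extra tokens appended by a style model.
positive_ctx = torch.randn(1, 77 + 8, 768)  # prompt tokens + appended style tokens
negative_ctx = torch.randn(1, 77, 768)      # prompt tokens only

# Batching positive and negative conditioning into one tensor fails
# when the token counts differ.
try:
    torch.cat([positive_ctx, negative_ctx], dim=0)
except RuntimeError as err:
    print("cannot batch cond/uncond:", err)

# Workaround analogue: give the negative branch the same number of extra tokens
# (zero-weighted, so they contribute nothing); now the contexts batch cleanly.
negative_ctx_padded = torch.cat([negative_ctx, torch.zeros(1, 8, 768)], dim=1)
batched = torch.cat([positive_ctx, negative_ctx_padded], dim=0)
print(batched.shape)  # torch.Size([2, 85, 768])
```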
