diff --git a/comfyui_custom_nodes/webui_io.py b/comfyui_custom_nodes/webui_io.py
index 6a333392..65911343 100644
--- a/comfyui_custom_nodes/webui_io.py
+++ b/comfyui_custom_nodes/webui_io.py
@@ -39,7 +39,7 @@ def INPUT_TYPES(cls):
     OUTPUT_NODE = True
 
     def set_images(self, images):
-        global_state.node_outputs += images.permute(0, 3, 1, 2)
+        global_state.node_outputs += [images.permute(0, 3, 1, 2)]
         return []
 
 
@@ -78,7 +78,7 @@ def INPUT_TYPES(cls):
 
     def set_images(self, latents):
         latent_format = get_comfy_model_config().latent_format
-        global_state.node_outputs += latent_format.process_in(latents['samples'].to('cpu'))
+        global_state.node_outputs += [latent_format.process_in(latents['samples'].to('cpu'))]
         return []
 
 
diff --git a/lib_comfyui/comfyui/iframe_requests.py b/lib_comfyui/comfyui/iframe_requests.py
index 057731da..d3321cce 100644
--- a/lib_comfyui/comfyui/iframe_requests.py
+++ b/lib_comfyui/comfyui/iframe_requests.py
@@ -4,7 +4,7 @@
 import traceback
 import torch
 from queue import Empty
-from typing import List
+from typing import List, Any
 from lib_comfyui import ipc, global_state, torch_utils, external_code
 from lib_comfyui.comfyui import queue_tracker
 
@@ -37,17 +37,17 @@ def send(request_params):
     @staticmethod
     @ipc.restrict_to_process('webui')
     def start_workflow_sync(
-        batch_input: List[torch.Tensor],
+        batch_input: torch.Tensor,
         workflow_type_id: str,
         queue_front: bool,
-    ):
+    ) -> List[torch.Tensor]:
         from modules import shared
         if shared.state.interrupted:
-            return batch_input
+            return [batch_input]
 
         if is_default_workflow(workflow_type_id):
             print('[sd-webui-comfyui]', f'Skipping workflow {workflow_type_id} because it is empty.')
-            return batch_input
+            return [batch_input]
 
         global_state.node_inputs = batch_input
         global_state.node_outputs = []
diff --git a/lib_comfyui/external_code/api.py b/lib_comfyui/external_code/api.py
index 4c9fa828..3890c79d 100644
--- a/lib_comfyui/external_code/api.py
+++ b/lib_comfyui/external_code/api.py
@@ -195,7 +195,7 @@ def run_workflow(
     workflow_type_id = candidate_ids[0]
 
     if not (getattr(global_state, 'enable', True) and getattr(global_state, 'enabled_workflow_type_ids', {}).get(workflow_type_id, False)):
-        return batch_input
+        return [batch_input]
 
     if queue_front is None:
         queue_front = getattr(global_state, 'queue_front', True)
diff --git a/lib_comfyui/webui/workflow_patcher.py b/lib_comfyui/webui/workflow_patcher.py
index 00eb0646..8121a475 100644
--- a/lib_comfyui/webui/workflow_patcher.py
+++ b/lib_comfyui/webui/workflow_patcher.py
@@ -38,8 +38,7 @@ def sample_img2img_hijack(p, x, *args, original_function, **kwargs):
         tab='img2img',
         batch_input=x.to(device='cpu'),
     )
-    x = torch.stack(processed_x).to(device=x.device) if isinstance(processed_x, list) else processed_x.to(device=x.device)
-    return original_function(p, x, *args, **kwargs)
+    return original_function(p, processed_x[0].to(device=x.device), *args, **kwargs)
 
 
 @ipc.restrict_to_process('webui')
@@ -65,7 +64,7 @@ def p_sample_patch(*args, original_function, is_img2img, **kwargs):
         tab='img2img' if is_img2img else 'txt2img',
         batch_input=x.to(device='cpu'),
     )
-    return torch.stack(processed_x).to(device=x.device) if isinstance(processed_x, list) else processed_x.to(device=x.device)
+    return processed_x[0].to(device=x.device)
 
 
 def p_img2img_init(*args, original_function, p_ref, **kwargs):
@@ -74,5 +73,5 @@ def p_img2img_init(*args, original_function, p_ref, **kwargs):
         tab='img2img',
         batch_input=[F.pil_to_tensor(image) / 255 for image in p_ref.init_images],
     )
-    p_ref.init_images = [F.to_pil_image(image_tensor) for image_tensor in preprocessed_images]
+    p_ref.init_images = [F.to_pil_image(image_tensor) for image_tensor in preprocessed_images[0]]
     return original_function(*args, **kwargs)
diff --git a/scripts/comfyui.py b/scripts/comfyui.py
index 24f0cf91..283e24b8 100644
--- a/scripts/comfyui.py
+++ b/scripts/comfyui.py
@@ -63,13 +63,11 @@ def postprocess_batch_list(self, p, pp, *args, **kwargs):
             batch_input=list(pp.images),
         )
 
-        batch_size_factor = max(1, len(batch_results) // len(pp.images))
-
         for list_to_scale in [p.prompts, p.negative_prompts, p.seeds, p.subseeds]:
-            list_to_scale[:] = list_to_scale * batch_size_factor
+            list_to_scale[:] = list_to_scale * len(batch_results)
 
         pp.images.clear()
-        pp.images.extend(batch_results)
+        pp.images.extend(image for batch in batch_results for image in batch)
 
         iframe_requests.extend_infotext_with_comfyui_workflows(p, self.get_tab())
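
For callers outside this diff, the practical change is that `external_code.run_workflow` now always returns a list of output batches (one `torch.Tensor` per output node reached by the workflow) rather than a bare tensor or flat batch. A minimal sketch of the new calling convention, assuming a previously registered workflow type held in `my_workflow_type`; that name, the input shape, and the `workflow_type=` keyword are illustrative assumptions, while `tab=` and `batch_input=` mirror the patched call sites above:

    import torch
    from lib_comfyui import external_code

    # Hypothetical batch of 2 RGB images, NCHW layout.
    x = torch.rand(2, 3, 512, 512)

    # run_workflow now returns List[torch.Tensor]: one batch per output node.
    results = external_code.run_workflow(
        workflow_type=my_workflow_type,  # assumed registered elsewhere (hypothetical)
        tab='txt2img',
        batch_input=x.to(device='cpu'),
    )

    # Callers index the output node they want instead of stacking the result,
    # mirroring the patched call sites (e.g. processed_x[0].to(device=x.device)).
    x = results[0].to(device=x.device)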