diff --git a/fooocusapi/api_utils.py b/fooocusapi/api_utils.py
index 661daa2..3fa9a14 100644
--- a/fooocusapi/api_utils.py
+++ b/fooocusapi/api_utils.py
@@ -130,46 +130,37 @@ def req_to_params(req: Text2ImgRequest) -> ImageGenerationParams:
         print(f"[Warning] Wrong inpaint_engine input: {adp.inpaint_engine}, using default")
         adp.inpaint_engine = default_inpaint_engine_version
 
-    advanced_params = [
-        adp.disable_preview, adp.adm_scaler_positive, adp.adm_scaler_negative, adp.adm_scaler_end, adp.adaptive_cfg, adp.sampler_name, \
-        adp.scheduler_name, False, adp.overwrite_step, adp.overwrite_switch, adp.overwrite_width, adp.overwrite_height, \
-        adp.overwrite_vary_strength, adp.overwrite_upscale_strength, \
-        adp.mixing_image_prompt_and_vary_upscale, adp.mixing_image_prompt_and_inpaint, \
-        adp.debugging_cn_preprocessor, adp.skipping_cn_preprocessor, adp.controlnet_softness, adp.canny_low_threshold, adp.canny_high_threshold, \
-        adp.refiner_swap_method, \
-        adp.freeu_enabled, adp.freeu_b1, adp.freeu_b2, adp.freeu_s1, adp.freeu_s2, \
-        adp.debugging_inpaint_preprocessor, adp.inpaint_disable_initial_latent, adp.inpaint_engine, adp.inpaint_strength, adp.inpaint_respective_field, \
-        False, adp.invert_mask_checkbox, adp.inpaint_erode_or_dilate
-    ]
-
-    return ImageGenerationParams(prompt=prompt,
-                                 negative_prompt=negative_prompt,
-                                 style_selections=style_selections,
-                                 performance_selection=performance_selection,
-                                 aspect_ratios_selection=aspect_ratios_selection,
-                                 image_number=image_number,
-                                 image_seed=image_seed,
-                                 sharpness=sharpness,
-                                 guidance_scale=guidance_scale,
-                                 base_model_name=base_model_name,
-                                 refiner_model_name=refiner_model_name,
-                                 refiner_switch=refiner_switch,
-                                 loras=loras,
-                                 uov_input_image=uov_input_image,
-                                 uov_method=uov_method,
-                                 upscale_value=upscale_value,
-                                 outpaint_selections=outpaint_selections,
-                                 outpaint_distance_left=outpaint_distance_left,
-                                 outpaint_distance_right=outpaint_distance_right,
-                                 outpaint_distance_top=outpaint_distance_top,
-                                 outpaint_distance_bottom=outpaint_distance_bottom,
-                                 inpaint_input_image=inpaint_input_image,
-                                 inpaint_additional_prompt=inpaint_additional_prompt,
-                                 image_prompts=image_prompts,
-                                 advanced_params=advanced_params,
-                                 save_extension=req.save_extension,
-                                 require_base64=req.require_base64,
-                                 )
+    advanced_params = adp
+
+    return ImageGenerationParams(
+        prompt=prompt,
+        negative_prompt=negative_prompt,
+        style_selections=style_selections,
+        performance_selection=performance_selection,
+        aspect_ratios_selection=aspect_ratios_selection,
+        image_number=image_number,
+        image_seed=image_seed,
+        sharpness=sharpness,
+        guidance_scale=guidance_scale,
+        base_model_name=base_model_name,
+        refiner_model_name=refiner_model_name,
+        refiner_switch=refiner_switch,
+        loras=loras,
+        uov_input_image=uov_input_image,
+        uov_method=uov_method,
+        upscale_value=upscale_value,
+        outpaint_selections=outpaint_selections,
+        outpaint_distance_left=outpaint_distance_left,
+        outpaint_distance_right=outpaint_distance_right,
+        outpaint_distance_top=outpaint_distance_top,
+        outpaint_distance_bottom=outpaint_distance_bottom,
+        inpaint_input_image=inpaint_input_image,
+        inpaint_additional_prompt=inpaint_additional_prompt,
+        image_prompts=image_prompts,
+        advanced_params=advanced_params,
+        save_extension=req.save_extension,
+        require_base64=req.require_base64,
+    )
 
 
 def generate_async_output(task: QueueTask, require_step_preview: bool = False) -> AsyncJobResponse:
@@ -213,7 +204,7 @@ def generate_image_result_output(results: List[ImageGenerationResult], require_b
     results = [GeneratedImageResult(
         base64=output_file_to_base64img(item.im) if require_base64 else None,
         url=get_file_serve_url(item.im),
-        seed=item.seed,
+        seed=str(item.seed),
         finish_reason=item.finish_reason) for item in results]
     return results
 
diff --git a/fooocusapi/base_args.py b/fooocusapi/base_args.py
index c190f3c..e0071fe 100644
--- a/fooocusapi/base_args.py
+++ b/fooocusapi/base_args.py
@@ -11,7 +11,7 @@ def add_base_args(parser: ArgumentParser, before_prepared: bool):
     parser.add_argument("--sync-repo", default=None, help="Sync dependent git repositories to local, 'skip' for skip sync action, 'only' for only do the sync action and not launch app")
     parser.add_argument("--skip-pip", default=False, action="store_true", help="Skip automatic pip install when setup")
     parser.add_argument("--preload-pipeline", default=False, action="store_true", help="Preload pipeline before start http server")
-    parser.add_argument("--queue-size", type=int, default=100, help="Working queue size, default: 3, generation requests exceeding working queue size will return failure")
+    parser.add_argument("--queue-size", type=int, default=100, help="Working queue size, default: 100, generation requests exceeding working queue size will return failure")
     parser.add_argument("--queue-history", type=int, default=0, help="Finished jobs reserve size, tasks exceeding the limit will be deleted, including output image files, default: 0, means no limit")
     parser.add_argument('--webhook-url', type=str, default=None, help='The URL to send a POST request when a job is finished')
     parser.add_argument('--persistent', default=False, action="store_true", help="Store history to db")
diff --git a/fooocusapi/models.py b/fooocusapi/models.py
index 7b9383f..c2055e8 100644
--- a/fooocusapi/models.py
+++ b/fooocusapi/models.py
@@ -24,6 +24,7 @@ class Lora(BaseModel):
+    enabled: bool
     model_name: str
     weight: float = Field(default=0.5, ge=-2, le=2)
 
 
@@ -33,7 +34,7 @@ class Lora(BaseModel):
 
 LoraList = TypeAdapter(List[Lora])
 
-default_loras_model = [Lora(model_name=lora[0], weight=lora[1]) for lora in default_loras if lora[0] != 'None']
+default_loras_model = [Lora(enabled=lora[0], model_name=lora[1], weight=lora[2]) for lora in default_loras if lora[0] != 'None']
 
 default_loras_json = LoraList.dump_json(default_loras_model)
 
@@ -74,15 +75,16 @@ class ImagePrompt(BaseModel):
 
 class AdvancedParams(BaseModel):
     disable_preview: bool = Field(False, description="Disable preview during generation")
+    disable_intermediate_results: bool = Field(False, description="Disable intermediate results")
+    disable_seed_increment: bool = Field(False, description="Disable Seed Increment")
     adm_scaler_positive: float = Field(1.5, description="Positive ADM Guidance Scaler", ge=0.1, le=3.0)
     adm_scaler_negative: float = Field(0.8, description="Negative ADM Guidance Scaler", ge=0.1, le=3.0)
     adm_scaler_end: float = Field(0.3, description="ADM Guidance End At Step", ge=0.0, le=1.0)
-    refiner_swap_method: str = Field('joint', description="Refiner swap method")
     adaptive_cfg: float = Field(7.0, description="CFG Mimicking from TSNR", ge=1.0, le=30.0)
     sampler_name: str = Field(default_sampler, description="Sampler")
     scheduler_name: str = Field(default_scheduler, description="Scheduler")
     overwrite_step: int = Field(-1, description="Forced Overwrite of Sampling Step", ge=-1, le=200)
-    overwrite_switch: int = Field(-1, description="Forced Overwrite of Refiner Switch Step", ge=-1, le=200)
+    overwrite_switch: float = Field(-1, description="Forced Overwrite of Refiner Switch Step", ge=-1, le=1)
     overwrite_width: int = Field(-1, description="Forced Overwrite of Generating Width", ge=-1, le=2048)
     overwrite_height: int = Field(-1, description="Forced Overwrite of Generating Height", ge=-1, le=2048)
     overwrite_vary_strength: float = Field(-1, description='Forced Overwrite of Denoising Strength of "Vary"', ge=-1, le=1.0)
@@ -91,9 +93,10 @@ class AdvancedParams(BaseModel):
     mixing_image_prompt_and_inpaint: bool = Field(False, description="Mixing Image Prompt and Inpaint")
     debugging_cn_preprocessor: bool = Field(False, description="Debug Preprocessors")
     skipping_cn_preprocessor: bool = Field(False, description="Skip Preprocessors")
-    controlnet_softness: float = Field(0.25, description="Softness of ControlNet", ge=0.0, le=1.0)
     canny_low_threshold: int = Field(64, description="Canny Low Threshold", ge=1, le=255)
     canny_high_threshold: int = Field(128, description="Canny High Threshold", ge=1, le=255)
+    refiner_swap_method: str = Field('joint', description="Refiner swap method")
+    controlnet_softness: float = Field(0.25, description="Softness of ControlNet", ge=0.0, le=1.0)
     freeu_enabled: bool = Field(False, description="FreeU enabled")
     freeu_b1: float = Field(1.01, description="FreeU B1")
     freeu_b2: float = Field(1.02, description="FreeU B2")
@@ -104,6 +107,7 @@ class AdvancedParams(BaseModel):
     inpaint_engine: str = Field('v1', description="Inpaint Engine")
     inpaint_strength: float = Field(1.0, description="Inpaint Denoising Strength", ge=0.0, le=1.0)
     inpaint_respective_field: float = Field(1.0, description="Inpaint Respective Field", ge=0.0, le=1.0)
+    inpaint_mask_upload_checkbox: bool = Field(False, description="Upload Mask")
     invert_mask_checkbox: bool = Field(False, description="Invert Mask")
     inpaint_erode_or_dilate: int = Field(0, description="Mask Erode or Dilate", ge=-64, le=64)
 
@@ -124,6 +128,7 @@ class Text2ImgRequest(BaseModel):
     loras: List[Lora] = Field(default=default_loras_model)
     advanced_params: AdvancedParams | None = AdvancedParams()
     save_extension: str = Field(default='png', description="Save extension, one of [png, jpg, webp]")
+    read_wildcards_in_order: bool = Field(default=False, description="Read wildcards in order")
     require_base64: bool = Field(default=False, description="Return base64 data of generated image")
     async_process: bool = Field(default=False, description="Set to true will run async and return job info for retrieve generataion result later")
     webhook_url: str | None = Field(default='', description="Optional URL for a webhook callback. If provided, the system will send a POST request to this URL upon task completion or failure."
@@ -152,15 +157,13 @@ def lora_parser(loras: str) -> List[Lora]:
         raise RequestValidationError(errors=[errs])
 
 
 def advanced_params_parser(advanced_params: str | None) -> AdvancedParams:
-    advanced_params_obj = None
     if advanced_params is not None and len(advanced_params) > 0:
         try:
             advanced_params_obj = AdvancedParams.__pydantic_validator__.validate_json(advanced_params)
             return advanced_params_obj
-        except ValidationError as ve:
-            errs = ve.errors()
-            raise RequestValidationError(errors=[errs])
-    return advanced_params_obj
+        except ValidationError:
+            return AdvancedParams()
+    return AdvancedParams()
 
 
 def oupaint_selections_parser(outpaint_selections: str) -> List[OutpaintExpansion]:
     outpaint_selections_arr: List[OutpaintExpansion] = []
diff --git a/fooocusapi/repositories_versions.py b/fooocusapi/repositories_versions.py
index ecc5cc8..b216ea4 100644
--- a/fooocusapi/repositories_versions.py
+++ b/fooocusapi/repositories_versions.py
@@ -1,5 +1,5 @@
 import os
 
-fooocus_version = '2.1.860'
+fooocus_version = '2.3.0'
 fooocus_commit_hash = os.environ.get(
-    'FOOOCUS_COMMIT_HASH', "624f74a1ed78ea09467c856cef35aeee0af863f6")
+    'FOOOCUS_COMMIT_HASH', "e9bc5e50c6a9e9502e822d308cb370883c4ef202")
diff --git a/fooocusapi/worker.py b/fooocusapi/worker.py
index f6e3e68..0c596eb 100644
--- a/fooocusapi/worker.py
+++ b/fooocusapi/worker.py
@@ -1,15 +1,19 @@
 import copy
+import os
 import random
 import time
 import numpy as np
 import torch
-import re
 import logging
 from typing import List
 
 from fooocusapi.file_utils import save_output_file
 from fooocusapi.parameters import GenerationFinishReason, ImageGenerationResult
 from fooocusapi.task_queue import QueueTask, TaskQueue, TaskOutputs
+from modules.patch import PatchSettings, patch_settings, patch_all
+from modules.sdxl_styles import apply_arrays
+
+patch_all()
 
 worker_queue: TaskQueue = None
 last_model_name = None
@@ -64,38 +68,41 @@ def process_generate(async_task: QueueTask):
         print(f"[Task Queue] Finish task with error, seq={async_task.job_id}")
         return []
 
-    import modules.patch as patch
     import modules.flags as flags
     import modules.core as core
     import modules.inpaint_worker as inpaint_worker
     import modules.config as config
-    import modules.advanced_parameters as advanced_parameters
    import modules.constants as constants
    import extras.preprocessors as preprocessors
    import extras.ip_adapter as ip_adapter
    import extras.face_crop as face_crop
    import ldm_patched.modules.model_management as model_management
    from modules.util import remove_empty_str, resize_image, HWC3, set_image_shape_ceil, get_image_shape_ceil, get_shape_ceil, resample_image, erode_or_dilate
-    from modules.private_logger import log
    from modules.upscaler import perform_upscale
    from extras.expansion import safe_str
    from modules.sdxl_styles import apply_style, fooocus_expansion, apply_wildcards
-    import fooocus_version
+
+    pid = os.getpid()
 
    outputs = TaskOutputs(async_task)
    results = []
 
-    def refresh_seed(r, seed_string):
-        if r:
-            return random.randint(constants.MIN_SEED, constants.MAX_SEED)
-        else:
-            try:
-                seed_value = int(seed_string)
-                if constants.MIN_SEED <= seed_value <= constants.MAX_SEED:
-                    return seed_value
-            except ValueError:
-                pass
+    def refresh_seed(seed_string: int | str | None) -> int:
+        """
+        Refresh and check seed number.
+        :params seed_string: seed, str or int. None means random
+        :return: seed number
+        """
+        if seed_string is None or seed_string == -1:
             return random.randint(constants.MIN_SEED, constants.MAX_SEED)
+
+        try:
+            seed_value = int(seed_string)
+            if constants.MIN_SEED <= seed_value <= constants.MAX_SEED:
+                return seed_value
+        except ValueError:
+            pass
+        return random.randint(constants.MIN_SEED, constants.MAX_SEED)
 
    def progressbar(_, number, text):
        print(f'[Fooocus] {text}')
@@ -142,7 +149,9 @@ def yield_result(_, imgs, tasks, extension='png'):
    performance_selection = params.performance_selection
    aspect_ratios_selection = params.aspect_ratios_selection
    image_number = params.image_number
-    image_seed = None if params.image_seed == -1 else params.image_seed
+    save_extension = params.save_extension
+    image_seed = refresh_seed(params.image_seed)
+    read_wildcards_in_order = False
    sharpness = params.sharpness
    guidance_scale = params.guidance_scale
    base_model_name = params.base_model_name
@@ -160,28 +169,61 @@ def yield_result(_, imgs, tasks, extension='png'):
    outpaint_distance_right = params.outpaint_distance_right
    outpaint_distance_bottom = params.outpaint_distance_bottom
    inpaint_input_image = params.inpaint_input_image
-    inpaint_additional_prompt = params.inpaint_additional_prompt
+    inpaint_additional_prompt = '' if params.inpaint_additional_prompt is None else params.inpaint_additional_prompt
    inpaint_mask_image_upload = None
-    save_extension = params.save_extension
 
-    if inpaint_additional_prompt is None:
-        inpaint_additional_prompt = ''
-
-    image_seed = refresh_seed(image_seed is None, image_seed)
+    adp = params.advanced_params
+    disable_preview = adp.disable_preview
+    disable_intermediate_results = adp.disable_intermediate_results
+    disable_seed_increment = adp.disable_seed_increment
+    adm_scaler_positive = adp.adm_scaler_positive
+    adm_scaler_negative = adp.adm_scaler_negative
+    adm_scaler_end = adp.adm_scaler_end
+    adaptive_cfg = adp.adaptive_cfg
+    sampler_name = adp.sampler_name
+    scheduler_name = adp.scheduler_name
+    overwrite_step = adp.overwrite_step
+    overwrite_switch = adp.overwrite_switch
+    overwrite_width = adp.overwrite_width
+    overwrite_height = adp.overwrite_height
+    overwrite_vary_strength = adp.overwrite_vary_strength
+    overwrite_upscale_strength = adp.overwrite_upscale_strength
+    mixing_image_prompt_and_vary_upscale = adp.mixing_image_prompt_and_vary_upscale
+    mixing_image_prompt_and_inpaint = adp.mixing_image_prompt_and_inpaint
+    debugging_cn_preprocessor = adp.debugging_cn_preprocessor
+    skipping_cn_preprocessor = adp.skipping_cn_preprocessor
+    canny_low_threshold = adp.canny_low_threshold
+    canny_high_threshold = adp.canny_high_threshold
+    refiner_swap_method = adp.refiner_swap_method
+    controlnet_softness = adp.controlnet_softness
+    freeu_enabled = adp.freeu_enabled
+    freeu_b1 = adp.freeu_b1
+    freeu_b2 = adp.freeu_b2
+    freeu_s1 = adp.freeu_s1
+    freeu_s2 = adp.freeu_s2
+    debugging_inpaint_preprocessor = adp.debugging_inpaint_preprocessor
+    inpaint_disable_initial_latent = adp.inpaint_disable_initial_latent
+    inpaint_engine = adp.inpaint_engine
+    inpaint_strength = adp.inpaint_strength
+    inpaint_respective_field = adp.inpaint_respective_field
+    inpaint_mask_upload_checkbox = adp.inpaint_mask_upload_checkbox
+    invert_mask_checkbox = adp.invert_mask_checkbox
+    inpaint_erode_or_dilate = adp.inpaint_erode_or_dilate
+
+    save_metadata_to_images = False
+    metadata_scheme = 'fooocus'
 
    cn_tasks = {x: [] for x in flags.ip_list}
    for img_prompt in params.image_prompts:
        cn_img, cn_stop, cn_weight, cn_type = img_prompt
        cn_tasks[cn_type].append([cn_img, cn_stop, cn_weight])
 
-    advanced_parameters.set_all_advanced_parameters(*params.advanced_params)
-
    if inpaint_input_image is not None and inpaint_input_image['image'] is not None:
        inpaint_image_size = inpaint_input_image['image'].shape[:2]
        if inpaint_input_image['mask'] is None:
            inpaint_input_image['mask'] = np.zeros(inpaint_image_size, dtype=np.uint8)
        else:
-            advanced_parameters.inpaint_mask_upload_checkbox = True
+            inpaint_mask_upload_checkbox = True
            inpaint_input_image['mask'] = HWC3(inpaint_input_image['mask'])
        inpaint_mask_image_upload = inpaint_input_image['mask']
 
@@ -205,16 +247,12 @@ def yield_result(_, imgs, tasks, extension='png'):
        print(f'Refiner disabled because base model and refiner are same.')
        refiner_model_name = 'None'
 
-    assert performance_selection in ['Speed', 'Quality', 'Extreme Speed']
-
    steps = 30
 
    if performance_selection == 'Speed':
        steps = 30
-
    if performance_selection == 'Quality':
        steps = 60
-
    if performance_selection == 'Extreme Speed':
        print('Enter LCM mode.')
        progressbar(async_task, 1, 'Downloading LCM components ...')
@@ -224,30 +262,52 @@ def yield_result(_, imgs, tasks, extension='png'):
            print(f'Refiner disabled in LCM mode.')
 
        refiner_model_name = 'None'
-        sampler_name = advanced_parameters.sampler_name = 'lcm'
-        scheduler_name = advanced_parameters.scheduler_name = 'lcm'
-        patch.sharpness = sharpness = 0.0
-        cfg_scale = guidance_scale = 1.0
-        patch.adaptive_cfg = advanced_parameters.adaptive_cfg = 1.0
+        sampler_name = 'lcm'
+        scheduler_name = 'lcm'
+        sharpness = 0.0
+        guidance_scale = 1.0
+        adaptive_cfg = 1.0
        refiner_switch = 1.0
-        patch.positive_adm_scale = advanced_parameters.adm_scaler_positive = 1.0
-        patch.negative_adm_scale = advanced_parameters.adm_scaler_negative = 1.0
-        patch.adm_scaler_end = advanced_parameters.adm_scaler_end = 0.0
+        adm_scaler_positive = 1.0
+        adm_scaler_negative = 1.0
+        adm_scaler_end = 0.0
        steps = 8
 
-    patch.adaptive_cfg = advanced_parameters.adaptive_cfg
-    print(f'[Parameters] Adaptive CFG = {patch.adaptive_cfg}')
+    elif performance_selection == 'LIGHTNING':
+        print('Enter Lightning mode.')
+        progressbar(async_task, 1, 'Downloading Lightning components ...')
+        loras += [(config.downloading_sdxl_lightning_lora(), 1.0)]
 
-    patch.sharpness = sharpness
-    print(f'[Parameters] Sharpness = {patch.sharpness}')
+        if refiner_model_name != 'None':
+            print(f'Refiner disabled in Lightning mode.')
 
-    patch.positive_adm_scale = advanced_parameters.adm_scaler_positive
-    patch.negative_adm_scale = advanced_parameters.adm_scaler_negative
-    patch.adm_scaler_end = advanced_parameters.adm_scaler_end
+        refiner_model_name = 'None'
+        sampler_name = 'euler'
+        scheduler_name = 'sgm_uniform'
+        sharpness = 0.0
+        guidance_scale = 1.0
+        adaptive_cfg = 1.0
+        refiner_switch = 1.0
+        adm_scaler_positive = 1.0
+        adm_scaler_negative = 1.0
+        adm_scaler_end = 0.0
+
+    print(f'[Parameters] Adaptive CFG = {adaptive_cfg}')
+    print(f'[Parameters] Sharpness = {sharpness}')
+    print(f'[Parameters] ControlNet Softness = {controlnet_softness}')
    print(f'[Parameters] ADM Scale = '
-          f'{patch.positive_adm_scale} : '
-          f'{patch.negative_adm_scale} : '
-          f'{patch.adm_scaler_end}')
+          f'{adm_scaler_positive} : '
+          f'{adm_scaler_negative} : '
+          f'{adm_scaler_end}')
+
+    patch_settings[pid] = PatchSettings(
+        sharpness,
+        adm_scaler_end,
+        adm_scaler_positive,
+        adm_scaler_negative,
+        controlnet_softness,
+        adaptive_cfg
+    )
 
    cfg_scale = float(guidance_scale)
    print(f'[Parameters] CFG = {cfg_scale}')
@@ -256,21 +316,13 @@ def yield_result(_, imgs, tasks, extension='png'):
    denoising_strength = 1.0
    tiled = False
 
-    # Validate input format
-    if not aspect_ratios_selection.replace('*', ' ').replace(' ', '').isdigit():
-        raise ValueError("Invalid input format. Please enter aspect ratios in the form 'width*height'.")
-    width, height = aspect_ratios_selection.replace('*', '*').replace('*', ' ').split(' ')[:2]
-    # Validate width and height are integers
-    if not (width.isdigit() and height.isdigit()):
-        raise ValueError("Invalid width or height. Please enter valid integers.")
-
+    width, height = aspect_ratios_selection.replace('×', ' ').replace('*', ' ').split(' ')[:2]
    width, height = int(width), int(height)
 
    skip_prompt_processing = False
-    refiner_swap_method = advanced_parameters.refiner_swap_method
 
    inpaint_worker.current_task = None
-    inpaint_parameterized = advanced_parameters.inpaint_engine != 'None'
+    inpaint_parameterized = inpaint_engine != 'None'
    inpaint_image = None
    inpaint_mask = None
    inpaint_head_model_path = None
@@ -284,15 +336,12 @@ def yield_result(_, imgs, tasks, extension='png'):
    seed = int(image_seed)
    print(f'[Parameters] Seed = {seed}')
 
-    sampler_name = advanced_parameters.sampler_name
-    scheduler_name = advanced_parameters.scheduler_name
-
    goals = []
    tasks = []
 
    if input_image_checkbox:
        if (current_tab == 'uov' or (
-                current_tab == 'ip' and advanced_parameters.mixing_image_prompt_and_vary_upscale)) \
+                current_tab == 'ip' and mixing_image_prompt_and_vary_upscale)) \
                and uov_method != flags.disabled and uov_input_image is not None:
            uov_input_image = HWC3(uov_input_image)
            if 'vary' in uov_method:
@@ -303,37 +352,34 @@ def yield_result(_, imgs, tasks, extension='png'):
                    skip_prompt_processing = True
                else:
                    steps = 18
-
                    if performance_selection == 'Speed':
                        steps = 18
-
                    if performance_selection == 'Quality':
                        steps = 36
-
                    if performance_selection == 'Extreme Speed':
                        steps = 8
 
                progressbar(async_task, 1, 'Downloading upscale models ...')
                config.downloading_upscale_model()
        if (current_tab == 'inpaint' or (
-                current_tab == 'ip' and advanced_parameters.mixing_image_prompt_and_inpaint)) \
+                current_tab == 'ip' and mixing_image_prompt_and_inpaint)) \
                and isinstance(inpaint_input_image, dict):
            inpaint_image = inpaint_input_image['image']
            inpaint_mask = inpaint_input_image['mask'][:, :, 0]
 
-            if advanced_parameters.inpaint_mask_upload_checkbox:
+            if inpaint_mask_upload_checkbox:
                if isinstance(inpaint_mask_image_upload, np.ndarray):
                    if inpaint_mask_image_upload.ndim == 3:
                        H, W, C = inpaint_image.shape
                        inpaint_mask_image_upload = resample_image(inpaint_mask_image_upload, width=W, height=H)
                        inpaint_mask_image_upload = np.mean(inpaint_mask_image_upload, axis=2)
                        inpaint_mask_image_upload = (inpaint_mask_image_upload > 127).astype(np.uint8) * 255
-                    inpaint_mask = inpaint_mask_image_upload
+                    inpaint_mask = np.maximum(inpaint_mask, inpaint_mask_image_upload)
 
-            if int(advanced_parameters.inpaint_erode_or_dilate) != 0:
-                inpaint_mask = erode_or_dilate(inpaint_mask, advanced_parameters.inpaint_erode_or_dilate)
+            if int(inpaint_erode_or_dilate) != 0:
+                inpaint_mask = erode_or_dilate(inpaint_mask, inpaint_erode_or_dilate)
 
-            if advanced_parameters.invert_mask_checkbox:
+            if invert_mask_checkbox:
                inpaint_mask = 255 - inpaint_mask
 
            inpaint_image = HWC3(inpaint_image)
@@ -344,12 +390,12 @@ def yield_result(_, imgs, tasks, extension='png'):
            if inpaint_parameterized:
                progressbar(async_task, 1, 'Downloading inpainter ...')
                inpaint_head_model_path, inpaint_patch_model_path = config.downloading_inpaint_models(
-                    advanced_parameters.inpaint_engine)
+                    inpaint_engine)
                base_model_additional_loras += [(inpaint_patch_model_path, 1.0)]
                print(f'[Inpaint] Current inpaint model is {inpaint_patch_model_path}')
                if refiner_model_name == 'None':
                    use_synthetic_refiner = True
-                    refiner_switch = 0.5
+                    refiner_switch = 0.8
            else:
                inpaint_head_model_path, inpaint_patch_model_path = None, None
                print(f'[Inpaint] Parameterized inpaint is disabled.')
@@ -360,8 +406,8 @@ def yield_result(_, imgs, tasks, extension='png'):
                    prompt = inpaint_additional_prompt + '\n' + prompt
            goals.append('inpaint')
        if current_tab == 'ip' or \
-                advanced_parameters.mixing_image_prompt_and_inpaint or \
-                advanced_parameters.mixing_image_prompt_and_vary_upscale:
+                mixing_image_prompt_and_vary_upscale or \
+                mixing_image_prompt_and_inpaint:
            goals.append('cn')
            progressbar(async_task, 1, 'Downloading control models ...')
            if len(cn_tasks[flags.cn_canny]) > 0:
@@ -380,19 +426,19 @@ def yield_result(_, imgs, tasks, extension='png'):
                ip_adapter.load_ip_adapter(clip_vision_path, ip_negative_path, ip_adapter_path)
                ip_adapter.load_ip_adapter(clip_vision_path, ip_negative_path, ip_adapter_face_path)
 
-    switch = int(round(steps * refiner_switch))
+    if overwrite_step > 0:
+        steps = overwrite_step
 
-    if advanced_parameters.overwrite_step > 0:
-        steps = advanced_parameters.overwrite_step
+    switch = int(round(steps * refiner_switch))
 
-    if advanced_parameters.overwrite_switch > 0:
-        switch = advanced_parameters.overwrite_switch
+    if overwrite_switch > 0:
+        switch = overwrite_switch
 
-    if advanced_parameters.overwrite_width > 0:
-        width = advanced_parameters.overwrite_width
+    if overwrite_width > 0:
+        width = overwrite_width
 
-    if advanced_parameters.overwrite_height > 0:
-        height = advanced_parameters.overwrite_height
+    if overwrite_height > 0:
+        height = overwrite_height
 
    print(f'[Parameters] Sampler = {sampler_name} - {scheduler_name}')
    print(f'[Parameters] Steps = {steps} - {switch}')
@@ -421,14 +467,19 @@ def yield_result(_, imgs, tasks, extension='png'):
 
        progressbar(async_task, 3, 'Processing prompts ...')
        tasks = []
+
        for i in range(image_number):
-            task_seed = (seed + i) % (constants.MAX_SEED + 1)  # randint is inclusive, % is not
-            task_rng = random.Random(task_seed)  # may bind to inpaint noise in the future
+            if disable_seed_increment:
+                task_seed = seed % (constants.MAX_SEED + 1)
+            else:
+                task_seed = (seed + i) % (constants.MAX_SEED + 1)  # randint is inclusive, % is not
 
-            task_prompt = apply_wildcards(prompt, task_rng)
-            task_negative_prompt = apply_wildcards(negative_prompt, task_rng)
-            task_extra_positive_prompts = [apply_wildcards(pmt, task_rng) for pmt in extra_positive_prompts]
-            task_extra_negative_prompts = [apply_wildcards(pmt, task_rng) for pmt in extra_negative_prompts]
+            task_rng = random.Random(task_seed)  # may bind to inpaint noise in the future
+            task_prompt = apply_wildcards(prompt, task_rng, i, read_wildcards_in_order)
+            task_prompt = apply_arrays(task_prompt, i)
+            task_negative_prompt = apply_wildcards(negative_prompt, task_rng, i, read_wildcards_in_order)
+            task_extra_positive_prompts = [apply_wildcards(pmt, task_rng, i, read_wildcards_in_order) for pmt in extra_positive_prompts]
+            task_extra_negative_prompts = [apply_wildcards(pmt, task_rng, i, read_wildcards_in_order) for pmt in extra_negative_prompts]
 
            positive_basic_workloads = []
            negative_basic_workloads = []
@@ -491,8 +542,8 @@ def yield_result(_, imgs, tasks, extension='png'):
                denoising_strength = 0.5
            if 'strong' in uov_method:
                denoising_strength = 0.85
-            if advanced_parameters.overwrite_vary_strength > 0:
-                denoising_strength = advanced_parameters.overwrite_vary_strength
+            if overwrite_vary_strength > 0:
+                denoising_strength = overwrite_vary_strength
 
            shape_ceil = get_image_shape_ceil(uov_input_image)
            if shape_ceil < 1024:
@@ -526,15 +577,15 @@ def yield_result(_, imgs, tasks, extension='png'):
                uov_input_image = perform_upscale(uov_input_image)
            print(f'Image upscaled.')
 
-            f = 1.0
            if upscale_value is not None and upscale_value > 1.0:
                f = upscale_value
            else:
-                pattern = r"([0-9]+(?:\.[0-9]+)?)x"
-                matches = re.findall(pattern, uov_method)
-                if len(matches) > 0:
-                    f_tmp = float(matches[0])
-                    f = 1.0 if f_tmp < 1.0 else 5.0 if f_tmp > 5.0 else f_tmp
+                if '1.5x' in uov_method:
+                    f = 1.5
+                elif '2x' in uov_method:
+                    f = 2.0
+                else:
+                    f = 1.0
 
            shape_ceil = get_shape_ceil(H * f, W * f)
 
@@ -559,15 +610,15 @@ def yield_result(_, imgs, tasks, extension='png'):
 
            if direct_return:
                d = [('Upscale (Fast)', '2x')]
-                log(uov_input_image, d)
+                # log(uov_input_image, d, output_format=save_extension)
                yield_result(async_task, uov_input_image, tasks, save_extension)
                return
 
            tiled = True
            denoising_strength = 0.382
 
-            if advanced_parameters.overwrite_upscale_strength > 0:
-                denoising_strength = advanced_parameters.overwrite_upscale_strength
+            if overwrite_upscale_strength > 0:
+                denoising_strength = overwrite_upscale_strength
 
            initial_pixels = core.numpy_to_pytorch(uov_input_image)
            progressbar(async_task, 13, 'VAE encoding ...')
@@ -626,24 +677,23 @@ def yield_result(_, imgs, tasks, extension='png'):
 
                inpaint_image = np.pad(inpaint_image, [[0, 0], [0, distance_right], [0, 0]], mode='edge')
                inpaint_mask = np.pad(inpaint_mask, [[0, 0], [0, distance_right]], mode='constant', constant_values=255)
-
+
            inpaint_image = np.ascontiguousarray(inpaint_image.copy())
            inpaint_mask = np.ascontiguousarray(inpaint_mask.copy())
-            advanced_parameters.inpaint_strength = 1.0
-            advanced_parameters.inpaint_respective_field = 1.0
+            inpaint_strength = 1.0
+            inpaint_respective_field = 1.0
 
-        denoising_strength = advanced_parameters.inpaint_strength
+        denoising_strength = inpaint_strength
 
        inpaint_worker.current_task = inpaint_worker.InpaintWorker(
            image=inpaint_image,
            mask=inpaint_mask,
            use_fill=denoising_strength > 0.99,
-            k=advanced_parameters.inpaint_respective_field
+            k=inpaint_respective_field
        )
 
-        if advanced_parameters.debugging_inpaint_preprocessor:
-            yield_result(async_task, inpaint_worker.current_task.visualize_mask_processing(),
-                         do_not_show_finished_images=True)
+        if debugging_inpaint_preprocessor:
+            yield_result(async_task, inpaint_worker.current_task.visualize_mask_processing())
            return
 
        progressbar(async_task, 13, 'VAE Inpaint encoding ...')
@@ -687,7 +737,7 @@ def yield_result(_, imgs, tasks, extension='png'):
            model=pipeline.final_unet
        )
 
-        if not advanced_parameters.inpaint_disable_initial_latent:
+        if not inpaint_disable_initial_latent:
            initial_latent = {'samples': latent_fill}
 
        B, C, H, W = latent_fill.shape
@@ -700,24 +750,24 @@ def yield_result(_, imgs, tasks, extension='png'):
            cn_img, cn_stop, cn_weight = task
            cn_img = resize_image(HWC3(cn_img), width=width, height=height)
 
-            if not advanced_parameters.skipping_cn_preprocessor:
-                cn_img = preprocessors.canny_pyramid(cn_img)
+            if not skipping_cn_preprocessor:
+                cn_img = preprocessors.canny_pyramid(cn_img, canny_low_threshold, canny_high_threshold)
 
            cn_img = HWC3(cn_img)
            task[0] = core.numpy_to_pytorch(cn_img)
-            if advanced_parameters.debugging_cn_preprocessor:
+            if debugging_cn_preprocessor:
                yield_result(async_task, cn_img, tasks, save_extension)
                return
        for task in cn_tasks[flags.cn_cpds]:
            cn_img, cn_stop, cn_weight = task
            cn_img = resize_image(HWC3(cn_img), width=width, height=height)
 
-            if not advanced_parameters.skipping_cn_preprocessor:
+            if not skipping_cn_preprocessor:
                cn_img = preprocessors.cpds(cn_img)
 
            cn_img = HWC3(cn_img)
            task[0] = core.numpy_to_pytorch(cn_img)
-            if advanced_parameters.debugging_cn_preprocessor:
+            if debugging_cn_preprocessor:
                yield_result(async_task, cn_img, tasks, save_extension)
                return
        for task in cn_tasks[flags.cn_ip]:
@@ -728,21 +778,21 @@ def yield_result(_, imgs, tasks, extension='png'):
            cn_img = resize_image(cn_img, width=224, height=224, resize_mode=0)
 
            task[0] = ip_adapter.preprocess(cn_img, ip_adapter_path=ip_adapter_path)
-            if advanced_parameters.debugging_cn_preprocessor:
+            if debugging_cn_preprocessor:
                yield_result(async_task, cn_img, tasks, save_extension)
                return
        for task in cn_tasks[flags.cn_ip_face]:
            cn_img, cn_stop, cn_weight = task
            cn_img = HWC3(cn_img)
 
-            if not advanced_parameters.skipping_cn_preprocessor:
+            if not skipping_cn_preprocessor:
                cn_img = face_crop.crop_image(cn_img)
 
            # https://github.com/tencent-ailab/IP-Adapter/blob/d580c50a291566bbf9fc7ac0f760506607297e6d/README.md?plain=1#L75
            cn_img = resize_image(cn_img, width=224, height=224, resize_mode=0)
 
            task[0] = ip_adapter.preprocess(cn_img, ip_adapter_path=ip_adapter_face_path)
-            if advanced_parameters.debugging_cn_preprocessor:
+            if debugging_cn_preprocessor:
                yield_result(async_task, cn_img, tasks, save_extension)
                return
 
@@ -751,14 +801,14 @@ def yield_result(_, imgs, tasks, extension='png'):
        if len(all_ip_tasks) > 0:
            pipeline.final_unet = ip_adapter.patch_model(pipeline.final_unet, all_ip_tasks)
 
-    if advanced_parameters.freeu_enabled:
+    if freeu_enabled:
        print(f'FreeU is enabled!')
        pipeline.final_unet = core.apply_freeu(
            pipeline.final_unet,
-            advanced_parameters.freeu_b1,
-            advanced_parameters.freeu_b2,
-            advanced_parameters.freeu_s1,
-            advanced_parameters.freeu_s2
+            freeu_b1,
+            freeu_b2,
+            freeu_s1,
+            freeu_s2
        )
 
    all_steps = steps * image_number
@@ -832,40 +882,14 @@ def callback(step, x0, x, total_steps, y):
                    denoise=denoising_strength,
                    tiled=tiled,
                    cfg_scale=cfg_scale,
-                    refiner_swap_method=refiner_swap_method
+                    refiner_swap_method=refiner_swap_method,
+                    disable_preview=disable_preview
                )
 
                del task['c'], task['uc'], positive_cond, negative_cond  # Save memory
 
                if inpaint_worker.current_task is not None:
                    imgs = [inpaint_worker.current_task.post_process(x) for x in imgs]
-
-                for x in imgs:
-                    d = [
-                        ('Prompt', task['log_positive_prompt']),
-                        ('Negative Prompt', task['log_negative_prompt']),
-                        ('Fooocus V2 Expansion', task['expansion']),
-                        ('Styles', str(raw_style_selections)),
-                        ('Performance', performance_selection),
-                        ('Resolution', str((width, height))),
-                        ('Sharpness', sharpness),
-                        ('Guidance Scale', guidance_scale),
-                        ('ADM Guidance', str((
-                            patch.positive_adm_scale,
-                            patch.negative_adm_scale,
-                            patch.adm_scaler_end))),
-                        ('Base Model', base_model_name),
-                        ('Refiner Model', refiner_model_name),
-                        ('Refiner Switch', refiner_switch),
-                        ('Sampler', sampler_name),
-                        ('Scheduler', scheduler_name),
-                        ('Seed', task['task_seed']),
-                    ]
-                    for n, w in loras:
-                        if n != 'None':
-                            d.append((f'LoRA', f'{n} : {w}'))
-                    d.append(('Version', 'v' + fooocus_version.version))
-                    log(x, d)
 
                # Fooocus async_worker.py code end
 
diff --git a/main.py b/main.py
index a775591..03413c2 100644
--- a/main.py
+++ b/main.py
@@ -211,8 +211,8 @@ def download_models():
    ]
 
    from modules.model_loader import load_file_from_url
-    from modules.config import (path_checkpoints as modelfile_path,
-                                path_loras as lorafile_path,
+    from modules.config import (paths_checkpoints as modelfile_path,
+                                paths_loras as lorafile_path,
                                path_vae_approx as vae_approx_path,
                                path_fooocus_expansion as fooocus_expansion_path,
                                checkpoint_downloads,
@@ -220,11 +220,11 @@ def download_models():
                                embeddings_downloads, lora_downloads)
 
    for file_name, url in checkpoint_downloads.items():
-        load_file_from_url(url=url, model_dir=modelfile_path, file_name=file_name)
+        load_file_from_url(url=url, model_dir=modelfile_path[0], file_name=file_name)
    for file_name, url in embeddings_downloads.items():
        load_file_from_url(url=url, model_dir=embeddings_path, file_name=file_name)
    for file_name, url in lora_downloads.items():
-        load_file_from_url(url=url, model_dir=lorafile_path, file_name=file_name)
+        load_file_from_url(url=url, model_dir=lorafile_path[0], file_name=file_name)
    for file_name, url in vae_approx_filenames:
        load_file_from_url(url=url, model_dir=vae_approx_path, file_name=file_name)
 
@@ -247,6 +247,11 @@ def install_dependents(args):
    if not is_installed("torch") or not is_installed("torchvision"):
        print(f"torch_index_url: {torch_index_url}")
        run_pip(f"install torch==2.1.0 torchvision==0.16.0 --extra-index-url {torch_index_url}", "torch")
+    else:
+        import torch
+        if not torch.cuda.is_available():
+            print("Your torch installation does not have CUDA support. Application will not work well.")
+            print(f"try execute 'pip install torch==2.1.0 torchvision==0.16.0 --extra-index-url {torch_index_url}'")
 
    if args.persistent and not is_installed("sqlalchemy"):
        run_pip(f"install sqlalchemy==2.0.25", "sqlalchemy")
diff --git a/requirements.txt b/requirements.txt
index 5b12d46..e1d4d23 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -18,4 +18,9 @@ fastapi==0.103.1
 pydantic==2.4.2
 pydantic_core==2.10.1
 python-multipart==0.0.6
-uvicorn[standard]==0.23.2
\ No newline at end of file
+uvicorn[standard]==0.23.2
+colorlog
+requests
+numpy
+sqlalchemy
+packaging
\ No newline at end of file
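For reference, a minimal usage sketch (not part of the patch): it builds a request with the Pydantic models from `fooocusapi/models.py` as they read after this diff, exercising the new `Lora.enabled` flag, the `read_wildcards_in_order` field, and the `AdvancedParams` object that `req_to_params` now forwards unchanged instead of flattening into a positional list. Field names come from the diff; the `prompt` field and all values are illustrative assumptions.

```python
# Illustrative sketch only -- assumes the post-patch fooocusapi.models definitions above.
from fooocusapi.models import AdvancedParams, Lora, Text2ImgRequest

req = Text2ImgRequest(
    prompt="a forest cabin at dusk",            # field assumed from the existing request model
    read_wildcards_in_order=False,              # new Text2ImgRequest field added by this patch
    loras=[Lora(enabled=True,                   # Lora now carries an explicit enabled flag
                model_name="sd_xl_offset_example-lora_1.0.safetensors",
                weight=0.1)],
    advanced_params=AdvancedParams(
        disable_seed_increment=True,            # new field: every image in the batch reuses the seed
        overwrite_switch=0.75,                  # now a float ratio bounded by le=1, not a step count
        inpaint_mask_upload_checkbox=False,     # new field surfaced from the Fooocus 2.3.0 pipeline
    ),
)

# req.advanced_params reaches ImageGenerationParams as-is (advanced_params = adp),
# so worker.py reads attributes such as adp.disable_seed_increment directly.
print(req.model_dump_json(indent=2))
```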