diff --git a/README.md b/README.md
index 9e9babf..a52b83f 100644
--- a/README.md
+++ b/README.md
@@ -104,6 +104,22 @@ You can preview and download more examples [here](examples/README.md).
 
 ## Recent Changelog
 
+### Nov 30, 2024
+* New Nodes:
+  * `Griptape Driver: Black Forest Labs Image Generation` - Now generate images with the incredible Flux models - `flux-pro-1.1`, `flux-pro`, `flux-dev`, and `flux-pro-1.1-ultra`.
+    - Requires an API key from Black Forest Labs (https://docs.bfl.ml/)
+    - Utilizes the new Griptape extension: https://github.com/griptape-ai/griptape-black-forest
+
+    ![Black Forest Labs - Create Image](examples/griptape_black_forest_labs_create_image.png)
+
+    - It also works with the `Griptape Create: Image Variation` node.
+
+    ![Black Forest Labs - Image Variation](examples/griptape_black_forest_labs_create_variation.png)
+
+  * `Griptape Create: Image Inpainting Variation` - New node in the Griptape -> Image menu. Lets you paint a mask and replace that part of the image.
+
+    ![Black Forest Labs - Inpainting](examples/griptape_black_forest_flux_inpainting.png)
+
 ### Nov 29, 2024
 
 * Iterating on configuration settings to improve compatibility with ComfyUI Desktop
diff --git a/__init__.py b/__init__.py
index 69698f0..50ff1b5 100644
--- a/__init__.py
+++ b/__init__.py
@@ -118,6 +118,11 @@
     gtUIAzureOpenAiImageGenerationDriver,
 )
 
+# - Black Forest Labs
+from .nodes.drivers.gtUIBlackForestImageGenerationDriver import (
+    gtUIBlackForestImageGenerationDriver,
+)
+
 # - Cohere
 from .nodes.drivers.gtUICohereEmbeddingDriver import gtUICohereEmbeddingDriver
 from .nodes.drivers.gtUICoherePromptDriver import gtUICoherePromptDriver
@@ -357,6 +362,7 @@
     "Griptape Driver: Amazon Bedrock Stable Diffusion": gtUIAmazonBedrockStableDiffusionImageGenerationDriver,
     "Griptape Driver: Amazon Bedrock Titan": gtUIAmazonBedrockTitanImageGenerationDriver,
     "Griptape Driver: Azure OpenAI Image Generation": gtUIAzureOpenAiImageGenerationDriver,
+    "Griptape Driver: Black Forest Labs Image Generation": gtUIBlackForestImageGenerationDriver,
     "Griptape Driver: Leonardo.AI": gtUILeonardoImageGenerationDriver,
     "Griptape Driver: OpenAI Image Generation": gtUIOpenAiImageGenerationDriver,
     "Griptape Driver: OpenAI Compatible Image Generation": gtUIOpenAiCompatibleImageGenerationDriver,
diff --git a/examples/README.md b/examples/README.md
index ea85487..a929eb8 100644
--- a/examples/README.md
+++ b/examples/README.md
@@ -17,6 +17,26 @@ Uses `Griptape Load: Text` node to import a PDF of a robot resume, then the `Gri
 ![Profile Pic](pdf_to_profile_pic.png)
 Download: [pdf_to_profile_pic.png](pdf_to_profile_pic.png)
 
+## Black Forest Labs - Flux Pro 1.1 Image Generation
+Use Black Forest Labs Flux models with Griptape!
+
+![BFL Flux-Pro-1.1](griptape_black_forest_labs_create_image.png)
+Download: [griptape_black_forest_labs_create_image.png](griptape_black_forest_labs_create_image.png)
+
+## Black Forest Labs - Flux Pro 1.0-Canny - Image Variation
+
+Use Black Forest Labs Flux-Pro-1.0-canny with Griptape to create a variation on an image.
+
+![BFL flux-pro-1.0-canny](griptape_black_forest_labs_create_variation.png)
+Download: [griptape_black_forest_labs_create_variation.png](griptape_black_forest_labs_create_variation.png)
+
+## Black Forest Labs - Flux Pro 1.0-Fill - Image Inpainting
+
+Use Black Forest Labs Flux-Pro-1.0-fill with Griptape to perform inpainting on an image.
+
+![BFL flux-pro-1.0-fill](griptape_black_forest_flux_inpainting.png)
+Download: [griptape_black_forest_flux_inpainting.png](griptape_black_forest_flux_inpainting.png)
+
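+**Note:** The Black Forest Labs examples above require an API key from [Black Forest Labs](https://docs.bfl.ml/). The `Griptape Driver: Black Forest Labs Image Generation` node reads the key from the environment variable named in its `api_key_env_var` input (`BFL_API_KEY` by default), so set it however you normally provide API keys to ComfyUI-Griptape - for example, as an environment variable or in a `.env` file:
+
+```
+BFL_API_KEY=your_black_forest_labs_api_key
+```
+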
 ## Griptape Expert Photographers
 
 Use Agents as experts in their field to help provide feedback that will generate more advanced outputs. Utilizes multiple models - including Ollama running locally. If you don't have Ollama installed, feel free to use another `prompt_driver`.
diff --git a/examples/griptape_black_forest_flux_inpainting.png b/examples/griptape_black_forest_flux_inpainting.png
new file mode 100644
index 0000000..b4dd900
Binary files /dev/null and b/examples/griptape_black_forest_flux_inpainting.png differ
diff --git a/examples/griptape_black_forest_labs_create_image.png b/examples/griptape_black_forest_labs_create_image.png
new file mode 100644
index 0000000..86ade7f
Binary files /dev/null and b/examples/griptape_black_forest_labs_create_image.png differ
diff --git a/examples/griptape_black_forest_labs_create_variation.png b/examples/griptape_black_forest_labs_create_variation.png
new file mode 100644
index 0000000..50a35ed
Binary files /dev/null and b/examples/griptape_black_forest_labs_create_variation.png differ
diff --git a/js/NodesWithVisibilityToggles.js b/js/NodesWithVisibilityToggles.js
index 68648f3..dd776d3 100644
--- a/js/NodesWithVisibilityToggles.js
+++ b/js/NodesWithVisibilityToggles.js
@@ -4,8 +4,97 @@ import { formatAndDisplayJSON } from "./gtUIUtils.js";
 import { hideWidget, showWidget } from "./utils.js";
 import { app } from "../../../scripts/app.js";
 
 export function setupVisibilityToggles(nodeType, nodeData, app) {
-  // if (nodeData.name.includes("Black Forest Labs Image Generation")) {
-  //   setupBlackForestLabsImageGenerationNode(nodeType, nodeData, app);
-  // }
+  if (nodeData.name.includes("Black Forest Labs Image Generation")) {
+    setupBlackForestLabsImageGenerationNode(nodeType, nodeData, app);
+  }
 }
 
+function setupBlackForestLabsImageGenerationNode(nodeType, nodeData, app) {
+  const onNodeCreated = nodeType.prototype.onNodeCreated;
+  nodeType.prototype.onNodeCreated = function () {
+    const me = onNodeCreated?.apply(this);
+    const widget_model = this.widgets.find(
+      (w) => w.name === "image_generation_model"
+    );
+
+    const width_widget = this.widgets.find((w) => w.name === "width");
+    const height_widget = this.widgets.find((w) => w.name === "height");
+    const aspect_ratio_width_widget = this.widgets.find(
+      (w) => w.name === "aspect_ratio_width"
+    );
+    const aspect_ratio_height_widget = this.widgets.find(
+      (w) => w.name === "aspect_ratio_height"
+    );
+
+    const raw_widget = this.widgets.find((w) => w.name === "raw");
+    const guidance_widget = this.widgets.find((w) => w.name === "guidance");
+    const steps_widget = this.widgets.find((w) => w.name === "steps");
+    const interval_widget = this.widgets.find((w) => w.name === "interval");
+    const prompt_upsampling_widget = this.widgets.find(
+      (w) => w.name === "prompt_upsampling"
+    );
+    const image_prompt_strength_widget = this.widgets.find(
+      (w) => w.name === "image_prompt_strength"
+    );
+
+    // Whenever the model changes, hide all of the model-specific widgets first
+    widget_model.callback = async () => {
+      hideWidget(this, width_widget);
+      hideWidget(this, height_widget);
+      hideWidget(this, aspect_ratio_width_widget);
+      hideWidget(this, aspect_ratio_height_widget);
+      hideWidget(this, raw_widget);
+      hideWidget(this, guidance_widget);
+      hideWidget(this, steps_widget);
+      hideWidget(this, interval_widget);
+      hideWidget(this, prompt_upsampling_widget);
+      hideWidget(this, image_prompt_strength_widget);
+
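+      // Then re-show only the widgets that apply to the selected model
+      // (this mirrors the per-model parameter handling in gtUIBlackForestImageGenerationDriver.build_params).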
+      switch (widget_model.value) {
+        case "flux-pro-1.1-ultra":
+          showWidget(aspect_ratio_height_widget);
+          showWidget(aspect_ratio_width_widget);
+          showWidget(raw_widget);
+          showWidget(image_prompt_strength_widget);
+          break;
+        case "flux-pro-1.1":
+          showWidget(width_widget);
+          showWidget(height_widget);
+          showWidget(prompt_upsampling_widget);
+          break;
+        case "flux-pro":
+          showWidget(width_widget);
+          showWidget(height_widget);
+          showWidget(prompt_upsampling_widget);
+          showWidget(interval_widget);
+          showWidget(guidance_widget);
+          showWidget(steps_widget);
+          break;
+        case "flux-dev":
+          showWidget(width_widget);
+          showWidget(height_widget);
+          showWidget(guidance_widget);
+          showWidget(steps_widget);
+          showWidget(prompt_upsampling_widget);
+          break;
+        case "flux-pro-1.0-depth":
+        case "flux-pro-1.0-canny":
+          showWidget(guidance_widget);
+          showWidget(steps_widget);
+          showWidget(prompt_upsampling_widget);
+          break;
+        case "flux-pro-1.0-fill":
+          showWidget(guidance_widget);
+          showWidget(steps_widget);
+          showWidget(prompt_upsampling_widget);
+          break;
+        default:
+          break;
+      }
+      fitHeight(this, true);
+    };
+
+    setTimeout(() => {
+      widget_model.callback();
+    }, 5);
+    return me;
+  };
+}
diff --git a/js/griptape_api_keys.js b/js/griptape_api_keys.js
index af3bd82..1ad041b 100644
--- a/js/griptape_api_keys.js
+++ b/js/griptape_api_keys.js
@@ -9,6 +9,8 @@ export const keys_organized = {
     "AMAZON_OPENSEARCH_INDEX_NAME",
   ],
   "Anthropic": ["ANTHROPIC_API_KEY"],
+  "BlackForest Labs": ["BFL_API_KEY"],
+
   "Microsoft Azure": [
     "AZURE_OPENAI_ENDPOINT",
     "AZURE_OPENAI_DALL_E_3_ENDPOINT",
diff --git a/nodes/drivers/gtUIBlackForestImageGenerationDriver.py b/nodes/drivers/gtUIBlackForestImageGenerationDriver.py
new file mode 100644
index 0000000..f6206d0
--- /dev/null
+++ b/nodes/drivers/gtUIBlackForestImageGenerationDriver.py
@@ -0,0 +1,237 @@
+from griptape.black_forest.drivers.black_forest_image_generation_driver import (
+    BlackForestImageGenerationDriver,
+)
+
+from .gtUIBaseImageDriver import gtUIBaseImageGenerationDriver
+
+DEFAULT_API_KEY = "BFL_API_KEY"
+models = [
+    "flux-pro-1.1",
+    "flux-pro-1.1-ultra",
+    "flux-pro",
+    "flux-dev",
+    "flux-pro-1.0-canny",
+    "flux-pro-1.0-depth",
+    "flux-pro-1.0-fill",
+]
+widths = [str(i) for i in range(256, 1441, 32)]
+heights = [str(i) for i in range(256, 1441, 32)]
+safety_tolerance_list = ["strict", "high", "medium", "low", "very_low", "none"]
+aspect_ratios = [f"{w}:{h}" for w in range(21, 8, -1) for h in range(9, 22)]
+
+
+class gtUIBlackForestImageGenerationDriver(gtUIBaseImageGenerationDriver):
+    DESCRIPTION = "Black Forest Image Generation Driver to use Flux models."
+
+    @classmethod
+    def INPUT_TYPES(s):
+        inputs = super().INPUT_TYPES()
+        inputs["optional"].update(
+            {
+                "image_generation_model": (
+                    models,
+                    {
+                        "default": models[0],
+                        "tooltip": "Select the image generation model.",
+                    },
+                ),
+                "width": (
+                    widths,
+                    {"default": "1024", "tooltip": "Select the desired image width."},
+                ),
+                "height": (
+                    heights,
+                    {"default": "768", "tooltip": "Select the desired image height."},
+                ),
+                "aspect_ratio_width": (
+                    "INT",
+                    {
+                        "default": 16,
+                        "min": 9,
+                        "max": 21,
+                        "tooltip": "Select the desired aspect width.",
+                    },
+                ),
+                "aspect_ratio_height": (
+                    "INT",
+                    {
+                        "default": 9,
+                        "min": 9,
+                        "max": 21,
+                        "tooltip": "Select the desired aspect height.",
+                    },
+                ),
+                "prompt_upsampling": (
+                    "BOOLEAN",
+                    {
+                        "default": False,
+                        "tooltip": "Whether to perform upsampling on the prompt. If active, automatically modifies the prompt for more creative generation.",
+                        "label_on": "True (Modify Prompt)",
+                        "label_off": "False (No Modification)",
+                    },
+                ),
+                "safety_tolerance": (
+                    safety_tolerance_list,
+                    {
+                        "default": safety_tolerance_list[2],
+                        "tooltip": "Select the safety tolerance level.",
+                    },
+                ),
+                "steps": (
+                    "INT",
+                    {
+                        "default": None,
+                        "tooltip": "Number of steps for the image generation process. Values: 1-50",
+                        "min": 0,
+                        "max": 50,
+                    },
+                ),
+                "guidance": (
+                    "FLOAT",
+                    {
+                        "default": 0.0,
+                        "tooltip": "Optional guidance scale for image generation.\n\nFor 'flux-dev' and 'flux-pro', values are 1.5-5. Higher guidance improves prompt adherence at the cost of reduced realism.\n\nFor 'flux-pro-1.0-canny' and 'flux-pro-1.0-depth', values are 1-100; higher guidance favors the control image over the prompt.",
+                    },
+                ),
+                "interval": (
+                    "INT",
+                    {
+                        "default": None,
+                        "tooltip": "Optional interval parameter for guidance control. Values: 1-4",
+                        "min": 0,
+                        "max": 4,
+                    },
+                ),
+                "seed": (
+                    "INT",
+                    {
+                        "default": 10342349342,
+                        "tooltip": "Seed for random number generation.",
+                    },
+                ),
+                "raw": (
+                    "BOOLEAN",
+                    {
+                        "default": False,
+                        "tooltip": "Generate less processed, more natural-looking images.",
+                    },
+                ),
+                "image_prompt_strength": (
+                    "FLOAT",
+                    {
+                        "default": 0.1,
+                        "tooltip": "Image prompt strength. Values: 0.0-1.0. Higher values increase the influence of the image.",
+                        "min": 0,
+                        "max": 1,
+                    },
+                ),
+                "api_key_env_var": (
+                    "STRING",
+                    {
+                        "default": DEFAULT_API_KEY,
+                        "tooltip": "Enter the environment variable name for the API key, not the actual API key.",
+                    },
+                ),
+            }
+        )
+
+        return inputs
+
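+    # Each Flux model accepts a different subset of these settings (the widget
+    # visibility toggles in js/NodesWithVisibilityToggles.js mirror this), so
+    # build_params only forwards the parameters that apply to the selected model.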
+    def build_params(self, **kwargs):
+        model = kwargs.get("image_generation_model", models[0])
+        width = kwargs.get("width", "1024")
+        height = kwargs.get("height", "768")
+        aspect_ratio_width = kwargs.get("aspect_ratio_width", 16)
+        aspect_ratio_height = kwargs.get("aspect_ratio_height", 9)
+        aspect_ratio = f"{aspect_ratio_width}:{aspect_ratio_height}"
+        prompt_upsampling = kwargs.get("prompt_upsampling", False)
+        safety_tolerance_str = kwargs.get("safety_tolerance", safety_tolerance_list[2])
+        safety_tolerance = safety_tolerance_list.index(safety_tolerance_str)
+        image_prompt_strength = kwargs.get("image_prompt_strength", 0.1)
+        seed = kwargs.get("seed", 10342349342)
+        steps = kwargs.get("steps", None)
+        guidance = kwargs.get("guidance", None)
+        interval = kwargs.get("interval", 0.5)
+        raw = kwargs.get("raw", False)
+        api_key = self.getenv(kwargs.get("api_key_env_var", DEFAULT_API_KEY))
+
+        params = {}
+
+        if model:
+            params["model"] = model
+        if safety_tolerance:
+            params["safety_tolerance"] = int(safety_tolerance)
+        if seed:
+            params["seed"] = int(seed)
+        if api_key:
+            params["api_key"] = api_key
+
+        # Ultra model specific settings
+        if model == "flux-pro-1.1-ultra":
+            if aspect_ratio:
+                params["aspect_ratio"] = aspect_ratio
+            if raw:
+                params["raw"] = bool(raw)
+            if image_prompt_strength:
+                params["image_prompt_strength"] = float(image_prompt_strength)
+
+        # Fill model specific settings
+        if model == "flux-pro-1.0-fill":
+            if guidance is not None and guidance > 0:
+                if guidance < 1.5:
+                    guidance = 1.5
+                if guidance > 100:
+                    raise ValueError(
+                        f"When using {model}, guidance must be between 1.5 and 100."
+                    )
+                params["guidance"] = float(guidance)
+
+        # Canny/Depth model specific settings
+        if model in ["flux-pro-1.0-canny", "flux-pro-1.0-depth"]:
+            if guidance is not None and guidance > 0:
+                if guidance < 1:
+                    guidance = 1
+                params["guidance"] = float(guidance)
+
+        # Dev/Pro specific settings
+        if model in ["flux-dev", "flux-pro"]:
+            if guidance is not None and guidance > 0:
+                if guidance < 1.5:
+                    guidance = 1.5
+                if guidance > 5:
+                    raise ValueError(
+                        f"When using {model}, guidance must be between 1.5 and 5."
+                    )
+                params["guidance"] = float(guidance)
+
+        # Pro model specific settings
+        if model == "flux-pro":
+            if interval is not None and interval > 0:
+                params["interval"] = int(interval)
+
+        # Dev/Pro/Canny/Depth specific settings
+        if model in [
+            "flux-dev",
+            "flux-pro",
+            "flux-pro-1.0-canny",
+            "flux-pro-1.0-depth",
+        ]:
+            if steps is not None and steps > 0:
+                params["steps"] = int(steps)
+            if prompt_upsampling:
+                params["prompt_upsampling"] = bool(prompt_upsampling)
+
+        # Flux-pro-1.1, flux-dev, flux-pro specific settings
+        if model in ["flux-dev", "flux-pro", "flux-pro-1.1"]:
+            if width:
+                params["width"] = int(width)
+            if height:
+                params["height"] = int(height)
+
+        return params
+
+    def create(self, **kwargs):
+        params = self.build_params(**kwargs)
+        params["max_attempts"] = 1
+        driver = BlackForestImageGenerationDriver(**params)
+        return (driver,)
diff --git a/nodes/tasks/gtUIInpaintingImageGenerationTask.py b/nodes/tasks/gtUIInpaintingImageGenerationTask.py
index 87d3fa5..628c1f2 100644
--- a/nodes/tasks/gtUIInpaintingImageGenerationTask.py
+++ b/nodes/tasks/gtUIInpaintingImageGenerationTask.py
@@ -2,6 +2,9 @@
 import os
 
 import folder_paths
+from griptape.black_forest.drivers.black_forest_image_generation_driver import (
+    BlackForestImageGenerationDriver,
+)
 from griptape.drivers import (
     AmazonBedrockImageGenerationDriver,
     AzureOpenAiImageGenerationDriver,
@@ -58,6 +61,10 @@ def _model_supports_inpainting(self, driver):
             "models": ["dall-e-2"],
             "default_message": "OpenAI model must be dall-e-2 for inpainting.",
         },
+        BlackForestImageGenerationDriver: {
+            "models": ["flux-pro-1.0-fill"],
+            "default_message": "BlackForest model must be flux-pro-1.0-fill for inpainting.",
+        },
         LeonardoImageGenerationDriver: {
             "models": [],
             "default_message": "Leonardo.AI does not support Inpainting. Please choose a different driver.",
@@ -116,6 +123,12 @@ def run(self, **kwargs):
         if not inpainting_supported:
             raise ValueError(msg)
 
+        # Quick fix for flux-pro-1.0-fill
+        if isinstance(driver, BlackForestImageGenerationDriver):
+            # The inpainting-support check above expects "flux-pro-1.0-fill"; swap back to the base "flux-pro-1.0" model name before the request is made.
+            if driver.model == "flux-pro-1.0-fill":
+                driver.model = "flux-pro-1.0"
+
         # Create an engine configured to use the driver.
engine = InpaintingImageGenerationEngine( image_generation_driver=driver, diff --git a/nodes/tasks/gtUIPromptImageVariationTask.py b/nodes/tasks/gtUIPromptImageVariationTask.py index 2ec4a12..082a32d 100644 --- a/nodes/tasks/gtUIPromptImageVariationTask.py +++ b/nodes/tasks/gtUIPromptImageVariationTask.py @@ -2,6 +2,9 @@ import os import folder_paths +from griptape.black_forest.drivers.black_forest_image_generation_driver import ( + BlackForestImageGenerationDriver, +) from griptape.drivers import ( OpenAiImageGenerationDriver, ) @@ -63,6 +66,24 @@ def run(self, **kwargs): api_key=OPENAI_API_KEY, model="dall-e-2", ) + # Check if driver is BlackForestImageGenerationDriver + if isinstance(driver, BlackForestImageGenerationDriver): + # check the model to make sure it's one that can handle Variation Image Generation + if driver.model not in [ + "flux-pro-1.0-canny", + "flux-pro-1.0-depth", + # "flux-dev", + "flux-pro-1.1", + # "flux-pro", + "flux-pro-1.1-ultra", + ]: + raise ValueError( + f"Model {driver.model} is not supported for image variation." + ) + return ( + None, + f"Model {driver.model} is not supported for image variation.", + ) # Create an engine configured to use the driver. engine = VariationImageGenerationEngine( image_generation_driver=driver, diff --git a/pyproject.toml b/pyproject.toml index 93a7113..929a7e5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -9,6 +9,7 @@ readme = "README.md" python = "^3.11" python-dotenv = "^1.0.1" griptape = { version = "^0.34.3", extras = ["all"]} +griptape-black-forest = {git = "https://github.com/griptape-ai/griptape-black-forest.git"} [tool.poetry.group.dev.dependencies] icecream = "^2.1.3" @@ -26,9 +27,9 @@ priority = "explicit" [project] name = "comfyui-griptape" description = "Griptape LLM(Large Language Model) Nodes for ComfyUI." -version = "1.0.26" +version = "1.0.27" license = {file = "LICENSE"} -dependencies = ["griptape[all]==0.34.3", "python-dotenv"] +dependencies = ["griptape[all]==0.34.3", "python-dotenv", "griptape-black-forest @ git+https://github.com/griptape-ai/griptape-black-forest.git"] [project.urls] Repository = "https://github.com/griptape-ai/ComfyUI-Griptape" diff --git a/requirements.txt b/requirements.txt index 56bdc9e..79fc427 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,2 +1,3 @@ griptape[all]==0.34.3 python-dotenv +git+https://github.com/griptape-ai/griptape-black-forest.git \ No newline at end of file