From 6f77477a1c881a73b204efab4d803f9585575312 Mon Sep 17 00:00:00 2001 From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com> Date: Tue, 27 Feb 2024 20:29:43 +0530 Subject: [PATCH 1/2] cleanup: remove manual offload from depth anything --- invokeai/app/invocations/controlnet_image_processors.py | 5 ++--- invokeai/backend/image_util/depth_anything/__init__.py | 5 +---- 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/invokeai/app/invocations/controlnet_image_processors.py b/invokeai/app/invocations/controlnet_image_processors.py index 9eba3acdcaa..610dbd433f8 100644 --- a/invokeai/app/invocations/controlnet_image_processors.py +++ b/invokeai/app/invocations/controlnet_image_processors.py @@ -576,7 +576,7 @@ def run_processor(self, image: Image.Image): title="Depth Anything Processor", tags=["controlnet", "depth", "depth anything"], category="controlnet", - version="1.0.0", + version="1.0.1", ) class DepthAnythingImageProcessorInvocation(ImageProcessorInvocation): """Generates a depth map based on the Depth Anything algorithm""" @@ -585,13 +585,12 @@ class DepthAnythingImageProcessorInvocation(ImageProcessorInvocation): default="small", description="The size of the depth model to use" ) resolution: int = InputField(default=512, ge=64, multiple_of=64, description=FieldDescriptions.image_res) - offload: bool = InputField(default=False) def run_processor(self, image: Image.Image): depth_anything_detector = DepthAnythingDetector() depth_anything_detector.load_model(model_size=self.model_size) - processed_image = depth_anything_detector(image=image, resolution=self.resolution, offload=self.offload) + processed_image = depth_anything_detector(image=image, resolution=self.resolution) return processed_image diff --git a/invokeai/backend/image_util/depth_anything/__init__.py b/invokeai/backend/image_util/depth_anything/__init__.py index fcd600b99e2..ddb0aaa0c48 100644 --- a/invokeai/backend/image_util/depth_anything/__init__.py +++ 
b/invokeai/backend/image_util/depth_anything/__init__.py @@ -84,7 +84,7 @@ def to(self, device): self.model.to(device) return self - def __call__(self, image, resolution=512, offload=False): + def __call__(self, image, resolution=512): image = np.array(image, dtype=np.uint8) image = image[:, :, ::-1] / 255.0 @@ -103,7 +103,4 @@ def __call__(self, image, resolution=512, offload=False): new_height = int(image_height * (resolution / image_width)) depth_map = depth_map.resize((resolution, new_height)) - if offload: - del self.model - return depth_map From 41b77cd5ff70804b1740bae09143ff529bd370a7 Mon Sep 17 00:00:00 2001 From: blessedcoolant <54517381+blessedcoolant@users.noreply.github.com> Date: Tue, 27 Feb 2024 20:50:15 +0530 Subject: [PATCH 2/2] fix: minor fixes to types in the DA Detector --- .../image_util/depth_anything/__init__.py | 23 +++++++++++-------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/invokeai/backend/image_util/depth_anything/__init__.py b/invokeai/backend/image_util/depth_anything/__init__.py index ddb0aaa0c48..78b828d816c 100644 --- a/invokeai/backend/image_util/depth_anything/__init__.py +++ b/invokeai/backend/image_util/depth_anything/__init__.py @@ -17,6 +17,8 @@ config = InvokeAIAppConfig.get_config() +DEPTH_ANYTHING_MODEL_SIZES = Literal["large", "base", "small"] + DEPTH_ANYTHING_MODELS = { "large": { "url": "https://huggingface.co/spaces/LiheYoung/Depth-Anything/resolve/main/checkpoints/depth_anything_vitl14.pth?download=true", @@ -53,9 +55,9 @@ class DepthAnythingDetector: def __init__(self) -> None: self.model = None - self.model_size: Union[Literal["large", "base", "small"], None] = None + self.model_size: Union[DEPTH_ANYTHING_MODEL_SIZES, None] = None - def load_model(self, model_size=Literal["large", "base", "small"]): + def load_model(self, model_size: DEPTH_ANYTHING_MODEL_SIZES = "small"): DEPTH_ANYTHING_MODEL_PATH = pathlib.Path(config.models_path / DEPTH_ANYTHING_MODELS[model_size]["local"]) if not 
DEPTH_ANYTHING_MODEL_PATH.exists(): download_with_progress_bar(DEPTH_ANYTHING_MODELS[model_size]["url"], DEPTH_ANYTHING_MODEL_PATH) @@ -84,16 +86,19 @@ def to(self, device): self.model.to(device) return self - def __call__(self, image, resolution=512): - image = np.array(image, dtype=np.uint8) - image = image[:, :, ::-1] / 255.0 + def __call__(self, image: Image.Image, resolution: int = 512): + if self.model is None: + raise Exception("Depth Anything Model not loaded") + + np_image = np.array(image, dtype=np.uint8) + np_image = np_image[:, :, ::-1] / 255.0 - image_height, image_width = image.shape[:2] - image = transform({"image": image})["image"] - image = torch.from_numpy(image).unsqueeze(0).to(choose_torch_device()) + image_height, image_width = np_image.shape[:2] + np_image = transform({"image": np_image})["image"] + tensor_image = torch.from_numpy(np_image).unsqueeze(0).to(choose_torch_device()) with torch.no_grad(): - depth = self.model(image) + depth = self.model(tensor_image) depth = F.interpolate(depth[None], (image_height, image_width), mode="bilinear", align_corners=False)[0, 0] depth = (depth - depth.min()) / (depth.max() - depth.min()) * 255.0