From 1082361a1978d30db5c3932d1ee08914d74d9697 Mon Sep 17 00:00:00 2001
From: Pedro Cuenca
Date: Fri, 5 Jul 2024 20:28:41 +0200
Subject: [PATCH] Depth Anything: update conversion script for V2 (#31522)

* Depth Anything: update conversion script for V2

* Update docs

* Style

* Revert "Update docs"

This reverts commit be0ca47ea1be4f3cd9aa2113bdd8efcc9959119e.

* Add docs for depth anything v2

* Add depth_anything_v2 to MODEL_NAMES_MAPPING

Done similarly to Flan-T5: https://github.com/huggingface/transformers/pull/19892/files

* Add tip in original docs
---
 docs/source/en/_toctree.yml                   |   2 +
 docs/source/en/model_doc/depth_anything.md    |   6 +
 docs/source/en/model_doc/depth_anything_v2.md | 115 ++++++++++++++++++
 .../models/auto/configuration_auto.py         |   1 +
 .../convert_depth_anything_to_hf.py           |  57 ++++++---
 5 files changed, 165 insertions(+), 16 deletions(-)
 create mode 100644 docs/source/en/model_doc/depth_anything_v2.md

diff --git a/docs/source/en/_toctree.yml b/docs/source/en/_toctree.yml
index e48378d8c25377..1fcf2d26c17530 100644
--- a/docs/source/en/_toctree.yml
+++ b/docs/source/en/_toctree.yml
@@ -581,6 +581,8 @@
       title: DeiT
     - local: model_doc/depth_anything
       title: Depth Anything
+    - local: model_doc/depth_anything_v2
+      title: Depth Anything V2
     - local: model_doc/deta
       title: DETA
     - local: model_doc/detr
diff --git a/docs/source/en/model_doc/depth_anything.md b/docs/source/en/model_doc/depth_anything.md
index 99332697b38ef2..e08e4bfc9904b7 100644
--- a/docs/source/en/model_doc/depth_anything.md
+++ b/docs/source/en/model_doc/depth_anything.md
@@ -20,6 +20,12 @@ rendered properly in your Markdown viewer.

The Depth Anything model was proposed in [Depth Anything: Unleashing the Power of Large-Scale Unlabeled Data](https://arxiv.org/abs/2401.10891) by Lihe Yang, Bingyi Kang, Zilong Huang, Xiaogang Xu, Jiashi Feng, Hengshuang Zhao. Depth Anything is based on the [DPT](dpt) architecture, trained on ~62 million images, obtaining state-of-the-art results for both relative and absolute depth estimation.

+<Tip>
+
+[Depth Anything V2](depth_anything_v2) was released in June 2024. It uses the same architecture as Depth Anything and is therefore compatible with all code examples and existing workflows. However, it leverages synthetic data and a larger-capacity teacher model to achieve much finer and more robust depth predictions.
+
+</Tip>
+
The abstract from the paper is the following:

*This work presents Depth Anything, a highly practical solution for robust monocular depth estimation. Without pursuing novel technical modules, we aim to build a simple yet powerful foundation model dealing with any images under any circumstances. To this end, we scale up the dataset by designing a data engine to collect and automatically annotate large-scale unlabeled data (~62M), which significantly enlarges the data coverage and thus is able to reduce the generalization error. We investigate two simple yet effective strategies that make data scaling-up promising. First, a more challenging optimization target is created by leveraging data augmentation tools. It compels the model to actively seek extra visual knowledge and acquire robust representations. Second, an auxiliary supervision is developed to enforce the model to inherit rich semantic priors from pre-trained encoders. We evaluate its zero-shot capabilities extensively, including six public datasets and randomly captured photos. It demonstrates impressive generalization ability.
Further, through fine-tuning it with metric depth information from NYUv2 and KITTI, new SOTAs are set. Our better depth model also results in a better depth-conditioned ControlNet.*
diff --git a/docs/source/en/model_doc/depth_anything_v2.md b/docs/source/en/model_doc/depth_anything_v2.md
new file mode 100644
index 00000000000000..49f655238efca6
--- /dev/null
+++ b/docs/source/en/model_doc/depth_anything_v2.md
@@ -0,0 +1,115 @@

# Depth Anything V2

## Overview

Depth Anything V2 was introduced in [the paper of the same name](https://arxiv.org/abs/2406.09414) by Lihe Yang et al. It uses the same architecture as the original [Depth Anything model](depth_anything), but leverages synthetic data and a larger-capacity teacher model to achieve much finer and more robust depth predictions.

The abstract from the paper is the following:

*This work presents Depth Anything V2. Without pursuing fancy techniques, we aim to reveal crucial findings to pave the way towards building a powerful monocular depth estimation model. Notably, compared with V1, this version produces much finer and more robust depth predictions through three key practices: 1) replacing all labeled real images with synthetic images, 2) scaling up the capacity of our teacher model, and 3) teaching student models via the bridge of large-scale pseudo-labeled real images. Compared with the latest models built on Stable Diffusion, our models are significantly more efficient (more than 10x faster) and more accurate. We offer models of different scales (ranging from 25M to 1.3B params) to support extensive scenarios. Benefiting from their strong generalization capability, we fine-tune them with metric depth labels to obtain our metric depth models. In addition to our models, considering the limited diversity and frequent noise in current test sets, we construct a versatile evaluation benchmark with precise annotations and diverse scenes to facilitate future research.*

Depth Anything overview. Taken from the original paper.

The Depth Anything models were contributed by [nielsr](https://huggingface.co/nielsr).
The original code can be found [here](https://github.com/DepthAnything/Depth-Anything-V2).

## Usage example

There are two main ways to use Depth Anything V2: either using the pipeline API, which abstracts away all the complexity for you, or using the `DepthAnythingForDepthEstimation` class yourself.
### Pipeline API

The pipeline allows you to use the model in a few lines of code:

```python
>>> from transformers import pipeline
>>> from PIL import Image
>>> import requests

>>> # load pipe
>>> pipe = pipeline(task="depth-estimation", model="depth-anything/Depth-Anything-V2-Small-hf")

>>> # load image
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)

>>> # inference
>>> depth = pipe(image)["depth"]
```

### Using the model yourself

If you want to do the pre- and post-processing yourself, here's how to do that:

```python
>>> from transformers import AutoImageProcessor, AutoModelForDepthEstimation
>>> import torch
>>> import numpy as np
>>> from PIL import Image
>>> import requests

>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)

>>> image_processor = AutoImageProcessor.from_pretrained("depth-anything/Depth-Anything-V2-Small-hf")
>>> model = AutoModelForDepthEstimation.from_pretrained("depth-anything/Depth-Anything-V2-Small-hf")

>>> # prepare image for the model
>>> inputs = image_processor(images=image, return_tensors="pt")

>>> with torch.no_grad():
...     outputs = model(**inputs)
...     predicted_depth = outputs.predicted_depth

>>> # interpolate to original size
>>> prediction = torch.nn.functional.interpolate(
...     predicted_depth.unsqueeze(1),
...     size=image.size[::-1],
...     mode="bicubic",
...     align_corners=False,
... )

>>> # visualize the prediction
>>> output = prediction.squeeze().cpu().numpy()
>>> formatted = (output * 255 / np.max(output)).astype("uint8")
>>> depth = Image.fromarray(formatted)
```

The larger V2 checkpoints can be used in exactly the same way; a sketch of loading one in half precision is shown after the resources below.

## Resources

A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with Depth Anything.

- [Monocular depth estimation task guide](../tasks/depth_estimation)
- [Depth Anything V2 demo](https://huggingface.co/spaces/depth-anything/Depth-Anything-V2)
- A notebook showcasing inference with [`DepthAnythingForDepthEstimation`] can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/Depth%20Anything/Predicting_depth_in_an_image_with_Depth_Anything.ipynb). 🌎
- [Core ML conversion of the `small` variant for use on Apple Silicon](https://huggingface.co/apple/coreml-depth-anything-v2-small)

If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource.
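The examples above use the small checkpoint. As a minimal sketch, assuming a converted large V2 checkpoint is published as `depth-anything/Depth-Anything-V2-Large-hf` (the conversion script below pushes converted weights under a `<model-name>-hf` name, but this exact repository id is an assumption, not something verified here), the same API can load it in half precision to reduce memory use:

```python
>>> import torch
>>> import requests
>>> from PIL import Image
>>> from transformers import AutoImageProcessor, AutoModelForDepthEstimation

>>> # hypothetical repo id, following the "<model-name>-hf" naming used by the conversion script
>>> checkpoint = "depth-anything/Depth-Anything-V2-Large-hf"
>>> device = "cuda" if torch.cuda.is_available() else "cpu"

>>> image_processor = AutoImageProcessor.from_pretrained(checkpoint)
>>> model = AutoModelForDepthEstimation.from_pretrained(checkpoint, torch_dtype=torch.float16)
>>> model = model.to(device)

>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)

>>> # cast the pixel values to the model's device and dtype before running inference
>>> inputs = image_processor(images=image, return_tensors="pt")
>>> inputs = {k: v.to(device=device, dtype=torch.float16) for k, v in inputs.items()}

>>> with torch.no_grad():
...     predicted_depth = model(**inputs).predicted_depth
```

The post-processing (interpolation to the original size and visualization) is the same as in the full-precision example above; only the dtype of the tensors changes.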
+ +## DepthAnythingConfig + +[[autodoc]] DepthAnythingConfig + +## DepthAnythingForDepthEstimation + +[[autodoc]] DepthAnythingForDepthEstimation + - forward \ No newline at end of file diff --git a/src/transformers/models/auto/configuration_auto.py b/src/transformers/models/auto/configuration_auto.py index 7f52b3dc280ac6..3770dfabec27aa 100755 --- a/src/transformers/models/auto/configuration_auto.py +++ b/src/transformers/models/auto/configuration_auto.py @@ -356,6 +356,7 @@ ("deit", "DeiT"), ("deplot", "DePlot"), ("depth_anything", "Depth Anything"), + ("depth_anything_v2", "Depth Anything V2"), ("deta", "DETA"), ("detr", "DETR"), ("dialogpt", "DialoGPT"), diff --git a/src/transformers/models/depth_anything/convert_depth_anything_to_hf.py b/src/transformers/models/depth_anything/convert_depth_anything_to_hf.py index 9b9836e8522b3f..3e45c95de9abfa 100644 --- a/src/transformers/models/depth_anything/convert_depth_anything_to_hf.py +++ b/src/transformers/models/depth_anything/convert_depth_anything_to_hf.py @@ -33,25 +33,28 @@ def get_dpt_config(model_name): if "small" in model_name: + out_indices = [3, 6, 9, 12] if "v2" in model_name else [9, 10, 11, 12] backbone_config = Dinov2Config.from_pretrained( - "facebook/dinov2-small", out_indices=[9, 10, 11, 12], apply_layernorm=True, reshape_hidden_states=False + "facebook/dinov2-small", out_indices=out_indices, apply_layernorm=True, reshape_hidden_states=False ) fusion_hidden_size = 64 neck_hidden_sizes = [48, 96, 192, 384] elif "base" in model_name: + out_indices = [3, 6, 9, 12] if "v2" in model_name else [9, 10, 11, 12] backbone_config = Dinov2Config.from_pretrained( - "facebook/dinov2-base", out_indices=[9, 10, 11, 12], apply_layernorm=True, reshape_hidden_states=False + "facebook/dinov2-base", out_indices=out_indices, apply_layernorm=True, reshape_hidden_states=False ) fusion_hidden_size = 128 neck_hidden_sizes = [96, 192, 384, 768] elif "large" in model_name: + out_indices = [5, 12, 18, 24] if "v2" in model_name else [21, 22, 23, 24] backbone_config = Dinov2Config.from_pretrained( - "facebook/dinov2-large", out_indices=[21, 22, 23, 24], apply_layernorm=True, reshape_hidden_states=False + "facebook/dinov2-large", out_indices=out_indices, apply_layernorm=True, reshape_hidden_states=False ) fusion_hidden_size = 256 neck_hidden_sizes = [256, 512, 1024, 1024] else: - raise NotImplementedError("To do") + raise NotImplementedError(f"Model not supported: {model_name}") config = DepthAnythingConfig( reassemble_hidden_size=backbone_config.hidden_size, @@ -169,9 +172,13 @@ def prepare_img(): name_to_checkpoint = { - "depth-anything-small": "depth_anything_vits14.pth", - "depth-anything-base": "depth_anything_vitb14.pth", - "depth-anything-large": "depth_anything_vitl14.pth", + "depth-anything-small": "pytorch_model.bin", + "depth-anything-base": "pytorch_model.bin", + "depth-anything-large": "pytorch_model.bin", + "depth-anything-v2-small": "depth_anything_v2_vits.pth", + "depth-anything-v2-base": "depth_anything_v2_vitb.pth", + "depth-anything-v2-large": "depth_anything_v2_vitl.pth", + # v2-giant pending } @@ -184,17 +191,23 @@ def convert_dpt_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub, ve # define DPT configuration config = get_dpt_config(model_name) - model_name_to_filename = { - "depth-anything-small": "depth_anything_vits14.pth", - "depth-anything-base": "depth_anything_vitb14.pth", - "depth-anything-large": "depth_anything_vitl14.pth", + model_name_to_repo = { + "depth-anything-small": "LiheYoung/depth_anything_vits14", + 
"depth-anything-base": "LiheYoung/depth_anything_vitb14", + "depth-anything-large": "LiheYoung/depth_anything_vitl14", + "depth-anything-v2-small": "depth-anything/Depth-Anything-V2-Small", + "depth-anything-v2-base": "depth-anything/Depth-Anything-V2-Base", + "depth-anything-v2-large": "depth-anything/Depth-Anything-V2-Large", } # load original state_dict - filename = model_name_to_filename[model_name] + repo_id = model_name_to_repo[model_name] + filename = name_to_checkpoint[model_name] filepath = hf_hub_download( - repo_id="LiheYoung/Depth-Anything", filename=f"checkpoints/{filename}", repo_type="space" + repo_id=repo_id, + filename=f"{filename}", ) + state_dict = torch.load(filepath, map_location="cpu") # rename keys rename_keys = create_rename_keys(config) @@ -247,11 +260,23 @@ def convert_dpt_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub, ve expected_slice = torch.tensor( [[87.9968, 87.7493, 88.2704], [87.1927, 87.6611, 87.3640], [86.7789, 86.9469, 86.7991]] ) + elif model_name == "depth-anything-v2-small": + expected_slice = torch.tensor( + [[2.6751, 2.6211, 2.6571], [2.5820, 2.6138, 2.6271], [2.6160, 2.6141, 2.6306]] + ) + elif model_name == "depth-anything-v2-base": + expected_slice = torch.tensor( + [[4.3576, 4.3723, 4.3908], [4.3231, 4.3146, 4.3611], [4.3016, 4.3170, 4.3121]] + ) + elif model_name == "depth-anything-v2-large": + expected_slice = torch.tensor( + [[162.2751, 161.8504, 162.8788], [160.3138, 160.8050, 161.9835], [159.3812, 159.9884, 160.0768]] + ) else: raise ValueError("Not supported") assert predicted_depth.shape == torch.Size(expected_shape) - assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-6) + assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4) print("Looks ok!") if pytorch_dump_folder_path is not None: @@ -262,8 +287,8 @@ def convert_dpt_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub, ve if push_to_hub: print("Pushing model and processor to hub...") - model.push_to_hub(repo_id=f"LiheYoung/{model_name}-hf") - processor.push_to_hub(repo_id=f"LiheYoung/{model_name}-hf") + model.push_to_hub(repo_id=f"{model_name.title()}-hf") + processor.push_to_hub(repo_id=f"{model_name.title()}-hf") if __name__ == "__main__":