Merge pull request #35 from AlexeyAB/master
TensorFlow inference .pb and .onnx models
Showing 8 changed files with 745 additions and 0 deletions.
@@ -0,0 +1,111 @@
## Towards Robust Monocular Depth Estimation: Mixing Datasets for Zero-shot Cross-dataset Transfer

### TensorFlow inference using `.pb` and `.onnx` models

1. [Run inference on TensorFlow-model by using TensorFlow](#run-inference-on-tensorflow-model-by-using-tensorflow)

2. [Run inference on ONNX-model by using TensorFlow](#run-inference-on-onnx-model-by-using-tensorflow)

3. [Make ONNX model from downloaded PyTorch model file](#make-onnx-model-from-downloaded-pytorch-model-file)


### Run inference on TensorFlow-model by using TensorFlow

1) Download the model weights [model-f46da743.pb](https://github.com/AlexeyAB/MiDaS/releases/download/v2/model-f46da743.pb) and place the
file in the `/tf/` folder.

2) Set up dependencies:

```shell
# install OpenCV
pip install --upgrade pip
pip install opencv-python

# install TensorFlow
pip install grpcio tensorflow tensorflow-addons
```

#### Usage

1) Place one or more input images in the folder `tf/input`.

2) Run the model:

```shell
python tf/run_pb.py
```

3) The resulting inverse depth maps are written to the `tf/output` folder. A rough sketch of the frozen-graph loading that `tf/run_pb.py` performs follows below.
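
`tf/run_pb.py` itself is not shown in this diff, so purely for orientation, here is a minimal sketch of how a frozen `.pb` graph is typically loaded and run with TensorFlow 2.x. The tensor names `"0:0"`/`"Identity:0"`, the file paths, the NCHW layout, and the plain resize/scale preprocessing are placeholder assumptions for illustration, not the script's actual behaviour.

```python
# Hypothetical sketch only -- not the repository's tf/run_pb.py.
# Loads a frozen GraphDef with TensorFlow 2.x and runs one forward pass.
import cv2
import numpy as np
import tensorflow as tf


def wrap_frozen_graph(graph_def, inputs, outputs):
    # Import the GraphDef into a ConcreteFunction and prune it to the
    # requested input/output tensors (standard TF1-to-TF2 migration pattern).
    wrapped = tf.compat.v1.wrap_function(
        lambda: tf.compat.v1.import_graph_def(graph_def, name=""), [])
    return wrapped.prune(
        tf.nest.map_structure(wrapped.graph.as_graph_element, inputs),
        tf.nest.map_structure(wrapped.graph.as_graph_element, outputs))


graph_def = tf.compat.v1.GraphDef()
with open("tf/model-f46da743.pb", "rb") as f:
    graph_def.ParseFromString(f.read())

# Placeholder tensor names -- inspect the graph to find the real ones.
model = wrap_frozen_graph(graph_def, inputs="0:0", outputs=["Identity:0"])

img = cv2.cvtColor(cv2.imread("tf/input/example.jpg"), cv2.COLOR_BGR2RGB)
img = cv2.resize(img, (384, 384)).astype(np.float32) / 255.0
batch = np.transpose(img, (2, 0, 1))[None]  # 1 x 3 x 384 x 384 (NCHW assumed)

inverse_depth = model(tf.constant(batch))[0].numpy().squeeze()
```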

### Run inference on ONNX-model by using TensorFlow

1) Download the model weights [model-f46da743.onnx](https://github.com/AlexeyAB/MiDaS/releases/download/v2/model-f46da743.onnx) and place the
file in the `/tf/` folder.

2) Set up dependencies:

```shell
# install OpenCV
pip install --upgrade pip
pip install opencv-python
# install TensorFlow
pip install grpcio tensorflow tensorflow-addons
# install ONNX and ONNX-TensorFlow
pip install onnx
git clone https://github.com/onnx/onnx-tensorflow.git && cd onnx-tensorflow && pip install -e . && cd ..
```

#### Usage

1) Place one or more input images in the folder `tf/input`.

2) Run the model:

```shell
python tf/run_onnx.py
```

3) The resulting inverse depth maps are written to the `tf/output` folder. The core ONNX-to-TensorFlow calls are sketched below; the full script is included further down in this commit.
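
For orientation, the ONNX route boils down to three calls from `onnx`/`onnx-tensorflow`; the full `tf/run_onnx.py` added by this commit (with the real preprocessing and output writing) appears further down. The dummy zero input and `"CPU"` device here are placeholders.

```python
# Condensed sketch of the ONNX-via-TensorFlow path; see tf/run_onnx.py below.
import numpy as np
import onnx
import onnx_tf

model = onnx.load("model-f46da743.onnx")        # load the exported graph
onnx.checker.check_model(model)                 # sanity-check it
tf_rep = onnx_tf.backend.prepare(model, "CPU")  # or "CUDA:0"

dummy = np.zeros((1, 3, 384, 384), dtype=np.float32)  # placeholder NCHW batch
inverse_depth = np.array(tf_rep.run(dummy)).reshape(384, 384)
```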

### Make ONNX model from downloaded PyTorch model file

1) Download the model weights [model-f46da743.pt](https://github.com/intel-isl/MiDaS/releases/download/v2/model-f46da743.pt) and place the
file in the root folder.

2) Set up dependencies:

```shell
# install OpenCV
pip install --upgrade pip
pip install opencv-python
# install ONNX
pip install onnx
# install PyTorch TorchVision
pip install torch==1.5.1+cu101 torchvision==0.6.1+cu101 -f https://download.pytorch.org/whl/torch_stable.html
```

#### Usage

1) Run the converter:

```shell
python tf/make_onnx_model.py
```

2) The resulting `model-f46da743.onnx` file is written to the `/tf/` folder. A trimmed sketch of the export call follows below.
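
The conversion itself boils down to tracing the network with a dummy 1×3×384×384 input and exporting at opset 9, as the `tf/make_onnx_model.py` reproduced further down does. The trimmed snippet below assumes the repository's `MidasNet` is importable from the root folder; note that without the `blocks.py` patching performed by the full script, the upsampling layers may not export at opset 9.

```python
# Trimmed illustration of the export step; see tf/make_onnx_model.py below
# for the full version, including the temporary blocks.py patching.
import torch
from midas.midas_net import MidasNet  # assumes the repo root is on sys.path

model = MidasNet("model-f46da743.pt", non_negative=True)
model.eval()

dummy = torch.zeros(1, 3, 384, 384)  # trace input, NCHW
torch.onnx.export(model, dummy, "model-f46da743.onnx", opset_version=9)
```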

### Requirements

The code was tested with Python 3.6.9, PyTorch 1.5.1, TensorFlow 2.2.0, TensorFlow-addons 0.8.3, ONNX 1.7.0, ONNX-TensorFlow (GitHub master, 17.07.2020) and OpenCV 4.3.0.
@@ -0,0 +1,112 @@
"""Compute depth maps for images in the input folder. | ||
""" | ||
import os | ||
import ntpath | ||
import glob | ||
import torch | ||
import utils | ||
import cv2 | ||
import numpy as np | ||
from torchvision.transforms import Compose, Normalize | ||
from torchvision import transforms | ||
|
||
from shutil import copyfile | ||
import fileinput | ||
import sys | ||
sys.path.append(os.getcwd() + '/..') | ||
|
||
def modify_file(): | ||
modify_filename = '../midas/blocks.py' | ||
copyfile(modify_filename, modify_filename+'.bak') | ||
|
||
with open(modify_filename, 'r') as file : | ||
filedata = file.read() | ||
|
||
filedata = filedata.replace('align_corners=True', 'align_corners=False') | ||
filedata = filedata.replace('import torch.nn as nn', 'import torch.nn as nn\nimport torchvision.models as models') | ||
filedata = filedata.replace('torch.hub.load("facebookresearch/WSL-Images", "resnext101_32x8d_wsl")', 'models.resnext101_32x8d()') | ||
|
||
with open(modify_filename, 'w') as file: | ||
file.write(filedata) | ||
|
||
def restore_file(): | ||
modify_filename = '../midas/blocks.py' | ||
copyfile(modify_filename+'.bak', modify_filename) | ||
|
||
modify_file() | ||
|
||
from midas.midas_net import MidasNet | ||
from midas.transforms import Resize, NormalizeImage, PrepareForNet | ||
|
||
restore_file() | ||
|
||
|
||
class MidasNet_preprocessing(MidasNet): | ||
"""Network for monocular depth estimation. | ||
""" | ||
def forward(self, x): | ||
"""Forward pass. | ||
Args: | ||
x (tensor): input data (image) | ||
Returns: | ||
tensor: depth | ||
""" | ||
|
||
mean = torch.tensor([0.485, 0.456, 0.406]) | ||
std = torch.tensor([0.229, 0.224, 0.225]) | ||
x.sub_(mean[None, :, None, None]).div_(std[None, :, None, None]) | ||
|
||
return MidasNet.forward(self, x) | ||
|
||
|
||
def run(model_path): | ||
"""Run MonoDepthNN to compute depth maps. | ||
Args: | ||
model_path (str): path to saved model | ||
""" | ||
print("initialize") | ||
|
||
# select device | ||
|
||
# load network | ||
#model = MidasNet(model_path, non_negative=True) | ||
model = MidasNet_preprocessing(model_path, non_negative=True) | ||
|
||
model.eval() | ||
|
||
print("start processing") | ||
|
||
# input | ||
img_input = np.zeros((3, 384, 384), np.float32) | ||
|
||
# compute | ||
with torch.no_grad(): | ||
sample = torch.from_numpy(img_input).unsqueeze(0) | ||
prediction = model.forward(sample) | ||
prediction = ( | ||
torch.nn.functional.interpolate( | ||
prediction.unsqueeze(1), | ||
size=img_input.shape[:2], | ||
mode="bicubic", | ||
align_corners=False, | ||
) | ||
.squeeze() | ||
.cpu() | ||
.numpy() | ||
) | ||
|
||
torch.onnx.export(model, sample, ntpath.basename(model_path).rsplit('.', 1)[0]+'.onnx', opset_version=9) | ||
|
||
print("finished") | ||
|
||
|
||
if __name__ == "__main__": | ||
# set paths | ||
# MODEL_PATH = "model.pt" | ||
MODEL_PATH = "../model-f46da743.pt" | ||
|
||
# compute depth maps | ||
run(MODEL_PATH) |
@@ -0,0 +1,100 @@
"""Compute depth maps for images in the input folder. | ||
""" | ||
import os | ||
import glob | ||
import utils | ||
import cv2 | ||
import sys | ||
import numpy as np | ||
sys.path | ||
sys.path.append("C:\Intel\onnx_tf\onnx-tensorflow") | ||
#print(sys.path) | ||
|
||
import onnx | ||
import onnx_tf | ||
import tensorflow as tf | ||
|
||
from transforms import Resize, NormalizeImage, PrepareForNet | ||
|
||
|
||
def run(input_path, output_path, model_path): | ||
"""Run MonoDepthNN to compute depth maps. | ||
Args: | ||
input_path (str): path to input folder | ||
output_path (str): path to output folder | ||
model_path (str): path to saved model | ||
""" | ||
print("initialize") | ||
|
||
# select device | ||
device = "CUDA:0" | ||
#device = "CPU" | ||
print("device: %s" % device) | ||
|
||
# load network | ||
print("loading model...") | ||
model = onnx.load(model_path) | ||
print("checking model...") | ||
onnx.checker.check_model(model) | ||
print("preparing model...") | ||
tf_rep = onnx_tf.backend.prepare(model, device) | ||
|
||
print('inputs:', tf_rep.inputs) | ||
print('outputs:', tf_rep.outputs) | ||
|
||
resize_image = Resize( | ||
384, | ||
384, | ||
resize_target=None, | ||
keep_aspect_ratio=False, | ||
ensure_multiple_of=32, | ||
resize_method="upper_bound", | ||
image_interpolation_method=cv2.INTER_CUBIC, | ||
) | ||
|
||
def compose2(f1, f2): | ||
return lambda x: f2(f1(x)) | ||
|
||
transform = compose2(resize_image, PrepareForNet()) | ||
|
||
# get input | ||
img_names = glob.glob(os.path.join(input_path, "*")) | ||
num_images = len(img_names) | ||
|
||
# create output folder | ||
os.makedirs(output_path, exist_ok=True) | ||
|
||
print("start processing") | ||
|
||
for ind, img_name in enumerate(img_names): | ||
|
||
print(" processing {} ({}/{})".format(img_name, ind + 1, num_images)) | ||
|
||
# input | ||
|
||
img = utils.read_image(img_name) | ||
img_input = transform({"image": img})["image"] | ||
|
||
# compute | ||
output = tf_rep.run(img_input.reshape(1, 3, 384, 384)) | ||
prediction = np.array(output).reshape(384, 384) | ||
prediction = cv2.resize(prediction, (img.shape[1], img.shape[0]), interpolation=cv2.INTER_CUBIC) | ||
|
||
# output | ||
filename = os.path.join( | ||
output_path, os.path.splitext(os.path.basename(img_name))[0] | ||
) | ||
utils.write_depth(filename, prediction, bits=2) | ||
|
||
print("finished") | ||
|
||
|
||
if __name__ == "__main__": | ||
# set paths | ||
INPUT_PATH = "input" | ||
OUTPUT_PATH = "output" | ||
MODEL_PATH = "model-f46da743.onnx" | ||
|
||
# compute depth maps | ||
run(INPUT_PATH, OUTPUT_PATH, MODEL_PATH) |