diff --git a/tf/README.md b/tf/README.md
new file mode 100644
index 0000000..0702ac0
--- /dev/null
+++ b/tf/README.md
@@ -0,0 +1,111 @@
+## Towards Robust Monocular Depth Estimation: Mixing Datasets for Zero-shot Cross-dataset Transfer
+
+### TensorFlow inference using `.pb` and `.onnx` models
+
+1. [Run inference on a TensorFlow model using TensorFlow](#run-inference-on-a-tensorflow-model-using-tensorflow)
+
+2. [Run inference on an ONNX model using TensorFlow](#run-inference-on-an-onnx-model-using-tensorflow)
+
+3. [Make an ONNX model from the downloaded PyTorch model file](#make-an-onnx-model-from-the-downloaded-pytorch-model-file)
+
+
+### Run inference on a TensorFlow model using TensorFlow
+
+1) Download the model weights [model-f46da743.pb](https://github.com/AlexeyAB/MiDaS/releases/download/v2/model-f46da743.pb) and place the
+file in the `/tf/` folder.
+
+2) Set up dependencies:
+
+```shell
+# install OpenCV
+pip install --upgrade pip
+pip install opencv-python
+
+# install TensorFlow
+pip install grpcio tensorflow tensorflow-addons
+```
+
+#### Usage
+
+1) Place one or more input images in the folder `tf/input`.
+
+2) Run the model:
+
+    ```shell
+    python tf/run_pb.py
+    ```
+
+3) The resulting inverse depth maps are written to the `tf/output` folder.
+
+
+### Run inference on an ONNX model using TensorFlow
+
+1) Download the model weights [model-f46da743.onnx](https://github.com/AlexeyAB/MiDaS/releases/download/v2/model-f46da743.onnx) and place the
+file in the `/tf/` folder.
+
+2) Set up dependencies:
+
+```shell
+# install OpenCV
+pip install --upgrade pip
+pip install opencv-python
+
+# install TensorFlow
+pip install grpcio tensorflow tensorflow-addons
+
+# install ONNX and ONNX-TensorFlow
+pip install onnx
+
+git clone https://github.com/onnx/onnx-tensorflow.git && cd onnx-tensorflow && pip install -e . && cd ..
+```
+
+#### Usage
+
+1) Place one or more input images in the folder `tf/input`.
+
+2) Run the model:
+
+    ```shell
+    python tf/run_onnx.py
+    ```
+
+3) The resulting inverse depth maps are written to the `tf/output` folder.
+
+
+
+### Make an ONNX model from the downloaded PyTorch model file
+
+1) Download the model weights [model-f46da743.pt](https://github.com/intel-isl/MiDaS/releases/download/v2/model-f46da743.pt) and place the
+file in the root folder.
+
+2) Set up dependencies:
+
+```shell
+# install OpenCV
+pip install --upgrade pip
+pip install opencv-python
+
+# install ONNX
+pip install onnx
+
+# install PyTorch and TorchVision
+pip install torch==1.5.1+cu101 torchvision==0.6.1+cu101 -f https://download.pytorch.org/whl/torch_stable.html
+```
+
+#### Usage
+
+1) Run the converter:
+
+    ```shell
+    python tf/make_onnx_model.py
+    ```
+
+2) The resulting `model-f46da743.onnx` file is written to the `/tf/` folder.
+
+
+### Requirements
+
+The code was tested with Python 3.6.9, PyTorch 1.5.1, TensorFlow 2.2.0, TensorFlow-addons 0.8.3, ONNX 1.7.0, ONNX-TensorFlow (GitHub master, 17.07.2020), and OpenCV 4.3.0.
+
+
\ No newline at end of file
diff --git a/tf/run_onnx.py b/tf/run_onnx.py
index 3816add..6d8e5e6 100644
--- a/tf/run_onnx.py
+++ b/tf/run_onnx.py
@@ -8,7 +8,7 @@ import numpy as np
 sys.path
 sys.path.append("C:\Intel\onnx_tf\onnx-tensorflow")
 
-print(sys.path)
+#print(sys.path)
 
 import onnx
 import onnx_tf
diff --git a/tf/run_pb.py b/tf/run_pb.py
index 4c158bb..a86d926 100644
--- a/tf/run_pb.py
+++ b/tf/run_pb.py
@@ -25,6 +25,8 @@ def run(input_path, output_path, model_path):
         try:
             for gpu in gpus:
                 tf.config.experimental.set_memory_growth(gpu, True)
+                tf.config.experimental.set_virtual_device_configuration(gpu,
+                    [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=4000)])
         except RuntimeError as e:
             print(e)
 
@@ -37,10 +39,12 @@ def run(input_path, output_path, model_path):
         graph_def.ParseFromString(f.read())
         tf.import_graph_def(graph_def, name='')
 
-    #output_layer = '1191:0'
-    #input_node = 'input.1:0'
-    output_layer = '1195:0'
+    #output_layer = '1195:0'
+    #input_node = '0:0'
+    model_operations = tf.compat.v1.get_default_graph().get_operations()
     input_node = '0:0'
+    output_layer = model_operations[len(model_operations) - 1].name + ':0'
+    print("Last layer name: ", output_layer)
 
     resize_image = Resize(
         384,
@@ -68,25 +72,21 @@ def compose2(f1, f2):
 
     print("start processing")
 
-    for ind, img_name in enumerate(img_names):
+    with tf.compat.v1.Session() as sess:
+        try:
+            # load images
+            for ind, img_name in enumerate(img_names):
 
-        print("  processing {} ({}/{})".format(img_name, ind + 1, num_images))
+                print("  processing {} ({}/{})".format(img_name, ind + 1, num_images))
 
-        # input
+                # input
+                img = utils.read_image(img_name)
+                img_input = transform({"image": img})["image"]
 
-        img = utils.read_image(img_name)
-        img_input = transform({"image": img})["image"]
-
-        # compute
-        with tf.compat.v1.Session() as sess:
-            try:
-                prob_tensor = sess.graph.get_tensor_by_name(output_layer)
-                prediction, = sess.run(prob_tensor, {input_node: [img_input] })
-                prediction = prediction.reshape(384, 384)
-            except KeyError:
-                print ("Couldn't find classification output layer: " + output_layer + ".")
-                print ("Verify this a model exported from an Object Detection project.")
-                exit(-1)
+                # compute
+                prob_tensor = sess.graph.get_tensor_by_name(output_layer)
+                prediction, = sess.run(prob_tensor, {input_node: [img_input] })
+                prediction = prediction.reshape(384, 384)
 
                 # output
                 filename = os.path.join(
@@ -94,6 +94,10 @@ def compose2(f1, f2):
                 )
                 utils.write_depth(filename, prediction, bits=2)
 
+        except KeyError:
+            print("Couldn't find input node: " + input_node + " or output layer: " + output_layer + ".")
+            exit(-1)
+
     print("finished")
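Note on the `run_pb.py` change above: the `tf.compat.v1.Session` is now created once, outside the image loop, and the output tensor name is derived from the last operation of the imported graph instead of being hard-coded as `'1195:0'`. The snippet below is a minimal, self-contained sketch of that pattern using the same calls as `run_pb.py`; the weights path, the zero-filled dummy input, and the `disable_eager_execution()` call are illustrative assumptions and are not part of the script itself.

```python
import numpy as np
import tensorflow as tf

# v1-style graph loading and sessions need eager execution disabled in TF 2.x
tf.compat.v1.disable_eager_execution()

# import the frozen .pb graph into the default graph
graph_def = tf.compat.v1.GraphDef()
with tf.io.gfile.GFile("tf/model-f46da743.pb", "rb") as f:  # assumed weights location
    graph_def.ParseFromString(f.read())
tf.import_graph_def(graph_def, name='')

# derive the output tensor name from the last graph operation instead of hard-coding it
model_operations = tf.compat.v1.get_default_graph().get_operations()
output_layer = model_operations[-1].name + ':0'
input_node = '0:0'

# one session reused for all images; a zero tensor stands in for a preprocessed 384x384 image
with tf.compat.v1.Session() as sess:
    prob_tensor = sess.graph.get_tensor_by_name(output_layer)
    img_input = np.zeros((3, 384, 384), dtype=np.float32)
    prediction, = sess.run(prob_tensor, {input_node: [img_input]})
    prediction = prediction.reshape(384, 384)  # inverse depth map
```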