Move model conversion from MO to OpenVINO API (openvinotoolkit#1270)
* Move model conversion from MO to OpenVINO API

notebooks/211-speech-to-text/211-speech-to-text.ipynb
notebooks/215-image-inpainting/215-image-inpainting.ipynb
notebooks/216-attention-center/216-attention-center.ipynb
notebooks/217-vision-deblur/217-vision-deblur.ipynb
notebooks/223-text-prediction/223-text-prediction.ipynb
notebooks/226-yolov7-optimization/226-yolov7-optimization.ipynb
notebooks/228-clip-zero-shot-image-classification/228-clip-zero-shot-convert.ipynb
notebooks/229-distilbert-sequence-classification/229-distilbert-sequence-classification.ipynb
notebooks/231-instruct-pix2pix-image-editing/231-instruct-pix2pix-image-editing.ipynb

* fix comments
sbalandi authored Aug 30, 2023
1 parent f8e9074 commit 9b3fb47
Showing 9 changed files with 123 additions and 108 deletions.
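Every diff below applies the same pattern: the deprecated Model Optimizer (MO) Python entry points (`openvino.tools.mo.convert_model` plus `openvino.runtime.serialize`) are replaced by the unified `openvino` package API. A minimal before/after sketch, assuming OpenVINO 2023.1 and illustrative file paths (not paths from this repository):

```python
# Sketch of the migration applied in each notebook below; paths are illustrative.
import openvino as ov

# Before: the deprecated MO Python API
#   from openvino.tools import mo
#   from openvino.runtime import serialize
#   ov_model = mo.convert_model("model.onnx")
#   serialize(ov_model, "model.xml")

# After: the unified openvino API
ov_model = ov.convert_model("model.onnx")  # returns an ov.Model ready to compile
ov.save_model(ov_model, "model.xml")       # saves IR; compresses weights to FP16 by default
```

Because `ov.save_model` compresses weights to FP16 by default, notebooks that previously passed `compress_to_fp16` to the converter now pass it at save time instead, as the 217-vision-deblur diff below shows.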
28 changes: 13 additions & 15 deletions notebooks/211-speech-to-text/211-speech-to-text.ipynb
@@ -49,7 +49,7 @@
"metadata": {},
"outputs": [],
"source": [
"!pip install -q \"librosa>=0.8.1\""
"!pip install -q \"librosa>=0.8.1\" \"openvino-dev==2023.1.0.dev20230811\" \"onnx\""
]
},
{
@@ -76,8 +76,7 @@
"import librosa.display\n",
"import numpy as np\n",
"import scipy\n",
"from openvino.runtime import Core, serialize, Tensor\n",
"from openvino.tools import mo"
"import openvino as ov"
]
},
{
@@ -255,9 +254,9 @@
" dynamic_axes={\"audio_signal\": {0: \"batch_size\", 2: \"wave_len\"}, \"output\": {0: \"batch_size\", 2: \"wave_len\"}}\n",
" )\n",
" # convert model to OpenVINO Model using model conversion API\n",
" ov_model = mo.convert_model(str(onnx_model_path))\n",
" # serialize model to IR for next usage\n",
" serialize(ov_model, str(converted_model_path))"
" ov_model = ov.convert_model(str(onnx_model_path))\n",
" # save model in IR format for next usage\n",
" ov.save_model(ov_model, str(converted_model_path))"
]
},
{
@@ -636,17 +635,18 @@
},
"outputs": [],
"source": [
"ie = Core()"
"core = ov.Core()"
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "ce3fc33e",
"metadata": {},
"source": [
"You may run the model on multiple devices. By default, it will load the model on CPU (you can choose manually CPU, GPU etc.) or let the engine choose the best available device (AUTO).\n",
"\n",
"To list all available devices that can be used, run `print(ie.available_devices)` command."
"To list all available devices that can be used, run `print(core.available_devices)` command."
]
},
{
@@ -666,7 +666,7 @@
}
],
"source": [
"print(ie.available_devices)"
"print(core.available_devices)"
]
},
{
@@ -686,8 +686,6 @@
"source": [
"import ipywidgets as widgets\n",
"\n",
"core = Core()\n",
"\n",
"device = widgets.Dropdown(\n",
" options=core.available_devices + [\"AUTO\"],\n",
" value='AUTO',\n",
@@ -707,14 +705,14 @@
},
"outputs": [],
"source": [
"model = ie.read_model(\n",
"model = core.read_model(\n",
" model=f\"{model_folder}/public/{model_name}/{precision}/{model_name}.xml\"\n",
")\n",
"model_input_layer = model.input(0)\n",
"shape = model_input_layer.partial_shape\n",
"shape[2] = -1\n",
"model.reshape({model_input_layer: shape})\n",
"compiled_model = ie.compile_model(model=model, device_name=device.value)"
"compiled_model = core.compile_model(model=model, device_name=device.value)"
]
},
{
@@ -738,7 +736,7 @@
"source": [
"output_layer_ir = compiled_model.output(0)\n",
"\n",
"character_probabilities = compiled_model([Tensor(audio)])[output_layer_ir]"
"character_probabilities = compiled_model([ov.Tensor(audio)])[output_layer_ir]"
]
},
{
@@ -854,4 +852,4 @@
},
"nbformat": 4,
"nbformat_minor": 5
}
}
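Beyond the conversion call, the 211 hunks above also move inference to the new namespace (`ov.Core`, `ov.Tensor`). A condensed sketch of that runtime pattern, with a placeholder IR path and placeholder audio data:

```python
# Condensed runtime pattern from 211-speech-to-text; path and input are placeholders.
import numpy as np
import openvino as ov

core = ov.Core()
model = core.read_model("quartznet.xml")  # illustrative IR path

input_port = model.input(0)
shape = input_port.partial_shape
shape[2] = -1                             # make the wave-length axis dynamic
model.reshape({input_port: shape})

compiled_model = core.compile_model(model=model, device_name="AUTO")
output_port = compiled_model.output(0)

audio = np.zeros((1, 64, 1000), dtype=np.float32)  # placeholder mel-spectrogram batch
character_probabilities = compiled_model([ov.Tensor(audio)])[output_port]
```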
11 changes: 5 additions & 6 deletions notebooks/215-image-inpainting/215-image-inpainting.ipynb
@@ -37,8 +37,7 @@
"import matplotlib.pyplot as plt\n",
"import numpy as np\n",
"from zipfile import ZipFile\n",
"from openvino.tools import mo\n",
"from openvino.runtime import Core, Tensor, serialize\n",
"import openvino as ov\n",
"\n",
"sys.path.append(\"../utils\")\n",
"import notebook_utils as utils"
@@ -114,8 +113,8 @@
"\n",
"# Run model conversion API to convert model to OpenVINO IR FP32 format, if the IR file does not exist.\n",
"if not ir_path.exists():\n",
" ov_model = mo.convert_model(model_path, input_shape=[[1,512,680,3],[1,512,680,1]])\n",
" serialize(ov_model, str(ir_path))\n",
" ov_model = ov.convert_model(model_path, input=[[1,512,680,3],[1,512,680,1]])\n",
" ov.save_model(ov_model, str(ir_path))\n",
"else:\n",
" print(f\"{ir_path} already exists.\")"
]
@@ -144,7 +143,7 @@
"metadata": {},
"outputs": [],
"source": [
"core = Core()\n",
"core = ov.Core()\n",
"\n",
"# Read the model.xml and weights file\n",
"model = core.read_model(model=ir_path)"
@@ -415,7 +414,7 @@
}
],
"source": [
"result = compiled_model([Tensor(masked_image.astype(np.float32)), Tensor(mask.astype(np.float32))])[output_layer]\n",
"result = compiled_model([ov.Tensor(masked_image.astype(np.float32)), ov.Tensor(mask.astype(np.float32))])[output_layer]\n",
"result = result.squeeze().astype(np.uint8)\n",
"plt.figure(figsize=(16, 12))\n",
"plt.imshow(cv2.cvtColor(result, cv2.COLOR_BGR2RGB));"
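The 215-image-inpainting change is representative of TensorFlow models with fixed inputs: MO's `input_shape` argument becomes the `input` argument of `ov.convert_model`. A sketch assuming the GMCNN model files are already downloaded (paths are illustrative):

```python
# Sketch of the 215-image-inpainting conversion; the model path is illustrative.
from pathlib import Path

import openvino as ov

model_path = Path("model/public/gmcnn-places2-tf")    # downloaded TF model (assumed)
ir_path = Path("model/public/ir/gmcnn-places2-tf.xml")

if not ir_path.exists():
    # One static shape per input: the masked image and the binary mask.
    ov_model = ov.convert_model(model_path, input=[[1, 512, 680, 3], [1, 512, 680, 1]])
    ov.save_model(ov_model, str(ir_path))
```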
33 changes: 26 additions & 7 deletions notebooks/216-attention-center/216-attention-center.ipynb
@@ -28,6 +28,15 @@
"- [Get result with OpenVINO IR model](#Get-result-with-OpenVINO-IR-model-Uparrow)\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"! pip install \"openvino==2023.1.0.dev20230811\""
]
},
{
"cell_type": "markdown",
"metadata": {},
@@ -59,8 +68,7 @@
"from pathlib import Path\n",
"import matplotlib.pyplot as plt\n",
"\n",
"from openvino.tools import mo\n",
"from openvino.runtime import serialize, Core"
"import openvino as ov"
]
},
{
@@ -85,13 +93,14 @@
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"### Convert Tensorflow Lite model to OpenVINO IR format [$\\Uparrow$](#Table-of-content:)\n",
"\n",
"The attention-center model is pre-trained model in TensorFlow Lite format. In this Notebook the model will be converted to \n",
"OpenVINO IR format with Model Optimizer. This step will be skipped if the model have already been converted. For more information about Model Optimizer, please, see the [Model Optimizer Developer Guide]( https://docs.openvino.ai/2023.0/openvino_docs_MO_DG_Deep_Learning_Model_Optimizer_DevGuide.html). \n",
"OpenVINO IR format with model conversion API. For more information about model conversion, see this [page](https://docs.openvino.ai/2023.0/openvino_docs_model_processing_introduction.html). This step is also skipped if the model is already converted.\n",
"\n",
"Also TFLite models format is supported in OpenVINO by TFLite frontend, so the model can be passed directly to `core.read_model()`. You can find example in [002-openvino-api](https://github.com/openvinotoolkit/openvino_notebooks/tree/main/notebooks/002-openvino-api)."
]
@@ -114,18 +123,19 @@
"\n",
"ir_model_path = Path(\"./model/ir_center_model.xml\")\n",
"\n",
"core = Core()\n",
"core = ov.Core()\n",
"\n",
"if not ir_model_path.exists():\n",
" model = mo.convert_model(tflite_model_path)\n",
" serialize(model, ir_model_path.as_posix())\n",
" model = ov.convert_model(tflite_model_path, input=[('image:0', [1,480,640,3], ov.Type.f32)])\n",
" ov.save_model(model, ir_model_path)\n",
" print(\"IR model saved to {}\".format(ir_model_path))\n",
"else:\n",
" print(\"Read IR model from {}\".format(ir_model_path))\n",
" model = core.read_model(ir_model_path)"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
@@ -307,8 +317,17 @@
"source": [
"import io\n",
"import PIL\n",
"from urllib.request import urlretrieve\n",
"\n",
"img_path = Path(\"data/coco.jpg\")\n",
"img_path.parent.mkdir(parents=True, exist_ok=True)\n",
"urlretrieve(\n",
" \"https://storage.openvinotoolkit.org/repositories/openvino_notebooks/data/data/image/coco.jpg\",\n",
" img_path,\n",
")\n",
"\n",
"# read uploaded image\n",
"image = PIL.Image.open(io.BytesIO(load_file_widget.value[-1]['content'])) if load_file_widget.value else PIL.Image.open(\"../data/image/coco.jpg\")\n",
"image = PIL.Image.open(io.BytesIO(list(load_file_widget.value.values())[-1]['content'])) if load_file_widget.value else PIL.Image.open(img_path)\n",
"image.convert(\"RGB\")\n",
"\n",
"input_image = Image((480, 640), image=(np.ascontiguousarray(image)[:, :, ::-1]).astype(np.uint8))\n",
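In 216-attention-center the source is a TensorFlow Lite model, and the new `input` argument takes structured `(name, shape, type)` tuples instead of MO's string syntax. A sketch with illustrative paths; as the notebook notes, the TFLite frontend also lets you skip explicit conversion and read the `.tflite` file directly:

```python
# Sketch of the 216-attention-center conversion; paths are illustrative.
import openvino as ov

core = ov.Core()
tflite_model_path = "model/center.tflite"

# Explicit conversion with a named input, a fixed shape, and an element type.
ov_model = ov.convert_model(
    tflite_model_path,
    input=[("image:0", [1, 480, 640, 3], ov.Type.f32)],
)
ov.save_model(ov_model, "model/ir_center_model.xml")

# Alternative: the TFLite frontend can load the model without explicit conversion.
direct_model = core.read_model(tflite_model_path)
```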
20 changes: 8 additions & 12 deletions notebooks/217-vision-deblur/217-vision-deblur.ipynb
@@ -79,7 +79,7 @@
"import matplotlib.pyplot as plt\n",
"import numpy as np\n",
"from IPython.display import Markdown, display\n",
"from openvino.runtime import Core\n",
"import openvino as ov\n",
"\n",
"sys.path.append(\"../utils\")\n",
"from notebook_utils import load_image"
@@ -149,7 +149,7 @@
"source": [
"import ipywidgets as widgets\n",
"\n",
"core = Core()\n",
"core = ov.Core()\n",
"\n",
"device = widgets.Dropdown(\n",
" options=core.available_devices + [\"AUTO\"],\n",
@@ -286,7 +286,7 @@
"source": [
"### Convert DeblurGAN-v2 Model to OpenVINO IR format [$\\Uparrow$](#Table-of-content:)\n",
"\n",
"For best results with OpenVINO, it is recommended to convert the model to OpenVINO IR format. To convert the PyTorch model, we will use model conversion Python API. The `mo.convert_model` Python function returns an OpenVINO model ready to load on a device and start making predictions. We can save it on a disk for next usage with `openvino.runtime.serialize`. For more information about model conversion Python API, see this [page](https://docs.openvino.ai/2023.0/openvino_docs_model_processing_introduction.html).\n",
"For best results with OpenVINO, it is recommended to convert the model to OpenVINO IR format. To convert the PyTorch model, we will use model conversion Python API. The `ov.convert_model` Python function returns an OpenVINO model ready to load on a device and start making predictions. We can save the model on the disk for next usage with `ov.save_model`. For more information about model conversion Python API, see this [page](https://docs.openvino.ai/2023.0/openvino_docs_model_processing_introduction.html).\n",
"\n",
"Model conversion may take a while."
]
@@ -298,15 +298,12 @@
"metadata": {},
"outputs": [],
"source": [
"from openvino.tools import mo\n",
"from openvino.runtime import serialize\n",
"\n",
"deblur_gan_model = DeblurV2(\"model/public/deblurgan-v2/ckpt/fpn_mobilenet.h5\", \"fpn_mobilenet\")\n",
"\n",
"with torch.no_grad():\n",
" deblur_gan_model.eval()\n",
" ov_model = mo.convert_model(deblur_gan_model, input_shape=[[1,3,736,1312]], compress_to_fp16=(precision == \"FP16\"))\n",
" serialize(ov_model, model_xml_path)"
" ov_model = ov.convert_model(deblur_gan_model, example_input=torch.ones((1,3,736,1312), dtype=torch.float32), input=[[1,3,736,1312]])\n",
" ov.save_model(ov_model, model_xml_path, compress_to_fp16=(precision == \"FP16\"))"
]
},
{
@@ -317,7 +314,7 @@
"source": [
"## Load the Model [$\\Uparrow$](#Table-of-content:)\n",
"\n",
"Load and compile the DeblurGAN-v2 model in the OpenVINO Runtime with `ie.read_model` and compile it for the specified device with `ie.compile_model`. Get input and output keys and the expected input shape for the model."
"Load and compile the DeblurGAN-v2 model in the OpenVINO Runtime with `core.read_model` and compile it for the specified device with `core.compile_model`. Get input and output keys and the expected input shape for the model."
]
},
{
@@ -327,9 +324,8 @@
"metadata": {},
"outputs": [],
"source": [
"ie = Core()\n",
"model = ie.read_model(model=model_xml_path)\n",
"compiled_model = ie.compile_model(model=model, device_name=device.value)"
"model = core.read_model(model=model_xml_path)\n",
"compiled_model = core.compile_model(model=model, device_name=device.value)"
]
},
{
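For the PyTorch model in 217-vision-deblur, `ov.convert_model` now traces the module from an `example_input`, and FP16 compression moves from a conversion-time flag to a save-time flag. A sketch with a stand-in module, since the notebook's `DeblurV2` class and its checkpoint are not shown here:

```python
# Sketch of the 217-vision-deblur conversion; the Conv2d is a stand-in for
# the notebook's DeblurV2 module loaded from fpn_mobilenet.h5.
import torch
import openvino as ov

model = torch.nn.Conv2d(3, 3, kernel_size=3, padding=1)  # stand-in PyTorch module

with torch.no_grad():
    model.eval()
    ov_model = ov.convert_model(
        model,
        example_input=torch.ones((1, 3, 736, 1312), dtype=torch.float32),
        input=[[1, 3, 736, 1312]],
    )

# compress_to_fp16 is now an option of ov.save_model, not of the converter.
ov.save_model(ov_model, "deblurgan-v2.xml", compress_to_fp16=True)
```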
21 changes: 9 additions & 12 deletions notebooks/223-text-prediction/223-text-prediction.ipynb
@@ -102,7 +102,7 @@
],
"source": [
"# Install Gradio for Interactive Inference and other requirements\n",
"!pip install -q \"openvino-dev>=2023.0.0\"\n",
"!pip install -q \"openvino==2023.1.0.dev20230811\"\n",
"!pip install -q gradio\n",
"!pip install -q transformers[torch] onnx"
]
@@ -171,6 +171,7 @@
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
@@ -181,7 +182,7 @@
"For starting work with GPT-Neo model using OpenVINO, a model should be converted to OpenVINO Intermediate Representation (IR) format. HuggingFace provides a GPT-Neo model in PyTorch format, which is supported in OpenVINO via conversion to ONNX. We use the HuggingFace transformers library's onnx module to export the model to ONNX. `transformers.onnx.export` accepts the preprocessing function for input sample generation (the tokenizer in our case), an instance of the model, ONNX export configuration, the ONNX opset version for export and output path. More information about transformers export to ONNX can be found in HuggingFace [documentation](https://huggingface.co/docs/transformers/serialization).\n",
"\n",
"While ONNX models are directly supported by OpenVINO runtime, it can be useful to convert them to IR format to take advantage of OpenVINO optimization tools and features.\n",
"The `mo.convert_model` Python function of [model conversion API](https://docs.openvino.ai/2023.0/openvino_docs_model_processing_introduction.html) can be used for converting the model. The function returns instance of OpenVINO Model class, which is ready to use in Python interface but can also be serialized to OpenVINO IR format for future execution using `openvino.runtime.serialize`. In our case, the `compress_to_fp16` parameter is enabled for compression model weights to FP16 precision and also specified dynamic input shapes with a possible shape range (from 1 token to a maximum length defined in our processing function) for optimization of memory consumption."
"The `ov.convert_model` Python function of [model conversion API](https://docs.openvino.ai/2023.0/openvino_docs_model_processing_introduction.html) can be used for converting the model. The function returns instance of OpenVINO Model class, which is ready to use in Python interface. The Model can also be save on device in OpenVINO IR format for future execution using `ov.save_model`. In our case dynamic input shapes with a possible shape range (from 1 token to a maximum length defined in our processing function) are specified for optimization of memory consumption."
]
},
{
@@ -200,10 +201,9 @@
],
"source": [
"from pathlib import Path\n",
"from openvino.runtime import serialize\n",
"from openvino.tools import mo\n",
"from transformers.onnx import export, FeaturesManager\n",
"\n",
"import openvino as ov\n",
"\n",
"# define path for saving onnx model\n",
"onnx_path = Path(\"model/text_generator.onnx\")\n",
Expand All @@ -223,12 +223,12 @@
"\n",
"# convert model to openvino\n",
"if model_name.value == \"PersonaGPT (Converastional)\":\n",
" ov_model = mo.convert_model(onnx_path, compress_to_fp16=True, input=\"input_ids[1,-1],attention_mask[1,-1]\")\n",
" ov_model = ov.convert_model(onnx_path, input=[('input_ids', [1, -1], ov.Type.i64), ('attention_mask', [1,-1], ov.Type.i64)])\n",
"else:\n",
" ov_model = mo.convert_model(onnx_path, compress_to_fp16=True, input=\"input_ids[1,1..128],attention_mask[1,1..128]\")\n",
" ov_model = ov.convert_model(onnx_path, input=[('input_ids', [1, ov.Dimension(1,128)], ov.Type.i64), ('attention_mask', [1, ov.Dimension(1,128)], ov.Type.i64)])\n",
"\n",
"# serialize openvino model\n",
"serialize(ov_model, str(model_path))"
"ov.save_model(ov_model, str(model_path))"
]
},
{
@@ -271,10 +271,10 @@
}
],
"source": [
"from openvino.runtime import Core\n",
"import ipywidgets as widgets\n",
"\n",
"core = Core()\n",
"# initialize openvino core\n",
"core = ov.Core()\n",
"\n",
"device = widgets.Dropdown(\n",
" options=core.available_devices + [\"AUTO\"],\n",
@@ -292,9 +292,6 @@
"metadata": {},
"outputs": [],
"source": [
"# initialize openvino core\n",
"core = Core()\n",
"\n",
"# read the model and corresponding weights from file\n",
"model = core.read_model(model_path)"
]
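The 223-text-prediction diff shows how MO's string syntax for bounded dynamic shapes (`input_ids[1,1..128],attention_mask[1,1..128]`) maps onto the structured form, with `ov.Dimension(1, 128)` for the ranged axis. A sketch assuming the notebook's ONNX export step has already produced `model/text_generator.onnx`:

```python
# Sketch of the 223-text-prediction conversion; assumes the ONNX file exists.
import openvino as ov

ov_model = ov.convert_model(
    "model/text_generator.onnx",
    input=[
        ("input_ids", [1, ov.Dimension(1, 128)], ov.Type.i64),
        ("attention_mask", [1, ov.Dimension(1, 128)], ov.Type.i64),
    ],
)
ov.save_model(ov_model, "model/text_generator.xml")
```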
(Diffs for the remaining four notebooks, 226-yolov7-optimization, 228-clip-zero-shot-convert, 229-distilbert-sequence-classification, and 231-instruct-pix2pix-image-editing, are not shown.)