reduce onnx dependency
eaidova committed Dec 4, 2024
1 parent 97e9141 commit b95d78e
Showing 2 changed files with 13 additions and 12 deletions.
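The pattern behind both files is the same: module-level imports of onnx (and of the optimum.exporters.onnx helpers that pull it in) are replaced with imports local to the functions that need them, so importing the OpenVINO exporter no longer requires onnx at all. A minimal, self-contained sketch of the idea, using the stdlib json module as a stand-in for the optional dependency (the names below are illustrative, not from this repo):

# Minimal sketch of the deferred-import pattern applied in this commit,
# with stdlib `json` standing in for an optional dependency such as onnx.
#
# Before: a module-level `import json` is paid by every importer of this
# module, even callers that never touch the JSON code path. After: the
# import moves inside the only function that needs it.

def dump_config(config: dict) -> str:
    import json  # deferred: resolved only when dump_config() actually runs

    return json.dumps(config, indent=2)


print(dump_config({"task": "text-generation"}))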
optimum/exporters/openvino/__main__.py (1 addition, 1 deletion)
@@ -29,7 +29,6 @@
 from optimum.exporters import TasksManager
 from optimum.exporters.onnx.base import OnnxConfig
 from optimum.exporters.onnx.constants import SDPA_ARCHS_ONNX_EXPORT_NOT_SUPPORTED
-from optimum.exporters.openvino.convert import export_from_model
 from optimum.intel.utils.import_utils import (
     is_nncf_available,
     is_openvino_tokenizers_available,
@@ -185,6 +184,7 @@ def main_export(
         >>> main_export("gpt2", output="gpt2_ov/")
         ```
     """
+    from optimum.exporters.openvino.convert import export_from_model

     if use_auth_token is not None:
         warnings.warn(
optimum/exporters/openvino/convert.py (12 additions, 11 deletions)
@@ -19,19 +19,13 @@
 import os
 from pathlib import Path
 from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union

-import onnx
 from transformers.generation import GenerationMixin
 from transformers.utils import is_tf_available, is_torch_available

 from openvino.runtime import Model, save_model
 from openvino.runtime.exceptions import OVTypeError
 from openvino.tools.ovc import convert_model
 from optimum.exporters import TasksManager
-from optimum.exporters.onnx.base import OnnxConfig
-from optimum.exporters.onnx.convert import check_dummy_inputs_are_allowed
-from optimum.exporters.onnx.convert import export_pytorch as export_pytorch_to_onnx
-from optimum.exporters.onnx.convert import export_tensorflow as export_tensorflow_onnx
 from optimum.exporters.utils import (
     _get_submodels_and_export_configs as _default_get_submodels_and_export_configs,
 )
@@ -90,6 +84,7 @@

 if TYPE_CHECKING:
     from optimum.intel.openvino.configuration import OVConfig
+    from optimum.exporters.onnx.base import OnnxConfig


 def _set_runtime_options(
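The hunk above is the second half of the pattern: OnnxConfig is still wanted as a type annotation, so its import moves under if TYPE_CHECKING: and the later hunks quote the annotation (config: "OnnxConfig"). A short sketch of why both pieces are needed; defining the function works even when optimum is not installed:

from typing import TYPE_CHECKING

if TYPE_CHECKING:  # False at runtime, True for static checkers like mypy
    from optimum.exporters.onnx.base import OnnxConfig


def export(config: "OnnxConfig") -> None:  # quoted, so no NameError at def time
    ...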
@@ -111,7 +106,7 @@ def _save_model(
     path: str,
     ov_config: Optional["OVConfig"] = None,
     library_name: Optional[str] = None,
-    config: OnnxConfig = None,
+    config: "OnnxConfig" = None,
 ):
     compress_to_fp16 = ov_config is not None and ov_config.dtype == "fp16"
     model = _add_version_info_to_model(model, library_name)
@@ -125,7 +120,7 @@

 def export(
     model: Union["PreTrainedModel", "TFPreTrainedModel", "ModelMixin", "DiffusionPipeline"],
-    config: OnnxConfig,
+    config: "OnnxConfig",
     output: Path,
     opset: Optional[int] = None,
     device: str = "cpu",
@@ -208,7 +203,7 @@ def export(

 def export_tensorflow(
     model: Union["PreTrainedModel", "ModelMixin"],
-    config: OnnxConfig,
+    config: "OnnxConfig",
     opset: int,
     output: Path,
     ov_config: Optional["OVConfig"] = None,
@@ -228,6 +223,8 @@ def export_tensorflow(
         output_names: list of output names from ONNX configuration
     bool: True if the model was exported successfully.
     """
+    from optimum.exporters.onnx.convert import export_tensorflow as export_tensorflow_onnx
+
     onnx_path = Path(output).with_suffix(".onnx")
     input_names, output_names = export_tensorflow_onnx(model, config, opset, onnx_path)
     ov_model = convert_model(str(onnx_path))
@@ -248,7 +245,7 @@ def export_tensorflow(

 def export_pytorch_via_onnx(
     model: Union["PreTrainedModel", "ModelMixin"],
-    config: OnnxConfig,
+    config: "OnnxConfig",
     opset: int,
     output: Path,
     device: str = "cpu",
@@ -284,6 +281,7 @@ def export_pytorch_via_onnx(
         the ONNX configuration and boolean flag - was legacy ONNX path were applied to model or not.
     """
     import torch
+    from optimum.exporters.onnx.convert import export_pytorch as export_pytorch_to_onnx

     output = Path(output)
     orig_torch_onnx_export = torch.onnx.export
@@ -313,7 +311,7 @@

 def export_pytorch(
     model: Union["PreTrainedModel", "ModelMixin"],
-    config: OnnxConfig,
+    config: "OnnxConfig",
     opset: int,
     output: Path,
     device: str = "cpu",
@@ -354,6 +352,7 @@ def export_pytorch(
     """
     import torch
     from torch.utils._pytree import tree_map
+    from optimum.exporters.onnx.convert import check_dummy_inputs_are_allowed

     logger.info(f"Using framework PyTorch: {torch.__version__}")
     output = Path(output)
@@ -869,6 +868,8 @@ def _add_version_info_to_model(model: Model, library_name: Optional[str] = None):
             model.set_rt_info(_nncf_version, ["optimum", "nncf_version"])
         input_model = rt_info["conversion_parameters"].get("input_model", None)
         if input_model is not None and "onnx" in input_model.value:
+            import onnx
+
             model.set_rt_info(onnx.__version__, ["optimum", "onnx_version"])

     except Exception:
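The last hunk defers import onnx into the one branch that records the version string; the surrounding try/except Exception already swallows failures, so a missing onnx cannot break model saving. A variant sketch of the same idea as a standalone helper — an illustration under that assumption, not the commit's code:

from typing import Optional


def get_onnx_version() -> Optional[str]:
    # Import only on the code path that needs onnx; a missing package
    # degrades to None instead of failing at module import time.
    try:
        import onnx
    except ImportError:
        return None
    return onnx.__version__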
