fix code style
eaidova committed Dec 4, 2024
1 parent c26a450 commit 5af0206
Showing 4 changed files with 22 additions and 16 deletions.
6 changes: 3 additions & 3 deletions optimum/exporters/openvino/model_configs.py
```diff
@@ -112,9 +112,9 @@ def init_model_configs():
         "transformers",
         "Qwen2VLForConditionalGeneration",
     )
-    TasksManager._TRANSFORMERS_TASKS_TO_MODEL_LOADERS["image-text-to-text"] = (
-        TasksManager._TRANSFORMERS_TASKS_TO_MODEL_LOADERS["text-generation"]
-    )
+    TasksManager._TRANSFORMERS_TASKS_TO_MODEL_LOADERS[
+        "image-text-to-text"
+    ] = TasksManager._TRANSFORMERS_TASKS_TO_MODEL_LOADERS["text-generation"]
 
     supported_model_types = [
         "_SUPPORTED_MODEL_TYPE",
```
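For context, the restyled statement aliases one task to another task's model loader. A simplified sketch of that dict-aliasing pattern, with TasksManager internals reduced to a plain dict for illustration:

```python
# Simplified sketch, not the real TasksManager: the registry behaves like a
# plain dict, and the "image-text-to-text" task is pointed at the loader
# already registered for "text-generation".
_TASKS_TO_MODEL_LOADERS = {"text-generation": "AutoModelForCausalLM"}
_TASKS_TO_MODEL_LOADERS["image-text-to-text"] = _TASKS_TO_MODEL_LOADERS["text-generation"]

assert _TASKS_TO_MODEL_LOADERS["image-text-to-text"] == "AutoModelForCausalLM"
```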
21 changes: 10 additions & 11 deletions optimum/exporters/openvino/model_patcher.py
```diff
@@ -423,9 +423,9 @@ def _llama_gemma_update_causal_mask_legacy(self, attention_mask, input_tensor, c
                 offset = 0
             mask_shape = attention_mask.shape
             mask_slice = (attention_mask.eq(0.0)).to(dtype=dtype) * min_dtype
-            causal_mask[: mask_shape[0], : mask_shape[1], offset : mask_shape[2] + offset, : mask_shape[3]] = (
-                mask_slice
-            )
+            causal_mask[
+                : mask_shape[0], : mask_shape[1], offset : mask_shape[2] + offset, : mask_shape[3]
+            ] = mask_slice
 
     if (
         self.config._attn_implementation == "sdpa"
```
```diff
@@ -2060,9 +2060,9 @@ def _dbrx_update_causal_mask_legacy(
                 offset = 0
             mask_shape = attention_mask.shape
             mask_slice = (attention_mask.eq(0.0)).to(dtype=dtype) * min_dtype
-            causal_mask[: mask_shape[0], : mask_shape[1], offset : mask_shape[2] + offset, : mask_shape[3]] = (
-                mask_slice
-            )
+            causal_mask[
+                : mask_shape[0], : mask_shape[1], offset : mask_shape[2] + offset, : mask_shape[3]
+            ] = mask_slice
 
     if (
         self.config._attn_implementation == "sdpa"
```
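Both hunks above restyle the same 4D slice assignment without changing behavior. A minimal runnable sketch of what that assignment does, with illustrative shapes and values:

```python
# Illustrative only: write a shorter 4D padding mask into the corner of a
# larger causal-mask buffer, as the restyled slice assignment above does.
import torch

dtype = torch.float32
min_dtype = torch.finfo(dtype).min
causal_mask = torch.full((1, 1, 4, 6), min_dtype)  # (batch, heads, query_len, kv_len)

attention_mask = torch.ones(1, 1, 2, 4)  # shorter 4D mask; 1.0 = attend
attention_mask[..., -1] = 0.0            # block the last key position

offset = 0
mask_shape = attention_mask.shape
mask_slice = (attention_mask.eq(0.0)).to(dtype=dtype) * min_dtype  # 0 -> min_dtype, 1 -> 0
causal_mask[: mask_shape[0], : mask_shape[1], offset : mask_shape[2] + offset, : mask_shape[3]] = mask_slice
print(causal_mask[0, 0])  # attended positions are 0.0, blocked ones min_dtype
```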
```diff
@@ -3386,10 +3386,9 @@ class Qwen2VLLanguageModelPatcher(DecoderModelPatcher):
     def __init__(
         self,
         config: OnnxConfig,
-        model: PreTrainedModel | TFPreTrainedModel,
-        model_kwargs: Dict[str, Any] | None = None,
+        model: Union[PreTrainedModel, TFPreTrainedModel],
+        model_kwargs: Dict[str, Any] = None,
     ):
-
         model.__orig_forward = model.forward
 
         def forward_wrap(
```
```diff
@@ -3426,8 +3425,8 @@ class Qwen2VLVisionEmbMergerPatcher(ModelPatcher):
     def __init__(
         self,
         config: OnnxConfig,
-        model: PreTrainedModel | TFPreTrainedModel,
-        model_kwargs: Dict[str, Any] | None = None,
+        model: Union[PreTrainedModel, TFPreTrainedModel],
+        model_kwargs: Dict[str, Any] = None,
     ):
         model.__orig_forward = model.forward
 
```
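The annotation change in both patchers is likely a compatibility fix rather than pure style: PEP 604 unions like `PreTrainedModel | TFPreTrainedModel` are evaluated when the function is defined and raise TypeError on Python 3.8/3.9, while `typing.Union` works on older interpreters. A minimal illustration:

```python
from typing import Union

# Works on every Python version back to 3.5:
IntOrStr = Union[int, str]

# Equivalent, but evaluating it needs Python >= 3.10, which is why the
# annotations in the two patchers above were rewritten with typing.Union:
# IntOrStr = int | str
```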
10 changes: 9 additions & 1 deletion optimum/exporters/openvino/utils.py
```diff
@@ -216,7 +216,15 @@ def get_submodels(model):
     return custom_export, fn_get_submodels
 
 
-MULTI_MODAL_TEXT_GENERATION_MODELS = ["llava", "llava-next", "llava-qwen2", "internvl-chat", "minicpmv", "phi3-v", "qwen2-vl"]
+MULTI_MODAL_TEXT_GENERATION_MODELS = [
+    "llava",
+    "llava-next",
+    "llava-qwen2",
+    "internvl-chat",
+    "minicpmv",
+    "phi3-v",
+    "qwen2-vl",
+]
 
 
 def save_config(config, save_dir):
```
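The reformatted constant puts one model type per line with a trailing comma, so future additions show up as one-line diffs. A hypothetical usage sketch; the membership test below is illustrative and not taken from this commit:

```python
# Hypothetical usage of the constant above; the surrounding export logic is
# not part of this diff.
MULTI_MODAL_TEXT_GENERATION_MODELS = [
    "llava",
    "llava-next",
    "llava-qwen2",
    "internvl-chat",
    "minicpmv",
    "phi3-v",
    "qwen2-vl",
]

model_type = "qwen2-vl"
is_multimodal = model_type in MULTI_MODAL_TEXT_GENERATION_MODELS
print(is_multimodal)  # True
```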
1 change: 0 additions & 1 deletion optimum/intel/openvino/modeling_visual_language.py
```diff
@@ -2354,7 +2354,6 @@ def get_multimodal_embeddings(
         video_grid_thw=None,
         **kwargs,
     ):
-
         inputs_embeds = torch.from_numpy(self.get_text_embeddings(input_ids))
         if pixel_values is not None and input_ids.shape[1] != 1:
             image_embeds = torch.from_numpy(self.get_vision_embeddings(pixel_values, image_grid_thw))
```
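The only change in this hunk is dropping a blank line after the signature; the surrounding context shows the numpy-to-torch bridge the model class relies on. A minimal sketch of that pattern, with a stand-in array in place of a real OpenVINO output:

```python
# Sketch of the torch.from_numpy pattern visible above: OpenVINO inference
# returns numpy arrays, and from_numpy wraps one as a tensor without copying
# (the tensor shares the array's memory).
import numpy as np
import torch

text_embeddings = np.zeros((1, 8, 16), dtype=np.float32)  # stand-in for an OpenVINO output
inputs_embeds = torch.from_numpy(text_embeddings)
assert inputs_embeds.shape == (1, 8, 16)
```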
