diff --git a/llmfoundry/models/inference_api_wrapper/openai_causal_lm.py b/llmfoundry/models/inference_api_wrapper/openai_causal_lm.py
index c5cd967948..624583c31b 100644
--- a/llmfoundry/models/inference_api_wrapper/openai_causal_lm.py
+++ b/llmfoundry/models/inference_api_wrapper/openai_causal_lm.py
@@ -7,8 +7,21 @@
 from time import sleep
 from typing import Any, Dict, List, Optional, Union
 
-import openai
-import tiktoken
+from composer.utils.import_helpers import MissingConditionalImportError
+
+try:
+    import openai
+except ImportError as e:
+    raise MissingConditionalImportError(extra_deps_group='openai',
+                                        conda_package='openai',
+                                        conda_channel='conda-forge') from e
+try:
+    import tiktoken
+except ImportError as e:
+    raise MissingConditionalImportError(extra_deps_group='openai',
+                                        conda_package='tiktoken',
+                                        conda_channel='conda-forge') from e
+
 import torch
 from composer.core.types import Batch
 from openai.error import RateLimitError
diff --git a/scripts/eval/eval.py b/scripts/eval/eval.py
index ced2c5c355..24e05528a6 100644
--- a/scripts/eval/eval.py
+++ b/scripts/eval/eval.py
@@ -214,7 +214,10 @@ def main(cfg: DictConfig):
     device_eval_batch_size: int = pop_config(cfg,
                                              'device_eval_batch_size',
                                              must_exist=True)
-    precision: str = pop_config(cfg, 'precision', must_exist=False, default_value=None)
+    precision: str = pop_config(cfg,
+                                'precision',
+                                must_exist=False,
+                                default_value=None)
     python_log_level: Optional[str] = pop_config(cfg,
                                                  'python_log_level',
                                                  must_exist=False,