This repository has been archived by the owner on Feb 12, 2024. It is now read-only.

Commit e173cc2: cleanup further
emrgnt-cmplxty committed Oct 30, 2023 (1 parent: cf2eb26)
Showing 8 changed files with 16 additions and 13 deletions.
.env.example (5 additions, 3 deletions)
@@ -1,8 +1,10 @@
 # LLM Providers
-OPENAI_API_KEY=your_openai_key
-ANTHROPIC_API_KEY=your_anthropic_key
-VLLM_API_KEY=your_vllm_token
 ## Fill where necessary.
+OPENAI_API_KEY=your_openai_api_key
+ANTHROPIC_API_KEY=your_anthropic_api_key
+HF_TOKEN=your_huggingface_token
+SCIPHI_API_KEY=your_sciphi_api_key
+VLLM_API_KEY=your_vllm_api_key
 # RAG Setttings
 RAG_API_BASE=your_rag_server_base_url
 RAG_API_KEY=your_rag_server_key
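
For reference, a minimal sketch of how a Python process might consume these variables, assuming the python-dotenv package; the repo itself may load them differently:

```python
# Hypothetical loader for the variables in .env.example; not code from this
# commit. Assumes `pip install python-dotenv`.
import os

from dotenv import load_dotenv

load_dotenv()  # reads .env from the current working directory

# Per the "Fill where necessary" comment, only the providers you use need keys.
openai_key = os.environ.get("OPENAI_API_KEY")
sciphi_key = os.environ.get("SCIPHI_API_KEY")
rag_base = os.environ.get("RAG_API_BASE")
```
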
.flake8 (1 addition, 1 deletion)
@@ -1,3 +1,3 @@
 [flake8]
-exclude = playground/*,sciphi/deprecated/**
+exclude = playground/*,sciphi/deprecated/**,dump/*
 ignore = E501, W503, E203, F541, W293, W291, E266
sciphi/llm/models/anthropic_llm.py (1 addition, 1 deletion)
@@ -32,7 +32,7 @@ def __init__(
             from anthropic import AI_PROMPT, HUMAN_PROMPT, Anthropic
         except ImportError:
             raise ImportError(
-                "Please install the anthropic package before attempting to run with an Anthropic model. This can be accomplished via `poetry install -E anthropic_support, ...OTHER_DEPENDENCIES_HERE`."
+                "Please install the anthropic package before attempting to run with an Anthropic model. This can be accomplished via `pip install anthropic`."
             )
 
         self.raw_prompt = HUMAN_PROMPT + " {instruction} " + AI_PROMPT
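
This change (and the matching ones below) only rewrites the install hint inside an optional-import guard. As an isolated, hedged sketch of that pattern, with class and attribute names invented for the example rather than taken from the repo:

```python
# Standalone illustration of the lazy optional-import pattern used in these
# modules; the names here are hypothetical.
class AnthropicBackedModel:
    def __init__(self) -> None:
        try:
            # Import inside __init__ so the dependency is only required
            # when this provider is actually selected.
            from anthropic import Anthropic
        except ImportError:
            raise ImportError(
                "Please install the anthropic package before attempting to "
                "run with an Anthropic model. This can be accomplished via "
                "`pip install anthropic`."
            )
        self.client = Anthropic()  # picks up ANTHROPIC_API_KEY from the env
```
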
sciphi/llm/models/hugging_face_llm.py (1 addition, 1 deletion)
@@ -47,7 +47,7 @@ def __init__(
             )
         except ImportError:
             raise ImportError(
-                "Please install the torch and transformers packages before attempting to run with a HuggingFace model. This can be accomplished via `poetry install -E hf_support, ...OTHER_DEPENDENCIES_HERE`."
+                "Please install the torch and transformers packages before attempting to run with a HuggingFace model. This can be accomplished via `pip install transformers`."
             )
 
         super().__init__(
sciphi/llm/models/openai_llm.py (4 additions, 2 deletions)
@@ -1,5 +1,7 @@
 """A module for creating OpenAI model abstractions."""
-
+# TODO - Will we face issues if a user attempts to access
+# OpenAI + vLLM / SciPhi remote in the same session?
+# My guess is yes, but need to test + workaround.
 from dataclasses import dataclass
 
 import tiktoken
@@ -38,7 +40,7 @@ def __init__(
             import openai
         except ImportError:
             raise ImportError(
-                "Please install the openai package before attempting to run with an OpenAI model. This can be accomplished via `poetry install -E openai_support, ...OTHER_DEPENDENCIES_HERE`."
+                "Please install the openai package before attempting to run with an OpenAI model. This can be accomplished via `pip install openai`."
             )
         if (
             config.provider_name == LLMProviderName.OPENAI
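
The TODO added at the top of this module flags a real hazard with the openai client of that era (pre-1.0), which is configured through module-level globals. A hypothetical illustration, not code from this commit:

```python
# Illustrative only. With openai-python < 1.0, provider settings are
# module-level globals, so pointing them at an OpenAI-compatible vLLM or
# SciPhi endpoint affects every later call in the same session.
import openai

openai.api_key = "sk-your-openai-key"         # configured for OpenAI proper
openai.api_base = "http://localhost:8000/v1"  # hypothetical local vLLM server

# From here on, all calls through the `openai` module go to the vLLM
# endpoint until api_base is restored, which is the clash the TODO anticipates.
```
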
sciphi/llm/models/sciphi_llm.py (1 addition, 2 deletions)
@@ -1,6 +1,6 @@
 """A module for managing local vLLM models."""
-import os
 import logging
+import os
 from dataclasses import dataclass
 from enum import Enum
 from typing import Optional
@@ -42,7 +42,6 @@ def __init__(
         *args,
         **kwargs,
     ) -> None:
-        print("config = ", config)
         self.config: SciPhiConfig = config
         if self.config.mode in [
             SciPhiProviderMode.REMOTE,
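
The removed line was a leftover debug print. If that trace were still wanted, a hedged sketch of an idiomatic replacement would route it through the logging setup these modules already use; the helper here is invented for illustration:

```python
# Hypothetical replacement for the deleted debug print; not part of the commit.
import logging

logger = logging.getLogger(__name__)

def debug_config(config) -> None:
    # Emitted at DEBUG level, so it stays quiet under the INFO default
    # that these modules set via logging.basicConfig.
    logger.debug("config = %s", config)
```
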
sciphi/llm/models/vllm_llm.py (2 additions, 3 deletions)
@@ -1,16 +1,15 @@
 """A module for managing local vLLM models."""
-
-import os
 import logging
+import os
 from dataclasses import dataclass
 from enum import Enum
 from typing import Optional
 
 from sciphi.core import LLMProviderName
 from sciphi.llm.base import LLM, GenerationConfig, LLMConfig
 from sciphi.llm.config_manager import model_config
 
 logging.basicConfig(level=logging.INFO)
-from enum import Enum
 
+
 class vLLMProviderMode(Enum):
sciphi/scripts/sciphi_gen_completion.py (1 addition)
@@ -39,6 +39,7 @@ def main(
         LLMProviderName(llm_provider_name),
         api_key=api_key,
         server_base=server_base,
+        # Currently only consumed by SciPhi
         rag_interface=rag_interface,
     )
 
