.env-template
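# Typical usage (assumption based on the file name): copy this template to .env and fill in
# the values for your own deployment before starting the services.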
# 1 - Search
# Options: searxng, tavily, serper, bing
SEARCH_PROVIDER=searxng
SEARXNG_BASE_URL=http://searxng:8080
# API keys for tavily, serper, or bing (optional; only needed when selected as SEARCH_PROVIDER above)
TAVILY_API_KEY=
SERPER_API_KEY=
BING_API_KEY=
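# Illustrative example: to use Tavily instead of the bundled SearXNG, set
# SEARCH_PROVIDER=tavily and supply TAVILY_API_KEY; the other keys can stay empty.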
# 2 - LLMs
OLLAMA_API_BASE=http://host.docker.internal:11434
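# host.docker.internal points at an Ollama server running on the Docker host; if the backend
# runs outside Docker, http://localhost:11434 (Ollama's default port) is the usual address.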
# Cloud Models (Optional)
OPENAI_API_KEY=
OPENAI_API_BASE=
GROQ_API_KEY=
AZURE_DEPLOYMENT_NAME=
AZURE_API_KEY=
AZURE_API_BASE=
AZURE_API_VERSION=
# Options: azure, openai
OPENAI_MODE=openai
# Any `provider/model` from https://litellm.vercel.app/docs/providers
CUSTOM_MODEL=
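# Illustrative examples (any provider/model string supported by LiteLLM):
#   CUSTOM_MODEL=openai/gpt-4o
#   CUSTOM_MODEL=groq/llama3-70b-8192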
# 3 - Frontend
NEXT_PUBLIC_API_URL=http://localhost:8000
# DB
DATABASE_URL=postgresql+psycopg2://postgres:password@db:5432/postgres
DB_ENABLED=True
# 4 - Caching + Rate Limiting (Optional)
RATE_LIMIT_ENABLED=False
REDIS_URL=
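# Illustrative example: REDIS_URL=redis://redis:6379/0 when a Redis container named
# "redis" runs on the same Docker network (service name is an assumption).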
# 5 - Local Models
ENABLE_LOCAL_MODELS=True
NEXT_PUBLIC_LOCAL_MODE_ENABLED=True
NEXT_PUBLIC_PRO_MODE_ENABLED=True
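# Note: NEXT_PUBLIC_* variables are exposed to the browser by Next.js,
# so they should not hold secrets.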