{[
{ title: "Docs", path: "/docs", Icon: LibraryBig },
+ { title: "Self Hosting", path: "/self-hosting", Icon: Server },
{ title: "Guides", path: "/guides", Icon: FileCode },
{ title: "FAQ", path: "/faq", Icon: CircleHelp },
].map((item) =>
diff --git a/components/home/OpenSource.tsx b/components/home/OpenSource.tsx
index 2133d283d..29c391611 100644
--- a/components/home/OpenSource.tsx
+++ b/components/home/OpenSource.tsx
@@ -14,11 +14,11 @@ export default function OpenSource() {
description={
Langfuse is committed to open source. You can also run it{" "}
-
+
locally
{" "}
or{" "}
-
+
self-hosted
.
diff --git a/components/home/Pricing.tsx b/components/home/Pricing.tsx
index db1550da8..338b32fcc 100644
--- a/components/home/Pricing.tsx
+++ b/components/home/Pricing.tsx
@@ -98,7 +98,7 @@ const tiers = {
{
name: "Open Source",
id: "tier-self-hosted-oss",
- href: "/docs/deployment/self-host",
+ href: "/self-hosting",
featured: true,
description:
"Self-host all core Langfuse features for free without any limitations.",
@@ -1167,7 +1167,7 @@ const faqs = [
{
question: "Can I self-host Langfuse?",
answer:
- "Yes, Langfuse is open source and you can run Langfuse locally using docker compose or for production use via docker and a standalone database.",
+ "Yes, Langfuse is open source and you can run Langfuse locally using docker compose or for production use via docker and a standalone database.",
},
{
question: "Where is the data stored?",
diff --git a/cookbook/integration_amazon_bedrock.ipynb b/cookbook/integration_amazon_bedrock.ipynb
index 91c67bdc8..c46e4624a 100644
--- a/cookbook/integration_amazon_bedrock.ipynb
+++ b/cookbook/integration_amazon_bedrock.ipynb
@@ -331,7 +331,7 @@
"## Additional Resources\n",
"\n",
"- Metadocs, [Monitoring your Langchain app's cost using Bedrock with Langfuse](https://www.metadocs.co/2024/07/03/monitor-your-langchain-app-cost-using-bedrock-with-langfuse/), featuring Langchain integration and custom model price definitions for Bedrock models.\n",
- "- [Self-hosting guide](https://langfuse.com/docs/deployment/self-host) to deploy Langfuse on AWS."
+ "- [Self-hosting guide](https://langfuse.com/self-hosting) to deploy Langfuse on AWS."
]
}
],
diff --git a/cookbook/integration_ollama.ipynb b/cookbook/integration_ollama.ipynb
index 08a9845f7..1d7317fa6 100644
--- a/cookbook/integration_ollama.ipynb
+++ b/cookbook/integration_ollama.ipynb
@@ -31,7 +31,7 @@
"\n",
"### Local Deployment of Langfuse\n",
"\n",
- "Of course, you can also locally deploy Langfuse to run models and trace LLM outputs only on your own device. [Here](https://langfuse.com/docs/deployment/local) is a guide on how to run Langfuse on your local machine using Docker Compose. This method is ideal for testing Langfuse and troubleshooting integration issues.\n",
+ "Of course, you can also locally deploy Langfuse to run models and trace LLM outputs only on your own device. [Here](https://langfuse.com/self-hosting/local) is a guide on how to run Langfuse on your local machine using Docker Compose. This method is ideal for testing Langfuse and troubleshooting integration issues.\n",
"\n",
"For this example, we will use the Langfuse cloud version."
]
diff --git a/cookbook/python_decorators.ipynb b/cookbook/python_decorators.ipynb
index 58a1fc7b6..ce0dc88a1 100644
--- a/cookbook/python_decorators.ipynb
+++ b/cookbook/python_decorators.ipynb
@@ -48,7 +48,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "If you haven't done so yet, [sign up to Langfuse](https://cloud.langfuse.com/auth/sign-up) and obtain your API keys from the project settings. You can also [self-host](https://langfuse.com/docs/deployment/self-host) Langfuse."
+ "If you haven't done so yet, [sign up to Langfuse](https://cloud.langfuse.com/auth/sign-up) and obtain your API keys from the project settings. You can also [self-host](https://langfuse.com/self-hosting) Langfuse."
]
},
{
diff --git a/next.config.mjs b/next.config.mjs
index 40a862bb5..3699c5201 100644
--- a/next.config.mjs
+++ b/next.config.mjs
@@ -225,6 +225,19 @@ const nonPermanentRedirects = [
["/changelog/2024-09-04-headless-initialization-or-self-hosted-deployments", "/changelog/2024-09-04-headless-initialization-of-self-hosted-deployments"],
["/docs/deployment/v3", "/docs/deployment/v3/overview"],
+ // new self-hosting section
+ ["/docs/deployment/feature-overview", "/self-hosting/license-key"],
+ ["/docs/deployment/local", "/self-hosting/local"],
+ ["/docs/deployment/self-host", "/self-hosting"],
+ ["/docs/deployment/v3/overview", "/self-hosting"],
+ ["/docs/deployment/v3/migrate-v2-to-v3", "/self-hosting/upgrade-guides/upgrade-v2-to-v3"],
+ ["/docs/deployment/v3/troubleshooting", "/self-hosting/troubleshooting"],
+ ["/docs/deployment/v3/guides/docker-compose", "/self-hosting/docker-compose"],
+ ["/docs/deployment/v3/guides/kubernetes-helm", "/self-hosting/kubernetes-helm"],
+ ["/docs/deployment/v3/components/clickhouse", "/self-hosting/infrastructure/clickhouse"],
+ ["/docs/deployment/v3/components/redis", "/self-hosting/infrastructure/cache"],
+ ["/docs/deployment/v3/components/blobstorage", "/self-hosting/infrastructure/blobstorage"],
+
// Reorder Tracing section
["/docs/tracing/overview", "/docs/tracing"],
["/docs/tracing-features", "/docs/tracing"],
diff --git a/pages/_meta.tsx b/pages/_meta.tsx
index aae0728d8..89aabe392 100644
--- a/pages/_meta.tsx
+++ b/pages/_meta.tsx
@@ -24,6 +24,11 @@ export default {
type: "page",
title: "Docs",
},
+ "self-hosting": {
+ type: "page",
+ title: "Self Hosting",
+ // hidden from main menu via overrides.css, nextra display:hidden otherwise breaks type:page
+ },
guides: {
type: "page",
title: "Guides",
diff --git a/pages/blog/2024-11-17-launch-week-2.mdx b/pages/blog/2024-11-17-launch-week-2.mdx
index c23ea6259..901f6d735 100644
--- a/pages/blog/2024-11-17-launch-week-2.mdx
+++ b/pages/blog/2024-11-17-launch-week-2.mdx
@@ -159,7 +159,7 @@ List of additional features that were released this week:
- [`llms.txt`](/changelog/2024-11-17-llms-txt): Easily use the Langfuse documentation in Cursor and other LLM editors via the new `llms.txt` file.
- [`/docs`](/docs): New documentation start page with a simplified overview of all Langfuse features.
- [Self-hosted Pro Plan](/pricing-self-host): Get access to additional features without the need for a sales call or enterprise pricing. All core Langfuse features are OSS without limitations, see [comparison](/pricing-self-host) for more details.
-- [Developer Preview of v3 (self-hosted)](/docs/deployment/v3/overview): v3 is the biggest release in Langfuse history. After running large parts of it on Langfuse Cloud for a while, an initial developer preview for self-hosted users is now available.
+- [Developer Preview of v3 (self-hosted)](/self-hosting): v3 is the biggest release in Langfuse history. After running large parts of it on Langfuse Cloud for a while, an initial developer preview for self-hosted users is now available.
## Don't Miss Out
diff --git a/pages/blog/announcing-our-seed-round.mdx b/pages/blog/announcing-our-seed-round.mdx
index f17b48147..75940c011 100644
--- a/pages/blog/announcing-our-seed-round.mdx
+++ b/pages/blog/announcing-our-seed-round.mdx
@@ -108,7 +108,7 @@ After having built the data and observability layer we close the loop and return
Based on the foundation of our existing observability stack, we are building out the leading open source LLM analytics platform. The job of our users is not easy as there is lots of short-term noise about frameworks, models, and emerging techniques for working with LLMs. These are exciting yet hard to navigate times. We aspire to guide developers with the right insights to make informed decisions grounded in real-world data, user feedback and evaluations.
-We are open, the project is super easy to [self-host](/docs/deployment/self-host), and you can try Langfuse Cloud without a credit card. Join us on [Discord](/discord), give Langfuse a spin ([sign-up](https://cloud.langfuse.com)), or try the [live demo](/demo).
+We are open, the project is super easy to [self-host](/self-hosting), and you can try Langfuse Cloud without a credit card. Join us on [Discord](/discord), give Langfuse a spin ([sign-up](https://cloud.langfuse.com)), or try the [live demo](/demo).
Thank you all! Clemens, Max and Marc
diff --git a/pages/blog/update-2023-08.mdx b/pages/blog/update-2023-08.mdx
index 70d6aeaea..88df8cdbf 100644
--- a/pages/blog/update-2023-08.mdx
+++ b/pages/blog/update-2023-08.mdx
@@ -233,7 +233,7 @@ To reduce the friction of self-hosting, we now publish Docker images for the Lan
docker pull ghcr.io/langfuse/langfuse
```
-For detailed instructions, see [self-hosting](/docs/deployment/self-host) and [local setup](/docs/deployment/local) documentation.
+For detailed instructions, see [self-hosting](/self-hosting) and [local setup](/self-hosting/local) documentation.
## 🚢 What's Next?
diff --git a/pages/blog/update-2023-10.mdx b/pages/blog/update-2023-10.mdx
index 2ade417ba..c3fc477ed 100644
--- a/pages/blog/update-2023-10.mdx
+++ b/pages/blog/update-2023-10.mdx
@@ -114,7 +114,7 @@ We've integrated the application of migrations to the startup process of the Doc
docker pull ghcr.io/langfuse/langfuse:latest
```
-Check [self-hosting docs](/docs/deployment/self-host) for full details on how to deploy Langfuse in a production environment.
+Check [self-hosting docs](/self-hosting) for full details on how to deploy Langfuse in a production environment.
Optionally, you can also one-click deploy Langfuse to Railway:
@@ -175,7 +175,7 @@ _Use the `j` and `k` keys to navigate even faster._
### Sign in via Google and GitHub
-You can now authenticate using **Google** and **GitHub**. Also supported for self-hosted deployments. Check [docs](/docs/deployment/self-host) for details.
+You can now authenticate using **Google** and **GitHub**. Also supported for self-hosted deployments. Check [docs](/self-hosting) for details.
### Delete projects
diff --git a/pages/changelog/2023-08-16-prebuilt-docker-image.mdx b/pages/changelog/2023-08-16-prebuilt-docker-image.mdx
index 4a539bd92..3407febff 100644
--- a/pages/changelog/2023-08-16-prebuilt-docker-image.mdx
+++ b/pages/changelog/2023-08-16-prebuilt-docker-image.mdx
@@ -16,5 +16,5 @@ docker pull ghcr.io/langfuse/langfuse:latest
See docs:
-- [Run Langfuse locally](/docs/deployment/local) using `docker compose`
-- [Self-host Langfuse](/docs/deployment/self-host)
+- [Run Langfuse locally](/self-hosting/local) using `docker compose`
+- [Self-host Langfuse](/self-hosting)
diff --git a/pages/changelog/2023-10-31-simplified-self-hosting.mdx b/pages/changelog/2023-10-31-simplified-self-hosting.mdx
index 8998f0dc5..2148ffe1c 100644
--- a/pages/changelog/2023-10-31-simplified-self-hosting.mdx
+++ b/pages/changelog/2023-10-31-simplified-self-hosting.mdx
@@ -13,7 +13,7 @@ import { ChangelogHeader } from "@/components/changelog/ChangelogHeader";
docker pull ghcr.io/langfuse/langfuse:latest
```
-See [self-hosting documentation](/docs/deployment/self-host) for details when deploying Langfuse for the first time.
+See [self-hosting documentation](/self-hosting) for details when deploying Langfuse for the first time.
Alternatively, deploy to Railway using this template:
diff --git a/pages/changelog/2023-11-03-sso-enforcement.mdx b/pages/changelog/2023-11-03-sso-enforcement.mdx
index 17969179b..5de3013fb 100644
--- a/pages/changelog/2023-11-03-sso-enforcement.mdx
+++ b/pages/changelog/2023-11-03-sso-enforcement.mdx
@@ -11,4 +11,4 @@ import { ChangelogHeader } from "@/components/changelog/ChangelogHeader";
When self-hosting, you can also enforce SSO by adding your domain as an environment variable: `AUTH_DOMAINS_WITH_SSO_ENFORCEMENT`.
-You can find up-to-date information on configuring SSO and environment variables in our [documentation](/docs/deployment/self-host).
+You can find up-to-date information on configuring SSO and environment variables in our [documentation](/self-hosting/authentication-and-sso).
diff --git a/pages/changelog/2024-01-29-custom-model-prices.mdx b/pages/changelog/2024-01-29-custom-model-prices.mdx
index 67de62039..181dd0215 100644
--- a/pages/changelog/2024-01-29-custom-model-prices.mdx
+++ b/pages/changelog/2024-01-29-custom-model-prices.mdx
@@ -77,7 +77,7 @@ We updated the API and UI to include usage and cost on most endpoints. Let us kn
[Langfuse Cloud](/docs/deployment/cloud): No action is required.
-If you [self-host](/docs/deployment/self-host) Langfuse, new model/price definitions are applied to all newly ingested traces. You need to run a [migration script](/docs/deployment/self-host#migrate-models) to apply them to your existing data in Langfuse.
+If you [self-host](/self-hosting) Langfuse, new model/price definitions are applied to all newly ingested traces. You need to run a [migration script](/self-hosting/upgrade-guides/upgrade-v1-to-v2) to apply them to your existing data in Langfuse.
### More details
diff --git a/pages/changelog/2024-04-13-images-on-docker-hub.mdx b/pages/changelog/2024-04-13-images-on-docker-hub.mdx
index f8ab0ffff..f94d7085d 100644
--- a/pages/changelog/2024-04-13-images-on-docker-hub.mdx
+++ b/pages/changelog/2024-04-13-images-on-docker-hub.mdx
@@ -9,7 +9,7 @@ import { ChangelogHeader } from "@/components/changelog/ChangelogHeader";
-This change made deploying Langfuse easier across various platforms that do not natively support the GitHub Container Registry (e.g., Google Cloud Run). For more details, see the updated [self-hosting instructions](/docs/deployment/self-host).
+This change made deploying Langfuse easier across various platforms that do not natively support the GitHub Container Registry (e.g., Google Cloud Run). For more details, see the updated [self-hosting instructions](/self-hosting).
Links:
diff --git a/pages/changelog/2024-08-14-ui-customization.mdx b/pages/changelog/2024-08-14-ui-customization.mdx
index 0fd891e73..05aeb6655 100644
--- a/pages/changelog/2024-08-14-ui-customization.mdx
+++ b/pages/changelog/2024-08-14-ui-customization.mdx
@@ -20,4 +20,4 @@ This feature is available in the [Enterprise Edition](/docs/open-source) (self-h
| `LANGFUSE_UI_SUPPORT_HREF` | Customize the support link reference in the menu and settings. |
| `LANGFUSE_UI_FEEDBACK_HREF` | Replace the default feedback widget with your own feedback link. |
-_Source: [self-hosting documentation](/docs/self-host)_
+_Source: [self-hosting documentation](/self-hosting/ui-customization)_
diff --git a/pages/changelog/2024-08-15-deployment-as-porter-add-on.mdx b/pages/changelog/2024-08-15-deployment-as-porter-add-on.mdx
index bbeacd549..6ca0cc491 100644
--- a/pages/changelog/2024-08-15-deployment-as-porter-add-on.mdx
+++ b/pages/changelog/2024-08-15-deployment-as-porter-add-on.mdx
@@ -10,6 +10,6 @@ import { ChangelogHeader } from "@/components/changelog/ChangelogHeader";
-The add-on automatically configures the necessary [environment variables](/docs/deployment/self-host#configuring-environment-variables), handles HTTPS and custom domains, autoscales the application, and allows you to connect your Porter-managed Postgres database to Langfuse by injecting the credentials.
+The add-on automatically configures the necessary [environment variables](/self-hosting/configuration), handles HTTPS and custom domains, autoscales the application, and allows you to connect your Porter-managed Postgres database to Langfuse by injecting the credentials.
Learn more about [Porter](https://porter.run) and [Porter Add-ons](https://www.porter.run/addons).
diff --git a/pages/changelog/2024-09-04-headless-initialization-of-self-hosted-deployments.mdx b/pages/changelog/2024-09-04-headless-initialization-of-self-hosted-deployments.mdx
index 99e73ef95..22997feae 100644
--- a/pages/changelog/2024-09-04-headless-initialization-of-self-hosted-deployments.mdx
+++ b/pages/changelog/2024-09-04-headless-initialization-of-self-hosted-deployments.mdx
@@ -34,6 +34,6 @@ Organization
## Get Started
-Checkout the [self-hosting documentation](/docs/deployment/self-host#initialization) for a full list of available environment variables.
+Check out the [self-hosting documentation](/self-hosting/headless-initialization) for a full list of available environment variables.
We're excited to see how this new feature will simplify your Langfuse deployments. As always, we welcome your feedback and suggestions for further improvements!
diff --git a/pages/changelog/2024-09-13-custom-basepath-for-self-hosted-deployments.mdx b/pages/changelog/2024-09-13-custom-basepath-for-self-hosted-deployments.mdx
index 2590fd174..dba1722a4 100644
--- a/pages/changelog/2024-09-13-custom-basepath-for-self-hosted-deployments.mdx
+++ b/pages/changelog/2024-09-13-custom-basepath-for-self-hosted-deployments.mdx
@@ -16,4 +16,4 @@ To use a custom base path:
1. Set the `NEXT_PUBLIC_BASE_PATH` environment variable at build time.
2. Build the Docker image. As this base path is inlined in static assets, you cannot use the prebuilt docker images.
-See the [self-hosting documentation](/docs/deployment/self-host#custom-base-path) for more details. Thanks to everyone who contributed to the [discussion](https://github.com/orgs/langfuse/discussions/2400) on this!
+See the [self-hosting documentation](/self-hosting/custom-base-path) for more details. Thanks to everyone who contributed to the [discussion](https://github.com/orgs/langfuse/discussions/2400) on this!
diff --git a/pages/changelog/2024-12-03-new-sso-providers-github-enterprise-and-keycloak.mdx b/pages/changelog/2024-12-03-new-sso-providers-github-enterprise-and-keycloak.mdx
index 314075a3f..41715cf11 100644
--- a/pages/changelog/2024-12-03-new-sso-providers-github-enterprise-and-keycloak.mdx
+++ b/pages/changelog/2024-12-03-new-sso-providers-github-enterprise-and-keycloak.mdx
@@ -30,4 +30,4 @@ If you need a specific additional identity provider, please let us know by [crea
On Langfuse Cloud, please reach out to [support](/support) to configure a custom SSO identity provider for your domain.
-When self-hosting Langfuse, see our [self-hosting documentation](/docs/deployment/self-host#sso) for setup instructions.
+When self-hosting Langfuse, see our [self-hosting documentation](/self-hosting/authentication-and-sso) for setup instructions.
diff --git a/pages/cn.mdx b/pages/cn.mdx
index c32dd2742..d6f78488d 100644
--- a/pages/cn.mdx
+++ b/pages/cn.mdx
@@ -1,23 +1,24 @@
---
title: "Langfuse - 中文概述"
description: "Langfuse 是最受欢迎的开源 LLMOps 平台。它帮助团队协作开发、监控、评估和调试 AI 应用程序。"
-hreflang: [
- { lang: "en", url: "https://langfuse.com" },
- { lang: "zh", url: "https://langfuse.com/cn" }
-]
+hreflang:
+ [
+ { lang: "en", url: "https://langfuse.com" },
+ { lang: "zh", url: "https://langfuse.com/cn" },
+ ]
---
# **Langfuse \- 开放源码 LLMOps 平台 \- 中文版概述**
-注:本页面由机器翻译。我们非常感谢我们的中国用户和客户,并希望用您的母语为您提供一个易于理解的 Langfuse 介绍。
+注:本页面由机器翻译。我们非常感谢我们的中国用户和客户,并希望用您的母语为您提供一个易于理解的 Langfuse 介绍。
请注意,Langfuse 网站的所有其他资源仅提供英文版,但可通过谷歌浏览器内置翻译器等工具使用其他语言。
-## **🇨🇳 🤝 🪢**
+## **🇨🇳 🤝 🪢**
我们很高兴在中国拥有一个不断壮大的 Langfuse 用户社区。您的支持和反馈对我们来说非常宝贵。我们期待着与您合作并听取您的意见。请随时与我们联系--我们会阅读并回复每一份来信。
感谢您使用 Langfuse 并支持我们的产品和社区。
@@ -25,19 +26,18 @@ hreflang: [
## **什么是 Langfuse?**
-![Langfuse Trace Chinese](/images/docs/chinese-example-trace.png)
+ ![Langfuse Trace Chinese](/images/docs/chinese-example-trace.png)
-
[Langfuse](/) 是一个**开源的**可观测性和分析平台,专为由大型语言模型(LLM)驱动的应用而设计。我们的使命是帮助开发人员和组织构建并改进 LLM 应用程序。为此,我们通过先进的跟踪和分析模块深入了解模型的成本、质量和延迟。
-*在我们的[公开演示](/docs/demo)中查看示例跟踪*
+_在我们的[公开演示](/docs/demo)中查看示例跟踪_
### **为什么选择 Langfuse?**
-Langfuse 是市场上[**最受欢迎的开源 LLMOps 工具**](/blog/2024-11-most-used-oss-llmops),拥有一个规模庞大的社区,负责构建和维护与最新框架的集成。
+Langfuse 是市场上[**最受欢迎的开源 LLMOps 工具**](/blog/2024-11-most-used-oss-llmops),拥有一个规模庞大的社区,负责构建和维护与最新框架的集成。
-Langfuse **易于[自助托管](/docs/deployment/feature-overview)**,可在几分钟内完成设置。这对受监管行业的企业客户尤其有吸引力。
+Langfuse **易于[自助托管](/pricing-self-host)**,可在几分钟内完成设置。这对受监管行业的企业客户尤其有吸引力。
Langfuse 可提供**一流的跟踪服务**,帮助您开发和改进产品。
@@ -49,26 +49,26 @@ Langfuse 提供一系列功能,可在人工智能产品的整个周期中为
### **监测**
-* **跟踪**:捕捉产品的[完整上下文](/docs/tracing),包括外部 API 或工具调用、上下文、提示等。
-* **实时指标**:监控关键[性能指标](/docs/analytics/overview),如响应时间、错误率和吞吐量。
-* **反馈**:收集[用户反馈](/docs/scores/user-feedback),以改进应用程序的性能和用户体验。
+- **跟踪**:捕捉产品的[完整上下文](/docs/tracing),包括外部 API 或工具调用、上下文、提示等。
+- **实时指标**:监控关键[性能指标](/docs/analytics/overview),如响应时间、错误率和吞吐量。
+- **反馈**:收集[用户反馈](/docs/scores/user-feedback),以改进应用程序的性能和用户体验。
### **分析**
-* **评估**:通过设置 [llm-as-a-judge](/docs/scores/model-based-evals) 评估或[人工标注](/docs/scores/annotation)工作流程,比较不同模型、提示和配置的性能。
-* **测试**:试验不同版本(A/B)的应用程序,通过[测试](/docs/datasets/overview)和[提示管理](/docs/prompts/get-started)确定最有效的解决方案
-* **用户行为**:了解[用户](/docs/scores/user-feedback)与人工智能应用程序的交互方式。
+- **评估**:通过设置 [llm-as-a-judge](/docs/scores/model-based-evals) 评估或[人工标注](/docs/scores/annotation)工作流程,比较不同模型、提示和配置的性能。
+- **测试**:试验不同版本(A/B)的应用程序,通过[测试](/docs/datasets/overview)和[提示管理](/docs/prompts/get-started)确定最有效的解决方案
+- **用户行为**:了解[用户](/docs/scores/user-feedback)与人工智能应用程序的交互方式。
### **调试**
-* **详细的调试日志**:访问所有应用程序活动的综合日志,以排除故障。
-* **错误跟踪**:检测和跟踪应用程序中的[错误](/docs/tracing-features/log-levels)和异常。
+- **详细的调试日志**:访问所有应用程序活动的综合日志,以排除故障。
+- **错误跟踪**:检测和跟踪应用程序中的[错误](/docs/tracing-features/log-levels)和异常。
### **集成**
-* **框架支持**:与 [LangChain](/docs/integrations/langchain/tracing)、[LlamaIndex](/docs/integrations/llama-index/get-started) 和 [AWS Bedrock](/docs/integrations/amazon-bedrock) 等流行的 LLM 框架集成。
-* **工具支持**:与 [Dify](/docs/integrations/dify) 或 [LobeChat](/docs/integrations/lobechat) 等无代码构建工具集成。
-* **应用程序接口(API**):利用我们开放且功能强大的[应用程序接口](https://api.reference.langfuse.com/#get-/api/public/comments)进行自定义集成和工作流程自动化。
+- **框架支持**:与 [LangChain](/docs/integrations/langchain/tracing)、[LlamaIndex](/docs/integrations/llama-index/get-started) 和 [AWS Bedrock](/docs/integrations/amazon-bedrock) 等流行的 LLM 框架集成。
+- **工具支持**:与 [Dify](/docs/integrations/dify) 或 [LobeChat](/docs/integrations/lobechat) 等无代码构建工具集成。
+- **应用程序接口(API**):利用我们开放且功能强大的[应用程序接口](https://api.reference.langfuse.com/#get-/api/public/comments)进行自定义集成和工作流程自动化。
## **开始使用 Langfuse**
@@ -78,7 +78,7 @@ Langfuse 提供一系列功能,可在人工智能产品的整个周期中为
### **注册:**
-访问 [Langfuse Cloud](https://cloud.langfuse.com/),在几分钟内创建一个免费账户或[自助托管](/docs/deployment/feature-overview) Langfuse。创建新项目并获取 Langfuse API 密钥,即可开始摄取数据。
+访问 [Langfuse Cloud](https://cloud.langfuse.com/),在几分钟内创建一个免费账户或[自助托管](/self-hosting/license-key) Langfuse。创建新项目并获取 Langfuse API 密钥,即可开始摄取数据。
### **探索文档:**
@@ -96,20 +96,20 @@ Langfuse 提供一系列功能,可在人工智能产品的整个周期中为
## **客户支持**
-我们理解用您喜欢的语言提供帮助的重要性。但是,作为一个小团队,我们只能提供英语支持。
+我们理解用您喜欢的语言提供帮助的重要性。但是,作为一个小团队,我们只能提供英语支持。
-* [人工智能聊天机器人](/docs/ask-ai)(懂中文)
-* **GitHub:** [github.com/langfuse/discussions](https://github.com/orgs/langfuse/discussions) \- 在我们的 GitHub 公共讨论区(英语)上提问。
-* **电子邮件:**通过 [support@langfuse.com](mailto:support@langfuse.com) 联系我们
-* **企业支持:**如果您对此感兴趣,请通过 [enterprise@langfuse.com](mailto:enterprise@langfuse.com) 联系我们。
+- [人工智能聊天机器人](/docs/ask-ai)(懂中文)
+- **GitHub:** [github.com/langfuse/discussions](https://github.com/orgs/langfuse/discussions) \- 在我们的 GitHub 公共讨论区(英语)上提问。
+- **电子邮件:**通过 [support@langfuse.com](mailto:support@langfuse.com) 联系我们
+- **企业支持:**如果您对此感兴趣,请通过 [enterprise@langfuse.com](mailto:enterprise@langfuse.com) 联系我们。
## **加入 Langfuse 社区**
成为我们不断壮大的全球社区的一员:
-* ⭐️ 在 [GitHub](https://github.com/langfuse/langfuse) 上点击星星关注我们,即可获得更新。
-* 🤷♂️ 保持联系:在社交媒体上关注我们,了解最新动态。
- * [推特](https://x.com/langfuse)
- * [纪和声](https://discord.langfuse.com/)
- * [LinkedIn](https://www.linkedin.com/company/langfuse)
-* 🖼️ [接收朗富斯贴纸](https://langfuse.com/stickers)
\ No newline at end of file
+- ⭐️ 在 [GitHub](https://github.com/langfuse/langfuse) 上点击星星关注我们,即可获得更新。
+- 🤷♂️ 保持联系:在社交媒体上关注我们,了解最新动态。
+ - [推特](https://x.com/langfuse)
+ - [纪和声](https://discord.langfuse.com/)
+ - [LinkedIn](https://www.linkedin.com/company/langfuse)
+- 🖼️ [接收朗富斯贴纸](https://langfuse.com/stickers)
diff --git a/pages/docs/_meta.tsx b/pages/docs/_meta.tsx
index 8d5eafee0..2e54909cd 100644
--- a/pages/docs/_meta.tsx
+++ b/pages/docs/_meta.tsx
@@ -7,7 +7,6 @@ export default {
},
index: "Overview",
demo: "Interactive Demo",
- deployment: "Self-host",
"-- Tracing": {
type: "separator",
title: "Tracing",
diff --git a/pages/docs/data-security-privacy.mdx b/pages/docs/data-security-privacy.mdx
index 3796af725..f9c38a3cd 100644
--- a/pages/docs/data-security-privacy.mdx
+++ b/pages/docs/data-security-privacy.mdx
@@ -47,7 +47,7 @@ With Langfuse Cloud, we handle:
**Self-hosted Instances**
-- For installation and configuration, see: [Self-hosting guide](/docs/deployment/self-host)
+- For installation and configuration, see: [Self-hosting guide](/self-hosting)
- For architecture/component diagram, see: [CONTRIBUTING.md](https://github.com/langfuse/langfuse/blob/main/CONTRIBUTING.md)
- For basic telemetry, see: [README.md](https://github.com/langfuse/langfuse/blob/main/README.md#telemetry)
- For security inquiries, please contact us at security@langfuse.com
@@ -61,12 +61,12 @@ With Langfuse Cloud, we handle:
## Compliance Measures
-| Framework | Status (Langfuse Cloud) |
-| ------------- | --------------------------------------------------------------------------------------------------------------------------- |
-| GDPR | Compliant. DPA available upon request on Pro and Team plan. |
-| SOC 2 Type II | Certified. Report available upon request on Team plan. |
-| ISO 27001 | Certified. Certificate available upon request on Team plan. |
-| HIPAA | Not compliant. However, compliance can be attained by [self-hosting](/docs/deployment/self-host) on own infrastructure/VPC. |
+| Framework | Status (Langfuse Cloud) |
+| ------------- | -------------------------------------------------------------------------------------------------------------- |
+| GDPR | Compliant. DPA available upon request on Pro and Team plan. |
+| SOC 2 Type II | Certified. Report available upon request on Team plan. |
+| ISO 27001 | Certified. Certificate available upon request on Team plan. |
+| HIPAA | Not compliant. However, compliance can be attained by [self-hosting](/self-hosting) on own infrastructure/VPC. |
For specific compliance requirements or questions, please contact us at compliance@langfuse.com
diff --git a/pages/docs/datasets/prompt-experiments.mdx b/pages/docs/datasets/prompt-experiments.mdx
index 05d125bd2..60a6d125c 100644
--- a/pages/docs/datasets/prompt-experiments.mdx
+++ b/pages/docs/datasets/prompt-experiments.mdx
@@ -10,7 +10,7 @@ description: Run prompt experiments on datasets with LLM-as-a-Judge evaluations.
hobby: "public-beta",
pro: "public-beta",
team: "public-beta",
- selfHosted: "not-available",
+ selfHosted: "ee",
}}
/>
diff --git a/pages/docs/deployment/_meta.tsx b/pages/docs/deployment/_meta.tsx
deleted file mode 100644
index b059adb3a..000000000
--- a/pages/docs/deployment/_meta.tsx
+++ /dev/null
@@ -1,6 +0,0 @@
-export default {
- "feature-overview": "Deployment & Features",
- local: "Local (docker compose)",
- "self-host": "Self-host (docker)",
- v3: "v3 (Preview)",
-};
diff --git a/pages/docs/deployment/feature-overview.mdx b/pages/docs/deployment/feature-overview.mdx
deleted file mode 100644
index ec622ccb3..000000000
--- a/pages/docs/deployment/feature-overview.mdx
+++ /dev/null
@@ -1,33 +0,0 @@
-# Langfuse Feature Overview
-
-## Deployment Options
-
-You can use use Langfuse through our [Managed Cloud Offering](https://cloud.langfuse.com) or you can [self-host Langfuse](/docs/deployment/self-host). In either case, there are paid tiers that will give you access to different features and services.
-
-## Feature Availability
-
-The core Langfuse software is [open source and MIT-licensed](/docs/open-source). There are certain commercially licensed peripheral features.
-
-| Feature | Cloud - Free | Cloud - Pro | Cloud - Team | Self-Hosted - FOSS | Self-Hosted - Enterprise |
-| -------------------------------------------------------------- | ------------ | ----------- | ------------ | ------------------ | ------------------------ |
-| [Tracing & UI](/docs/tracing) | ✅ | ✅ | ✅ | ✅ | ✅ |
-| [Integrations and SDKs](/docs/tracing) | ✅ | ✅ | ✅ | ✅ | ✅ |
-| [Prompt Management](/docs/prompts) | ✅ | ✅ | ✅ | ✅ | ✅ |
-| [Analytics / Dashboards](/docs/analytics/overview) | ✅ | ✅ | ✅ | ✅ | ✅ |
-| [Datasets](/docs/datasets/overview) | ✅ | ✅ | ✅ | ✅ | ✅ |
-| [Scores](/docs/scores/overview) | ✅ | ✅ | ✅ | ✅ | ✅ |
-| [API access](/docs/query-traces) | ✅ | ✅ | ✅ | ✅ | ✅ |
-| [Automated Evaluations](/docs/scores/model-based-evals) | ✅ | ✅ | ✅ | 🛑 | 🚧 (soon) |
-| [Prompt Playground](/docs/playground) | ✅ | ✅ | ✅ | 🛑 | ✅ |
-| [Annotation Queues](/docs/scores/annotation#annotation-queues) | ✅ | ✅ | ✅ | 🛑 | ✅ |
-| [Data Processing Agreement (DPA)](/security) | 🛑 | ✅ | ✅ | 🛑 | ✅ |
-| [SOC2 & ISO27001 Reports](/security) | 🛑 | 🛑 | ✅ | 🛑 | ✅ |
-| SSO Enforcement | 🛑 | 🛑 | ✅ | 🛑 | ✅ |
-| [Project-level RBAC roles](/docs/rbac) | 🛑 | 🛑 | ✅ | 🛑 | ✅ |
-| [UI Customization](/docs/deployment/self-host#ee) | 🛑 | 🛑 | 🛑 | 🛑 | ✅ |
-
-## Get in touch
-
-You can upgrade to Langfuse Pro from within your Langfuse Cloud Settings.
-
-You can learn more about enterprise licenses [here](/enterprise). Please reach out to sales@langfuse.com to discuss an enterprise license (self-hosted or cloud) for your team. Enterprise licenses start at $500/month.
diff --git a/pages/docs/deployment/v3/_meta.tsx b/pages/docs/deployment/v3/_meta.tsx
deleted file mode 100644
index 6d2e18cdc..000000000
--- a/pages/docs/deployment/v3/_meta.tsx
+++ /dev/null
@@ -1,7 +0,0 @@
-export default {
- overview: "Overview",
- "migrate-v2-to-v3": "Migrate from v2 to v3",
- troubleshooting: "Troubleshooting",
- guides: "Guides",
- components: "Components",
-};
diff --git a/pages/docs/deployment/v3/components/_meta.tsx b/pages/docs/deployment/v3/components/_meta.tsx
deleted file mode 100644
index 460e28586..000000000
--- a/pages/docs/deployment/v3/components/_meta.tsx
+++ /dev/null
@@ -1,5 +0,0 @@
-export default {
- "clickhouse": "Clickhouse",
- "redis": "Redis / Valkey",
- "blobstorage": "S3 / Blob Storage",
-};
diff --git a/pages/docs/deployment/v3/guides/_meta.tsx b/pages/docs/deployment/v3/guides/_meta.tsx
deleted file mode 100644
index 1cd340766..000000000
--- a/pages/docs/deployment/v3/guides/_meta.tsx
+++ /dev/null
@@ -1,4 +0,0 @@
-export default {
- "docker-compose": "Docker Compose",
- "kubernetes-helm": "Kubernetes (Helm)",
-};
diff --git a/pages/docs/deployment/v3/guides/docker-compose.mdx b/pages/docs/deployment/v3/guides/docker-compose.mdx
deleted file mode 100644
index adddff710..000000000
--- a/pages/docs/deployment/v3/guides/docker-compose.mdx
+++ /dev/null
@@ -1,103 +0,0 @@
----
-description: Step-by-step guide to run Langfuse on a cloud provider using docker compose.
----
-
-# Self-hosted deployment - Docker Compose
-
-This guide will walk you through the steps to deploy Langfuse on a cloud provider using Docker Compose.
-You will need access to a cloud provider like AWS, GCP, or Azure to deploy the application with permissions to deploy a virtual machine.
-While the Docker Compose setup can be highly effective for development environments, we recommend to **not use it in production**.
-There is no high-availability, no automatic restarts, no scaling, and no backup.
-
-## Start a new instance
-
-Enter your cloud provider interface and navigate to the VM instance section.
-This is EC2 on AWS, Compute Engine on GCP, and Virtual Machines on Azure.
-Create a new instance.
-We recommend that you use at least 4 cores and 16 GiB of memory, e.g. a t3.xlarge on AWS.
-Assign a public IP address in case you want to send traces from external sources.
-As observability data tends to be large in volume, choose a sufficient amount of storage, e.g. 100GiB.
-
-The rest of this guide will assume that you have an Ubuntu OS running on your VM and are connected via SSH.
-
-## Install Docker and Docker Compose
-
-Install docker (see [official guide](https://docs.docker.com/engine/install/ubuntu/) as well).
-
-Setup Docker's apt repository:
-```bash
-# Add Docker's official GPG key:
-sudo apt-get update
-sudo apt-get install ca-certificates curl
-sudo install -m 0755 -d /etc/apt/keyrings
-sudo curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc
-sudo chmod a+r /etc/apt/keyrings/docker.asc
-
-# Add the repository to Apt sources:
-echo \
- "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu \
- $(. /etc/os-release && echo "$VERSION_CODENAME") stable" | \
- sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
-sudo apt-get update
-```
-
-Install Docker packages:
-```bash
-sudo apt-get install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
-```
-
-Verify installlation:
-```bash
-sudo docker run hello-world
-```
-
-## Clone Langfuse Repository
-
-Get a copy of the latest Langfuse repository:
-```bash
-git clone https://github.com/langfuse/langfuse.git
-
-cd langfuse
-```
-
-## Start the application
-
-For local experimentation, the pre-configured variables in the docker-compose file are usually sufficient.
-If you send _any_ kind of sensitive data to the application or intend to keep it up for longer, we recommend that
-you modify the docker-compose file and overwrite the following environment variables:
-
-- **SALT**: A random string used to hash passwords. It should be at least 32 characters long.
-- **ENCRYPTION_KEY**: Generate this via `openssl rand -base64 32`.
-- **NEXTAUTH_SECRET**: A random string used to sign JWT tokens.
-- **NEXTAUTH_URL**: The URL where the application is hosted. Used for redirects after signup.
-
-In addition, you should change any database or storage credential.
-
-Run the docker compose file for Langfuse v3:
-```bash
-docker compose up
-```
-
-Watch the containers being started and the logs flowing in.
-After about 2-3 minutes, the langfuse-web-1 container should log "Ready".
-At this point you can proceed to the next step.
-
-## Smoke test UI
-
-You should be able to load the Langfuse UI, by opening `http://:3000/` in your browser.
-Go ahead and register, create a new organization, project, and explore Langfuse.
-
-## Shutdown
-
-You can stop the containers by hitting `Ctrl+C` in the terminal.
-If you started docker-compose in the background (`-d` flag), you can stop all instance using:
-```bash
-docker compose down
-```
-Adding the `-v` flag will also remove the volumes.
-
-Ensure to stop the VM instance in your cloud provider interface to avoid unnecessary costs.
-
-## How to Upgrade
-
-To upgrade Langfuse, you can stop all instances and run `docker compose up --pull always`.
diff --git a/pages/docs/deployment/v3/overview.mdx b/pages/docs/deployment/v3/overview.mdx
deleted file mode 100644
index 5f423413f..000000000
--- a/pages/docs/deployment/v3/overview.mdx
+++ /dev/null
@@ -1,445 +0,0 @@
----
-description: Step-by-step guide to run Langfuse on your local machine using docker compose.
----
-
-import { Callout } from "nextra/components";
-
-# Self-hosted deployment - v3 Preview - Open Source LLM Observability
-
-
- This guide covers a developer preview which is **not suitable for production use**.
- v3 is under active development and we plan to ship a production-ready version by the end of November 2024.
- We share this information to gather feedback from our awesome developer community.
-
-For a production-ready setup, follow the [self-hosting guide](/docs/deployment/self-host)
-or consider using [Langfuse Cloud](https://cloud.langfuse.com) maintained by the Langfuse team.
-
-If you are on a v2 setup and want to migrate to the v3 developer preview (not recommended in production environments),
-take a look at our [migration guide](/docs/deployment/v3/migrate-v2-to-v3).
-
-If you face any questions or issues, please reach out to us on [Discord](/discord), contact the maintainers at
-support@langfuse.com,
-or join the [GitHub Discussion](https://github.com/orgs/langfuse/discussions/1902).
-
-
-
-## Components
-
-Langfuse consists of multiple storage components and two Docker containers:
-
-- **Langfuse Web**: The main web application serving the Langfuse UI and APIs.
-- **Langfuse Worker**: A worker that asynchronously processes events.
-- **Postgres**: The main database for transactional workloads.
-- **Redis**: A fast in-memory data structure store. Used for queue and cache operations.
-- **S3/Blob Store**: Object storage to persist all incoming events, multi-modal inputs, and large exports.
-- **Clickhouse**: High-performance OLAP database which stores traces, observations, and scores.
-
-See the chart below for an overview of the components and their interactions:
-
-```mermaid
-flowchart TB
- subgraph clients["Clients"]
- Browser["Browser"]
- JS["JavaScript SDK"]
- Python["Python SDK"]
- end
-
- subgraph storage["Storage"]
- DB[Postgres Database]
- Redis[Redis Cache/Queue]
- Clickhouse[Clickhouse Database]
- S3[S3/Blob Store]
- end
-
- subgraph app["Langfuse Containers"]
- subgraph web["Langfuse Web"]
- TRPC["TRPC API"]
- REST["Public API"]
- Frontend["React Frontend"]
- Backend["Backend"]
- end
-
- subgraph worker["Langfuse Worker"]
- QueueProcessor["Queue Processor"]
- end
- end
-
- Browser --> Frontend
- Frontend --> TRPC
- JS --> REST
- Python --> REST
-
- TRPC --> Backend
- REST --> Backend
-
- Backend --> S3
- Backend --> DB
- Backend --> Redis
- Backend --> Clickhouse
-
- Redis --> QueueProcessor
- QueueProcessor --> Clickhouse
- QueueProcessor --> DB
- QueueProcessor --> S3
-```
-
-### Postgres Database
-
-Langfuse requires a persistent Postgres database to store its state.
-You can use a managed service on AWS, Azure, or GCP, or host it yourself.
-At least version 12 is required.
-
-### Redis
-
-Langfuse uses Redis for caching and queue operations.
-You can use a managed service on AWS, Azure, or GCP, or host it yourself.
-At least version 7 is required and the instance must have `maxmemory-policy=noeviction` configured.
-You may use Valkey instead of Redis, but there is no active support from the Langfuse team as of now.
-See [Redis](/docs/deployment/v3/components/redis) for more details on how to connect Redis to Langfuse.
-
-### S3/Blob Store
-
-Langfuse requires an S3-compatible blob store to persist all incoming events, multi-modal inputs, and large exports.
-You can use a managed service on AWS, or GCP, or host it yourself using MinIO.
-Langfuse also has experimental support for Azure Blob Storage.
-See [S3/Blob Store](/docs/deployment/v3/components/blobstorage) for more details on how to connect a blob store to Langfuse
-and more details on Azure Blob Storage.
-
-### Clickhouse
-
-Langfuse uses Clickhouse as an OLAP database to store traces, observations, and scores.
-You can use the managed service by Clickhouse Cloud, or host it yourself.
-See [ClickHouse](/docs/deployment/v3/components/clickhouse) for more details on how to connect ClickHouse to Langfuse.
-
-## Deploying Langfuse
-
-Deploy the application container to your infrastructure.
-You can use managed services like AWS ECS, Azure Container Instances, or GCP Cloud Run, or host it yourself.
-
-During the container startup, all database migrations will be applied automatically.
-This can be optionally disabled via environment variables.
-
-
- We recommend that you test v3 using the latest release candidate. You can find
- the newest tag in our [GitHub
- Releases](https://github.com/langfuse/langfuse/releases?q=v3.0.0-rc&expanded=true).
-
-
-### Run Langfuse Web
-
-```bash
-docker run --name langfuse-web \
- -e DATABASE_URL=postgresql://hello \
- -e NEXTAUTH_URL=http://localhost:3000 \
- -e NEXTAUTH_SECRET=mysecret \
- -e SALT=mysalt \
- -e ENCRYPTION_KEY=0000000000000000000000000000000000000000000000000000000000000000 \ # generate via: openssl rand -hex 32
- -e CLICKHOUSE_URL=http://clickhouse:8123 \
- -e CLICKHOUSE_USER=clickhouse \
- -e CLICKHOUSE_PASSWORD=clickhouse \
- -e CLICKHOUSE_MIGRATION_URL=clickhouse://clickhouse:9000 \
- -e REDIS_HOST=localhost \
- -e REDIS_PORT=6379 \
- -e REDIS_AUTH=redis \
- -e LANGFUSE_S3_EVENT_UPLOAD_BUCKET=my-bucket \
- -e LANGFUSE_S3_EVENT_UPLOAD_REGION=us-east-1 \
- -e LANGFUSE_S3_EVENT_UPLOAD_ACCESS_KEY_ID=AKIAIOSFODNN7EXAMPLE \
- -e LANGFUSE_S3_EVENT_UPLOAD_SECRET_ACCESS_KEY=bPxRfiCYEXAMPLEKEY \
- -p 3000:3000 \
- -a STDOUT \
-langfuse/langfuse:3.0.0-rc.3
-```
-
-### Run Langfuse Worker
-
-```bash
-docker run --name langfuse-worker \
- -e DATABASE_URL=postgresql://hello \
- -e SALT=mysalt \
- -e ENCRYPTION_KEY=0000000000000000000000000000000000000000000000000000000000000000 \ # generate via: openssl rand -hex 32
- -e CLICKHOUSE_URL=http://clickhouse:8123 \
- -e CLICKHOUSE_USER=clickhouse \
- -e CLICKHOUSE_PASSWORD=clickhouse \
- -e REDIS_HOST=localhost \
- -e REDIS_PORT=6379 \
- -e REDIS_AUTH=redis \
- -e LANGFUSE_S3_EVENT_UPLOAD_BUCKET=my-bucket \
- -e LANGFUSE_S3_EVENT_UPLOAD_REGION=us-east-1 \
- -e LANGFUSE_S3_EVENT_UPLOAD_ACCESS_KEY_ID=AKIAIOSFODNN7EXAMPLE \
- -e LANGFUSE_S3_EVENT_UPLOAD_SECRET_ACCESS_KEY=bPxRfiCYEXAMPLEKEY \
- -p 3030:3030 \
- -a STDOUT \
-langfuse/langfuse-worker:3.0.0-rc.3
-```
-
-### Recommended sizing
-
-For production environments, we recommend to use at least 2 CPUs and 4 GB of RAM for all containers.
-You should have at least two instances of the Langfuse Web container for high availability.
-For auto-scaling, we recommend to add instances once the CPU utilization exceeds 50% on either container.
-
-### Environment Variables
-
-Langfuse accepts additional environment variables to fine-tune your deployment.
-You can use the same environment variables for the Langfuse Web and Langfuse Worker containers.
-
-| Variable | Required / Default | Description |
-| ----------------------------------------------- | ------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| `DATABASE_URL` | Required | Connection string of your Postgres database. Instead of `DATABASE_URL`, you can also use `DATABASE_HOST`, `DATABASE_USERNAME`, `DATABASE_PASSWORD` and `DATABASE_NAME`. |
-| `DIRECT_URL` | `DATABASE_URL` | Connection string of your Postgres database used for database migrations. Use this if you want to use a different user for migrations or use connection pooling on `DATABASE_URL`. **For large deployments**, configure the database user with long timeouts as migrations might need a while to complete. |
-| `SHADOW_DATABASE_URL` | | If your database user lacks the `CREATE DATABASE` permission, you must create a shadow database and configure the "SHADOW_DATABASE_URL". This is often the case if you use a Cloud database. Refer to the [Prisma docs](https://www.prisma.io/docs/orm/prisma-migrate/understanding-prisma-migrate/shadow-database#cloud-hosted-shadow-databases-must-be-created-manually) for detailed instructions. |
-| `CLICKHOUSE_MIGRATION_URL` | Required | Migration URL (TCP protocol) for the clickhouse instance. Pattern: `clickhouse://:(9000/9440)` |
-| `CLICKHOUSE_MIGRATION_SSL` | `false` | Set to true to establish an SSL connection to Clickhouse for the database migration. |
-| `CLICKHOUSE_URL` | Required | Hostname of the clickhouse instance. Pattern: `http(s)://:(8123/8443)` |
-| `CLICKHOUSE_USER` | Required | Username of the clickhouse database. Needs SELECT, ALTER, INSERT, CREATE, DELETE grants. |
-| `CLICKHOUSE_PASSWORD` | Required | Password of the clickhouse user. |
-| `CLICKHOUSE_CLUSTER_ENABLED` | `true` | Whether to run ClickHouse commands `ON CLUSTER`. Set to `false` for single-container setups. |
-| `LANGFUSE_AUTO_CLICKHOUSE_MIGRATION_DISABLED` | `false` | Whether to disable automatic ClickHouse migrations on startup. |
-| `REDIS_CONNECTION_STRING` | Required | Connection string of your redis instance. Instead of `REDIS_CONNECTION_STRING`, you can also use `REDIS_HOST`, `REDIS_PORT`, and `REDIS_AUTH`. |
-| `NEXTAUTH_URL` | Required | URL of your Langfuse web deployment, e.g. `https://yourdomain.com` or `http://localhost:3000`. Required for successful authentication via OAUTH. |
-| `NEXTAUTH_SECRET` | Required | Used to validate login session cookies, generate secret with at least 256 entropy using `openssl rand -base64 32`. |
-| `SALT` | Required | Used to salt hashed API keys, generate secret with at least 256 entropy using `openssl rand -base64 32`. |
-| `ENCRYPTION_KEY` | Required | Used to encrypt sensitive data. Must be 256 bits, 64 string characters in hex format, generate via: `openssl rand -hex 32`. |
-| `LANGFUSE_CSP_ENFORCE_HTTPS` | `false` | Set to `true` to set CSP headers to only allow HTTPS connections. |
-| `PORT` | `3000` / `3030` | Port the server listens on. 3000 for web, 3030 for worker. |
-| `HOSTNAME` | `localhost` | In some environments it needs to be set to `0.0.0.0` to be accessible from outside the container (e.g. Google Cloud Run). |
-| `LANGFUSE_DEFAULT_ORG_ID` | | Configure optional default organization for new users. When users create an account they will be automatically added to this organization. |
-| `LANGFUSE_DEFAULT_ORG_ROLE` | `VIEWER` | Role of the user in the default organization (if set). Possible values are `OWNER`, `ADMIN`, `MEMBER`, `VIEWER`. See [roles](/docs/rbac) for details. |
-| `LANGFUSE_DEFAULT_PROJECT_ID` | | Configure optional default project for new users. When users create an account they will be automatically added to this project. |
-| `LANGFUSE_DEFAULT_PROJECT_ROLE` | `VIEWER` | Role of the user in the default project (if set). Possible values are `OWNER`, `ADMIN`, `MEMBER`, `VIEWER`. See [roles](/docs/rbac) for details. |
-| `SMTP_CONNECTION_URL` | | Configure optional SMTP server connection for transactional email. Connection URL is passed to Nodemailer ([docs](https://nodemailer.com/smtp)). |
-| `EMAIL_FROM_ADDRESS` | | Configure from address for transactional email. Required if `SMTP_CONNECTION_URL` is set. |
-| `LANGFUSE_S3_EVENT_UPLOAD_BUCKET` | Required | Name of the bucket in which event information should be uploaded. |
-| `LANGFUSE_S3_EVENT_UPLOAD_PREFIX` | `""` | Prefix to store events within a subpath of the bucket. Defaults to the bucket root. If provided, must end with a `/`. |
-| `LANGFUSE_S3_EVENT_UPLOAD_REGION` | | Region in which the bucket resides. |
-| `LANGFUSE_S3_EVENT_UPLOAD_ENDPOINT` | | Endpoint to use to upload events. |
-| `LANGFUSE_S3_EVENT_UPLOAD_ACCESS_KEY_ID` | | Access key for the bucket. Must have List, Get, and Put permissions. |
-| `LANGFUSE_S3_EVENT_UPLOAD_SECRET_ACCESS_KEY` | | Secret access key for the bucket. |
-| `LANGFUSE_S3_EVENT_UPLOAD_FORCE_PATH_STYLE` | | Whether to force path style on requests. Required for MinIO. |
-| `LANGFUSE_S3_BATCH_EXPORT_ENABLED` | `false` | Whether to enable Langfuse S3 batch exports. This must be set to `true` to enable batch exports. |
-| `LANGFUSE_S3_BATCH_EXPORT_BUCKET` | Required | Name of the bucket in which batch exports should be uploaded. |
-| `LANGFUSE_S3_BATCH_EXPORT_PREFIX` | `""` | Prefix to store batch exports within a subpath of the bucket. Defaults to the bucket root. If provided, must end with a `/`. |
-| `LANGFUSE_S3_BATCH_EXPORT_REGION` | | Region in which the bucket resides. |
-| `LANGFUSE_S3_BATCH_EXPORT_ENDPOINT` | | Endpoint to use to upload batch exports. |
-| `LANGFUSE_S3_BATCH_EXPORT_ACCESS_KEY_ID` | | Access key for the bucket. Must have List, Get, and Put permissions. |
-| `LANGFUSE_S3_BATCH_EXPORT_SECRET_ACCESS_KEY` | | Secret access key for the bucket. |
-| `LANGFUSE_S3_BATCH_EXPORT_FORCE_PATH_STYLE` | | Whether to force path style on requests. Required for MinIO. |
-| `DB_EXPORT_PAGE_SIZE` | `1000` | Optional page size for streaming exports to S3 to avoid memory issues. The page size can be adjusted if needed to optimize performance. |
-| `LANGFUSE_S3_MEDIA_UPLOAD_BUCKET` | Required | Name of the bucket in which media files should be uploaded. |
-| `LANGFUSE_S3_MEDIA_UPLOAD_PREFIX` | `""` | Prefix to store media within a subpath of the bucket. Defaults to the bucket root. If provided, must end with a `/`. |
-| `LANGFUSE_S3_MEDIA_UPLOAD_REGION` | | Region in which the bucket resides. |
-| `LANGFUSE_S3_MEDIA_UPLOAD_ENDPOINT` | | Endpoint to use to upload media files. |
-| `LANGFUSE_S3_MEDIA_UPLOAD_ACCESS_KEY_ID` | | Access key for the bucket. Must have List, Get, and Put permissions. |
-| `LANGFUSE_S3_MEDIA_UPLOAD_SECRET_ACCESS_KEY` | | Secret access key for the bucket. |
-| `LANGFUSE_S3_MEDIA_UPLOAD_FORCE_PATH_STYLE` | | Whether to force path style on requests. Required for MinIO. |
-| `LANGFUSE_S3_MEDIA_MAX_CONTENT_LENGTH` | `1_000_000_000` | Maximum file size in bytes that is allowed for upload. Default is 1GB. |
-| `LANGFUSE_S3_MEDIA_DOWNLOAD_URL_EXPIRY_SECONDS` | `3600` | Presigned download URL expiry in seconds. Defaults to 1h. |
-| `LANGFUSE_AUTO_POSTGRES_MIGRATION_DISABLED` | `false` | Set to `true` to disable automatic database migrations on docker startup. |
-| `LANGFUSE_LOG_LEVEL` | `info` | Set the log level for the application. Possible values are `trace`, `debug`, `info`, `warn`, `error`, `fatal`. |
-| `LANGFUSE_LOG_FORMAT` | `text` | Set the log format for the application. Possible values are `text`, `json`. |
-| `NEXT_PUBLIC_BASE_PATH` | | Set the base path for the application. This is useful if you want to deploy Langfuse on a subpath, especially when integrating Langfuse into existing infrastructure. Refer to the [section](#custom-base-path) below for details. |
-
-### Authentication
-
-#### Email/Password [#auth-email-password]
-
-Email/password authentication is enabled by default. Users can sign up and log in using their email and password.
-
-To disable email/password authentication, set `AUTH_DISABLE_USERNAME_PASSWORD=true`. In this case, you need to set up [SSO](#sso) instead.
-
-If you want to provision a default user for your Langfuse instance, you can use the [`LANGFUSE_INIT_*`](#initialization) environment variables.
-
-**Password Reset**
-
-- **If transactional emails are configured** on your instance via the `SMTP_CONNECTION_URL` and `EMAIL_FROM_ADDRESS` environments, users can reset their password by using the "Forgot password" link on the login page.
-
-- **If transactional emails are not set up**, passwords can be reset by following these steps:
-
-1. Update the email associated with your user account in database, such as by adding a prefix.
-2. You can then sign up again with a new password.
-3. Reassign any organizations you were associated with via the `organization_memberships` table in database.
-4. Finally, remove the old user account from the `users` table in database.
-
-#### SSO
-
-To enable OAuth/SSO provider sign-in for Langfuse, add the following environment variables:
-
-| Provider | Variables | OAuth Redirect URL |
-| -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------- |
-| [Google](https://next-auth.js.org/providers/google) | `AUTH_GOOGLE_CLIENT_ID` `AUTH_GOOGLE_CLIENT_SECRET`
`AUTH_GOOGLE_ALLOW_ACCOUNT_LINKING=true` (optional) `AUTH_GOOGLE_ALLOWED_DOMAINS=langfuse.com,google.com`(optional, list of allowed domains based on [`hd` OAuth claim](https://developers.google.com/identity/openid-connect/openid-connect#an-id-tokens-payload)) | `/api/auth/callback/google` |
-| [GitHub](https://next-auth.js.org/providers/github) | `AUTH_GITHUB_CLIENT_ID` `AUTH_GITHUB_CLIENT_SECRET`
`AUTH_CUSTOM_SCOPE` (optional, defaults to `"openid email profile"`) | `/api/auth/callback/custom` |
-
-Use `*_ALLOW_ACCOUNT_LINKING` to allow merging accounts with the same email address. This is useful when users sign in with different providers or email/password but have the same email address. You need to be careful with this setting as it can lead to security issues if the emails are not verified.
-
-Need another provider? Langfuse uses Auth.js, which integrates with [many providers](https://next-auth.js.org/providers/). Add a [feature request on GitHub](/ideas) if you want us to add support for a specific provider.
-
-#### Additional configuration
-
-| Variable | Description |
-| ----------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| `AUTH_DOMAINS_WITH_SSO_ENFORCEMENT` | Comma-separated list of domains that are only allowed to sign in using SSO. Email/password sign in is disabled for these domains. E.g. `domain1.com,domain2.com` |
-| `AUTH_DISABLE_SIGNUP` | Set to `true` to disable sign up for new users. Only existing users can sign in. This affects all new users that try to sign up, also those who received an invite to a project and have no account yet. |
-| `AUTH_SESSION_MAX_AGE` | Set the maximum age of the session (JWT) in minutes. The default is 30 days (`43200`). The value must be greater than 5 minutes, as the front-end application refreshes its session every 5 minutes. |
-
-### Headless Initialization [#initialization]
-
-By default, you need to create a user account, organization and project via the Langfuse UI before being able to use the API. You can find the API keys in the project settings within the UI.
-
-If you want to automatically initialize these resources, you can optionally use the following `LANGFUSE_INIT_*` environment variables. When these variables are set, Langfuse will automatically create the specified resources on startup if they don't already exist. This allows for easy integration with infrastructure-as-code and automated deployment pipelines.
-
-| Environment Variable | Description | Required to Create Resource | Example |
-| ---------------------------------- | -------------------------------------- | --------------------------- | ------------------ |
-| `LANGFUSE_INIT_ORG_ID` | Unique identifier for the organization | Yes | `my-org` |
-| `LANGFUSE_INIT_ORG_NAME` | Name of the organization | No | `My Org` |
-| `LANGFUSE_INIT_PROJECT_ID` | Unique identifier for the project | Yes | `my-project` |
-| `LANGFUSE_INIT_PROJECT_NAME` | Name of the project | No | `My Project` |
-| `LANGFUSE_INIT_PROJECT_PUBLIC_KEY` | Public API key for the project | Yes | `lf_pk_1234567890` |
-| `LANGFUSE_INIT_PROJECT_SECRET_KEY` | Secret API key for the project | Yes | `lf_sk_1234567890` |
-| `LANGFUSE_INIT_USER_EMAIL` | Email address of the initial user | Yes | `user@example.com` |
-| `LANGFUSE_INIT_USER_NAME` | Name of the initial user | No | `John Doe` |
-| `LANGFUSE_INIT_USER_PASSWORD` | Password for the initial user | Yes | `password123` |
-
-The different resources depend on each other in the following way. You can e.g. intialize an organization and a user without having to also initialize a project and API keys, but you cannot initialize a project without also initializing an organization.
-
-```
-Organization
-├── Project (part of organization)
-│ └── API Keys (set for project)
-└── User (owner of organization)
-```
-
-Troubleshooting:
-
-- If you use `LANGFUSE_INIT_*` in Docker Compose, do not double-quote the values ([GitHub issue](https://github.com/langfuse/langfuse/issues/3398)).
-- The resources depend on one another (see note above). For example, you must create an organization to initialize a project.
-
-### Configuring the Enterprise Edition [#ee]
-
-The Enterprise Edition ([compare versions](/docs/deployment/feature-overview)) of Langfuse includes additional optional configuration options that can be set via environment variables.
-
-| Variable | Description |
-| ------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
-| `LANGFUSE_ALLOWED_ORGANIZATION_CREATORS` | Comma-separated list of allowlisted users that can create new organizations. By default, all users can create organizations. E.g. `user1@langfuse.com,user2@langfuse.com`. |
-| `LANGFUSE_UI_API_HOST` | Customize the hostname that is referenced in the settings. Defaults to `window.origin`. |
-| `LANGFUSE_UI_DOCUMENTATION_HREF` | Customize the documentation link reference in the menu and settings. |
-| `LANGFUSE_UI_SUPPORT_HREF` | Customize the support link reference in the menu and settings. |
-| `LANGFUSE_UI_FEEDBACK_HREF` | Replace the default feedback widget with your own feedback link. |
-| `LANGFUSE_UI_LOGO_DARK_MODE_HREF` `LANGFUSE_UI_LOGO_LIGHT_MODE_HREF` | Co-brand the Langfuse interface with your own logo. Langfuse adapts to the logo width, with a maximum aspect ratio of 1:3. Narrower ratios (e.g., 2:3, 1:1) also work. The logo is fitted into a bounding box, so there are no specific pixel constraints. For reference, the example logo is 160px x 400px. |
-| `LANGFUSE_UI_DEFAULT_MODEL_ADAPTER` | Set the default model adapter for the LLM playground and evals. Options: `OpenAI`, `Anthropic`, `Azure`. Example: `Anthropic` |
-| `LANGFUSE_UI_DEFAULT_BASE_URL_OPENAI` | Set the default base URL for OpenAI API in the LLM playground and evals. Example: `https://api.openai.com/v1` |
-| `LANGFUSE_UI_DEFAULT_BASE_URL_ANTHROPIC` | Set the default base URL for Anthropic API in the LLM playground and evals. Example: `https://api.anthropic.com` |
-| `LANGFUSE_UI_DEFAULT_BASE_URL_AZURE_OPENAI` | Set the default base URL for Azure OpenAI API in the LLM playground and evals. Example: `https://{instanceName}.openai.azure.com/openai/deployments` |
-
-### Health and Readiness Check Endpoint
-
-Langfuse web includes a health check endpoint at `/api/public/health` and a readiness check endpoint at `/api/public/ready` and the
-Langfuse worker a health check endpoint at `/api/health`.
-The health check endpoint indicates if the application is alive and the readiness check endpoint indicates if the application is ready to serve traffic.
-
-Access the health and readiness check endpoints:
-
-```bash
-curl http://localhost:3000/api/public/health
-curl http://localhost:3000/api/public/ready
-curl http://localhost:3030/api/health
-```
-
-The potential responses from the health check endpoint are:
-
-- `200 OK`: Both the API is functioning normally and a successful connection to the database was made.
-- `503 Service Unavailable`: Either the API is not functioning or it couldn't establish a connection to the database.
-
-The potential responses from the readiness check endpoint are:
-
-- `200 OK`: The application is ready to serve traffic.
-- `500 Internal Server Error`: The application received a SIGTERM or SIGINT and should not receive traffic.
-
-Applications and monitoring services can call this endpoint periodically for health updates.
-
-Per default, the Langfuse web healthcheck endpoint does not validate if the database is reachable, as there are cases where the
-database is unavailable, but the application still serves traffic.
-If you want to run database healthchecks, you can add `?failIfDatabaseUnavailable=true` to the healthcheck endpoint.
-
-### Encryption
-
-#### Encryption in transit (HTTPS) [#https]
-
-For encryption in transit, HTTPS is strongly recommended.
-Langfuse itself does not handle HTTPS directly.
-Instead, HTTPS is typically managed at the infrastructure level.
-There are two main approaches to handle HTTPS for Langfuse:
-
-1. Load Balancer Termination:
- In this approach, HTTPS is terminated at the load balancer level.
- The load balancer handles the SSL/TLS certificates and encryption, then forwards the decrypted traffic to the Langfuse container over HTTP.
- This is a common and straightforward method, especially in cloud environments.
-
-- Pros: Simplifies certificate management as it is usually a fully managed service (e.g. AWS ALB), offloads encryption overhead from application servers.
-- Cons: Traffic between load balancer and Langfuse container is unencrypted (though typically within a secure network).
-
-2. Service Mesh Sidecar:
- This method involves using a service mesh like Istio or Linkerd.
- A sidecar proxy is deployed alongside each Langfuse container, handling all network traffic including HTTPS.
-
-- Pros: Provides end-to-end encryption (mutual TLS), offers advanced traffic management and observability.
-- Cons: Adds complexity to the deployment, requires understanding of service mesh concepts.
-
-Once HTTPS is enabled, you can configure add `LANGFUSE_CSP_ENFORCE_HTTPS=true` to ensure browser only allow HTTPS connections when using Langfuse.
-
-#### Encryption at rest (database) [#encryption-at-rest]
-
-All Langfuse data is stored in your Postgres database, Clickhouse, Redis, or S3/Blob Store.
-Database-level encryption is recommended for a secure production deployment and available across cloud providers.
-
-The Langfuse team has implemented this for Langfuse Cloud and it is fully ISO27001, SOC2 Type 2 and GDPR compliant ([security page](/docs/security)).
-
-#### Additional application-level encryption [#application-level-encryption]
-
-In addition to in-transit and at-rest encryption, sensitive data is also encrypted or hashed at the application level.
-
-| Data | Encryption |
-| ----------------------------------------- | ------------------------------------------------------------------------------------ |
-| API keys | Hashed using `SALT` |
-| Langfuse Console JWTs | Encrypted via `NEXTAUTH_SECRET` |
-| LLM API credentials stored in Langfuse | Encrypted using `ENCRYPTION_KEY` |
-| Integration credentials (e.g. PostHog) | Encrypted using `ENCRYPTION_KEY` |
-| Input/Outputs of LLM Calls, Traces, Spans | Work in progress, reach out to enterprise@langfuse.com if you are interested in this |
-
-## Deployment Guides
-
-The Langfuse team and our community maintain a collection of deployment guides to illustrate how you can run Langfuse in various environments.
-This section is work in progress and relies on community contributions.
-If you have successfully deployed Langfuse on a specific platform, consider contributing a guide either via a GitHub [PR/Issue](https://github.com/langfuse/langfuse-docs)
-or by [reaching out](#contact) to the maintainers.
-Please also let us know if one of these guides does not work anymore or if you have a better solution.
-
-- [Docker Compose](/docs/deployment/v3/guides/docker-compose)
-- [Kubernetes (Helm)](/docs/deployment/v3/guides/kubernetes-helm)
-
-## Support
-
-If you experience any issues, please join us on [Discord](/discord) or contact the maintainers at support@langfuse.com.
-
-For support with production deployments, the Langfuse team provides dedicated enterprise support. To learn more, reach out to enterprise@langfuse.com or [schedule a demo](/schedule-demo).
-
-Alternatively, you may consider using [Langfuse Cloud](/docs/deployment/cloud), which is a fully managed version of Langfuse. You can find information about its security and privacy [here](/docs/data-security-privacy).
-
-## FAQ
-
-import { FaqPreview } from "@/components/faq/FaqPreview";
-
-
-
-## GitHub Discussions
-
-import { GhDiscussionsPreview } from "@/components/gh-discussions/GhDiscussionsPreview";
-
-
diff --git a/pages/docs/get-started.mdx b/pages/docs/get-started.mdx
index 80291fe13..b296dd243 100644
--- a/pages/docs/get-started.mdx
+++ b/pages/docs/get-started.mdx
@@ -10,7 +10,7 @@ If you are looking for other features, see the [overview](/docs).
## Create new project in Langfuse
-1. [Create Langfuse account](https://cloud.langfuse.com/auth/sign-up) or [self-host](/docs/deployment/self-host)
+1. [Create Langfuse account](https://cloud.langfuse.com/auth/sign-up) or [self-host](/self-hosting)
2. Create a new project
3. Create new API credentials in the project settings
diff --git a/pages/docs/index.mdx b/pages/docs/index.mdx
index 7bedbdc83..c152cda66 100644
--- a/pages/docs/index.mdx
+++ b/pages/docs/index.mdx
@@ -176,10 +176,11 @@ The [LLM Playground](/docs/playground) is a tool for testing and iterating on yo
## Get started
-
+
+
## Updates
diff --git a/pages/docs/integrations/amazon-bedrock.md b/pages/docs/integrations/amazon-bedrock.md
index a139f5401..f345d1836 100644
--- a/pages/docs/integrations/amazon-bedrock.md
+++ b/pages/docs/integrations/amazon-bedrock.md
@@ -220,4 +220,4 @@ You can define custom price information via the Langfuse dashboard or UI ([see d
## Additional Resources
- Metadocs, [Monitoring your Langchain app's cost using Bedrock with Langfuse](https://www.metadocs.co/2024/07/03/monitor-your-langchain-app-cost-using-bedrock-with-langfuse/), featuring Langchain integration and custom model price definitions for Bedrock models.
-- [Self-hosting guide](https://langfuse.com/docs/deployment/self-host) to deploy Langfuse on AWS.
+- [Self-hosting guide](https://langfuse.com/self-hosting) to deploy Langfuse on AWS.
diff --git a/pages/docs/integrations/lobechat.mdx b/pages/docs/integrations/lobechat.mdx
index ef1681eda..4ef64c546 100644
--- a/pages/docs/integrations/lobechat.mdx
+++ b/pages/docs/integrations/lobechat.mdx
@@ -24,7 +24,7 @@ description: Enhance your LobeChat applications with open-source observability a
### Set up Langfuse
-Get your Langfuse API key by signing up for [Langfuse Cloud](https://cloud.langfuse.com) or [self-hosting](/docs/deployment/self-host) Langfuse.
+Get your Langfuse API key by signing up for [Langfuse Cloud](https://cloud.langfuse.com) or [self-hosting](/self-hosting) Langfuse.
### Set up LobeChat
@@ -41,6 +41,7 @@ LANGFUSE_SECRET_KEY = 'sk-lf...'
LANGFUSE_PUBLIC_KEY = 'pk-lf...'
LANGFUSE_HOST = 'https://cloud.langfuse.com'
```
+
@@ -59,21 +60,21 @@ Before running the Docker container, set the environment variables in the Docker
Once you have LobeChat running, navigate to the **About** tab in the **Settings** and activate analytics. This is necessary for traces to be sent to Langfuse.
-![LobeChat Settings](/images/docs/lobechat-settings.png)
+ ![LobeChat Settings](/images/docs/lobechat-settings.png)
### See your traces in Langfuse
-After setting your LLM model key, you can start interacting with your LobeChat application.
+After setting your LLM model key, you can start interacting with your LobeChat application.
-![LobeChat Conversation](/images/docs/lobechat-converstation.png)
+ ![LobeChat Conversation](/images/docs/lobechat-converstation.png)
All conversations in the chat are automatically traced and sent to Langfuse. You can view the traces in the [Traces section](/docs/tracing) of the Langfuse platform.
-![LobeChat Example Trace](/images/docs/lobechat-example-trace.png)
+ ![LobeChat Example Trace](/images/docs/lobechat-example-trace.png)
_[Example trace in the Langfuse UI](https://cloud.langfuse.com/project/cloramnkj0002jz088vzn1ja4/traces/63e9246d-3f22-4e45-936d-b0c4ccf55a1e?timestamp=2024-11-26T17%3A00%3A02.028Z&observation=7ea75a0c-d9d1-425c-9b88-27561c63b413)_
@@ -81,4 +82,4 @@ _[Example trace in the Langfuse UI](https://cloud.langfuse.com/project/cloramnkj
## Feedback
-If you have any feedback or requests, please create a GitHub [Issue](/issue) or share your work with the community on [Discord](https://discord.langfuse.com/).
\ No newline at end of file
+If you have any feedback or requests, please create a GitHub [Issue](/issue) or share your work with the community on [Discord](https://discord.langfuse.com/).
diff --git a/pages/docs/integrations/ollama.md b/pages/docs/integrations/ollama.md
index 2fbaf09e5..ab2e91450 100644
--- a/pages/docs/integrations/ollama.md
+++ b/pages/docs/integrations/ollama.md
@@ -20,7 +20,7 @@ Langfuse ([GitHub](https://github.com/langfuse/langfuse)) is an open-source LLM
### Local Deployment of Langfuse
-Of course, you can also locally deploy Langfuse to run models and trace LLM outputs only on your own device. [Here](https://langfuse.com/docs/deployment/local) is a guide on how to run Langfuse on your local machine using Docker Compose. This method is ideal for testing Langfuse and troubleshooting integration issues.
+Of course, you can also locally deploy Langfuse to run models and trace LLM outputs only on your own device. [Here](https://langfuse.com/self-hosting/local) is a guide on how to run Langfuse on your local machine using Docker Compose. This method is ideal for testing Langfuse and troubleshooting integration issues.
For this example, we will use the Langfuse cloud version.
diff --git a/pages/docs/model-usage-and-cost.mdx b/pages/docs/model-usage-and-cost.mdx
index 7d99d4a4c..5e802da11 100644
--- a/pages/docs/model-usage-and-cost.mdx
+++ b/pages/docs/model-usage-and-cost.mdx
@@ -263,7 +263,7 @@ For more details, see [the OpenAI guide](https://platform.openai.com/docs/guides
## Troubleshooting
-**Usage and cost are missing for historical generations**. Except for changes in prices, Langfuse does not retroactively infer usage and cost for existing generations when model definitions are changed. You can request a batch job (Langfuse Cloud) or run a [script](/docs/deployment/self-host#migrate-models) (self-hosting) to apply new model definitions to existing generations.
+**Usage and cost are missing for historical generations**. Except for changes in prices, Langfuse does not retroactively infer usage and cost for existing generations when model definitions are changed. You can request a batch job (Langfuse Cloud) or run a [script](/self-hosting/upgrade-guides/upgrade-v1-to-v2) (self-hosting) to apply new model definitions to existing generations.
## GitHub Discussions
diff --git a/pages/docs/open-source.mdx b/pages/docs/open-source.mdx
index af639ccc3..e41a279c8 100644
--- a/pages/docs/open-source.mdx
+++ b/pages/docs/open-source.mdx
@@ -7,7 +7,7 @@ description: Langfuse strives to be the most open and transparent LLM toolkit. L
Langfuse is open source for the following reasons:
- To establish complete transparency
-- To enable the community to [self-host](/docs/deployment/self-host) and [contribute](https://github.com/langfuse/langfuse/blob/main/CONTRIBUTING.md)
+- To enable the community to [self-host](/self-hosting) and [contribute](https://github.com/langfuse/langfuse/blob/main/CONTRIBUTING.md)
- To collaborate on integrations with various other open-source tools and frameworks
- To assure users that Langfuse is open and does not impose lock-in, and that production traces are user data that can be exported/fetched at any time
@@ -21,7 +21,7 @@ The Langfuse core product and all Langfuse-maintained integrations and SDKs are
## Enterprise Edition (EE) FAQ
-- What features are EE and what is MIT-licensed when self-hosting Langfuse? The core of Langfuse is MIT licensed and open-source (tracing, integrations, public API, prompt management). There are no restrictions on usage, modification or deployment of these features when self-hosting Langfuse. The EE features are commercially licensed add-on features that are not required for the core functionality of Langfuse. The documentation and codebase are clearly marked ("Where is this feature available?") to indicate which features are MIT-licensed and which parts are EE (commercially licensed). See a full comparison of features [here](/docs/deployment/feature-overview).
+- What features are EE and what is MIT-licensed when self-hosting Langfuse? The core of Langfuse is MIT licensed and open-source (tracing, integrations, public API, prompt management). There are no restrictions on usage, modification or deployment of these features when self-hosting Langfuse. The EE features are commercially licensed add-on features that are not required for the core functionality of Langfuse. The documentation and codebase are clearly marked ("Where is this feature available?") to indicate which features are MIT-licensed and which parts are EE (commercially licensed). See a full comparison of features [here](/self-hosting/license-key).
- Do I risk executing EE code when using Langfuse without a license? No, the EE features are only available when you have a license. You do not risk using EE features if you self-host Langfuse without a license, unless you modify the codebase to circumvent the checks.
- Do I need to care about the difference between MIT and EE when using Langfuse Cloud? No, depending on which tier of Langfuse Cloud you are on, you will have access to features that are both MIT and EE licensed.
@@ -36,7 +36,7 @@ The Langfuse core product and all Langfuse-maintained integrations and SDKs are
The Langfuse team provides Langfuse Cloud as a managed solution to simplify the initial setup of Langfuse and to minimize the operational overhead of maintaining high availability in production. Get started for free on: https://cloud.langfuse.com.
-Alternatively, Langfuse can be used [locally](/docs/deployment/local) and [self-hosted](/docs/deployment/self-host).
+Alternatively, Langfuse can be used [locally](/self-hosting/local) and [self-hosted](/self-hosting).
## Contributing
diff --git a/pages/docs/scores/annotation.mdx b/pages/docs/scores/annotation.mdx
index ef6e2c477..ad30520f4 100644
--- a/pages/docs/scores/annotation.mdx
+++ b/pages/docs/scores/annotation.mdx
@@ -101,9 +101,9 @@ Upon completing annotation click on the `Scores` tab to view a table of all the
diff --git a/pages/docs/scores/model-based-evals.mdx b/pages/docs/scores/model-based-evals.mdx
index 90a3211e6..af8d3ea92 100644
--- a/pages/docs/scores/model-based-evals.mdx
+++ b/pages/docs/scores/model-based-evals.mdx
@@ -7,10 +7,10 @@ description: Langfuse (open source) helps run model-based evaluations (llm-as-a-
diff --git a/pages/docs/sdk/python/example.md b/pages/docs/sdk/python/example.md
index 367a96293..c109c57c3 100644
--- a/pages/docs/sdk/python/example.md
+++ b/pages/docs/sdk/python/example.md
@@ -18,7 +18,7 @@ Install `langfuse`:
%pip install langfuse
```
-If you haven't done so yet, [sign up to Langfuse](https://cloud.langfuse.com/auth/sign-up) and obtain your API keys from the project settings. You can also [self-host](https://langfuse.com/docs/deployment/self-host) Langfuse.
+If you haven't done so yet, [sign up to Langfuse](https://cloud.langfuse.com/auth/sign-up) and obtain your API keys from the project settings. You can also [self-host](https://langfuse.com/self-hosting) Langfuse.
```python
diff --git a/pages/enterprise.mdx b/pages/enterprise.mdx
index c1e3d0abc..5d153ab2d 100644
--- a/pages/enterprise.mdx
+++ b/pages/enterprise.mdx
@@ -8,7 +8,7 @@ Langfuse addresses key challenges when deploying LLM-based applications within a
Langfuse is licensed as an open-core project. It's core product (tracing, observability, evals, prompt management and API/SDK endpoints) is MIT-licensed and freely available (also without limitation for commercial use). Some Langfuse features on the periphery of the core product are not available in the open-source version and cannot be used out of the box. As of today, these are either a) quality of life features (such as LLM-as-a-judge service, prompt playground) or b) security & compliance features (e.g. SSO enforcement, data retention)
-Please refer to our [feature overview](/docs/deployment/feature-overview) and the Enterprise Edition FAQ [here](/docs/open-source#enterprise-edition-ee-faq). Please reach out to enterprise@langfuse.com to discuss an enterprise license (self-hosted or cloud) for your team.
+Please refer to our [feature overview](/pricing-self-host) and the Enterprise Edition FAQ [here](/docs/open-source#enterprise-edition-ee-faq). Please reach out to enterprise@langfuse.com to discuss an enterprise license (self-hosted or cloud) for your team.
**Select Resources**
@@ -88,13 +88,13 @@ We [partner](https://www.shakudo.io/partners) with **[Shakudo](https://www.shaku
### What deployment options are available for Langfuse?
1. Managed Cloud (cloud.langfuse.com), see [Pricing](/pricing) and [Security](/docs/data-security-privacy) page for details.
-2. [Self-hosted](/docs/deployment/self-host) on your own infrastructure. Contact us if you are interested in additional support. Note that some of the infrastructure requirements will change with [Langfuse v3](https://github.com/orgs/langfuse/discussions/1902).
+2. [Self-hosted](/self-hosting) on your own infrastructure. Contact us if you are interested in additional support. Note that some of the infrastructure requirements will change with [Langfuse v3](https://github.com/orgs/langfuse/discussions/1902).
### What is the difference between Langfuse Cloud and the open-source version?
The Langfuse team provides Langfuse Cloud as a managed solution to simplify the initial setup of Langfuse and to minimize the operational overhead of maintaining high availability in production. You can chose to self-host Langfuse on your own infrastructure.
-Some features are not available in the open-source version. Please refer to the overview [here](/docs/deployment/feature-overview).
+Some features are not available in the open-source version. Please refer to the overview [here](/pricing-self-host).
### How does Authentication and RBAC work in Langfuse?
@@ -108,7 +108,7 @@ SSO with Langfuse is simple. Currently Google, GitHub, Azure AD, Okta, Auth0, an
The Hobby Plan on [Langfuse Cloud](https://cloud.langfuse.com) includes enough resources to try Langfuse for free while in a non-production environment, no credit card required.
-Alternatively, you can quickly spin up Langfuse on your own machine using `docker compose up` ([docs](/docs/deployment/local)).
+Alternatively, you can quickly spin up Langfuse on your own machine using `docker compose up` ([docs](/self-hosting/local)).
If you require security and compliance features to run a POC, please reach out to us at enterprise@langfuse.com.
diff --git a/pages/faq/all/ai-research-assistant-monitoring.mdx b/pages/faq/all/ai-research-assistant-monitoring.mdx
index 69d354d4a..7b6c8b0b6 100644
--- a/pages/faq/all/ai-research-assistant-monitoring.mdx
+++ b/pages/faq/all/ai-research-assistant-monitoring.mdx
@@ -6,10 +6,11 @@ tags: [product]
# Monitoring and Observability for AI Research Assistants
- ![AI Research Assistant Observability](/images/blog/faq/monitoring-ai-research-assistants.png)
+ ![AI Research Assistant
+ Observability](/images/blog/faq/monitoring-ai-research-assistants.png)
-The integration of large language models (LLMs) into research workflows has given rise to AI research assistants that can sift through vast amounts of data, generate summaries, and even suggest hypotheses.
+The integration of large language models (LLMs) into research workflows has given rise to AI research assistants that can sift through vast amounts of data, generate summaries, and even suggest hypotheses.
These tools often incorporate retrieval-augmented generation (RAG) to synthesize relevant information. While they are efficient, powerful and cheap to operate, monitoring them carefully is essential.
@@ -97,9 +98,8 @@ Langfuse offers a range of [integrations and SDKs](/docs/integrations/get-starte
- **Performance Maintenance**: Ensure monitoring tools scale with increased AI assistant usage.
- **Distributed Systems Support**: Monitor AI assistants operating across various platforms.
-Langfuse is designed to [scale](/docs/deployment/v3/migrate-v2-to-v3) with your needs.
+Langfuse is designed to scale with your needs.
## Get Started
To implement observability for your AI research assistant, have a look at the Langfuse quickstart [guide](/docs/integrations/overview).
-
diff --git a/pages/faq/all/best-braintrustdata-alternatives.mdx b/pages/faq/all/best-braintrustdata-alternatives.mdx
index 209be6999..a759b7c89 100644
--- a/pages/faq/all/best-braintrustdata-alternatives.mdx
+++ b/pages/faq/all/best-braintrustdata-alternatives.mdx
@@ -10,13 +10,14 @@ This article compares **Langfuse** and **Braintrust**, two platforms designed to
## What is Braintrust?
-![Braintrust](/images/blog/faq/braintrust/braintrust-example-screen.png)
+ ![Braintrust](/images/blog/faq/braintrust/braintrust-example-screen.png)
[Braintrust](https://www.braintrust.dev/) is an LLM logging and experimentation platform. It provides tools for model evaluation, performance insights, real-time monitoring, and human review. They offer an LLM proxy to log application data and an in-UI playground for rapid prototyping.
- Read our view on using LLM proxies for LLM application development [here](/blog/2024-09-langfuse-proxy).
+ Read our view on using LLM proxies for LLM application development
+ [here](/blog/2024-09-langfuse-proxy).
## What is Langfuse?
@@ -43,34 +44,34 @@ Both platforms offer functionalities to support developers working with LLMs, bu
### High level overview
-One of the biggest differences between [Langfuse](https://www.langfuse.com/) and [Braintrust](https://www.braintrust.dev/) is that Langfuse is **open-source**, making it free and easy to self-host and customize according to your needs. Being open-source provides transparency, flexibility, and full control over the codebase, allowing developers to inspect, modify, and contribute to the platform.
+One of the biggest differences between [Langfuse](https://www.langfuse.com/) and [Braintrust](https://www.braintrust.dev/) is that Langfuse is **open-source**, making it free and easy to self-host and customize according to your needs. Being open-source provides transparency, flexibility, and full control over the codebase, allowing developers to inspect, modify, and contribute to the platform.
Langfuse is built for production use cases, with a focus on reliability, security, and control over infrastructure.
Braintrust offers innovative in-UI experiences such as a playground, prompt iteration, and functions which makes it a great solution for experimentation. Langfuse focuses on its best-in-class core features: tracing, evaluations, prompt management, and open, stable APIs.
### Feature Comparison
-| Feature | **Langfuse** | **Braintrust** |
-|-----------------------------------------|---------------------------------------------------------------------------|----------------------------------------------------------------|
-| **Open Source** | ✅ Yes ([GitHub Repository](https://github.com/langfuse/langfuse)) | ❌ No |
-| **Customizability** | ✅ High (modify and extend as needed) | ⚠️ Limited (proprietary platform) |
-| **LLM Proxy** | ❌ No (direct integrations) | ✅ Yes (provides AI proxy layer) |
-| **Production Risks via Proxy** | ❌ None introduced by Langfuse | ⚠️ Potential risks (latency, downtime, data privacy concerns) |
-| **Prompt Management** | ✅ Comprehensive ([Learn more](/docs/prompts/get-started)) | ✅ Yes |
-| **Evaluation Framework** | ✅ Yes ([Learn more](/docs/scores/overview)) | ✅ Yes |
-| **Human Annotation Queues** | ✅ Built-in ([Learn more](/docs/scores/annotation#annotation-queues)) | ❌ No |
-| **LLM Playground**| ✅ Yes ([Learn more](/docs/playground)) | ✅ Yes |
-| **Self-Hosting** | ✅ Open Source ([Deployment options](/docs/deployment/feature-overview)) | ⚠️ Enterprise Plans |
-| **Integrations** | ✅ Yes ([Integrations](/docs/integrations/overview)) | ✅ Yes |
+| Feature | **Langfuse** | **Braintrust** |
+| ------------------------------ | --------------------------------------------------------------------- | ------------------------------------------------------------- |
+| **Open Source** | ✅ Yes ([GitHub Repository](https://github.com/langfuse/langfuse)) | ❌ No |
+| **Customizability** | ✅ High (modify and extend as needed) | ⚠️ Limited (proprietary platform) |
+| **LLM Proxy** | ❌ No (direct integrations) | ✅ Yes (provides AI proxy layer) |
+| **Production Risks via Proxy** | ❌ None introduced by Langfuse | ⚠️ Potential risks (latency, downtime, data privacy concerns) |
+| **Prompt Management** | ✅ Comprehensive ([Learn more](/docs/prompts/get-started)) | ✅ Yes |
+| **Evaluation Framework** | ✅ Yes ([Learn more](/docs/scores/overview)) | ✅ Yes |
+| **Human Annotation Queues** | ✅ Built-in ([Learn more](/docs/scores/annotation#annotation-queues)) | ❌ No |
+| **LLM Playground** | ✅ Yes ([Learn more](/docs/playground)) | ✅ Yes |
+| **Self-Hosting** | ✅ Open Source ([Deployment options](/self-hosting)) | ⚠️ Enterprise Plans |
+| **Integrations** | ✅ Yes ([Integrations](/docs/integrations/overview)) | ✅ Yes |
import { Callout } from "nextra/components";
### Langfuse Strengths
- **Open-Source**: Langfuse's open-source nature allows developers to inspect, modify, and contribute to the codebase, providing transparency and flexibility.
-- **No LLM Proxy**: Langfuse integrates [directly](/blog/2024-09-langfuse-proxy) with LLMs without introducing an intermediary proxy, reducing potential risks related to latency, downtime, and data privacy.
+- **No LLM Proxy**: Langfuse integrates [directly](/blog/2024-09-langfuse-proxy) with LLMs without introducing an intermediary proxy, reducing potential risks related to latency, downtime, and data privacy.
- **Comprehensive Observability**: Offers deep insights into model interactions by tracing not only LLM calls, but also related application processes.
-- **Self-Hosting Flexibility**: Provides self-hosting options, ensuring organizations can maintain full control over data residency, compliance, and security ([Learn more](/docs/deployment/feature-overview)).
+- **Self-Hosting Flexibility**: Provides self-hosting options, ensuring organizations can maintain full control over data residency, compliance, and security ([Learn more](/self-hosting)).
### Braintrust Considerations
@@ -90,32 +91,33 @@ import { Callout } from "nextra/components";
## Download Metrics
-| | pypi downloads | npm downloads | docker pulls |
-| --------------- | -------- | ------------ | ----- |
-| 🪢 Langfuse | [![Langfuse pypi downloads](https://img.shields.io/pypi/dm/langfuse?style=for-the-badge&label=%20&labelColor=black&color=orange)](https://pypi.org/project/langfuse) | [![Langfuse npm downloads](https://img.shields.io/npm/dm/langfuse?style=for-the-badge&label=%20&labelColor=black&color=orange)](https://www.npmjs.com/package/langfuse) | [![Langfuse Docker Pulls](https://img.shields.io/docker/pulls/langfuse/langfuse?style=for-the-badge&label=%20&labelColor=black&color=orange)](https://hub.docker.com/r/langfuse/langfuse) |
-| Braintrust | [![Braintrust pypi downloads](https://img.shields.io/pypi/dm/braintrust?style=for-the-badge&label=%20&labelColor=black&color=grey)](https://pypi.org/project/braintrust) | [![Braintrust npm downloads](https://img.shields.io/npm/dm/braintrust?style=for-the-badge&label=%20&labelColor=black&color=grey)](https://www.npmjs.com/package/braintrust) | N/A |
+| | pypi downloads | npm downloads | docker pulls |
+| ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| 🪢 Langfuse | [![Langfuse pypi downloads](https://img.shields.io/pypi/dm/langfuse?style=for-the-badge&label=%20&labelColor=black&color=orange)](https://pypi.org/project/langfuse) | [![Langfuse npm downloads](https://img.shields.io/npm/dm/langfuse?style=for-the-badge&label=%20&labelColor=black&color=orange)](https://www.npmjs.com/package/langfuse) | [![Langfuse Docker Pulls](https://img.shields.io/docker/pulls/langfuse/langfuse?style=for-the-badge&label=%20&labelColor=black&color=orange)](https://hub.docker.com/r/langfuse/langfuse) |
+| Braintrust | [![Braintrust pypi downloads](https://img.shields.io/pypi/dm/braintrust?style=for-the-badge&label=%20&labelColor=black&color=grey)](https://pypi.org/project/braintrust) | [![Braintrust npm downloads](https://img.shields.io/npm/dm/braintrust?style=for-the-badge&label=%20&labelColor=black&color=grey)](https://www.npmjs.com/package/braintrust) | N/A |
## Conclusion
-Both **Langfuse** and **Braintrust** offer valuable solutions for developers building AI applications with large language models.
+Both **Langfuse** and **Braintrust** offer valuable solutions for developers building AI applications with large language models.
-Langfuse's open-source nature provides transparency and flexibility, allowing organizations to customize the platform and maintain full control over their data—an essential factor for **production environments** where security and compliance are critical. Its direct integration with LLMs, without using LLM proxies, minimizes potential risks related to latency, uptime, and data privacy.
+Langfuse's open-source nature provides transparency and flexibility, allowing organizations to customize the platform and maintain full control over their data—an essential factor for **production environments** where security and compliance are critical. Its direct integration with LLMs, without using LLM proxies, minimizes potential risks related to latency, uptime, and data privacy.
-Braintrust offers a tightly integrated suite of tools with a focus on evaluation, including a playground for rapid prototyping and A/B testing features. It provides a rich in-UI experience.
+Braintrust offers a tightly integrated suite of tools with a focus on evaluation, including a playground for rapid prototyping and A/B testing features. It provides a rich in-UI experience.
---
## Other Comparisons
+
- Atla-AI has written up a review of LLM Evaluation Tooling, comparing [Braintrust and Langfuse](https://www.atla-ai.com/post/llm-evaluation-tooling-review).
## Learn More About Langfuse
- **Get Started with Langfuse**: [Documentation Overview](/docs)
-- **Deployment Options**: [Self-Hosting Guide](/docs/deployment/feature-overview)
+- **Deployment Options**: [Self-Hosting Guide](/self-hosting)
- **Integrations**: [Supported SDKs and Frameworks](/docs/integrations/overview)
- **Prompt Management**: [Managing Prompts in Production](/docs/prompts/get-started)
- **Evaluation Tools**: [Evaluating Model Outputs](/docs/scores/overview)
---
-*This comparison is out of date? Please [raise a pull request](https://github.com/langfuse/langfuse-docs/tree/main/pages/faq/all) with up-to-date information.*
+_Is this comparison out of date? Please [raise a pull request](https://github.com/langfuse/langfuse-docs/tree/main/pages/faq/all) with up-to-date information._
diff --git a/pages/faq/all/best-helicone-alternative.mdx b/pages/faq/all/best-helicone-alternative.mdx
index a468580c4..c6017de6b 100644
--- a/pages/faq/all/best-helicone-alternative.mdx
+++ b/pages/faq/all/best-helicone-alternative.mdx
@@ -24,9 +24,11 @@ Langfuse is the most popular open source LLM observability platform. You can fin
| | ⭐️ GitHub stars | Last commit | GitHub Discussions | GitHub Issues |
+
| --------- | --------- | ---- | --- | ----- |
-| 🪢 Langfuse | [![Langfuse GitHub stars](https://img.shields.io/github/stars/langfuse/langfuse?style=for-the-badge&label=%20&labelColor=black&color=orange)](https://github.com/langfuse/langfuse) | [![Langfuse last commit](https://img.shields.io/github/last-commit/langfuse/langfuse?style=for-the-badge&label=%20&labelColor=black&color=orange)](https://github.com/langfuse/langfuse) | [![Langfuse GitHub Discussions](https://img.shields.io/github/discussions/langfuse/langfuse?style=for-the-badge&label=%20&labelColor=black&color=orange)](https://github.com/orgs/langfuse/discussions) | [![Langfuse GitHub Issues](https://img.shields.io/github/issues-pr-closed-raw/langfuse/langfuse?style=for-the-badge&label=%20&labelColor=black&color=orange)](https://github.com/langfuse/langfuse/issues?q=is%3Aissue+is%3Aclosed) |
-| Helicone | [![Helicone GitHub stars](https://img.shields.io/github/stars/helicone/helicone?style=for-the-badge&label=%20&labelColor=black&color=grey)](https://github.com/helicone/helicone) | [![Helicone last commit](https://img.shields.io/github/last-commit/helicone/helicone?style=for-the-badge&label=%20&labelColor=black&color=grey)](https://github.com/helicone/helicone) | [![Helicone GitHub Discussions](https://img.shields.io/github/discussions/helicone/helicone?style=for-the-badge&label=%20&labelColor=black&color=grey)](https://github.com/orgs/helicone/discussions) | [![Helicone GitHub Issues](https://img.shields.io/github/issues-pr-closed-raw/helicone/helicone?style=for-the-badge&label=%20&labelColor=black&color=grey)](https://github.com/helicone/helicone/issues?q=is%3Aissue+is%3Aclosed)|
+| 🪢 Langfuse | [![Langfuse GitHub stars](https://img.shields.io/github/stars/langfuse/langfuse?style=for-the-badge&label=%20&labelColor=black&color=orange)](https://github.com/langfuse/langfuse) | [![Langfuse last commit](https://img.shields.io/github/last-commit/langfuse/langfuse?style=for-the-badge&label=%20&labelColor=black&color=orange)](https://github.com/langfuse/langfuse) | [![Langfuse GitHub Discussions](https://img.shields.io/github/discussions/langfuse/langfuse?style=for-the-badge&label=%20&labelColor=black&color=orange)](https://github.com/orgs/langfuse/discussions) | [![Langfuse GitHub Issues](https://img.shields.io/github/issues-pr-closed-raw/langfuse/langfuse?style=for-the-badge&label=%20&labelColor=black&color=orange)](https://github.com/langfuse/langfuse/issues?q=is%3Aissue+is%3Aclosed) |
+| Helicone | [![Helicone GitHub stars](https://img.shields.io/github/stars/helicone/helicone?style=for-the-badge&label=%20&labelColor=black&color=grey)](https://github.com/helicone/helicone) | [![Helicone last commit](https://img.shields.io/github/last-commit/helicone/helicone?style=for-the-badge&label=%20&labelColor=black&color=grey)](https://github.com/helicone/helicone) | [![Helicone GitHub Discussions](https://img.shields.io/github/discussions/helicone/helicone?style=for-the-badge&label=%20&labelColor=black&color=grey)](https://github.com/orgs/helicone/discussions) | [![Helicone GitHub Issues](https://img.shields.io/github/issues-pr-closed-raw/helicone/helicone?style=for-the-badge&label=%20&labelColor=black&color=grey)](https://github.com/helicone/helicone/issues?q=is%3Aissue+is%3Aclosed) |
+
@@ -36,12 +38,12 @@ Langfuse is the most popular open source LLM observability platform. You can fin
-
### Downloads
-| | pypi downloads | npm downloads | docker pulls |
-| ----- | --- | -------- | ----------- |
-| 🪢 Langfuse | [![Langfuse pypi downloads](https://img.shields.io/pypi/dm/langfuse?style=for-the-badge&label=%20&labelColor=black&color=orange)](https://pypi.org/project/langfuse) | [![Langfuse npm downloads](https://img.shields.io/npm/dm/langfuse?style=for-the-badge&label=%20&labelColor=black&color=orange)](https://www.npmjs.com/package/langfuse) | [![Langfuse Docker Pulls](https://img.shields.io/docker/pulls/langfuse/langfuse?style=for-the-badge&label=%20&labelColor=black&color=orange)](https://hub.docker.com/r/langfuse/langfuse) |
-| Helicone | [![Helicone pypi downloads](https://img.shields.io/pypi/dm/helicone?style=for-the-badge&label=%20&labelColor=black&color=grey)](https://pypi.org/project/helicone) | [![Helicone npm downloads](https://img.shields.io/npm/dm/helicone?style=for-the-badge&label=%20&labelColor=black&color=grey)](https://www.npmjs.com/package/helicone) | [![Helicone Docker Pulls](https://img.shields.io/docker/pulls/helicone/worker?style=for-the-badge&label=%20&labelColor=black&color=grey)](https://hub.docker.com/r/helicone/worker) |
+
+| | pypi downloads | npm downloads | docker pulls |
+| ----------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| 🪢 Langfuse | [![Langfuse pypi downloads](https://img.shields.io/pypi/dm/langfuse?style=for-the-badge&label=%20&labelColor=black&color=orange)](https://pypi.org/project/langfuse) | [![Langfuse npm downloads](https://img.shields.io/npm/dm/langfuse?style=for-the-badge&label=%20&labelColor=black&color=orange)](https://www.npmjs.com/package/langfuse) | [![Langfuse Docker Pulls](https://img.shields.io/docker/pulls/langfuse/langfuse?style=for-the-badge&label=%20&labelColor=black&color=orange)](https://hub.docker.com/r/langfuse/langfuse) |
+| Helicone | [![Helicone pypi downloads](https://img.shields.io/pypi/dm/helicone?style=for-the-badge&label=%20&labelColor=black&color=grey)](https://pypi.org/project/helicone) | [![Helicone npm downloads](https://img.shields.io/npm/dm/helicone?style=for-the-badge&label=%20&labelColor=black&color=grey)](https://www.npmjs.com/package/helicone) | [![Helicone Docker Pulls](https://img.shields.io/docker/pulls/helicone/worker?style=for-the-badge&label=%20&labelColor=black&color=grey)](https://hub.docker.com/r/helicone/worker) |
## Helicone AI
@@ -59,21 +61,19 @@ Helicone is an open source project for language model observability that provide
import { Callout } from "nextra/components";
- Read our view on using LLM proxies for LLM application development [here](/blog/2024-09-langfuse-proxy).
+ Read our view on using LLM proxies for LLM application development
+ [here](/blog/2024-09-langfuse-proxy).
### Pros and Cons of Helicone
-| ✅ Advantages: | ⛔️ Limitations: |
-| :---- | :---- |
+| ✅ Advantages: | ⛔️ Limitations: |
+| :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| **Implementation:** Simple and quick setup process for LLM logging.
**Managed Proxy:** Monitoring through the Helicone managed proxy supporting caching, security checks, and key management. | **Limited Tracing Capabilities:** Natively provides only basic LLM logging with session grouping, [limited tracing](https://docs.helicone.ai/getting-started/integration-method/openllmetry) via OpenLLMetry.
**Lacks Deep Integration:** Does not support decorator or framework integrations for automatic trace generation.
**Evaluation Constraints:** Restricted to adding custom scores via the API with no support for LLM-as-a-judge methodology or manual annotation workflows. |
-## Langfuse
+## Langfuse
-
+
_Example trace in our [public demo](/docs/demo)_
### What is Langfuse?
@@ -88,30 +88,30 @@ Langfuse is an LLM observability platform that provides a comprehensive tracing
### Pros and Cons of Langfuse
-| ✅ Advantages: | ⛔️ Limitations: |
-| :---- | :---- |
-| **Comprehensive Tracing:** Effectively tracks both LLM and non-LLM actions, delivering [complete context](/docs/tracing) for applications.
**Integration Options**: Supports asynchronous logging and tracing SDKs with integrations for frameworks like [Langchain](/docs/integrations/langchain/tracing), [Llama Index](/docs/integrations/llama-index/get-started), [OpenAI SDK](/docs/integrations/openai/python/get-started), and [others](/docs/integrations/overview).
**Prompt Management:** Optimized for minimal latency and uptime risk, with [extensive capabilities](/docs/prompts/get-started).
**Deep Evaluation:** Facilitates user feedback collection, manual reviews, automated annotations, and [custom evaluation](/docs/scores/overview) functions.
**Self-Hosting:** Extensive [self-hosting documentation](/docs/deployment/feature-overview) of required for data security or compliance requirements. | **Additional Proxy Setup:** Some LLM-related features like caching and key management require an external proxy setup, such as LiteLLM, which [integrates natively with Langfuse](/docs/integrations/litellm/tracing). Langfuse is not in the critical path and does not provide these features.
Read more on our opinion on LLM proxies in production settings [here](/blog/2024-09-langfuse-proxy). |
+| ✅ Advantages: | ⛔️ Limitations: |
+| :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| **Comprehensive Tracing:** Effectively tracks both LLM and non-LLM actions, delivering [complete context](/docs/tracing) for applications.
**Integration Options**: Supports asynchronous logging and tracing SDKs with integrations for frameworks like [Langchain](/docs/integrations/langchain/tracing), [Llama Index](/docs/integrations/llama-index/get-started), [OpenAI SDK](/docs/integrations/openai/python/get-started), and [others](/docs/integrations/overview).
**Prompt Management:** Optimized for minimal latency and uptime risk, with [extensive capabilities](/docs/prompts/get-started).
**Deep Evaluation:** Facilitates user feedback collection, manual reviews, automated annotations, and [custom evaluation](/docs/scores/overview) functions.
**Self-Hosting:** Extensive [self-hosting documentation](/self-hosting) if required for data security or compliance requirements. | **Additional Proxy Setup:** Some LLM-related features like caching and key management require an external proxy setup, such as LiteLLM, which [integrates natively with Langfuse](/docs/integrations/litellm/tracing). Langfuse is not in the critical path and does not provide these features.
Read more on our opinion on LLM proxies in production settings [here](/blog/2024-09-langfuse-proxy). |
## Core Feature Comparison
This table compares the core features of LLM observability tools: Logging model calls, managing and testing prompts in production, and evaluating model outputs.
-| | Helicone | 🪢 Langfuse |
-| :---- | :---- | :---- |
-| [Tracing and Logging](/docs/tracing) | Offers **basic LLM logging** capabilities with the ability to group logs into sessions. However, it does not provide detailed tracing and lacks support for framework integrations that would allow enhanced tracing functionalities. | Specializes in **comprehensive tracing**, enabling detailed tracking of both LLM and other activities within the system. Langfuse captures the **complete context** of applications and supports asynchronous logging with tracing SDKs, offering richer insights into application behavior. |
-| [Prompt Management](/docs/prompts/get-started) | Currently in beta, it introduces **latency and uptime risks** if prompts are fetched at runtime without using their proxy. Users are required to manage prompt-fetching mechanisms independently. | Delivers robust prompt management solutions through client SDKs, ensuring **minimal impact on application latency** and uptime during prompt retrieval. |
-| [Evaluation Capabilities](/docs/scores/overview) | Supports the addition of **custom scores** via its API, but does not offer advanced evaluation features beyond this basic capability. | Provides a wide array of evaluation tools, including mechanisms for **user feedback**, both **manual and automated annotations**, and the ability to define **custom evaluation functions**, enabling a richer and more thorough assessment of LLM performance. |
+| | Helicone | 🪢 Langfuse |
+| :----------------------------------------------- | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| [Tracing and Logging](/docs/tracing) | Offers **basic LLM logging** capabilities with the ability to group logs into sessions. However, it does not provide detailed tracing and lacks support for framework integrations that would allow enhanced tracing functionalities. | Specializes in **comprehensive tracing**, enabling detailed tracking of both LLM and other activities within the system. Langfuse captures the **complete context** of applications and supports asynchronous logging with tracing SDKs, offering richer insights into application behavior. |
+| [Prompt Management](/docs/prompts/get-started) | Currently in beta, it introduces **latency and uptime risks** if prompts are fetched at runtime without using their proxy. Users are required to manage prompt-fetching mechanisms independently. | Delivers robust prompt management solutions through client SDKs, ensuring **minimal impact on application latency** and uptime during prompt retrieval. |
+| [Evaluation Capabilities](/docs/scores/overview) | Supports the addition of **custom scores** via its API, but does not offer advanced evaluation features beyond this basic capability. | Provides a wide array of evaluation tools, including mechanisms for **user feedback**, both **manual and automated annotations**, and the ability to define **custom evaluation functions**, enabling a richer and more thorough assessment of LLM performance. |
## Conclusion
-Langfuse is a good choice for most **production use cases**, particularly when comprehensive **tracing**, deep **evaluation** capabilities, and robust **prompt management** are critical. Its ability to provide detailed insights into both **LLM and non-LLM activities**, along with support for **asynchronous logging** and various framework **integrations**, makes it ideal for complex applications requiring thorough observability.
+Langfuse is a good choice for most **production use cases**, particularly when comprehensive **tracing**, deep **evaluation** capabilities, and robust **prompt management** are critical. Its ability to provide detailed insights into both **LLM and non-LLM activities**, along with support for **asynchronous logging** and various framework **integrations**, makes it ideal for complex applications requiring thorough observability.
For teams prioritizing **ease of implementation** and willing to accept the trade-offs of increased risk and limited observability, Helicone's managed LLM proxy offers a **simpler setup** with features like caching and key management.
## Other Helicone vs. Langfuse Comparisons
+
- Helicone has its own comparison against Langfuse live on [their website](https://www.helicone.ai/blog/best-langfuse-alternatives)
## This comparison is out of date?
Please [raise a pull request](https://github.com/langfuse/langfuse-docs/tree/main/pages/faq/all) with up to date information.
-
diff --git a/pages/faq/all/best-phoenix-arize-alternatives.mdx b/pages/faq/all/best-phoenix-arize-alternatives.mdx
index 368bd3175..ea8f75b3d 100644
--- a/pages/faq/all/best-phoenix-arize-alternatives.mdx
+++ b/pages/faq/all/best-phoenix-arize-alternatives.mdx
@@ -13,7 +13,7 @@ Arize Phoenix and Langfuse are both open-source tools for LLM observability, ana
**Langfuse** focuses on being **best in class** for core LLM engineering features (tracing, evaluations, prompt management, APIs) and usage analytics. **Arize Phoenix** focuses on the **experimental and development** stages of LLM apps and is particularly strong for RAG use cases.
-- **Self-Hosting**: Langfuse is very easy to self-host and offers extensive [self-hosting documentation](/docs/deployment/feature-overview) for data security or compliance requirements.
+- **Self-Hosting**: Langfuse is very easy to self-host and offers extensive [self-hosting documentation](/self-hosting) for data security or compliance requirements.
- **Integration with Arize**: Arize Phoenix is a good solution if your company already uses [Arize AI's](https://arize.com) platform. Phoenix enables a smooth data transfer between the two tools. However, it lacks **prompt management** and **LLM usage monitoring** features, which may limit its effectiveness in production environments.
@@ -29,11 +29,13 @@ Langfuse is the most popular open source LLM observability platform. You can fin
| | ⭐️ GitHub stars | Last commit | GitHub Discussions | GitHub Issues |
| --------- | --------- | ---- | --- | ----- |
-| 🪢 Langfuse | [![Langfuse GitHub stars](https://img.shields.io/github/stars/langfuse/langfuse?style=for-the-badge&label=%20&labelColor=black&color=orange)](https://github.com/langfuse/langfuse) | [![Langfuse last commit](https://img.shields.io/github/last-commit/langfuse/langfuse?style=for-the-badge&label=%20&labelColor=black&color=orange)](https://github.com/langfuse/langfuse) | [![Langfuse GitHub Discussions](https://img.shields.io/github/discussions/langfuse/langfuse?style=for-the-badge&label=%20&labelColor=black&color=orange)](https://github.com/orgs/langfuse/discussions) | [![Langfuse GitHub Issues](https://img.shields.io/github/issues-pr-closed-raw/langfuse/langfuse?style=for-the-badge&label=%20&labelColor=black&color=orange)](https://github.com/langfuse/langfuse/issues?q=is%3Aissue+is%3Aclosed) |
-| Phoenix / Arize | [![Phoenix GitHub stars](https://img.shields.io/github/stars/arize-ai/phoenix?style=for-the-badge&label=%20&labelColor=black&color=grey)](https://github.com/arize-ai/phoenix) | [![Phoenix last commit](https://img.shields.io/github/last-commit/arize-ai/phoenix?style=for-the-badge&label=%20&labelColor=black&color=grey)](https://github.com/arize-ai/phoenix) | [![Phoenix GitHub Discussions](https://img.shields.io/github/discussions/arize-ai/phoenix?style=for-the-badge&label=%20&labelColor=black&color=grey)](https://github.com/orgs/arize-ai/discussions) | [![Phoenix GitHub Issues](https://img.shields.io/github/issues-pr-closed-raw/arize-ai/phoenix?style=for-the-badge&label=%20&labelColor=black&color=grey)](https://github.com/arize-ai/phoenix/issues?q=is%3Aissue+is%3Aclosed) |
+| 🪢 Langfuse | [![Langfuse GitHub stars](https://img.shields.io/github/stars/langfuse/langfuse?style=for-the-badge&label=%20&labelColor=black&color=orange)](https://github.com/langfuse/langfuse) | [![Langfuse last commit](https://img.shields.io/github/last-commit/langfuse/langfuse?style=for-the-badge&label=%20&labelColor=black&color=orange)](https://github.com/langfuse/langfuse) | [![Langfuse GitHub Discussions](https://img.shields.io/github/discussions/langfuse/langfuse?style=for-the-badge&label=%20&labelColor=black&color=orange)](https://github.com/orgs/langfuse/discussions) | [![Langfuse GitHub Issues](https://img.shields.io/github/issues-pr-closed-raw/langfuse/langfuse?style=for-the-badge&label=%20&labelColor=black&color=orange)](https://github.com/langfuse/langfuse/issues?q=is%3Aissue+is%3Aclosed) |
+| Phoenix / Arize | [![Phoenix GitHub stars](https://img.shields.io/github/stars/arize-ai/phoenix?style=for-the-badge&label=%20&labelColor=black&color=grey)](https://github.com/arize-ai/phoenix) | [![Phoenix last commit](https://img.shields.io/github/last-commit/arize-ai/phoenix?style=for-the-badge&label=%20&labelColor=black&color=grey)](https://github.com/arize-ai/phoenix) | [![Phoenix GitHub Discussions](https://img.shields.io/github/discussions/arize-ai/phoenix?style=for-the-badge&label=%20&labelColor=black&color=grey)](https://github.com/orgs/arize-ai/discussions) | [![Phoenix GitHub Issues](https://img.shields.io/github/issues-pr-closed-raw/arize-ai/phoenix?style=for-the-badge&label=%20&labelColor=black&color=grey)](https://github.com/arize-ai/phoenix/issues?q=is%3Aissue+is%3Aclosed) |
_Numbers refresh automatically via [shields.io](https://shields.io)_
+
@@ -43,12 +45,12 @@ _Numbers refresh automatically via [shields.io](https://shields.io)_
-
### Downloads
-| | pypi downloads | npm downloads | docker pulls |
-| ----- | --- | -------- | ----------- |
-| 🪢 Langfuse | [![Langfuse pypi downloads](https://img.shields.io/pypi/dm/langfuse?style=for-the-badge&label=%20&labelColor=black&color=orange)](https://pypi.org/project/langfuse) | [![Langfuse npm downloads](https://img.shields.io/npm/dm/langfuse?style=for-the-badge&label=%20&labelColor=black&color=orange)](https://www.npmjs.com/package/langfuse) | [![Langfuse Docker Pulls](https://img.shields.io/docker/pulls/langfuse/langfuse?style=for-the-badge&label=%20&labelColor=black&color=orange)](https://hub.docker.com/r/langfuse/langfuse) |
-| Phoenix / Arize | [![Phoenix pypi downloads](https://img.shields.io/pypi/dm/arize-phoenix?style=for-the-badge&label=%20&labelColor=black&color=grey)](https://pypi.org/project/phoenix) | N/A | [![Phoenix Docker Pulls](https://img.shields.io/docker/pulls/arizephoenix/phoenix?style=for-the-badge&label=%20&labelColor=black&color=grey)](https://hub.docker.com/r/arizephoenix/phoenix) |
+
+| | pypi downloads | npm downloads | docker pulls |
+| --------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| 🪢 Langfuse | [![Langfuse pypi downloads](https://img.shields.io/pypi/dm/langfuse?style=for-the-badge&label=%20&labelColor=black&color=orange)](https://pypi.org/project/langfuse) | [![Langfuse npm downloads](https://img.shields.io/npm/dm/langfuse?style=for-the-badge&label=%20&labelColor=black&color=orange)](https://www.npmjs.com/package/langfuse) | [![Langfuse Docker Pulls](https://img.shields.io/docker/pulls/langfuse/langfuse?style=for-the-badge&label=%20&labelColor=black&color=orange)](https://hub.docker.com/r/langfuse/langfuse) |
+| Phoenix / Arize | [![Phoenix pypi downloads](https://img.shields.io/pypi/dm/arize-phoenix?style=for-the-badge&label=%20&labelColor=black&color=grey)](https://pypi.org/project/phoenix) | N/A | [![Phoenix Docker Pulls](https://img.shields.io/docker/pulls/arizephoenix/phoenix?style=for-the-badge&label=%20&labelColor=black&color=grey)](https://hub.docker.com/r/arizephoenix/phoenix) |
_Numbers refresh automatically via [shields.io](https://shields.io)_
@@ -67,10 +69,7 @@ Arize Phoenix is an open-source observability tool designed for experimentation,
## Langfuse
-
+
_Example trace in our [public demo](/docs/demo)_
### What is Langfuse?
@@ -84,24 +83,21 @@ Langfuse is an LLM observability platform that provides a comprehensive tracing
- **Prompt Management**: Provides robust [prompt management](/docs/prompts/get-started) solutions through client SDKs, ensuring minimal impact on application latency and uptime during prompt retrieval.
- **Integration Options**: Supports asynchronous logging and tracing SDKs with integrations for frameworks like [LangChain](/docs/integrations/langchain/tracing), [LlamaIndex](/docs/integrations/llama-index/get-started), [OpenAI SDK](/docs/integrations/openai/python/get-started), and [others](/docs/integrations/overview).
- **Deep Evaluation**: Facilitates user feedback collection, manual reviews, annotation queues, [LLM-as-a-Judge](/docs/scores/model-based-evals) automated annotations, and [custom evaluation](/docs/scores/overview) functions.
-- **Self-Hosting**: Extensive [self-hosting documentation](/docs/deployment/feature-overview) for data security or compliance requirements.
-
+- **Self-Hosting**: Extensive [self-hosting documentation](/self-hosting) for data security or compliance requirements.
## Core Feature Comparison
This table compares the core features of LLM observability tools: Logging model calls, managing and testing prompts in production, and evaluating model outputs.
-
-| **Feature** | **Arize Phoenix** | **🪢 Langfuse** |
-|-----------------------|-------------------|--------------|
-| **Open Source** | ✅ Yes | ✅ [Yes](https://github.com/langfuse/langfuse) |
-| **Tracing** | ✅ Yes | ✅ [Yes](/docs/tracing) |
-| **Prompt Management** | ❌ No | ✅ [Yes](/docs/prompts/get-started) |
-| **User Feedback** | ✅ Yes | ✅ [Yes](/docs/scores/user-feedback) |
-| **Usage Monitoring** | ❌ No | ✅ [Yes](/docs/analytics/overview) |
-| **Evaluations** | ✅ Yes | ✅ [Yes](/docs/scores/overview) |
-| **Playground** | ❌ No | ✅ [Yes](/docs/playground) |
-
+| **Feature** | **Arize Phoenix** | **🪢 Langfuse** |
+| --------------------- | ----------------- | ---------------------------------------------- |
+| **Open Source** | ✅ Yes | ✅ [Yes](https://github.com/langfuse/langfuse) |
+| **Tracing** | ✅ Yes | ✅ [Yes](/docs/tracing) |
+| **Prompt Management** | ❌ No | ✅ [Yes](/docs/prompts/get-started) |
+| **User Feedback** | ✅ Yes | ✅ [Yes](/docs/scores/user-feedback) |
+| **Usage Monitoring** | ❌ No | ✅ [Yes](/docs/analytics/overview) |
+| **Evaluations** | ✅ Yes | ✅ [Yes](/docs/scores/overview) |
+| **Playground** | ❌ No | ✅ [Yes](/docs/playground) |
## Conclusion
diff --git a/pages/faq/all/chatbot-analytics.mdx b/pages/faq/all/chatbot-analytics.mdx
index 7ccaed1b0..eed3bc92c 100644
--- a/pages/faq/all/chatbot-analytics.mdx
+++ b/pages/faq/all/chatbot-analytics.mdx
@@ -130,14 +130,13 @@ Ensure your monitoring solution can handle growth.
Langfuse is designed to scale with your needs.
-
## Start Tracking your Chatbot with Langfuse
The [`@observe()` decorator](/docs/sdk/python/decorators) makes it easy to trace any Python LLM application. In this quickstart we also use the Langfuse [OpenAI integration](/docs/integrations/openai) to automatically capture all model parameters.
Not using OpenAI? Check out how you can [trace any LLM with Langfuse](/docs/get-started).
-1. [Create Langfuse account](https://cloud.langfuse.com/auth/sign-up) or [self-host](/docs/deployment/self-host)
+1. [Create Langfuse account](https://cloud.langfuse.com/auth/sign-up) or [self-host](/self-hosting)
2. Create a new project
3. Create new API credentials in the project settings
@@ -155,7 +154,7 @@ LANGFUSE_HOST="https://cloud.langfuse.com" # 🇪🇺 EU region
```python
from langfuse.decorators import observe
from langfuse.openai import openai # OpenAI integration
-
+
@observe()
def story():
return openai.chat.completions.create(
@@ -166,15 +165,15 @@ def story():
{"role": "user", "content": "Once upon a time in a galaxy far, far away..."}
],
).choices[0].message.content
-
+
@observe()
def main():
return story()
-
+
main()
```
+
## Resources
- To see chatbot tracing in action, have a look at our interactive demo [here](/demo).
- Have a look at [this guide](/blog/qa-chatbot-for-langfuse-docs) to see how we built and instrumented a chatbot for the Langfuse docs.
-
diff --git a/pages/faq/all/debug-docker-deployment.mdx b/pages/faq/all/debug-docker-deployment.mdx
index 05261aeec..aec91dccd 100644
--- a/pages/faq/all/debug-docker-deployment.mdx
+++ b/pages/faq/all/debug-docker-deployment.mdx
@@ -5,7 +5,7 @@ tags: [self-hosting]
# I cannot connect to my self-hosted Langfuse instance, what should I do?
-If you encounter issues while [self-hosting](/docs/deployment/self-host) Langfuse, ensure the following:
+If you encounter issues while [self-hosting](/self-hosting) Langfuse, ensure the following:
- `NEXTAUTH_URL` exactly matches the URL you're accessing Langfuse with. Pay attention to the protocol (http vs https) and the port (e.g., 3000 if you do not expose Langfuse on port 80).
- Set `HOSTNAME` to `0.0.0.0` if you cannot access Langfuse.
diff --git a/pages/faq/all/forgot-password.mdx b/pages/faq/all/forgot-password.mdx
index f6791cd97..c9acba715 100644
--- a/pages/faq/all/forgot-password.mdx
+++ b/pages/faq/all/forgot-password.mdx
@@ -13,6 +13,6 @@ Please contact us if you encounter any issues: support@langfuse.com
## Self-hosted Langfuse Instance
-**If transactional emails are configured** on your instance via the `SMTP_CONNECTION_URL` and `EMAIL_FROM_ADDRESS` environments ([docs](/docs/deployment/self-host#auth-email-password)), you can reset your password by using the "Forgot password" link on the login page.
+**If transactional emails are configured** on your instance via the `SMTP_CONNECTION_URL` and `EMAIL_FROM_ADDRESS` environment variables ([docs](/self-hosting/transactional-emails)), you can reset your password by using the "Forgot password" link on the login page.
-**If transactional emails are not set up**, you (or the admin of your instance with access to the database) can reset your password by following the steps outlined in the [self-hosting documentation](/docs/deployment/self-host#auth-email-password).
+**If transactional emails are not set up**, you (or the admin of your instance with access to the database) can reset your password by following the steps outlined in the [self-hosting documentation](/self-hosting/authentication-and-sso).
diff --git a/pages/faq/all/langsmith-alternative.mdx b/pages/faq/all/langsmith-alternative.mdx
index 546e77921..e4c0d86dd 100644
--- a/pages/faq/all/langsmith-alternative.mdx
+++ b/pages/faq/all/langsmith-alternative.mdx
@@ -18,7 +18,7 @@ LangSmith and Langfuse are broadly similar products. They provide LLM observabil
- Langfuse is open source while LangSmith is a closed source project.
- LangSmith is developed by the LangChain team and integrates very well with the LangChain framework. While Langfuse also maintains [Langchain integrations](/docs/integrations/langchain/tracing), you may be better off choosing LangSmith if you plan to exclusively develop using the Langchain and Langgraph frameworks.
- Langfuse maintains a [large number of integrations](/docs/integrations/overview) into many frameworks and libraries. Langsmith focuses on its Langchain integration.
-- Langfuse can be [freely self hosted](/docs/deployment/self-host) at no cost while LangSmith needs to be purchased to be self hosted.
+- Langfuse can be [freely self hosted](/self-hosting) at no cost while LangSmith needs to be purchased to be self hosted.
## Is Langfuse related to Langsmith and Langchain?
@@ -34,7 +34,7 @@ Langsmith is a closed source product. It is developed by the Langchain team whic
Langsmith can be self-hosted but it requires [a paid Enterprise License](https://www.langchain.com/pricing) to do so. It is a closed source project.
-Langfuse can be freely [self-hosted](/docs/deployment/self-host) and is open source. Users can self-host Langfuse for in a FOSS (Free and Open Source) version while a paid Enterprise Edition with some additional features is also available.
+Langfuse can be freely [self-hosted](/self-hosting) and is open source. Users can self-host Langfuse in a FOSS (Free and Open Source) version while a paid Enterprise Edition with some additional features is also available.
## Langfuse and Langsmith Comparisons
diff --git a/pages/faq/all/limit-access-to-internal-users.mdx b/pages/faq/all/limit-access-to-internal-users.mdx
index 0636c65ab..5190a7e0f 100644
--- a/pages/faq/all/limit-access-to-internal-users.mdx
+++ b/pages/faq/all/limit-access-to-internal-users.mdx
@@ -10,5 +10,9 @@ If you expose your self-hosted Langfuse instance to the public internet, you may
**Recommended ways to restrict access:**
1. Disable email/password authentication (`AUTH_DISABLE_USERNAME_PASSWORD=true`)
-2. Use an authentication provider supported by Langfuse, such as Google, GitHub, Okta, Auth0. See [docs](/docs/deployment/self-host#authentication) for details.
+2. Use an authentication provider supported by Langfuse, such as Google, GitHub, Okta, Auth0. See [docs](/self-hosting/authentication-and-sso) for details.
3. Configure your authentication provider to restrict access to only internal users.
+
+**Optional: Restrict organization creation**
+
+You can rely on an allowlist of _Organization Creators_ to restrict who can create new organizations. See [docs](/self-hosting/organization-creators) for details.
diff --git a/pages/faq/all/llm-analytics-101.mdx b/pages/faq/all/llm-analytics-101.mdx
index f39ef15c9..125a143cc 100644
--- a/pages/faq/all/llm-analytics-101.mdx
+++ b/pages/faq/all/llm-analytics-101.mdx
@@ -70,11 +70,11 @@ We've seen successful teams implement the following best practice KPIs by slicin
## Give Langfuse a Spin
-[Langfuse](https://langfuse.com) makes tracing and analyzing LLM applications accessible. It is an open-source project under MIT license.
+[Langfuse](/) makes tracing and analyzing LLM applications accessible. It is an open-source project under MIT license.
-It offers data integration with async SDKs (JS/TS, Python), via API, and Langchain integrations. It provides a UI for debugging complex traces & includes pre-built dashboards to analyze quality, latency and cost. It allows for recording user feedback and using LLM models to grade and score your outputs. To get going, refer to the [quickstart guide](https://langfuse.com/docs/get-started) in the docs.
+It offers data integration with async SDKs (JS/TS, Python), via API, and Langchain integrations. It provides a UI for debugging complex traces & includes pre-built dashboards to analyze quality, latency and cost. It allows for recording user feedback and using LLM models to grade and score your outputs. To get going, refer to the [quickstart guide](/docs/get-started) in the docs.
-Visit us on [Discord](https://langfuse.com/discord) and [Github](https://github.com/langfuse/langfuse/) to engage with our project.
+Visit us on [Discord](/discord) and [Github](https://github.com/langfuse/langfuse/) to engage with our project.
![A trace in Langfuse](https://dev-to-uploads.s3.amazonaws.com/uploads/articles/b3ksrdnl61f7vzaej4t6.png)
-Interested? Sign up to try the demo at [langfuse.com](https://langfuse.com). Self-hosting instructions can be found in [our docs](https://langfuse.com/docs/deployment/self-host).
+Interested? Sign up to try the demo at [langfuse.com](/). Self-hosting instructions can be found in [our docs](/self-hosting).
diff --git a/pages/faq/all/self-hosting-langfuse.mdx b/pages/faq/all/self-hosting-langfuse.mdx
index 26899bf2b..00458be29 100644
--- a/pages/faq/all/self-hosting-langfuse.mdx
+++ b/pages/faq/all/self-hosting-langfuse.mdx
@@ -7,15 +7,15 @@ tags: [product]
## Overview
-Langfuse is an open source project. The core of Langfuse is MIT-licensed and can easily be [self-hosted](/docs/deployment/self-host).
+Langfuse is an open source project. The core of Langfuse is MIT-licensed and can easily be [self-hosted](/self-hosting).
-The self-hosted version of Langfuse is also available as an Enterprise Edition (EE) which is paid and includes access to certain [commercially-licensed features and services](/docs/deployment/feature-overview).
+The self-hosted version of Langfuse is also available as an Enterprise Edition (EE) which is paid and includes access to certain commercially-licensed features and services that require a [license key](/self-hosting/license-key).
-| Langfuse | Cloud | Self Hosted |
-| -------- | ---------------- | ------------- |
-| Free | Hobby Plan | Langfuse FOSS |
-| Paid | Pro & Teams Plan | Langfuse EE |
+| Langfuse | Cloud | Self Hosted |
+| -------- | ---------------- | --------------------- |
+| Free | Hobby Plan | Langfuse FOSS |
+| Paid | Pro & Teams Plan | Pro & Enterprise Plan |
## Learn more about Self-Hosting Langfuse
-[Our self-hosting documentation](/docs/deployment/self-host) is the most up to date resource for instructions and questions around self-hosting Langfuse in a variety of environments via Docker.
+[Our self-hosting documentation](/self-hosting) is the most up to date resource for instructions and questions around self-hosting Langfuse in a variety of environments via Docker.
diff --git a/pages/faq/all/ten-reasons-to-use-langfuse.mdx b/pages/faq/all/ten-reasons-to-use-langfuse.mdx
index dff98daf8..a2dd25268 100644
--- a/pages/faq/all/ten-reasons-to-use-langfuse.mdx
+++ b/pages/faq/all/ten-reasons-to-use-langfuse.mdx
@@ -5,7 +5,7 @@ tags: [product]
# Ten Reasons to Use Langfuse
-Langfuse is a powerful tool designed to enhance LLM observability, evaluations, and prompt management.
+Langfuse is a powerful tool designed to enhance LLM observability, evaluations, and prompt management.
## Ten reasons why you should consider using it:
@@ -17,7 +17,7 @@ Langfuse is a powerful tool designed to enhance LLM observability, evaluations,
4. **Real-time Monitoring and Evals**: Langfuse enables real-time monitoring and [evaluation](/docs/scores/overview) to keep track of your models' performance.
-5. **Flexible Deployment**: Whether [on-premise](https://langfuse.com/docs/deployment/self-host) or in the cloud, Langfuse adapts to your deployment needs.
+5. **Flexible Deployment**: Whether [on-premise](/self-hosting) or in the cloud, Langfuse adapts to your deployment needs.
6. **Scalable**: Built to scale with your projects, Langfuse handles projects of all sizes: from small to enterprise-level needs.
@@ -27,4 +27,4 @@ Langfuse is a powerful tool designed to enhance LLM observability, evaluations,
9. **Easy Integration**: Langfuse is simple to integrate to your existing applications through its large number of [integrations](/docs/integrations).
-10. **Community Support**: You will benefit from Langfuse's supportive [open-source community](/support) and continuous updates.
\ No newline at end of file
+10. **Community Support**: You will benefit from Langfuse's supportive [open-source community](/support) and continuous updates.
diff --git a/pages/guides/cookbook/integration_amazon_bedrock.md b/pages/guides/cookbook/integration_amazon_bedrock.md
index a139f5401..f345d1836 100644
--- a/pages/guides/cookbook/integration_amazon_bedrock.md
+++ b/pages/guides/cookbook/integration_amazon_bedrock.md
@@ -220,4 +220,4 @@ You can define custom price information via the Langfuse dashboard or UI ([see d
## Additional Resources
- Metadocs, [Monitoring your Langchain app's cost using Bedrock with Langfuse](https://www.metadocs.co/2024/07/03/monitor-your-langchain-app-cost-using-bedrock-with-langfuse/), featuring Langchain integration and custom model price definitions for Bedrock models.
-- [Self-hosting guide](https://langfuse.com/docs/deployment/self-host) to deploy Langfuse on AWS.
+- [Self-hosting guide](https://langfuse.com/self-hosting) to deploy Langfuse on AWS.
diff --git a/pages/guides/cookbook/integration_ollama.md b/pages/guides/cookbook/integration_ollama.md
index 2fbaf09e5..ab2e91450 100644
--- a/pages/guides/cookbook/integration_ollama.md
+++ b/pages/guides/cookbook/integration_ollama.md
@@ -20,7 +20,7 @@ Langfuse ([GitHub](https://github.com/langfuse/langfuse)) is an open-source LLM
### Local Deployment of Langfuse
-Of course, you can also locally deploy Langfuse to run models and trace LLM outputs only on your own device. [Here](https://langfuse.com/docs/deployment/local) is a guide on how to run Langfuse on your local machine using Docker Compose. This method is ideal for testing Langfuse and troubleshooting integration issues.
+Of course, you can also locally deploy Langfuse to run models and trace LLM outputs only on your own device. [Here](https://langfuse.com/self-hosting/local) is a guide on how to run Langfuse on your local machine using Docker Compose. This method is ideal for testing Langfuse and troubleshooting integration issues.
For this example, we will use the Langfuse cloud version.
diff --git a/pages/guides/cookbook/python_decorators.md b/pages/guides/cookbook/python_decorators.md
index 367a96293..c109c57c3 100644
--- a/pages/guides/cookbook/python_decorators.md
+++ b/pages/guides/cookbook/python_decorators.md
@@ -18,7 +18,7 @@ Install `langfuse`:
%pip install langfuse
```
-If you haven't done so yet, [sign up to Langfuse](https://cloud.langfuse.com/auth/sign-up) and obtain your API keys from the project settings. You can also [self-host](https://langfuse.com/docs/deployment/self-host) Langfuse.
+If you haven't done so yet, [sign up to Langfuse](https://cloud.langfuse.com/auth/sign-up) and obtain your API keys from the project settings. You can also [self-host](https://langfuse.com/self-hosting) Langfuse.
```python
diff --git a/pages/jp.mdx b/pages/jp.mdx
index 758d52e6e..a1549fb5e 100644
--- a/pages/jp.mdx
+++ b/pages/jp.mdx
@@ -1,122 +1,121 @@
---
title: "Langfuse - 日本語概要"
description: "Langfuseは最も人気のあるオープンソースのLLMOpsプラットフォームです。チームが協力してAIアプリケーションを開発、監視、評価、デバッグするのを支援します。"
-hreflang: [
- { lang: "en", url: "https://langfuse.com" },
- { lang: "ja", url: "https://langfuse.com/jp" }
-]
+hreflang:
+ [
+ { lang: "en", url: "https://langfuse.com" },
+ { lang: "ja", url: "https://langfuse.com/jp" },
+ ]
---
-# **Langfuse \- オープンソースLLMOpsプラットフォーム \- 日本語概要**
+# **Langfuse \- オープンソース LLMOps プラットフォーム \- 日本語概要**
-注:Langfuseは、日本のユーザーとお客様に感謝し、母国語でLangfuseをわかりやすく紹介したいと考えています。
-その他のLangfuse のウェブサイトリソースはすべて英語のみで提供されていますが、Google Chromeの内蔵トランスレーターなどのツールを使用することで、他の言語でもご利用いただけます。
+注:Langfuse は、日本のユーザーとお客様に感謝し、母国語で Langfuse をわかりやすく紹介したいと考えています。
+その他の Langfuse のウェブサイトリソースはすべて英語のみで提供されていますが、Google Chrome の内蔵トランスレーターなどのツールを使用することで、他の言語でもご利用いただけます。
-## **🇯🇵 🤝 🪢**
+## **🇯🇵 🤝 🪢**
Langfuse ユーザーのコミュニティが日本で大きく成長していることを嬉しく思います。皆様のサポートとフィードバックは私たちにとってかけがえのないものです。皆様からのご連絡をお待ちしております。いつでもお気軽にご連絡ください。
-Langfuseをご利用いただき、私たちの製品とコミュニティをご支援いただき、ありがとうございます。
+Langfuse をご利用いただき、私たちの製品とコミュニティをご支援いただき、ありがとうございます。
## **Langfuse とは?**
-
-![Langfuse Trace](/images/docs/japanese-trace-example.png)
-
+![Langfuse Trace](/images/docs/japanese-trace-example.png)
-Langfuseは、大規模言語モデル(LLM)を利用したアプリケーションのために特別に設計された、**オープンソース**の観測・分析プラットフォームです。私たちは、開発者や組織がLLMアプリケーションの構築や改善を支援することを目的としており、高度なトレース&分析モジュールを通じて、モデルのコスト、品質、レイテンシに関する深い洞察を提供します。
+Langfuse は、大規模言語モデル(LLM)を利用したアプリケーションのために特別に設計された、**オープンソース**の観測・分析プラットフォームです。私たちは、開発者や組織が LLM アプリケーションの構築や改善を支援することを目的としており、高度なトレース&分析モジュールを通じて、モデルのコスト、品質、レイテンシに関する深い洞察を提供します。
-[*公開デモ*](/docs/demo)*のトレース例を見る*
+[_公開デモ_](/docs/demo)_のトレース例を見る_
-### **Langfuseを選ぶ理由**
+### **Langfuse を選ぶ理由**
-Langfuseは、市場で[**最も人気のあるオープンソースのLLMOpsツール**](/blog/2024-11-most-used-oss-llmops)であり、最新のフレームワークとの統合を構築・維持する大規模なコミュニティを持っています。
+Langfuse は、市場で[**最も人気のあるオープンソースの LLMOps ツール**](/blog/2024-11-most-used-oss-llmops)であり、最新のフレームワークとの統合を構築・維持する大規模なコミュニティを持っています。
-Langfuseは[**セルフホストが**](/docs/deployment/feature-overview)**簡単**で、数分でセットアップが完了します。これは、規制業界の企業顧客にとっても特に興味深いものです。
+Langfuse は[**セルフホストが**](/pricing-self-host)**簡単**で、数分でセットアップが完了します。これは、規制業界の企業顧客にとっても特に興味深いものです。
-Langfuseは**クラス最高のトレースを**提供し、お客様の製品開発と改良を支援します。
+Langfuse は**クラス最高のトレースを**提供し、お客様の製品開発と改良を支援します。
-**注**:Langfuseを選ぶべき理由については、[こちら](/why)をお読みください。
+**注**:Langfuse を選ぶべき理由については、[こちら](/why)をお読みください。
## **主な特徴**
-Langfuse は、生成AIプロダクトの全サイクルをサポートする一連の機能を提供します:開発からテスト、大規模なモニタリングやデバッグまで、お客様の生成AIプロダクトの全サイクルをサポートします。
+Langfuse は、生成 AI プロダクトの全サイクルをサポートする一連の機能を提供します:開発からテスト、大規模なモニタリングやデバッグまで、お客様の生成 AI プロダクトの全サイクルをサポートします。
### **モニタリング**
-* **トレース**:外部APIやツールの呼び出し、コンテキスト、プロンプトなど、製品の[完全なコンテキスト](/docs/tracing)をキャプチャします。
-* **リアルタイム・メトリクス**:応答時間、エラー率、スループットなどの主要なパフォーマンス[指標](/docs/analytics/overview)を監視します。
-* **フィードバック**:アプリケーションのパフォーマンスとユーザーエクスペリエンスを向上させるために、[ユーザーからのフィードバック](/docs/scores/user-feedback)を収集します。
+- **トレース**:外部 API やツールの呼び出し、コンテキスト、プロンプトなど、製品の[完全なコンテキスト](/docs/tracing)をキャプチャします。
+- **リアルタイム・メトリクス**:応答時間、エラー率、スループットなどの主要なパフォーマンス[指標](/docs/analytics/overview)を監視します。
+- **フィードバック**:アプリケーションのパフォーマンスとユーザーエクスペリエンスを向上させるために、[ユーザーからのフィードバック](/docs/scores/user-feedback)を収集します。
### **アナリティクス**
-* **評価**:[LLM-as-a-judge](/docs/scores/model-based-evals)による評価、または人的なアノテーションワークフローを設定することにより、異なるモデル、プロンプト、構成のパフォーマンスを比較を可能にします。
-* **テスト**:テストと[プロンプト管理](/docs/prompts/get-started)を通じて、最も効果的なソリューションを決定するために、アプリの異なるバージョン(A/B)で実験します。
-* **ユーザーの行動**:[ユーザーフィードバック](/docs/scores/user-feedback)により、AIアプリケーションをどのように操作するかを理解します。
+- **評価**:[LLM-as-a-judge](/docs/scores/model-based-evals)による評価、または人的なアノテーションワークフローを設定することにより、異なるモデル、プロンプト、構成のパフォーマンスを比較を可能にします。
+- **テスト**:テストと[プロンプト管理](/docs/prompts/get-started)を通じて、最も効果的なソリューションを決定するために、アプリの異なるバージョン(A/B)で実験します。
+- **ユーザーの行動**:[ユーザーフィードバック](/docs/scores/user-feedback)により、AI アプリケーションをどのように操作するかを理解します。
### **デバッグ**
-* **詳細なデバッグログ**:トラブルシューティングのために、すべてのアプリケーション・アクティビティの包括的なログにアクセスできます。
-* **エラートラッキング**:アプリケーション内のエラーや異常を検出し、追跡します。
+- **詳細なデバッグログ**:トラブルシューティングのために、すべてのアプリケーション・アクティビティの包括的なログにアクセスできます。
+- **エラートラッキング**:アプリケーション内のエラーや異常を検出し、追跡します。
### **統合**
-* **フレームワークのサポート**:[LangChain](/docs/integrations/langchain/tracing)、[LlamaIndex](/docs/integrations/llama-index/get-started)、[AWS Bedrockなど](/docs/integrations/amazon-bedrock) の一般的なLLMフレームワークと統合。
-* **ツールのサポート**:Difyや[Langflow](/docs/integrations/langflow)のようなノーコードビルダーと統合できます。
-* **API**:カスタム統合やワークフロー自動化のためのオープンで強力な[API](https://api.reference.langfuse.com/#get-/api/public/comments)をご利用ください。
+- **フレームワークのサポート**:[LangChain](/docs/integrations/langchain/tracing)、[LlamaIndex](/docs/integrations/llama-index/get-started)、[AWS Bedrock など](/docs/integrations/amazon-bedrock) の一般的な LLM フレームワークと統合。
+- **ツールのサポート**:Dify や[Langflow](/docs/integrations/langflow)のようなノーコードビルダーと統合できます。
+- **API**:カスタム統合やワークフロー自動化のためのオープンで強力な[API](https://api.reference.langfuse.com/#get-/api/public/comments)をご利用ください。
-## **Langfuseを使い始める**
+## **Langfuse を使い始める**
-Langfuseのご利用は簡単です:
+Langfuse のご利用は簡単です:
### **サインアップ**
-[Langfuse Cloud](https://cloud.langfuse.com/) にアクセスして無料アカウントを作成するか、数分でLangfuseを[セルフホスト](/docs/deployment/feature-overview)してください。新しいプロジェクトを作成し、Langfuse APIキーを取得してデータの取り込みを開始します。
+[Langfuse Cloud](https://cloud.langfuse.com/) にアクセスして無料アカウントを作成するか、数分で Langfuse を[セルフホスト](/pricing-self-host)してください。新しいプロジェクトを作成し、Langfuse API キーを取得してデータの取り込みを開始します。
### **ドキュメンテーションを調べる**
-OpenAIや[Langchainなど](/docs/integrations/langchain/tracing)のネイティブインテグレーションとLangfuseのセットアップガイド、またはAPIや[ローレベルSDK](/docs/sdk/python/low-level-sdk)を使用して任意のモデルをトレースするためのガイドについては、当社の[ドキュメント](https://docs.langfuse.com/)にアクセスしてください。
+OpenAI や[Langchain など](/docs/integrations/langchain/tracing)のネイティブインテグレーションと Langfuse のセットアップガイド、または API や[ローレベル SDK](/docs/sdk/python/low-level-sdk)を使用して任意のモデルをトレースするためのガイドについては、当社の[ドキュメント](https://docs.langfuse.com/)にアクセスしてください。
### **アプリケーションと統合する:**
-Langfuseは[インテグレーションやSDKを通して](/docs/integrations/overview)統合できます。LangfuseはDifyや[Langflow](/docs/integrations/langflow)のようなノーコードLLMビルダーとも統合できます。
+Langfuse は[インテグレーションや SDK を通して](/docs/integrations/overview)統合できます。Langfuse は Dify や[Langflow](/docs/integrations/langflow)のようなノーコード LLM ビルダーとも統合できます。
### **モニタリングを開始する:**
-データの収集、パフォーマンスの監視を開始し、貴重な洞察を得ましょう。Langfuseはアプリケーションの動作を[トレース](/docs/tracing)として表示します。Langfuseの[ダッシュボード](/docs/analytics/overview)では、モデルコスト、評価スコア、ユーザフィードバックなどの概要を見ることができます。
+データの収集、パフォーマンスの監視を開始し、貴重な洞察を得ましょう。Langfuse はアプリケーションの動作を[トレース](/docs/tracing)として表示します。Langfuse の[ダッシュボード](/docs/analytics/overview)では、モデルコスト、評価スコア、ユーザフィードバックなどの概要を見ることができます。
## **日本語の外部リソース**:
-* [書籍:俺たちと探究するLLMアプリケーションのオブザーバビリティ](https://techbookfest.org/product/mn0L7GEm3s8Vhmxq971HEi?productVariantID=myG2YLxFNAEVkRf2dipG8f)
-* [Blog記事: 生成AIアプリの出力をRagasで評価して、LangfuseでGUI監視しよう!](https://qiita.com/minorun365/items/70ad2f5a0afaac6e5cb9)
-* [Blog記事: Google CloudでLLMアプリ監視ツールLangfuseをセルフホスティングする方法](https://zenn.dev/cloud_ace/articles/2a3668221e9c90)
+- [書籍:俺たちと探究する LLM アプリケーションのオブザーバビリティ](https://techbookfest.org/product/mn0L7GEm3s8Vhmxq971HEi?productVariantID=myG2YLxFNAEVkRf2dipG8f)
+- [Blog 記事: 生成 AI アプリの出力を Ragas で評価して、Langfuse で GUI 監視しよう!](https://qiita.com/minorun365/items/70ad2f5a0afaac6e5cb9)
+- [Blog 記事: Google Cloud で LLM アプリ監視ツール Langfuse をセルフホスティングする方法](https://zenn.dev/cloud_ace/articles/2a3668221e9c90)
-*リソースを追加したい場合は、[プルリクエストを上げて](https://github.com/langfuse/langfuse-docs)ください。*
+_リソースを追加したい場合は、[プルリクエストを上げて](https://github.com/langfuse/langfuse-docs)ください。_
## **カスタマーサポート**
-私たちは、お客様のご希望の言語でサポートを提供することの重要性を理解しています。しかしながら、私たちは小さなチームですので、英語でのサポートしか提供することができません。
+私たちは、お客様のご希望の言語でサポートを提供することの重要性を理解しています。しかしながら、私たちは小さなチームですので、英語でのサポートしか提供することができません。
-* [チャットボット](/docs/ask-ai)(日本語を話す)
-* **GitHub :** [github.com/langfuse/discussions](https://github.com/orgs/langfuse/discussions) \- GitHubの公開ディスカッションボード(英語)で質問してください。
-* **Eメール :** [support@langfuse.com](mailto:support@langfuse.com) までご連絡ください。
-* **企業向けサポート:** [ガオ株式会社(GAO,inc.)](https://gao-ai.com)を通じて、Langfuse Enterpriseプランを日本円で購入し、日本語でサポートを受けることが可能です。ご興味のある方は [contact@gao-ai.com](mailto:contact@gao-ai.com) までご連絡ください。
-* [**Twitter**](https://x.com/LangfuseJP) (Langfuseチームによって保守されていない)
+- [チャットボット](/docs/ask-ai)(日本語を話す)
+- **GitHub :** [github.com/langfuse/discussions](https://github.com/orgs/langfuse/discussions) \- GitHub の公開ディスカッションボード(英語)で質問してください。
+- **E メール :** [support@langfuse.com](mailto:support@langfuse.com) までご連絡ください。
+- **企業向けサポート:** [ガオ株式会社(GAO,inc.)](https://gao-ai.com)を通じて、Langfuse Enterprise プランを日本円で購入し、日本語でサポートを受けることが可能です。ご興味のある方は [contact@gao-ai.com](mailto:contact@gao-ai.com) までご連絡ください。
+- [**Twitter**](https://x.com/LangfuseJP) (Langfuse チームによって保守されていない)
-## **Langfuseコミュニティに参加**
+## **Langfuse コミュニティに参加**
成長し続けるグローバル・コミュニティの一員になりませんか:
-* ⭐️ [GitHub](https://github.com/langfuse/langfuse)でスターをつけると、アップデートが届く。
-* 🤷♂️ 連絡を取り合う:ソーシャルメディアで最新情報をご覧ください。
- * [Twitter](https://x.com/langfuse)
- * [Discord](https://discord.langfuse.com/)
- * [LinkedIn](https://www.linkedin.com/company/langfuse)
-* 🖼️ [ラングフューズのステッカーを受け取る](https://langfuse.com/stickers)
\ No newline at end of file
+- ⭐️ [GitHub](https://github.com/langfuse/langfuse)でスターをつけると、アップデートが届く。
+- 🤷♂️ 連絡を取り合う:ソーシャルメディアで最新情報をご覧ください。
+ - [Twitter](https://x.com/langfuse)
+ - [Discord](https://discord.langfuse.com/)
+ - [LinkedIn](https://www.linkedin.com/company/langfuse)
+- 🖼️ [ラングフューズのステッカーを受け取る](https://langfuse.com/stickers)
diff --git a/pages/kr.mdx b/pages/kr.mdx
index 5ba37354e..a7e5d512a 100644
--- a/pages/kr.mdx
+++ b/pages/kr.mdx
@@ -1,23 +1,24 @@
---
title: "Langfuse - 한국어 개요"
description: "Langfuse는 가장 인기 있는 오픈 소스 LLMOps 플랫폼입니다. 팀이 협력하여 AI 애플리케이션을 개발, 모니터링, 평가 및 디버깅할 수 있도록 도와줍니다."
-hreflang: [
- { lang: "en", url: "https://langfuse.com" },
- { lang: "ko", url: "https://langfuse.com/kr" }
-]
+hreflang:
+ [
+ { lang: "en", url: "https://langfuse.com" },
+ { lang: "ko", url: "https://langfuse.com/kr" },
+ ]
---
# **Langfuse \- 오픈 소스 LLMOps 플랫폼 \- 한국어 개요**
-이 페이지는 기계에 의해 번역되었습니다. 한국 사용자 및 고객 여러분께 감사드리며, 여러분의 모국어로 Langfuse를 쉽게 소개하고자 합니다.
+이 페이지는 기계에 의해 번역되었습니다. 한국 사용자 및 고객 여러분께 감사드리며, 여러분의 모국어로 Langfuse를 쉽게 소개하고자 합니다.
- 다른 모든 Langfuse 웹사이트 리소스는 영어로만 제공되지만, 구글 크롬에 내장된 번역기 등의 도구를 통해 다른 언어로 이용할 수 있다는 점을 양해해 주세요.
+다른 모든 Langfuse 웹사이트 리소스는 영어로만 제공되지만, 구글 크롬에 내장된 번역기 등의 도구를 통해 다른 언어로 이용할 수 있다는 점을 양해해 주세요.
-## **🇰🇷 🤝 🪢**
+## **🇰🇷 🤝 🪢**
한국에 대규모로 성장하고 있는 Langfuse 사용자 커뮤니티를 보유하게 되어 기쁘게 생각합니다. 여러분의 지원과 피드백은 저희에게 매우 소중합니다. 여러분과 함께 일하고 여러분의 의견을 기다리겠습니다. 언제든 언제든지 문의해 주세요. 모든 제출물을 읽고 답변해 드리겠습니다.
Langfuse를 사용해 주시고 저희 제품과 커뮤니티를 응원해 주셔서 감사합니다.
@@ -25,18 +26,18 @@ Langfuse를 사용해 주시고 저희 제품과 커뮤니티를 응원해 주
## **랭퓨즈란 무엇인가요?**
-![랭퓨즈란 무엇인가요?](/images/docs/korean-example-trace.png)
+ ![랭퓨즈란 무엇인가요?](/images/docs/korean-example-trace.png)
Langfuse는 대규모 언어 모델(LLM)로 구동되는 애플리케이션을 위해 특별히 설계된 **오픈 소스** 통합 가시성 및 분석 플랫폼입니다. 개발자와 조직이 LLM 애플리케이션을 구축하고 개선할 수 있도록 돕는 것이 우리의 사명입니다. 이를 위해 고급 추적 및 분석 모듈을 통해 모델 비용, 품질 및 지연 시간에 대한 심층적인 인사이트를 제공합니다.
-[*공개 데모에서*](/docs/demo) *추적 예시를 확인하세요.*
+[_공개 데모에서_](/docs/demo) _추적 예시를 확인하세요._
### **왜 랭퓨즈를 선택해야 할까요?**
-Langfuse는 시장에서 [**가장 인기 있는 오픈소스 LLMOps 도구로**](/blog/2024-11-most-used-oss-llmops), 상당한 규모의 커뮤니티를 구축하고 최신 프레임워크와의 통합을 유지하고 있습니다.
+Langfuse는 시장에서 [**가장 인기 있는 오픈소스 LLMOps 도구로**](/blog/2024-11-most-used-oss-llmops), 상당한 규모의 커뮤니티를 구축하고 최신 프레임워크와의 통합을 유지하고 있습니다.
-Langfuse는 [**셀프 호스팅이**](/docs/deployment/feature-overview) **쉬우며** 몇 분 안에 설정할 수 있습니다. 이는 특히 규제를 받는 산업에 종사하는 기업 고객에게 유용합니다.
+Langfuse는 [**셀프 호스팅이**](/pricing-self-host) **쉬우며** 몇 분 안에 설정할 수 있습니다. 이는 특히 규제를 받는 산업에 종사하는 기업 고객에게 유용합니다.
Langfuse는 제품을 개발하고 개선하는 데 도움이 되는 **동급 최고의 추적 기능을** 제공합니다.
@@ -48,26 +49,26 @@ Langfuse는 AI 제품의 전체 주기를 지원하는 일련의 기능을 제
### **모니터링**
-* **추적**: 외부 API 또는 도구 호출, 컨텍스트, 프롬프트 등을 포함하여 제품의 [전체 컨텍스트를](/docs/tracing) 캡처하세요.
-* **실시간 지표**: 응답 시간, 오류율, 처리량과 같은 주요 성과 [지표를](/docs/analytics/overview) 모니터링하세요.
-* **피드백**: [사용자 피드백을](/docs/scores/user-feedback) 수집하여 애플리케이션의 성능과 사용자 경험을 개선하세요.
+- **추적**: 외부 API 또는 도구 호출, 컨텍스트, 프롬프트 등을 포함하여 제품의 [전체 컨텍스트를](/docs/tracing) 캡처하세요.
+- **실시간 지표**: 응답 시간, 오류율, 처리량과 같은 주요 성과 [지표를](/docs/analytics/overview) 모니터링하세요.
+- **피드백**: [사용자 피드백을](/docs/scores/user-feedback) 수집하여 애플리케이션의 성능과 사용자 경험을 개선하세요.
### **분석**
-* **평가**: [LLM-as-a-Judge](/docs/scores/model-based-evals) 평가 또는 [인간 주석](/docs/scores/annotation) 워크플로를 설정하여 다양한 모델, 프롬프트 및 구성의 성능을 비교하세요.
-* **테스트**: 다양한 버전의 앱(A/B)으로 실험하여 [테스트](/docs/datasets/overview) 및 [신속한 관리를](/docs/prompts/get-started) 통해 가장 효과적인 솔루션을 결정하세요.
-* **사용자 행동**: [사용자가](/docs/scores/user-feedback) AI 애플리케이션과 상호 작용하는 방식을 이해합니다.
+- **평가**: [LLM-as-a-Judge](/docs/scores/model-based-evals) 평가 또는 [인간 주석](/docs/scores/annotation) 워크플로를 설정하여 다양한 모델, 프롬프트 및 구성의 성능을 비교하세요.
+- **테스트**: 다양한 버전의 앱(A/B)으로 실험하여 [테스트](/docs/datasets/overview) 및 [신속한 관리를](/docs/prompts/get-started) 통해 가장 효과적인 솔루션을 결정하세요.
+- **사용자 행동**: [사용자가](/docs/scores/user-feedback) AI 애플리케이션과 상호 작용하는 방식을 이해합니다.
### **디버깅**
-* **자세한 디버그 로그**: 문제 해결을 위해 모든 애플리케이션 활동에 대한 포괄적인 로그에 액세스하세요.
-* **오류 추적**: 애플리케이션 내에서 오류와 이상 징후를 감지하고 추적하세요.
+- **자세한 디버그 로그**: 문제 해결을 위해 모든 애플리케이션 활동에 대한 포괄적인 로그에 액세스하세요.
+- **오류 추적**: 애플리케이션 내에서 오류와 이상 징후를 감지하고 추적하세요.
### **통합**
-* **프레임워크 지원**: [LangChain](/docs/integrations/langchain/tracing), [LlamaIndex](/docs/integrations/llama-index/get-started), [AWS Bedrock과](/docs/integrations/amazon-bedrock) 같은 인기 있는 LLM 프레임워크와 통합하세요.
-* **도구 지원**: [Dify](/docs/integrations/dify) 또는 [Langflow와](/docs/integrations/langflow) 같은 노코드 빌더와 통합하세요.
-* **API**: 사용자 지정 통합 및 워크플로 자동화를 위한 개방적이고 강력한 [API를](https://api.reference.langfuse.com/#get-/api/public/comments) 활용하세요.
+- **프레임워크 지원**: [LangChain](/docs/integrations/langchain/tracing), [LlamaIndex](/docs/integrations/llama-index/get-started), [AWS Bedrock과](/docs/integrations/amazon-bedrock) 같은 인기 있는 LLM 프레임워크와 통합하세요.
+- **도구 지원**: [Dify](/docs/integrations/dify) 또는 [Langflow와](/docs/integrations/langflow) 같은 노코드 빌더와 통합하세요.
+- **API**: 사용자 지정 통합 및 워크플로 자동화를 위한 개방적이고 강력한 [API를](https://api.reference.langfuse.com/#get-/api/public/comments) 활용하세요.
## **Langfuse 시작하기**
@@ -77,7 +78,7 @@ Langfuse와 함께 여정을 시작하는 방법은 간단합니다:
### **가입하세요:**
-[Langfuse Cloud를](https://cloud.langfuse.com/) 방문하여 무료 계정을 만들거나 몇 분 안에 Langfuse를 [셀프 호스팅하세요](/docs/deployment/feature-overview). 새 프로젝트를 생성하고 Langfuse API 키를 받아 데이터 수집을 시작하세요.
+[Langfuse Cloud를](https://cloud.langfuse.com/) 방문하여 무료 계정을 만들거나 몇 분 안에 Langfuse를 [셀프 호스팅하세요](/pricing-self-host). 새 프로젝트를 생성하고 Langfuse API 키를 받아 데이터 수집을 시작하세요.
### **문서 살펴보기:**
@@ -97,24 +98,24 @@ Langfuse와 함께 여정을 시작하는 방법은 간단합니다:
- [LLM 앱 디버깅 툴, Langfuse를 Amazon ECS에 배포하는 방법](https://www.youtube.com/watch?v=rrPQcWq5pe8)
-*리소스를 추가하려면 [풀](https://github.com/langfuse/langfuse-docs) 리퀘스트를 올려주세요.*
+_리소스를 추가하려면 [풀](https://github.com/langfuse/langfuse-docs) 리퀘스트를 올려주세요._
## **고객 지원**
-고객이 선호하는 언어로 지원을 제공하는 것이 중요하다는 것을 잘 알고 있습니다. 하지만 소규모 팀인 저희는 영어로만 지원을 제공할 수 있습니다.
+고객이 선호하는 언어로 지원을 제공하는 것이 중요하다는 것을 잘 알고 있습니다. 하지만 소규모 팀인 저희는 영어로만 지원을 제공할 수 있습니다.
-* [챗봇(한국어 말하기)](/docs/ask-ai)
-* **GitHub:** [github.com/langfuse/discussions](https://github.com/orgs/langfuse/discussions) \- 공개 GitHub 토론 게시판에서 질문하세요(영문).
-* **이메일:** [support@langfuse.com](mailto:support@langfuse.com) 으로 문의하세요.
-* **기업 지원:** 관심이 있으시면 [enterprise@langfuse.com](mailto:enterprise@langfuse.com) 으로 문의해 주세요.
+- [챗봇(한국어 말하기)](/docs/ask-ai)
+- **GitHub:** [github.com/langfuse/discussions](https://github.com/orgs/langfuse/discussions) \- 공개 GitHub 토론 게시판에서 질문하세요(영문).
+- **이메일:** [support@langfuse.com](mailto:support@langfuse.com) 으로 문의하세요.
+- **기업 지원:** 관심이 있으시면 [enterprise@langfuse.com](mailto:enterprise@langfuse.com) 으로 문의해 주세요.
## **랭퓨즈 커뮤니티 가입하기**
성장하는 글로벌 커뮤니티의 일원이 되어보세요:
-* ⭐️ [GitHub에](https://github.com/langfuse/langfuse) 별표를 표시하여 업데이트를 받아보세요.
-* 🤷♂️ 연결 상태 유지: 소셜 미디어를 팔로우하여 최신 소식을 받아보세요.
- * [트위터](https://x.com/langfuse)
- * [디스코드](https://discord.langfuse.com/)
- * [LinkedIn](https://www.linkedin.com/company/langfuse)
-* 🖼️ [랑퓨즈 스티커 받기](https://langfuse.com/stickers)
\ No newline at end of file
+- ⭐️ [GitHub에](https://github.com/langfuse/langfuse) 별표를 표시하여 업데이트를 받아보세요.
+- 🤷♂️ 연결 상태 유지: 소셜 미디어를 팔로우하여 최신 소식을 받아보세요.
+ - [트위터](https://x.com/langfuse)
+ - [디스코드](https://discord.langfuse.com/)
+ - [LinkedIn](https://www.linkedin.com/company/langfuse)
+- 🖼️ [랑퓨즈 스티커 받기](https://langfuse.com/stickers)
diff --git a/pages/self-hosting/_meta.tsx b/pages/self-hosting/_meta.tsx
new file mode 100644
index 000000000..484e80732
--- /dev/null
+++ b/pages/self-hosting/_meta.tsx
@@ -0,0 +1,64 @@
+import { MenuSwitcher } from "@/components/MenuSwitcher";
+
+export default {
+ "-- Switcher": {
+ type: "separator",
+    title: <MenuSwitcher />,
+ },
+ index: "Overview",
+ configuration: "Configuration",
+ "license-key": "License Key",
+ troubleshooting: "Troubleshooting",
+ "-- Deployment": {
+ type: "separator",
+ title: "Deployment",
+ },
+ local: "Local Machine",
+ "docker-compose": "VM (Docker Compose)",
+ docker: "Docker",
+ "kubernetes-helm": "Kubernetes (Helm)",
+ railway: "Railway",
+ infrastructure: "Infrastructure",
+ "-- Security": {
+ type: "separator",
+ title: "Security",
+ },
+ "authentication-and-sso": "Authentication and SSO",
+ "data-masking": {
+ title: "Data Masking ↗ (main docs)",
+ href: "/docs/tracing-features/masking",
+ newWindow: true,
+ },
+ encryption: "Encryption",
+ "deployment-strategies": "Deployment Strategies",
+ rbac: {
+ title: "RBAC ↗ (main docs)",
+ href: "/docs/rbac",
+ newWindow: true,
+ },
+ networking: "Networking",
+ "-- Features": {
+ type: "separator",
+ title: "Features",
+ },
+ "automated-access-provisioning": "Automated Access Provisioning",
+ "custom-base-path": "Custom Base Path",
+ "headless-initialization": "Headless Initialization",
+ "organization-creators": "Organization Creators (EE)",
+ "transactional-emails": "Transactional Emails",
+ "ui-customization": "UI Customization (EE)",
+ "-- Upgrade": {
+ type: "separator",
+ title: "Upgrade",
+ },
+ upgrade: "How to Upgrade",
+ versioning: "Versioning",
+ "background-migrations": "Background Migrations",
+ "upgrade-guides": "Upgrade Guides",
+ "release-notes": "Release Notes",
+ "-- Former Versions": {
+ type: "separator",
+ title: "Former Versions",
+ },
+ v2: "v2",
+};
diff --git a/pages/self-hosting/authentication-and-sso.mdx b/pages/self-hosting/authentication-and-sso.mdx
new file mode 100644
index 000000000..39d053a81
--- /dev/null
+++ b/pages/self-hosting/authentication-and-sso.mdx
@@ -0,0 +1,152 @@
+---
+title: Authentication and SSO (self-hosted)
+description: Langfuse supports both email/password and SSO authentication. Follow this guide to configure authentication for your self-hosted Langfuse deployment.
+label: "Version: v3"
+---
+
+# Authentication and SSO
+
+Langfuse supports both email/password and SSO authentication.
+
+## Email/Password [#auth-email-password]
+
+Email/password authentication is enabled by default. Users can sign up and log in using their email and password.
+
+### Password Reset
+
+**If [transactional emails](/self-hosting/transactional-emails) are configured** on your instance, users can reset their password by using the "Forgot password" link on the login page.
+
+**If transactional emails are not set up**, passwords can be reset by following these steps:
+
+ 1. Update the email associated with your user account in database, such as by adding a prefix.
+ 2. You can then sign up again with a new password.
+ 3. Reassign any organizations you were associated with via the `organization_memberships` table in database.
+ 4. Finally, remove the old user account from the `users` table in database.
+
+### Disable email/password authentication to use SSO
+
+To disable email/password authentication, set `AUTH_DISABLE_USERNAME_PASSWORD=true`. In this case, you need to set up [SSO](#sso) instead.
+
+If you decide to switch from email/password to SSO on a running instance, you can enable `*_ALLOW_ACCOUNT_LINKING=true` on the SSO provider. This will automatically merge accounts with the same email address.
+
+### Creation of default user
+
+If you want to programmatically create a default user, check out the [Headless Initialization](/self-hosting/headless-initialization) documentation. This is useful if you want to initialize the instance without using the UI, e.g. when running Langfuse in a CI/CD pipeline or programmatically deploying Langfuse into many environments.
+
+## SSO
+
+To enable OAuth/SSO provider sign-in for Langfuse, configure the required environment variables for the provider.
+
+Use `*_ALLOW_ACCOUNT_LINKING` to allow merging accounts with the same email address. This is useful when users sign in with different providers or email/password but have the same email address. You need to be careful with this setting as it can lead to security issues if the emails are not verified.
+
+Need another provider? Langfuse uses Auth.js, which integrates with [many providers](https://next-auth.js.org/providers/). Add a [feature request on GitHub](/ideas) if you want us to add support for a specific provider.
+
+### Google
+
+[NextAuth Google Provider Docs](https://next-auth.js.org/providers/google)
+
+| Configuration | Value |
+| ------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| Required Variables | `AUTH_GOOGLE_CLIENT_ID` `AUTH_GOOGLE_CLIENT_SECRET` |
+| Optional Variables | `AUTH_GOOGLE_ALLOW_ACCOUNT_LINKING=true` `AUTH_GOOGLE_ALLOWED_DOMAINS=langfuse.com,google.com` (list of allowed domains based on [`hd` OAuth claim](https://developers.google.com/identity/openid-connect/openid-connect#an-id-tokens-payload)) |
+| OAuth Redirect URL | `/api/auth/callback/google` |
+
+### GitHub
+
+[NextAuth GitHub Provider Docs](https://next-auth.js.org/providers/github)
+
+| Configuration | Value |
+| ------------------ | ------------------------------------------------------- |
+| Required Variables | `AUTH_GITHUB_CLIENT_ID` `AUTH_GITHUB_CLIENT_SECRET` |
+| Optional Variables | `AUTH_GITHUB_ALLOW_ACCOUNT_LINKING=true` |
+| OAuth Redirect URL | `/api/auth/callback/github` |
+
+### GitHub Enterprise
+
+| Configuration | Value |
+| ------------------ | ------------------------------------------------------------------------------------------------------------------- |
+| Required Variables | `AUTH_GITHUB_ENTERPRISE_CLIENT_ID` `AUTH_GITHUB_ENTERPRISE_CLIENT_SECRET` `AUTH_GITHUB_ENTERPRISE_BASE_URL` |
+| Optional Variables | `AUTH_GITHUB_ENTERPRISE_ALLOW_ACCOUNT_LINKING=false` |
+| OAuth Redirect URL | `/api/auth/callback/github-enterprise` |
+
+Thanks to [@jay0129](https://github.com/jay0129) for the initial contribution of GitHub Enterprise support!
+
+### GitLab
+
+[NextAuth GitLab Provider Docs](https://next-auth.js.org/providers/gitlab)
+
+| Configuration | Value |
+| ------------------ | ----------------------------------------------------------------- |
+| Required Variables | `AUTH_GITLAB_CLIENT_ID` `AUTH_GITLAB_CLIENT_SECRET` |
+| Optional Variables | `AUTH_GITLAB_ISSUER` `AUTH_GITLAB_ALLOW_ACCOUNT_LINKING=true` |
+| OAuth Redirect URL | `/api/auth/callback/gitlab` |
+
+### Azure AD/Entra ID
+
+[NextAuth Azure AD Provider Docs](https://next-auth.js.org/providers/azure-ad)
+
+| Configuration | Value |
+| ------------------ | ----------------------------------------------------------------------------------------- |
+| Required Variables | `AUTH_AZURE_AD_CLIENT_ID` `AUTH_AZURE_AD_CLIENT_SECRET` `AUTH_AZURE_AD_TENANT_ID` |
+| Optional Variables | `AUTH_AZURE_ALLOW_ACCOUNT_LINKING=true` |
+| OAuth Redirect URL | `/api/auth/callback/azure-ad` |
+
+### Okta
+
+[NextAuth Okta Provider Docs](https://next-auth.js.org/providers/okta)
+
+| Configuration | Value |
+| ------------------ | -------------------------------------------------------------------------- |
+| Required Variables | `AUTH_OKTA_CLIENT_ID` `AUTH_OKTA_CLIENT_SECRET` `AUTH_OKTA_ISSUER` |
+| Optional Variables | `AUTH_OKTA_ALLOW_ACCOUNT_LINKING=true` |
+| OAuth Redirect URL | `/api/auth/callback/okta` |
+
+### Auth0
+
+[NextAuth Auth0 Provider Docs](https://next-auth.js.org/providers/auth0)
+
+| Configuration | Value |
+| ------------------ | ----------------------------------------------------------------------------- |
+| Required Variables | `AUTH_AUTH0_CLIENT_ID` `AUTH_AUTH0_CLIENT_SECRET` `AUTH_AUTH0_ISSUER` |
+| Optional Variables | `AUTH_AUTH0_ALLOW_ACCOUNT_LINKING=true` |
+| OAuth Redirect URL | `/api/auth/callback/auth0` |
+
+### AWS Cognito
+
+[NextAuth Cognito Provider Docs](https://next-auth.js.org/providers/cognito)
+
+| Configuration | Value |
+| ------------------ | ----------------------------------------------------------------------------------- |
+| Required Variables | `AUTH_COGNITO_CLIENT_ID` `AUTH_COGNITO_CLIENT_SECRET` `AUTH_COGNITO_ISSUER` |
+| Optional Variables | `AUTH_COGNITO_ALLOW_ACCOUNT_LINKING=true` |
+| OAuth Redirect URL | `/api/auth/callback/cognito` |
+
+### Keycloak
+
+[NextAuth Keycloak Provider Docs](https://next-auth.js.org/providers/keycloak)
+
+| Configuration | Value |
+| ------------------ | -------------------------------------------------------------------------------------- |
+| Required Variables | `AUTH_KEYCLOAK_CLIENT_ID` `AUTH_KEYCLOAK_CLIENT_SECRET` `AUTH_KEYCLOAK_ISSUER` |
+| Optional Variables | `AUTH_KEYCLOAK_ALLOW_ACCOUNT_LINKING=true` |
+| OAuth Redirect URL | `/api/auth/callback/keycloak` |
+
+Thanks to [@RTae](https://github.com/RTae) for the initial contribution of Keycloak support!
+
+### Custom OAuth Provider
+
+[NextAuth Custom OAuth Provider Docs](https://next-auth.js.org/configuration/providers/oauth#using-a-custom-provider) ([source](https://github.com/langfuse/langfuse/blob/main/web/src/server/auth.ts))
+
+| Configuration | Value |
+| ------------------ | ------------------------------------------------------------------------------------------------------------------------------ |
+| Required Variables | `AUTH_CUSTOM_CLIENT_ID` `AUTH_CUSTOM_CLIENT_SECRET` `AUTH_CUSTOM_ISSUER` `AUTH_CUSTOM_NAME` (any, used only in UI) |
+| Optional Variables | `AUTH_CUSTOM_ALLOW_ACCOUNT_LINKING=true` `AUTH_CUSTOM_SCOPE` (defaults to `"openid email profile"`) |
+| OAuth Redirect URL | `/api/auth/callback/custom` |
+
+## Additional configuration
+
+| Variable | Description |
+| ----------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `AUTH_DOMAINS_WITH_SSO_ENFORCEMENT` | Comma-separated list of domains that are only allowed to sign in using SSO. Email/password sign in is disabled for these domains. E.g. `domain1.com,domain2.com` |
+| `AUTH_DISABLE_SIGNUP` | Set to `true` to disable sign up for new users. Only existing users can sign in. This affects all new users that try to sign up, also those who received an invite to a project and have no account yet. |
+| `AUTH_SESSION_MAX_AGE` | Set the maximum age of the session (JWT) in minutes. The default is 30 days (`43200`). The value must be greater than 5 minutes, as the front-end application refreshes its session every 5 minutes. |
diff --git a/pages/self-hosting/automated-access-provisioning.mdx b/pages/self-hosting/automated-access-provisioning.mdx
new file mode 100644
index 000000000..1464a6523
--- /dev/null
+++ b/pages/self-hosting/automated-access-provisioning.mdx
@@ -0,0 +1,23 @@
+---
+title: Automated Access Provisioning (self-hosted)
+description: Optionally, you can configure automated access provisioning for new users when self-hosting Langfuse.
+label: "Version: v3"
+---
+
+# Automated Access Provisioning
+
+Optionally, you can configure automated access provisioning for new users.
+Thereby, they will be added to a default organization and project with specific roles upon signup.
+
+See [RBAC documentation](/docs/rbac) for details on the available roles, scopes, and organizations/projects.
+
+## Configuration
+
+Set up the following environment variables on the application containers:
+
+| Variable | Required / Default | Description |
+| ------------------------------- | ------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `LANGFUSE_DEFAULT_ORG_ID` | | Configure optional default organization for new users. When users create an account they will be automatically added to this organization. |
+| `LANGFUSE_DEFAULT_ORG_ROLE` | `VIEWER` | Role of the user in the default organization (if set). Possible values are `OWNER`, `ADMIN`, `MEMBER`, `VIEWER`. See [roles](/docs/rbac) for details. |
+| `LANGFUSE_DEFAULT_PROJECT_ID` | | Configure optional default project for new users. When users create an account they will be automatically added to this project. |
+| `LANGFUSE_DEFAULT_PROJECT_ROLE` | `VIEWER` | Role of the user in the default project (if set). Possible values are `OWNER`, `ADMIN`, `MEMBER`, `VIEWER`. See [roles](/docs/rbac) for details. |
diff --git a/pages/self-hosting/background-migrations.mdx b/pages/self-hosting/background-migrations.mdx
new file mode 100644
index 000000000..494f38809
--- /dev/null
+++ b/pages/self-hosting/background-migrations.mdx
@@ -0,0 +1,32 @@
+---
+title: Background Migrations (self-hosted)
+description: Langfuse uses background migrations to perform long-running changes within the storage components when upgrading the application.
+label: "Version: v3"
+---
+
+# Background Migrations
+
+Langfuse uses background migrations to perform long-running changes within the storage components when [upgrading](/self-hosting/upgrade) the application.
+These may include the addition and backfilling of new columns or the migration of data between storages.
+Background migrations are executed on startup of the worker container and run in the background until completion or failure.
+
+Next to background migrations, fast migrations are applied directly to the database on startup of the web container.
+
+## Monitoring
+
+You can monitor the progress of background migrations within the Langfuse UI.
+Click on the Langfuse version tag and select "Background Migrations".
+You see all migrations that ever ran and their status.
+You can also monitor the progress of background migrations via the worker container logs.
+
+If migrations are running or have failed, we show a status indicator within the UI to guide users towards the background migrations overview.
+
+## Deployment stops
+
+Langfuse does not require deployment stops between minor releases as of now.
+However, we recommend that you monitor the progress of background migrations after each update to ensure that all migrations have completed successfully before attempting another update.
+We will highlight within the changelog if a deployment stop becomes required.
+
+## Configuration
+
+Background migrations are enabled by default and can be disabled by setting `LANGFUSE_ENABLE_BACKGROUND_MIGRATIONS=false`. This is not recommended as it may leave the application in an inconsistent state where the UI and API do not reflect the current state of the data correctly.
diff --git a/pages/self-hosting/configuration.mdx b/pages/self-hosting/configuration.mdx
new file mode 100644
index 000000000..0b2bfa3be
--- /dev/null
+++ b/pages/self-hosting/configuration.mdx
@@ -0,0 +1,103 @@
+---
+title: Configuration (self-hosted)
+description: Langfuse has extensive configuration options via environment variables.
+label: "Version: v3"
+---
+
+# Configuration
+
+Langfuse has extensive configuration options via environment variables. These need to be passed to all application containers.
+
+## Environment Variables
+
+Langfuse accepts additional environment variables to fine-tune your deployment.
+You can use the same environment variables for the Langfuse Web and Langfuse Worker containers.
+
+### Core Infrastructure Settings
+
+| Variable | Required / Default | Description |
+| ----------------------------------------------- | ------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `DATABASE_URL` | Required | Connection string of your Postgres database. Instead of `DATABASE_URL`, you can also use `DATABASE_HOST`, `DATABASE_USERNAME`, `DATABASE_PASSWORD` and `DATABASE_NAME`. |
+| `DIRECT_URL` | `DATABASE_URL` | Connection string of your Postgres database used for database migrations. Use this if you want to use a different user for migrations or use connection pooling on `DATABASE_URL`. **For large deployments**, configure the database user with long timeouts as migrations might need a while to complete. |
+| `SHADOW_DATABASE_URL` | | If your database user lacks the `CREATE DATABASE` permission, you must create a shadow database and configure the "SHADOW_DATABASE_URL". This is often the case if you use a Cloud database. Refer to the [Prisma docs](https://www.prisma.io/docs/orm/prisma-migrate/understanding-prisma-migrate/shadow-database#cloud-hosted-shadow-databases-must-be-created-manually) for detailed instructions. |
+| `CLICKHOUSE_MIGRATION_URL` | Required | Migration URL (TCP protocol) for the clickhouse instance. Pattern: `clickhouse://<hostname>:(9000/9440)` |
+| `CLICKHOUSE_MIGRATION_SSL` | `false` | Set to true to establish an SSL connection to Clickhouse for the database migration. |
+| `CLICKHOUSE_URL` | Required | Hostname of the clickhouse instance. Pattern: `http(s)://<hostname>:(8123/8443)` |
+| `CLICKHOUSE_USER` | Required | Username of the clickhouse database. Needs SELECT, ALTER, INSERT, CREATE, DELETE grants. |
+| `CLICKHOUSE_PASSWORD` | Required | Password of the clickhouse user. |
+| `CLICKHOUSE_CLUSTER_ENABLED` | `true` | Whether to run ClickHouse commands `ON CLUSTER`. Set to `false` for single-container setups. |
+| `LANGFUSE_AUTO_CLICKHOUSE_MIGRATION_DISABLED` | `false` | Whether to disable automatic ClickHouse migrations on startup. |
+| `REDIS_CONNECTION_STRING` | Required | Connection string of your redis instance. Instead of `REDIS_CONNECTION_STRING`, you can also use `REDIS_HOST`, `REDIS_PORT`, and `REDIS_AUTH`. |
+| `NEXTAUTH_URL` | Required | URL of your Langfuse web deployment, e.g. `https://yourdomain.com` or `http://localhost:3000`. Required for successful authentication via OAUTH. |
+| `NEXTAUTH_SECRET` | Required | Used to validate login session cookies, generate secret with at least 256 entropy using `openssl rand -base64 32`. |
+| `SALT` | Required | Used to salt hashed API keys, generate secret with at least 256 entropy using `openssl rand -base64 32`. |
+| `ENCRYPTION_KEY` | Required | Used to encrypt sensitive data. Must be 256 bits, 64 string characters in hex format, generate via: `openssl rand -hex 32`. |
+| `LANGFUSE_CSP_ENFORCE_HTTPS` | `false` | Set to `true` to set CSP headers to only allow HTTPS connections. Needs to be set at build-time. |
+| `PORT` | `3000` / `3030` | Port the server listens on. 3000 for web, 3030 for worker. |
+| `HOSTNAME` | `localhost` | In some environments it needs to be set to `0.0.0.0` to be accessible from outside the container (e.g. Google Cloud Run). |
+| `LANGFUSE_S3_EVENT_UPLOAD_BUCKET` | Required | Name of the bucket in which event information should be uploaded. |
+| `LANGFUSE_S3_EVENT_UPLOAD_PREFIX` | `""` | Prefix to store events within a subpath of the bucket. Defaults to the bucket root. If provided, must end with a `/`. |
+| `LANGFUSE_S3_EVENT_UPLOAD_REGION` | | Region in which the bucket resides. |
+| `LANGFUSE_S3_EVENT_UPLOAD_ENDPOINT` | | Endpoint to use to upload events. |
+| `LANGFUSE_S3_EVENT_UPLOAD_ACCESS_KEY_ID` | | Access key for the bucket. Must have List, Get, and Put permissions. |
+| `LANGFUSE_S3_EVENT_UPLOAD_SECRET_ACCESS_KEY` | | Secret access key for the bucket. |
+| `LANGFUSE_S3_EVENT_UPLOAD_FORCE_PATH_STYLE` | | Whether to force path style on requests. Required for MinIO. |
+| `LANGFUSE_S3_BATCH_EXPORT_ENABLED` | `false` | Whether to enable Langfuse S3 batch exports. This must be set to `true` to enable batch exports. |
+| `LANGFUSE_S3_BATCH_EXPORT_BUCKET` | Required | Name of the bucket in which batch exports should be uploaded. |
+| `LANGFUSE_S3_BATCH_EXPORT_PREFIX` | `""` | Prefix to store batch exports within a subpath of the bucket. Defaults to the bucket root. If provided, must end with a `/`. |
+| `LANGFUSE_S3_BATCH_EXPORT_REGION` | | Region in which the bucket resides. |
+| `LANGFUSE_S3_BATCH_EXPORT_ENDPOINT` | | Endpoint to use to upload batch exports. |
+| `LANGFUSE_S3_BATCH_EXPORT_ACCESS_KEY_ID` | | Access key for the bucket. Must have List, Get, and Put permissions. |
+| `LANGFUSE_S3_BATCH_EXPORT_SECRET_ACCESS_KEY` | | Secret access key for the bucket. |
+| `LANGFUSE_S3_BATCH_EXPORT_FORCE_PATH_STYLE` | | Whether to force path style on requests. Required for MinIO. |
+| `DB_EXPORT_PAGE_SIZE` | `1000` | Optional page size for streaming exports to S3 to avoid memory issues. The page size can be adjusted if needed to optimize performance. |
+| `LANGFUSE_S3_MEDIA_UPLOAD_BUCKET` | Required | Name of the bucket in which media files should be uploaded. |
+| `LANGFUSE_S3_MEDIA_UPLOAD_PREFIX` | `""` | Prefix to store media within a subpath of the bucket. Defaults to the bucket root. If provided, must end with a `/`. |
+| `LANGFUSE_S3_MEDIA_UPLOAD_REGION` | | Region in which the bucket resides. |
+| `LANGFUSE_S3_MEDIA_UPLOAD_ENDPOINT` | | Endpoint to use to upload media files. |
+| `LANGFUSE_S3_MEDIA_UPLOAD_ACCESS_KEY_ID` | | Access key for the bucket. Must have List, Get, and Put permissions. |
+| `LANGFUSE_S3_MEDIA_UPLOAD_SECRET_ACCESS_KEY` | | Secret access key for the bucket. |
+| `LANGFUSE_S3_MEDIA_UPLOAD_FORCE_PATH_STYLE` | | Whether to force path style on requests. Required for MinIO. |
+| `LANGFUSE_S3_MEDIA_MAX_CONTENT_LENGTH` | `1_000_000_000` | Maximum file size in bytes that is allowed for upload. Default is 1GB. |
+| `LANGFUSE_S3_MEDIA_DOWNLOAD_URL_EXPIRY_SECONDS` | `3600` | Presigned download URL expiry in seconds. Defaults to 1h. |
+| `LANGFUSE_AUTO_POSTGRES_MIGRATION_DISABLED` | `false` | Set to `true` to disable automatic database migrations on docker startup. Not recommended. |
+| `LANGFUSE_LOG_LEVEL` | `info` | Set the log level for the application. Possible values are `trace`, `debug`, `info`, `warn`, `error`, `fatal`. |
+| `LANGFUSE_LOG_FORMAT` | `text` | Set the log format for the application. Possible values are `text`, `json`. |
+
+### Additional Features
+
+There are additional features that can be enabled and configured via environment variables.
+
+import SelfHostFeatures from "@/components-mdx/self-host-features.mdx";
+
+
+
+## Health and Readiness Check Endpoint
+
+Langfuse web includes a health check endpoint at `/api/public/health` and a readiness check endpoint at `/api/public/ready` and the
+Langfuse worker a health check endpoint at `/api/health`.
+The health check endpoint indicates if the application is alive and the readiness check endpoint indicates if the application is ready to serve traffic.
+
+Access the health and readiness check endpoints:
+
+```bash
+curl http://localhost:3000/api/public/health
+curl http://localhost:3000/api/public/ready
+curl http://localhost:3030/api/health
+```
+
+The potential responses from the health check endpoint are:
+
+- `200 OK`: Both the API is functioning normally and a successful connection to the database was made.
+- `503 Service Unavailable`: Either the API is not functioning or it couldn't establish a connection to the database.
+
+The potential responses from the readiness check endpoint are:
+
+- `200 OK`: The application is ready to serve traffic.
+- `500 Internal Server Error`: The application received a SIGTERM or SIGINT and should not receive traffic.
+
+Applications and monitoring services can call this endpoint periodically for health updates.
+
+Per default, the Langfuse web healthcheck endpoint does not validate if the database is reachable, as there are cases where the
+database is unavailable, but the application still serves traffic.
+If you want to run database healthchecks, you can add `?failIfDatabaseUnavailable=true` to the healthcheck endpoint.
diff --git a/pages/self-hosting/custom-base-path.mdx b/pages/self-hosting/custom-base-path.mdx
new file mode 100644
index 000000000..e605ed8d4
--- /dev/null
+++ b/pages/self-hosting/custom-base-path.mdx
@@ -0,0 +1,63 @@
+---
+title: Custom Base Path (self-hosted)
+description: Follow this guide to deploy Langfuse on a custom base path, e.g. https://yourdomain.com/langfuse.
+label: "Version: v3"
+---
+
+# Custom Base Path
+
+By default, Langfuse is served on the root path of a domain, e.g. `https://langfuse.yourdomain.com`, `https://yourdomain.com`.
+
+In some circumstances, you might want to deploy Langfuse on a custom base path, e.g. `https://yourdomain.com/langfuse`, when integrating Langfuse into existing infrastructure.
+
+## Setup
+
+
+ As this base path is inlined in static assets, you cannot use the prebuilt
+ docker image for the **web container** (langfuse/langfuse). **You need to
+ build the image from source** with the `NEXT_PUBLIC_BASE_PATH` environment
+ variable set at build time. The worker container (langfuse/langfuse-worker)
+ can be run with the prebuilt image.
+
+
+
+
+### Update environment variables
+
+When using a custom base path, `NEXTAUTH_URL` must be set to the full URL including the base path and `/api/auth`. For example, if you are deploying Langfuse at `https://yourdomain.com/langfuse-base-path`, you need to set:
+
+```bash filename=".env"
+NEXT_PUBLIC_BASE_PATH="/langfuse-base-path"
+NEXTAUTH_URL="https://yourdomain.com/langfuse-base-path/api/auth"
+```
+
+### Build Langfuse Web image from source
+
+Build image for the Langfuse Web container (`langfuse/langfuse`) from source with `NEXT_PUBLIC_BASE_PATH` as build argument:
+
+```bash /NEXT_PUBLIC_BASE_PATH/
+# clone repo
+git clone https://github.com/langfuse/langfuse.git
+cd langfuse
+
+# checkout production branch
+# main branch includes unreleased changes that might be unstable
+git checkout production
+
+# build image with NEXT_PUBLIC_BASE_PATH
+docker build -t langfuse/langfuse --build-arg NEXT_PUBLIC_BASE_PATH=/langfuse-base-path -f ./web/Dockerfile .
+```
+
+### Run Langfuse
+
+When deploying Langfuse according to one of the deployment guides, replace the prebuilt image for the web container (`langfuse/langfuse`) with the image you built from source.
+
+### Connect to Langfuse
+
+Once your Langfuse instance is running, you can access both the API and console through your configured custom base path. When connecting via SDKs, make sure to include the custom base path in the hostname.
+
+
+
+import SelfHostHelpFooter from "@/components-mdx/self-host-help-footer.mdx";
+
+
diff --git a/pages/self-hosting/deployment-strategies.mdx b/pages/self-hosting/deployment-strategies.mdx
new file mode 100644
index 000000000..9fdc8de47
--- /dev/null
+++ b/pages/self-hosting/deployment-strategies.mdx
@@ -0,0 +1,131 @@
+---
+title: Deployment Strategies (self-hosted)
+description: Learn how to manage Langfuse effectively. It covers strategies for handling multiple projects and environments.
+label: "Version: v3"
+---
+
+# Deployment Strategies
+
+Use this guide to learn how to manage Langfuse effectively. It covers strategies for handling multiple projects and environments.
+
+When self-hosting Langfuse, there are several strategies you can use to manage projects and environments. This guide outlines the different approaches, their trade-offs, and implementation details to help you decide which strategy best suits your use case.
+
+In most cases, a single Langfuse deployment is the best approach. It leverages RBAC (role-based access control) to separate data by organizations, projects, and user roles. However, certain use cases might require multiple deployments based on specific architectural or organizational needs.
+
+## Single Langfuse Deployment
+
+A single Langfuse deployment is the standard and recommended setup. It centralizes management, scales efficiently across projects and environments, and takes full advantage of Langfuse's built-in RBAC features.
+
+
+
+```mermaid
+graph TB
+ subgraph AppVPC1["App/Env VPC 1"]
+ App1[Application 1]
+ end
+ subgraph AppVPC2["App/Env VPC 2"]
+ App2[Application 2]
+ end
+ subgraph AppVPCn["App/Env VPC N"]
+ AppN[Application N]
+ end
+ subgraph CentralVPC["Langfuse VPC"]
+ LF["Langfuse Service (logical separation of data)"]
+ end
+
+ App1 -- VPC Peering --> LF
+ App2 -- VPC Peering --> LF
+ AppN -- VPC Peering --> LF
+
+ User["User/API/SDK"]
+ LF -- Public Hostname and SSO --> User
+```
+
+
+
+### When to Use
+
+- Your team can rely on Langfuse's RBAC to enforce data isolation.
+- You want to minimize infrastructure complexity and operational overhead.
+
+### Implementation Steps
+
+1. Deploy Langfuse following the [self-hosting guide](/self-hosting).
+2. Configure organizations and projects for each logical unit (e.g., team, client, or department).
+3. Optional: Use [organization creators](/self-hosting/organization-creators) and [project-level RBAC](/docs/rbac) roles to optimize permission management across teams and environments.
+
+### Additional Considerations
+
+- RBAC is critical to ensure proper data isolation. Plan your access control policies carefully.
+- Langfuse is designed to be exposed publicly (see networking documentation). This approach simplifies access for stakeholders and eliminates complex network configurations, making it easier to integrate seamlessly across teams and projects.
+- VPC peering can be used to access Langfuse privately across projects and environments, enhancing security and connectivity in centralized deployments.
+
+## Langfuse Deployment for Each Service or Project
+
+In this approach, you run a separate Langfuse deployment for each service, project, or environment. This provides complete isolation at the infrastructure level but comes with additional complexity.
+
+Langfuse can be deployed via infrastructure as code (IaC) tools like Terraform or Helm, making this approach more manageable.
+
+
+
+```mermaid
+graph TB
+ subgraph AppVPC1["App/Env VPC 1"]
+ App1[Application 1]
+ LF1["Langfuse Service"]
+ App1 -- Within VPC --> LF1
+ end
+ subgraph AppVPC2["App/Env VPC 2"]
+ App2[Application 2]
+ LF2["Langfuse Service"]
+ App2 -- Within VPC --> LF2
+ end
+ subgraph AppVPCn["App/Env VPC N"]
+ AppN[Application N]
+ LFn["Langfuse Service"]
+ AppN -- Within VPC --> LFn
+ end
+
+ User["User/API/SDK"]
+ LF1 -- VPN --> User
+ LF2 -- VPN --> User
+ LFn -- VPN --> User
+```
+
+
+
+### When to Use
+
+- Compliance or regulatory requirements mandate strict data separation.
+
+### Implementation Steps
+
+1. Deploy Langfuse instances for each project or service by following the [self-hosting guide](/self-hosting). For example, you can use a Helm chart to seamlessly integrate Langfuse into your application stack.
+2. Use [headless initialization](/self-hosting/headless-initialization) to provision default organizations, projects, and API keys in each Langfuse instance when deploying it together with an application stack.
+3. Provision access for users of each individual deployment and educate them about which Langfuse instances are available to them.
+
+### Considerations
+
+- **Higher Costs:** Each deployment requires dedicated resources, including infrastructure, maintenance, and updates.
+- **Operational Complexity:** Managing multiple deployments can increase overhead for DevOps teams to scale and continuously [upgrade](/self-hosting/upgrade).
+- **More difficult to adopt**: New teams cannot just get started but need to request deployment of an instance for the project or environment.
+- **Cross-Project Visibility:** There is no shared view across projects or environments unless you build an external aggregation solution. Separating environments makes prompt deployment across instances more complex. It also makes it harder to sync datasets between production, staging, and development, limiting the ability to test edge cases and learn from production data.
+- **Confusion of non-engineering teams:** Non-engineering teams might not understand the difference between Langfuse instances and how to use them.
+
+## Choosing the Right Strategy
+
+| Factor | Single Deployment | Multiple Deployments |
+| ----------------------- | --------------------------------------------- | ------------------------------------------------------------ |
+| **Ease of Maintenance** | Centralized and simplified management | Complex management with higher operational overhead |
+| **Ease of Adoption** | Quick self-service via project creation in UI | Requires deployment requests and infrastructure provisioning |
+| **Cost Efficiency** | Optimized costs through shared infrastructure | Higher costs from duplicated infrastructure and maintenance |
+| **Data Isolation** | Project-level isolation through RBAC controls | Complete physical and logical separation between deployments |
+| **Scalability** | Unified scaling of centralized infrastructure | Independent but duplicated scaling for each deployment |
+| **Compliance Needs** | Suitable for standard compliance requirements | Required for strict regulatory isolation requirements |
+| **User Experience** | Single interface with seamless project access | Multiple interfaces requiring additional user training |
+
+### General Recommendation
+
+Start with a single Langfuse deployment and evaluate its scalability and data isolation capabilities. If specific needs arise that require isolated environments, consider moving to a multi-deployment approach for those cases. However, this is usually not recommended.
+
+Please [reach out](/support) in case you have any questions on how to best architect your Langfuse deployment.
diff --git a/pages/self-hosting/docker-compose.mdx b/pages/self-hosting/docker-compose.mdx
new file mode 100644
index 000000000..02e3498e7
--- /dev/null
+++ b/pages/self-hosting/docker-compose.mdx
@@ -0,0 +1,132 @@
+---
+title: Docker Compose (self-hosted)
+description: Step-by-step guide to run Langfuse on a VM using docker compose.
+label: "Version: v3"
+---
+
+# Docker Compose
+
+This guide will walk you through deploying Langfuse on a VM using Docker Compose.
+We will use the [`docker-compose.yml`](https://github.com/langfuse/langfuse/blob/main/docker-compose.yml) file.
+
+If you use a cloud provider like AWS, GCP, or Azure, you will need permissions to deploy virtual machines.
+
+For high-availability and high-throughput, we recommend using Kubernetes ([deployment guide](/self-hosting/kubernetes-helm)).
+The docker compose setup lacks high-availability, scaling capabilities, and backup functionality.
+
+## Get Started
+
+
+
+### Start a new instance and SSH into it
+
+Enter your cloud provider interface and navigate to the VM instance section.
+This is EC2 on AWS, Compute Engine on GCP, and Virtual Machines on Azure.
+Create a new instance.
+
+We recommend that you use at least 4 cores and 16 GiB of memory, e.g. a t3.xlarge on AWS.
+Assign a public IP address in case you want to send traces from external sources.
+As observability data tends to be large in volume, choose a sufficient amount of storage, e.g. 100GiB.
+
+The rest of this guide will assume that you have an Ubuntu OS running on your VM and are connected via SSH.
+
+### Install Docker and Docker Compose
+
+Install docker (see [official guide](https://docs.docker.com/engine/install/ubuntu/) as well). Setup Docker's apt repository:
+
+```bash
+# Add Docker's official GPG key:
+sudo apt-get update
+sudo apt-get install ca-certificates curl
+sudo install -m 0755 -d /etc/apt/keyrings
+sudo curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc
+sudo chmod a+r /etc/apt/keyrings/docker.asc
+
+# Add the repository to Apt sources:
+echo \
+ "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu \
+ $(. /etc/os-release && echo "$VERSION_CODENAME") stable" | \
+ sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
+sudo apt-get update
+```
+
+Install Docker packages:
+
+```bash
+sudo apt-get install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
+```
+
+Verify installation:
+
+```bash
+sudo docker run hello-world
+```
+
+### Clone Langfuse Repository
+
+Get a copy of the latest Langfuse repository:
+
+```bash
+git clone https://github.com/langfuse/langfuse.git
+
+cd langfuse
+```
+
+### Update Secrets
+
+For testing purposes, the pre-configured variables in the docker-compose file are usually sufficient. Feel free to skip this step.
+
+If you send _any_ kind of sensitive data to the application or intend to keep it up for longer, we recommend that you modify the docker-compose file and overwrite the following environment variables:
+
+- `SALT`: A random string used to hash passwords. It should be at least 32 characters long.
+- `ENCRYPTION_KEY`: Generate this via `openssl rand -base64 32`.
+- `NEXTAUTH_SECRET`: A random string used to sign JWT tokens.
+- `NEXTAUTH_URL`: The URL where the application is hosted. Used for redirects after signup.
+
+In addition, you can change the database and storage credentials to be more secure.
+
+### Start the application
+
+```bash
+docker compose up
+```
+
+Watch the containers being started and the logs flowing in.
+After about 2-3 minutes, the langfuse-web-1 container should log "Ready".
+At this point you can proceed to the next step.
+
+### Done
+
+And you are ready to go! Open `http://<instance-ip>:3000` in your browser to access the Langfuse UI.
+
+Depending on your configuration, you might need to open an SSH tunnel to your VM to access the IP. Please refer to your cloud provider's documentation for how to do this.
+
+
+
+## Features
+
+Langfuse supports many configuration options and self-hosted features.
+For more details, please refer to the [configuration guide](/self-hosting/configuration).
+
+import SelfHostFeatures from "@/components-mdx/self-host-features.mdx";
+
+
+
+## Shutdown
+
+You can stop the containers by hitting `Ctrl+C` in the terminal.
+If you started docker compose in the background (`-d` flag), you can stop all instances using:
+
+```bash
+docker compose down
+```
+
+Adding the `-v` flag will also remove the volumes.
+
+Ensure to stop the VM instance in your cloud provider interface to avoid unnecessary costs.
+
+## How to Upgrade
+
+To upgrade Langfuse, you can stop the containers and run `docker compose up --pull always`.
+
+For more details on upgrading, please refer to the [upgrade guide](/self-hosting/upgrade).
diff --git a/pages/self-hosting/docker.mdx b/pages/self-hosting/docker.mdx
new file mode 100644
index 000000000..b577139a4
--- /dev/null
+++ b/pages/self-hosting/docker.mdx
@@ -0,0 +1,73 @@
+---
+title: Docker (self-hosted)
+description: Learn how to deploy Langfuse v3 on your own infrastructure using Docker.
+label: "Version: v3"
+---
+
+# Docker
+
+Deploy the application container to your infrastructure.
+You can use managed services like AWS ECS, Azure Container Instances, or GCP Cloud Run, or host it yourself.
+
+During the container startup, all database migrations will be applied automatically.
+This can be optionally disabled via environment variables.
+
+Before running Langfuse, please familiarize yourself with the [architecture](/self-hosting#architecture).
+
+Please follow one of the deployment guides to run Langfuse [locally](/self-hosting/local), on a VM ([docker-compose](/self-hosting/docker-compose)), or in Kubernetes ([helm](/self-hosting/kubernetes-helm)).
+
+## Run Langfuse Web
+
+```bash
+docker run --name langfuse-web \
+ -e DATABASE_URL=postgresql://hello \
+ -e NEXTAUTH_URL=http://localhost:3000 \
+ -e NEXTAUTH_SECRET=mysecret \
+ -e SALT=mysalt \
+ -e ENCRYPTION_KEY=0000000000000000000000000000000000000000000000000000000000000000 \ # generate via: openssl rand -hex 32
+ -e CLICKHOUSE_URL=http://clickhouse:8123 \
+ -e CLICKHOUSE_USER=clickhouse \
+ -e CLICKHOUSE_PASSWORD=clickhouse \
+ -e CLICKHOUSE_MIGRATION_URL=clickhouse://clickhouse:9000 \
+ -e REDIS_HOST=localhost \
+ -e REDIS_PORT=6379 \
+ -e REDIS_AUTH=redis \
+ -e LANGFUSE_S3_EVENT_UPLOAD_BUCKET=my-bucket \
+ -e LANGFUSE_S3_EVENT_UPLOAD_REGION=us-east-1 \
+ -e LANGFUSE_S3_EVENT_UPLOAD_ACCESS_KEY_ID=AKIAIOSFODNN7EXAMPLE \
+ -e LANGFUSE_S3_EVENT_UPLOAD_SECRET_ACCESS_KEY=bPxRfiCYEXAMPLEKEY \
+ -p 3000:3000 \
+ -a STDOUT \
+langfuse/langfuse:3
+```
+
+## Run Langfuse Worker
+
+```bash
+docker run --name langfuse-worker \
+ -e DATABASE_URL=postgresql://hello \
+ -e SALT=mysalt \
+ -e ENCRYPTION_KEY=0000000000000000000000000000000000000000000000000000000000000000 \ # generate via: openssl rand -hex 32
+ -e CLICKHOUSE_URL=http://clickhouse:8123 \
+ -e CLICKHOUSE_USER=clickhouse \
+ -e CLICKHOUSE_PASSWORD=clickhouse \
+ -e REDIS_HOST=localhost \
+ -e REDIS_PORT=6379 \
+ -e REDIS_AUTH=redis \
+ -e LANGFUSE_S3_EVENT_UPLOAD_BUCKET=my-bucket \
+ -e LANGFUSE_S3_EVENT_UPLOAD_REGION=us-east-1 \
+ -e LANGFUSE_S3_EVENT_UPLOAD_ACCESS_KEY_ID=AKIAIOSFODNN7EXAMPLE \
+ -e LANGFUSE_S3_EVENT_UPLOAD_SECRET_ACCESS_KEY=bPxRfiCYEXAMPLEKEY \
+ -p 3030:3030 \
+ -a STDOUT \
+langfuse/langfuse-worker:3
+```
+
+## Features
+
+Langfuse supports many configuration options and self-hosted features.
+For more details, please refer to the [configuration guide](/self-hosting/configuration).
+
+import SelfHostFeatures from "@/components-mdx/self-host-features.mdx";
+
+
diff --git a/pages/self-hosting/encryption.mdx b/pages/self-hosting/encryption.mdx
new file mode 100644
index 000000000..3dba1e87b
--- /dev/null
+++ b/pages/self-hosting/encryption.mdx
@@ -0,0 +1,57 @@
+---
+title: Encryption (self-hosted)
+description: Learn how to encrypt your self-hosted Langfuse deployment. This guide covers encryption in transit (HTTPS), at rest (database) and application-level encryption.
+label: "Version: v3"
+---
+
+# Encryption
+
+
+
+Security and privacy are core design objectives at Langfuse. The Langfuse Team runs Langfuse in production on Langfuse Cloud which is ISO27001, SOC2 Type 2 and GDPR compliant ([Langfuse Cloud security page](/docs/security)).
+
+
+
+This guide covers the different encryption methods and considerations.
+It is assumed that you are familiar with the [architecture](/self-hosting#architecture) of Langfuse.
+
+## Encryption in transit (HTTPS) [#https]
+
+For encryption in transit, HTTPS is strongly recommended.
+Langfuse itself does not handle HTTPS directly.
+Instead, HTTPS is typically managed at the infrastructure level.
+There are two main approaches to handle HTTPS for Langfuse:
+
+1. Load Balancer Termination:
+ In this approach, HTTPS is terminated at the load balancer level.
+ The load balancer handles the SSL/TLS certificates and encryption, then forwards the decrypted traffic to the Langfuse container over HTTP.
+ This is a common and straightforward method, especially in cloud environments.
+
+- Pros: Simplifies certificate management as it is usually a fully managed service (e.g. AWS ALB), offloads encryption overhead from application servers.
+- Cons: Traffic between load balancer and Langfuse container is unencrypted (though typically within a secure network).
+
+2. Service Mesh Sidecar:
+ This method involves using a service mesh like Istio or Linkerd.
+ A sidecar proxy is deployed alongside each Langfuse container, handling all network traffic including HTTPS.
+
+- Pros: Provides end-to-end encryption (mutual TLS), offers advanced traffic management and observability.
+- Cons: Adds complexity to the deployment, requires understanding of service mesh concepts.
+
+Once HTTPS is enabled, you can add `LANGFUSE_CSP_ENFORCE_HTTPS=true` to ensure browsers only allow HTTPS connections when using Langfuse.
+
+## Encryption at rest (database) [#encryption-at-rest]
+
+All Langfuse data is stored in your Postgres database, Clickhouse, Redis, or S3/Blob Store.
+Database-level encryption is recommended for a secure production deployment and available across cloud providers.
+
+## Additional application-level encryption [#application-level-encryption]
+
+In addition to in-transit and at-rest encryption, sensitive data is also encrypted or hashed at the application level.
+
+| Data | Encryption |
+| ----------------------------------------- | ------------------------------------------------------------------------------------ |
+| API keys | Hashed using `SALT` |
+| Langfuse Console JWTs | Encrypted via `NEXTAUTH_SECRET` |
+| LLM API credentials stored in Langfuse | Encrypted using `ENCRYPTION_KEY` |
+| Integration credentials (e.g. PostHog) | Encrypted using `ENCRYPTION_KEY` |
+| Input/Outputs of LLM Calls, Traces, Spans | Work in progress, reach out to enterprise@langfuse.com if you are interested in this |
diff --git a/pages/self-hosting/headless-initialization.mdx b/pages/self-hosting/headless-initialization.mdx
new file mode 100644
index 000000000..60a6df392
--- /dev/null
+++ b/pages/self-hosting/headless-initialization.mdx
@@ -0,0 +1,43 @@
+---
+title: Headless Initialization (self-hosted)
+description: Learn how to automatically initialize Langfuse resources via environment variables.
+label: "Version: v3"
+---
+
+# Headless Initialization
+
+By default, you need to create a user account, organization and project via the Langfuse UI before being able to use the API. You can find the API keys in the project settings within the UI.
+
+If you want to automatically initialize these resources, you can optionally use the following `LANGFUSE_INIT_*` environment variables. When these variables are set, Langfuse will automatically create the specified resources on startup if they don't already exist. This allows for easy integration with infrastructure-as-code and automated deployment pipelines.
+
+## Resource Dependencies
+
+The different resources depend on each other.
+
+You can e.g. initialize an organization and a user without having to also initialize a project and API keys, but you cannot initialize a project or user without also initializing an organization.
+
+```
+Organization
+├── Project (part of organization)
+│ └── API Keys (set for project)
+└── User (owner of organization)
+```
+
+## Environment Variables
+
+| Environment Variable | Description | Required to Create Resource | Example |
+| ---------------------------------- | -------------------------------------- | --------------------------- | ------------------ |
+| `LANGFUSE_INIT_ORG_ID` | Unique identifier for the organization | Yes | `my-org` |
+| `LANGFUSE_INIT_ORG_NAME` | Name of the organization | No | `My Org` |
+| `LANGFUSE_INIT_PROJECT_ID` | Unique identifier for the project | Yes | `my-project` |
+| `LANGFUSE_INIT_PROJECT_NAME` | Name of the project | No | `My Project` |
+| `LANGFUSE_INIT_PROJECT_PUBLIC_KEY` | Public API key for the project | Yes | `lf_pk_1234567890` |
+| `LANGFUSE_INIT_PROJECT_SECRET_KEY` | Secret API key for the project | Yes | `lf_sk_1234567890` |
+| `LANGFUSE_INIT_USER_EMAIL` | Email address of the initial user | Yes | `user@example.com` |
+| `LANGFUSE_INIT_USER_NAME` | Name of the initial user | No | `John Doe` |
+| `LANGFUSE_INIT_USER_PASSWORD` | Password for the initial user | Yes | `password123` |
+
+## Troubleshooting
+
+- If you use `LANGFUSE_INIT_*` in Docker Compose, do not double-quote the values ([GitHub issue](https://github.com/langfuse/langfuse/issues/3398)).
+- The resources depend on one another (see note above). For example, you must create an organization to initialize a project.
diff --git a/pages/self-hosting/index.mdx b/pages/self-hosting/index.mdx
new file mode 100644
index 000000000..bdcfb2dcc
--- /dev/null
+++ b/pages/self-hosting/index.mdx
@@ -0,0 +1,106 @@
+---
+title: Self-host Langfuse (Open Source LLM Observability)
+description: Step-by-step guide to run Langfuse on your local machine using docker compose.
+label: "Version: v3"
+---
+
+import { Callout } from "nextra/components";
+
+# Self-host Langfuse
+
+
+ Looking for a managed solution? Consider [Langfuse
+ Cloud](https://cloud.langfuse.com) maintained by the Langfuse team.
+
+
+
+ This guide covers Langfuse v3. For Langfuse v2, see the [v2
+ documentation](/self-hosting/v2).
+
+
+Langfuse is open source and can be self-hosted using Docker. This section contains guides for different deployment scenarios.
+
+## Deployment Options [#deployment-options]
+
+The following options are available:
+
+- Langfuse Cloud: A fully managed version of Langfuse that is hosted and maintained by the Langfuse team.
+- Self-host Langfuse: Run Langfuse on your own infrastructure.
+ - [Local](/self-hosting/local): Run Langfuse on your own machine in 5 minutes using Docker Compose.
+ - [VM](/self-hosting/docker-compose): Run Langfuse on a single VM using Docker Compose.
+ - [Docker](/self-hosting/docker)
+ - [Kubernetes (Helm)](/self-hosting/kubernetes-helm): Run Langfuse on a Kubernetes cluster using Helm.
+ - Planned: Cloud-specific deployment guides, please upvote and comment on the following threads: [AWS](https://github.com/orgs/langfuse/discussions/4645), [Google Cloud](https://github.com/orgs/langfuse/discussions/4646), [Azure](https://github.com/orgs/langfuse/discussions/4647).
+
+## Architecture
+
+Langfuse only depends on open source components and can be deployed locally, on cloud infrastructure, or on-premises.
+
+import ArchitectureDiagram from "@/components-mdx/architecture-diagram-v3.mdx";
+
+
+
+import ArchitectureDescription from "@/components-mdx/architecture-description-v3.mdx";
+
+
+
+## Optimized for performance, reliability, and uptime
+
+Langfuse self-hosted is optimized for production environments. It is the exact same codebase as Langfuse Cloud, just deployed on your own infrastructure. The Langfuse team serves thousands of teams on Langfuse Cloud with high availability ([status page](https://status.langfuse.com)) and performance.
+
+Some of the optimizations include:
+
+- **Queued trace ingestion**: All traces are received in batches by the Langfuse Web container and immediately written to S3. Only a reference is persisted in Redis for queueing. Afterwards, the Langfuse Worker will pick up the traces from S3 and ingest them into Clickhouse. This ensures that high spikes in request load do not lead to timeouts or errors constrained by the database.
+- **Caching of API keys**: API keys are cached in-memory in Redis. Thereby, the database is not hit on every API call and unauthorized requests can be rejected with very low resource usage.
+- **Caching of prompts (SDKs and API)**: Even though prompts are cached client-side by the Langfuse SDKs and only revalidated in the background ([docs](/docs/prompts)), they need to be fetched from Langfuse on first use. Thus, API response times are very important. Prompts are cached in a read-through cache in Redis. Thereby, hot prompts can be fetched from Langfuse without hitting a database.
+- **OLAP database**: All read-heavy analytical operations are offloaded to an OLAP database (Clickhouse) for fast query performance.
+- **Multi-modal traces in S3**: Multi-modal traces can include large videos or arbitrary files. To enable support for these, they are directly uploaded to S3/Blob Storage from the client SDKs. Learn more [here](/docs/tracing-features/multi-modality).
+- **Recoverability of events**: All incoming tracing and evaluation events are persisted in S3/Blob Storage first. Only after successful processing, the events are written to the database. This ensures that even if the database is temporarily unavailable, the events are not lost and can be processed later.
+- **Background migrations**: Long-running migrations that are required by an upgrade but not blocking for regular operations are offloaded to a background job. This massively reduces the downtime during an upgrade. Learn more [here](/self-hosting/background-migrations).
+
+If you have any feedback or questions regarding the architecture, please reach out to us.
+
+## Features
+
+Langfuse supports many configuration options and self-hosted features.
+For more details, please refer to the [configuration guide](/self-hosting/configuration).
+
+import SelfHostFeatures from "@/components-mdx/self-host-features.mdx";
+
+
+
+## Subscribe to updates
+
+import { ProductUpdateSignup } from "@/components/productUpdateSignup";
+
+Release notes are published on [GitHub](https://github.com/langfuse/langfuse/releases). Langfuse uses tagged semver releases ([versioning policy](/self-hosting/versioning)).
+
+You can subscribe to our mailing list to get notified about new releases and new major versions.
+
+
+
+You can also watch the GitHub releases to get notified about new releases:
+
+
+ ![Langfuse releases](/images/docs/github-watch-changelog.gif)
+
+
+## Support
+
+If you experience any issues, please join us on [Discord](/discord) or contact the maintainers at support@langfuse.com.
+
+For support with production deployments, the Langfuse team provides dedicated enterprise support. To learn more, reach out to enterprise@langfuse.com or [schedule a demo](/schedule-demo).
+
+Alternatively, you may consider using [Langfuse Cloud](/docs/deployment/cloud), which is a fully managed version of Langfuse. You can find information about its security and privacy [here](/docs/data-security-privacy).
+
+## FAQ
+
+import { FaqPreview } from "@/components/faq/FaqPreview";
+
+
+
+## GitHub Discussions
+
+import { GhDiscussionsPreview } from "@/components/gh-discussions/GhDiscussionsPreview";
+
+
diff --git a/pages/self-hosting/infrastructure/_meta.tsx b/pages/self-hosting/infrastructure/_meta.tsx
new file mode 100644
index 000000000..c28a74039
--- /dev/null
+++ b/pages/self-hosting/infrastructure/_meta.tsx
@@ -0,0 +1,12 @@
+export default {
+ "architecture-overview": {
+ title: "Architecture Overview ↗",
+ href: "/self-hosting#architecture",
+ },
+ containers: "Application Containers",
+ clickhouse: "Clickhouse",
+ cache: "Redis / Valkey",
+ blobstorage: "Blob Storage (S3)",
+ postgres: "PostgreSQL",
+ "llm-api": "LLM API / Gateway",
+};
diff --git a/pages/docs/deployment/v3/components/blobstorage.mdx b/pages/self-hosting/infrastructure/blobstorage.mdx
similarity index 95%
rename from pages/docs/deployment/v3/components/blobstorage.mdx
rename to pages/self-hosting/infrastructure/blobstorage.mdx
index 770d37882..4f85ead00 100644
--- a/pages/docs/deployment/v3/components/blobstorage.mdx
+++ b/pages/self-hosting/infrastructure/blobstorage.mdx
@@ -1,10 +1,19 @@
---
-description: How Langfuse uses S3 / Blob Storage
+title: S3 / Blob Storage (self-hosted)
+description: Langfuse uses S3 / Blob Storage to store raw events, multi-modal inputs, batch exports, and other files.
+label: "Version: v3"
---
# S3 / Blob Storage
-Langfuse uses S3 or another S3-compatible blob storage (referred to as S3 going forward) to store raw events, user media, batch exports, and other files.
+
+
+This is a deep dive into the configuration of S3. Follow one of the [deployment guides](/self-hosting#deployment-options) to get started.
+
+
+
+Langfuse uses S3 or another S3-compatible blob storage (referred to as S3 going forward) to store raw events, multi-modal inputs, batch exports, and other files.
+You can use a managed service on AWS or GCP, or host it yourself using MinIO.
We use it as a scalable and durable storage solution for large files with strong read-after-write guarantees.
This guide covers how to configure S3 within Langfuse and how to connect your own S3-compatible storage.
@@ -118,7 +127,7 @@ LANGFUSE_S3_EVENT_UPLOAD_SECRET_ACCESS_KEY=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLE
[MinIO](https://min.io/) is an open-source object storage server that is compatible with the S3 API.
It is a popular choice for on-premise deployments and local development.
-Langfuse uses it for local development and as a default in our [Docker Compose](/docs/deployment/v3/guides/docker-compose) and [Kubernetes (Helm)](/docs/deployment/v3/guides/kubernetes-helm) deployment options.
+Langfuse uses it for local development and as a default in our [Docker Compose](/self-hosting/docker-compose) and [Kubernetes (Helm)](/self-hosting/kubernetes-helm) deployment options.
#### Example Configuration
diff --git a/pages/docs/deployment/v3/components/redis.mdx b/pages/self-hosting/infrastructure/cache.mdx
similarity index 64%
rename from pages/docs/deployment/v3/components/redis.mdx
rename to pages/self-hosting/infrastructure/cache.mdx
index 7359a2dac..9d73e6daa 100644
--- a/pages/docs/deployment/v3/components/redis.mdx
+++ b/pages/self-hosting/infrastructure/cache.mdx
@@ -1,12 +1,24 @@
---
-description: How Langfuse uses Redis or Valkey
+title: Cache (Redis/Valkey) (self-hosted)
+description: Langfuse uses Redis/Valkey as a caching layer and queue.
+label: "Version: v3"
---
-# Redis / Valkey
+# Cache (Redis/Valkey)
-Langfuse uses Redis/Valkey (referred to as Redis going forward) as a caching layer and as a queue.
+
+
+This is a deep dive into Redis/Valkey configuration. Follow one of the [deployment guides](/self-hosting#deployment-options) to get started.
+
+
+
+Langfuse uses Redis/Valkey as a caching layer and queue.
It is used to accept new events quickly on the API and defer their processing and insertion.
This allows Langfuse to handle request peaks gracefully.
+
+You can use a managed service on AWS, Azure, or GCP, or host it yourself.
+At least version 7 is required and the instance must have `maxmemory-policy=noeviction` configured.
+
This guide covers how to configure Redis within Langfuse and what to keep in mind when bringing your own Redis.
## Configuration
@@ -14,38 +26,40 @@ This guide covers how to configure Redis within Langfuse and what to keep in min
Langfuse accepts the following environment variables to fine-tune your Redis usage.
They need to be provided for the Langfuse Web and Langfuse Worker containers.
-| Variable | Required / Default | Description |
-| -------------------------- | ------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| `REDIS_CONNECTION_STRING` | Required | Redis connection string with format `redis[s]://[[username][:password]@][host][:port][/db-number]` |
+| Variable | Required / Default | Description |
+| ------------------------- | ------------------ | -------------------------------------------------------------------------------------------------- |
+| `REDIS_CONNECTION_STRING` | Required | Redis connection string with format `redis[s]://[[username][:password]@][host][:port][/db-number]` |
OR
-| Variable | Required / Default | Description |
-| ------------ | ------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| `REDIS_HOST` | Required | Redis host name. |
-| `REDIS_PORT` | `6379` | Port of the Redis instance. |
+| Variable | Required / Default | Description |
+| ------------ | ------------------ | --------------------------------------------- |
+| `REDIS_HOST` | Required | Redis host name. |
+| `REDIS_PORT` | `6379` | Port of the Redis instance. |
| `REDIS_AUTH` | | Authentication string for the Redis instance. |
## Deployment Options
This section covers different deployment options and provides example environment variables.
-### Managed Redis by Cloud Providers
+### Managed Redis/Valkey by Cloud Providers
[Amazon ElastiCache](https://aws.amazon.com/de/elasticache/redis/), [Azure Cache for Redis](https://azure.microsoft.com/de-de/products/cache/), and [GCP Memorystore](https://cloud.google.com/memorystore/?hl=en) are fully managed Redis services.
Langfuse handles failovers between read-replicas, but does not support Redis cluster mode for now, i.e. there is no sharding support.
To connect your cloud provider managed Redis instance, set the environment variables as defined above.
Ensure that your Langfuse container can reach your Redis instance within the VPC.
-#### Example Configuration
+
You must set the parameter `maxmemory-policy` to `noeviction` to ensure that the queue jobs are not evicted from the cache.
+
+
### Redis on Kubernetes (Helm)
Bitnami offers Helm Charts for [Redis](https://github.com/bitnami/charts/tree/main/bitnami/redis) and [Valkey](https://github.com/bitnami/charts/tree/main/bitnami/valkey).
We use the Valkey chart as a dependency for [Langfuse K8s](https://github.com/langfuse/langfuse-k8s).
-See [Langfuse on Kubernetes (Helm)](/docs/deployment/v3/guides/kubernetes-helm) for more details on how to deploy Langfuse on Kubernetes.
+See [Langfuse on Kubernetes (Helm)](/self-hosting/kubernetes-helm) for more details on how to deploy Langfuse on Kubernetes.
#### Example Configuration
@@ -76,6 +90,7 @@ As there is no redundancy, this is **not recommended for production workloads**.
#### Example Configuration
Start the container with
+
```bash
docker run --name redis \
-p 6379:6379 \
@@ -98,7 +113,7 @@ For every ~100000 events per minute we recommend about 1GB of memory for the Red
## Valkey vs Redis
-Valkey was created as an open source (BSD) alternative to Redis.
+[Valkey](https://github.com/valkey-io/valkey) was created as an open source (BSD) alternative to Redis.
It is a drop-in replacement for Redis and is compatible with the Redis protocol.
According to the maintainers their major version 8.0.0 retains compatibility to Redis v7 in most instances.
We do not extensively test new Langfuse releases with Valkey, but have not encountered any issues in internal experiments using it.
diff --git a/pages/docs/deployment/v3/components/clickhouse.mdx b/pages/self-hosting/infrastructure/clickhouse.mdx
similarity index 91%
rename from pages/docs/deployment/v3/components/clickhouse.mdx
rename to pages/self-hosting/infrastructure/clickhouse.mdx
index 229ad43a7..54352f2c1 100644
--- a/pages/docs/deployment/v3/components/clickhouse.mdx
+++ b/pages/self-hosting/infrastructure/clickhouse.mdx
@@ -1,12 +1,22 @@
---
-description: How Langfuse uses ClickHouse
+title: ClickHouse (self-hosted)
+description: Langfuse uses ClickHouse as the main OLAP storage solution for traces, observations, and scores.
+label: "Version: v3"
---
# ClickHouse
-ClickHouse is the main storage solution within Langfuse for our Trace, Observation, and Score entities.
+
+
+This is a deep dive into ClickHouse configuration. Follow one of the [deployment guides](/self-hosting#deployment-options) to get started.
+
+
+
+[ClickHouse](https://github.com/ClickHouse/ClickHouse) is the main OLAP storage solution within Langfuse for our Trace, Observation, and Score entities.
It is optimized for high write throughput and fast analytical queries.
-This guide covers how to configure ClickHouse within Langfuse and what to keep in mind when bringing your own ClickHouse.
+This guide covers how to configure ClickHouse within Langfuse and what to keep in mind when (optionally) bringing your own ClickHouse.
+
+Langfuse supports ClickHouse versions >= 24.3.
## Configuration
@@ -26,8 +36,6 @@ They need to be provided for the Langfuse Web and Langfuse Worker containers.
Please note, that Langfuse uses the `default` schema and `default` cluster.
Get in touch with us on [Discord](/discord) or contact the maintainers at support@langfuse.com if this limitation is blocking.
-Langfuse only supports ClickHouse versions >= 24.3.
-
## Deployment Options
This section covers different deployment options and provides example environment variables.
@@ -63,7 +71,7 @@ CLICKHOUSE_MIGRATION_SSL=true
The [Bitnami ClickHouse Helm Chart](https://github.com/bitnami/charts/tree/main/bitnami/clickhouse) provides a production ready deployment of ClickHouse using a given Kubernetes cluster.
We use it as a dependency for [Langfuse K8s](https://github.com/langfuse/langfuse-k8s).
-See [Langfuse on Kubernetes (Helm)](/docs/deployment/v3/guides/kubernetes-helm) for more details on how to deploy Langfuse on Kubernetes.
+See [Langfuse on Kubernetes (Helm)](/self-hosting/kubernetes-helm) for more details on how to deploy Langfuse on Kubernetes.
#### Example Configuration
diff --git a/pages/self-hosting/infrastructure/containers.mdx b/pages/self-hosting/infrastructure/containers.mdx
new file mode 100644
index 000000000..9aa8b470f
--- /dev/null
+++ b/pages/self-hosting/infrastructure/containers.mdx
@@ -0,0 +1,54 @@
+---
+title: Application Containers (self-hosted)
+description: Langfuse uses Docker to containerize the application. The application is split into two containers (Langfuse Web and Langfuse Worker).
+label: "Version: v3"
+---
+
+# Application Containers
+
+
+
+This is a deep dive into the configuration of the application containers. Follow one of the [deployment guides](/self-hosting#deployment-options) to get started.
+
+
+
+Langfuse uses Docker to containerize the application. The application is split into two containers:
+
+- **Langfuse Web**: The web server that serves the Langfuse Console and API.
+- **Langfuse Worker**: The worker that handles background tasks such as sending emails or processing events.
+
+## Recommended sizing
+
+For production environments, we recommend using at least 2 CPUs and 4 GB of RAM for all containers.
+You should have at least two instances of the Langfuse Web container for high availability.
+For auto-scaling, we recommend adding instances once the CPU utilization exceeds 50% on either container.
+
+## Node.js memory settings
+
+The Node.js applications in Langfuse containers need to be configured with appropriate memory limits to operate efficiently. By default, Node.js uses a maximum heap size of 1.7 GiB, which may be less than the actual container memory allocation. For example, if your container has 4 GiB of memory allocated but Node.js is limited to 1.7 GiB, you may encounter memory issues.
+
+To properly configure memory limits, set the `max-old-space-size` via the `NODE_OPTIONS` environment variable on both the Langfuse Web and Worker containers:
+
+```bash filename=".env"
+NODE_OPTIONS=--max-old-space-size=${var.memory}
+```
+
+## Build container from source [#build-from-source]
+
+While we recommend using the prebuilt docker image, you can also build the image yourself from source.
+
+```bash
+# clone repo
+git clone https://github.com/langfuse/langfuse.git
+cd langfuse
+
+# checkout production branch
+# main branch includes unreleased changes that might be unstable
+git checkout production
+
+# build web image
+docker build -t langfuse/langfuse -f ./web/Dockerfile .
+
+# build worker image
+docker build -t langfuse/langfuse-worker -f ./worker/Dockerfile .
+```
diff --git a/pages/self-hosting/infrastructure/llm-api.mdx b/pages/self-hosting/infrastructure/llm-api.mdx
new file mode 100644
index 000000000..bf39f0432
--- /dev/null
+++ b/pages/self-hosting/infrastructure/llm-api.mdx
@@ -0,0 +1,27 @@
+---
+title: LLM API / Gateway (self-hosted)
+description: Optionally, you can configure Langfuse to use an external LLM API or gateway for add-on features. Langfuse tracing does not need access to the LLM API as traces are captured client-side.
+label: "Version: v3"
+---
+
+# LLM API / Gateway
+
+Optionally, you can configure Langfuse to use an external LLM API or gateway for add-on features. Langfuse tracing does not need access to the LLM API as traces are captured client-side.
+
+## Supported LLM APIs
+
+Langfuse supports:
+
+- OpenAI
+- Azure OpenAI
+- Anthropic
+- Google Vertex
+- Amazon Bedrock
+
+Via the OpenAI API, many other LLM services and proxies can be used.
+
+## Features powered by LLM API
+
+- [Playground](/docs/playground)
+- [LLM-as-a-Judge Evaluation](/docs/scores/model-based-evals)
+- [Prompt Experiments](/docs/datasets/prompt-experiments)
diff --git a/pages/self-hosting/infrastructure/postgres.mdx b/pages/self-hosting/infrastructure/postgres.mdx
new file mode 100644
index 000000000..c892b9d6d
--- /dev/null
+++ b/pages/self-hosting/infrastructure/postgres.mdx
@@ -0,0 +1,29 @@
+---
+title: Postgres Database (self-hosted)
+description: Langfuse requires a persistent Postgres database to store its state.
+label: "Version: v3"
+---
+
+# Postgres Database
+
+
+
+Follow one of the [deployment guides](/self-hosting#deployment-options) to get started.
+
+
+
+Langfuse requires a persistent Postgres database to store its state.
+You can use a managed service on AWS, Azure, or GCP, or host it yourself.
+
+Langfuse supports Postgres versions >= 12.
+
+## Use Cases
+
+Postgres is used for all transactional data, including:
+
+- Users
+- Organizations
+- Projects
+- Datasets
+- Encrypted API keys
+- Settings
diff --git a/pages/docs/deployment/v3/guides/kubernetes-helm.mdx b/pages/self-hosting/kubernetes-helm.mdx
similarity index 55%
rename from pages/docs/deployment/v3/guides/kubernetes-helm.mdx
rename to pages/self-hosting/kubernetes-helm.mdx
index 3e88c865c..68523d199 100644
--- a/pages/docs/deployment/v3/guides/kubernetes-helm.mdx
+++ b/pages/self-hosting/kubernetes-helm.mdx
@@ -1,64 +1,87 @@
---
+title: Kubernetes (Helm) (self-hosted)
description: Step-by-step guide to run Langfuse on Kubernetes via Helm.
+label: "Version: v3"
---
-# Self-hosted deployment - Kubernetes (Helm)
+# Kubernetes (Helm)
This guide will walk you through the steps to deploy Langfuse on Kubernetes using the Helm package manager.
You will need access to a Kubernetes cluster and Helm installed on your local machine.
For the purposes of this guide, we will use a local minikube instance, but each step should extend to a managed Kubernetes service like GKE, EKS, or AKS.
+By default, the chart will deploy the Langfuse application containers and data stores ([architecture overview](/self-hosting#architecture)). You can optionally point to an existing PostgreSQL, Clickhouse and Redis instance. See [Readme](https://github.com/langfuse/langfuse-k8s/blob/lfe-1348-v3-chart/README.md) for more details.
+
+
+ If you are interested in contributing to our Kubernetes deployment guide or
+ Helm chart, please reach out to us on [Discord](/discord), contact the
+ maintainers at support@langfuse.com, or join the [GitHub
+ Discussion](https://github.com/orgs/langfuse/discussions/1902).
+
+
- If you are interested in contributing to our Kubernetes deployment guide or Helm chart,
- please reach out to us on [Discord](/discord), contact the maintainers at support@langfuse.com,
- or join the [GitHub Discussion](https://github.com/orgs/langfuse/discussions/1902).
+ This guide references the `lfe-1348-v3-chart` branch and v3-preview. It will
+ be updated to `v3.0.0` on Dec 9, 2024.
+Planned: Cloud-specific deployment guides, please upvote and comment on the following threads: [AWS](https://github.com/orgs/langfuse/discussions/4645), [Google Cloud](https://github.com/orgs/langfuse/discussions/4646), [Azure](https://github.com/orgs/langfuse/discussions/4647).
+
## Fetch the Helm chart and customize values
-Fetch the `langfuse-k8s` GitHub repository to your local machine to install the v3 preview using Helm.
+Fetch the `langfuse-k8s` GitHub repository to your local machine to install using Helm.
+
```bash
git clone https://github.com/langfuse/langfuse-k8s.git
cd langfuse-k8s/charts/langfuse
```
-Checkout the `lfe-1348-v3-chart` branch that we use to develop the chart for v3.
+Checkout the `lfe-1348-v3-chart` branch. This will be merged on Dec 9, 2024.
+
```bash
git checkout lfe-1348-v3-chart
```
For local experimentation, the pre-configured variables in the values.yaml file are usually sufficient.
+
If you send _any_ kind of sensitive data to the application or intend to keep it up for longer, we recommend that
you modify the values.yaml file and overwrite the following environment variables using the `additionalEnv` field:
-- **SALT**: A random string used to hash passwords. It should be at least 32 characters long.
-- **ENCRYPTION_KEY**: Generate this via `openssl rand -base64 32`.
-- **NEXTAUTH_SECRET**: A random string used to sign JWT tokens.
-- **NEXTAUTH_URL**: The URL where the application is hosted. Used for redirects after signup.
+- `SALT`: A random string used to hash passwords. It should be at least 32 characters long.
+- `ENCRYPTION_KEY`: Generate this via `openssl rand -base64 32`.
+- `NEXTAUTH_SECRET`: A random string used to sign JWT tokens.
+- `NEXTAUTH_URL`: The URL where the application is hosted. Used for redirects after signup.
+
+In addition, you can change the database and storage credentials to be more secure.
+
+For a comprehensive overview of all available environment variables and configuration options, please refer to the [configuration guide](/self-hosting/configuration).
## Deploy the helm chart
Create a new namespace for the Langfuse deployment, e.g.:
+
```bash
kubectl create namespace langfuse-v3-preview
```
Download the Helm chart dependencies:
+
```bash
helm dependency update
```
Install the Helm chart to our demo namespace:
+
```bash
helm install langfuse . -n langfuse-v3-preview
```
+
Our chart assumes that it's installed as `langfuse`.
If you want to install it with a different name, you will have to adjust the Redis hostname in the `values.yaml` accordingly.
At this point, Kubernetes will start to deploy the Langfuse application and its dependencies.
This can take up to 5 minutes.
-You can monitor the progress by checking `kubectl get pods -n langfuse-v3-preview` - we expect all pods to be Running eventually.
+You can monitor the progress by checking `kubectl get pods -n langfuse-v3-preview` - we expect all pods to be running eventually.
The langfuse-web and langfuse-worker container will restart a couple of times while the databases are being provisioned.
## Smoke test UI
@@ -68,9 +91,19 @@ Use `kubectl get services -n langfuse-v3-preview` and search for `langfuse-web`
You can access the Langfuse UI by visiting `http://:` in your browser.
Go ahead and register, create a new organization, project, and explore Langfuse.
+## Features
+
+Langfuse supports many configuration options and self-hosted features.
+For more details, please refer to the [configuration guide](/self-hosting/configuration).
+
+import SelfHostFeatures from "@/components-mdx/self-host-features.mdx";
+
+
+
## Shutdown
You can delete the Helm release and the namespace to clean up the resources:
+
```bash
helm uninstall langfuse -n langfuse-v3-preview
kubectl delete namespace langfuse-v3-preview
@@ -79,7 +112,10 @@ kubectl delete namespace langfuse-v3-preview
## How to Upgrade
Run the following commands to upgrade the Helm chart to the latest version:
+
```bash
helm dependency update
helm upgrade langfuse . -n langfuse-v3-preview
```
+
+For more details on upgrading, please refer to the [upgrade guide](/self-hosting/upgrade).
diff --git a/pages/self-hosting/license-key.mdx b/pages/self-hosting/license-key.mdx
new file mode 100644
index 000000000..b34280cdc
--- /dev/null
+++ b/pages/self-hosting/license-key.mdx
@@ -0,0 +1,53 @@
+---
+title: License Key (self-hosted)
+description: Learn how to activate a license key for your self-hosted Langfuse deployment.
+label: "Version: v3"
+---
+
+# License Key
+
+All core Langfuse features and APIs are available in Langfuse OSS (MIT licensed) without any limits.
+Some additional features require a license key.
+See [pricing page](/pricing-self-host) for more details on Langfuse Pro and Langfuse Enterprise.
+
+## Activating a License Key
+
+After purchasing a license key, you can activate it by adding the following environment variable to your Langfuse deployment:
+
+```bash
+LANGFUSE_EE_LICENSE_KEY=
+```
+
+## Feature Availability
+
+All core Langfuse features and APIs are [open source and MIT-licensed](/docs/open-source). They are available without any limits on usage when self-hosting.
+
+| Feature | Cloud Free | Cloud Pro | Cloud Team | Self-Hosted OSS | Self-Hosted Pro | Self-Hosted Enterprise |
+| -------------------------------------------------- | -------------- | ------------- | -------------- | ------------------- | ------------------- | -------------------------- |
+| [Tracing & UI](/docs/tracing) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
+| [Integrations and SDKs](/docs/tracing) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
+| [Prompt Management](/docs/prompts) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
+| [Analytics / Dashboards](/docs/analytics/overview) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
+| [Datasets](/docs/datasets/overview) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
+| [Scores](/docs/scores/overview) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
+| [API access](/docs/query-traces) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
+
+There are some commercially licensed peripheral features:
+
+| Feature | Cloud Free | Cloud Pro | Cloud Team | Self-Hosted OSS | Self-Hosted Pro | Self-Hosted Enterprise |
+| -------------------------------------------------------------- | -------------- | ------------- | -------------- | ------------------- | ------------------- | -------------------------- |
+| [LLM-as-a-Judge evaluations](/docs/scores/model-based-evals) | ✅ | ✅ | ✅ | 🛑 | ✅ | ✅ |
+| [Prompt Playground](/docs/playground) | ✅ | ✅ | ✅ | 🛑 | ✅ | ✅ |
+| [Prompt Experiments](/docs/datasets/prompt-experiments) | ✅ | ✅ | ✅ | 🛑 | ✅ | ✅ |
+| [Annotation Queues](/docs/scores/annotation#annotation-queues) | ✅ | ✅ | ✅ | 🛑 | ✅ | ✅ |
+| [Data Processing Agreement (DPA)](/security) | 🛑 | ✅ | ✅ | 🛑 | 🛑 | ✅ |
+| [SOC2 & ISO27001 Reports](/security) | 🛑 | 🛑 | ✅ | 🛑 | 🛑 | ✅ |
+| SSO Enforcement | 🛑 | 🛑 | ✅ | 🛑 | 🛑 | ✅ |
+| [Project-level RBAC roles](/docs/rbac) | 🛑 | 🛑 | ✅ | 🛑 | 🛑 | ✅ |
+| Data Retention Policies | 🛑 | 🛑 | ✅ | 🛑 | 🛑 | soon |
+| [UI Customization](/self-hosting/ui-customization) | 🛑 | 🛑 | 🛑 | 🛑 | 🛑 | ✅ |
+| [Organization Creators](/self-hosting/organization-creators) | 🛑 | 🛑 | 🛑 | 🛑 | 🛑 | ✅ |
+
+## Questions?
+
+If you have any questions about licensing, please contact us ([support](/support)).
diff --git a/pages/self-hosting/local.mdx b/pages/self-hosting/local.mdx
new file mode 100644
index 000000000..4dd2eca06
--- /dev/null
+++ b/pages/self-hosting/local.mdx
@@ -0,0 +1,77 @@
+---
+title: Run Langfuse Locally (self-hosted)
+description: Step-by-step guide to run Langfuse locally via docker compose.
+label: "Version: v3"
+---
+
+# Run Langfuse Locally
+
+This guide will walk you through the steps to run Langfuse locally via docker compose.
+We will use the [`docker-compose.yml`](https://github.com/langfuse/langfuse/blob/main/docker-compose.yml) file.
+This is the simplest way to run Langfuse to give it a try.
+
+For high-availability and high-throughput, we recommend using Kubernetes ([deployment guide](/self-hosting/kubernetes-helm)).
+The docker compose setup lacks high-availability, scaling capabilities, and backup functionality.
+
+## Get Started
+
+Requirements:
+
+- git
+- docker & docker compose -> use [Docker Desktop](https://www.docker.com/products/docker-desktop/) on Mac or Windows
+
+
+
+### Clone Langfuse Repository
+
+Get a copy of the latest Langfuse repository:
+
+```bash
+git clone https://github.com/langfuse/langfuse.git
+cd langfuse
+```
+
+### Start the application
+
+Run the langfuse docker compose
+
+```bash
+docker compose up
+```
+
+Watch the containers being started and the logs flowing in.
+After about 2-3 minutes, the langfuse-web-1 container should log "Ready".
+At this point you can proceed to the next step.
+
+### Done
+
+And you are ready to go! Open `http://localhost:3000` in your browser to access the Langfuse UI.
+
+
+
+## Features
+
+Langfuse supports many configuration options and self-hosted features.
+For more details, please refer to the [configuration guide](/self-hosting/configuration).
+
+import SelfHostFeatures from "@/components-mdx/self-host-features.mdx";
+
+
+
+## Shutdown
+
+You can stop the containers by hitting `Ctrl+C` in the terminal.
+
+If you started docker-compose in the background (`-d` flag), you can stop all instances using:
+
+```bash
+docker compose down
+```
+
+Adding the `-v` flag will also remove the volumes.
+
+## How to Upgrade
+
+To upgrade Langfuse, you can stop the containers and run `docker compose up --pull always`.
+
+For more details on upgrading, please refer to the [upgrade guide](/self-hosting/upgrade).
diff --git a/pages/self-hosting/networking.mdx b/pages/self-hosting/networking.mdx
new file mode 100644
index 000000000..b9c017092
--- /dev/null
+++ b/pages/self-hosting/networking.mdx
@@ -0,0 +1,35 @@
+---
+title: Networking (self-hosted)
+description: Learn how to configure networking for your self-hosted Langfuse deployment. Langfuse can be run without internet access.
+label: "Version: v3"
+---
+
+# Networking
+
+Langfuse can be deployed in a VPC or on-premises in high-security environments. This guide covers the networking requirements and considerations.
+
+Architecture diagram (from [architecture overview](/self-hosting#architecture)):
+
+import ArchitectureDiagram from "@/components-mdx/architecture-diagram-v3.mdx";
+
+
+
+## Network Exposure & Service Configuration
+
+Only the `langfuse/langfuse` (web) container needs to be accessible by users, via API, and SDKs.
+Optionally, this can be behind a firewall, proxy, or VPN.
+
+By default `PORT=3000` is used for the Langfuse Web container. This can be configured using the `PORT` environment variable ([docs](/self-hosting/configuration)). Usually a network load balancer is used to expose the service and handle ssl termination ([docs](/self-hosting/encryption)).
+
+Langfuse is designed to be exposed publicly as a web service.
+This is penetration tested and secure by design as the Langfuse Team runs the same container for the managed Langfuse Cloud Offering.
+See [security documentation](/security) of Langfuse Cloud for more details.
+
+## Internet Access
+
+Langfuse does not require internet access.
+
+Some optional components, like the LLM Playground and LLM-evals require access to an [LLM API/Gateway](/self-hosting/infrastructure/llm-api).
+This can be deployed in the same VPC or peered with the VPC.
+
+Langfuse pings a cached version of the GitHub API to check for updates to the Langfuse Server. If internet access is not available, this check will fail gracefully.
diff --git a/pages/self-hosting/organization-creators.mdx b/pages/self-hosting/organization-creators.mdx
new file mode 100644
index 000000000..a598a99f1
--- /dev/null
+++ b/pages/self-hosting/organization-creators.mdx
@@ -0,0 +1,23 @@
+---
+title: Allowlist of organization creators (self-hosted)
+description: Learn how to restrict organization creation to a specific set of users in your self-hosted Langfuse deployment.
+label: "Version: v3"
+---
+
+# Allowlist of organization creators
+
+
+
+This is only available in the Enterprise Edition. Please add your [license key](/self-hosting/license-key) to activate it.
+
+
+
+By default, all users who have access to a Langfuse instance can create new organizations.
+
+If you want to restrict organization creation to a specific set of users, you can use the `LANGFUSE_ALLOWED_ORGANIZATION_CREATORS` environment variable. In some organizations, there is a certain set of users who create new organizations and then provision access to the single organization or project via [RBAC](/docs/rbac).
+
+```bash filename=".env"
+LANGFUSE_ALLOWED_ORGANIZATION_CREATORS=user1@langfuse.com,user2@langfuse.com
+```
+
+If you have specific requirements and wonder how to best manage this, please reach out to us at [support@langfuse.com](mailto:support@langfuse.com). We are happy to help!
diff --git a/pages/self-hosting/railway.mdx b/pages/self-hosting/railway.mdx
new file mode 100644
index 000000000..0e4318f23
--- /dev/null
+++ b/pages/self-hosting/railway.mdx
@@ -0,0 +1,35 @@
+---
+title: Deploy Langfuse v3 on Railway
+description: Use this guide to deploy Langfuse v3 on Railway via the prebuilt template.
+label: "Version: v3"
+---
+
+# Railway
+
+You can deploy Langfuse v3 on [Railway](https://railway.app/) via the prebuilt template.
+The template contains all the necessary services and configurations to get you started.
+See [architecture overview](/self-hosting#architecture) for more details.
+
+## Deploy
+
+Use the following button to deploy the Langfuse v3 template on Railway:
+
+[![Deploy on Railway](https://railway.com/button.svg)](https://railway.app/template/exma_H?referralCode=513qqz)
+
+Recording of 1-click deployment on Railway:
+
+
+
+## Features
+
+Langfuse supports many configuration options and self-hosted features.
+For more details, please refer to the [configuration guide](/self-hosting/configuration).
+
+import SelfHostFeatures from "@/components-mdx/self-host-features.mdx";
+
+
diff --git a/pages/self-hosting/release-notes/_meta.tsx b/pages/self-hosting/release-notes/_meta.tsx
new file mode 100644
index 000000000..13206c086
--- /dev/null
+++ b/pages/self-hosting/release-notes/_meta.tsx
@@ -0,0 +1,17 @@
+export default {
+ server: {
+ title: "Server ↗",
+ href: "https://github.com/langfuse/langfuse/releases",
+ newWindow: true,
+ },
+ python: {
+ title: "Python SDK ↗",
+ href: "https://github.com/langfuse/langfuse-python/releases",
+ newWindow: true,
+ },
+ js: {
+ title: "JS/TS SDK ↗",
+ href: "https://github.com/langfuse/langfuse-js/releases",
+ newWindow: true,
+ },
+};
diff --git a/pages/self-hosting/transactional-emails.mdx b/pages/self-hosting/transactional-emails.mdx
new file mode 100644
index 000000000..8e7b1918a
--- /dev/null
+++ b/pages/self-hosting/transactional-emails.mdx
@@ -0,0 +1,24 @@
+---
+title: Transactional Emails (self-hosted)
+description: Learn how to configure transactional emails for your self-hosted Langfuse deployment.
+label: "Version: v3"
+---
+
+# Transactional Email
+
+Optionally, you can configure an SMTP server to send transactional emails.
+These are used for password resets, project/organization invitations, and notifications when a batch export is completed.
+
+## Configuration
+
+To enable transactional emails, set the following environment variables on the application containers:
+
+| Variable | Description |
+| --------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------ |
+| `SMTP_CONNECTION_URL` | Configure optional SMTP server connection for transactional email. Connection URL is passed to Nodemailer ([docs](https://nodemailer.com/smtp)). |
+| `EMAIL_FROM_ADDRESS` | Configure from address for transactional email. Required if `SMTP_CONNECTION_URL` is set. |
+
+## FAQ
+
+- **Which SMTP service to use?** It is recommended to use a reputable SMTP service for transactional emails to ensure delivery and prevent abuse. If you do not have a preferred service from your cloud provider, try [Resend](https://resend.com/) or [Postmark](https://postmarkapp.com/). Both are easy to set up and have generous free tiers.
+- **Can I use my private inbox?** No, private inboxes (like Gmail) are generally not recommended and difficult to configure correctly.
diff --git a/pages/docs/deployment/v3/troubleshooting.mdx b/pages/self-hosting/troubleshooting.mdx
similarity index 84%
rename from pages/docs/deployment/v3/troubleshooting.mdx
rename to pages/self-hosting/troubleshooting.mdx
index 90f194fe9..a4203e148 100644
--- a/pages/docs/deployment/v3/troubleshooting.mdx
+++ b/pages/self-hosting/troubleshooting.mdx
@@ -1,11 +1,12 @@
---
+title: Troubleshooting a self-hosted Langfuse deployment
description: Learn how to troubleshoot common issues with self-hosted Langfuse.
+label: "Version: v3"
---
# Troubleshooting
-This guide covers common issues that Langfuse self-hosters observe and how to address them.
-If you encounter an issue that is not covered here, please [open an issue](https://github.com/langfuse/langfuse/issues) or start a [discussion](https://github.com/orgs/langfuse/discussions).
+This guide covers common issues that Langfuse self-hosters observe and how to address them. If you encounter an issue that is not covered here, please [open a GitHub issue](https://github.com/langfuse/langfuse/issues) or reach out to [support](/support).
## Missing Events After POST /api/public/ingestion
@@ -14,7 +15,7 @@ Events do not appear immediately in the UI, as they are being processed asynchro
If your events are not shown after a few minutes, you can check the following:
- **Check the Langfuse Web logs**: Look for any errors in the Langfuse Web container around the time that you ingested the events.
- Any errors you observe indicate that the event is malformatted or that either [Redis](/docs/deployment/v3/components/redis) or [S3](/docs/deployment/v3/components/blobstorage) are not available.
+ Any errors you observe indicate that the event is malformatted or that either [Redis](/self-hosting/infrastructure/cache) or [S3](/self-hosting/infrastructure/blobstorage) are not available.
In this case, you should also see non-207 status codes within your application.
- **Check the S3/Blob Storage bucket**: Validate that the event was uploaded correctly into your blob storage.
It should be available in a path like `////.json`.
@@ -47,3 +48,15 @@ To address this issue, we recommend that you configure `NODE_OPTIONS=--max-old-s
Use the available memory in MiB as the value for `var.memory`, e.g. 4096 for 4 GiB of memory.
The value should be equal or above the memory limit of the container.
This ensures that your container orchestrator kills the pod gracefully if the memory limit is exceeded, instead of the application terminating abruptly.
+
+## FAQ
+
+import { FaqPreview } from "@/components/faq/FaqPreview";
+
+
+
+## GitHub Discussions
+
+import { GhDiscussionsPreview } from "@/components/gh-discussions/GhDiscussionsPreview";
+
+
diff --git a/pages/self-hosting/ui-customization.mdx b/pages/self-hosting/ui-customization.mdx
new file mode 100644
index 000000000..24cbf41e3
--- /dev/null
+++ b/pages/self-hosting/ui-customization.mdx
@@ -0,0 +1,54 @@
+---
+title: UI Customization (self-hosted)
+description: Learn how to customize the Langfuse UI for your organization.
+label: "Version: v3"
+---
+
+# UI Customization
+
+
+
+This is only available in the Enterprise Edition. Please add your [license key](/self-hosting/license-key) to activate it.
+
+
+
+To help with large-scale deployments, Langfuse allows you to customize some key parts of the UI to fit an organization's environment.
+
+## Links
+
+You can customize the links highlighted in the screenshot below:
+
+
+ ![UI Customization Links](/images/docs/ui-customization-links.png)
+
+
+| Number | Variable | Description |
+| ------ | -------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| 1 | `LANGFUSE_UI_FEEDBACK_HREF` | Replace the default feedback widget with your internal feedback link. |
+| 2 | `LANGFUSE_UI_DOCUMENTATION_HREF` | Customize the documentation link reference in the menu and settings to point to your internal documentation. |
+| 3 | `LANGFUSE_UI_SUPPORT_HREF` | Customize the support link reference in the menu and settings to point to your internal support. |
+| 4 | `LANGFUSE_UI_API_HOST` | Customize the hostname that is referenced in the Langfuse project settings. Defaults to `window.origin`. Useful if Langfuse is deployed behind a reverse proxy for API requests. |
+
+## Co-branding
+
+Co-brand the Langfuse interface with your own logo.
+
+![UI Customization Logo](/images/docs/ui-customization-logo.png)
+
+Langfuse adapts to the logo width, with a maximum aspect ratio of 1:3. Narrower ratios (e.g., 2:3, 1:1) also work. The logo is fitted into a bounding box, so there are no specific pixel constraints. For reference, the example logo shown above is 160px x 400px.
+
+| Variable | Description | Example |
+| ---------------------------------- | ------------------------------ | ---------------------------------------------------------------------- |
+| `LANGFUSE_UI_LOGO_LIGHT_MODE_HREF` | URL to the logo in light mode. | `https://static.langfuse.com/langfuse-dev/example-logo-light-mode.png` |
+| `LANGFUSE_UI_LOGO_DARK_MODE_HREF` | URL to the logo in dark mode. | `https://static.langfuse.com/langfuse-dev/example-logo-dark-mode.png` |
+
+## LLM API/Gateway Connection defaults
+
+LLM connections are configured in the Langfuse project settings. You can customize the default values via the following environment variables.
+
+| Variable | Description |
+| ------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `LANGFUSE_UI_DEFAULT_MODEL_ADAPTER` | Set the default model adapter for the LLM playground and evals. Options: `OpenAI`, `Anthropic`, `Azure`. Example: `Anthropic` |
+| `LANGFUSE_UI_DEFAULT_BASE_URL_OPENAI` | Set the default base URL for OpenAI API in the LLM playground and evals. Example: `https://api.openai.com/v1` |
+| `LANGFUSE_UI_DEFAULT_BASE_URL_ANTHROPIC` | Set the default base URL for Anthropic API in the LLM playground and evals. Example: `https://api.anthropic.com` |
+| `LANGFUSE_UI_DEFAULT_BASE_URL_AZURE_OPENAI` | Set the default base URL for Azure OpenAI API in the LLM playground and evals. Example: `https://{instanceName}.openai.azure.com/openai/deployments` |
diff --git a/pages/self-hosting/upgrade-guides/_meta.tsx b/pages/self-hosting/upgrade-guides/_meta.tsx
new file mode 100644
index 000000000..77cf8b2b4
--- /dev/null
+++ b/pages/self-hosting/upgrade-guides/_meta.tsx
@@ -0,0 +1,4 @@
+export default {
+ "upgrade-v2-to-v3": "Upgrade v2 to v3",
+ "upgrade-v1-to-v2": "Upgrade v1 to v2",
+};
diff --git a/pages/self-hosting/upgrade-guides/upgrade-v1-to-v2.mdx b/pages/self-hosting/upgrade-guides/upgrade-v1-to-v2.mdx
new file mode 100644
index 000000000..3f337da2c
--- /dev/null
+++ b/pages/self-hosting/upgrade-guides/upgrade-v1-to-v2.mdx
@@ -0,0 +1,101 @@
+---
+title: Migrate Langfuse v1 to v2 (self-hosted)
+description: A guide to upgrade a Langfuse v1 setup to v2.
+---
+
+# Migrate Langfuse v1 to v2
+
+Langfuse v2 ([released](https://github.com/langfuse/langfuse/releases/tag/v2.0.0) Jan 30, 2024) is a major release of Langfuse that introduces a rebuilt usage and cost tracking system for LLM generations. The update requires running a one-off migration script on historical data to ensure accurate LLM costs of existing traces.
+
+## Changes
+
+### What has changed?
+
+- Completely rebuilt usage/cost tracking system for LLM generations
+- New model definition abstraction that enables:
+ - Quick support for new emerging models
+ - Tracking of model price changes over time
+ - Custom models/prices at the project level
+- Added ability to set usage and cost via API when ingesting traces
+- Usage and cost information available on all UI tables and APIs
+
+### What has not changed?
+
+Everything else, including APIs and infrastructure components, remains the same. No breaking changes.
+
+## Who needs to take action during the upgrade?
+
+- **No action required** if you:
+
+ - Use Langfuse Cloud
+ - Only care about newly ingested traces
+ - Don't use the cost tracking features
+
+- **Action required** if you:
+ - Self-host Langfuse
+ - Want accurate cost data for historical traces
+
+## Migration Steps
+
+
+ This process is non-blocking and does not impact the availability of your
+ Langfuse deployment.
+
+
+
+
+### Update Langfuse to v2
+
+Follow the deployment guide to upgrade your Langfuse deployment to v2.
+
+- For production deployments, see the [upgrade guide](/self-hosting/v2/deployment-guide#update)
+- If you use docker compose, see the [upgrade guide](/self-hosting/v2/docker-compose)
+
+### Apply new model logic and prices to existing data
+
+Langfuse includes a list of supported models for [usage and cost tracking](/docs/model-usage-and-cost). If a Langfuse update includes support for new models, these will only be applied to newly ingested traces/generations.
+
+Optionally, you can apply the new model definitions to existing data using the following steps. During the migration, the database remains available (non-blocking).
+
+1. Clone the repository and create an `.env` file:
+
+ ```bash
+ # Clone the Langfuse repository
+ git clone https://github.com/langfuse/langfuse.git
+
+ # Navigate to the Langfuse directory
+ cd langfuse
+
+ # Install all dependencies
+ pnpm i
+
+ # Create an .env file
+ cp .env.dev.example .env
+ ```
+
+2. Edit the `.env` to connect to your database from your machine:
+
+ ```bash filename=".env"
+ NODE_ENV=production
+
+ # Replace with your database connection string
+ DATABASE_URL=postgresql://postgres:postgres@localhost:5432/postgres
+ ```
+
+3. Execute the migration. Depending on the size of your database, this might take a while.
+
+ ```bash
+ pnpm run models:migrate
+ ```
+
+4. Clean up: remove the `.env` file to avoid connecting to the production database from your local machine.
+
+
+
+## Support
+
+If you experience any issues, please create an [issue on GitHub](/issues) or contact the maintainers ([support](/support)).
+
+For support with production deployments, the Langfuse team provides dedicated enterprise support. To learn more, reach out to enterprise@langfuse.com or [schedule a demo](/schedule-demo).
+
+Alternatively, you may consider using [Langfuse Cloud](/docs/deployment/cloud), which is a fully managed version of Langfuse. You can find information about its security and privacy [here](/docs/data-security-privacy).
diff --git a/pages/docs/deployment/v3/migrate-v2-to-v3.mdx b/pages/self-hosting/upgrade-guides/upgrade-v2-to-v3.mdx
similarity index 69%
rename from pages/docs/deployment/v3/migrate-v2-to-v3.mdx
rename to pages/self-hosting/upgrade-guides/upgrade-v2-to-v3.mdx
index 0fd6050f2..acfa7581f 100644
--- a/pages/docs/deployment/v3/migrate-v2-to-v3.mdx
+++ b/pages/self-hosting/upgrade-guides/upgrade-v2-to-v3.mdx
@@ -1,37 +1,180 @@
---
+title: Migrate Langfuse v2 to v3 (self-hosted)
description: A guide to upgrade a Langfuse v2 setup to v3.
---
# Migrate Langfuse v2 to v3
-
- This guide covers a developer preview which is **not suitable for production use**.
- v3 is under active development and we plan to ship a production-ready version by the end of November 2024.
- We share this information to gather feedback from our awesome developer community.
+
- For a production-ready setup, follow the [self-hosting guide](/docs/deployment/self-host)
- or consider using [Langfuse Cloud](https://cloud.langfuse.com) maintained by the Langfuse team.
+This is a big upgrade and we tried to make it as seamless as possible. Please create a [GitHub Issue](/issues) or contact [support](/support) in case you have any questions while upgrading to v3.
- If you want to get started on v3, please follow our [v3 Self-Host Guide](/docs/deployment/v3/overview).
-To learn more about our reasons for the architectural changes, jump to the [Reasoning](#reasoning) section.
+Langfuse v3 (released on Dec. 6th, 2024) introduces a new backend architecture that unlocks many new features and performance improvements.
-## SDK compatibility and API changes
+Follow this guide to:
-While we aim to keep our SDKs and APIs fully backwards compatible, we have to introduce backwards incompatible changes with our update to v3.
-Certain APIs in SDK versions below version 2.0.0 are not compatible with our new backend architecture.
-Please upgrade and benefit from many performance improvements or features such as [prompt caching](/changelog/2024-02-05-sdk-level-prompt-caching).
+1. Understand the architectural changes and reasoning behind them.
+2. Learn about the other breaking changes.
+3. Follow the upgrade steps to successfully migrate to Langfuse v3.
-**Upgrade options**:
-- Default SDK upgrade: Follow the 1.x.x to 2.x.x upgrade path ([Python](/docs/sdk/python/low-level-sdk#upgrading-from-v1xx-to-v2xx), [JavaScript](/docs/sdk/typescript/guide#upgrade1to2)). For the JavaScrupt SDK, consider an upgrade [from 2.x.x to 3.x.x](/docs/sdk/typescript/guide#upgrade2to3) as well. The upgrade is straightforward and should not take much time.
-- Improved integrations: Since the first major version, we built many new ways to integrate your code with Langfuse such as [Decorators](/docs/sdk/python/decorators) for Python. We would recommend to check out our [quickstart](/docs/get-started) to see whether there is a more convenient integration available for you.
+## Architecture Changes
-**Background**: Langfuse v3 relies on an event driven backend architecture.
+
+
+This section dives into the reasoning behind the architectural changes we made for Langfuse v3.
+To learn more about the architecture of Langfuse v3, jump to the [architecture overview](/self-hosting#architecture-overview).
+
+
+
+Langfuse has gained significant traction over the last months, both in our Cloud environment and in self-hosted setups.
+With Langfuse v3 we introduce changes that allow our backend to handle hundreds of events per second with higher reliability.
+To achieve this scale, we introduce a second Langfuse container and additional storage services like S3/Blob store, Clickhouse, and Redis which are better suited for the required workloads than our previous Postgres-based setup.
+
+In short, Langfuse v3 adds:
+
+- A new worker container that processes events asynchronously.
+- A new S3/Blob store for storing large objects.
+- A new Clickhouse instance for storing traces, observations, and scores.
+- Redis/Valkey for queuing events and caching data.
+
+### Comparison of the architectures
+
+import ArchitectureDiagramV2 from "@/components-mdx/architecture-diagram-v2.mdx";
+import ArchitectureDiagramV3 from "@/components-mdx/architecture-diagram-v3.mdx";
+import ArchitectureDescriptionV3 from "@/components-mdx/architecture-description-v3.mdx";
+
+
+
+
+Architecture Diagram
+
+
+
+
+
+
+
+
+
+Architecture Diagram
+
+
+
+
+
+
+### Reasoning for the architectural changes [#reasoning]
+
+
+1. Why Clickhouse
+
+We made the strategic decision to migrate our traces, observations, and scores table from Postgres to Clickhouse.
+Both us and our self-hosters observed bottlenecks in Postgres when dealing with millions of rows of tracing data,
+both on ingestion and retrieval of information.
+Our core requirement was a database that could handle massive volumes of trace and event data with exceptional query speed and efficiency
+while also being available for free to self-hosters.
+
+#### Limitations of Postgres
+
+Initially, Postgres was an excellent choice due to its robustness, flexibility, and the extensive tooling available.
+As our platform grew, we encountered performance bottlenecks with complex aggregations and time-series data.
+The row-based storage model of PostgreSQL becomes increasingly inefficient when dealing with billions of rows of tracing data,
+leading to slow query times and high resource consumption.
+
+#### Our requirements
+
+- Analytical queries: all queries for our dashboards (e.g. sum of LLM tokens consumed over time)
+- Table queries: Finding tracing data based on filtering and ordering selected via tables in our UI.
+- Select by ID: Quickly locating a specific trace by its ID.
+- High write throughput while allowing for updates. Our tracing data can be updated from the SDKs. Hence, we need an option to update rows in the database.
+- Self-hosting: We needed a database that is free to use for self-hosters, avoiding dependencies on specific cloud providers.
+- Low operational effort: As a small team, we focus on building features for our users. We try to keep operational efforts as low as possible.
+
+#### Why Clickhouse is great
+
+- Optimized for Analytical Queries: ClickHouse is a modern OLAP database capable of ingesting data at high rates and querying it with low latency. It handles billions of rows efficiently.
+- Rich feature-set: Clickhouse offers different Table Engines, Materialized views, different types of Indices, and many integrations which helps us to build fast and achieve low latency read queries.
+- Our self-hosters can use the official Clickhouse Helm Charts and Docker Images for deploying in the cloud infrastructure of their choice.
+- Clickhouse Cloud: Clickhouse Cloud is a database as a SaaS service which allows us to reduce operational efforts on our side.
+
+When talking to other companies and looking at their code bases, we learned that Clickhouse is a popular choice these days for analytical workloads.
+Many modern observability tools, such as [Signoz](https://signoz.io/) or [Posthog](https://posthog.com/), as well as established companies like [Cloudflare](https://blog.cloudflare.com/http-analytics-for-6m-requests-per-second-using-clickhouse/), use Clickhouse for their analytical workloads.
+
+#### Clickhouse vs. others
+
+We think there are many great OLAP databases out there and are sure that we could have chosen an alternative and would also succeed with it. However, here are some thoughts on alternatives:
+
+- Druid: Unlike Druid's [modular architecture](https://posthog.com/blog/clickhouse-vs-druid), ClickHouse provides a more straightforward, unified instance approach. Hence, it is easier for teams to manage Clickhouse in production as there are fewer moving parts. This reduces the operational burden especially for our self-hosters.
+- StarRocks: We think StarRocks is great but early. The vast amount of features in Clickhouse help us to remain flexible with our requirements while benefiting from the performance of an OLAP database.
+
+#### Building an adapter and support multiple databases
+
+We explored building a multi-database adapter to support Postgres for smaller self-hosted deployments.
+After talking to engineers and reviewing some of PostHog's [Clickhouse implementation](https://github.com/PostHog/posthog),
+we decided against this path due to its complexity and maintenance overhead.
+This allows us to focus our resources on building user features instead.
+
+
+
+
+2. Why Redis
+
+We added a Redis instance to serve cache and queue use-cases within our stack.
+With its open source license, broad native support by major cloud vendors, and ubiquity in the industry, Redis was a natural choice for us.
+
+
+
+
+3. Why S3/Blob Store
+
+Observability data for LLM applications tends to contain large, semi-structured bodies of data to represent inputs and outputs.
+We chose S3/Blob Store as a scalable, secure, and cost-effective solution to store these large objects.
+It allows us to store all incoming events for further processing and acts as a native backup solution, as the full state
+can be restored based on the events stored there.
+
+
+
+
+4. Why Worker Container
+
+When processing observability data for LLM applications, there are many CPU-heavy operations which block the main loop in our Node.js backend,
+e.g. tokenization and other parsing of event bodies.
+To achieve high availability and low latencies across client applications, we decided to move the heavy processing into an asynchronous worker container.
+It accepts events from a Redis queue and ensures that they are eventually being upserted into Clickhouse.
+
+
+
+## Other Breaking Changes
+
+
+
+If you use Langfuse SDKs above version 2.0.0 (released Dec 2023), these changes will not affect you. The Langfuse Team has already upgraded Langfuse Cloud to v3 without any issues after helping a handful of teams (less than 1% of users) to upgrade the Langfuse SDKs.
+
+
+
+### SDK Requirements
+
+**SDK v1.x.x is no longer supported**. While we aim to keep our SDKs and APIs fully backwards compatible, we have to introduce backwards incompatible changes with our update to Langfuse Server v3. Certain APIs in SDK versions below version 2.0.0 are not compatible with our new backend architecture.
+
+#### Release dates of SDK v2
+
+- Langfuse Python SDK v2 was [released](https://github.com/langfuse/langfuse-python/releases/tag/v2.0.1) on Dec 17, 2023,
+- Langfuse JavaScript SDK v2 was [released](https://github.com/langfuse/langfuse-js/releases/tag/v2.0.0) on Dec 18, 2023.
+
+#### Upgrade options if you are on SDK version 1.x.x
+
+- Default SDK upgrade: Follow the 1.x.x to 2.x.x upgrade path ([Python](/docs/sdk/python/low-level-sdk#upgrading-from-v1xx-to-v2xx), [JavaScript](/docs/sdk/typescript/guide#upgrade1to2)). For the JavaScript SDK, consider an upgrade [from 2.x.x to 3.x.x](/docs/sdk/typescript/guide#upgrade2to3) as well. The upgrade is straightforward and should not take much time.
+- Optionally switch to our [new integrations](/docs/get-started): Since the first major version, we built many new ways to integrate your code with Langfuse such as [Decorators](/docs/sdk/python/decorators) for Python. We would recommend to check out our [quickstart](/docs/get-started) to see whether there is a more convenient integration available for you.
+
+#### Background of this change
+
+Langfuse v3 relies on an event driven backend architecture.
This means, that we acknowledge HTTP requests from the SDKs, queue the HTTP bodies in the backend, and process them asynchronously.
This allows us to scale the backend more easily and handle more requests without overloading the database.
The SDKs below 2.0.0 send the events to our server and expect a synchronous response containing the database representation of the event.
-If you rely on this data and access it in the code, your SDK will break as of Nov. 11th, 2024 for the cloud version and post-upgrade for self-hosted versions.
+If you rely on this data and access it in the code, your SDK will break as of Nov. 11th, 2024 for the cloud version and post-upgrade to Langfuse v3 when self-hosting.
### API Changes
@@ -57,9 +200,10 @@ This change is inline with our [API reference](https://api.reference.langfuse.co
#### Deprecated endpoints
-The following endpoints are deprecated since our v2 release.
-We continue to accept requests to these endpoints and will remove them in a future release.
+The following endpoints are deprecated since our v2 release and have therefore not been used by the Langfuse SDKs since Feb 2024.
+Langfuse v3 continues to accept requests to these endpoints.
Their API behavior changes to be asynchronous and the endpoints will only return the id of the created object instead of the full updated record.
+Please note that these endpoints will be removed in a future release.
- POST /api/public/events
- POST /api/public/generations
@@ -68,8 +212,7 @@ Their API behavior changes to be asynchronous and the endpoints will only return
- PATCH /api/public/spans
- POST /api/public/traces
-
-### Behavioral Changes
+### UI Behavioral Changes
#### Trace Deletion
@@ -81,17 +224,38 @@ Going forward, all traces will be scheduled for deletion, but may still be visib
Deleting projects within Langfuse was a synchronous operation and projects got removed immediately.
Projects will be marked as deleted within Langfuse v3 and will not be accessible using the standard UI navigation options.
We immediately revoke access keys for projects, but all remaining data will be removed in the background.
-Information will not be deleted from the [S3/Blob Store](/docs/deployment/v3/components/s3-blob-store).
+
+Information will not be deleted from the [S3/Blob Store](/self-hosting/infrastructure/blobstorage).
This action needs to be performed manually by an administrator.
+This process will be automated in a future release.
## Migration Steps
+
+
We tried to make the version upgrade as seamless as possible.
-If you encounter any issues please reach out to our support team or open an issue on our GitHub repository.
+If you encounter any issues please reach out to [support](/support) or open an [issue on GitHub](/issues).
+
+
+
+By following this guide, you can upgrade your Langfuse v2 deployment to v3 without prolonged downtime.
+
+### Video Walkthrough
+
+
### Before you start the upgrade
-Before starting your upgrade, make sure you are familiar with the contents of [our v3 hosting guide](/docs/deployment/v3/overview).
+Before starting your upgrade, make sure you are familiar with the contents of the respective [v3 deployment guide](/self-hosting).
In addition, we recommend that you perform a backup of your Postgres database before you start the upgrade.
Also, ensure that you run a recent version of Langfuse, ideally a version later than v2.92.0.
@@ -100,9 +264,11 @@ and move your traffic after validating that the new instances are working as exp
### Upgrade Steps
+
+
#### 1. Provision new infrastructure
-Ensure that you deploy all required storage components ([Clickhouse](/docs/deployment/v3/components/clickhouse), [Redis](/docs/deployment/v3/components/redis), [S3/Blob Store](/docs/deployment/v3/components/blobstorage)) and have the connection information handy.
+Ensure that you deploy all required storage components ([Clickhouse](/self-hosting/infrastructure/clickhouse), [Redis](/self-hosting/infrastructure/cache), [S3/Blob Store](/self-hosting/infrastructure/blobstorage)) and have the connection information handy.
You can reuse your existing Postgres instance for the new deployment.
Ensure that you also have your Postgres connection details ready.
@@ -132,7 +298,7 @@ All new events will be stored in Clickhouse and should appear within the UI with
#### 4. Wait for historic data migration to complete
-We have introduced background migrations as part of the migration to v3.
+We have introduced [background migrations](/self-hosting/background-migrations) as part of the migration to v3.
Those allow Langfuse to schedule longer-running migrations without impacting the availability of the service.
As part of the v3 release, we have introduced four migrations that will run once you deploy the new stack.
@@ -144,103 +310,16 @@ As part of the v3 release, we have introduced four migrations that will run once
Each migration has to finish, before the next one starts.
Depending on the size of your event tables, this process may take multiple hours.
-You need to set `LANGFUSE_ENABLE_BACKGROUND_MIGRATIONS=true` to enable background migrations and start the migration of data from Postgres to ClickHouse.
-
-[//]: # (TODO: Reference to new UI to monitor background migrations)
-
#### 5. Stop the old Langfuse containers
After you have verified that new events are being stored in Clickhouse and are shown in the UI, you can stop the old Langfuse containers.
-## Reasoning
-
-Langfuse has gained significant traction over the last months, both in our Cloud environment and in self-hosted setups.
-With Langfuse v3 we introduce changes that allow our backend to handle hundreds of events per second with higher reliability.
-To achieve this scale, we introduce a second Langfuse container and additional storage services like S3/Blob store, Clickhouse, and Redis.
-We explain why we chose each element of the stack and why we believe that they help us achieve best in class scale.
-
-### Why Clickhouse
-
-We made the strategic decision to migrate our traces, observations, and scores table from Postgres to Clickhouse.
-Both us and our self-hosters observed bottlenecks in Postgres when dealing with millions of rows of tracing data,
-both on ingestion and retrieval of information.
-Our core requirement was a database that could handle massive volumes of trace and event data with exceptional query speed and efficiency
-while also being available for free to self-hosters.
-
-#### Limitations of Postgres
-
-Initially, Postgres was an excellent choice due to its robustness, flexibility, and the extensive tooling available.
-As our platform grew, we encountered performance bottlenecks with complex aggregations and time-series data.
-The row-based storage model of PostgreSQL becomes increasingly inefficient when dealing with billions of rows of tracing data,
-leading to slow query times and high resource consumption.
-
-#### Our requirements
-
-- Analytical queries: all queries for our dashboards (e.g. sum of LLM tokens consumed over time)
-- Table queries: Finding tracing data based on filtering and ordering selected via tables in our UI.
-- Select by ID: Quickly locating a specific trace by its ID.
-- High write throughput while allowing for updates. Our tracing data can be updated from the SKDs. Hence, we need an option to update rows in the database.
-- Self-hosting: We needed a database that is free to use for self-hosters, avoiding dependencies on specific cloud providers.
-- Low operational effort: As a small team, we focus on building features for our users. We try to keep operational efforts as low as possible.
-
-#### Why Clickhouse is great
-
-- Optimized for Analytical Queries: ClickHouse is a modern OLAP database capable of ingesting data at high rates and querying it with low latency. It handles billions of rows efficiently.
-- Rich feature-set: Clickhouse offers different Table Engines, Materialized views, different types of Indices, and many integrations which helps us to build fast and achieve low latency read queries.
-- Our self-hosters can use the official Clickhouse Helm Charts and Docker Images for deploying in the cloud infrastructure of their choice.
-- Clickhouse Cloud: Clickhouse Cloud is a database as a SaaS service which allows us to reduce operational efforts on our side.
-
-When talking to other companies and looking at their code bases, we learned that Clickhouse is a popular choice these days for analytical workloads.
-Many modern observability tools, such as [Signoz](https://signoz.io/) or [Posthog](https://posthog.com/), as well as established companies like [Cloudflare](https://blog.cloudflare.com/http-analytics-for-6m-requests-per-second-using-clickhouse/), use Clickhouse for their analytical workloads.
-
-#### Clickhouse vs. others
-
-We think there are many great OLAP databases out there and are sure that we could have chosen an alternative and would also succeed with it. However, here are some thoughts on alternatives:
-- Druid: Unlike Druid's [modular architecture](https://posthog.com/blog/clickhouse-vs-druid), ClickHouse provides a more straightforward, unified instance approach. Hence, it is easier for teams to manage Clickhouse in production as there are fewer moving parts. This reduces the operational burden especially for our self-hosters.
-- StarRocks: We think StarRocks is great but early. The vast amount of features in Clickhouse help us to remain flexible with our requirements while benefiting from the performance of an OLAP database.
-
-#### Building an adapter and support multiple databases
-
-We explored building a multi-database adapter to support Postgres for smaller self-hosted deployments.
-After talking to engineers and reviewing some of PostHog's [Clickhouse implementation](https://github.com/PostHog/posthog),
-we decided against this path due to its complexity and maintenance overhead.
-This allows us to focus our resources on building user features instead.
-
-### Why Redis
-
-We added a Redis instance to serve cache and queue use-cases within our stack.
-With its open source license, broad native support my major cloud vendors, and ubiquity in the industry, Redis was a natural choice for us.
-
-### Why S3/Blob Store
-
-Observability data for LLM application tends to contain large, semi-structured bodies of data to represent inputs and outputs.
-We chose S3/Blob Store as a scalable, secure, and cost-effective solution to store these large objects.
-It allows us to store all incoming events for further processing and acts as a native backup solution, as the full state
-can be restored based on the events stored there.
-
-### Why Worker Container
-
-When processing observability data for LLM applications, there are many CPU-heavy operations which block the main loop in our Node.js backend,
-e.g. tokenization and other parsing of event bodies.
-To achieve high availability and low latencies across client applications, we decided to move the heavy processing into an asynchronous worker container.
-It accepts events from a Redis queue and ensures that they are eventually being upserted into Clickhouse.
+
## Support
-If you experience any issues, please join us on [Discord](/discord) or contact the maintainers at support@langfuse.com.
+If you experience any issues, please create an [issue on GitHub](/issues) or contact the maintainers ([support](/support)).
For support with production deployments, the Langfuse team provides dedicated enterprise support. To learn more, reach out to enterprise@langfuse.com or [schedule a demo](/schedule-demo).
-Alternatively, you may consider using [Langfuse Cloud](/docs/deployment/cloud), which is a fully managed version of Langfuse. You can find information about its security and privacy [here](/docs/data-security-privacy).
-
-## FAQ
-
-import { FaqPreview } from "@/components/faq/FaqPreview";
-
-
-
-## GitHub Discussions
-
-import { GhDiscussionsPreview } from "@/components/gh-discussions/GhDiscussionsPreview";
-
-
+Alternatively, you may consider using Langfuse Cloud, which is a fully managed version of Langfuse. You can find information about its security and privacy [here](/docs/data-security-privacy).
diff --git a/pages/self-hosting/upgrade.mdx b/pages/self-hosting/upgrade.mdx
new file mode 100644
index 000000000..a1a6d95a0
--- /dev/null
+++ b/pages/self-hosting/upgrade.mdx
@@ -0,0 +1,63 @@
+---
+title: How to upgrade a self-hosted Langfuse deployment
+description: Use this guide to keep your Langfuse deployment up to date. Updates between minor/patch versions can be applied automatically. For major versions, please refer to the migration guides.
+label: "Version: v3"
+---
+
+# Upgrading a Self-Hosted Langfuse Deployment
+
+Langfuse evolves quickly ([changelog](/changelog)) and keeping your deployment up to date is key to benefiting from security, performance, and feature updates.
+
+The Langfuse versioning and upgrade process is optimized for minimal complexity and disruption. If you ever experience any issues, please create an [issue on GitHub](/issues) or contact the maintainers ([support](/support)).
+
+## How to upgrade
+
+
+
+It is recommended to be familiar with our [versioning](/self-hosting/versioning) policy before upgrading existing deployments.
+
+
+
+### Minor/Patch Versions
+
+Updates within a major version are designed to be non-disruptive. On application start, all database migrations are applied to the databases automatically.
+
+You can automatically use the latest version of a major release by using `langfuse/langfuse:3` and `langfuse/langfuse-worker:3` as the image tags in your deployment.
+
+To update deployments, follow the update section in our deployment guides:
+
+- [Local](/self-hosting/local#how-to-upgrade)
+- [VM](/self-hosting/docker-compose#how-to-upgrade)
+- [Docker](/self-hosting/docker#how-to-upgrade)
+- [Kubernetes (Helm)](/self-hosting/kubernetes-helm#how-to-upgrade)
+
+### Major Versions
+
+If you upgrade between major versions, please follow our migration guides:
+
+- [v2.x.x to v3.x.x](/self-hosting/upgrade-guides/upgrade-v2-to-v3)
+- [v1.x.x to v2.x.x](/self-hosting/upgrade-guides/upgrade-v1-to-v2)
+
+## Release Notes
+
+Subscribe to our mailing list to get notified about new releases and new major versions.
+
+
+
+You can also watch the [GitHub releases](https://github.com/langfuse/langfuse/releases) for information about the changes in each version.
+
+
+ ![Langfuse releases](/images/docs/github-watch-changelog.gif)
+
+
+_Watch the repository on GitHub to get notified about new releases_
+
+import { ProductUpdateSignup } from "@/components/productUpdateSignup";
+
+## Support
+
+If you experience any issues, please create an [issue on GitHub](/issues) or contact the maintainers ([support](/support)).
+
+For support with production deployments, the Langfuse team provides dedicated enterprise support. To learn more, reach out to enterprise@langfuse.com or [schedule a demo](/schedule-demo).
+
+Alternatively, you may consider using [Langfuse Cloud](/docs/deployment/cloud), which is a fully managed version of Langfuse. You can find information about its security and privacy [here](/docs/data-security-privacy).
diff --git a/pages/self-hosting/v2/_meta.tsx b/pages/self-hosting/v2/_meta.tsx
new file mode 100644
index 000000000..370c25118
--- /dev/null
+++ b/pages/self-hosting/v2/_meta.tsx
@@ -0,0 +1,7 @@
+import { MenuSwitcher } from "@/components/MenuSwitcher";
+
+export default {
+ index: "Overview",
+ "deployment-guide": "Deployment Guide",
+ "docker-compose": "Local Deployment (docker compose)",
+};
diff --git a/pages/docs/deployment/self-host.mdx b/pages/self-hosting/v2/deployment-guide.mdx
similarity index 96%
rename from pages/docs/deployment/self-host.mdx
rename to pages/self-hosting/v2/deployment-guide.mdx
index 69fc612ec..84059ed45 100644
--- a/pages/docs/deployment/self-host.mdx
+++ b/pages/self-hosting/v2/deployment-guide.mdx
@@ -1,10 +1,18 @@
---
+title: Self-hosting Langfuse v2
description: Self-host Langfuse in your infrastructure using Docker.
+label: "Version: v2"
---
-# Self-Hosting Langfuse - Open Source LLM Observability
+# Deployment Guide (v2)
-[![Docker Image](https://img.shields.io/badge/docker-langfuse-blue?logo=Docker&logoColor=white&style=flat-square)](https://github.com/langfuse/langfuse/pkgs/container/langfuse)
+
+ This guide covers Langfuse v2. For Langfuse v3, see the [v3
+ documentation](/self-hosting). Langfuse v2 receives security updates until end
+ of Q1 2025. If you have any questions while upgrading, please refer to the [v3
+  upgrade guide](/self-hosting/upgrade-guides/upgrade-v2-to-v3) or open a thread on [GitHub
+ Discussions](/gh-support).
+
Langfuse Server, which includes the API and Web UI, is open-source and can be self-hosted using Docker.
@@ -25,25 +33,10 @@ Deploy the application container to your infrastructure. You can use managed ser
During the container startup, all database migrations will be applied automatically. This can be optionally disabled via environment variables.
-
-
-
-
```bash
docker pull langfuse/langfuse:2
```
-
-
-
-```bash
-docker pull langfuse/langfuse:latest
-```
-
-
-
-
-
```bash
docker run --name langfuse \
-e DATABASE_URL=postgresql://hello \
@@ -185,7 +178,7 @@ Troubleshooting:
### Configuring the Enterprise Edition [#ee]
-The Enterprise Edition ([compare versions](/docs/deployment/feature-overview)) of Langfuse includes additional optional configuration options that can be set via environment variables.
+The Enterprise Edition ([compare versions](/pricing-self-host)) of Langfuse includes additional optional configuration options that can be set via environment variables.
| Variable | Description |
| ------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
@@ -337,6 +330,10 @@ If you encounter issues, ensure the following:
We recommend enabling automated updates within the current major version to
benefit from the latest features, bug fixes, and security patches.
+
+ Coming from Langfuse v1? Please refer to the [upgrade
+ guide](/self-hosting/upgrade-guides/upgrade-v1-to-v2) for more details.
+
To update the application:
@@ -354,51 +351,6 @@ Langfuse is released through tagged semver releases. Check [GitHub releases](htt
_Watch the repository on GitHub to get notified about new releases_
-## (Optional) Apply newly supported models to existing data in Langfuse [#migrate-models]
-
-
- This is only necessary if you want new model prices to be applied to existing
- traces/generations. Most users will not need to do this as applying prices to
- new traces only is totally fine when updating regularly.
-
-
-Langfuse includes a list of supported models for [usage and cost tracking](/docs/model-usage-and-cost). If a Langfuse update includes support for new models, these will only be applied to newly ingested traces/generations.
-
-Optionally, you can apply the new model definitions to existing data using the following steps. During the migration, the database remains available (non-blocking).
-
-1. Clone the repository and create an `.env` file:
-
- ```bash
- # Clone the Langfuse repository
- git clone https://github.com/langfuse/langfuse.git
-
- # Navigate to the Langfuse directory
- cd langfuse
-
- # Install all dependencies
- pnpm i
-
- # Create an .env file
- cp .env.dev.example .env
- ```
-
-2. Edit the `.env` to connect to your database from your machine:
-
- ```bash filename=".env"
- NODE_ENV=production
-
- # Replace with your database connection string
- DATABASE_URL=postgresql://postgres:postgres@localhost:5432/postgres
- ```
-
-3. Execute the migration. Depending on the size of your database, this might take a while.
-
- ```bash
- pnpm run models:migrate
- ```
-
-4. Clean up: remove the `.env` file to avoid connecting to the production database from your local machine.
-
## Kubernetes deployments
Kubernetes is a popular choice for deploying Langfuse when teams maintain the rest of their infrastructure using Kubernetes. You can find community-maintained templates and Helm Charts in the [langfuse/langfuse-k8s](https://github.com/langfuse/langfuse-k8s) repository.
diff --git a/pages/docs/deployment/local.mdx b/pages/self-hosting/v2/docker-compose.mdx
similarity index 73%
rename from pages/docs/deployment/local.mdx
rename to pages/self-hosting/v2/docker-compose.mdx
index ff17c1ef2..ee2bdb690 100644
--- a/pages/docs/deployment/local.mdx
+++ b/pages/self-hosting/v2/docker-compose.mdx
@@ -1,16 +1,26 @@
---
+title: Local Deployment (v2, docker compose)
description: Step-by-step guide to run Langfuse on your local machine using docker compose.
+label: "Version: v2"
---
import { Callout } from "nextra/components";
-# Local Deployment - Open Source LLM Observability
+# Local Deployment (v2, docker compose)
+
+
+ This guide covers Langfuse v2. For Langfuse v3, see the [v3
+ documentation](/self-hosting). Langfuse v2 receives security updates until end
+ of Q1 2025. If you have any questions while upgrading, please refer to the [v3
+  upgrade guide](/self-hosting/upgrade-guides/upgrade-v2-to-v3) or open a thread on [GitHub
+ Discussions](/gh-support).
+
This setup is **not suitable for production use** as the database is not
persistent and environment variables are not kept secret. For a
- production-ready setup, follow the [self-hosting
- guide](/docs/deployment/self-host) or consider using [Langfuse
+ production-ready setup, follow the [deployment
+ guide](/self-hosting/v2/deployment-guide) or consider using [Langfuse
Cloud](https://cloud.langfuse.com) maintained by the Langfuse team.
@@ -42,7 +52,7 @@ Langfuse is now running on your local machine:
- The server is accessible at: `http://localhost:3000`
- Integrations: set the `HOST`/`BASEURL` of the SDKs to `http://localhost:3000`
-Langfuse is very configurable, see [self-hosting (docker) guide](/docs/deployment/self-host) for more details on configuration options.
+Langfuse is very configurable, see [deployment guide](/self-hosting/v2/deployment-guide) for more details on configuration options.
Checkout the [docker-compose.yml](https://github.com/langfuse/langfuse/blob/v2/docker-compose.yml) file for more details.
### Create an Account
@@ -82,7 +92,7 @@ Refer to [CONTRIBUTING.md](https://github.com/langfuse/langfuse/blob/main/CONTRI
## Troubleshooting
-The [self-hosting guide](/docs/deployment/self-host) is more extensive and includes troubleshooting steps which are also applicable to local deployments using docker compose.
+The [deployment guide](/self-hosting/v2/deployment-guide) is more extensive and includes troubleshooting steps which are also applicable to local deployments using docker compose.
## GitHub Discussions
diff --git a/pages/self-hosting/v2/index.mdx b/pages/self-hosting/v2/index.mdx
new file mode 100644
index 000000000..f01892da7
--- /dev/null
+++ b/pages/self-hosting/v2/index.mdx
@@ -0,0 +1,38 @@
+---
+title: Self-host Langfuse v2
+description: Langfuse is open source and can be self-hosted using Docker. This section contains guides for different deployment scenarios.
+label: "Version: v2"
+---
+
+# Self-host Langfuse v2
+
+
+ This guide covers Langfuse v2. For Langfuse v3, see the [v3
+ documentation](/self-hosting). Langfuse v2 receives security updates until end
+ of Q1 2025. If you have any questions while upgrading, please refer to the [v3
+  upgrade guide](/self-hosting/upgrade-guides/upgrade-v2-to-v3) or open a thread on [GitHub
+ Discussions](/gh-support).
+
+
+Langfuse is open source and can be self-hosted using Docker. This section contains guides for different deployment scenarios.
+
+## Deployment Options [#deployment-options]
+
+The following options are available:
+
+- Langfuse Cloud: A fully managed version of Langfuse that is hosted and maintained by the Langfuse team.
+- Self-host Langfuse: Run Langfuse on your own infrastructure.
+ - Production via Docker. Please follow the [deployment guide](/self-hosting/v2/deployment-guide) for more details and detailed instructions on how to deploy Langfuse on various cloud providers.
+ - Locally or on a single VM via [Docker Compose](/self-hosting/v2/docker-compose).
+
+## Architecture
+
+Langfuse only depends on open source components and can be deployed locally, on cloud infrastructure, or on-premises.
+
+import ArchitectureDiagram from "@/components-mdx/architecture-diagram-v2.mdx";
+
+
+
+## Upgrade to Langfuse v2
+
+If you are upgrading from Langfuse v1, please refer to the [upgrade guide](/self-hosting/upgrade-guides/upgrade-v1-to-v2).
diff --git a/pages/self-hosting/versioning.mdx b/pages/self-hosting/versioning.mdx
new file mode 100644
index 000000000..7b31c1fed
--- /dev/null
+++ b/pages/self-hosting/versioning.mdx
@@ -0,0 +1,49 @@
+---
+label: "Version: v3"
+---
+
+# Versioning
+
+Versioning is key to ensure compatibility between Langfuse Server, SDKs, and custom integrations via the Public API. Thus, we take [semantic versioning](https://semver.org/) seriously.
+
+## Scope of semantic versioning
+
+The following changes **result in a major version bump** as they are considered breaking:
+
+- Infrastructure changes
+- Removal of existing Public APIs or removal/changes of existing parameters from Public APIs
+
+The following changes **do not result in a major version bump** as they are considered internal implementation details:
+
+- Database schemas
+- Frontend APIs
+
+## Compatibility between Langfuse Server and SDKs
+
+Langfuse Server and SDKs are versioned independently to allow for more flexibility in upgrading components:
+
+- **Server**: Can be upgraded independently of SDK versions, unless explicitly noted in release notes
+- **SDKs**: Can remain on older versions while running newer server versions
+- **Compatibility**: New SDK features may require recent server versions
+
+We recommend keeping the Langfuse Server up to date to ensure access to all features and security updates.
+
+## Release Notes
+
+Release notes are published on GitHub:
+
+- [Langfuse Server](https://github.com/langfuse/langfuse/releases)
+- [Langfuse Python SDK](https://github.com/langfuse/langfuse-python/releases)
+- [Langfuse JS/TS SDK](https://github.com/langfuse/langfuse-js/releases)
+
+You can watch the GitHub releases to get notified about new releases:
+
+
+ ![Langfuse releases](/images/docs/github-watch-changelog.gif)
+
+
+Also, you can subscribe to our mailing list to get notified about new releases and new major versions:
+
+import { ProductUpdateSignup } from "@/components/productUpdateSignup";
+
+
diff --git a/pages/why.mdx b/pages/why.mdx
index e4f69e0ea..c180c245f 100644
--- a/pages/why.mdx
+++ b/pages/why.mdx
@@ -3,7 +3,7 @@
### Open Source
- Langfuse is [open source](/docs/open-source).
-- You can [self-host](/docs/deployment/self-host) it.
+- You can [self-host](/self-hosting) it.
- Langfuse's core tracing is MIT licensed and will always be freely available.
- We are transparent about [what we are building](/roadmap) and how.
- We [iterate with our users](https://github.com/orgs/langfuse/discussions) and celebrate their feedback.
diff --git a/public/images/docs/ui-customization-links.png b/public/images/docs/ui-customization-links.png
new file mode 100644
index 000000000..2706fa159
Binary files /dev/null and b/public/images/docs/ui-customization-links.png differ
diff --git a/public/images/docs/ui-customization-logo.png b/public/images/docs/ui-customization-logo.png
new file mode 100644
index 000000000..6f2e02431
Binary files /dev/null and b/public/images/docs/ui-customization-logo.png differ
diff --git a/src/overrides.css b/src/overrides.css
index 2e69be873..7ee3869b0 100644
--- a/src/overrides.css
+++ b/src/overrides.css
@@ -25,6 +25,10 @@
display: none !important;
}
+.nextra-nav-container > nav > div > a[href="/self-hosting"] {
+ display: none !important;
+}
+
/* Less gap in desktop menu */
.nextra-menu-desktop {
gap: 0.1rem;