From 643ebfb049534893f702681b7ee133430386fd12 Mon Sep 17 00:00:00 2001
From: Lianmin Zheng
Date: Sat, 30 Sep 2023 01:23:50 +0000
Subject: [PATCH] update links to lmsys-chat-1m

---
 README.md                                  | 2 +-
 docs/dataset_release.md                    | 6 ++++++
 fastchat/serve/gradio_block_arena_anony.py | 2 +-
 fastchat/serve/gradio_block_arena_named.py | 2 +-
 fastchat/serve/gradio_web_server.py        | 2 +-
 fastchat/utils.py                          | 2 +-
 6 files changed, 11 insertions(+), 5 deletions(-)
 create mode 100644 docs/dataset_release.md

diff --git a/README.md b/README.md
index 267fad328..4ab9ffbbb 100644
--- a/README.md
+++ b/README.md
@@ -238,7 +238,7 @@ CUDA_VISIBLE_DEVICES=1 python3 -m fastchat.serve.model_worker --model-path lmsys
 ```bash
 python3 -m fastchat.serve.gradio_web_server_multi
 ```
-- The default model worker based on huggingface/transformers has great compatibility but can be slow. If you want high-throughput serving, you can try [vLLM integration](docs/vllm_integration.md).
+- The default model worker based on huggingface/transformers has great compatibility but can be slow. If you want high-throughput batched serving, you can try [vLLM integration](docs/vllm_integration.md).
 
 ## API
 ### OpenAI-Compatible RESTful APIs & SDK
diff --git a/docs/dataset_release.md b/docs/dataset_release.md
new file mode 100644
index 000000000..add2c8909
--- /dev/null
+++ b/docs/dataset_release.md
@@ -0,0 +1,6 @@
+## Datasets
+We release the following datasets based on our projects and websites.
+
+- [LMSYS-Chat-1M: A Large-Scale Real-World LLM Conversation Dataset](https://huggingface.co/datasets/lmsys/lmsys-chat-1m)
+- [Chatbot Arena Conversation Dataset](https://huggingface.co/datasets/lmsys/chatbot_arena_conversations)
+- [MT-bench Human Annotation Dataset](https://huggingface.co/datasets/lmsys/mt_bench_human_judgments)
diff --git a/fastchat/serve/gradio_block_arena_anony.py b/fastchat/serve/gradio_block_arena_anony.py
index edd89d072..7859e31ec 100644
--- a/fastchat/serve/gradio_block_arena_anony.py
+++ b/fastchat/serve/gradio_block_arena_anony.py
@@ -367,7 +367,7 @@ def bot_response_multi(
 def build_side_by_side_ui_anony(models):
     notice_markdown = """
 # ⚔️ Chatbot Arena ⚔️ : Benchmarking LLMs in the Wild
-| [Blog](https://lmsys.org/blog/2023-05-03-arena/) | [GitHub](https://github.com/lm-sys/FastChat) | [Paper](https://arxiv.org/abs/2306.05685) | [Dataset](https://huggingface.co/datasets/lmsys/chatbot_arena_conversations) | [Twitter](https://twitter.com/lmsysorg) | [Discord](https://discord.gg/HSWAKCrnFx) |
+| [Blog](https://lmsys.org/blog/2023-05-03-arena/) | [GitHub](https://github.com/lm-sys/FastChat) | [Paper](https://arxiv.org/abs/2306.05685) | [Dataset](https://github.com/lm-sys/FastChat/blob/main/docs/dataset_release.md) | [Twitter](https://twitter.com/lmsysorg) | [Discord](https://discord.gg/HSWAKCrnFx) |
 
 ### Rules
 - Chat with two anonymous models side-by-side and vote for which one is better!
diff --git a/fastchat/serve/gradio_block_arena_named.py b/fastchat/serve/gradio_block_arena_named.py
index 6c2d0b534..c6e909321 100644
--- a/fastchat/serve/gradio_block_arena_named.py
+++ b/fastchat/serve/gradio_block_arena_named.py
@@ -295,7 +295,7 @@ def flash_buttons():
 def build_side_by_side_ui_named(models):
     notice_markdown = """
 # ⚔️ Chatbot Arena ⚔️ : Benchmarking LLMs in the Wild
-| [Blog](https://lmsys.org/blog/2023-05-03-arena/) | [GitHub](https://github.com/lm-sys/FastChat) | [Paper](https://arxiv.org/abs/2306.05685) | [Dataset](https://huggingface.co/datasets/lmsys/chatbot_arena_conversations) | [Twitter](https://twitter.com/lmsysorg) | [Discord](https://discord.gg/HSWAKCrnFx) |
+| [Blog](https://lmsys.org/blog/2023-05-03-arena/) | [GitHub](https://github.com/lm-sys/FastChat) | [Paper](https://arxiv.org/abs/2306.05685) | [Dataset](https://github.com/lm-sys/FastChat/blob/main/docs/dataset_release.md) | [Twitter](https://twitter.com/lmsysorg) | [Discord](https://discord.gg/HSWAKCrnFx) |
 
 ### Rules
 - Chat with two models side-by-side and vote for which one is better!
diff --git a/fastchat/serve/gradio_web_server.py b/fastchat/serve/gradio_web_server.py
index 24db98b34..e66a7622c 100644
--- a/fastchat/serve/gradio_web_server.py
+++ b/fastchat/serve/gradio_web_server.py
@@ -541,7 +541,7 @@ def get_model_description_md(models):
 def build_single_model_ui(models, add_promotion_links=False):
     promotion = (
         """
-- | [GitHub](https://github.com/lm-sys/FastChat) | [Twitter](https://twitter.com/lmsysorg) | [Discord](https://discord.gg/HSWAKCrnFx) |
+- | [GitHub](https://github.com/lm-sys/FastChat) | [Dataset](https://github.com/lm-sys/FastChat/blob/main/docs/dataset_release.md) | [Twitter](https://twitter.com/lmsysorg) | [Discord](https://discord.gg/HSWAKCrnFx) |
 - Introducing Llama 2: The Next Generation Open Source Large Language Model. [[Website]](https://ai.meta.com/llama/)
 - Vicuna: An Open-Source Chatbot Impressing GPT-4 with 90% ChatGPT Quality. [[Blog]](https://lmsys.org/blog/2023-03-30-vicuna/)
 """
diff --git a/fastchat/utils.py b/fastchat/utils.py
index 947d8b687..7c0614e3e 100644
--- a/fastchat/utils.py
+++ b/fastchat/utils.py
@@ -207,7 +207,7 @@ def pretty_print_semaphore(semaphore):
     url_params = Object.fromEntries(params);
     console.log("url_params", url_params);
 
-    msg = "Users of this website are required to agree to the following terms:\\nThe service is a research preview. It only provides limited safety measures and may generate offensive content. It must not be used for any illegal, harmful, violent, racist, or sexual purposes.\\nThe service collects user dialogue data and reserves the right to distribute it under a Creative Commons Attribution (CC-BY) license."
+    msg = "Users of this website are required to agree to the following terms:\\nThe service is a research preview. It only provides limited safety measures and may generate offensive content. It must not be used for any illegal, harmful, violent, racist, or sexual purposes.\\nThe service collects user dialogue data and reserves the right to distribute it under a Creative Commons Attribution (CC-BY) or a similar license."
     alert(msg);
 
     return url_params;
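
The new `docs/dataset_release.md` points at datasets hosted on the Hugging Face Hub. As a minimal usage sketch (not part of the diff itself), the snippet below shows one way to load them with the `datasets` library; the `train` split name, the record fields, and the gated-access note for LMSYS-Chat-1M are assumptions based on the dataset pages rather than anything stated in this patch.

```python
# Minimal sketch: loading the datasets linked in docs/dataset_release.md.
# Assumes `pip install datasets` and, for the gated LMSYS-Chat-1M dataset,
# that you have accepted its terms on the Hub and run `huggingface-cli login`.
from datasets import load_dataset

# Chatbot Arena conversations with pairwise human votes (assumed "train" split).
arena = load_dataset("lmsys/chatbot_arena_conversations", split="train")

# LMSYS-Chat-1M: large-scale real-world conversations (gated access).
lmsys_chat_1m = load_dataset("lmsys/lmsys-chat-1m", split="train")

# MT-bench human judgments; print the object to see its available splits.
mt_bench = load_dataset("lmsys/mt_bench_human_judgments")

print(arena[0])    # one conversation record as a dict
print(mt_bench)    # DatasetDict listing splits and sizes
```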