diff --git a/examples/chat_rag_connector/RAG_Chatbot_with_Connectors.ipynb b/examples/chat_rag_connector/RAG_Chatbot_with_Connectors.ipynb index 0227f2af..bdb3f785 100644 --- a/examples/chat_rag_connector/RAG_Chatbot_with_Connectors.ipynb +++ b/examples/chat_rag_connector/RAG_Chatbot_with_Connectors.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "code", - "execution_count": 2, + "execution_count": 1, "metadata": {}, "outputs": [], "source": [ @@ -11,8 +11,7 @@ "import uuid\n", "from typing import List, Dict\n", "\n", - "COHERE_API_KEY = os.getenv(\"COHERE_API_KEY\")\n", - "co = cohere.Client(COHERE_API_KEY)" + "co = cohere.Client(\"COHERE_API_KEY\")" ] }, { @@ -79,7 +78,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 3, "metadata": {}, "outputs": [], "source": [ @@ -98,7 +97,8 @@ " )\n", "\n", " for event in response:\n", - " yield event" + " yield event\n", + " yield response" ] }, { @@ -110,7 +110,7 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 4, "metadata": {}, "outputs": [], "source": [ @@ -136,31 +136,33 @@ " # Print the chatbot response\n", " print(\"Chatbot:\")\n", " \n", - " documents = []\n", - " documents_flag = False\n", " citations_flag = False\n", " \n", " for event in response:\n", - " # Documents\n", - " if event.event_type == \"search-results\":\n", - " documents_flag = True\n", - " documents = event.documents\n", - " \n", + " stream_type = type(event).__name__\n", + " \n", " # Text\n", - " if event.event_type == \"text-generation\":\n", - " print(event.text, end=\"\") \n", + " if stream_type == \"StreamTextGeneration\":\n", + " print(event.text, end=\"\")\n", "\n", " # Citations\n", - " if event.event_type == \"citation-generation\":\n", + " if stream_type == \"StreamCitationGeneration\":\n", " if not citations_flag:\n", " print(\"\\n\\nCITATIONS:\")\n", " citations_flag = True\n", - " print(event.citations)\n", - " \n", - " if documents_flag:\n", - " print(\"\\n\\nDOCUMENTS:\")\n", - " for d in documents:\n", - " print(f'{d[\"title\"]} ({d[\"id\"]}). URL: {d[\"url\"]}')\n", + " print(event.citations[0])\n", + " \n", + " # Documents\n", + " if citations_flag:\n", + " if stream_type == \"StreamingChat\":\n", + " print(\"\\n\\nDOCUMENTS:\")\n", + " documents = [{'id': doc['id'],\n", + " 'text': doc['text'][:50] + '...',\n", + " 'title': doc['title'],\n", + " 'url': doc['url']} \n", + " for doc in event.documents]\n", + " for doc in documents:\n", + " print(doc)\n", "\n", " print(f\"\\n{'-'*100}\\n\")" ] @@ -174,30 +176,47 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": 10, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "User: What is attention\n", + "User: What are sentence embeddings\n", + "Chatbot:\n", + "Sentence embeddings are the building blocks of language models. They associate each sentence with a vector (list of numbers) in a way that similar sentences are assigned similar vectors. These vectors are composed of numbers and carry important properties of the sentence. 
The embeddings act as a form of translation between languages as well, as they provide a relatable vector for similar sentences in different languages.\n", + "\n", + "CITATIONS:\n", + "{'start': 69, 'end': 124, 'text': 'associate each sentence with a vector (list of numbers)', 'document_ids': ['demo-conn-e5y5ps_0', 'demo-conn-e5y5ps_1', 'demo-conn-e5y5ps_2']}\n", + "{'start': 139, 'end': 186, 'text': 'similar sentences are assigned similar vectors.', 'document_ids': ['demo-conn-e5y5ps_0', 'demo-conn-e5y5ps_1']}\n", + "{'start': 235, 'end': 272, 'text': 'important properties of the sentence.', 'document_ids': ['demo-conn-e5y5ps_1', 'demo-conn-e5y5ps_2']}\n", + "\n", + "\n", + "DOCUMENTS:\n", + "{'id': 'demo-conn-e5y5ps_0', 'text': 'In the previous chapter, we learned that sentence ...', 'title': 'Similarity Between Words and Sentences', 'url': 'https://docs.cohere.com/docs/similarity-between-words-and-sentences'}\n", + "{'id': 'demo-conn-e5y5ps_1', 'text': 'This is where sentence embeddings come into play. ...', 'title': 'Text Embeddings', 'url': 'https://docs.cohere.com/docs/text-embeddings'}\n", + "{'id': 'demo-conn-e5y5ps_2', 'text': 'Sentence embeddings are even more powerful, as the...', 'title': 'Similarity Between Words and Sentences', 'url': 'https://docs.cohere.com/docs/similarity-between-words-and-sentences'}\n", + "\n", + "----------------------------------------------------------------------------------------------------\n", + "\n", + "User: How is it different from word embeddings\n", "Chatbot:\n", - "Attention is a technique used in language models to provide context to each word in a sentence or text, based on the other words. Attention plays a crucial role in transformer models, which can help improve large language models.\n", + "The primary distinction between word embeddings and sentence embeddings is that the latter assigns a vector to every sentence whereas the former does the same thing but for individual words. \n", + "\n", + "Both embeddings are similar in the sense that they associate vectors in a way that similar items (words or sentences) are mapped to similar vectors. 
Word embeddings are a subset of sentence embeddings.\n", "\n", "CITATIONS:\n", - "[{'start': 60, 'end': 67, 'text': 'context', 'document_ids': ['demo-conn-tm17qr_0', 'demo-conn-tm17qr_1', 'demo-conn-tm17qr_2']}]\n", - "[{'start': 68, 'end': 102, 'text': 'to each word in a sentence or text', 'document_ids': ['demo-conn-tm17qr_1', 'demo-conn-tm17qr_2']}]\n", - "[{'start': 117, 'end': 129, 'text': 'other words.', 'document_ids': ['demo-conn-tm17qr_1']}]\n", - "[{'start': 148, 'end': 160, 'text': 'crucial role', 'document_ids': ['demo-conn-tm17qr_2']}]\n", - "[{'start': 164, 'end': 182, 'text': 'transformer models', 'document_ids': ['demo-conn-tm17qr_2']}]\n", - "[{'start': 199, 'end': 229, 'text': 'improve large language models.', 'document_ids': ['demo-conn-tm17qr_2']}]\n", + "{'start': 91, 'end': 125, 'text': 'assigns a vector to every sentence', 'document_ids': ['demo-conn-e5y5ps_0', 'demo-conn-e5y5ps_1']}\n", + "{'start': 165, 'end': 190, 'text': 'but for individual words.', 'document_ids': ['demo-conn-e5y5ps_0']}\n", + "{'start': 244, 'end': 261, 'text': 'associate vectors', 'document_ids': ['demo-conn-e5y5ps_0', 'demo-conn-e5y5ps_1']}\n", + "{'start': 315, 'end': 341, 'text': 'mapped to similar vectors.', 'document_ids': ['demo-conn-e5y5ps_0', 'demo-conn-e5y5ps_1']}\n", + "{'start': 342, 'end': 394, 'text': 'Word embeddings are a subset of sentence embeddings.', 'document_ids': ['demo-conn-e5y5ps_1']}\n", "\n", "\n", "DOCUMENTS:\n", - "Transformer Models (demo-conn-tm17qr_0). URL: https://docs.cohere.com/docs/transformer-models\n", - "Transformer Models (demo-conn-tm17qr_1). URL: https://docs.cohere.com/docs/transformer-models\n", - "Transformer Models (demo-conn-tm17qr_2). URL: https://docs.cohere.com/docs/transformer-models\n", + "{'id': 'demo-conn-e5y5ps_0', 'text': 'In the previous chapters, you learned about word a...', 'title': 'The Attention Mechanism', 'url': 'https://docs.cohere.com/docs/the-attention-mechanism'}\n", + "{'id': 'demo-conn-e5y5ps_1', 'text': 'This is where sentence embeddings come into play. 
...', 'title': 'Text Embeddings', 'url': 'https://docs.cohere.com/docs/text-embeddings'}\n", "\n", "----------------------------------------------------------------------------------------------------\n", "\n", @@ -207,7 +226,7 @@ ], "source": [ "# Define connectors\n", - "connectors = [\"demo-conn-tm17qr\"]\n", + "connectors = [\"demo-conn-e5y5ps\"]\n", "\n", "# Create an instance of the Chatbot class by supplying the connectors\n", "chatbot = Chatbot(connectors)\n", diff --git a/examples/chat_rag_quickstart_connector/RAG_Chatbot_with_Quickstart_Connectors.ipynb.ipynb b/examples/chat_rag_quickstart_connector/RAG_Chatbot_with_Quickstart_Connectors.ipynb.ipynb index cfe0ddc0..2e1fe9c0 100644 --- a/examples/chat_rag_quickstart_connector/RAG_Chatbot_with_Quickstart_Connectors.ipynb.ipynb +++ b/examples/chat_rag_quickstart_connector/RAG_Chatbot_with_Quickstart_Connectors.ipynb.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "code", - "execution_count": 6, + "execution_count": 1, "metadata": {}, "outputs": [], "source": [ @@ -11,8 +11,7 @@ "import uuid\n", "from typing import List, Dict\n", "\n", - "COHERE_API_KEY = os.getenv(\"COHERE_API_KEY\")\n", - "co = cohere.Client(COHERE_API_KEY)" + "co = cohere.Client(\"COHERE_API_KEY\")" ] }, { @@ -24,7 +23,7 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 2, "metadata": {}, "outputs": [], "source": [ @@ -43,7 +42,8 @@ " )\n", "\n", " for event in response:\n", - " yield event" + " yield event\n", + " yield response" ] }, { @@ -55,7 +55,7 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": 4, "metadata": {}, "outputs": [], "source": [ @@ -81,31 +81,33 @@ " # Print the chatbot response\n", " print(\"Chatbot:\")\n", " \n", - " documents = []\n", - " documents_flag = False\n", " citations_flag = False\n", " \n", " for event in response:\n", - " # Documents\n", - " if event.event_type == \"search-results\":\n", - " documents_flag = True\n", - " documents = event.documents\n", - " \n", + " stream_type = type(event).__name__\n", + " \n", " # Text\n", - " if event.event_type == \"text-generation\":\n", - " print(event.text, end=\"\") \n", + " if stream_type == \"StreamTextGeneration\":\n", + " print(event.text, end=\"\")\n", "\n", " # Citations\n", - " if event.event_type == \"citation-generation\":\n", + " if stream_type == \"StreamCitationGeneration\":\n", " if not citations_flag:\n", " print(\"\\n\\nCITATIONS:\")\n", " citations_flag = True\n", - " print(event.citations)\n", - " \n", - " if documents_flag:\n", - " print(\"\\n\\nDOCUMENTS:\")\n", - " for d in documents:\n", - " print(f'{d[\"title\"]} ({d[\"id\"]}). 
URL: {d[\"url\"]}')\n", + " print(event.citations[0])\n", + " \n", + " # Documents\n", + " if citations_flag:\n", + " if stream_type == \"StreamingChat\":\n", + " print(\"\\n\\nDOCUMENTS:\")\n", + " documents = [{'id': doc['id'],\n", + " 'text': doc.get('text', doc.get('snippet', ''))[:50] + '...', # snippet field to account for web search results\n", + " 'title': doc['title'],\n", + " 'url': doc['url']} \n", + " for doc in event.documents]\n", + " for doc in documents:\n", + " print(doc)\n", "\n", " print(f\"\\n{'-'*100}\\n\")" ] @@ -119,37 +121,51 @@ }, { "cell_type": "code", - "execution_count": 19, + "execution_count": 5, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "User: What is prompt engineering\n", + "User: What are sentence embeddings\n", "Chatbot:\n", - "Prompt engineering is about sending instructions to text generation models and obtaining responses. A prompt can consist of a single line of instruction, but the more specific it is, the more accurate you can expect the response to be. Each additional building block added to a prompt provides a different means of improving the quality of the response.\n", + "Sentence embeddings are a useful tool for validating output from an LLM. They can be used to ensure the output is similar enough to a target, for example, in text summarization tasks. Sentence embeddings can also be used to ensure the output meets certain criteria such as safety and correctness. For example, we may want to confirm that an output does not contain profanity.\n", + "\n", + "CITATIONS:\n", + "{'start': 42, 'end': 59, 'text': 'validating output', 'document_ids': ['demo-conn-gdrive-6bfrp6_1:5']}\n", + "{'start': 68, 'end': 72, 'text': 'LLM.', 'document_ids': ['demo-conn-gdrive-6bfrp6_1:5']}\n", + "{'start': 114, 'end': 140, 'text': 'similar enough to a target', 'document_ids': ['demo-conn-gdrive-6bfrp6_1:5', 'demo-conn-gdrive-6bfrp6_1:4']}\n", + "{'start': 158, 'end': 183, 'text': 'text summarization tasks.', 'document_ids': ['demo-conn-gdrive-6bfrp6_1:5']}\n", + "{'start': 273, 'end': 279, 'text': 'safety', 'document_ids': ['demo-conn-gdrive-6bfrp6_1:4']}\n", + "{'start': 365, 'end': 375, 'text': 'profanity.', 'document_ids': ['demo-conn-gdrive-6bfrp6_1:4']}\n", + "\n", "\n", - "When working with large language models (LLMs), prompt engineering opens up many possibilities for creativity. However, there are trade-offs in terms of performance, and outputs from LLMs are probabilistic, so a mechanism for validating outputs is necessary.\n", + "DOCUMENTS:\n", + "{'id': 'demo-conn-gdrive-6bfrp6_1:5', 'text': ' For this, with the help of text embeddings, we wa...', 'title': 'Validating Outputs', 'url': 'https://docs.google.com/document/d/1wngAfCJY1IgD6H__4AkQXFfymKUpSeJL13TItbigdyA/edit?usp=drivesdk'}\n", + "{'id': 'demo-conn-gdrive-6bfrp6_1:4', 'text': ' We can extend this to synthetic data generation c...', 'title': 'Validating Outputs', 'url': 'https://docs.google.com/document/d/1wngAfCJY1IgD6H__4AkQXFfymKUpSeJL13TItbigdyA/edit?usp=drivesdk'}\n", + "\n", + "----------------------------------------------------------------------------------------------------\n", + "\n", + "User: What about prompt engineering\n", + "Chatbot:\n", + "Prompt engineering is about sending instructions to LLMs and tailoring inputs to get the desired response from the model. This can be done creatively, such as by chaining prompts, where you give the LLM a starting point and have it finish the prompt based on a partially completed prompt. 
The flexibility and creativity in dealing with LLMs' probabilistic outputs pose an exciting challenge for building LLM-powered applications.\n", "\n", "CITATIONS:\n", - "[{'start': 28, 'end': 99, 'text': 'sending instructions to text generation models and obtaining responses.', 'document_ids': ['demo-conn-gdrive-1x2p4k_0:3']}]\n", - "[{'start': 126, 'end': 152, 'text': 'single line of instruction', 'document_ids': ['demo-conn-gdrive-1x2p4k_0:3', 'demo-conn-gdrive-1x2p4k_0:33']}]\n", - "[{'start': 162, 'end': 235, 'text': 'more specific it is, the more accurate you can expect the response to be.', 'document_ids': ['demo-conn-gdrive-1x2p4k_0:33']}]\n", - "[{'start': 252, 'end': 353, 'text': 'building block added to a prompt provides a different means of improving the quality of the response.', 'document_ids': ['demo-conn-gdrive-1x2p4k_0:33']}]\n", - "[{'start': 373, 'end': 401, 'text': 'large language models (LLMs)', 'document_ids': ['demo-conn-gdrive-1x2p4k_1:15']}]\n", - "[{'start': 422, 'end': 465, 'text': 'opens up many possibilities for creativity.', 'document_ids': ['demo-conn-gdrive-1x2p4k_1:15']}]\n", - "[{'start': 485, 'end': 519, 'text': 'trade-offs in terms of performance', 'document_ids': ['demo-conn-gdrive-1x2p4k_1:15']}]\n", - "[{'start': 525, 'end': 560, 'text': 'outputs from LLMs are probabilistic', 'document_ids': ['demo-conn-gdrive-1x2p4k_3:0']}]\n", - "[{'start': 567, 'end': 613, 'text': 'mechanism for validating outputs is necessary.', 'document_ids': ['demo-conn-gdrive-1x2p4k_3:0']}]\n", + "{'start': 28, 'end': 56, 'text': 'sending instructions to LLMs', 'document_ids': ['demo-conn-gdrive-6bfrp6_0:3']}\n", + "{'start': 61, 'end': 77, 'text': 'tailoring inputs', 'document_ids': ['demo-conn-gdrive-6bfrp6_3:0']}\n", + "{'start': 162, 'end': 178, 'text': 'chaining prompts', 'document_ids': ['demo-conn-gdrive-6bfrp6_1:15']}\n", + "{'start': 293, 'end': 304, 'text': 'flexibility', 'document_ids': ['demo-conn-gdrive-6bfrp6_1:15']}\n", + "{'start': 309, 'end': 319, 'text': 'creativity', 'document_ids': ['demo-conn-gdrive-6bfrp6_1:15']}\n", + "{'start': 336, 'end': 363, 'text': \"LLMs' probabilistic outputs\", 'document_ids': ['demo-conn-gdrive-6bfrp6_3:0']}\n", + "{'start': 372, 'end': 390, 'text': 'exciting challenge', 'document_ids': ['demo-conn-gdrive-6bfrp6_1:15']}\n", + "{'start': 395, 'end': 429, 'text': 'building LLM-powered applications.', 'document_ids': ['demo-conn-gdrive-6bfrp6_1:15']}\n", "\n", "\n", "DOCUMENTS:\n", - "Chaining Prompts (demo-conn-gdrive-1x2p4k_1:15). URL: https://docs.google.com/document/d/1oF20QD0lHNdYQp6F7sSyEC1grGErout4GIQn1JBUACo/edit?usp=drivesdk\n", - "Evaluating Outputs (demo-conn-gdrive-1x2p4k_4:15). URL: https://docs.google.com/document/d/10x9mJOnEr62hg1IFxgAtD1aIFS4NXJ2l5Lt-UhJXLVg/edit?usp=drivesdk\n", - "Validating Outputs (demo-conn-gdrive-1x2p4k_3:0). URL: https://docs.google.com/document/d/1wngAfCJY1IgD6H__4AkQXFfymKUpSeJL13TItbigdyA/edit?usp=drivesdk\n", - "Constructing Prompts (demo-conn-gdrive-1x2p4k_0:3). URL: https://docs.google.com/document/d/1LGsOhBL02jwy5UUIS8tuv9G80FSn7vxeQYiiglsN9oY/edit?usp=drivesdk\n", - "Constructing Prompts (demo-conn-gdrive-1x2p4k_0:33). 
URL: https://docs.google.com/document/d/1LGsOhBL02jwy5UUIS8tuv9G80FSn7vxeQYiiglsN9oY/edit?usp=drivesdk\n", + "{'id': 'demo-conn-gdrive-6bfrp6_0:3', 'text': ' At its core, prompting a Command model is about s...', 'title': 'Constructing Prompts', 'url': 'https://docs.google.com/document/d/1LGsOhBL02jwy5UUIS8tuv9G80FSn7vxeQYiiglsN9oY/edit?usp=drivesdk'}\n", + "{'id': 'demo-conn-gdrive-6bfrp6_3:0', 'text': \"\\ufeffValidating Outputs\\r\\nIn this chapter, you'll learn...\", 'title': 'Validating Outputs', 'url': 'https://docs.google.com/document/d/1wngAfCJY1IgD6H__4AkQXFfymKUpSeJL13TItbigdyA/edit?usp=drivesdk'}\n", + "{'id': 'demo-conn-gdrive-6bfrp6_1:15', 'text': '\\r\\nThis is a fascinating area of prompt engineering...', 'title': 'Chaining Prompts', 'url': 'https://docs.google.com/document/d/1oF20QD0lHNdYQp6F7sSyEC1grGErout4GIQn1JBUACo/edit?usp=drivesdk'}\n", "\n", "----------------------------------------------------------------------------------------------------\n", "\n", @@ -159,7 +175,7 @@ ], "source": [ "# Define connectors\n", - "connectors = [\"demo-conn-gdrive-1x2p4k\"]\n", + "connectors = [\"demo-conn-gdrive-6bfrp6\"]\n", "\n", "# Create an instance of the Chatbot class by supplying the connectors\n", "chatbot = Chatbot(connectors)\n", @@ -180,38 +196,38 @@ }, { "cell_type": "code", - "execution_count": 25, + "execution_count": 5, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "User: What is LLM University\n", + "User: What is LLM university\n", "Chatbot:\n", - "LLM University (LLMU) is an online learning resource provided by Cohere that offers a comprehensive curriculum on natural language processing (NLP) using large language models. It caters to learners from all backgrounds, from beginners to advanced, and the courses are geared towards anyone excited about language processing, including those looking to build apps using language AI. The courses cover everything from the basics of LLMs to advanced topics like generative AI, with practical code examples to help learners solidify their knowledge. The hands-on exercises allow learners to build and deploy their own models.\n", + "LLM University (LLMU) is a set of comprehensive learning resources for anyone interested in natural language processing (NLP) or large language models (LLMs), from beginners to advanced learners. 
\n", + "\n", + "You can customise your learning path and the curriculum covers everything from the fundamentals of LLMs to the most advanced topics, including generative AI.\n", "\n", "CITATIONS:\n", - "[{'start': 15, 'end': 21, 'text': '(LLMU)', 'document_ids': ['web-search_1:0', 'web-search_0:4', 'web-search_1:2', 'web-search_1:3', 'web-search_0:3']}]\n", - "[{'start': 28, 'end': 71, 'text': 'online learning resource provided by Cohere', 'document_ids': ['web-search_1:0']}]\n", - "[{'start': 86, 'end': 147, 'text': 'comprehensive curriculum on natural language processing (NLP)', 'document_ids': ['web-search_1:0']}]\n", - "[{'start': 180, 'end': 219, 'text': 'caters to learners from all backgrounds', 'document_ids': ['web-search_1:2']}]\n", - "[{'start': 226, 'end': 247, 'text': 'beginners to advanced', 'document_ids': ['web-search_1:0']}]\n", - "[{'start': 257, 'end': 324, 'text': 'courses are geared towards anyone excited about language processing', 'document_ids': ['web-search_1:2']}]\n", - "[{'start': 353, 'end': 382, 'text': 'build apps using language AI.', 'document_ids': ['web-search_1:2']}]\n", - "[{'start': 421, 'end': 473, 'text': 'basics of LLMs to advanced topics like generative AI', 'document_ids': ['web-search_1:0', 'web-search_1:2']}]\n", - "[{'start': 480, 'end': 503, 'text': 'practical code examples', 'document_ids': ['web-search_1:2']}]\n", - "[{'start': 521, 'end': 546, 'text': 'solidify their knowledge.', 'document_ids': ['web-search_1:2']}]\n", - "[{'start': 551, 'end': 569, 'text': 'hands-on exercises', 'document_ids': ['web-search_1:3']}]\n", - "[{'start': 588, 'end': 622, 'text': 'build and deploy their own models.', 'document_ids': ['web-search_1:3']}]\n", + "{'start': 0, 'end': 21, 'text': 'LLM University (LLMU)', 'document_ids': ['web-search_8:0', 'web-search_8:2', 'web-search_8:1', 'web-search_5:0', 'web-search_5:3']}\n", + "{'start': 34, 'end': 66, 'text': 'comprehensive learning resources', 'document_ids': ['web-search_8:0', 'web-search_8:2', 'web-search_8:1', 'web-search_5:0']}\n", + "{'start': 92, 'end': 125, 'text': 'natural language processing (NLP)', 'document_ids': ['web-search_8:0', 'web-search_8:1', 'web-search_5:0']}\n", + "{'start': 129, 'end': 157, 'text': 'large language models (LLMs)', 'document_ids': ['web-search_8:1', 'web-search_5:0']}\n", + "{'start': 164, 'end': 173, 'text': 'beginners', 'document_ids': ['web-search_8:0', 'web-search_8:2', 'web-search_5:0']}\n", + "{'start': 177, 'end': 195, 'text': 'advanced learners.', 'document_ids': ['web-search_8:0', 'web-search_8:2', 'web-search_5:0']}\n", + "{'start': 206, 'end': 234, 'text': 'customise your learning path', 'document_ids': ['web-search_8:2']}\n", + "{'start': 281, 'end': 293, 'text': 'fundamentals', 'document_ids': ['web-search_8:0', 'web-search_8:2', 'web-search_8:1', 'web-search_5:0']}\n", + "{'start': 309, 'end': 329, 'text': 'most advanced topics', 'document_ids': ['web-search_8:0', 'web-search_8:2', 'web-search_8:1', 'web-search_5:0']}\n", + "{'start': 341, 'end': 355, 'text': 'generative AI.', 'document_ids': ['web-search_8:0', 'web-search_8:2', 'web-search_8:1', 'web-search_5:0']}\n", "\n", "\n", "DOCUMENTS:\n", - "Introducing LLM University — Your Go-To Learning Resource for NLP🎓 (web-search_1:0). URL: https://txt.cohere.com/llm-university/\n", - "LLM University (LLMU) | Cohere (web-search_0:4). URL: https://docs.cohere.com/docs/llmu\n", - "Introducing LLM University — Your Go-To Learning Resource for NLP🎓 (web-search_1:2). 
URL: https://txt.cohere.com/llm-university/\n", - "Introducing LLM University — Your Go-To Learning Resource for NLP🎓 (web-search_1:3). URL: https://txt.cohere.com/llm-university/\n", - "LLM University (LLMU) | Cohere (web-search_0:3). URL: https://docs.cohere.com/docs/llmu\n", + "{'id': 'web-search_8:0', 'text': 'Introducing LLM University — Your Go-To Learning R...', 'title': 'Introducing LLM University — Your Go-To Learning Resource for NLP🎓', 'url': 'https://txt.cohere.com/llm-university/'}\n", + "{'id': 'web-search_8:2', 'text': ' We cater to learners from all backgrounds, and co...', 'title': 'Introducing LLM University — Your Go-To Learning Resource for NLP🎓', 'url': 'https://txt.cohere.com/llm-university/'}\n", + "{'id': 'web-search_8:1', 'text': ' These courses are tailor-made for learners who wa...', 'title': 'Introducing LLM University — Your Go-To Learning Resource for NLP🎓', 'url': 'https://txt.cohere.com/llm-university/'}\n", + "{'id': 'web-search_5:0', 'text': 'Guides and ConceptsAPI ReferenceRelease NotesAppli...', 'title': 'LLM University (LLMU) | Cohere', 'url': 'https://docs.cohere.com/docs/llmu'}\n", + "{'id': 'web-search_5:3', 'text': '\\n\\nHow to build apps, including semantic search mod...', 'title': 'LLM University (LLMU) | Cohere', 'url': 'https://docs.cohere.com/docs/llmu'}\n", "\n", "----------------------------------------------------------------------------------------------------\n", "\n", @@ -242,7 +258,7 @@ }, { "cell_type": "code", - "execution_count": 30, + "execution_count": 6, "metadata": {}, "outputs": [ { @@ -251,33 +267,35 @@ "text": [ "User: What is chain of thought prompting\n", "Chatbot:\n", - "Chain of thought prompting is a technique used to help LLMs (Large Language Models) perform complex reasoning by breaking down problems into logical, bite-sized chunks. This method encourages LLMs to produce intermediate reasoning steps before delivering a final answer to a multi-step problem. The idea is that a model-generated chain of thought would mimic an intuitive thought process when working through a multi-step reasoning problem. \n", + "Chain of thought prompting is a technique that guides LLMs (language model) to follow a reasoning process when dealing with problematic questions. When a prompt asks a model to give a final answer to a multi-step problem, chain of thought prompting induces the model to decompose the problem into intermediate reasoning steps, leading to a correct final answer. This is done by showing the model a few examples where the step-by-step reasoning is clearly laid out. The model is then expected to follow that \"chain of thought\" reasoning and get to the correct answer.\n", "\n", - "This concept was introduced by Wei et al. 
in 2023, and has been found to be particularly useful in improving LLMs' performance at complex arithmetic, commonsense, and symbolic reasoning tasks.\n", + "This approach has been found to significantly enhance the ability of LLMs to tackle complex arithmetic and commonsense reasoning tasks.\n", "\n", "CITATIONS:\n", - "[{'start': 55, 'end': 83, 'text': 'LLMs (Large Language Models)', 'document_ids': ['web-search_9:2']}]\n", - "[{'start': 92, 'end': 109, 'text': 'complex reasoning', 'document_ids': ['web-search_7:2', 'web-search_9:2', 'web-search_8:7', 'web-search_8:1']}]\n", - "[{'start': 113, 'end': 168, 'text': 'breaking down problems into logical, bite-sized chunks.', 'document_ids': ['web-search_7:2', 'web-search_9:2']}]\n", - "[{'start': 208, 'end': 269, 'text': 'intermediate reasoning steps before delivering a final answer', 'document_ids': ['web-search_3:2', 'demo-conn-gdrive-1x2p4k_11:20', 'web-search_7:2', 'demo-conn-gdrive-1x2p4k_10:6', 'web-search_8:7', 'web-search_8:1']}]\n", - "[{'start': 275, 'end': 294, 'text': 'multi-step problem.', 'document_ids': ['web-search_3:2', 'web-search_8:7']}]\n", - "[{'start': 314, 'end': 387, 'text': 'model-generated chain of thought would mimic an intuitive thought process', 'document_ids': ['web-search_3:2', 'web-search_8:7']}]\n", - "[{'start': 474, 'end': 484, 'text': 'Wei et al.', 'document_ids': ['demo-conn-gdrive-1x2p4k_11:20', 'web-search_9:2', 'demo-conn-gdrive-1x2p4k_10:6']}]\n", - "[{'start': 488, 'end': 492, 'text': '2023', 'document_ids': ['demo-conn-gdrive-1x2p4k_10:6']}]\n", - "[{'start': 573, 'end': 635, 'text': 'complex arithmetic, commonsense, and symbolic reasoning tasks.', 'document_ids': ['web-search_7:2', 'web-search_9:2']}]\n", + "{'start': 47, 'end': 58, 'text': 'guides LLMs', 'document_ids': ['web-search_4:1']}\n", + "{'start': 79, 'end': 105, 'text': 'follow a reasoning process', 'document_ids': ['web-search_4:1']}\n", + "{'start': 184, 'end': 196, 'text': 'final answer', 'document_ids': ['web-search_5:2']}\n", + "{'start': 202, 'end': 220, 'text': 'multi-step problem', 'document_ids': ['web-search_5:2']}\n", + "{'start': 270, 'end': 279, 'text': 'decompose', 'document_ids': ['web-search_5:2', 'web-search_6:2']}\n", + "{'start': 297, 'end': 325, 'text': 'intermediate reasoning steps', 'document_ids': ['web-search_5:2', 'web-search_6:2', 'web-search_7:2', 'web-search_8:13']}\n", + "{'start': 340, 'end': 361, 'text': 'correct final answer.', 'document_ids': ['web-search_5:2', 'web-search_6:2', 'web-search_4:1', 'web-search_8:13']}\n", + "{'start': 378, 'end': 410, 'text': 'showing the model a few examples', 'document_ids': ['web-search_5:2', 'demo-conn-gdrive-6bfrp6_11:20', 'demo-conn-gdrive-6bfrp6_11:30', 'web-search_4:1', 'demo-conn-gdrive-6bfrp6_10:6', 'web-search_7:2']}\n", + "{'start': 421, 'end': 443, 'text': 'step-by-step reasoning', 'document_ids': ['demo-conn-gdrive-6bfrp6_11:20', 'web-search_4:1']}\n", + "{'start': 495, 'end': 535, 'text': 'follow that \"chain of thought\" reasoning', 'document_ids': ['web-search_4:1']}\n", + "{'start': 600, 'end': 621, 'text': 'significantly enhance', 'document_ids': ['web-search_6:2', 'web-search_7:2']}\n", + "{'start': 645, 'end': 670, 'text': 'tackle complex arithmetic', 'document_ids': ['web-search_6:2', 'web-search_7:2']}\n", + "{'start': 675, 'end': 703, 'text': 'commonsense reasoning tasks.', 'document_ids': ['web-search_6:2', 'web-search_7:2']}\n", "\n", "\n", "DOCUMENTS:\n", - "Language Models Perform Reasoning via Chain of Thought – Google Research Blog 
(web-search_3:2). URL: https://blog.research.google/2022/05/language-models-perform-reasoning-via.html\n", - "Constructing Prompts (demo-conn-gdrive-1x2p4k_11:20). URL: https://docs.google.com/document/d/1LGsOhBL02jwy5UUIS8tuv9G80FSn7vxeQYiiglsN9oY/edit?usp=drivesdk\n", - "Let’s Think Step by Step: Advanced Reasoning in Business with Chain-of-Thought Prompting | by Jerry Cuomo | Aug, 2023 | Medium (web-search_7:2). URL: https://medium.com/@JerryCuomo/lets-think-step-by-step-advanced-reasoning-in-business-with-chain-of-thought-prompting-dd5ae8a6008\n", - "Constructing Prompts (demo-conn-gdrive-1x2p4k_11:30). URL: https://docs.google.com/document/d/1LGsOhBL02jwy5UUIS8tuv9G80FSn7vxeQYiiglsN9oY/edit?usp=drivesdk\n", - "Chain-of-Thought Prompting: Helping LLMs Learn by Example | Deepgram (web-search_9:2). URL: https://deepgram.com/learn/chain-of-thought-prompting-guide\n", - "Chaining Prompts (demo-conn-gdrive-1x2p4k_10:6). URL: https://docs.google.com/document/d/1oF20QD0lHNdYQp6F7sSyEC1grGErout4GIQn1JBUACo/edit?usp=drivesdk\n", - "Master Prompting Concepts: Chain of Thought Prompting (web-search_8:7). URL: https://promptengineering.org/master-prompting-concepts-chain-of-thought-prompting/\n", - "Chaining Prompts (demo-conn-gdrive-1x2p4k_10:7). URL: https://docs.google.com/document/d/1oF20QD0lHNdYQp6F7sSyEC1grGErout4GIQn1JBUACo/edit?usp=drivesdk\n", - "Master Prompting Concepts: Chain of Thought Prompting (web-search_8:1). URL: https://promptengineering.org/master-prompting-concepts-chain-of-thought-prompting/\n", - "Chaining Prompts (demo-conn-gdrive-1x2p4k_10:0). URL: https://docs.google.com/document/d/1oF20QD0lHNdYQp6F7sSyEC1grGErout4GIQn1JBUACo/edit?usp=drivesdk\n", + "{'id': 'web-search_4:1', 'text': '\\n\\nWhat is Chain-of-Thought Prompting?\\n\\nDifference ...', 'title': 'Chain of Thought Prompting (CoT): Everything you need to know', 'url': 'https://www.vellum.ai/blog/chain-of-thought-prompting-cot-everything-you-need-to-know'}\n", + "{'id': 'web-search_5:2', 'text': ' In chain of thought prompting (below, right), the...', 'title': 'Language Models Perform Reasoning via Chain of Thought – Google Research Blog', 'url': 'https://blog.research.google/2022/05/language-models-perform-reasoning-via.html'}\n", + "{'id': 'web-search_6:2', 'text': '\\n\\nWhat is Chain-of-Thought Prompting?\\n\\nIn chain-of...', 'title': 'Let’s Think Step by Step: Advanced Reasoning in Business with Chain-of-Thought Prompting | by Jerry Cuomo | Medium', 'url': 'https://medium.com/@JerryCuomo/lets-think-step-by-step-advanced-reasoning-in-business-with-chain-of-thought-prompting-dd5ae8a6008'}\n", + "{'id': 'web-search_7:2', 'text': '\\n\\nIn 2022, Google researchers Wei et al. 
proposed ...', 'title': 'Chain-of-Thought Prompting: Helping LLMs Learn by Example | Deepgram', 'url': 'https://deepgram.com/learn/chain-of-thought-prompting-guide'}\n", + "{'id': 'web-search_8:13', 'text': '\\n\\nTo perform Chain of Thought prompting you just n...', 'title': 'Comprehensive Guide to Chain-of-Thought Prompting', 'url': 'https://www.mercity.ai/blog-post/guide-to-chain-of-thought-prompting'}\n", + "{'id': 'demo-conn-gdrive-6bfrp6_11:20', 'text': '\\r\\nCan you pick me up tonight to go to the concert ...', 'title': 'Constructing Prompts', 'url': 'https://docs.google.com/document/d/1LGsOhBL02jwy5UUIS8tuv9G80FSn7vxeQYiiglsN9oY/edit?usp=drivesdk'}\n", + "{'id': 'demo-conn-gdrive-6bfrp6_11:30', 'text': ' This will allow them to make intelligent decision...', 'title': 'Constructing Prompts', 'url': 'https://docs.google.com/document/d/1LGsOhBL02jwy5UUIS8tuv9G80FSn7vxeQYiiglsN9oY/edit?usp=drivesdk'}\n", + "{'id': 'demo-conn-gdrive-6bfrp6_10:6', 'text': ' But dialing up the creativity knob (i.e., “temper...', 'title': 'Chaining Prompts', 'url': 'https://docs.google.com/document/d/1oF20QD0lHNdYQp6F7sSyEC1grGErout4GIQn1JBUACo/edit?usp=drivesdk'}\n", "\n", "----------------------------------------------------------------------------------------------------\n", "\n", @@ -287,7 +305,7 @@ ], "source": [ "# Define connectors\n", - "connectors = [\"demo-conn-gdrive-1x2p4k\", \"web-search\"]\n", + "connectors = [\"demo-conn-gdrive-6bfrp6\", \"web-search\"]\n", "\n", "# Create an instance of the Chatbot class by supplying the connectors\n", "chatbot = Chatbot(connectors)\n", diff --git a/notebooks/RAG_Chatbot_with_Chat_Embed_Rerank.ipynb b/notebooks/RAG_Chatbot_with_Chat_Embed_Rerank.ipynb index 25d9fc45..9c555350 100644 --- a/notebooks/RAG_Chatbot_with_Chat_Embed_Rerank.ipynb +++ b/notebooks/RAG_Chatbot_with_Chat_Embed_Rerank.ipynb @@ -474,8 +474,11 @@ " if citations_flag:\n", " if stream_type == \"StreamingChat\":\n", " print(\"\\n\\nDOCUMENTS:\")\n", - " documents = [{'id': doc['id'], 'text': doc['text'][:50] + '...', 'title': doc['title'], 'url': doc['url']} \n", - " for doc in event.documents]\n", + " documents = [{'id': doc['id'],\n", + " 'text': doc['text'][:50] + '...',\n", + " 'title': doc['title'],\n", + " 'url': doc['url']} \n", + " for doc in event.documents]\n", " for doc in documents:\n", " print(doc)\n", "\n",
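
For reference, here is a minimal sketch of the streaming pattern these notebooks now use, pulled out of the notebook JSON so it can be read in one place. It assumes the v4 cohere Python SDK shown in the diff, where co.chat(..., stream=True) yields StreamTextGeneration and StreamCitationGeneration events and, once the stream is exhausted, the response object itself (class name StreamingChat) exposes the retrieved documents. The exact co.chat parameters (connectors passed as {"id": ...} dicts, conversation_id) and the API-key placeholder are assumptions carried over from these examples, not a definitive implementation; adjust them before running.

    import uuid
    import cohere

    co = cohere.Client("COHERE_API_KEY")  # placeholder API key, as in the notebooks

    def chat_with_connectors(message: str, connectors: list, conversation_id: str):
        # Stream a RAG response grounded by the given connectors.
        # Assumption: v4 Chat API accepts connectors as [{"id": ...}] and a conversation_id.
        response = co.chat(
            message=message,
            connectors=[{"id": c} for c in connectors],
            conversation_id=conversation_id,
            stream=True,
        )

        citations_flag = False
        for event in response:
            stream_type = type(event).__name__

            # Generated text arrives incrementally.
            if stream_type == "StreamTextGeneration":
                print(event.text, end="")

            # Citations reference character spans of the generated text.
            if stream_type == "StreamCitationGeneration":
                if not citations_flag:
                    print("\n\nCITATIONS:")
                    citations_flag = True
                print(event.citations[0])

        # After the stream is exhausted, the response object carries the documents.
        if citations_flag and getattr(response, "documents", None):
            print("\n\nDOCUMENTS:")
            for doc in response.documents:
                print({'id': doc['id'],
                       # 'snippet' fallback accounts for web-search results, as in the notebooks
                       'text': doc.get('text', doc.get('snippet', ''))[:50] + '...',
                       'title': doc['title'],
                       'url': doc['url']})

    chat_with_connectors("What are sentence embeddings?",
                         connectors=["demo-conn-e5y5ps"],
                         conversation_id=str(uuid.uuid4()))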