From 05fe9388677be0c1b0fc104fe7427a3e3031b02f Mon Sep 17 00:00:00 2001 From: Hyesoo Kim <100982596+duper203@users.noreply.github.com> Date: Wed, 2 Oct 2024 14:24:07 -0700 Subject: [PATCH 1/2] Created using Colab --- Solar-Fullstack-LLM-101/04_CAG_GC.ipynb | 726 +++++++++++++----------- 1 file changed, 396 insertions(+), 330 deletions(-) diff --git a/Solar-Fullstack-LLM-101/04_CAG_GC.ipynb b/Solar-Fullstack-LLM-101/04_CAG_GC.ipynb index c80c8d9..703a28d 100644 --- a/Solar-Fullstack-LLM-101/04_CAG_GC.ipynb +++ b/Solar-Fullstack-LLM-101/04_CAG_GC.ipynb @@ -1,345 +1,411 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "\"Open\n", - "" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# 04. CAG GC\n", - "\n", - "## Overview \n", - "In this exercise, we will explore the concept of Credibility-Aware Generation (CAG) and Groundedness Check (GC) using the Solar framework. CAG focuses on generating responses that are credible and factually accurate, while GC ensures that the generated content is grounded in verifiable sources. This notebook will guide you through the implementation and application of these techniques to enhance the reliability and accuracy of the model's responses.\n", - " \n", - "## Purpose of the Exercise\n", - "The purpose of this exercise is to demonstrate the practical application of Credibility-Aware Generation and Groundedness Check within the Solar framework. By the end of this tutorial, users will understand how to utilize these techniques to produce credible and well-grounded outputs from the language model, thus improving the trustworthiness and utility of the generated content.\n" - ] - }, - { - "cell_type": "code", - "execution_count": 79, - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip is available: \u001b[0m\u001b[31;49m24.0\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m24.1.1\u001b[0m\n", - "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpip install --upgrade pip\u001b[0m\n" - ] - } - ], - "source": [ - "! pip3 install -qU langchain-upstage python-dotenv" - ] - }, - { - "cell_type": "code", - "execution_count": 81, - "metadata": {}, - "outputs": [], - "source": [ - "# @title set API key\n", - "import os\n", - "import getpass\n", - "from pprint import pprint\n", - "import warnings\n", - "\n", - "warnings.filterwarnings(\"ignore\")\n", - "\n", - "from IPython import get_ipython\n", - "\n", - "if \"google.colab\" in str(get_ipython()):\n", - " # Running in Google Colab. Please set the UPSTAGE_API_KEY in the Colab Secrets\n", - " from google.colab import userdata\n", - " os.environ[\"UPSTAGE_API_KEY\"] = userdata.get(\"UPSTAGE_API_KEY\")\n", - "else:\n", - " # Running locally. 
Please set the UPSTAGE_API_KEY in the .env file\n", - " from dotenv import load_dotenv\n", - "\n", - " load_dotenv()\n", - "\n", - "if \"UPSTAGE_API_KEY\" not in os.environ:\n", - " os.environ[\"UPSTAGE_API_KEY\"] = getpass.getpass(\"Enter your Upstage API key: \")\n" -] - - }, - { - "cell_type": "code", - "execution_count": 82, - "metadata": {}, - "outputs": [], - "source": [ - "from langchain_core.prompts import PromptTemplate\n", - "from langchain_upstage import ChatUpstage\n", - "from langchain_core.output_parsers import StrOutputParser\n", - "\n", - "llm = ChatUpstage()" - ] - }, - { - "cell_type": "code", - "execution_count": 83, - "metadata": {}, - "outputs": [], - "source": [ - "from langchain_core.prompts import PromptTemplate\n", - "\n", - "prompt_template = PromptTemplate.from_template(\n", - " \"\"\"\n", - " Please provide most correct answer for the given question from the following context. \n", - " If the answer is not present in the context, please write \"The information is not present in the context.\"\n", - " ---\n", - " Question: {question}\n", - " ---\n", - " Context: {Context}\n", - " \"\"\"\n", - ")\n", - "chain = prompt_template | llm | StrOutputParser()" - ] - }, - { - "cell_type": "code", - "execution_count": 84, - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "metadata": { + "id": "ywT-2I7-G7TS" + }, + "source": [ + "\n", + "\"Open\n", + "" + ] + }, { - "data": { - "text/plain": [ - "'The license under which SOLAR 10.7B is available is the Apache 2.0 license.'" + "cell_type": "markdown", + "metadata": { + "id": "2HEO_binG7TT" + }, + "source": [ + "# 04. CAG GC\n", + "\n", + "## Overview \n", + "In this exercise, we will explore the concept of Credibility-Aware Generation (CAG) and Groundedness Check (GC) using the Solar framework. CAG focuses on generating responses that are credible and factually accurate, while GC ensures that the generated content is grounded in verifiable sources. This notebook will guide you through the implementation and application of these techniques to enhance the reliability and accuracy of the model's responses.\n", + "\n", + "## Purpose of the Exercise\n", + "The purpose of this exercise is to demonstrate the practical application of Credibility-Aware Generation and Groundedness Check within the Solar framework. By the end of this tutorial, users will understand how to utilize these techniques to produce credible and well-grounded outputs from the language model, thus improving the trustworthiness and utility of the generated content.\n" ] - }, - "execution_count": 84, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "context = \"\"\"\n", - "We introduce SOLAR 10.7B, a large language model (LLM) with 10.7 billion parameters, \n", - " demonstrating superior performance in various natural language processing (NLP) tasks. \n", - " Inspired by recent efforts to efficiently up-scale LLMs, \n", - " we present a method for scaling LLMs called depth up-scaling (DUS), \n", - " which encompasses depthwise scaling and continued pretraining.\n", - " In contrast to other LLM up-scaling methods that use mixture-of-experts, \n", - " DUS does not require complex changes to train and inference efficiently. \n", - " We show experimentally that DUS is simple yet effective \n", - " in scaling up high-performance LLMs from small ones. 
\n", - " Building on the DUS model, we additionally present SOLAR 10.7B-Instruct, \n", - " a variant fine-tuned for instruction-following capabilities, \n", - " surpassing Mixtral-8x7B-Instruct. \n", - " SOLAR 10.7B is publicly available under the Apache 2.0 license, \n", - " promoting broad access and application in the LLM field.\n", - "\"\"\"\n", - "\n", - "chain.invoke(\n", - " {\n", - " \"question\": \"What is the license under which SOLAR 10.7B is available?\",\n", - " \"Context\": context,\n", - " }\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": 85, - "metadata": {}, - "outputs": [ + }, { - "data": { - "text/plain": [ - "'The information is not present in the context.'" + "cell_type": "code", + "execution_count": 1, + "metadata": { + "id": "Z3bXdxrvG7TU" + }, + "outputs": [], + "source": [ + "! pip3 install -qU langchain-upstage python-dotenv" ] - }, - "execution_count": 85, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "chain.invoke({\"question\": \"Who is the created SOLAR 10.7B?\", \"Context\": context})" - ] - }, - { - "cell_type": "code", - "execution_count": 86, - "metadata": {}, - "outputs": [ + }, { - "data": { - "text/plain": [ - "'The information is not present in the context.'" + "cell_type": "code", + "execution_count": 2, + "metadata": { + "id": "ty4qn-PMG7TU" + }, + "outputs": [], + "source": [ + "# @title set API key\n", + "import os\n", + "import getpass\n", + "from pprint import pprint\n", + "import warnings\n", + "\n", + "warnings.filterwarnings(\"ignore\")\n", + "\n", + "from IPython import get_ipython\n", + "\n", + "if \"google.colab\" in str(get_ipython()):\n", + " # Running in Google Colab. Please set the UPSTAGE_API_KEY in the Colab Secrets\n", + " from google.colab import userdata\n", + " os.environ[\"UPSTAGE_API_KEY\"] = userdata.get(\"UPSTAGE_API_KEY\")\n", + "else:\n", + " # Running locally. 
Please set the UPSTAGE_API_KEY in the .env file\n", + " from dotenv import load_dotenv\n", + "\n", + " load_dotenv()\n", + "\n", + "if \"UPSTAGE_API_KEY\" not in os.environ:\n", + " os.environ[\"UPSTAGE_API_KEY\"] = getpass.getpass(\"Enter your Upstage API key: \")\n" ] - }, - "execution_count": 86, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "chain.invoke(\n", - " {\n", - " \"question\": \"Did Google provide resources for the SOLAR 10.7B project?\",\n", - " \"Context\": context,\n", - " }\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": 87, - "metadata": {}, - "outputs": [ + }, { - "data": { - "text/plain": [ - "'DUS stands for depth up-scaling.'" + "cell_type": "code", + "execution_count": 3, + "metadata": { + "id": "a9ka91-IG7TU" + }, + "outputs": [], + "source": [ + "from langchain_core.prompts import PromptTemplate\n", + "from langchain_upstage import ChatUpstage\n", + "from langchain_core.output_parsers import StrOutputParser\n", + "\n", + "llm = ChatUpstage(model=\"solar-pro\")" ] - }, - "execution_count": 87, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "chain.invoke({\"question\": \"What does DUS stand for?\", \"Context\": context})" - ] - }, - { - "cell_type": "code", - "execution_count": 88, - "metadata": {}, - "outputs": [ + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "SOLAR 10.7B-Instruct\n" - ] - } - ], - "source": [ - "answer = chain.invoke(\n", - " {\n", - " \"question\": \"What is the name of the variant fine-tuned for instruction-following capabilities?\",\n", - " \"Context\": context,\n", - " }\n", - ")\n", - "print(answer)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Groundedness Check with LangChain and Upstage\n", - "![Groundedness](./figures/gc.png)\n" - ] - }, - { - "cell_type": "code", - "execution_count": 91, - "metadata": {}, - "outputs": [ + "cell_type": "code", + "execution_count": 4, + "metadata": { + "id": "uym434weG7TU" + }, + "outputs": [], + "source": [ + "from langchain_core.prompts import PromptTemplate\n", + "\n", + "prompt_template = PromptTemplate.from_template(\n", + " \"\"\"\n", + " Please provide most correct answer for the given question from the following context.\n", + " If the answer is not present in the context, please write \"The information is not present in the context.\"\n", + " ---\n", + " Question: {question}\n", + " ---\n", + " Context: {Context}\n", + " \"\"\"\n", + ")\n", + "chain = prompt_template | llm | StrOutputParser()" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "Potential answer: DUS stands for depth up-scaling.\n", - "GC check result: grounded\n", - "✅ Groundedness check passed\n" - ] - } - ], - "source": [ - "# GC\n", - "from langchain_upstage import UpstageGroundednessCheck\n", - "\n", - "groundedness_check = UpstageGroundednessCheck()\n", - "\n", - "answer = chain.invoke(\n", - " {\n", - " \"question\": \"What is DUS?\",\n", - " \"Context\": context,\n", - " }\n", - ")\n", - "print(\"Potential answer: \", answer)\n", - "gc_result = groundedness_check.invoke({\"context\": context, \"answer\": answer})\n", - "\n", - "print(\"GC check result: \", gc_result)\n", - "if gc_result.lower().startswith(\"grounded\"):\n", - " print(\"✅ Groundedness check passed\")\n", - "else:\n", - " print(\"❌ Groundedness check failed\")" - ] - }, - { - "cell_type": "code", - "execution_count": 92, - "metadata": {}, - "outputs": [ + "cell_type": "code", + "execution_count": 5, + 
"metadata": { + "id": "8x9rkITMG7TU", + "outputId": "fb5dd51d-d1ec-4e4b-e3f5-8478e08f6c04", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 35 + } + }, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "'SOLAR 10.7B is available under the Apache 2.0 license.'" + ], + "application/vnd.google.colaboratory.intrinsic+json": { + "type": "string" + } + }, + "metadata": {}, + "execution_count": 5 + } + ], + "source": [ + "context = \"\"\"\n", + "We introduce SOLAR 10.7B, a large language model (LLM) with 10.7 billion parameters,\n", + " demonstrating superior performance in various natural language processing (NLP) tasks.\n", + " Inspired by recent efforts to efficiently up-scale LLMs,\n", + " we present a method for scaling LLMs called depth up-scaling (DUS),\n", + " which encompasses depthwise scaling and continued pretraining.\n", + " In contrast to other LLM up-scaling methods that use mixture-of-experts,\n", + " DUS does not require complex changes to train and inference efficiently.\n", + " We show experimentally that DUS is simple yet effective\n", + " in scaling up high-performance LLMs from small ones.\n", + " Building on the DUS model, we additionally present SOLAR 10.7B-Instruct,\n", + " a variant fine-tuned for instruction-following capabilities,\n", + " surpassing Mixtral-8x7B-Instruct.\n", + " SOLAR 10.7B is publicly available under the Apache 2.0 license,\n", + " promoting broad access and application in the LLM field.\n", + "\"\"\"\n", + "\n", + "chain.invoke(\n", + " {\n", + " \"question\": \"What is the license under which SOLAR 10.7B is available?\",\n", + " \"Context\": context,\n", + " }\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": { + "id": "XhXSwzdCG7TV", + "outputId": "ad655d71-7777-44a0-d0fe-05583c1d2f9d", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 35 + } + }, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "'The information is not present in the context.'" + ], + "application/vnd.google.colaboratory.intrinsic+json": { + "type": "string" + } + }, + "metadata": {}, + "execution_count": 6 + } + ], + "source": [ + "chain.invoke({\"question\": \"Who is the created SOLAR 10.7B?\", \"Context\": context})" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": { + "id": "fvBuB9pCG7TV", + "outputId": "31e6e654-ec52-46f4-f378-057a95ea4f76", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 35 + } + }, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "'The information is not present in the context.'" + ], + "application/vnd.google.colaboratory.intrinsic+json": { + "type": "string" + } + }, + "metadata": {}, + "execution_count": 7 + } + ], + "source": [ + "chain.invoke(\n", + " {\n", + " \"question\": \"Did Google provide resources for the SOLAR 10.7B project?\",\n", + " \"Context\": context,\n", + " }\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": { + "id": "J7G4YmYzG7TV", + "outputId": "fdd68816-fea3-46d0-8cee-aaf546f613dd", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 35 + } + }, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "'DUS stands for Depth Up-Scaling.'" + ], + "application/vnd.google.colaboratory.intrinsic+json": { + "type": "string" + } + }, + "metadata": {}, + "execution_count": 8 + } + ], + "source": [ + "chain.invoke({\"question\": \"What does DUS 
stand for?\", \"Context\": context})" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": { + "id": "bJIILZxrG7TV", + "outputId": "363fc0a1-207d-4f4b-f90a-4767c93e87eb", + "colab": { + "base_uri": "https://localhost:8080/" + } + }, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "The name of the variant fine-tuned for instruction-following capabilities is SOLAR 10.7B-Instruct.\n" + ] + } + ], + "source": [ + "answer = chain.invoke(\n", + " {\n", + " \"question\": \"What is the name of the variant fine-tuned for instruction-following capabilities?\",\n", + " \"Context\": context,\n", + " }\n", + ")\n", + "print(answer)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "_oI3in5kG7TV" + }, + "source": [ + "## Groundedness Check with LangChain and Upstage\n", + "![Groundedness](https://github.com/UpstageAI/cookbook/blob/main/Solar-Fullstack-LLM-101/figures/gc.png?raw=1)\n" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": { + "id": "fzNoIbeaG7TV", + "outputId": "a606d9bb-cdbd-4579-c4dd-5a7044c9315a", + "colab": { + "base_uri": "https://localhost:8080/" + } + }, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "Potential answer: DUS is depth up-scaling, a method for scaling language models (LLMs) that encompasses depthwise scaling and continued pretraining.\n", + "GC check result: grounded\n", + "✅ Groundedness check passed\n" + ] + } + ], + "source": [ + "# GC\n", + "from langchain_upstage import UpstageGroundednessCheck\n", + "\n", + "groundedness_check = UpstageGroundednessCheck()\n", + "\n", + "answer = chain.invoke(\n", + " {\n", + " \"question\": \"What is DUS?\",\n", + " \"Context\": context,\n", + " }\n", + ")\n", + "print(\"Potential answer: \", answer)\n", + "gc_result = groundedness_check.invoke({\"context\": context, \"answer\": answer})\n", + "\n", + "print(\"GC check result: \", gc_result)\n", + "if gc_result.lower().startswith(\"grounded\"):\n", + " print(\"✅ Groundedness check passed\")\n", + "else:\n", + " print(\"❌ Groundedness check failed\")" + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "notGrounded\n", - "❌ Groundedness check failed\n" - ] + "cell_type": "code", + "execution_count": 11, + "metadata": { + "id": "7O7U-iKyG7TW", + "outputId": "d7c8eab7-700c-4cae-e47f-6110f0356dd8", + "colab": { + "base_uri": "https://localhost:8080/" + } + }, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "notGrounded\n", + "❌ Groundedness check failed\n" + ] + } + ], + "source": [ + "# GC\n", + "from langchain_upstage import UpstageGroundednessCheck\n", + "\n", + "groundedness_check = UpstageGroundednessCheck()\n", + "\n", + "answer = \"Solar 10.7B is available to the public with a non-commercial license.\"\n", + "gc_result = groundedness_check.invoke({\"context\": context, \"answer\": answer})\n", + "\n", + "print(gc_result)\n", + "if gc_result.lower().startswith(\"grounded\"):\n", + " print(\"✅ Groundedness check passed\")\n", + "else:\n", + " print(\"❌ Groundedness check failed\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Ehbw6AE5G7TW" + }, + "source": [ + "# Excercise\n", + "\n", + "Write code to check GC and make LLM more secure. For example, check GC several times until LLM gives you a reliable answer." 
+ ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.10" + }, + "colab": { + "provenance": [] } - ], - "source": [ - "# GC\n", - "from langchain_upstage import UpstageGroundednessCheck\n", - "\n", - "groundedness_check = UpstageGroundednessCheck()\n", - "\n", - "answer = \"Solar 10.7B is available to the public with a non-commercial license.\"\n", - "gc_result = groundedness_check.invoke({\"context\": context, \"answer\": answer})\n", - "\n", - "print(gc_result)\n", - "if gc_result.lower().startswith(\"grounded\"):\n", - " print(\"✅ Groundedness check passed\")\n", - "else:\n", - " print(\"❌ Groundedness check failed\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Excercise \n", - "\n", - "Write code to check GC and make LLM more secure. For example, check GC several times until LLM gives you a reliable answer." - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.10" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} + "nbformat": 4, + "nbformat_minor": 0 +} \ No newline at end of file From a2a0c1f01b1b098b44c26be50c4fcd98b678eff4 Mon Sep 17 00:00:00 2001 From: Hyesoo Kim <100982596+duper203@users.noreply.github.com> Date: Mon, 7 Oct 2024 09:46:34 -0700 Subject: [PATCH 2/2] Update 04_CAG_GC.ipynb : env setting --- Solar-Fullstack-LLM-101/04_CAG_GC.ipynb | 65 ++++++++++++++++--------- 1 file changed, 41 insertions(+), 24 deletions(-) diff --git a/Solar-Fullstack-LLM-101/04_CAG_GC.ipynb b/Solar-Fullstack-LLM-101/04_CAG_GC.ipynb index 703a28d..bfd49de 100644 --- a/Solar-Fullstack-LLM-101/04_CAG_GC.ipynb +++ b/Solar-Fullstack-LLM-101/04_CAG_GC.ipynb @@ -37,6 +37,23 @@ "! pip3 install -qU langchain-upstage python-dotenv" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## UPSTAGE_API_KEY\n", + "To obtain your Upstage API key, follow these steps:\n", + "\n", + "1. Visit the Upstage AI console at .\n", + "2. Sign up for an account if you don't already have one.\n", + "3. Log in to your account.\n", + "4. Navigate to the API key section.\n", + "5. Generate your API key.\n", + "6. Copy the key and save it securely.\n", + "\n", + "![Console](./figures/console.upstage.ai.jpg)" + ] + }, { "cell_type": "code", "execution_count": 2, @@ -45,29 +62,29 @@ }, "outputs": [], "source": [ - "# @title set API key\n", - "import os\n", - "import getpass\n", - "from pprint import pprint\n", - "import warnings\n", - "\n", - "warnings.filterwarnings(\"ignore\")\n", - "\n", - "from IPython import get_ipython\n", - "\n", - "if \"google.colab\" in str(get_ipython()):\n", - " # Running in Google Colab. Please set the UPSTAGE_API_KEY in the Colab Secrets\n", - " from google.colab import userdata\n", - " os.environ[\"UPSTAGE_API_KEY\"] = userdata.get(\"UPSTAGE_API_KEY\")\n", - "else:\n", - " # Running locally. 
Please set the UPSTAGE_API_KEY in the .env file\n", - " from dotenv import load_dotenv\n", - "\n", - " load_dotenv()\n", - "\n", - "if \"UPSTAGE_API_KEY\" not in os.environ:\n", - " os.environ[\"UPSTAGE_API_KEY\"] = getpass.getpass(\"Enter your Upstage API key: \")\n" - ] + "# @title set API key\n", + "from pprint import pprint\n", + "import os\n", + "\n", + "import warnings\n", + "\n", + "warnings.filterwarnings(\"ignore\")\n", + "\n", + "if \"google.colab\" in str(get_ipython()):\n", + " # Running in Google Colab. Please set the UPSTAGE_API_KEY in the Colab Secrets\n", + " from google.colab import userdata\n", + "\n", + " os.environ[\"UPSTAGE_API_KEY\"] = userdata.get(\"UPSTAGE_API_KEY\")\n", + "else:\n", + " # Running locally. Please set the UPSTAGE_API_KEY in the .env file\n", + " from dotenv import load_dotenv\n", + "\n", + " load_dotenv()\n", + "\n", + "assert (\n", + " \"UPSTAGE_API_KEY\" in os.environ\n", + "), \"Please set the UPSTAGE_API_KEY environment variable\"" + ] }, { "cell_type": "code", @@ -408,4 +425,4 @@ }, "nbformat": 4, "nbformat_minor": 0 -} \ No newline at end of file +}
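
One way to approach the notebook's closing exercise (check GC several times until the model gives a reliable answer) is sketched below. It reuses the CAG prompt chain, ChatUpstage, and UpstageGroundednessCheck exactly as they appear in the patched notebook; the grounded_answer helper, its retry limit, and the fallback refusal string are illustrative assumptions rather than part of the patch.

from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import PromptTemplate
from langchain_upstage import ChatUpstage, UpstageGroundednessCheck

# Same CAG setup as in the notebook: answer only from the supplied context.
llm = ChatUpstage(model="solar-pro")
prompt_template = PromptTemplate.from_template(
    """
    Please provide the most correct answer for the given question from the following context.
    If the answer is not present in the context, please write "The information is not present in the context."
    ---
    Question: {question}
    ---
    Context: {Context}
    """
)
chain = prompt_template | llm | StrOutputParser()
groundedness_check = UpstageGroundednessCheck()

REFUSAL = "The information is not present in the context."


def grounded_answer(question: str, context: str, max_retries: int = 3) -> str:
    """Regenerate the answer until the Groundedness Check reports it as grounded."""
    for _ in range(max_retries):
        answer = chain.invoke({"question": question, "Context": context})
        # GC returns a string such as "grounded" or "notGrounded", as shown in the notebook.
        gc_result = groundedness_check.invoke({"context": context, "answer": answer})
        if str(gc_result).lower().startswith("grounded"):
            return answer  # accepted by the Groundedness Check
    # After max_retries failed checks, refuse rather than return an unverified answer.
    return REFUSAL

Returning the refusal string after repeated failures is a conservative default; a caller could instead surface the last answer together with its GC verdict and let the user decide.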