diff --git a/docs/docs/integrations/chat/kinetica.ipynb b/docs/docs/integrations/chat/kinetica.ipynb
index 685e4c5b826ee..a8c84d6a541d2 100644
--- a/docs/docs/integrations/chat/kinetica.ipynb
+++ b/docs/docs/integrations/chat/kinetica.ipynb
@@ -98,13 +98,14 @@
    "outputs": [],
    "source": [
     "from langchain_community.chat_models.kinetica import KineticaChatLLM\n",
+    "\n",
     "kinetica_llm = KineticaChatLLM()\n",
     "\n",
     "# Test table we will create\n",
     "table_name = \"demo.user_profiles\"\n",
     "\n",
     "# LLM Context we will create\n",
-    "kinetica_ctx = 'demo.test_llm_ctx'"
+    "kinetica_ctx = \"demo.test_llm_ctx\""
    ]
   },
   {
@@ -250,15 +251,17 @@
     "from typing import Generator\n",
     "\n",
     "Faker.seed(5467)\n",
-    "faker = Faker(locale='en-US')\n",
+    "faker = Faker(locale=\"en-US\")\n",
+    "\n",
     "\n",
     "def profile_gen(count: int) -> Generator:\n",
-    "  for id in range(0, count):\n",
-    "    rec = dict(id=id, **faker.simple_profile())\n",
-    "    rec['birthdate'] = pd.Timestamp(rec['birthdate'])\n",
-    "    yield rec\n",
+    "    for id in range(0, count):\n",
+    "        rec = dict(id=id, **faker.simple_profile())\n",
+    "        rec[\"birthdate\"] = pd.Timestamp(rec[\"birthdate\"])\n",
+    "        yield rec\n",
+    "\n",
     "\n",
-    "load_df = pd.DataFrame.from_records(data=profile_gen(100), index='id')\n",
+    "load_df = pd.DataFrame.from_records(data=profile_gen(100), index=\"id\")\n",
     "load_df.head()"
    ]
   },
@@ -359,10 +362,13 @@
    "source": [
     "from gpudb import GPUdbTable\n",
     "\n",
-    "gpudb_table = GPUdbTable.from_df(load_df, db=kinetica_llm.kdbc, \n",
-    "                                 table_name=table_name, \n",
-    "                                 clear_table=True,\n",
-    "                                 load_data=True)\n",
+    "gpudb_table = GPUdbTable.from_df(\n",
+    "    load_df,\n",
+    "    db=kinetica_llm.kdbc,\n",
+    "    table_name=table_name,\n",
+    "    clear_table=True,\n",
+    "    load_data=True,\n",
+    ")\n",
     "\n",
     "# See the Kinetica column types\n",
     "gpudb_table.type_as_df()"
@@ -403,7 +409,7 @@
     "\n",
     "from gpudb import GPUdbException\n",
     "\n",
-    "sql=f\"\"\"\n",
+    "sql = f\"\"\"\n",
     "CREATE OR REPLACE CONTEXT {kinetica_ctx}\n",
     "(\n",
     "    TABLE = demo.test_profiles\n",
@@ -418,15 +424,17 @@
     ")\n",
     "\"\"\"\n",
     "\n",
+    "\n",
     "def _check_error(response: dict) -> None:\n",
-    "  status = response['status_info']['status']\n",
-    "  if (status != 'OK'):\n",
-    "    message = response['status_info']['message']\n",
-    "    raise GPUdbException('[%s]: %s' % (status, message))\n",
+    "    status = response[\"status_info\"][\"status\"]\n",
+    "    if status != \"OK\":\n",
+    "        message = response[\"status_info\"][\"message\"]\n",
+    "        raise GPUdbException(\"[%s]: %s\" % (status, message))\n",
+    "\n",
     "\n",
     "response = kinetica_llm.kdbc.execute_sql(sql)\n",
     "_check_error(response)\n",
-    "response['status_info']"
+    "response[\"status_info\"]"
    ]
   },
   {
@@ -509,7 +517,11 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "from langchain_community.chat_models.kinetica import KineticaSqlOutputParser, KineticaSqlResponse\n",
+    "from langchain_community.chat_models.kinetica import (\n",
+    "    KineticaSqlOutputParser,\n",
+    "    KineticaSqlResponse,\n",
+    ")\n",
+    "\n",
     "chain = prompt_template | kinetica_llm | KineticaSqlOutputParser(kdbc=kinetica_llm.kdbc)"
    ]
   },
@@ -602,7 +614,9 @@
    ],
    "source": [
     "# Here you must ask a question relevant to the LLM context provided in the prompt template.\n",
-    "response: KineticaSqlResponse = chain.invoke({\"input\": \"What are the female users ordered by username?\"})\n",
+    "response: KineticaSqlResponse = chain.invoke(\n",
+    "    {\"input\": \"What are the female users ordered by username?\"}\n",
+    ")\n",
     "\n",
     "print(f\"SQL: {response.sql}\")\n",
     "response.dataframe.head()"