updated guides
sharanshirodkar7 committed Apr 19, 2024
1 parent 6d95213 commit 49fcfa2
Showing 4 changed files with 28 additions and 19 deletions.
12 changes: 7 additions & 5 deletions fern/docs/pages/guides/ada.mdx
@@ -39,15 +39,17 @@ from langchain import PromptTemplate
from sentence_transformers import SentenceTransformer
import lancedb
from lancedb.embeddings import with_embeddings
-import predictionguard as pg
+from predictionguard import PredictionGuard
import pandas as pd
from getpass import getpass
```

## Authenticate to Prediction Guard API
```python
-pg_access_token = getpass('Enter your Prediction Guard access token: ')
-os.environ['PREDICTIONGUARD_TOKEN'] = pg_access_token
+# Set your Prediction Guard token as an environmental variable.
+os.environ["PREDICTIONGUARD_API_KEY"] = "<your access token>"
+
+client = PredictionGuard()
```

## Create a SQLite Database
@@ -209,7 +211,7 @@ def generate_sql_query(question, injected_schema):
    prompt_filled = sql_prompt.format(question=question, schema_description=injected_schema)

    try:
-        result = pg.Completion.create(
+        result = client.completions.create(
            model="deepseek-coder-6.7b-instruct",
            prompt=prompt_filled,
            max_tokens=300,
@@ -270,7 +272,7 @@ def get_answer(question, data, sql_query):
    prompt_filled = qa_prompt.format(question=question, data=data, sql_query=sql_query)

    # Respond to the user
-    output = pg.Completion.create(
+    output = client.completions.create(
        model="Neural-Chat-7B",
        prompt=prompt_filled,
        max_tokens=200,
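
Taken together, the `ada.mdx` changes replace the module-level `pg.Completion.create` calls with a `PredictionGuard` client object and swap `PREDICTIONGUARD_TOKEN` for `PREDICTIONGUARD_API_KEY`. A minimal sketch of the migrated pattern, with an illustrative prompt standing in for the guide's SQL template:

```python
import os

from predictionguard import PredictionGuard

# Set your Prediction Guard token as an environmental variable.
os.environ["PREDICTIONGUARD_API_KEY"] = "<your access token>"

# The client picks up the API key from the environment.
client = PredictionGuard()

result = client.completions.create(
    model="deepseek-coder-6.7b-instruct",
    prompt="Write a SQL query that counts the rows in a table named jobs.",  # illustrative prompt
    max_tokens=300,
)
print(result["choices"][0]["text"])
```
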
12 changes: 7 additions & 5 deletions fern/docs/pages/guides/data-extraction.mdx
@@ -15,10 +15,12 @@ import itertools

import pandas as pd
from langchain import PromptTemplate
-import predictionguard as pg
+from predictionguard import PredictionGuard

+# Set your Prediction Guard token as an environmental variable.
+os.environ["PREDICTIONGUARD_API_KEY"] = "<your access token>"

os.environ["PREDICTIONGUARD_TOKEN"] = "<your access token>"
client = PredictionGuard()

# Load the JSON data into a dataframe
data = []
@@ -65,7 +67,7 @@ summary_prompt = PromptTemplate(template=summarize_template,
# Loop over the rows summarizing the data
summaries = []
for i,row in df.iterrows():
-    result=pg.Completion.create(
+    result=client.completions.create(
        model="Nous-Hermes-Llama2-13B",
        prompt=summary_prompt.format(
            transcript=row['transcript']
@@ -121,7 +123,7 @@ for i, row in df.iterrows():
    for q in questions:

        # Extract the information
-        result = pg.Completion.create(
+        result = client.completions.create(
            model="Nous-Hermes-Llama2-13B",
            prompt=q_and_a_prompt.format(
                question=q, transcript_summary=row["summary"]
@@ -132,7 +134,7 @@ for i, row in df.iterrows():
        )

        # Generate a factual consistency score
-        fact_score = pg.Factuality.check(
+        fact_score = client.factuality.check(
            reference=row['summary'],
            text=result['choices'][0]['text']
        )
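
The factuality call follows the same pattern: `pg.Factuality.check` becomes a method on the client instance. A short sketch of the migrated check, with placeholder reference/text strings, and assuming the response exposes a per-check score:

```python
from predictionguard import PredictionGuard

# Assumes PREDICTIONGUARD_API_KEY is already set in the environment.
client = PredictionGuard()

# Score how consistent a generated answer is with its source text.
fact_score = client.factuality.check(
    reference="The caller reported a billing error on their March invoice.",  # placeholder
    text="The customer called about a mistake on their March bill.",          # placeholder
)
print(fact_score["checks"][0]["score"])  # assumed response shape
```
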
12 changes: 7 additions & 5 deletions fern/docs/pages/guides/langchainllm.mdx
@@ -7,7 +7,7 @@ helps you "Build applications with LLMs through composability." LangChain doesn'

## Installation and Setup
- Install the Python SDK with `pip install predictionguard`
-- Get a Prediction Guard access token (as described [here](https://docs.predictionguard.com/)) and set it as the environment variable `PREDICTIONGUARD_TOKEN`.
+- Get a Prediction Guard access token (as described [here](https://docs.predictionguard.com/)) and set it as the environment variable `PREDICTIONGUARD_API_KEY`.
## LLM Wrapper

There exists a Prediction Guard LLM wrapper, which you can access with
@@ -36,12 +36,14 @@ Basic usage of the controlled or guarded LLM wrapper:
```python
import os

-import predictionguard as pg
from langchain.llms import PredictionGuard
from langchain import PromptTemplate, LLMChain

-# Your Prediction Guard API key. Get one at predictionguard.com
-os.environ["PREDICTIONGUARD_TOKEN"] = "<your access token>"
+
+# Set your Prediction Guard token as an environmental variable.
+os.environ["PREDICTIONGUARD_API_KEY"] = "<your access token>"
+
+client = PredictionGuard()

# Define a prompt template
template = """Respond to the following query based on the context.
@@ -73,7 +75,7 @@ from langchain import PromptTemplate, LLMChain
from langchain.llms import PredictionGuard

# Your Prediction Guard API key. Get one at predictionguard.com
os.environ["PREDICTIONGUARD_TOKEN"] = "<your Prediction Guard access token>"
os.environ["PREDICTIONGUARD_API_KEY"] = "<your Prediction Guard access token>"

pgllm = PredictionGuard(model="Nous-Hermes-Llama2-13B")

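
On the LangChain side, the wrapper is constructed exactly as before; only the environment variable it reads has changed. A minimal end-to-end sketch using the `LLMChain` interface shown in the guide (the context and query strings are illustrative):

```python
import os

from langchain import LLMChain, PromptTemplate
from langchain.llms import PredictionGuard

# The wrapper now reads PREDICTIONGUARD_API_KEY instead of PREDICTIONGUARD_TOKEN.
os.environ["PREDICTIONGUARD_API_KEY"] = "<your Prediction Guard access token>"

pgllm = PredictionGuard(model="Nous-Hermes-Llama2-13B")

# Prompt template matching the guide's context/query pattern.
template = """Respond to the following query based on the context.

Context: {context}

Query: {query}

Result: """
prompt = PromptTemplate(template=template, input_variables=["context", "query"])

chain = LLMChain(prompt=prompt, llm=pgllm)
print(chain.run(context="Our store opens at 9am.", query="When does the store open?"))
```
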
11 changes: 7 additions & 4 deletions fern/docs/pages/guides/output.mdx
@@ -15,14 +15,17 @@ This example demonstrates how to integrate PredictionGuard with custom data stru
1. **Set Environment Variable for PredictionGuard**: Ensure that your PredictionGuard API token is correctly set up in your environment variables.

```python
-import os
-os.environ["PREDICTIONGUARD_TOKEN"] = "<your access token>"
+from predictionguard import PredictionGuard
+
+# Set your Prediction Guard token as an environmental variable.
+os.environ["PREDICTIONGUARD_API_KEY"] = "<your access token>"
+
+client = PredictionGuard()
```

2. **Import Necessary Libraries**: Import PredictionGuard, Pydantic for data validation, and LangChain for output parsing and prompt templating.

```python
-import predictionguard as pg
from langchain.output_parsers import PydanticOutputParser
from langchain.prompts import PromptTemplate
from pydantic import BaseModel, Field, validator
@@ -61,7 +64,7 @@ prompt = PromptTemplate(
6. **Generate and Parse Output**: Call PredictionGuard's text completion model "Neural-Chat-7B" to generate an output based on the formatted prompt, then parse the output into the Pydantic model. Handle exceptions for parsing errors.

```python
-result = pg.Completion.create(
+result = client.completions.create(
model="Neural-Chat-7B",
prompt=prompt.format(query="Tell me a joke."),
max_tokens=200,
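
Putting the `output.mdx` steps together, the structured-output flow with the new client looks roughly like this; the `Joke` model follows LangChain's Pydantic-parser convention, and its field names are illustrative:

```python
from langchain.output_parsers import PydanticOutputParser
from langchain.prompts import PromptTemplate
from predictionguard import PredictionGuard
from pydantic import BaseModel, Field

# Assumes PREDICTIONGUARD_API_KEY is already set in the environment.
client = PredictionGuard()

# Desired data structure for the model's answer.
class Joke(BaseModel):
    setup: str = Field(description="question to set up a joke")
    punchline: str = Field(description="answer to resolve the joke")

parser = PydanticOutputParser(pydantic_object=Joke)

prompt = PromptTemplate(
    template="Answer the user query.\n{format_instructions}\n{query}\n",
    input_variables=["query"],
    partial_variables={"format_instructions": parser.get_format_instructions()},
)

result = client.completions.create(
    model="Neural-Chat-7B",
    prompt=prompt.format(query="Tell me a joke."),
    max_tokens=200,
)

try:
    joke = parser.parse(result["choices"][0]["text"])
    print(joke)
except Exception as e:  # the model will not always emit parseable JSON
    print(f"Failed to parse output: {e}")
```
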
