docs(weave): fix feedback example to support text feedback by improving streamlit app structure. standardize around .env instead of .toml for secrets. #3238

Open · wants to merge 1 commit into `master`
136 changes: 96 additions & 40 deletions docs/docs/reference/gen_notebooks/feedback_prod.md
@@ -18,22 +18,24 @@ title: Log Feedback from Production

It is often hard to automatically evaluate a generated LLM response, so, depending on your risk tolerance, you can gather direct user feedback to find areas to improve.

In this tutorial, we'll use a custom chatbot as an example app from which to collect user feedback.
We'll use Streamlit to build the interface and we'll capture the LLM interactions and feedback in Weave.

## Setup


```python
!pip install weave openai streamlit wandb
!pip install set-env-colab-kaggle-dotenv -q  # for env var
```
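The `set-env-colab-kaggle-dotenv` package provides the `set_env` helper used below; as its name suggests, it loads a variable from a Colab or Kaggle secret or from a local `.env` file and exports it into the environment.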

First, create a `.env` file with your OpenAI and W&B API keys. You can [sign up](https://platform.openai.com/signup) on the OpenAI platform to get your own API key.


```python
# Add a .env file with your OpenAI and WandB API keys
from set_env import set_env

_ = set_env("OPENAI_API_KEY")
_ = set_env("WANDB_API_KEY")
```
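For reference, a minimal `.env` sketch; the key names match the `set_env` calls above, and the values are placeholders to replace with your own keys:

```
OPENAI_API_KEY=<your-openai-api-key>
WANDB_API_KEY=<your-wandb-api-key>
```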

Next, create a file called `chatbot.py` with the following contents:
@@ -43,17 +45,28 @@

```python
# chatbot.py

import streamlit as st
import wandb
from openai import OpenAI
from set_env import set_env

import weave

_ = set_env("OPENAI_API_KEY")
_ = set_env("WANDB_API_KEY")

# highlight-next-line
wandb.login()

# highlight-next-line
weave_client = weave.init("feedback-example")

oai_client = OpenAI()


# highlight-next-line
@weave.op
def chat_response(prompt):
    stream = oai_client.chat.completions.create(
        model="gpt-4o",
        messages=[
            {"role": "user", "content": prompt},
    # @@ -68,60 +81,103 @@ (unchanged lines elided here in the diff view)
    return {"response": response}


def display_chat_messages():
    for idx, message in enumerate(st.session_state.messages):
        with st.chat_message(message["role"]):
            st.markdown(message["content"])

            # Only show feedback options for assistant messages
            if message["role"] == "assistant":
                # Get index of this call in the session state
                call_idx = (
                    sum(
                        m["role"] == "assistant"
                        for m in st.session_state.messages[: idx + 1]
                    )
                    - 1
                )

                # Create a container for feedback options
                feedback_container = st.container()
                with feedback_container:
                    col1, col2, col3 = st.columns([1, 1, 4])

                    # Thumbs up button
                    with col1:
                        # highlight-next-line
                        if st.button("👍", key=f"thumbs_up_{idx}"):
                            if "calls" in st.session_state and call_idx < len(
                                st.session_state.calls
                            ):
                                # highlight-next-line
                                st.session_state.calls[call_idx].feedback.add_reaction(
                                    "👍"
                                )
                                st.success("Thanks for the feedback!")

                    # Thumbs down button
                    with col2:
                        # highlight-next-line
                        if st.button("👎", key=f"thumbs_down_{idx}"):
                            if "calls" in st.session_state and call_idx < len(
                                st.session_state.calls
                            ):
                                # highlight-next-line
                                st.session_state.calls[call_idx].feedback.add_reaction(
                                    "👎"
                                )
                                st.success("Thanks for the feedback!")

                    # Text feedback
                    with col3:
                        feedback_text = st.text_input(
                            "Feedback", key=f"feedback_input_{idx}"
                        )
                        if st.button("Submit Feedback", key=f"submit_feedback_{idx}"):
                            if feedback_text and call_idx < len(st.session_state.calls):
                                # highlight-next-line
                                st.session_state.calls[call_idx].feedback.add_note(
                                    feedback_text
                                )
                                st.success("Feedback submitted!")


def show_chat_prompt():
    if prompt := st.chat_input("What is up?"):
        st.session_state.messages.append({"role": "user", "content": prompt})

        with st.chat_message("user"):
            st.markdown(prompt)

        with st.chat_message("assistant"):
            # highlight-next-line
            with weave.attributes(
                {"session": st.session_state["session_id"], "env": "prod"}
            ):
                # This could also be weave model.predict.call if you're using a weave.Model subclass
                # call the function with `.call`, this returns a tuple with a new Call object
                result, call = chat_response.call(prompt)
                st.write(result["response"])
                st.session_state.messages.append(
                    {"role": "assistant", "content": result["response"]}
                )
                # highlight-next-line
                st.session_state.calls.append(call)


def init_session_state():
    if "session_id" not in st.session_state:
        st.session_state["session_id"] = "123abc"

    if "messages" not in st.session_state:
        st.session_state.messages = []

    if "calls" not in st.session_state:
        st.session_state.calls = []


def main():
    st.title("Add feedback")

    init_session_state()
    display_chat_messages()
    show_chat_prompt()

    st.rerun()
```

**Review comment (Contributor), anchored on the `st.rerun()` line:** what's the purpose of this addition? I remove this and it stops the constant refreshing
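(For context: `st.rerun()` restarts the Streamlit script from the top as soon as it executes, so an unconditional call at the end of `main()` makes the app rerun continuously, which matches the behavior the reviewer describes.)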
```python
if __name__ == "__main__":
    main()
```
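To try the app locally, a standard Streamlit invocation should work, assuming the file is saved as `chatbot.py`:

```
streamlit run chatbot.py
```

Each `chat_response` call, together with any 👍/👎 reactions or text notes submitted through the UI, should then appear on the corresponding call in the Weave project `feedback-example`.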