Merge remote-tracking branch 'origin/master'
dahaipeng committed Aug 13, 2024
2 parents 2b0cb29 + 10b1dbf commit 124a4ea
Showing 11 changed files with 213 additions and 52 deletions.
26 changes: 26 additions & 0 deletions apps/datascience_assistant/README.md
@@ -0,0 +1,26 @@
# Data Science Assistant with Streamlit ⭐
Data Science Assistant (hereinafter referred to as DS Assistant) is a data science assistant built on the modelscope-agent framework. Given a user request, it automatically carries out the steps of a data science task: exploratory data analysis (EDA), data preprocessing, feature engineering, model training, and model evaluation.

Detailed information can be found in the [documentation](../../docs/source/agents/data_science_assistant.md).

## Quick Start
Streamlit is a Python library that makes it easy to create and share beautiful, custom web apps for machine learning and data science.

To run the DS Assistant in Streamlit, you need to install Streamlit and its Jupyter integration. You can install both with pip:
```bash
pip install streamlit streamlit-jupyter
```
Then, you can run the DS Assistant with the following command (the app will prompt you for your DashScope API key when it starts):
```bash
streamlit run app.py
```
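
If you want to drive the DS Assistant from a plain Python script instead of the Streamlit UI, the following is a minimal sketch. It mirrors the `app.py` added in this commit; the example request string and the placeholder API key are assumptions, and without `streamlit=True` the run executes headlessly (the agent itself saves the resulting Jupyter notebook and plan JSON).
```python
import os

from modelscope_agent.agents.data_science_assistant import DataScienceAssistant
from modelscope_agent.tools.metagpt_tools.tool_recommend import TypeMatchToolRecommender

# Placeholder key: replace with your real DashScope API key
os.environ['DASHSCOPE_API_KEY'] = 'your-dashscope-api-key'

llm_config = {
    'model': 'qwen2-72b-instruct',
    'model_server': 'dashscope',
}

assistant = DataScienceAssistant(
    llm=llm_config, tool_recommender=TypeMatchToolRecommender(tools=['<all>']))

# Example request (hypothetical); the agent plans, then writes, executes and checks code step by step
assistant.run(user_request='Train a model to predict survival on the Titanic dataset in ./train.csv')
```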

After running the command, a new tab will open in your default web browser with the DS Assistant running.
The following are screenshots of the DS Assistant running in the browser:

![img_2.png](../../resources/data_science_assistant_streamlit_1.png)
You can view all of the generated code and its output in Streamlit:
![img_3.png](../../resources/data_science_assistant_streamlit_2.png)
After you have finished using the DS Assistant, you can convert the whole run directly to a PDF:
![img_5.png](../../resources/data_science_assistant_streamlit_3.png)
23 changes: 23 additions & 0 deletions apps/datascience_assistant/app.py
@@ -0,0 +1,23 @@
import os

import streamlit as st
from modelscope_agent.agents.data_science_assistant import DataScienceAssistant
from modelscope_agent.tools.metagpt_tools.tool_recommend import \
TypeMatchToolRecommender

# LLM configuration: Qwen2-72B-Instruct served via DashScope
llm_config = {
    'model': 'qwen2-72b-instruct',
    'model_server': 'dashscope',
}
# The DashScope API key is read interactively and exported for the agent to use
os.environ['DASHSCOPE_API_KEY'] = input(
    'Please input your dashscope api key: ')
data_science_assistant = DataScienceAssistant(
llm=llm_config, tool_recommender=TypeMatchToolRecommender(tools=['<all>']))
st.title('Data Science Assistant')
st.write(
'This is a data science assistant that can help you with your data science tasks.'
)
st.write('Please input your request below and click the submit button.')
user_request = st.text_input('User Request')
if st.button('submit'):
data_science_assistant.run(user_request=user_request, streamlit=True)
8 changes: 8 additions & 0 deletions docs/source/index.rst
@@ -52,6 +52,14 @@ Modelscope-Agent DOCUMENTATION
deployment/local_deploy.md


.. toctree::
:maxdepth: 2
:caption: Agents

agents/data_science_assistant.md




Indices and tables
==================
5 changes: 5 additions & 0 deletions docs/source_en/index.rst
@@ -50,6 +50,11 @@ Modelscope-Agent DOCUMENTATION
use_cases/openAPI_for_agent.md
deployment/local_deploy.md

.. toctree::
:maxdepth: 2
:caption: Agents

agents/data_science_assistant.md


Indices and tables
97 changes: 77 additions & 20 deletions modelscope_agent/agents/data_science_assistant.py
@@ -1,5 +1,6 @@
# Implementation inspired by the paper "DATA INTERPRETER: AN LLM AGENT FOR DATA SCIENCE"
import asyncio
import copy
import os
import time
from datetime import datetime
@@ -18,6 +19,12 @@
from modelscope_agent.utils.logger import agent_logger as logger
from modelscope_agent.utils.utils import parse_code

try:
import streamlit as st # noqa
from nbconvert import HTMLExporter
from traitlets.config import Config
except Exception as e:
print(f'import error: {str(e)}, please install streamlit and nbconvert')
PLAN_TEMPLATE = """
# Context:
{context}
@@ -28,9 +35,9 @@
- **feature engineering**: Only for creating new columns for input data.
- **model train**: Only for training model.
- **model evaluate**: Only for evaluating model.
- **ocr**: Only for OCR tasks.
- **other**: Any tasks not in the defined categories
# Task:
Based on the context, write a simple plan or modify an existing plan of what you should do to achieve the goal. A plan \
consists of one to four tasks.
@@ -226,14 +233,12 @@
these are the previous code blocks, which have been executed successfully in the previous jupyter notebook code blocks \
{previous_code_blocks}
Attention: your response should be one of the following:
- [your step by step thought], correct
- [your step by step thought], incorrect
at the end of your thought, you need to give the final judgement on a new line (correct or incorrect).
don't generate code, just give the reason why the code is correct or incorrect.
## Attention
don't use the word 'incorrect' in your step by step thought.
your answer should be short and clear; it doesn't need to be too long.
"""

CHECK_DATA_PROMPT = """
@@ -311,6 +316,7 @@ def __init__(self,
self.code_interpreter = CodeInterpreter()
self.plan = None
self.total_token = 0
self.streamlit = False

def _update_plan(self, user_request: str, curr_plan: Plan = None) -> Plan:
call_llm_success = False
@@ -325,18 +331,26 @@ def _update_plan(self, user_request: str, curr_plan: Plan = None) -> Plan:
}]
while not call_llm_success and call_llm_count < 10:
resp = self._call_llm(prompt=None, messages=messages, stop=None)
resp_streamlit = resp
tasks_text = ''
for r in resp:
tasks_text += r
if self.streamlit:
st.write('#### Generate a plan based on the user request')
tasks_text = st.write_stream(resp_streamlit)
else:
for r in resp:
tasks_text += r
if 'Error code' in tasks_text:
call_llm_count += 1
time.sleep(10)
else:
call_llm_success = True
print('Tasks_text: ', tasks_text)
tasks_text = parse_code(text=tasks_text, lang='json')

logger.info(f'tasks: {tasks_text}')
tasks = json5.loads(tasks_text)
tasks = [Task(**task) for task in tasks]

if curr_plan is None:
new_plan = Plan(goal=user_request)
new_plan.add_tasks(tasks=tasks)
@@ -429,9 +443,8 @@ def _generate_code(self, code_counter: int, task: Task,
else:
# reflect the error and ask user to fix the code
if self.tool_recommender:
tool_info = asyncio.run(
self.tool_recommender.get_recommended_tool_info(
plan=self.plan))
tool_info = self.tool_recommender.get_recommended_tool_info(
plan=self.plan)
prompt = CODE_USING_TOOLS_REFLECTION_TEMPLATE.format(
instruction=task.instruction,
task_guidance=TaskType.get_type(task.task_type).guidance,
@@ -555,9 +568,6 @@ def _check_data(self):

def _judge_code(self, task, previous_code_blocks, code,
code_interpreter_resp):
success = True
failed_reason = ''

judge_prompt = JUDGE_TEMPLATE.format(
instruction=task.instruction,
previous_code_blocks=previous_code_blocks,
@@ -578,13 +588,12 @@ def _judge_code(self, task, previous_code_blocks, code,
self._get_total_tokens()
if 'Error code' in judge_result:
call_llm_count += 1
time.sleep(10)
time.sleep(5)
else:
call_llm_success = True
if not call_llm_success:
raise Exception('call llm failed')
logger.info(f'judge result for task{task.task_id}: \n {judge_result}')

if 'incorrect' in judge_result.split('\n')[-1]:
success = False
failed_reason = (
@@ -593,11 +602,17 @@ def _run(self, user_request, save: bool = True, **kwargs):
return success, failed_reason

else:
return True, 'The code logic is correct'
return True, judge_result

def _run(self, user_request, save: bool = True, **kwargs):
before_time = time.time()
try:
self.streamlit = kwargs.get('streamlit', False)
if self.streamlit:
st.write("""# DataScience Assistant """)
st.write("""### The user request is: \n""")
st.write(user_request)
print('streamlit: ', self.streamlit)
self.plan = self._update_plan(user_request=user_request)
jupyter_file_path = ''
dir_name = ''
@@ -610,7 +625,9 @@ def _run(self, user_request, save: bool = True, **kwargs):

while self.plan.current_task_id:
task = self.plan.task_map.get(self.plan.current_task_id)
# write_and_execute_code(self)
if self.streamlit:
st.write(
f"""### Task {task.task_id}: {task.instruction}\n""")
logger.info(
f'new task starts: task_{task.task_id} , instruction: {task.instruction}'
)
@@ -622,7 +639,6 @@ def _run(self, user_request, save: bool = True, **kwargs):
code_execute_success = False
code_logic_success = False
temp_code_interpreter = CodeInterpreter()

temp_code_interpreter.call(
params=json.dumps({
'code':
@@ -633,26 +649,56 @@ def _run(self, user_request, save: bool = True, **kwargs):
# generate code
code = self._generate_code(code_counter, task,
user_request)
code = '%matplotlib inline \n' + code
code_execute_success, code_interpreter_resp = temp_code_interpreter.call(
params=json.dumps({'code': code}),
nb_mode=True,
silent_mode=True)
# Tear down the temporary Jupyter environment
temp_code_interpreter.terminate()
if self.streamlit:
st.divider()
st_notebook = nbformat.v4.new_notebook()
st_notebook.cells = [
temp_code_interpreter.nb.cells[-1]
]
c = Config()
c.HTMLExporter.preprocessors = [
'nbconvert.preprocessors.ConvertFiguresPreprocessor'
]
# create the new exporter using the custom config
html_exporter_with_figs = HTMLExporter(config=c)
(html, resources_with_fig
) = html_exporter_with_figs.from_notebook_node(
st_notebook)
st.write(
'We have generated the code for the current task')
st.html(html)
judge_resp = ''
if not code_execute_success:
logger.error(
f'code execution failed, task{task.task_id} code_counter{code_counter}:\n '
f'{code_interpreter_resp}')
if self.streamlit:
st.write(
'The code execution failed. Now we will reflect on the error and regenerate the code.'
)
else:
logger.info(
f'code execution success, task{task.task_id} code_counter{code_counter}:\n '
f'{code_interpreter_resp}')
if self.streamlit:
st.write(
'The code execution is successful. Now we will ask the judge to check the code.'
)
code_logic_success, judge_resp = self._judge_code(
task=task,
previous_code_blocks=previous_code_blocks,
code=code,
code_interpreter_resp=code_interpreter_resp)
if self.streamlit:
st.write(
'The judge has checked the code, here is the result.'
)
st.write(judge_resp)
success = code_execute_success and code_logic_success
task.code_cells.append(
CodeCell(
@@ -663,6 +709,10 @@ def _run(self, user_request, save: bool = True, **kwargs):
if success:
self.code_interpreter.call(
params=json.dumps({'code': code}), nb_mode=True)
if self.streamlit:
st.write(
'The code is correct, we will move to the next task.'
)
task.code = code
task.result = code_interpreter_resp
code_counter += 1
@@ -699,6 +749,13 @@ def _run(self, user_request, save: bool = True, **kwargs):
json.dumps(plan_dict, indent=4, cls=TaskEncoder))
except Exception as e:
print(f'json write error: {str(e)}')
if self.streamlit:
st.divider()
st.write('### We have finished all the tasks! ')
st.balloons()
st.write(
f"""#### The total time cost is: {time_cost}\n #### The total token cost is: {total_token}"""
)

except Exception as e:
logger.error(f'error: {e}')
1 change: 1 addition & 0 deletions modelscope_agent/llm/__init__.py
@@ -17,6 +17,7 @@ def get_chat_model(model: str, model_server: str, **kwargs) -> BaseChatModel:
"""
model_type = re.split(r'[-/_]', model)[0]  # parse qwen / gpt / ...
registered_model_id = f'{model_server}_{model_type}'

if registered_model_id in LLM_REGISTRY: # specific model from specific source
return LLM_REGISTRY[registered_model_id](model, model_server, **kwargs)
elif model_server in LLM_REGISTRY: # specific source
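
For context, here is a hedged sketch of how this factory is typically called; the model and model_server values are taken from the `app.py` added in this commit, and which registry key resolves depends on what has been registered:
```python
from modelscope_agent.llm import get_chat_model

# 'qwen2-72b-instruct' is split on [-/_], so the model type becomes 'qwen2';
# the registry is probed first with 'dashscope_qwen2', then falls back to 'dashscope'.
llm = get_chat_model(model='qwen2-72b-instruct', model_server='dashscope')
```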
