diff --git a/.dev_scripts/dockerci.sh b/.dev_scripts/dockerci.sh
index 30822de01..e16fe10c7 100644
--- a/.dev_scripts/dockerci.sh
+++ b/.dev_scripts/dockerci.sh
@@ -15,7 +15,7 @@ pip install playwright
playwright install --with-deps chromium
# install package
-pip install fastapi pydantic uvicorn docker sqlmodel
+pip install fastapi pydantic uvicorn docker sqlmodel transformers ray
# run ci
pytest tests
diff --git a/.gitignore b/.gitignore
index 0e15057dd..78bf2763e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -39,6 +39,7 @@ MANIFEST
*.manifest
*.spec
release.sh
+build.sh
*.html
# Installer logs
diff --git a/apps/agentfabric/appBot.py b/apps/agentfabric/appBot.py
index b14c0137a..1336fffbd 100644
--- a/apps/agentfabric/appBot.py
+++ b/apps/agentfabric/appBot.py
@@ -11,6 +11,7 @@
import modelscope_studio as mgr
from config_utils import get_avatar_image, get_ci_dir, parse_configuration
from gradio_utils import format_cover_html
+from modelscope_agent.constants import ApiNames
from modelscope_agent.schemas import Message
from modelscope_agent.utils.logger import agent_logger as logger
from modelscope_studio.components.Chatbot.llm_thinking_presets import qwen
@@ -42,10 +43,13 @@ def check_uuid(uuid_str):
return uuid_str
-def init_user(state):
+def init_user(state, _user_token=None):
try:
+ in_ms_studio = os.getenv('MODELSCOPE_ENVIRONMENT', 'None') == 'studio'
seed = state.get('session_seed', random.randint(0, 1000000000))
- user_agent, user_memory = init_user_chatbot_agent(uuid_str)
+ # use tool api in ms studio
+ user_agent, user_memory = init_user_chatbot_agent(
+ uuid_str, use_tool_api=in_ms_studio, user_token=_user_token)
user_agent.seed = seed
state['user_agent'] = user_agent
state['user_memory'] = user_memory
@@ -72,6 +76,7 @@ def delete(state):
# create the Gradio UI
demo = gr.Blocks(css='assets/appBot.css', theme=customTheme)
with demo:
+ user_token = gr.Textbox(label='modelscope_agent_tool_token', visible=False)
gr.Markdown(
'# \N{fire} AgentFabric powered by Modelscope-agent [github star](https://github.com/modelscope/modelscope-agent/tree/main)'  # noqa E501
)
@@ -111,10 +116,16 @@ def delete(state):
examples=suggests,
inputs=[user_chatbot_input])
- def send_message(chatbot, input, _state):
+ def send_message(chatbot, input, _state, _user_token):
# append the sent message to the chat history
if 'user_agent' not in _state:
- init_user(_state)
+ init_user(_state, _user_token)
+
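+        # collect API keys from the environment: each ApiNames member maps to an
+        # env var, e.g. modelscope_api_key -> os.getenv('MODELSCOPE_API_TOKEN')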
+ kwargs = {
+ name.lower(): os.getenv(value.value)
+ for name, value in ApiNames.__members__.items()
+ }
+
# append the sent message to the chat history
_uuid_str = check_uuid(uuid_str)
user_agent = _state['user_agent']
@@ -149,7 +160,9 @@ def send_message(chatbot, input, _state):
input.text,
history=history,
ref_doc=ref_doc,
- append_files=append_files):
+ append_files=append_files,
+ user_token=_user_token,
+ **kwargs):
# important! do not change this
response += frame
@@ -178,10 +191,10 @@ def send_message(chatbot, input, _state):
gr.on([user_chatbot_input.submit],
fn=send_message,
- inputs=[user_chatbot, user_chatbot_input, state],
+ inputs=[user_chatbot, user_chatbot_input, state, user_token],
outputs=[user_chatbot, user_chatbot_input])
- demo.load(init_user, inputs=[state], outputs=[state])
+ demo.load(init_user, inputs=[state, user_token], outputs=[state])
demo.queue()
demo.launch(show_error=True, max_threads=10)
diff --git a/apps/agentfabric/config_utils.py b/apps/agentfabric/config_utils.py
index a5d07cab7..9929ad184 100644
--- a/apps/agentfabric/config_utils.py
+++ b/apps/agentfabric/config_utils.py
@@ -158,9 +158,14 @@ def parse_configuration(uuid_str=''):
tools_info = builder_cfg.tools
available_tool_list = []
for key, value in tools_info.items():
+        if key in tool_cfg:
+            tool_cfg[key]['use'] = value['use']
+        elif '/' in key:
+            # for tool hub only
+            tool_cfg[key] = value
if value['use']:
available_tool_list.append(key)
- tool_cfg[key]['use'] = value['use']
openapi_plugin_file = get_user_openapi_plugin_cfg_file(uuid_str)
plugin_cfg = {}
diff --git a/apps/agentfabric/requirements.txt b/apps/agentfabric/requirements.txt
index 4f8db879d..974962029 100644
--- a/apps/agentfabric/requirements.txt
+++ b/apps/agentfabric/requirements.txt
@@ -1,10 +1,10 @@
dashscope
faiss-cpu
gradio==4.36.1
+https://modelscope-agent.oss-cn-hangzhou.aliyuncs.com/releases/v0.6.2/modelscope_agent-0.6.2-py3-none-any.whl
langchain
markdown-cjk-spacing
mdx_truly_sane_lists
-modelscope-agent==0.4.1
modelscope_studio
pymdown-extensions
python-slugify
diff --git a/apps/agentfabric/server.py b/apps/agentfabric/server.py
index 7c6034a31..09ffc0a37 100644
--- a/apps/agentfabric/server.py
+++ b/apps/agentfabric/server.py
@@ -15,8 +15,10 @@
is_valid_plugin_configuration, parse_configuration,
save_builder_configuration,
save_plugin_configuration)
-from flask import (Flask, Response, jsonify, make_response, request,
+from flask import (Flask, Response, g, jsonify, make_response, request,
send_from_directory)
+from modelscope_agent.constants import (MODELSCOPE_AGENT_TOKEN_HEADER_NAME,
+ ApiNames)
from modelscope_agent.schemas import Message
from publish_util import (pop_user_info_from_config, prepare_agent_zip,
reload_agent_dir)
@@ -29,6 +31,28 @@
app.session_manager = SessionManager()
+def get_auth_token():
+ auth_header = request.headers.get('Authorization')
+ if auth_header and auth_header.startswith('Bearer '):
+ return auth_header[7:] # Slice off the 'Bearer ' prefix
+ return None
+
+
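+# get_modelscope_agent_token below reads the user token from the
+# 'X-Modelscope-Agent-Token' header; a request carrying it might look like this
+# (sketch only: host, uuid/session and payload are placeholders):
+#   curl -X POST http://<host>/preview/chat/<uuid_str>/<session_str> \
+#        -H 'X-Modelscope-Agent-Token: Bearer <user_token>' \
+#        -F 'params=<json string>'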
+def get_modelscope_agent_token():
+ token = None
+ # Check if authorization header is present
+ if MODELSCOPE_AGENT_TOKEN_HEADER_NAME in request.headers:
+ auth_header = request.headers[MODELSCOPE_AGENT_TOKEN_HEADER_NAME]
+ # Check if the value of the header starts with 'Bearer'
+ if auth_header.startswith('Bearer '):
+ # Extract the token part from 'Bearer token_value'
+ token = auth_header[7:]
+ else:
+ # Extract the token part from auth_header
+ token = auth_header
+ return token
+
+
@app.before_request
def set_request_id():
request_id = request.headers.get('X-Modelscope-Request-Id', 'unknown')
@@ -94,8 +118,6 @@ def generate():
llm_result = frame.get('llm_text', '')
exec_result = frame.get('exec_result', '')
step_result = frame.get('step', '')
- logger.info('frame, {}'.format(
- str(frame).replace('\n', '\\n')))
if len(exec_result) != 0:
if isinstance(exec_result, dict):
exec_result = exec_result['result']
@@ -285,14 +307,6 @@ def save_builder_config(uuid_str):
builder_config_str = request.form.get('builder_config')
logger.info(f'builder_config: {builder_config_str}')
builder_config = json.loads(builder_config_str)
- if 'tools' in builder_config:
- if 'code_interpreter' in builder_config['tools']:
- return jsonify({
- 'success': False,
- 'status': 404,
- 'message': 'Using code_interpreter.',
- 'request_id': request_id_var.get('')
- }), 404
if 'knowledge' in builder_config:
builder_config['knowledge'] = [
os.path.join(get_user_dir(uuid_str), os.path.basename(k))
@@ -367,6 +381,11 @@ def preview_publish_get_zip(uuid_str):
@app.route('/preview/chat/<uuid_str>/<session_str>', methods=['POST'])
@with_request_id
def preview_chat(uuid_str, session_str):
+ user_token = get_modelscope_agent_token()
+ if not user_token:
+ # If token is not found, return 401 Unauthorized response
+ return jsonify({'message': 'Token is missing!'}), 401
+
logger.info(f'preview_chat: uuid_str_{uuid_str}_session_str_{session_str}')
params_str = request.form.get('params')
@@ -383,13 +402,18 @@ def preview_chat(uuid_str, session_str):
file.save(file_path)
file_paths.append(file_path)
logger.info(f'/preview/chat/{uuid_str}/{session_str}: files: {file_paths}')
+ # Generating the kwargs dictionary
+ kwargs = {
+ name.lower(): os.getenv(value.value)
+ for name, value in ApiNames.__members__.items()
+ }
def generate():
try:
start_time = time.time()
seed = random.randint(0, 1000000000)
user_agent, user_memory = app.session_manager.get_user_bot(
- uuid_str, session_str)
+ uuid_str, session_str, user_token=user_token)
user_agent.seed = seed
logger.info(
f'get method: time consumed {time.time() - start_time}')
@@ -422,12 +446,15 @@ def generate():
'request_id': request_id_var.get('')
},
ensure_ascii=False)
+
for frame in user_agent.run(
input_content,
history=history,
ref_doc=ref_doc,
append_files=file_paths,
- uuid_str=uuid_str):
+ uuid_str=uuid_str,
+ user_token=user_token,
+ **kwargs):
logger.info('frame, {}'.format(
str(frame).replace('\n', '\\n')))
# important! do not change this
@@ -486,8 +513,13 @@ def get_preview_chat_history(uuid_str, session_str):
logger.info(
f'get_preview_chat_history: uuid_str_{uuid_str}_session_str_{session_str}'
)
+ user_token = get_modelscope_agent_token()
+ if not user_token:
+ # If token is not found, return 401 Unauthorized response
+ return jsonify({'message': 'Token is missing!'}), 401
- _, user_memory = app.session_manager.get_user_bot(uuid_str, session_str)
+ _, user_memory = app.session_manager.get_user_bot(
+ uuid_str, session_str, user_token=user_token)
return jsonify({
'history': user_memory.get_history(),
'success': True,
diff --git a/apps/agentfabric/server_utils.py b/apps/agentfabric/server_utils.py
index 98e299ebe..1abc8e7fc 100644
--- a/apps/agentfabric/server_utils.py
+++ b/apps/agentfabric/server_utils.py
@@ -137,29 +137,17 @@ def get_user_bot(
self,
builder_id,
session,
- renew=False) -> Tuple[RolePlay, MemoryWithRetrievalKnowledge]:
+ renew=False,
+ user_token=None) -> Tuple[RolePlay, MemoryWithRetrievalKnowledge]:
unique_id = builder_id + '_' + session
user_agent = self.user_bots[unique_id]
if renew or user_agent is None:
logger.info(f'init_user_chatbot_agent: {builder_id} {session}')
- # check code_interpreter
builder_cfg, _, tool_cfg, _, _, _ = parse_configuration(builder_id)
- if 'tools' in builder_cfg and 'code_interpreter' in builder_cfg[
- 'tools']:
- if builder_cfg['tools']['code_interpreter'].get(
- 'is_active', False
- ) and builder_cfg['tools']['code_interpreter'].get(
- 'use', False):
- raise ValueError('Using code interpreter.')
- if 'code_interpreter' in tool_cfg:
- if tool_cfg['code_interpreter'].get(
- 'is_active',
- False) and tool_cfg['code_interpreter'].get(
- 'use', False):
- raise ValueError('Using code interpreter.')
-
- user_agent = init_user_chatbot_agent(builder_id, session)
+
+ user_agent = init_user_chatbot_agent(
+ builder_id, session, use_tool_api=True, user_token=user_token)
self.user_bots[unique_id] = user_agent
return user_agent
diff --git a/apps/agentfabric/user_core.py b/apps/agentfabric/user_core.py
index 89d70def8..db2d3e680 100644
--- a/apps/agentfabric/user_core.py
+++ b/apps/agentfabric/user_core.py
@@ -12,7 +12,10 @@
# init user chatbot_agent
-def init_user_chatbot_agent(uuid_str='', session='default'):
+def init_user_chatbot_agent(uuid_str='',
+ session='default',
+ use_tool_api=False,
+ user_token=None):
builder_cfg, model_cfg, tool_cfg, _, plugin_cfg, _ = parse_configuration(
uuid_str)
# set top_p and stop_words for role play
@@ -21,17 +24,18 @@ def init_user_chatbot_agent(uuid_str='', session='default'):
model_cfg[builder_cfg.model]['generate_cfg']['top_p'] = 0.5
model_cfg[builder_cfg.model]['generate_cfg']['stop'] = 'Observation'
- # build model
- logger.query_info(
- uuid=uuid_str,
- message=f'using model {builder_cfg.model}',
- details={'model_config': model_cfg[builder_cfg.model]})
-
# update function_list
function_list = parse_tool_cfg(tool_cfg)
function_list = add_openapi_plugin_to_additional_tool(
plugin_cfg, function_list)
+ # build model
+ logger.query_info(
+ uuid=uuid_str,
+ message=
+ f'using model {builder_cfg.model} with tool {tool_cfg} and function list {function_list}',
+ details={'model_config': model_cfg[builder_cfg.model]})
+
llm_config = copy.deepcopy(model_cfg[builder_cfg.model])
llm_config['model_server'] = llm_config.pop('type')
instruction = {
@@ -43,7 +47,10 @@ def init_user_chatbot_agent(uuid_str='', session='default'):
function_list=function_list,
llm=llm_config,
instruction=instruction,
- uuid_str=uuid_str)
+ uuid_str=uuid_str,
+ use_tool_api=use_tool_api,
+ user_token=user_token,
+ )
# build memory
preview_history_dir = get_user_preview_history_dir(uuid_str, session)
diff --git a/apps/agentfabric/version.py b/apps/agentfabric/version.py
index c95a2a5da..d7cf31934 100644
--- a/apps/agentfabric/version.py
+++ b/apps/agentfabric/version.py
@@ -1 +1 @@
-__version__ = '0.2.1rc0'
+__version__ = '0.3.0rc0'
diff --git a/build.sh b/build.sh
deleted file mode 100644
index 5e4c7c9cc..000000000
--- a/build.sh
+++ /dev/null
@@ -1,4 +0,0 @@
-tag=$(date +"%Y-%m-%d")-$(git rev-parse HEAD | cut -c1-6)
-git apply ssrf.patch
-sudo docker build . -f docker/dockerfile.agentfabric -t mshub-registry.cn-zhangjiakou.cr.aliyuncs.com/modelscope-repo/agent-fabric:${tag}
-sudo docker push mshub-registry.cn-zhangjiakou.cr.aliyuncs.com/modelscope-repo/agent-fabric:${tag}
diff --git a/docker/tool_node.dockerfile b/docker/tool_node.dockerfile
index d75304341..bd4d26965 100644
--- a/docker/tool_node.dockerfile
+++ b/docker/tool_node.dockerfile
@@ -39,7 +39,9 @@ ENV BASE_TOOL_DIR /app/assets
# install tool_node
COPY modelscope_agent_servers /app/modelscope_agent_servers
-
+# start up script file
+COPY scripts/run_tool_node.sh /app/run_tool_node.sh
+RUN chmod +x /app/run_tool_node.sh
#ENTRYPOINT exec uvicorn tool_service.tool_node.api:app --host 0.0.0.0 --port $PORT
diff --git a/modelscope_agent/agent.py b/modelscope_agent/agent.py
index 08d134b67..67a3f1237 100644
--- a/modelscope_agent/agent.py
+++ b/modelscope_agent/agent.py
@@ -1,3 +1,4 @@
+import os
from abc import ABC, abstractmethod
from typing import Dict, Iterator, List, Optional, Tuple, Union
@@ -50,7 +51,7 @@ def __init__(self,
self.function_map = {}
if function_list:
for function in function_list:
- self._register_tool(function)
+ self._register_tool(function, **kwargs)
self.storage_path = storage_path
self.mem = None
@@ -97,7 +98,8 @@ def _call_tool(self, tool_name: str, tool_args: str, **kwargs):
def _register_tool(self,
tool: Union[str, Dict],
- tenant_id: str = 'default'):
+ tenant_id: str = 'default',
+ **kwargs):
"""
Instantiate the tool for the agent
@@ -115,18 +117,28 @@ def _register_tool(self,
if isinstance(tool, dict):
tool_name = next(iter(tool))
tool_cfg = tool[tool_name]
- if tool_name not in TOOL_REGISTRY:
+ if tool_name not in TOOL_REGISTRY and not self.use_tool_api:
raise NotImplementedError
if tool not in self.function_list:
self.function_list.append(tool)
- tool_class_with_tenant = TOOL_REGISTRY[tool_name]
-
- # adapt the TOOL_REGISTRY[tool_name] to origin tool class
-
- if isinstance(tool_class_with_tenant, BaseTool):
- tool_class_with_tenant = {'class': TOOL_REGISTRY[tool_name]}
- TOOL_REGISTRY[tool_name] = tool_class_with_tenant
+ try:
+ tool_class_with_tenant = TOOL_REGISTRY[tool_name]
+
+ # adapt the TOOL_REGISTRY[tool_name] to origin tool class
+ if isinstance(tool_class_with_tenant, BaseTool):
+ tool_class_with_tenant = {
+ 'class': TOOL_REGISTRY[tool_name]
+ }
+ TOOL_REGISTRY[tool_name] = tool_class_with_tenant
+
+            except KeyError:
+                if not self.use_tool_api:
+                    raise KeyError(
+                        f'Tool {tool_name} is not registered in TOOL_REGISTRY, please register it first.'
+                    )
+ tool_class_with_tenant = {'class': ToolServiceProxy}
# check if the tenant_id of tool instance or tool service are exists
# TODO: change from use_tool_api=True to False, to get the tenant_id of the tool changes to
@@ -137,7 +149,14 @@ def _register_tool(self,
if self.use_tool_api:
# get service proxy as tool instance, call method will call remote tool service
tool_instance = ToolServiceProxy(tool_name, tool_cfg,
- tenant_id)
+ tenant_id, **kwargs)
+
+                # if running in ModelScope studio, strip the studio prefix from the tool name
+                # TODO: this might cause duplicated names across different studios
+ in_ms_studio = os.getenv('MODELSCOPE_ENVIRONMENT', 'none')
+ if in_ms_studio == 'studio':
+ tool_name = tool_name.split('/')[-1]
+
else:
# instantiation tool class as tool instance
tool_instance = TOOL_REGISTRY[tool_name]['class'](tool_cfg)
diff --git a/modelscope_agent/agent_env_util.py b/modelscope_agent/agent_env_util.py
index ebce48517..2010b61ff 100644
--- a/modelscope_agent/agent_env_util.py
+++ b/modelscope_agent/agent_env_util.py
@@ -19,7 +19,7 @@ def __init__(self,
use_history: bool = True,
human_input_mode: Optional[str] = 'CLOSE',
parse_env_prompt_function: Callable = None,
- remote=True,
+ remote=False,
**kwargs):
"""
Agent environment context mixin class to allow the agent to communicate with other agent, in the
diff --git a/modelscope_agent/agents/multi_role_play.py b/modelscope_agent/agents/multi_role_play.py
index 932ff9f62..1e8d8fca4 100644
--- a/modelscope_agent/agents/multi_role_play.py
+++ b/modelscope_agent/agents/multi_role_play.py
@@ -210,9 +210,6 @@ def _run(self,
max_turn = 10
while True and max_turn > 0:
- # print('=====one input planning_prompt======')
- # print(planning_prompt)
- # print('=============Answer=================')
max_turn -= 1
if self.llm.support_function_calling():
output = self.llm.chat_with_functions(
diff --git a/modelscope_agent/agents/role_play.py b/modelscope_agent/agents/role_play.py
index f9a369980..58f8461a7 100644
--- a/modelscope_agent/agents/role_play.py
+++ b/modelscope_agent/agents/role_play.py
@@ -263,9 +263,6 @@ def _run(self,
max_turn = 10
call_llm_count = 0
while True and max_turn > 0:
- # print('=====one input planning_prompt======')
- # print(planning_prompt)
- # print('=============Answer=================')
max_turn -= 1
call_llm_count += 1
if self.llm.support_function_calling():
diff --git a/modelscope_agent/agents_registry.py b/modelscope_agent/agents_registry.py
index 6123ae35b..ef37f3233 100644
--- a/modelscope_agent/agents_registry.py
+++ b/modelscope_agent/agents_registry.py
@@ -6,7 +6,7 @@
class AgentRegistry:
- def __init__(self, remote=True, **kwargs):
+ def __init__(self, remote=False, **kwargs):
self._agents = {}
self._agents_state = {}
self.remote = remote
diff --git a/modelscope_agent/constants.py b/modelscope_agent/constants.py
index bd2629568..f1adf6c87 100644
--- a/modelscope_agent/constants.py
+++ b/modelscope_agent/constants.py
@@ -10,6 +10,7 @@
TASK_CENTER_NAME = 'task_center'
DEFAULT_TOOL_MANAGER_SERVICE_URL = 'http://localhost:31511'
DEFAULT_ASSISTANT_SERVICE_URL = 'http://localhost:31512'
+MODELSCOPE_AGENT_TOKEN_HEADER_NAME = 'X-Modelscope-Agent-Token'
class ApiNames(Enum):
@@ -17,3 +18,4 @@ class ApiNames(Enum):
modelscope_api_key = 'MODELSCOPE_API_TOKEN'
amap_api_key = 'AMAP_TOKEN'
bing_api_key = 'BING_SEARCH_V7_SUBSCRIPTION_KEY'
+ zhipu_api_key = 'ZHIPU_API_KEY'
diff --git a/modelscope_agent/llm/dashscope.py b/modelscope_agent/llm/dashscope.py
index 75cde4cd6..dc7b5696b 100644
--- a/modelscope_agent/llm/dashscope.py
+++ b/modelscope_agent/llm/dashscope.py
@@ -20,7 +20,6 @@ def stream_output(response, **kwargs):
for trunk in response:
if trunk.status_code == HTTPStatus.OK:
# logging at the first frame for request_id, and the last frame for the whole output
- print(trunk)
if not text:
logger.info(
f'call dashscope generation api success, '
@@ -101,7 +100,6 @@ def _chat_stream(self,
if kwargs.get('seed', None):
generation_input['seed'] = kwargs.get('seed')
response = dashscope.Generation.call(**generation_input)
- print(response)
response = self.stat_last_call_token_info(response)
return stream_output(response, **kwargs)
@@ -230,7 +228,6 @@ def build_multi_role_raw_prompt(self, messages: list):
prompt = ''
im_start = '<|im_start|>'
im_end = '<|im_end|>'
- print('build_raw_prompt', messages)
if messages[0]['role'] == 'system':
system_prompt = messages[0]['content']
else:
@@ -264,7 +261,6 @@ def build_multi_role_raw_prompt(self, messages: list):
user_content += f'<|im_start|>{cur_role.strip()}\n{cur_chat.strip()}<|im_end|>\n'
prompt = f'{prompt}{user_content}<|im_start|>{cur_role_name}\n'
- print('prompt: ', [prompt])
return prompt
def _chat_stream(self,
diff --git a/modelscope_agent/memory/memory_with_rag.py b/modelscope_agent/memory/memory_with_rag.py
index 7e540a6b5..870daa470 100644
--- a/modelscope_agent/memory/memory_with_rag.py
+++ b/modelscope_agent/memory/memory_with_rag.py
@@ -52,6 +52,8 @@ def _run(self,
query, files=url, **kwargs)
# limit length
if isinstance(summary_result, list):
+ if len(summary_result) == 0:
+ return ''
single_max_token = int(max_token / len(summary_result))
concatenated_records = '\n'.join([
record[0:single_max_token - 1] for record in summary_result
diff --git a/modelscope_agent/multi_agents_utils/executors/ray.py b/modelscope_agent/multi_agents_utils/executors/ray.py
index 2034fc5c7..a6d476ab8 100644
--- a/modelscope_agent/multi_agents_utils/executors/ray.py
+++ b/modelscope_agent/multi_agents_utils/executors/ray.py
@@ -1,18 +1,25 @@
import logging
from typing import Union
-import ray
from modelscope_agent.agents_registry import AgentRegistry
from modelscope_agent.constants import USER_REQUIREMENT
from modelscope_agent.environment.environment import Environment
from modelscope_agent.schemas import Message
from ray._raylet import ObjectRefGenerator
+try:
+ import ray
+except ImportError:
+ logging.error(
+ 'Ray is not installed, please install ray first by running `pip install ray>=2.9.4`'
+ )
+
class RayTaskExecutor:
@staticmethod
def init_ray():
+
if ray.is_initialized():
ray.shutdown()
ray.init(logging_level=logging.ERROR)
diff --git a/modelscope_agent/rag/__init__.py b/modelscope_agent/rag/__init__.py
index e69de29bb..8f1abc42d 100644
--- a/modelscope_agent/rag/__init__.py
+++ b/modelscope_agent/rag/__init__.py
@@ -0,0 +1,5 @@
+from modelscope_agent.utils.nltk_utils import install_nltk_data
+
+# install nltk data
+
+install_nltk_data()
diff --git a/modelscope_agent/rag/knowledge.py b/modelscope_agent/rag/knowledge.py
index 510d40a19..1450a6f1f 100644
--- a/modelscope_agent/rag/knowledge.py
+++ b/modelscope_agent/rag/knowledge.py
@@ -20,9 +20,6 @@
from modelscope_agent.llm.base import BaseChatModel
from modelscope_agent.rag.emb import DashscopeEmbedding
from modelscope_agent.rag.llm import ModelscopeAgentLLM
-from modelscope_agent.utils.nltk_utils import install_nltk_data
-
-install_nltk_data()
@dataclass
diff --git a/modelscope_agent/rag/llm.py b/modelscope_agent/rag/llm.py
index 59d37e3ca..b56d52154 100644
--- a/modelscope_agent/rag/llm.py
+++ b/modelscope_agent/rag/llm.py
@@ -131,7 +131,6 @@ def gen() -> ChatResponseGen:
raw=r,
)
- print(f'response: {response}')
return gen()
@llm_completion_callback()
diff --git a/modelscope_agent/tools/__init__.py b/modelscope_agent/tools/__init__.py
index 034a28fbd..545667ec9 100644
--- a/modelscope_agent/tools/__init__.py
+++ b/modelscope_agent/tools/__init__.py
@@ -1,6 +1,7 @@
import sys
from ..utils import _LazyModule
+from .contrib import * # noqa F403
_import_structure = {
'amap_weather': ['AMAPWeather'],
diff --git a/modelscope_agent/tools/base.py b/modelscope_agent/tools/base.py
index 9237f6290..b97493138 100644
--- a/modelscope_agent/tools/base.py
+++ b/modelscope_agent/tools/base.py
@@ -1,3 +1,4 @@
+import os
import time
from abc import ABC, abstractmethod
from typing import Dict, List, Optional, Union
@@ -5,7 +6,9 @@
import json
import json5
import requests
-from modelscope_agent.constants import DEFAULT_TOOL_MANAGER_SERVICE_URL
+from modelscope_agent.constants import (DEFAULT_TOOL_MANAGER_SERVICE_URL,
+ MODELSCOPE_AGENT_TOKEN_HEADER_NAME)
+from modelscope_agent.utils.logger import agent_logger as logger
from modelscope_agent.utils.utils import has_chinese_chars
# ast?
@@ -161,7 +164,8 @@ def _verify_args(self, params: str) -> Union[str, dict]:
"""
try:
params_json = json5.loads(params)
- except Exception:
+ except Exception as e:
+ print(e)
params = params.replace('\r', '\\r').replace('\n', '\\n')
params_json = json5.loads(params)
@@ -284,12 +288,15 @@ def has_chinese_chars_in_tools(_tools):
class ToolServiceProxy(BaseTool):
- def __init__(
- self,
- tool_name: str,
- tool_cfg: dict,
- tenant_id: str = 'default',
- tool_service_manager_url: str = DEFAULT_TOOL_MANAGER_SERVICE_URL):
+ def __init__(self,
+ tool_name: str,
+ tool_cfg: dict,
+ tenant_id: str = 'default',
+ tool_service_manager_url: str = os.getenv(
+ 'TOOL_MANAGER_SERVICE_URL',
+ DEFAULT_TOOL_MANAGER_SERVICE_URL),
+ user_token: str = None,
+ **kwargs):
"""
Tool service proxy class
Args:
@@ -297,8 +304,11 @@ def __init__(
tool_cfg: the configuration of tool
tenant_id: the tenant id that the tool belongs to, defalut to 'default'
tool_service_manager_url: the url of tool service manager, default to 'http://localhost:31511'
+ user_token: used to pass to the tool service manager to authenticate the user
"""
+
self.tool_service_manager_url = tool_service_manager_url
+ self.user_token = user_token
self.tool_name = tool_name
self.tool_cfg = tool_cfg
self.tenant_id = tenant_id
@@ -337,13 +347,21 @@ def parse_service_response(response):
def _register_tool(self):
try:
+ service_token = os.getenv('TOOL_MANAGER_AUTH', '')
+ headers = {
+ 'Content-Type': 'application/json',
+ MODELSCOPE_AGENT_TOKEN_HEADER_NAME: self.user_token,
+ 'authorization': service_token
+ }
+            logger.query_info(
+                message=f'create tool service request headers {headers}')
response = requests.post(
f'{self.tool_service_manager_url}/create_tool_service',
json={
'tool_name': self.tool_name,
'tenant_id': self.tenant_id,
'tool_cfg': self.tool_cfg
- })
+ },
+ headers=headers)
response.raise_for_status()
result = ToolServiceProxy.parse_service_response(response)
if 'status' not in result:
@@ -356,17 +374,24 @@ def _register_tool(self):
)
except Exception as e:
raise RuntimeError(
- f'Get error during registering tool from tool manager service with detail {e}'
+                f'Error registering tool with the tool manager service: {e}.'
)
def _check_tool_status(self):
try:
- response = requests.post(
+ service_token = os.getenv('TOOL_MANAGER_AUTH', '')
+ headers = {
+ 'Content-Type': 'application/json',
+ MODELSCOPE_AGENT_TOKEN_HEADER_NAME: self.user_token,
+ 'authorization': service_token
+ }
+ response = requests.get(
f'{self.tool_service_manager_url}/check_tool_service_status',
params={
'tool_name': self.tool_name,
'tenant_id': self.tenant_id,
- })
+ },
+ headers=headers)
response.raise_for_status()
result = ToolServiceProxy.parse_service_response(response)
if 'status' not in result:
@@ -381,12 +406,20 @@ def _check_tool_status(self):
def _get_tool_info(self):
try:
+ service_token = os.getenv('TOOL_MANAGER_AUTH', '')
+ headers = {
+ 'Content-Type': 'application/json',
+ MODELSCOPE_AGENT_TOKEN_HEADER_NAME: self.user_token,
+ 'authorization': service_token
+ }
+ logger.query_info(message=f'tool_info requests header {headers}')
response = requests.post(
f'{self.tool_service_manager_url}/tool_info',
json={
'tool_name': self.tool_name,
'tenant_id': self.tenant_id
- })
+ },
+ headers=headers)
response.raise_for_status()
return ToolServiceProxy.parse_service_response(response)
except Exception as e:
@@ -395,6 +428,16 @@ def _get_tool_info(self):
)
def call(self, params: str, **kwargs):
+ # ms_token
+ self.user_token = kwargs.get('user_token', self.user_token)
+ service_token = os.getenv('TOOL_MANAGER_AUTH', '')
+ headers = {
+ 'Content-Type': 'application/json',
+ MODELSCOPE_AGENT_TOKEN_HEADER_NAME: self.user_token,
+ 'authorization': service_token
+ }
+ logger.query_info(message=f'calling tool header {headers}')
+
try:
# visit tool node to call tool
response = requests.post(
@@ -404,7 +447,11 @@ def call(self, params: str, **kwargs):
'tenant_id': self.tenant_id,
'params': params,
'kwargs': kwargs
- })
+ },
+ headers=headers)
+            logger.query_info(
+                message=f'calling tool response {response.text}')
+
response.raise_for_status()
return ToolServiceProxy.parse_service_response(response)
except Exception as e:
diff --git a/modelscope_agent/tools/code_interpreter/code_interpreter.py b/modelscope_agent/tools/code_interpreter/code_interpreter.py
index 9e55037f4..d5ab83d0f 100644
--- a/modelscope_agent/tools/code_interpreter/code_interpreter.py
+++ b/modelscope_agent/tools/code_interpreter/code_interpreter.py
@@ -18,6 +18,7 @@
from typing import Dict, Optional
import json
+import json5
import matplotlib
import PIL.Image
from jupyter_client import BlockingKernelClient
@@ -50,7 +51,7 @@ class CodeInterpreter(BaseTool):
should not be used the other code interpreter tool at the same time
"""
name = 'code_interpreter'
- description = '代码解释器,可用于执行Python代码。 Enclose the code within triple backticks (`) at the beginning and end of the code.' # noqa E501
+ description = '代码解释器,可用于执行Python代码。' # noqa E501
parameters = [{'name': 'code', 'type': 'string', 'description': '待执行的代码'}]
def __init__(self, cfg={}):
@@ -267,10 +268,10 @@ def _execute_code(self, kc: BlockingKernelClient, code: str) -> str:
return result
def call(self, params: str, timeout: Optional[int] = 30, **kwargs) -> str:
- params = self._verify_args(params)
- if isinstance(params, dict):
+ try:
+ params = json5.loads(params)
code = params['code']
- else:
+ except Exception:
code = extract_code(params)
if not code.strip():
diff --git a/modelscope_agent/tools/modelscope_tools/pipeline_tool.py b/modelscope_agent/tools/modelscope_tools/pipeline_tool.py
index af24f69d8..7833af5d5 100644
--- a/modelscope_agent/tools/modelscope_tools/pipeline_tool.py
+++ b/modelscope_agent/tools/modelscope_tools/pipeline_tool.py
@@ -64,7 +64,7 @@ def call(self, params: str, **kwargs) -> str:
return self._remote_call(params, **kwargs)
def _remote_call(self, params: dict, **kwargs):
- data = json.dumps(params)
+ data = json.dumps(params, ensure_ascii=False)
try:
api_token = get_api_key(ApiNames.modelscope_api_key, **kwargs)
except AssertionError:
@@ -101,7 +101,7 @@ def _local_call(self, params: dict, **kwargs):
try:
self.setup()
origin_result = self.pipeline(**kwargs)
- return json.dumps(origin_result, default=str)
+ return json.dumps(origin_result, default=str, ensure_ascii=False)
except RuntimeError as e:
import traceback
raise RuntimeError(
diff --git a/modelscope_agent/utils/git.py b/modelscope_agent/utils/git.py
new file mode 100644
index 000000000..79165a522
--- /dev/null
+++ b/modelscope_agent/utils/git.py
@@ -0,0 +1,25 @@
+import subprocess
+
+
+def clone_git_repository(repo_url, branch_name, folder_name):
+ """
+ Clone a git repository into a specified folder.
+ Args:
+        repo_url: URL of the repository to clone
+        branch_name: branch to check out
+        folder_name: destination folder for the clone
+
+    Returns:
+        None
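+
+    Example (values are illustrative only):
+        clone_git_repository(
+            'https://github.com/modelscope/modelscope-agent.git',
+            'master', '/tmp/modelscope-agent')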
+ """
+ try:
+ subprocess.run(
+ ['git', 'clone', '-b', branch_name, repo_url, folder_name],
+ check=True,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ print(f"Repository cloned successfully into '{folder_name}'.")
+
+ except subprocess.CalledProcessError as e:
+ print(f'Error cloning repository: {e.stderr.decode()}')
+        raise RuntimeError(f'Repository cloning failed with error {e}')
diff --git a/modelscope_agent/utils/nltk/stopwords.zip b/modelscope_agent/utils/nltk/stopwords.zip
new file mode 100644
index 000000000..3f1d91306
Binary files /dev/null and b/modelscope_agent/utils/nltk/stopwords.zip differ
diff --git a/modelscope_agent/utils/nltk_utils.py b/modelscope_agent/utils/nltk_utils.py
index 31127a786..795cdcd47 100644
--- a/modelscope_agent/utils/nltk_utils.py
+++ b/modelscope_agent/utils/nltk_utils.py
@@ -7,6 +7,7 @@
def install_nltk_data():
+    print('Installing nltk data...')
user_current_working_dir = os.getcwd()
nltk_working_dir = os.path.join(user_current_working_dir, 'tmp',
'nltk_data')
@@ -17,12 +18,16 @@ def install_nltk_data():
punkt_zip_file = os.path.join(nltk_artifacts_dir, 'punkt.zip')
averaged_perceptron_tagger_zip_file = os.path.join(
nltk_artifacts_dir, 'averaged_perceptron_tagger.zip')
+ corpora_zip_file = os.path.join(nltk_artifacts_dir, 'stopwords.zip')
# get target dir
punkt_target_dir = os.path.join(nltk_working_dir, 'tokenizers')
averaged_target_dir = os.path.join(nltk_working_dir, 'taggers')
+ corpora_target_dir = os.path.join(nltk_working_dir, 'corpora')
+
+ # extract nltk artifacts
if not os.path.exists(os.path.join(punkt_target_dir, 'punkt')):
os.makedirs(os.path.join(punkt_target_dir, 'punkt'), exist_ok=True)
with zipfile.ZipFile(punkt_zip_file, 'r') as zip_ref:
@@ -36,3 +41,13 @@ def install_nltk_data():
with zipfile.ZipFile(averaged_perceptron_tagger_zip_file,
'r') as zip_ref:
zip_ref.extractall(averaged_target_dir)
+
+ if not os.path.exists(os.path.join(corpora_target_dir, 'stopwords')):
+ os.makedirs(
+ os.path.join(corpora_target_dir, 'stopwords'), exist_ok=True)
+ with zipfile.ZipFile(corpora_zip_file, 'r') as zip_ref:
+ zip_ref.extractall(corpora_target_dir)
+
+ print('nltk data installed.')
+ os.environ['NLTK_DATA'] = nltk_working_dir
+ print(f'setting nltk data path to: {nltk_working_dir}')
diff --git a/modelscope_agent_servers/tool_manager_server/api.py b/modelscope_agent_servers/tool_manager_server/api.py
index 71eb6ee4d..3be7e36f8 100644
--- a/modelscope_agent_servers/tool_manager_server/api.py
+++ b/modelscope_agent_servers/tool_manager_server/api.py
@@ -1,10 +1,11 @@
import os
from contextlib import asynccontextmanager
-from typing import List
+from typing import List, Optional
from uuid import uuid4
import requests
-from fastapi import BackgroundTasks, FastAPI, HTTPException
+from fastapi import BackgroundTasks, Depends, FastAPI, Header, HTTPException
+from modelscope_agent.constants import MODELSCOPE_AGENT_TOKEN_HEADER_NAME
from modelscope_agent_servers.service_utils import (create_error_msg,
create_success_msg,
parse_service_response)
@@ -39,6 +40,34 @@ async def lifespan(app: FastAPI):
app = FastAPI(lifespan=lifespan)
+# Dependency to extract the authentication token
+def get_auth_token(authorization: Optional[str] = Header(
+ None)) -> Optional[str]: # noqa E125
+ if authorization:
+ schema, _, token = authorization.partition(' ')
+ if schema.lower() == 'bearer' and token:
+ # Assuming the token is a bearer token, return the token part
+ return token
+ elif token == '':
+ return authorization
+
+ # If the schema is not bearer or there is no token, raise an exception
+ raise HTTPException(status_code=403, detail='Invalid authentication')
+
+
+def get_user_token(authorization: Optional[str] = Header(
+ None, alias=MODELSCOPE_AGENT_TOKEN_HEADER_NAME)): # noqa E125
+ if authorization:
+ # Assuming the token is a bearer token
+ schema, _, token = authorization.partition(' ')
+ if schema and token and schema.lower() == 'bearer':
+ return token
+ elif token == '':
+ return authorization
+ else:
+ return ''
+
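+# Note: callers such as ToolServiceProxy (modelscope_agent/tools/base.py) send the
+# service token in the 'Authorization' header and the end-user token in the
+# 'X-Modelscope-Agent-Token' header; both accept an optional 'Bearer ' prefix.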
+
def start_docker_container_and_store_status(tool: ToolRegisterInfo,
app_instance: FastAPI):
with Session(engine) as session:
@@ -199,7 +228,8 @@ async def root():
@app.post('/create_tool_service/')
async def create_tool_service(tool_info: CreateTool,
- background_tasks: BackgroundTasks):
+ background_tasks: BackgroundTasks,
+ auth_token: str = Depends(get_auth_token)):
# todo: the tool name might be the repo dir for the tool, need to parse in this situation.
tool_node_name = f'{tool_info.tool_name}_{tool_info.tenant_id}'
tool_register_info = ToolRegisterInfo(
@@ -209,6 +239,7 @@ async def create_tool_service(tool_info: CreateTool,
tenant_id=tool_info.tenant_id,
image=tool_info.tool_image,
workspace_dir=os.getcwd(),
+ tool_url=tool_info.tool_url,
)
background_tasks.add_task(start_docker_container_and_store_status,
tool_register_info, app)
@@ -222,11 +253,10 @@ async def create_tool_service(tool_info: CreateTool,
return create_success_msg(output, request_id=request_id)
-@app.post('/check_tool_service_status/')
-async def check_tool_service_status(
- tool_name: str,
- tenant_id: str = 'default',
-):
+@app.get('/check_tool_service_status/')
+async def check_tool_service_status(tool_name: str,
+ tenant_id: str = 'default',
+ auth_token: str = Depends(get_auth_token)):
# todo: the tool name might be the repo dir for the tool, need to parse in this situation.
tool_node_name = f'{tool_name}_{tenant_id}'
request_id = str(uuid4())
@@ -250,21 +280,18 @@ async def check_tool_service_status(
@app.post('/update_tool_service/')
-async def update_tool_service(
- tool_name: str,
- background_tasks: BackgroundTasks,
- tool_cfg: dict = {},
- tenant_id: str = 'default',
- tool_image: str = 'modelscope-agent/tool-node:latest',
-):
- tool_node_name = f'{tool_name}_{tenant_id}'
+async def update_tool_service(tool_info: CreateTool,
+ background_tasks: BackgroundTasks,
+ auth_token: str = Depends(get_auth_token)):
+ tool_node_name = f'{tool_info.tool_name}_{tool_info.tenant_id}'
tool_register_info = ToolRegisterInfo(
node_name=tool_node_name,
- tool_name=tool_name,
- config=tool_cfg,
- tenant_id=tenant_id,
- image=tool_image,
+ tool_name=tool_info.tool_name,
+ config=tool_info.tool_cfg,
+ tenant_id=tool_info.tenant_id,
+ image=tool_info.tool_image,
workspace_dir=os.getcwd(),
+ tool_url=tool_info.tool_url,
)
background_tasks.add_task(restart_docker_container_and_update_status,
tool_register_info, app)
@@ -281,7 +308,8 @@ async def update_tool_service(
@app.post('/remove_tool/')
async def deregister_tool(tool_name: str,
background_tasks: BackgroundTasks,
- tenant_id: str = 'default'):
+ tenant_id: str = 'default',
+ auth_token: str = Depends(get_auth_token)):
tool_node_name = f'{tool_name}_{tenant_id}'
tool_register = ToolRegisterInfo(
node_name=tool_node_name,
@@ -302,7 +330,8 @@ async def deregister_tool(tool_name: str,
@app.get('/tools/', response_model=List[ToolInstance])
-async def list_tools(tenant_id: str = 'default'):
+async def list_tools(tenant_id: str = 'default',
+ auth_token: str = Depends(get_auth_token)):
with Session(engine) as session:
statement = select(ToolInstance).where(
ToolInstance.tenant_id == tenant_id)
@@ -313,7 +342,9 @@ async def list_tools(tenant_id: str = 'default'):
@app.post('/tool_info/')
-async def get_tool_info(tool_input: ExecuteTool):
+async def get_tool_info(tool_input: ExecuteTool,
+ user_token: str = Depends(get_user_token),
+ auth_token: str = Depends(get_auth_token)):
# get tool instance
request_id = str(uuid4())
@@ -333,7 +364,9 @@ async def get_tool_info(tool_input: ExecuteTool):
tool_info_url = 'http://' + tool_instance.ip + ':' + str(
tool_instance.port) + '/tool_info'
response = requests.get(
- tool_info_url, params={'request_id': request_id})
+ tool_info_url,
+ params={'request_id': request_id},
+ headers={'Authorization': f'Bearer {user_token}'})
response.raise_for_status()
return create_success_msg(
parse_service_response(response), request_id=request_id)
@@ -347,7 +380,9 @@ async def get_tool_info(tool_input: ExecuteTool):
@app.post('/execute_tool/')
-async def execute_tool(tool_input: ExecuteTool):
+async def execute_tool(tool_input: ExecuteTool,
+ user_token: str = Depends(get_user_token),
+ auth_token: str = Depends(get_auth_token)):
request_id = str(uuid4())
@@ -378,7 +413,8 @@ async def execute_tool(tool_input: ExecuteTool):
'params': tool_input.params,
'kwargs': tool_input.kwargs,
'request_id': request_id
- })
+ },
+ headers={'Authorization': f'Bearer {user_token}'})
response.raise_for_status()
return create_success_msg(
parse_service_response(response), request_id=request_id)
@@ -387,8 +423,8 @@ async def execute_tool(tool_input: ExecuteTool):
status_code=400,
request_id=request_id,
message=
- f'Failed to execute tool for {tool_input.tool_name}_{tool_input.tenant_id}, with error {e}'
- )
+ f'Failed to execute tool for {tool_input.tool_name}_{tool_input.tenant_id}, '
+        f'with error: {e} and origin error {response.text}')
if __name__ == '__main__':
diff --git a/modelscope_agent_servers/tool_manager_server/models.py b/modelscope_agent_servers/tool_manager_server/models.py
index cffab1386..149b8efb2 100644
--- a/modelscope_agent_servers/tool_manager_server/models.py
+++ b/modelscope_agent_servers/tool_manager_server/models.py
@@ -26,6 +26,7 @@ class ToolRegisterInfo(BaseModel):
tenant_id: str
config: dict = {}
port: Optional[int] = 31513
+ tool_url: str = ''
class CreateTool(BaseModel):
@@ -33,6 +34,7 @@ class CreateTool(BaseModel):
tenant_id: str = 'default'
tool_cfg: dict = {}
tool_image: str = 'modelscope-agent/tool-node:latest'
+ tool_url: str = ''
class ExecuteTool(BaseModel):
diff --git a/modelscope_agent_servers/tool_manager_server/sandbox.py b/modelscope_agent_servers/tool_manager_server/sandbox.py
index 0093c614d..a279e3653 100644
--- a/modelscope_agent_servers/tool_manager_server/sandbox.py
+++ b/modelscope_agent_servers/tool_manager_server/sandbox.py
@@ -45,7 +45,7 @@ def init_docker_container(docker_client, tool: ToolRegisterInfo):
container_port = f'{tool.port}/tcp'  # inside container port (protocol could be tcp/udp)
host_port = tool.port # host port
port_bindings = {container_port: host_port}
- command = f'uvicorn modelscope_agent_servers.tool_node_server.api:app --host 0.0.0.0 --port {tool.port}'
+ command = f'/bin/bash /app/run_tool_node.sh {tool.port}'
container = docker_client.containers.run(
tool.image,
@@ -55,7 +55,10 @@ def init_docker_container(docker_client, tool: ToolRegisterInfo):
name=tool.node_name,
detach=True,
ports=port_bindings,
- environment={'PORT': 41111})
+ environment={
+ 'TOOL_OSS_URL': tool.tool_url,
+ 'TOOL_NAME': tool.tool_name
+ })
return container
@@ -117,7 +120,7 @@ def start_docker_container(tool: ToolRegisterInfo):
if elapsed > TIMEOUT:
break
# make configuration for class or copy remote github repo to docker container
- inject_tool_info_to_container(container, tool)
+ # inject_tool_info_to_container(container, tool)
return container
except Exception as e:
raise Exception(
diff --git a/requirements.txt b/requirements.txt
index b2de66382..667e68fdb 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -10,7 +10,7 @@ langchain-experimental
llama-index
llama-index-readers-json
llama-index-retrievers-bm25
-modelscope >=1.10.0,<=1.12.0
+modelscope >=1.15.0
moviepy
openai
opencv-python
@@ -21,9 +21,7 @@ pydantic>=2.3.0
pytest
pytest-mock
python-dotenv
-ray>=2.9.4
seaborn
sentencepiece
tiktoken
-transformers
unstructured
diff --git a/scripts/run_tool_node.sh b/scripts/run_tool_node.sh
new file mode 100644
index 000000000..c080940ac
--- /dev/null
+++ b/scripts/run_tool_node.sh
@@ -0,0 +1,57 @@
+#!/bin/bash
+
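+# Invoked by the tool manager sandbox roughly as (see sandbox.py; values are examples):
+#   TOOL_OSS_URL=<zip url> TOOL_NAME=<tool name> /bin/bash /app/run_tool_node.sh <port>
+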
+# get OSS_URL from ENV
+OSS_URL=${TOOL_OSS_URL}
+ZIP_FILE_NAME="new_tool.zip"
+DESTINATION_FOLDER="/app/modelscope_agent/tools/contrib/new_tool"
+
+mkdir -p /app/assets
+echo "{\"name\": \"${TOOL_NAME}\"}" > /app/assets/configuration.json
+
+# if OSS_URL is empty, just start a plain tool node server and stop here.
+if [ -z "${OSS_URL}" ]; then
+  exec uvicorn modelscope_agent_servers.tool_node_server.api:app --host 0.0.0.0 --port "$1"
+fi
+
+# Make sure the destination folder exists
+mkdir -p "${DESTINATION_FOLDER}"
+
+# download the zip file
+wget -O "${ZIP_FILE_NAME}" "${OSS_URL}"
+
+# check if download is successful
+if [ $? -ne 0 ]; then
+ echo "Download failed."
+ exit 1
+else
+ echo "Downloaded ${ZIP_FILE_NAME} successfully."
+
+ # unzip the downloaded file
+ unzip -o "${ZIP_FILE_NAME}" -d "${DESTINATION_FOLDER}"
+  for subfolder in "${DESTINATION_FOLDER}"/*; do
+    if [ -d "$subfolder" ]; then # Check if it's a directory
+      find "$subfolder" -type f -exec mv {} "${DESTINATION_FOLDER}"/ \;
+      # remove the now-empty subdirectory
+      rmdir "$subfolder"
+    fi
+  done
+ echo "from .new_tool import *" >> /app/modelscope_agent/tools/contrib/__init__.py
+
+ # check if extraction is successful
+ if [ $? -ne 0 ]; then
+ echo "Extraction failed."
+ exit 1
+ else
+ echo "Extracted ${ZIP_FILE_NAME} into ${DESTINATION_FOLDER}."
+
+ # clean up the downloaded zip file
+ rm "${ZIP_FILE_NAME}"
+ echo "Removed the downloaded zip file."
+ fi
+fi
+
+# get config from ENV
+TOOL_NAME=${TOOL_NAME}
+
+uvicorn modelscope_agent_servers.tool_node_server.api:app --host 0.0.0.0 --port "$1"
+#sleep 90m
diff --git a/tests/utils/test_git_clone.py b/tests/utils/test_git_clone.py
new file mode 100644
index 000000000..ecc791182
--- /dev/null
+++ b/tests/utils/test_git_clone.py
@@ -0,0 +1,44 @@
+import os
+import shutil
+import tempfile
+
+import pytest
+from modelscope_agent.utils.git import clone_git_repository
+
+
+@pytest.fixture(scope='function')
+def temp_dir():
+ # create temp dir
+ temp_directory = tempfile.mkdtemp()
+ yield temp_directory
+ # delete temp dir after test
+ shutil.rmtree(temp_directory)
+
+
+def test_clone_git_repository_success(temp_dir):
+ # use temp dir as folder name
+ repo_url = 'http://www.modelscope.cn/studios/zhicheng/zzc_tool_test.git'
+ branch_name = 'master'
+ folder_name = temp_dir
+
+    # clone the repo into the temp dir
+ clone_git_repository(repo_url, branch_name, folder_name)
+
+ # check if success
+ assert os.listdir(
+ folder_name) != [], 'Directory should not be empty after cloning'
+
+
+def test_clone_git_repository_failed(temp_dir):
+ # use temp dir as folder name
+ repo_url = 'http://www.modelscope.cn/studios/zhicheng/zzc_tool_test1.git'
+ branch_name = 'master'
+ folder_name = temp_dir
+
+    # attempt to clone; the invalid repo should raise
+    with pytest.raises(RuntimeError):
+        clone_git_repository(repo_url, branch_name, folder_name)
+
+    # the directory should stay empty after the failed clone
+    assert os.listdir(
+        folder_name) == [], 'Directory should be empty after a failed clone'