From ee54195e7cdeb2d558f35755c55f7f5fecbee694 Mon Sep 17 00:00:00 2001
From: Jonathan Lessinger
Date: Fri, 17 Nov 2023 11:26:55 -0500
Subject: [PATCH 1/2] cli-mate: restructure input loop around typed commands

---
 cookbooks/Cli-Mate/cli-mate.py | 178 +++++++++++++++++++++++++--------
 1 file changed, 138 insertions(+), 40 deletions(-)

diff --git a/cookbooks/Cli-Mate/cli-mate.py b/cookbooks/Cli-Mate/cli-mate.py
index 7cdaf9e6a..a5b828368 100644
--- a/cookbooks/Cli-Mate/cli-mate.py
+++ b/cookbooks/Cli-Mate/cli-mate.py
@@ -1,8 +1,13 @@
+from dataclasses import dataclass
+from textwrap import dedent
 import warnings
+
+from result import Err, Ok, Result
+
 warnings.filterwarnings("ignore")
 
 import asyncio
+from asyncio import AbstractEventLoop
 import argparse
 import signal
 import sys
@@ -81,12 +86,110 @@ async def mod_code(
         return 0
 
 
+@dataclass
+class Help:
+    pass
+
+
+@dataclass
+class Run:
+    user_input: str
+
+
+class Reload:
+    # TODO:
+    # if should_reload and source_code_file is not None:
+    #     user_input = deprefix(user_input.strip(), "reload")
+    #     with open(source_code_file.strip(), "r", encoding="utf8") as file:
+    #         source_code = file.read()
+    #     llm_input = f"QUERY ABOUT SOURCE CODE:\n{user_input}\nSOURCE CODE:\n```{source_code}\n```"
+    pass
+
+
+class Clear:
+    pass
+
+
+class Pass:
+    pass
+
+
+class MultilineToggle:
+    pass
+
+
+Command = Pass | Help | Run | Reload | Clear | MultilineToggle
+
+
+def _get_command(user_input: str) -> Command:
+    normed = user_input.strip().lower()
+    if normed in ["h", "help", "?"]:
+        return Help()
+    elif normed in ["r", "reload"]:
+        return Reload()
+    elif normed in ["c", "clear"]:
+        return Clear()
+    elif normed in ["m", "multiline"]:
+        return MultilineToggle()
+    else:
+        return Run(user_input=user_input) if normed else Pass()  # blank input is a no-op
+
+
+def _print_help():
+    print(
+        dedent(
+            """
+            Exit loop: Ctrl-D
+            Toggle multiline input mode: m or multiline
+            Clear screen: c or clear
+            Reload source code: r or reload
+            """
+        )
+    )
+
+
+async def _run_llm(
+    runtime: AIConfigRuntime, llm_input: str
+) -> Result[ExecuteResult, str]:
+    # Dynamically generate the prompt name and prompt object
+    new_prompt_name = f"prompt{len(runtime.prompts)+1}"  # e.g. "prompt3" if two prompts exist
+    new_prompt = Prompt(name=new_prompt_name, input=llm_input)
+
+    # Add the new prompt and run the model
+    runtime.add_prompt(new_prompt.name, new_prompt)
+
+    def callback(delta: Any, _: Any, __: int):
+        if state["interrupt"]:
+            raise InterruptException()
+
+        print(delta.get("content", ""), end="", flush=True)
+
+    options = InferenceOptions(stream=True, stream_callback=callback)
+    state["interrupt"] = False
+    try:
+        result = await runtime.run(new_prompt_name, {}, options=options)
+        print(flush=True)
+        return Ok(result)
+    except InterruptException:
+        return Err("interrupted")
+
+
+async def _get_raw_input(
+    event_loop: AbstractEventLoop, session: PromptSession[str], is_multiline: bool
+) -> str:
+    def _prompt():  # type: ignore
+        return session.prompt(
+            "> ",
+            multiline=is_multiline,
+        )
+
+    return await event_loop.run_in_executor(None, _prompt)
+
+
 async def loop(aiconfig_path: str, source_code_file: str | None):
     runtime = AIConfigRuntime.load(aiconfig_path)
 
     event_loop = asyncio.get_event_loop()
-
     session = PromptSession()
-
     state["interrupt"] = False
 
     def signal_handler(_: int, __: FrameType | None):
@@ -95,52 +198,47 @@
 
     signal.signal(signal.SIGINT, signal_handler)
 
-    i = 0
+    is_multiline = False
+    print("Enter 'h', 'help', or '?' for help.", flush=True)
     while True:
         try:
-            user_input = await event_loop.run_in_executor(
-                None, session.prompt, "Query: [ctrl-D to exit] "
-            )
+            raw_input = await _get_raw_input(event_loop, session, is_multiline)
         except KeyboardInterrupt:
             continue
         except EOFError:
             print("Exiting")
             break
 
-        if user_input.strip() == "":
-            continue
-
-        should_reload = user_input.strip().startswith("reload") or i == 0
-        if should_reload and source_code_file is not None:
-            user_input = deprefix(user_input.strip(), "reload")
-            with open(source_code_file.strip(), "r", encoding="utf8") as file:
-                source_code = file.read()
-            prompt = f"QUERY ABOUT SOURCE CODE:\n{user_input}\nSOURCE CODE:\n```{source_code}\n```"
-        else:
-            prompt = user_input
-
-        # Dynamically generate the prompt name and prompt object
-        new_prompt_name = f"prompt{len(runtime.prompts)+1}"  # Prompt{number of prompts}
-        new_prompt = Prompt(name=new_prompt_name, input=prompt)
-
-        # Add the new prompt and run the model
-        runtime.add_prompt(new_prompt.name, new_prompt)
-
-        def callback(delta: Any, _: Any, __: int):
-            if state["interrupt"]:
-                raise InterruptException()
-
-            print(delta.get("content", ""), end="", flush=True)
-
-        options = InferenceOptions(stream=True, stream_callback=callback)
-        state["interrupt"] = False
-        try:
-            result = await runtime.run(new_prompt_name, {}, options=options)
-            # print(f"{result=}")
-            print(flush=True)
-            i += 1
-        except InterruptException:
-            continue
+        command = _get_command(raw_input)
+
+        match command:
+            case Pass():
+                pass
+            case Help():
+                _print_help()
+            case MultilineToggle():
+                is_multiline = not is_multiline
+                print(f"Multiline input mode: {'on' if is_multiline else 'off'}")
+                if is_multiline:
+                    print("Hit option-enter to submit.")
+            case Run(user_input=user_input):
+                prompt = dedent(f"""
+                    INSTRUCTIONS: respond to the following query as concisely as possible.
+                    Do not output more tokens than necessary.
+                    QUERY: {user_input}
+                    """)
+                llm_res = await _run_llm(runtime, prompt)
+                match llm_res:
+                    case Ok(_):
+                        # TODO: do something with the result?
+                        pass
+                    case Err(msg):
+                        print(msg)
+            case Reload():
+                # TODO
+                pass
+            case Clear():
+                print("\033c", end="")
 
 
 async def main():
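For a quick sanity check of the dispatch introduced above, here is how _get_command routes raw input. The names come from the patch itself; the asserts are illustrative only and not part of the diff:

    # Illustrative only: expected mapping of raw input to Command variants.
    assert isinstance(_get_command("h"), Help)                         # also "help" and "?"
    assert isinstance(_get_command("  Multiline  "), MultilineToggle)  # input is stripped and lowercased
    assert isinstance(_get_command("explain this traceback"), Run)     # anything else goes to the LLM
    assert isinstance(_get_command(""), Pass)                          # blank input is a no-op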
From 70d8fe274908bb86989764b9dcbbca7c4c86e7ed Mon Sep 17 00:00:00 2001
From: Jonathan Lessinger
Date: Fri, 17 Nov 2023 11:26:55 -0500
Subject: [PATCH 2/2] [AIC-PY] add publish script for any PyPI pkg, small mods
 to llama ext

---
 extensions/llama/python/README.md      |  5 +++
 extensions/llama/python/pyproject.toml | 43 ++++++++++++++++++++++++++
 scripts/pypipublish.sh                 | 37 ++++++++++++++++++++++
 3 files changed, 85 insertions(+)
 create mode 100755 scripts/pypipublish.sh

diff --git a/extensions/llama/python/README.md b/extensions/llama/python/README.md
index e69de29bb..0a043b452 100644
--- a/extensions/llama/python/README.md
+++ b/extensions/llama/python/README.md
@@ -0,0 +1,5 @@
+AIConfig model parser for llama.cpp (Python bindings).
+
+Usage: see the cookbook:
+
+https://github.com/lastmile-ai/aiconfig/blob/c64224ed48ccb7f8cbd2d3a1b2e8bd250aeb9ff2/cookbooks/llama/python/ask_llama.py#L4
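The README defers to the cookbook for usage; for orientation, the general shape is sketched below. The module path, class name, registration call, and file names are assumptions, not verified against this extension — the linked ask_llama.py is authoritative:

    # Hypothetical sketch -- names below are assumptions; see the cookbook link above for real usage.
    from aiconfig import AIConfigRuntime
    from aiconfig_extension_llama import LlamaModelParser  # assumed module/class name

    # Point the parser at a local llama.cpp-compatible model, register it, then load an aiconfig.
    parser = LlamaModelParser(model_path="models/llama-2-7b-chat.gguf")  # example path
    AIConfigRuntime.register_model_parser(parser, "llama-2-7b-chat")  # assumed registration API
    config = AIConfigRuntime.load("llama-aiconfig.json")  # example config file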
diff --git a/extensions/llama/python/pyproject.toml b/extensions/llama/python/pyproject.toml
index e69de29bb..0d49ad958 100644
--- a/extensions/llama/python/pyproject.toml
+++ b/extensions/llama/python/pyproject.toml
@@ -0,0 +1,43 @@
+[build-system]
+requires = ["setuptools", "wheel"]
+
+[project]
+name = "python-aiconfig-llama"
+version = "0.0.1"
+authors = [
+  { name="Jonathan Lessinger", email="jonathan@lastmileai.dev" },
+]
+description = "llama.cpp extension for the AIConfig library"
+readme = "README.md"
+requires-python = ">=3.7"
+classifiers = [
+    "Intended Audience :: Developers",
+    "Programming Language :: Python :: 3",
+]
+dynamic = ["dependencies"]
+
+[tool.setuptools.dynamic]
+dependencies = {file = ["requirements.txt"]}
+
+[project.urls]
+"Homepage" = "https://github.com/lastmile-ai/aiconfig"
+"Bug Tracker" = "https://github.com/lastmile-ai/aiconfig/issues"
+
+# Black formatting
+[tool.black]
+line-length = 99
+include = '\.pyi?$'
+exclude = '''
+/(
+      .eggs         # exclude a few common directories in the
+    | .git          # root of the project
+    | .hg
+    | .mypy_cache
+    | .tox
+    | venv
+    | _build
+    | buck-out
+    | build
+    | dist
+)/
+'''
\ No newline at end of file
diff --git a/scripts/pypipublish.sh b/scripts/pypipublish.sh
new file mode 100755
index 000000000..18fe4205e
--- /dev/null
+++ b/scripts/pypipublish.sh
@@ -0,0 +1,37 @@
+#! /bin/zsh
+# pypipublish.sh
+# Usage: Run ./scripts/pypipublish.sh path/to/project/root conda-env-name
+# path/to/project/root is anywhere with a pyproject.toml.
+
+# NOTE: This assumes you have the named conda environment created.
+# You will be prompted for a username and password. For the username, use __token__.
+# For the password, use the token value, including the pypi- prefix.
+# To get a PyPI token, go here (scroll down to API Tokens):
+# https://pypi.org/manage/account/
+# If you have issues, read the docs: https://packaging.python.org/en/latest/tutorials/packaging-projects/
+
+# If you want to upload to testpypi, run pypipublish-test.sh instead.
+
+if [ -z "$2" ]
+then
+  echo "Usage: pypipublish.sh path/to/project/root conda-env-name"
+  exit 1
+fi
+
+
+cd "$1"
+if [ ! -f "pyproject.toml" ]
+then
+  echo "No pyproject.toml found in $1"
+  exit 1
+fi
+
+rm -rf ./dist
+
+source /opt/homebrew/Caskroom/miniconda/base/etc/profile.d/conda.sh && conda activate "$2" \
+  && python3 -m pip install --upgrade build \
+  && python3 -m build \
+  && python3 -m pip install --upgrade twine \
+  && python3 -m twine upload dist/*
+
+cd -
\ No newline at end of file
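For reference, the usage comment at the top of the script corresponds to an invocation like the following. The arguments are illustrative — any directory containing a pyproject.toml and any existing conda environment work:

    # Example: publish the llama extension from the diff above, using a conda env named "aiconfig".
    ./scripts/pypipublish.sh extensions/llama/python aiconfig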