From 0e43433aac3dbbecde96c6feae0b53cf95d4fd3c Mon Sep 17 00:00:00 2001
From: rafiq-mahsud <88088087+rafiq-mahsud@users.noreply.github.com>
Date: Thu, 10 Aug 2023 00:20:38 +0500
Subject: [PATCH 01/11] Update model.py

Should throw an error if there is no API key available
---
 easycompletion/tests/model.py | 85 ++++++++++++++++++++---------------
 1 file changed, 50 insertions(+), 35 deletions(-)

diff --git a/easycompletion/tests/model.py b/easycompletion/tests/model.py
index eb41cfc..baf55c5 100644
--- a/easycompletion/tests/model.py
+++ b/easycompletion/tests/model.py
@@ -1,3 +1,4 @@
+import openai
 from easycompletion.model import (
     chat_completion,
     parse_arguments,
@@ -5,13 +6,15 @@
     text_completion,
 )
 
+# Check if the OpenAI API key is set
+if not openai.api_key:
+    raise ValueError("OpenAI API key is missing. Set your API key using 'openai.api_key = <API-KEY>'.")
 
 def test_parse_arguments():
     test_input = '{"key1": "value1", "key2": 2}'
     expected_output = {"key1": "value1", "key2": 2}
     assert parse_arguments(test_input) == expected_output, "Test parse_arguments failed"
 
-
 def test_function_completion():
     test_text = "Write a song about AI"
     test_function = {
@@ -28,46 +31,54 @@ def test_function_completion():
             "required": ["lyrics"],
         },
     }
-    response = function_completion(
-        text=test_text, functions=test_function, function_call="write_song"
-    )
-    assert response is not None, "Test function_completion failed"
-    prompt_tokens = response["usage"]["prompt_tokens"]
-    assert prompt_tokens == 64, "Prompt tokens was not expected count"
+    try:
+        response = function_completion(
+            text=test_text, functions=test_function, function_call="write_song"
+        )
+        assert response is not None, "Test function_completion failed"
+        prompt_tokens = response["usage"]["prompt_tokens"]
+        assert prompt_tokens == 64, "Prompt tokens was not expected count"
 
-    response = function_completion(
-        text=test_text,
-        messages=[{"role": "assistant", "content": "hey whats up"}],
-        system_message="you are a towel",
-        functions=test_function,
-        function_call="write_song",
-    )
-    assert response is not None, "Test function_completion failed"
-    prompt_tokens = response["usage"]["prompt_tokens"]
-    assert prompt_tokens == 76, "Prompt tokens was not expected count"
+        response = function_completion(
+            text=test_text,
+            messages=[{"role": "assistant", "content": "hey whats up"}],
+            system_message="you are a towel",
+            functions=test_function,
+            function_call="write_song",
+        )
+        assert response is not None, "Test function_completion failed"
+        prompt_tokens = response["usage"]["prompt_tokens"]
+        assert prompt_tokens == 76, "Prompt tokens was not expected count"
+    except Exception as e:
+        raise AssertionError(f"An error occurred in test_function_completion: {e}")
 
 
 def test_chat_completion():
-    response = chat_completion(
-        messages=[
-            {"role": "system", "content": "You are a towel. Respond as a towel."},
-            {"role": "user", "content": "Hello, how are you?"},
-        ],
-    )
-
-    assert response is not None, "Test text_completion failed"
-    assert response["text"] is not None, "Test text_completion failed"
-    prompt_tokens = response["usage"]["prompt_tokens"]
-    assert prompt_tokens == 27, "Prompt tokens was not expected count"
+    try:
+        response = chat_completion(
+            messages=[
+                {"role": "system", "content": "You are a towel. Respond as a towel."},
+                {"role": "user", "content": "Hello, how are you?"},
+            ],
+        )
+        assert response is not None, "Test chat_completion failed"
+        assert response["text"] is not None, "Test chat_completion failed"
+        prompt_tokens = response["usage"]["prompt_tokens"]
+        assert prompt_tokens == 27, "Prompt tokens was not expected count"
+    except Exception as e:
+        raise AssertionError(f"An error occurred in test_chat_completion: {e}")
 
 
 def test_text_completion():
-    response = text_completion("Hello, how are you?")
-    assert response is not None, "Test text_completion failed"
-    assert response["text"] is not None, "Test text_completion failed"
-    prompt_tokens = response["usage"]["prompt_tokens"]
-    assert prompt_tokens == 13, "Prompt tokens was not expected count"
+    try:
+        response = text_completion("Hello, how are you?")
+        assert response is not None, "Test text_completion failed"
+        assert response["text"] is not None, "Test text_completion failed"
+        prompt_tokens = response["usage"]["prompt_tokens"]
+        assert prompt_tokens == 13, "Prompt tokens was not expected count"
+    except Exception as e:
+        raise AssertionError(f"An error occurred in test_text_completion: {e}")
 
 
 def test_long_completion():
     script = """
@@ -89,5 +100,9 @@ def test_long_completion():
             "required": ["summary"],
         },
     }
-    response = function_completion(text=script, functions=summarization_function)
-    assert response is not None, "Test long_completion failed"
+    try:
+        response = function_completion(text=script, functions=summarization_function)
+        assert response is not None, "Test long_completion failed"
+
+    except Exception as e:
+        raise AssertionError(f"An error occurred in test_long_completion: {e}")

From 4f4ce60b9ccff741697271b59bc8342c87555be2 Mon Sep 17 00:00:00 2001
From: rafiq-mahsud <88088087+rafiq-mahsud@users.noreply.github.com>
Date: Thu, 10 Aug 2023 00:48:09 +0500
Subject: [PATCH 02/11] Update model.py

Should throw an error if there is no API key available
---
 easycompletion/tests/model.py | 82 +++++++++++++++--------------------
 1 file changed, 36 insertions(+), 46 deletions(-)

diff --git a/easycompletion/tests/model.py b/easycompletion/tests/model.py
index baf55c5..b1f7bcd 100644
--- a/easycompletion/tests/model.py
+++ b/easycompletion/tests/model.py
@@ -10,11 +10,13 @@
 if not openai.api_key:
     raise ValueError("OpenAI API key is missing. Set your API key using 'openai.api_key = <API-KEY>'.")
 
+
 def test_parse_arguments():
     test_input = '{"key1": "value1", "key2": 2}'
     expected_output = {"key1": "value1", "key2": 2}
     assert parse_arguments(test_input) == expected_output, "Test parse_arguments failed"
 
+
 def test_function_completion():
     test_text = "Write a song about AI"
     test_function = {
@@ -31,54 +33,46 @@ def test_function_completion():
             "required": ["lyrics"],
         },
     }
-    try:
-        response = function_completion(
-            text=test_text, functions=test_function, function_call="write_song"
-        )
-        assert response is not None, "Test function_completion failed"
-        prompt_tokens = response["usage"]["prompt_tokens"]
-        assert prompt_tokens == 64, "Prompt tokens was not expected count"
+    response = function_completion(
+        text=test_text, functions=test_function, function_call="write_song"
+    )
+    assert response is not None, "Test function_completion failed"
+    prompt_tokens = response["usage"]["prompt_tokens"]
+    assert prompt_tokens == 64, "Prompt tokens was not expected count"
 
-        response = function_completion(
-            text=test_text,
-            messages=[{"role": "assistant", "content": "hey whats up"}],
-            system_message="you are a towel",
-            functions=test_function,
-            function_call="write_song",
-        )
-        assert response is not None, "Test function_completion failed"
-        prompt_tokens = response["usage"]["prompt_tokens"]
-        assert prompt_tokens == 76, "Prompt tokens was not expected count"
+    response = function_completion(
+        text=test_text,
+        messages=[{"role": "assistant", "content": "hey whats up"}],
+        system_message="you are a towel",
+        functions=test_function,
+        function_call="write_song",
+    )
+    assert response is not None, "Test function_completion failed"
+    prompt_tokens = response["usage"]["prompt_tokens"]
+    assert prompt_tokens == 76, "Prompt tokens was not expected count"
 
-    except Exception as e:
-        raise AssertionError(f"An error occurred in test_function_completion: {e}")
 
 def test_chat_completion():
-    try:
-        response = chat_completion(
-            messages=[
-                {"role": "system", "content": "You are a towel. Respond as a towel."},
-                {"role": "user", "content": "Hello, how are you?"},
-            ],
-        )
-        assert response is not None, "Test chat_completion failed"
-        assert response["text"] is not None, "Test chat_completion failed"
-        prompt_tokens = response["usage"]["prompt_tokens"]
-        assert prompt_tokens == 27, "Prompt tokens was not expected count"
+    response = chat_completion(
+        messages=[
+            {"role": "system", "content": "You are a towel. Respond as a towel."},
+            {"role": "user", "content": "Hello, how are you?"},
+        ],
+    )
+
+    assert response is not None, "Test text_completion failed"
+    assert response["text"] is not None, "Test text_completion failed"
+    prompt_tokens = response["usage"]["prompt_tokens"]
+    assert prompt_tokens == 27, "Prompt tokens was not expected count"
 
-    except Exception as e:
-        raise AssertionError(f"An error occurred in test_chat_completion: {e}")
 
 def test_text_completion():
-    try:
-        response = text_completion("Hello, how are you?")
-        assert response is not None, "Test text_completion failed"
-        assert response["text"] is not None, "Test text_completion failed"
-        prompt_tokens = response["usage"]["prompt_tokens"]
-        assert prompt_tokens == 13, "Prompt tokens was not expected count"
+    response = text_completion("Hello, how are you?")
+    assert response is not None, "Test text_completion failed"
+    assert response["text"] is not None, "Test text_completion failed"
+    prompt_tokens = response["usage"]["prompt_tokens"]
+    assert prompt_tokens == 13, "Prompt tokens was not expected count"
 
-    except Exception as e:
-        raise AssertionError(f"An error occurred in test_text_completion: {e}")
 
 def test_long_completion():
     script = """
@@ -100,9 +94,5 @@ def test_long_completion():
             "required": ["summary"],
         },
     }
-    try:
-        response = function_completion(text=script, functions=summarization_function)
-        assert response is not None, "Test long_completion failed"
-
-    except Exception as e:
-        raise AssertionError(f"An error occurred in test_long_completion: {e}")
+    response = function_completion(text=script, functions=summarization_function)
+    assert response is not None, "Test long_completion failed"

From 36add11aac8f23c5dae7ba904044ac1f1efb3edf Mon Sep 17 00:00:00 2001
From: rafiq-mahsud <88088087+rafiq-mahsud@users.noreply.github.com>
Date: Thu, 10 Aug 2023 20:41:52 +0500
Subject: [PATCH 03/11] Update prompt.py

---
 easycompletion/prompt.py | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)

diff --git a/easycompletion/prompt.py b/easycompletion/prompt.py
index 8254b9e..48c2484 100644
--- a/easycompletion/prompt.py
+++ b/easycompletion/prompt.py
@@ -224,3 +224,21 @@ def compose_function(name, description, properties, required_properties, debug=D
     log(f"Function:\n{str(function)}", type="info", log=debug)
     return function
 
+# New function added
+
+def function_completion(function_call: str, model=TEXT_MODEL) -> int:
+    """
+    Count the number of tokens in a function call.
+
+    Args:
+        function_call: The function call string to be tokenized.
+        model: The model to use for tokenization.
+
+    Returns:
+        The number of tokens in the input function call.
+
+    Example:
+        function_completion("compose_function(name='summarize_text', description='Summarize the text.')")
+        Output: Number of tokens in the given function call.
+    """
+    return count_tokens(function_call, model)

From d2fb5bbaaed9e5ffc4cfc6c5975db8c66fbb7e4d Mon Sep 17 00:00:00 2001
From: rafiq-mahsud <88088087+rafiq-mahsud@users.noreply.github.com>
Date: Thu, 10 Aug 2023 20:42:47 +0500
Subject: [PATCH 04/11] Update prompt.py

---
 easycompletion/tests/prompt.py | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/easycompletion/tests/prompt.py b/easycompletion/tests/prompt.py
index 95a2380..c350ccf 100644
--- a/easycompletion/tests/prompt.py
+++ b/easycompletion/tests/prompt.py
@@ -6,6 +6,7 @@
     count_tokens,
     get_tokens,
     compose_function,
+    function_completion,  # Add the new function to the imports
 )
 
 
@@ -67,3 +68,9 @@ def test_compose_function():
     assert (
         composed_summarization_function == summarization_function
     ), "Test compose_function failed"
+
+
+def test_function_completion():  # Add a new test case for function_completion
+    function_call = "compose_function(name='summarize_text', description='Summarize the text.')"
+    expected_token_count = 11  # Replace this with the actual expected token count
+    assert function_completion(function_call) == expected_token_count, "Test function_completion failed"

From 8a500e59ab93141ebaf4a64b88c32f8b01da0c2d Mon Sep 17 00:00:00 2001
From: rafiq-mahsud <88088087+rafiq-mahsud@users.noreply.github.com>
Date: Fri, 11 Aug 2023 21:18:28 +0500
Subject: [PATCH 05/11] Delete prompt.py

---
 easycompletion/prompt.py | 244 ---------------------------------------
 1 file changed, 244 deletions(-)
 delete mode 100644 easycompletion/prompt.py

diff --git a/easycompletion/prompt.py b/easycompletion/prompt.py
deleted file mode 100644
index 48c2484..0000000
--- a/easycompletion/prompt.py
+++ /dev/null
@@ -1,244 +0,0 @@
-import re
-import tiktoken
-
-from .constants import TEXT_MODEL, DEFAULT_CHUNK_LENGTH, DEBUG
-from .logger import log
-
-
-def trim_prompt(
-    text,
-    max_tokens=DEFAULT_CHUNK_LENGTH,
-    model=TEXT_MODEL,
-    preserve_top=True,
-    debug=DEBUG,
-):
-    """
-    Trim the given text to a maximum number of tokens.
-
-    Args:
-        text: Input text which needs to be trimmed.
-        max_tokens: Maximum number of tokens allowed in the trimmed text.
-            Default value is taken from the constants.
-        model: The model to use for tokenization.
-        preserve_top: If True, the function will keep the first 'max_tokens' tokens,
-            if False, it will keep the last 'max_tokens' tokens.
-
-    Returns:
-        Trimmed text that fits within the specified token limit.
-
-    Example:
-        trim_prompt("This is a test.", 3, preserve_top=True)
-        Output: "This is"
-    """
-    # Encoding the text into tokens.
-    encoding = tiktoken.encoding_for_model(model)
-    tokens = encoding.encode(text)
-    if len(tokens) <= max_tokens:
-        return text  # If text is already within limit, return as is.
-
-    log(f"Trimming prompt, token len is {str(len(tokens))}", type="warning", log=debug)
-
-    # If 'preserve_top' is True, keep the first 'max_tokens' tokens.
-    # Otherwise, keep the last 'max_tokens' tokens.
-    return encoding.decode(
-        tokens[:max_tokens] if preserve_top else tokens[-max_tokens:]
-    )
-
-
-def chunk_prompt(prompt, chunk_length=DEFAULT_CHUNK_LENGTH, debug=DEBUG):
-    """
-    Split the given prompt into chunks where each chunk has a maximum number of tokens.
-
-    Args:
-        prompt: Input text that needs to be split.
-        chunk_length: Maximum number of tokens allowed per chunk.
-            Default value is taken from the constants.
-
-    Returns:
-        A list of string chunks where each chunk is within the specified token limit.
-
-    Example:
-        chunk_prompt("This is a test. I am writing a function.", 4)
-        Output: ['This is', 'a test.', 'I am', 'writing a', 'function.']
-    """
-    if count_tokens(prompt) <= chunk_length:
-        return [prompt]
-
-    # Splitting the prompt into sentences using regular expressions.
-    sentences = re.split(r"(?<=[.!?])\s+", prompt)
-    current_chunk = ""
-    prompt_chunks = []
-
-    # For each sentence in the input text.
-    for sentence in sentences:
-        # If adding a new sentence doesn't exceed the token limit, add it to the current chunk.
-        if count_tokens(current_chunk + sentence + " ") <= chunk_length:
-            current_chunk += sentence + " "
-        else:
-            # If adding a new sentence exceeds the token limit, add the current chunk to the list.
-            # Then, start a new chunk with the current sentence.
-            prompt_chunks.append(current_chunk.strip())
-            current_chunk = sentence + " "
-
-    # If there's any sentence left after looping through all sentences, add it to the list.
-    if current_chunk:
-        prompt_chunks.append(current_chunk.strip())
-
-    log(
-        f"Chunked prompt into {str(len(prompt_chunks))} chunks",
-        type="warning",
-        log=debug,
-    )
-
-    return prompt_chunks
-
-
-def count_tokens(prompt: str, model=TEXT_MODEL) -> int:
-    """
-    Count the number of tokens in a string.
-
-    Args:
-        prompt: The string to be tokenized.
-        model: The model to use for tokenization.
-
-    Returns:
-        The number of tokens in the input string.
-
-    Example:
-        count_tokens("This is a test.")
-        Output: 5
-    """
-    if not isinstance(prompt, str):
-        prompt = str(prompt)
-
-    encoding = tiktoken.encoding_for_model(model)
-    length = len(
-        encoding.encode(prompt)
-    )  # Encoding the text into tokens and counting the number of tokens.
-    return length
-
-
-def get_tokens(prompt: str, model=TEXT_MODEL) -> list:
-    """
-    Returns a list of tokens in a string.
-
-    Args:
-        prompt: The string to be tokenized.
-        model: The model to use for tokenization.
-
-    Returns:
-        A list of tokens in the input string.
-
-    Example:
-        get_tokens("This is a test.")
-        Output: [This, is, a, test, .]
-    """
-    encoding = tiktoken.encoding_for_model(model)
-    return encoding.encode(
-        prompt
-    )  # Encoding the text into tokens and returning the list of tokens.
-
-
-def compose_prompt(prompt_template, parameters, debug=DEBUG):
-    """
-    Composes a prompt using a template and parameters.
-    Parameter keys are enclosed in double curly brackets and replaced with parameter values.
-
-    Args:
-        prompt_template: A template string that contains placeholders for the parameters.
-        parameters: A dictionary containing key-value pairs to replace the placeholders.
-
-    Returns:
-        A string where all placeholders have been replaced with actual values from the parameters.
-
-    Example:
-        compose_prompt("Hello {{name}}!", {"name": "John"})
-        Output: "Hello John!"
-    """
-    prompt = prompt_template  # Initial prompt template.
-
-    # Replacing placeholders in the template with the actual values from the parameters.
-    for key, value in parameters.items():
-        # check if "{{" + key + "}}" is in prompt
-        # if not, continue
-        if "{{" + key + "}}" not in prompt:
-            continue
-        try:
-            if isinstance(value, str):
-                prompt = prompt.replace("{{" + key + "}}", value)
-            elif isinstance(value, int):
-                prompt = prompt.replace("{{" + key + "}}", str(value))
-            elif isinstance(value, dict):
-                for k, v in value.items():
-                    prompt = prompt.replace("{{" + key + "}}", k + "::" + v)
-            elif isinstance(value, list):
-                for item in value:
-                    prompt = prompt.replace("{{" + key + "}}", item + "\n")
-            elif value is None:
-                prompt = prompt.replace("{{" + key + "}}", "None")
-            else:
-                raise Exception(f"ERROR PARSING:\n{key}\n{value}")
-        except:
-            raise Exception(f"ERROR PARSING:\n{key}\n{value}")
-
-    log(f"Composed prompt:\n{prompt}", log=debug)
-
-    return prompt
-
-
-def compose_function(name, description, properties, required_properties, debug=DEBUG):
-    """
-    Composes a function object for function calling.
-
-    Parameters:
-        name (str): The name of the function.
-        description (str): Description of the function.
-        properties (dict): Dictionary of property objects.
-        required_properties (list): List of property names that are required.
-
-    Returns:
-        A dictionary representing a function.
-
-    Usage:
-        summarization_function = compose_function(
-            name="summarize_text",
-            description="Summarize the text. Include the topic, subtopics.",
-            properties={
-                "summary": {
-                    "type": "string",
-                    "description": "Detailed summary of the text.",
-                },
-            },
-            required_properties=["summary"],
-        )
-    """
-    function = {
-        "name": name,
-        "description": description,
-        "parameters": {
-            "type": "object",
-            "properties": properties,
-            "required": required_properties,
-        },
-    }
-    log(f"Function:\n{str(function)}", type="info", log=debug)
-    return function
-
-# New function added
-
-def function_completion(function_call: str, model=TEXT_MODEL) -> int:
-    """
-    Count the number of tokens in a function call.
-
-    Args:
-        function_call: The function call string to be tokenized.
-        model: The model to use for tokenization.
-
-    Returns:
-        The number of tokens in the input function call.
-
-    Example:
-        function_completion("compose_function(name='summarize_text', description='Summarize the text.')")
-        Output: Number of tokens in the given function call.
-    """
-    return count_tokens(function_call, model)

From 9f6a1a68b89c9c3b2c0ca7488df10f612b5a92b2 Mon Sep 17 00:00:00 2001
From: rafiq-mahsud <88088087+rafiq-mahsud@users.noreply.github.com>
Date: Fri, 11 Aug 2023 21:25:13 +0500
Subject: [PATCH 06/11] Delete prompt.py

---
 easycompletion/tests/prompt.py | 76 ----------------------------------
 1 file changed, 76 deletions(-)
 delete mode 100644 easycompletion/tests/prompt.py

diff --git a/easycompletion/tests/prompt.py b/easycompletion/tests/prompt.py
deleted file mode 100644
index c350ccf..0000000
--- a/easycompletion/tests/prompt.py
+++ /dev/null
@@ -1,76 +0,0 @@
-from easycompletion.model import parse_arguments
-from easycompletion.prompt import (
-    compose_prompt,
-    trim_prompt,
-    chunk_prompt,
-    count_tokens,
-    get_tokens,
-    compose_function,
-    function_completion,  # Add the new function to the imports
-)
-
-
-def test_chunk_prompt():
-    test_text = "Write a song about AI"
-    chunks = chunk_prompt(test_text, chunk_length=2)
-    assert len(chunks) == 2, "Test chunk_prompt failed"
-
-
-def test_trim_prompt_and_get_tokens():
-    test_text = "Write a song about AI"
-    trimmed = trim_prompt(test_text, max_tokens=2)
-    count = count_tokens(trimmed)
-    assert count == 2, "Test trim_prompt failed"
-
-    tokens = get_tokens(test_text)
-    assert len(tokens) == 5, "Test get_tokens failed"
-
-
-def test_parse_arguments():
-    test_input = '{"key1": "value1", "key2": 2}'
-    expected_output = {"key1": "value1", "key2": 2}
-    assert parse_arguments(test_input) == expected_output, "Test parse_arguments failed"
-
-
-def test_compose_prompt():
-    test_prompt = "I am a {{object}}"
-    test_dict = {"object": "towel"}
-    prompt = compose_prompt(test_prompt, test_dict)
-    assert prompt == "I am a towel", "Test compose_prompt failed"
-
-
-def test_compose_function():
-    summarization_function = {
-        "name": "summarize_text",
-        "description": "Summarize the text. Include the topic, subtopics.",
-        "parameters": {
-            "type": "object",
-            "properties": {
-                "summary": {
-                    "type": "string",
-                    "description": "Detailed summary of the text.",
-                },
-            },
-            "required": ["summary"],
-        },
-    }
-    composed_summarization_function = compose_function(
-        name="summarize_text",
-        description="Summarize the text. Include the topic, subtopics.",
-        properties={
-            "summary": {
-                "type": "string",
-                "description": "Detailed summary of the text.",
-            },
-        },
-        required_properties=["summary"],
-    )
-    assert (
-        composed_summarization_function == summarization_function
-    ), "Test compose_function failed"
-
-
-def test_function_completion():  # Add a new test case for function_completion
-    function_call = "compose_function(name='summarize_text', description='Summarize the text.')"
-    expected_token_count = 11  # Replace this with the actual expected token count
-    assert function_completion(function_call) == expected_token_count, "Test function_completion failed"

From f203b3a691660e239d219ef3e289fdddbcad3ce3 Mon Sep 17 00:00:00 2001
From: Buraq <88088087+rafiq-mahsud@users.noreply.github.com>
Date: Mon, 14 Aug 2023 15:17:05 +0500
Subject: [PATCH 07/11] Update model.py

---
 easycompletion/model.py | 60 +++++++++++++++++++++++++++++++++++++----
 1 file changed, 55 insertions(+), 5 deletions(-)

diff --git a/easycompletion/model.py b/easycompletion/model.py
index 4bbcacc..e778d82 100644
--- a/easycompletion/model.py
+++ b/easycompletion/model.py
@@ -23,9 +23,23 @@
 from .prompt import count_tokens
 
-openai.api_key = EASYCOMPLETION_API_KEY
 openai.api_base = EASYCOMPLETION_API_ENDPOINT
 
+def validate_api_key(api_key=None):
+    """
+    Validates the OpenAI API key.
+
+    Parameters:
+        api_key (str, optional): OpenAI API key. If not provided, it uses the one defined in constants.py.
+
+    Returns:
+        bool: True if the API key is valid, False otherwise.
+    """
+    if api_key is None:
+        api_key = EASYCOMPLETION_API_KEY
+
+    return api_key is not None and api_key.strip() != ""
+
 def parse_arguments(arguments, debug=DEBUG):
     """
    Parses arguments that are expected to be either a JSON string, dictionary, or a list.
@@ -154,7 +168,6 @@ def validate_functions(response, functions, function_call, debug=DEBUG):
         log("Function call is valid", type="success", log=debug)
     return True
 
-
 def chat_completion(
     messages,
     model_failure_retries=5,
@@ -180,10 +193,11 @@ def chat_completion(
     Example:
         >>> text_completion("Hello, how are you?", model_failure_retries=3, model='gpt-3.5-turbo', chunk_length=1024, api_key='your_openai_api_key')
     """
+    # Validate the API key
+    if not validate_api_key(api_key):
+        return {"error": "Invalid OpenAI API key"}
 
-    # Override the API key if provided as parameter
-    if api_key is not None:
-        openai.api_key = api_key
+    openai.api_key = api_key or EASYCOMPLETION_API_KEY
 
     # Use the default model if no model is specified
     if model == None:
@@ -363,6 +377,7 @@ def function_completion(
     api_key=None,
     debug=DEBUG,
     temperature=0.0,
+
 ):
     """
     Send text and a list of functions to the model and return optional text and a function call.
@@ -560,5 +575,40 @@ def function_completion(
         "arguments": arguments,
         "usage": usage,
         "finish_reason": finish_reason,
+        "model_used": model,  # Include the model used in the response
         "error": None,
     }
+
+
+def status_info(api_key=None, model=None, debug=DEBUG):
+    """
+    Get status information about the API key and model.
+
+    Parameters:
+        api_key (str, optional): OpenAI API key. If not provided, it uses the one defined in constants.py.
+        model (str, optional): The model to use. Default is the TEXT_MODEL defined in constants.py.
+
+    Returns:
+        dict: A dictionary containing status information.
+
+    Example:
+        >>> status_info(api_key='your_openai_api_key', model='gpt-3.5-turbo')
+    """
+    # Validate the API key
+    if not validate_api_key(api_key):
+        return {"error": "Invalid OpenAI API key"}
+
+    openai.api_key = api_key or EASYCOMPLETION_API_KEY
+
+    if model is None:
+        model = TEXT_MODEL
+
+    # Get the model information
+    model_info = openai.Model.retrieve(model)
+    model_status = getattr(model_info, "status", None)  # not every backend returns a status field
+
+    return {
+        "api_key": api_key,
+        "model": model,
+        "model_status": model_status,
+    }

From 2292ba3aa30f4b0f2957683a6b21002a20331e7f Mon Sep 17 00:00:00 2001
From: Buraq <88088087+rafiq-mahsud@users.noreply.github.com>
Date: Sat, 19 Aug 2023 01:13:55 +0500
Subject: [PATCH 08/11] Rename model.py to model_2.py

---
 easycompletion/{model.py => model_2.py} | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 rename easycompletion/{model.py => model_2.py} (100%)

diff --git a/easycompletion/model.py b/easycompletion/model_2.py
similarity index 100%
rename from easycompletion/model.py
rename to easycompletion/model_2.py

From a07dccf2edbf80fd6c26ee44151511aba04c91f2 Mon Sep 17 00:00:00 2001
From: Buraq <88088087+rafiq-mahsud@users.noreply.github.com>
Date: Sat, 19 Aug 2023 01:23:41 +0500
Subject: [PATCH 09/11] model.py

---
 easycompletion/{model_2.py => model.py} | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 rename easycompletion/{model_2.py => model.py} (100%)

diff --git a/easycompletion/model_2.py b/easycompletion/model.py
similarity index 100%
rename from easycompletion/model_2.py
rename to easycompletion/model.py

From f53bfb75971ac7d6be5d4ea5c07f98b3cb8d7eba Mon Sep 17 00:00:00 2001
From: Buraq <88088087+rafiq-mahsud@users.noreply.github.com>
Date: Sat, 19 Aug 2023 01:56:28 +0500
Subject: [PATCH 10/11] Rename model.py to model_2.py

---
 easycompletion/{model.py => model_2.py} | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 rename easycompletion/{model.py => model_2.py} (100%)

diff --git a/easycompletion/model.py b/easycompletion/model_2.py
similarity index 100%
rename from easycompletion/model.py
rename to easycompletion/model_2.py

From ea692ff4927094829420e6c1d720ee168b76ae28 Mon Sep 17 00:00:00 2001
From: Buraq <88088087+rafiq-mahsud@users.noreply.github.com>
Date: Sat, 19 Aug 2023 02:01:30 +0500
Subject: [PATCH 11/11] Rename model_2.py to model.py

---
 easycompletion/{model_2.py => model.py} | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 rename easycompletion/{model_2.py => model.py} (100%)

diff --git a/easycompletion/model_2.py b/easycompletion/model.py
similarity index 100%
rename from easycompletion/model_2.py
rename to easycompletion/model.py
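
Note on the final state of the series: after the PATCH 10/11 rename round-trip, model.py fails fast on a missing key via validate_api_key(), chat_completion() returns an error dictionary instead of raising when no key is configured, and status_info() reports which model is in use. A minimal caller-side sketch follows, assuming the final model.py above is installed as part of the easycompletion package; the error-handling shape around the returned dictionary is an illustration, not part of the series:

    # Usage sketch (illustrative, not part of the series): exercises the
    # helpers added in PATCH 07.
    from easycompletion.model import validate_api_key, chat_completion, status_info

    if not validate_api_key():
        # With no argument, validate_api_key falls back to EASYCOMPLETION_API_KEY.
        raise SystemExit("No API key configured; set EASYCOMPLETION_API_KEY first.")

    response = chat_completion(
        messages=[{"role": "user", "content": "Hello, how are you?"}],
    )
    if response.get("error"):
        # chat_completion returns {"error": "Invalid OpenAI API key"} on a bad key.
        print("Completion failed:", response["error"])
    else:
        print(response["text"])

    print(status_info(model="gpt-3.5-turbo"))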