From 827f04fe8ceceb22477b19c82e4c788b24e247ff Mon Sep 17 00:00:00 2001
From: Greg Lind
Date: Wed, 4 Sep 2024 16:03:33 -0700
Subject: [PATCH] evaluation

---
 .gitignore                            |   3 +
 mysite/settings/base.py               |   4 +-
 mysite/settings/dev.py                |   2 +
 mysite/settings/production.py         |   4 +-
 mysite/settings/rename_to_dev.py      |  37 +++++
 punchlist/models.py                   |  43 ++++++
 punchlist/templates/welcome_page.html |   4 +-
 punchlist/util.py                     | 187 ++++++++++++++++++++++++++
 8 files changed, 280 insertions(+), 4 deletions(-)
 create mode 100644 mysite/settings/rename_to_dev.py
 create mode 100644 punchlist/util.py

diff --git a/.gitignore b/.gitignore
index 4471261..a3ef1cc 100644
--- a/.gitignore
+++ b/.gitignore
@@ -131,3 +131,6 @@ dmypy.json
 
 # meida contents
 media/*
+# dev settings
+mysite/settings/dev.py
+
diff --git a/mysite/settings/base.py b/mysite/settings/base.py
index b4e0dc0..91ed943 100644
--- a/mysite/settings/base.py
+++ b/mysite/settings/base.py
@@ -224,4 +224,6 @@
 }
 
 
-SENDGRID_API_KEY = ''
\ No newline at end of file
+SENDGRID_API_KEY = ''
+
+OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY')
\ No newline at end of file
diff --git a/mysite/settings/dev.py b/mysite/settings/dev.py
index fb5e086..a46e7fe 100644
--- a/mysite/settings/dev.py
+++ b/mysite/settings/dev.py
@@ -31,3 +31,5 @@
     from .local import *
 except ImportError:
     pass
+
+OPENAI_API_KEY = "asdfghjkl1234567890"
\ No newline at end of file
diff --git a/mysite/settings/production.py b/mysite/settings/production.py
index c94d90f..e00cebd 100644
--- a/mysite/settings/production.py
+++ b/mysite/settings/production.py
@@ -50,4 +50,6 @@
 AWS_DEFAULT_ACL = 'public-read'
 
 
-SENDGRID_API_KEY = os.environ.get("SENDGRID")
\ No newline at end of file
+SENDGRID_API_KEY = os.environ.get("SENDGRID")
+
+OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY')
\ No newline at end of file
diff --git a/mysite/settings/rename_to_dev.py b/mysite/settings/rename_to_dev.py
new file mode 100644
index 0000000..86dba02
--- /dev/null
+++ b/mysite/settings/rename_to_dev.py
@@ -0,0 +1,37 @@
+"""
+rename this to dev.py to use for local development
+"""
+
+from .base import *
+
+DATABASES = {
+    'default': {
+        'ENGINE': 'django.db.backends.sqlite3',
+        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
+    }
+}
+
+# SECURITY WARNING: don't run with debug turned on in production!
+DEBUG = True
+
+# SECURITY WARNING: keep the secret key used in production secret!
+SECRET_KEY = 'django-insecure-4w$$of)udb)qv8=vs^5vy#8%9+kk73x0u$de0dxg2xl+@s^v1g'
+
+# SECURITY WARNING: define the correct hosts in production!
+ALLOWED_HOSTS = ['*']
+
+EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
+
+MIDDLEWARE = MIDDLEWARE + ['debug_toolbar.middleware.DebugToolbarMiddleware']
+
+INSTALLED_APPS = INSTALLED_APPS + ["debug_toolbar",]
+
+INTERNAL_IPS = [
+    "localhost",
+    "127.0.0.1",
+]
+
+try:
+    from .local import *
+except ImportError:
+    pass
diff --git a/punchlist/models.py b/punchlist/models.py
index c9a9f76..c838923 100644
--- a/punchlist/models.py
+++ b/punchlist/models.py
@@ -10,6 +10,9 @@
 from django.utils import timezone
 from django.contrib.auth.models import User
 
+from .util import evaluate_product_idea
+
+
 class Level(Enum):
     INTERN = 'Intern'
@@ -84,6 +87,46 @@ class Product(models.Model):
     end_date = models.DateTimeField(null=True, blank=True)
     create_date = models.DateTimeField(null=True, blank=True)
     edit_date = models.DateTimeField(null=True, blank=True)
+
+    # Evaluation fields
+    originality_score = models.TextField(null=True, blank=True)
+    marketability_score = models.TextField(null=True, blank=True)
+    feasibility_score = models.TextField(null=True, blank=True)
+    completeness_score = models.TextField(null=True, blank=True)
+    summary = models.TextField(blank=True, null=True)
+    gemini_completeness_score = models.TextField(null=True, blank=True)
+    gemini_originality_score = models.TextField(null=True, blank=True)
+    gemini_marketability_score = models.TextField(null=True, blank=True)
+    gemini_feasibility_score = models.TextField(null=True, blank=True)
+    gemini_summary = models.TextField(blank=True, null=True)
+
+    # Override the save method to include evaluation logic
+    def save(self, *args, **kwargs):
+        # Convert the instance to a dictionary suitable for analysis
+        application_data = {
+            'name': self.name,
+            'description': self.description,
+            'product_info': self.product_info,
+            # Include other relevant fields as needed
+        }
+
+        # evaluate_product_idea (imported from .util) accepts this dictionary and returns a
+        # ten-value tuple: the OpenAI summary and its four scores, then the Gemini summary and its four scores
+        evaluation_results = evaluate_product_idea(application_data)
+
+        # Update the instance with evaluation results
+        self.summary = evaluation_results[0]
+        self.originality_score = evaluation_results[1]
+        self.marketability_score = evaluation_results[2]
+        self.feasibility_score = evaluation_results[3]
+        self.completeness_score = evaluation_results[4]
+        self.gemini_summary = evaluation_results[5]
+        self.gemini_originality_score = evaluation_results[6]
+        self.gemini_marketability_score = evaluation_results[7]
+        self.gemini_feasibility_score = evaluation_results[8]
+        self.gemini_completeness_score = evaluation_results[9]
+
+        super().save(*args, **kwargs)  # Call the "real" save() method.
 
     def __str__(self):
         return self.name
diff --git a/punchlist/templates/welcome_page.html b/punchlist/templates/welcome_page.html
index a118602..5c38b84 100644
--- a/punchlist/templates/welcome_page.html
+++ b/punchlist/templates/welcome_page.html
@@ -13,8 +13,8 @@

Welcome to the Buildly Collab Hub

     The CollabHub brings together, product teams with remote development agencies to build great products together using Buildly Labs.
-    Here you can find a team to build your new product, or help you migrate that legacy system to the cloud. Or if you just a have a small feature or bug that needs fixing,
-    you can use the CollabHub to find a community developer to help you out.
+    Here you can find a team to build your new product, or help you migrate that legacy system to the cloud. 
+    Or if you just have a small feature or bug that needs fixing, you can use the CollabHub to find a community developer to help you out.

diff --git a/punchlist/util.py b/punchlist/util.py
new file mode 100644
index 0000000..fdd1656
--- /dev/null
+++ b/punchlist/util.py
@@ -0,0 +1,187 @@
+from openai import OpenAI
+from django.http import JsonResponse, HttpRequest
+from django.views.decorators.http import require_http_methods
+from django.conf import settings
+import re
+from sendgrid import SendGridAPIClient
+from sendgrid.helpers.mail import Mail
+import logging
+
+# OpenAI client shared by the evaluation helpers below
+
+client = OpenAI(api_key=settings.OPENAI_API_KEY)
+
+class RateLimitError(Exception):
+    pass
+
+def preprocess_application_data(application):
+    """
+    Preprocess the application data to create a comprehensive summary text.
+    Adjust the details as per your model's requirements. Expected keys:
+
+    'name': self.name,
+    'description': self.description,
+    'product_info': self.product_info,
+    """
+    details = [
+        f"Product Description: {application['description']}",
+        f"Name: {application['name']}",
+        f"Product Information: {application['product_info']}",
+        # Add more fields as necessary
+    ]
+    return " ".join(details)
+
+def evaluate_product_idea(application):
+    """
+    Evaluate the product application with ChatGPT and Gemini; returns the summary and four scores from each model.
+    """
+
+    # Build the application summary from preprocess_application_data
+    application_summary = preprocess_application_data(application)
+
+    try:
+        # Generate the review using ChatGPT
+        completion = client.chat.completions.create(
+            model="gpt-4",
+            messages=[
+                {"role": "system", "content": "Please review and evaluate the software product idea: Evaluation Criteria:\n1. Originality\n2. Marketability\n3. Feasibility\n4. Completeness\n\nPlease provide your summary text of how good or bad the idea is and individual numeric scores for each criterion out of 100 each:\n\nOriginality Score:\nMarketability Score:\nFeasibility Score:\nCompleteness Score:"},
+                {"role": "user", "content": application_summary}
+            ]
+        )
+        print(completion.choices[0].message.content)
+        score_text = completion.choices[0].message.content
+
+        # Generate the review using Gemini
+        import google.generativeai as genai
+        import os
+
+        genai.configure(api_key=os.environ["GOOGLE_API_KEY"])
+        model = genai.GenerativeModel('gemini-1.0-pro-latest')
+        response = model.generate_content(f"Please review and evaluate the software product idea: Evaluation Criteria:\n1. Originality\n2. Marketability\n3. Feasibility\n4. Completeness\n\nPlease provide your summary text of how good or bad the idea is and individual numeric scores for each criterion out of 100 each:\n\nOriginality Score:\nMarketability Score:\nFeasibility Score:\nCompleteness Score: {application_summary}")
+
+        # generate_content() returns a response object; .text holds the generated string
+        gemini_score_text = response.text
+
+        # Extract scores from the OpenAI response
+        if completion.choices[0].message:
+
+            # Extract individual scores from the score text
+            score_lines = score_text.split('\n')
+
+            # Find and extract the scores
+            scores = {}
+            for line in score_lines:
+                if "Score" in line:
+                    criterion, score = line.split(":", 1)
+                    scores[criterion.strip()] = int(score.split("/")[0])
+
+            originality_score = scores.get('Originality Score', 0)
+            marketability_score = scores.get('Marketability Score', 0)
+            feasibility_score = scores.get('Feasibility Score', 0)
+            completeness_score = scores.get('Completeness Score', 0)
+        else:
+            score_text = "AI Failed to Summarize the Application. Please review manually."
+            originality_score = 0
+            marketability_score = 0
+            feasibility_score = 0
+            completeness_score = 0
+
+        logging.info(f"openAI response: {score_text}")
+
+        # Extract scores from the Gemini response
+        if gemini_score_text:
+
+            # Extract individual scores from the score text
+            score_lines = gemini_score_text.split('\n')
+
+            # Find and extract the scores
+            scores = {}
+            for line in score_lines:
+                if "Score" in line:
+                    criterion, score = line.split(":", 1)
+                    scores[criterion.strip()] = int(score.split("/")[0])
+
+            gemini_originality_score = scores.get('Originality Score', 0)
+            gemini_marketability_score = scores.get('Marketability Score', 0)
+            gemini_feasibility_score = scores.get('Feasibility Score', 0)
+            gemini_completeness_score = scores.get('Completeness Score', 0)
+        else:
+            gemini_score_text = "Gemini AI Failed to Summarize the Application. Please review manually."
+            gemini_originality_score = 0
+            gemini_marketability_score = 0
+            gemini_feasibility_score = 0
+            gemini_completeness_score = 0
+
+        logging.info(f"Gemini response: {gemini_score_text}")
+
+    except Exception as e:
+        logging.error(f"Rate Limit Error: {str(e)}")
+        score_text = "0"
+
+        originality_score = "0"
+        marketability_score = "0"
+        feasibility_score = "0"
+        completeness_score = "0"
+        gemini_score_text = "0"
+        gemini_originality_score = "0"
+        gemini_marketability_score = "0"
+        gemini_feasibility_score = "0"
+        gemini_completeness_score = "0"
+
+        # Send an email using SendGrid API
+        message = Mail(
+            from_email='foundry@buildly.io',
+            to_emails='greg@buildly.io',
+            subject='Open AI Rate Limit Exceeded',
+            html_content='Check the Foundry')
+
+        try:
+            sg = SendGridAPIClient(settings.SENDGRID_API_KEY)
+            response = sg.send(message)
+            print(response.status_code)
+            print(response.body)
+            print(response.headers)
+        except Exception as e:
+            print(str(e))
+
+    return score_text, originality_score, marketability_score, feasibility_score, completeness_score, gemini_score_text, gemini_originality_score, gemini_marketability_score, gemini_feasibility_score, gemini_completeness_score
+
+def analyze_ai_response(response):
+    """
+    Parses the structured response from the OpenAI API based on the given prompt.
+    The function extracts scores for originality, marketability, feasibility,
+    and completeness, as well as a summary from the text.
+    """
+    # Expects a legacy text-completion style response (choices[0].text)
+    text = response.choices[0].text.strip()
+
+    # Initialize a dictionary to hold the scores and summary
+    analysis = {
+        'originality_score': 0.0,
+        'marketability_score': 0.0,
+        'feasibility_score': 0.0,
+        'completeness_score': 0.0,
+        'summary': "",
+    }
+
+    # Regex patterns to find scores and summary in the response
+    score_pattern = r"originality: (\d\.\d+)|marketability: (\d\.\d+)|feasibility: (\d\.\d+)|completeness: (\d\.\d+)"
+    summary_pattern = r"summary: (.+)"
+
+    # Extract scores using regex
+    matches = re.finditer(score_pattern, text, re.IGNORECASE)
+    for match in matches:
+        if match.group(1):
+            analysis['originality_score'] = float(match.group(1))
+        elif match.group(2):
+            analysis['marketability_score'] = float(match.group(2))
+        elif match.group(3):
+            analysis['feasibility_score'] = float(match.group(3))
+        elif match.group(4):
+            analysis['completeness_score'] = float(match.group(4))
+
+    # Extract summary using regex
+    summary_match = re.search(summary_pattern, text, re.IGNORECASE)
+    if summary_match:
+        analysis['summary'] = summary_match.group(1).strip()  # Ensure whitespace is removed
+
+    return analysis
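Illustrative note (not part of the patch): the sketch below shows one way the new save() hook could be exercised without calling the external APIs, by patching evaluate_product_idea at the name punchlist/models.py imports. It is a minimal sketch assuming a standard Django test setup; the test module path and field values are hypothetical, and the real Product model may have additional required fields.

# Hypothetical test module, e.g. punchlist/tests/test_product_evaluation.py
from unittest.mock import patch

from django.test import TestCase

from punchlist.models import Product


class ProductEvaluationTest(TestCase):
    def test_save_maps_evaluation_tuple_to_fields(self):
        # Fake ten-value tuple in the order evaluate_product_idea returns:
        # OpenAI summary + four scores, then Gemini summary + four scores.
        fake_results = (
            "OpenAI summary", 80, 70, 60, 50,
            "Gemini summary", 81, 71, 61, 51,
        )

        # Patch the name models.py imported so save() never hits OpenAI/Gemini.
        # NOTE: add any other required Product fields your schema defines.
        with patch("punchlist.models.evaluate_product_idea", return_value=fake_results):
            product = Product.objects.create(
                name="Demo product",
                description="A short description",
                product_info="Some product details",
            )

        self.assertEqual(product.summary, "OpenAI summary")
        self.assertEqual(product.originality_score, 80)
        self.assertEqual(product.gemini_summary, "Gemini summary")
        self.assertEqual(product.gemini_completeness_score, 51)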