Skip to content

Commit

Permalink
evaluation
Browse files Browse the repository at this point in the history
  • Loading branch information
Greg Lind committed Sep 4, 2024
1 parent 7c01b5e commit 827f04f
Show file tree
Hide file tree
Showing 8 changed files with 280 additions and 4 deletions.
3 changes: 3 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -131,3 +131,6 @@ dmypy.json
# media contents
media/*

# dev settings
mysite/settings/dev.py

4 changes: 3 additions & 1 deletion mysite/settings/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -224,4 +224,6 @@
}


SENDGRID_API_KEY = ''
SENDGRID_API_KEY = ''

OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY')
2 changes: 2 additions & 0 deletions mysite/settings/dev.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,3 +31,5 @@
from .local import *
except ImportError:
pass

OPENAI_API_KEY = "asdfghjkl1234567890"
4 changes: 3 additions & 1 deletion mysite/settings/production.py
Original file line number Diff line number Diff line change
Expand Up @@ -50,4 +50,6 @@

AWS_DEFAULT_ACL = 'public-read'

SENDGRID_API_KEY = os.environ.get("SENDGRID")
SENDGRID_API_KEY = os.environ.get("SENDGRID")

OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY')
37 changes: 37 additions & 0 deletions mysite/settings/rename_to_dev.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,37 @@
"""
rename this to dev.py to use for local development
"""

from .base import *

DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-4w$$of)udb)qv8=vs^5vy#8%9+kk73x0u$de0dxg2xl+@s^v1g'

# SECURITY WARNING: define the correct hosts in production!
ALLOWED_HOSTS = ['*']

EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'

MIDDLEWARE = MIDDLEWARE + ['debug_toolbar.middleware.DebugToolbarMiddleware']

INSTALLED_APPS = INSTALLED_APPS + ["debug_toolbar",]

INTERNAL_IPS = [
"localhost",
"127.0.0.1",
]

try:
from .local import *
except ImportError:
pass
43 changes: 43 additions & 0 deletions punchlist/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,9 @@
from django.utils import timezone
from django.contrib.auth.models import User

from .util import evaluate_product_idea



class Level(Enum):
INTERN = 'Intern'
Expand Down Expand Up @@ -84,6 +87,46 @@ class Product(models.Model):
end_date = models.DateTimeField(null=True, blank=True)
create_date = models.DateTimeField(null=True, blank=True)
edit_date = models.DateTimeField(null=True, blank=True)

# Evaluation fields
originality_score = models.TextField(null=True, blank=True)
marketability_score = models.TextField(null=True, blank=True)
feasibility_score = models.TextField(null=True, blank=True)
completeness_score = models.TextField(null=True, blank=True)
summary = models.TextField(blank=True, null=True)
gemini_completeness_score = models.TextField(null=True, blank=True)
gemini_originality_score = models.TextField(null=True, blank=True)
gemini_marketability_score = models.TextField(null=True, blank=True)
gemini_feasibility_score = models.TextField(null=True, blank=True)
gemini_summary = models.TextField(blank=True, null=True)

# Override the save method to include evaluation logic
def save(self, *args, **kwargs):
    """Run the AI evaluation of this product idea, then persist the instance.

    Populates the OpenAI (`summary`, `*_score`) and Gemini (`gemini_*`)
    evaluation fields from `evaluate_product_idea` before delegating to the
    normal Django save.

    NOTE(review): this calls external AI services on *every* save (including
    admin edits) — confirm intended; consider gating on first save
    (``self.pk is None``) or moving the call to an async task.
    """
    # Snapshot of the fields the evaluator scores.
    application_data = {
        'name': self.name,
        'description': self.description,
        'product_info': self.product_info,
        # Include other relevant fields as needed
    }

    # Bug fix: this module imports `evaluate_product_idea` from .util; the
    # previous call to the undefined name `evaluate_startup_idea` raised
    # NameError on every save.
    evaluation_results = evaluate_product_idea(application_data)

    # evaluate_product_idea returns a 10-tuple in exactly this order:
    # (summary, originality, marketability, feasibility, completeness,
    #  gemini_summary, gemini_originality, gemini_marketability,
    #  gemini_feasibility, gemini_completeness)
    (
        self.summary,
        self.originality_score,
        self.marketability_score,
        self.feasibility_score,
        self.completeness_score,
        self.gemini_summary,
        self.gemini_originality_score,
        self.gemini_marketability_score,
        self.gemini_feasibility_score,
        self.gemini_completeness_score,
    ) = evaluation_results

    super().save(*args, **kwargs)  # Call the "real" save() method.

def __str__(self):
    """Display the product by its name."""
    display_name = self.name
    return display_name
Expand Down
4 changes: 2 additions & 2 deletions punchlist/templates/welcome_page.html
Original file line number Diff line number Diff line change
Expand Up @@ -13,8 +13,8 @@
<h2 class="animate__animated animate__fadeInDown">Welcome to the <a href="https://www.buildly.io">Buildly</a> Collab Hub</h2>
<p class="hero-light" style="width: 70%">
The CollabHub brings together product teams with remote development agencies to build great products together using <a href="https://labs.buildly.io">Buildly Labs</a>.
Here you can <em>find a team to build your new product, or help you migrate that legacy system to the cloud</em>. Or if you just a have a small feature or bug that needs fixing,
you can use the CollabHub to find a community developer to help you out.
Here you can <b>find a team to build your new product, or help you migrate that legacy system to the cloud</b>.
<br/>Or if you just have a small feature or bug that needs fixing, you can use the CollabHub to find a community developer to help you out.
</p>
<div class="container align-items-center justify-content-center">
<div class="row">
Expand Down
187 changes: 187 additions & 0 deletions punchlist/util.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,187 @@
from openai import OpenAI
from django.http import JsonResponse, HttpRequest
from django.views.decorators.http import require_http_methods
from django.conf import settings
import re
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import Mail
import logging

from openai import OpenAI

# Module-level OpenAI client, configured once from Django settings.
# NOTE(review): instantiated at import time, so a missing/invalid
# OPENAI_API_KEY surfaces at import rather than at call time — confirm intended.
client = OpenAI(api_key=settings.OPENAI_API_KEY)

class RateLimitError(Exception):
    """Raised when an upstream AI provider reports rate limiting."""

def preprocess_application_data(application):
    """Flatten an application dict into one space-joined summary string.

    Reads the 'description', 'name' and 'product_info' keys and renders each
    as "<Label>: <value>", in that order.
    """
    field_labels = (
        ("Product Description", "description"),
        ("Name", "name"),
        ("Product Information", "product_info"),
        # Add more (label, key) pairs as necessary
    )
    return " ".join(
        f"{label}: {application[key]}" for label, key in field_labels
    )

def _extract_scores(score_text):
    """Parse '<Criterion> Score: <int>[/100]' lines into {criterion: int}.

    Malformed lines (extra colons, non-numeric values) are skipped rather
    than aborting the whole evaluation.
    """
    scores = {}
    for line in score_text.split('\n'):
        if "Score" in line:
            criterion, _, value = line.partition(":")
            try:
                scores[criterion.strip()] = int(value.split("/")[0])
            except ValueError:
                continue
    return scores


def evaluate_product_idea(application):
    """Evaluate a product application with OpenAI (gpt-4) and Google Gemini.

    Returns a 10-tuple:
        (summary, originality, marketability, feasibility, completeness,
         gemini_summary, gemini_originality, gemini_marketability,
         gemini_feasibility, gemini_completeness)

    Scores are ints out of 100 on success. On failure every element falls
    back to the string "0" (preserving the previous contract) and an alert
    email is sent via SendGrid.
    """
    prompt = (
        "Please review and evaluate the software product idea: Evaluation Criteria:\n"
        "1. Originality\n2. Marketability\n3. Feasibility\n4. Completeness\n\n"
        "Please provide your summary text of how good or bad the idea is and "
        "individual numeric scores for each criterion out of 100 each:\n\n"
        "Originality Score:\nMarketability Score:\nFeasibility Score:\nCompleteness Score:"
    )

    application_summary = preprocess_application_data(application)

    try:
        # --- OpenAI review ------------------------------------------------
        completion = client.chat.completions.create(
            model="gpt-4",
            messages=[
                {"role": "system", "content": prompt},
                {"role": "user", "content": application_summary},
            ],
        )

        if completion.choices[0].message:
            score_text = completion.choices[0].message.content
            scores = _extract_scores(score_text)
            originality_score = scores.get('Originality Score', 0)
            marketability_score = scores.get('Marketability Score', 0)
            feasibility_score = scores.get('Feasibility Score', 0)
            completeness_score = scores.get('Completeness Score', 0)
        else:
            score_text = "AI Failed to Summarize the Application. Please review manually."
            originality_score = marketability_score = 0
            feasibility_score = completeness_score = 0

        logging.info(f"openAI response: {score_text}")

        # --- Gemini review ------------------------------------------------
        import google.generativeai as genai
        import os

        genai.configure(api_key=os.environ["GOOGLE_API_KEY"])
        model = genai.GenerativeModel('gemini-1.0-pro-latest')
        response = model.generate_content(f"{prompt} {application_summary}")

        # Bug fix: the previous code kept the raw response object; calling
        # .split('\n') on it raised AttributeError, which the broad except
        # below then turned into all-"0" results. Use the response text.
        gemini_score_text = response.text if response else ""

        if gemini_score_text:
            scores = _extract_scores(gemini_score_text)
            gemini_originality_score = scores.get('Originality Score', 0)
            gemini_marketability_score = scores.get('Marketability Score', 0)
            gemini_feasibility_score = scores.get('Feasibility Score', 0)
            gemini_completeness_score = scores.get('Completeness Score', 0)
        else:
            gemini_score_text = "Gemini AI Failed to Summarize the Application. Please review manually."
            gemini_originality_score = gemini_marketability_score = 0
            gemini_feasibility_score = gemini_completeness_score = 0

        logging.info(f"gemini response: {gemini_score_text}")

    except Exception as e:
        # Any failure (rate limit, network, parsing) falls back to the "0"
        # sentinels and alerts the team. The previous label claimed every
        # exception was a rate limit; log the actual error instead.
        logging.error(f"AI evaluation error: {str(e)}")

        # Bug fix: score_text was left unbound when the OpenAI call itself
        # failed, so the return statement raised NameError.
        score_text = "0"
        originality_score = "0"
        marketability_score = "0"
        feasibility_score = "0"
        completeness_score = "0"
        gemini_score_text = "0"
        gemini_originality_score = "0"
        gemini_marketability_score = "0"
        gemini_feasibility_score = "0"
        gemini_completeness_score = "0"

        # Send an email alert using the SendGrid API (best-effort).
        message = Mail(
            from_email='[email protected]',
            to_emails='[email protected]',
            subject='Open AI Rate Limit Exceeded',
            html_content='<strong>Check the Foundry</strong>')

        try:
            sg = SendGridAPIClient(settings.SENDGRID_API_KEY)
            response = sg.send(message)
            print(response.status_code)
            print(response.body)
            print(response.headers)
        except Exception as e:
            print(str(e))

    return score_text, originality_score, marketability_score, feasibility_score, completeness_score, gemini_score_text, gemini_originality_score, gemini_marketability_score, gemini_feasibility_score, gemini_completeness_score

def analyze_ai_response(response):
    """Parse a structured OpenAI completion response into scores + summary.

    Extracts originality, marketability, feasibility and completeness scores
    (floats, defaulting to 0.0) and a free-text summary (defaulting to "")
    from ``response.choices[0].text``.
    """
    body = response.choices[0].text.strip()

    # Defaults returned when a field is absent from the response text.
    analysis = {
        'originality_score': 0.0,
        'marketability_score': 0.0,
        'feasibility_score': 0.0,
        'completeness_score': 0.0,
        'summary': "",
    }

    # One alternation group per criterion; group i maps to ordered_keys[i-1].
    score_pattern = r"originality: (\d\.\d+)|marketability: (\d\.\d+)|feasibility: (\d\.\d+)|completeness: (\d\.\d+)"
    ordered_keys = (
        'originality_score',
        'marketability_score',
        'feasibility_score',
        'completeness_score',
    )

    for match in re.finditer(score_pattern, body, re.IGNORECASE):
        for group_index, key in enumerate(ordered_keys, start=1):
            captured = match.group(group_index)
            if captured:
                analysis[key] = float(captured)
                break

    summary_match = re.search(r"summary: (.+)", body, re.IGNORECASE)
    if summary_match:
        analysis['summary'] = summary_match.group(1).strip()

    return analysis

0 comments on commit 827f04f

Please sign in to comment.