diff --git a/AI Resume Analyzer/Resume Analyzer Sentence Transformers/README.md b/AI Resume Analyzer/Resume Analyzer Sentence Transformers/README.md
new file mode 100644
index 000000000..6a2b05e6f
--- /dev/null
+++ b/AI Resume Analyzer/Resume Analyzer Sentence Transformers/README.md
@@ -0,0 +1,11 @@
+
+
+![Screenshot (5313)](https://github.com/user-attachments/assets/ff312d66-7b70-4479-b1cf-06c9a871d853)
+
+
+
+![Screenshot (5314)](https://github.com/user-attachments/assets/aed37f5f-361f-40e7-86a2-4ad77234db0d)
+
+
+
+![Screenshot (5315)](https://github.com/user-attachments/assets/7ea6d358-40e3-4f57-908e-fd0a78d494b1)
diff --git a/AI Resume Analyzer/Resume Analyzer Sentence Transformers/app.py b/AI Resume Analyzer/Resume Analyzer Sentence Transformers/app.py
new file mode 100644
index 000000000..ad13fa126
--- /dev/null
+++ b/AI Resume Analyzer/Resume Analyzer Sentence Transformers/app.py
@@ -0,0 +1,49 @@
+import streamlit as st
+from resume_analyzer import ResumeAnalyzer
+import os
+import tempfile
+
+def main():
+    st.set_page_config(page_title="Smart Resume Analyzer", layout="wide")
+    st.title("Smart Resume Analyzer")
+
+    job_description = st.text_area("Enter the Job Description", height=200)
+    st.write("Upload resumes as PDF files:")
+    uploaded_files = st.file_uploader("Upload PDF files", accept_multiple_files=True, type="pdf")
+
+    if st.button("Analyze Resumes") and uploaded_files:
+        resume_files = []
+        for uploaded_file in uploaded_files:
+            # Save each upload to the platform's temp directory (a hard-coded "/tmp" only works on Unix)
+            file_path = os.path.join(tempfile.gettempdir(), uploaded_file.name)
+            with open(file_path, "wb") as f:
+                f.write(uploaded_file.getbuffer())
+            resume_files.append(file_path)
+
+        analyzer = ResumeAnalyzer()
+        analysis_results = analyzer.analyze_resumes(resume_files, job_description)
+
+        for result in analysis_results:
+            st.write(f"**Resume Name**: {result['Resume Name']}")
+            st.write(f"**Job Titles**: {result['Job Titles']}")
+            st.write(f"**Primary Skills**: {result['Primary Skills']}")
+            st.write(f"**Secondary Skills**: {result['Secondary Skills']}")
+            st.write(f"**Total Experience (Years)**: {result['Total Experience (Years)']}")
+            st.write(f"**Relevant Experience Duration (Years)**: {result['Relevant Experience Duration (Years)']:.2f}")
+            st.write(f"**Average Experience Relevance**: {result['Average Experience Relevance']:.2f}")
+            st.write(f"**Relevant Projects**: {result['Relevant Projects']}")
+            st.write(f"**Average Project Relevance**: {result['Average Project Relevance']:.2f}")
+            st.write(f"**Score**: {result['Score']:.2f}")
+            st.write("-" * 50)
+
+        report_path = analyzer.generate_ranking_report(analysis_results)
+        with open(report_path, "rb") as report_file:
+            st.download_button(
+                label="Download Analysis Report as PDF",
+                data=report_file,
+                file_name="resume_analysis_report.pdf",
+                mime="application/pdf"
+            )
+
+if __name__ == "__main__":
+    main()
diff --git a/AI Resume Analyzer/Resume Analyzer Sentence Transformers/requirements.txt b/AI Resume Analyzer/Resume Analyzer Sentence Transformers/requirements.txt
new file mode 100644
index 000000000..436bd52f8
--- /dev/null
+++ b/AI Resume Analyzer/Resume Analyzer Sentence Transformers/requirements.txt
@@ -0,0 +1,10 @@
+streamlit
+sentence-transformers
+PyMuPDF
+rapidfuzz
+fpdf
+python-dateutil
+spacy
+
+# After installing the requirements, download the spaCy English model:
+# python -m spacy download en_core_web_sm
diff --git a/AI Resume Analyzer/Resume Analyzer Sentence Transformers/results/382853996-ff312d66-7b70-4479-b1cf-06c9a871d853.png b/AI Resume Analyzer/Resume Analyzer Sentence Transformers/results/382853996-ff312d66-7b70-4479-b1cf-06c9a871d853.png
new file mode 100644
index 000000000..3b6d75a52
Binary files /dev/null and b/AI Resume Analyzer/Resume Analyzer Sentence Transformers/results/382853996-ff312d66-7b70-4479-b1cf-06c9a871d853.png differ
diff --git a/AI Resume Analyzer/Resume Analyzer Sentence Transformers/results/382854091-aed37f5f-361f-40e7-86a2-4ad77234db0d.png b/AI Resume Analyzer/Resume Analyzer Sentence Transformers/results/382854091-aed37f5f-361f-40e7-86a2-4ad77234db0d.png
new file mode 100644
index 000000000..e1e503f52
Binary files /dev/null and b/AI Resume Analyzer/Resume Analyzer Sentence Transformers/results/382854091-aed37f5f-361f-40e7-86a2-4ad77234db0d.png differ
diff --git a/AI Resume Analyzer/Resume Analyzer Sentence Transformers/results/382854167-7ea6d358-40e3-4f57-908e-fd0a78d494b1.png b/AI Resume Analyzer/Resume Analyzer Sentence Transformers/results/382854167-7ea6d358-40e3-4f57-908e-fd0a78d494b1.png
new file mode 100644
index 000000000..60dec0cc3
Binary files /dev/null and b/AI Resume Analyzer/Resume Analyzer Sentence Transformers/results/382854167-7ea6d358-40e3-4f57-908e-fd0a78d494b1.png differ
diff --git a/AI Resume Analyzer/Resume Analyzer Sentence Transformers/resume_analyzer.py b/AI Resume Analyzer/Resume Analyzer Sentence Transformers/resume_analyzer.py
new file mode 100644
index 000000000..21df5d3a0
--- /dev/null
+++ b/AI Resume Analyzer/Resume Analyzer Sentence Transformers/resume_analyzer.py
@@ -0,0 +1,499 @@
+
+# resume_analyzer.py
+
+import os
+import re
+import logging
+import fitz  # PyMuPDF for PDF text extraction
+from rapidfuzz import fuzz
+from typing import List, Dict, Tuple
+from datetime import datetime
+import tempfile
+from fpdf import FPDF
+from sentence_transformers import util
+import utils  # Import the utils module
+
+logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s: %(message)s')
+
+class ResumeAnalyzer:
+    def __init__(self):
+        self.nlp = utils.load_nlp_model()
+        self.model = utils.load_transformer_model()
+        self.SKILL_KEYWORDS = self._load_skill_keywords()
+        self.JOB_TITLES = list(self.SKILL_KEYWORDS.keys())
+
+    def _load_skill_keywords(self) -> Dict[str, List[str]]:
+        return {
+            "Data Scientist": [
+                "Python", "R", "SQL", "Pandas", "NumPy", "Scikit-learn",
+                "TensorFlow", "PyTorch", "Tableau", "Power BI", "Machine Learning",
+                "Deep Learning", "Natural Language Processing", "Data Analysis",
+                "Data Visualization", "Statistics", "Predictive Modeling", "Big Data"
+            ],
+            "Software Engineer": [
+                "Java", "C++", "C#", "Python", "JavaScript", "Git", "Agile Methodologies",
+                "Unit Testing", "OOP", "REST APIs", "Microservices", "SQL", "NoSQL",
+                "Linux", "Docker", "Kubernetes", "Design Patterns", "Algorithms", "Data Structures"
+            ],
+            "Web Developer": [
+                "HTML", "CSS", "JavaScript", "React", "Angular", "Vue.js",
+                "Node.js", "Django", "Flask", "Ruby on Rails", "Bootstrap",
+                "jQuery", "Webpack", "REST APIs", "GraphQL", "TypeScript", "Responsive Design", "SEO"
+            ],
+            "Mobile App Developer": [
+                "Swift", "Kotlin", "React Native", "Flutter", "Android Studio",
+                "Xcode", "Firebase", "UI/UX Design", "Mobile Security", "Objective-C",
+                "Java", "Dart", "APIs", "Cross-Platform Development", "SQLite", "JSON"
+            ],
+            "DevOps Engineer": [
+                "Git", "Docker", "Kubernetes", "Terraform", "Ansible",
+                "AWS", "Azure", "GCP", "Jenkins", "CI/CD", "Monitoring", "Logging",
+                "Bash", "PowerShell", "Prometheus", "Grafana", "Infrastructure as Code", "Scripting", "Linux"
+            ],
+            "Cybersecurity Specialist": [
+                "Network Security", "Information Security", "Penetration Testing", "Vulnerability Assessment",
+                "Ethical Hacking", "Firewalls", "Intrusion Detection", "Incident Response",
+                "Security Compliance", "Encryption", "Risk Management", "SIEM", "IDS/IPS", "Security Auditing"
+            ],
+            "Database Administrator": [
+                "SQL", "NoSQL", "Oracle", "MySQL", "PostgreSQL", "MongoDB", "Database Design",
+                "Data Modeling", "Performance Tuning", "Backup and Recovery", "PL/SQL",
+                "Data Warehousing", "ETL", "SQL Server", "Replication"
+            ],
+            "Cloud Architect": [
+                "AWS", "Azure", "GCP", "Cloud Computing", "Cloud Infrastructure",
+                "Cloud Security", "DevOps", "Docker", "Kubernetes", "Automation",
+                "CI/CD", "IaaS", "PaaS", "SaaS", "Virtualization", "Disaster Recovery"
+            ],
+            "AI Researcher": [
+                "Machine Learning", "Deep Learning", "Python", "TensorFlow", "PyTorch",
+                "Natural Language Processing", "Computer Vision", "Reinforcement Learning",
+                "Algorithms", "Research", "Mathematics", "Statistics", "C++", "Data Analysis", "Artificial Intelligence"
+            ],
+            "Data Engineer": [
+                "Python", "Scala", "Java", "SQL", "NoSQL", "ETL", "Hadoop", "Spark",
+                "Kafka", "Airflow", "Data Warehousing", "Big Data", "AWS", "Azure",
+                "Data Pipelines", "Data Modeling", "Redshift", "Snowflake"
+            ],
+            "Network Engineer": [
+                "Networking", "Cisco", "Juniper", "TCP/IP", "Routing", "Switching",
+                "Network Security", "Firewalls", "VPN", "LAN/WAN", "Wireless Networks",
+                "Network Troubleshooting", "VoIP", "BGP", "OSPF", "Network Monitoring"
+            ],
+            "Business Analyst": [
+                "Business Analysis", "Requirements Gathering", "SQL", "Data Analysis",
+                "Process Modeling", "UML", "Agile Methodologies", "User Stories",
+                "Project Management", "Communication Skills", "Microsoft Excel",
+                "Stakeholder Management", "JIRA", "Confluence"
+            ],
+            "Project Manager": [
+                "Project Management", "Agile", "Scrum", "Kanban", "Risk Management",
+                "Budgeting", "Scheduling", "Communication Skills", "Leadership",
+                "Microsoft Project", "Stakeholder Management", "Team Management", "PMP Certification"
+            ],
+            "UX/UI Designer": [
+                "User Experience", "User Interface", "Adobe XD", "Sketch", "Figma",
+                "Wireframing", "Prototyping", "User Research", "Interaction Design",
+                "Visual Design", "HTML", "CSS", "Usability Testing", "Adobe Creative Suite"
+            ],
+            "Quality Assurance Engineer": [
+                "Quality Assurance", "Testing", "Test Automation", "Selenium", "JUnit",
+                "Test Cases", "Bug Tracking", "Regression Testing", "Performance Testing",
+                "Load Testing", "Agile Methodologies", "QA Processes", "Cucumber", "TestNG"
+            ],
+            "Systems Analyst": [
+                "Systems Analysis", "Requirements Analysis", "Business Analysis",
+                "SQL", "Data Modeling", "Process Improvement", "Technical Documentation",
+                "Software Development Life Cycle", "UML", "Testing", "Problem-Solving", "ERP Systems"
+            ],
+            "Software Tester": [
+                "Software Testing", "Manual Testing", "Automated Testing", "Test Cases",
+                "Selenium", "LoadRunner", "Bug Tracking", "Quality Assurance",
+                "Regression Testing", "Test Plans", "Agile Testing", "Black Box Testing", "White Box Testing"
+            ],
+            "Technical Writer": [
+                "Technical Writing", "Documentation", "Writing Skills", "API Documentation",
+                "User Manuals", "Microsoft Office", "Adobe Acrobat", "Content Management",
+                "Editing", "Communication Skills", "XML", "Markdown", "DITA"
+            ],
+            "Sales Engineer": [
+                "Sales", "Technical Knowledge", "Product Demonstrations", "Client Relations",
+                "Negotiation Skills", "Communication Skills", "CRM Software", "Presentations",
+                "Solution Selling", "Technical Support", "B2B Sales", "Networking"
+            ],
+            "IT Support Specialist": [
+                "Technical Support", "Troubleshooting", "Windows OS", "Mac OS", "Linux",
+                "Hardware Support", "Software Installation", "Network Support",
+                "Customer Service", "Active Directory", "Help Desk", "Ticketing Systems"
+            ],
+            "Machine Learning Engineer": [
+                "Python", "TensorFlow", "PyTorch", "Scikit-learn", "Machine Learning",
+                "Deep Learning", "Algorithms", "Data Structures", "Data Preprocessing",
+                "Model Deployment", "AWS SageMaker", "Big Data", "Distributed Computing", "MLOps"
+            ],
+            "Full Stack Developer": [
+                "JavaScript", "Python", "Ruby", "Java", "Node.js", "React", "Angular",
+                "Vue.js", "Django", "Flask", "Ruby on Rails", "SQL", "NoSQL", "REST APIs",
+                "GraphQL", "HTML", "CSS", "Webpack", "Microservices", "TypeScript"
+            ],
+            "Product Manager": [
+                "Product Management", "Roadmapping", "Agile Methodologies", "Scrum",
+                "Market Research", "User Experience", "Project Management", "Stakeholder Management",
+                "Communication Skills", "Data Analysis", "Prioritization", "Product Lifecycle"
+            ],
+            "Data Analyst": [
+                "SQL", "Excel", "Python", "R", "Tableau", "Power BI", "Data Visualization",
+                "Statistics", "Data Mining", "Data Cleaning", "Business Intelligence",
+                "Data Modeling", "Analytics", "SAS", "SPSS"
+            ],
+            "Embedded Systems Engineer": [
+                "C", "C++", "Microcontrollers", "Firmware", "RTOS", "Embedded Linux",
+                "Hardware Design", "Debugging", "IoT", "Circuit Design", "Assembly Language",
+                "Sensors", "Protocols (I2C, SPI, UART)", "VHDL", "Verilog"
+            ],
+            "Systems Engineer": [
+                "Systems Engineering", "Linux", "Windows Server", "Scripting",
+                "Automation", "Ansible", "Puppet", "Docker", "Virtualization",
+                "Cloud Computing", "Networking", "Troubleshooting", "Active Directory"
+            ],
+            "SEO Specialist": [
+                "SEO", "Google Analytics", "Keyword Research", "Content Optimization",
+                "HTML", "CSS", "Link Building", "On-page Optimization", "Off-page Optimization",
+                "SEM", "Marketing", "Digital Analytics", "Google Search Console"
+            ],
+            "Graphic Designer": [
+                "Adobe Photoshop", "Adobe Illustrator", "InDesign", "Graphic Design",
+                "Branding", "Typography", "Layout Design", "Color Theory",
+                "Creative Suite", "Sketch", "Visual Communication", "After Effects", "3D Modeling"
+            ],
+            "Content Writer": [
+                "Content Writing", "SEO", "Blogging", "Copywriting", "Editing",
+                "Proofreading", "Research", "Social Media", "Marketing", "WordPress",
+                "Creative Writing", "Content Strategy"
+            ],
+            "Blockchain Developer": [
+                "Blockchain", "Ethereum", "Solidity", "Smart Contracts", "Cryptocurrency",
+                "Bitcoin", "Hyperledger", "Web3.js", "Cryptography", "Distributed Ledger",
+                "Consensus Algorithms", "Truffle", "Ganache"
+            ],
+            "Artificial Intelligence Engineer": [
+                "Artificial Intelligence", "Machine Learning", "Deep Learning",
+                "Python", "TensorFlow", "PyTorch", "NLP", "Computer Vision",
+                "Reinforcement Learning", "Algorithms", "Data Structures", "Keras", "OpenCV"
+            ]
+        }
+
+    def extract_text_from_pdf(self, pdf_path: str) -> str:
+        try:
+            with fitz.open(pdf_path) as pdf_document:
+                text = "\n".join(page.get_text("text") for page in pdf_document)
+            return text.strip()
+        except Exception as e:
+            logging.error(f"PDF extraction error for {pdf_path}: {e}")
+            return ""
+
+    def preprocess_text(self, text: str) -> str:
+        text = re.sub(r'\s+', ' ', text)
+        text = re.sub(r'[^\x00-\x7F]+', ' ', text)
+        return text.strip()
+
+    def extract_job_title_and_primary_skills(self, job_description: str) -> Tuple[List[str], List[str]]:
+        doc = self.nlp(job_description)
+        job_description_lower = job_description.lower()
+        extracted_titles = []
+        for title in self.JOB_TITLES:
+            if title.lower() in job_description_lower:
+                extracted_titles.append(title)
+        extracted_skills = set()
+        for token in doc:
+            for skills in self.SKILL_KEYWORDS.values():
+                if token.text in skills:
+                    extracted_skills.add(token.text)
+        for entity in doc.ents:
+            for skills in self.SKILL_KEYWORDS.values():
+                if entity.text in skills:
+                    extracted_skills.add(entity.text)
+        return extracted_titles, list(extracted_skills)
+
+    def extract_all_skills(self, resume_text: str) -> List[str]:
+        doc = self.nlp(resume_text)
+        extracted_skills = set()
+        for token in doc:
+            for skills in self.SKILL_KEYWORDS.values():
+                if token.text in skills:
+                    extracted_skills.add(token.text)
+        for entity in doc.ents:
+            for skills in self.SKILL_KEYWORDS.values():
+                if entity.text in skills:
+                    extracted_skills.add(entity.text)
+        return list(extracted_skills)
+
+    def match_primary_skills(self, resume_skills: List[str], job_description_skills: List[str]) -> List[str]:
+        return list(set(resume_skills) & set(job_description_skills))
+
+    def extract_secondary_skills(self, resume_skills: List[str], primary_skills: List[str], job_titles: List[str]) -> List[str]:
+        secondary_skills = set()
+        for title in job_titles:
+            if title in self.SKILL_KEYWORDS:
+                title_skills = set(self.SKILL_KEYWORDS[title])
+                additional_skills = (title_skills & set(resume_skills)) - set(primary_skills)
+                secondary_skills.update(additional_skills)
+        return list(secondary_skills)
+
+    def extract_project_section(self, resume_text: str) -> str:
+        headings = ['Projects', 'Project Experience', 'Relevant Projects', 'Academic Projects', 'Professional Projects']
+        pattern = r'(?i)(' + '|'.join(headings) + r')\b'
+        match = re.search(pattern, resume_text)
+        if match:
+            start = match.end()
+            end_match = re.search(r'\n[A-Z][^\n]+\n', resume_text[start:])
+            end = start + end_match.start() if end_match else len(resume_text)
+            project_section = resume_text[start:end].strip()
+            return project_section
+        return ''
+
+    def extract_project_skills(self, project_section: str) -> List[str]:
+        doc = self.nlp(project_section)
+        project_skills = set()
+        for token in doc:
+            if token.pos_ in ['NOUN', 'PROPN'] and not token.is_stop:
+                project_skills.add(token.text)
+        for chunk in doc.noun_chunks:
+            project_skills.add(chunk.text.strip())
+        return list(project_skills)
+
+    def extract_advanced_terms(self, job_description: str) -> List[str]:
+        doc = self.nlp(job_description)
+        advanced_terms = set()
+        for chunk in doc.noun_chunks:
+            text = chunk.text.strip()
+            if len(text.split()) > 1 and not text.lower().startswith(('we are', 'the candidate', 'you will')):
+                advanced_terms.add(text)
+        return list(advanced_terms)
+
+    def assess_project_relevance(self, project_skills: List[str], required_skills: List[str], advanced_terms: List[str]) -> float:
+        matched_skills = set()
+        for skill in required_skills:
+            for project_skill in project_skills:
+                if fuzz.partial_ratio(skill.lower(), project_skill.lower()) > 80:
+                    matched_skills.add(skill)
+                    break
+        skill_relevance = len(matched_skills) / len(required_skills) if required_skills else 0
+        complexity = 0
+        for term in advanced_terms:
+            for project_skill in project_skills:
+                if fuzz.partial_ratio(term.lower(), project_skill.lower()) > 80:
+                    complexity += 1
+                    break
+        complexity_score = complexity / len(advanced_terms) if advanced_terms else 0
+        relevance_score = (skill_relevance * 0.8) + (complexity_score * 0.2)
+        return relevance_score
+
+    def analyze_projects(self, resume_text: str, job_description: str, required_skills: List[str]) -> Tuple[int, float]:
+        advanced_terms = self.extract_advanced_terms(job_description)
+        project_section = self.extract_project_section(resume_text)
+        if not project_section:
+            logging.info("No project section found in resume.")
+            return 0, 0.0
+        project_skills = self.extract_project_skills(project_section)
+        relevance_score = self.assess_project_relevance(project_skills, required_skills, advanced_terms)
+        relevant_project_count = 1 if relevance_score > 0.2 else 0
+        average_relevance_score = relevance_score if relevant_project_count else 0.0
+        return relevant_project_count, average_relevance_score
+
+    def extract_experience_section(self, resume_text: str) -> str:
+        headings = [
+            'Work Experience', 'Professional Experience', 'Employment History',
+            'Experience', 'Relevant Experience', 'Internship Experience', 'Internships'
+        ]
+        pattern = r'(?i)(' + '|'.join(headings) + r')\b'
+        match = re.search(pattern, resume_text)
+        if match:
+            start = match.end()
+            end_match = re.search(r'\n[A-Z][^\n]+\n', resume_text[start:])
+            end = start + end_match.start() if end_match else len(resume_text)
+            experience_section = resume_text[start:end].strip()
+            return experience_section
+        return ''
+
+    def parse_individual_experiences(self, experience_section: str) -> List[Dict[str, str]]:
+        entries = re.split(r'\n\s*\n', experience_section)
+        experiences = []
+        for entry in entries:
+            entry = entry.strip()
+            if entry:
+                lines = entry.split('\n')
+                job_title = lines[0].strip() if lines else ''
+                company = lines[1].strip() if len(lines) > 1 else ''
+                experiences.append({
+                    'description': entry,
+                    'job_title': job_title,
+                    'company': company
+                })
+        return experiences
+
+    def assess_experience_relevance(self, experience_description: str, required_skills: List[str], job_titles: List[str]) -> float:
+        tokens = set(token.text.lower() for token in self.nlp(experience_description) if not token.is_stop)
+        matched_skills = [skill.lower() for skill in required_skills if skill.lower() in tokens]
+        skill_relevance = len(matched_skills) / len(required_skills) if required_skills else 0
+        matched_titles = [title.lower() for title in job_titles if title.lower() in experience_description.lower()]
+        title_relevance = 1.0 if matched_titles else 0.0
+        relevance_score = (skill_relevance * 0.7) + (title_relevance * 0.3)
+        return relevance_score
+
+    def assess_experience_relevance_semantic(self, experience_description: str, job_description: str) -> float:
+        embedding_experience = self.model.encode(experience_description, convert_to_tensor=True)
+        embedding_job = self.model.encode(job_description, convert_to_tensor=True)
+        similarity = util.pytorch_cos_sim(embedding_experience, embedding_job).item()
+        return similarity
+
+    def calculate_experience_relevance(self, experience_description: str, required_skills: List[str], job_titles: List[str], job_description: str) -> float:
+        relevance_keyword = self.assess_experience_relevance(experience_description, required_skills, job_titles)
+        relevance_semantic = self.assess_experience_relevance_semantic(experience_description, job_description)
+        relevance_score = (relevance_keyword * 0.6) + (relevance_semantic * 0.4)
+        return relevance_score
+
+    def extract_experience_duration(self, experience_description: str) -> float:
+        from dateutil import parser
+        date_patterns = [
+            r'((Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)[a-z]*[\s\-.,]\d{2,4})',
+            r'(\b\d{1,2}/\d{1,2}/\d{2,4})',
+            r'(\b\d{1,2}-\d{1,2}-\d{2,4})',
+            r'(\b\d{4}\b)',
+            r'(\b\d{1,2}/\d{4})',
+        ]
+        start_date = None
+        end_date = None
+        matches = []
+        for pattern in date_patterns:
+            matches.extend(re.findall(pattern, experience_description, re.IGNORECASE))
+        for match in matches:
+            # re.findall returns a tuple per match when a pattern has several groups and a plain string otherwise
+            date_str = match[0] if isinstance(match, tuple) else match
+            if 'present' in date_str.lower():
+                end_date = datetime.now()
+            else:
+                try:
+                    date = parser.parse(date_str, fuzzy=True)
+                    if not start_date or date < start_date:
+                        start_date = date
+                    if not end_date or date > end_date:
+                        end_date = date
+                except Exception:
+                    continue
+        if start_date and end_date:
+            duration = (end_date - start_date).days / 365.25
+            return max(duration, 0)
+        return 0.0
+
+    def analyze_experience(self, resume_text: str, job_description: str, required_skills: List[str], job_titles: List[str]) -> Tuple[float, float]:
+        experience_section = self.extract_experience_section(resume_text)
+        if not experience_section:
+            return 0.0, 0.0
+        experiences = self.parse_individual_experiences(experience_section)
+        relevant_experience_duration = 0.0
+        total_relevance_score = 0.0
+        for exp in experiences:
+            exp_desc = exp['description']
+            relevance_score = self.calculate_experience_relevance(exp_desc, required_skills, job_titles, job_description)
+            duration = self.extract_experience_duration(exp_desc)
+            relevant_experience_duration += duration * relevance_score
+            total_relevance_score += relevance_score
+        average_relevance_score = total_relevance_score / len(experiences) if experiences else 0.0
+        return relevant_experience_duration, average_relevance_score
+
+    def analyze_total_experience(self, resume_text: str) -> float:
+        experience_years = 0.0
+        patterns = [
+            r'(\d+)\+?\s+years of experience',
+            r'over\s+(\d+)\s+years',
+            r'(\d+)\s+years\'\s+experience',
+            r'experience of\s+(\d+)\s+years',
+            r'(\d+)-year experience',
+            r'experience\s+spanning\s+(\d+)\s+years'
+        ]
+        for pattern in patterns:
+            matches = re.findall(pattern, resume_text, re.IGNORECASE)
+            for match in matches:
+                experience_years = max(experience_years, float(match))
+        return experience_years
+
+    def rank_resume(self, primary_skills: List[str], secondary_skills: List[str], total_experience: float,
+                    relevant_experience_duration: float, average_experience_relevance: float,
+                    relevant_project_count: int, average_project_relevance: float) -> float:
+        score = 0.0
+        score += len(primary_skills) * 5
+        score += len(secondary_skills) * 3
+        score += total_experience * 1.5
+        score += relevant_experience_duration * average_experience_relevance * 5
+        score += relevant_project_count * average_project_relevance * 2
+        return score
+
+    def analyze_resume(self, resume_file: str, job_description: str) -> Dict[str, any]:
+        job_titles, job_description_skills = self.extract_job_title_and_primary_skills(job_description)
+        if not job_titles:
+            logging.warning("No job titles found in job description.")
+            return {}
+        required_skills = job_description_skills
+        resume_text = self.extract_text_from_pdf(resume_file)
+        resume_text = self.preprocess_text(resume_text)
+        resume_skills = self.extract_all_skills(resume_text)
+        primary_skills = self.match_primary_skills(resume_skills, required_skills)
+        secondary_skills = self.extract_secondary_skills(resume_skills, primary_skills, job_titles)
+        total_experience_years = self.analyze_total_experience(resume_text)
+        relevant_experience_duration, average_experience_relevance = self.analyze_experience(
+            resume_text, job_description, required_skills, job_titles
+        )
+        relevant_project_count, average_project_relevance = self.analyze_projects(
+            resume_text, job_description, required_skills
+        )
+        score = self.rank_resume(
+            primary_skills, secondary_skills, total_experience_years,
+            relevant_experience_duration, average_experience_relevance,
+            relevant_project_count, average_project_relevance
+        )
+        result = {
+            "Resume Name": os.path.basename(resume_file),
+            "Job Titles": job_titles,
+            "Primary Skills": primary_skills,
+            "Secondary Skills": secondary_skills,
+            "Total Experience (Years)": total_experience_years,
+            "Relevant Experience Duration (Years)": relevant_experience_duration,
+            "Average Experience Relevance": average_experience_relevance,
+            "Relevant Projects": relevant_project_count,
+            "Average Project Relevance": average_project_relevance,
+            "Score": score
+        }
+        return result
+
+    def analyze_resumes(self, resume_files: List[str], job_description: str) -> List[Dict[str, any]]:
+        results = []
+        for resume_file in resume_files:
+            result = self.analyze_resume(resume_file, job_description)
+            if result:
+                results.append(result)
+        results.sort(key=lambda x: x['Score'], reverse=True)
+        return results
+
+    def generate_ranking_report(self, analysis_results):
+        pdf = FPDF()
+        pdf.add_page()
+        pdf.set_font("Arial", size=12)
+        pdf.cell(200, 10, txt="Resume Analysis Report", ln=True, align="C")
+        pdf.ln(10)
+        for result in analysis_results:
+            pdf.cell(200, 10, txt=f"Resume Name: {result['Resume Name']}", ln=True)
+            pdf.cell(200, 10, txt=f"Job Titles: {', '.join(result['Job Titles'])}", ln=True)
+            pdf.cell(200, 10, txt=f"Primary Skills: {', '.join(result['Primary Skills'])}", ln=True)
+            pdf.cell(200, 10, txt=f"Secondary Skills: {', '.join(result['Secondary Skills'])}", ln=True)
+            pdf.cell(200, 10, txt=f"Total Experience (Years): {result['Total Experience (Years)']}", ln=True)
+            pdf.cell(200, 10, txt=f"Relevant Experience Duration (Years): {result['Relevant Experience Duration (Years)']:.2f}", ln=True)
+            pdf.cell(200, 10, txt=f"Average Experience Relevance: {result['Average Experience Relevance']:.2f}", ln=True)
+            pdf.cell(200, 10, txt=f"Relevant Projects: {result['Relevant Projects']}", ln=True)
+            pdf.cell(200, 10, txt=f"Average Project Relevance: {result['Average Project Relevance']:.2f}", ln=True)
+            pdf.cell(200, 10, txt=f"Score: {result['Score']:.2f}", ln=True)
+            pdf.cell(200, 10, txt="-" * 50, ln=True)
+        with tempfile.NamedTemporaryFile(delete=False, suffix=".pdf") as tmp_file:
+            pdf_output_path = tmp_file.name
+        pdf.output(pdf_output_path)
+        return pdf_output_path
diff --git a/AI Resume Analyzer/Resume Analyzer Sentence Transformers/utils.py b/AI Resume Analyzer/Resume Analyzer Sentence Transformers/utils.py
new file mode 100644
index 000000000..c37eca4ac
--- /dev/null
+++ b/AI Resume Analyzer/Resume Analyzer Sentence Transformers/utils.py
@@ -0,0 +1,12 @@
+
+import streamlit as st
+import spacy
+from sentence_transformers import SentenceTransformer
+
+@st.cache_resource
+def load_nlp_model():
+    return spacy.load("en_core_web_sm")
+
+@st.cache_resource
+def load_transformer_model():
+    return SentenceTransformer('all-MiniLM-L6-v2')
diff --git a/AI Resume Analyzer/Resume Analyzer With Gemini/Resume Application Tracking System(ATS)/README.md b/AI Resume Analyzer/Resume Analyzer With Gemini/Resume Application Tracking System(ATS)/README.md
new file mode 100644
index 000000000..66db69324
--- /dev/null
+++ b/AI Resume Analyzer/Resume Analyzer With Gemini/Resume Application Tracking System(ATS)/README.md
@@ -0,0 +1,82 @@
+
+
+# ATS Resume Expert
+
+ATS Resume Expert is a Streamlit-based web application that uses Google's Generative AI (Gemini) model to analyze resumes in PDF format against specific job descriptions. The application evaluates the resume content, providing insights and match percentages to help users understand how well their resume aligns with job requirements.
+
+## Features
+- **Resume Analysis**: Upload a PDF resume, and the AI evaluates it based on a provided job description.
+- **Job Match Scoring**: The AI provides a match percentage between the resume and job description, highlighting strengths, weaknesses, missing keywords, and more.
+- **Streamlit UI**: User-friendly interface with text input for the job description and resume upload capability.
+
+## Getting Started
+
+### Prerequisites
+1. **Python**: Make sure you have Python 3.7+ installed.
+2. **Google API Key**: This project requires access to Google Generative AI's Gemini model. Obtain an API key and configure it in the environment.
+
+### Installation
+1. Clone this repository:
+   ```bash
+   git clone https://github.com/your-username/ATS-Resume-Expert.git
+   cd ATS-Resume-Expert
+   ```
+2. Install the required packages:
+   ```bash
+   pip install -r requirements.txt
+   ```
+   Here is a sample `requirements.txt`:
+   ```
+   streamlit
+   python-dotenv
+   pdf2image
+   pillow
+   google-generativeai
+   ```
+
+3. Install **poppler** (required for `pdf2image`):
+   - **Windows**: [Download Poppler for Windows](http://blog.alivate.com.au/poppler-windows/), extract, and add `poppler/bin` to your PATH.
+   - **Linux**: Run `sudo apt install poppler-utils`.
+   - **macOS**: Run `brew install poppler`.
+
+4. Create a `.env` file in the project root with your Google API key:
+   ```
+   GOOGLE_API_KEY=your_google_api_key
+   ```
+
+### Running the App
+1. Start the Streamlit app:
+   ```bash
+   streamlit run app.py
+   ```
+
+2. Open the provided local URL to access the ATS Resume Expert app.
+
+## Usage
+1. **Job Description**: Enter the job description in the text area.
+2. **Resume Upload**: Upload a PDF version of the resume.
+3. **Analyze Resume**:
+   - Click **Tell Me About the Resume** to get an evaluation of the resume based on job requirements.
+   - Click **Percentage Match** to receive a match score along with suggestions for improvement.
+
+## File Structure
+- **app.py**: Main application code.
+- **README.md**: Documentation for the app.
+- **requirements.txt**: List of required Python libraries.
+- **.env**: Environment file for API keys (not included in repository).
+
+## Troubleshooting
+1. **Poppler Installation**: Ensure Poppler is installed and accessible in your PATH if you encounter PDF processing errors.
+2. **API Errors**: Check your Google API key and usage limits if there are issues with the AI model responses.
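+
+If PDF conversion still fails after installing Poppler, the minimal sketch below confirms that `pdf2image` can find it (`sample.pdf` stands in for any local PDF you have on hand):
+
+```python
+# Converts only the first page; pdf2image typically raises PDFInfoNotInstalledError when Poppler is not on PATH
+from pdf2image import convert_from_path
+
+pages = convert_from_path("sample.pdf", first_page=1, last_page=1)
+print(f"Converted {len(pages)} page(s); first page size: {pages[0].size}")
+```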
+
+
diff --git a/AI Resume Analyzer/Resume Analyzer With Gemini/Resume Application Tracking System(ATS)/app.py b/AI Resume Analyzer/Resume Analyzer With Gemini/Resume Application Tracking System(ATS)/app.py
new file mode 100644
index 000000000..d4ba87c96
--- /dev/null
+++ b/AI Resume Analyzer/Resume Analyzer With Gemini/Resume Application Tracking System(ATS)/app.py
@@ -0,0 +1,84 @@
+import base64
+import io
+import os
+
+import streamlit as st
+import pdf2image
+import google.generativeai as genai
+from dotenv import load_dotenv
+
+# Load GOOGLE_API_KEY from the .env file described in the README
+load_dotenv()
+genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
+
+def get_gemini_response(input_text, pdf_content, prompt):
+    model = genai.GenerativeModel('gemini-pro-vision')
+    response = model.generate_content([input_text, pdf_content[0], prompt])
+    return response.text
+
+def input_pdf_setup(uploaded_file):
+    if uploaded_file is not None:
+        # Convert the first page of the PDF to an image
+        images = pdf2image.convert_from_bytes(uploaded_file.read())
+        first_page = images[0]
+
+        # Convert to JPEG bytes
+        img_byte_arr = io.BytesIO()
+        first_page.save(img_byte_arr, format='JPEG')
+        img_byte_arr = img_byte_arr.getvalue()
+
+        pdf_parts = [
+            {
+                "mime_type": "image/jpeg",
+                "data": base64.b64encode(img_byte_arr).decode()  # encode to base64
+            }
+        ]
+        return pdf_parts
+    else:
+        raise FileNotFoundError("No file uploaded")
+
+## Streamlit App
+
+st.set_page_config(page_title="ATS Resume Expert")
+st.header("ATS Tracking System")
+input_text = st.text_area("Job Description: ", key="input")
+uploaded_file = st.file_uploader("Upload your resume (PDF)...", type=["pdf"])
+
+if uploaded_file is not None:
+    st.write("PDF Uploaded Successfully")
+
+submit1 = st.button("Tell Me About the Resume")
+
+# submit2 = st.button("How Can I Improve My Skills")
+
+submit3 = st.button("Percentage Match")
+
+input_prompt1 = """
+You are an experienced Technical Human Resource Manager. Your task is to review the provided resume against the job description.
+Please share your professional evaluation on whether the candidate's profile aligns with the role.
+Highlight the strengths and weaknesses of the applicant in relation to the specified job requirements.
+"""
+
+input_prompt3 = """
+You are a skilled ATS (Applicant Tracking System) scanner with a deep understanding of data science and ATS functionality.
+Your task is to evaluate the resume against the provided job description and give the percentage match.
+Output the percentage first, then the missing keywords, and finally your overall thoughts.
+"""
+
+if submit1:
+    if uploaded_file is not None:
+        pdf_content = input_pdf_setup(uploaded_file)
+        response = get_gemini_response(input_prompt1, pdf_content, input_text)
+        st.subheader("The Response is")
+        st.write(response)
+    else:
+        st.write("Please upload the resume")
+
+elif submit3:
+    if uploaded_file is not None:
+        pdf_content = input_pdf_setup(uploaded_file)
+        response = get_gemini_response(input_prompt3, pdf_content, input_text)
+        st.subheader("The Response is")
+        st.write(response)
+    else:
+        st.write("Please upload the resume")
diff --git a/AI Resume Analyzer/Resume Analyzer With Gemini/Resume Application Tracking System(ATS)/output/ats percentage.pdf b/AI Resume Analyzer/Resume Analyzer With Gemini/Resume Application Tracking System(ATS)/output/ats percentage.pdf
new file mode 100644
index 000000000..f6035a7ed
Binary files /dev/null and b/AI Resume Analyzer/Resume Analyzer With Gemini/Resume Application Tracking System(ATS)/output/ats percentage.pdf differ
diff --git a/AI Resume Analyzer/Resume Analyzer With Gemini/Resume Application Tracking System(ATS)/output/ats.pdf b/AI Resume Analyzer/Resume Analyzer With Gemini/Resume Application Tracking System(ATS)/output/ats.pdf
new file mode 100644
index 000000000..d0c0e63ed
Binary files /dev/null and b/AI Resume Analyzer/Resume Analyzer With Gemini/Resume Application Tracking System(ATS)/output/ats.pdf differ
diff --git a/AI Resume Analyzer/Resume Analyzer With Gemini/Resume Application Tracking System(ATS)/requirements.txt b/AI Resume Analyzer/Resume Analyzer With Gemini/Resume Application Tracking System(ATS)/requirements.txt
new file mode 100644
index 000000000..872492ae6
--- /dev/null
+++ b/AI Resume Analyzer/Resume Analyzer With Gemini/Resume Application Tracking System(ATS)/requirements.txt
@@ -0,0 +1,7 @@
+streamlit
+google-generativeai
+python-dotenv
+langchain
+PyPDF2
+faiss-cpu
+langchain_google_genai
diff --git a/AI Resume Analyzer/Resume Analyzer With Gemini/ai_powered_resume_analyzer.py b/AI Resume Analyzer/Resume Analyzer With Gemini/ai_powered_resume_analyzer.py
new file mode 100644
index 000000000..c549bede6
--- /dev/null
+++ b/AI Resume Analyzer/Resume Analyzer With Gemini/ai_powered_resume_analyzer.py
@@ -0,0 +1,127 @@
+from flask import Flask, jsonify, request
+from flask_cors import CORS
+import config
+import google.generativeai as genai
+from PyPDF2 import PdfReader
+from langchain.text_splitter import RecursiveCharacterTextSplitter
+
+# Configure generative AI model
+genai.configure(api_key=config.API_KEY)
+generation_config = {
+    "temperature": 1,
+    "top_p": 0.95,
+    "top_k": 64,
+    "max_output_tokens": 8192,
+    "response_mime_type": "text/plain",
+}
+
+model = genai.GenerativeModel(
+    model_name="gemini-1.5-flash",
+    generation_config=generation_config,
+)
+
+app = Flask(__name__)
+CORS(app)
+
+
+# Function to interact with Gemini AI
+def gemini_generate_response(prompt):
+    chat_session = model.start_chat(history=[{"role": "user", "parts": [prompt]}])
+    response = chat_session.send_message(prompt)
+    return response.text
+
+
+# 1. PDF Parsing
+def parse_pdf(file):
+    reader = PdfReader(file)
+    text = ""
+    for page in reader.pages:
+        text += page.extract_text() or ""  # extract_text() can return None for image-only pages
+    return text
+
+
+# 2. Split Text into Chunks
+def split_text_into_chunks(text):
+    text_splitter = RecursiveCharacterTextSplitter(chunk_size=700, chunk_overlap=200, length_function=len)
+    return text_splitter.split_text(text=text)
+
+
+# 3. Summarize Resume
+def resume_summary(chunks):
+    prompt = f'''
+    Provide a detailed summarization of the resume below:
+    """""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
+    {chunks}
+    """""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
+    '''
+    return gemini_generate_response(prompt)
+
+
+# 4. Analyze Strengths
+def resume_strength(chunks):
+    prompt = f'''
+    Analyze the strengths of the resume below and provide detailed insights:
+    """""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
+    {chunks}
+    """""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
+    '''
+    return gemini_generate_response(prompt)
+
+
+# 5. Analyze Weaknesses
+def resume_weakness(chunks):
+    prompt = f'''
+    Analyze the weaknesses of the resume below and suggest improvements to make it better:
+    """""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
+    {chunks}
+    """""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
+    '''
+    return gemini_generate_response(prompt)
+
+
+# 6. Job Title Suggestions
+def job_title_suggestion(chunks):
+    prompt = f'''
+    Based on the resume content below, suggest suitable job roles:
+    """""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
+    {chunks}
+    """""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
+    '''
+    return gemini_generate_response(prompt)
+
+
+# Flask Route to Process Resume
+@app.route('/analyze_resume', methods=['POST'])
+def analyze_resume():
+    if 'fileUploaded' not in request.files:
+        return jsonify({'error': 'No file uploaded'}), 400
+
+    file = request.files["fileUploaded"]
+
+    try:
+        # Parse and process resume PDF
+        resume_text = parse_pdf(file)
+        chunks = split_text_into_chunks(resume_text)
+
+        # Generate responses for different analyses
+        summary = resume_summary(chunks)
+        strengths = resume_strength(chunks)
+        weaknesses = resume_weakness(chunks)
+        job_titles = job_title_suggestion(chunks)
+
+        # Prepare JSON response
+        response = {
+            "summary": summary,
+            "strengths": strengths,
+            "weaknesses": weaknesses,
+            "job_titles": job_titles
+        }
+
+        return jsonify(response)
+
+    except Exception as e:
+        return jsonify({'error': f'An error occurred: {str(e)}'}), 500
+
+
+if __name__ == '__main__':
+    app.run(debug=True)
diff --git a/AI Resume Analyzer/Resume Analyzer With Gemini/readme.md b/AI Resume Analyzer/Resume Analyzer With Gemini/readme.md
new file mode 100644
index 000000000..841b70bdd
--- /dev/null
+++ b/AI Resume Analyzer/Resume Analyzer With Gemini/readme.md
@@ -0,0 +1,137 @@
+
+# Resume Analyzer
+## Overview
+
+The **Resume Analyzer** is a web application that allows users to upload a resume PDF and get detailed insights, including a summary, strengths, weaknesses, and suggested job titles. The application uses **Google's Gemini Generative AI** for processing and analyzing the content of the resume. The back-end is powered by Flask, and the front-end uses **Streamlit** for a smooth user interface.
+
+## Features
+
+- **Upload Resume PDF**: Allows users to upload a resume file in PDF format.
+- **Analyze Resume**: Automatically analyzes the uploaded resume to generate insights on:
+  - Resume Summary
+  - Resume Strengths
+  - Resume Weaknesses
+  - Suggested Job Titles
+- **Interactive UI**: The user can select between different analysis options (Summary, Strengths, Weaknesses, and Suggested Job Titles) using a radio button.
+- **Generative AI Integration**: Utilizes **Gemini Generative AI** for analyzing and generating insights based on the resume content.
+
+## Requirements
+
+Make sure you have the following libraries installed to run this application:
+
+1. Flask
+2. Flask-CORS
+3. Streamlit
+4. PyPDF2
+5. Google Generative AI (Gemini)
+6. LangChain
+
+You can install all required libraries by running:
+
+```bash
+pip install -r requirements.txt
+```
+
+## Installation
+
+### 1. Set Up Flask Backend
+
+First, you need to set up the Flask backend that handles the resume analysis and interacts with the Gemini AI.
+
+1. Clone or download the project files.
+2. Set up a virtual environment and activate it.
+3. Install the dependencies using the command:
+
+   ```bash
+   pip install -r requirements.txt
+   ```
+
+4. Set up your **Google Generative AI API Key** in a `config.py` file. Make sure to include the key:
+
+   ```python
+   API_KEY = 'your-api-key-here'
+   ```
+
+5. Run the Flask backend server:
+
+   ```bash
+   python ai_powered_resume_analyzer.py
+   ```
+
+The server should be running at `http://localhost:5000`.
+
+### 2. Set Up Streamlit Frontend
+
+1. After setting up the Flask backend, make sure the frontend dependencies from the same `requirements.txt` are installed:
+
+   ```bash
+   pip install -r requirements.txt
+   ```
+
+2. To start the Streamlit app, run:
+
+   ```bash
+   streamlit run stramlit.py
+   ```
+
+The Streamlit app will start, and you can visit it in your browser (usually `http://localhost:8501`).
+
+## Usage
+
+1. **Upload Resume**: Click the "Choose a Resume PDF file" button to upload your resume in PDF format.
+2. **Analyze Resume**: After uploading, click the "Analyze Resume" button. The app will process the resume and generate insights.
+3. **View Results**: Once the analysis is complete, you can choose from the available analysis options:
+   - Summary
+   - Strengths
+   - Weaknesses
+   - Suggested Job Titles
+
+## Example
+
+- **Summary**: A detailed overview of the resume content.
+- **Strengths**: Insights into the strengths based on the content of the resume.
+- **Weaknesses**: Suggestions to improve weak areas in the resume.
+- **Suggested Job Titles**: Potential job roles based on the resume content.
+
+## Troubleshooting
+
+- If the app is not working as expected, ensure that both the Flask backend and Streamlit frontend are running.
+- Make sure the **API Key** for Google Generative AI is set correctly in the `config.py` file.
+
+## Contributing
+
+Feel free to fork the project and submit pull requests for any enhancements or bug fixes. All contributions are welcome!
+
+## License
+
+This project is open-source and available under the MIT License.
+
+---
+
+## Credits
+
+- **Google Generative AI**: For processing and generating responses based on the resume content.
+- **Streamlit**: For building the user interface.
+- **Flask**: For building the backend API.
+- **PyPDF2**: For parsing the PDF resume.
+- **LangChain**: For text splitting and chunking.
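+
+## Testing the API Directly
+
+If you want to exercise the Flask backend without the Streamlit frontend, you can post a PDF straight to the `/analyze_resume` endpoint. A minimal sketch using `requests` (it assumes the backend is running locally on port 5000 and that a `resume.pdf` file exists in the current directory):
+
+```python
+import requests
+
+# Post the resume as multipart form data under the "fileUploaded" field,
+# which is the field name the Flask route expects.
+with open("resume.pdf", "rb") as f:
+    response = requests.post(
+        "http://localhost:5000/analyze_resume",
+        files={"fileUploaded": f},
+    )
+
+data = response.json()
+print(data["summary"])  # the response also contains strengths, weaknesses, and job_titles
+```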
diff --git a/AI Resume Analyzer/Resume Analyzer With Gemini/requirements.txt b/AI Resume Analyzer/Resume Analyzer With Gemini/requirements.txt
new file mode 100644
index 000000000..872492ae6
--- /dev/null
+++ b/AI Resume Analyzer/Resume Analyzer With Gemini/requirements.txt
@@ -0,0 +1,7 @@
+streamlit
+google-generativeai
+python-dotenv
+langchain
+PyPDF2
+faiss-cpu
+langchain_google_genai
diff --git a/AI Resume Analyzer/Resume Analyzer With Gemini/result/resumeanalyzer.pdf b/AI Resume Analyzer/Resume Analyzer With Gemini/result/resumeanalyzer.pdf
new file mode 100644
index 000000000..1cd11900b
Binary files /dev/null and b/AI Resume Analyzer/Resume Analyzer With Gemini/result/resumeanalyzer.pdf differ
diff --git a/AI Resume Analyzer/Resume Analyzer With Gemini/stramlit.py b/AI Resume Analyzer/Resume Analyzer With Gemini/stramlit.py
new file mode 100644
index 000000000..8317a0344
--- /dev/null
+++ b/AI Resume Analyzer/Resume Analyzer With Gemini/stramlit.py
@@ -0,0 +1,62 @@
+import streamlit as st
+import requests
+
+# Flask API endpoint for the Resume Analyzer
+FLASK_API_URL = "http://localhost:5000/analyze_resume"
+
+st.title("Resume Analyzer")
+st.write("Upload a resume PDF and view insights by selecting from different analysis options.")
+
+# File uploader for the PDF resume
+uploaded_resume = st.file_uploader("Choose a Resume PDF file", type="pdf")
+
+# Check if the analysis has been done (if results are in session state)
+if "response_data" in st.session_state:
+    response_data = st.session_state.response_data
+else:
+    response_data = None
+
+# Submit button to analyze the resume
+if st.button("Analyze Resume"):
+    if uploaded_resume:
+        files = {"fileUploaded": uploaded_resume}
+
+        # Send the resume file to the Flask API
+        response = requests.post(FLASK_API_URL, files=files)
+
+        if response.status_code == 200:
+            response_data = response.json()
+            # Save the response data in session state
+            st.session_state.response_data = response_data
+            st.success("Resume analyzed successfully!")
+        else:
+            st.error("Error: Could not retrieve the response from the server.")
+    else:
+        st.warning("Please upload a resume PDF.")
+
+# Only show analysis options if the response is available
+if response_data:
+    # Radio button for selecting analysis type
+    selected_analysis = st.radio(
+        "Select an Analysis to View",
+        ["Summary", "Strengths", "Weaknesses", "Suggested Job Titles"]
+    )
+
+    # Display analysis based on the selected option
+    if selected_analysis == "Summary":
+        st.subheader("Summary")
+        st.write(response_data.get("summary", "No summary available."))
+
+    elif selected_analysis == "Strengths":
+        st.subheader("Strengths")
+        st.write(response_data.get("strengths", "No strengths detected."))
+
+    elif selected_analysis == "Weaknesses":
+        st.subheader("Weaknesses")
+        st.write(response_data.get("weaknesses", "No weaknesses detected."))
+
+    elif selected_analysis == "Suggested Job Titles":
+        st.subheader("Suggested Job Titles")
+        st.write(response_data.get("job_titles", "No job title suggestions available."))
+else:
+    st.info("Please upload and analyze a resume first.")