Building a Personal Career Coach AI Agent with OpenAI GPT-4o

The whole idea here is to build an AI agent that automates the preparation of customized resumes and cover letters for LinkedIn job applications, and also prepares the user for interviews.

Pre-requisite:

Multiple copies of one's own resume, preferably spanning multiple stages of one's career

Strength Finder Results

Below is the complete Python script for the Personal Career Coach AI Agent:

[Note: need to input your own API key and indicate which Open AI model to use]

from flask import Flask, request, render_template, jsonify

import openai

import json

import base64

import requests

from bs4 import BeautifulSoup

from PyPDF2 import PdfReader

import docx

import pandas as pd

import os

import logging

app = Flask(__name__)

# NOTE(review): never hard-code API keys in source. Read from the environment,
# keeping the original placeholder as the fallback for local experimentation.
openai.api_key = os.environ.get('OPENAI_API_KEY', 'Your Open AI API keys')

# DEBUG level so the logging.debug(...) calls in the routes actually emit.
logging.basicConfig(level=logging.DEBUG)

# Step 1: Serve the HTML Form
@app.route('/')
def upload_form():
    """Render the profile-upload page (templates/upload_form.html)."""
    return render_template('upload_form.html')

@app.route('/upload', methods=['POST'])
def upload_file():
    """Handle the uploaded resume/strengths files and show a profile summary.

    Extracts text from every uploaded file, has the model analyze it, and
    renders the summary page. The analyzed profile is JSON-serialized and
    base64-encoded so it can be carried through the HTML form to the
    /preferences step without server-side session state.
    """
    resumes = request.files.getlist('resumes')
    strengths = request.files.getlist('strengths')

    resume_texts = [extract_text(resume) for resume in resumes]
    strengths_texts = [extract_text(strength) for strength in strengths]

    profile = analyze_profile(resume_texts, strengths_texts)
    summary = generate_summary(profile)

    profile_json = json.dumps(profile, ensure_ascii=False)
    profile_encoded = base64.b64encode(profile_json.encode()).decode()
    return render_template('profile_summary.html', summary=summary,
                           profile_encoded=profile_encoded)

# Extract text from different file types
def extract_text(file):
    """Return plain text from an uploaded file, dispatching on its extension.

    PDF, DOCX and XLSX get dedicated extractors; anything else is treated
    as a plain-text file. The comparison is done on a lowercased filename
    so '.PDF', '.Docx' etc. are handled too (the original check was
    case-sensitive).
    """
    filename = file.filename.lower()
    if filename.endswith('.pdf'):
        return extract_text_from_pdf(file)
    elif filename.endswith('.docx'):
        return extract_text_from_docx(file)
    elif filename.endswith('.xlsx'):
        return extract_text_from_excel(file)
    else:
        return extract_text_from_text(file)

def extract_text_from_text(file):
    """Decode an uploaded text file as UTF-8, falling back to Latin-1.

    The bytes are read ONCE up front: the original code called file.read()
    again inside the except branch, but by then the stream was already
    consumed, so the Latin-1 fallback always returned an empty string.
    """
    data = file.read()
    try:
        return data.decode('utf-8')
    except UnicodeDecodeError:
        return data.decode('latin1')  # Latin-1 maps all 256 byte values, so this never fails

def extract_text_from_pdf(file):
    """Concatenate the extracted text of every page in the PDF.

    PyPDF2's page.extract_text() can return None for pages without a text
    layer (e.g. scanned images), so guard with `or ""` to avoid a TypeError
    during concatenation.
    """
    pdf_reader = PdfReader(file)
    return "".join(page.extract_text() or "" for page in pdf_reader.pages)

def extract_text_from_docx(file):
    """Join the text of all paragraphs in a .docx document with newlines."""
    doc = docx.Document(file)
    return "\n".join(para.text for para in doc.paragraphs)

def extract_text_from_excel(file):
    """Render an .xlsx workbook's first sheet as a plain-text table."""
    df = pd.read_excel(file)
    return df.to_string()

# Analyze User Profile
def analyze_profile(resume_texts, strengths_texts):
    """Ask the model to summarize each resume and strengths document.

    Returns {'resume': [...], 'strengths': [...]} with one analysis string
    per input text. Note: one API call per document.
    """
    resume_analysis = [
        call_openai(f"Analyze this resume and highlight key points in paragraph and bullet points:\n{resume_text}")
        for resume_text in resume_texts
    ]
    strengths_analysis = [
        call_openai(f"Analyze these strengths and highlight key points in paragraph and bullet points:\n{strength_text}")
        for strength_text in strengths_texts
    ]
    return {'resume': resume_analysis, 'strengths': strengths_analysis}

# Generate Summary
def generate_summary(profile):
    """Request a JSON profile summary from the model and parse it.

    Asks for an object with 'paragraph' and 'bullet_points' fields, runs
    fix_json_format over the raw response (models often wrap JSON in
    markdown fences or smart quotes), and raises ValueError if the cleaned
    response still fails to parse.
    """
    summary_prompt = (
        "Generate a brief summary highlighting key points from this profile. "
        "Return a JSON object with two fields: 'paragraph' for a paragraph summary "
        "and 'bullet_points' for bullet points. "
        "Ensure the JSON is properly formatted and contains both fields.\n"
        f"Resume Highlights: {profile['resume']}\n"
        f"Strengths Highlights: {profile['strengths']}"
    )
    summary_response = call_openai(summary_prompt)
    summary_response = fix_json_format(summary_response)
    try:
        summary = json.loads(summary_response)
    except json.JSONDecodeError:
        raise ValueError(f"Invalid JSON response: {summary_response}")
    return summary

def call_openai(prompt):
    """Send a single-turn chat request and return the stripped reply text.

    NOTE(review): openai.ChatCompletion is the legacy openai<1.0 interface;
    with openai>=1.0 this must become client.chat.completions.create(...).
    Pin the SDK version or migrate — confirm which SDK is installed.
    """
    response = openai.ChatCompletion.create(
        model="gpt-4o",  # Ensure you are using a chat-capable model
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": prompt},
        ],
    )
    response_text = response['choices'][0]['message']['content'].strip()
    return response_text

def fix_json_format(json_str):
    """Best-effort cleanup of an LLM response so json.loads can parse it.

    Removes newlines/carriage returns, strips markdown code fences
    (```json ... ```), and replaces typographic "smart" double quotes with
    standard ASCII double quotes. The pasted original was itself garbled by
    smart quotes; this restores the intended replacements.
    """
    json_str = json_str.replace("\n", "").replace("\r", "")
    # Strip markdown code fences the model may wrap the JSON in.
    json_str = json_str.replace("```json", "").replace("```", "")
    # Replace fancy quotes with standard quotes.
    json_str = json_str.replace("\u201c", '"').replace("\u201d", '"')
    return json_str

# Step 2: Get Job Preferences

@app.route(‘/preferences’, methods=[‘POST’])

def get_preferences():

if ‘profile’ not in request.form:

return jsonify({‘error’: ‘Profile data missing’}), 400

job_title = request.form[‘job_title’]

location = request.form[‘location’]

profile_encoded = request.form[‘profile’]

profile_json = base64.b64decode(profile_encoded).decode()

profile = json.loads(profile_json)

jobs = fetch_linkedin_jobs(job_title, location)

logging.debug(f”Fetched Jobs: {jobs}”)  # Debug statement to check fetched jobs

# Ensure at least 3 jobs are available

if len(jobs) < 3: jobs += generate_dummy_jobs(3 - len(jobs)) ranked_jobs = jobs[:3]  # Take the top 3 jobs logging.debug(f"Ranked Jobs: {ranked_jobs}")  # Debug statement to check ranked jobs documents = generate_documents(ranked_jobs, profile) logging.debug(f"Generated Documents: {documents}")  # Debug statement to check generated documents return render_template('job_recommendations.html', jobs=ranked_jobs, documents=documents) # Fetch job listings from LinkedIn def fetch_linkedin_jobs(job_title, location): url = f"https://www.linkedin.com/jobs/search?keywords={job_title}&location={location}&trk=public_jobs_jobs-search-bar_search-submit&position=1&pageNum=0" headers = {'User-Agent': 'Mozilla/5.0'} response = requests.get(url, headers=headers) soup = BeautifulSoup(response.text, 'html.parser') jobs = [] for job_card in soup.select('.result-card__contents'): title = job_card.select_one('.result-card__title').text.strip() company = job_card.select_one('.result-card__subtitle').text.strip() loc = job_card.select_one('.job-result-card__location').text.strip() description = job_card.select_one('.job-result-card__snippet').text.strip() if job_card.select_one('.job-result-card__snippet') else 'No description available' jobs.append({'title': title, 'company': company, 'location': loc, 'description': description}) return jobs def generate_dummy_jobs(count): dummy_jobs = [] for i in range(count): dummy_jobs.append({ 'title': f'Dummy Job {i+1}', 'company': 'Dummy Company', 'location': 'Dummy Location', 'description': 'This is a dummy job description for testing purposes.' 
}) return dummy_jobs # Filter and Rank Jobs def filter_and_rank_jobs(jobs, profile): resume_keywords = extract_keywords(profile['resume']) strengths_keywords = extract_keywords(profile['strengths']) profile_keywords = resume_keywords + strengths_keywords for job in jobs: job['relevance'] = calculate_relevance(job['description'], profile_keywords) return sorted(jobs, key=lambda x: x['relevance'], reverse=True) def extract_keywords(texts): keywords = [] for text in texts: keywords += call_openai(f"Extract keywords from the following text:\n{text}").split(', ') return keywords def calculate_relevance(description, keywords): relevance = 0 for keyword in keywords: if keyword.lower() in description.lower(): relevance += 1 return relevance # Generate Customized Resume and Cover Letter def generate_documents(selected_jobs, profile): documents = {} for job in selected_jobs: resume = call_openai(f"Generate a customized resume for the job: {job} and profile: {profile} in paragraph and bullet points") cover_letter = call_openai(f"Generate a customized cover letter for the job: {job} and profile: {profile} in paragraph and bullet points") interview_prep = call_openai(f"Generate interview preparation tips for the job: {job} and profile: {profile} in paragraph and bullet points") documents[job['title']] = { 'resume': resume, 'cover_letter': cover_letter, 'interview_prep': interview_prep } return documents # Chat interface for further questions @app.route('/chat', methods=['POST']) def chat(): user_message = request.form['message'] response = call_openai(user_message) return jsonify({'response': response}) if __name__ == "__main__": app.run(debug=True) 3 html files, namely upload_form.html, profile_summary.html and job_recommendations.html should be placed under the /templates folder where the above python file located. HTML Templates upload_form.html

Upload Profile

Upload Your Profile

profile_summary.html

Profile Summary

Profile Summary

Summary in Paragraph Format:

Summary in Bullet Points:

    {% for point in summary.bullet_points %}

  • {% endfor %}

job_recommendations.html

Top 3 Job Recommendations

Top 3 Job Recommendations

{% for job in jobs %}

Company:

Location:

Description:

Generated Documents

Resume:

 

Cover Letter:

 

Interview Preparation:

 

{% endfor %}

Ask a Question

Once you run the script, you will see the below in the localhost:5000

Upload your own resumes (preferably spanning multiple stages of your career) and your Gallup StrengthsFinder results (normally 5 different core strengths).

It generates the profile summary about the candidate as below:

Enter the relevant job title and location you wish to crawl from LinkedIn; it will rank the top 3 relevant jobs and help prepare a customized resume and cover letter, as well as key highlights for the interview.

Below I use 3 dummy jobs as examples instead of crawling LinkedIn jobs, just for demo purposes (there is room for improvement in handling the crawled jobs, job relevancy ranking and filtering, etc.):

At the end of the UI, there is a chat UI for user to ask any question.

The above is not a perfect solution yet; there is room for improvement. Due to other urgent project tasks, I have to stop here. Anyone interested in improving this with LangChain, crewAI, etc. to eventually build a complete Career Coach AI Agent should feel free to do so.

Related Posts