# process.py
import os
import streamlit as st
import json
import google.generativeai as genai
# API key placeholder - load it from an environment variable (GOOGLE_API_KEY here is an example name) rather than hard-coding it in source
API_KEY = os.getenv("GOOGLE_API_KEY", "")
def fetch_data_from_json(filename):
"""Utility function to fetch data from a given JSON file."""
try:
with open(filename, 'r') as file:
return json.load(file)
    except (FileNotFoundError, json.JSONDecodeError):
        st.error(f"File {filename} is missing or contains invalid JSON. Please ensure it's in the correct path.")
        return None
def app():
st.title('Career Insights and Recommendations')
# Paths to JSON files - adjust these paths as necessary
json_files = {
"core_values": "core_values_responses.json",
"strengths": "strength_responses.json",
"dream_job": "dream_job_info.json",
"strengths2": "dynamic_strength_responses.json",
"preferences": "preferences_sets.json",
"skills_experience": "skills_and_experience_sets.json",
"career_priorities": "career_priorities_data.json",
}
json_files["strengths"] = "strength_responses.json"
merge_json_files("strength_responses.json", "dynamic_strength_responses.json", "strength_responses.json")
comprehensive_data = {}
for key, file_path in json_files.items():
comprehensive_data[key] = fetch_data_from_json(file_path)
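    # Note: fetch_data_from_json returns None for any missing file, so an absent
    # section is serialized as null in the prompt below rather than raising an error.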
# Generate and display a comprehensive analysis based on all aspects
comprehensive_prompt = construct_comprehensive_prompt(comprehensive_data)
st.subheader("Comprehensive Career Analysis")
comprehensive_response_text = call_gemini(comprehensive_prompt)
st.text("Comprehensive API Response:")
st.write(comprehensive_response_text)
# Save the comprehensive response
save_responses("comprehensive_analysis", comprehensive_response_text)
def merge_json_files(file1, file2, output_file):
"""Merge the contents of two JSON files and save the result in another file."""
try:
with open(file1, 'r') as file:
data1 = json.load(file)
with open(file2, 'r') as file:
data2 = json.load(file)
# Ensure data1 and data2 are dictionaries
if not isinstance(data1, dict):
data1 = {}
if not isinstance(data2, dict):
data2 = {}
merged_data = {**data1, **data2}
with open(output_file, 'w') as file:
json.dump(merged_data, file, indent=4)
st.success(f"Merged data saved to {output_file}.")
except FileNotFoundError:
st.error("One or more input files not found. Please ensure they are in the correct path.")
def process_section(section_name, data):
"""
Processes each section individually by constructing a tailored prompt,
calling the Gemini API, and displaying the response.
"""
prompt = construct_prompt(section_name, data)
st.subheader(f"{section_name.replace('_', ' ').title()} Analysis")
response_text = call_gemini(prompt)
st.text(f"{section_name.replace('_', ' ').title()} API Response:")
st.write(response_text)
# Save the response
save_responses(section_name, response_text)
def save_responses(section_name, response_text):
"""Saves the API responses to a JSON file."""
try:
# Attempt to load existing data
with open('gemini_responses.json', 'r') as file:
responses = json.load(file)
except (FileNotFoundError, json.JSONDecodeError):
# If the file does not exist or contains invalid data, start with an empty dictionary
responses = {}
# Update the dictionary with the new response
responses[section_name] = response_text
# Save the updated dictionary back to the file
with open('gemini_responses.json', 'w') as file:
json.dump(responses, file, indent=4)
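# For reference, gemini_responses.json ends up as a flat mapping from section name to
# response text, e.g. {"core_values": "...", "comprehensive_analysis": "..."}.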
def construct_prompt(section_name, data):
"""
Constructs a detailed and tailored prompt for a specific section,
guiding the model to provide insights and recommendations based on that section's data.
"""
prompt_template = {
"career_priorities": "Analyze and evaluate user's current skill level related to these career priorities: {details}.",
"core_values": "Assess how user's current behaviours and skills align with these core values: {details}.",
"strengths": "Evaluate and highlight user's competency levels across these strengths: {details}.",
"dream_job": "Compare user's current skills and experience to the requirements of this dream job: {details}.",
"strengths2": "Summarize how user's friend's/collegs/seniors view user's capabilities based on this feedback: {details}.",
"preferences": "Judge how well user's skills and attributes fit these preferences: {details}.",
"skills_experience": "Assess user's current skill level within this area of expertise: {details}.",
}
# Constructing the tailored prompt
details = json.dumps(data, ensure_ascii=False)
prompt = prompt_template.get(section_name, "Please provide data for analysis.").format(details=details)
return prompt
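# Illustrative example: construct_prompt("core_values", {"value": "growth"}) produces
# 'Assess how the user's current behaviours and skills align with these core values: {"value": "growth"}.'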
def construct_comprehensive_prompt(data):
prompt_parts = [
"Given an individual's career aspirations, core values, strengths, preferences, and skills, provide a comprehensive analysis that identifies key strengths, aligns these with career values, and suggests career paths. Then, recommend the top 5 job descriptions that would be a perfect fit based on the analysis. Here are the details:",
f"Career Priorities: {json.dumps(data['career_priorities'], ensure_ascii=False)}",
f"Core Values: {json.dumps(data['core_values'], ensure_ascii=False)}",
"Rate the user's career priorities out of 100 and provide justification:",
f"Strengths: {json.dumps(data['strengths'], ensure_ascii=False)}",
"Rate the user's strengths out of 100 and provide justification:",
f"Dream Job Information: {json.dumps(data['dream_job'], ensure_ascii=False)}",
"Rate the user's dream job alignment out of 100 and provide justification:",
f"Preferences: {json.dumps(data['preferences'], ensure_ascii=False)}",
"Rate the user's preferences out of 100 and provide justification:",
f"Skills and Experience: {json.dumps(data['skills_experience'], ensure_ascii=False)}",
"Rate the user's skills and experience out of 100 and provide justification:",
"Based on the analysis, suggest 2-3 areas for mindful upskilling and professional development for the user, along with relevant certifications that would help strengthen their profile:",
"Consider the following in the further analysis:",
"- Given the strengths and dream job aspirations, what are the top industries or roles that would be a perfect fit?",
"- Based on the preferences, what work environment or company culture would be most suitable?",
"Conclude with recommendations for the top 5 open job descriptions in India aligned to the user's goals, including any specific industries or companies where these roles may be in demand currently.",
]
prompt = "\n\n".join(prompt_parts)
return prompt
def call_gemini(prompt):
"""Calls the Gemini API with the given prompt and returns the response."""
# Configure the API with your key
genai.configure(api_key=API_KEY)
# Set up the model configuration
generation_config = {
"temperature": 0.7,
"top_p": 0.95,
"max_output_tokens": 4096,
}
safety_settings = [
{"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
{"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
{"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
]
# Create the model instance
model = genai.GenerativeModel(model_name="gemini-1.0-pro",
generation_config=generation_config,
safety_settings=safety_settings)
    # Generate content; a response blocked by the safety settings has no text, so guard against that.
    response = model.generate_content([prompt])
    try:
        response_text = response.text
    except ValueError:
        response_text = "No response text was returned (the request may have been blocked by safety filters)."
    return response_text
if __name__ == "__main__":
app()
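# To try this page locally (assuming Streamlit is installed and the JSON input files
# exist alongside this script): streamlit run process.py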