rishabmj commited on
Commit
b07e23d
·
verified ·
1 Parent(s): 6094582

Upload 3 files

Browse files
Files changed (3) hide show
  1. app.py +417 -0
  2. packages.txt +1 -0
  3. requirements.txt +8 -0
app.py ADDED
@@ -0,0 +1,417 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import os
3
+ import sqlite3
4
+ import re
5
+ import pandas as pd
6
+ import numpy as np
7
+ import PyPDF2
8
+ import docx
9
+ import spacy
10
+ from sentence_transformers import SentenceTransformer, util
11
+ from collections import Counter
12
+ from langchain_google_genai import ChatGoogleGenerativeAI
13
+ from langchain_core.prompts import ChatPromptTemplate
14
+ from langchain_core.output_parsers import StrOutputParser
15
+ from datetime import datetime, date
16
+
17
+ # --- App Setup and Constants ---
18
+ DB_FILE = "placement_portal.db"
19
+ ANALYSIS_DB_FILE = "analysis_results.db"
20
+
21
+ # --- Resource Loading (Cached for Performance) ---
22
@st.cache_resource
def load_resources():
    """Loads models and initializes databases once.

    Cached with st.cache_resource so the spaCy pipeline, the sentence
    embedder and the Gemini client are created a single time per server
    process, not on every Streamlit rerun.

    Returns:
        (nlp, semantic_model, llm) on success, or (None, None, None)
        when the Google API key is missing from Streamlit secrets.
    """
    print("Loading resources...")
    nlp = spacy.load("en_core_web_sm")
    semantic_model = SentenceTransformer('all-MiniLM-L6-v2')

    google_api_key = st.secrets.get("GOOGLE_API_KEY")
    if not google_api_key:
        st.error("Google API key not found. Please add it to your Streamlit secrets.", icon="🚨")
        # NOTE(review): on this path init_db()/init_analysis_db() are never
        # called, so the SQLite files are not created — confirm intended.
        return None, None, None

    llm = ChatGoogleGenerativeAI(
        model="gemini-1.5-pro",
        google_api_key=google_api_key,
        convert_system_message_to_human=True
    )

    # Create both SQLite schemas up front so every later query can assume
    # the tables exist.
    init_db()
    init_analysis_db()
    print("Resources loaded successfully.")
    return nlp, semantic_model, llm
44
+
45
+ # --- Database Functions ---
46
def get_db_connection(db_file):
    """Open a SQLite connection to *db_file* with name-addressable rows."""
    connection = sqlite3.connect(db_file)
    # sqlite3.Row lets callers read columns as row['name'] instead of by index.
    connection.row_factory = sqlite3.Row
    return connection
50
+
51
def init_db():
    """Create the jobs and applications tables if they do not exist yet."""
    with get_db_connection(DB_FILE) as conn:
        # Job postings created by the placement team.
        conn.execute("""
        CREATE TABLE IF NOT EXISTS jobs (
            id INTEGER PRIMARY KEY AUTOINCREMENT, timestamp DATETIME NOT NULL, title TEXT NOT NULL,
            description TEXT NOT NULL, due_date DATE NOT NULL
        )""")
        # Student applications plus the AI analysis results.
        # sim_gender / sim_university_tier hold SIMULATED demographics used
        # only by the bias-audit dashboard; they are not real applicant data.
        conn.execute("""
        CREATE TABLE IF NOT EXISTS applications (
            id INTEGER PRIMARY KEY AUTOINCREMENT, job_id INTEGER NOT NULL, timestamp DATETIME NOT NULL,
            candidate_name TEXT NOT NULL, candidate_email TEXT NOT NULL, final_score REAL,
            ai_feedback TEXT, verdict TEXT, lacking_skills TEXT,
            status TEXT DEFAULT 'Applied', sim_gender TEXT, sim_university_tier TEXT,
            FOREIGN KEY (job_id) REFERENCES jobs (id)
        )""")
        conn.commit()
67
+
68
def init_analysis_db():
    """Create the analyses table (external-resume screening history) if missing."""
    with get_db_connection(ANALYSIS_DB_FILE) as conn:
        # One row per externally uploaded resume analyzed by the placement team.
        conn.execute("""
        CREATE TABLE IF NOT EXISTS analyses (
            id INTEGER PRIMARY KEY AUTOINCREMENT, timestamp DATETIME NOT NULL, job_title TEXT NOT NULL,
            filename TEXT NOT NULL, score REAL, verdict TEXT, lacking_skills TEXT, feedback TEXT
        )""")
        conn.commit()
76
+
77
def add_job(title, description, due_date):
    """Insert a new job posting, stamped with the current time."""
    row = (datetime.now(), title, description, due_date)
    with get_db_connection(DB_FILE) as conn:
        conn.execute(
            "INSERT INTO jobs (timestamp, title, description, due_date) VALUES (?, ?, ?, ?)",
            row,
        )
        conn.commit()
82
+
83
def get_all_jobs():
    """Return every job posting as a DataFrame, newest first."""
    query = "SELECT id, title, description, due_date FROM jobs ORDER BY timestamp DESC"
    with get_db_connection(DB_FILE) as conn:
        return pd.read_sql_query(query, conn)
86
+
87
def add_application(job_id, name, email, score, feedback, verdict, lacking_skills, gender, uni_tier):
    """Insert one student application together with its AI analysis results.

    gender / uni_tier are the SIMULATED demographic labels generated at
    submission time for the bias-audit dashboard.
    """
    with get_db_connection(DB_FILE) as conn:
        conn.execute("""
        INSERT INTO applications (job_id, timestamp, candidate_name, candidate_email, final_score, ai_feedback, verdict, lacking_skills, sim_gender, sim_university_tier)
        VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
        """, (job_id, datetime.now(), name, email, score, feedback, verdict, lacking_skills, gender, uni_tier))
        conn.commit()
94
+
95
def get_applications_for_job(job_id):
    """All applications for one job, ranked best-first by final score."""
    query = "SELECT id, candidate_name, candidate_email, final_score, verdict, status, lacking_skills, sim_gender, sim_university_tier FROM applications WHERE job_id = ? ORDER BY final_score DESC"
    params = (job_id,)
    with get_db_connection(DB_FILE) as conn:
        return pd.read_sql_query(query, conn, params=params)
98
+
99
def update_candidate_status(application_id, new_status):
    """Persist a recruiter's shortlisting decision for one application."""
    with get_db_connection(DB_FILE) as conn:
        conn.execute(
            "UPDATE applications SET status = ? WHERE id = ?",
            (new_status, application_id),
        )
        conn.commit()
103
+
104
def get_student_applications(email):
    """Every application submitted by *email*, joined with its job title, newest first."""
    sql = """
        SELECT j.title, a.status, a.final_score, a.ai_feedback, a.verdict
        FROM applications a JOIN jobs j ON a.job_id = j.id
        WHERE a.candidate_email = ? ORDER BY a.timestamp DESC
        """
    with get_db_connection(DB_FILE) as conn:
        return pd.read_sql_query(sql, conn, params=(email,))
112
+
113
def add_analysis_record(job_title, result):
    """Archive one external-resume analysis (a dict from analyze_resume)."""
    values = (
        datetime.now(),
        job_title,
        result['filename'],
        result['score'],
        result['verdict'],
        result['lacking_skills'],
        result['feedback'],
    )
    with get_db_connection(ANALYSIS_DB_FILE) as conn:
        conn.execute(
            """
        INSERT INTO analyses (timestamp, job_title, filename, score, verdict, lacking_skills, feedback)
        VALUES (?, ?, ?, ?, ?, ?, ?)
        """,
            values,
        )
        conn.commit()
120
+
121
def get_all_analyses():
    """Return the full external-analysis history, newest first."""
    sql = "SELECT timestamp, job_title, filename, score, verdict, lacking_skills FROM analyses ORDER BY timestamp DESC"
    with get_db_connection(ANALYSIS_DB_FILE) as conn:
        return pd.read_sql_query(sql, conn)
124
+
125
+ # --- Helper & Analysis Functions ---
126
def read_pdf(file_object):
    """Extract and concatenate the text of every page in a PDF.

    Bug fix: PyPDF2's Page.extract_text() returns None for pages with no
    extractable text (e.g. scanned/image-only pages); joining None raised
    a TypeError. Such pages now contribute an empty string instead.
    """
    pdf_reader = PyPDF2.PdfReader(file_object)
    return "".join(page.extract_text() or "" for page in pdf_reader.pages)
129
+
130
def read_docx(file_object):
    """Concatenate all paragraph text from a .docx file, one paragraph per line."""
    document = docx.Document(file_object)
    return "\n".join(paragraph.text for paragraph in document.paragraphs)
133
+
134
def read_txt(file_object):
    """Decode an uploaded plain-text file as UTF-8.

    Robustness fix: resumes saved in another encoding previously aborted the
    whole analysis with UnicodeDecodeError; errors="replace" degrades such
    bytes to U+FFFD replacement characters instead.
    """
    return file_object.read().decode('utf-8', errors='replace')
136
+
137
def redact_pii(text):
    """Mask email addresses and phone numbers before the text reaches the LLM."""
    email_re = re.compile(r'\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Z|a-z]{2,}\b')
    phone_re = re.compile(r'(\(?\d{3}\)?[-.\s]?)?(\d{3}[-.\s]?\d{4})')
    # Emails first; the placeholder contains no digits, so the phone pass
    # cannot re-match inside already-redacted spans.
    without_emails = email_re.sub('[REDACTED EMAIL]', text)
    return phone_re.sub('[REDACTED PHONE]', without_emails)
143
+
144
def improved_extract_keywords(text):
    """Return the 15 most frequent lemmatized nouns/proper nouns/adjectives in *text*."""
    doc = nlp(text.lower())
    counts = Counter(
        token.lemma_
        for token in doc
        if not token.is_stop and not token.is_punct and token.pos_ in ('PROPN', 'NOUN', 'ADJ')
    )
    return [lemma for lemma, _ in counts.most_common(15)]
148
+
149
def generate_ai_feedback_langchain(_jd_text, _resume_text, _job_title):
    """Ask the Gemini LLM for a structured Markdown feedback report.

    The prompt pins an exact Markdown layout because analyze_resume() later
    regex-parses the **Overall Score:**, **Verdict:** and **Lacking Skills:**
    markers out of the returned text — keep the template and those patterns
    in sync.

    NOTE(review): the leading-underscore parameter names look like the
    Streamlit cache-hashing convention, but this function is not decorated
    with a cache — presumably a leftover; confirm.
    """
    prompt_template = """
    You are an expert, impartial, and ethical career coach AI. Your primary goal is to provide fair and objective feedback.
    **CRITICAL INSTRUCTIONS FOR FAIRNESS:**
    1. **Evaluate based ONLY on skills and experience** directly relevant to the job description.
    2. **DO NOT penalize for employment gaps, non-traditional career paths, or unconventional phrasing.** Focus on transferable skills.
    3. **Give fair consideration to soft skills** (e.g., leadership, communication) demonstrated through project descriptions or roles.
    4. **Ignore any personally identifiable information** such as names, emails, or phone numbers. Your analysis must be blind to personal identity.
    ---
    **JOB DESCRIPTION:**
    {jd}
    ---
    **RESUME:**
    {resume}
    ---
    Now, analyze the provided Resume against the Job Description and generate a feedback report strictly in the following Markdown format.
    **Overall Score:** [Provide a single integer score from 0 to 100.]
    **Verdict:** [A short, objective one-line verdict.]
    **Lacking Skills:** [List 2-3 key skills from the job description that are missing or weakly represented in the resume.]
    ---
    ### Resume Analysis for {job_title}
    #### ✅ Key Strengths (Job-Relevant)
    * **[Strength 1]:** [Explain why this is a strength by linking a specific part of the resume to a key requirement in the job description.]
    * **[Strength 2]:** [Provide another specific example of a strong, objective alignment.]
    #### 💡 Areas for Improvement
    * **[Suggestion 1]:** [Provide a concrete suggestion on how to better quantify achievements or tailor the resume.]
    **Final Summary:** [A brief, objective closing statement.]
    """
    prompt = ChatPromptTemplate.from_template(prompt_template)
    parser = StrOutputParser()
    # LCEL pipeline: template -> Gemini (module-level llm) -> plain string.
    chain = prompt | llm | parser
    return chain.invoke({"jd": _jd_text, "resume": _resume_text, "job_title": _job_title})
181
+
182
def calculate_hybrid_score(jd_embedding, resume_text, jd_keywords, llm_score):
    """Blend keyword overlap (30%), semantic similarity (50%) and the LLM score (20%)."""
    lowered = resume_text.lower()
    # "Hard" score: fraction of JD keywords literally present in the resume.
    if jd_keywords:
        hits = [kw for kw in jd_keywords if kw in lowered]
        hard_score = (len(hits) / len(jd_keywords)) * 100
    else:
        hard_score = 0
    # "Soft" score: cosine similarity between JD and resume embeddings.
    resume_embedding = semantic_model.encode(resume_text, convert_to_tensor=True)
    soft_score = util.pytorch_cos_sim(jd_embedding, resume_embedding).item() * 100
    return (0.3 * hard_score) + (0.5 * soft_score) + (0.2 * llm_score)
189
+
190
def analyze_resume(job_details, resume_file_object):
    """Run the full screening pipeline for one resume against one job.

    Steps: read the file, redact PII, obtain LLM feedback, parse the LLM's
    structured markers, then blend keyword/semantic/LLM scores into a final
    hybrid score.

    Args:
        job_details: dict with 'title' and 'description' keys.
        resume_file_object: uploaded file with a .name attribute.

    Returns:
        dict with keys: filename, score, verdict, lacking_skills, feedback.
    """
    job_description = job_details['description']
    job_title = job_details['title']

    file_name = resume_file_object.name
    # Bug fix: compare extensions case-insensitively so "Resume.PDF" is not
    # mis-routed to the plain-text reader (which would choke on PDF bytes).
    lower_name = file_name.lower()
    if lower_name.endswith('.pdf'):
        resume_text_raw = read_pdf(resume_file_object)
    elif lower_name.endswith('.docx'):
        resume_text_raw = read_docx(resume_file_object)
    else:
        # Any other extension (including .txt) is treated as plain text.
        resume_text_raw = read_txt(resume_file_object)

    resume_text = redact_pii(resume_text_raw)
    raw_feedback = generate_ai_feedback_langchain(job_description, resume_text, job_title)

    # Parse the structured markers the prompt instructs the LLM to emit;
    # fall back to safe defaults if the model deviates from the format.
    llm_score, verdict, lacking_skills = 0.0, "N/A", "Not specified"
    score_match = re.search(r"\*\*Overall Score:\*\*\s*(\d{1,3})", raw_feedback)
    if score_match:
        # Clamp to the promised 0-100 range in case the model emits e.g. "110".
        llm_score = min(float(score_match.group(1)), 100.0)
    verdict_match = re.search(r"\*\*Verdict:\*\*\s*(.*)", raw_feedback)
    if verdict_match:
        verdict = verdict_match.group(1).strip()
    lacking_skills_match = re.search(r"\*\*Lacking Skills:\*\*\s*(.*)", raw_feedback)
    if lacking_skills_match:
        lacking_skills = lacking_skills_match.group(1).strip()

    jd_keywords = improved_extract_keywords(job_description)
    jd_embedding = semantic_model.encode(job_description, convert_to_tensor=True)
    final_score = calculate_hybrid_score(jd_embedding, resume_text, jd_keywords, llm_score)

    return {"filename": file_name, "score": final_score, "verdict": verdict,
            "lacking_skills": lacking_skills, "feedback": raw_feedback}
215
+
216
+ # --- UI Views ---
217
def student_view():
    """Student-facing page: browse open jobs, apply with a resume, check status."""
    st.title("🎓 Student Job Portal")
    st.info("Explore open positions below. After you apply, check your status to see AI-powered feedback.")
    jobs_df = get_all_jobs()
    if jobs_df.empty:
        st.info("No jobs posted yet.")
        return

    today = date.today()
    for _, job in jobs_df.iterrows():
        with st.expander(f"**{job['title']}**"):
            st.markdown(f"##### Job Description\n{job['description']}")
            # due_date is stored as 'YYYY-MM-DD' text (see add_job).
            job_due_date = datetime.strptime(job['due_date'], '%Y-%m-%d').date()
            if job_due_date:
                st.warning(f"**Application Deadline:** {job_due_date.strftime('%B %d, %Y')}")

            if job_due_date and job_due_date < today:
                st.error("Applications for this position are now closed.")
            else:
                with st.form(key=f"apply_form_{job['id']}"):
                    st.markdown("--- \n##### Apply Now")
                    student_name = st.text_input("Your Full Name")
                    student_email = st.text_input("Your Email Address")
                    uploaded_resume = st.file_uploader("Upload your resume", type=['pdf', 'docx', 'txt'])
                    if st.form_submit_button("Submit Application"):
                        if student_name and student_email and uploaded_resume:
                            with st.spinner("Analyzing and submitting..."):
                                job_details = {'title': job['title'], 'description': job['description']}
                                analysis = analyze_resume(job_details, uploaded_resume)
                                # SIMULATED demographics: random labels generated at
                                # submission time, used only by the bias-audit dashboard.
                                sim_gender = np.random.choice(["Male", "Female"], p=[0.6,0.4])
                                sim_uni_tier = np.random.choice(["Tier 1", "Tier 2/3"], p=[0.3,0.7])
                                add_application(job['id'], student_name, student_email, analysis['score'],
                                                analysis['feedback'], analysis['verdict'], analysis['lacking_skills'],
                                                sim_gender, sim_uni_tier)
                                st.success("Application submitted successfully!")
                        else: st.warning("Please fill all fields and upload your resume.")

    # Status lookup is keyed on the free-text email the student typed at
    # application time; no authentication is performed.
    st.write("---")
    st.header("📋 Check Your Application Status")
    email_check = st.text_input("Enter your email address to check your applications:")
    if st.button("Check Status"):
        if email_check:
            apps_df = get_student_applications(email_check)
            if not apps_df.empty:
                for _, row in apps_df.iterrows():
                    with st.container(border=True):
                        st.subheader(row['title'])
                        cols = st.columns(3)
                        cols[0].metric("Your Final Score", f"{row['final_score']:.2f}%")
                        cols[1].metric("AI Verdict", row['verdict'])
                        status = row['status']
                        if status == 'Shortlisted': cols[2].success(f"Status: {status} 🎉")
                        elif status == 'Not Shortlisted': cols[2].error(f"Status: {status}")
                        else: cols[2].info(f"Status: {status}")
                        with st.expander("💡 View Detailed Feedback"):
                            st.markdown(row['ai_feedback'])
            else: st.info("No applications found for that email.")
        else: st.warning("Please enter your email.")
275
+
276
+
277
def bias_audit_dashboard(df):
    """Render fairness metrics for one job's applications.

    Shows selection-rate parity charts by (simulated) gender and university
    tier, plus the EEOC "four-fifths rule" adverse-impact ratio.

    Args:
        df: DataFrame from get_applications_for_job(); must contain the
            'status', 'sim_gender' and 'sim_university_tier' columns.
    """
    st.header("Bias & Fairness Audit Dashboard")
    st.info("This dashboard helps monitor the system for potential biases in shortlisting outcomes. Data shown here is simulated for demonstration purposes.")
    if len(df) < 10:
        st.warning("Insufficient data for a meaningful bias analysis. At least 10 applications are recommended.")
        return
    st.subheader("Success Rate Parity")
    st.markdown("This metric checks if candidates from different groups are being shortlisted at similar rates.")
    # Rows = group value, columns = status, cells = share of the group with
    # that status (normalize=True makes each row sum to 1).
    gender_df = df.groupby('sim_gender')['status'].value_counts(normalize=True).unstack().fillna(0)
    if 'Shortlisted' in gender_df.columns:
        st.markdown("**By Gender**"); st.bar_chart(gender_df['Shortlisted'])
    uni_df = df.groupby('sim_university_tier')['status'].value_counts(normalize=True).unstack().fillna(0)
    if 'Shortlisted' in uni_df.columns:
        st.markdown("**By University Tier (Proxy for Background)**"); st.bar_chart(uni_df['Shortlisted'])
    st.subheader("Adverse Impact Ratio")
    st.markdown("The 'Four-Fifths Rule' states the selection rate for a minority group should be at least 80% of the rate for the majority group.")
    if 'Shortlisted' in gender_df.columns and len(gender_df) > 1:
        # Majority/minority chosen by applicant head-count, not by selection rate.
        majority_group = df['sim_gender'].value_counts().idxmax()
        minority_group = df['sim_gender'].value_counts().idxmin()
        if majority_group != minority_group:
            rate_majority = gender_df.loc[majority_group, 'Shortlisted']
            rate_minority = gender_df.loc[minority_group, 'Shortlisted']
            if rate_majority > 0:
                impact_ratio = (rate_minority / rate_majority) * 100
                st.metric(label=f"Adverse Impact Ratio ({minority_group} vs {majority_group})", value=f"{impact_ratio:.2f}%")
                if impact_ratio < 80: st.error("Adverse impact detected! Manual review recommended.", icon="🚨")
                else: st.success("No significant adverse impact detected.", icon="✅")
            else: st.info("Cannot calculate Adverse Impact Ratio as majority group has 0% selection rate.")
305
+
306
def placement_team_view():
    """Recruiter page: password gate, job posting, batch resume screening,
    application management, bias audit and analysis history."""
    st.title("💼 Placement Team Dashboard")

    # Simple shared-password gate backed by Streamlit session state.
    placement_password = st.secrets.get("PLACEMENT_PASSWORD")
    if 'password_correct' not in st.session_state: st.session_state.password_correct = False
    def check_password():
        # Runs as the text_input's on_change callback; deletes the typed
        # password from session state on success so it isn't retained.
        if placement_password and st.session_state["password"] == placement_password:
            st.session_state.password_correct = True; del st.session_state["password"]
        else: st.session_state.password_correct = False
    if not st.session_state.password_correct:
        st.text_input("Password", type="password", on_change=check_password, key="password")
        if "password" in st.session_state and not st.session_state.password_correct: st.error("Wrong password.")
        return

    jobs_df = get_all_jobs()
    # id -> title map used by every selectbox below.
    job_titles = {row['id']: row['title'] for _, row in jobs_df.iterrows()}

    with st.expander("Post a New Job"):
        with st.form(key="post_job_form"):
            job_title = st.text_input("Job Title")
            job_description = st.text_area("Job Description", height=200)
            due_date = st.date_input("Application Due Date", min_value=date.today())
            if st.form_submit_button("Post Job"):
                if job_title and job_description and due_date:
                    # Stored as text; student_view parses it back with '%Y-%m-%d'.
                    add_job(job_title, job_description, due_date.strftime('%Y-%m-%d'))
                    st.success(f"Job '{job_title}' posted successfully!"); st.rerun()
                else: st.warning("Please fill in all fields.")

    with st.expander("Analyze External Resumes"):
        if not job_titles:
            st.info("Please post a job first to enable resume analysis.")
        else:
            analysis_job_id = st.selectbox("Select job to screen against:", options=list(job_titles.keys()), format_func=lambda x: job_titles.get(x, 'N/A'))
            uploaded_files = st.file_uploader("Upload one or more resumes", accept_multiple_files=True, key="multi_uploader")

            if st.button("Analyze Uploaded Resumes"):
                if analysis_job_id and uploaded_files:
                    job_details = {'title': job_titles[analysis_job_id], 'description': jobs_df.loc[jobs_df['id'] == analysis_job_id, 'description'].iloc[0]}
                    with st.spinner(f"Analyzing {len(uploaded_files)} resumes..."):
                        results = [analyze_resume(job_details, f) for f in uploaded_files]
                        for res in results:
                            add_analysis_record(job_details['title'], res)
                        # Stash results in session state so they survive the rerun.
                        st.session_state.analysis_results = results
                else: st.warning("Please select a job and upload at least one resume.")

            if 'analysis_results' in st.session_state:
                st.subheader("Analysis Results")
                results_df = pd.DataFrame(st.session_state.analysis_results).sort_values(by="score", ascending=False)
                st.dataframe(results_df, use_container_width=True, hide_index=True, column_config={
                    "filename": "Filename", "score": st.column_config.ProgressColumn("Score", format="%.2f%%", min_value=0, max_value=100),
                    "verdict": "AI Verdict", "lacking_skills": "Lacking Skills"
                })
                # One-shot display: cleared so results don't reappear on later reruns.
                del st.session_state.analysis_results

    st.write("---")
    st.header("Manage Portal Data")
    tab1, tab2, tab3 = st.tabs(["Student Applications", "Bias & Fairness Audit", "Past Analysis Results"])

    with tab1:
        st.subheader("Applications Submitted via the Student Portal")
        if not job_titles:
            st.info("No jobs have been posted yet.")
        else:
            selected_job_id = st.selectbox("Select a job to view applications:", options=list(job_titles.keys()), format_func=lambda x: job_titles.get(x, 'N/A'))
            if selected_job_id:
                apps_df = get_applications_for_job(selected_job_id)
                if not apps_df.empty:
                    # Hand-rolled table: one header row then one widget row per applicant.
                    cols = st.columns([2, 3, 1, 2, 3, 2]); cols[0].markdown("**Name**"); cols[1].markdown("**Email**"); cols[2].markdown("**Score**"); cols[3].markdown("**Verdict**"); cols[4].markdown("**Lacking Skills**"); cols[5].markdown("**Status**")
                    for _, row in apps_df.iterrows():
                        cols = st.columns([2, 3, 1, 2, 3, 2]); cols[0].text(row['candidate_name']); cols[1].text(row['candidate_email']); cols[2].text(f"{row['final_score']:.1f}%"); cols[3].text(row['verdict']); cols[4].text(row['lacking_skills'])
                        status_options = ["Applied", "Shortlisted", "Not Shortlisted"]
                        current_status_index = status_options.index(row['status']) if row['status'] in status_options else 0
                        new_status = cols[5].selectbox("Set Status", status_options, index=current_status_index, key=f"status_{row['id']}", label_visibility="collapsed")
                        if new_status != row['status']:
                            update_candidate_status(row['id'], new_status); st.toast(f"Updated {row['candidate_name']}'s status."); st.rerun()
                else: st.info("No applications for this job yet.")

    with tab2:
        st.subheader("Bias & Fairness Audit for Student Applications")
        if not job_titles:
            st.info("No jobs have been posted yet.")
        else:
            bias_job_id = st.selectbox("Select a job to audit:", options=list(job_titles.keys()), format_func=lambda x: job_titles.get(x, 'N/A'), key="bias_job_select")
            if bias_job_id:
                bias_apps_df = get_applications_for_job(bias_job_id)
                bias_audit_dashboard(bias_apps_df)

    with tab3:
        st.subheader("History of Analyzed External Resumes")
        analyses_df = get_all_analyses()
        if not analyses_df.empty:
            analyses_df['timestamp'] = pd.to_datetime(analyses_df['timestamp']).dt.strftime('%Y-%m-%d %H:%M')
            grouped = analyses_df.groupby('job_title')
            for job_title, group_df in grouped:
                with st.expander(f"Resumes Analyzed for: **{job_title}** ({len(group_df)} files)"):
                    st.dataframe(group_df, use_container_width=True, hide_index=True, column_order=("filename", "score", "verdict", "lacking_skills", "timestamp"),
                                 column_config={"filename": "Filename", "score": st.column_config.ProgressColumn("Score", format="%.1f%%", min_value=0, max_value=100),
                                                "verdict": "AI Verdict", "lacking_skills": "Lacking Skills", "timestamp": "Analyzed On"})
        else: st.info("No external resumes have been analyzed yet.")
405
+
406
# --- Main App Execution ---
# Streamlit script tail: runs top-to-bottom on every rerun.
st.set_page_config(layout="wide", page_title="AI Resume Ranker")
# load_resources() returns (None, None, None) when the Google API key is
# missing, so the role UI below renders only when the LLM is available.
nlp, semantic_model, llm = load_resources()

if llm:
    st.sidebar.title("👨‍💻 User Role")
    user_role = st.sidebar.radio("Select role:", ["Student", "Placement Team"])
    if user_role == "Student":
        student_view()
    else:
        placement_team_view()
417
+
packages.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ libsqlite3-dev
requirements.txt ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ streamlit
2
+ spacy
3
+ sentence-transformers
4
+ pandas
5
+ pypdf2
6
+ python-docx
7
+ numpy
8
+ langchain-google-genai