import os

# Ensure HOME points somewhere writable BEFORE other imports run, since some
# libraries cache files under HOME at import time (presumably this runs in a
# sandboxed/container environment with a read-only home — confirm deployment).
os.environ.setdefault("HOME", "/tmp")
| |
|
| | import io |
| | import json |
| | import hashlib |
| | import time |
| | from typing import List, Tuple, Dict |
| | from datetime import datetime |
| | import requests |
| | from bs4 import BeautifulSoup |
| | import PyPDF2 |
| | from docx import Document |
| | import gradio as gr |
| | import difflib |
| | import tempfile |
| | from reportlab.lib.pagesizes import letter |
| | from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle |
| | from reportlab.lib.units import inch |
| | from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer |
| | from reportlab.lib import colors |
| | import re |
| |
|
| | |
| | VERSIONS_FILE = "/tmp/resume_versions.json" |
| |
|
def persist_versions(obj: Dict):
    """Best-effort write of the versions dict to VERSIONS_FILE as JSON.

    Failures (disk full, read-only FS, unserializable data) are deliberately
    swallowed: persistence is optional and must never break the app.
    """
    try:
        with open(VERSIONS_FILE, "w") as fh:
            json.dump(obj, fh)
    except Exception:
        pass
| |
|
def load_versions() -> Dict:
    """Read the versions dict back from VERSIONS_FILE.

    Returns {} when the file is missing, unreadable, or not valid JSON —
    mirrors the best-effort contract of persist_versions().
    """
    try:
        if os.path.exists(VERSIONS_FILE):
            with open(VERSIONS_FILE, "r") as fh:
                return json.load(fh)
    except Exception:
        pass
    return {}
| |
|
def save_debug_tmp(data: bytes, fname: str) -> str:
    """Write uploaded bytes to a uniquely named file in the temp dir.

    The name is salted with an md5 of (fname + current time) so repeated
    uploads of the same file never collide. Returns the full path written.
    """
    salt = hashlib.md5((fname + str(time.time())).encode()).hexdigest()[:8]
    target = os.path.join(tempfile.gettempdir(), f"uploaded_{salt}_{fname}")
    with open(target, "wb") as sink:
        sink.write(data)
    return target
| |
|
def read_uploaded_file(f) -> Tuple[bytes, str]:
    """Read a Gradio upload (filesystem path or file-like object).

    Returns (bytes, filename) on success, (None, None) on any failure.
    Fix: the filename is now always a bare basename. Gradio file objects
    expose ``.name`` as an absolute temp-file path, so the old code leaked
    full paths into status messages and extension checks, while the
    string-path branch already returned a basename.
    """
    if f is None:
        return None, None
    try:
        if isinstance(f, str):
            with open(f, "rb") as fh:
                return fh.read(), os.path.basename(f)

        data = f.read()
        # Keep only the final path component; fall back to "upload" when the
        # object has no usable name.
        name = os.path.basename(getattr(f, "name", "upload")) or "upload"
        return data, name
    except Exception:
        # Contract with callers (handle_upload): never raise, signal with Nones.
        return None, None
| |
|
| | |
def extract_text_from_pdf_bytes(data: bytes) -> str:
    """Extract plain text from PDF bytes; returns 'ERROR_PDF: ...' on failure."""
    try:
        reader = PyPDF2.PdfReader(io.BytesIO(data))
        # extract_text() may return None for image-only pages; coerce to "".
        chunks = [(page.extract_text() or "") for page in reader.pages]
        non_empty = [c for c in chunks if c.strip()]
        return "\n".join(non_empty).strip()
    except Exception as exc:
        return f"ERROR_PDF: {exc}"
| |
|
def extract_text_from_docx_bytes(data: bytes) -> str:
    """Extract paragraph text from DOCX bytes; returns 'ERROR_DOCX: ...' on failure."""
    try:
        document = Document(io.BytesIO(data))
        lines = [para.text for para in document.paragraphs if para.text.strip()]
        return "\n".join(lines).strip()
    except Exception as exc:
        return f"ERROR_DOCX: {exc}"
| |
|
| | |
def scrape_job_description_advanced(url: str) -> str:
    """Fetch a job-posting URL and return its main text, capped at 20k chars.

    Known boards (LinkedIn, Indeed) get targeted selectors; anything else
    falls back to <main>/<article>/<body>. Returns 'ERROR_FETCH: ...' on failure.
    """
    try:
        resp = requests.get(url, headers={"User-Agent": "Mozilla/5.0"}, timeout=12)
        resp.raise_for_status()
        soup = BeautifulSoup(resp.content, "html.parser")

        # Drop scripts/styles and page chrome before extracting text.
        for junk in soup(["script", "style", "nav", "footer", "header", "form"]):
            junk.decompose()

        container = None
        if 'linkedin.com' in url:
            container = soup.find('div', {'class': 'description__text'}) or soup.find('div', {'class': 'description'})
        elif 'indeed.com' in url:
            container = soup.find('div', {'id': 'jobDescriptionText'})
        if not container:
            container = soup.find('main') or soup.find('article') or soup.body

        root = container if container else soup
        return root.get_text(separator="\n", strip=True)[:20000]
    except Exception as exc:
        return f"ERROR_FETCH: {exc}"
| |
|
| | |
def export_to_pdf_bytes(content: str, title: str = "document") -> bytes:
    """Render plain text into a simple one-column letter-size PDF.

    Each non-blank input line becomes a Paragraph; blank lines become small
    spacers. Returns the PDF as bytes.

    Fix: reportlab's Paragraph parses its input as intra-paragraph XML
    markup, so raw '&', '<' or '>' in resume/cover-letter text previously
    raised a build error. All user text is now XML-escaped first.
    """
    from xml.sax.saxutils import escape  # local: only needed for PDF export

    buffer = io.BytesIO()
    doc = SimpleDocTemplate(buffer, pagesize=letter,
                            rightMargin=0.6*inch, leftMargin=0.6*inch,
                            topMargin=0.6*inch, bottomMargin=0.6*inch)
    styles = getSampleStyleSheet()
    story = []
    title_style = ParagraphStyle('Title', parent=styles['Heading1'], fontSize=16,
                                 textColor=colors.HexColor('#2b6cb0'), spaceAfter=10)
    body = ParagraphStyle('Body', parent=styles['BodyText'], fontSize=10, spaceAfter=6)
    story.append(Paragraph(escape(title), title_style))
    for line in content.splitlines():
        if line.strip() == "":
            story.append(Spacer(1, 0.08*inch))
        else:
            story.append(Paragraph(escape(line), body))
    doc.build(story)
    buffer.seek(0)
    return buffer.getvalue()
| |
|
| | |
| | def _llm_endpoint_for(provider: str) -> str: |
| | if provider == "OpenAI": |
| | return "https://api.openai.com/v1/chat/completions" |
| | if provider == "OpenRouter": |
| | return "https://openrouter.ai/api/v1/chat/completions" |
| | if provider == "Groq": |
| | return "https://api.groq.com/openai/v1/chat/completions" |
| | return "https://api.together.xyz/v1/chat/completions" |
| |
|
def llm_chat(api_endpoint: str, api_key: str, model: str, messages: List[dict], timeout=60):
    """POST an OpenAI-style chat-completions request.

    Returns the parsed JSON response on success, or a dict with an "error"
    key (and optionally "detail") on any failure — callers check for "error"
    rather than catching exceptions.

    Fixes: removed the unreachable ``r.raise_for_status()`` (the preceding
    ``status_code >= 400`` branch already returned), and collapsed the two
    near-identical payload dicts into one.
    """
    # Fall back to the env secret so the UI key field may stay blank.
    api_key = api_key or os.environ.get("API_KEY", "")
    if not api_key:
        return {"error": "API key not provided. Set API_KEY env secret or paste in UI."}

    headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
    payload = {
        "model": model,
        "messages": messages,
        # Groq supports a much larger completion budget; stay conservative elsewhere.
        "max_tokens": 8000 if "groq.com" in api_endpoint else 1500,
        "temperature": 0.4,
    }

    try:
        r = requests.post(api_endpoint, headers=headers, json=payload, timeout=timeout)
        if r.status_code >= 400:
            # Surface the provider's error body instead of raising.
            return {"error": f"{r.status_code} {r.reason}", "detail": r.text}
        return r.json()
    except Exception as e:
        return {"error": str(e)}
| | |
def extract_skills_from_text(job_text: str, provider: str, api_key: str, model: str) -> List[str]:
    """Extract a list of skill strings from a job description.

    Uses the configured LLM when an API key is available and its answer looks
    valid (a JSON list of at least 3 strings); otherwise falls back to a local
    HTML/regex heuristic so the feature works without any key.

    Fixes: removed the redundant function-local ``from bs4 import
    BeautifulSoup`` (already imported at module level, and re-executed on
    every call), and ``parse_with_llm`` now strips markdown code fences
    before ``json.loads`` — previously a fenced answer was silently discarded.
    """

    def parse_with_llm(text: str) -> List[str]:
        # Ask the model for a bare JSON array of skill strings; [] on any failure.
        endpoint = _llm_endpoint_for(provider)
        system = (
            "You are a JSON extractor. Given a job description, return ONLY a JSON array of skill/qualification strings. "
            "Do NOT include any explanatory text."
        )
        user = f"Extract skills from the following JOB DESCRIPTION. Return only a JSON array of strings:\n\n{text}"
        messages = [{"role": "system", "content": system}, {"role": "user", "content": user}]
        resp = llm_chat(endpoint, api_key, model, messages, timeout=30)
        try:
            if "error" in resp:
                return []
            content = resp["choices"][0]["message"]["content"].strip()
            # Models often wrap JSON in ```json ... ``` fences; strip them.
            if content.startswith("```"):
                content = re.sub(r"^```[A-Za-z]*\s*|\s*```$", "", content).strip()
            skills = json.loads(content)
            if isinstance(skills, list) and len(skills) >= 3:
                return [s.strip() for s in skills if isinstance(s, str) and s.strip()]
        except Exception:
            return []
        return []

    def heuristic_extract(text: str) -> List[str]:
        candidates = []
        # HTML pass: harvest <li> items and lists that directly follow headers.
        if "<" in text and ">" in text:
            try:
                soup = BeautifulSoup(text, "html.parser")
                for li in soup.find_all("li"):
                    t = li.get_text(separator=" ", strip=True)
                    if t:
                        candidates.append(t)
                for header in soup.find_all(["h2", "h3", "h4"]):
                    nxt = header.find_next_sibling()
                    if nxt and nxt.name in ("ul", "ol"):
                        for li in nxt.find_all("li"):
                            candidates.append(li.get_text(separator=" ", strip=True))
            except Exception:
                pass

        # Plain-text pass: bullet lines and keyword lines are split on common
        # separators; short comma-separated lines are also harvested.
        # NOTE(review): "β’" / "Β·" below look like mojibake of "•" / "·" —
        # kept byte-for-byte to preserve behavior; confirm source encoding.
        lines = [l.strip() for l in text.splitlines() if l.strip()]
        for i, line in enumerate(lines):
            if re.match(r"^(\*|-|β’|\u2022|\d+\.)\s+", line) or any(k in line.lower() for k in ["skills", "requirements", "qualifications", "experience", "responsibilities"]):
                clean = re.sub(r"^(\*|-|β’|\u2022|\d+\.)\s*", "", line)
                parts = re.split(r"[;,β’\|Β·]", clean)
                for p in parts:
                    p = p.strip()
                    if len(p) > 1 and len(p.split()) <= 6:
                        candidates.append(p)
            else:
                if "," in line and len(line) < 200:
                    parts = [p.strip() for p in line.split(",") if p.strip()]
                    if len(parts) >= 2:
                        candidates.extend([p for p in parts if len(p.split()) <= 6])

        # Token pass: pick up acronyms and tech-looking tokens (C++, .NET, sql, ...).
        token_pattern = re.compile(r"\b[A-Za-z0-9\+\#\.\-_/]{2,40}\b")
        for match in token_pattern.findall(text):
            tok = match.strip()
            if len(tok) > 1 and not re.fullmatch(r"\d+", tok):
                if re.search(r"[A-Z]|[\+\#\.\/-]", tok) or tok.lower() in ("sql", "aws", "docker", "kubernetes", "linux", "unix"):
                    candidates.append(tok)

        # Case-insensitive de-duplication, order preserved, hard-capped at 200.
        seen = set()
        out = []
        for c in candidates:
            s = re.sub(r"\s{2,}", " ", c).strip(" .;:-")
            key = s.lower()
            if key and key not in seen:
                seen.add(key)
                out.append(s)
            if len(out) >= 200:
                break

        # Drop lone stopwords, return at most 80 skills.
        out = [o for o in out if len(o) > 1 and not re.fullmatch(r"(and|the|or|of|in|to)", o.lower())]
        return out[:80]

    # Only trust the LLM when it returns a plausible list.
    if api_key:
        llm_res = parse_with_llm(job_text)
        if llm_res and len(llm_res) >= 3:
            return [re.sub(r"\s{2,}", " ", s).strip() for s in llm_res]

    heur = heuristic_extract(job_text or "")
    return heur
| |
|
def generate_tailored_resume_text(resume_text: str, job_desc: str, provider: str, api_key: str, model: str, style: str) -> str:
    """Ask the LLM to rewrite the resume for a specific job.

    Returns the generated text, or an 'ERROR: ...' / 'ERROR_PARSE: ...'
    string when the request or response parsing fails.
    """
    endpoint = _llm_endpoint_for(provider)
    prompt = (
        f"You are an expert resume writer. Using ONLY facts from the ORIGINAL RESUME, produce a tailored resume in plain text "
        f"optimized for the following JOB DESCRIPTION. Keep truthful, do not invent. Template style: {style}.\n\n"
        f"ORIGINAL RESUME:\n{resume_text}\n\nJOB DESCRIPTION:\n{job_desc}\n\nReturn the tailored resume."
    )
    messages = [
        {"role": "system", "content": "You are a truthful resume-writing assistant."},
        {"role": "user", "content": prompt},
    ]
    resp = llm_chat(endpoint, api_key, model, messages, timeout=90)
    if "error" in resp:
        return f"ERROR: {resp['error']}"
    try:
        return resp["choices"][0]["message"]["content"]
    except Exception as exc:
        return f"ERROR_PARSE: {exc}"
| |
|
def generate_cover_letter_text(resume_text: str, job_desc: str, provider: str, api_key: str, model: str, company: str, position: str) -> str:
    """Ask the LLM for a 300-400 word cover letter grounded in the resume.

    Returns the generated text, or an 'ERROR: ...' / 'ERROR_PARSE: ...'
    string when the request or response parsing fails.
    """
    endpoint = _llm_endpoint_for(provider)
    prompt = (
        f"Write a 300-400 word cover letter using ONLY facts from the resume and tailored to this job.\n\n"
        f"RESUME:\n{resume_text}\n\nJOB DESCRIPTION:\n{job_desc}\n\nCOMPANY: {company}\nPOSITION: {position}"
    )
    messages = [
        {"role": "system", "content": "You are a cover letter writer."},
        {"role": "user", "content": prompt},
    ]
    resp = llm_chat(endpoint, api_key, model, messages, timeout=90)
    if "error" in resp:
        return f"ERROR: {resp['error']}"
    try:
        return resp["choices"][0]["message"]["content"]
    except Exception as exc:
        return f"ERROR_PARSE: {exc}"
| |
|
| | |
def calculate_ats_score(resume_text: str, job_skills: List[str]) -> Tuple[int, Dict]:
    """Score how well the resume covers the given skills (0-100, fuzzy matching).

    Returns (overall_percentage, details) where details contains:
      - "matched": list of {"skill", "score", "match_type"} dicts
      - "missing": list of {"skill", "suggestions"} dicts (closest resume phrases)
      - "scores":  per-skill score rounded to 2 decimals
      - "total":   number of skills considered
      - "overall": same percentage as the first return value
    """
    from difflib import SequenceMatcher, get_close_matches

    # Normalize the resume to lowercase alphanumerics for all comparisons.
    resume_text = (resume_text or "").lower()
    resume_norm = re.sub(r"[^a-z0-9\s]", " ", resume_text)
    resume_words = [w for w in resume_norm.split() if w]

    details = {"matched": [], "missing": [], "scores": {}, "total": len(job_skills)}
    if not job_skills:
        return 0, details

    # Pre-build every 1..4-word phrase of the resume once; used below for
    # fuzzy matching and for "did you mean" suggestions on misses.
    total_score = 0.0
    max_ngram = 4
    ngrams = []
    L = len(resume_words)
    for size in range(1, min(max_ngram, L) + 1):
        for i in range(0, L - size + 1):
            ngrams.append(" ".join(resume_words[i : i + size]))

    for skill in job_skills:
        # Normalize each skill the same way as the resume text.
        sk = (skill or "").lower().strip()
        sk_norm = re.sub(r"[^a-z0-9\s]", " ", sk)
        sk_tokens = [t for t in sk_norm.split() if t]
        match_type = "no_match"
        score = 0.0

        # Tiered matching, strongest first: exact word-boundary phrase (1.0),
        # raw substring (0.95), partial token coverage (0.88), fuzzy (0.9/0.8).
        pattern = r"\b" + re.escape(" ".join(sk_tokens)) + r"\b"
        if re.search(pattern, resume_norm):
            score = 1.0
            match_type = "exact"
        elif " ".join(sk_tokens) in resume_norm:
            score = 0.95
            match_type = "substring"
        else:
            if sk_tokens:
                # Multi-word skills get partial credit when at least half of
                # their tokens appear somewhere in the resume.
                hits = sum(1 for t in sk_tokens if re.search(r"\b" + re.escape(t) + r"\b", resume_norm))
                frac = hits / len(sk_tokens)
                if len(sk_tokens) > 1 and frac >= 0.5:
                    score = 0.88
                    match_type = f"partial_tokens({hits}/{len(sk_tokens)})"
            if score == 0.0:
                # Last resort: best SequenceMatcher ratio against every resume
                # n-gram; early-exit once a near-exact candidate is found.
                best_ratio = 0.0
                for cand in ngrams:
                    ratio = SequenceMatcher(None, " ".join(sk_tokens), cand).ratio()
                    if ratio > best_ratio:
                        best_ratio = ratio
                    if best_ratio >= 0.95:
                        break
                if best_ratio >= 0.9:
                    score = 0.9
                    match_type = f"fuzzy({best_ratio:.2f})"
                elif best_ratio >= 0.8:
                    score = 0.8
                    match_type = f"fuzzy({best_ratio:.2f})"

        total_score += score
        details["scores"][skill] = round(score, 2)
        if score > 0:
            details["matched"].append({"skill": skill, "score": round(score, 2), "match_type": match_type})
        else:
            # No credit at all: offer the closest resume phrases as hints.
            suggestions = get_close_matches(" ".join(sk_tokens), ngrams, n=3, cutoff=0.6)
            details["missing"].append({"skill": skill, "suggestions": suggestions})

    # Average per-skill score, expressed as a truncated integer percentage.
    overall = int((total_score / len(job_skills)) * 100)
    details["overall"] = overall
    return overall, details
| |
|
def sanitize_skills(skills: List[str], job_text: str = "") -> List[str]:
    """Clean a noisy skill list.

    Drops non-strings, very short entries, URL/email-looking entries and
    boilerplate phrases; merges runs of single-word skills that appear as a
    contiguous phrase in ``job_text``; de-duplicates case-insensitively
    while preserving order.
    """
    if not skills:
        return []

    # Pass 1: filter out obvious noise.
    cleaned = []
    for raw in skills:
        if not raw or not isinstance(raw, str):
            continue
        item = raw.strip()
        lowered = item.lower()
        if len(item) <= 2:
            continue
        if re.search(r"https?://|www\.|@|\.com|\.de", lowered):
            continue
        if any(phr in lowered for phr in ["mode of employment", "about us", "faq", "show", "if ", "we ", "join ", "take "]):
            continue
        if lowered in ("you","your","are","we","us","our","take","join","show","about"):
            continue
        cleaned.append(item)

    # Pass 2: greedily glue consecutive single-word entries back together
    # when the combined phrase (up to 4 words) occurs verbatim in the job text.
    haystack = (job_text or "").lower()
    merged = []
    idx = 0
    while idx < len(cleaned):
        current = cleaned[idx]
        if len(current.split()) == 1:
            nxt = idx + 1
            phrase = current
            while nxt < len(cleaned) and len(phrase.split()) < 4:
                attempt = phrase + " " + cleaned[nxt]
                if attempt.lower() in haystack:
                    phrase = attempt
                    nxt += 1
                else:
                    break
            merged.append(phrase)
            idx = nxt
        else:
            merged.append(current)
            idx += 1

    # Pass 3: order-preserving, whitespace-normalized, case-insensitive dedup.
    seen = set()
    final = []
    for entry in merged:
        key = re.sub(r"\s+", " ", entry).strip().lower()
        if key and key not in seen:
            seen.add(key)
            final.append(entry.strip())
    return final
| |
|
| | |
def handle_upload(file_obj):
    """Gradio callback: read the uploaded resume and extract its text.

    Returns (status message, filename, extracted text).
    Fix: the status prefixes were mojibake ("β...") instead of the intended
    ✅/❌ glyphs shown to the user.
    """
    data, fname = read_uploaded_file(file_obj)
    if not data:
        return "❌ Failed to read upload.", "", ""

    # Dispatch on extension: PDFs go to PyPDF2; everything else is attempted
    # as DOCX (unsupported formats surface as an ERROR_DOCX string).
    if fname.lower().endswith(".pdf"):
        text = extract_text_from_pdf_bytes(data)
    else:
        text = extract_text_from_docx_bytes(data)

    return f"✅ Uploaded {fname} ({len(data)} bytes)", fname, text
| |
|
def handle_fetch_job(url: str):
    """Gradio callback: scrape a job-posting URL.

    Returns (status message, job description text).
    Fix: status prefixes were mojibake instead of the intended ✅/❌ glyphs.
    """
    if not url:
        return "❌ No URL provided.", ""
    text = scrape_job_description_advanced(url)
    if text.startswith("ERROR_FETCH"):
        return f"❌ {text}", ""
    return f"✅ Fetched job description ({len(text)} chars)", text
| |
|
def handle_analyze(resume_text: str, job_text: str, provider: str, api_key: str, model_name: str):
    """Gradio callback: extract job skills and score the current resume.

    Returns (status, skills JSON, ATS score, matched display, missing display).
    Fixes: all five outputs are now strings — the old code returned a list
    ``[]`` for the missing-skills gr.Textbox in two branches — and the
    mojibake status/checkmark glyphs are replaced with ✅/✓/✗/❌.
    """
    if not job_text:
        return "❌ No job description provided.", "", "N/A", "", ""

    skills = extract_skills_from_text(job_text, provider, api_key, model_name)
    skills_clean = sanitize_skills(skills, job_text)

    # Only score when there is a plausible amount of resume text.
    if resume_text and len(resume_text.strip()) > 20:
        score, details = calculate_ats_score(resume_text, skills_clean)

        matched_display = "\n".join(
            f"✓ {m['skill']} ({m['score']}) - {m['match_type']}" for m in details['matched'][:10]
        )
        missing_display = "\n".join(f"✗ {m['skill']}" for m in details['missing'][:10])

        return (
            f"✅ Extracted {len(skills_clean)} skills",
            json.dumps(skills_clean, indent=2),
            f"{score}%",
            matched_display or "No matches",
            missing_display or "All skills matched!",
        )

    # No resume yet: report the extracted skills only.
    return f"✅ Extracted {len(skills_clean)} skills", json.dumps(skills_clean, indent=2), "N/A", "", ""
| |
|
def handle_generate(resume_text: str, job_desc: str, provider: str, api_key: str, model_name: str, template_style: str, company: str, position: str):
    """Gradio callback: generate tailored resume + cover letter, then re-score.

    Returns (status, resume, cover letter, ATS score, matched, missing) —
    six strings, matching the six gr.Textbox outputs.
    Fixes: the error branches returned a list ``[]`` in the last slot, and
    the mojibake status/checkmark glyphs are replaced with ✅/✓/✗/❌.
    """
    if not (resume_text and job_desc):
        return "❌ Missing resume or job description.", "", "", "N/A", "", ""

    # Fall back to the env secret so the UI key field may stay blank.
    api_key = api_key or os.environ.get("API_KEY", "")
    if not api_key:
        return "❌ API key required.", "", "", "N/A", "", ""

    tailored = generate_tailored_resume_text(resume_text, job_desc, provider, api_key, model_name, template_style)
    cover = generate_cover_letter_text(resume_text, job_desc, provider, api_key, model_name, company, position)

    # Re-run ATS scoring against the *generated* resume to show the delta.
    skills = extract_skills_from_text(job_desc, provider, api_key, model_name)
    skills_clean = sanitize_skills(skills, job_desc)
    post_score, post_details = calculate_ats_score(tailored, skills_clean)

    matched_display = "\n".join(
        f"✓ {m['skill']} ({m['score']}) - {m['match_type']}" for m in post_details['matched'][:10]
    )
    missing_display = "\n".join(f"✗ {m['skill']}" for m in post_details['missing'][:10])

    return (
        "✅ Generation complete!",
        tailored,
        cover,
        f"{post_score}%",
        matched_display or "No matches",
        missing_display or "All skills matched!",
    )
| |
|
def make_pdf_download(text: str, title: str):
    """Render ``text`` to a PDF temp file and return its path (None if empty).

    The filename is an md5 of (title + current time) so repeated downloads
    do not clobber each other.
    """
    if not text:
        return None
    pdf_bytes = export_to_pdf_bytes(text, title or "document")
    stamp = hashlib.md5((title + str(time.time())).encode()).hexdigest()[:8]
    out_path = os.path.join(tempfile.gettempdir(), f"{stamp}.pdf")
    with open(out_path, "wb") as sink:
        sink.write(pdf_bytes)
    return out_path
| |
|
| | |
| | with gr.Blocks(title="Job Application Assistant", theme=gr.themes.Soft()) as demo: |
| | gr.Markdown(""" |
| | # πΌ Job Application Assistant |
| | ### AI-powered resume tailoring and ATS optimization |
| | """) |
| | |
| | with gr.Tabs() as tabs: |
| | |
| | with gr.Tab("π 1. Setup"): |
| | gr.Markdown("### Configure your LLM provider and upload your resume") |
| | |
| | with gr.Row(): |
| | with gr.Column(scale=1): |
| | gr.Markdown("#### LLM Configuration") |
| | provider = gr.Dropdown( |
| | choices=["Groq", "Together AI", "OpenRouter", "OpenAI"], |
| | value="Groq", |
| | label="Provider" |
| | ) |
| | model_name = gr.Textbox( |
| | label="Model", |
| | value="openai/gpt-oss-120b", |
| | placeholder="e.g., openai/gpt-oss-120b" |
| | ) |
| | api_key = gr.Textbox( |
| | label="API Key", |
| | type="password", |
| | placeholder="Paste your API key or set API_KEY env variable" |
| | ) |
| | |
| | with gr.Column(scale=1): |
| | gr.Markdown("#### Upload Resume") |
| | file_input = gr.File( |
| | label="Upload your resume", |
| | file_types=[".pdf", ".docx"] |
| | ) |
| | upload_btn = gr.Button("π€ Process Upload", variant="primary", size="lg") |
| | upload_status = gr.Textbox(label="Status", interactive=False) |
| | |
| | resume_text_out = gr.Textbox( |
| | label="π Extracted Resume Text", |
| | lines=15, |
| | placeholder="Your resume text will appear here after upload..." |
| | ) |
| | |
| | |
| | with gr.Tab("π― 2. Job Analysis"): |
| | gr.Markdown("### Analyze job posting and calculate initial ATS score") |
| | |
| | with gr.Row(): |
| | with gr.Column(): |
| | job_url = gr.Textbox( |
| | label="Job Posting URL (optional)", |
| | placeholder="https://www.linkedin.com/jobs/view/..." |
| | ) |
| | fetch_btn = gr.Button("π Fetch Job Description", size="sm") |
| | |
| | job_desc_out = gr.Textbox( |
| | label="Job Description", |
| | lines=12, |
| | placeholder="Paste job description or fetch from URL..." |
| | ) |
| | |
| | analyze_btn = gr.Button("π Analyze Job & Calculate ATS Score", variant="primary", size="lg") |
| | analyze_status = gr.Textbox(label="Status", interactive=False) |
| | |
| | with gr.Row(): |
| | with gr.Column(): |
| | pre_ats_score = gr.Textbox(label="π Initial ATS Score", interactive=False) |
| | matched_skills = gr.Textbox(label="β
Matched Skills (Top 10)", lines=8, interactive=False) |
| | missing_skills = gr.Textbox(label="β Missing Skills (Top 10)", lines=8, interactive=False) |
| | |
| | with gr.Column(): |
| | skills_out = gr.Textbox(label="π― Extracted Skills (JSON)", lines=20, interactive=False) |
| | |
| | |
| | with gr.Tab("β¨ 3. Generate"): |
| | gr.Markdown("### Generate tailored resume and cover letter") |
| | |
| | with gr.Row(): |
| | with gr.Column(scale=1): |
| | template_style = gr.Dropdown( |
| | choices=["Professional", "Modern", "Creative", "Executive", "Technical"], |
| | value="Professional", |
| | label="Resume Style" |
| | ) |
| | company_name = gr.Textbox(label="Company Name", placeholder="e.g., Google") |
| | position_title = gr.Textbox(label="Position", placeholder="e.g., Senior Software Engineer") |
| | |
| | gen_btn = gr.Button("β¨ Generate Tailored Documents", variant="primary", size="lg") |
| | gen_status = gr.Textbox(label="Status", interactive=False) |
| | |
| | with gr.Column(scale=2): |
| | tailored_out = gr.Textbox( |
| | label="π Tailored Resume", |
| | lines=20, |
| | placeholder="Your tailored resume will appear here..." |
| | ) |
| | |
| | with gr.Row(): |
| | download_resume_btn = gr.Button("β¬οΈ Download Resume as PDF") |
| | resume_pdf = gr.File(label="Resume PDF") |
| | |
| | cover_out = gr.Textbox( |
| | label="βοΈ Cover Letter", |
| | lines=15, |
| | placeholder="Your cover letter will appear here..." |
| | ) |
| | |
| | with gr.Row(): |
| | download_cover_btn = gr.Button("β¬οΈ Download Cover Letter as PDF") |
| | cover_pdf = gr.File(label="Cover Letter PDF") |
| | |
| | gr.Markdown("### π Post-Generation ATS Score") |
| | with gr.Row(): |
| | post_ats_score = gr.Textbox(label="Final ATS Score", interactive=False) |
| | post_matched = gr.Textbox(label="β
Matched Skills", lines=6, interactive=False) |
| | post_missing = gr.Textbox(label="β Missing Skills", lines=6, interactive=False) |
| | |
| | |
| | upload_btn.click( |
| | fn=handle_upload, |
| | inputs=[file_input], |
| | outputs=[upload_status, gr.Textbox(visible=False), resume_text_out] |
| | ) |
| | |
| | fetch_btn.click( |
| | fn=handle_fetch_job, |
| | inputs=[job_url], |
| | outputs=[analyze_status, job_desc_out] |
| | ) |
| | |
| | analyze_btn.click( |
| | fn=handle_analyze, |
| | inputs=[resume_text_out, job_desc_out, provider, api_key, model_name], |
| | outputs=[analyze_status, skills_out, pre_ats_score, matched_skills, missing_skills] |
| | ) |
| | |
| | gen_btn.click( |
| | fn=handle_generate, |
| | inputs=[resume_text_out, job_desc_out, provider, api_key, model_name, template_style, company_name, position_title], |
| | outputs=[gen_status, tailored_out, cover_out, post_ats_score, post_matched, post_missing] |
| | ) |
| | |
| | download_resume_btn.click( |
| | fn=lambda t: make_pdf_download(t, "tailored_resume"), |
| | inputs=[tailored_out], |
| | outputs=[resume_pdf] |
| | ) |
| | |
| | download_cover_btn.click( |
| | fn=lambda t: make_pdf_download(t, "cover_letter"), |
| | inputs=[cover_out], |
| | outputs=[cover_pdf] |
| | ) |
| |
|
| | if __name__ == "__main__": |
| | demo.launch(server_name="0.0.0.0", server_port=int(os.environ.get("PORT", 7860))) |