from flask import Flask, render_template, request, jsonify
import requests
from bs4 import BeautifulSoup
import os
from urllib.parse import urljoin

# Flask application instance; all routes below are registered against it.
app = Flask(__name__)

def scrape_resume(url):
    """Fetch a resume page and extract its structured sections.

    Args:
        url: Absolute URL of the resume page to scrape.

    Returns:
        dict: ``{"status": "success", "data": {...}}`` where ``data`` holds
        the extracted fields, or ``{"status": "error", "message": str}`` on
        any failure (network error, non-2xx response, parse error).
    """
    try:
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        }
        # timeout prevents the request (and the serving thread) from
        # hanging forever on an unresponsive host.
        response = requests.get(url, headers=headers, timeout=10)
        # Force UTF-8: target pages are Chinese and may mis-declare their
        # charset, which would garble every extracted string.
        response.encoding = 'utf-8'
        response.raise_for_status()

        soup = BeautifulSoup(response.text, 'html.parser')

        # Result skeleton — every field defaults to empty so the caller
        # always receives a complete, uniform shape.
        result = {
            "name": "",
            "title": "",
            "contact": "",
            "education": "",
            "skills": "",
            "projects": [],
            "awards": "",
            "self_evaluation": "",
            "image_url": ""
        }

        # 1. Name and desired position from the profile header.
        profile = soup.find('div', class_='profile')
        if profile:
            name_tag = profile.find('h1')
            result["name"] = name_tag.get_text(strip=True) if name_tag else ""
            title_tag = profile.find('p')
            result["title"] = title_tag.get_text(strip=True) if title_tag else ""

        # 2. Contact details.
        contact_info = soup.find('div', class_='contact-info')
        result["contact"] = contact_info.get_text(strip=True, separator='\n') if contact_info else ""

        # 3. Generic section extractor: the content div following an <h2>
        # whose text matches the given section title.
        def get_section_content(title):
            section = soup.find('h2', string=title)
            if section:
                content = section.find_next('div', class_='content')
                return content.get_text(strip=True, separator='\n') if content else ""
            return ""

        result["education"] = get_section_content("教育背景")
        result["skills"] = get_section_content("技能专长")
        result["awards"] = get_section_content("获奖情况")
        result["self_evaluation"] = get_section_content("自我评价")

        # 4. Project experience may span several content divs. Walk the
        # subsequent h2/div tags and stop at the next <h2>, so content
        # belonging to later sections (awards, self-evaluation, ...) is
        # not swept into the project list.
        project_section = soup.find('h2', string="项目经验")
        if project_section:
            for element in project_section.find_all_next(['h2', 'div']):
                if element.name == 'h2':
                    break  # reached the next section heading
                if 'content' in (element.get('class') or []):
                    result["projects"].append(element.get_text(strip=True, separator='\n'))

        # 5. Profile photo — resolve relative src against the page URL.
        img = soup.find('img', class_='profile-img')
        if img and img.get('src'):
            result["image_url"] = urljoin(url, img['src'])

        return {"status": "success", "data": result}

    except Exception as e:
        # Boundary handler: report any failure as a structured error so
        # the /scrape endpoint can always return JSON.
        return {"status": "error", "message": str(e)}

@app.route('/')
def home():
    """Serve the landing page, which renders the site owner's own resume."""
    # is_default tells the template to show the built-in resume rather
    # than scraped data.
    return render_template('resume.html', is_default=True)

@app.route('/scrape', methods=['POST'])
def scrape():
    """Scrape a user-supplied resume URL and return the result as JSON.

    Expects a form field ``url``; responds with the dict produced by
    :func:`scrape_resume`, or an error payload when the field is missing.
    """
    # NOTE(review): the URL is fetched server-side with no validation —
    # potential SSRF; consider restricting allowed schemes/hosts.
    target_url = request.form.get('url')
    if not target_url:
        return jsonify({"status": "error", "message": "URL不能为空"})
    return jsonify(scrape_resume(target_url))

if __name__ == '__main__':
    # NOTE(review): debug=True combined with host 0.0.0.0 exposes the
    # Werkzeug interactive debugger to the whole network — disable debug
    # (or bind to 127.0.0.1) before deploying.
    app.run(host='0.0.0.0', port=5000, debug=True)