from flask import Flask, render_template, request, jsonify, session, send_from_directory
import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin
import json
import os
import urllib3
import time

# Silence urllib3's InsecureRequestWarning — requests below are made with verify=False.
urllib3.disable_warnings()

app = Flask(__name__)
# NOTE(review): hard-coded secret key signs the session cookie — move this to an
# environment variable / config before deploying.
app.secret_key = 'your_secret_key_here'  # used to sign/encrypt session data

# Default resume shown when no crawled resume is stored in the session.
with open('resume_data/resume.json', 'r', encoding='utf-8') as f:
    MY_RESUME = json.load(f)

# 设置静态文件路由
# Static-file route for the default resume's images.
@app.route('/resume_data/images/<path:filename>')
def serve_resume_images(filename):
    """Serve an image belonging to the default resume."""
    image_dir = 'resume_data/images'
    return send_from_directory(image_dir, filename)

@app.route('/resume_data_zhou/images/<path:filename>')
def serve_resume_images_zhou(filename):
    """Serve an image belonging to the "zhou" resume data set."""
    image_dir = 'resume_data_zhou/images'
    return send_from_directory(image_dir, filename)

@app.route('/')
def home():
    """Render the resume page: the crawled resume if one is in the session,
    otherwise the default (own) resume."""
    crawled = session.get('crawled_resume')
    active_resume = crawled or MY_RESUME
    return render_template('resume.html',
                         resume=active_resume,
                         is_crawled=bool(crawled))

@app.route('/reset', methods=['POST'])
def reset_resume():
    """Discard any crawled resume from the session and send the client home."""
    if 'crawled_resume' in session:
        del session['crawled_resume']
    return jsonify({"success": True, "redirect": "/"})

@app.route('/crawl', methods=['POST'])
def crawl_resume():
    """Crawl the resume at the submitted URL and store it in the session.

    Expects a form field ``url``. Responds 400 when the field is missing or
    empty, 500 (with the error message) when scraping fails, otherwise a
    JSON redirect back to "/".
    """
    target_url = request.form.get('url')
    if not target_url:
        # Fail fast with a client error instead of passing None to
        # scrape_resume and surfacing a confusing 500.
        return jsonify({"error": "缺少url参数"}), 400

    try:
        crawled_data = scrape_resume(target_url)
        session['crawled_resume'] = crawled_data
        return jsonify({"success": True, "redirect": "/"})
    except Exception as e:
        return jsonify({"error": str(e)}), 500

def download_image(img_url, filename, session):
    """Download *img_url* via the given requests-style session and save it.

    The image is written to ``resume_data/images/<filename>`` (the directory
    must already exist).

    Returns:
        The relative file path on success, or None on any failure
        (best-effort: a missing photo should not abort the whole scrape).
    """
    try:
        img_data = session.get(img_url, timeout=10, verify=False).content
        # BUG FIX: the path was a literal that ignored `filename`, so every
        # image was written to the same bogus file.
        dest_path = f'resume_data/images/{filename}'
        with open(dest_path, 'wb') as handler:
            handler.write(img_data)
        return dest_path
    except Exception as e:
        print(f"下载图片失败 {img_url}: {e}")
        return None

def save_data(data):
    """Persist the scraped resume to resume_data/resume.json (UTF-8, indented)."""
    serialized = json.dumps(data, ensure_ascii=False, indent=4)
    with open('resume_data/resume.json', 'w', encoding='utf-8') as out:
        out.write(serialized)

def scrape_resume(url):
    """Scrape the resume page at *url* into a structured dict.

    Fetches the page (up to 3 attempts with a 2s pause), then extracts
    basic info, education, skills, projects, awards and self-evaluation
    using generic tag/class selectors. The result is persisted via
    ``save_data`` and also returned.

    Raises:
        Exception: with a wrapped message when the page cannot be fetched
            or any parsing step fails.
    """
    os.makedirs('resume_data', exist_ok=True)
    os.makedirs('resume_data/images', exist_ok=True)

    try:
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Connection': 'keep-alive'
        }

        # Named `http` (not `session`) so it doesn't shadow flask.session
        # imported at module level.
        http = requests.Session()
        http.headers.update(headers)

        # Retry mechanism: up to 3 attempts before giving up.
        for _ in range(3):
            try:
                response = http.get(url, timeout=30, verify=False)
                response.raise_for_status()
                break
            except requests.exceptions.RequestException as e:
                print(f"请求失败，重试中... ({str(e)})")
                time.sleep(2)
        else:
            raise Exception("多次尝试后仍无法获取网页内容")

        # NOTE(review): assumes the target page is UTF-8 — confirm for the
        # sites actually crawled.
        response.encoding = 'utf-8'
        soup = BeautifulSoup(response.text, 'html.parser')

        resume_data = {
            'basic_info': {},
            'education': [],
            'skills': {},
            'projects': [],
            'awards': [],
            'self_evaluation': []
        }

        # 1. Basic info — first <h1> is the name, first <h2> the job title.
        name = soup.find('h1').get_text(strip=True) if soup.find('h1') else "未提供"
        job_title = soup.find('h2').get_text(strip=True) if soup.find('h2') else "未提供"

        # Contact details pulled from anchor hrefs (mailto:/tel:/code hosting).
        contact_info = {}
        for a in soup.find_all('a', href=True):
            if 'mailto:' in a['href']:
                contact_info['email'] = a['href'].replace('mailto:', '')
            elif 'tel:' in a['href']:
                contact_info['phone'] = a['href'].replace('tel:', '')
            elif 'github.com' in a['href'] or 'gitee.com' in a['href']:
                contact_info['code_repo'] = a['href']

        # Profile photo: first <img> on the page, downloaded locally.
        photo = None
        img = soup.find('img')
        if img and img.get('src'):
            photo = download_image(urljoin(url, img['src']), 'profile.jpg', http)

        resume_data['basic_info'] = {
            'name': name,
            'position': job_title,
            'phone': contact_info.get('phone', '未提供'),
            'email': contact_info.get('email', '未提供'),
            'code_repo': contact_info.get('code_repo', '未提供'),
            'photo': photo
        }

        # 2. Education — any <div> whose class name mentions "education".
        for edu in soup.find_all('div', class_=lambda x: x and 'education' in x.lower()):
            edu_data = {
                'university': edu.find('h3').get_text(strip=True) if edu.find('h3') else "未提供",
                'period': edu.find('span', class_='date').get_text(strip=True) if edu.find('span', class_='date') else "未提供",
                'courses': [li.get_text(strip=True) for li in edu.find_all('li')]
            }
            resume_data['education'].append(edu_data)

        # 3. Skills — grouped under .skill-group divs with an <h3> heading.
        skills_section = soup.find('section', id='skills') or soup.find('div', class_='skills')
        if skills_section:
            for skill_group in skills_section.find_all('div', class_='skill-group'):
                title = skill_group.find('h3').get_text(strip=True) if skill_group.find('h3') else "其他技能"
                skills = [li.get_text(strip=True) for li in skill_group.find_all('li')]
                resume_data['skills'][title] = skills

        # 4. Projects — .project divs inside the projects section.
        projects_section = soup.find('section', id='projects') or soup.find('div', class_='projects')
        if projects_section:
            for project in projects_section.find_all('div', class_='project'):
                project_data = {
                    'name': project.find('h3').get_text(strip=True) if project.find('h3') else "未命名项目",
                    'period': project.find('span', class_='date').get_text(strip=True) if project.find('span', class_='date') else "未提供",
                    'description': project.find('p').get_text(strip=True) if project.find('p') else "无描述",
                    'responsibility': [li.get_text(strip=True) for li in project.find_all('li')]
                }
                resume_data['projects'].append(project_data)

        # 5. Awards — flat list of <li> texts in the awards section.
        awards_section = soup.find('section', id='awards') or soup.find('div', class_='awards')
        if awards_section:
            resume_data['awards'] = [li.get_text(strip=True) for li in awards_section.find_all('li')]

        # 6. Self-evaluation — paragraphs in the "about" section.
        eval_section = soup.find('section', id='about') or soup.find('div', class_='about')
        if eval_section:
            resume_data['self_evaluation'] = [p.get_text(strip=True) for p in eval_section.find_all('p')]

        save_data(resume_data)
        return resume_data

    except Exception as e:
        print(f"发生错误: {e}")
        raise Exception(f"爬取简历失败: {str(e)}")

if __name__ == '__main__':
    # NOTE(review): debug=True with host 0.0.0.0 exposes the Werkzeug debugger
    # to the whole network — confirm this is only ever run in development.
    app.run(debug=True, host='0.0.0.0')