from flask import Flask, jsonify, render_template, request, flash, session
import requests
from bs4 import BeautifulSoup
import logging
from urllib.parse import urlparse, urljoin
from datetime import datetime
import os
import uuid
import json

# Configure logging: timestamped INFO-level messages
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

app = Flask(__name__, 
            static_folder='static',
            static_url_path='/static')
# NOTE(review): hard-coded secret key — load from an environment variable in production
app.secret_key = 'resume-crawler-secure-key-2024'

# Smart selector configuration (photo included).
# For each resume field: an ordered list of (CSS selector, extraction mode)
# fallbacks. Mode "src" reads an attribute, "text" gets stripped text,
# "html" keeps sanitized markup. The first matching selector wins
# (see fetch_resume_data).
SELECTORS = {
    "photo": [
        ("header img", "src"),
        (".profile-photo img", "src"),
        ("img.profile-image", "src")
    ],
    "name": [
        ("header h1", "text"), 
        (".profile-name", "text")
    ],
    "objective": [
        ("header p:nth-of-type(1)", "text"),
        (".career-objective", "text")
    ],
    "education": [
        ("section#education", "html"),
        (".education-section", "html")
    ],
    "projects": [
        ("section#projects", "html"),
        (".project-experience", "html")
    ],
    "skills": [
        ("section#skills", "html"),
        (".technical-skills", "html")
    ],
    "contact": [
        ("section#contact", "html"),
        (".contact-info", "html")
    ]
}

def sanitize_html(html):
    """Sanitize untrusted HTML down to a whitelist of tags and attributes.

    Script-bearing elements are removed entirely; non-whitelisted tags are
    unwrapped (children kept); non-whitelisted attributes are dropped; and
    href/src values using script-executing URL schemes are stripped
    (XSS hardening — the original let ``javascript:`` URLs through).

    Args:
        html: raw HTML fragment from a crawled page (untrusted).
    Returns:
        Sanitized HTML string.
    """
    allowed_tags = {'div', 'h2', 'h3', 'p', 'span', 'ul', 'li', 'a', 'section', 'progress', 'img'}
    allowed_attrs = {'class', 'id', 'href', 'src', 'alt', 'title', 'value', 'max'}
    # URL-bearing attributes must never carry active-content schemes
    unsafe_schemes = ('javascript:', 'vbscript:', 'data:')

    soup = BeautifulSoup(html, 'html.parser')

    # Remove elements that can execute or embed active content
    for tag in soup(['script', 'iframe', 'form', 'object', 'style']):
        tag.decompose()

    # Enforce the tag and attribute whitelist
    for tag in soup.find_all(True):
        if tag.name not in allowed_tags:
            tag.unwrap()
            continue
        cleaned = {}
        for attr, val in tag.attrs.items():
            if attr not in allowed_attrs:
                continue
            if attr in ('href', 'src'):
                # Normalize away whitespace/case tricks before the scheme check
                compact = ''.join(str(val).split()).lower()
                if compact.startswith(unsafe_schemes):
                    continue
            cleaned[attr] = val
        tag.attrs = cleaned

    return str(soup)

def delete_previous_photo():
    """Remove any previously downloaded photo from static/resume_assets."""
    assets_dir = os.path.join(app.static_folder, 'resume_assets')
    if not os.path.exists(assets_dir):
        return
    for entry in os.listdir(assets_dir):
        entry_path = os.path.join(assets_dir, entry)
        if os.path.isfile(entry_path):
            os.remove(entry_path)

def download_image(url, base_url):
    """Download an image (possibly given as a relative URL) and save it locally.

    The previously saved photo is deleted first, so at most one photo
    exists at a time under static/resume_assets/.

    Args:
        url: image URL as found in the page (absolute or relative).
        base_url: page origin used to resolve relative URLs.
    Returns:
        The static URL path of the saved photo, or None on any failure.
    """
    try:
        # Drop the previous photo so the fixed filename is a clean overwrite
        delete_previous_photo()

        full_url = urljoin(base_url, url)
        response = requests.get(full_url, timeout=10, stream=True)
        response.raise_for_status()

        # Ensure the destination directory exists
        save_dir = os.path.join(app.static_folder, 'resume_assets')
        os.makedirs(save_dir, exist_ok=True)

        # Fixed filename: every crawl replaces the same file
        filename = "resume_photo.jpg"
        save_path = os.path.join(save_dir, filename)

        # Stream the body to disk in chunks
        with open(save_path, 'wb') as f:
            for chunk in response.iter_content(1024):
                f.write(chunk)

        # BUG FIX: the original returned the broken literal
        # "/static/resume_assets/(unknown)" instead of the saved file's path
        return f"/static/resume_assets/{filename}"
    except Exception as e:
        logging.error(f"图片下载失败: {str(e)}")
        return None

def get_element_content(soup, selector, mode):
    """Extract content from the first element matching *selector*.

    ``mode == 'text'`` yields stripped plain text; any other mode yields
    sanitized HTML. Returns "" when nothing matches.
    """
    node = soup.select_one(selector)
    if not node:
        return ""
    if mode == 'text':
        return node.get_text(strip=True)
    return sanitize_html(str(node))

def delete_previous_data():
    """Remove any previously persisted resume JSON from resume_data/."""
    data_dir = os.path.join(app.root_path, 'resume_data')
    if not os.path.exists(data_dir):
        return
    for entry in os.listdir(data_dir):
        entry_path = os.path.join(data_dir, entry)
        if os.path.isfile(entry_path):
            os.remove(entry_path)

def fetch_resume_data(url):
    """Crawl a resume page and extract the fields configured in SELECTORS.

    Args:
        url: page URL; ``http://`` is prepended when no scheme is given.
    Returns:
        dict of extracted fields (also persisted to
        resume_data/latest_resume.json), or None when nothing useful was
        extracted or the request failed.
    """
    try:
        parsed = urlparse(url)
        if not parsed.scheme:
            url = f'http://{url}'
            # BUG FIX: re-parse after prepending the scheme. urlparse of a
            # scheme-less string puts the host in .path, leaving .netloc
            # empty — base_url below would have been "://".
            parsed = urlparse(url)

        response = requests.get(url,
                                timeout=20,
                                headers={
                                    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) ResumeCrawler/2.0',
                                    'Accept-Language': 'zh-CN,zh;q=0.9'
                                })

        # requests falls back to latin-1-ish encodings when the server omits
        # a charset; prefer the sniffed encoding. BUG FIX: .encoding can be
        # None, which made .lower() raise and silently abort the crawl.
        if not response.encoding or response.encoding.lower() in ('iso-8859-1', 'windows-1252'):
            response.encoding = response.apparent_encoding or 'utf-8'

        soup = BeautifulSoup(response.text, 'html.parser')
        base_url = f"{parsed.scheme}://{parsed.netloc}"

        resume_data = {}

        # Photo: first selector whose image actually downloads wins
        for css, attr in SELECTORS["photo"]:
            element = soup.select_one(css)
            if element and element.get(attr):
                local_path = download_image(element[attr], base_url)
                if local_path:
                    resume_data['photo'] = local_path
                    break

        # Remaining fields: first selector yielding non-empty content wins
        for field, configs in SELECTORS.items():
            if field == 'photo':
                continue
            for selector, mode in configs:
                content = get_element_content(soup, selector, mode)
                if content:
                    resume_data[field] = content
                    break

        # Replace any previously persisted crawl result
        delete_previous_data()

        # Persist the latest result under a fixed filename
        if resume_data:
            save_dir = os.path.join(app.root_path, 'resume_data')
            os.makedirs(save_dir, exist_ok=True)
            filename = "latest_resume.json"
            with open(os.path.join(save_dir, filename), 'w', encoding='utf-8') as f:
                json.dump(resume_data, f, ensure_ascii=False, indent=2)

        return resume_data if any(resume_data.values()) else None

    except requests.RequestException as e:
        logging.error(f"请求错误: {str(e)}")
    except Exception as e:
        logging.error(f"[爬虫错误] {str(e)}")
    return None

@app.route('/reset', methods=['POST'])
def reset_resume():
    """Drop the crawled resume from the session and point the client home."""
    payload = {"success": True, "redirect": "/"}
    flash('✅ 已恢复原始简历')
    session.pop('crawled_resume', None)
    return jsonify(payload)

@app.route('/', methods=['GET', 'POST'])
def index():
    """Render the resume page; on POST, crawl the submitted URL first."""
    if request.method == 'POST':
        target = request.form.get('url', '').strip()
        if target:
            crawled = fetch_resume_data(target)
            if crawled:
                session['crawled_resume'] = crawled
                flash('✅ 简历解析成功！数据已保存', 'success')
            else:
                flash('⚠️ 无法获取有效简历信息，请检查URL有效性', 'error')

    # Fall back to an empty dict when no crawl result is stored in the session
    resume_data = session.get('crawled_resume') or {}

    return render_template('index.html',
                           resume_data=resume_data,
                           original=SELECTORS)

if __name__ == '__main__':
    # Ensure both storage directories exist before serving requests
    os.makedirs('static/resume_assets', exist_ok=True)
    os.makedirs('resume_data', exist_ok=True)
    app.run(debug=True, port=5000)