from flask import Flask, render_template, request
import requests
from bs4 import BeautifulSoup, element
from urllib.parse import urljoin, urlparse
import re
import os
import json
import hashlib
from datetime import datetime

app = Flask(__name__)

# Storage location for crawl results: JSON files go directly into SAVE_DIR,
# downloaded images into SAVE_DIR/images. The images subdirectory is created
# eagerly at import time so download_image() can write without re-checking.
SAVE_DIR = 'resume_data'
os.makedirs(os.path.join(SAVE_DIR, 'images'), exist_ok=True)

def generate_image_name(url, img_src):
    """生成与原始图片一致的名称"""
    try:
        # 保留原始图片文件名
        original_name = os.path.basename(urlparse(img_src).path)
        if not original_name or '.' not in original_name:
            ext = os.path.splitext(img_src)[1] or '.jpg'
            return f"{hashlib.md5(img_src.encode()).hexdigest()}{ext}"
        return original_name
    except:
        ext = os.path.splitext(img_src)[1] or '.jpg'
        return f"{hashlib.md5(img_src.encode()).hexdigest()}{ext}"

def download_image(img_src, base_url, save_dir):
    """Download the image referenced by *img_src*, keeping its original name.

    Args:
        img_src: ``src`` attribute from the page (may be relative).
        base_url: Page URL used to resolve relative srcs.
        save_dir: Directory the image file is written into.

    Returns:
        The local file path on success, or ``None`` when the URL does not
        look like an image or the download fails. Best-effort: errors are
        logged to stdout, never raised.
    """
    try:
        img_url = urljoin(base_url, img_src)
        # Match the extension on the URL *path*, so URLs with query strings
        # such as "photo.jpg?v=2" are recognized (the old full-URL check
        # wrongly rejected them).
        if not re.search(r'\.(jpg|jpeg|png|gif|webp|svg|bmp)$',
                         urlparse(img_url).path, re.IGNORECASE):
            return None

        # Context manager releases the streamed connection even when
        # raise_for_status() or the disk write fails (the original leaked it).
        with requests.get(img_url, stream=True, timeout=15) as response:
            response.raise_for_status()

            filename = generate_image_name(img_url, img_src)
            save_path = os.path.join(save_dir, filename)

            with open(save_path, 'wb') as f:
                for chunk in response.iter_content(1024):
                    f.write(chunk)

        return save_path
    except Exception as e:
        print(f"图片下载失败: {img_src} - {str(e)}")
        return None

def save_to_json(data):
    """Persist one crawl result as a standalone JSON file.

    The filename combines a short MD5 digest of the source URL with a
    timestamp, so repeated crawls of the same page never overwrite each
    other. The bulky ``original_html`` field is deliberately excluded.

    Args:
        data: Resume dict produced by crawl_website().

    Returns:
        True when the file was written, False on any error (best-effort:
        failures are printed, never raised).
    """
    try:
        os.makedirs(SAVE_DIR, exist_ok=True)

        # Unique name: short hash of the URL + current wall-clock time.
        source_url = data['basic_info']['source_url']
        stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        digest = hashlib.md5(source_url.encode()).hexdigest()[:8]
        filepath = os.path.join(SAVE_DIR, f"resume_{digest}_{stamp}.json")

        # Copy only the persisted sections (everything except original_html),
        # preserving the original structure.
        sections = ("basic_info", "education", "skills", "projects",
                    "awards", "images", "self_evaluation")
        payload = {section: data[section] for section in sections}

        with open(filepath, 'w', encoding='utf-8') as f:
            json.dump(payload, f, ensure_ascii=False, indent=2)

        print(f"已保存独立JSON文件: {filepath}")
        return True
    except Exception as e:
        print(f"保存JSON失败: {str(e)}")
        return False

def extract_skills(soup):
    """Extract known technology keywords from a parsed page.

    Scans code/pre blocks and paragraphs for a fixed list of tech terms.
    Matching is whole-word: 'javascript' no longer also reports 'java',
    and 'mysql' no longer also reports 'sql' (substring-match bug in the
    previous version).

    Args:
        soup: BeautifulSoup document (only find_all/get_text are used).

    Returns:
        list[str]: Capitalized keyword names, deduplicated, unordered.
    """
    tech_keywords = ['java', 'python', 'spring', 'html', 'css', 'javascript',
                     'sql', 'mysql', 'oracle', 'linux', 'git']
    # Pre-compile one whole-word pattern per keyword, hoisted out of the loop.
    patterns = [(kw, re.compile(rf'\b{re.escape(kw)}\b')) for kw in tech_keywords]

    skills = set()
    # Single pass over all candidate tags instead of two near-identical loops.
    for tag in soup.find_all(['code', 'pre', 'p']):
        text = tag.get_text().lower()
        for keyword, pattern in patterns:
            if pattern.search(text):
                skills.add(keyword.capitalize())

    return list(skills)

def crawl_website(url):
    """Crawl *url* and heuristically extract resume-style data plus a page summary.

    Heuristics: skills via keyword matching (extract_skills), projects from
    sections whose class or text mentions "project"/"项目", awards from <li>
    items containing "奖"/"award", and a self-evaluation stitched from the
    first few long paragraphs. The result is also persisted best-effort via
    save_to_json(), and referenced images are downloaded locally.

    Args:
        url: Target URL; "http://" is prepended when no scheme is given.

    Returns:
        dict with keys 'success', 'page_info', 'resume_data', 'error'.
        On failure 'success' is False, 'error' carries the message, and the
        other two are None — no exception escapes this function.
    """
    try:
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
            'Accept-Language': 'zh-CN,zh;q=0.9'
        }
        
        # Default to plain HTTP when the caller omitted the scheme.
        if not re.match(r'^https?://', url, re.IGNORECASE):
            url = 'http://' + url
            
        response = requests.get(url, headers=headers, timeout=20)
        response.raise_for_status()
        # Let requests sniff the real encoding (matters for GBK/GB2312 pages).
        response.encoding = response.apparent_encoding
        
        soup = BeautifulSoup(response.text, 'html.parser')
        base_url = f"{urlparse(url).scheme}://{urlparse(url).netloc}"
        
        # Structure that will be persisted as JSON (original_html is stripped
        # by save_to_json before writing).
        resume_data = {
            "basic_info": {
                "name": "网页内容抓取结果",
                "source_url": url,
                "crawl_time": datetime.now().isoformat(),
                "status_code": response.status_code
            },
            "education": [
                {
                    "school": "网页内容分析结果",
                    "period": "N/A",
                    "courses": "从网页提取的技术相关内容"
                }
            ],
            "skills": extract_skills(soup),
            "projects": [],
            "awards": [],
            "images": [],
            "self_evaluation": "",
            # NOTE(review): full HTML is stored here AND in page_info below —
            # double memory for large pages; confirm both copies are needed.
            "original_html": str(soup)
        }
        
        # Extract project info: any section/div/article whose class list or
        # text mentions "project"/"项目", capped at 3 entries.
        project_count = 0
        for section in soup.find_all(['section', 'div', 'article']):
            if project_count >= 3:  # at most 3 projects
                break
                
            if 'project' in str(section.get('class', [])).lower() or '项目' in section.get_text():
                title = section.find(['h2', 'h3'])
                resume_data['projects'].append({
                    "name": title.get_text() if title else f"项目{project_count+1}",
                    "period": "N/A",
                    # Truncate the description to 200 chars with an ellipsis.
                    "description": section.get_text(' ', strip=True)[:200] + ('...' if len(section.get_text()) > 200 else ''),
                    "tech_stack": ", ".join(resume_data['skills'][:3]) if resume_data['skills'] else "未识别技术栈"
                })
                project_count += 1
        
        # Extract awards: list items mentioning "奖"/"award", capped at 3.
        for li in soup.find_all('li'):
            text = li.get_text()
            if ('奖' in text or 'award' in text.lower()) and len(resume_data['awards']) < 3:
                resume_data['awards'].append(text.strip())
        
        # Build a self-evaluation from the first 3 paragraphs longer than
        # 20 chars, truncated to 500 chars overall.
        main_content = []
        for p in soup.find_all('p'):
            text = p.get_text(' ', strip=True)
            if len(text) > 20:
                main_content.append(text)
        resume_data['self_evaluation'] = " ".join(main_content[:3])[:500] + ('...' if len(main_content) > 3 else '')
        
        # Download every referenced image (base64 data: URIs are skipped);
        # failed downloads are recorded with local_path=None.
        for img in soup.find_all('img', src=True):
            img_src = img['src']
            if not img_src.startswith('data:'):  # ignore base64-inlined images
                local_path = download_image(img_src, base_url, os.path.join(SAVE_DIR, 'images'))
                resume_data['images'].append({
                    "original_src": img_src,
                    "local_path": os.path.basename(local_path) if local_path else None,
                    "alt": img.get('alt', '')
                })
        
        # Persist the crawl result (best-effort; failures are only logged).
        save_to_json(resume_data)
        
        # Summary returned to the front end template.
        page_info = {
            'url': url,
            'status_code': response.status_code,
            'title': soup.title.string if soup.title else '无标题',
            'meta_description': soup.find('meta', attrs={'name': 'description'})['content'] 
                            if soup.find('meta', attrs={'name': 'description'}) else '无描述',
            'links': [{
                'text': a.get_text(strip=True),
                'url': urljoin(base_url, a['href'])
            } for a in soup.find_all('a', href=True)][:10],  # first 10 links only
            'images': [{
                'src': urljoin(base_url, img['src']),
                'alt': img.get('alt', '')
            } for img in soup.find_all('img', src=True)][:6],  # first 6 images only
            'original_html': str(soup)
        }
        
        return {
            'success': True,
            'page_info': page_info,
            'resume_data': resume_data,
            'error': None
        }
        
    except Exception as e:
        return {
            'success': False,
            'error': f"爬取失败: {str(e)}",
            'page_info': None,
            'resume_data': None
        }

@app.route('/', methods=['GET', 'POST'])
def index():
    """Render the crawl form; on POST with a URL, crawl it and show results."""
    if request.method == 'POST':
        target = request.form.get('url', '').strip()
        if target:
            # crawl_website always returns the full template context dict
            # (success / page_info / resume_data / error).
            return render_template('index.html', **crawl_website(target))
    # GET, or POST with an empty URL: show the bare form.
    return render_template('index.html')

if __name__ == '__main__':
    # Development server only — debug=True enables the reloader and the
    # interactive debugger; do not use in production.
    app.run(debug=True)