from flask import Flask, render_template, request, jsonify, session, send_from_directory
import requests
from bs4 import BeautifulSoup
import json, os, re
from urllib.parse import urljoin, urlparse
import time
from concurrent.futures import ThreadPoolExecutor
import threading

app = Flask(__name__, template_folder='templates')
# NOTE(review): hard-coded secret key — load from an environment variable in
# production; anyone who knows this value can forge session cookies.
app.secret_key = 'your_secret_key_here'
app.config['MAX_CONTENT_LENGTH'] = 50 * 1024 * 1024  # allow request bodies up to 50 MB

# In-memory store of crawl results, keyed by the sanitized-URL page_id
# produced in crawl(). Lost on restart.
crawled_pages = {}
# Guards writes to crawled_pages from concurrent request-handler threads.
lock = threading.Lock()

# Load the local resume template shown when no crawled page is requested.
# Runs at import time; a missing/invalid file aborts startup.
with open('resume_data_zhou/resume.json', 'r', encoding='utf-8') as f:
    MY_RESUME = json.load(f)

def normalize_url(url):
    """Normalize a user-supplied URL: strip whitespace and default the scheme.

    Any input lacking an http:// or https:// prefix gets http:// prepended.

    Args:
        url: Raw URL string from the user (may have surrounding whitespace).

    Returns:
        The trimmed URL, guaranteed to start with http:// or https://.
    """
    url = url.strip()
    # The original code distinguished IP-address inputs from domain names,
    # but both branches produced the same f'http://{url}' — collapse them.
    if not re.match(r'^https?://', url):
        url = f'http://{url}'
    return url

def save_webpage(url, content):
    """Persist raw HTML content to the local webpages/ directory.

    Args:
        url: The page's source URL; its host part names the file.
        content: Full HTML text to write (UTF-8).

    Returns:
        Relative path of the saved file, e.g. 'webpages/example.com_1700000000.html'.
    """
    os.makedirs('webpages', exist_ok=True)
    netloc = urlparse(url).netloc
    # Sanitize the host for use in a filename: a netloc with a port contains
    # ':' (e.g. 'example.com:8080'), which is invalid on Windows filesystems.
    safe_host = re.sub(r'[^\w.\-]', '_', netloc) or 'unknown'
    filename = f"webpages/{safe_host}_{int(time.time())}.html"
    with open(filename, 'w', encoding='utf-8') as f:
        f.write(content)
    return filename

@app.route('/webpages/<path:filename>')
def serve_webpage(filename):
    """Serve a previously saved HTML snapshot from the webpages/ directory.

    Flask's send_from_directory rejects path-traversal attempts, so taking
    *filename* straight from the URL is safe here.
    """
    return send_from_directory('webpages', filename)

@app.route('/')
def home():
    """Render the resume page.

    If the query string carries a page_id that matches a cached crawl
    result, show that page; otherwise fall back to the local resume.
    """
    page_id = request.args.get('page_id')
    cached = crawled_pages.get(page_id) if page_id else None
    if cached is not None:
        return render_template('resume.html', resume=cached, is_crawled=True)
    return render_template('resume.html', resume=MY_RESUME, is_crawled=False)

@app.route('/crawl', methods=['POST'])
def crawl():
    """Fetch a user-submitted URL, save its HTML, and cache the parsed result.

    Form params:
        url: Target URL (scheme optional; normalized before fetching).

    Returns:
        JSON {"success", "page_id", "redirect"} on success,
        JSON {"error": ...} with HTTP 400 (empty URL) or 500 (fetch failure).
    """
    url = request.form.get('url', '').strip()
    if not url:
        return jsonify({"error": "URL不能为空"}), 400

    try:
        # SECURITY NOTE(review): fetching an arbitrary user-supplied URL is an
        # SSRF vector (internal services, cloud metadata endpoints). Restrict
        # the resolved host/IP range before exposing this publicly.
        url = normalize_url(url)

        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
            'Accept-Language': 'en-US,en;q=0.9'
        }

        # Fetch the page; raise_for_status turns HTTP errors into exceptions
        # handled by the except below. apparent_encoding fixes mojibake on
        # pages that mis-declare their charset.
        response = requests.get(url, headers=headers, timeout=15)
        response.raise_for_status()
        response.encoding = response.apparent_encoding

        # Parse content.
        soup = BeautifulSoup(response.text, 'html.parser')

        # Save a local snapshot of the raw HTML.
        saved_path = save_webpage(url, response.text)

        # BUGFIX: soup.title.string can be None even when a <title> tag exists
        # (empty or nested content); fall back to "无标题" in that case too.
        title = (soup.title.string if soup.title else None) or "无标题"

        # BUGFIX: only append the ellipsis when the text was actually
        # truncated; the original added "..." unconditionally.
        text = soup.get_text('\n', strip=True)
        if len(text) > 1000:
            text = text[:1000] + "..."

        data = {
            'source_url': url,
            'saved_path': saved_path,
            'title': title,
            'content': str(soup),  # full HTML
            'text_content': text,  # first 1000 characters
            'screenshot': None
        }

        # Cache the result under a filesystem/URL-safe key.
        page_id = re.sub(r'[^\w\-]', '_', url)
        with lock:
            crawled_pages[page_id] = data

        return jsonify({
            "success": True,
            "page_id": page_id,
            "redirect": f"/?page_id={page_id}"
        })

    except Exception as e:
        # Broad catch is acceptable at this route boundary: any failure
        # (network, parse, disk) becomes a JSON error response.
        return jsonify({"error": f"抓取失败: {str(e)}"}), 500

@app.route('/reset', methods=['POST'])
def reset():
    """Clear all cached crawl results.

    Returns:
        JSON {"success": True}.
    """
    # CONSISTENCY FIX: crawl() guards every write to crawled_pages with
    # `lock`; clear it under the same lock so a concurrent crawl cannot
    # interleave with the reset.
    with lock:
        crawled_pages.clear()
    return jsonify({"success": True})

if __name__ == '__main__':
    # Ensure the snapshot directory exists before the first request.
    os.makedirs('webpages', exist_ok=True)
    # NOTE(review): debug=True combined with host='0.0.0.0' exposes the
    # Werkzeug interactive debugger (arbitrary code execution) to the whole
    # network — turn debug off for any non-local deployment.
    app.run(host='0.0.0.0', port=5000, debug=True)