from flask import Flask, render_template, request
import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin, urlparse
import re
import os
import json
import hashlib

app = Flask(__name__)

# Ensure the output directories exist at import time: crawled page data goes
# under resume_data/ and downloaded images under resume_data/images.
os.makedirs('resume_data/images', exist_ok=True)

def download_image(url, folder='resume_data/images'):
    """Download an image from *url* into *folder* and return its local path.

    Args:
        url: Absolute URL of the image to fetch.
        folder: Destination directory; created on demand if missing.

    Returns:
        Path of the saved file, or ``None`` on any failure. Errors are logged
        and swallowed deliberately (best-effort: one bad image must not abort
        a whole crawl).
    """
    try:
        # Create the target directory even when a caller overrides `folder`;
        # module setup only guarantees the default directory exists.
        os.makedirs(folder, exist_ok=True)

        # `stream=True` keeps the connection open until the response is
        # closed; the context manager closes it deterministically (the
        # original leaked the pooled connection on every call).
        with requests.get(url, stream=True, timeout=10) as response:
            response.raise_for_status()

            # Content-addressed filename: md5 of the URL avoids collisions
            # between same-named images from different pages (md5 is fine
            # here — it is a cache key, not a security boundary).
            ext = os.path.splitext(urlparse(url).path)[1] or '.jpg'
            filename = hashlib.md5(url.encode()).hexdigest() + ext
            filepath = os.path.join(folder, filename)

            with open(filepath, 'wb') as f:
                # 8 KiB chunks: fewer Python-level iterations than the
                # original 1 KiB without changing the result.
                for chunk in response.iter_content(8192):
                    f.write(chunk)

        return filepath
    except Exception as e:
        print(f"图片下载失败: {url} - {str(e)}")
        return None

def save_to_json(data, filename='resume_data/resume.json'):
    """Serialize *data* to *filename* as pretty-printed, UTF-8 JSON.

    Non-ASCII characters are written verbatim (``ensure_ascii=False``) so
    the saved file stays human-readable for Chinese content.
    """
    serialized = json.dumps(data, ensure_ascii=False, indent=2)
    with open(filename, 'w', encoding='utf-8') as fp:
        fp.write(serialized)

def _extract_links(soup, base_url):
    """Return every anchor as ``{'text', 'url'}`` with the href made absolute."""
    return [
        {
            'text': link.get_text(strip=True),
            'url': urljoin(base_url, link['href']),
        }
        for link in soup.find_all('a', href=True)
    ]


def _extract_images(soup, base_url):
    """Download each <img> and return ``{'original_url', 'local_path', 'alt'}`` records."""
    images = []
    for img in soup.find_all('img', src=True):
        img_url = urljoin(base_url, img['src'])
        local_path = download_image(img_url)
        images.append({
            'original_url': img_url,
            # Normalize Windows backslashes so the stored path is usable
            # in JSON/HTML regardless of the host OS.
            'local_path': local_path.replace('\\', '/') if local_path else None,
            'alt': img.get('alt', ''),
        })
    return images


def _extract_content(soup):
    """Return non-empty text of headings, paragraphs and divs as ``{'tag', 'text'}``.

    NOTE(review): <div> text includes all nested children, so paragraph text
    can appear more than once in the output — presumably acceptable for this
    crawler; confirm before deduplicating.
    """
    content = []
    for element in soup.find_all(['p', 'h1', 'h2', 'h3', 'div']):
        text = element.get_text(strip=True)
        if text:
            content.append({'tag': element.name, 'text': text})
    return content


def crawl_website(url):
    """Crawl *url*, extract title/description/links/images/text, save to JSON.

    Args:
        url: Target address; ``http://`` is prepended when no scheme is given.

    Returns:
        ``{'success': bool, 'message': str, 'data': dict | None}`` — never
        raises; any failure is reported through the returned dict so the web
        view can render it.

    Side effects:
        Downloads images into ``resume_data/images`` and writes the result
        to ``resume_data/resume.json``.
    """
    try:
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        }

        # Default to http:// when the user omits a scheme.
        if not re.match(r'^https?://', url, re.IGNORECASE):
            url = 'http://' + url

        response = requests.get(url, headers=headers, timeout=10)
        response.raise_for_status()
        # Guess the charset from the body; many pages mislabel their headers.
        response.encoding = response.apparent_encoding

        soup = BeautifulSoup(response.text, 'html.parser')

        # Look the meta tag up once (the original ran the same query twice).
        meta_desc = soup.find('meta', attrs={'name': 'description'})

        result = {
            'url': url,
            'status_code': response.status_code,
            'title': soup.title.string if soup.title else '无标题',
            'meta_description': meta_desc['content'] if meta_desc else '无描述',
            'links': _extract_links(soup, url),
            'images': _extract_images(soup, url),
            'content': _extract_content(soup),
        }

        save_to_json(result)

        return {
            'success': True,
            'message': f'数据已保存到 resume_data/resume.json',
            'data': result,
        }

    except Exception as e:
        # Top-level boundary: convert any failure (network, parse, disk)
        # into a structured error for the caller instead of raising.
        return {
            'success': False,
            'message': f"爬取失败: {str(e)}",
            'data': None,
        }

@app.route('/', methods=['GET', 'POST'])
def index():
    """Render the crawl form; on POST, crawl the submitted URL and show the result."""
    # Guard clause: plain GET just shows the empty form.
    if request.method != 'POST':
        return render_template('index.html')
    crawl_result = crawl_website(request.form['url'])
    return render_template('index.html', result=crawl_result)

if __name__ == '__main__':
    # NOTE(review): debug=True enables Werkzeug's interactive debugger and
    # auto-reloader — development only; never expose this to an untrusted
    # network.
    app.run(debug=True)