from flask import Flask, render_template, request, jsonify, session, send_from_directory
import requests
from bs4 import BeautifulSoup
import json
import os
from spider import scrape_resume, save_data, download_image

app = Flask(__name__)
# Session-signing key. Read from the environment so a real deployment never
# commits its key to source control; falls back to the original hard-coded
# development value for backward compatibility.
app.secret_key = os.environ.get('SECRET_KEY', 'your_secret_key_here')

# The owner's default resume, loaded once at startup and served whenever no
# crawled resume is present in the session.
with open('resume_data/resume.json', 'r', encoding='utf-8') as f:
    MY_RESUME = json.load(f)

# 设置静态文件路由
@app.route('/resume_data/images/<path:filename>')
def serve_resume_images(filename):
    """Serve a downloaded resume image from the local resume_data/images folder."""
    image_dir = 'resume_data/images'
    return send_from_directory(image_dir, filename)

@app.route('/templates/<path:filename>')
def serve_template_images(filename):
    """Serve static assets placed alongside the templates.

    NOTE(review): this exposes the whole templates/ directory, including the
    HTML template source — confirm that is intentional.
    """
    asset_dir = 'templates'
    return send_from_directory(asset_dir, filename)

@app.route('/')
def home():
    """Render the crawled resume stored in the session, or the default resume.

    The ``is_crawled`` flag tells the template which data set it is showing.
    """
    crawled = session.get('crawled_resume')
    context = {
        'resume': crawled or MY_RESUME,
        'is_crawled': bool(crawled),
    }
    return render_template('resume.html', **context)

@app.route('/reset', methods=['POST'])
def reset_resume():
    """Drop any crawled resume from the session so the default resume shows again."""
    if 'crawled_resume' in session:
        del session['crawled_resume']
    return jsonify({"success": True})

@app.route('/crawl', methods=['POST'])
def crawl_resume():
    """Scrape the resume at the submitted URL and store it in the session.

    Expects a ``url`` form field. On success the scraped data is stored in
    ``session['crawled_resume']`` and a JSON redirect hint is returned; on
    failure a JSON error payload is returned instead.
    """
    target_url = (request.form.get('url') or '').strip()
    if not target_url:
        # Guard: without this, a missing/empty field would pass None straight
        # into scrape_resume and surface as an opaque 500.
        return jsonify({"error": "Missing 'url' form field"}), 400

    try:
        crawled_data = scrape_resume(target_url)
        # Persist the result in the session so '/' renders it on redirect.
        session['crawled_resume'] = crawled_data
        return jsonify({"success": True, "redirect": "/"})
    except Exception as e:
        # Broad catch is deliberate at this API boundary: any scraper failure
        # is reported as JSON rather than an HTML 500 page.
        return jsonify({"error": str(e)}), 500

if __name__ == '__main__':
    # debug=True enables the auto-reloader and the interactive debugger —
    # development only; never enable debug mode in production.
    app.run(debug=True)