import os
import re
import zipfile
from threading import Thread

import webview
from apscheduler.schedulers.background import BackgroundScheduler
from flask import request, Flask, send_file, render_template
from flask_cors import CORS

from constant import RUNNING, SUCCESS, CRAWL_TYPE_REPO, CRAWL_TYPE_DOC, WARNING_SUCCESS
from crawl import YuqueCrawler
from job import clean_task_job
from js_api import API
from model import Result
from store import tasks
from utils import generate_short_uuid

app = Flask(__name__, static_folder="static", static_url_path="/static", template_folder="template")
app.json.ensure_ascii = False  # emit raw UTF-8 in JSON responses instead of \u escapes (fixes garbled Chinese)
CORS(app)  # allow cross-origin requests from the GUI front-end
# Also expose the static folder under /assets for the front-end bundle.
app.add_url_rule('/assets/<path:filename>', endpoint='assets', view_func=app.send_static_file)
# Fix: the original set ensure_ascii and applied CORS(app) a second time here,
# which wrapped the app with the CORS middleware twice; the duplicates are removed.
scheduler = BackgroundScheduler()  # background scheduler for periodic maintenance jobs
scheduler.add_job(clean_task_job, 'interval', seconds=60)  # run the task-cleanup job every 60 seconds
scheduler.start()  # start the scheduler thread


@app.route('/')
def index():
    """Serve the single-page UI shell for the GUI."""
    page = render_template('index.html')
    return page


# Start a knowledge-base (repo) crawl task.
@app.route('/start_task', methods=['POST'])
def start_crawl_repo_task():
    """Validate the submitted Yuque repo URL and launch a repo crawl.

    Returns a JSON Result carrying the new task id and RUNNING status,
    or a 400 Result when the URL is missing or malformed.
    """
    url = request.json.get('url')
    if not url:
        return Result(status=400, message="爬取地址不能为空").to_json(), 400
    if re.match(r'^https?://www\.yuque\.com/[^/]+/[^/]+', url) is None:
        return Result(status=400, message="爬取地址不合法").to_json(), 400
    task_id = generate_short_uuid()  # unique id used to track this crawl
    # Keep only scheme + host + owner + repo segments of the URL.
    repo_url = '/'.join(url.split('/')[:5])
    YuqueCrawler(repo_url, task_id, CRAWL_TYPE_REPO).start()
    return Result(data={"taskId": task_id, "status": RUNNING}).to_json(), 200


@app.route('/start_one_task', methods=['POST'])
def start_doc_task():
    """Validate the submitted Yuque document URL and launch a single-doc crawl.

    Returns a JSON Result carrying the new task id and RUNNING status,
    or a 400 Result when the URL is missing or malformed.
    """
    url = request.json.get('url')
    if not url:
        return Result(status=400, message="爬取地址不能为空").to_json(), 400
    # A document URL has one more path segment than a repo URL.
    if re.match(r'^https?://www\.yuque\.com/[^/]+/[^/]+/[^/]+', url) is None:
        return Result(status=400, message="爬取地址不合法").to_json(), 400
    task_id = generate_short_uuid()  # unique id used to track this crawl
    YuqueCrawler(url, task_id, CRAWL_TYPE_DOC).start()
    return Result(data={"taskId": task_id, "status": RUNNING}).to_json(), 200


# Query the status of a crawl task.
@app.route('/task_status/<task_id>', methods=['GET'])
def crawl_task_status(task_id):
    """Return the stored task record for task_id, or 404 if unknown."""
    found = tasks.get(task_id)
    if found is None:
        return Result(status=404, message="不存在此任务").to_json(), 404
    return Result(data=found).to_json(), 200


# Download the markdown produced by a single-document crawl task.
@app.route('/download_one_task/<task_id>', methods=['GET'])
def crawl_doc_download(task_id):
    """Send the crawled markdown file for a finished single-doc task.

    Fix: the original read task['docTitle'] BEFORE checking that the task
    exists, so an unknown task_id raised TypeError (500) instead of
    returning the intended 404 response.
    """
    task = tasks.get(task_id)
    if not task:
        return Result(status=404, message="不存在此任务").to_json(), 404
    if task['status'] != SUCCESS:
        return Result(status=400, message="未完成的任务不能一键下载").to_json(), 400
    if task['crawlType'] != CRAWL_TYPE_DOC:
        return Result(status=400, message="不是文档任务").to_json(), 400
    title = task['docTitle']  # document title recorded by the crawler
    file_path = f"download/{task_id}/{title}.md"
    if not os.path.exists(file_path):
        return Result(status=404, message="不存在该文件").to_json(), 404
    # Send the markdown file as an attachment.
    return send_file(file_path, as_attachment=True)


# Download a single document from a knowledge-base crawl.
@app.route('/download_task_inner_single/<task_id>', methods=['GET'])
def crawl_repo_download_inner_single(task_id):
    """Send one markdown file from a repo task's download directory.

    The document is selected by the ``title`` query parameter. Fixes:
    removed a stray debug ``print`` of the file path, and rejected titles
    containing path separators or '..' — ``title`` is untrusted input that
    was interpolated directly into a filesystem path (path traversal).
    """
    task = tasks.get(task_id)
    title = request.args.get('title')
    if not task:
        return Result(status=404, message="不存在此任务").to_json(), 404
    if title is None:
        return Result(status=400, message="文档标题不能为空").to_json(), 400
    # Security: only accept a bare filename component, never a path.
    if os.path.basename(title) != title or title in ('.', '..'):
        return Result(status=400, message="文档标题不合法").to_json(), 400
    file_path = f"download/{task_id}/{title}.md"
    if not os.path.exists(file_path):
        return Result(status=404, message="不存在该文件").to_json(), 404
    # Send the markdown file as an attachment.
    return send_file(file_path, as_attachment=True)


# Download all resources of a knowledge-base crawl as a zip archive.
@app.route('/download_task/<task_id>', methods=['GET'])
def crawl_repo_zip_download(task_id):
    """Zip the task's download directory and send the archive.

    Fix: the ZipFile is now opened with a context manager so the handle is
    closed (and the archive is not left open/partial) even if writing a
    member raises.
    """
    task = tasks.get(task_id)
    if not task:
        return Result(status=404, message="不存在此任务").to_json(), 404

    if task['status'] != SUCCESS and task['status'] != WARNING_SUCCESS:
        return Result(status=400, message="未完成的任务不能一键下载").to_json(), 400

    save_dir = f"download/{task_id}"
    # All documents for this task are expected under download/<task_id>.
    if not os.path.exists(save_dir):
        return Result(status=404, message="不存在该文件").to_json(), 404
    zip_file = save_dir + '.zip'
    # Write every file under save_dir into the archive, keeping paths
    # relative to save_dir so the zip unpacks without the task-id prefix.
    with zipfile.ZipFile(zip_file, 'w', zipfile.ZIP_DEFLATED) as zip_package:
        for root, dirs, files in os.walk(save_dir):
            for file in files:
                full_path = os.path.join(root, file)
                zip_package.write(full_path, os.path.relpath(full_path, save_dir))
    # Send the archive as an attachment.
    return send_file(zip_file, as_attachment=True)



def start_flask():
    """Run the Flask web server (blocking) on all interfaces, port 8085."""
    app.run(port=8085, host="0.0.0.0", debug=False)

def on_window_close():
    """Stop background work and terminate the process when the GUI closes.

    Fix: the original called ``os.close(0)``, which only closes stdin and
    leaves the Flask server thread — and therefore the whole process —
    running. ``os._exit(0)`` ends the process immediately, taking the
    non-daemon Flask thread with it.
    """
    scheduler.shutdown(wait=False)  # stop the periodic cleanup job
    os._exit(0)  # hard-exit: Flask runs in a plain Thread and would otherwise keep the process alive

if __name__ == '__main__':
    # Run Flask in a background thread; daemon=True so the server thread
    # alone cannot keep the process alive after the GUI window is gone.
    Thread(target=start_flask, daemon=True).start()
    # Open the GUI via pywebview, pointed at the local Flask server.
    window = webview.create_window("语雀文档爬取工具", "http://localhost:8085", js_api=API())
    # Fix: on_window_close was defined but never registered; wire it to the
    # window's closed event so the scheduler is shut down on exit.
    window.events.closed += on_window_close
    webview.start(debug=False)

