import io
import math
import zipfile
import threading

from flask import jsonify, request, Blueprint, send_file
from .tools.tools import *

import requests
import parsel
import re
import pdfkit

# pdfkit configuration pointing at the wkhtmltopdf binary.
# NOTE(review): hard-coded Windows path — deployment on another machine/OS
# requires changing this; consider reading it from an env var.
config = pdfkit.configuration(wkhtmltopdf=r'D:\Program Files\wkhtmltopdf\bin\wkhtmltopdf.exe')

# Browser-like User-Agent shared by all outgoing requests in this module.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0'
}

# Blueprint exposing the column-scraping endpoint (registered by the app elsewhere).
zhuanlan_api = Blueprint('zhuanlan_api', __name__)


def process_article(url, pdfs, config, author_name, zl_name):
    """Fetch one article page and render it to an in-memory PDF.

    On success appends a ``(filename, io.BytesIO)`` tuple to *pdfs*; on any
    failure the error is logged and the article is skipped so sibling worker
    threads keep running.

    :param url: article page URL to download.
    :param pdfs: shared result list; ``list.append`` is atomic under the GIL,
        so concurrent workers may append without a lock.
    :param config: pdfkit configuration pointing at the wkhtmltopdf binary.
    :param author_name: column author name (currently unused here; kept for
        interface compatibility with the caller).
    :param zl_name: column title (currently unused here; same reason).
    """
    try:
        response = requests.get(url=url, headers=headers, timeout=10)
        if response.status_code != 200:
            # Was a silent no-op; log so missing PDFs are explainable.
            print(f"Skipping {url}: HTTP {response.status_code}")
            return
        selector = parsel.Selector(response.text)
        author_url = selector.css('.profile-href::attr(href)').get()
        title = selector.css('#articleContentId::text').get()
        content = selector.css('#content_views').get()
        if title is None or content is None:
            # Page layout changed or article removed; re.sub/be_html would
            # otherwise raise TypeError on None.
            print(f"Skipping {url}: title/content not found")
            return
        # Strip characters that are illegal in Windows file names.
        new_title = re.sub(r'[\\:*?"<>|/]', '', title).strip()

        html_str = be_html(new_title, author_url, content)
        print(f"开始生成{new_title}.pdf")
        # No output_path given -> pdfkit returns the rendered PDF as bytes.
        pdf_data = pdfkit.from_string(html_str, configuration=config)
        pdfs.append((f'{new_title}.pdf', io.BytesIO(pdf_data)))
    except Exception as e:
        # Best-effort: one bad article must not kill the whole batch.
        print(f"Error processing {url}: {e}")


@zhuanlan_api.route('/', methods=['GET'])
def get_zhuanlan():
    """Crawl a CSDN column, render every article to PDF, return one ZIP.

    Query args:
        url: the column's index page URL (e.g. ``.../category_123.html``).

    Returns the error response produced by ``is_valid_url`` when validation
    fails, a 400 JSON error when ``url`` is missing, otherwise a
    ``send_file`` response streaming an in-memory ZIP of all PDFs.
    """
    url = request.args.get('url')
    if not url:
        # Previously crashed with AttributeError on None.split().
        return jsonify({'error': 'missing url parameter'}), 400

    # Base URL without the trailing ".html"; pagination links are built as
    # "<base>_<page>.html" below.
    no_html = '.'.join(url.split('.')[:-1])

    # 验证url — is_valid_url returns either a (json, status) error pair or
    # the normalized link to fetch.
    validation_response = is_valid_url(url)
    if isinstance(validation_response, tuple):
        return validation_response
    link = validation_response

    html_urls = []
    author_name = ''
    zl_name = ''
    # Default to a single page. The original kept this in an unbound global
    # (`global max_pager`) which raised NameError whenever the first fetch
    # failed or the page-count variables were absent.
    max_pager = 1

    print('开始爬取第1页')
    response = requests.get(url=link, headers=headers, timeout=10)
    if response.status_code == 200:
        html = response.text
        url_selector = parsel.Selector(html)
        html_urls += url_selector.css('.column_article_list li a::attr(href)').getall()

        # Guard against missing selectors: .get() may return None and the
        # original's .strip() would raise.
        author_name = (url_selector.css('.column_person_tit span:nth-child(2)::text').get() or '').strip()
        zl_name = url_selector.css('.column_title::text').get() or ''
        print(f"作者名称:{author_name}，专栏名称:{zl_name}")

        # Page count is embedded as JS variables in the index page.
        pageSize_match = re.search(r"var pageSize = (\d+);", html)
        listTotal_match = re.search(r"var listTotal = (\d+);", html)
        if pageSize_match and listTotal_match:
            pageSize = int(pageSize_match.group(1))
            listTotal = int(listTotal_match.group(1))
            max_pager = math.ceil(listTotal / pageSize)
        else:
            print("One or both variables were not found in the HTML.")

    # Collect article links from the remaining pages, if any.
    for page in range(2, max_pager + 1):
        page_link = f"{no_html}_{page}.html"
        print(f'开始爬取第{page}页')
        response = requests.get(url=page_link, headers=headers, timeout=10)
        if response.status_code == 200:
            page_selector = parsel.Selector(response.text)
            html_urls += page_selector.css('.column_article_list li a::attr(href)').getall()

    # One worker thread per article; results land in the shared list
    # (list.append is atomic under the GIL, so no lock is needed).
    pdfs = []
    threads = []
    print("多线程开始执行")
    for article_url in html_urls:
        thread = threading.Thread(target=process_article,
                                  args=(article_url, pdfs, config, author_name, zl_name))
        threads.append(thread)
        thread.start()

    for thread in threads:
        thread.join()
    print("多线程执行完成")

    # Pack every generated PDF into a single in-memory ZIP archive.
    zip_output = io.BytesIO()
    with zipfile.ZipFile(zip_output, 'w', zipfile.ZIP_DEFLATED) as zipf:
        for filename, pdf_stream in pdfs:
            pdf_stream.seek(0)
            zipf.writestr(filename, pdf_stream.read())

    zip_output.seek(0)
    return send_file(zip_output, as_attachment=True,
                     download_name=f'{author_name}_{zl_name}.zip')
