import logging
from pathlib import Path
import requests
from tqdm import tqdm
import pdfkit
from config import Config
import re
from fake_useragent import UserAgent

# Pass the absolute path of the wkhtmltopdf executable into pdfkit's configuration object
pdfkit_config = pdfkit.configuration(wkhtmltopdf=str(Config.WKHTMLTOPDF_PATH))

def html_to_pdf(url, out_path):
    """Render the web page at *url* into a PDF file written to *out_path*.

    URLs that arrive without an explicit scheme are assumed to be https.
    Uses the module-level ``pdfkit_config`` (wkhtmltopdf path) and the
    rendering options from ``Config.ToPDF_OPTIONS``.
    """
    # Prepend a scheme when the URL lacks a leading http:// or https://.
    if re.match(r'^https?://', url, re.I) is None:
        url = f"https://{url}"

    pdfkit.from_url(
        url=url,
        output_path=out_path,
        configuration=pdfkit_config,
        options=Config.ToPDF_OPTIONS,
    )

def download_file(url: str, save_path: Path) -> tuple[bool, str]:
    """
    下载文件到指定路径（支持断点续传、进度提示）

    Downloads *url* to *save_path*, resuming from an existing partial file
    via an HTTP ``Range`` request and showing a tqdm progress bar.

    Returns:
        (success, error_message) — ``error_message`` is ``""`` on success.
    """
    try:
        # Browser-like request headers (avoids 403 Forbidden on some hosts).
        ua = UserAgent()
        headers = {
            "User-Agent": ua.random,
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8",
            "Accept-Language": "zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2",
            "Referer": "https://www.baidu.com/"  # 来源页
        }

        # Resume from the current on-disk size when a partial file exists.
        # NOTE: the previous version returned early whenever the file existed,
        # which made this resume branch unreachable dead code.
        downloaded_size = 0
        if save_path.exists():
            downloaded_size = save_path.stat().st_size
            if downloaded_size > 0:
                headers["Range"] = f"bytes={downloaded_size}-"

        with requests.get(url, headers=headers, stream=True, timeout=30) as response:
            # 416 Range Not Satisfiable: our local file already covers the
            # full remote size — treat as "already downloaded".
            if response.status_code == 416:
                logging.info(f"文件已存在，跳过下载：{save_path.name}")
                return True, "文件已存在"
            response.raise_for_status()  # 抛出HTTP错误（如404、500）

            # Only append when the server actually honoured the Range header
            # (206 Partial Content). A plain 200 means the full body is being
            # resent, so we must overwrite instead of appending to the partial.
            resuming = "Range" in headers and response.status_code == 206
            if not resuming:
                downloaded_size = 0

            # Total size = remaining bytes reported by the server plus
            # whatever we already have on disk (0 for a fresh download).
            total_size = int(response.headers.get('content-length', 0)) + downloaded_size

            mode = "ab" if resuming else "wb"  # 断点续传用追加，新文件用覆盖
            with open(save_path, mode) as f, tqdm(
                    total=total_size,
                    initial=downloaded_size,  # start the bar at the resumed offset
                    unit='B',
                    unit_scale=True,
                    desc=f"下载 {save_path.name}",
                    leave=False  # 下载完成后不保留进度条
            ) as pbar:
                # Stream in 1 MiB chunks, updating the progress bar as we go.
                for chunk in response.iter_content(chunk_size=1024 * 1024):
                    if chunk:
                        f.write(chunk)
                        pbar.update(len(chunk))

        return True, ""

    except requests.exceptions.RequestException as e:
        error_msg = f"下载失败：{str(e)}"
        # Remove empty leftovers; keep non-empty partial files so a later
        # call can resume them.
        if save_path.exists() and save_path.stat().st_size == 0:
            save_path.unlink(missing_ok=True)
        return False, error_msg
    except Exception as e:
        error_msg = f"未知错误：{str(e)}"
        return False, error_msg

