#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2025/9/1
# @Author  : YunZhen
# @File    : download.py
# @Software: PyCharm
"""
Python安装包下载
"""
import os
import asyncio
import aiohttp
import aiofiles
import random
import time
from tqdm.asyncio import tqdm
from loguru import logger
from collections import defaultdict
from typing import List, Dict

from src.script._load import get_cached_versions
from src.script.verify import verify_download

# Pool of common browser User-Agent strings; one is chosen at random per request
# to vary the client fingerprint across downloads.
USER_AGENTS = [
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.1.1 Safari/605.1.15",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:89.0) Gecko/20100101 Firefox/89.0",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.107 Safari/537.36"
]

# Module-level download statistics for the current run
# (reassigned at the start of download_python_version, mutated by download_file).
download_stats = {
    "total_files": 0,  # number of files queued for download this run
    "completed": 0,  # files finished successfully (or already present and complete)
    "failed": 0,  # files that gave up after exhausting retries
    "retries": 0,  # total retry attempts across all files
    "total_size": 0,  # cumulative expected size in bytes
    "start_time": 0  # epoch seconds when the run started
}


async def get_download_links(version: str) -> Dict[str, List[Dict]]:
    """Collect every download link for *version*, grouped by platform.

    Args:
        version: Python version string to look up in the version cache.

    Returns:
        Mapping of platform name to a list of
        ``{'name': ..., 'url': ..., 'platform': ...}`` dicts.

    Raises:
        ValueError: if the version cache could not be loaded.
    """
    # Pull the pre-loaded version/link cache.
    cached_versions = await get_cached_versions()
    if not cached_versions:
        raise ValueError("无法获取版本缓存，确保已正确加载版本信息")

    links_by_platform = defaultdict(list)
    for platform, version_map in cached_versions.items():
        entry = version_map.get(version)
        if not entry:
            logger.warning(f"版本 {version} 在系统 {platform} 中未找到")
            continue
        links_by_platform[platform].extend(
            {'name': link_name, 'url': link_url, 'platform': platform}
            for link_name, link_url in entry['links'].items()
        )
    logger.info(f"版本 {version} 的下载链接: {dict(links_by_platform)}")
    return links_by_platform


async def download_file(
        session: aiohttp.ClientSession,
        url: str,
        save_path: str,
        max_retries: int = 3,
        backoff: int = 1
) -> bool:
    """Download a single file with resume support, timeouts and retries.

    Args:
        session: shared aiohttp client session.
        url: remote file URL.
        save_path: local destination path (parent dirs created as needed).
        max_retries: number of retries after the first attempt.
        backoff: base seconds for exponential backoff between retries.

    Returns:
        True when the file downloaded and passed verification, else False.
    """
    global download_stats

    start_time = time.time()
    file_name = os.path.basename(save_path)

    # Resume from any partially downloaded file on disk.
    downloaded = 0
    if os.path.exists(save_path):
        downloaded = os.path.getsize(save_path)
        logger.info(f"检测到部分下载文件: {file_name} ({downloaded}字节), 尝试续传")

    # Total timeout 1 hour, connect timeout 30 seconds.
    timeout = aiohttp.ClientTimeout(total=3600, connect=30)

    for attempt in range(max_retries + 1):
        # Rebuild headers every attempt: `downloaded` may have changed since the
        # previous try, so the Range offset must be recomputed (the original code
        # built the headers once, sending a stale Range on retries and then
        # appending at the wrong offset).
        headers = {'User-Agent': random.choice(USER_AGENTS)}
        if downloaded:
            headers['Range'] = f'bytes={downloaded}-'

        progress = None
        try:
            async with session.get(url, headers=headers, timeout=timeout) as response:
                # A server that ignores the Range header answers 200 with the
                # FULL body, so we can consume this same response from scratch.
                # (Do not issue a second request inside a nested `async with`:
                # that context exits immediately, leaving a closed response.)
                if downloaded and response.status != 206:
                    logger.warning("服务器不支持断点续传，重新开始下载")
                    downloaded = 0

                total_size = int(response.headers.get('content-length', 0)) + downloaded

                # Only count the size once, not again on each retry.
                if attempt == 0:
                    download_stats["total_size"] += total_size

                progress = tqdm(
                    total=total_size,
                    unit='B',
                    unit_scale=True,
                    desc=file_name,
                    initial=downloaded,
                    leave=False  # clear the bar once done
                )

                os.makedirs(os.path.dirname(save_path), exist_ok=True)

                # Append when resuming, truncate otherwise.
                async with aiofiles.open(save_path, 'ab' if downloaded else 'wb') as f:
                    while True:
                        try:
                            chunk = await response.content.read(1024 * 8)
                        except asyncio.TimeoutError:
                            # Stop reading; verification below will fail and
                            # raise, routing us into the retry path.
                            logger.warning(f"读取数据超时: {file_name}, 尝试重连")
                            break
                        if not chunk:
                            break
                        await f.write(chunk)
                        downloaded += len(chunk)
                        progress.update(len(chunk))

                        # Warn if throughput stays below ~50KB/s after a warm-up.
                        elapsed = time.time() - start_time
                        if elapsed > 10 and downloaded / elapsed < 50 * 1024:
                            logger.warning(f"下载速度过低: {file_name} ({downloaded / elapsed / 1024:.2f}KB/s)")

            # Verify size/integrity of what landed on disk.
            if await verify_download(save_path, total_size):
                elapsed = time.time() - start_time
                speed = total_size / elapsed / 1024 if elapsed > 0 else 0
                logger.success(f"✅ 下载完成: {file_name} ({total_size / 1024 / 1024:.2f}MB, {speed:.2f}KB/s)")
                download_stats["completed"] += 1
                return True

            logger.error(f"文件校验失败: {file_name}")
            # Drop the incomplete file and forget the resume offset so the
            # retry starts clean.
            if os.path.exists(save_path):
                os.remove(save_path)
            downloaded = 0
            raise ValueError("文件校验失败")

        # ValueError is included so a verification failure retries instead of
        # falling into the generic handler and failing immediately.
        except (aiohttp.ClientError, asyncio.TimeoutError, OSError, ValueError) as e:
            if attempt < max_retries:
                wait_time = backoff * (2 ** attempt)  # exponential backoff
                logger.warning(f"下载失败: {file_name} ({type(e).__name__}), {wait_time}秒后重试...")
                download_stats["retries"] += 1
                await asyncio.sleep(wait_time)
            else:
                logger.error(f"❌ 下载失败: {file_name} 重试{max_retries}次后仍失败")
                download_stats["failed"] += 1
                return False
        except Exception as e:
            logger.error(f"❌ 未知错误: {file_name} - {str(e)}")
            download_stats["failed"] += 1
            return False
        finally:
            # Always close the progress bar, even when an exception escaped
            # the download loop (the original leaked it on errors).
            if progress is not None:
                progress.close()

    return False


async def download_python_version(
        version: str,
        platforms: List[str],
        save_dir: str = "packages",
        max_concurrent: int = 5
) -> bool:
    """Download every installer for a Python version across the given platforms.

    Concurrency is capped by a semaphore; files already present and
    size-matching the remote are skipped. Prints a summary report at the end.

    Args:
        version: Python version to download.
        platforms: platform keys to fetch packages for.
        save_dir: root directory for saved packages.
        max_concurrent: maximum simultaneous downloads.

    Returns:
        True if every queued download succeeded (or nothing needed
        downloading); False otherwise.
    """
    global download_stats
    # Fresh statistics for this run.
    download_stats = {
        "total_files": 0,
        "completed": 0,
        "failed": 0,
        "retries": 0,
        "total_size": 0,
        "start_time": time.time()
    }

    logger.info(f"获取 {version} 下载链接...")
    download_links = await get_download_links(version)
    if not download_links:
        logger.warning(f"未找到版本 {version} 的下载链接")
        return False

    gate = asyncio.Semaphore(max_concurrent)

    async def bounded_fetch(sess, info, path):
        # At most `max_concurrent` downloads in flight at any moment.
        async with gate:
            return await download_file(sess, info["url"], path)

    async def already_complete(sess, info, path):
        # HEAD the remote file and compare its size with the local copy.
        local_size = os.path.getsize(path)
        try:
            head_headers = {'User-Agent': random.choice(USER_AGENTS)}
            async with sess.head(info["url"], headers=head_headers) as resp:
                if resp.status == 200:
                    remote_size = int(resp.headers.get('content-length', 0))
                    if local_size == remote_size:
                        return True
        except Exception as err:
            logger.warning('检查文件完整性时发生异常：{}，{}', path, err)
        return False

    async with aiohttp.ClientSession() as session:
        tasks = []
        for plat in platforms:
            if plat not in download_links:
                logger.warning(f"版本 {version} 没有 {plat} 平台的安装包")
                continue
            for file_info in download_links[plat]:
                filename = file_info["url"].split('/')[-1]
                save_path = os.path.join(save_dir, plat, version, filename)

                # Skip files that are already fully on disk.
                if os.path.exists(save_path) and await already_complete(session, file_info, save_path):
                    logger.info(f"文件已存在且完整: {file_info['name']}")
                    download_stats["completed"] += 1
                    continue

                logger.info(f"开始下载: {file_info['name']} -> {save_path}")
                download_stats["total_files"] += 1
                tasks.append(bounded_fetch(session, file_info, save_path))

        if not tasks:
            logger.info(f"所有文件已存在，跳过下载")
            return True

        results = await asyncio.gather(*tasks)

        # Summary report.
        elapsed = time.time() - download_stats["start_time"]
        total_size_mb = download_stats["total_size"] / (1024 * 1024)
        avg_speed = total_size_mb / elapsed * 1024 if elapsed > 0 else 0

        logger.info("\n" + "=" * 50)
        logger.info(f"下载报告 - Python {version}")
        logger.info(f"总文件数: {download_stats['total_files']}")
        logger.info(f"成功: {download_stats['completed']}")
        logger.info(f"失败: {download_stats['failed']}")
        logger.info(f"重试次数: {download_stats['retries']}")
        logger.info(f"总大小: {total_size_mb:.2f} MB")
        logger.info(f"总耗时: {elapsed:.2f} 秒")
        logger.info(f"平均速度: {avg_speed:.2f} KB/s")
        logger.info("=" * 50)

        return all(results)