import os
import time
import json
import requests
import logging
import threading
import sys
import warnings
from concurrent.futures import ThreadPoolExecutor, TimeoutError
from collections import defaultdict
import atexit

# Suppress DeprecationWarning noise from dependencies
warnings.filterwarnings("ignore", category=DeprecationWarning)

# Logging: write to both audio_download.log and the console
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler("audio_download.log", encoding="utf-8"),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)

# Global cache of audio URLs already downloaded (or queued for download)
downloaded_urls = set()
# Global set of "url|filename" task ids that are still pending
pending_tasks = set()
# Shared ThreadPoolExecutor, created lazily by init_global_executor()
global_executor = None
# Lock guarding creation/shutdown of global_executor
executor_lock = threading.Lock()

# JSON result cache
# NOTE(review): this is a plain non-reentrant Lock — code holding it must not
# call functions that acquire it again (see add_to_cache / flush_json_cache).
json_cache_lock = threading.Lock()
json_cache = defaultdict(list)  # result records grouped by status key
json_cache_size = 0
MAX_CACHE_SIZE = 50  # flush the cache to disk once it holds this many records

# Patch for thread-cleanup issues on old interpreters
def patch_threading_excepthook():
    """Route uncaught thread exceptions through sys.excepthook on Python <= 3.7.

    No-op on Python 3.8+, where ``threading.excepthook`` exists natively.

    Bug fix: the original set ``self.run = self._trace_run`` but never attached
    ``_trace_run`` to the Thread class, so on patched interpreters every
    ``Thread()`` constructor raised AttributeError. The wrapper is now
    installed on the class so the instance lookup succeeds.
    """
    if hasattr(threading, 'excepthook'):
        return

    init_old = threading.Thread.__init__

    def init(self, *args, **kwargs):
        init_old(self, *args, **kwargs)
        # Swap run() for a wrapper that reports exceptions via sys.excepthook
        self._original_run = self.run
        self.run = self._trace_run

    def _trace_run(self, *args, **kwargs):
        try:
            self._original_run(*args, **kwargs)
        except SystemExit:
            # Mirror interpreter behavior: SystemExit in a thread is silent
            pass
        except Exception:
            sys.excepthook(*sys.exc_info())

    threading.Thread.__init__ = init
    # Make the wrapper reachable as an instance attribute lookup
    threading.Thread._trace_run = _trace_run

# Hook that filters thread-cleanup noise
def threading_excepthook(args):
    """Thread exception hook that drops known NoneType cleanup errors."""
    ignorable = (
        args.exc_type == TypeError
        and "'NoneType' object" in str(args.exc_value)
    )
    if ignorable:
        # Harmless error raised during interpreter/thread teardown — swallow it
        return
    sys.__excepthook__(args.exc_type, args.exc_value, args.exc_traceback)

# Apply the patches
def apply_threading_patches():
    """Install the thread exception handling patches for this interpreter.

    Bug fix: the original used ``try/except AttributeError`` around the
    assignment ``threading.excepthook = ...`` — but assigning an attribute
    never raises AttributeError, so the Python 3.7 fallback branch was dead
    code. An explicit feature check selects the right strategy instead.
    """
    if hasattr(threading, 'excepthook'):
        # Python 3.8+: use the native per-thread hook
        threading.excepthook = threading_excepthook
    else:
        # Python 3.7 and below: monkey-patch Thread to route exceptions
        patch_threading_excepthook()

    # NOTE(review): this silences ALL uncaught exceptions process-wide, which
    # can hide real bugs; kept for backward compatibility with existing callers.
    sys.excepthook = lambda *args: None

# Initialize the global thread pool
def init_global_executor(max_workers=4):
    """Create the shared ThreadPoolExecutor if it is absent or shut down."""
    global global_executor
    with executor_lock:
        # _shutdown is a private Executor flag; a closed pool must be replaced
        needs_new_pool = global_executor is None or global_executor._shutdown
        if needs_new_pool:
            logger.info(f"初始化全局线程池，最大工作线程数: {max_workers}")
            global_executor = ThreadPoolExecutor(max_workers=max_workers)

# Fetch the global thread pool
def get_executor():
    """Return the shared executor, lazily initializing it on first use."""
    if global_executor is not None:
        return global_executor
    init_global_executor()
    return global_executor

# Download a single audio file
def download_audio(url, filename, item_title, item_id):
    """Download *url* to *filename* and record the outcome via write_to_json.

    Relative filenames are resolved under <module dir>/audio. Returns True on
    success, False on any failure (the exception is logged, not raised).

    Fixes: the timeout was a scalar 60 while the comment documented a
    (connect=40s, read=60s) pair — the tuple now matches the documented
    intent; stream=True was combined with .content (which loads the whole
    body into memory) — the body is now copied in chunks and the response is
    closed deterministically.
    """
    current_dir = os.path.dirname(os.path.abspath(__file__))
    # Resolve relative paths under the module-local "audio" directory
    if not os.path.isabs(filename):
        filename = os.path.join(current_dir, "audio", filename)
    try:
        logger.info(f"开始下载: {url} 到 {filename}")
        headers = {
            "User-Agent": (
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
                "AppleWebKit/537.36 (KHTML, like Gecko) "
                "Chrome/115.0.0.0 Safari/537.36"
            ),
            "Accept": "*/*",
            "Connection": "keep-alive",
            # Add Referer / Cookie here if the server requires them
        }
        # timeout=(connect, read); `with` guarantees the connection is released
        with requests.get(url, stream=True, headers=headers, timeout=(40, 60)) as response:
            response.raise_for_status()
            os.makedirs(os.path.dirname(filename), exist_ok=True)
            with open(filename, 'wb') as f:
                # Chunked copy keeps memory flat for large files
                for chunk in response.iter_content(chunk_size=65536):
                    if chunk:
                        f.write(chunk)
        logger.info(f"下载完成: {filename}")
        # NOTE(review): success records intentionally? omit "filename" while
        # failure records include it — preserved as-is; confirm with consumers.
        write_to_json({
            "id": item_id,
            "title": item_title,
            "timestamp": time.strftime("%Y-%m-%d %H:%M:%S")
        }, "success")  # record the success immediately
        return True
    except Exception as e:
        logger.error(f"下载失败: {url} - {str(e)}")
        write_to_json({
            "id": item_id,
            "title": item_title,
            "filename": filename,
            "timestamp": time.strftime("%Y-%m-%d %H:%M:%S")
        }, "fail")  # record the failure immediately
        # Drop the URL from the dedupe cache so a later retry is possible
        downloaded_urls.discard(url)
        return False

# Add one result record to the in-memory cache
def add_to_cache(data, status):
    """Append *data* under *status*; flush to disk at MAX_CACHE_SIZE records.

    Bug fix: flush_json_cache() acquires json_cache_lock itself, and the lock
    is a non-reentrant threading.Lock — the original called it while already
    holding the lock, deadlocking the first time the cache filled. The flush
    decision is now made under the lock and the flush performed after release.
    """
    global json_cache_size
    with json_cache_lock:
        json_cache[status].append(data)
        json_cache_size += 1
        print(f"添加到缓存: {status} - {data} - {json_cache_size}")
        should_flush = json_cache_size >= MAX_CACHE_SIZE
    if should_flush:
        flush_json_cache()

# Batch-write the JSON cache to per-status files
def flush_json_cache():
    """Merge all cached records into audio/<status>_audio_results.json files.

    Existing file contents are loaded, extended, and rewritten; corrupt or
    missing files are treated as empty lists. The cache is cleared only after
    a full pass, inside the lock. Fixes: the per-file log messages had lost
    their filename placeholder — restored to include the target path.
    """
    global json_cache_size
    with json_cache_lock:
        if json_cache_size == 0:
            return

        try:
            current_dir = os.path.dirname(os.path.abspath(__file__))
            output_dir = os.path.join(current_dir, "audio")
            os.makedirs(output_dir, exist_ok=True)

            for status, data_list in json_cache.items():
                if not data_list:
                    continue

                filename = os.path.join(output_dir, f"{status}_audio_results.json")

                # Load whatever is already on disk; tolerate corruption
                try:
                    if os.path.exists(filename):
                        with open(filename, 'r', encoding='utf-8') as f:
                            existing_data = json.load(f)
                            if not isinstance(existing_data, list):
                                existing_data = []
                    else:
                        existing_data = []
                except (json.JSONDecodeError, FileNotFoundError):
                    existing_data = []

                existing_data.extend(data_list)

                try:
                    with open(filename, 'w', encoding='utf-8') as f:
                        json.dump(existing_data, f, ensure_ascii=False, indent=2)
                    logger.info(f"成功写入 {len(data_list)} 条 {status} 记录到 {filename}")
                except Exception as e:
                    logger.error(f"写入 {filename} 失败: {str(e)}")

            json_cache.clear()
            json_cache_size = 0
        except Exception as e:
            logger.error(f"刷新JSON缓存时出错: {str(e)}")

# Callback invoked once a group of download futures has finished
def _all_tasks_completed_callback(futures, title, id, filenames):
    """Summarize the outcome of *futures* and record it in the JSON cache.

    An empty *futures* list means the task was skipped (cache hit) and is
    recorded under "skipped". Fixes: removed a leftover debug print on the
    success path and the never-read local ``error_message``.

    NOTE(review): failures here are cached under "failed" while download_audio
    writes status "fail" — two distinct output files; confirm which is intended.
    """
    try:
        success = True
        skipped = False

        if futures:
            # Any False result or raised exception marks the batch as failed
            for future in futures:
                try:
                    if not future.result():
                        success = False
                except Exception:
                    success = False
        else:
            success = False
            skipped = True

        output_data = {
            "title": title or "未提供标题",
            "id": id or "未知ID",
            "single_filename": filenames[0] if filenames else "未知文件名",
            "timestamp": time.strftime("%Y-%m-%d %H:%M:%S")
        }

        if success:
            add_to_cache(output_data, "success")
        elif skipped:
            add_to_cache(output_data, "skipped")
        else:
            add_to_cache(output_data, "failed")
    except Exception as e:
        logger.error(f"生成JSON数据时出错: {str(e)}")

# Callback that logs the outcome of a single download future
def _handle_result(future, index, filenames):
    """Log success/failure for the download whose name is filenames[index]."""
    name = filenames[index]
    try:
        ok = future.result()
    except Exception as e:
        logger.error(f"处理文件 {name} 结果时出错: {str(e)}")
        return
    if ok:
        logger.info(f"文件 {name} 下载成功")
    else:
        logger.warning(f"文件 {name} 下载失败")

# Submit one download to the shared pool.
def _submit_one_download(executor, url, filename, title, id, cache, task_timeout):
    """Queue a single download task; return its Future, or None if skipped.

    Handles dedupe bookkeeping (pending_tasks / downloaded_urls), result
    callbacks, and a watchdog thread that releases the pending-task entry if
    the future is still unfinished after *task_timeout* seconds.
    """
    task_id = f"{url}|{filename}"

    if cache and (task_id in pending_tasks or url in downloaded_urls):
        logger.info(f"音频已在下载队列中或已下载，跳过: {url}")
        _all_tasks_completed_callback([], title, id, [filename])
        return None

    pending_tasks.add(task_id)
    downloaded_urls.add(url)

    logger.info(f"准备下载: {url} 到 {filename}")

    # Bug fix: download_audio takes (url, filename, item_title, item_id); the
    # original submitted only two arguments, so every worker raised TypeError.
    future = executor.submit(download_audio, url, filename, title, id)
    future.add_done_callback(lambda f, fn=filename: _handle_result(f, 0, [fn]))

    def _completion_callback(f, task_id=task_id, title=title, id=id, filename=filename):
        try:
            pending_tasks.discard(task_id)
            _all_tasks_completed_callback([f], title, id, [filename])
        except Exception as e:
            logger.error(f"任务完成回调出错: {str(e)}")

    future.add_done_callback(_completion_callback)

    # Watchdog: release the pending-task entry if the future never completes
    def _watchdog(future=future, task_id=task_id, timeout=task_timeout):
        try:
            future.result(timeout=timeout)
        except TimeoutError:
            logger.error(f"任务超时未完成，自动释放: {task_id}")
            pending_tasks.discard(task_id)
        except Exception:
            # Task errors are already reported by the done-callbacks
            pass

    threading.Thread(target=_watchdog, daemon=True).start()
    return future


# Kick off downloads on the global thread pool
def startThread(single_url=None, single_filename=None, title=None, id=None, max_workers=4, cache=True, download_list=None, task_timeout=120):
    """Submit downloads to the shared executor.

    Batch mode (*download_list* given): each item is a dict with 'audio_url',
    'filename' and optional 'title'/'id'; returns the list of Futures.
    Single mode: downloads *single_url* to *single_filename*; returns the
    Future, or None when the arguments are missing or the task was skipped.

    Fixes: both submit paths previously omitted the title/id arguments that
    download_audio requires, and the batch task_id/log strings had lost their
    filename placeholder; the duplicated batch/single logic is now shared via
    _submit_one_download.
    """
    # Batch download path
    if download_list:
        init_global_executor(max_workers)
        executor = get_executor()
        futures = []

        for item in download_list:
            url = item.get('audio_url')
            filename = item.get('filename')
            item_title = item.get('title', title)
            item_id = item.get('id', id)

            if not url or not filename:
                logger.error("批量下载中缺少音频URL或文件名")
                continue

            future = _submit_one_download(
                executor, url, filename, item_title, item_id, cache, task_timeout
            )
            if future is not None:
                futures.append(future)

        logger.info(f"已提交 {len(futures)} 个下载任务到全局线程池")
        return futures

    # Single download path
    if not single_url or not single_filename:
        logger.error("未提供音频URL或文件名")
        return None

    init_global_executor(max_workers)
    executor = get_executor()

    future = _submit_one_download(
        executor, single_url, single_filename, title, id, cache, task_timeout
    )
    if future is None:
        return None

    logger.info("下载任务已提交到全局线程池")
    return future

# Shut down the global thread pool
def shutdown_executor(wait=True):
    """Flush the JSON cache and shut the shared executor down.

    Registered with atexit via register_cleanup(); *wait*=True blocks until
    in-flight download tasks finish. The ">>>" warnings are deliberate debug
    breadcrumbs for diagnosing shutdown hangs — presumably left from a past
    incident; confirm before removing.
    """
    logger.warning(">>> shutdown_executor 被调用")
    logger.warning(">>> shutdown_executor 准备进入锁")
    # Report any tasks still marked pending (diagnostic only; does not wait)
    if pending_tasks:
        logger.warning(f"还有未完成的音频下载任务数: {len(pending_tasks)}")
        for task in list(pending_tasks):
            logger.warning(f"未完成任务: {task}")
    else:
        logger.info("所有音频下载任务已完成释放。")
    global global_executor
    logger.warning(">>> shutdown_executor 进入 with executor_lock 之前")
    with executor_lock:
        logger.warning(">>> shutdown_executor 已进入锁")
        try:
            # Persist any buffered results before the pool goes away
            flush_json_cache()
            if global_executor is not None:
                global_executor.shutdown(wait=wait)
                logger.info("全局线程池已关闭")
        except Exception as e:
            logger.error(f"关闭线程池时出错: {str(e)}")
        finally:
            # Always reset so a later init_global_executor() builds a fresh pool
            global_executor = None

# Register the cleanup function that runs at program exit
def register_cleanup():
    """Arrange for shutdown_executor() to run automatically via atexit."""
    atexit.register(shutdown_executor)
    logger.info("已注册程序退出时关闭线程池的清理函数")

def print_pending_tasks_periodically(interval=10):
    """Periodically log the pending-task count so release progress is visible.

    Runs in a daemon thread; stops on its own once pending_tasks is empty.
    Returns the threading.Event that can also be set to stop it early.
    """
    stop_event = threading.Event()

    def monitor():
        while not stop_event.is_set():
            logger.warning(f"[定时监控] 当前未完成音频下载任务数: {len(pending_tasks)}")
            if not pending_tasks:
                logger.info("所有音频下载任务已完成释放。")
                stop_event.set()
                break
            # Show at most five task ids to keep the log readable
            for task in list(pending_tasks)[:5]:
                logger.warning(f"[定时监控] 未完成任务: {task}")
            time.sleep(interval)

    threading.Thread(target=monitor, daemon=True).start()
    return stop_event

# Initialize the module
def init_module():
    """One-shot setup: threading patches, atexit cleanup, pending-task monitor."""
    apply_threading_patches()
    register_cleanup()
    print_pending_tasks_periodically(interval=10)  # log pending count every 10s
    logger.info("音频下载模块已初始化")

# Automatic initialization at module import time — currently disabled;
# callers must invoke init_module() explicitly.
#init_module()

# Module-level tallies — presumably meant to be updated by write_to_json and
# reported by startup_download; verify they are actually incremented.
success_count = 0
fail_count = 0
# Start downloading
def startup_download(download_list, limit=110):
    """Sequentially download up to *limit* items from *download_list*.

    Each item is a dict with 'audio_url', 'filename', and optional
    'title'/'id'. A 2-second pause separates requests to throttle the server.

    Fixes: ``item.get('id', id)`` used the *builtin id() function* as its
    default value — now defaults to None; removed a dead, corrupted task_id
    local; the hard-coded 110-item cap is now the *limit* parameter
    (default unchanged).
    """
    print(f"执行下载：{len(download_list)}")
    for item in download_list[:limit]:
        url = item.get('audio_url')
        filename = item.get('filename')
        item_title = item.get('title')
        item_id = item.get('id')

        if not url or not filename:
            logger.error("批量下载中缺少音频URL或文件名")
            continue

        time.sleep(2)  # throttle between requests
        download_audio(url, filename, item_title, item_id)
    print(f"下载任务执行完毕,成功写入 {success_count} 条记录，失败 {fail_count} 条记录。")



# Write a single record to the per-status JSON file
def write_to_json(data, status):
    """Append *data* to audio/<status>_audio_results.json under the cache lock.

    Missing or corrupt files are treated as empty lists and rewritten whole.

    Bug fix: ``success_count += 1`` / ``fail_count += 1`` ran without a
    ``global`` declaration, raising UnboundLocalError on every call (and the
    increment in the except branch re-raised out of the handler). Also
    restored the filename placeholder in both log messages.
    """
    global success_count, fail_count
    with json_cache_lock:
        current_dir = os.path.dirname(os.path.abspath(__file__))
        output_dir = os.path.join(current_dir, "audio")
        os.makedirs(output_dir, exist_ok=True)
        filename = os.path.join(output_dir, f"{status}_audio_results.json")

        # Load the current file contents, tolerating absence/corruption
        try:
            with open(filename, 'r', encoding='utf-8') as f:
                existing_data = json.load(f)
            if not isinstance(existing_data, list):
                existing_data = []
        except (json.JSONDecodeError, FileNotFoundError):
            existing_data = []

        existing_data.append(data)

        try:
            with open(filename, 'w', encoding='utf-8') as f:
                json.dump(existing_data, f, ensure_ascii=False, indent=2)
            logger.info(f"成功写入一条 {status} 记录到 {filename}")
            success_count += 1
        except Exception as e:
            logger.error(f"写入 {filename} 失败: {str(e)}")
            fail_count += 1
