import shutil
import subprocess
import time
import threading

import httpx
import redis
import json
import asyncio
import argparse
import os
from config import generate_audio_port, AVERAGE_TASK_DURATION, HTTPX_DEFAULT_TIMEOUT, USER_VOICE_FOLDER, \
    tts_host, tts_protocol

from loguru import logger
from typing import List, Dict

from server.db.repository.voice_clone_task_repository import get_voice_clone_task_from_success_db_by_task_id, update_voice_clone_task_status


class Task:
    """In-memory representation of one voice-clone queue task.

    Lifecycle status moves through: waiting -> active -> completed/failed
    (status transitions are driven externally by TaskManager).
    """

    def __init__(self, task_id: str, task_type: str, uid: str, timbre_id: str, text: str = None, command: str = None,
                 seed: int = None, **kwargs):
        self.task_id = task_id
        self.task_type = task_type
        self.uid = uid
        self.timbre_id = timbre_id
        self.text = text
        self.seed = seed
        self.command = command
        self.status = 'waiting'   # newly created tasks always start in the waiting state
        self.start_time = None    # set externally when execution begins

    def to_dict(self) -> Dict:
        """Serialize every field into a plain dict (inverse of from_dict)."""
        return {
            'task_id': self.task_id,
            'task_type': self.task_type,
            'uid': self.uid,
            'timbre_id': self.timbre_id,
            'text': self.text,
            'seed': self.seed,
            'status': self.status,
            'command': self.command,
            'start_time': self.start_time
        }

    @classmethod
    def from_dict(cls, data: Dict):
        """Rebuild a Task from a dict produced by to_dict().

        Bug fix: the previous positional call passed ``data['seed']`` into the
        ``command`` parameter (6th positional slot), so the seed was lost and
        the command dropped on every round-trip. Keyword arguments restore
        each field to its correct attribute.
        """
        task = cls(
            data['task_id'],
            data['task_type'],
            data['uid'],
            data['timbre_id'],
            text=data.get('text'),
            command=data.get('command'),
            seed=data.get('seed'),
        )
        task.status = data['status']
        task.start_time = data.get('start_time', None)
        return task


class TaskManager:
    """Schedules voice-clone tasks through two Redis lists.

    ``waiting_queue`` holds pending task ids in FIFO order; ``active_queue``
    holds the ids currently executing, capped at ``max_concurrent_tasks``.
    Durable task status lives in the database and is updated through
    ``update_voice_clone_task_status``.
    """

    def __init__(self, max_concurrent_tasks: int, redis_client: redis.Redis):
        self.max_concurrent_tasks = max_concurrent_tasks
        self.lock = threading.RLock()
        self.active_queue_key = "active_queue"
        self.waiting_queue_key = "waiting_queue"
        # Use the externally supplied Redis client instance directly.
        self.redis_client = redis_client

    def _safe_redis_operation(self, func, *args, **kwargs):
        """Run a Redis operation with a small retry loop.

        Connection-level errors are retried up to 3 times with a 0.5s pause;
        any other exception is re-raised immediately. Raises RedisError once
        all retries are exhausted.
        """
        max_retries = 3
        for attempt in range(max_retries):
            try:
                return func(*args, **kwargs)
            except (redis.exceptions.ConnectionError, ConnectionResetError) as e:
                logger.warning(f"Redis连接异常({attempt+1}/{max_retries}): {str(e)}")
                time.sleep(0.5)
            except Exception as e:
                logger.error(f"Redis操作异常: {str(e)}")
                raise
        raise redis.exceptions.RedisError("Redis操作重试失败")

    async def _execute_task(self, task_id: str):
        """Execute one clone task by POSTing it to the TTS generate_audio endpoint.

        The task is looked up in the clone-success table; a non-200 response
        or any exception routes through the shared failure path.
        """
        result = get_voice_clone_task_from_success_db_by_task_id(task_id)  # fetch from the clone-success table
        if not result:
            logger.error(f"任务 {task_id} 未找到")
            return
        uid = result["uid"]
        timbre_id = result["timbre_id"]

        task_info = {
            "uid": uid,
            "timbre_id": timbre_id,
            "task_id": task_id,
            "stream": False,
            "seed": 0,
            "task_type": "clone"
        }
        async with httpx.AsyncClient() as client:
            try:
                response = await client.post(f"{tts_protocol}://{tts_host}:{generate_audio_port}/generate_audio",
                                             json=task_info,
                                             timeout=HTTPX_DEFAULT_TIMEOUT)
                if response.status_code == 200:
                    logger.debug(f"音色克隆任务 {task_id} 执行成功。")
                else:
                    self._handle_task_failure(task_id, uid, timbre_id, f"状态码: {response.status_code}")
            except Exception as e:
                self._handle_task_failure(task_id, uid, timbre_id, str(e))

    def _handle_task_failure(self, task_id, uid, timbre_id, error_msg):
        """Unified failure path: delete the voice folder, then mark the task failed."""
        try:
            # Best effort cleanup of the cloned timbre folder; a failure here
            # is only logged, never fatal.
            shutil.rmtree(os.path.join(USER_VOICE_FOLDER, uid, timbre_id))
            logger.error(f"删除音色文件夹 {uid}/{timbre_id} 成功")
        except Exception as e:
            logger.warning(f"删除音色文件夹失败：{str(e)}")

        # Remove the task from the active queue and persist the failed status.
        self.fail_task(task_id)

        logger.error(f"任务 {task_id} 失败，失败原因：{error_msg}")

    def add_task(self, task: Task):
        """Append the task id to the waiting queue and persist 'waiting' status."""
        self._safe_redis_operation(self.redis_client.rpush, self.waiting_queue_key, task.task_id)
        update_voice_clone_task_status(task.task_id, 'waiting')
        logger.debug(f"任务 {task.task_id} 已加入队列。")

    def run_async_task(self, task_id: str):
        """Synchronous entry point used by worker threads to run _execute_task."""
        asyncio.run(self._execute_task(task_id))

    def promote_task(self):
        """Move one task from waiting to active when a concurrency slot is free,
        then run it on a daemon thread."""
        with self.lock:
            try:
                active_len = self._safe_redis_operation(self.redis_client.llen, self.active_queue_key)
                waiting_len = self._safe_redis_operation(self.redis_client.llen, self.waiting_queue_key)

                # Promote only when a slot is free and something is waiting.
                if active_len < self.max_concurrent_tasks and waiting_len > 0:
                    task_id_bytes = self._safe_redis_operation(self.redis_client.lpop, self.waiting_queue_key)
                    if task_id_bytes:
                        task_id = task_id_bytes.decode("utf-8")
                        self._safe_redis_operation(self.redis_client.rpush, self.active_queue_key, task_id)
                        update_voice_clone_task_status(task_id, "active")
                        logger.debug(f"任务 {task_id} 已激活")

                        # Execute the task on its own daemon thread so the
                        # scheduler loop is never blocked.
                        threading.Thread(
                            target=self.run_async_task,
                            args=(task_id,),
                            daemon=True
                        ).start()
            except Exception as e:
                logger.error(f"任务提升失败: {str(e)}")

    def completed_task(self, task_id: str):
        """Mark the task completed in the DB, then remove it from the active queue."""
        try:
            update_voice_clone_task_status(task_id, "completed")
            logger.debug(f"任务 {task_id} 已完成，任务状态更新为 completed")
        except Exception as e:
            logger.error(f"更新任务状态失败：{str(e)}")
        try:
            delete_count = self.remove_task(task_id)
        except Exception as e:
            logger.error(f"从Redis活跃队列删除任务: {task_id} 失败：{e}")
            return
        if delete_count != 0:
            logger.debug(f"任务 {task_id} 已完成，已从活跃队列移除。")
        else:
            logger.debug(f"任务 {task_id} 已完成，不在活跃队列中。")

    def remove_task(self, task_id: str):
        """Remove every occurrence of task_id from the active queue.

        Returns the number of entries removed (LREM with count=0 removes all).
        """
        return self._safe_redis_operation(self.redis_client.lrem, self.active_queue_key, 0, task_id)

    def fail_task(self, task_id: str):
        """Mark the task failed in the DB, then remove it from the active queue."""
        try:
            update_voice_clone_task_status(task_id, "failed")
            logger.debug(f"任务 {task_id} 失败，任务状态更新为失败")
        except Exception as e:
            logger.error(f"更新任务状态失败：{str(e)}")
        try:
            delete_count = self.remove_task(task_id)
        except Exception as e:
            logger.error(f"从Redis活跃队列删除任务: {task_id} 失败：{e}")
            return
        if delete_count != 0:
            logger.debug(f"任务 {task_id} 失败，已从活跃队列移除。")
        else:
            logger.debug(f"任务 {task_id} 失败，不在活跃队列中。")

    def check_task_status(self, task_id: str) -> Dict:
        """Report queue position and estimated wait time for a task.

        Returns a dict with keys 'status', 'position', 'wait_time'.
        Negative sentinel codes: -1 not found, -3 failed, -4 unknown
        persisted status, -5 internal error.
        """
        with self.lock:
            try:
                active_tasks = self._safe_redis_operation(self.redis_client.lrange, self.active_queue_key, 0, -1)
                for active_task in active_tasks:
                    if active_task.decode("utf-8") == task_id:
                        return {'status': 'active', 'position': 0, 'wait_time': 0}

                # Remaining free slots in the active queue.
                active_slots_available = self.max_concurrent_tasks - self._safe_redis_operation(self.redis_client.llen, self.active_queue_key)
                waiting_tasks = self._safe_redis_operation(self.redis_client.lrange, self.waiting_queue_key, 0, -1)
                # When slots are free, the front of the waiting queue can start immediately.
                if active_slots_available > 0:
                    for position, _task_id in enumerate(waiting_tasks):
                        if _task_id.decode("utf-8") == task_id:
                            if position < active_slots_available:
                                # Task will start right away.
                                return {'status': 'active', 'position': 0, 'wait_time': 0}
                            else:
                                # 1-based queue position after subtracting free slots.
                                adjusted_position = position - active_slots_available + 1
                                wait_time = adjusted_position * AVERAGE_TASK_DURATION
                                return {'status': 'waiting', 'position': adjusted_position,
                                        'wait_time': wait_time}
                # Active queue full: position is simply the 1-based waiting index.
                for position, _task_id in enumerate(waiting_tasks):
                    if _task_id.decode("utf-8") == task_id:
                        adjusted_position = position + 1
                        wait_time = adjusted_position * AVERAGE_TASK_DURATION
                        return {'status': 'waiting', 'position': adjusted_position, 'wait_time': wait_time}

                # Not queued anywhere: fall back to the persisted status.
                result = get_voice_clone_task_from_success_db_by_task_id(task_id)
                if not result:
                    return {'status': 'not_found', 'position': -1, 'wait_time': -1}

                task_status = result["task_status"]
                if task_status == 'failed':
                    return {'status': task_status, 'position': -3, 'wait_time': -3}
                elif task_status == 'completed':
                    return {'status': task_status, 'position': 0, 'wait_time': 0}
                else:
                    return {'status': task_status, 'position': -4, 'wait_time': -4}
            except Exception as e:
                logger.error(f"状态检查失败: {str(e)}")
                return {'status': 'error', 'position': -5, 'wait_time': -5}


def scheduled_task(task_manager: TaskManager):
    """Scheduler loop: promote waiting tasks, health-check Redis, and
    periodically log queue contents.

    Fix: the old reporting gate ``int(time.time()) % 5 == 0`` combined with a
    2-second sleep fired erratically — an iteration only logged when it
    happened to wake on a second divisible by 5, so reports could be skipped
    for long stretches. Tracking the timestamp of the last report gives a
    reliable >=5-second reporting interval.
    """
    last_report = 0.0
    while True:
        try:
            task_manager.promote_task()

            # Connection health check; a failure is logged but never stops the loop.
            try:
                task_manager._safe_redis_operation(task_manager.redis_client.ping)
            except Exception as e:
                logger.error(f"Redis健康检查失败: {str(e)}")

            # Log queue details at most once every 5 seconds.
            now = time.time()
            if now - last_report >= 5:
                last_report = now
                active_tasks = task_manager._safe_redis_operation(
                    task_manager.redis_client.lrange,
                    task_manager.active_queue_key, 0, -1
                ) or []

                waiting_tasks = task_manager._safe_redis_operation(
                    task_manager.redis_client.lrange,
                    task_manager.waiting_queue_key, 0, -1
                ) or []

                # Only print when at least one queue has content.
                if active_tasks or waiting_tasks:
                    # Decode raw bytes into readable strings.
                    active_list = [t.decode('utf-8') for t in active_tasks]
                    waiting_list = [t.decode('utf-8') for t in waiting_tasks]

                    logger.info(
                        "队列详情:\n"
                        f"Active队列({len(active_list)}/{task_manager.max_concurrent_tasks}):\n{active_list}\n"
                        f"Waiting队列({len(waiting_list)}):\n{waiting_list}\n"
                        "────────────────────────────"
                    )

            time.sleep(2)
        except Exception as e:
            logger.critical(f"调度线程异常: {str(e)}")
            time.sleep(5)


def start_scheduled_task(task_manager: TaskManager):
    """Launch the scheduler loop on a daemon thread, restarting it if it crashes."""

    def _run_forever():
        # scheduled_task normally never returns; if it raises, wait 5 seconds
        # and restart it so the scheduler survives unexpected failures.
        while True:
            try:
                scheduled_task(task_manager)
            except Exception as e:
                logger.critical(f"调度线程崩溃: {str(e)}，5秒后重启...")
                time.sleep(5)

    threading.Thread(target=_run_forever, daemon=True).start()

