import time
import io, os, sys
import re
from fastapi import FastAPI, Form, HTTPException, WebSocket, WebSocketDisconnect, Request, status, BackgroundTasks
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import StreamingResponse
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append('{}'.format(ROOT_DIR))
sys.path.append('{}/third_party/Matcha-TTS'.format(ROOT_DIR))

import numpy as np
import torch
import torchaudio
import struct
import json
import asyncio
import logging
import gc  # 导入垃圾回收模块
from typing import Optional, List, Dict, Any
from datetime import datetime, timedelta
from contextlib import nullcontext

from cosyvoice.cli.cosyvoice import CosyVoice
from cosyvoice.utils.file_utils import load_wav

# Logging configuration: timestamped INFO-level records for the whole service
logging.basicConfig(level=logging.INFO, 
                   format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# Detect the installed PyTorch version and pick a compatible CUDA allocator config.
torch_version = torch.__version__.split('.')
major_version = int(torch_version[0])
minor_version = int(torch_version[1]) if len(torch_version) > 1 else 0

# Choose memory options based on the PyTorch version.
# BUGFIX: compare as a (major, minor) tuple. The original test
# `major_version >= 1 and minor_version >= 11` wrongly classified
# releases such as 2.0 (minor 0 < 11) as "old" PyTorch.
if (major_version, minor_version) >= (1, 11):
    # Newer PyTorch releases understand garbage_collection_threshold.
    logger.info(f"检测到PyTorch版本 {torch.__version__}，使用高级内存配置")
    os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:64,garbage_collection_threshold:0.6"
else:
    # Fall back to the option set supported by older releases.
    logger.info(f"检测到PyTorch版本 {torch.__version__}，使用基础内存配置")
    os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:64"

# Create the FastAPI application
app = FastAPI()

# Configure CORS - make sure WebSocket connections are allowed too
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # allow every origin
    allow_credentials=True,
    allow_methods=["*"],  # allow every HTTP method
    allow_headers=["*"],  # allow every header
)

# Global mutable service state (device selection, model handle, GPU bookkeeping)
use_cuda = False                  # whether CUDA is currently selected for inference
device = "cpu"                    # current compute device label
cosyvoice = None                  # loaded CosyVoice model instance (set at startup)
available_speakers = []           # speaker ids reported by the model
last_gpu_check = datetime.now()   # timestamp of the last GPU availability check
gpu_check_interval = 60           # seconds between periodic GPU checks
min_gpu_memory_mb = 200           # minimum required GPU memory (MB) - lowered threshold
gpu_cool_down_period = 60         # cool-down (s) before upgrading CPU -> GPU - shortened
last_cpu_fallback = None          # when we last fell back to CPU (None = never)
memory_threshold_percent = 10     # percent of total memory used as the dynamic threshold
force_release_interval = 600      # seconds between forced GPU memory releases
last_force_release = None         # when memory was last force-released (None = never)

# Device management helpers
def force_release_gpu_memory():
    """Force-release cached GPU memory and record when the release ran.

    Logs allocator statistics (device 0) before and after emptying the CUDA
    cache so the effect of the cleanup is visible in the service log.
    No-op when CUDA is unavailable.
    """
    global last_force_release
    
    if not torch.cuda.is_available():
        return
    
    logger.info("正在强制释放GPU内存...")
    
    # Snapshot allocator state before cleanup
    before_total = torch.cuda.get_device_properties(0).total_memory
    before_reserved = torch.cuda.memory_reserved(0)
    before_allocated = torch.cuda.memory_allocated(0)
    before_free = before_total - before_reserved
    before_cached = before_reserved - before_allocated
    
    # Cleanup passes: drop allocator caches, then collect Python garbage
    torch.cuda.empty_cache()
    gc.collect()
    
    # Adapted to the CosyVoice model layout: the model itself is NOT moved,
    # only memory-cleanup operations are performed here.
    
    # Snapshot allocator state after cleanup
    after_total = torch.cuda.get_device_properties(0).total_memory
    after_reserved = torch.cuda.memory_reserved(0)
    after_allocated = torch.cuda.memory_allocated(0)
    after_free = after_total - after_reserved
    after_cached = after_reserved - after_allocated
    
    # Remember when the last forced release ran (read by check_gpu_availability)
    last_force_release = datetime.now()
    
    # Log the before/after delta
    logger.info(f"强制内存释放结果:")
    logger.info(f"  总内存: {before_total/1024**3:.2f} GB")
    logger.info(f"  释放前: 已分配={before_allocated/1024**3:.2f}GB, 已预留={before_reserved/1024**3:.2f}GB")
    logger.info(f"  释放前: 可用={before_free/1024**3:.2f}GB, 缓存={before_cached/1024**3:.2f}GB")
    logger.info(f"  释放后: 已分配={after_allocated/1024**3:.2f}GB, 已预留={after_reserved/1024**3:.2f}GB")
    logger.info(f"  释放后: 可用={after_free/1024**3:.2f}GB, 缓存={after_cached/1024**3:.2f}GB")
    logger.info(f"  净释放: {(after_free-before_free)/1024**3:.2f} GB")
def check_gpu_availability():
    """Return True when the GPU has enough (free + reusable-cache) memory.

    Side effects: updates last_gpu_check, and may trigger a forced memory
    release when force_release_interval has elapsed.  Respects the CPU
    fallback cool-down window.
    """
    global use_cuda, device, last_gpu_check, last_cpu_fallback, last_force_release
    
    # Update the last-check timestamp
    last_gpu_check = datetime.now()
    
    if not torch.cuda.is_available():
        return False
        
    # Force-release memory when the interval has elapsed (or it never ran)
    if (last_force_release is None or 
        (datetime.now() - last_force_release).total_seconds() > force_release_interval):
        force_release_gpu_memory()
    
    # Collect allocator statistics for device 0
    total_memory = torch.cuda.get_device_properties(0).total_memory
    reserved_memory = torch.cuda.memory_reserved(0)
    allocated_memory = torch.cuda.memory_allocated(0)
    free_memory = total_memory - reserved_memory
    cached_memory = reserved_memory - allocated_memory
    
    # Dynamic threshold: the larger of the absolute minimum and a
    # percentage of total device memory
    dynamic_threshold = max(min_gpu_memory_mb * 1024 * 1024, 
                          total_memory * memory_threshold_percent / 100)
    
    # Log detailed memory information
    logger.info(f"GPU内存详情:")
    logger.info(f"  总内存: {total_memory/1024**3:.2f} GB")
    logger.info(f"  已分配: {allocated_memory/1024**3:.2f} GB")
    logger.info(f"  已预留: {reserved_memory/1024**3:.2f} GB")
    logger.info(f"  缓存: {cached_memory/1024**3:.2f} GB")
    logger.info(f"  可用: {free_memory/1024**3:.2f} GB")
    logger.info(f"  动态阈值: {dynamic_threshold/1024**3:.2f} GB")
    
    # Bail out if the cool-down after a CPU fallback has not elapsed.
    # NOTE(review): this early-return happens AFTER the forced release above,
    # so a release can run even while still inside the cool-down window —
    # confirm that is intended.
    if last_cpu_fallback:
        time_since_fallback = (datetime.now() - last_cpu_fallback).total_seconds()
        if time_since_fallback < gpu_cool_down_period:
            logger.debug(f"GPU冷却期未过，剩余{gpu_cool_down_period - time_since_fallback:.1f}秒")
            return False
    
    # Two sources of usable memory are combined:
    # 1. directly free memory (total - reserved)
    # 2. cached memory (reserved - allocated), which the allocator can reuse
    available_memory = free_memory + cached_memory * 0.8  # assume 80% of cache is reclaimable
    
    # Enough memory?
    is_available = available_memory >= dynamic_threshold
    
    if is_available:
        logger.info(f"GPU内存充足: 可用={available_memory/1024**3:.2f}GB >= 阈值={dynamic_threshold/1024**3:.2f}GB")
    else:
        logger.info(f"GPU内存不足: 可用={available_memory/1024**3:.2f}GB < 阈值={dynamic_threshold/1024**3:.2f}GB")
    
    return is_available

def switch_to_gpu():
    """Flip the global device flags to GPU mode (the model itself is not moved).

    Returns True on success, False if a RuntimeError occurs while preparing
    the device (in which case the flags fall back to CPU).
    """
    global use_cuda, device

    logger.info("将设备设置为GPU...")
    try:
        # Reclaim cached allocator blocks and collectable Python objects first.
        torch.cuda.empty_cache()
        gc.collect()

        # Only the device flags change; CosyVoice keeps its own placement.
        use_cuda, device = True, "cuda"
        logger.info("设备已切换到GPU模式")
        return True
    except RuntimeError as err:
        logger.warning(f"切换到GPU失败: {str(err)}")
        use_cuda, device = False, "cpu"
        return False

def switch_to_cpu():
    """Flip the global device flags to CPU mode (the model itself is not moved).

    Records the fallback timestamp so the GPU cool-down logic can delay the
    next upgrade attempt.  Returns True on success, False on unexpected error.
    """
    global use_cuda, device, last_cpu_fallback

    logger.info("将设备设置为CPU...")
    try:
        # Remember when the downgrade happened (used for the cool-down window).
        last_cpu_fallback = datetime.now()

        # Only the device flags change; CosyVoice keeps its own placement.
        use_cuda, device = False, "cpu"
        logger.info("设备已切换到CPU模式")

        # Give back GPU allocator caches now that we are off the GPU.
        torch.cuda.empty_cache()
        gc.collect()
        return True
    except Exception as err:
        logger.warning(f"切换到CPU失败: {str(err)}")
        return False

# Background task that periodically checks GPU availability
async def check_gpu_periodically():
    """Loop forever: every gpu_check_interval seconds, try to upgrade CPU -> GPU.

    Only ever upgrades; the downgrade to CPU happens in the inference path
    when CUDA runs out of memory.  Exceptions are logged and the loop keeps
    running.
    """
    global use_cuda, device
    
    while True:
        await asyncio.sleep(gpu_check_interval)
        logger.debug("执行定期GPU可用性检查...")
        
        try:
            # Only consider upgrading when we are currently on CPU
            if not use_cuda:
                # Does the GPU currently have enough memory?
                if check_gpu_availability():
                    # Flip the device flags over to GPU
                    switch_to_gpu()
                    logger.info(f"定期检查: 已将模型从CPU切换到GPU")
        except Exception as e:
            logger.error(f"GPU定期检查出错: {str(e)}")

# Pre-load cleanup helper
def clean_gpu_memory():
    """Aggressively reclaim GPU memory before a big allocation (no-op without CUDA)."""
    if not torch.cuda.is_available():
        return

    # Drop unreachable Python objects first so their CUDA tensors can be freed.
    gc.collect()

    # Return cached allocator blocks to the driver.
    torch.cuda.empty_cache()

    # Reset peak-usage counters when this PyTorch build exposes them.
    if hasattr(torch.cuda, 'reset_peak_memory_stats'):
        torch.cuda.reset_peak_memory_stats()

    # Report the post-cleanup state.  NOTE(review): the "可用内存" figure is
    # reserved-minus-allocated (reusable cache), not truly free device memory.
    reusable_cache = torch.cuda.memory_reserved(0) - torch.cuda.memory_allocated(0)
    device_capacity = torch.cuda.get_device_properties(0).total_memory
    logger.info(f"GPU内存清理完成: 总内存={device_capacity/1024**3:.2f}GB, 当前已分配={torch.cuda.memory_allocated(0)/1024**3:.2f}GB, 可用内存={reusable_cache/1024**3:.2f}GB")

# Enable CPU-offload support
def enable_cpu_offload_settings():
    """Configure CPU-offload and memory-optimisation flags for PyTorch.

    Any failure is logged rather than raised so model loading can proceed.
    """
    try:
        # Very important: set these PyTorch options up front to reduce OOM risk.
        torch.backends.cudnn.benchmark = False  # disable the cudnn autotuner
        torch.backends.cudnn.deterministic = True  # use deterministic kernels

        # Favour small allocator splits for batch processing.
        os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'max_split_size_mb:32'

        # Advertise autocast-based mixed precision when this build supports it.
        amp_available = hasattr(torch.cuda, 'amp') and hasattr(torch.cuda.amp, 'autocast')
        if amp_available:
            logger.info("启用自动混合精度计算")
            os.environ['PYTORCH_CUDA_AUTOCAST_ENABLED'] = '1'

        logger.info("已配置CPU卸载和内存优化选项")
    except Exception as err:
        logger.error(f"配置内存优化选项时出错: {str(err)}")

# Customised model loading with partial GPU acceleration support
def load_model_with_cpu_offload(model_path):
    """Load the CosyVoice model with CPU offloading so loading cannot OOM.

    Args:
        model_path: filesystem path to the pretrained model directory.

    Returns:
        (model, can_use_cuda_for_inference): the loaded CosyVoice instance and
        a flag saying whether GPU memory looks sufficient for inference.

    Raises:
        Whatever CosyVoice(model_path) raises (re-raised after logging).
    """
    import torch
    from cosyvoice.cli.cosyvoice import CosyVoice
    
    logger.info(f"使用CPU卸载功能加载模型: {model_path}")
    
    # Step 1: treat CPU as the primary device while keeping CUDA compute enabled
    initial_device = 'cpu'
    logger.info(f"初始加载设备: {initial_device}")
    
    # Step 2: clear any existing GPU memory
    clean_gpu_memory()
    
    # Step 3: keep GPU 0 visible so inference can still use it later
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"  # keep the GPU visible
    
    # Step 4: placeholder for tweaking PyTorch's CUDA behaviour (currently unused)
    original_cuda_device_count = None
    
    # Apply the memory-optimisation settings
    enable_cpu_offload_settings()
    
    # Step 5: load the model
    try:
        # Custom loading path
        logger.info("正在CPU上加载初始模型...")
        torch.cuda.empty_cache()  # clear memory once more
        model = CosyVoice(model_path)
        logger.info("模型基本结构已加载到CPU")
        
        # Step 6: decide whether the GPU can be used for inference
        # (heuristic: less than 90% of total device memory currently allocated)
        can_use_cuda_for_inference = torch.cuda.is_available() and torch.cuda.memory_allocated(0) < torch.cuda.get_device_properties(0).total_memory * 0.9
        
        if can_use_cuda_for_inference:
            logger.info("GPU内存足够用于推理，将在推理时使用GPU加速")
        else:
            logger.info("GPU内存不足用于完整加速，将使用部分GPU加速")
        
        # Log memory usage after loading.
        # NOTE(review): this calls torch.cuda.memory_allocated unconditionally —
        # verify it is safe on CPU-only hosts.
        logger.info(f"模型加载后内存: CPU占用未知, GPU占用={torch.cuda.memory_allocated(0)/1024**3:.2f}GB")
        
        return model, can_use_cuda_for_inference
        
    except Exception as e:
        logger.error(f"加载模型失败: {str(e)}")
        import traceback
        logger.error(traceback.format_exc())
        raise

# Initialise the model and the GPU checker at application startup
@app.on_event("startup")
async def startup_event():
    """Initialise resources when the application starts.

    Loads the CosyVoice model via the CPU-offload path, records the available
    speakers and launches the periodic GPU availability task.  On any failure
    the service stays up in CPU mode with an empty speaker list.
    """
    global cosyvoice, available_speakers, use_cuda, device
    
    # Initialise the model
    try:
        # Is a CUDA device present at all?
        has_cuda = torch.cuda.is_available()
        if has_cuda:
            # First check whether other programs already hold most GPU memory.
            # NOTE(review): "free" here is total - allocated, ignoring the
            # allocator's reserved-but-unallocated cache — confirm intent.
            total_memory = torch.cuda.get_device_properties(0).total_memory
            allocated_memory = torch.cuda.memory_allocated(0)
            free_memory = total_memory - allocated_memory
            
            logger.info(f"GPU内存状态: 总计={total_memory/1024**3:.2f}GB, "
                        f"已分配={allocated_memory/1024**3:.2f}GB, "
                        f"空闲={free_memory/1024**3:.2f}GB")
            
            if free_memory < 500 * 1024 * 1024:  # less than 500MB free
                logger.warning(f"GPU内存几乎已用尽 ({free_memory/1024**3:.2f}GB可用)，将尝试优化加载")
            
            # Attempt the custom loading path regardless
            device = "cuda"
            use_cuda = True
        else:
            logger.info("没有GPU可用，使用CPU模式")
            device = "cpu"
            use_cuda = False
        
        # Model location
        model_path = './pretrained_models/CosyVoice-300M'
        if not os.path.exists(model_path):
            logger.error(f"模型路径不存在: {os.path.abspath(model_path)}")
            logger.info(f"当前工作目录: {os.getcwd()}")
            logger.info(f"pretrained_models目录内容: {os.listdir('./pretrained_models') if os.path.exists('./pretrained_models') else '目录不存在'}")
            raise FileNotFoundError(f"模型路径不存在: {os.path.abspath(model_path)}")
        
        # Load the model via the memory-optimised path
        cosyvoice, can_use_cuda_for_inference = load_model_with_cpu_offload(model_path)
        
        # Record the effective device mode
        if not can_use_cuda_for_inference:
            logger.warning("GPU内存不足，将使用CPU+部分GPU混合模式")
            device = "cpu+gpu"  # hybrid mode
        else:
            logger.info("GPU内存充足，将使用完整GPU加速")
            device = "cuda"
        
        # Query the available speaker voices
        available_speakers = cosyvoice.list_available_spks()
        logger.info(f"加载模型成功! 可用音色: {available_speakers}")
        
        # Log basic model information
        logger.info(f"CosyVoice模型信息: 类型={type(cosyvoice)}")
        
        # Launch the periodic GPU availability checker
        asyncio.create_task(check_gpu_periodically())
        logger.info("GPU定期检查任务已启动")
        
    except Exception as e:
        logger.error(f"初始化模型失败: {str(e)}")
        import traceback
        logger.error(traceback.format_exc())
        available_speakers = []
        device = "cpu"
        use_cuda = False

# Improved WebSocket connection management
class ConnectionManager:
    """Tracks active WebSocket connections and runs one heartbeat task per connection."""

    def __init__(self):
        # conn_id -> live WebSocket
        self.active_connections: Dict[str, WebSocket] = {}
        # conn_id -> background heartbeat task
        self.heartbeat_tasks: Dict[str, asyncio.Task] = {}
        logger.info("ConnectionManager初始化完成")

    async def connect(self, websocket: WebSocket):
        """Accept `websocket`, register it, start its heartbeat and return its id."""
        await websocket.accept()
        # Use the object's id() as a unique connection key
        conn_id = f"{id(websocket)}"
        self.active_connections[conn_id] = websocket
        logger.info(f"WebSocket连接已建立 (ID: {conn_id})，当前连接数: {len(self.active_connections)}")
        
        # Start the per-connection heartbeat task
        self.heartbeat_tasks[conn_id] = asyncio.create_task(self.heartbeat(conn_id, websocket))
        
        return conn_id

    def disconnect(self, conn_id: str):
        """Unregister the connection and cancel its heartbeat task (idempotent)."""
        if conn_id in self.active_connections:
            del self.active_connections[conn_id]
            logger.info(f"WebSocket连接已移除 (ID: {conn_id})，当前连接数: {len(self.active_connections)}")
        
        # Cancel the heartbeat task.
        # NOTE(review): heartbeat() itself calls disconnect() on send failure,
        # so this may cancel the task that is currently running; it breaks out
        # of its loop immediately afterwards, so this looks benign — confirm.
        if conn_id in self.heartbeat_tasks:
            self.heartbeat_tasks[conn_id].cancel()
            del self.heartbeat_tasks[conn_id]
            logger.debug(f"心跳任务已取消 (ID: {conn_id})")

    # Heartbeat: send a ping-style message every 30 seconds
    async def heartbeat(self, conn_id: str, websocket: WebSocket):
        """Periodically send a heartbeat JSON frame until the connection goes away."""
        try:
            while True:
                await asyncio.sleep(30)  # one heartbeat every 30 seconds
                if conn_id in self.active_connections:
                    try:
                        # Send the heartbeat frame
                        await websocket.send_text(json.dumps({
                            "type": "heartbeat",
                            "timestamp": time.time()
                        }))
                        logger.debug(f"发送心跳消息到 (ID: {conn_id})")
                    except Exception as e:
                        logger.error(f"发送心跳消息失败 (ID: {conn_id}): {str(e)}")
                        # The connection is probably dead; unregister it
                        self.disconnect(conn_id)
                        break
                else:
                    # Connection no longer registered; stop the task
                    break
        except asyncio.CancelledError:
            # Task was cancelled; exit quietly
            logger.debug(f"心跳任务已取消 (ID: {conn_id})")
        except Exception as e:
            logger.error(f"心跳任务异常 (ID: {conn_id}): {str(e)}")

manager = ConnectionManager()

# Optimised model inference entry point
def model_inference(text, speaker_id, speed=1.0, stream=True):
    """Run CosyVoice SFT inference using the hybrid compute mode.

    Args:
        text: text to synthesise.
        speaker_id: pretrained voice id forwarded to inference_sft.
        speed: speed factor forwarded to the model.
        stream: True  -> return a generator yielding model output dicts;
                False -> return the single model output directly.

    BUGFIX: the original single function contained both ``yield`` and
    ``return results``, so Python compiled the whole function as a generator
    and stream=False callers received a generator instead of the result dict
    (making ``output['tts_speech']`` fail).  The two modes now live in
    separate functions behind this dispatcher.
    """
    if stream:
        return _model_inference_stream(text, speaker_id, speed)
    return _model_inference_blocking(text, speaker_id, speed)


def _inference_context():
    """Return an autocast context when CUDA AMP is usable, else a no-op context."""
    if use_cuda and hasattr(torch.cuda, 'amp') and hasattr(torch.cuda.amp, 'autocast'):
        return torch.cuda.amp.autocast()
    return nullcontext()


def _move_speech_to_cpu(output):
    """Move output['tts_speech'] to CPU in place, tolerating tensor-like
    objects that lack device/cpu() support."""
    if 'tts_speech' in output and torch.is_tensor(output['tts_speech']):
        try:
            if output['tts_speech'].device.type != 'cpu':
                output['tts_speech'] = output['tts_speech'].cpu()
        except (AttributeError, RuntimeError):
            # No device attribute / cpu() method: keep the original tensor.
            pass


def _model_inference_stream(text, speaker_id, speed):
    """Streaming path: generator of output dicts, with a CPU retry on CUDA OOM."""
    try:
        logger.info(f"在{device}上执行推理, 文本长度: {len(text)}")
        start_time = time.time()
        # The mixed-precision context only wraps the inference_sft call,
        # matching the original control flow; chunks stream out afterwards.
        with _inference_context():
            results = cosyvoice.inference_sft(text, speaker_id, stream=True, speed=speed)
        for output in results:
            # Make sure audio tensors reach the caller on the CPU.
            _move_speech_to_cpu(output)
            yield output
        inference_time = time.time() - start_time
        logger.info(f"流式推理完成, 耗时: {inference_time:.2f}秒, 设备: {device}")
    except RuntimeError as e:
        # Only a CUDA OOM while on GPU triggers the CPU retry.
        if 'CUDA out of memory' not in str(e) or not use_cuda:
            raise
        logger.warning(f"推理过程中GPU内存不足，将使用CPU: {str(e)}")
        torch.cuda.empty_cache()
        gc.collect()
        logger.info(f"在CPU上重新运行推理，文本长度: {len(text)}")
        start_time = time.time()
        # Temporarily hide the GPU so the retry stays on the CPU.
        original_visible_devices = os.environ.get("CUDA_VISIBLE_DEVICES", "")
        os.environ["CUDA_VISIBLE_DEVICES"] = ""
        try:
            for output in cosyvoice.inference_sft(text, speaker_id, stream=True, speed=speed):
                yield output
            inference_time = time.time() - start_time
            logger.info(f"CPU流式推理完成, 耗时: {inference_time:.2f}秒")
        finally:
            # Restore the original CUDA visibility.
            os.environ["CUDA_VISIBLE_DEVICES"] = original_visible_devices


def _model_inference_blocking(text, speaker_id, speed):
    """Non-streaming path: return the model output, with a CPU retry on CUDA OOM."""
    try:
        logger.info(f"在{device}上执行推理, 文本长度: {len(text)}")
        start_time = time.time()
        with _inference_context():
            results = cosyvoice.inference_sft(text, speaker_id, stream=False, speed=speed)
        # NOTE(review): treats the non-streaming result as a dict, as the
        # original code did — confirm inference_sft(stream=False) returns one.
        _move_speech_to_cpu(results)
        inference_time = time.time() - start_time
        logger.info(f"推理完成, 耗时: {inference_time:.2f}秒, 设备: {device}")
        return results
    except RuntimeError as e:
        if 'CUDA out of memory' not in str(e) or not use_cuda:
            raise
        logger.warning(f"推理过程中GPU内存不足，将使用CPU: {str(e)}")
        torch.cuda.empty_cache()
        gc.collect()
        logger.info(f"在CPU上重新运行推理，文本长度: {len(text)}")
        start_time = time.time()
        original_visible_devices = os.environ.get("CUDA_VISIBLE_DEVICES", "")
        os.environ["CUDA_VISIBLE_DEVICES"] = ""
        try:
            results = cosyvoice.inference_sft(text, speaker_id, stream=False, speed=speed)
            inference_time = time.time() - start_time
            logger.info(f"CPU推理完成, 耗时: {inference_time:.2f}秒")
            return results
        finally:
            # Restore the original CUDA visibility.
            os.environ["CUDA_VISIBLE_DEVICES"] = original_visible_devices

# No-op context manager used by the mixed-precision code path.
# NOTE(review): this class shadows `contextlib.nullcontext` imported at the
# top of the file; behaviour is kept compatible with the stdlib contract
# (`__enter__` returns `enter_result`, which defaults to None).
class nullcontext:
    """No-op context manager compatible with contextlib.nullcontext."""

    def __init__(self, enter_result=None):
        # Generalised: allow `with nullcontext(x) as v` to bind v = x.
        self.enter_result = enter_result

    def __enter__(self):
        return self.enter_result

    def __exit__(self, *args):
        # Never suppresses exceptions.
        pass

def segment_text(text):
    """Split `text` on punctuation, then merge fragments shorter than 20 chars.

    Mirrors the segmentText helper in api.py, with an extra pass that glues
    short neighbouring pieces together so tiny fragments are not synthesised
    one by one.
    """
    # Punctuation that ends a segment (full-width and half-width forms).
    delimiters = ['，', '。', '!', '！', '?', '？', '；', '：', ',', '.', '\n']

    # Repeatedly split every current piece on each delimiter in turn.
    pieces = [text]
    for mark in delimiters:
        split_pieces = []
        for piece in pieces:
            # Drop whitespace-only fragments early.
            if not piece.strip():
                continue

            parts = piece.split(mark)
            # Every part except the last was followed by the delimiter: re-attach it.
            split_pieces.extend(part + mark for part in parts[:-1])

            # The trailing part may have no delimiter after it.
            if parts[-1].strip():
                split_pieces.append(parts[-1])

        pieces = split_pieces

    # Remove any remaining whitespace-only fragments.
    candidates = [piece for piece in pieces if piece.strip()]

    # Greedily merge adjacent fragments while the combined length stays under 20.
    merged = []
    buffer = ''
    for piece in candidates:
        if buffer and len(buffer) + len(piece) < 20:
            # Still short enough: absorb this piece into the running buffer.
            buffer += piece
        else:
            # Flush the buffer (if any) and start a new one.
            if buffer:
                merged.append(buffer)
            buffer = piece

    # Flush the final buffered segment.
    if buffer:
        merged.append(buffer)

    return merged

def create_wav_header(data_size, channels=1, sample_rate=16000, bits_per_sample=16):
    """Build a canonical 44-byte PCM WAV header as a bytearray.

    Args:
        data_size: size in bytes of the PCM payload that will follow.
        channels: number of audio channels.
        sample_rate: samples per second.
        bits_per_sample: bit depth of each sample.
    """
    # Derived fmt-chunk fields.
    byte_rate = sample_rate * channels * bits_per_sample // 8
    block_align = channels * bits_per_sample // 8

    # Little-endian layout: RIFF chunk, fmt chunk (PCM), data chunk header.
    packed = struct.pack(
        '<4sI4s4sIHHIIHH4sI',
        b'RIFF', data_size + 36, b'WAVE',   # RIFF size = file size - 8
        b'fmt ', 16, 1, channels,            # fmt chunk: 16 bytes, format 1 = PCM
        sample_rate, byte_rate, block_align, bits_per_sample,
        b'data', data_size,
    )
    return bytearray(packed)

def create_progress_message(current_segment, total_segments, current_text=""):
    """Serialise a progress update as UTF-8 encoded JSON bytes."""
    # Guard against division by zero when there is nothing to process.
    if total_segments > 0:
        percentage = int((current_segment / total_segments) * 100)
    else:
        percentage = 0

    # Show at most the first 20 characters of the segment text.
    preview = current_text if len(current_text) <= 20 else current_text[:20] + "..."

    payload = {
        "type": "progress",
        "current": current_segment,
        "total": total_segments,
        "percentage": percentage,
        "text": preview,
    }
    return json.dumps(payload).encode('utf-8')

@app.post("/api/v1/tts/stream")
async def synthesize_sft(
    text: str = Form(..., description="要合成的文本"),
    speaker_id: str = Form("中文女", description="预训练音色ID"),
    speed: float = Form(1.0, description="速度调节"),
    seed: int = Form(0, description="随机种子，0表示随机生成"),
    chunk_size: int = Form(8192, description="数据块大小"),
    enable_segmentation: bool = Form(True, description="是否启用文本分段处理"),
    segment_silence_ms: int = Form(50, description="段落之间的静音毫秒数")
):
    """Stream synthesised speech for `text` as a chunked WAV response.

    NOTE(review): `chunk_size` and `segment_silence_ms` are accepted but never
    used in this body.  The WAV header's data-size field only reflects the
    FIRST audio chunk, not the full stream — most players tolerate this for
    chunked transfer, but confirm.
    """
    try:
        print(f"收到TTS请求: text={text}, speaker_id={speaker_id}, speed={speed}, seed={seed}, enable_segmentation={enable_segmentation}")
        
        if not text:
            raise HTTPException(status_code=400, detail="必须提供text参数")
        
        # Handle the random seed - note: the current CosyVoice model does not
        # support seeding, so this is recorded for logging only
        if seed <= 0:
            seed = np.random.randint(1, 1000000)
            print(f"使用随机种子: {seed}（注意：当前版本不支持设置种子）")
        else:
            print(f"使用指定种子: {seed}（注意：当前版本不支持设置种子）")
        
        # Decide whether to process the text in segments
        if enable_segmentation and len(text) > 10:
            # Use the improved segmentation algorithm
            text_segments = segment_text(text)
        else:
            # No segmentation: process the text as a whole
            text_segments = [text]
        
        print(f"文本已分割为 {len(text_segments)} 个片段:")
        for i, segment in enumerate(text_segments):
            print(f"  片段 {i+1}/{len(text_segments)}: '{segment}'")
        
        # Async generator producing the WAV byte stream
        async def generate_stream():
            # Whether the WAV header has been emitted yet
            header_sent = False
            current_segment_index = 0
            total_segments = len(text_segments)
            processed_segments = 0
            
            # Process each text segment in order
            for i, segment in enumerate(text_segments):
                if not segment.strip():
                    continue
                
                current_segment_index = i + 1
                
                # Build the progress message.
                # NOTE(review): progress_json is constructed but never yielded
                # or sent anywhere — dead code as written.
                progress_json = create_progress_message(
                    current_segment=current_segment_index,
                    total_segments=total_segments,
                    current_text=segment
                )
                
                try:
                    # Derive a per-segment timeout (informational only here)
                    segment_timeout = max(5, len(segment) * 0.5)  # scale with text length, at least 5 seconds
                    print(f"正在合成语音 ({i+1}/{total_segments}): '{segment}'")
                    print(f"处理段落 {i+1}/{total_segments}: '{segment}', 超时设置: {segment_timeout}秒")
                    
                    # Stream synthesis via inference_sft (seed parameter removed)
                    for model_output in model_inference(segment, speaker_id, speed=speed, stream=True):
                        # Grab the audio tensor
                        speech_data = model_output['tts_speech']
                        # Convert float samples to 16-bit PCM integers
                        int_data = (speech_data.numpy() * 32767).astype(np.int16)
                        byte_data = int_data.tobytes()
                        
                        # Emit the WAV header before the first audio bytes
                        if not header_sent:
                            # Header for the first chunk only (see NOTE above)
                            header = create_wav_header(len(byte_data))
                            yield bytes(header)
                            header_sent = True
                            print(f"已发送WAV头部: {len(header)} 字节")
                        
                        # Yield briefly so other requests can make progress
                        await asyncio.sleep(0.01)
                        
                        print(f"发送数据块，段落 {i+1}/{total_segments}: '{segment}', 大小: {len(byte_data)} 字节")
                        yield byte_data
                    
                    processed_segments += 1
                    print(f"已完成段落 {i+1}/{total_segments} 的语音合成: '{segment}'")
                
                except Exception as e:
                    print(f"处理段落 {i+1}/{total_segments} 失败: '{segment}'")
                    print(f"错误详情: {str(e)}")
                    # On failure, keep going with the next segment instead of
                    # failing the whole request; emit silence as a placeholder.
                    # NOTE(review): error_message is never sent; the 16 kHz rate
                    # here assumes it matches the model's output rate — confirm.
                    error_message = f"段落 {i+1} 处理失败: {str(e)}"
                    silence_duration = 0.5  # 500ms
                    sample_rate = 16000
                    num_samples = int(sample_rate * silence_duration)
                    silence_data = np.zeros(num_samples, dtype=np.int16)
                    silence_bytes = silence_data.tobytes()
                    yield silence_bytes
            
            # Log the final completion summary
            print(f"文本处理完成，共生成 {processed_segments}/{total_segments} 段音频")
        
        # Return the chunked streaming response
        return StreamingResponse(
            content=generate_stream(),
            media_type="audio/wav",
            headers={
                "Content-Disposition": "attachment; filename=audio.wav",
                "Transfer-Encoding": "chunked",
                "X-Total-Segments": str(len(text_segments))
            }
        )
        
    except Exception as e:
        import traceback
        print(f"处理请求时出错: {str(e)}")
        print(traceback.format_exc())
        raise HTTPException(status_code=500, detail=str(e))

@app.post("/api/v1/tts")
async def synthesize_complete(
    text: str = Form(..., description="要合成的文本"),
    speaker_id: str = Form("中文女", description="预训练音色ID"),
    speed: float = Form(1.0, description="速度调节"),
    seed: int = Form(0, description="随机种子，0表示随机生成"),
    enable_segmentation: bool = Form(True, description="是否启用文本分段处理")
):
    """Non-streaming synthesis: return one complete WAV file for `text`."""
    try:
        print(f"收到完整TTS请求: text={text}, speaker_id={speaker_id}, speed={speed}, seed={seed}, enable_segmentation={enable_segmentation}")
        
        if not text:
            raise HTTPException(status_code=400, detail="必须提供text参数")
        
        # Handle the random seed - note: the current CosyVoice model does not
        # support seeding, so this is recorded for logging only
        if seed <= 0:
            seed = np.random.randint(1, 1000000)
            print(f"使用随机种子: {seed}（注意：当前版本不支持设置种子）")
        else:
            print(f"使用指定种子: {seed}（注意：当前版本不支持设置种子）")
        
        # Decide whether to process the text in segments
        if enable_segmentation and len(text) > 10:
            text_segments = segment_text(text)
        else:
            text_segments = [text]
        
        print(f"文本已分割为 {len(text_segments)} 个片段:")
        for i, segment in enumerate(text_segments):
            print(f"  片段 {i+1}/{len(text_segments)}: '{segment}'")
        
        # Synthesise every segment and collect the PCM arrays
        all_audio_data = []
        for i, segment in enumerate(text_segments):
            if not segment.strip():
                continue
                
            print(f"正在合成语音 ({i+1}/{len(text_segments)}): '{segment}'")
            try:
                # Synthesise the segment (seed parameter removed).
                # NOTE(review): confirm model_inference with stream=False
                # returns a dict here — if it were a generator function this
                # indexing would fail.
                output = model_inference(segment, speaker_id, stream=False, speed=speed)
                speech_data = output['tts_speech']
                int_data = (speech_data.numpy() * 32767).astype(np.int16)
                all_audio_data.append(int_data)
                print(f"已完成段落 {i+1}/{len(text_segments)} 的语音合成: '{segment}'")
            
            except Exception as e:
                print(f"处理段落 {i+1}/{len(text_segments)} 失败: '{segment}'")
                print(f"错误详情: {str(e)}")
                # Keep going with the next segment
        
        # Merge all PCM arrays into one buffer
        if not all_audio_data:
            raise HTTPException(status_code=500, detail="未能生成任何音频数据")
            
        combined_audio = np.concatenate(all_audio_data)
        byte_data = combined_audio.tobytes()
        
        # Build the WAV header for the combined payload
        header = create_wav_header(len(byte_data))
        
        # Concatenate header + audio payload
        wav_data = bytearray()
        wav_data.extend(header)
        wav_data.extend(byte_data)
        
        return StreamingResponse(
            content=io.BytesIO(wav_data),
            media_type="audio/wav",
            headers={
                "Content-Disposition": "attachment; filename=audio.wav"
            }
        )
        
    except Exception as e:
        import traceback
        print(f"处理请求时出错: {str(e)}")
        print(traceback.format_exc())
        raise HTTPException(status_code=500, detail=str(e))

@app.get("/api/v1/list_speakers")
async def list_speakers():
    """Return the available speaker voices and how many there are."""
    try:
        voices = cosyvoice.list_available_spks()
        return {"speakers": voices, "count": len(voices)}
    except Exception as exc:
        # Surface any model failure as a 500 with the original message.
        raise HTTPException(status_code=500, detail=str(exc))

@app.get("/api/v1/info")
async def get_info():
    """Report service metadata plus a coarse GPU/device summary."""
    # Defaults for CPU-only hosts.
    gpu_info = "N/A"
    gpu_memory = "N/A"

    if torch.cuda.is_available():
        gpu_info = f"{torch.cuda.get_device_name(0)} (CUDA {torch.version.cuda})"
        total_gb = torch.cuda.get_device_properties(0).total_memory / 1024**3
        # NOTE(review): reserved-minus-allocated is reusable allocator cache,
        # reported here under the "可用" label.
        free_gb = (torch.cuda.memory_reserved(0) - torch.cuda.memory_allocated(0)) / 1024**3
        gpu_memory = f"总内存: {total_gb:.2f} GB, 可用: {free_gb:.2f} GB"

    return {
        "status": "ok",
        "version": "1.0.0",
        "model": "CosyVoice-300M",
        "speakers": available_speakers,
        "features": ["streaming", "segmentation", "speed_control"],
        "gpu": gpu_info,
        "gpu_memory": gpu_memory,
        "device": device,
        "using_cuda": use_cuda,
    }

@app.get("/health")
async def health():
    """Liveness probe: report speakers and whether CUDA is visible."""
    cuda_ok = torch.cuda.is_available()
    return {
        "status": "ok",
        "speakers": available_speakers,
        "gpu": cuda_ok,
        "device": "cuda" if cuda_ok else "cpu",
    }

# Simple landing page pointing at the API documentation
@app.get("/")
async def root():
    """Landing payload with pointers to the docs and health endpoints."""
    gpu_state = "已启用" if torch.cuda.is_available() else "未启用"
    return {
        "message": "CosyVoice API 服务正在运行",
        "docs_url": "/docs",
        "health_check": "/health",
        "speakers": f"{len(available_speakers)} 个可用音色",
        "gpu": gpu_state,
    }

@app.get("/api/v1/device")
async def get_device_info():
    """Report the current compute device plus GPU memory and scheduling state."""
    global use_cuda, device, last_gpu_check, last_cpu_fallback

    def _gb(num_bytes):
        # Format a byte count as gigabytes with two decimals.
        return f"{num_bytes / 1024**3:.2f}"

    # Collect GPU details only when CUDA is actually available.
    gpu_info = "N/A"
    gpu_memory = {}
    if torch.cuda.is_available():
        gpu_info = f"{torch.cuda.get_device_name(0)} (CUDA {torch.version.cuda})"
        total_memory = torch.cuda.get_device_properties(0).total_memory
        reserved_memory = torch.cuda.memory_reserved(0)
        allocated_memory = torch.cuda.memory_allocated(0)
        free_memory = total_memory - reserved_memory
        cached_memory = reserved_memory - allocated_memory

        # Dynamic threshold: the configured minimum, or a percentage of
        # total device memory — whichever is larger.
        dynamic_threshold = max(
            min_gpu_memory_mb * 1024 * 1024,
            total_memory * memory_threshold_percent / 100,
        )

        gpu_memory = {
            "total_gb": _gb(total_memory),
            "allocated_gb": _gb(allocated_memory),
            "reserved_gb": _gb(reserved_memory),
            "free_gb": _gb(free_memory),
            "cached_gb": _gb(cached_memory),
            "threshold_gb": _gb(dynamic_threshold),
        }

    # Time remaining until an upgrade back to GPU is permitted.
    can_upgrade_in = None
    if last_cpu_fallback and not use_cuda:
        since_fallback = (datetime.now() - last_cpu_fallback).total_seconds()
        if since_fallback < gpu_cool_down_period:
            can_upgrade_in = f"{(gpu_cool_down_period - since_fallback):.1f}秒"
        else:
            can_upgrade_in = "可以立即升级（需要足够内存）"

    # Time remaining until the next forced memory release may run.
    next_force_release = None
    if last_force_release:
        since_release = (datetime.now() - last_force_release).total_seconds()
        if since_release < force_release_interval:
            next_force_release = f"{(force_release_interval - since_release):.1f}秒"
        else:
            next_force_release = "可以立即执行"

    return {
        "current_device": device,
        "using_cuda": use_cuda,
        "gpu_available": torch.cuda.is_available(),
        "gpu_info": gpu_info,
        "gpu_memory": gpu_memory,
        "last_gpu_check": last_gpu_check.isoformat(),
        "gpu_check_interval": f"{gpu_check_interval}秒",
        "min_gpu_memory_required": f"{min_gpu_memory_mb}MB",
        "memory_threshold_percent": f"{memory_threshold_percent}%",
        "last_cpu_fallback": last_cpu_fallback.isoformat() if last_cpu_fallback else None,
        "gpu_cool_down_period": f"{gpu_cool_down_period}秒",
        "can_upgrade_to_gpu_in": can_upgrade_in,
        "last_force_release": last_force_release.isoformat() if last_force_release else None,
        "force_release_interval": f"{force_release_interval}秒",
        "next_force_release_in": next_force_release,
    }

@app.post("/api/v1/switch_device")
async def switch_device(target: str = Form(...)):
    """Manually switch inference between GPU and CPU.

    Accepts 'gpu' or 'cpu' (case-insensitive); anything else is rejected.
    """
    global use_cuda, device

    wanted = target.lower()

    if wanted == 'gpu':
        # Already on GPU — nothing to do.
        if use_cuda:
            return {"status": "success", "message": "已经在使用GPU", "device": device}
        # Only attempt the switch when enough GPU memory is available.
        if not check_gpu_availability():
            return {"status": "error", "message": "GPU内存不足，无法切换", "device": device}
        if switch_to_gpu():
            return {"status": "success", "message": "已切换到GPU", "device": device}
        return {"status": "error", "message": "切换到GPU失败", "device": device}

    if wanted == 'cpu':
        # Already on CPU — nothing to do.
        if not use_cuda:
            return {"status": "success", "message": "已经在使用CPU", "device": device}
        if switch_to_cpu():
            return {"status": "success", "message": "已切换到CPU", "device": device}
        return {"status": "error", "message": "切换到CPU失败", "device": device}

    return {"status": "error", "message": f"无效的目标设备: {target}", "device": device}

@app.websocket("/ws/tts")
async def websocket_endpoint(websocket: WebSocket):
    """Streaming TTS over a WebSocket connection.

    Protocol (control frames are JSON text; audio frames are raw binary):
      client -> {"text", "speaker_id", "speed"}                  synthesis request
      client -> {"type": "heartbeat_response"}                   keep-alive ack
      client -> {"type": "switch_device", "device": "gpu"|"cpu"} device switch
      server -> "connected" / "start" / "tts_metadata" / "segment_progress"
                / raw PCM bytes / "segment_complete" / "complete" / "error"

    Audio is sent as headerless 16-bit PCM in 4KB chunks, one stream per
    synthesized text segment.
    """
    conn_id = f"{id(websocket)}"
    logger.info(f"收到新的WebSocket连接请求 [ID: {conn_id}]")
    
    try:
        # 1. Accept the connection; the manager assigns the canonical id.
        conn_id = await manager.connect(websocket)
        logger.info(f"WebSocket连接已建立并注册到ConnectionManager [ID: {conn_id}]")
        
        # 2. Acknowledge the connection to the client.
        await websocket.send_text(json.dumps({
            "type": "connected",
            "message": "WebSocket连接已建立",
            "id": conn_id,
            "device": device,
            "using_cuda": use_cuda
        }))
        logger.info(f"已发送连接成功消息 [ID: {conn_id}]")
        
        # 3. Main receive/process loop.
        while True:
            try:
                # Wait for the next client message.
                try:
                    data = await websocket.receive_text()
                    logger.info(f"收到消息 [ID: {conn_id}]: {data[:100]}...")
                except WebSocketDisconnect:
                    logger.info(f"WebSocket连接断开 [ID: {conn_id}]")
                    manager.disconnect(conn_id)
                    break
                
                # Handle heartbeat responses and control messages first.
                # NOTE(review): the bare `except: pass` below swallows ALL
                # errors in this control-message path (including send errors
                # during a device switch) — JSON parse errors are reported to
                # the client by the second parse further down. Confirm this
                # best-effort behavior is intentional.
                try:
                    request = json.loads(data)
                    if request.get('type') == 'heartbeat_response':
                        logger.debug(f"收到心跳响应 [ID: {conn_id}]")
                        continue
                    
                    # Device-switch requests.
                    if request.get('type') == 'switch_device':
                        target_device = request.get('device', '').lower()
                        if target_device == 'gpu' and not use_cuda:
                            # Check whether the GPU has enough free memory.
                            if check_gpu_availability():
                                if switch_to_gpu():
                                    await websocket.send_text(json.dumps({
                                        "type": "device_switched",
                                        "device": "cuda",
                                        "success": True
                                    }))
                                else:
                                    await websocket.send_text(json.dumps({
                                        "type": "device_switched",
                                        "device": "cpu",
                                        "success": False,
                                        "message": "切换到GPU失败"
                                    }))
                            else:
                                await websocket.send_text(json.dumps({
                                    "type": "device_switched",
                                    "device": "cpu",
                                    "success": False,
                                    "message": "GPU内存不足，无法切换"
                                }))
                        elif target_device == 'cpu' and use_cuda:
                            if switch_to_cpu():
                                await websocket.send_text(json.dumps({
                                    "type": "device_switched",
                                    "device": "cpu",
                                    "success": True
                                }))
                            else:
                                await websocket.send_text(json.dumps({
                                    "type": "device_switched",
                                    "device": "cuda",
                                    "success": False,
                                    "message": "切换到CPU失败"
                                }))
                        continue
                except:
                    pass
                
                # Parse the JSON request (errors are reported to the client).
                try:
                    request = json.loads(data)
                except json.JSONDecodeError as e:
                    logger.error(f"JSON解析错误 [ID: {conn_id}]: {str(e)}")
                    await websocket.send_text(json.dumps({
                        "type": "error",
                        "message": f"无效的JSON格式: {str(e)}"
                    }))
                    continue
                
                # Extract synthesis parameters (speaker/speed have defaults).
                text = request.get('text', '')
                speaker_id = request.get('speaker_id', '中文女')
                speed = float(request.get('speed', 1.0))
                
                # Reject empty text.
                if not text:
                    logger.warning(f"收到空文本请求 [ID: {conn_id}]")
                    await websocket.send_text(json.dumps({
                        "type": "error",
                        "message": "必须提供text参数"
                    }))
                    continue
                
                # Notify the client that processing has started.
                logger.info(f"开始处理TTS请求 [ID: {conn_id}]: '{text[:20]}...'")
                await websocket.send_text(json.dumps({
                    "type": "start",
                    "message": "开始处理TTS请求"
                }))
                
                # Run the TTS pipeline.
                try:
                    # Split the text into segments for incremental synthesis.
                    text_segments = segment_text(text)
                    logger.info(f"文本已分割为 {len(text_segments)} 个片段 [ID: {conn_id}]")
                    
                    # Send metadata describing the upcoming stream.
                    await websocket.send_text(json.dumps({
                        "type": "tts_metadata",
                        "session_id": conn_id,
                        "text": text,
                        "segments": len(text_segments),
                        "speaker_id": speaker_id,
                        "speed": speed,
                        "device": device
                    }))
                    
                    # Record the start time for the duration report.
                    start_time = time.time()
                    
                    # Raw PCM is sent directly — no WAV header — so the
                    # frontend never has to parse one.
                    
                    # Free some GPU memory before processing the segments.
                    if use_cuda:
                        torch.cuda.empty_cache()
                        gc.collect()
                    
                    # Synthesize each text segment in order.
                    for i, segment in enumerate(text_segments):
                        if not segment.strip():
                            continue
                            
                        logger.info(f"处理片段 {i+1}/{len(text_segments)} [ID: {conn_id}]: '{segment}'")
                        
                        # Per-segment progress update.
                        await websocket.send_text(json.dumps({
                            "type": "segment_progress",
                            "segment": i+1,
                            "total": len(text_segments),
                            "text": segment
                        }))
                        
                        try:
                            # Synthesize this segment with streaming inference.
                            for model_output in model_inference(segment, speaker_id, speed=speed, stream=True):
                                speech_data = model_output['tts_speech']
                                # Convert float samples to 16-bit PCM.
                                int_data = (speech_data.numpy() * 32767).astype(np.int16)
                                byte_data = int_data.tobytes()
                                
                                # Stream the audio in fixed-size chunks.
                                chunk_size = 4096  # 4KB
                                for j in range(0, len(byte_data), chunk_size):
                                    chunk = byte_data[j:j+chunk_size]
                                    try:
                                        # Send the raw PCM chunk as-is.
                                        await websocket.send_bytes(chunk)
                                        # Tiny pause to pace the stream without
                                        # flooding the socket.
                                        await asyncio.sleep(0.0005)
                                    except Exception as send_err:
                                        logger.error(f"发送音频数据失败 [ID: {conn_id}]: {str(send_err)}")
                                        raise send_err
                            
                            # Signal segment completion.
                            await websocket.send_text(json.dumps({
                                "type": "segment_complete",
                                "segment": i+1,
                                "total": len(text_segments)
                            }))
                            
                        except Exception as segment_err:
                            logger.error(f"处理片段 {i+1}/{len(text_segments)} 失败 [ID: {conn_id}]: {str(segment_err)}")
                            # Report the segment error but keep going with the
                            # remaining segments.
                            await websocket.send_text(json.dumps({
                                "type": "segment_error",
                                "segment": i+1,
                                "total": len(text_segments),
                                "message": str(segment_err)
                            }))
                    
                    # Record the end time and compute elapsed duration.
                    end_time = time.time()
                    duration = end_time - start_time
                    
                    # Send the completion notice; the connection stays open
                    # for further requests.
                    try:
                        await websocket.send_text(json.dumps({
                            "type": "complete",
                            "message": "TTS处理完成",
                            "duration": f"{duration:.2f}秒",
                            "device": device
                        }))
                        logger.info(f"语音合成完成 [ID: {conn_id}], 耗时: {duration:.2f}秒")
                    except Exception as complete_err:
                        logger.error(f"发送完成消息失败 [ID: {conn_id}]: {str(complete_err)}")
                    
                    # Clean up GPU memory after the request.
                    if use_cuda:
                        torch.cuda.empty_cache()
                        gc.collect()
                    
                except Exception as e:
                    logger.error(f"处理TTS请求失败 [ID: {conn_id}]: {str(e)}")
                    try:
                        await websocket.send_text(json.dumps({
                            "type": "error",
                            "message": f"TTS处理失败: {str(e)}"
                        }))
                    except Exception as send_error:
                        logger.error(f"无法发送错误消息 [ID: {conn_id}]: {str(send_error)}")
            
            except WebSocketDisconnect:
                logger.info(f"WebSocket连接断开 [ID: {conn_id}]")
                manager.disconnect(conn_id)
                break
                
            except Exception as e:
                logger.error(f"处理WebSocket消息时出错 [ID: {conn_id}]: {str(e)}")
                try:
                    await websocket.send_text(json.dumps({
                        "type": "error",
                        "message": f"处理消息时出错: {str(e)}"
                    }))
                except Exception as send_error:
                    logger.error(f"无法发送错误消息 [ID: {conn_id}]: {str(send_error)}")
                    # Only break out of the loop once the connection is
                    # confirmed dead.
                    if isinstance(e, WebSocketDisconnect) or "WebSocket is not connected" in str(e):
                        logger.info(f"WebSocket已断开连接，退出循环 [ID: {conn_id}]")
                        manager.disconnect(conn_id)
                        break
    
    except Exception as e:
        logger.error(f"WebSocket连接处理异常 [ID: {conn_id}]: {str(e)}")
    
    finally:
        # Make sure the connection is removed from the manager.
        manager.disconnect(conn_id)
        logger.info(f"WebSocket连接已关闭 [ID: {conn_id}]")

@app.get("/api/v1/force-release-memory")
async def force_release_memory():
    """Manually trigger a forced GPU memory release and report the result."""
    if not torch.cuda.is_available():
        return {"status": "error", "message": "没有可用的GPU"}

    force_release_gpu_memory()

    # Snapshot the allocator state after the release.
    total = torch.cuda.get_device_properties(0).total_memory
    reserved = torch.cuda.memory_reserved(0)
    allocated = torch.cuda.memory_allocated(0)

    def _gb(num_bytes):
        # Format a byte count as gigabytes with two decimals.
        return f"{num_bytes / 1024**3:.2f}"

    return {
        "status": "success",
        "message": "已强制释放GPU内存",
        "total_memory_gb": _gb(total),
        "allocated_memory_gb": _gb(allocated),
        "reserved_memory_gb": _gb(reserved),
        "free_memory_gb": _gb(total - reserved),
        "cached_memory_gb": _gb(reserved - allocated),
        "device": device,
        "using_cuda": use_cuda,
    }

if __name__ == "__main__":
    import uvicorn

    # Port is configurable via the PORT environment variable.
    listen_port = int(os.getenv("PORT", "8001"))
    logger.info(f"CosyVoice API 服务启动，监听 0.0.0.0:{listen_port}...")
    uvicorn.run(app, host="0.0.0.0", port=listen_port)