import asyncio
from typing import AsyncGenerator, Any, Dict, Optional, List, Callable
from routes.local.streaming.stream_config import StreamConfig, InputType, OutputType
from data_handler.data_input import get_input_handler
from data_handler.data_output import get_output_handler
from config import APP_CONFIG
import httpx
from fastapi.responses import StreamingResponse

from fastapi import APIRouter, UploadFile, File, HTTPException, Depends, Request,Body
import asyncio
import numpy as np
from io import BytesIO
import soundfile as sf

# Base configuration
from config import ModelConfig
from config import MODEL_CONFIG

# Logging
from utils.logger import setup_logger
logger = setup_logger()
import whisper


# Model handler: dispatches requests to locally hosted models
class ModelHandler:
    """Dispatch inference requests to locally hosted models.

    Routes a request to one of three back-ends based on ``model_type``:
    Ollama chat/vision (text generation), ChatTTS (text-to-speech) and
    Whisper/WhisperX (audio transcription). Each back-end supports both
    streaming (``StreamingResponse``) and non-streaming replies.
    """

    def __init__(self, default_config: Optional[ModelConfig] = None):
        """
        Args:
            default_config: Fallback ``ModelConfig`` applied when a request
                does not carry its own; a fresh ``ModelConfig()`` is created
                when omitted.
        """
        self.default_config = default_config or ModelConfig()
        # Template payload for model_manager.load_model(); shallow-copied per
        # request. Both keys are re-assigned on the copy, so the shared nested
        # dict is never mutated.
        self.default_payload = {
            "config": self.default_config.dict(),
            "prompt": ""
        }
        self.data_format = "json"

    async def process_model_request(
        self,
        model: str,
        model_type: str,
        prompt: str,
        model_manager: Any,
        app_state: Any,
        custom_config: Optional[ModelConfig] = None,
        streaming: bool = False
    ) -> Any:
        """Load the requested model, then dispatch by ``model_type``.

        Args:
            model: Model identifier passed to ``model_manager.load_model``.
            model_type: One of ``ollama_chat``/``ollama_vision``/``chattts``/
                ``whisperx``/``whisper``.
            prompt: Text prompt, TTS input text, or audio file path — its
                meaning depends on the back-end.
            model_manager: Object exposing ``load_model(model, app_state, payload)``.
            app_state: FastAPI application state holding the loaded clients/models.
            custom_config: Per-request config overriding ``self.default_config``.
            streaming: Force a streaming response regardless of ``config.stream``.

        Returns:
            Back-end specific result: a ``StreamingResponse``, a dict, or the
            raw Ollama reply.

        Raises:
            HTTPException: 500 on any processing failure (an unsupported
                ``model_type`` is raised as ValueError and re-wrapped here).
        """
        logger.debug(f"处理{model_type}请求，加载模型: {model}")
        # Merge the default config with the per-request override.
        config = custom_config or self.default_config
        payload = self.default_payload.copy()
        payload["prompt"] = prompt
        payload["config"] = config.dict()

        try:
            # Ensure the target model is loaded before dispatching.
            await model_manager.load_model(model, app_state, payload)
            # Generation options shared by the text back-ends.
            options = {
                "temperature": config.temperature,
                "num_predict": config.max_tokens,
                "top_p": config.top_p
            }

            if model_type in ["ollama_chat", "ollama_vision"]:
                return await self._handle_ollama(prompt, app_state, config, options, streaming)
            elif model_type == "chattts":
                return await self._handle_chattts(prompt, app_state, config, streaming)
            elif model_type in ["whisperx", "whisper"]:
                return await self._handle_whisperx(prompt, app_state, config, streaming)
            else:
                raise ValueError(f"不支持的模型类型: {model_type}")

        except Exception as e:
            logger.error(f"处理{model_type}请求时出错: {str(e)}")
            raise HTTPException(status_code=500, detail=f"模型处理错误: {str(e)}")

    async def _handle_ollama(
        self,
        prompt: str,
        app_state: Any,
        config: ModelConfig,
        options: Dict,
        streaming: bool,
        model: str = "deepseek-r1:7b",
    ) -> Any:
        """Handle Ollama chat/vision generation.

        Args:
            prompt: User prompt forwarded to ``ollama_client.generate``.
            app_state: Must carry ``ollama_client`` (lazily re-loaded if absent).
            config: Active model configuration (``model_name``/``stream`` used here).
            options: Generation options built by ``process_model_request``.
            streaming: Stream tokens as ``text/plain`` when True (or when
                ``config.stream`` is set).
            model: Fallback model name when ``config.model_name`` is falsy.
        """
        logger.debug(f"diaoyong ollama 进行回复{config.model_name}")
        logger.debug(f"streaming: {streaming} {config.stream}")

        # Lazily (re-)load the Ollama client when it is missing from app_state.
        if not hasattr(app_state, 'ollama_client') or app_state.ollama_client is None:
            logger.warning("ollama_client不存在，正在重新加载模型...")
            from models.model_manager import ModelManager
            model_manager = ModelManager()
            await model_manager.load_model("ollama", app_state, {"config": {"model_name": config.model_name or model}})
            logger.info("Ollama模型已重新加载")

        if streaming or config.stream:
            logger.debug(f"进入函数: {streaming} {config.stream}")
            logger.debug(f"type: {type(app_state.ollama_client)} ")

            async def stream_generator() -> AsyncGenerator[str, None]:
                logger.debug(f"进入streaming: {streaming} {config.stream}")
                try:
                    generate = await app_state.ollama_client.generate(
                        model=config.model_name or model,
                        prompt=prompt,
                        options=options,
                        stream=True
                    )
                    async for chunk in generate:
                        logger.debug(f"streaming answer: {chunk}")
                        # Forward only the generated-text portion of each chunk.
                        if "response" in chunk:
                            yield chunk["response"]
                        else:
                            logger.debug(f"响应中没有'response'字段: {chunk}")
                except Exception as e:
                    logger.debug(f"Ollama 流式处理错误: {str(e)}")
                    logger.debug(f"Ollama生成失败: {e}")
                    yield "[ERROR] 模型响应中断"
            return StreamingResponse(stream_generator(), media_type="text/plain")
        else:
            # Non-streaming: await the full reply in one call.
            # Fix: fall back to the default model name, consistent with the
            # streaming path (previously an empty config.model_name was passed
            # through unchecked).
            response = await app_state.ollama_client.generate(
                model=config.model_name or model,
                prompt=prompt,
                options=options,
                stream=False
            )
            logger.debug(f"ollama 非流式 answer: {response} ")
            return response

    @staticmethod
    def _chattts_infer_params(app_state: Any) -> Any:
        """Build ChatTTS inference params with a freshly sampled random speaker.

        Extracted helper: the identical construction previously appeared in
        both branches of ``_handle_chattts``.
        """
        rand_spk = app_state.chattts_model.sample_random_speaker()
        return app_state.chattts_model.InferCodeParams(
            spk_emb=rand_spk,   # sampled speaker embedding
            temperature=.3,     # custom temperature
            top_P=0.7,          # top-P decoding
            top_K=20,           # top-K decoding
        )

    async def _handle_chattts(
        self,
        prompt: str,
        app_state: Any,
        config: ModelConfig,
        streaming: bool
    ) -> Any:
        """Synthesize speech with ChatTTS and return it as a WAV stream.

        ``config.text`` takes precedence over ``prompt`` as the input text.
        Both branches return a ``StreamingResponse``; the "streaming" branch
        chunks the (already fully generated) audio into ~0.5 s pieces to
        emulate incremental delivery.

        Raises:
            HTTPException: 400 when no input text is available.
        """
        input_text = config.text or prompt
        if not input_text:
            raise HTTPException(status_code=400, detail="需要提供输入文本")

        logger.debug(f"diaoyong  {config.model_name}")
        if streaming or config.stream:
            async def stream_audio() -> AsyncGenerator[bytes, None]:
                params_infer_code = self._chattts_infer_params(app_state)
                # ChatTTS does not expose true incremental synthesis here, so
                # the full clip is generated up front and then chunked.
                wavs = app_state.chattts_model.infer(
                    [input_text],
                    params_infer_code=params_infer_code,
                )[0]  # full audio for the single input

                audio_data = np.array(wavs, dtype=np.float32)

                # Chunk size of ~0.5 s worth of samples.
                chunk_size = int(MODEL_CONFIG.chattts_sample_rate * 0.5)

                # Emit a single WAV header first, then raw PCM chunks.
                # NOTE(review): the header comes from a zero-length write, so
                # its declared data size is 0 — many players tolerate this for
                # progressive playback, but it is not a strictly valid WAV.
                header_buffer = BytesIO()
                sf.write(header_buffer, np.zeros(0), MODEL_CONFIG.chattts_sample_rate, format='WAV')
                header = header_buffer.getvalue()[:44]  # canonical PCM WAV header is 44 bytes
                yield header

                # Send each chunk's raw PCM data with its per-chunk header stripped.
                for i in range(0, len(audio_data), chunk_size):
                    chunk = audio_data[i:i + chunk_size]
                    buffer = BytesIO()
                    sf.write(buffer, chunk, MODEL_CONFIG.chattts_sample_rate, format='WAV')
                    raw_data = buffer.getvalue()[44:]  # drop header, keep data
                    yield raw_data
                    await asyncio.sleep(0.05)

            return StreamingResponse(
                stream_audio(), 
                media_type="audio/wav",
                headers={"Content-Disposition": "attachment; filename=tts_output.wav"}
            )

        else:
            # Non-streaming: synthesize once, then stream the finished WAV
            # file back in fixed 4 KiB chunks.
            logger.debug(f"diaoyong ChatTTS 生成音频{config.model_name}")
            params_infer_code = self._chattts_infer_params(app_state)
            wavs = app_state.chattts_model.infer(
                [input_text],
                params_infer_code=params_infer_code
            )[0]
            audio_data = np.array(wavs, dtype=np.float32)

            def generate_audio_stream():
                buffer = BytesIO()
                sf.write(buffer, audio_data, MODEL_CONFIG.chattts_sample_rate, format='WAV')
                buffer.seek(0)
                while chunk := buffer.read(4096):
                    yield chunk

            return StreamingResponse(
                generate_audio_stream(),
                media_type="audio/wav",
                headers={"Content-Disposition": "attachment; filename=tts_output.wav"}
            )

    async def _handle_whisperx(
        self,
        prompt: str,  # path to the audio file to transcribe
        app_state: Any,
        config: ModelConfig,
        streaming: bool
    ) -> Any:
        """Transcribe an audio file with the loaded Whisper model.

        Args:
            prompt: Audio file path (the ``prompt`` field is reused; it is not
                a text prompt for this back-end).
            app_state: Must carry ``whisper_model``.
            config: Uses ``language``, ``batch_size``, ``align_output``,
                ``stream``.
            streaming: Stream per-chunk transcriptions as ``text/plain``.

        Returns:
            ``StreamingResponse`` in streaming mode, otherwise a dict with
            ``text`` and ``segments``.

        Raises:
            HTTPException: 400 when no audio path is given.
        """
        if not prompt:
            raise HTTPException(status_code=400, detail="需要提供音频文件路径")
        model = app_state.whisper_model
        if streaming or config.stream:
            async def stream_transcription() -> AsyncGenerator[str, None]:
                # Simulated streaming: decode fixed-size chunks sequentially.
                # NOTE(review): 16000 samples is ~1 s at Whisper's 16 kHz
                # input rate; words may be cut at chunk boundaries — confirm
                # acceptable for the UX.
                audio = whisper.load_audio(prompt)
                audio_chunks = [audio[i:i + 16000] for i in range(0, len(audio), 16000)]
                for chunk in audio_chunks:
                    chunk = whisper.pad_or_trim(chunk)
                    mel = whisper.log_mel_spectrogram(chunk, n_mels=model.dims.n_mels).to(model.device)
                    options = whisper.DecodingOptions(
                        language=config.language if config.language != "auto" else None,
                        # NOTE(review): batch_size reused as beam_size — confirm intended.
                        beam_size=config.batch_size
                    )
                    result = whisper.decode(model, mel, options)
                    logger.debug(f"whisper 生成流式 answer: {result.text}")
                    yield result.text
                    await asyncio.sleep(0.1)  # simulated streaming latency
            return StreamingResponse(stream_transcription(), media_type="text/plain")
        else:
            audio = whisper.load_audio(prompt)
            audio = whisper.pad_or_trim(audio)
            mel = whisper.log_mel_spectrogram(audio, n_mels=model.dims.n_mels).to(model.device)

            options = whisper.DecodingOptions(
                language=config.language if config.language != "auto" else None,
                # NOTE(review): batch_size reused as beam_size — confirm intended.
                beam_size=config.batch_size
            )
            result = whisper.decode(model, mel, options)

            # Optional aligned output (timestamps). Real alignment is not
            # implemented: a single whole-file segment is returned as a
            # placeholder. (Removed the previously unused ``align_model``
            # local; config.custom_alignment_model was read but never used.)
            segments = []
            if config.align_output and result.text:
                segments = [{"start": 0, "end": len(audio) / MODEL_CONFIG.whisper_sample_rate, "text": result.text}]

            return {
                "text": result.text,
                "segments": segments
            }

    async def call_model(
        self,
        model: str,
        model_type: str,
        data: Any,
        streaming: bool = False,
        config: Optional[ModelConfig] = None,
        dependencies: Optional[Dict] = None
    ) -> Any:
        """Unified entry point: unpack dependencies and forward the request.

        Args:
            model: Model identifier; falls back to the default config's model.
            model_type: Back-end selector (see ``process_model_request``).
            data: Either the prompt string itself or a dict with a ``prompt`` key.
            streaming: Request a streaming response.
            config: Optional per-request ``ModelConfig``.
            dependencies: Optional dict carrying ``model_manager`` and
                ``app_state``. Fix: previously a mutable ``{}`` default shared
                across calls; a ``None`` sentinel is used instead
                (backward-compatible).
        """
        dependencies = dependencies or {}
        model_manager = dependencies.get("model_manager")
        app_state = dependencies.get("app_state")
        # NOTE(review): ModelConfig is accessed as ``.model`` here but as
        # ``.model_name`` elsewhere in this file — confirm the field name.
        model = model or self.default_config.model
        prompt = data if isinstance(data, str) else data.get("prompt", "")
        logger.debug(f"调用模型: {model}  prompt: {prompt}")
        return await self.process_model_request(
            model=model,
            model_type=model_type,
            prompt=prompt,
            model_manager=model_manager,
            app_state=app_state,
            custom_config=config,
            streaming=streaming
        )

    async def process(self, input_queue: Any, output_queue: Any) -> None:
        """Process message queues (placeholder — not yet implemented)."""
        logger.info("消息队列处理待实现")
        # TODO: consume input_queue, run the model, push results to output_queue
        pass

    # async def process(self, input_queue: MessageQueue, output_queue: MessageQueue) -> None:
    #     """
    #     Process messages from input_queue, call the model, and put results in output_queue.
    #     """
    #     logger.info(f"Processing with model: {self.model_name}")
    #     try:
    #         while True:
    #             message = await input_queue.get()
                
    #             if message.content is None:
    #                 logger.info(f"Received end of input for model {self.model_name}")
    #                 input_queue.task_done()
    #                 await output_queue.put(Message(None, {"end": True}))
    #                 break
                
    #             try:
    #                 # Determine streaming based on input_type
    #                 streaming = self.input_type in ["text_stream", "audio_stream"] and self.model_name in ["chattts", "ollama"]
    #                 result = await self.call_model_local(message.content, streaming=streaming)
                    
    #                 if isinstance(result, StreamingResponse):
    #                     # For streaming responses, extract content (simplified for queue)
    #                     async def extract_streaming_content(response):
    #                         content = b""
    #                         async for chunk in response.body_iterator:
    #                             content += chunk
    #                         return content.decode("utf-8") if self.model_name == "ollama" else content

    #                     stream_content = await extract_streaming_content(result)
    #                     await output_queue.put(Message(stream_content, message.metadata))
    #                 else:
    #                     await output_queue.put(Message(result, message.metadata))
    #             except Exception as e:
    #                 logger.error(f"Error processing message with model {self.model_name}: {e}", exc_info=True)
    #                 await output_queue.put(Message(f"Error: {str(e)}", {"error": True, **message.metadata}))
                
    #             input_queue.task_done()
    #     except asyncio.CancelledError:
    #         logger.info(f"Model processor {self.model_name} task cancelled")
    #         raise
    #     except Exception as e:
    #         logger.error(f"Unexpected error in model processor {self.model_name}: {e}", exc_info=True)
    #         input_queue.task_done()
    #         await output_queue.put(Message(None, {"error": True, "message": str(e)}))
    #     finally:
    #         logger.info(f"Finished processing with model: {self.model_name}")
