
import asyncio
import json
import logging
from typing import Any, Dict, List, Optional, AsyncGenerator
from utils.logger import setup_logger

logger = setup_logger()

# Message type
class Message:
    """A payload travelling through the pipeline, plus control metadata.

    Metadata carries control flags such as {"end": True} or
    {"error": True, "message": "..."}.
    """

    def __init__(self, content: Any, metadata: Optional[Dict[str, Any]] = None):
        self.content = content
        # A falsy metadata value (None or {}) becomes a fresh empty dict.
        self.metadata = metadata if metadata else {}

# Message queue wrapping asyncio.Queue
class MessageQueue:
    """Thin async wrapper around asyncio.Queue used between pipeline stages."""

    def __init__(self, maxsize: int = 100):
        # Bounded underlying queue; exposed as .queue for direct access.
        self.queue = asyncio.Queue(maxsize=maxsize)

    async def put(self, message: Message) -> None:
        """Enqueue a message, waiting while the queue is full."""
        await self.queue.put(message)

    async def get(self) -> Message:
        """Dequeue the next message, waiting while the queue is empty."""
        message = await self.queue.get()
        return message

    async def join(self) -> None:
        """Block until every enqueued message has been marked done."""
        await self.queue.join()

    def task_done(self) -> None:
        """Mark one previously dequeued message as processed."""
        self.queue.task_done()

    def empty(self) -> bool:
        """Return True when no messages are waiting."""
        return self.queue.empty()

    def full(self) -> bool:
        """Return True when the queue has reached maxsize."""
        return self.queue.full()

    def qsize(self) -> int:
        """Return the number of messages currently queued."""
        return self.queue.qsize()

# Processor interface
class Processor:
    """Interface for a pipeline stage: consume from input_queue, emit to output_queue."""

    async def process(self, input_queue: MessageQueue, output_queue: MessageQueue) -> None:
        """No-op base implementation; concrete processors override this."""
        return None



# Streaming pipeline
class StreamPipeline:
    """Chain of Processors wired together with MessageQueues.

    queues[0] receives raw input items, each processor reads queues[i] and
    writes queues[i+1], and results are consumed from queues[-1].
    """

    def __init__(self, processors: List[Processor]):
        self.processors = processors
        # Initial queue set; process() rebuilds them so one pipeline
        # instance can safely be run more than once.
        self.queues = [MessageQueue() for _ in range(len(processors) + 1)]

    async def process(self, input_generator: AsyncGenerator[Any, None]) -> AsyncGenerator[Any, None]:
        """Run the pipeline over input_generator and yield processed items.

        Yields:
            The content of each result message from the final queue.

        Raises:
            asyncio.CancelledError: propagated after cancelling worker tasks.
            Exception: any unexpected consumer-side error, after cleanup.
        """
        # BUG FIX: rebuild the queues for every run. The previous
        # implementation reused the __init__-time queues, so a second call
        # on the same instance could see stale messages from the first run.
        self.queues = [MessageQueue() for _ in range(len(self.processors) + 1)]

        # One worker task per processor, wired between adjacent queues.
        tasks = [
            asyncio.create_task(processor.process(self.queues[i], self.queues[i + 1]))
            for i, processor in enumerate(self.processors)
        ]

        async def feed_input():
            # Pump the input generator into the first queue.
            try:
                async for item in input_generator:
                    await self.queues[0].put(Message(item))
                # Sentinel: normal end of input.
                await self.queues[0].put(Message(None, {"end": True}))
            except Exception as e:
                logger.error(f"Error feeding input: {e}", exc_info=True)
                await self.queues[0].put(Message(None, {"error": True, "message": str(e)}))
                # BUG FIX: follow the error with an end sentinel; otherwise the
                # consumer loop below waits forever and the pipeline deadlocks.
                await self.queues[0].put(Message(None, {"end": True}))

        input_task = asyncio.create_task(feed_input())

        # Consume results from the final queue until the end sentinel arrives.
        try:
            while True:
                message = await self.queues[-1].get()
                self.queues[-1].task_done()

                # End sentinel: stop consuming.
                if message.content is None and message.metadata.get("end", False):
                    break

                # Error signal: log it but keep draining the pipeline.
                if message.metadata.get("error", False):
                    logger.error(f"Error in pipeline: {message.content}")
                    if message.content is not None:  # only yield when there is an error payload
                        yield message.content
                    continue

                # Normal result.
                yield message.content
        except asyncio.CancelledError:
            logger.info("Stream pipeline cancelled")
            # Cancel all worker tasks before propagating.
            for task in tasks + [input_task]:
                task.cancel()
            raise
        except Exception as e:
            logger.error(f"Unexpected error in stream pipeline: {e}", exc_info=True)
            # Cancel all worker tasks before propagating.
            for task in tasks + [input_task]:
                task.cancel()
            raise
        finally:
            # Wait for every task to finish or be cancelled; swallow their errors
            # here because the relevant one has already been raised or logged.
            await asyncio.gather(*tasks, input_task, return_exceptions=True)

# Model processor class
class ModelProcessor(Processor):
    """Pipeline stage that dispatches messages to a concrete model backend.

    The backend is chosen by substring-matching the model name:
    "ollama" -> text generation, "whisper" -> speech recognition,
    "chattts" -> speech synthesis; anything else gets a placeholder result.
    """

    def __init__(self, model_name: str, input_type: str, params: Optional[Dict[str, Any]] = None):
        self.model_name = model_name
        self.input_type = input_type
        self.params = params or {}
        self.model_instance = None  # reserved; never assigned elsewhere in this class
        self.app_state = None
        
    async def _get_app_state(self):
        """Fetch the application state object that holds the loaded models."""
        # Imported lazily to avoid a circular import.
        from app.app_main import get_app_state
        # Re-fetch on every call so we always see the latest state.
        self.app_state = await get_app_state()
        return self.app_state
        
    async def process(self, input_queue: MessageQueue, output_queue: MessageQueue) -> None:
        """Consume messages from input_queue, run the model, and emit results.

        Control messages: content None with metadata {"end": True} terminates
        the loop (and is forwarded downstream); metadata {"error": True} is
        forwarded unchanged. Per-message failures emit an error signal and
        the loop keeps consuming.
        """
        logger.info(f"处理模型: {self.model_name}, 输入类型: {self.input_type}")
        try:
            # Application state contains the already-loaded model instances.
            app_state = await self._get_app_state()
            
            # Decide between streaming and one-shot handling from the input type.
            is_streaming = self.input_type in ["audio_stream", "video_stream", "text_stream"]
            
            while True:
                message = await input_queue.get()
                
                # End-of-stream sentinel: forward it downstream and stop.
                if message.content is None and message.metadata.get("end", False):
                    logger.info(f"模型处理器 {self.model_name} 收到结束信号")
                    input_queue.task_done()
                    await output_queue.put(Message(None, {"end": True}))
                    break
                
                # Error signal from upstream: log it and pass it through unchanged.
                if message.metadata.get("error", False):
                    logger.error(f"模型处理器 {self.model_name} 收到错误信号: {message.metadata.get('message')}")
                    input_queue.task_done()
                    await output_queue.put(message)  # forward the error signal
                    continue
                
                try:
                    # Normal data message: dispatch to the matching backend.
                    logger.debug(f"模型处理器 {self.model_name} 处理输入数据")
                    
                    # Substring match on the model name selects the handler.
                    if "ollama" in self.model_name.lower():
                        await self._process_ollama(message, output_queue, app_state, is_streaming)
                    elif "whisper" in self.model_name.lower():
                        await self._process_whisper(message, output_queue, app_state, is_streaming)
                    elif "chattts" in self.model_name.lower():
                        await self._process_chattts(message, output_queue, app_state, is_streaming)
                    else:
                        # Fallback: echo-style placeholder result.
                        result = f"处理结果: {message.content}"
                        await output_queue.put(Message(result, message.metadata))
                    
                except Exception as e:
                    # Per-message failure: emit an error signal but keep consuming.
                    logger.error(f"模型处理器 {self.model_name} 处理数据时出错: {e}", exc_info=True)
                    await output_queue.put(Message(None, {"error": True, "message": str(e)}))
                
                input_queue.task_done()
        
        except asyncio.CancelledError:
            logger.info(f"模型处理器 {self.model_name} 被取消")
            raise
        except Exception as e:
            # Fatal failure outside the per-message loop: signal downstream, re-raise.
            # NOTE(review): no {"end": True} follows this error signal, so a consumer
            # waiting for the end sentinel may block — confirm against the pipeline.
            logger.error(f"模型处理器 {self.model_name} 发生意外错误: {e}", exc_info=True)
            await output_queue.put(Message(None, {"error": True, "message": str(e)}))
            raise
            
    async def _process_ollama(self, message: Message, output_queue: MessageQueue, app_state: Any, is_streaming: bool):
        """Handle an Ollama text-generation request (streaming or one-shot)."""
        try:
            # The Ollama client is created elsewhere and kept on the app state.
            ollama_client = app_state.ollama_client
            
            # Generation options; callers may override via params["options"].
            options = self.params.get("options", {
                "temperature": 0.7,
                "num_predict": 1024,
                "top_p": 0.9
            })
            
            # Input text prompt.
            prompt = message.content
            
            if is_streaming:
                # Streaming: emit each non-empty response fragment as it arrives.
                async for chunk in await ollama_client.generate(
                    model=self.model_name,
                    prompt=prompt,
                    options=options,
                    stream=True
                ):
                    response_text = chunk.get("response", "")
                    if response_text:
                        await output_queue.put(Message(response_text, message.metadata))
            else:
                # One-shot: emit the full response as a single message.
                response = await ollama_client.generate(
                    model=self.model_name,
                    prompt=prompt,
                    options=options,
                    stream=False
                )
                result = response.get("response", "无结果")
                await output_queue.put(Message(result, message.metadata))
        except Exception as e:
            logger.error(f"Ollama处理错误: {e}", exc_info=True)
            raise
    
    async def _process_whisper(self, message: Message, output_queue: MessageQueue, app_state: Any, is_streaming: bool):
        """Handle a Whisper speech-recognition request (streaming or one-shot)."""
        try:
            import whisper
            import numpy as np
            
            # The Whisper model is pre-loaded onto the app state.
            model = app_state.whisper_model
            
            # Raw audio: bytes, a numpy array, or a file path.
            audio_data = message.content
            
            if is_streaming:
                # Streaming: transcribe the audio in fixed-size chunks.
                if isinstance(audio_data, bytes):
                    # Decode the byte buffer to a numpy array
                    # (assumes 16 kHz, 16-bit, mono — TODO confirm with callers).
                    import io
                    import soundfile as sf
                    with io.BytesIO(audio_data) as buf:
                        audio_array, _ = sf.read(buf)
                else:
                    # Already a numpy array, otherwise treat it as a file path.
                    audio_array = audio_data if isinstance(audio_data, np.ndarray) else whisper.load_audio(audio_data)
                
                # Process in chunks.
                chunk_size = 16000  # one second of audio at 16 kHz
                for i in range(0, len(audio_array), chunk_size):
                    chunk = audio_array[i:i+chunk_size]
                    chunk = whisper.pad_or_trim(chunk)
                    mel = whisper.log_mel_spectrogram(chunk, n_mels=model.dims.n_mels).to(model.device)
                    
                    # Decode this chunk.
                    # NOTE(review): params["batch_size"] is used as the beam size here —
                    # confirm the parameter name matches the callers' intent.
                    options = whisper.DecodingOptions(
                        language=self.params.get("language"),
                        beam_size=self.params.get("batch_size", 5)
                    )
                    result = whisper.decode(model, mel, options)
                    
                    if result.text.strip():
                        await output_queue.put(Message(result.text, message.metadata))
            else:
                # One-shot: transcribe the whole audio at once.
                if isinstance(audio_data, bytes):
                    # Spill the bytes to a temp file so whisper.load_audio can read it.
                    import tempfile
                    with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as temp_file:
                        temp_file.write(audio_data)
                        temp_path = temp_file.name
                    
                    audio = whisper.load_audio(temp_path)
                    import os
                    os.unlink(temp_path)  # remove the temp file
                else:
                    # Assume a file path.
                    audio = whisper.load_audio(audio_data)
                
                audio = whisper.pad_or_trim(audio)
                mel = whisper.log_mel_spectrogram(audio, n_mels=model.dims.n_mels).to(model.device)
                
                options = whisper.DecodingOptions(
                    language=self.params.get("language"),
                    beam_size=self.params.get("batch_size", 5)
                )
                result = whisper.decode(model, mel, options)
                
                await output_queue.put(Message(result.text, message.metadata))
        except Exception as e:
            logger.error(f"Whisper处理错误: {e}", exc_info=True)
            raise
    
    async def _process_chattts(self, message: Message, output_queue: MessageQueue, app_state: Any, is_streaming: bool):
        """Handle a ChatTTS text-to-speech request (streaming or one-shot)."""
        try:
            import numpy as np
            from io import BytesIO
            import soundfile as sf
            from config import MODEL_CONFIG
            
            # The ChatTTS model is pre-loaded onto the app state.
            chattts_model = app_state.chattts_model
            
            # Text to synthesize.
            input_text = message.content
            
            # Inference parameters; a random speaker embedding is sampled per
            # call, so the voice differs between requests.
            rand_spk = chattts_model.sample_random_speaker()
            params_infer_code = chattts_model.InferCodeParams(
                spk_emb = rand_spk,
                temperature = self.params.get("temperature", 0.3),
                top_P = self.params.get("top_p", 0.7),
                top_K = self.params.get("top_k", 20),
            )
            
            # Synthesize the audio (single text in, first waveform out).
            wavs = chattts_model.infer(
                [input_text],
                params_infer_code=params_infer_code,
            )[0]
            
            # Convert to float32 samples.
            audio_data = np.array(wavs, dtype=np.float32)
            
            if is_streaming:
                # Emit the audio as ~0.5 second WAV chunks.
                chunk_size = int(MODEL_CONFIG.chattts_sample_rate * 0.5)  # samples per 0.5 s
                total_samples = len(audio_data)
                
                # Slice and encode each chunk.
                for i in range(0, total_samples, chunk_size):
                    end_idx = min(i + chunk_size, total_samples)
                    chunk_data = audio_data[i:end_idx]
                    
                    # Encode this chunk as WAV bytes.
                    buffer = BytesIO()
                    sf.write(buffer, chunk_data, MODEL_CONFIG.chattts_sample_rate, format='WAV')
                    buffer.seek(0)
                    audio_bytes = buffer.read()
                    
                    await output_queue.put(Message(audio_bytes, message.metadata))
                    await asyncio.sleep(0.1)  # small delay to simulate streaming
            else:
                # One-shot: encode the whole audio as a single WAV payload.
                buffer = BytesIO()
                sf.write(buffer, audio_data, MODEL_CONFIG.chattts_sample_rate, format='WAV')
                buffer.seek(0)
                audio_bytes = buffer.read()
                
                await output_queue.put(Message(audio_bytes, message.metadata))
        except Exception as e:
            logger.error(f"ChatTTS处理错误: {e}", exc_info=True)
            raise

# Factory function for model processors
def create_model_processor(model_name: str, input_type: str, params: Optional[Dict[str, Any]] = None) -> ModelProcessor:
    """Build a ModelProcessor for the given model name and input type."""
    processor = ModelProcessor(model_name, input_type, params)
    return processor

# Create a streaming pipeline
def create_stream_pipeline(model_name: str, input_type: str, params: Optional[Dict[str, Any]] = None) -> StreamPipeline:
    """Build a single-stage StreamPipeline wrapping one ModelProcessor."""
    return StreamPipeline([create_model_processor(model_name, input_type, params)])

# ModelHandler class compatible with the legacy API
class ModelHandler:
    """Legacy-API wrapper: delegates to a single-processor StreamPipeline."""

    def __init__(self, model_name: str, input_type: str):
        self.model_name = model_name
        self.input_type = input_type
        self.pipeline = create_stream_pipeline(model_name, input_type)

    async def process(self, data: AsyncGenerator[Any, None]) -> AsyncGenerator[Any, None]:
        """Stream every pipeline result produced for the given async input."""
        results = self.pipeline.process(data)
        async for chunk in results:
            yield chunk


# Ollama async API
async def stream_ollama_response(async_client, model, prompt, options):
    """Yield newline-delimited JSON strings from a streaming Ollama generate call.

    Non-dict chunks are logged and skipped; empty response fragments are dropped.
    """
    stream = await async_client.generate(
        model=model,
        prompt=prompt,
        options=options,
        stream=True
    )
    async for chunk in stream:
        if not isinstance(chunk, dict):
            logger.error(f"Unexpected chunk type: {type(chunk)}")
            continue
        text = chunk.get('response', '')
        if text:
            yield json.dumps({"response": text}) + "\n"