from flask import request
import numpy as np
from .baseGraphNode import  *
from .httpGraphNode import HttpGraphNode
from .sessionInfo import SessionInfo
import soundfile as sf
import os
from datetime import datetime
from typing import Optional, Dict, Any
from ..execute.baseExeHis import BaseExeHis
import json
# from funasr import AutoModel

class STTGraphNode(BaseGraphNode):
    """Streaming speech-to-text graph node backed by FunASR.

    Pulls raw int16 PCM audio from a predecessor node, accumulates it
    per session, runs streaming recognition, and appends newly
    recognized text to ``session_info.out[self.id]['msg']``.
    """

    def __init__(self, id:str, data:dict, graphNodeManager, random_key:str):
        super().__init__(id, data, graphNodeManager, random_key)
        # Recognition configuration (all keys optional in the node data).
        self.algorithm = data['data'].get('algorithm', 'paraformer')  # default: paraformer
        self.model_size = data['data'].get('model', 'base')
        self.language = data['data'].get('language', 'zh')
        self.confidence_threshold = data['data'].get('confidenceThreshold', 0.7)
        self.enable_punctuation = data['data'].get('enablePunctuation', True)

        # Model and per-session streaming recognition state.
        self.model = None
        self.stream_contexts = {}  # session_id -> FunASR streaming cache
        self.initialize_model()

        # Per-session audio buffering and last recognition results.
        self.audio_buffers = {}  # session_id -> list of float samples awaiting recognition
        self.last_results = {}   # session_id -> last recognition result
        self.buffer_size = 300   # minimum buffered samples before recognition runs

        # Per-session audio/text bookkeeping for clip storage.
        self.audio_history = {}         # session_id -> list of audio clips
        self.last_pulse_time = {}       # session_id -> last pulse time
        self.current_audio_buffer = {}  # session_id -> current audio buffer
        self.text_buffer = {}           # session_id -> current text buffer

        # Directory for persisted audio clips.
        # exist_ok avoids the check-then-create race of the old
        # os.path.exists() guard.
        self.audio_dir = os.path.join(os.getcwd(), 'audio_storage')
        os.makedirs(self.audio_dir, exist_ok=True)

    def initialize_model(self):
        """Initialize the FunASR model (currently a no-op: load is disabled)."""
        try:
            # NOTE(review): both language branches previously selected the
            # same model; a non-zh streaming model is not configured yet.
            model_name = "paraformer-zh-streaming"

            # Load the model (disabled — see the commented funasr import).
            # self.model = AutoModel(model=model_name)
            print(f"Successfully loaded FunASR model: {model_name}")

        except Exception as e:
            print(f"Error initializing FunASR model: {str(e)}")
            import traceback
            traceback.print_exc()

    def process_audio_chunk(self, session_id: str, audio_chunk: np.ndarray) -> Optional[str]:
        """Accumulate *audio_chunk* for *session_id* and run streaming ASR.

        Returns the newly recognized text, or None when there is nothing
        new yet (buffer below threshold, result unchanged, model missing,
        or a recognition error occurred).
        """
        try:
            # Initialize or fetch this session's sample buffer and append
            # the incoming chunk.
            buffer = self.audio_buffers.setdefault(session_id, [])
            buffer.extend(audio_chunk.tolist())

            # Wait until enough audio has accumulated.
            if len(buffer) < self.buffer_size:
                return None

            if self.model is None:
                # Model failed to load (or loading is disabled).  Drop the
                # buffered samples so the buffer cannot grow without bound.
                self.audio_buffers[session_id] = []
                return None

            # BUGFIX: recognize the whole accumulated buffer — the old code
            # converted only the latest chunk, silently discarding every
            # earlier buffered sample.
            audio_array = np.array(buffer, dtype=np.float32)

            # FunASR streaming parameters: chunk_size [0, 10, 5] ~ 600 ms.
            chunk_size = [0, 10, 5]
            encoder_chunk_look_back = 4
            decoder_chunk_look_back = 1

            # Initialize or fetch the per-session streaming cache.
            cache = self.stream_contexts.setdefault(session_id, {})

            # Run streaming recognition.
            res = self.model.generate(
                input=audio_array,
                cache=cache,
                is_final=False,
                chunk_size=chunk_size,
                encoder_chunk_look_back=encoder_chunk_look_back,
                decoder_chunk_look_back=decoder_chunk_look_back
            )

            # The streaming cache carries cross-chunk context, so consumed
            # samples can be discarded outright.  (The old "overlap" slice
            # ran after the buffer was already cleared and so was dead code.)
            self.audio_buffers[session_id] = []

            self.last_results.setdefault(session_id, "")
            # Only report when the recognizer produced something new.
            if res and res != self.last_results[session_id]:
                self.last_results[session_id] = res
                return res

        except Exception as e:
            print(f"Error processing audio chunk: {str(e)}")
            import traceback
            traceback.print_exc()

        return None

    def execute(self, session_info: SessionInfo=None, exe_his: BaseExeHis=None, trigger=None, running_id=None):
        """Fetch audio from a predecessor node, recognize it, and forward text."""
        # Pull audio from the first matching input handle; both handles
        # resolve identically, only the key differs.
        audio_data = None
        for input_key in ('cached-audio', 'last-cached-audio'):
            if input_key in self.dict['in']:
                audio_form = self.dict['in'][input_key]
                prenode = self.graphNodeManager.nodesByIdDict[audio_form['nodeId']]
                audio_data = prenode.get_newest_output_in_session(session_info, audio_form['handle'])
                break

        if audio_data is None:
            return

        try:
            # Raw bytes are int16 PCM; normalize to float32 in [-1, 1).
            # assumes mono 16-bit little-endian PCM — TODO confirm against producer node
            audio_bytes = audio_data['data']
            audio_array = np.frombuffer(audio_bytes, dtype=np.int16)
            audio_float = audio_array.astype(np.float32) / 32768.0

            # Streaming recognition; None means nothing new yet.
            text = self.process_audio_chunk(session_info.id, audio_float)
            if text is not None:
                # Append the recognized text to this node's session output.
                with session_info.out_write_lock:
                    if session_info.out.get(self.id,None) is None: session_info.out[self.id] = {}
                    if 'msg' not in session_info.out[self.id]: session_info.out[self.id]['msg'] = []
                    session_info.out[self.id]['msg'].append(text)

                # Trigger downstream nodes.
                self.executed_num += 1
                self.flow_next(session_info, trigger=trigger, running_id=running_id)

        except Exception as e:
            print(f"Error processing audio clip: {str(e)}")
            import traceback
            traceback.print_exc()

    def stop(self):
        """Flush any pending streaming state and release all resources."""
        # Finalize every in-flight streaming session.
        for session_id in list(self.stream_contexts.keys()):
            try:
                # Run a final pass over whatever audio is still buffered.
                if self.model is not None and self.audio_buffers.get(session_id):
                    audio_array = np.array(self.audio_buffers[session_id], dtype=np.float32)
                    final_text = self.model.generate(
                        input=audio_array,
                        cache=self.stream_contexts[session_id],
                        is_final=True
                    )
                    # if final_text and session_id in self.dict['out']:
                    #     self.dict['out'][session_id]['msg'].append(final_text)
            except Exception as e:
                print(f"Error finalizing stream for session {session_id}: {str(e)}")

        # Release per-session recognition state.
        self.stream_contexts.clear()
        self.text_buffer.clear()
        self.audio_buffers.clear()
        self.last_results.clear()

        # Best-effort removal of persisted audio clips; only swallow
        # filesystem errors, not arbitrary exceptions.
        for session_id in self.audio_history:
            for audio_clip in self.audio_history[session_id]:
                try:
                    os.remove(audio_clip['filepath'])
                except OSError:
                    pass
        self.audio_history.clear()
        self.last_pulse_time.clear()
        self.current_audio_buffer.clear()

        # Release the model.
        if self.model is not None:
            del self.model
            self.model = None
