from flask import request
from flask_socketio import SocketIO, emit, join_room, leave_room
from utils.randomUtils import generate_random_by_time
import threading
import time
import numpy as np
from .baseGraphNode import BaseGraphNode
from .httpGraphNode import HttpGraphNode
from .sessionInfo import SessionInfo
import wave
import os
from datetime import datetime
from typing import Optional, Dict, Any
import torch
import torchaudio
from transformers import WhisperProcessor, WhisperForConditionalGeneration, Wav2Vec2Processor, Wav2Vec2ForCTC
import speech_recognition as sr
import json
import noisereduce as nr
from scipy import signal

class STTGraphNode(BaseGraphNode):
    """Graph node performing speech-to-text (STT) on audio from a predecessor node.

    Incoming audio is raw 16-bit PCM (assumed 16 kHz mono — TODO confirm with the
    upstream audio node).  Each clip is pre-processed (high-pass filter, optional
    noise reduction, energy-based VAD) and transcribed with either Wav2Vec 2.0
    (chunked streaming) or Whisper (whole-clip).  Recognized text is appended to
    ``self.dict['out'][session_id]['msg']`` and ``flow_next`` is triggered.
    """

    # Supported recognition back-ends.
    ALGORITHMS = {
        'whisper': 'Whisper',
        'wav2vec': 'Wav2Vec 2.0'
    }

    # Supported model sizes (Whisper checkpoints).
    MODEL_SIZES = {
        'tiny': 'tiny',
        'base': 'base',
        'small': 'small',
        'medium': 'medium',
        'large': 'large'
    }

    # Supported languages.
    LANGUAGES = {
        'en': 'English',
        'zh': 'Chinese'
    }

    def __init__(self, id: str, data: dict, graphNodeManager, random_key: str):
        super().__init__(id, data, graphNodeManager, random_key)
        self.algorithm = data['data'].get('algorithm', 'wav2vec')  # default back-end: Wav2Vec 2.0
        self.model_size = data['data'].get('model', 'base')
        self.language = data['data'].get('language', 'en')
        self.confidence_threshold = data['data'].get('confidenceThreshold', 0.7)
        self.enable_punctuation = data['data'].get('enablePunctuation', True)
        self.enable_word_timestamps = data['data'].get('enableWordTimestamps', False)

        # Audio pre-processing parameters.
        self.noise_reduce = data['data'].get('noiseReduce', True)  # enable noise reduction
        self.vad_threshold = data['data'].get('vadThreshold', 0.5)  # VAD energy factor
        self.min_silence_duration = data['data'].get('minSilenceDuration', 0.3)  # minimum silence (seconds)
        self.noise_sample_duration = data['data'].get('noiseSampleDuration', 0.5)  # noise sample length (seconds)

        # Model state.
        self.model = None
        self.processor = None
        self.recognizer = None
        self.stream_contexts = {}  # session_id -> stream context (deepspeech path only)
        self.initialize_model()

        # Audio buffering and recognition state.
        self.audio_buffers = {}  # session_id -> list of buffered audio samples
        self.last_results = {}  # session_id -> last recognition result
        self.buffer_size = 4000  # samples buffered before running recognition
        self.noise_profile = {}  # session_id -> sampled noise profile
        self.is_speech = {}  # session_id -> whether speech is currently active

        # Audio persistence state.
        self.audio_history = {}  # session_id -> list of stored audio clips
        self.last_pulse_time = {}  # session_id -> last pulse time
        self.current_audio_buffer = {}  # session_id -> current raw audio buffer
        self.text_buffer = {}  # session_id -> current partial-text buffer

        # Directory for persisted audio clips.  exist_ok avoids the race
        # between an existence check and the actual creation.
        self.audio_dir = os.path.join(os.getcwd(), 'audio_storage')
        os.makedirs(self.audio_dir, exist_ok=True)

    def initialize_model(self):
        """Load the recognition model selected by ``self.algorithm``."""
        if self.algorithm == 'wav2vec':
            # Pick a pretrained checkpoint matching the configured language.
            if self.language == 'zh':
                model_name = "jonatasgrosman/wav2vec2-large-xlsr-53-chinese-zh-cn"
            else:
                model_name = "facebook/wav2vec2-base-960h"

            # Load model and processor.
            self.processor = Wav2Vec2Processor.from_pretrained(model_name)
            self.model = Wav2Vec2ForCTC.from_pretrained(model_name)

            # Use the GPU when available.
            if torch.cuda.is_available():
                self.model = self.model.to('cuda')

            # Inference only: disable dropout etc.
            self.model.eval()

        elif self.algorithm == 'whisper':
            model_name = f"openai/whisper-{self.model_size}"
            self.processor = WhisperProcessor.from_pretrained(model_name)
            self.model = WhisperForConditionalGeneration.from_pretrained(model_name)
            if torch.cuda.is_available():
                self.model = self.model.to('cuda')
            # Evaluation mode for consistency with the wav2vec path.
            self.model.eval()

    def create_stream_context(self, session_id: str):
        """Create a streaming recognition context for a session.

        NOTE(review): only the 'deepspeech' algorithm uses stream contexts,
        but 'deepspeech' is not listed in ALGORITHMS and ``initialize_model``
        never loads a model exposing ``createStream`` — this path looks
        unreachable; kept for interface compatibility.
        """
        if self.algorithm == 'deepspeech':
            self.stream_contexts[session_id] = self.model.createStream()
            self.text_buffer[session_id] = ""

    def preprocess_audio(self, audio_data: np.ndarray, session_id: str) -> np.ndarray:
        """Denoise and gate an audio chunk before recognition.

        Applies a high-pass filter, optional spectral noise reduction, peak
        normalization and a simple energy-based voice activity detector.
        Returns an empty array when the chunk is judged to be silence, the
        filtered chunk otherwise.  On any processing error the raw input is
        returned unchanged (best-effort).
        """
        try:
            # 1. High-pass filter to remove low-frequency rumble (< 80 Hz).
            sos = signal.butter(10, 80, 'hp', fs=16000, output='sos')
            audio_filtered = signal.sosfilt(sos, audio_data)

            # 2. Noise reduction.
            if self.noise_reduce:
                # Without a stored profile, sample the start of the audio
                # as this session's noise estimate.
                if session_id not in self.noise_profile:
                    noise_samples = int(self.noise_sample_duration * 16000)
                    if len(audio_filtered) >= noise_samples:
                        self.noise_profile[session_id] = audio_filtered[:noise_samples]

                # Apply noise reduction.  The sampled profile is passed as
                # y_noise; previously it was stored but never actually used.
                if session_id in self.noise_profile:
                    audio_filtered = nr.reduce_noise(
                        y=audio_filtered,
                        sr=16000,
                        y_noise=self.noise_profile[session_id],
                        prop_decrease=0.8,
                        n_jobs=1
                    )

            # 3. Peak normalization.  Guard against an all-zero chunk to
            # avoid a divide-by-zero producing NaNs.
            peak = np.max(np.abs(audio_filtered))
            if peak > 0:
                audio_filtered = audio_filtered / peak

            # 4. Energy-based voice activity detection (VAD).
            frame_length = int(0.025 * 16000)  # 25 ms frames
            energy = np.array([
                np.sum(np.abs(audio_filtered[i:i + frame_length]) ** 2)
                for i in range(0, len(audio_filtered), frame_length)
            ])
            energy_threshold = np.mean(energy) * self.vad_threshold
            is_speech_frame = energy > energy_threshold

            # Update the per-session speech state.
            if np.any(is_speech_frame):
                self.is_speech[session_id] = True
            else:
                # Speech -> silence transition: flush this chunk so the end
                # of the utterance is still recognized.
                if self.is_speech.get(session_id, False):
                    self.is_speech[session_id] = False
                    return audio_filtered

            return audio_filtered if self.is_speech.get(session_id, False) else np.array([])

        except Exception as e:
            print(f"Error in audio preprocessing: {str(e)}")
            import traceback
            traceback.print_exc()
            return audio_data

    def process_audio_chunk(self, session_id: str, audio_chunk: np.ndarray) -> Optional[str]:
        """Buffer a chunk and run Wav2Vec 2.0 recognition when enough audio accrued.

        Returns the transcription when it differs from the session's previous
        result, otherwise ``None``.
        """
        try:
            # Pre-process the audio.
            processed_audio = self.preprocess_audio(audio_chunk, session_id)

            # Empty result means the chunk was judged to be silence.
            if len(processed_audio) == 0:
                return None

            # Accumulate samples in the per-session buffer.
            buffer = self.audio_buffers.setdefault(session_id, [])
            buffer.extend(processed_audio.tolist())

            # Run recognition once the buffer is large enough.
            if len(buffer) >= self.buffer_size:
                audio_array = np.array(buffer, dtype=np.float32)

                # Prepare model input.
                input_values = self.processor(
                    audio_array,
                    sampling_rate=16000,
                    padding=True,
                    return_tensors="pt"
                ).input_values

                if torch.cuda.is_available():
                    input_values = input_values.to('cuda')

                # Run inference.
                with torch.no_grad():
                    logits = self.model(input_values).logits

                # Greedy CTC decode.
                predicted_ids = torch.argmax(logits, dim=-1)
                transcription = self.processor.batch_decode(predicted_ids)[0]

                # Keep a tail of samples for the next pass so words are not
                # cut at buffer boundaries.  This trim must run regardless of
                # whether the transcription changed; previously it only ran
                # on an unchanged result, letting the buffer grow unbounded
                # during active speech.
                overlap = 1000  # samples retained between recognition passes
                self.audio_buffers[session_id] = buffer[-overlap:]

                # Report only when the text actually changed.
                if transcription != self.last_results.get(session_id, ""):
                    self.last_results[session_id] = transcription
                    return transcription

        except Exception as e:
            print(f"Error processing audio chunk: {str(e)}")
            import traceback
            traceback.print_exc()

        return None

    def process_stream(self, session_id: str, audio_data: np.ndarray) -> Optional[str]:
        """Feed audio into a streaming context and return any new partial text.

        NOTE(review): deepspeech-only path; see ``create_stream_context``.
        """
        if self.algorithm != 'deepspeech':
            return None

        try:
            # Make sure a stream context exists for this session.
            if session_id not in self.stream_contexts:
                self.create_stream_context(session_id)

            # Feed the audio into the stream.
            self.stream_contexts[session_id].feedAudioContent(audio_data)

            # Fetch the intermediate result.
            text = self.stream_contexts[session_id].intermediateDecode()

            # Report only when the partial text changed.
            if text != self.text_buffer.get(session_id, ""):
                self.text_buffer[session_id] = text
                return text

        except Exception as e:
            print(f"Error in stream processing: {str(e)}")

        return None

    def finalize_stream(self, session_id: str) -> Optional[str]:
        """Finish a streaming recognition and return the final text, if any."""
        if session_id not in self.stream_contexts:
            return None

        try:
            # Obtain the final transcription.
            final_text = self.stream_contexts[session_id].finishStream()

            # Release the stream context.
            del self.stream_contexts[session_id]
            self.text_buffer[session_id] = ""

            return final_text

        except Exception as e:
            print(f"Error finalizing stream: {str(e)}")
            return None

    def _append_result(self, session_info: SessionInfo, text: str):
        """Append recognized text to the session output and trigger flow_next."""
        out = self.dict['out'].setdefault(session_info.id, {})
        out.setdefault('msg', []).append(text)
        self.executed_num += 1
        self.flow_next(session_info)

    def execute(self, session_info: SessionInfo = None):
        """Fetch the newest audio clip from the predecessor node and run STT."""
        # Pull audio from whichever input handle is wired up, in priority order.
        audio_data = None
        for handle_key in ('cached-audio', 'last-cached-audio'):
            if handle_key in self.dict['in']:
                audio_form = self.dict['in'][handle_key]
                prenode = self.graphNodeManager.nodesByIdDict[audio_form['nodeId']]
                audio_data = prenode.get_newest_output_in_session(session_info, audio_form['handle'])
                break

        if audio_data is None:
            return

        try:
            # Incoming audio is raw 16-bit PCM; convert to float32 in [-1, 1).
            audio_bytes = audio_data['data']
            audio_array = np.frombuffer(audio_bytes, dtype=np.int16)
            audio_float = audio_array.astype(np.float32) / 32768.0

            if self.algorithm == 'wav2vec':
                # Chunked streaming recognition.
                text = self.process_audio_chunk(session_info.id, audio_float)
                if text is not None:
                    self._append_result(session_info, text)

            elif self.algorithm == 'deepspeech':
                # NOTE(review): 'deepspeech' is not in ALGORITHMS and no such
                # model is ever loaded — this branch appears unreachable but
                # is preserved for compatibility.
                text = self.process_stream(session_info.id, audio_float)
                if text is not None:
                    self._append_result(session_info, text)

            else:
                # Whisper: non-streaming, transcribe the whole clip at once.
                result = self.transcribe_audio(audio_float, 16000)
                if result['error'] is None and result['confidence'] >= self.confidence_threshold:
                    self._append_result(session_info, result['text'])

        except Exception as e:
            print(f"Error processing audio clip: {str(e)}")
            import traceback
            traceback.print_exc()

    def stop(self):
        """Finalize open streams, delete stored audio and release model resources."""
        # Flush any in-flight streaming recognitions into the session output.
        for session_id in list(self.stream_contexts.keys()):
            final_text = self.finalize_stream(session_id)
            if final_text and session_id in self.dict['out']:
                # setdefault guards against a session whose output dict was
                # created without a 'msg' list yet.
                self.dict['out'][session_id].setdefault('msg', []).append(final_text)

        # Clear per-session state.
        self.stream_contexts.clear()
        self.text_buffer.clear()
        self.noise_profile.clear()  # sampled noise profiles
        self.is_speech.clear()  # VAD speech state

        # Best-effort removal of persisted audio clips.
        for session_id in self.audio_history:
            for audio_clip in self.audio_history[session_id]:
                try:
                    os.remove(audio_clip['filepath'])
                except OSError:
                    # File may already be gone; cleanup is best-effort.
                    pass
        self.audio_history.clear()
        self.last_pulse_time.clear()
        self.current_audio_buffer.clear()

        # Release model resources (move off the GPU first so CUDA memory frees).
        if self.model is not None:
            if torch.cuda.is_available():
                self.model = self.model.cpu()
            del self.model
            self.model = None
        if self.processor is not None:
            del self.processor
            self.processor = None
        if self.recognizer is not None:
            del self.recognizer
            self.recognizer = None

    def transcribe_audio(self, audio_data: np.ndarray, sample_rate: int) -> Dict[str, Any]:
        """Transcribe a full audio clip with Whisper (non-streaming).

        Returns a dict with 'text', 'confidence', 'word_timestamps' and
        'error' keys; on failure 'error' holds the exception message.
        """
        result = {
            'text': '',
            'confidence': 0.0,
            'word_timestamps': [],
            'error': None
        }

        try:
            if self.algorithm == 'whisper':
                # Convert the audio into the model's input features.
                input_features = self.processor(
                    audio_data,
                    sampling_rate=sample_rate,
                    return_tensors="pt"
                ).input_features

                if torch.cuda.is_available():
                    input_features = input_features.to('cuda')

                # Forward the configured language so Whisper does not have to
                # auto-detect it (previously self.language was ignored here).
                generate_kwargs = {}
                if self.language:
                    try:
                        generate_kwargs['forced_decoder_ids'] = \
                            self.processor.get_decoder_prompt_ids(
                                language=self.language, task="transcribe")
                    except (KeyError, ValueError):
                        # Unknown language code: fall back to auto-detection.
                        pass

                # Generate the transcription.
                predicted_ids = self.model.generate(input_features, **generate_kwargs)
                transcription = self.processor.batch_decode(predicted_ids, skip_special_tokens=True)[0]

                result['text'] = transcription
                # NOTE(review): confidence is hard-coded — generate() is not
                # asked for scores, so there is no real confidence estimate.
                result['confidence'] = 1.0

                if self.enable_word_timestamps:
                    # NOTE(review): this runs a second full generate pass and
                    # returns raw ids, not parsed timestamps — verify intent.
                    word_timestamps = self.model.generate(
                        input_features,
                        return_timestamps=True
                    )
                    result['word_timestamps'] = word_timestamps

        except Exception as e:
            result['error'] = str(e)

        return result