from utils.randomUtils import generate_random_by_time
import time
from .baseGraphNode import  *
from .sessionInfo import SessionInfo
import os
import dashscope
from dashscope.audio.asr import TranscriptionResult, TranslationRecognizerRealtime, TranslationRecognizerCallback, TranslationResult
from ..execute.baseExeHis import BaseExeHis
from collections import deque

class RemoteSTTCallback(TranslationRecognizerCallback):
    """DashScope realtime-recognizer callback that buffers transcription
    results on the owning node, keyed by session id."""

    def __init__(self, node, session_info: SessionInfo):
        """
        Args:
            node: owning RemoteSTTGraphNode; provides the
                ``recognition_instances`` and ``result_buffers`` dicts
                this callback writes into.
            session_info: session this callback serves.
        """
        super().__init__()
        self.node = node
        self.session_info = session_info

    def _release_recognizer(self) -> None:
        """Stop and drop this session's recognizer instance, if any.

        Best-effort: a failing ``stop()`` must not propagate out of the SDK
        callback thread, so the error is logged and swallowed. The dict entry
        is removed first so a failing stop cannot leave a stale instance behind.
        """
        instance = self.node.recognition_instances.pop(self.session_info.id, None)
        if instance is not None:
            try:
                instance.stop()
            except Exception as e:  # never raise from inside an SDK callback
                print(f'RecognitionCallback cleanup failed for session {self.session_info.id}: {e}')

    def on_open(self) -> None:
        print(f'RecognitionCallback open for session {self.session_info.id}.')

    def on_close(self) -> None:
        print(f'RecognitionCallback close for session {self.session_info.id}.')

    def on_complete(self) -> None:
        # Recognition finished normally: release this session's recognizer.
        self._release_recognizer()
        print(f'RecognitionCallback completed for session {self.session_info.id}.')

    def on_error(self, message) -> None:
        print(f'RecognitionCallback error for session {self.session_info.id}: {message.message}')
        self._release_recognizer()

    def on_event(self, request_id, transcription_result: TranscriptionResult, translation_result: TranslationResult, usage) -> None:
        """Buffer each (partial or sentence-final) transcription chunk.

        Results are appended to the node's per-session deque as
        ``{'content': str, 'sentence_end': bool}`` dicts; the node's
        background task drains them.
        """
        if transcription_result is None:
            return
        text = transcription_result.text
        sentence_end = transcription_result.is_sentence_end
        if text:
            # Lazily create a bounded per-session buffer; the oldest entries
            # are dropped once 100 results accumulate without being consumed.
            if self.session_info.id not in self.node.result_buffers:
                self.node.result_buffers[self.session_info.id] = deque(maxlen=100)
            self.node.result_buffers[self.session_info.id].append(
                {'content': text, 'sentence_end': sentence_end})

        print(f'RecognitionCallback sentence for session {self.session_info.id}: {text}')

class RemoteSTTGraphNode(BaseGraphNode):
    """Graph node that performs realtime speech-to-text through DashScope.

    Per session, :meth:`process_audio` lazily creates a streaming recognizer
    (whose :class:`RemoteSTTCallback` appends results to ``result_buffers``)
    plus a background task that drains the buffer into ``session_info.out``
    and triggers downstream nodes.
    """

    def __init__(self, id:str, data:dict, graphNodeManager, random_key:str):
        """Read node config, set the DashScope API key and init per-session state.

        Raises:
            ValueError: if no API key is configured on the node and the
                DASHSCOPE_API_KEY environment variable is not set.
        """
        super().__init__(id, data, graphNodeManager, random_key)
        self.api_key = data['data'].get('apiKey', None)
        # NOTE(review): self.model is never used -- process_audio hard-codes
        # "gummy-realtime-v1". Confirm which model is actually intended.
        self.model = 'paraformer-realtime-v2'
        self.sample_rate = 16  # kHz; multiplied by 1000 when the recognizer is built
        self.format = data['data'].get('format', 'pcm')
        self.semantic_punctuation_enabled = True

        # Explicit node config takes precedence over the environment variable.
        if self.api_key:
            dashscope.api_key = self.api_key
        elif 'DASHSCOPE_API_KEY' in os.environ:
            dashscope.api_key = os.environ['DASHSCOPE_API_KEY']
        else:
            raise ValueError("DashScope API key not provided")

        # session id -> live TranslationRecognizerRealtime
        self.recognition_instances = {}
        # session id -> deque of {'content': str, 'sentence_end': bool} (maxlen 100)
        self.result_buffers = {}
        # downstream node ids triggered only at sentence boundaries
        self.flow_next_nodes_sentence_end = []
        # session id -> cached execution-history object
        self.sessionID2exe_his = {}
        # reserved for background-task bookkeeping (currently only cleared in stop)
        self.background_tasks = {}

    def process_audio(self, session_info: SessionInfo, audio_data: bytes, trigger, running_id):
        """Stream one audio frame to this session's recognizer.

        Lazily (re)creates the recognizer and its result-draining background
        task when none exists yet or the previous recognizer has stopped.
        """
        recognizer = self.recognition_instances.get(session_info.id)
        # `_running` is a private SDK flag: a stopped recognizer cannot accept
        # further frames, so a fresh one must be built.
        if recognizer is None or not recognizer._running:
            print(f'Creating new recognition instance for session {session_info.id}')
            callback = RemoteSTTCallback(self, session_info)
            recognizer = TranslationRecognizerRealtime(
                model="gummy-realtime-v1",
                format=self.format,
                sample_rate=self.sample_rate * 1000,  # kHz -> Hz
                transcription_enabled=True,
                # semantic_punctuation_enabled=self.semantic_punctuation_enabled,
                callback=callback
            )
            self.recognition_instances[session_info.id] = recognizer
            recognizer.start()

            # NOTE(review): a new background task is spawned on every
            # recreation and earlier tasks never exit -- see
            # _process_results_background.
            session_info.ws.start_background_task(
                self._process_results_background, session_info, trigger, running_id)
        # Forward the audio frame to the live recognizer.
        recognizer.send_audio_frame(audio_data)

    def _process_results_background(self, session_info: SessionInfo, trigger, running_id):
        """Background task: drain buffered recognition results for a session.

        Every drained result is published under
        ``session_info.out[self.id]['msg']``; sentence-final results are
        additionally appended to ``'end-msg'`` and fire the sentence-end
        downstream nodes before any later partials are consumed.

        NOTE(review): this loop has no exit condition, so each task spins
        (with a 0.3 s sleep) for the lifetime of the process.
        """
        while True:
            buffer = self.result_buffers.get(session_info.id)
            if not buffer:
                time.sleep(0.3)  # avoid busy-waiting while no results arrive
                continue

            # Collect pending results, stopping early at a sentence boundary
            # so the sentence-end trigger fires before later partials.
            results = []
            while buffer:
                result = buffer.popleft()
                results.append(Msg(result['content'], 'user', 'text', end=result['sentence_end']))
                if result['sentence_end']:
                    with session_info.out_write_lock:
                        if session_info.out.get(self.id, None) is None:
                            session_info.out[self.id] = {}
                        if 'end-msg' not in session_info.out[self.id]:
                            session_info.out[self.id]['end-msg'] = []
                        session_info.out[self.id]['end-msg'].append(results[-1])
                    self.flow_next_process(session_info, self.flow_next_nodes_sentence_end, trigger, running_id, type='text', role='user')
                    break

            # Publish everything drained in this pass and fire downstream nodes.
            if results:
                with session_info.out_write_lock:
                    if session_info.out.get(self.id, None) is None:
                        session_info.out[self.id] = {}
                    if 'msg' not in session_info.out[self.id]:
                        session_info.out[self.id]['msg'] = []
                    session_info.out[self.id]['msg'].extend(results)

                self.executed_num += 1
                self.flow_next_process(session_info, self.flow_next_nodes, trigger, running_id, type='text', role='user')

    def get_new_exe_his(self, session_info_ID, beforerunning=False):
        """Return the session's execution history, creating and caching it once."""
        if self.sessionID2exe_his.get(session_info_ID) is None:
            self.sessionID2exe_his[session_info_ID] = super().get_new_exe_his(session_info_ID, beforerunning)
        return self.sessionID2exe_his[session_info_ID]

    def execute(self, session_info: SessionInfo=None, exe_his: BaseExeHis=None, trigger=None, running_id=None):
        """Fetch the newest cached audio from an upstream node and stream it."""
        if session_info is None:
            return

        # 'cached-audio' takes precedence; only the first configured input
        # is consulted, matching the original if/elif behavior.
        audio_data = None
        for in_key in ('cached-audio', 'last-cached-audio'):
            if in_key in self.dict['in']:
                audio_form = self.dict['in'][in_key]
                prenode = self.graphNodeManager.nodesByIdDict[audio_form['nodeId']]
                audio_data = prenode.get_newest_output_in_session(session_info, audio_form['handle'])
                break

        if audio_data is None:
            return

        exe_his.update_content("Audio Processing...")
        self.process_audio(session_info, audio_data['data'], trigger, running_id)

    def stop(self):
        """Stop all recognizers, close all execution histories and clear state."""
        # Iterate over snapshots: stopping a recognizer can fire its callback's
        # on_complete, which deletes entries from recognition_instances and
        # would otherwise raise "dictionary changed size during iteration".
        for instance in list(self.recognition_instances.values()):
            try:
                instance.stop()
            except Exception as e:
                print(f'Error stopping recognition instance: {e}')
        for exe_his in list(self.sessionID2exe_his.values()):
            exe_his.done()

        # Clear all per-session resources.
        self.recognition_instances.clear()
        self.result_buffers.clear()
        self.background_tasks.clear()
        self.sessionID2exe_his.clear()

    def add_flow_next(self, node_id:str, **kwargs):
        """Register a downstream node.

        Edges whose sourceHandle contains 'end' fire only on sentence
        boundaries; all others fire on every batch of results.
        If this method is overridden, get_all_flow_next_nodes must be
        overridden as well.
        """
        # .get avoids a KeyError when an edge carries no sourceHandle;
        # such edges are treated as regular (non sentence-end) links.
        if 'end' in kwargs.get('sourceHandle', ''):
            self.flow_next_nodes_sentence_end.append(node_id)
        else:
            self.flow_next_nodes.append(node_id)

    def get_all_flow_next_nodes(self):
        """Return every downstream node id, regular and sentence-end alike.

        If this method is overridden, add_flow_next must be overridden too.
        """
        return self.flow_next_nodes + self.flow_next_nodes_sentence_end