from flask import request
from flask_socketio import SocketIO, emit, join_room, leave_room
from utils.randomUtils import generate_random_by_time
import threading
import time
import numpy as np
from .baseGraphNode import  *
from .httpGraphNode import HttpGraphNode
from ..execute.baseExeHis import BaseExeHis
from .sessionInfo import SessionInfo
import wave
import os
from datetime import datetime
import base64
from typing import Optional, Dict, Any, List
import json
import io

class HttpAudioInGraphNode(BaseGraphNode):
    """Graph node that receives streaming audio over a Socket.IO route.

    Incoming PCM chunks (float32 samples, possibly base64- or JSON-encoded)
    are converted to int16, buffered per session, and exposed to downstream
    nodes through two session outputs:

    - ``cached-audio``: every processed clip, appended indefinitely.
    - ``last-cached-audio``: clips accumulated since the previous read;
      reading concatenates and clears them (guarded by
      ``last_cached_audio_lock``).
    """
    
    def __init__(self, id:str, data:dict, graphNodeManager, random_key:str):
        """Configure the route, audio parameters and per-session storage.

        Args:
            id: Node identifier within the graph.
            data: Node description; audio settings live under ``data['data']``.
            graphNodeManager: Manager owning sessions and the shared HTTP node.
            random_key: Key forwarded to the base node / created HTTP node.
        """
        super().__init__(id, data, graphNodeManager, random_key)
        self.route = data['data']['route']
        self.enWS = data['data'].get('enWS', True)
        self.enInterrupt = data['data'].get('enInterrupt', True)
        # NOTE(review): sampleRate appears to be stored in kHz —
        # process_wav_data compares against sampleRate * 1000, while the
        # per-message 'sampleRate' field defaults to 16000 Hz. Confirm units.
        self.sampleRate = data['data'].get('sampleRate', 16)
        self.channels = data['data'].get('channels', 1)
        self.bitDepth = data['data'].get('bitDepth', 16)
        # Minimum interval (seconds) between downstream flow_next triggers.
        self.pulseSeconds = data['data'].get('pulseSeconds', 1)
        
        # Reuse the manager's shared HTTP node; create it on first use so all
        # HTTP/WebSocket nodes of this graph share one server URL.
        if self.graphNodeManager.http_node is not None:
            self.full_url = self.graphNodeManager.http_node.full_url
        else:
            self.graphNodeManager.http_node = HttpGraphNode(generate_random_by_time(), self.data, self.graphNodeManager, self.random_key)
            self.full_url = self.graphNodeManager.http_node.full_url
            
        self.running = False
        
        # Audio storage (all dicts keyed by session id)
        self.audio_history = {}  # session_id -> list of audio clips
        self.last_pulse_time = {}  # session_id -> last pulse time
        self.current_audio_buffer = {}  # session_id -> current audio buffer
        self.audio_timestamps = {}  # session_id -> list of timestamps
        self.last_cached_audio = {}  # session_id -> list of audio clips since last access
        
        # Create audio storage directory if it doesn't exist
        self.audio_dir = os.path.join(os.getcwd(), 'audio_storage')
        if not os.path.exists(self.audio_dir):
            os.makedirs(self.audio_dir)
            
        # Serializes append/drain access to the 'last-cached-audio' output.
        self.last_cached_audio_lock = threading.Lock()

    def process_wav_data(self, wav_data: bytes) -> np.ndarray:
        """Decode WAV-format bytes into a 1-D int16 sample array.

        Mismatches against the node's configured channels / sample rate /
        bit depth are only warned about, never rejected.

        Args:
            wav_data: Complete WAV file contents (header + frames).

        Returns:
            Raw samples viewed as int16 (channels interleaved, if any).

        Raises:
            Exception: Re-raises whatever ``wave`` raises on malformed input.
        """
        try:
            # Wrap the bytes so the wave module can treat them like a file.
            wav_file = io.BytesIO(wav_data)
            
            # Read the WAV container with the stdlib wave module.
            with wave.open(wav_file, 'rb') as wf:
                # Validate audio parameters against the node configuration.
                if wf.getnchannels() != self.channels:
                    print(f"Warning: Channel count mismatch. Expected {self.channels}, got {wf.getnchannels()}")
                # self.sampleRate is treated as kHz here, hence the * 1000.
                if wf.getframerate() != self.sampleRate * 1000:
                    print(f"Warning: Sample rate mismatch. Expected {self.sampleRate * 1000}, got {wf.getframerate()}")
                if wf.getsampwidth() != self.bitDepth // 8:
                    print(f"Warning: Bit depth mismatch. Expected {self.bitDepth // 8}, got {wf.getsampwidth()}")
                
                # Read all frames at once...
                audio_data = wf.readframes(wf.getnframes())
                
                # ...and view them as 16-bit signed samples.
                audio_array = np.frombuffer(audio_data, dtype=np.int16)
                
                return audio_array
                
        except Exception as e:
            print(f"Error processing WAV data: {str(e)}")
            raise

    def get_newest_output_in_session(self, session_info: SessionInfo, target_handle: str):
        """Return the newest output of this node for ``target_handle``.

        For 'cached-audio' the most recent clip is returned as-is. For
        'last-cached-audio' every clip accumulated since the previous read is
        concatenated into one clip, the cache is drained, and the merged clip
        is returned (None if nothing is pending). Any other handle defers to
        the base-class implementation.
        """
        with session_info.out_write_lock:
            if session_info.out.get(self.id,None) is None:
                return None
            if session_info.out[self.id].get(target_handle,None) is None:
                return None
        
        # NOTE(review): the reads below happen after out_write_lock has been
        # released; a concurrent writer could race them — confirm whether the
        # lock is meant to cover the whole method.
        if target_handle == 'cached-audio':
            return session_info.out[self.id]['cached-audio'][-1]
        
        if target_handle == 'last-cached-audio':
            with self.last_cached_audio_lock:
                audio_clips = session_info.out[self.id]['last-cached-audio']
                
                if audio_clips:
                    # Merge every pending clip into one contiguous stream.
                    all_audio_data = []
                    all_timestamps = []
                    total_duration = 0
                    
                    for clip in audio_clips:
                        # Bytes back to int16 samples so numpy can concatenate.
                        audio_data = np.frombuffer(clip['data'], dtype=np.int16)
                        all_audio_data.append(audio_data)
                        all_timestamps.extend(clip['timestamps'])
                        total_duration += clip['duration']
                    
                    # Single C-level concatenation of all pending samples.
                    concatenated_array = np.concatenate(all_audio_data)
                    # Build the merged clip; 'timestamp' is the read time,
                    # not the capture time of the audio.
                    concatenated_audio = {
                        'data': concatenated_array.tobytes(),
                        'timestamps': all_timestamps,
                        'duration': total_duration,
                        'timestamp': time.time()
                    }
                    

                    
                    # Drain the cache so the next read starts fresh.
                    session_info.out[self.id]['last-cached-audio'] = []
                    
                    return concatenated_audio
                
                return None
            
        return super().get_newest_output_in_session(session_info, target_handle)

    def execute(self, session_info: SessionInfo=None, exe_his: BaseExeHis=None, trigger=None, running_id=None):
        """Start the node: register the Socket.IO audio handler exactly once.

        Subsequent calls while ``self.running`` is True are no-ops. The
        registered handler converts each incoming chunk to int16, stores it
        per session, and triggers ``flow_next`` at most once per
        ``pulseSeconds``.
        """
        if not self.running:
            # Drop any stale registration of this URL before going live.
            if self.full_url in self.graphNodeManager.project.onlineRoutes:
                self.graphNodeManager.project.onlineRoutes.pop(self.full_url)
            self.running = True
            
            socketio_instance = self.graphNodeManager.http_node.get_socketio_instance()
            full_url = self.full_url

            @socketio_instance.on(self.route, namespace=full_url)
            def handle_audio_data(data):
                """Socket.IO callback: process one incoming audio message."""
                try:
                    # Validate the envelope: {'type': 'audio', 'data': ...}.
                    audio_data = data
                    if not isinstance(audio_data, dict) or 'type' not in audio_data or 'data' not in audio_data:
                        raise ValueError("Invalid audio data format")
                    
                    if audio_data['type'] != 'audio':
                        raise ValueError("Invalid data type")
                    id = audio_data.get('id', None)  # client-supplied session id (shadows builtin id)
                    # Get or create session
                    sessionInfo = self.graphNodeManager.get_session_info(id, socketio_instance, request.sid, full_url)
                    # Initialize session storage if needed
                    if sessionInfo.id not in self.audio_history:
                        self.audio_history[sessionInfo.id] = []
                        self.last_pulse_time[sessionInfo.id] = time.time()
                        self.current_audio_buffer[sessionInfo.id] = []
                        self.audio_timestamps[sessionInfo.id] = []
                    
                    try:
                        # Get raw PCM data
                        pcm_data = audio_data['data']
                        sample_rate = audio_data.get('sampleRate', 16000)  # Default to 16kHz (Hz here, unlike self.sampleRate)
                        timestamp = audio_data.get('timestamp', time.time() * 1000)
                        
                        # Normalize the payload to a float32 numpy array,
                        # whichever encoding the client used.
                        if isinstance(pcm_data, str):
                            # A string is either base64-encoded raw float32
                            # bytes or a JSON array of samples.
                            try:
                                # Try to decode as base64
                                pcm_data = base64.b64decode(pcm_data)
                                # Convert bytes to float32 array
                                pcm_data = np.frombuffer(pcm_data, dtype=np.float32)
                            except:
                                # If not base64, try to parse as JSON array
                                try:
                                    pcm_data = np.array(json.loads(pcm_data), dtype=np.float32)
                                except:
                                    raise ValueError("Could not parse audio data")
                        elif isinstance(pcm_data, list):
                            # If data is a list, convert to numpy array
                            pcm_data = np.array(pcm_data, dtype=np.float32)
                        elif isinstance(pcm_data, bytes):
                            # If data is bytes, convert to float32 array
                            pcm_data = np.frombuffer(pcm_data, dtype=np.float32)
                        elif not isinstance(pcm_data, np.ndarray):
                            raise ValueError(f"Unsupported data type: {type(pcm_data)}")
                        
                        # Ensure data is float32
                        if pcm_data.dtype != np.float32:
                            pcm_data = pcm_data.astype(np.float32)
                        
                        # Scale [-1.0, 1.0] float32 to full-range int16.
                        pcm_data = (pcm_data * 32767).astype(np.int16)

                        self.current_audio_buffer[sessionInfo.id].append(pcm_data)
                        self.audio_timestamps[sessionInfo.id].append(timestamp)
                        
                        # Check if we need to process the buffer
                        current_time = time.time()
                        time_since_last_pulse = current_time - self.last_pulse_time[sessionInfo.id]
                        
                        # Every chunk is packaged into the session outputs
                        # immediately (not gated on pulseSeconds).
                        self._process_audio_buffer(pcm_data, sessionInfo, sample_rate, current_time)
                                
                        # Only trigger the downstream flow once pulseSeconds
                        # have elapsed since the previous pulse.
                        if time_since_last_pulse >= self.pulseSeconds:
                            self.last_pulse_time[sessionInfo.id] = current_time
                            # NOTE(review): type='text' for an audio node —
                            # confirm this is the intended flow_next payload type.
                            self.flow_next(sessionInfo,trigger=trigger, running_id=running_id, type='text', role='user')
                        
                    except Exception as e:
                        print(f"Error processing audio data: {str(e)}")
                        print(f"Audio data type: {type(audio_data['data'])}")
                        if isinstance(audio_data['data'], str):
                            print(f"First 100 chars of data: {audio_data['data'][:100]}")
                        emit('error', {'message': str(e)})
                
                except Exception as e:
                    print(f"Error processing audio data: {str(e)}")
                    emit('error', {'message': str(e)})
            
            self.graphNodeManager.project.addOnlineRoute(self.route,'audio', self.name)
            self.graphNodeManager.project.is_websocket_running = True
            print(f'WebSocket server started and listening on route: {self.full_url}')

    def stop(self):
        """Stop the node, unregister its route and discard per-session state."""
        if not self.running:
            return
            
        self.running = False
        self.graphNodeManager.project.removeOnlineRoute(self.route)
        
        # Clean up all audio files
        # NOTE(review): clips built by _process_audio_buffer carry no
        # 'filepath' key, so this os.remove is effectively skipped by the
        # bare except — confirm whether files are written elsewhere.
        for session_id in self.audio_history:
            for audio_clip in self.audio_history[session_id]:
                try:
                    os.remove(audio_clip['filepath'])
                except:
                    pass
        self.audio_history.clear()
        self.last_pulse_time.clear()
        self.current_audio_buffer.clear()
        self.audio_timestamps.clear()
        self.last_cached_audio.clear() 

    def _process_audio_buffer(self, audio_buffer, session_info, sample_rate, current_time):
        """Package one int16 chunk as a clip and append it to both outputs.

        Args:
            audio_buffer: int16 numpy array holding the latest chunk.
            session_info: Session whose ``out`` dict receives the clip.
            sample_rate: Sample rate in Hz, used to compute clip duration.
            current_time: Wall-clock time stamped onto the clip.
        """
        
        # Create audio clip data
        # NOTE(review): 'timestamps' references the session's ever-growing
        # list (it is never drained), so every clip shares all timestamps
        # recorded so far — confirm whether it should be snapshotted/cleared.
        audio_clip = {
            'data': audio_buffer.tobytes(),
            'timestamps': self.audio_timestamps[session_info.id],
            'duration': len(audio_buffer) / sample_rate,
            'timestamp': current_time
        }
        # Lazily create the output slots, then publish the clip to both the
        # permanent cache and the drain-on-read cache.
        with session_info.out_write_lock:
            if session_info.out.get(self.id,None) is None:
                session_info.out[self.id] = {}
            if 'cached-audio' not in session_info.out[self.id]:
                session_info.out[self.id]['cached-audio'] = []
            if 'last-cached-audio' not in session_info.out[self.id]:
                session_info.out[self.id]['last-cached-audio'] = []
            session_info.out[self.id]['cached-audio'].append(audio_clip)
            with self.last_cached_audio_lock:
                session_info.out[self.id]['last-cached-audio'].append(audio_clip) 