from flask import request
from flask_socketio import SocketIO, emit, join_room, leave_room
from utils.randomUtils import generate_random_by_time
import threading
import time
import numpy as np
from .baseGraphNode import BaseGraphNode
from .httpGraphNode import HttpGraphNode
from .sessionInfo import SessionInfo
import wave
import os
from datetime import datetime
import base64
from typing import Optional, Dict, Any, List
import json
import io

class HttpAudioInGraphNode(BaseGraphNode):
    """Graph node that receives audio over a Socket.IO route, buffers it per
    session, and every ``pulseSeconds`` packages the accumulated samples into
    a clip that is published downstream via ``flow_next``.

    Per-session outputs (in ``self.dict['out'][session_id]``):
      - ``'cached-audio'``      : rolling merged clip (grows across pulses)
      - ``'last-cached-audio'`` : clips accumulated since the last read;
        consumed (cleared) by ``get_newest_output_in_session``.
    """

    def __init__(self, id: str, data: dict, graphNodeManager, random_key: str):
        super().__init__(id, data, graphNodeManager, random_key)
        node_data = data['data']
        self.route = node_data['route']
        self.enWS = node_data['enWS']
        self.enInterrupt = node_data['enInterrupt']
        # NOTE(review): sampleRate appears to be stored in kHz — every place a
        # frame rate in Hz is needed it is multiplied by 1000. Confirm upstream.
        self.sampleRate = node_data['sampleRate']
        self.channels = node_data['channels']
        self.bitDepth = node_data['bitDepth']
        self.pulseSeconds = node_data['pulseSeconds']

        # Reuse the manager's shared HTTP node if present; otherwise create it.
        if self.graphNodeManager.http_node is None:
            self.graphNodeManager.http_node = HttpGraphNode(
                generate_random_by_time(), self.data, self.graphNodeManager, self.random_key)
        self.full_url = self.graphNodeManager.http_node.full_url

        self.running = False

        # Per-session audio state, all keyed by session id.
        self.audio_history = {}         # list of saved-clip metadata (600 s rolling window)
        self.last_pulse_time = {}       # wall-clock time of the last pulse
        self.current_audio_buffer = {}  # int16 samples accumulated since last pulse
        self.audio_timestamps = {}      # client timestamps for the current buffer
        self.last_access_time = {}      # last time 'last-cached-audio' was read
        self.last_cached_audio = {}     # never populated per session; cleaned up defensively

        # Directory for (optionally) persisted audio clips.
        self.audio_dir = os.path.join(os.getcwd(), 'audio_storage')
        os.makedirs(self.audio_dir, exist_ok=True)

    def process_wav_data(self, wav_data: bytes) -> np.ndarray:
        """Parse an in-memory WAV blob and return its samples as an int16 array.

        Parameter mismatches against the node configuration are only warned
        about — the data is still decoded and returned. Re-raises any parse
        error after logging it.
        """
        try:
            wav_file = io.BytesIO(wav_data)
            with wave.open(wav_file, 'rb') as wf:
                # Validate audio parameters (warn only, never reject).
                if wf.getnchannels() != self.channels:
                    print(f"Warning: Channel count mismatch. Expected {self.channels}, got {wf.getnchannels()}")
                if wf.getframerate() != self.sampleRate * 1000:
                    print(f"Warning: Sample rate mismatch. Expected {self.sampleRate * 1000}, got {wf.getframerate()}")
                if wf.getsampwidth() != self.bitDepth // 8:
                    print(f"Warning: Bit depth mismatch. Expected {self.bitDepth // 8}, got {wf.getsampwidth()}")

                audio_data = wf.readframes(wf.getnframes())
                # Assumes 16-bit PCM; a mismatched sample width is warned about
                # above but the bytes are still interpreted as int16 here.
                return np.frombuffer(audio_data, dtype=np.int16)
        except Exception as e:
            print(f"Error processing WAV data: {str(e)}")
            raise

    def get_newest_output_in_session(self, session_info: SessionInfo, target_handle: str):
        """Return the newest output for *target_handle* in this session.

        Fixed: previously indexed ``[-1]`` unguarded and raised
        IndexError/KeyError when the requested list was empty or absent; now
        returns None in those cases, matching the unknown-session path.
        Reading 'last-cached-audio' clears the list (consume-once semantics).
        """
        if session_info.id not in self.dict['out']:
            return None

        session_out = self.dict['out'][session_info.id]

        if target_handle == 'cached-audio':
            clips = session_out.get('cached-audio')
            return clips[-1] if clips else None

        if target_handle == 'last-cached-audio':
            # Record the access time before consuming the pending clips.
            self.last_access_time[session_info.id] = time.time()
            clips = session_out.get('last-cached-audio')
            newest = clips[-1] if clips else None
            session_out['last-cached-audio'] = []
            return newest

        return super().get_newest_output_in_session(session_info, target_handle)

    @staticmethod
    def _normalize_base64(payload: str) -> str:
        """Strip a Data-URL prefix and whitespace, then pad to a multiple of 4."""
        if payload.startswith('data:'):
            payload = payload.split('base64,')[1]
        payload = payload.replace('\n', '').replace(' ', '')
        remainder = len(payload) % 4
        if remainder:
            payload += '=' * (4 - remainder)
        return payload

    def _decode_audio_payload(self, payload: str) -> np.ndarray:
        """Decode a base64 (possibly URL-safe) WAV payload into int16 samples.

        Raises on undecodable or empty payloads; the caller reports the error
        to the client.
        """
        normalized = self._normalize_base64(payload)
        try:
            wav_data = base64.b64decode(normalized)
        except Exception as decode_error:
            print(f"Base64 decode error: {str(decode_error)}")
            # Retry after mapping URL-safe characters to the standard alphabet.
            normalized = normalized.replace('-', '+').replace('_', '/')
            wav_data = base64.b64decode(normalized)

        if len(wav_data) == 0:
            raise ValueError("Decoded audio data is empty")

        print(f"Raw WAV data size: {len(wav_data)} bytes")
        return self.process_wav_data(wav_data)

    def _merge_or_append_clip(self, clip_list: list, audio_clip: dict, current_time: float) -> None:
        """Merge *audio_clip* into the last entry of *clip_list*, or append a copy.

        Fixed: the original appended the SAME clip dict to both output lists
        when both were empty, so a later merge into one silently mutated the
        other (double-merged data). Appending an independent copy removes the
        aliasing.
        """
        if clip_list:
            last_clip = clip_list[-1]
            last_audio = np.frombuffer(last_clip['data'], dtype=np.int16)
            new_audio = np.frombuffer(audio_clip['data'], dtype=np.int16)
            last_clip['data'] = np.concatenate([last_audio, new_audio]).tobytes()
            last_clip['timestamps'].extend(audio_clip['timestamps'])
            last_clip['duration'] += audio_clip['duration']
            last_clip['timestamp'] = current_time
        else:
            clip_list.append({**audio_clip, 'timestamps': list(audio_clip['timestamps'])})

    def _flush_session_buffer(self, session_info: SessionInfo, current_time: float) -> None:
        """Package the session's buffered samples into a clip, publish it on
        both outputs, prune history older than 600 s, and trigger the flow."""
        session_id = session_info.id
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        filename = f"audio_{session_id}_{timestamp}.wav"
        filepath = os.path.join(self.audio_dir, filename)

        audio_buffer = np.array(self.current_audio_buffer[session_id], dtype=np.int16)
        # NOTE: clips are NOT written to disk — only the intended filepath is
        # recorded, and the later os.remove calls are best-effort.
        duration = len(audio_buffer) / (self.sampleRate * 1000)

        self.audio_history[session_id].append({
            'timestamp': current_time,
            'filepath': filepath,
            'duration': duration,
        })

        # Keep only the last 600 seconds of history.
        history = self.audio_history[session_id]
        while history and current_time - history[0]['timestamp'] > 600:
            old_audio = history.pop(0)
            try:
                os.remove(old_audio['filepath'])
            except OSError:
                pass

        audio_clip = {
            'data': audio_buffer.tobytes(),
            'timestamps': self.audio_timestamps[session_id],
            'filepath': filepath,
            'duration': duration,
            'timestamp': current_time,
        }
        session_out = self.dict['out'].setdefault(session_id, {})
        self._merge_or_append_clip(session_out.setdefault('cached-audio', []), audio_clip, current_time)
        self._merge_or_append_clip(session_out.setdefault('last-cached-audio', []), audio_clip, current_time)

        # Reset the rolling buffer and pulse timer.
        self.current_audio_buffer[session_id] = []
        self.audio_timestamps[session_id] = []
        self.last_pulse_time[session_id] = current_time

        self.executed_num += 1
        self.flow_next(session_info)

    def _cleanup_session(self, session_id) -> None:
        """Drop all per-session state and best-effort delete its saved files.

        Fixed: the original did ``del self.last_cached_audio[session_id]``,
        which raised KeyError on every disconnect because that dict is never
        populated per session; ``pop(..., None)`` makes cleanup total.
        """
        for audio_clip in self.audio_history.get(session_id, []):
            try:
                os.remove(audio_clip['filepath'])
            except OSError:
                pass
        for store in (self.audio_history, self.last_pulse_time,
                      self.current_audio_buffer, self.audio_timestamps,
                      self.last_access_time, self.last_cached_audio):
            store.pop(session_id, None)

    def execute(self, session_info: SessionInfo = None):
        """Register the Socket.IO handlers for this node's route and mark the
        route online. Idempotent: does nothing if already running."""
        if self.running:
            return

        if self.full_url in self.graphNodeManager.project.onlineRoutes:
            self.graphNodeManager.project.onlineRoutes.pop(self.full_url)
        self.running = True

        socketio_instance = self.graphNodeManager.http_node.get_socketio_instance()

        def create_websocket_route(full_url):
            @socketio_instance.on('connect', namespace=full_url)
            def on_connect():
                print(f"WebSocket connected on route: {full_url} User sid:{request.sid}")
                emit('response', {'data': 'Connected to WebSocket!'})

            @socketio_instance.on(self.route, namespace=full_url)
            def handle_audio_data(data):
                sid = request.sid
                try:
                    # Validate the incoming message shape.
                    audio_data = data
                    if not isinstance(audio_data, dict) or 'type' not in audio_data or 'data' not in audio_data:
                        raise ValueError("Invalid audio data format")
                    if audio_data['type'] != 'audio':
                        raise ValueError("Invalid data type")

                    # Get or create the session for this socket.
                    sessionInfo = self.graphNodeManager.sid_to_sessionInfo.get(sid, None)
                    if sessionInfo is None:
                        sessionInfo = self.graphNodeManager.createSessionInfo(
                            generate_random_by_time(),
                            socketio_instance,
                            sid,
                            self.route,
                            full_url
                        )
                        join_room(sid)

                    # Lazily initialize the per-session stores.
                    if sessionInfo.id not in self.audio_history:
                        now = time.time()
                        self.audio_history[sessionInfo.id] = []
                        self.last_pulse_time[sessionInfo.id] = now
                        self.current_audio_buffer[sessionInfo.id] = []
                        self.audio_timestamps[sessionInfo.id] = []
                        self.last_access_time[sessionInfo.id] = now

                    try:
                        audio_array = self._decode_audio_payload(audio_data['data'])
                    except Exception as e:
                        print(f"Error decoding base64 audio data: {str(e)}")
                        emit('error', {'message': f"Error decoding audio data: {str(e)}"})
                        return

                    self.current_audio_buffer[sessionInfo.id].extend(audio_array)
                    # Fixed: a missing client timestamp used to raise KeyError
                    # AFTER the buffer was extended, leaving the buffers and
                    # timestamp lists inconsistent; fall back to server time.
                    self.audio_timestamps[sessionInfo.id].append(
                        audio_data.get('timestamp', time.time()))

                    # Pulse: if enough time has passed and there is buffered
                    # audio, flush it downstream. (An elapsed-but-empty pulse
                    # leaves last_pulse_time untouched, as before.)
                    current_time = time.time()
                    if (current_time - self.last_pulse_time[sessionInfo.id] >= self.pulseSeconds
                            and self.current_audio_buffer[sessionInfo.id]):
                        try:
                            self._flush_session_buffer(sessionInfo, current_time)
                        except Exception as e:
                            print(f"Error saving audio file: {str(e)}")
                            emit('error', {'message': f"Error saving audio file: {str(e)}"})

                except Exception as e:
                    print(f"Error processing audio data: {str(e)}")
                    emit('error', {'message': str(e)})

            @socketio_instance.on('disconnect', namespace=full_url)
            def on_disconnect():
                sid = request.sid
                print(f"WebSocket disconnected on route: {full_url} User sid: {sid}")
                leave_room(sid)

                sessionInfo = self.graphNodeManager.sid_to_sessionInfo.get(sid, None)
                if sessionInfo is not None:
                    self._cleanup_session(sessionInfo.id)

        threading.Thread(target=create_websocket_route, args=(self.full_url,)).start()
        self.graphNodeManager.project.addOnlineRoute(self.route, 'audio', self.name)
        self.graphNodeManager.project.is_websocket_running = True
        print(f'WebSocket server started and listening on route: {self.full_url}')

    def stop(self):
        """Take the route offline and release all per-session audio state."""
        if not self.running:
            return

        self.running = False
        self.graphNodeManager.project.removeOnlineRoute(self.route)

        # Clean up every tracked session (removes files + all keyed state).
        for session_id in list(self.audio_history):
            self._cleanup_session(session_id)
        # Clear any residue from sessions that never buffered audio.
        self.last_pulse_time.clear()
        self.current_audio_buffer.clear()
        self.audio_timestamps.clear()
        self.last_access_time.clear()
        self.last_cached_audio.clear()