import asyncio
from re import M
import websockets
import json
import base64
import wave
import io
import threading
import time
import logging
from typing import Dict, List, Optional, Any
from dataclasses import dataclass
import os
import numpy as np
from io import BytesIO
import array
from pydub import AudioSegment
import math
import asr_inference
from asr_inference import ASRInferenceBlock,SimpleASRInferenceBlock
import copy
import struct
import soundfile

# 配置日志
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

@dataclass
class CSndSegment:
    """One audio segment, per the CSndSegment block in the SysML BDD."""
    m_duration: int          # segment duration in seconds
    m_segmentTimeLimit: int  # maximum allowed segment length in seconds
    m_sndBytes: bytes        # raw audio payload

    def __post_init__(self):
        # CSndSegmentConstraintBlock constraint: m_duration <= 20 seconds.
        # Violations are logged, not rejected.
        if self.m_duration > 20:
            logging.getLogger(__name__).warning(
                f"音频片段时长 {self.m_duration}s 超过20秒限制"
            )

def create_wav_header(sample_rate, num_channels, bits_per_sample, num_samples):
    """Build a canonical 44-byte RIFF/WAVE header for an integer-PCM stream.

    The header consists of the RIFF chunk, a 16-byte "fmt " sub-chunk
    (audio format 1 = PCM) and the "data" sub-chunk header sized for
    `num_samples` frames of `num_channels` x `bits_per_sample` audio.
    Returns a bytearray (the payload itself is not included).
    """
    bytes_per_sample = bits_per_sample // 8
    data_size = num_samples * num_channels * bytes_per_sample
    byte_rate = sample_rate * num_channels * bytes_per_sample
    block_align = num_channels * bytes_per_sample

    packed = struct.pack(
        '<4sI4s'       # "RIFF", total file size - 8, "WAVE"
        '4sIHHIIHH'    # "fmt ", sub-chunk size, format, channels, rate, byte rate, align, bits
        '4sI',         # "data", payload size
        b'RIFF', 36 + data_size, b'WAVE',
        b'fmt ', 16, 1, num_channels, sample_rate, byte_rate, block_align, bits_per_sample,
        b'data', data_size,
    )
    return bytearray(packed)

class AudioParams:
    """Audio format parameters, per the AudioParams block in the SysML BDD.

    Defaults match the Whisper-friendly format: 16 kHz, 16-bit, mono.
    """

    def __init__(self) -> None:
        self.m_sampleRate: int = 16000   # sample rate in Hz
        self.m_bitDepth: int = 16        # bits per sample
        self.m_channelCnt: int = 1       # channel count (1 = mono)


class RTASRServer:
    """Realtime ASR server (SysML BDD: RTASRServer).

    Accepts WebSocket clients, receives base64-encoded audio chunks,
    buffers them into CSndSegment instances roughly every 6 seconds,
    transcodes each buffered segment to WAV and runs ASR inference on
    it, streaming recognition results back to the client.
    """
    # OpenAI-Whisper expects the following audio format:
    #     channelCount: 1,
    #     bitDepth: 16,
    #     sampleRate: 16000,
    #     bitrate: 256000

    def __init__(self, port: int = 8888):
        self.m_port = port
        self.m_sndSegQueue: List[CSndSegment] = []  # pending audio segments
        self.m_audioParams: AudioParams = AudioParams()
        self.asr_inference: Optional[ASRInferenceBlock] = None  # injected via setASRInferencer()
        self.active_connections: Dict[str, Any] = {}  # client_id -> websocket
        self.recognition_tasks: Dict[str, asyncio.Task[Any]] = {}  # client_id -> in-flight inference task

    def setASRInferencer(self, asr_inference: ASRInferenceBlock):
        """Inject the ASR inference backend used for recognition."""
        self.asr_inference = asr_inference

    async def start_server(self):
        """Start the WebSocket server and serve until cancelled.

        Fix: the original printed "startup failed" when no inferencer
        was set but started the server anyway; now it returns early.
        """
        if self.asr_inference is None:
            print("启动失败：ASR模型未加载！")
            logger.error("ASR inferencer not set; refusing to start realtime server")
            return

        logger.info(f"启动实时ASR服务器，监听端口: {self.m_port}")

        async def handle_client(websocket):
            client_id = f"{websocket.remote_address[0]}:{websocket.remote_address[1]}"
            self.active_connections[client_id] = websocket
            logger.info(f"客户端连接: {client_id}")

            try:
                await self.handle_realtime_asr(websocket, client_id)
            except websockets.exceptions.ConnectionClosed:
                logger.info(f"客户端断开连接: {client_id}")
            except Exception as e:
                logger.error(f"处理客户端 {client_id} 时发生错误: {e}")
            finally:
                # Drop bookkeeping and cancel any in-flight recognition task.
                self.active_connections.pop(client_id, None)
                task = self.recognition_tasks.pop(client_id, None)
                if task is not None:
                    task.cancel()

        async with websockets.serve(handle_client, "localhost", self.m_port):
            await asyncio.Future()  # serve forever

    async def handle_realtime_asr(self, websocket, client_id: str):
        """Message loop for one realtime-recognition client.

        Protocol (JSON messages by "type"):
          - "set_audio_params": stores sample rate / bit depth / channel count
          - "audio_chunk": base64 audio appended to a rolling buffer; every
            6 seconds the buffer is wrapped in a CSndSegment and recognized
          - "end_audio": flushes the remaining buffer and acknowledges
        """
        audio_buffer = bytearray()
        last_recognition_time = 0
        print("Handling realtime_asr!")

        async for message in websocket:
            data = {}  # keep a dict in scope for the generic error handler
            try:
                data = json.loads(message)
                print("JSON load OK!")

                if data.get("type") == "set_audio_params":
                    # Client announces the format of the audio it will stream.
                    print("set_audio_params:")
                    print("type:", data.get("type"))
                    print("sample_rate:", data.get("sample_rate"))
                    print("bit_depth:", data["bit_depth"])
                    print("channel_cnt:", data["channel_cnt"])

                    self.m_audioParams.m_sampleRate = data["sample_rate"]
                    self.m_audioParams.m_bitDepth = data["bit_depth"]
                    self.m_audioParams.m_channelCnt = data["channel_cnt"]

                    await websocket.send(json.dumps({
                        "type": "set_audio_params_complete",
                        "message": "音频参数设置成功"
                    }))

                if data.get("type") == "audio_chunk":
                    # Accumulate the decoded audio bytes.
                    print("type:AudioChunk")
                    audio_data = base64.b64decode(data["audio"])
                    print("len(audio_data):", len(audio_data))
                    audio_buffer.extend(audio_data)
                    current_time = time.time()
                    print("current_time - last_recognition_time:", current_time - last_recognition_time)
                    # Run a recognition pass roughly every 6 seconds.
                    if current_time - last_recognition_time >= 6.0:
                        if len(audio_buffer) > 0:
                            segment = CSndSegment(
                                m_duration=6,           # nominal 6-second slice
                                m_segmentTimeLimit=20,  # hard cap per constraint block
                                m_sndBytes=bytes(audio_buffer)
                            )

                            # Supersede any still-running recognition task.
                            if client_id in self.recognition_tasks:
                                self.recognition_tasks[client_id].cancel()

                            self.recognition_tasks[client_id] = asyncio.create_task(
                                self.process_realtime_segment(websocket, segment)
                            )

                            last_recognition_time = current_time
                            audio_buffer.clear()

                elif data.get("type") == "end_audio":
                    # Flush whatever is left in the buffer, then acknowledge.
                    if len(audio_buffer) > 0:
                        segment = CSndSegment(
                            # NOTE(review): 128000 looks like a bytes-per-second
                            # estimate (16 kHz * 16-bit?) — confirm; this only
                            # estimates the trailing segment's duration.
                            m_duration=len(audio_buffer) // 128000,
                            m_segmentTimeLimit=20,
                            m_sndBytes=bytes(audio_buffer)
                        )

                        await self.process_realtime_segment(websocket, segment)
                        audio_buffer.clear()

                    await websocket.send(json.dumps({
                        "type": "recognition_complete",
                        "message": "实时识别完成"
                    }))

            except json.JSONDecodeError:
                await websocket.send(json.dumps({
                    "type": "error",
                    "message": "无效的JSON格式"
                }))
            except Exception as e:
                # Fix: the original concatenated data.get("type") (possibly
                # None) into the log string, raising a TypeError inside the
                # error handler itself.
                logger.error("处理实时ASR时发生错误: %s | data_type:%s", e, data.get("type"))
                await websocket.send(json.dumps({
                    "type": "error",
                    "message": str(e),
                    "dataValue": data
                }))

    def detect_sampleRate_from_bytes(self, audio_bytes):
        """Read WAV audio from bytes and return its sample rate in Hz.

        Also prints the detected subtype (bit depth), channel count,
        duration and data shape. Returns None when the bytes cannot be
        parsed (fix: the failure path used to return a 4-tuple while the
        success path returned a single value).
        """
        try:
            bytes_io = BytesIO(audio_bytes)

            # First pass: decode the samples to learn rate and length.
            data, sample_rate = soundfile.read(bytes_io)

            # Second pass: reopen for container metadata.
            bytes_io.seek(0)
            # Fix: original referenced the undefined name `sf`; the module
            # is imported as `soundfile`.
            with soundfile.SoundFile(bytes_io) as snd_file:
                subtype = snd_file.subtype
                channels = snd_file.channels
                duration = len(data) / sample_rate

            print(f"采样率: {sample_rate} Hz")
            print(f"位深度: {subtype}")
            print(f"声道数: {channels}")
            print(f"时长: {duration:.2f} 秒")
            print(f"数据形状: {data.shape}")

            return sample_rate

        except Exception as e:
            print(f"读取音频数据时出错: {e}")
            return None

    def save_audio_with_array(self, audio_data, sample_rate=44100, filename='output.wav'):
        """Save 16-bit PCM samples to `filename` via the array module.

        Accepts a list of ints or an array.array('h'); other types raise
        ValueError (reported, not propagated).
        """
        try:
            if isinstance(audio_data, list):
                audio_array = array.array('h', audio_data)  # 'h' = signed 16-bit
            elif isinstance(audio_data, array.array):
                audio_array = audio_data
            else:
                raise ValueError("不支持的音频数据格式")

            soundfile.write(filename, audio_array, sample_rate, subtype='PCM_16')
            # Fix: the original f-string had lost its {filename} placeholder.
            print(f"音频已保存到: {filename}")

        except Exception as e:
            print(f"保存音频时出错: {e}")

    def write_wav(self, filename, sample_rate, data, bits_per_sample=16):
        """Write a mono integer-PCM WAV file by hand.

        :param filename: output file path
        :param sample_rate: sample rate in Hz
        :param data: raw little-endian PCM bytes, or a sequence of
                     integer samples (clamped to the sample width)
        :param bits_per_sample: bits per sample, 8 or 16 (default 16)
        """
        num_channels = 1  # this helper only writes mono
        bytes_per_sample = bits_per_sample // 8

        # Normalize `data` into the raw payload to be written.
        if isinstance(data, (bytes, bytearray, memoryview)):
            payload = bytes(data)
        elif bits_per_sample == 16:
            payload = b''.join(
                struct.pack('<h', max(min(sample, 32767), -32768)) for sample in data
            )
        elif bits_per_sample == 8:
            # 8-bit WAV samples are unsigned.
            payload = bytes(max(min(sample, 255), 0) for sample in data)
        else:
            raise ValueError("仅支持 8 或 16 位采样")

        # Fix: the header's data size now matches the bytes actually
        # written (the original multiplied len(data) by bytes_per_sample
        # even when `data` was already raw bytes, overstating the size).
        data_size = len(payload)
        block_align = num_channels * bytes_per_sample
        byte_rate = sample_rate * block_align

        with open(filename, 'wb') as f:
            # 1. RIFF chunk header.
            f.write(b'RIFF')
            f.write(struct.pack('<I', 36 + data_size))  # file size - 8
            f.write(b'WAVE')

            # 2. "fmt " sub-chunk (16 bytes, PCM).
            f.write(b'fmt ')
            f.write(struct.pack('<I', 16))
            f.write(struct.pack('<H', 1))  # audio format 1 = PCM
            f.write(struct.pack('<H', num_channels))
            f.write(struct.pack('<I', sample_rate))
            f.write(struct.pack('<I', byte_rate))
            f.write(struct.pack('<H', block_align))
            f.write(struct.pack('<H', bits_per_sample))

            # 3. "data" sub-chunk.
            f.write(b'data')
            f.write(struct.pack('<I', data_size))
            f.write(payload)

    async def process_realtime_segment(self, websocket, segment: CSndSegment):
        """Persist `segment` under ./serverCache as WAV and recognize it.

        The browser streams containerized audio (e.g. webm), so pydub /
        ffmpeg decodes the raw bytes and re-encodes a WAV matching the
        negotiated AudioParams before inference. Errors are reported to
        the client as JSON "error" messages.
        """
        print("vars(self.m_audioParams):", vars(self.m_audioParams))

        try:
            if not os.path.exists("./serverCache"):
                os.mkdir("./serverCache")
        except Exception as e:
            logger.error(f"创建serverCache失败: {e}")
            await websocket.send(json.dumps({
                "type": "error",
                "message": f"ASR服务器错误: {str(e)}"
            }))
            return

        try:
            # Pick the first unused rtNNNN.wav cache slot.
            tag = 1
            filePath = f"./serverCache/rt{tag:04d}.wav"
            while os.path.exists(filePath):
                tag += 1
                filePath = f"./serverCache/rt{tag:04d}.wav"
            print("The tag is now:", tag)
            print("filePath:", filePath)

            # Dump the raw (headerless) stream to disk, then let pydub /
            # ffmpeg decode it. (Dead debug code that synthesized a sine
            # wave into an unused buffer was removed.)
            noheadFilePath = os.path.join(
                os.path.dirname(filePath),
                "nohead_" + os.path.basename(filePath) + ".webm"
            )
            with open(noheadFilePath, 'wb') as raw_file:
                raw_file.write(segment.m_sndBytes)
            # Fix: decode only after the file is closed/flushed; the
            # original called AudioSegment.from_file() inside the `with`
            # block, reading back a still-buffered, possibly incomplete file.
            audio = AudioSegment.from_file(noheadFilePath)
            audio = audio.set_frame_rate(self.m_audioParams.m_sampleRate)
            audio = audio.set_channels(self.m_audioParams.m_channelCnt)
            audio = audio.set_sample_width(self.m_audioParams.m_bitDepth // 8)  # bits -> bytes
            audio.export(filePath, format="wav")

            print(f"文件保存到服务器完成", filePath)

            # Acknowledge receipt before the (slow) recognition step.
            await websocket.send(json.dumps({
                "type": "audio_chunk_received",
                "message": "音频流上传成功"
            }))
            print("音频流上传成功")

            # Run ASR inference on the cached WAV file.
            result = await self.asr_inference.inferenceFile(filePath)

            await websocket.send(json.dumps({
                "type": "recognition_result",
                "text": result,
                "duration": segment.m_duration,
                "timestamp": time.time()
            }))

        except Exception as e:
            logger.error(f"处理音频片段时发生错误: {e}")
            await websocket.send(json.dumps({
                "type": "error",
                "message": f"识别失败: {str(e)}"
            }))

class FileASRServer:
    """File ASR server (SysML BDD: FileASRServer).

    Accepts base64-encoded whole audio files over WebSocket, caches them
    under ./serverCache and runs one-shot ASR inference on each upload.
    """

    def __init__(self, port: int = 8888):
        self.m_port = port
        self.asr_inference: Optional[ASRInferenceBlock] = None  # injected via setASRInferencer()

    def setASRInferencer(self, asr_inference: ASRInferenceBlock):
        """Inject the ASR inference backend used for recognition."""
        self.asr_inference = asr_inference

    async def start_server(self):
        """Start the WebSocket server and serve until cancelled.

        Fix: the original printed "startup failed" when no inferencer
        was set but started the server anyway; now it returns early.
        """
        if self.asr_inference is None:
            print("启动失败：ASR模型未加载！")
            logger.error("ASR inferencer not set; refusing to start file server")
            return

        logger.info(f"启动文件ASR服务器，监听端口: {self.m_port}")

        async def handle_client(websocket):
            client_id = f"{websocket.remote_address[0]}:{websocket.remote_address[1]}"
            logger.info(f"客户端连接: {client_id}")

            try:
                await self.handle_file_asr(websocket, client_id)
            except websockets.exceptions.ConnectionClosed:
                logger.info(f"客户端断开连接: {client_id}")
            except Exception as e:
                logger.error(f"处理客户端 {client_id} 时发生错误: {e}")

        async with websockets.serve(handle_client, "localhost", self.m_port):
            await asyncio.Future()  # serve forever

    async def handle_file_asr(self, websocket, client_id: str):
        """Message loop for one file-recognition client.

        Expects JSON messages of type "file_upload" carrying base64 file
        data; replies with an upload acknowledgement followed by the
        recognition result (or a JSON "error" message on failure).
        """
        async for message in websocket:
            try:
                data = json.loads(message)

                if data.get("type") == "file_upload":
                    file_data = base64.b64decode(data["file_data"])
                    tmpTimeStamp = time.time()
                    filename = data.get("filename", f"unknownfile{tmpTimeStamp:.4f}.wav")

                    # Fix: both messages below had lost their {filename} placeholder.
                    print(f"接收到文件: {filename}, 大小: {len(file_data)} 字节")
                    logger.info(f"接收到文件: {filename}, 大小: {len(file_data)} 字节")

                    # Acknowledge the upload before starting recognition.
                    await websocket.send(json.dumps({
                        "type": "file_upload_complete",
                        "filename": filename,
                        "timestamp": time.time()
                    }))

                    # Cache the file and run inference on it.
                    result = await self.process_audio_file(file_data, filename)

                    await websocket.send(json.dumps({
                        "type": "file_recognition_result",
                        "filename": filename,
                        "text": result,
                        "timestamp": time.time()
                    }))

            except json.JSONDecodeError:
                await websocket.send(json.dumps({
                    "type": "error",
                    "message": "无效的JSON格式"
                }))
            except Exception as e:
                logger.error(f"处理文件ASR时发生错误: {e}")
                await websocket.send(json.dumps({
                    "type": "error",
                    "message": str(e)
                }))

    async def process_audio_file(self, file_data: bytes, filename: str) -> str:
        """Cache `file_data` under ./serverCache and run ASR on it.

        Raises on failure so the caller's exception handler reports the
        error to the client. (Fix: the original referenced an undefined
        `websocket` here, so any cache-dir failure raised NameError and
        masked the real error.)
        """
        try:
            if not os.path.exists("./serverCache"):
                os.mkdir("./serverCache")
        except Exception as e:
            logger.error(f"创建serverCache失败: {e}")
            raise

        try:
            # Fix: the cache path f-string had lost its {filename}
            # placeholder, so every upload overwrote one literal file.
            # basename() guards against path traversal in the
            # client-supplied name (untrusted input).
            cacheFileName = os.path.join("./serverCache", os.path.basename(filename))
            print("Processing audio file:", cacheFileName)

            with open(cacheFileName, 'wb') as file:
                file.write(file_data)
            print(f"文件保存到服务器完成", cacheFileName)

            # Run ASR inference on the cached file.
            result = await self.asr_inference.inferenceFile(cacheFileName)
            return result

        except Exception as e:
            logger.error(f"处理音频文件 {filename} 时发生错误: {e}")
            raise

class ASRServerManager:
    """Unified manager fronting both the realtime and the file ASR services."""

    def __init__(self, port: int = 8888):
        self.port = port
        self.rt_server = RTASRServer(port)
        self.file_server = FileASRServer(port)
        # Both servers share a single inference backend.
        self.asr_inference: ASRInferenceBlock = SimpleASRInferenceBlock()
        self.rt_server.setASRInferencer(self.asr_inference)
        self.file_server.setASRInferencer(self.asr_inference)

    async def start_unified_server(self):
        """Load the ASR model, then accept clients and dispatch them by mode."""
        await self.asr_inference.initModel("./models")

        logger.info(f"启动统一ASR服务器，监听端口: {self.port}")

        async def handle_client(websocket):
            client_id = f"{websocket.remote_address[0]}:{websocket.remote_address[1]}"
            logger.info(f"客户端连接: {client_id}")

            try:
                # The first "select_mode" message decides which handler
                # owns the remainder of the connection.
                async for raw in websocket:
                    payload = json.loads(raw)

                    if payload.get("type") != "select_mode":
                        await websocket.send(json.dumps({
                            "type": "error",
                            "message": "请先发送模式选择消息"
                        }))
                        continue

                    mode = payload.get("mode")
                    if mode == "realtime":
                        logger.info(f"客户端 {client_id} 选择实时识别模式")
                        await websocket.send(json.dumps({
                            "type": "mode_selected",
                            "mode": "realtime",
                            "message": "已切换到实时识别模式"
                        }))
                        await self.rt_server.handle_realtime_asr(websocket, client_id)
                    elif mode == "file":
                        logger.info(f"客户端 {client_id} 选择文件识别模式")
                        await websocket.send(json.dumps({
                            "type": "mode_selected",
                            "mode": "file",
                            "message": "已切换到文件识别模式"
                        }))
                        await self.file_server.handle_file_asr(websocket, client_id)
                    else:
                        await websocket.send(json.dumps({
                            "type": "error",
                            "message": "无效的模式选择，请选择 'realtime' 或 'file'"
                        }))
                    break

            except websockets.exceptions.ConnectionClosed:
                logger.info(f"客户端断开连接: {client_id}")
            except Exception as e:
                logger.error(f"处理客户端 {client_id} 时发生错误: {e}")

        async with websockets.serve(handle_client, "localhost", self.port):
            await asyncio.Future()  # serve forever

async def main():
    """Entry point: build the server manager and run the unified server."""
    manager = ASRServerManager(8888)
    logger.info("ASR服务器启动中...")
    await manager.start_unified_server()

if __name__ == "__main__":
    # Run the asyncio event loop until interrupted.
    try:
        asyncio.run(main())
    except KeyboardInterrupt:
        # Ctrl-C: exit quietly after logging the shutdown.
        logger.info("服务器已停止")