from fastapi import WebSocket, WebSocketDisconnect
from typing import Dict
import uuid
import wave
import json
import asyncio
from app.services.emotion_analysis import identify_user_emotions
from app.utils.file_handler import save_audio_file
from app.models.client import clients, send_message
from app.services.streaming_asr_demo import audio2text
import logging
import base64

async def websocket_endpoint(websocket: WebSocket):
    """Handle one WebSocket client: register it by device id, then relay
    audio/control messages until it disconnects.

    Protocol (as implemented here):
      * first frame: JSON text ``{"type": "device_id", "data": <client_id>}``
      * text ``"ping"`` / ``"pong"``: heartbeat in either direction
      * text ``"EOF"``: end of an audio stream -> save buffer, run ASR,
        run emotion analysis on the recognized text
      * binary frames: audio chunks, buffered locally and also forwarded
        (base64-wrapped JSON) to the paired device

    Side effects: registers/unregisters the socket in the module-level
    ``clients`` dict keyed by device id.
    """
    await websocket.accept()  # accept the WebSocket connection

    client_id = None
    # Buffer for incoming audio chunks. Defined *before* the handshake
    # try-block so the message loop can never hit a NameError when the
    # handshake partially fails (original code defined it last inside `try`).
    audio_data = bytearray()

    try:
        # The first message must carry the client's unique device id.
        client_id_message = await websocket.receive()
        logging.info(f"输出接收到的原始消息:{client_id_message}")

        # Parse the handshake payload (raises if 'text' is absent or not JSON).
        client_id_data = json.loads(client_id_message.get('text'))
        logging.info(client_id_data)

        if client_id_data.get('type') == 'device_id':
            client_id = client_id_data.get('data')

        if client_id is None:
            # Without an id we cannot route messages; refuse the connection
            # rather than registering the socket under the key None.
            logging.error("Handshake did not provide a device_id; closing connection")
            await websocket.close()
            return

        clients[client_id] = websocket  # register this client for routing
        logging.info(f"Client connected: {client_id}")
    except Exception as e:
        # Broken handshake: abort instead of falling through into the
        # message loop with an unregistered client.
        logging.error(f"Unexpected error: {e}")
        return

    try:
        while True:
            try:
                # 30s idle timeout so we can proactively ping the client below.
                message = await asyncio.wait_for(websocket.receive(), timeout=30.0)

                if 'text' in message:
                    # Text frame: dispatch on its content.
                    message_text = message['text']
                    if message_text == 'ping':
                        # Client heartbeat: answer with pong.
                        await websocket.send_text('pong')
                    elif message_text == "pong":
                        logging.info("客户端接收到ping，并返回pong")
                    elif message_text == "EOF":
                        # Audio stream finished. Forward EOF to the peer device
                        # (NOTE(review): peer id "Android" is hard-coded here —
                        # confirm whether it should mirror the routing below).
                        await send_message(client_id="Android", message="EOF")
                        # Persist the buffered audio and hand it to ASR.
                        audio_file_path = save_audio_file(audio_data)
                        logging.info(f"Audio file saved at {audio_file_path}")
                        # Clear unconditionally so a failed recognition does not
                        # leak stale audio into the next stream.
                        audio_data.clear()
                        try:
                            # Speech-to-text on the saved file.
                            audio2text_result = await audio2text(audio_file_path)
                            logging.info(audio2text_result)

                            # Extract the recognition payload; default to {} so a
                            # missing 'result' key logs "no results" instead of
                            # raising AttributeError.
                            payload_msg = (audio2text_result.get('result') or {}).get('payload_msg') or {}
                            voice_text_all = payload_msg.get('result')

                            # The payload is expected to be a non-empty list of
                            # utterances; assumes each entry has a 'text' key —
                            # TODO confirm against the ASR service contract.
                            if voice_text_all and isinstance(voice_text_all, list):
                                voice_text = voice_text_all[0]['text']
                                timeSeries_text = voice_text_all[0]

                                logging.info(f"识别文字结果: {voice_text}")

                                if voice_text:
                                    # Run emotion analysis on the time-series result.
                                    await identify_user_emotions(timeSeries_text, client_id)
                            else:
                                logging.error("No results found in audio2text response")
                        except Exception as e:
                            logging.error(f"Unexpected error: {e}")
                    else:
                        # Any other text: echo back to the sender.
                        await send_message(client_id, f"Received text message: {message_text}")
                elif 'bytes' in message:
                    # Binary frame: an audio chunk. Buffer it for later ASR.
                    message_bytes = message['bytes']
                    audio_data.extend(message_bytes)

                    try:
                        # Wrap the chunk as base64 JSON and forward it to the
                        # paired device (temporary hard-coded pairing).
                        message_base64 = base64.b64encode(message_bytes).decode('utf-8')
                        send_json = json.dumps({
                            "audio_bytes": message_base64
                        })

                        if client_id == "iPhone14Pro":
                            await send_message(client_id="Android", message=send_json)
                        elif client_id == "Android":
                            await send_message(client_id="iPhone14Pro", message=send_json)
                    except Exception as e:
                        logging.error(f"Unexpected error: {e}")
                else:
                    # Neither text nor bytes: tell the sender it is unsupported.
                    await send_message(client_id, "Unsupported message type")
            except asyncio.TimeoutError:
                # No traffic within the timeout: send a keep-alive ping.
                logging.info("超时还没有收到消息，主动给客户端发送心跳")
                await websocket.send_text("ping")
    except WebSocketDisconnect:
        # Unregistration is handled once, in `finally`, instead of twice.
        logging.info(f"Client {client_id} disconnected.")
    except Exception as e:
        logging.error(f"Unexpected error: {e}")
    finally:
        # Always remove this client from the registry on exit.
        if client_id in clients:
            del clients[client_id]
