import asyncio
import random
import aiohttp
import websockets
import sounddevice as sd
import numpy as np
import requests
import os
from audio_utils import TTSManager, STTManager

FORMAT = 'int16'       # sample-format label (NOTE(review): not referenced by the visible code — confirm before removing)
CHANNELS = 1           # mono capture
RATE = 16000           # sample rate in Hz used for all recordings
RECORD_SECONDS = 6     # clip length used by record_audio()

# Marker the server appends after the last audio chunk of a streamed reply.
END_MARKER = b'__END__'
sd.default.device = None  # reset the default audio device configuration


async def record_audio():
    """Record RECORD_SECONDS of mono int16 audio and return it as raw bytes.

    Reads the input stream in ~100 ms frames (RATE // 10 samples each); the
    context manager releases the audio device when recording finishes.
    """
    frame_len = RATE // 10
    total_samples = int(RATE * RECORD_SECONDS)
    frames = []
    with sd.InputStream(samplerate=RATE, channels=CHANNELS, dtype='int16') as stream:
        for _ in range(0, total_samples, frame_len):
            frame, _overflowed = stream.read(frame_len)
            frames.append(frame)
    captured = np.concatenate(frames)[:total_samples]
    return captured.flatten().tobytes()


async def record_sd():
    """Record a fixed 5-second mono clip and return it as raw int16 bytes.

    Uses sounddevice's one-shot blocking API (sd.rec + sd.wait). The bytes
    are the little-endian int16 stream format VOSK expects (per the original
    comment on this function).
    """
    duration = 5  # seconds
    recording = sd.rec(
        int(duration * RATE),
        samplerate=RATE,
        channels=CHANNELS,
        dtype='int16',  # make sure the sample type matches the server side
    )
    sd.wait()  # block until recording completes
    # sd.rec already returns an int16 ndarray, so the original
    # np.array(recording, dtype=np.int16) round-trip was a redundant copy.
    return recording.tobytes()


async def test_voice_to_text():
    """Record a short clip and round-trip it through the voice_to_text endpoint."""
    uri = "ws://localhost:8000/ws/voice_to_text"
    async with websockets.connect(uri) as websocket:
        payload = await record_sd()
        await websocket.send(payload)
        reply = await websocket.recv()
        print(f"Received text from server: {reply}")


async def test_text_to_text(text_data):
    """Send *text_data* to the text_to_text endpoint and print the reply."""
    uri = "ws://localhost:8000/ws/text_to_text"
    async with websockets.connect(uri) as websocket:
        await websocket.send(text_data)
        reply = await websocket.recv()
        print(f"Received text from server: {reply}")


async def test_text_to_voice(text_data):
    """Send *text_data* to the text_to_voice endpoint and play back the reply.

    Collects binary chunks until the module-wide END_MARKER appears, writes
    the assembled audio to a temp WAV file, runs it through local STT/TTS,
    then deletes the file.
    """
    uri = "ws://localhost:8000/ws/text_to_voice"
    os.makedirs("output", exist_ok=True)  # robustness: target dir may not exist yet
    tmp_file = "output/t2v-" + str(random.randint(100, 200)) + ".wav"
    async with websockets.connect(uri) as websocket:
        await websocket.send(text_data)
        audio_chunks = []
        try:
            while True:
                chunk = await websocket.recv()
                marker_at = chunk.find(END_MARKER)  # use the shared module constant
                if marker_at != -1:
                    # Cut at the marker's actual position. The original used
                    # chunk[:-len(marker)], which assumed the marker was a
                    # suffix and would leave marker bytes inside the audio if
                    # it arrived mid-chunk.
                    audio_chunks.append(chunk[:marker_at])
                    break
                audio_chunks.append(chunk)
        except websockets.exceptions.ConnectionClosed:
            pass  # server closed early: keep whatever audio already arrived

        voice_data = b''.join(audio_chunks)
        print(f"客户端接收到的语音数据总长度: {len(voice_data)}")

        with open(tmp_file, 'wb') as f:
            f.write(voice_data)
        print("语音文件已保存")

        # Follow-up processing (speech-to-text, local playback).
        with open(tmp_file, 'rb') as f:
            audio_data = f.read()
        text = STTManager.speech_to_text(audio_data)
        print(f"识别出的文本: {text}")
        TTSManager.speak(text)
        await asyncio.sleep(2)
        TTSManager.stop_speaking()

        if os.path.exists(tmp_file):
            os.remove(tmp_file)


async def test_voice_to_voice():
    """Record a question, send it to voice_to_voice, and play the voice reply.

    Collects binary chunks until the module-wide END_MARKER appears, writes
    the assembled audio to a temp WAV file, runs it through local STT/TTS,
    then deletes the file.
    """
    uri = "ws://localhost:8000/ws/voice_to_voice"
    os.makedirs("output", exist_ok=True)  # robustness: target dir may not exist yet
    tmp_file = "output/received_v2v.wav"
    async with websockets.connect(uri) as websocket:
        audio_data = await record_sd()
        await websocket.send(audio_data)
        audio_chunks = []
        try:
            while True:
                chunk = await websocket.recv()
                marker_at = chunk.find(END_MARKER)  # use the shared module constant
                if marker_at != -1:
                    # Cut at the marker's actual position; the original
                    # chunk[:-len(marker)] assumed a suffix and could leak
                    # marker bytes into the audio.
                    audio_chunks.append(chunk[:marker_at])
                    break
                audio_chunks.append(chunk)
        except websockets.exceptions.ConnectionClosed:
            pass  # server closed early: keep whatever audio already arrived
        voice_data = b''.join(audio_chunks)
        print(f"客户端接收到的语音数据总长度: {len(voice_data)}")
        with open(tmp_file, 'wb') as f:
            f.write(voice_data)
        print("语音文件已保存")
        # Follow-up processing (speech-to-text, local playback).
        with open(tmp_file, 'rb') as f:
            audio_data = f.read()
        text = STTManager.speech_to_text(audio_data)
        print(f"识别出的文本: {text}")
        TTSManager.speak(text)
        await asyncio.sleep(2)
        TTSManager.stop_speaking()
        if os.path.exists(tmp_file):
            os.remove(tmp_file)


async def handle_question(websocket):
    """单次提问处理（带异常重试）"""
    for _ in range(3):  # 最多重试3次
        try:
            audio_data = await record_audio()
            await websocket.send(audio_data)
            return await receive_voice_reply(websocket)
        except Exception as e:
            print(f"录音/发送失败，重试: {e}")
    raise Exception("多次重试失败")


async def receive_voice_reply(websocket, end_marker=b'__END__'):
    """Collect binary reply chunks from *websocket* until *end_marker* is seen.

    Args:
        websocket: object exposing an async ``recv()`` method.
        end_marker: byte sequence the server appends after the last chunk
            (defaults to the same value as the module-wide END_MARKER).

    Returns:
        The assembled audio bytes, or None on a 30 s timeout / disconnect so
        the caller can decide to reconnect.
    """
    audio_chunks = []
    while True:
        try:
            chunk = await asyncio.wait_for(websocket.recv(), timeout=30)
        except (asyncio.TimeoutError, websockets.exceptions.ConnectionClosed):
            print("接收超时或连接断开，尝试重新连接...")
            return None  # caller handles reconnection
        marker_at = chunk.find(end_marker)
        if marker_at != -1:
            # Cut at the marker's actual position. The original used
            # chunk[:-len(marker)], which assumed the marker was a suffix and
            # would leak marker bytes into the audio if it arrived mid-chunk.
            audio_chunks.append(chunk[:marker_at])
            break
        audio_chunks.append(chunk)
    return b''.join(audio_chunks)


async def v2v_main():
    """Reconnecting main loop: repeatedly ask questions over voice_to_voice."""
    uri = "ws://localhost:8000/ws/voice_to_voice"

    def _save_and_play(voice_data):
        # Persist the reply and hand it to the OS default player.
        tmp_file = "output/v2v-" + str(random.randint(100, 200)) + ".wav"
        with open(tmp_file, 'wb') as f:
            f.write(voice_data)
        os.system(f"start {tmp_file}")  # Windows; Linux: aplay, macOS: afplay

    while True:
        try:
            async with websockets.connect(uri, ping_interval=10) as websocket:
                print("连接成功，开始提问（按Ctrl+C退出）")
                while True:
                    voice_data = await handle_question(websocket)
                    if not voice_data:
                        break  # connection dropped — reconnect via the outer loop
                    _save_and_play(voice_data)
        except Exception as e:
            print(f"连接失败，5秒后重试: {e}")
            await asyncio.sleep(5)


async def test_stream():
    """POST a prompt to /api/stream and print the JSON response.

    The original called the blocking ``requests`` library inside a coroutine,
    which stalls the event loop for the whole request. aiohttp (already
    imported by this module) performs the same POST asynchronously.
    """
    uri = "http://localhost:8000/api/stream"
    # Payload to send.
    data = {"prompt": "一二三四五六七八九十" * 5}
    async with aiohttp.ClientSession() as session:
        async with session.post(uri, json=data) as response:
            # Check the response status code.
            if response.status == 200:
                print("请求成功！")
                print("响应内容：", await response.json())
            else:
                print(f"请求失败，状态码：{response.status}")


async def test_event():
    """Stream the /audio endpoint and print each chunk that decodes as text."""
    uri = 'http://localhost:8000/audio'
    # NOTE(review): SSE clients conventionally send 'Accept: text/event-stream';
    # sending it as Content-Type on a GET looks unusual — confirm what the
    # server expects before changing it.
    headers = {'Content-Type': 'text/event-stream'}
    async with aiohttp.ClientSession() as session:
        async with session.get(uri, headers=headers) as resp:
            async for part in resp.content.iter_any():
                try:
                    print(part.decode())
                except UnicodeDecodeError:
                    print("无法解码数据块，可能是二进制数据")


async def main():
    """Entry point: uncomment exactly one test coroutine below to run it."""
    # Sample prompts kept for quick manual switching:
    #   "机器学习" / "计算机视觉" / "生成对抗网络" / "介绍一下数据中台"
    #   "介绍一下数据湖" / "数据挖掘的实现细节" / "智慧工厂"
    text_data = ""
    # await test_voice_to_text()
    await test_text_to_text(text_data)
    # await test_text_to_voice(text_data)
    # await test_voice_to_voice()
    # await test_stream()
    # await test_event()


if __name__ == "__main__":
    asyncio.run(main())

    # asyncio.run(v2v_main())
