# backend/app/ai/consumers.py
import asyncio
import traceback
from channels.generic.websocket import AsyncWebsocketConsumer
from .realtime_demo.audio_manager import DialogSession
from .realtime_demo import config as demo_config
import base64
import json

def pcm_float32_to_wav_bytes(pcm_data: bytes, sample_rate=24000, channels=1):
    """Convert raw float32 PCM bytes into a 16-bit WAV file in memory.

    Samples are interpreted as little-endian float32 in the nominal range
    [-1.0, 1.0]. Out-of-range samples are clipped before scaling: casting
    an overflowing float straight to int16 with ``astype`` would wrap
    around (e.g. 1.5 -> large negative value) and produce audible pops.

    :param pcm_data: raw float32 PCM byte stream
    :param sample_rate: output WAV sample rate in Hz (default 24000)
    :param channels: number of interleaved channels (default 1, mono)
    :return: complete WAV file contents as bytes (16-bit PCM)
    """
    import numpy as np
    import io, wave

    float_data = np.frombuffer(pcm_data, dtype=np.float32)
    # Clip first so values outside [-1, 1] saturate instead of wrapping.
    int16_data = (np.clip(float_data, -1.0, 1.0) * 32767).astype(np.int16)

    with io.BytesIO() as buffer:
        with wave.open(buffer, 'wb') as wf:
            wf.setnchannels(channels)
            wf.setsampwidth(2)  # 16-bit samples
            wf.setframerate(sample_rate)
            wf.writeframes(int16_data.tobytes())
        return buffer.getvalue()
def pcm_s16le_to_wav_bytes(pcm_data: bytes, sample_rate=24000, channels=1):
    """Wrap raw signed 16-bit little-endian PCM bytes in a WAV container.

    The sample bytes are written verbatim; only the RIFF/WAVE header is
    added, so no resampling or format conversion happens here.

    :param pcm_data: raw s16le PCM byte stream
    :param sample_rate: WAV sample rate in Hz (default 24000)
    :param channels: number of interleaved channels (default 1, mono)
    :return: complete WAV file contents as bytes
    """
    import io, wave
    buf = io.BytesIO()
    try:
        writer = wave.open(buf, 'wb')
        try:
            writer.setnchannels(channels)
            writer.setsampwidth(2)  # 16-bit samples
            writer.setframerate(sample_rate)
            writer.writeframes(pcm_data)  # raw s16le passes straight through
        finally:
            writer.close()
        return buf.getvalue()
    finally:
        buf.close()

def to_wav_bytes(pcm_data: bytes, sample_rate=24000, channels=1, src_dtype="float32"):
    """Unified entry point for PCM-to-WAV conversion.

    Dispatches on *src_dtype*:
    - ``"s16le"``   -> :func:`pcm_s16le_to_wav_bytes`
    - anything else -> :func:`pcm_float32_to_wav_bytes` (float32 is the default)
    """
    convert = pcm_s16le_to_wav_bytes if src_dtype == "s16le" else pcm_float32_to_wav_bytes
    return convert(pcm_data, sample_rate=sample_rate, channels=channels)


class AiStreamConsumer(AsyncWebsocketConsumer):
    """
    Real-time voice dialog WebSocket endpoint:
    - The frontend sends audio bytes (PCM/WAV).
    - The backend forwards them to Doubao in real time.
    - Audio bytes returned by Doubao are relayed back to the frontend.
    """

    async def connect(self):
        """Accept the socket, open the Doubao session, send the interviewer
        system prompt, and start the background receiver task."""
        await self.accept()
        print("[DEBUG] Client connected to /ws/ai/stream/")
        try:
            # Create the Doubao real-time dialog session.
            self.session = DialogSession(
                ws_config=demo_config.ws_connect_config,
                output_audio_format=demo_config.start_session_req["tts"]["audio_config"]["format"],
                mod="audio",  # real-time mode
                recv_timeout=demo_config.start_session_req["dialog"]["extra"].get("recv_timeout", 10)
            )
            await self.session.client.connect()

            # System/control instruction: the model must say exactly one fixed
            # opening line, then stop and wait for user input.
            system_boot = (
                "【系统指令】你现在扮演严谨专业的面试考公的人的面试官。连接成功后，"
                "第一句话必须且仅能是："
                "“请坐，我是你的面试官，接下来我会问你一系列面试题，请问你做好准备了吗”"
                "说完这句话后停止输出，等待用户输入。禁止扮演候选人，禁止多说其他内容。"
            )
            await self.session.client.chat_text_query(system_boot)

            # Keep a strong reference to the receiver task: per the asyncio
            # docs, a task returned by create_task() with no reference held
            # may be garbage-collected mid-flight. The reference also lets
            # disconnect() cancel the receiver cleanly.
            self._recv_task = asyncio.create_task(self._recv_from_doubao())
        except Exception as e:
            print("[ERROR] Failed to connect Doubao:", e)
            await self.close()

    async def receive(self, bytes_data=None, text_data=None):
        """Forward an audio frame (binary) or a text command from the client
        to the Doubao session; errors are logged and the socket stays open."""
        try:
            if bytes_data:
                print(f"[DEBUG] got {len(bytes_data)} bytes audio from client")
                await self.session.client.task_request(bytes_data)
            elif text_data:
                print(f"[DEBUG] got text from client: {text_data}")
                await self.session.client.chat_text_query(text_data)
            else:
                print("[DEBUG] receive() called with empty payload")
        except Exception:
            # Best-effort forwarding: log the failure, keep accepting frames.
            print("[ERROR] Forwarding to Doubao failed:")
            print(traceback.format_exc())

    async def _recv_from_doubao(self):
        """Background loop: receive Doubao audio/events and relay them to the
        client, staying inside the same dialog session across turns."""
        try:
            # Doubao output format comes from demo_config; commonly
            # "pcm_s16le" or a float32 stream.
            fmt = demo_config.start_session_req["tts"]["audio_config"]["format"]
            src_dtype = "s16le" if fmt == "pcm_s16le" else "float32"
            sample_rate = 24000  # NOTE(review): assumed fixed by the TTS config — confirm
            channels = 1

            while True:
                resp = await self.session.client.receive_server_response()
                if not isinstance(resp, dict):
                    await self.send(text_data=json.dumps({"type": "event", "data": {"err": "invalid_response"}}))
                    continue

                msg_type = resp.get("message_type")
                event_id = resp.get("event")
                payload = resp.get("payload_msg")

                # 1) Audio frame: pass raw PCM straight through (no WAV wrapping).
                if msg_type == "SERVER_ACK" and isinstance(payload, bytes):
                    await self.send(text_data=json.dumps({
                        "type": "audio_chunk",
                        "data": {
                            "format": src_dtype,  # "s16le" or "float32"
                            "sample_rate": sample_rate,  # 24000
                            "channels": channels,  # 1
                            "b64": base64.b64encode(payload).decode("utf-8")
                        }
                    }))
                    continue

                # 2) Forward events verbatim so the frontend sees 152/153/359 etc.
                await self.send(text_data=json.dumps({"type": "event", "data": resp}))

                # Finish-like boundary events: do not disconnect, continue with
                # the next dialog turn.
                if event_id in (152, 153, 359):
                    print(f"[DEBUG] finish-like event received: {event_id}")
                    continue

        except asyncio.CancelledError:
            # disconnect() cancelled us; fall through to cleanup quietly.
            pass
        except Exception as e:
            print("[ERROR] Error receiving from Doubao:", e)
        finally:
            try:
                await self.session.client.close()
            except Exception:
                pass
            await self.close()

    async def disconnect(self, code):
        """Stop the background receiver and tear down the Doubao session."""
        print(f"[DEBUG] WebSocket disconnected (code={code})")
        # Cancel the receiver first so it cannot race the session shutdown.
        task = getattr(self, "_recv_task", None)
        if task is not None:
            task.cancel()
        # self.session may be absent if connect() failed before assigning it.
        session = getattr(self, "session", None)
        if session is not None:
            try:
                await session.client.finish_session()
                await session.client.close()
            except Exception:
                pass
