# -*- coding: utf-8 -*-
"""
FastAPI 语音对话后端服务
基于 together3.py 改写的 Web API 版本

功能说明:
- 提供文本对话API接口
- 支持WebSocket实时语音对话
- 集成讯飞ASR语音识别
- 集成讯飞TTS语音合成
- 集成OpenAI GPT对话模型
"""

import asyncio
import base64
import hashlib
import hmac
import json
import queue
import threading
import time
from datetime import datetime
from typing import Optional
from urllib.parse import quote, urlencode
from wsgiref.handlers import format_date_time

import pyaudio
import websocket
from fastapi import FastAPI, HTTPException, WebSocket, WebSocketDisconnect
from fastapi.middleware.cors import CORSMiddleware
from openai import OpenAI
from pydantic import BaseModel
from websocket import create_connection

# Create the FastAPI application instance
app = FastAPI(title="语音对话API", version="1.0.0")

# Add CORS middleware so browsers may call this API cross-origin
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # production deployments should pin specific origins
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# ========= Configuration ========= #
# NOTE(review): credentials are hard-coded in source; move them to
# environment variables or a secrets manager before deployment.
# iFlytek API credentials - speech recognition (ASR)
XF_APPID_ASR = "9646138c"
XF_APIKEY_ASR = "82adf64eb83cf7b9ec6f4ae215943e0d"

# iFlytek API credentials - speech synthesis (TTS)
XF_APPID_TTS = "9646138c"
XF_APIKEY_TTS = "ea89fb89877aded124af01b32cb14324"
XF_SECRET_TTS = "MzlhYmQyMjI5ODYzYzU2ODM2YWMxYWNk"

# ========= 数据模型定义 ========= #
class ChatRequest(BaseModel):
    """Request body for the /api/chat endpoint."""
    # System prompt that sets the assistant persona; English default below.
    system_prompt: str = "You are chatting with me, your name is xiaoxi. Only reply in English."
    # The user's message text (required).
    user_prompt: str

class ChatResponse(BaseModel):
    """Response body for the /api/chat endpoint."""
    # Whether the request was processed successfully.
    success: bool
    # The assistant's reply text (or an error description).
    message: str
    # Base64-encoded audio of the reply, or None when no audio exists.
    # Declared Optional so the None default is a legal value for the
    # annotated type (the original `str = None` was not).
    audio_data: Optional[str] = None

# ========= GPT 流式处理 ========= #
def gpt_stream_to_queue(q: queue.Queue, user_prompt: str, system_prompt: str = None):
    """
    Stream a GPT reply, split it into speakable chunks, and push them onto q.

    Args:
        q: Queue receiving text chunks; a final ``None`` marks end-of-stream.
        user_prompt: The user's message text.
        system_prompt: Optional system prompt; a default English persona is
            used when omitted.

    The ``None`` sentinel is emitted even when the API call fails, so a
    consumer blocked on ``q.get()`` can never hang forever (the original
    skipped the sentinel on any exception).
    """
    # NOTE(review): API key and base URL are hard-coded; move to configuration.
    client = OpenAI(
        api_key="sk-BPVBVZQbXHrUH6b540DbE34b4c71479f9dD4106dCc29C021",
        base_url="https://api.pumpkinaigc.online/v1"
    )

    messages = [
        {"role": "system", "content": system_prompt or "You are chatting with me, your name is xiaoxi. Only reply in English."},
        {"role": "user", "content": user_prompt}
    ]

    try:
        # Request a streaming completion so chunks arrive token by token.
        resp = client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=messages,
            stream=True
        )

        buffer = ""
        for part in resp:
            delta = part.choices[0].delta
            if not getattr(delta, "content", None):
                continue
            buffer += delta.content
            # Flush once the buffer reaches 80 chars or ends a sentence, so
            # downstream TTS can start on complete phrases as early as possible.
            if len(buffer) >= 80 or buffer.endswith(('.', '!', '?', '。', '！', '？')):
                q.put(buffer)
                buffer = ""
        # Flush whatever remains after the stream ends.
        if buffer.strip():
            q.put(buffer)
    finally:
        q.put(None)  # end-of-stream sentinel — always delivered

# ========= 讯飞 TTS 语音合成 ========= #
class Ws_Param:
    """Holds the request parameters for an iFlytek TTS WebSocket session."""

    def __init__(self, appid, apikey, apisecret, text):
        self.APPID = appid
        self.APIKey = apikey
        self.APISecret = apisecret
        self.Text = text
        # Common section: identifies the application.
        self.CommonArgs = {"app_id": self.APPID}
        # Business section: audio format and voice settings.
        self.BusinessArgs = {
            "aue": "raw",                   # raw PCM audio output
            "auf": "audio/L16;rate=16000",  # 16 kHz, 16-bit samples
            "vcn": "x4_yezi",               # speaker voice
            "tte": "utf8",                  # text encoding
        }
        # Data section: the text to synthesize, base64-encoded, status=2.
        encoded_text = base64.b64encode(self.Text.encode('utf-8')).decode("utf8")
        self.Data = {"status": 2, "text": encoded_text}

    def create_url(self):
        """
        Build the authenticated WebSocket URL for the TTS service.

        Returns:
            str: wss:// URL carrying HMAC-SHA256 auth query parameters.
        """
        endpoint = "wss://tts-api.xfyun.cn/v2/tts"
        rfc1123_date = format_date_time(time.mktime(datetime.now().timetuple()))

        # Sign the canonical request string with the API secret.
        signed_lines = "\n".join([
            "host: ws-api.xfyun.cn",
            f"date: {rfc1123_date}",
            "GET /v2/tts HTTP/1.1",
        ])
        digest = hmac.new(
            self.APISecret.encode(), signed_lines.encode(), digestmod=hashlib.sha256
        ).digest()
        signature = base64.b64encode(digest).decode()

        # Assemble and base64-encode the authorization header value.
        auth_raw = (
            f'api_key="{self.APIKey}", algorithm="hmac-sha256", '
            f'headers="host date request-line", signature="{signature}"'
        )
        auth_b64 = base64.b64encode(auth_raw.encode()).decode()

        query = urlencode({
            "authorization": auth_b64,
            "date": rfc1123_date,
            "host": "ws-api.xfyun.cn",
        })
        return f"{endpoint}?{query}"

def tts_synthesize(text):
    """
    Synthesize speech for ``text`` via the iFlytek TTS WebSocket API.

    Args:
        text: The text to synthesize.

    Returns:
        str: Base64-encoded raw PCM audio, or None when synthesis failed
        or produced no audio.

    Note:
        Blocks until the WebSocket session finishes; run it in a worker
        thread when calling from async code.
    """
    audio_chunks = []              # accumulated audio frames
    done_flag = threading.Event()  # set when the socket closes

    def on_message(ws, message):
        """Collect audio frames; close the socket on the final frame or on error."""
        msg = json.loads(message)
        # A non-zero code means the service rejected the request, and an
        # error frame carries no "data" section — the original indexed
        # msg["data"] unconditionally and raised KeyError on such frames.
        if msg.get("code", 0) != 0 or "data" not in msg:
            print("TTS service error:", msg.get("code"), msg.get("message"))
            ws.close()
            return
        audio_chunks.append(base64.b64decode(msg["data"]["audio"]))
        if msg["data"]["status"] == 2:  # status 2 marks the final frame
            ws.close()

    def on_error(ws, error):
        """Report transport-level WebSocket errors."""
        print("TTS ws error:", error)

    def on_close(ws, close_status_code, close_msg):
        """Signal completion once the socket is closed."""
        done_flag.set()

    def on_open(ws):
        """Send the synthesis request as soon as the connection is up."""
        def run(*_):
            req = {
                "common": wsParam.CommonArgs,
                "business": wsParam.BusinessArgs,
                "data": wsParam.Data
            }
            ws.send(json.dumps(req))
        threading.Thread(target=run).start()

    wsParam = Ws_Param(XF_APPID_TTS, XF_APIKEY_TTS, XF_SECRET_TTS, text)
    websocket.enableTrace(False)

    ws = websocket.WebSocketApp(
        wsParam.create_url(),
        on_open=on_open,
        on_message=on_message,
        on_error=on_error,
        on_close=on_close
    )

    # run_forever blocks until the socket closes.
    # NOTE(review): cert_reqs=0 disables certificate verification — confirm
    # this is intentional for the target endpoint.
    ws.run_forever(sslopt={"cert_reqs": 0})
    done_flag.wait()

    # Concatenate all frames and return them base64-encoded.
    if audio_chunks:
        return base64.b64encode(b''.join(audio_chunks)).decode()
    return None

# ========= 讯飞 ASR 语音识别 ========= #
def gen_signa(appid, apikey, ts):
    """
    Compute the authentication signature for the iFlytek real-time ASR API.

    Args:
        appid: Application id.
        apikey: API key used as the HMAC secret.
        ts: Unix timestamp string.

    Returns:
        str: Base64-encoded HMAC-SHA1 over the MD5 hex digest of appid+ts.
    """
    md5_hex = hashlib.md5((appid + ts).encode()).hexdigest()
    mac = hmac.new(apikey.encode(), md5_hex.encode(), digestmod='sha1')
    return base64.b64encode(mac.digest()).decode()

class XFRealtimeASR:
    """iFlytek real-time ASR client: streams microphone audio, collects text."""

    def __init__(self, appid, apikey, vad_eos_ms=1500):
        """
        Initialize the ASR client.

        Args:
            appid: iFlytek application id.
            apikey: iFlytek API key.
            vad_eos_ms: Silence duration (ms) after which the service
                considers the utterance finished.
        """
        self.appid = appid
        self.apikey = apikey
        self.vad_eos_ms = vad_eos_ms
        self.ws = None
        self.handshake_done = threading.Event()  # set once the server confirms the session
        self.closed = False
        self.end_tag = "{\"end\":true}"
        self.result_text = ""          # latest recognition result (full sentence so far)
        self.last_result_time = None   # timestamp of the most recent result frame
        self.record_start_time = None  # timestamp of the first result frame

    def connect(self):
        """Open the WebSocket and start the background receive thread."""
        ts = str(int(time.time()))
        signa = gen_signa(self.appid, self.apikey, ts)
        url = f"ws://rtasr.xfyun.cn/v1/ws?appid={self.appid}&ts={ts}&signa={quote(signa)}&vad_eos={self.vad_eos_ms}"
        self.ws = create_connection(url)
        # Daemon thread: dies with the process even if recv blocks.
        threading.Thread(target=self._recv_thread_func, daemon=True).start()

    def start_mic(self):
        """
        Record from the microphone and stream it for recognition.

        Blocks until the utterance ends (10 s total recording, or silence
        longer than vad_eos_ms after speech started).

        Returns:
            str: The recognized sentence, or "" when the handshake failed.
        """
        # Wait for the server handshake before sending any audio.
        if not self.handshake_done.wait(10):
            print("握手失败")
            self.close()
            return ""

        # 16 kHz mono 16-bit PCM in 640-byte chunks.
        CHUNK = 640
        FORMAT = pyaudio.paInt16
        CHANNELS = 1
        RATE = 16000

        pa = pyaudio.PyAudio()
        stream = pa.open(
            format=FORMAT,
            channels=CHANNELS,
            rate=RATE,
            input=True,
            frames_per_buffer=CHUNK
        )
        print("【开始说话，静音 2.5 秒或 10 秒总长自动结束】")

        try:
            while not self.closed:
                data = stream.read(CHUNK, exception_on_overflow=False)
                self.ws.send(data, opcode=0x2)  # opcode 0x2 = binary frame

                # Stop on total-length timeout or on post-speech silence.
                if self.record_start_time:
                    elapsed = time.time() - self.record_start_time
                    if elapsed >= 10 or (time.time() - self.last_result_time) > (self.vad_eos_ms / 1000):
                        print("【句子结束】")
                        break
                time.sleep(0.04)
        except KeyboardInterrupt:
            print("用户中断")
        finally:
            # Release audio resources and close the socket.
            stream.stop_stream()
            stream.close()
            pa.terminate()
            self.close()
            time.sleep(0.2)
        # Return OUTSIDE the finally block: the original returned from
        # inside ``finally``, which silently discarded any in-flight
        # exception (flake8 B012).
        return self.result_text

    def parse_data_text(self, data_str):
        """
        Extract the recognized text from an ASR "result" payload.

        Args:
            data_str: JSON string from the server's "result" action.

        Returns:
            str: Concatenated recognized words, or "" on parse failure.
        """
        try:
            data_dict = json.loads(data_str)
            words = []
            # Walk the cn.st.rt[].ws[].cw[].w nesting of the result payload.
            for rt in data_dict.get('cn', {}).get('st', {}).get('rt', []):
                for ws in rt['ws']:
                    for cw in ws['cw']:
                        words.append(cw['w'])
            return ''.join(words)
        except Exception as e:
            print("解析 ASR 文本出错:", e)
            return ""

    def _recv_thread_func(self):
        """Background loop: read server frames and update recognition state."""
        try:
            while self.ws.connected:
                result = self.ws.recv()
                if not result:
                    break
                j = json.loads(result)

                if j["action"] == "started":
                    # Handshake confirmed; recording may begin.
                    self.handshake_done.set()
                elif j["action"] == "result":
                    txt = self.parse_data_text(j.get("data", ""))
                    if txt:
                        print(f"【实时识别】{txt}")
                        # Each frame carries the full sentence so far, so
                        # overwrite rather than append.
                        self.result_text = txt
                        self.last_result_time = time.time()
                        if self.record_start_time is None:
                            self.record_start_time = time.time()
                elif j["action"] == "error":
                    print("ASR 错误:", j)
                    self.closed = True
                    self.ws.close()
                    break
        except Exception as e:
            # Suppress the expected exception raised when close() tears
            # down the socket under this thread.
            if not self.closed:
                print("ASR 接收线程异常:", e)

    def close(self):
        """Close the WebSocket connection; safe to call more than once."""
        self.closed = True
        try:
            if self.ws:
                self.ws.close()
                print("ASR WebSocket 关闭")
        except Exception:
            # Socket may already be closed; nothing left to clean up.
            pass
# ========= API 路由定义 ========= #
@app.post("/api/chat", response_model=ChatResponse)
async def chat_endpoint(request: ChatRequest):
    """
    Chat endpoint: accept text, return the AI reply text plus synthesized audio.

    Args:
        request: ChatRequest carrying system_prompt and user_prompt.

    Returns:
        ChatResponse with the reply text and base64-encoded audio.

    Raises:
        HTTPException: 400 when GPT produces no reply, 500 on other failures.
    """
    def _generate_reply() -> str:
        """Run the blocking GPT pipeline and collect the full reply text."""
        q = queue.Queue()
        gpt_stream_to_queue(q, request.user_prompt, request.system_prompt)
        parts = []
        while True:
            chunk = q.get()
            if chunk is None:
                break
            parts.append(chunk)
        return "".join(parts)

    try:
        # GPT streaming and TTS are blocking calls; run them in worker
        # threads so the event loop keeps serving other requests.
        loop = asyncio.get_running_loop()
        gpt_response = await loop.run_in_executor(None, _generate_reply)

        if not gpt_response.strip():
            raise HTTPException(status_code=400, detail="GPT未生成有效回复")

        audio_data = await loop.run_in_executor(None, tts_synthesize, gpt_response)

        if not audio_data:
            raise HTTPException(status_code=500, detail="TTS合成失败")

        return ChatResponse(
            success=True,
            message=gpt_response,
            audio_data=audio_data
        )
    except HTTPException:
        # Re-raise deliberate HTTP errors unchanged — the original generic
        # handler below caught them and re-wrapped the 400 as a 500.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"处理失败: {str(e)}")

@app.websocket("/ws/voice-chat")
async def voice_chat_websocket(websocket: WebSocket):
    """
    WebSocket voice-chat endpoint.

    Flow: accept connection → ASR handshake → record & recognize speech →
    GPT reply → TTS audio → completion message.  Every update is sent as a
    JSON text frame with a "type" field.
    """
    await websocket.accept()
    asr = None

    async def send_json(payload: dict) -> None:
        """Send one JSON frame, ignoring failures on an already-closed socket."""
        try:
            await websocket.send_text(json.dumps(payload))
        except Exception:
            # The client may have disconnected mid-conversation; sending the
            # error itself must not raise a secondary exception (the original
            # error handler did exactly that).
            pass

    try:
        # Set up the ASR session.
        asr = XFRealtimeASR(XF_APPID_ASR, XF_APIKEY_ASR, vad_eos_ms=1500)
        asr.connect()

        # Wait for the server handshake.
        if not asr.handshake_done.wait(10):
            await send_json({"type": "error", "message": "ASR连接失败"})
            return

        await send_json({"type": "status", "message": "ASR连接成功，可以开始说话"})

        # Record and recognize.  NOTE(review): start_mic blocks the event
        # loop; run it in a thread if other clients must be served
        # concurrently.
        user_text = asr.start_mic()

        if not user_text.strip():
            await send_json({"type": "error", "message": "未识别到语音"})
            return

        # Deliver the recognition result.
        await send_json({"type": "recognition", "text": user_text})

        # Generate the GPT reply.
        q = queue.Queue()
        gpt_stream_to_queue(q, user_text)

        gpt_response = ""
        while True:
            chunk = q.get()
            if chunk is None:
                break
            gpt_response += chunk

        await send_json({"type": "gpt_response", "text": gpt_response})

        # Synthesize and deliver the audio.
        audio_data = tts_synthesize(gpt_response)
        if audio_data:
            await send_json({"type": "audio", "data": audio_data})

        await send_json({"type": "complete", "message": "对话完成"})

    except WebSocketDisconnect:
        print("WebSocket连接断开")
    except Exception as e:
        await send_json({"type": "error", "message": f"处理失败: {str(e)}"})
    finally:
        # Always release the ASR socket, even on error paths — the original
        # leaked it when an exception occurred before start_mic completed.
        if asr is not None:
            asr.close()

@app.get("/")
async def root():
    """Root endpoint: return basic service metadata."""
    return {
        "message": "语音对话API服务",
        "version": "1.0.0",
    }

@app.get("/health")
async def health_check():
    """Health-check endpoint: report liveness and the current server time."""
    now = time.time()
    return {"status": "healthy", "timestamp": now}

# Main entry point: serve the API with uvicorn when run as a script.
if __name__ == "__main__":
    import uvicorn
    print("🚀 启动语音对话API服务...")
    print("📍 服务地址: http://localhost:8000")
    print("📖 API文档: http://localhost:8000/docs")
    uvicorn.run(app, host="0.0.0.0", port=8000) 