import whisper
from fastapi import FastAPI, WebSocket
import numpy as np
import requests
import json
import ChatTTS
import torch
import torchaudio

app = FastAPI()

# Load the Whisper ASR model once at import time; weights are cached under
# ./model so repeated startups avoid re-downloading.
model = whisper.load_model("tiny", download_root="./model")
# ChatTTS text-to-speech engine, also loaded once and shared by all requests.
chat = ChatTTS.Chat()
chat.load(compile=False) # Set to True for better performance


def extract_latest_content(response):
    """Return the content of the highest-index choice in an LLM response.

    Handles both OpenAI-style response shapes: the completions API
    (``choice["text"]``) and the chat API (``choice["message"]["content"]``).

    Parameters
    ----------
    response : dict
        Parsed JSON body from the model server, expected to contain a
        "choices" list whose items may carry "index", "text", or "message".

    Returns
    -------
    str or None
        The latest choice's content, or None when no choices are present
        or the choice carries no recognizable content field.
    """
    choices = response.get("choices", [])
    if not choices:
        return None

    # Find the choice with the highest index; default missing indices to 0
    # so a malformed choice cannot raise KeyError.
    latest_choice = max(choices, key=lambda c: c.get("index", 0))

    # Completion API shape.
    if "text" in latest_choice:
        return latest_choice["text"]
    # Chat API shape (the previously commented-out alternative).
    message = latest_choice.get("message")
    if message is not None:
        return message.get("content")
    return None

def convert_chinese_punctuation_to_english(text):
    """
    Convert all Chinese punctuation in the given text to English punctuation.

    Parameters
    ----------
    text : str
        The input text containing Chinese punctuation.

    Returns
    -------
    str
        The text with Chinese punctuation converted to English punctuation.
    """
    # str.translate does the whole substitution in a single C-level pass,
    # instead of one full scan of the string per punctuation mark.
    chinese_to_english_punctuation = str.maketrans({
        '，': ',',
        '。': '.',
        '！': '!',
        '？': '?',
        '：': ':',
        '；': ';',
        '（': '(',
        '）': ')',
        '【': '[',
        '】': ']',
        '「': '{',  # NOTE: original mapping; corner brackets → braces
        '」': '}',
        '《': '<',
        '》': '>',
        '、': ',',
        '“': '"',
        '”': '"',
        '‘': "'",
        '’': "'",
    })
    return text.translate(chinese_to_english_punctuation)

@app.websocket("/audio")
async def audio(websocket: WebSocket):    
    """Websocket endpoint: receive raw PCM audio, transcribe it with Whisper,
    query the local llama completion server, and stream back TTS audio.

    Protocol (as implemented here):
      1. Client streams binary audio frames; an empty frame ends the upload.
      2. Server sends the recognized text, then (if the wake phrase is
         present) streams TTS audio in 1024-byte chunks, terminated by an
         empty bytes frame.
    """
    await websocket.accept()
    audio_data = bytearray()
    try:
        while True:
            # NOTE(review): receive_bytes raises WebSocketDisconnect when the
            # client closes; the `not data` break only fires if the client
            # explicitly sends an empty frame — confirm the client does so.
            data = await websocket.receive_bytes()
            if not data:
                break
            audio_data.extend(data)
            print(f"Received data length: {len(data)}")
            print(f"Total audio data length: {len(audio_data)}")
        
        if len(audio_data) > 0:
            # Convert bytearray to numpy array
            # assumes 16-bit little-endian mono PCM at 16 kHz (Whisper's
            # expected rate) — TODO confirm against the client capture code
            audio_np = np.frombuffer(audio_data, dtype=np.int16)
            print(f"Audio numpy array shape: {audio_np.shape}")
            # Convert numpy array to float32
            # int16 → float32 in [-1, 1), then pad/trim to Whisper's 30 s window
            audio_float32 = audio_np.astype(np.float32) / 32768.0
            audio_float32 = whisper.pad_or_trim(audio_float32)
            result = model.transcribe(audio_float32)
            recognized_text = result["text"]
            recognized_text = convert_chinese_punctuation_to_english(recognized_text)
            print(f'Recognized text: {recognized_text}')
            await websocket.send_text(f"{recognized_text}")
            # Wake-word gate: only proceed when the wake phrase appears.
            if "小明" not in recognized_text:
                await websocket.send_text("请说出'小明同学'以启动语音助手")
                return
            
            # send recognized text to the llama, completion
            url = "http://localhost:19327/v1/completions"
            headers = {"Content-Type": "application/json"}
            # System-style instruction is prepended directly to the prompt
            # because the plain completions endpoint has no role separation.
            prompt = "假设你是一个搭载在车辆上的智能语音助手,你所有的回答不超过20字,并且不包括阿拉伯数字和中文标点符号。" + recognized_text
            data = {
                "prompt": prompt
            }

            # NOTE(review): blocking requests.post inside an async handler
            # stalls the event loop while the LLM responds — consider
            # run_in_executor or an async HTTP client.
            response = requests.post(url, headers=headers, json=data)
            # response = requests.post("http://localhost:19327/v1/chat/completions", json=data, headers=headers)
            answer = extract_latest_content(response.json())
            if answer is not None:
                print(f"Answer from llama: {answer}")
                
                # Synthesize speech; wavs[0] is a numpy waveform whose raw
                # bytes are streamed back as-is (dtype/sample-rate are whatever
                # ChatTTS produces — client must match; verify).
                wavs = chat.infer([answer])
                audio_bytes = wavs[0].tobytes()
                
                # send audio bytes in chunks
                chunk_size = 1024
                for i in range(0, len(audio_bytes), chunk_size):
                    chunk = audio_bytes[i:i+chunk_size]
                    await websocket.send_bytes(chunk)
                
                # Indicate the end of the audio data
                await websocket.send_bytes(b'')
            else:
                print(response)
            
    except Exception as e:
        # Broad catch keeps the server alive; the error is echoed to the client.
        print(f"Error: {e}")
        await websocket.send_text(f"Error processing audio, with error: {str(e)}")
    finally:
        # NOTE(review): close() may itself raise if the socket is already
        # closed (e.g. after a client disconnect) — confirm this is acceptable.
        await websocket.close()


if __name__ == "__main__":
    import uvicorn
    # Serve on all interfaces; port 50512 must match the client configuration.
    uvicorn.run(app, host="0.0.0.0", port=50512)