import argparse
import asyncio
import os
import random
import tempfile

import gradio as gr
import numpy as np
import requests
import torch

import ChatTTS

# --- Module-level setup: load the TTS model and configure the STT API ---
print("loading ChatTTS model...")
chat = ChatTTS.Chat()
chat.load_models()

# Gitee AI hosted Whisper endpoint used for speech-to-text.
API_URL = "https://ai.gitee.com/api/endpoints/hacker-zj/whisper-tiny-6705/inference"

# SECURITY: a bearer token should not be committed to source control.
# Allow an environment override; the literal default is kept only for
# backward compatibility with existing deployments.
API_TOKEN = os.environ.get(
    "WHISPER_API_TOKEN",
    "eyJpc3MiOiJodHRwczovL2FpLmdpdGVlLmNvbSIsInN1YiI6IjM5MDkwIn0.UHEdpOnAye_zLVe3FEV8K5iCjwuK38ptlLQIRdyidY7gIlpdtG3SwkzEpZVus3UUCc-CaSu9BFi4n62Mv_G9DA",
)
headers = {
    "Authorization": f"Bearer {API_TOKEN}",
    "Content-Type": "audio/flac",
}

async def query(filename):
    """Transcribe an audio file via the remote Whisper endpoint.

    Args:
        filename: Path to an audio file (the request headers declare
            ``audio/flac``).

    Returns:
        The transcribed text, or None when the response carries no
        ``text`` field.

    Raises:
        requests.HTTPError: if the API responds with an error status.
    """
    with open(filename, "rb") as f:
        data = f.read()
    # requests is blocking; run it in a worker thread so the asyncio
    # event loop (and therefore the Gradio UI) is not frozen while the
    # remote API call is in flight.
    response = await asyncio.to_thread(
        requests.post, API_URL, headers=headers, data=data
    )
    # Surface HTTP failures explicitly instead of a confusing KeyError
    # (or a silent None) further down the pipeline.
    response.raise_for_status()
    # Parse the body once; .get() avoids a KeyError when 'text' is absent.
    return response.json().get('text')

def generate_audio(text, temperature=0.3, top_P=0.7, top_K=20, audio_seed_input=42, text_seed_input=42):
    """Synthesize speech for *text* with ChatTTS.

    The audio seed makes the randomly drawn speaker embedding
    reproducible; the text seed is set again immediately before
    inference. Returns ``(sample_rate, samples)`` in the form a Gradio
    Audio component accepts.
    """
    # Draw a deterministic speaker embedding from the audio seed.
    torch.manual_seed(audio_seed_input)
    speaker_embedding = torch.randn(768)

    infer_code_params = {
        'spk_emb': speaker_embedding,
        'temperature': temperature,
        'top_P': top_P,
        'top_K': top_K,
    }
    refine_text_params = {'prompt': '[oral_2][laugh_0][break_6]'}

    # Re-seed so the text-side sampling is reproducible as well.
    torch.manual_seed(text_seed_input)

    waveforms = chat.infer(
        text,
        skip_refine_text=True,
        params_refine_text=refine_text_params,
        params_infer_code=infer_code_params,
    )

    # Flatten the first waveform into a 1-D numpy array at ChatTTS's
    # 24 kHz output rate.
    return 24000, np.array(waveforms[0]).flatten()

async def process_audio_file(audio_file):
    """Full pipeline: recorded speech -> text -> re-synthesized speech.

    Args:
        audio_file: The recording from the Gradio Audio component —
            raw bytes, a str path, or a temp-file wrapper exposing
            ``.name``, depending on the component's ``type`` setting.

    Returns:
        ``(transcribed_text, (sample_rate, audio_data))`` matching the
        two UI outputs.

    Raises:
        gr.Error: when transcription yields no text.
    """
    # Normalize the input to a file path. The original code assumed raw
    # bytes and wrote them to a fixed, shared filename, which both races
    # between concurrent requests and fails for path/file inputs.
    temp_path = None
    try:
        if isinstance(audio_file, (bytes, bytearray)):
            # Uniquely named temp file so concurrent requests do not
            # clobber each other.
            with tempfile.NamedTemporaryFile(suffix=".flac", delete=False) as tmp:
                tmp.write(audio_file)
                temp_path = tmp.name
            audio_path = temp_path
        elif hasattr(audio_file, "name"):
            audio_path = audio_file.name
        else:
            audio_path = audio_file

        # Speech -> text via the remote Whisper endpoint.
        text = await query(audio_path)
    finally:
        # Best-effort cleanup of the temp file we created (if any).
        if temp_path is not None:
            try:
                os.remove(temp_path)
            except OSError:
                pass

    if not text:
        # Fail visibly in the UI instead of crashing chat.infer on None.
        raise gr.Error("Transcription returned no text.")

    # Text -> speech with ChatTTS.
    sample_rate, audio_data = generate_audio(text)

    return text, (sample_rate, audio_data)

# --- Gradio UI definition ---
# NOTE(review): `source=` and `type="file"` are legacy Gradio 3 Audio
# arguments; with `type="file"` the handler receives a temp-file object,
# not raw bytes — confirm this matches what process_audio_file expects
# and that the installed Gradio version still accepts these kwargs.
with gr.Blocks() as demo:
    gr.Markdown("# Audio to Text and Text to Speech")

    with gr.Row():
        # Microphone recording; processing starts only on button click.
        audio_input = gr.Audio(source="microphone", type="file", label="Record Audio")
        record_button = gr.Button("Process")

    # Read-only transcription result and the re-synthesized speech.
    text_output = gr.Textbox(label="Transcribed Text", interactive=False)
    audio_output = gr.Audio(label="Generated Audio")

    # Wire the button: audio -> (text, audio) via the async pipeline.
    record_button.click(process_audio_file, 
                        inputs=audio_input, 
                        outputs=[text_output, audio_output])

parser = argparse.ArgumentParser(description='ChatTTS demo Launch')
parser.add_argument('--server_name', type=str, default='0.0.0.0', help='Server name')
parser.add_argument('--server_port', type=int, default=8080, help='Server port')
args = parser.parse_args()

if __name__ == '__main__':
    demo.launch(server_name=args.server_name, server_port=args.server_port, inbrowser=True)
