# Copyright (c) 2024-present AI-Labs

import streamlit as st
import os, subprocess

import requests, json, base64
import uuid
from datetime import datetime

from configs import config


def init_voice_config_form():
    """Render the speech-generation configuration form.

    Each widget stores its value in ``st.session_state`` under its ``key``
    (``config_voice_*``); the TTS request builder reads them from there,
    so the return values of the widget calls are intentionally discarded.
    """
    st.selectbox("Model", key="config_voice_model", options=["opea-gateway"])
    col1, col2 = st.columns(2)
    with col1:
        st.slider("Temperature", key="config_voice_temperature", min_value=0.1, max_value=2.0, value=0.3, step=0.1)
        # Top-P is a probability mass cutoff, so it must lie in (0, 1].
        # The previous range (1.0-5.0) excluded the default 0.7 and made
        # Streamlit raise StreamlitAPIException at render time.
        st.slider("Top P", key="config_voice_top_p", min_value=0.1, max_value=1.0, value=0.7, step=0.1)
    with col2:
        st.slider("Seed", key="config_voice_seed", min_value=1, max_value=1000, value=42, step=1)
        st.slider("Top K", key="config_voice_top_k", min_value=1, max_value=100, value=20, step=1)


def text_to_voice(voice_text):
    """Synthesize speech for *voice_text* via the backend TTS service.

    Args:
        voice_text: Text to convert into speech.

    Returns:
        Path of the generated ``.wav`` file, saved under the current
        user's ``users/<username>/voices`` directory.

    Raises:
        requests.HTTPError: If the TTS service responds with an error status.
    """
    localdir = f"users/{st.session_state.username}/voices"
    os.makedirs(localdir, exist_ok=True)
    localfile = f"{localdir}/{uuid.uuid4()}.wav"

    # Build the TTS request.  Widget values live in session_state only
    # after the config form has been rendered, so fall back to the same
    # defaults the form would produce when the keys are absent.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    data = json.dumps({
        'text': voice_text,
        'temperature': st.session_state.get("config_voice_temperature", 0.3),
        'top_P': st.session_state.get("config_voice_top_p", 0.7),
        'top_K': st.session_state.get("config_voice_top_k", 20),
        'audio_seed_input': st.session_state.get("config_voice_seed", 42),
        'text_seed_input': st.session_state.get("config_voice_seed", 42),
    })
    # Call the backend text-to-speech service.
    response = requests.post(url=config.opea_service.endpoint.tts, headers=headers, data=data)
    # Surface HTTP failures instead of trying to decode an error body.
    response.raise_for_status()

    # The service returns base64-encoded audio; decode and persist locally.
    with open(localfile, "wb") as file:
        file.write(base64.b64decode(response.json()["byte_str"]))

    return localfile

def voice_to_text(localdir, filename):
    """Transcribe a local audio file via the backend ASR service.

    Args:
        localdir: Directory that contains the audio file.
        filename: Name of the audio file inside *localdir*.

    Returns:
        The recognized text (the service response's ``"query"`` field).

    Raises:
        requests.HTTPError: If the ASR service responds with an error status.
    """
    # Read the audio file into bytes.  The original code opened a literal
    # "(unknown)" path and never used the `filename` parameter — fixed here.
    with open(f"{localdir}/{filename}", 'rb') as audio_file:
        audio_bytes = audio_file.read()

    # ASR request parameters; the body is JSON, so declare it explicitly
    # (consistent with text_to_voice).
    headers = {
        'Connection': 'keep-alive',
        'Content-Type': 'application/json; charset=utf-8',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.0.0 Safari/537.36'
    }
    data = json.dumps({
        "byte_str": base64.b64encode(audio_bytes).decode('utf-8')
    })
    # Call the backend speech-recognition service.
    response = requests.post(url=config.opea_service.endpoint.asr, headers=headers, data=data)
    response.raise_for_status()

    return response.json()["query"]

def voice_to_voice(localdir, filename):
    """Run a voice-to-voice chat turn through the backend audio-chat service.

    Sends the recorded audio to the audio-chat endpoint and saves the
    spoken reply as a new ``.wav`` file under the current user's voices
    directory.

    Args:
        localdir: Directory that contains the input audio file.
        filename: Name of the input audio file inside *localdir*.

    Returns:
        Path of the saved reply ``.wav`` file.

    Raises:
        requests.HTTPError: If the audio-chat service responds with an error status.
    """
    # Read the input audio into bytes.  The original code opened a literal
    # "(unknown)" path and never used the `filename` parameter — fixed here.
    with open(f"{localdir}/{filename}", 'rb') as audio_file:
        audio_bytes = audio_file.read()

    # Audio-chat request parameters; body is JSON, so declare it explicitly.
    headers = {
        'Connection': 'keep-alive',
        'Content-Type': 'application/json; charset=utf-8',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.0.0 Safari/537.36'
    }
    data = json.dumps({
        "audio": base64.b64encode(audio_bytes).decode('utf-8')
    })
    # Call the backend audio-chat service.
    response = requests.post(url=config.opea_service.endpoint.audiochatservice, headers=headers, data=data)
    response.raise_for_status()

    # Save the base64-encoded reply audio locally.  Use a distinct name so
    # the input `localdir` parameter is not shadowed.
    outdir = f"users/{st.session_state.username}/voices"
    os.makedirs(outdir, exist_ok=True)
    localfile = f"{outdir}/{uuid.uuid4()}.wav"

    with open(localfile, "wb") as file:
        file.write(base64.b64decode(response.json()["byte_str"]))

    return localfile
