import streamlit as st
import uuid
import base64
import os
import numpy as np
import time
import re
from macros import *
from utils.generation import SAMPLE_RATE, generate_audio, preload_models, get_preload_models
from scipy.io.wavfile import write as write_wav
from pydub import AudioSegment
from utils.prompt_making import make_prompt
from config import *
import asyncio
from scipy.signal import butter, lfilter

# Page header for the Streamlit app.
st.title("B4-VALL-E-X大模型 🎤|")

# Sidebar selector; `main()` at the bottom of the file dispatches to one
# generation workflow based on this choice.
button_choose = st.sidebar.radio(
    "请选择你想要的VALL-E-X大模型能力:",
    key="visibility",
    options=[
        "文本生成语音",
        "多语种混合语音生成",
        "特定音色语音生成",
        "短文本声音克隆",
        "长文本语音克隆（研发中）"
    ]
)


# Maps the UI emotion label to the prompt name passed to generate_wav /
# generate_audio (see utils.generation).
emotion_map = {"愤怒": "emo_anger", "幽默": "emo_amused", "瞌睡": "emo_sleepy", "动漫角色——派蒙": "paimon", "中立": "emo_neutral"}

# Accepted duration bounds (whole seconds) for an uploaded cloning sample.
# NOTE(review): UI copy says 3~10s but the upper bound here is 15 — confirm intended.
MIN_LEN, MAX_LEN = 3, 15


def is_english(char):
    """Return False for a CJK Unified Ideograph, True for anything else.

    NOTE(review): characters outside U+4E00–U+9FFF (digits, punctuation,
    other scripts) are all classified as "English" by this heuristic.
    """
    return not ('\u4e00' <= char <= '\u9fff')


def generate_wav(prompt_text, file_name, name=None, language='auto'):
    """Synthesize `prompt_text` with the VALL-E-X model, save it as a wav,
    and render playback plus a download button in the Streamlit page.

    Args:
        prompt_text: Text to synthesize.
        file_name: Base name for the saved file (a short uuid is appended).
        name: Voice/prompt name forwarded to generate_audio (None = default).
        language: Language hint forwarded to generate_audio.
    """
    with st.spinner(text="正在生成你的专属语音，请稍等..."):
        audio_array = generate_audio(prompt_text, prompt=name, language=language)
        # URL-safe, padding-free short id keeps filenames unique across reruns.
        uuid_bytes = str(uuid.uuid1()).encode('utf-8')
        shortuuid = base64.urlsafe_b64encode(uuid_bytes).decode('utf-8').rstrip("=")
        # Compute the output path once instead of rebuilding it at each use.
        wav_path = os.path.join(wav_save_path, f'{file_name}_{shortuuid}.wav')
        write_wav(wav_path, SAMPLE_RATE, audio_array)
    st.success("音频文件已生成", icon="✅")

    # BUG FIX: the original opened the file twice and never closed the first
    # handle. Read it once under `with` and reuse the bytes for both widgets.
    with open(wav_path, 'rb') as audio_file:
        audio_bytes = audio_file.read()
    st.audio(audio_bytes, format='audio/wav')
    st.download_button(label="Download generated audio",
                       data=audio_bytes,
                       file_name=f'{file_name}.wav')


def audio_study(upload_file, wav_file_path, transcript=None):
    """Persist the uploaded voice sample and build the "user" voice prompt.

    Writes the uploaded buffer to `wav_file_path`, then calls make_prompt;
    the transcript is only forwarded when it is non-empty.
    """
    with st.spinner("🤖AI正在模拟你的语音特征"):
        with open(wav_file_path, 'wb') as sink:
            sink.write(upload_file.getbuffer())
        # Empty/None transcript -> let make_prompt transcribe on its own.
        extra = {'transcript': transcript} if transcript else {}
        make_prompt(name="user", audio_prompt_path=wav_file_path, **extra)
    st.success("AI已成功学习到你的语音特征！接下来请再输入你想生成的语音文本")


def butter_bandpass(lowcut, highcut, fs, order=5):
    """Design an order-`order` Butterworth band-pass filter.

    `lowcut`/`highcut` are cutoff frequencies in Hz; they are normalized
    by the Nyquist frequency (fs / 2) as scipy's `butter` expects.
    Returns the (b, a) transfer-function coefficient arrays.
    """
    nyquist = fs * 0.5
    return butter(order, [lowcut / nyquist, highcut / nyquist], btype='band')


def butter_bandpass_filter(data, lowcut, highcut, fs, order=5):
    """Band-pass `data` (1-D sample sequence) and return the filtered signal."""
    numer, denom = butter_bandpass(lowcut, highcut, fs, order=order)
    return lfilter(numer, denom, data)


def remove_noise(input_file, output_file, lowcut, highcut, order=5):
    """Band-pass filter a wav file to suppress out-of-band noise.

    Args:
        input_file: Path of the source wav.
        output_file: Path the filtered wav is exported to.
        lowcut, highcut: Pass-band edges in Hz.
        order: Butterworth filter order.
    """
    audio = AudioSegment.from_wav(input_file)
    # pydub yields integer PCM samples; remember the dtype to restore it.
    samples = np.array(audio.get_array_of_samples())
    sample_rate = audio.frame_rate
    clean_samples = butter_bandpass_filter(samples, lowcut, highcut, sample_rate, order)
    # BUG FIX: lfilter returns float64 (8 bytes/sample) but the original
    # declared a hard-coded sample_width=4, so the exported wav was
    # corrupted. Clip to the integer range and convert back to the
    # source dtype, mirroring the source's own sample width.
    info = np.iinfo(samples.dtype)
    clean_samples = np.clip(clean_samples, info.min, info.max).astype(samples.dtype)
    clean_audio = AudioSegment(
        data=clean_samples.tobytes(),
        sample_width=audio.sample_width,
        frame_rate=sample_rate,
        channels=audio.channels
    )
    clean_audio.export(output_file, format="wav")
    

async def audio_clone():
    """Voice-cloning workflow: upload a short sample, learn its timbre with
    make_prompt (as the "user" prompt), then synthesize user text with it.
    """
    try:
        st.markdown("你需要提供一段3~10秒长的语音wav格式的文件，以及该语音对应的文本，来制作音频提示。当然，你也可以将文本留空。")
        upload_file = st.file_uploader("请上传一段该人物的3~10s以内的语音作为样本")
        transcript = st.text_input("请输入你上传文本的语音文本内容，可以帮助我们克隆得更加准确！（可以留空）")
        temp_file = "./generated_files/tmp.wav"
        wav_file_path = os.path.join('./prompts/', 'user.wav')
        if st.button("开始克隆"):
            if upload_file is not None:
                # Persist the upload so later Streamlit reruns can re-read it.
                with open(temp_file, 'wb') as f:
                    f.write(upload_file.getbuffer())
            else:
                st.warning("警告：尚未上传语音文件！")
        # BUG FIX: the original unconditionally opened temp_file; on the
        # first render (nothing uploaded yet) that raised FileNotFoundError,
        # which the broad except below silently printed. Bail out quietly
        # until a sample actually exists.
        if not os.path.isfile(temp_file):
            return
        audio = AudioSegment.from_file(temp_file)
        wav_length = int(audio.duration_seconds)
        if MIN_LEN <= wav_length <= MAX_LEN:
            audio_study(upload_file, wav_file_path, transcript=transcript)
            prompt_text = st.text_input("请输入你想生成的语音文本", key="prompt_text")
            if st.button("确认"):
                st.write("你输入了", prompt_text)
                generate_wav(prompt_text, "clone", name="user")
                st.success("成功生成")
        else:
            st.warning("警告： 上传的语音文件时间不在给定时间范围内, 请更换文件重新上传")
    except Exception as e:
        # Surface the failure in the UI instead of only the server console.
        st.error(f"音频克隆失败：{e}")
        print(e)


async def sps_vocal_generation():
    """Generate speech in a user-selected preset voice/emotion."""
    try:
        prompt = st.text_input("请输入你需要通过语音生成的文本: Enter键生成")
        # BUG FIX: the original discarded st.radio's return value and then
        # checked session_state for a key the keyed widget had already set,
        # so the "中立" fallback was dead code. Use the selection directly;
        # st.radio defaults to the first option, never None here.
        choice = st.radio("请选择你想生成的音色:",
                          key="prompt",
                          options=["愤怒", "幽默", "瞌睡", "动漫角色——派蒙", "中立"])
        vocal_type = emotion_map[choice]
        file_name = f"{vocal_type}"
        if prompt:
            generate_wav(prompt, file_name, name=vocal_type)
    except Exception as e:
        print(e)


def sentence_constructor(words):
    """Wrap contiguous Chinese/English runs with [ZH]/[EN] tags for mixed TTS.

    Strips digits, ',' and all non-word characters (note: this also removes
    spaces between English words), then emits each same-language run as
    ``<tag>run<tag>``, e.g. ``"ab中" -> "[EN]ab[EN][ZH]中[ZH]"``.

    Args:
        words: Raw mixed-language input text.

    Returns:
        The tagged string, or "" when nothing survives filtering.
    """
    words = re.sub(r'[\d,\W]', '', words)
    # BUG FIX: the original indexed sentence[0] and crashed with IndexError
    # on empty (or fully filtered-out) input.
    if not words:
        return ""

    def _tag(ch):
        # Same classification as is_english(): CJK Unified Ideographs are
        # Chinese, everything else counts as English.
        return '[ZH]' if '\u4e00' <= ch <= '\u9fff' else '[EN]'

    pieces = []
    last_tag = _tag(words[0])
    pieces.append(last_tag)
    for ch in words:
        tag = _tag(ch)
        if tag != last_tag:
            pieces.append(last_tag)  # close the previous language run
            pieces.append(tag)       # open the new one
            last_tag = tag
        pieces.append(ch)
    pieces.append(last_tag)
    return "".join(pieces)


async def mix_language_generation():
    """Generate speech from mixed Chinese/English text.

    The input is tagged per language run by sentence_constructor, echoed
    to the page, then synthesized with the mixed-language prompt.
    """
    try:
        user_text = st.text_input("请输入你希望通过语音生成的混合文本: Enter键生成")
        if user_text:
            tagged_text = sentence_constructor(str(user_text))
            st.markdown(tagged_text)
            generate_wav(tagged_text, "mix_words", name="zh2en_tts_3", language='mix')
    except Exception as e:
        print(e)


async def normal_generation():
    """Plain text-to-speech with the default neutral voice."""
    try:
        user_text = st.text_input("请输入你需要通过语音生成的文本: Enter键生成")
        if user_text:
            generate_wav(user_text, "vallex_generation", name="neutral")
    except Exception as e:
        print(e)

# Load the VALL-E-X model weights so the workflows below can generate.
# NOTE(review): this runs on every Streamlit rerun of the script —
# presumably cached inside get_preload_models; confirm.
get_preload_models()

async def main():
    """Dispatch to the workflow selected in the sidebar radio.

    Unknown / not-yet-implemented choices (e.g. the long-text option)
    simply do nothing, matching the original if/elif chain.
    """
    workflows = {
        "文本生成语音": normal_generation,
        "短文本声音克隆": audio_clone,
        "特定音色语音生成": sps_vocal_generation,
        "多语种混合语音生成": mix_language_generation,
    }
    workflow = workflows.get(button_choose)
    if workflow is not None:
        await workflow()

if __name__ == "__main__":
    # Streamlit reruns this script top-to-bottom on each interaction; the
    # async entry point only awaits the selected workflow coroutine.
    asyncio.run(main())

