import streamlit as st
import torch
import torchaudio
from moviepy.editor import *

from util.audio_extract import AudioExtract
from util.caption_extract import CaptionExtract
from util.translate import Translate
from util.tts import TTS
import datetime
import os
from pydub import AudioSegment

# TODO: provide a way to interrupt/abort a running job
# TODO: ship both GPU and CPU variants
# TODO: application packaging and deployment


def _timestamp() -> str:
    """Return the current local time formatted for the status messages."""
    return datetime.datetime.now().strftime("%Y-%m-%d, %H:%M:%S")


if __name__ == '__main__':
    ############################# main content area #############################
    st.set_page_config(page_title="AI智能配音", page_icon="img/logo.png", layout="wide")

    # Upload the source video; it is persisted under files/ so the
    # downstream extraction / synthesis helpers can work on a real path.
    src_file = st.file_uploader(
        "选择原始视频文件",
        type=["mp4", "ogv", "mpeg", "avi", "mov"],
        accept_multiple_files=False,
        key=None,
    )
    if src_file is None:
        st.error("请选择配音文件")
    else:
        with open(os.path.join("files", src_file.name), "wb") as save_file:
            save_file.write(src_file.getvalue())

    with st.container():
        col1, col2, col3, col4 = st.columns([1, 1, 1, 1])
        # Voice / timbre selection
        player_select = col1.selectbox(
            '选择音色',
            ('使用原始音色', '女生1', '女生2', '女生3')
        )
        # Video scene selection
        scene_select = col2.selectbox(
            '选择视频场景',
            ('会议', '脱口秀', '教学', '直播')
        )
        # Source language selection
        src_lan_select = col3.selectbox(
            '选择原视频语言',
            ('中文', '英语', '日语')
        )
        # Target language selection
        dest_lan_select = col4.selectbox(
            '选择目标语言',
            ('中文', '英语', '日语')
        )

    if st.button("开始配音", type="primary"):
        if src_file is None:
            # BUG FIX: the original dereferenced src_file.name without this
            # guard and raised AttributeError when the button was clicked
            # before any file had been uploaded.
            st.error("请选择配音文件")
            st.stop()

        # Common paths derived from the uploaded file name.
        base_path = os.path.join("files", src_file.name)  # saved upload
        audio_path = base_path + ".mp3"                   # extracted audio track
        work_dir = base_path + "-d"                       # per-video work directory

        st.toast("开始处理，时间：" + _timestamp())
        # 1. Extract the audio track from the uploaded video.
        # NOTE(review): extract_audio receives the bare file name (twice),
        # not the files/ path — presumably AudioExtract prepends the
        # directory itself; verify against util/audio_extract.py.
        audio_extract = AudioExtract()
        if audio_extract.extract_audio(src_file.name, src_file.name):
            st.toast("音频提取完成，时间：" + _timestamp())
        # 2. Speech recognition: per-segment timestamps plus text.
        caption_extract = CaptionExtract()
        total_content, detailes = caption_extract.extract(audio_path)
        if detailes:
            st.toast("字幕提取完成，时间：" + _timestamp())
        else:
            st.error("字幕提取失败，时间：" + _timestamp())
            # BUG FIX: the original fell through and kept processing an
            # empty/None caption list; abort this run instead.
            st.stop()

        # 3. Translate each caption segment in place.
        # TODO: make language handling configurable / auto-detected
        # TODO: show the generated captions in the UI for manual editing
        # Currently backed by an ollama service running the qwen2:7b model.
        translate = Translate(src_lan=src_lan_select, dest_lan=dest_lan_select)
        for caption in detailes:
            print(caption["text"])
            caption["text"] = translate.translate(caption["text"])
            print(caption["text"])
        st.toast("字幕翻译完成，时间：" + _timestamp())

        # 4. Emotion extraction (not implemented yet).

        # 5. Synthesize speech for every translated caption.
        # TODO: pick the speaking rate automatically
        tts = TTS()
        audios = []
        st.toast("开始语音合成，时间：" + _timestamp())
        # Hoisted out of the loop: create the work directory once.
        os.makedirs(work_dir, exist_ok=True)
        for caption in detailes:
            wav = tts.gen_audio(caption["text"])
            # TODO: allow per-clip preview and regeneration in the UI
            # NOTE(review): clips are keyed by caption["start"], so two
            # captions sharing a start time would overwrite each other.
            torchaudio.save(
                os.path.join(work_dir, str(caption["start"]) + ".wav"),
                torch.from_numpy(wav),
                24000,
            )
            audios.append(caption["start"])
        st.toast("完成语音合成，时间：" + _timestamp())

        # 6. Rebuild the video with the synthesized audio track.
        # TODO: detect whether overlaid clips overlap each other
        # TODO: visualize the audio tracks; highlight overlaps / tight gaps
        sound = AudioSegment.from_file(audio_path, format='mp3')
        sound_du = len(sound)  # pydub durations are in milliseconds
        new_sound = AudioSegment.silent(duration=sound_du)
        for audio in audios:
            seg = AudioSegment.from_file(
                os.path.join(work_dir, str(audio) + ".wav"), format='wav'
            )
            # pydub overlay positions are in milliseconds; this assumes
            # caption["start"] is already expressed in ms — TODO confirm
            # against CaptionExtract's output units.
            new_sound = new_sound.overlay(seg, audio)
        new_sound.export(os.path.join(work_dir, "out.mp3"), format='mp3')

        # Mute the original video and attach the freshly mixed audio.
        src_video = VideoFileClip(base_path)
        dest_video = src_video.without_audio()
        audio_clip = AudioFileClip(os.path.join(work_dir, "out.mp3"))
        print("视频长度：" + str(dest_video.duration))
        print("音频长度：" + str(audio_clip.duration))
        dest_video = dest_video.set_audio(audio_clip)
        dest_video.write_videofile(os.path.join(work_dir, "out.mp4"))
        st.toast("完成配音文件生成，时间：" + _timestamp())

        # Side-by-side comparison of the original and the dubbed video.
        leftvideo, rightvideo = st.columns([1, 1])
        leftvideo.write("原始视频")
        leftvideo.video(base_path)
        rightvideo.write("配音视频")
        rightvideo.video(os.path.join(work_dir, "out.mp4"))
    ############################### sidebar ###########################