import gradio as gr
import json
import os
import re
from my_audio_client import MyAudioClient
from gradio_client import Client
from moviepy import AudioFileClip
import util

# Shared audio-backend client; instantiated once at import time, so importing
# this module has side effects (config load, folder resolution).
my_audio_client = MyAudioClient()
config = my_audio_client.load_config()
folder_path = my_audio_client.get_folder_path()
# Folder that holds the generated audio files (aN.wav).
audio_folder = f'{folder_path}/audio'

def load_data():
    """Load the source text and derive the UI's data model.

    Returns:
        tuple: (blocks, role_dict) where ``blocks`` is the ordered list of
        parsed text blocks and ``role_dict`` maps each role name to its
        entries (as produced by ``my_audio_client.get_role_dict``).
    """
    raw_lines = my_audio_client.read_file()
    parsed_rows = my_audio_client.parse_data(raw_lines)
    text_blocks = my_audio_client.get_blocks(parsed_rows)
    return text_blocks, my_audio_client.get_role_dict(text_blocks)

def generate_and_play_audio(block_index, blocks):
    """Generate audio for a single text block and return its wav file path.

    Args:
        block_index: index of the target block within ``blocks``.
        blocks: list of parsed text blocks (as returned by ``load_data``).

    Returns:
        str: path ``{audio_folder}/a{index}.wav`` of the generated clip.
    """
    gradio_client = Client(config['gradio_url'])
    generated_index = my_audio_client.geneAudio(
        blocks[block_index], block_index, gradio_client
    )
    return f"{audio_folder}/a{generated_index}.wav"


def create_ui():
    """Build the Gradio UI for generating, playing and cropping audio clips.

    Layout: an audio player with start/end sliders, three action buttons,
    a role multi-select that filters the text list, and the text list itself
    (one checkbox per block, labelled with its generation status).

    Returns:
        gr.Blocks: the assembled demo, ready for ``.launch()``.
    """
    blocks, role_dict = load_data()

    # Role names; also used as the choices of the role CheckboxGroup below.
    roles = list(role_dict.keys())

    def format_label(i, text):
        """Checkbox label '[✓|×] [i] text' — ✓ means a{i}.wav already exists."""
        mark = '✓' if os.path.exists(f'{audio_folder}/a{i}.wav') else '×'
        shown = f"{text[:12]}..." if len(text) > 12 else text
        return f"[{mark}] [{i}] {shown}"

    with gr.Blocks() as demo:
        gr.Markdown("## 音频生成与播放系统")

        # Audio player plus the time sliders consumed by the crop action.
        with gr.Row():
            audio_output = gr.Audio(label="音频输出", type="filepath", sources=["upload", "microphone"], elem_id="audio_output", autoplay=True)
            start_time = gr.Slider(minimum=0, maximum=10, step=0.1, label="开始时间(秒)")
            end_time = gr.Slider(minimum=0, maximum=10, step=0.1, label="结束时间(秒)")

        # Action buttons: generate / play / crop.
        with gr.Row():
            generate_btn = gr.Button("生成音频")
            play_btn = gr.Button("播放音频")
            crop_btn = gr.Button("剪裁音频")

        # Role selection row; selecting roles pre-checks their texts below.
        with gr.Row():
            role_checkboxes = gr.CheckboxGroup(
                choices=roles,
                label="选择角色",
                info="多选可联动下方文本列表"
            )
            use_moji = gr.Checkbox(
                label="使用Moji表情",
                value=True,
                info="控制是否在生成音频时使用Moji表情",
                elem_id="use_moji_checkbox",
                elem_classes="custom-checkbox",
                container=False,
                scale=1,
                min_width=100
            )
        with gr.Row():
            # Multi-select list of all text blocks, labelled with status marks.
            selected_items = gr.CheckboxGroup(
                choices=[format_label(i, block['text']) for i, block in enumerate(blocks)],
                label="选择要处理的文本",
                interactive=True,
                info="可多选，点击右侧播放按钮试听。✓表示音频已存在，×表示音频未生成"
            )

        def update_selected_texts(selected_roles):
            """Return the labels of every text block owned by the chosen roles."""
            selected_texts = []
            for role in selected_roles:
                indices = [item['index'] for item in role_dict[role]]
                selected_texts.extend(format_label(i, blocks[i]['text']) for i in indices)
            return selected_texts

        def handle_generate(selected, use_moji_enabled):
            """Generate audio for each selected label; return the last wav path.

            Returns None when nothing is selected or every generation failed.
            """
            if not selected:
                return None
            results = []
            for item in selected:
                match = re.search(r'\[(\d+)\]', item)
                if match:
                    index = int(match.group(1))
                    client = Client(config['gradio_url'])
                    block = blocks[index]
                    result = my_audio_client.geneAudio(block, index, client, use_moji=use_moji_enabled)
                    # Only record successfully generated clips.
                    if result is not None:
                        results.append(f"{audio_folder}/a{result}.wav")
            return results[-1] if results else None

        def handle_play(selected):
            """Resolve selected labels to wav files, concatenating when several.

            Returns:
                tuple: (audio filepath or None, gr.update for the end slider
                sized to the longest individual clip, or None).
            """
            if not selected:
                return None, None
            audio_files = []
            max_duration = 0
            indices = []
            # Extract all indices first so playback order is by block index,
            # not by the order the user clicked the checkboxes.
            for item in selected:
                match = re.search(r'\[(\d+)\]', item)
                if match:
                    indices.append(int(match.group(1)))
            indices.sort()
            for index in indices:
                audio_file = f"{audio_folder}/a{index}.wav"
                if os.path.exists(audio_file):
                    audio_files.append(audio_file)
                    duration = util.get_audio_duration(audio_file)
                    max_duration = max(max_duration, duration)

            if not audio_files:
                return None, None

            # Single file: play it directly.
            if len(audio_files) == 1:
                return audio_files[0], gr.update(maximum=max_duration, value=max_duration)

            # Several files: concatenate with pydub into one temp wav.
            from pydub import AudioSegment
            combined = AudioSegment.from_wav(audio_files[0])
            for audio_file in audio_files[1:]:
                combined += AudioSegment.from_wav(audio_file)

            temp_file = f"{audio_folder}/temp_combined.wav"
            combined.export(temp_file, format="wav")
            return temp_file, gr.update(maximum=max_duration, value=max_duration)

        def handle_crop(selected, start, end):
            """Crop the first selected existing wav in place to [start, end] s.

            Returns the cropped file's path, or None if nothing was cropped.
            """
            if not selected or start >= end:
                return None
            for item in selected:
                # BUGFIX: labels look like '[✓] [12] text', so the previous
                # item.split(']')[0][1:] extracted the ✓/× marker and made
                # int() raise ValueError. Use the same regex as the other
                # handlers to pull out the numeric index.
                match = re.search(r'\[(\d+)\]', item)
                if not match:
                    continue
                index = int(match.group(1))
                audio_file = f"{audio_folder}/a{index}.wav"
                if os.path.exists(audio_file):
                    audio = AudioFileClip(audio_file)
                    cropped = audio.subclipped(start, end)
                    cropped.write_audiofile(audio_file)
                    audio.close()
                    cropped.close()
                    return audio_file
            return None

        # Role selection drives the checked entries of the text list.
        role_checkboxes.change(
            update_selected_texts,
            inputs=role_checkboxes,
            outputs=selected_items
        )

        # Wire the three buttons to their handlers.
        generate_btn.click(
            handle_generate,
            inputs=[selected_items, use_moji],
            outputs=[audio_output]
        )

        play_btn.click(
            handle_play,
            inputs=[selected_items],
            outputs=[audio_output, end_time]
        )

        crop_btn.click(
            handle_crop,
            inputs=[selected_items, start_time, end_time],
            outputs=[audio_output]
        )

    return demo

if __name__ == "__main__":
    # Build the UI and serve it; allowed_paths lets Gradio serve the
    # generated wav files from the audio folder.
    app = create_ui()
    app.launch(
        allowed_paths=[audio_folder],
        server_port=7830,
        show_error=True,
    )