import torch
from pyannote.audio import Pipeline
import streamlit as st
import logging
import traceback
from datetime import timedelta
import tempfile
import os
import warnings
from tqdm import tqdm
import plotly.figure_factory as ff
import pandas as pd
import soundfile as sf
import io
from datetime import datetime

# Configure logging: emit to the console and append to a log file.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.StreamHandler(),
        logging.FileHandler('audio_processing.log')
    ]
)

# Suppress UserWarning messages globally.
warnings.filterwarnings("ignore", category=UserWarning)

# Streamlit page configuration (must be the first st.* call in the script).
st.set_page_config(
    page_title="音频说话人分段分析",
    page_icon="🎤",
    layout="wide",
    initial_sidebar_state="expanded"
)
def format_time(seconds):
    """Format a (non-negative) duration in seconds as an HH:MM:SS string."""
    whole_seconds = int(seconds)
    hours, remainder = divmod(whole_seconds, 3600)
    minutes, secs = divmod(remainder, 60)
    return f"{hours:02d}:{minutes:02d}:{secs:02d}"
def create_visualization(segments, speaker_durations, total_duration):
    """Build a Gantt-style timeline figure of speaker segments.

    Args:
        segments: List of dicts with 'speaker', 'start', 'end' keys
            (times stored as strings of seconds).
        speaker_durations: Mapping of speaker id -> total speaking seconds;
            its keys determine the color assignment, one color per speaker.
        total_duration: Total audio length in seconds (x-axis range).

    Returns:
        (fig, df): the plotly figure and the DataFrame backing it.
    """
    # One row per raw segment for ff.create_gantt.
    df = pd.DataFrame([
        {
            'Task': f"{seg['speaker']}",  # show only the speaker id
            'Start': float(seg['start']),
            'Finish': float(seg['end']),
            'Resource': seg['speaker']
        }
        for seg in segments
    ])

    # Cycle through the palette so more than five speakers still get a
    # color — the previous zip() silently dropped extra speakers, leaving
    # them unmapped in create_gantt's index_col lookup.
    palette = ['#FF9900', '#00CC96', '#AB63FA', '#FFA15A', '#19D3F3']
    colors = {
        speaker: palette[i % len(palette)]
        for i, speaker in enumerate(speaker_durations)
    }

    fig = ff.create_gantt(
        df,
        colors=colors,
        index_col='Resource',
        show_colorbar=True,
        group_tasks=True,
        showgrid_x=True,
        showgrid_y=True,
        height=400,
        title='说话人时间分布'
    )

    # Numeric x-axis labelled with HH:MM:SS ticks every 30 seconds.
    # (The old tickformat='%H:%M:%S' was a date format on a numeric axis
    # and was overridden by ticktext/tickvals anyway, so it is dropped.)
    tick_positions = list(range(0, int(total_duration) + 1, 30))
    fig.update_layout(
        xaxis=dict(
            title='时间',
            range=[0, total_duration],
            ticktext=[format_time(t) for t in tick_positions],
            tickvals=tick_positions,
        ),
        yaxis_title='说话人',
        font=dict(size=12),
        showlegend=True,
        hovermode='x'
    )

    # Custom hover text per bar.
    for trace in fig.data:
        trace.hovertemplate = '说话人: %{text}<br>开始: %{base}<br>结束: %{x}<extra></extra>'
        trace.text = [f"{task}" for task in df['Task']]

    return fig, df
def merge_continuous_segments(segments, max_gap=1.5):
    """Merge consecutive segments spoken by the same speaker.

    Two adjacent segments are merged when they share a speaker and the
    silence between them is at most ``max_gap`` seconds.

    Args:
        segments: List of dicts with 'speaker', 'start', 'end', 'duration'
            keys (times stored as strings of seconds), ordered by start time.
        max_gap: Maximum gap in seconds for two same-speaker segments to be
            considered continuous. Defaults to 1.5, the previously
            hard-coded threshold (the old inline comment incorrectly
            said 0.5s).

    Returns:
        A new list of merged segment dicts; the input list is not mutated.
    """
    if not segments:
        return []

    merged = []
    current = segments[0].copy()

    for next_seg in segments[1:]:
        same_speaker = next_seg['speaker'] == current['speaker']
        gap = float(next_seg['start']) - float(current['end'])
        if same_speaker and gap <= max_gap:
            # Extend the current segment and recompute its duration string.
            current['end'] = next_seg['end']
            current['duration'] = f"{float(current['end']) - float(current['start']):.2f}"
        else:
            merged.append(current)
            current = next_seg.copy()

    merged.append(current)
    return merged

def extract_speaker_audio(audio_data, sr, start_time, end_time):
    """Return the slice of samples between start_time and end_time (seconds)."""
    begin = int(float(start_time) * sr)
    finish = int(float(end_time) * sr)
    return audio_data[begin:finish]

def create_audio_player(audio_data, sr, start_time, end_time):
    """Render a Streamlit audio player for the given time span of the audio."""
    # Cut out the requested portion of the recording.
    clip = extract_speaker_audio(audio_data, sr, start_time, end_time)

    # Encode the clip as an in-memory WAV byte stream.
    buffer = io.BytesIO()
    sf.write(buffer, clip, sr, format='wav')
    buffer.seek(0)

    return st.audio(buffer, format='audio/wav')
def display_results(total_duration, speaker_durations, segments, audio_data, sr):
    """Render the analysis results in the Streamlit UI.

    Shows overall stats, per-speaker summaries with an audio preview, a
    Gantt-style timeline, a detail table, and a CSV export button.

    Args:
        total_duration: Total audio length in seconds.
        speaker_durations: Mapping of speaker id -> total speaking seconds.
        segments: List of raw segment dicts (start/end/duration as strings).
        audio_data: Decoded audio samples (as returned by soundfile.read).
        sr: Sample rate of audio_data.
    """
    # Merge back-to-back same-speaker segments for cleaner display.
    merged_segments = merge_continuous_segments(segments)

    # Basic info.
    col1, col2 = st.columns(2)
    with col1:
        st.info(f"总时长: {format_time(total_duration)}")
    with col2:
        st.info(f"说话人数量: {len(speaker_durations)}")

    # Per-speaker statistics with an audio preview of the first segment.
    st.subheader("说话人统计")
    for speaker, duration in speaker_durations.items():
        # Guard against a zero total duration to avoid ZeroDivisionError.
        percentage = (duration / total_duration * 100) if total_duration else 0.0

        with st.expander(f"说话人 {speaker} - {format_time(duration)} ({percentage:.1f}%)"):
            # First merged segment belonging to this speaker, if any.
            first_segment = next(
                (seg for seg in merged_segments if seg['speaker'] == speaker),
                None
            )
            if first_segment:
                st.write(f"首次出现时间: {format_time(float(first_segment['start']))}")
                st.write(f"片段时长: {format_time(float(first_segment['duration']))}")
                create_audio_player(
                    audio_data,
                    sr,
                    first_segment['start'],
                    first_segment['end']
                )

    # Timeline chart (intentionally built from the raw, unmerged segments).
    st.subheader("时间轴")
    fig, df = create_visualization(segments, speaker_durations, total_duration)
    st.plotly_chart(fig, use_container_width=True)

    # Detail table of the merged segments. The columns are pre-formatted
    # strings, so no Styler formatting is needed (the old identity-lambda
    # style.format was a no-op).
    st.subheader("详细片段信息")
    detail_df = pd.DataFrame([
        {
            '说话人': seg['speaker'],
            '开始时间': format_time(float(seg['start'])),
            '结束时间': format_time(float(seg['end'])),
            '持续时间': format_time(float(seg['duration']))
        }
        for seg in merged_segments
    ])
    st.dataframe(detail_df, use_container_width=True)

    # CSV export of the merged segments.
    if st.button("导出分析结果"):
        export_df = pd.DataFrame([
            {
                'Speaker': seg['speaker'],
                'Start_Time': format_time(float(seg['start'])),
                'End_Time': format_time(float(seg['end'])),
                'Duration': format_time(float(seg['duration']))
            }
            for seg in merged_segments
        ])
        csv = export_df.to_csv(index=False)

        # Timestamped filename so repeated exports do not collide.
        current_time = datetime.now().strftime("%Y%m%d_%H%M%S")
        st.download_button(
            label="下载CSV文件",
            data=csv,
            file_name=f"speaker_segments_{current_time}.csv",
            mime="text/csv",
        )
@st.cache_resource
def load_pipeline():
    """Load and cache the pyannote speaker-diarization pipeline.

    The HuggingFace token is read from the HF_TOKEN environment variable,
    falling back to the original hard-coded placeholder for backward
    compatibility. Moves the pipeline to GPU when CUDA is available.

    Returns:
        The loaded Pipeline, or None if loading failed (an error is shown
        in the UI and logged).
    """
    try:
        # NOTE(review): never commit a real token to source control —
        # set the HF_TOKEN environment variable instead.
        pipeline = Pipeline.from_pretrained(
            "pyannote/speaker-diarization-3.1",
            use_auth_token=os.environ.get("HF_TOKEN", "YOUR_HF_TOKEN")
        )

        if torch.cuda.is_available():
            pipeline = pipeline.to(torch.device("cuda"))
            logging.info("使用 GPU 进行处理")
        else:
            logging.info("使用 CPU 进行处理")

        return pipeline
    except Exception as e:
        logging.error(f"加载模型失败: {str(e)}")
        st.error(f"加载模型失败: {str(e)}")
        return None

def process_audio(audio_file, pipeline):
    """Run speaker diarization on an uploaded audio file.

    Args:
        audio_file: Streamlit UploadedFile (wav/mp3), or None.
        pipeline: A loaded pyannote diarization pipeline.

    Returns:
        A 5-tuple (total_duration, speaker_durations, segments,
        audio_data, sr). On any failure every element is None, so callers
        can always unpack five values.
    """
    # Single failure sentinel with the same arity as the success path.
    # (The previous code returned 3-tuples on some failure paths, which
    # crashed the caller's 5-way unpack with a ValueError.)
    failure = (None, None, None, None, None)
    try:
        if audio_file is None:
            st.warning("请上传音频文件")
            return failure

        # Show file info in the sidebar.
        file_size = audio_file.size / (1024 * 1024)
        st.sidebar.info(f"文件大小: {file_size:.2f} MB")

        # Persist the upload to a temp file so the pipeline can read a path.
        progress_bar = st.progress(0)
        status_text = st.empty()

        try:
            with tempfile.NamedTemporaryFile(delete=False, suffix='.wav') as tmp_file:
                # Write large files in chunks to keep memory bounded.
                CHUNK_SIZE = 5 * 1024 * 1024
                # Tiny floor guards against division by zero for an empty upload.
                total_chunks = max(audio_file.size / CHUNK_SIZE, 1e-9)
                chunks_processed = 0

                audio_file.seek(0)
                for chunk in tqdm(iter(lambda: audio_file.read(CHUNK_SIZE), b''),
                                total=int(total_chunks),
                                desc="上传文件"):
                    tmp_file.write(chunk)
                    chunks_processed += 1
                    progress = chunks_processed / total_chunks
                    progress_bar.progress(min(progress, 1.0))
                    status_text.text(f"上传进度: {progress * 100:.1f}%")

                audio_path = tmp_file.name

            status_text.text("正在处理音频...")

            # Load raw samples for later per-speaker playback.
            audio_data, sr = sf.read(audio_path)

            # Run speaker diarization.
            with st.spinner('分析中...'):
                try:
                    diarization = pipeline(audio_path)
                    if diarization is None:
                        raise ValueError("音频处理失败")
                except Exception as e:
                    st.error(f"音频处理失败: {str(e)}")
                    logging.error(f"音频处理失败: {str(e)}")
                    return failure

            # Collect segments from the diarization result.
            segments = []
            total_duration = 0

            for segment, track, speaker in diarization.itertracks(yield_label=True):
                duration = segment.end - segment.start
                # Total duration is the latest segment end seen so far.
                total_duration = max(total_duration, segment.end)

                segments.append({
                    'start': f"{segment.start:.2f}",
                    'end': f"{segment.end:.2f}",
                    'speaker': speaker,
                    'duration': f"{duration:.2f}"
                })
                logging.info(f"检测到片段: {segment.start:.2f}s - {segment.end:.2f}s ({speaker})")

            if not segments:
                st.warning("未检测到有效的说话人片段")
                return failure

            # Accumulate total speaking time per speaker.
            speaker_durations = {}
            for segment in segments:
                speaker = segment['speaker']
                duration = float(segment['duration'])
                speaker_durations[speaker] = speaker_durations.get(speaker, 0) + duration

        finally:
            # Remove the temp file; tolerate cleanup failures.
            try:
                if 'audio_path' in locals():
                    os.unlink(audio_path)
            except Exception as e:
                logging.warning(f"清理临时文件失败: {str(e)}")

            # Clear the progress widgets.
            progress_bar.empty()
            status_text.empty()

        return total_duration, speaker_durations, segments, audio_data, sr

    except Exception as e:
        error_msg = f"处理出错: {str(e)}"
        logging.error(f"{error_msg}\n{traceback.format_exc()}")
        st.error(error_msg)
        return failure

def main():
    """Streamlit entry point: builds the UI and orchestrates processing."""
    st.title("🎤 音频说话人分段分析")
    st.markdown("""
    上传音频文件进行说话人分段分析。支持大文件，可以识别多个说话人。
    """)

    # Sidebar configuration.
    with st.sidebar:
        st.header("配置")
        # NOTE(review): min_duration is currently unused downstream — wire
        # it into segment filtering or remove the control.
        min_duration = st.slider(
            "最小片段时长(秒)",
            min_value=0.5,
            max_value=5.0,
            value=1.0,
            step=0.5
        )
        show_memory = st.checkbox("显示内存使用")
        if show_memory:
            # Imported lazily so psutil is only required when the box is ticked.
            import psutil
            process = psutil.Process()
            memory_info = process.memory_info()
            st.metric("内存使用", f"{memory_info.rss / (1024 * 1024):.1f} MB")

    # Load the (cached) diarization model.
    pipeline = load_pipeline()
    if pipeline is None:
        st.error("模型加载失败，请检查配置和网络连接")
        return

    # File upload.
    audio_file = st.file_uploader(
        "上传音频文件",
        type=['wav', 'mp3'],
        help="支持 WAV 和 MP3 格式，文件大小限制 4GB"
    )

    if audio_file is not None:
        try:
            # Capture the whole tuple before unpacking: process_audio may
            # return fewer/None values on failure, and a direct 5-way
            # unpack would raise ValueError in that case.
            result = process_audio(audio_file, pipeline)

            if result is not None and len(result) == 5:
                total_duration, speaker_durations, segments, audio_data, sr = result
            else:
                total_duration = speaker_durations = segments = audio_data = sr = None

            if (total_duration and speaker_durations and segments
                    and audio_data is not None and sr):
                display_results(total_duration, speaker_durations, segments, audio_data, sr)
            else:
                st.warning("音频处理未返回有效结果，请检查音频文件或尝试重新上传")
        except Exception as e:
            st.error(f"处理过程出错: {str(e)}")
            logging.error(f"处理过程出错: {str(e)}\n{traceback.format_exc()}")

# Script entry point.
if __name__ == "__main__":
    main()