import torch
from pyannote.audio import Pipeline
import logging
import tempfile
import os
import soundfile as sf
from tqdm import tqdm
import streamlit as st
from pathlib import Path
import dotenv
import traceback

# Load environment variables from a local .env file (HF_TOKEN, etc.)
dotenv.load_dotenv()

def setup_logging():
    """Configure root logging to both the console and a log file.

    Creates ``data/logs`` (relative to the working directory) if needed and
    attaches a stream handler plus a UTF-8 file handler.
    """
    log_dir = Path("data/logs")
    log_dir.mkdir(parents=True, exist_ok=True)
    
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(levelname)s - %(message)s',
        handlers=[
            logging.StreamHandler(),
            # encoding='utf-8' prevents mojibake for the Chinese log
            # messages used in this module (the default file encoding is
            # locale-dependent, e.g. GBK/cp1252 on Windows).
            logging.FileHandler(log_dir / 'audio_processing.log',
                                encoding='utf-8')
        ]
    )

@st.cache_resource
def load_pipeline():
    """Load and cache the pyannote speaker-diarization pipeline.

    Returns
    -------
    Pipeline | None
        The pipeline moved to GPU when available, or ``None`` on any
        failure (missing token, download/auth error, ...). Failures are
        reported both to the log and to the Streamlit UI.
    """
    try:
        # Fail fast with a clear message instead of an opaque
        # authentication error from the Hugging Face hub.
        token = os.getenv("HF_TOKEN")
        if not token:
            raise ValueError("未设置 HF_TOKEN 环境变量")

        # Keep downloaded model weights in the user's cache directories.
        torch.hub.set_dir(str(Path.home() / '.cache' / 'torch' / 'hub'))

        pipeline = Pipeline.from_pretrained(
            "pyannote/speaker-diarization-3.1",
            use_auth_token=token,
            cache_dir=str(Path.home() / '.cache' / 'huggingface')
        )

        # Prefer GPU when one is visible; fall back to CPU.
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        pipeline = pipeline.to(device)

        if device.type == "cuda":
            logging.info("使用 GPU 进行处理")
        else:
            logging.info("使用 CPU 进行处理")

        return pipeline
    except Exception as e:
        logging.error(f"加载模型失败: {str(e)}\n{traceback.format_exc()}")
        st.error(f"加载模型失败: {str(e)}")
        return None

def process_audio(audio_file, pipeline):
    """Run speaker diarization on an uploaded audio file.

    The upload is streamed in chunks to a temporary WAV file, fed to the
    diarization pipeline, and the temp file is removed afterwards.

    Parameters
    ----------
    audio_file : UploadedFile | None
        Streamlit uploaded file object (has ``size``, ``seek``, ``read``).
    pipeline : Pipeline
        Diarization pipeline as returned by ``load_pipeline``.

    Returns
    -------
    tuple
        ``(total_duration, speaker_durations, segments, audio_data, sr)``
        on success, or ``(None, None, None, None, None)`` on any failure.
    """
    try:
        if audio_file is None:
            st.warning("请上传音频文件")
            return None, None, None, None, None

        file_size = audio_file.size / (1024 * 1024)  # bytes -> MB
        st.sidebar.info(f"文件大小: {file_size:.2f} MB")

        progress_bar = st.progress(0)
        status_text = st.empty()

        try:
            with tempfile.NamedTemporaryFile(delete=False, suffix='.wav') as tmp_file:
                # 10 MB chunks keep memory bounded for large uploads.
                CHUNK_SIZE = 10 * 1024 * 1024
                # Ceiling division, floored at 1, so the progress math never
                # divides by zero (empty upload) and files smaller than one
                # chunk still report a correct total (the old float division
                # yielded 0 for files < 10 MB and raised ZeroDivisionError
                # for empty ones).
                total_chunks = max(1, (audio_file.size + CHUNK_SIZE - 1) // CHUNK_SIZE)
                chunks_processed = 0

                audio_file.seek(0)
                for chunk in tqdm(iter(lambda: audio_file.read(CHUNK_SIZE), b''),
                                total=total_chunks,
                                desc="上传文件"):
                    tmp_file.write(chunk)
                    chunks_processed += 1
                    progress = chunks_processed / total_chunks
                    progress_bar.progress(min(progress, 1.0))
                    status_text.text(f"上传进度: {progress * 100:.1f}%")

                audio_path = tmp_file.name

            status_text.text("正在处理音频...")

            # Raw samples are returned to the caller so clips can later be
            # extracted without re-reading the (deleted) temp file.
            audio_data, sr = sf.read(audio_path)

            with st.spinner('分析中...'):
                try:
                    diarization = pipeline(audio_path)
                    if diarization is None:
                        raise ValueError("音频处理失败")
                except Exception as e:
                    st.error(f"音频处理失败: {str(e)}")
                    logging.error(f"音频处理失败: {str(e)}")
                    return None, None, None, None, None

            segments = []
            total_duration = 0

            # pyannote 3.x API: itertracks yields (segment, track, label).
            for segment, track, speaker in diarization.itertracks(yield_label=True):
                duration = segment.end - segment.start
                total_duration = max(total_duration, segment.end)

                segments.append({
                    'start': f"{segment.start:.2f}",
                    'end': f"{segment.end:.2f}",
                    'speaker': speaker,
                    'duration': f"{duration:.2f}"
                })
                logging.info(f"检测到片段: {segment.start:.2f}s - {segment.end:.2f}s ({speaker})")

            if not segments:
                st.warning("未检测到有效的说话人片段")
                return None, None, None, None, None

            # Aggregate per-speaker speaking time in seconds.
            speaker_durations = {}
            for segment in segments:
                speaker = segment['speaker']
                duration = float(segment['duration'])
                speaker_durations[speaker] = speaker_durations.get(speaker, 0) + duration

            return total_duration, speaker_durations, segments, audio_data, sr

        finally:
            # Best-effort cleanup: the temp WAV and the progress widgets.
            try:
                if 'audio_path' in locals():
                    os.unlink(audio_path)
            except Exception as e:
                logging.warning(f"清理临时文件失败: {str(e)}")

            progress_bar.empty()
            status_text.empty()

    except Exception as e:
        error_msg = f"处理出错: {str(e)}"
        logging.error(f"{error_msg}\n{traceback.format_exc()}")
        st.error(error_msg)
        return None, None, None, None, None

def merge_continuous_segments(segments, max_gap=0.5):
    """Merge consecutive segments belonging to the same speaker.

    Adjacent entries (in the dict format produced by ``process_audio``:
    'start'/'end'/'duration' as numeric strings, plus 'speaker') are
    collapsed into one when they share a speaker and the silence between
    them does not exceed ``max_gap`` seconds. The input list is not
    modified and its order is preserved.

    Parameters
    ----------
    segments : list[dict]
        Segment dicts, assumed ordered by start time.
    max_gap : float, optional
        Maximum allowed gap (seconds) between segments that get merged.

    Returns
    -------
    list[dict]
        New list of merged segment dicts in the same string format.
    """
    if not segments:
        return []

    merged = []
    current = dict(segments[0])  # copy so the caller's dicts stay intact
    for seg in segments[1:]:
        gap = float(seg['start']) - float(current['end'])
        if seg['speaker'] == current['speaker'] and gap <= max_gap:
            # Extend the running segment and recompute its duration.
            current['end'] = seg['end']
            current['duration'] = (
                f"{float(current['end']) - float(current['start']):.2f}"
            )
        else:
            merged.append(current)
            current = dict(seg)
    merged.append(current)
    return merged

def extract_speaker_audio(audio_data, sr, start_time, end_time):
    """Return the slice of audio samples between two timestamps.

    Parameters
    ----------
    audio_data : array-like
        Samples as returned by ``soundfile.read`` (samples along the
        first axis; mono 1-D or multi-channel 2-D).
    sr : int
        Sample rate in Hz.
    start_time, end_time : float
        Clip boundaries in seconds; clamped to the valid sample range.

    Returns
    -------
    Slice of ``audio_data`` covering ``[start_time, end_time)``; an empty
    slice when the clamped range is empty or inverted.
    """
    n_samples = len(audio_data)
    start = max(0, int(start_time * sr))
    end = min(n_samples, int(end_time * sr))
    if start >= end:
        return audio_data[0:0]  # empty slice of the same container type
    return audio_data[start:end]

def format_time(seconds):
    """Convert a duration in seconds to an ``HH:MM:SS`` string.

    Fractional seconds are truncated; negative input is clamped to zero.
    Hours are not wrapped, so durations over 24h render as e.g. ``25:00:00``.
    """
    total = max(0, int(seconds))
    hours, remainder = divmod(total, 3600)
    minutes, secs = divmod(remainder, 60)
    return f"{hours:02d}:{minutes:02d}:{secs:02d}"