import torch
import os
import glob
import math
import numpy as np
import soundfile as sf
from typing import Any, List, Dict, Tuple
from silero_vad import load_silero_vad, get_speech_timestamps
import librosa
from pydub import AudioSegment

class AudioTranscribe:
    """
    Audio transcription utility.

    Bundles Silero VAD speech-activity detection, silence-aware audio
    slicing, chunk-wise ASR transcription (with cumulative timestamp
    offsets merged back into a single timeline) and SRT subtitle
    formatting.
    """

    def __init__(self, sample_rate: int = 16000, device: str = 'cpu'):
        """
        Load the Silero VAD model.

        Args:
            sample_rate: Sample rate used for VAD; audio at other rates is
                resampled to this before detection.
            device: Torch device string the VAD model is moved to.
        """
        self.sample_rate = sample_rate
        self.device = device

        # Load the VAD model onto the requested device.
        self.model = load_silero_vad()
        self.model = self.model.to(self.device)

        # NOTE(review): this sets the *process-wide* torch thread count,
        # affecting everything else using torch in this process.
        torch.set_num_threads(1)

    def read_audio_alternative(self, audio_path: str):
        """
        Read an audio file via soundfile.

        Args:
            audio_path: Path to the audio file.

        Returns:
            Tuple ``(wav, sr)`` where ``wav`` is a mono float32 torch
            tensor and ``sr`` is the file's native sample rate, or
            ``(None, None)`` on any read error.
        """
        try:
            audio_data, sr = sf.read(audio_path)

            # Down-mix multi-channel audio to mono by averaging channels.
            if len(audio_data.shape) > 1:
                audio_data = np.mean(audio_data, axis=1)

            # Convert to a float32 torch tensor for the VAD model.
            wav = torch.from_numpy(audio_data).float()

            return wav, sr

        except Exception as e:
            print(f"音频读取失败: {e}")
            return None, None

    def detect_speech(self, audio_path: str, threshold: float = 0.5) -> List[Dict]:
        """
        Detect speech activity with Silero VAD.

        Args:
            audio_path: Path to the audio file.
            threshold: Speech-probability threshold passed to the VAD.

        Returns:
            List of ``{'start': float, 'end': float}`` dicts in seconds
            (converted from sample indices); empty list on failure or when
            no speech is detected.
        """
        try:
            wav, sr = self.read_audio_alternative(audio_path)
            if wav is None:
                return []

            # The VAD is run at self.sample_rate; resample if the file
            # uses a different rate.
            if sr != self.sample_rate:
                print(f"重采样从 {sr}Hz 到 {self.sample_rate}Hz")
                wav = self.resample_audio(wav.numpy(), sr, self.sample_rate)
                wav = torch.from_numpy(wav).float()

            speech_timestamps = get_speech_timestamps(
                wav, self.model,
                threshold=threshold,
                sampling_rate=self.sample_rate
            )

            # Convert sample indices to seconds.
            speech_segments = [
                {
                    'start': ts['start'] / self.sample_rate,
                    'end': ts['end'] / self.sample_rate,
                }
                for ts in speech_timestamps
            ]

            print(f"Silero VAD 检测到 {len(speech_segments)} 个语音段")
            return speech_segments

        except Exception as e:
            print(f"Silero VAD 检测失败: {e}")
            return []

    def resample_audio(self, audio_data: np.ndarray, orig_sr: int, target_sr: int) -> np.ndarray:
        """
        Resample audio with librosa.

        Returns:
            The resampled array, or the original array unchanged if
            resampling fails (best-effort fallback).
        """
        try:
            resampled_audio = librosa.resample(
                audio_data,
                orig_sr=orig_sr,
                target_sr=target_sr
            )
            return resampled_audio
        except Exception as e:
            print(f"重采样失败: {e}")
            return audio_data

    def find_silence_gaps(self,
                          speech_segments: List[Dict],
                          total_duration: float,
                          min_silence_duration: float = 1.0) -> List[Dict]:
        """
        Derive silence gaps from a list of speech segments.

        Assumes ``speech_segments`` are sorted by start time (the order
        Silero VAD produces). Only gaps at least ``min_silence_duration``
        seconds long are reported.

        Returns:
            List of ``{'start', 'end', 'duration'}`` dicts, sorted by
            duration in descending order.
        """
        silence_gaps = []

        # Leading silence before the first speech segment.
        if speech_segments and speech_segments[0]['start'] >= min_silence_duration:
            silence_gaps.append({
                'start': 0.0,
                'end': speech_segments[0]['start'],
                'duration': speech_segments[0]['start']
            })

        # Silence between consecutive speech segments.
        for i in range(len(speech_segments) - 1):
            gap_start = speech_segments[i]['end']
            gap_end = speech_segments[i + 1]['start']
            gap_duration = gap_end - gap_start

            if gap_duration >= min_silence_duration:
                silence_gaps.append({
                    'start': gap_start,
                    'end': gap_end,
                    'duration': gap_duration
                })

        # Trailing silence after the last speech segment.
        if speech_segments and (total_duration - speech_segments[-1]['end']) >= min_silence_duration:
            silence_gaps.append({
                'start': speech_segments[-1]['end'],
                'end': total_duration,
                'duration': total_duration - speech_segments[-1]['end']
            })

        # Longest gaps first.
        silence_gaps.sort(key=lambda x: x['duration'], reverse=True)
        return silence_gaps

    def calculate_intelligent_chunks(self,
                                     speech_segments: List[Dict],
                                     total_duration: float,
                                     chunk_duration: float = 300.0,
                                     min_silence_duration: float = 0.2) -> List[Tuple[float, float]]:
        """
        Compute chunk boundaries, preferring cuts inside silence gaps.

        Each chunk is kept within 0.5x-1.5x of ``chunk_duration``; when no
        suitable silence gap exists, the cut falls back to the target
        position. Falls back to fixed-interval chunks when no speech was
        detected.

        Returns:
            List of ``(start, end)`` tuples in seconds.
        """
        print(f"计算智能切片，总时长: {total_duration:.2f}s, 目标切片: {chunk_duration:.2f}s")

        # Without speech segments there is nothing to align cuts to.
        if not speech_segments:
            print("没有检测到语音段，使用固定间隔切片")
            return self._calculate_fixed_chunks(total_duration, chunk_duration)

        silence_gaps = self.find_silence_gaps(speech_segments, total_duration, min_silence_duration)
        print(f"找到 {len(silence_gaps)} 个静音间隙")

        boundaries = []
        current_start = 0.0
        # Safety cap: at most 50 chunks are produced, guarding against a
        # non-advancing cut point looping forever.
        max_iterations = 50

        for iteration in range(max_iterations):
            if current_start >= total_duration - 1.0:  # less than 1 s left -> done
                break

            target_end = current_start + chunk_duration

            # If the remainder is at most 1.2x a chunk, take it whole to
            # avoid a tiny trailing chunk.
            remaining_duration = total_duration - current_start
            if remaining_duration <= chunk_duration * 1.2:
                # Only emit the tail chunk when it is non-trivial (> 1 s).
                if remaining_duration > 1.0:
                    boundaries.append((current_start, total_duration))
                    print(f"剩余时长 {remaining_duration:.2f}s 较短，直接切片到结束")
                break

            best_cut = self._find_best_cut_point(current_start, target_end, silence_gaps, total_duration, chunk_duration)

            # Clamp the chunk length into [0.5x, 1.5x] of chunk_duration.
            segment_length = best_cut - current_start
            min_segment_length = chunk_duration * 0.5
            max_segment_length = chunk_duration * 1.5

            if segment_length < min_segment_length:
                # Too short: extend to the minimum length if possible.
                potential_end = current_start + min_segment_length
                if potential_end <= total_duration:
                    best_cut = potential_end
                    print(f"切片过短 ({segment_length:.2f}s)，延长到 {best_cut:.2f}s")
                else:
                    best_cut = total_duration
            elif segment_length > max_segment_length:
                # Too long: clip to the maximum length.
                best_cut = current_start + max_segment_length
                print(f"切片过长 ({segment_length:.2f}s)，缩短到 {best_cut:.2f}s")

            boundaries.append((current_start, best_cut))
            print(f"切片 {len(boundaries)}: {current_start:.2f}s -> {best_cut:.2f}s (长度: {best_cut-current_start:.2f}s)")
            current_start = best_cut

        print(f"最终生成 {len(boundaries)} 个切片")
        return boundaries

    def _find_best_cut_point(self, current_start: float, target_end: float,
                             silence_gaps: List[Dict], total_duration: float,
                             chunk_duration: float) -> float:
        """
        Pick the best cut point near ``target_end``.

        Candidate silence-gap midpoints within +/-40% of a chunk around the
        target are scored by proximity to the target (70%) and gap length
        (30%); the highest-scoring midpoint wins. Falls back to
        ``target_end`` when no gap qualifies.
        """
        # Search window around the target cut position.
        search_range = chunk_duration * 0.4
        min_search = max(current_start + chunk_duration * 0.6, target_end - search_range)
        max_search = min(total_duration, target_end + search_range)

        candidate_gaps = []

        for gap in silence_gaps:
            gap_mid = (gap['start'] + gap['end']) / 2
            # The gap must start after the current chunk start and its
            # midpoint must fall inside the search window.
            if (gap['start'] > current_start and
                    min_search <= gap_mid <= max_search):
                # Closer to the target is better; longer silence is better.
                distance_score = 1.0 / (1.0 + abs(gap_mid - target_end) / 10.0)
                length_score = min(gap['duration'] / 5.0, 1.0)  # normalize to [0, 1]
                total_score = distance_score * 0.7 + length_score * 0.3
                candidate_gaps.append((gap, total_score, gap_mid))

        if candidate_gaps:
            best_gap = max(candidate_gaps, key=lambda x: x[1])
            best_cut = best_gap[2]
            print(f"找到最佳切割点: {best_cut:.2f}s (原目标: {target_end:.2f}s, 分数: {best_gap[1]:.3f})")
            return best_cut

        # No suitable silence gap: cut exactly at the target position.
        print(f"未找到合适静音点，使用目标时间: {target_end:.2f}s")
        return target_end

    def _calculate_fixed_chunks(self, total_duration: float, chunk_duration: float) -> List[Tuple[float, float]]:
        """
        Compute evenly spaced ``(start, end)`` chunk boundaries; the final
        chunk is clipped to ``total_duration``.
        """
        num_chunks = math.ceil(total_duration / chunk_duration)
        boundaries = []
        for i in range(num_chunks):
            start = i * chunk_duration
            end = min((i + 1) * chunk_duration, total_duration)
            boundaries.append((start, end))
        return boundaries

    def get_audio_duration(self, file_path):
        """
        Return the audio duration in seconds, or None on error.
        """
        try:
            # sf.info only parses the header instead of loading the whole
            # file into memory (the previous sf.read-based version did).
            return sf.info(file_path).duration
        except Exception as e:
            print(f"获取音频时长错误: {e}")
            return None

    def create_audio_chunk(self, audio_path: str, start_time: float, end_time: float, output_path: str) -> bool:
        """
        Cut ``[start_time, end_time]`` out of an audio file and export it
        as a WAV file.

        Returns:
            True when the chunk file was created and is non-empty.
        """
        try:
            # pydub gives millisecond-accurate slicing.
            audio = AudioSegment.from_file(audio_path)

            start_ms = int(start_time * 1000)
            end_ms = int(end_time * 1000)

            # Reject a start past the end of the audio; clip the end.
            if start_ms >= len(audio):
                print(f"开始时间 {start_time}s 超出音频长度")
                return False

            end_ms = min(end_ms, len(audio))

            chunk = audio[start_ms:end_ms]

            # BUGFIX: dirname is "" for a bare filename, and makedirs("")
            # raises — only create the directory when there is one.
            out_dir = os.path.dirname(output_path)
            if out_dir:
                os.makedirs(out_dir, exist_ok=True)

            chunk.export(output_path, format="wav")

            # Verify the export actually produced a non-empty file.
            if os.path.exists(output_path) and os.path.getsize(output_path) > 0:
                return True
            else:
                print(f"切片创建失败: {output_path}")
                return False

        except Exception as e:
            print(f"创建音频切片失败: {e}")
            return False

    def convert_to_16k_mono(self, input_path: str, output_path: str) -> bool:
        """
        Convert an audio file to 16 kHz mono WAV.

        Returns:
            True on success, False on any conversion/export error.
        """
        try:
            audio = AudioSegment.from_file(input_path)
            audio = audio.set_frame_rate(16000).set_channels(1)
            audio.export(output_path, format="wav")
            return True
        except Exception as e:
            print(f"音频转换失败: {e}")
            return False

    def create_intelligent_chunks_silero(self, target_wav_path: str, total_duration: float,
                                         task_id: str,
                                         workdir: str,
                                         chunk_minutes: int = 5,
                                         min_silence_duration: float = 0.2) -> List[str]:
        """
        Slice an audio file into chunks aligned to Silero VAD silence gaps.

        Falls back to fixed-interval slicing when VAD finds no speech or no
        boundaries could be computed; returns the original path untouched
        when the audio already fits in one chunk.

        Args:
            target_wav_path: Audio file to slice.
            total_duration: Its duration in seconds.
            task_id: Identifier used in chunk filenames and log lines.
            workdir: Output directory for chunk files (created if missing).
            chunk_minutes: Target chunk length in minutes.
            min_silence_duration: Minimum silence gap considered a cut point.

        Returns:
            Paths of the chunk files actually created.
        """
        chunk_duration = chunk_minutes * 60

        # Short audio: no slicing needed, hand back the original file.
        if total_duration <= chunk_duration:
            print(f"音频总时长 {total_duration:.2f}s 小于切片时长 {chunk_duration}s，不进行切片")
            return [target_wav_path]

        print(f"[{task_id}] 使用 Silero VAD 进行语音活动检测...")

        os.makedirs(workdir, exist_ok=True)

        speech_segments = self.detect_speech(target_wav_path)

        if not speech_segments:
            print("Silero VAD 未检测到语音活动，使用固定间隔切片")
            return self.create_fixed_chunks(target_wav_path, total_duration, task_id, workdir, chunk_duration)

        boundaries = self.calculate_intelligent_chunks(speech_segments, total_duration, chunk_duration, min_silence_duration)

        if not boundaries:
            print("未计算出切片边界，使用固定间隔切片")
            return self.create_fixed_chunks(target_wav_path, total_duration, task_id, workdir, chunk_duration)

        chunk_paths = []

        for i, (start, end) in enumerate(boundaries):
            chunk_path = os.path.join(workdir, f"{task_id}_chunk_{i:03d}.wav")
            if self.create_audio_chunk(target_wav_path, start, end, chunk_path):
                chunk_paths.append(chunk_path)
                print(f"[{task_id}] 创建智能切片 {i}: {start:.2f}s -> {end:.2f}s (长度: {end-start:.2f}s)")
            else:
                print(f"[{task_id}] 创建切片失败: {start:.2f}s -> {end:.2f}s")

        return chunk_paths

    def create_fixed_chunks(self, target_wav_path: str, total_duration: float, task_id: str,
                            workdir: str, chunk_duration: float) -> List[str]:
        """
        Slice audio at fixed intervals (fallback when VAD-guided slicing is
        unavailable).

        Returns:
            Paths of the chunk files actually created.
        """
        # Reuse the shared boundary computation instead of duplicating it.
        boundaries = self._calculate_fixed_chunks(total_duration, chunk_duration)
        chunk_paths = []

        for i, (start_time, end_time) in enumerate(boundaries):
            chunk_path = os.path.join(workdir, f"{task_id}_chunk_{i:03d}.wav")
            success = self.create_audio_chunk(target_wav_path, start_time, end_time, chunk_path)

            if success:
                chunk_paths.append(chunk_path)
                print(f"[{task_id}] 创建固定切片 {i}: {start_time:.2f}s -> {end_time:.2f}s")
            else:
                print(f"[{task_id}] 创建固定切片失败: {start_time:.2f}s -> {end_time:.2f}s")

        return chunk_paths

    def transcribe_chunk_paths(self, asr_model: Any, chunk_paths: List[str], task_id: str) -> List[Dict]:
        """
        Transcribe each chunk and merge segment timestamps onto the
        original audio's timeline.

        Args:
            asr_model: Model exposing ``transcribe(paths, timestamps=True)``
                whose results carry a ``.timestamp`` dict (NeMo-style —
                TODO confirm against the caller).
            chunk_paths: Chunk files in playback order.
            task_id: Identifier for log lines.

        Returns:
            All segment-level timestamp dicts with 'start'/'end' shifted by
            the cumulative duration of preceding chunks.
        """
        all_segments = []
        # NOTE(review): word-level timestamps are collected and offset too,
        # but are not part of the return value; kept for parity with the
        # original behavior.
        all_words = []
        cumulative_time_offset = 0.0

        for i, chunk_path in enumerate(chunk_paths):
            print(f"[{task_id}] 正在转录切片 {i+1}/{len(chunk_paths)}...")

            output = asr_model.transcribe([chunk_path], timestamps=True)

            if output and output[0].timestamp:
                # Shift segment timestamps into the global timeline.
                if 'segment' in output[0].timestamp:
                    for seg in output[0].timestamp['segment']:
                        seg['start'] += cumulative_time_offset
                        seg['end'] += cumulative_time_offset
                        all_segments.append(seg)

                # Shift word timestamps the same way.
                if 'word' in output[0].timestamp:
                    for word in output[0].timestamp['word']:
                        word['start'] += cumulative_time_offset
                        word['end'] += cumulative_time_offset
                        all_words.append(word)

            # Advance the offset by the chunk's real duration.
            # BUGFIX: get_audio_duration returns None on failure; the old
            # code then crashed with a TypeError on +=. Skip the update
            # instead (later timestamps may drift, but results survive).
            chunk_actual_duration = self.get_audio_duration(chunk_path)
            if chunk_actual_duration is not None:
                cumulative_time_offset += chunk_actual_duration
        return all_segments

    def clean_temp_chunks(self, task_id, temp_dir="temp"):
        """
        Delete chunk files from the temp directory.

        Args:
            task_id: Task ID; if falsy, chunk files of ALL tasks matching
                ``*_chunk_*.wav`` are removed.
            temp_dir: Temp directory path, defaults to "temp".
        """
        try:
            print(f"[{task_id}] 开始清理临时文件")

            # Nothing to do if the directory does not exist.
            if not os.path.exists(temp_dir):
                print(f"目录 {temp_dir} 不存在")
                return

            # Restrict to one task's files when a task_id is given.
            if task_id:
                pattern = os.path.join(temp_dir, f"{task_id}_chunk_*.wav")
            else:
                pattern = os.path.join(temp_dir, "*_chunk_*.wav")

            chunk_files = glob.glob(pattern)

            if not chunk_files:
                print(f"在 {temp_dir} 中未找到匹配的分片文件")
                return

            # Delete files one by one so a single failure does not stop
            # the rest of the cleanup.
            deleted_count = 0
            for file_path in chunk_files:
                try:
                    os.remove(file_path)
                    deleted_count += 1
                except Exception as e:
                    print(f"删除文件 {file_path} 时出错: {e}")

            print(f"[{task_id}] 清理完成，共删除 {deleted_count} 个文件")

        except Exception as e:
            print(f"清理过程中发生错误: {e}")

    def format_srt_time(self, seconds: float) -> str:
        """
        Convert seconds to the SRT subtitle time format (HH:MM:SS,mmm).

        Args:
            seconds: Time in seconds; the fractional part becomes
                milliseconds.

        Returns:
            SRT-formatted time string, e.g. "00:00:09,040".
        """
        hours = int(seconds // 3600)
        minutes = int((seconds % 3600) // 60)
        secs = int(seconds % 60)
        milliseconds = int(round((seconds - int(seconds)) * 1000))

        # Rounding may push milliseconds to 1000 — propagate the carry up
        # through seconds, minutes and hours.
        if milliseconds >= 1000:
            milliseconds -= 1000
            secs += 1
            if secs >= 60:
                secs = 0
                minutes += 1
                if minutes >= 60:
                    minutes = 0
                    hours += 1

        return f"{hours:02d}:{minutes:02d}:{secs:02d},{milliseconds:03d}"

    def segments_to_srt(self, segments: list) -> str:
        """
        Render segment timestamp dicts as an SRT subtitle string.

        Args:
            segments: Dicts with 'start', 'end' (seconds) and 'segment'
                (text) keys; entries whose text is empty after stripping
                are skipped (their index is still consumed).

        Returns:
            The SRT document as a single string.
        """
        srt_content = []
        for i, segment in enumerate(segments):
            start_time = self.format_srt_time(segment['start'])
            end_time = self.format_srt_time(segment['end'])
            text = segment['segment'].strip()

            if text:  # only emit cues that have text
                srt_content.append(str(i + 1))
                srt_content.append(f"{start_time} --> {end_time}")
                srt_content.append(text)
                srt_content.append("")  # blank separator line

        return "\n".join(srt_content)