import time
import warnings
import os
import sys
import torch
import gc

# Silence noisy numpy-related runtime/user warnings.
warnings.filterwarnings('ignore', category=RuntimeWarning)
warnings.filterwarnings('ignore', category=UserWarning)

# Environment tweaks for memory management: disable tokenizer thread
# parallelism and let the CUDA caching allocator use expandable segments
# to reduce fragmentation.
os.environ['TOKENIZERS_PARALLELISM'] = 'false'
os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'expandable_segments:True'

# Import the heavy third-party dependencies; exit with install hints when
# any of them is missing so the user gets an actionable message.
try:
    from transformers import SeamlessM4TModel, AutoProcessor, SeamlessM4TTokenizer
    import torch
    import soundfile as sf
    import librosa
    import numpy as np
    from torch.cuda.amp import autocast
except ImportError as e:
    print(f"导入错误: {e}")
    print("请安装必需的库:")
    print("pip install transformers torch soundfile librosa")
    sys.exit(1)


class SeamlessHFTranslator:
    # Wraps a Hugging Face SeamlessM4T model for speech-to-text and
    # text-to-speech translation, with VRAM-conscious loading and inference.

    def __init__(self, model_name=""):
        """Load the SeamlessM4T model, tokenizer and feature extractor.

        Args:
            model_name: Hugging Face model id or local path
                (e.g. "facebook/hf-seamless-m4t-medium").

        Raises:
            Exception: re-raises whatever the config/processor/model loading
                raised, after printing troubleshooting hints.
        """
        print(f"开始加载模型: {model_name}")

        try:
            # Validate that the model config can be fetched before committing
            # to the much heavier weight download.
            from transformers import AutoConfig
            config = AutoConfig.from_pretrained(model_name)
            print("模型配置验证成功")

            # Pick GPU when available, otherwise fall back to CPU.
            self.device = "cuda" if torch.cuda.is_available() else "cpu"
            print(f"使用设备: {self.device}")

            if self.device == "cuda":
                self._print_gpu_memory("初始GPU状态")
                # Free cached allocations before loading the weights.
                self._clear_gpu_memory()

            # Load the processor pieces separately: the combined AutoProcessor
            # is used only to obtain its feature extractor, while the tokenizer
            # is constructed explicitly as SeamlessM4TTokenizer.
            print("正在加载处理器...")
            # Feature extractor converts raw audio into model input features.
            self.feature_extractor = AutoProcessor.from_pretrained(model_name).feature_extractor
            # Explicit tokenizer class (instead of AutoTokenizer resolution).
            self.tokenizer = SeamlessM4TTokenizer.from_pretrained(model_name)
            print("处理器加载成功")

            # Choose the loading strategy based on the device.
            print("正在加载模型...")
            if self.device == "cuda":
                # GPU path: half precision + automatic device map to keep
                # VRAM usage low.
                self.model = SeamlessM4TModel.from_pretrained(
                    model_name,
                    torch_dtype=torch.float16,  # half precision halves weight memory
                    device_map="auto",
                    low_cpu_mem_usage=True,
                    use_safetensors=True,  # safer serialization format
                    # Further options for very small GPUs:
                    # load_in_8bit=False,  # set True if VRAM is extremely tight
                    # max_memory={0: "3GB"}  # cap memory used on GPU 0
                )

                # NOTE(review): gradient checkpointing saves memory only during
                # backprop; the model runs under no_grad/eval below, so this is
                # likely a no-op — confirm before relying on it.
                if hasattr(self.model, 'gradient_checkpointing_enable'):
                    self.model.gradient_checkpointing_enable()

                self._print_gpu_memory("模型加载后")

            else:
                # CPU path: full precision, explicit move to device.
                print("警告: 使用CPU运行，速度会较慢")
                self.model = SeamlessM4TModel.from_pretrained(
                    model_name,
                    torch_dtype=torch.float32,
                    low_cpu_mem_usage=True
                )
                self.model.to(self.device)

            # Inference only: disable dropout / training-mode layers.
            self.model.eval()
            print("模型加载成功!")

        except Exception as e:
            print(f"模型加载失败: {e}")
            print("\n可能的解决方案:")
            print("1. 检查网络连接")
            print("2. 尝试使用代理或VPN")
            print("3. 手动下载模型文件")
            print("4. 使用其他模型版本")
            print("5. 释放其他程序占用的GPU显存")
            raise

    def _clear_gpu_memory(self):
        """Release cached CUDA memory and run the Python garbage collector."""
        if self.device == "cuda":
            torch.cuda.empty_cache()
            gc.collect()
            print("GPU内存已清理")

    def _print_gpu_memory(self, stage=""):
        """Print allocated/reserved/total VRAM for GPU 0, labelled by *stage*."""
        if self.device == "cuda":
            allocated = torch.cuda.memory_allocated(0) / 1024 ** 3
            reserved = torch.cuda.memory_reserved(0) / 1024 ** 3
            total = torch.cuda.get_device_properties(0).total_memory / 1024 ** 3
            print(f"{stage} - GPU显存: 已分配 {allocated:.2f}GB, 已保留 {reserved:.2f}GB, 总计 {total:.2f}GB")

    def speech_to_text(self, audio_file, src_lang="cmn", tgt_lang="eng", max_duration=20):
        """Translate speech from an audio file into text.

        Args:
            audio_file: path to the input audio (any format librosa can read).
            src_lang: source language code. NOTE(review): currently never
                forwarded to the feature extractor or generate() — confirm
                whether the model needs it for speech input.
            tgt_lang: target language code for the generated text.
            max_duration: maximum number of seconds of audio to load.

        Returns:
            The translated text, or None on failure.
        """
        try:
            print(f"正在处理音频文件: {audio_file}")
            self._clear_gpu_memory()  # free cached VRAM before processing

            # Fail fast on a missing input file.
            if not os.path.exists(audio_file):
                raise FileNotFoundError(f"音频文件不存在: {audio_file}")

            # Load the audio, capped at max_duration to bound memory use.
            try:
                # Resample to 16 kHz mono (the rate later passed to the
                # feature extractor and used when writing output audio).
                audio, sample_rate = librosa.load(
                    audio_file,
                    sr=16000,
                    mono=True,
                    duration=max_duration  # cap clip length
                )
                print(f"音频加载成功，采样率: {sample_rate}, 时长: {len(audio) / sample_rate:.2f}秒")
            except Exception as e:
                print(f"音频加载失败: {e}")
                return None

            # Long clips are split into fixed-size segments and translated
            # piecewise to keep peak VRAM low.
            segment_length = 10  # seconds per segment
            if len(audio) / sample_rate > segment_length:
                print(f"音频较长，将分段处理（每段{segment_length}秒）")
                return self._process_long_audio(audio, sample_rate, src_lang, tgt_lang, segment_length)

            # Short clip: translate in one shot.
            return self._process_audio_segment(audio, sample_rate, src_lang, tgt_lang)

        except Exception as e:
            print(f"语音转文本失败: {e}")
            import traceback
            print(f"详细错误信息: {traceback.format_exc()}")
            return None
        finally:
            self._clear_gpu_memory()  # always release VRAM afterwards

    def _process_long_audio(self, audio, sample_rate, src_lang, tgt_lang, segment_length):
        """Translate a long clip segment by segment and join the results.

        Returns:
            The per-segment translations joined with spaces, or None when
            every segment failed.
        """
        results = []
        segment_samples = segment_length * sample_rate  # samples per segment

        for i in range(0, len(audio), segment_samples):
            segment = audio[i:i + segment_samples]
            print(f"处理音频段 {i // segment_samples + 1}")

            result = self._process_audio_segment(segment, sample_rate, src_lang, tgt_lang)
            if result:
                results.append(result)

            # Release VRAM between segments.
            self._clear_gpu_memory()

        return " ".join(results) if results else None

    # Audio handling built on the separately-loaded processor components.
    def _process_audio_segment(self, audio, sample_rate, src_lang, tgt_lang):
        """Translate a single audio segment to text.

        Returns the decoded text, or None on OOM or any other failure.
        NOTE(review): src_lang is accepted but unused in this method.
        """
        try:
            # Convert the raw waveform into model input tensors.
            print("正在处理音频输入...")
            # NOTE(review): `audios=` is passed to the feature extractor
            # directly (not via the processor) — confirm the keyword matches
            # the installed transformers version's signature.
            audio_inputs = self.feature_extractor(
                audios=audio,
                sampling_rate=sample_rate,
                return_tensors="pt"
            )

            # Move every input tensor to the model's device.
            audio_inputs = {k: v.to(self.device) for k, v in audio_inputs.items()}

            self._print_gpu_memory("音频处理后")

            print("正在生成翻译...")
            # Greedy, cache-free generation under no_grad (plus autocast on
            # GPU) to minimize memory use.
            with torch.no_grad():
                if self.device == "cuda":
                    with autocast():  # mixed-precision inference
                        generated_outputs = self.model.generate(
                            **audio_inputs,
                            tgt_lang=tgt_lang,
                            generate_speech=False,
                            max_new_tokens=64,  # cap output length
                            do_sample=False,
                            num_beams=1,  # greedy search
                            pad_token_id=self.tokenizer.pad_token_id,
                            eos_token_id=self.tokenizer.eos_token_id,
                            use_cache=False,  # trade speed for memory
                        )
                else:
                    # Same generation settings, without autocast on CPU.
                    generated_outputs = self.model.generate(
                        **audio_inputs,
                        tgt_lang=tgt_lang,
                        generate_speech=False,
                        max_new_tokens=64,
                        do_sample=False,
                        num_beams=1,
                        pad_token_id=self.tokenizer.pad_token_id,
                        eos_token_id=self.tokenizer.eos_token_id,
                        use_cache=False,
                    )

            self._print_gpu_memory("生成后")

            # Turn the generated token ids into text.
            return self._decode_output(generated_outputs)

        except torch.cuda.OutOfMemoryError as e:
            print(f"GPU显存不足: {e}")
            print("尝试使用更小的音频段或切换到CPU模式")
            self._clear_gpu_memory()
            return None
        except Exception as e:
            print(f"处理音频段失败: {e}")
            return None

    # Output decoding tolerant of both tensor and structured generate outputs.
    def _decode_output(self, generated_outputs):
        """Decode generate() output into a stripped text string.

        Handles both an object exposing `.sequences` and a raw tensor;
        returns a human-readable failure string on unknown formats or errors.
        """
        try:
            # Normalize the output into a 1-D token-id tensor.
            if hasattr(generated_outputs, 'sequences'):
                sequences = generated_outputs.sequences
                tokens_to_decode = sequences[0]
            elif isinstance(generated_outputs, torch.Tensor):
                # A 2-D tensor is (batch, tokens); take the first row.
                tokens_to_decode = generated_outputs[0] if generated_outputs.dim() == 2 else generated_outputs
            else:
                print(f"未知的输出格式: {type(generated_outputs)}")
                return "解码失败：未知输出格式"

            # Tokenizer decoding expects CPU tensors.
            if tokens_to_decode.is_cuda:
                tokens_to_decode = tokens_to_decode.cpu()

            # Decode ids to text, dropping special tokens.
            translated_text = self.tokenizer.decode(
                tokens_to_decode,
                skip_special_tokens=True
            )

            return translated_text.strip()

        except Exception as e:
            print(f"解码失败: {e}")
            return f"解码失败: {str(e)}"

    # Text-to-speech built on the tokenizer plus generate_speech=True.
    def text_to_speech(self, text, src_lang="cmn", tgt_lang="eng", output_file="output.wav"):
        """Translate text and synthesize it as speech saved to a WAV file.

        Args:
            text: input text; truncated to 100 characters to bound memory.
            src_lang: source language code passed to the tokenizer.
            tgt_lang: target language code for generation.
            output_file: path of the WAV file to write (16 kHz).

        Returns:
            The output file path, or None on failure.
        """
        try:
            print(f"正在处理文本: {text}")
            self._clear_gpu_memory()

            # Truncate long inputs to keep generation memory bounded.
            max_length = 100  # character cap on the input text
            if len(text) > max_length:
                print(f"文本太长，截取前{max_length}字符")
                text = text[:max_length]

            # Tokenize the text with its source language tag.
            text_inputs = self.tokenizer(
                text=text,
                src_lang=src_lang,
                return_tensors="pt"
            )

            # Move the input tensors to the model's device.
            text_inputs = {k: v.to(self.device) for k, v in text_inputs.items()}

            print("正在生成语音...")
            # Generate the waveform; first element of the output is the audio,
            # moved to CPU and squeezed to a 1-D numpy array.
            with torch.no_grad():
                if self.device == "cuda":
                    with autocast():
                        audio_array_from_text = self.model.generate(
                            **text_inputs,
                            tgt_lang=tgt_lang,
                            generate_speech=True,
                            use_cache=False
                        )[0].cpu().numpy().squeeze()
                else:
                    audio_array_from_text = self.model.generate(
                        **text_inputs,
                        tgt_lang=tgt_lang,
                        generate_speech=True,
                        use_cache=False
                    )[0].cpu().numpy().squeeze()

            # Write the waveform at the model's 16 kHz output rate.
            sf.write(output_file, audio_array_from_text, 16000)
            print(f"语音文件已保存: {output_file}")
            return output_file

        except torch.cuda.OutOfMemoryError as e:
            print(f"GPU显存不足: {e}")
            self._clear_gpu_memory()
            return None
        except Exception as e:
            print(f"文本转语音失败: {e}")
            import traceback
            print(f"详细错误信息: {traceback.format_exc()}")
            return None
        finally:
            self._clear_gpu_memory()

    def __del__(self):
        """Best-effort VRAM cleanup when the translator is garbage-collected."""
        # hasattr guard: __init__ may have failed before self.device was set.
        if hasattr(self, 'device') and self.device == "cuda":
            self._clear_gpu_memory()

def main():
    """Driver: find a usable model, build the translator, and run a
    speech-to-text translation on a hard-coded audio file."""
    try:
        # Hard-coded input clip.
        audio_file = "E:\\Files\\music\\juhao.mp3"

        # Guard: nothing to do without the input file.
        if not os.path.exists(audio_file):
            print(f"音频文件不存在: {audio_file}")
            print("请确认文件路径正确，或将文件复制到当前目录")
            return

        # Probe model availability before any heavy loading.
        print("=== 检查模型可用性 ===")
        model_name = test_model_availability()
        if not model_name:
            print("没有可用的模型，请检查网络连接或尝试手动下载模型")
            return

        # Build the translator around the first reachable model.
        print("\n=== 初始化翻译器 ===")
        translator = SeamlessHFTranslator(model_name)

        print("\n=== 开始语音转文本翻译 ===")
        # Keep the clip short to limit peak VRAM use.
        result = translator.speech_to_text(
            audio_file,
            src_lang="cmn",
            tgt_lang="cmn",
            max_duration=15  # at most 15 seconds of audio
        )

        if not result:
            print("翻译失败")
        else:
            print(f"\n翻译结果: {result}")

        # Report the final VRAM state.
        if torch.cuda.is_available():
            translator._print_gpu_memory("程序结束")

    except Exception as e:
        print(f"程序运行失败: {e}")
        import traceback
        print(f"详细错误信息: {traceback.format_exc()}")
    finally:
        # Last-chance cleanup of cached CUDA memory.
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
            gc.collect()

def test_model_availability():
    """Return the first model id whose config can be fetched, or None.

    Only the lightweight config is downloaded, so this is a cheap probe of
    both network reachability and model existence before loading weights.

    Returns:
        The first available model name string, or None when none is reachable.
    """
    # Hoisted out of the loop: the import is loop-invariant.
    from transformers import AutoConfig

    model_names = [
        "facebook/hf-seamless-m4t-medium"
    ]

    for model_name in model_names:
        try:
            # from_pretrained raises if the id is wrong or the hub is
            # unreachable; the config object itself is not needed.
            AutoConfig.from_pretrained(model_name)
            print(f"✓ {model_name} 可用")
            return model_name
        except Exception as e:
            print(f"✗ {model_name} 不可用: {e}")

    return None


# Run main() only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()
