import time
import warnings
import os
import sys

# NOTE(review): `time` is imported but unused in this chunk — confirm before removing.

# Silence noisy numpy runtime/user warnings that would clutter console output.
warnings.filterwarnings('ignore', category=RuntimeWarning)
warnings.filterwarnings('ignore', category=UserWarning)

# Disable tokenizer parallelism to suppress fork-related warnings from the
# Hugging Face tokenizers library.
os.environ['TOKENIZERS_PARALLELISM'] = 'false'

# Heavy third-party imports are guarded so a missing dependency produces an
# actionable install hint instead of a bare traceback.
try:
    from transformers import SeamlessM4TModel, AutoProcessor
    import torch
    import soundfile as sf
    import librosa
    import numpy as np
except ImportError as e:
    print(f"导入错误: {e}")
    print("请安装必需的库:")
    print("pip install transformers torch soundfile librosa")
    sys.exit(1)


class SeamlessHFTranslator:
    """Speech-to-text and text-to-speech translator backed by Hugging Face's
    SeamlessM4T model (processor and model are loaded in __init__)."""

    def __init__(self, model_name=""):
        """Load the SeamlessM4T processor and model weights.

        The model id is first validated with a cheap config fetch, then the
        processor and weights are loaded: fp16 with device_map="auto" on GPU,
        fp32 on CPU. On failure, remediation hints are printed and the
        original exception is re-raised.
        """
        print(f"开始加载模型: {model_name}")

        try:
            # Cheap existence/reachability check before downloading weights.
            from transformers import AutoConfig
            config = AutoConfig.from_pretrained(model_name)
            print("模型配置验证成功")

            # Prefer GPU when available.
            # device = "cpu"
            device = "cuda" if torch.cuda.is_available() else "cpu"
            print(f"使用设备: {device}")

            self.processor = AutoProcessor.from_pretrained(model_name)

            # Pick dtype per device: half precision on GPU to save VRAM,
            # full precision on CPU where fp16 is slow/poorly supported.
            if device == "cuda":
                # GPU: fp16 + automatic device placement.
                self.model = SeamlessM4TModel.from_pretrained(
                    model_name,
                    torch_dtype=torch.float16,
                    device_map="auto",
                    low_cpu_mem_usage=True
                )
            else:
                # CPU: fp32; inference will be slow.
                print("警告: 使用CPU运行，速度会较慢")
                self.model = SeamlessM4TModel.from_pretrained(
                    model_name,
                    torch_dtype=torch.float32,
                    low_cpu_mem_usage=True
                )
                self.model.to(device)

            print("模型加载成功!")

        except Exception as e:
            # Print common remedies (network/proxy/manual download) before
            # re-raising so the caller can still handle the failure.
            print(f"模型加载失败: {e}")
            print("\n可能的解决方案:")
            print("1. 检查网络连接")
            print("2. 尝试使用代理或VPN")
            print("3. 手动下载模型文件")
            print("4. 使用其他模型版本")
            raise

    def speech_to_text(self, audio_file, src_lang="cmn", tgt_lang="eng"):
        """Translate speech in `audio_file` into text in `tgt_lang`.

        Returns the translated text (str) on success, a diagnostic string if
        decoding fails, or None on an unrecoverable error.

        NOTE(review): `src_lang` is accepted but never forwarded to the
        processor or to `generate` — presumably the model detects the source
        language from the audio itself; confirm before relying on it.
        """
        try:
            print(f"正在处理音频文件: {audio_file}")

            # Fail fast with a clear error when the path is wrong.
            if not os.path.exists(audio_file):
                raise FileNotFoundError(f"音频文件不存在: {audio_file}")

            # Decode with librosa, resampled to 16 kHz mono — the rate fed
            # to the processor below.
            try:
                audio, sample_rate = librosa.load(audio_file, sr=16000, mono=True)
                print(f"音频加载成功，采样率: {sample_rate}, 时长: {len(audio) / sample_rate:.2f}秒")
            except Exception as e:
                print(f"音频加载失败: {e}")
                return None

            # Truncate long clips to bound memory use during generation.
            max_duration = 20  # cap in seconds; longer input is truncated below
            if len(audio) / sample_rate > max_duration:
                print(f"音频太长，截取前{max_duration}秒")
                audio = audio[:max_duration * sample_rate]

            # Convert the waveform into model-ready tensors.
            print("正在处理音频输入...")
            audio_inputs = self.processor(
                audios=audio,
                sampling_rate=sample_rate,
                return_tensors="pt"
            )

            # Move inputs onto whatever device the model weights live on.
            device = next(self.model.parameters()).device
            audio_inputs = {k: v.to(device) for k, v in audio_inputs.items()}

            print("正在生成翻译...")
            # Greedy, text-only generation: generate_speech=False skips the
            # vocoder so only token ids are produced.
            with torch.no_grad():
                generated_outputs = self.model.generate(
                    **audio_inputs,
                    tgt_lang=tgt_lang,
                    generate_speech=False,  # text output only, no waveform
                    max_new_tokens=128,
                    do_sample=False,  # deterministic greedy decoding
                    num_beams=1,
                    pad_token_id=self.processor.tokenizer.pad_token_id,
                    eos_token_id=self.processor.tokenizer.eos_token_id,
                )

            # Debug output: the return type varies across transformers
            # versions, hence the defensive branching below.
            print(f"生成输出类型: {type(generated_outputs)}")
            print(f"生成输出属性: {dir(generated_outputs)}")

            # Decode the generated ids to text. Three output shapes are
            # handled: a Generate*Output object exposing `.sequences`, a raw
            # tensor of token ids, or a list/tuple wrapper.
            try:
                if hasattr(generated_outputs, 'sequences'):
                    # Standard generation-output object
                    # (e.g. GenerateEncoderDecoderOutput).
                    print("检测到sequences属性")
                    sequences = generated_outputs.sequences
                    print(f"Sequences形状: {sequences.shape}")
                    print(f"Sequences类型: {type(sequences)}")

                    # Single-clip input: decode the first batch entry.
                    tokens_to_decode = sequences[0]

                    # tolist() needs host memory, so move off the GPU first.
                    if tokens_to_decode.is_cuda:
                        tokens_to_decode = tokens_to_decode.cpu()

                    token_ids = tokens_to_decode.tolist()
                    print(f"准备解码的token数量: {len(token_ids)}")
                    print(f"前10个tokens: {token_ids[:10]}")

                    print(f"解码内容预览: {self.processor.tokenizer.convert_ids_to_tokens(token_ids[:20])}")

                    # Strip special tokens (pad/eos/language tags) from the text.
                    translated_text = self.processor.tokenizer.decode(
                        token_ids,
                        skip_special_tokens=True
                    )

                elif isinstance(generated_outputs, torch.Tensor):
                    print("检测到张量输出")
                    if generated_outputs.dim() == 2:
                        # [batch_size, seq_len]: take the first sequence.
                        tokens_to_decode = generated_outputs[0]
                    else:
                        tokens_to_decode = generated_outputs

                    # tolist() needs host memory, so move off the GPU first.
                    if tokens_to_decode.is_cuda:
                        tokens_to_decode = tokens_to_decode.cpu()

                    token_ids = tokens_to_decode.tolist()
                    print(f"准备解码的token数量: {len(token_ids)}")
                    print(f"前10个tokens: {token_ids[:10]}")

                    translated_text = self.processor.tokenizer.decode(
                        token_ids,
                        skip_special_tokens=True
                    )

                elif isinstance(generated_outputs, (list, tuple)):
                    print("检测到列表/元组输出")
                    # List/tuple wrapper: decode the first element.
                    if len(generated_outputs) > 0:
                        first_output = generated_outputs[0]
                        if hasattr(first_output, 'tolist'):
                            token_ids = first_output.tolist()
                        else:
                            token_ids = first_output

                        translated_text = self.processor.tokenizer.decode(
                            token_ids,
                            skip_special_tokens=True
                        )
                    else:
                        translated_text = "无输出"
                else:
                    print(f"未知的输出格式: {type(generated_outputs)}")
                    # Dump attribute names to aid debugging unknown types.
                    if hasattr(generated_outputs, '__dict__'):
                        print(f"输出对象的字典: {generated_outputs.__dict__.keys()}")
                    return "解码失败：未知输出格式"

                print(f"解码成功，文本长度: {len(translated_text)}")
                return translated_text.strip()

            except Exception as decode_error:
                # Fallback: batch_decode accepts a 2-D tensor directly.
                print(f"解码失败: {decode_error}")
                print(f"尝试使用batch_decode...")

                try:
                    if isinstance(generated_outputs, torch.Tensor):
                        translated_texts = self.processor.tokenizer.batch_decode(
                            generated_outputs,
                            skip_special_tokens=True
                        )
                        return translated_texts[0].strip() if translated_texts else "无翻译结果"
                    else:
                        return "批量解码失败"

                except Exception as batch_error:
                    print(f"批量解码也失败: {batch_error}")
                    return f"所有解码方法均失败: {decode_error}"

        except Exception as e:
            print(f"语音转文本失败: {e}")
            import traceback
            print(f"详细错误信息: {traceback.format_exc()}")
            return None

    def text_to_speech(self, text, src_lang="cmn", tgt_lang="eng", output_file="output.wav"):
        """Translate `text` (in `src_lang`) and synthesize speech in `tgt_lang`.

        Writes a WAV file to `output_file` and returns its path, or None on
        failure.
        """
        try:
            print(f"正在处理文本: {text}")

            # Bound generation cost by truncating very long inputs.
            if len(text) > 200:
                print("文本太长，截取前200字符")
                text = text[:200]

            # Tokenize the source text for the model.
            text_inputs = self.processor(
                text=text,
                src_lang=src_lang,
                return_tensors="pt"
            )

            # Move inputs onto whatever device the model weights live on.
            device = next(self.model.parameters()).device
            text_inputs = {k: v.to(device) for k, v in text_inputs.items()}

            print("正在生成语音...")
            # NOTE(review): assumes the first element of generate()'s return
            # is the waveform tensor when generate_speech=True — confirm
            # against the installed transformers version.
            with torch.no_grad():
                audio_array_from_text = self.model.generate(
                    **text_inputs,
                    tgt_lang=tgt_lang,
                    generate_speech=True
                )[0].cpu().numpy().squeeze()

            # Written at 16 kHz — presumably the model's output sample rate;
            # verify against the model card.
            sf.write(output_file, audio_array_from_text, 16000)
            print(f"语音文件已保存: {output_file}")
            return output_file

        except Exception as e:
            print(f"文本转语音失败: {e}")
            import traceback
            print(f"详细错误信息: {traceback.format_exc()}")
            return None


def test_model_availability():
    """Return the first reachable model id from the candidate list, or None.

    For each candidate, a lightweight config fetch from the Hugging Face hub
    serves as an existence/reachability probe; the first model whose config
    resolves is returned. Failures are reported and the next candidate is
    tried.
    """
    # Hoisted out of the loop: one import, not one per candidate.
    from transformers import AutoConfig

    model_names = [
        "facebook/hf-seamless-m4t-medium"
    ]

    for model_name in model_names:
        try:
            # The config object itself is not needed — fetching it is the check.
            AutoConfig.from_pretrained(model_name)
            print(f"✓ {model_name} 可用")
            return model_name
        except Exception as e:
            # Report (network error, bad id, ...) and try the next candidate.
            print(f"✗ {model_name} 不可用: {e}")

    return None


def main(audio_file="E:\\Files\\music\\juhao.mp3"):
    """Run the end-to-end speech-to-text translation demo.

    Args:
        audio_file: Path to the input audio clip. Defaults to the original
            hard-coded sample path, so existing zero-argument callers are
            unaffected; pass a path to translate a different file.

    Returns:
        None. Results and errors are reported via print().
    """
    try:
        # Bail out early with a helpful message if the input is missing.
        if not os.path.exists(audio_file):
            print(f"音频文件不存在: {audio_file}")
            print("请确认文件路径正确，或将文件复制到当前目录")
            return

        # Probe the hub before committing to the (slow) model download.
        print("=== 检查模型可用性 ===")
        available_model = test_model_availability()

        if not available_model:
            print("没有可用的模型，请检查网络连接或尝试手动下载模型")
            return

        # Load processor + weights; raises (and is caught below) on failure.
        print("\n=== 初始化翻译器 ===")
        translator = SeamlessHFTranslator(available_model)

        # Positional args are (src_lang, tgt_lang): English speech -> Chinese text.
        print("\n=== 开始语音转文本翻译 ===")
        result = translator.speech_to_text(audio_file, "eng", "cmn")

        if result:
            print(f"\n翻译结果: {result}")
        else:
            print("翻译失败")

    except Exception as e:
        # Top-level boundary: report with traceback instead of crashing.
        print(f"程序运行失败: {e}")
        import traceback
        print(f"详细错误信息: {traceback.format_exc()}")


# Script entry point: run the demo only when executed directly, not on import.
if __name__ == "__main__":
    main()
