
import torch
import numpy as np
import librosa
import noisereduce as nr
from transformers import HubertModel, Wav2Vec2FeatureExtractor
import whisper
from pathlib import Path
import warnings
warnings.filterwarnings('ignore')





class FrenchTranscriptionConfig:
    """Central configuration for the French transcription pipeline."""

    # --- Audio preprocessing ---
    SAMPLE_RATE = 16000
    ENABLE_NOISE_REDUCTION = True

    # --- HuBERT feature extraction ---
    HUBERT_MODEL_NAME = "facebook/hubert-base-ls960"
    FEATURE_DIM = 256

    # --- Whisper decoding ---
    WHISPER_MODEL_SIZE = "small"
    WHISPER_LANGUAGE = "french"
    BEAM_SIZE = 5

    # --- Whisper input geometry (mel bins x frames) ---
    WHISPER_MEL_BINS = 80
    WHISPER_MAX_FRAMES = 3000

    def __init__(self):
        """Pick the compute device: CUDA when available, CPU otherwise."""
        if torch.cuda.is_available():
            self.device = torch.device("cuda")
        else:
            self.device = torch.device("cpu")
        print(f"✓ 使用设备: {self.device}")


# Module-level singleton: created at import time so every helper below
# shares one device/configuration object.
config = FrenchTranscriptionConfig()





class ModelLoader:
    """Lazily loads and caches the heavyweight models on first access.

    Each property downloads/initialises its model only once; ``None`` in the
    corresponding private slot means "not loaded yet". External code may reset
    a slot to ``None`` to force a reload (the pipeline does this when the
    Whisper model size is overridden).
    """

    def __init__(self):
        # Cache slots, populated on first property access.
        self._hubert_model = None
        self._feature_extractor = None
        self._whisper_model = None

    @property
    def hubert_model(self):
        """HuBERT encoder on the configured device, in eval mode."""
        if self._hubert_model is not None:
            return self._hubert_model
        print("📥 正在加载 HuBERT 模型（首次运行会下载，约370MB）...")
        model = HubertModel.from_pretrained(config.HUBERT_MODEL_NAME)
        model = model.to(config.device)
        model.eval()
        self._hubert_model = model
        print("✓ HuBERT 模型加载完成")
        return self._hubert_model

    @property
    def feature_extractor(self):
        """Wav2Vec2 feature extractor matching the HuBERT checkpoint."""
        if self._feature_extractor is not None:
            return self._feature_extractor
        print("📥 正在加载特征提取器...")
        extractor = Wav2Vec2FeatureExtractor.from_pretrained(config.HUBERT_MODEL_NAME)
        self._feature_extractor = extractor
        print("✓ 特征提取器加载完成")
        return self._feature_extractor

    @property
    def whisper_model(self):
        """Whisper model of size ``config.WHISPER_MODEL_SIZE``."""
        if self._whisper_model is not None:
            return self._whisper_model
        print(f"📥 正在加载 Whisper {config.WHISPER_MODEL_SIZE} 模型...")
        self._whisper_model = whisper.load_model(
            config.WHISPER_MODEL_SIZE,
            device=config.device
        )
        print("✓ Whisper 模型加载完成")
        return self._whisper_model


# Shared lazy-loading model cache; nothing is downloaded until a model
# property is first accessed.
models = ModelLoader()





def load_and_preprocess_audio(audio_path, enable_noise_reduction=None):
    """Load an audio file, optionally denoise it, and peak-normalize.

    Parameters
    ----------
    audio_path : str or Path
        Path to the input audio file.
    enable_noise_reduction : bool or None
        Overrides ``config.ENABLE_NOISE_REDUCTION`` when not ``None``.

    Returns
    -------
    np.ndarray
        Mono waveform resampled to ``config.SAMPLE_RATE`` Hz, peak-normalized
        to 0.95.

    Raises
    ------
    FileNotFoundError
        If ``audio_path`` does not exist.
    """
    path = Path(audio_path)
    if not path.exists():
        raise FileNotFoundError(f"音频文件不存在: {path}")

    print(f"📂 正在加载音频: {path.name}")

    # librosa resamples to the target rate and downmixes to mono for us.
    waveform, _ = librosa.load(str(path), sr=config.SAMPLE_RATE, mono=True)

    duration = len(waveform) / config.SAMPLE_RATE
    print(f"✓ 音频加载成功: {duration:.2f} 秒, {config.SAMPLE_RATE} Hz")

    denoise = enable_noise_reduction
    if denoise is None:
        denoise = config.ENABLE_NOISE_REDUCTION

    if denoise:
        print("🔧 正在降噪...")
        waveform = nr.reduce_noise(
            y=waveform,
            sr=config.SAMPLE_RATE,
            stationary=True,
            prop_decrease=1.0
        )
        print("✓ 降噪完成")

    # Peak-normalize to 0.95 to leave headroom; skip entirely silent audio.
    peak = np.abs(waveform).max()
    if peak > 0:
        waveform = waveform / peak * 0.95

    return waveform





def extract_hubert_features(waveform):
    """Extract frame-level HuBERT features pooled down to ``config.FEATURE_DIM``.

    Parameters
    ----------
    waveform : np.ndarray
        Mono waveform at ``config.SAMPLE_RATE`` Hz.

    Returns
    -------
    np.ndarray
        Features of shape (time_steps, config.FEATURE_DIM).
    """
    print("🧠 正在提取 HuBERT 特征...")

    # Tokenize/pad the waveform; cap at 480000 samples (30 s at 16 kHz).
    inputs = models.feature_extractor(
        waveform,
        sampling_rate=config.SAMPLE_RATE,
        return_tensors="pt",
        padding=True,
        truncation=True,
        max_length=480000,
        return_attention_mask=True
    )

    inputs = {k: v.to(config.device) for k, v in inputs.items()}

    with torch.no_grad():
        outputs = models.hubert_model(**inputs)
        hidden_states = outputs.last_hidden_state  # (batch, time, hidden_dim)

    batch_size, time_steps, hidden_dim = hidden_states.shape

    # Pool hidden_dim (768 for hubert-base) down to config.FEATURE_DIM by
    # averaging element-wise across chunks of FEATURE_DIM channels.
    # BUGFIX: the original hard-coded `3, 256` here even though
    # config.FEATURE_DIM exists; derive the group size from the actual hidden
    # dimension and drop any remainder channels so the reshape cannot fail
    # for checkpoints with a different hidden size.
    group_size = hidden_dim // config.FEATURE_DIM
    usable = group_size * config.FEATURE_DIM
    features_reduced = hidden_states[:, :, :usable].reshape(
        batch_size, time_steps, group_size, config.FEATURE_DIM
    ).mean(dim=2)

    features = features_reduced.squeeze(0).cpu().numpy()

    print(f"✓ HuBERT 特征提取完成: 形状 {features.shape}")
    return features





def adapt_features_to_whisper(features_256d, mel_bins=None, max_frames=None):
    """Project pooled HuBERT features into Whisper's (mel_bins, max_frames) layout.

    Parameters
    ----------
    features_256d : np.ndarray
        Features of shape (time_steps, feature_dim).
    mel_bins : int or None
        Target number of mel-like rows; defaults to ``config.WHISPER_MEL_BINS``.
    max_frames : int or None
        Target frame count; defaults to ``config.WHISPER_MAX_FRAMES``.

    Returns
    -------
    np.ndarray
        float32 array of shape (mel_bins, max_frames), zero-padded or
        truncated along the time axis.
    """
    print("🔄 正在适配特征到 Whisper 格式...")

    # Backward-compatible generalization: callers that pass nothing get the
    # original config-driven behavior.
    if mel_bins is None:
        mel_bins = config.WHISPER_MEL_BINS
    if max_frames is None:
        max_frames = config.WHISPER_MAX_FRAMES

    time_steps, feature_dim = features_256d.shape

    # Average adjacent channel groups down to mel_bins rows; remainder
    # channels (256 -> 80 leaves 16) are intentionally dropped.
    group_size = feature_dim // mel_bins
    grouped = features_256d[:, :group_size * mel_bins].reshape(
        time_steps, mel_bins, group_size
    )
    features_80d = grouped.mean(axis=2)

    # Whisper expects (mel_bins, frames), i.e. time on the second axis.
    features_transposed = features_80d.T

    current_frames = features_transposed.shape[1]

    if current_frames < max_frames:
        pad_width = ((0, 0), (0, max_frames - current_frames))
        adapted_features = np.pad(
            features_transposed,
            pad_width,
            mode='constant',
            constant_values=0
        )
        print(f"  ⚠️ 音频较短（{current_frames}帧），已填充到{max_frames}帧")
    elif current_frames > max_frames:
        adapted_features = features_transposed[:, :max_frames]
        print(f"  ⚠️ 音频较长（{current_frames}帧），已截断到{max_frames}帧")
    else:
        # BUGFIX: the original fell into the "truncated" branch and printed a
        # misleading warning when the frame count matched exactly.
        adapted_features = features_transposed

    # Whisper models run in float32/float16; ensure a consistent dtype.
    adapted_features = adapted_features.astype(np.float32)

    print(f"✓ 特征适配完成: 形状 {adapted_features.shape}")
    return adapted_features





def transcribe_to_french(adapted_features):
    """Decode adapted features with Whisper and return the French text.

    NOTE(review): ``adapted_features`` here are pooled HuBERT activations
    shaped like a log-mel spectrogram rather than a true mel spectrogram —
    presumably an experimental bridge; output quality is unverified.
    """
    print("📝 正在使用 Whisper 转录...")

    # (80, 3000) -> (1, 80, 3000): add a batch axis and move to the device.
    mel_batch = torch.from_numpy(adapted_features).unsqueeze(0).to(config.device)

    decode_options = whisper.DecodingOptions(
        language=config.WHISPER_LANGUAGE,
        beam_size=config.BEAM_SIZE,
        fp16=torch.cuda.is_available(),
        without_timestamps=True
    )

    # decode() returns one DecodingResult per batch item for 3-D input.
    decoded = models.whisper_model.decode(mel_batch, decode_options)
    transcription = decoded[0].text.strip()

    print(f"✓ 转录完成: {len(transcription)} 字符")
    return transcription





def french_transcription_pipeline(
    audio_path,
    model_size=None,
    enable_noise_reduction=None,
    return_details=False
):
    """End-to-end French transcription: preprocess audio, then run Whisper.

    Parameters
    ----------
    audio_path : str or Path
        Input audio file.
    model_size : str or None
        Temporarily overrides ``config.WHISPER_MODEL_SIZE`` for this call.
    enable_noise_reduction : bool or None
        Overrides ``config.ENABLE_NOISE_REDUCTION`` when not ``None``.
    return_details : bool
        When True, return a dict with the text plus metadata instead of
        just the transcription string.

    Returns
    -------
    str or dict
        The transcription, or a detail dict when ``return_details`` is True.

    Raises
    ------
    FileNotFoundError
        If ``audio_path`` does not exist.
    """
    original_model_size = config.WHISPER_MODEL_SIZE
    if model_size:
        config.WHISPER_MODEL_SIZE = model_size
        # Drop any cached model so the requested size is actually loaded.
        models._whisper_model = None
    print("\n" + "="*60)
    print("🎯 增强法语转录流程开始")
    print("="*60)

    try:

        waveform = load_and_preprocess_audio(audio_path, enable_noise_reduction)
        duration = len(waveform) / config.SAMPLE_RATE

        print("📝 正在使用 Whisper 转录（基于降噪后的音频）...")
        print(f"📥 正在加载 Whisper {config.WHISPER_MODEL_SIZE} 模型...")

        # Whisper's transcribe() reads from a file path, so persist the
        # denoised waveform to a temp WAV to transcribe the cleaned audio.
        import os
        import tempfile

        import soundfile as sf

        with tempfile.NamedTemporaryFile(suffix='.wav', delete=False) as tmp_file:
            tmp_path = tmp_file.name
            sf.write(tmp_path, waveform, config.SAMPLE_RATE)

        try:
            result = models.whisper_model.transcribe(
                tmp_path,
                language='fr',
                verbose=False
            )
            transcription = result['text'].strip()
        finally:
            # Always remove the temp file, even if transcription failed.
            if os.path.exists(tmp_path):
                os.remove(tmp_path)

        print(f"✓ Whisper 转录完成: {len(transcription)} 字符")

        print("\n" + "="*60)
        print("✅ 增强转录完成！")
        print("="*60)

        if return_details:
            return {
                'text': transcription,
                'waveform': waveform,
                'duration': duration,
                'device': str(config.device),
                'model_size': config.WHISPER_MODEL_SIZE,
                'noise_reduced': enable_noise_reduction if enable_noise_reduction is not None else config.ENABLE_NOISE_REDUCTION
            }
        else:
            return transcription

    except Exception as e:
        print(f"\n❌ 转录失败: {type(e).__name__}: {e}")
        raise

    finally:

        if model_size:
            config.WHISPER_MODEL_SIZE = original_model_size
            # BUGFIX: the original restored the size but left the
            # override-sized model cached in models._whisper_model, so a
            # later call without model_size would silently reuse the wrong
            # Whisper model. Invalidate the cache so it reloads correctly.
            models._whisper_model = None





if __name__ == "__main__":
    print("""
    ╔══════════════════════════════════════════════════════════════╗
    ║          法语高精度转录系统 v1.0                              ║
    ║          HuBERT + Whisper 方案                                ║
    ╚══════════════════════════════════════════════════════════════╝
    """)


    test_audio = "../音频仓库/测试mp3（French）.mp3"

    print(f"📌 测试文件: {test_audio}\n")

    try:

        result = french_transcription_pipeline(test_audio)


        print("\n" + "🎉 "*20)
        print(f"【转录结果】\n{result}")
        print("🎉 "*20)

    except FileNotFoundError:
        print(f"❌ 测试文件不存在: {test_audio}")
        print("请将法语音频文件放在音频仓库，或修改 test_audio 变量的路径")
    except Exception as e:
        print(f"❌ 发生错误: {e}")
