#!/usr/bin/env python3
"""
Transformer音频生成器
从音频数据生成新的音频内容
支持多种音频格式和特征提取
"""

import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import numpy as np
from models.model import TransformerModel

try:
    import librosa
    LIBROSA_AVAILABLE = True
except ImportError:
    LIBROSA_AVAILABLE = False

try:
    from scipy import signal
    SCIPY_AVAILABLE = True
except ImportError:
    SCIPY_AVAILABLE = False

class AudioGenerator:
    """Transformer-based audio generator operating on mel-spectrogram frames."""

    def __init__(self, sample_rate=22050, segment_length=1024, hop_length=256, 
                 n_mels=80, embedding_dim=512, nhead=8, num_layers=6, max_segments=512):
        """
        Audio generator - produces new audio from raw audio data.

        Args:
            sample_rate: Sampling rate in Hz.
            segment_length: Audio segment length; also used as the STFT n_fft.
            hop_length: STFT hop length.
            n_mels: Number of mel frequency bands.
            embedding_dim: Embedding dimension fed to the Transformer.
            nhead: Number of attention heads.
            num_layers: Number of Transformer layers.
            max_segments: Maximum number of audio segments (size of the
                positional-encoding table).
        """
        self.sample_rate = sample_rate
        self.segment_length = segment_length
        self.hop_length = hop_length
        self.n_mels = n_mels
        self.max_segments = max_segments

        # Audio feature extraction.
        if LIBROSA_AVAILABLE:
            # librosa mel filterbank; shape is (n_mels, segment_length // 2 + 1),
            # i.e. one weight per STFT frequency bin (not per sample).
            mel_fb = librosa.filters.mel(sr=sample_rate, n_fft=segment_length, n_mels=n_mels)
            self.mel_transform = torch.tensor(mel_fb, dtype=torch.float32)
        else:
            # Fallback: linear projection + ReLU as a learned stand-in mel transform.
            self.mel_transform = nn.Sequential(
                nn.Linear(segment_length // 2 + 1, n_mels),
                nn.ReLU(),
                nn.LayerNorm(n_mels)
            )
            # Initialize the projection.
            nn.init.xavier_uniform_(self.mel_transform[0].weight)
            nn.init.zeros_(self.mel_transform[0].bias)

        # Per-frame (audio segment) embedding.
        self.segment_embedding = nn.Linear(n_mels, embedding_dim)

        # Sinusoidal positional-encoding table.
        self.positional_encoding = self._create_positional_encoding(max_segments, embedding_dim)

        # Transformer backbone (project-local); predicts mel-spectrogram features.
        self.model = TransformerModel(
            input_dim=embedding_dim,
            output_dim=n_mels,  # outputs mel-spectrogram features
            nhead=nhead,
            num_layers=num_layers
        )

    def _create_positional_encoding(self, max_segments, d_model):
        """Build the standard sinusoidal positional encoding, shape (max_segments, d_model)."""
        position = torch.arange(0, max_segments).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, d_model, 2) * -(math.log(10000.0) / d_model))

        pos_encoding = torch.zeros(max_segments, d_model)
        pos_encoding[:, 0::2] = torch.sin(position * div_term)  # even dimensions
        pos_encoding[:, 1::2] = torch.cos(position * div_term)  # odd dimensions

        return pos_encoding

    def _audio_to_mel_spectrogram(self, audio_tensor):
        """Convert a batch of waveforms to mel-spectrogram sequences.

        Args:
            audio_tensor: (batch_size, audio_length) waveform batch.

        Returns:
            (batch_size, time_frames, n_mels) mel spectrogram.
        """
        batch_size, audio_length = audio_tensor.shape

        # Zero-pad audio shorter than one FFT window so the STFT is valid.
        if audio_length < self.segment_length:
            padding_length = self.segment_length - audio_length
            audio_tensor = F.pad(audio_tensor, (0, padding_length), mode='constant', value=0)
            audio_length = self.segment_length

        # Complex STFT.
        stft = torch.stft(audio_tensor, n_fft=self.segment_length, hop_length=self.hop_length,
                          window=torch.hann_window(self.segment_length).to(audio_tensor.device),
                          return_complex=True)

        # Magnitude spectrogram: (batch_size, freq_bins, time_frames).
        magnitude = torch.abs(stft)

        if LIBROSA_AVAILABLE:
            # Apply the mel filterbank via matrix multiplication.
            magnitude_permuted = magnitude.permute(0, 2, 1)  # (batch, time_frames, freq_bins)
            # BUG FIX: move the filterbank to the input's device so GPU inputs work.
            mel_fb_t = self.mel_transform.T.to(magnitude.device)
            mel_spec = torch.matmul(magnitude_permuted, mel_fb_t)  # (batch, time_frames, n_mels)
        else:
            # Learned mel transform.
            mel_spec = self.mel_transform(magnitude.permute(0, 2, 1))  # (batch, time_frames, n_mels)

        return mel_spec

    def _mel_spectrogram_to_audio(self, mel_spec):
        """Convert a mel spectrogram back to audio (placeholder implementation).

        NOTE(review): this is a stub — it ignores the content of `mel_spec` and
        reconstructs from random STFT coefficients, so the output is noise of the
        matching duration. A real implementation should use Griffin-Lim or a
        neural vocoder.
        """
        # mel_spec: (batch_size, time_frames, n_mels)
        batch_size, time_frames, n_mels = mel_spec.shape

        # Random complex STFT of the correct shape (torch.istft requires complex
        # input); created on the input's device so GPU inputs work.
        stft_shape = (batch_size, self.segment_length // 2 + 1, time_frames)
        reconstructed_stft = torch.complex(
            torch.randn(stft_shape, device=mel_spec.device),
            torch.randn(stft_shape, device=mel_spec.device),
        )

        # ISTFT back to the time domain: (batch_size, audio_length).
        audio = torch.istft(reconstructed_stft, n_fft=self.segment_length, hop_length=self.hop_length,
                            window=torch.hann_window(self.segment_length).to(mel_spec.device),
                            return_complex=False)

        return audio

    def preprocess_audio(self, audio_input):
        """Load/convert input audio to a normalized (batch, audio_length) float tensor.

        Args:
            audio_input: File path (requires librosa), numpy array, or tensor.

        Raises:
            ImportError: If a file path is given but librosa is unavailable.
            ValueError: For unsupported input types.
        """
        if isinstance(audio_input, str):
            # Load from a file path.
            if not LIBROSA_AVAILABLE:
                raise ImportError("Librosa is required for audio file processing. Install with: pip install librosa")

            audio, sr = librosa.load(audio_input, sr=self.sample_rate)
            # audio: (audio_length,) where audio_length = sr * duration.
            audio_tensor = torch.tensor(audio, dtype=torch.float32).unsqueeze(0)
        elif isinstance(audio_input, np.ndarray):
            # From a numpy array.
            audio_tensor = torch.tensor(audio_input, dtype=torch.float32)
            if audio_tensor.dim() == 1:
                audio_tensor = audio_tensor.unsqueeze(0)
        elif isinstance(audio_input, torch.Tensor):
            # Already a tensor.
            audio_tensor = audio_input
            if audio_tensor.dim() == 1:
                audio_tensor = audio_tensor.unsqueeze(0)
        else:
            raise ValueError("Unsupported audio input type")

        # Peak-normalize the audio.
        # BUG FIX: guard against division by zero for silent (all-zero) input,
        # which previously produced NaNs.
        peak = torch.max(torch.abs(audio_tensor))
        if peak > 0:
            audio_tensor = audio_tensor / peak

        return audio_tensor

    def generate(self, input_audio, is_noise=False, num_steps=50, temperature=1.0):
        """
        Generate new audio from input audio.

        Args:
            input_audio: Input audio (file path, numpy array or tensor). When
                `is_noise` is True it must already be a mel-like tensor of shape
                (batch_size, time_frames, n_mels).
            is_noise: Treat `input_audio` as a pre-built mel tensor instead of a waveform.
            num_steps: Number of autoregressive steps (frames appended).
            temperature: Temperature parameter controlling randomness.

        Returns:
            generated_audio: Generated audio tensor (batch_size, audio_length).
        """
        if is_noise:
            # Already a mel-like tensor; use it directly.
            mel_spec = input_audio
        else:
            # Preprocess the input audio and convert it to a mel spectrogram.
            audio_tensor = self.preprocess_audio(input_audio)
            mel_spec = self._audio_to_mel_spectrogram(audio_tensor)

        batch_size, time_frames, n_mels = mel_spec.shape

        # Embed the frames and add positional encoding.
        embeddings = self.segment_embedding(mel_spec)
        embeddings = embeddings + self.positional_encoding[:time_frames].unsqueeze(0)

        with torch.no_grad():
            current_mel = mel_spec.clone()

            for step in range(num_steps):
                # Transformer forward pass over the full context.
                output_logits = self.model(embeddings)

                # Apply temperature scaling.
                if temperature != 1.0:
                    output_logits = output_logits / temperature

                # Predict the next frame from the last position: (batch, 1, n_mels).
                next_frame_logits = output_logits[:, -1, :].unsqueeze(1)

                # Numerical-stability guard for silent audio / degenerate logits.
                if torch.isnan(next_frame_logits).any() or torch.isinf(next_frame_logits).any():
                    # Fall back to a uniform frame on illegal values.
                    next_frame = torch.ones_like(next_frame_logits) / n_mels
                else:
                    # Clamp to avoid extreme values, then min-max normalize to [0, 1].
                    clamped = torch.clamp(next_frame_logits, min=-50, max=50)
                    lo = clamped.min()
                    hi = clamped.max()
                    # BUG FIX: epsilon avoids division by zero when all logits are equal.
                    next_frame = (clamped - lo) / (hi - lo + 1e-8)

                # BUG FIX: append every generated frame inside the loop; the
                # original concatenated only once, after the loop, so num_steps
                # iterations yielded a single new frame.
                current_mel = torch.cat([current_mel, next_frame], dim=1)

                # BUG FIX: extend the context instead of replacing it (matching
                # generate_continuation); the original overwrote `embeddings` with
                # a length-1 sequence, discarding all prior context.
                pos_idx = min(current_mel.shape[1] - 1, self.max_segments - 1)
                new_embedding = self.segment_embedding(next_frame)
                new_embedding = new_embedding + self.positional_encoding[pos_idx:pos_idx + 1].unsqueeze(0)
                embeddings = torch.cat([embeddings, new_embedding], dim=1)

        # Convert back to audio.
        generated_audio = self._mel_spectrogram_to_audio(current_mel)

        return generated_audio

    def generate_continuation(self, input_audio, continuation_length=5.0, num_steps=20, temperature=1.0):
        """Generate a continuation of the input audio.

        Args:
            input_audio: Input audio (file path, numpy array or tensor).
            continuation_length: Seconds of audio to append.
            num_steps: Unused; kept for backward compatibility with callers.
            temperature: Temperature parameter controlling randomness.

        Returns:
            Generated audio tensor (batch_size, audio_length).
        """
        # Number of mel frames to generate.
        target_frames = int(continuation_length * self.sample_rate / self.hop_length)

        # Preprocess the input audio and convert it to a mel spectrogram.
        audio_tensor = self.preprocess_audio(input_audio)
        mel_spec = self._audio_to_mel_spectrogram(audio_tensor)
        batch_size, time_frames, n_mels = mel_spec.shape

        # Frame embeddings WITHOUT positional encoding here.
        # BUG FIX: the original added the positional encoding both up front and
        # again inside the loop, double-counting it for the conditioning frames.
        embeddings = self.segment_embedding(mel_spec)

        with torch.no_grad():
            current_mel = mel_spec.clone()

            for frame_idx in range(target_frames):
                # Add positional encoding for the current actual sequence length.
                # NOTE(review): sequences longer than max_segments make this slice
                # shorter than `embeddings` and fail to broadcast — confirm callers
                # keep time_frames + target_frames <= max_segments.
                current_seq_length = time_frames + frame_idx
                current_embeddings = embeddings + self.positional_encoding[:current_seq_length].unsqueeze(0)

                # Transformer forward pass.
                output_logits = self.model(current_embeddings)

                # Apply temperature scaling.
                if temperature != 1.0:
                    output_logits = output_logits / temperature

                # Prediction for the last frame.
                next_frame_logits = output_logits[:, -1, :]

                # Sample a mel-bin index and expand it to a one-hot spectrum frame.
                next_frame_probs = F.softmax(next_frame_logits, dim=-1)
                next_frame_idx = torch.multinomial(next_frame_probs, 1)  # (batch_size, 1)
                next_frame_onehot = F.one_hot(next_frame_idx, num_classes=n_mels).float()  # (batch, 1, n_mels)

                # Append to the current sequence.
                current_mel = torch.cat([current_mel, next_frame_onehot], dim=1)

                # Extend the embedding context.
                new_embedding = self.segment_embedding(next_frame_onehot)
                embeddings = torch.cat([embeddings, new_embedding], dim=1)

        # Convert back to audio.
        generated_audio = self._mel_spectrogram_to_audio(current_mel)

        return generated_audio

    def generate_from_texture(self, duration=10.0):
        """Generate audio from a random-noise 'texture'.

        Args:
            duration: Target duration in seconds.
        """
        # Random noise mel frames as the starting texture.
        num_frames = int(duration * self.sample_rate / self.hop_length)
        noise_mel = torch.randn(1, num_frames, self.n_mels)

        return self.generate(noise_mel, is_noise=True, num_steps=30)