from dataclasses import dataclass
from typing import Any, Dict, List, Union
import torch
import os
# Route Hugging Face Hub downloads through a mirror (useful where
# huggingface.co is unreachable). NOTE(review): this must be set before the
# `transformers` imports further down read it — keep it at the top of the file.
os.environ["HF_ENDPOINT"] = "https://hf-mirror.com"
@dataclass
class TTSDataCollatorWithPadding:
    """Collate TTS samples into a single padded batch.

    Shorter sequences are padded so every example in the batch has the same
    length. Padded spectrogram-label frames are replaced with the special
    value -100, which tells the model to ignore those regions when computing
    the spectrogram loss. Label lengths are also rounded down to a multiple
    of the model's reduction factor to avoid a decoder dimension mismatch.

    Input:
        features = [
            {"input_ids": [...], "labels": [...], "speaker_embeddings": [...]},
            ...
        ]
    Output:
        batch = {
            "input_ids": (B, T_text),
            "labels": (B, T_spec, 80),
            "speaker_embeddings": (B, 512),
        }
    """

    # Processor whose `.pad()` collates both the text inputs and the
    # spectrogram labels (e.g. a SpeechT5Processor).
    processor: Any
    # Optional override for the model's reduction factor. When None, the
    # module-level `model` defined later in this script is consulted, which
    # preserves the original behavior.
    reduction_factor: Union[int, None] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        input_ids = [{"input_ids": feature["input_ids"]} for feature in features]
        label_features = [{"input_values": feature["labels"]} for feature in features]
        speaker_features = [feature["speaker_embeddings"] for feature in features]

        # BUG FIX: use the processor stored on this instance rather than the
        # bare global name `processor` (the original only worked by accident
        # because a module-level variable of the same name exists).
        batch = self.processor.pad(input_ids=input_ids, labels=label_features, return_tensors="pt")

        # Replace padded label frames with -100 so the loss ignores them.
        batch["labels"] = batch["labels"].masked_fill(
            batch.decoder_attention_mask.unsqueeze(-1).ne(1), -100
        )
        # The decoder attention mask is not used during fine-tuning.
        del batch["decoder_attention_mask"]

        # Round target lengths down to a multiple of the reduction factor
        # so label frames align with the decoder's output frames.
        reduction_factor = (
            self.reduction_factor
            if self.reduction_factor is not None
            else model.config.reduction_factor  # fall back to the global model
        )
        if reduction_factor > 1:
            target_lengths = torch.tensor(
                [len(feature["input_values"]) for feature in label_features]
            )
            target_lengths = target_lengths.new(
                [length - length % reduction_factor for length in target_lengths]
            )
            max_length = max(target_lengths)
            batch["labels"] = batch["labels"][:, :max_length]

        # Speaker embeddings are fixed-length vectors, so a plain tensor works.
        batch["speaker_embeddings"] = torch.tensor(speaker_features)

        return batch
# Load the SpeechT5 processor and model, then build the data collator.
# NOTE(review): these statements are order-dependent (`processor` must exist
# before the collator is built) and `model` is read via a global lookup inside
# the collator, so the names must not be changed.
from transformers import SpeechT5Processor
checkpoint="microsoft/speecht5_tts"
processor=SpeechT5Processor.from_pretrained(checkpoint)
data_collator=TTSDataCollatorWithPadding(processor=processor)
from transformers import SpeechT5ForTextToSpeech
model=SpeechT5ForTextToSpeech.from_pretrained(checkpoint)
# model.config.use_cache=False  # use_cache=True is incompatible with gradient
# checkpointing during training, so disable the cache when training.
