import torch
from torch import nn
from dataset.dataloader import generate_loader, get_vocab
from backbones.speech_model import SpeechModel
from tqdm import tqdm
import torch.nn.functional as F

if __name__ == '__main__':
    # Train a CTC speech-recognition model over mel-spectrogram features.
    vocab = get_vocab()
    vocab_size = len(vocab)
    # Fall back to CPU so the script still runs on machines without a GPU.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    loader = generate_loader(batch_size=20)
    # 80: number of mel filterbank channels fed to the model.
    model = SpeechModel(80, 100, 4, 1, vocab_size)
    model = model.to(device)
    # Ensure training-mode behavior (dropout / batch-norm) is active.
    model.train()

    # CTC loss for unaligned sequence labelling (speech / OCR style tasks);
    # index 0 is reserved for the blank token.
    criterion = nn.CTCLoss(blank=0)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.0001, weight_decay=0.01)

    epochs = 1000
    for epoch in range(epochs):
        loop = tqdm(loader)
        for mel_spec, token_idx, mel_spec_valid_lens, token_idx_valid_lens in loop:
            mel_spec = mel_spec.to(device)
            token_idx = token_idx.to(device)
            mel_spec_valid_lens = mel_spec_valid_lens.to(device)
            token_idx_valid_lens = token_idx_valid_lens.to(device)
            optimizer.zero_grad()
            predicts = model(mel_spec)
            # CTCLoss expects log-probabilities; transpose below gives (T, N, C).
            predicts = F.log_softmax(predicts, dim=-1)
            # NOTE(review): `// 2` assumes SpeechModel halves the time dimension
            # (e.g. one stride-2 subsampling layer) — confirm against the model.
            loss = criterion(predicts.transpose(0, 1), token_idx,
                             mel_spec_valid_lens // 2, token_idx_valid_lens)
            loss.backward()

            # Clip gradient values to stabilise CTC training.
            # nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.)
            nn.utils.clip_grad_value_(model.parameters(), clip_value=0.5)
            optimizer.step()

            loop.set_description(f"loss:{loss.item():.4f}")

        # Persist weights every epoch; without this, all training progress
        # would be lost when the script exits.
        torch.save(model.state_dict(), "speech_model.pt")