import torch
import tqdm
from torch import nn
import torch.optim.lr_scheduler as lr_scheduler
import utils_func
from data_handle import get_iter, get_loss
from chat_model_module import GxlChatAttentionModel
from tokenize_vocab import GxlCharTokenizer
import config
import tokenize_vocab


def train_seq2seq(net, tokenizer: GxlCharTokenizer, data_iter, lr, num_epochs, device,
                  save_interval: int = 1):
    """Train a sequence-to-sequence chat model with teacher forcing.

    Args:
        net: the encoder-decoder model; called as ``net(X, dec_input)``.
        tokenizer: tokenizer providing ``SOD_ID`` (start-of-decoding token id).
        data_iter: iterable of batches ``(X, X_valid_len, Y, Y_valid_len)``.
        lr: initial learning rate for Adam.
        num_epochs: number of passes over ``data_iter``.
        device: torch device to train on.
        save_interval: save a checkpoint every this many epochs (default 1,
            matching the previous behavior of saving after every epoch).
    """
    net.to(device)
    net.train()
    optimizer = torch.optim.Adam(net.parameters(), lr=lr)
    # NOTE: scheduler is stepped once per batch below, so T_0=5 restarts the
    # schedule every 5 *batches* (then 10, 20, ...) — confirm this is intended.
    lr_schedule = lr_scheduler.CosineAnnealingWarmRestarts(optimizer, T_0=5, T_mult=2)
    loss = get_loss()
    timer = utils_func.Timer()
    logger = config.get_logger()
    logger.info("start training......")
    total_metric = utils_func.Accumulator(2)  # overall loss sum, overall token count
    for epoch in range(num_epochs):
        # Fresh accumulator per epoch so the logged loss reflects THIS epoch
        # only (previously it was a running average over all epochs so far).
        epoch_metric = utils_func.Accumulator(2)  # epoch loss sum, epoch token count
        for batch in tqdm.tqdm(data_iter):
            optimizer.zero_grad()
            X, X_valid_len, Y, Y_valid_len = [x.to(device) for x in batch]
            # Teacher forcing: prepend <bos> and drop the last target token so
            # the decoder predicts token t from tokens < t.
            bos = torch.tensor([tokenizer.SOD_ID] * Y.shape[0],
                               device=device).reshape(-1, 1)
            dec_input = torch.cat([bos, Y[:, :-1]], 1)
            Y_hat = net(X, dec_input)
            l = loss(Y_hat, Y, Y_valid_len)
            l_sum = l.sum()  # scalar loss for backprop; reused for logging below
            l_sum.backward()
            utils_func.grad_clipping(net, 1)
            num_tokens = Y_valid_len.sum()
            optimizer.step()
            lr_schedule.step()
            with torch.no_grad():
                epoch_metric.add(l_sum, num_tokens)
                total_metric.add(l_sum, num_tokens)
        logger.info(f'epoch:{epoch} loss: {epoch_metric[0] / epoch_metric[1]:.3f}')
        if (epoch + 1) % save_interval == 0:
            torch.save(net.state_dict(), config.MODELSAVEPATH + f'model_params_{epoch}.pth')
    # Final summary keeps the original cumulative semantics: average loss and
    # token throughput over the entire run.
    logger.info(f'loss {total_metric[0] / total_metric[1]:.3f}, {total_metric[1] / timer.stop():.1f} 'f'tokens/sec on {str(device)}')


if __name__ == '__main__':
    # Resume training of the attention chat model from a saved checkpoint.
    chat_tokenizer = tokenize_vocab.get_tokenizer()
    model = GxlChatAttentionModel(len(chat_tokenizer))
    # Warm-start from epoch-47 weights; file must exist under MODELSAVEPATH.
    checkpoint_path = config.MODELSAVEPATH + "model_params_47.pth"
    model.load_state_dict(torch.load(checkpoint_path))
    target_device = torch.device('cuda:2')
    train_seq2seq(model, chat_tokenizer, get_iter(), 0.001, 500, target_device)
