import datetime
import sys
import time
import torch
import tqdm

from ai import utils
from ai.run.base_train_runner import GxlBaseRunner
from tokenize_vocab import get_tokenizer
import config

# Module-level tokenizer shared by every runner instance; supplies SOD_ID
# (presumably the start-of-decode token id — confirm in tokenize_vocab),
# which is used to build the decoder input below.
tokenizer = get_tokenizer()


class RunnerGxl(GxlBaseRunner):
    """Sequence-to-sequence training runner with teacher forcing.

    Thin specialization of GxlBaseRunner: each batch is unpacked as
    (X, X_valid_len, Y, Y_valid_len), the decoder input is the target
    sequence shifted right behind a start-of-decode token, and the loss
    is normalized by the number of valid target tokens per epoch.
    """

    def __init__(self, model, optim, loss_f, train_loader,
                 logger, valid_loader=None, scheduler=None, multi=False,
                 local_rank=0, is_class=True,
                 device=torch.device('cpu')):
        # All arguments are forwarded unchanged to the base runner.
        super().__init__(model, optim, loss_f, train_loader,
                         logger, valid_loader, scheduler, multi,
                         local_rank, is_class, device)

    def train_function(self, epochs):
        """Train for `epochs` epochs.

        Per epoch: logs the token-averaged training loss and wall time
        (rank 0 only) and checkpoints the model every 50 epochs.

        Args:
            epochs (int): number of epochs to run.
        """
        for epoch in range(epochs):
            epoch_loss = 0.0          # summed loss over the epoch (Python float)
            total_valid_tokens = 0    # count of non-padding target tokens
            start = time.time()
            # Only rank 0 shows a progress bar so multi-process output stays clean.
            if self.local_rank == 0:
                iterator = tqdm.tqdm(self.train_loader)
            else:
                iterator = self.train_loader
            for batch in iterator:
                self.optim.zero_grad()
                X, X_valid_len, Y, Y_valid_len = [t.to(self.device) for t in batch]
                # Teacher forcing: decoder sees <sod> followed by Y[:, :-1].
                bos = torch.full((Y.shape[0], 1), tokenizer.SOD_ID,
                                 dtype=torch.long, device=self.device)
                dec_input = torch.cat([bos, Y[:, :-1]], dim=1)
                Y_hat = self.model(X, dec_input)
                loss = self.loss_f(Y_hat, Y, Y_valid_len)
                loss.sum().backward()  # reduce to a scalar for backprop
                utils.utils_model.grad_clipping(self.model, 1)
                num_tokens = Y_valid_len.sum()
                self.optim.step()
                if self.scheduler:
                    self.scheduler.step()
                # .item() detaches to Python scalars so we don't keep device
                # tensors (and their memory) alive across the whole epoch.
                epoch_loss += loss.detach().sum().item()
                total_valid_tokens += int(num_tokens.item())
            # Token-averaged loss; max(..., 1) guards an empty train_loader.
            avg_loss = epoch_loss / max(total_valid_tokens, 1)
            elapsed = time.time() - start
            if self.local_rank == 0:
                self.logger.info(
                    f'epoch{epoch}:train_loss:{avg_loss:.4f};;time:{elapsed:.2f}s')
                if epoch % 50 == 0:
                    # NOTE(review): the timestamp contains ':' which is illegal in
                    # Windows file names — confirm training only runs on POSIX.
                    torch.save(self.model.state_dict(),
                               config.MODELSAVEPATH
                               + f'model_params_{epoch}_{datetime.datetime.now().strftime("%Y-%m-%d_%H:%M:%S")}.pth')
