import datetime
import os
import sys
import time
import torch
import tqdm

from common_runner import BaseRunner, put_data_to_device


class Runner(BaseRunner):
    """Training runner that adds model checkpointing and accuracy
    evaluation on top of BaseRunner."""

    def __init__(self, model, optim, loss_f, train_loader,
                 logger, valid_loader=None, scheduler=None, multi=False,
                 local_rank=0, is_class=True,
                 device=torch.device('cpu'), model_save_dir='./output/model1/'):
        """Forward all training plumbing to BaseRunner and prepare the
        checkpoint directory.

        Args:
            model: the torch module to train.
            optim: optimizer stepping the model parameters.
            loss_f: loss function; assumed to return a per-element loss
                (it is reduced with ``.sum()`` before backward).
            train_loader: iterable of training batches.
            logger: logger used for per-epoch reporting (rank 0 only).
            valid_loader: optional validation loader; when absent,
                accuracy falls back to the training loader.
            scheduler: optional LR scheduler stepped once per batch.
            multi: passed through to BaseRunner — TODO confirm semantics.
            local_rank: distributed rank; only rank 0 logs and saves.
            is_class: passed through to BaseRunner — TODO confirm semantics.
            device: device on which batches and the model live.
            model_save_dir: directory for checkpoint files (created if
                missing).
        """
        super(Runner, self).__init__(model, optim, loss_f, train_loader,
                                     logger, valid_loader, scheduler, multi,
                                     local_rank, is_class, device)
        os.makedirs(model_save_dir, exist_ok=True)
        self.model_save_dir = model_save_dir

    def calculate_accuracy(self, is_valid=True):
        """Compute the mean per-batch accuracy over a data loader.

        Args:
            is_valid: when True and a validation loader was supplied,
                evaluate on ``self.valid_loader``; otherwise evaluate on
                ``self.train_loader``.

        Returns:
            The average of ``self.accuracy`` over all batches, or 0.0
            when the loader yields no batches.
        """
        self.model.eval()
        # BUG FIX: is_valid was previously ignored — the method always
        # evaluated on the training loader, so "valid" accuracy was
        # really train accuracy. Honour the flag when a valid_loader
        # exists.
        if is_valid and self.valid_loader is not None:
            loader = self.valid_loader
        else:
            loader = self.train_loader
        total_accuracy = 0.0
        num_batches = 0
        with torch.no_grad():
            # Loader yields (X, <unused>, y) triples — middle element is
            # presumably lengths; verify against the dataset definition.
            for X, _, y in loader:
                X, y = put_data_to_device(X, y, self.device)
                out = self.model(X)
                total_accuracy += self.accuracy(out, y)
                num_batches += 1
        # Guard against an empty loader (previously a ZeroDivisionError).
        return total_accuracy / num_batches if num_batches else 0.0

    def train_function(self, epochs):
        """Run the training loop for ``epochs`` epochs.

        Each epoch: one pass over ``train_loader`` with per-batch
        optimizer (and optional scheduler) steps, followed by an
        accuracy evaluation. Rank 0 logs the epoch summary and saves a
        checkpoint every 200 epochs.

        Args:
            epochs: number of epochs to train for.
        """
        for epoch in range(epochs):
            # BUG FIX: restore training mode every epoch —
            # calculate_accuracy() switches the model to eval() mode and
            # it was never switched back, freezing dropout/batch-norm
            # from epoch 1 onward.
            self.model.train()
            train_loss = 0.0
            num_batches = 0
            start = time.time()
            # Only rank 0 shows a progress bar to avoid duplicate bars
            # in distributed runs.
            if self.local_rank == 0:
                batch_iter = tqdm.tqdm(self.train_loader)
            else:
                batch_iter = self.train_loader
            for batch in batch_iter:
                self.optim.zero_grad()
                comments, comment_lens, titles = [x.to(self.device) for x in batch]
                Y_hat = self.model(comments)
                loss = self.loss_f(Y_hat, titles)
                # Reduce the per-element loss to a scalar for backprop.
                loss.sum().backward()
                self.optim.step()
                if self.scheduler:
                    self.scheduler.step()
                with torch.no_grad():
                    num_batches += 1
                    train_loss += loss.sum()
            # Mean loss per batch (guard against an empty loader).
            train_loss = train_loss / num_batches if num_batches else 0.0
            valid_cal = self.calculate_accuracy()
            end = time.time()
            if self.local_rank == 0:
                self.logger.info(
                    f'epoch{epoch}:train_loss:{train_loss:.4f};;valid_cal:{valid_cal:.4f};;time:{end - start:.2f}s')
                if epoch % 200 == 0:
                    torch.save(self.model.state_dict(),
                               self.model_save_dir
                               + f'model_params_{epoch}_{datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")}.pth')
