import time

import torch

from ai.config.config import GxlNode
from assistant import ASSIST_LOGGER

def put_data_to_device(X, y, device):
    """Move one batch (inputs ``X`` and targets ``y``) onto ``device``.

    ``X`` may be a single tensor or a tuple/list of tensors; in the
    latter case every element is moved individually and a list is
    returned. ``y`` is always a single tensor.
    """
    y = y.to(device)
    if not isinstance(X, (tuple, list)):
        return X.to(device), y
    return [item.to(device) for item in X], y


class BaseRunner:
    """Generic training-loop driver for a model/optimizer/loss triple.

    Subclasses must implement :meth:`handle_batch` (process one batch and
    return its loss) and :meth:`calculate_valid_loss`.
    """

    def __init__(self, model, optim, loss_f, train_loader, config: GxlNode,
                 valid_loader=None, scheduler=None, multi=False,
                 local_rank=0, is_class=True,
                 device=torch.device('cpu')):
        """
        Args:
            model: network to train; when ``multi`` is True this is a
                distributed model object exposing ``.device``.
            optim: optimizer; ``zero_grad`` is called before each batch.
            loss_f: loss function, stored for subclasses to use.
            train_loader: iterable of training batches (must support ``len``).
            config: configuration tree; this class reads
                ``config.logger.log_file``, ``config.train.log_level``,
                ``config.train.log_interval`` and ``config.train.valid_interval``.
            valid_loader: optional validation data loader.
            scheduler: optional LR scheduler.
                NOTE(review): stored but never stepped in this class —
                presumably driven by subclasses; confirm.
            multi: True when running with a distributed model wrapper.
            local_rank: process rank; only rank 0 emits log lines.
            is_class: classification flag kept for subclasses (unused here).
            device: target device when not running distributed.
        """
        self.model = model
        self.optim = optim
        self.scheduler = scheduler
        self.config = config
        self.loss_f = loss_f
        self.device = device

        self.train_loader = train_loader
        self.valid_loader = valid_loader

        self.multi = multi
        self.local_rank = local_rank
        self.is_class = is_class
        self.logger = ASSIST_LOGGER(config.logger.log_file)
        if not self.multi:
            self.model.to(self.device)
        else:
            # In distributed mode the model is not a plain nn.Module but a
            # distributed model object that carries its own device.
            self.device = self.model.device
        self.logger.info(f'使用设备: {self.model.device if self.multi else self.device}')

    def train_function(self, epochs, start_step=-1, start_epoch=-1):
        """Run the training loop, optionally resuming from a checkpoint.

        Args:
            epochs: exclusive upper bound on the epoch index.
            start_step: index of the last completed step in the resumed
                epoch; batches ``0..start_step`` are skipped in the first
                epoch only.
            start_epoch: index of the last completed epoch; training
                resumes at ``start_epoch + 1``.
        """
        # BUGFIX: was `range(start_epoch + 0, epochs)`, which with the
        # default start_epoch=-1 ran a spurious epoch -1.
        for epoch in range(start_epoch + 1, epochs):
            skip = start_step + 1  # leading batches to skip when resuming
            step = skip
            full_step = len(self.train_loader)
            st = time.time()
            epoch_accumulate_loss = 0
            for i, batch in enumerate(self.train_loader):
                if i < skip:
                    continue
                step_st = time.time()
                step = i
                self.model.train()
                self.optim.zero_grad()
                loss = self.handle_batch(batch)
                if isinstance(loss, torch.Tensor):
                    loss = loss.sum().item()
                epoch_accumulate_loss += loss
                step_et = time.time()
                if self.local_rank == 0:
                    if self.config.train.log_level == 'step' and step % self.config.train.log_interval == 0:
                        # BUGFIX: previously formatted an undefined name
                        # `train_loss` (NameError); report the batch loss.
                        self.logger.info(
                            f'epoch:{epoch};step{step}/{full_step};train_loss:{loss:.6f};consume_time:{step_et - step_st:.4f}s')

            epoch_train_loss = epoch_accumulate_loss / (full_step - skip)
            # BUGFIX: resuming must only skip batches in the first epoch;
            # reset so every subsequent epoch processes the full loader
            # (also keeps the mean-loss divisor above correct per epoch).
            start_step = -1
            et = time.time()
            if self.local_rank == 0:
                if self.config.train.log_level == 'epoch':
                    if epoch % self.config.train.valid_interval == 0:
                        valid_loss = self.calculate_valid_loss()
                        self.logger.info(
                            f'epoch{epoch};train_loss:{epoch_train_loss:.6f};valid_loss:{valid_loss:.6f};consume_time:{et - st:.4f}s')
                    else:
                        self.logger.info(
                            f'epoch{epoch}:train_loss:{epoch_train_loss:.4f};;consume_time:{et - st:.4f}s')
                else:
                    # BUGFIX: was `self.config.train_valid_interval`
                    # (AttributeError); use the same key as the branch above.
                    if epoch % self.config.train.valid_interval == 0:
                        valid_loss = self.calculate_valid_loss()
                        self.logger.info(
                            f'epoch{epoch};valid_loss:{valid_loss:.6f};consume_time:{et - st:.4f}s')

    def calculate_valid_loss(self):
        """Compute the validation loss; must be implemented by subclasses."""
        raise NotImplementedError()

    def handle_batch(self, batch):
        """Process one training batch and return its loss; must be
        implemented by subclasses (forward/backward/optimizer step)."""
        raise NotImplementedError()

    def run(self, epochs):
        """Train for ``epochs`` epochs and log the total wall-clock time."""
        self.model.train()
        start_time = time.time()
        self.train_function(epochs)  # training loop; customizable in subclasses
        end_time = time.time()
        duration = end_time - start_time
        if self.local_rank == 0:
            self.logger.info(f'训练完毕！用时:{duration:.2f}s,平均{duration * 1.0 / epochs:.2f}s/epoch')

    @staticmethod
    def accuracy(y_hat: torch.Tensor, y: torch.Tensor):
        """Compute the classification accuracy of one batch.

        y_hat: (batch, num_classes) scores, or already-predicted class
            indices of shape (batch,).
        y: (batch,) ground-truth class indices.
        """
        # Take the per-row argmax to turn class scores into predicted labels.
        # BUGFIX: the condition checked shape[0] (batch size) instead of
        # shape[1] (num_classes), so a batch of size 1 kept its raw scores
        # and the comparison below broadcast incorrectly.
        if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:
            y_hat = y_hat.argmax(dim=1)
        one_hot_array = y_hat.type(y.dtype) == y
        sums = float(one_hot_array.type(y.dtype).sum())
        return sums / len(y)
