import sys

import torch
from torch.utils.data import DataLoader
from tqdm import tqdm


class GruTrainer:
    """Training/validation loop wrapper for a GRU-based graph model.

    The wrapped model must expose ``forward2(inputs, graph)``; batches are
    5-tuples ``(inputs, inputy, graph, targets, base)`` — only the first
    three are consumed here, the rest are carried for interface parity.
    """

    def __init__(self, model, optimizer, criterion, device):
        # Move the model to the target device once, up front.
        self.model = model.to(device)
        self.criterion = criterion
        self.optimizer = optimizer
        self.device = device

    def train(self, 
              trainloader: DataLoader, 
              valloader: DataLoader, 
              target_mean, 
              target_std, 
              base_mean, 
              base_std, 
              epochs: int, 
              valepoch: int=1, 
              patience: int=3
              ):
        '''
        Core training loop with periodic validation and early stopping.

        Args:
            trainloader (DataLoader): training data; each batch is
                (inputs, inputy, graph, targets, base).
            valloader (DataLoader): validation data, same batch layout.
            target_mean: normalization statistic, forwarded to ``val``
                (currently unused there — kept for interface compatibility).
            target_std: see ``target_mean``.
            base_mean: see ``target_mean``.
            base_std: see ``target_mean``.
            epochs (int): number of training epochs.
            valepoch (int): validate every ``valepoch`` epochs.
            patience (int): early-stopping patience — number of
                clearly-worse validations tolerated before stopping.
        '''
        notgood = 0
        bestloss = sys.float_info.max
        # Tolerance band for early stopping; halved every 20 epochs so the
        # stopping criterion tightens as training progresses.
        eta = 0.01
        for epoch in range(epochs):
            self.model.train()
            total_loss = 0
            for inputs, inputy, graph, targets, base in tqdm(trainloader, desc=f"training epoch {epoch}"):
                # Move the batch to the target device
                # (targets/base are not used by this training step).
                inputs = inputs.to(self.device)
                graph = graph.to(self.device)
                inputy = inputy.to(self.device)
                self.optimizer.zero_grad()
                # Forward pass
                outputs = self.model.forward2(inputs, graph)
                # Compute loss against inputy, then backprop and step.
                loss = self.criterion(outputs, inputy)
                loss.backward()
                self.optimizer.step()
                total_loss += loss.item()
            print(f"Epoch {epoch+1}, Training Loss: {total_loss / len(trainloader)}")
            if epoch % valepoch == 0:
                # NOTE(review): this decay only fires when the epoch is also a
                # validation epoch; with valepoch > 1 some decays are skipped.
                if epoch % 20 == 19:
                    eta /= 2
                self.model.eval()
                valloss = self.val(valloader, target_mean, target_std, base_mean, base_std)
                print(f"valloss is {valloss}")
                if valloss < bestloss:
                    notgood = 0
                    print(f"best loss from {bestloss} to {valloss}")
                    bestloss = valloss
                elif abs(valloss - bestloss) > eta:
                    # Clearly worse than the best seen so far; a result within
                    # eta of the best counts as a plateau and is ignored.
                    notgood += 1
                    print(f"early stopping {notgood}/{patience}")
                # >= (not ==) so the loop cannot overshoot the patience limit.
                if notgood >= patience:
                    break

    def val(self, valloader, target_mean, target_std, base_mean, base_std):
        '''Run one pass over ``valloader`` and return the mean batch loss.

        The normalization arguments are accepted for interface parity with
        ``train`` but are not used in the loss computed here.
        '''
        total_loss = 0.0
        self.model.eval()
        # Disable autograd during validation: no gradients are needed, and
        # building the graph here only wastes time and memory.
        with torch.no_grad():
            for inputs, inputy, graph, targets, base in tqdm(valloader, desc="valing"):
                # Move the batch to the target device.
                inputs = inputs.to(self.device)
                graph = graph.to(self.device)
                inputy = inputy.to(self.device)
                # Forward pass
                outputs = self.model.forward2(inputs, graph)
                # Accumulate the batch loss as a Python float.
                loss = self.criterion(outputs, inputy)
                total_loss += loss.item()
        return total_loss / len(valloader)