from torch.nn import Module
from torch.optim import Optimizer
from torch.utils.data import DataLoader
import sys
from tqdm import tqdm
import torch



class Trainer:
    """Wraps a model, loss criterion, and optimizer into train/val/test loops
    with periodic validation and patience-based early stopping."""

    def __init__(self, model:Module, criterion:Module, optimizer:Optimizer):
        self.model = model
        self.criterion = criterion
        self.optimizer = optimizer


    def train(self, trainloader:DataLoader, valloader:DataLoader, testloader:DataLoader, epoch:int, valepoch:int, patience=5):
        """Train for up to `epoch` epochs, validating every `valepoch` epochs.

        Early stopping: a "strike" is counted each time validation loss is
        meaningfully worse than the best seen so far; after `patience`
        strikes training stops. On every improvement the whole model object
        is pickled to "bestmodel.pth" and the strike budget resets.
        Finishes with a single pass over `testloader`.
        """
        best_val_loss = sys.float_info.max
        # BUG FIX: was hard-coded to 5, silently ignoring the `patience` argument.
        patience_count = patience
        for i in range(1, epoch + 1):
            print(f"training epoch {i}...")
            train_loss = self.train_epoch(trainloader)
            print(f"train loss is {train_loss}")
            if i % valepoch == 0:
                val_loss = self.val_epoch(valloader)
                print(f"val loss is {val_loss}")
                if best_val_loss > val_loss:
                    print(f"val loss decrease from {best_val_loss} to {val_loss}")
                    best_val_loss = val_loss
                    patience_count = patience  # reset the strike budget on improvement
                    # NOTE: saves the entire model object via pickle (not just
                    # state_dict); kept as-is so existing load code still works.
                    torch.save(self.model, "bestmodel.pth")
                else:
                    # Only count a strike when the regression exceeds the
                    # 1e-4 tolerance; near-ties are treated as a plateau.
                    if abs(val_loss - best_val_loss) > 0.0001:
                        patience_count -= 1
                        print(f"early stopping {patience - patience_count}/{patience}")
                if patience_count == 0:
                    print("stopping the train...")
                    break
        test_loss = self.test_epoch(testloader)
        print(f"test loss is {test_loss}")


    def train_epoch(self, trainloader:DataLoader):
        """Run one optimization pass over `trainloader`; returns the mean
        per-batch training loss."""
        total_loss = 0.0
        for base, diff, edge_index, edge_attr, y in tqdm(trainloader):
            total_loss += self.train_step(base, diff, edge_index, edge_attr, y)
        # len(trainloader) is the number of batches, so this is a per-batch mean.
        return total_loss / len(trainloader)


    def train_step(self, base, diff, edge_index, edge_attr, y):
        """Forward + backward + optimizer step on a single batch.

        Returns the scalar loss value for this batch.
        """
        self.model.train()
        self.optimizer.zero_grad()
        y_hat = self.model(base, diff, edge_index, edge_attr)
        loss = self.criterion(y_hat, y)
        loss.backward()
        self.optimizer.step()
        return loss.item()


    def val_epoch(self, valloader:DataLoader):
        """Run one evaluation pass over `valloader`; returns the mean
        per-batch validation loss."""
        total_loss = 0.0
        for base, diff, edge_index, edge_attr, y in tqdm(valloader):
            total_loss += self.val_step(base, diff, edge_index, edge_attr, y)
        return total_loss / len(valloader)


    def val_step(self, base, diff, edge_index, edge_attr, y):
        """Evaluate the loss on a single batch without updating weights.

        Returns the scalar loss value.
        """
        self.model.eval()
        # FIX: evaluation previously built the autograd graph for every
        # batch; no_grad avoids that memory/compute cost without changing
        # the returned loss value.
        with torch.no_grad():
            y_hat = self.model(base, diff, edge_index, edge_attr)
            loss = self.criterion(y_hat, y)
        return loss.item()


    def test_epoch(self, testloader:DataLoader):
        """Test-set evaluation; identical to a validation pass."""
        return self.val_epoch(testloader)


    def predict(self, dataloader:DataLoader):
        """Collect ground-truth and predicted values over `dataloader`.

        Returns a pair of plain lists: (targets, predictions).

        NOTE(review): this unpacks batches as (base, trend, season, y) and
        calls model.predict(base, trend, season), unlike the
        (base, diff, edge_index, edge_attr, y) batches used by every other
        method here — confirm the intended dataloader format with callers.
        """
        y_t = []
        y_p = []
        self.model.eval()
        # FIX: inference now runs in eval mode with gradients disabled,
        # consistent with val_step.
        with torch.no_grad():
            for base, trend, season, y in tqdm(dataloader):
                y_hat = self.model.predict(base, trend, season)
                y_t += y.tolist()
                y_p += y_hat.tolist()
        return y_t, y_p