"""
Use the default sampler:
    if shuffle:
        sampler = RandomSampler(dataset, generator=generator)
    else:
        sampler = SequentialSampler(dataset)

Use the default 'DataLoader'.
"""

import torch
import numpy as np
from torch.nn import MSELoss


class TorchTrainer(object):
    """Train, validate, and test a PyTorch model with a simple epoch loop.

    Parameters
    ----------
    loader : tuple
        ``(train_loader, valid_loader)`` pair of ``DataLoader``s.
    model : torch.nn.Module
        The model to train.
    loss_func : str
        Name of a loss class in ``torch.nn`` (e.g. ``'MSELoss'``).
    optimizer : str
        Only ``'adam'`` is supported; anything else raises ``ValueError``.
    learning_rate : float
        Learning rate passed to the optimizer.
    epochs : int
        Number of training epochs.
    """

    def __init__(self, loader, model, loss_func, optimizer, learning_rate, epochs):
        (self.train_loader, self.valid_loader) = loader
        self.epochs = epochs
        self.model = model
        # NOTE(review): this is the same object as self.model, not a fresh
        # copy -- test() relies on load_state_dict() to restore weights.
        self.model_init = model
        # Look the loss class up by name instead of eval()-ing the string:
        # identical result for valid names, no arbitrary code execution.
        self.loss_func = getattr(torch.nn, loss_func)()
        if optimizer == 'adam':
            self.optimizer = torch.optim.Adam(self.model.parameters(), lr=learning_rate)
        else:
            raise ValueError('Adam is recommended.')

        self.train_loss = []          # mean train loss per epoch
        self.train_predictions = []   # flattened predictions of the latest epoch
        self.valid_loss = []          # mean valid loss per validation pass
        self.valid_predictions = []   # flattened predictions of the latest pass
        self.model_saved = []

    def train(self):
        """Run the full training loop, validating after every epoch.

        Appends the epoch's mean loss to ``self.train_loss`` and replaces
        ``self.train_predictions`` with the latest epoch's per-sample
        predictions (numpy arrays, flattened across batches).
        """
        for e in range(self.epochs):
            self.model.train()
            train_loss_batch = []
            train_predictions_batch = []
            for x, y in self.train_loader:
                out = self.model(x)
                loss = self.loss_func(out, y)

                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()

                train_loss_batch.append(loss.item())
                train_predictions_batch.append(out.detach().numpy())

            self.train_loss.append(np.mean(train_loss_batch))
            # Keep only the latest epoch's predictions, flattened across batches.
            self.train_predictions = [p for batch in train_predictions_batch for p in batch]

            self.valid()
            # Progress report every 200 epochs.
            if e % 200 == 199:
                print('Epoch: {}, Train Loss: {:.6f}, Valid Loss: {:.6f}'.format(e + 1, self.train_loss[-1],
                                                                                 self.valid_loss[-1]))

    def valid(self):
        """Evaluate on the validation loader WITHOUT updating the model.

        BUG FIX: the original called zero_grad()/backward()/step() here,
        which trained the model on the validation set every epoch (and
        leaked gradients).  Validation is now a pure forward pass under
        ``torch.no_grad()``.
        """
        self.model.eval()
        valid_loss_batch = []
        valid_predictions_batch = []
        with torch.no_grad():
            for x, y in self.valid_loader:
                out = self.model(x)
                loss = self.loss_func(out, y)

                valid_loss_batch.append(loss.item())
                valid_predictions_batch.append(out.numpy())

        self.valid_loss.append(np.mean(valid_loss_batch))
        # Keep only the latest pass's predictions, flattened across batches.
        self.valid_predictions = [p for batch in valid_predictions_batch for p in batch]

    def test(self, x, path):
        """Load weights from ``path`` into the model and predict on ``x``.

        Returns the predictions as a numpy array.
        """
        model = self.model_init
        model.load_state_dict(torch.load(path))
        with torch.no_grad():
            out = model(x)
        return out.numpy()






