import logging
import torch

from loss import loss
from model.Conv_TasNet.hand.conv_tasnet import ConvTasNet

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


class __Train:
    """
    Base class for training loops; not meant to be instantiated directly.

    Subclasses supply a concrete model/optimizer/loss and may override
    ``_trainEpoch`` / ``train`` to customize the loop.
    """

    def __init__(
        self,
        model,
        loss_fn,
        optimizer,
        device,
        batch_size=20,
        log_batch=20,
        epoch=100,
        retrain=False,
    ):
        """
        Args:
            model: torch.nn.Module, expected to already live on ``device``.
            loss_fn: callable ``(pred, target) -> scalar loss tensor``.
            optimizer: torch optimizer bound to ``model``'s parameters.
            device: torch device batches are moved to before each step.
            batch_size: recorded for reference; the dataloaders control the
                actual batch size.
            log_batch: log the training loss every ``log_batch`` batches.
            epoch: total number of epochs to train (1-based, inclusive).
            retrain: if True, train from scratch; otherwise resume from the
                checkpoint at the ``path`` given to ``train``.
        """
        self.model = model
        self.loss_fn = loss_fn
        self.optimizer = optimizer
        self.device = device
        self.epoch = epoch
        self.log_batch = log_batch
        self.retrain = retrain
        self.batch_size = batch_size
        # Per-epoch test losses; index i holds the loss after epoch i + 1.
        self.loss = []

    def train(self, train_dataloader, test_dataloader, path):
        """
        Run the full train/test loop, checkpointing to ``path`` after every
        epoch.  When ``self.retrain`` is False, the checkpoint at ``path`` is
        loaded first and training resumes from the epoch after the one stored
        there.
        """
        if not self.retrain:
            checkpoint = torch.load(path)
            self.model.load_state_dict(checkpoint["model_state_dict"])
            self.optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
            # BUG FIX: the checkpoint stores the last COMPLETED epoch, so
            # resume at the next one; restarting at checkpoint["epoch"] would
            # re-run that epoch and append a duplicate loss entry.
            epoch_start = checkpoint["epoch"] + 1
            self.loss = checkpoint["loss"]
            # BUG FIX: was self.loss[self.epoch - 1], which indexes by the
            # TOTAL epoch count and fails when resuming mid-run; the last
            # recorded loss is the one that belongs to the checkpoint.
            logger.info(
                "training model starting from %s epochs with loss %s",
                checkpoint["epoch"],
                self.loss[-1],
            )
        else:
            epoch_start = 1
            self.loss = []
            logger.info("training the model from start")
        for ep in range(epoch_start, self.epoch + 1):
            # BUG FIX: log the current epoch, not the (constant) start epoch.
            logger.info("Epoch %s \n ----------------", ep)
            ## train
            self._trainEpoch(train_dataloader)
            ## test
            test_loss = self._testEpoch(test_dataloader)
            self.loss.append(test_loss)
            ## save model (every epoch, so a crash loses at most one epoch)
            logger.info("saving model")
            torch.save(
                {
                    "epoch": ep,
                    "model_state_dict": self.model.state_dict(),
                    "optimizer_state_dict": self.optimizer.state_dict(),
                    "loss": self.loss,
                },
                path,
            )
            logger.info("epoch %s, loss %s, model, saved at %s", ep, test_loss, path)

    def _trainEpoch(self, dataloader):
        """
        Train the model for one epoch, logging the batch loss every
        ``self.log_batch`` batches.
        """
        size = len(dataloader.dataset)
        self.model.train()
        for batch, (X, y) in enumerate(dataloader):
            X, y = X.to(self.device), y.to(self.device)
            pred = self.model(X)
            loss = self.loss_fn(pred, y)

            loss.backward()
            self.optimizer.step()
            self.optimizer.zero_grad()

            if batch % self.log_batch == 0:
                loss, current = loss.item(), (batch + 1) * len(X)
                logger.info(f"loss: {loss:>7f}  [{current:>5d}/{size:>5d}]")

    def _testEpoch(self, dataloader):
        """
        Evaluate the model on ``dataloader`` (no gradients) and return the
        mean per-batch loss.
        """
        num_batches = len(dataloader)
        self.model.eval()
        test_loss = 0
        with torch.no_grad():
            for X, y in dataloader:
                X, y = X.to(self.device), y.to(self.device)
                pred = self.model(X)
                test_loss += self.loss_fn(pred, y).item()
        test_loss /= num_batches
        return test_loss


class ConvTasNetTrainV1(__Train):
    """
    Conv-TasNet training recipe V1: Adam @ 1e-3, gradient clipping at an
    L2-norm of 5, and learning-rate halving after three consecutive epochs
    of increasing test loss, as described in the Conv-TasNet paper.
    """

    def __init__(self, device, retrain=True):
        """
        Args:
            device: torch device the model is built on and batches moved to.
            retrain: if True, train from scratch; otherwise resume from the
                checkpoint path given to ``train``.
        """
        model = ConvTasNet(enc_dim=512, sr=8000).to(device)
        optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
        loss_fn = loss
        epoch = 100
        log_batch = 20
        batch_size = 15
        # Default checkpoint filename.  NOTE(review): currently unused —
        # callers pass ``path`` to ``train`` explicitly; confirm intent.
        self.output = "convTasNetV1.pth"
        super().__init__(
            model=model,
            loss_fn=loss_fn,
            optimizer=optimizer,
            device=device,
            epoch=epoch,
            log_batch=log_batch,
            retrain=retrain,
            batch_size=batch_size,
        )

    def _trainEpoch(self, dataloader):
        """
        Same as the base ``_trainEpoch`` but clips gradients to an L2-norm
        of 5 before each optimizer step.
        """
        size = len(dataloader.dataset)
        self.model.train()
        for batch, (X, y) in enumerate(dataloader):
            X, y = X.to(self.device), y.to(self.device)
            pred = self.model(X)
            loss = self.loss_fn(pred, y)

            loss.backward()
            # Gradient clipping with L2-norm of 5, per the paper.
            torch.nn.utils.clip_grad_norm_(self.model.parameters(), 5)
            self.optimizer.step()
            self.optimizer.zero_grad()

            if batch % self.log_batch == 0:
                loss, current = loss.item(), (batch + 1) * len(X)
                logger.info(f"loss: {loss:>7f}  [{current:>5d}/{size:>5d}]")

    def train(self, train_dataloader, test_dataloader, path):
        """
        Same as the base ``train`` but halves the learning rate whenever the
        test loss has increased over the last three recorded epochs.
        """
        if not self.retrain:
            checkpoint = torch.load(path)
            self.model.load_state_dict(checkpoint["model_state_dict"])
            self.optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
            # BUG FIX: the checkpoint stores the last COMPLETED epoch, so
            # resume at the next one; restarting at checkpoint["epoch"] would
            # re-run that epoch and append a duplicate loss entry.
            epoch_start = checkpoint["epoch"] + 1
            self.loss = checkpoint["loss"]
            # BUG FIX: was self.loss[epoch_start - 1], which points one past
            # the recorded losses once epoch_start is the NEXT epoch; the
            # last entry is the loss that belongs to the checkpoint.
            logger.info(
                "training model starting from %s epochs with loss %s",
                checkpoint["epoch"],
                self.loss[-1],
            )
        else:
            epoch_start = 1
            self.loss = []
            logger.info("training the model from start")
        for ep in range(epoch_start, self.epoch + 1):
            ## check the performance: three consecutive increases => halve lr
            if ep > 3:
                if (
                    self.loss[ep - 4] < self.loss[ep - 3]
                    and self.loss[ep - 3] < self.loss[ep - 2]
                ):
                    ## half the learning rate
                    logger.info(
                        "half the learning rate with loss%s",
                        [self.loss[ep - 4], self.loss[ep - 3], self.loss[ep - 2]],
                    )
                    for g in self.optimizer.param_groups:
                        g["lr"] = 0.5 * g["lr"]
            # BUG FIX: log the current epoch, not the (constant) start epoch.
            logger.info("Epoch %s \n ----------------", ep)
            ## train
            self._trainEpoch(train_dataloader)
            ## test
            test_loss = self._testEpoch(test_dataloader)
            self.loss.append(test_loss)
            ## save model (every epoch, so a crash loses at most one epoch)
            logger.info("saving model")
            torch.save(
                {
                    "epoch": ep,
                    "model_state_dict": self.model.state_dict(),
                    "optimizer_state_dict": self.optimizer.state_dict(),
                    "loss": self.loss,
                },
                path,
            )
            logger.info("epoch %s, loss %s, model, saved at %s", ep, test_loss, path)