import torch
from torch.optim.lr_scheduler import ReduceLROnPlateau
from libs.loss import iou_loss, HairMattingLoss, bineary_iou_loss
from libs.utils import save_sample_imgs, showimg, showmask, batch_end_log, epoch_end_log
from tqdm import tqdm
from trainer.BaseTrainer import BaseTrainer
import os


class Trainer(BaseTrainer):
    """Training / evaluation loop for the hair-matting segmentation net.

    BaseTrainer (not shown here) is assumed to provide ``self.net``,
    ``self.device``, the loss/IoU monitors and their ``reset_*`` helpers,
    ``self.train_image_len`` and ``self.ckpt_name()`` — TODO confirm.
    """

    def __init__(self, userconfig, train_data_loader, test_data_loader, checkpoint_dir, sample_dir):
        super(Trainer, self).__init__(userconfig, train_data_loader, test_data_loader, checkpoint_dir, sample_dir)
        # Create the loss here, not inside train(): eval() also uses it, and
        # previously calling eval() before train() raised AttributeError.
        self.lossfunc = torch.nn.BCELoss()

    def eval(self):
        """Run one full pass over the test loader.

        Returns:
            (avg_loss, avg_aiou): mean BCE loss and mean IoU metric over
            the test set.
        """
        print("=====> start to test...")
        self.net.eval()
        with torch.no_grad():
            self.reset_test_monitor()

            for samples in tqdm(self.test_data_loader):
                image = samples['image'].to(self.device)
                mask_gt = samples['mask_gt'].to(self.device)

                mask_pred = self.net(image)

                test_loss = self.lossfunc(mask_pred, mask_gt)
                self.test_loss_monitor.update(test_loss.item())

                # iou_loss is used as the accuracy-style metric ("aiou").
                aiou = iou_loss(mask_pred, mask_gt)
                self.test_aiou_monitor.update(aiou.item())

            avg_loss = self.test_loss_monitor.avg
            avg_aiou = self.test_aiou_monitor.avg

            print("test phase | loss: %.4f aiou: %.4f" % (avg_loss, avg_aiou))

        return avg_loss, avg_aiou

    def train(self):
        """Train for ``max_epoch`` epochs, evaluating and checkpointing each epoch."""
        print("=====> start to train...")
        optimizer = torch.optim.Adam(self.net.parameters(), lr=self.userconfig.learning_rate, eps=1e-7)
        lr_scheduler = ReduceLROnPlateau(optimizer, 'min', factor=0.5, patience=4, verbose=True, threshold=0.015)

        for epoch in range(self.userconfig.max_epoch):
            self.reset_train_monitor()
            self.net.train()

            for step, samples in enumerate(self.train_data_loader):
                image = samples['image'].to(self.device)
                mask_gt = samples['mask_gt'].to(self.device)

                # Clear gradients at the start of the step (idiomatic ordering;
                # the original called net.zero_grad() between forward/backward,
                # which is gradient-equivalent but unconventional).
                optimizer.zero_grad()

                mask_pred = self.net(image)

                train_loss = self.lossfunc(mask_pred, mask_gt)
                train_loss_value = train_loss.item()
                self.train_loss_monitor.update(train_loss_value)

                aiou_value = iou_loss(mask_pred, mask_gt).item()
                self.train_aiou_monitor.update(aiou_value)

                train_loss.backward()
                optimizer.step()

                batch_end_log(log_intervel=self.userconfig.log_intervel, max_epoch=self.userconfig.max_epoch,
                              epoch=epoch, step=step, max_step=self.train_image_len, lr=optimizer.param_groups[0]['lr'],
                              train_loss=train_loss_value, aiou=aiou_value)

                if step % self.userconfig.sample_intervel == 0:
                    # NOTE(review): both keys get the same tensor — looks
                    # intentional (dyemask preview mirrors the mask); confirm.
                    save_sample_imgs(samples, {"mask_pred": mask_pred, "dyemask": mask_pred}, self.sample_dir, epoch, step)

            epoch_end_log(phase="train", train_loss=self.train_loss_monitor.avg, aiou=self.train_aiou_monitor.avg)
            val_loss, _ = self.eval()

            # Step the plateau scheduler on *validation* loss. Previously the
            # eval() results were discarded and the scheduler tracked training
            # loss, which keeps falling even while the model overfits, so the
            # LR would never be reduced when it actually should be.
            lr_scheduler.step(val_loss)

            torch.save(self.net.state_dict(), os.path.join(self.checkpoint_dir, self.ckpt_name()))
