import warnings
warnings.filterwarnings("ignore")
from dataset import *
from model.FCN import Resnet101_FCN8s
import torch
import torch.nn.functional as F
from metrics import *
from model.UNet import ResNet101_UNet

class Trainer(object):
    """Train and validate a ResNet101-UNet segmenter on the CamVid dataset.

    Handles dataset/dataloader construction, optional resume from the last
    checkpoint, the training loop with pixel-accuracy / mIoU metrics, LR
    scheduling on training mIoU, and periodic/best-model checkpointing.
    """

    def __init__(self, train_path, label_path,
                 val_path,
                 val_label_path,
                 transfrom,
                 Epochs=1000, lr=0.1, BATCH_SIZE=6,
                 is_resume=True):
        """
        Args:
            train_path: directory of training images.
            label_path: directory of training label masks.
            val_path: directory of validation images.
            val_label_path: directory of validation label masks.
            transfrom: torchvision transform applied to samples
                (keeps the original misspelled keyword for caller compatibility).
            Epochs: last epoch index to train to (inclusive).
            lr: initial SGD learning rate.
            BATCH_SIZE: training batch size.
            is_resume: if True, try to restore './chekpoints/Unetlast.pth'.
        """
        self.Epochs = Epochs
        self.start_epoch = 0
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        # NLLLoss expects log-probabilities: the model output is passed
        # through F.log_softmax before the loss (see train/valer).
        self.criterion = torch.nn.NLLLoss()
        self.model = ResNet101_UNet(12).to(self.device)  # 12 CamVid classes
        self.optimizer = torch.optim.SGD(self.model.parameters(), lr=lr, momentum=0.9)
        self.train_path = train_path
        self.label_path = label_path
        self.Cam_train_dataset = CamvidDataset([train_path, label_path], is_train=True, transform=transfrom)
        self.Cam_val_dataset = CamvidDataset([val_path, val_label_path], is_train=True, transform=transfrom)
        self.train_dataloader = DataLoader(self.Cam_train_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=8)
        self.val_dataloader = DataLoader(self.Cam_val_dataset, batch_size=2, shuffle=False, num_workers=8)
        self.resume = is_resume
        self.best_iou = 0.0
        # Path spelling ('chekpoints') kept as-is so existing checkpoints load.
        self.load_model_weight('./chekpoints')
        # Scheduler steps on training mIoU (mode='max'); LR is multiplied by
        # 0.8 after 8 epochs without improvement, floored at 0.001.
        self.scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(self.optimizer,
                                                                    mode='max',
                                                                    factor=0.8,
                                                                    patience=8,
                                                                    verbose=True,
                                                                    threshold_mode='rel',
                                                                    cooldown=0,
                                                                    min_lr=0.001)

    def train(self):
        """Run the training loop from start_epoch to Epochs (inclusive)."""
        self.model.train()
        n_batches = len(self.train_dataloader)
        for epoch in range(self.start_epoch, self.Epochs + 1):
            epoch_loss = 0.0
            train_acc = 0
            train_miou = 0
            train_class_Mean_acc = 0
            train_class_acc = 0
            for sample in self.train_dataloader:
                img = sample['image'].to(self.device)
                mask = sample['mask'].to(self.device)
                predict = self.model(img)
                # Explicit dim=1: softmax over the class channel (calling
                # log_softmax without dim is deprecated and ambiguous).
                predict = F.log_softmax(predict, dim=1)
                loss = self.criterion(predict, mask)
                # .item() detaches the scalar; accumulating the raw tensor
                # would keep the whole autograd graph alive for the epoch.
                epoch_loss += loss.item()
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()

                # Metrics: argmax over the class channel, per-image arrays.
                pre_masks = list(predict.max(dim=1)[1].data.cpu().numpy())
                gt_masks = list(mask.data.cpu().numpy())

                eval_metrix = eval_metrics(pre_masks, gt_masks)
                train_acc += eval_metrix['pa']
                train_miou += eval_metrix['miou']
                train_class_Mean_acc += eval_metrix['mca']
                train_class_acc += eval_metrix['ca']
            print('*' * 30)
            print('|batch[{}/{}]|epoch_avg_loss {: .4f}|'.format(
                epoch,
                self.Epochs,
                epoch_loss / n_batches))

            metric_description = '|train PA|: {:.3f}|train MIoU|: {:.3f} |train_mean_class_acc: {:.3f} |train_class_acc{:}'.format(
                train_acc / n_batches,
                train_miou / n_batches,
                train_class_Mean_acc / n_batches,
                train_class_acc / n_batches
            )
            print(metric_description)
            print('*' * 30)
            miou = train_miou / n_batches
            self.save_model(epoch, miou)
            self.scheduler.step(miou)
            # self.valer(epoch)

    def load_model_weight(self, weight_path):
        """Restore model weights / start epoch / best mIoU from the last checkpoint.

        Silently skips when resume is disabled or no checkpoint file exists
        (e.g. the very first run), instead of crashing on a missing file.
        """
        if not self.resume:
            return
        last_weight = os.path.join(weight_path, 'Unetlast.pth')
        if not os.path.exists(last_weight):
            # Nothing to resume from yet — train from scratch.
            return
        check_point = torch.load(last_weight, map_location=self.device)
        self.model.load_state_dict(check_point['model_state_dict'])
        self.start_epoch = check_point['epoch']
        # Older checkpoints may lack 'best_MIoU'; keep the default then.
        self.best_iou = check_point.get('best_MIoU', self.best_iou)
        # if check_point['optimizer_state_dict'] is not None:
        #     self.optimizer.load_state_dict(check_point['optimizer_state_dict'])

    def save_model(self, epoch, miou):
        """Save the best model whenever mIoU improves, and a full resume
        checkpoint every 5 epochs.

        Both checks run independently (the original 'elif' silently skipped
        saving a new best model on epochs divisible by 5).
        """
        dirs = "./chekpoints"
        os.makedirs(dirs, exist_ok=True)
        if self.best_iou <= miou:
            self.best_iou = miou
            torch.save(self.model.state_dict(), "./chekpoints/Unetbest.pth")
            print("| best MIOU |:{:.3f}".format(self.best_iou))
        if epoch % 5 == 0:
            checkpoint = {"model_state_dict": self.model.state_dict(),
                          "optimizer_state_dict": self.optimizer.state_dict(),
                          "epoch": epoch,
                          "best_MIoU": self.best_iou
                          }
            torch.save(checkpoint, "./chekpoints/Unetlast.pth")

    def valer(self, epoch):
        """Evaluate on the validation set and print averaged loss/metrics."""
        with torch.no_grad():
            self.model.eval()
            n_batches = len(self.val_dataloader)
            val_loss = 0.0
            val_acc = 0
            val_miou = 0
            val_class_acc = 0
            for sample in self.val_dataloader:
                img = sample['image'].to(self.device)
                mask = sample['mask'].to(self.device)
                predict = self.model(img)
                predict = F.log_softmax(predict, dim=1)
                loss = self.criterion(predict, mask)
                val_loss += loss.item()

                # Metrics: argmax over the class channel, per-image arrays.
                pre_masks = list(predict.max(dim=1)[1].data.cpu().numpy())
                gt_masks = list(mask.data.cpu().numpy())

                eval_metrix = eval_metrics(pre_masks, gt_masks)
                val_acc += eval_metrix['pa']
                val_miou += eval_metrix['miou']
                val_class_acc += eval_metrix['mca']
            print('*' * 30)
            print("val")
            # Averages use the VAL loader length (the original divided by the
            # train loader length, inflating/deflating every val number).
            print('|batch[{}/{}]|epoch_avg_loss {: .4f}|'.format(
                epoch,
                self.Epochs,
                val_loss / n_batches))

            metric_description = '|val PA|: {:.3f}|val MIoU|: {:.3f} |val_mean_class_acc: {:.3f}'.format(
                val_acc / n_batches,
                val_miou / n_batches,
                val_class_acc / n_batches
            )
            print(metric_description)
            print('*' * 30)
        # Restore training mode so a following train step behaves correctly.
        self.model.train()



if __name__ == '__main__':
    # ImageNet mean/std normalization after tensor conversion.
    preprocess = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ])

    # Build the trainer against the local CamVid split and start training.
    trainer = Trainer(
        './CamVid/train',
        './CamVid/train_labels',
        './CamVid/val',
        './CamVid/val_labels',
        transfrom=preprocess,
    )
    trainer.train()



