# -*- coding: utf-8 -*-
# !/usr/bin/env python
"""
-------------------------------------------------
   File Name：     train
   Description :   
   Author :       lth
   date：          2022/8/3
-------------------------------------------------
   Change Activity:
                   2022/8/3 18:22: create this script
-------------------------------------------------
"""
__author__ = 'lth'

import os

import numpy as np
import torch
from torch import optim
from torch.backends import cudnn
from torch.utils.data import DataLoader
from tqdm import tqdm

from config import GetConfig
from datalist import FaceParsingData
from metric import Evaluator
from model import BiSeNet
from utils import OhemCELoss, DiceLoss

# Best (lowest) epoch-mean training loss seen so far; FaceParsing.train()
# compares each epoch against this and checkpoints the model on improvement.
# float("inf") guarantees the first completed epoch always saves a checkpoint.
total_loss = float("inf")


# torch.autograd.set_detect_anomaly(True)

class FaceParsing:
    """Training/evaluation harness for a BiSeNet face-parsing model.

    Wires together the config, the CelebAMask-HQ data loaders, the model,
    the per-head losses, the optimizer/scheduler and the metrics, then
    drives the train/test loops via :meth:`work`.
    """

    def __init__(self):
        self.args = GetConfig()
        print(f"-----------{self.args.project_name}-------------")
        use_cuda = self.args.use_cuda and torch.cuda.is_available()
        self.device = torch.device("cuda" if use_cuda else "cpu")
        # Rule of thumb: num_workers ~= 4x the GPU count; raising it also
        # increases host (CPU) memory consumption. pin_memory only helps
        # (and only makes sense) when transferring to CUDA.
        kwargs = {"num_workers": 4, "pin_memory": use_cuda}

        # NOTE(review): dataset root is hard-coded; consider moving it into GetConfig.
        data = FaceParsing.get_data("E:/Datasets2/CelebAMask-HQ/CelebA-HQ-img/")
        num_val = int(len(data) * self.args.val_per)
        num_train = len(data) - num_val
        self.train_dataloader = DataLoader(FaceParsingData(data[:num_train], mode="train"),
                                           batch_size=self.args.train_batch_size,
                                           shuffle=True, drop_last=True,
                                           **kwargs)
        self.test_dataloader = DataLoader(FaceParsingData(data[num_train:], mode="test"),
                                          batch_size=self.args.test_batch_size,
                                          shuffle=True, drop_last=True,
                                          **kwargs)
        # 18 face-part classes + 1 background class.
        self.model = BiSeNet(n_classes=19).to(self.device)
        self.model.init_weight()

        if use_cuda:
            self.model = torch.nn.DataParallel(self.model, device_ids=range(torch.cuda.device_count()))
            cudnn.benchmark = True
            cudnn.enabled = True

        if self.args.resume:
            # Merge the checkpoint into the current state dict so partial
            # checkpoints still load; strict=True then validates the result.
            print("\nload the weight from pretrained-weight file")
            model_dict = self.model.state_dict()
            checkpoint = torch.load(self.args.pretrained_weight)['model_state_dict']
            model_dict.update(checkpoint)
            self.model.load_state_dict(model_dict, strict=True)
            print("Restoring the weight from pretrained-weight file \nFinished loading the weight\n")

        # One loss instance per model head: main output, 1/16 and 1/32 aux heads.
        self.criterionP = OhemCELoss(0.7, 448 * 448, -100)
        self.criterion2 = OhemCELoss(0.7, 448 * 448, -100)
        self.criterion3 = OhemCELoss(0.7, 448 * 448, -100)

        self.diceP = DiceLoss()
        self.dice2 = DiceLoss()
        self.dice3 = DiceLoss()

        self.optimizer = optim.AdamW(self.model.parameters(), lr=self.args.lr, weight_decay=5e-4)
        self.scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer, step_size=3, gamma=0.8)

        self.metric = Evaluator(num_class=19)
        self.metric16 = Evaluator(num_class=19)
        self.metric32 = Evaluator(num_class=19)

    def work(self):
        """Run the full training schedule, epochs 1..args.epochs inclusive."""
        # BUGFIX: range(1, epochs) silently skipped the last epoch while the
        # progress bar advertised "epoch/args.epochs"; run inclusive instead.
        for epoch in range(1, self.args.epochs + 1):
            self.train(epoch)
        torch.cuda.empty_cache()
        print("model finish training")

    @torch.no_grad()
    def test(self, epoch):
        """Evaluate on the held-out split, reporting pixel accuracy and mIoU.

        :param epoch: current epoch number, used only for progress display.
        """
        self.model.eval()
        self.metric.reset()
        # BUGFIX: the evaluation progress bar was labelled "Train Epoch".
        # Also dropped the optimizer.zero_grad() call: no gradients exist
        # (or are wanted) inside a @torch.no_grad() evaluation loop.
        pbar = tqdm(self.test_dataloader, desc=f'Test Epoch:{epoch}/{self.args.epochs}')
        for data, target in pbar:
            data, target = data.to(self.device), target.long().to(self.device)
            output, output16, output32 = self.model(data)
            self.metric.update(target.cpu().numpy(), np.argmax(output.detach().cpu().numpy(), axis=1))
            pbar.set_description(
                f'Test Epoch:{epoch}/{self.args.epochs} '
                f' metric   acc:{self.metric.Pixel_Accuracy()}  miou:{self.metric.Mean_Intersection_over_Union()}'
            )

    def train(self, epoch):
        """Train for one epoch; checkpoint whenever the epoch mean loss improves.

        :param epoch: current epoch number, used for progress display.
        """
        self.model.train()
        average_loss = []
        pbar = tqdm(self.train_dataloader, desc=f'Train Epoch:{epoch}/{self.args.epochs}')

        for data, target in pbar:
            data, target = data.to(self.device), target.long().to(self.device)
            self.optimizer.zero_grad()
            output, output16, output32 = self.model(data)

            # BUGFIX: both auxiliary dice losses were computed on the main
            # `output`, leaving the 1/16 and 1/32 heads without supervision.
            lossdicep = self.diceP.calculate(output, target)
            lossdice2 = self.dice2.calculate(output16, target)
            lossdice3 = self.dice3.calculate(output32, target)

            loss = lossdicep + lossdice2 + lossdice3

            loss.backward()
            self.optimizer.step()

            average_loss.append(loss.item())

            pbar.set_description(
                f'Train Epoch: {epoch}/{self.args.epochs} '
                f' train_loss: {np.mean(average_loss)} '
                f' learning_rate: {self.optimizer.state_dict()["param_groups"][0]["lr"]}'
            )
        self.scheduler.step()

        global total_loss
        epoch_loss = float(np.mean(average_loss))
        if epoch_loss < total_loss:
            # BUGFIX: torch.save raises if the target directory is missing.
            os.makedirs('./weights', exist_ok=True)
            torch.save({
                'model_state_dict': self.model.state_dict(),
            },
                './weights/best.pth')
            print("model saved")
            total_loss = epoch_loss

    @staticmethod
    def get_data(base_dir):
        """Recursively collect the paths of all .jpg files under *base_dir*.

        :param base_dir: root directory to walk.
        :return: list of absolute/relative .jpg paths, in os.walk order.
        """
        return [os.path.join(root, f)
                for root, _, files in os.walk(base_dir)
                for f in files
                if f.endswith(".jpg")]


if __name__ == "__main__":
    model = FaceParsing()
    model.work()
