#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
   File Name：     train
   Description :   
   Author :       lth
   date：          2022/1/27
-------------------------------------------------
   Change Activity:
                   2022/1/27 13:34: create this script
-------------------------------------------------
this is the core of this project
"""
__author__ = 'lth'

import os

import cv2
import numpy as np
import torch
from torch import nn
from torch import optim
from torch.backends import cudnn
from torch.utils.data import DataLoader
from torchvision import transforms
from tqdm import tqdm

from config import GetConfig
from datalist import UnetTableData
from metric import Evaluator
from model import output_model
from utils import weights_init, PostProcess

metric_loss = 100


class UNetTable:
    def __init__(self):
        self.args = GetConfig()

        print(f"-----------{self.args.project_name}-------------")
        use_cuda = self.args.use_cuda and torch.cuda.is_available()
        self.device = torch.device("cuda" if use_cuda else "cpu")
        # num_workers的设置数量为GPU的4倍，同时这个参数的提高会增加CPU的内存消耗
        kwargs = {"num_workers": 1, "pin_memory": True} if use_cuda else {"num_workers": 0, "pin_memory": False}

        self.data = self.get_data(self.args.base_dir)

        self.num_val = int(len(self.data) * self.args.val_per)
        self.num_train = len(self.data) - self.num_val

        self.train_dataloader = DataLoader(UnetTableData(self.data[:self.num_train]),
                                           batch_size=1,
                                           shuffle=True, drop_last=True,
                                           **kwargs)
        self.test_dataloader = DataLoader(UnetTableData(self.data[self.num_train:]),
                                          batch_size=1,
                                          shuffle=True, drop_last=True,
                                          **kwargs
                                          )

        '''
        定义模型
        '''

        self.model = output_model(out_channels=4).to(self.device)

        if use_cuda:
            self.generator = torch.nn.DataParallel(self.model, device_ids=range(torch.cuda.device_count()))
            cudnn.benchmark = True
            cudnn.enabled = True

        if not self.args.resume:
            try:
                print("load the weight from pretrained-weight file")
                model_dict = self.model.state_dict()
                checkpoint = torch.load(self.args.pretrained_weight)['model_state_dict']
                model_dict.update(checkpoint)
                self.model.load_state_dict(model_dict, strict=True)
                print("Restoring the weight from pretrained-weight file \nFinished loading the weight")
            except Exception as e:
                raise e

        # else:
        #     weights_init(self.model, init_type="normal", init_gain=0.001)

        '''
        构造loss目标函数
        选择优化器
        学习率变化选择
        '''
        self.criterion = nn.BCELoss()

        self.optimizer = optim.AdamW(self.model.parameters(), lr=self.args.lr)
        self.scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer, step_size=1, gamma=0.9)

        self.metric = Evaluator(4)
        self.metric.reset()

    def work(self):
        for epoch in range(1, self.args.epochs):
            self.train(epoch)
            if epoch % 1 == 0:
                self.test(epoch)
        torch.cuda.empty_cache()
        print("model finish training")

    def train(self, epoch):
        self.model.train()
        self.metric.reset()
        average_loss = []
        pbar = tqdm(self.train_dataloader, desc=f'Train Epoch:{epoch}/{self.args.epochs}')
        for data, target, mask in pbar:
            data, target, mask = data.to(self.device), target.to(self.device), mask.to(self.device)
            self.optimizer.zero_grad()
            outputs = self.model(data)
            loss = self.criterion(torch.sigmoid(outputs)[mask], target.float()[mask])
            loss.backward()
            self.optimizer.step()
            self.metric.update(target.cpu().numpy(), torch.sigmoid(outputs).detach().cpu().numpy())
            average_loss.append(loss.item())

            pbar.set_description(
                f'Train Epoch: {epoch}/{self.args.epochs} '
                f' train_loss: {np.mean(average_loss)} '
                f' learning_rate: {self.optimizer.state_dict()["param_groups"][0]["lr"]}'
                f' acc: {self.metric.Pixel_Accuracy()}'
                f' miou: {self.metric.Mean_Intersection_over_Union()}'
            )

        self.scheduler.step()
        global metric_loss
        if np.mean(average_loss) < metric_loss:
            metric_loss = np.mean(average_loss)
            torch.save({
                'model_state_dict': self.model.state_dict(),
            },
                './weight/best.pth')
            print("model saved")

    @torch.no_grad()
    def test(self, epoch):
        self.model.eval()
        self.metric.reset()

        pbar = tqdm(self.test_dataloader, desc=f'Test Epoch{epoch}/{self.args.epochs}')
        for data, target,_ in pbar:
            data, target = data.to(self.device), target.to(self.device)
            outputs = self.model(data)
            pred = outputs[0].cpu()
            pred = UnetTableData.decode_label_map(pred,threshold=0.5)

            temp = np.zeros_like(pred[0])
            for index, i in enumerate(pred):
                if index == 0 or index == 1:
                    v = True
                else:
                    v = False
                i = PostProcess.process(i, v=v,index=index)
                temp = i | temp

            data = np.array(transforms.ToPILImage()(data[0]))

            cv2.addWeighted(temp, 0.5, data, 0.5, 0, temp)
            cv2.imwrite("result/1.jpg", temp)

            self.metric.update(target.cpu().numpy(), torch.sigmoid(outputs).detach().cpu().numpy())
            pbar.set_description(
                f' Test Epoch: {epoch}/{self.args.epochs} '
                f' acc: {self.metric.Pixel_Accuracy()}'
                f' miou: {self.metric.Mean_Intersection_over_Union()}')

    @staticmethod
    def get_data(base_dir):
        data = []
        for root, dirs, files in os.walk(base_dir):
            for f in files:
                if f.endswith(".png"):
                    data.append(os.path.join(root, f))
        return data


if __name__ == "__main__":
    model = UNetTable()
    model.work()
