# -*- coding: utf-8 -*-
#!/usr/bin/env python
"""
-------------------------------------------------
   File Name：     train
   Description :   
   Author :       lth
   date：          2022/6/24
-------------------------------------------------
   Change Activity:
                   2022/6/24 18:17: create this script
-------------------------------------------------
"""
__author__ = 'lth'

import os
import warnings

import numpy as np
import torch
from colorama import Fore
from torch import optim
from torch.backends import cudnn
from torch.utils.data import DataLoader
from tqdm import tqdm

from config import GetConfig
from datalist import RetinaFaceDataset, retinaface_dataset_collate
from metric import Map
from model import RetinaFace
from utils import RetinaLoss, TargetEncode, TargetDecode, weights_init

warnings.filterwarnings('ignore')


class RetinaFaceNetTrain:
    """End-to-end training harness for RetinaFace.

    Builds the dataset/dataloader, model (optionally resumed from a
    checkpoint), SGD optimizer with StepLR schedule, and the loss /
    target-encoding helpers, then drives the epoch loop via ``work``.
    """

    def __init__(self):
        self.args = GetConfig()
        print(f"-----------{self.args.project_name}-------------")

        # region runtime configuration
        use_cuda = self.args.use_cuda and torch.cuda.is_available()
        self.device = torch.device('cuda' if use_cuda else 'cpu')
        # pinned memory + workers only pay off when copying to a GPU
        kwargs = {'num_workers': 4, 'pin_memory': True} if use_cuda else {}
        # endregion

        # NOTE(review): dataset path is hard-coded; consider moving it into GetConfig.
        self.train_dataset = RetinaFaceDataset(label_path="G:/datasets/WIDER_FACE/train/label.txt")
        self.train_dataloader = DataLoader(self.train_dataset, batch_size=4, shuffle=True,
                                           collate_fn=retinaface_dataset_collate, **kwargs)

        self.model = RetinaFace()
        if use_cuda:
            self.model = torch.nn.DataParallel(self.model, device_ids=range(torch.cuda.device_count())).to(self.device)
            # benchmark mode speeds up cudnn when input sizes are fixed
            cudnn.benchmark = True

        if self.args.resume:
            print("load the weight from pretrained-weight file")
            model_dict = self.model.state_dict()
            pretrained_dict = torch.load(self.args.pretrained_weight, map_location=self.device)['model_state_dict']
            # Keep only checkpoint entries that exist in the current model with a
            # matching shape; the `k in model_dict` guard prevents a KeyError on
            # checkpoints that carry extra keys (original code crashed there).
            # NOTE(review): a checkpoint saved without DataParallel lacks the
            # 'module.' key prefix and would silently match nothing — verify
            # against how the checkpoint was produced.
            pretrained_dict = {k: v for k, v in pretrained_dict.items()
                               if k in model_dict and np.shape(model_dict[k]) == np.shape(v)}
            model_dict.update(pretrained_dict)
            self.model.load_state_dict(model_dict)
            print("Finished to load the weight")
        # else:
        #     weights_init(self.model, init_type="orthogonal", init_gain=0.001)
        #     print("train from scratch")

        self.optimizer = optim.SGD(self.model.parameters(), 1e-4, weight_decay=0.0005, momentum=0.9)
        # halve-ish the LR (gamma=0.92) every 2 epochs
        self.scheduler = optim.lr_scheduler.StepLR(self.optimizer, 2, gamma=0.92)

        self.criterion = RetinaLoss()
        self.target_encode = TargetEncode().to(self.device)
        self.target_decode = TargetDecode().to(self.device)

        self.metric_map = Map()

    def work(self):
        """Run the full train/eval loop and checkpoint after every epoch."""
        # Ensure the checkpoint directory exists before the first save.
        os.makedirs("weights", exist_ok=True)
        # range(1, epochs + 1): the original range(1, epochs) dropped the last epoch.
        for epoch in range(1, self.args.epochs + 1):
            # Originally commented out, which saved an untrained model — restored.
            self.train(epoch)
            self.test(epoch)
            # NOTE(review): saved unconditionally each epoch despite the name
            # "best.pth"; gating on a validation metric would match the name.
            torch.save({"model_state_dict": self.model.state_dict()}, "weights/best.pth")
            print("model saved")
        torch.cuda.empty_cache()
        print("finish model training")

    def train(self, epoch):
        """Train for one epoch, reporting running mean losses on the bar.

        :param epoch: 1-based epoch index, used only for display.
        """
        self.model.train()
        # Fixed: the description previously said 'Test Epoch' in the train loop.
        pbar = tqdm(self.train_dataloader, desc=f'Train Epoch {epoch}/{self.args.epochs}',
                    bar_format='{l_bar}%s{bar}%s{r_bar}' % (Fore.BLUE, Fore.RESET))

        total_loss = []
        total_box_loss = []
        total_face_loss = []
        total_ldm_loss = []

        for data, target, line in pbar:
            data, target = data.to(self.device), [torch.from_numpy(ann).to(self.device) for ann in target]
            self.optimizer.zero_grad()
            outputs = self.model(data)
            target_encode = self.target_encode.Encode(target, outputs)
            loss, loss_box, loss_face, loss_ldm = self.criterion(outputs, target_encode)

            loss.backward()
            self.optimizer.step()
            total_loss.append(loss.item())
            total_box_loss.append(loss_box.item())
            total_face_loss.append(loss_face.item())
            total_ldm_loss.append(loss_ldm.item())

            pbar.set_description(
                f'Train Epoch: {epoch}/{self.args.epochs}  '
                f'Train loss :{np.mean(total_loss)} '
                f'box loss :{np.mean(total_box_loss)} '
                f'face loss :{np.mean(total_face_loss)} '
                f'ldm loss :{np.mean(total_ldm_loss)} '
                f'lr :{self.optimizer.param_groups[0]["lr"]}'
            )

        # StepLR is stepped once per epoch, after the batch loop.
        self.scheduler.step()

    @torch.no_grad()
    def test(self, epoch):
        """Run one evaluation pass over the training loader.

        NOTE(review): this iterates the *train* dataloader and discards the
        decoded output — presumably a visual/debug pass; confirm whether a
        separate validation set and metric accumulation were intended.

        :param epoch: 1-based epoch index, used only for display.
        """
        self.model.eval()
        pbar = tqdm(self.train_dataloader, desc=f'Test Epoch {epoch}/{self.args.epochs}',
                    bar_format='{l_bar}%s{bar}%s{r_bar}' % (Fore.BLUE, Fore.RESET))

        for data, target, line in pbar:
            data, target = data.to(self.device), [torch.from_numpy(ann).to(self.device) for ann in target]
            # Removed the pointless optimizer.zero_grad(): no gradients exist
            # under @torch.no_grad(), so the call was a no-op.
            outputs = self.model(data)
            target_encode = self.target_encode.Encode(target, outputs)
            self.target_decode.Decode(data, target_encode)


if __name__ == "__main__":
    # Script entry point: build the harness and run the full training loop.
    trainer = RetinaFaceNetTrain()
    trainer.work()
