# -*- coding: utf-8 -*-
# @Time    : 2021/7/7 14:46
# @Author  : LuoTianHang


import os
# #################### train.py notes ##########################
# This script is the core module used to train the model.
import random

import cv2
import numpy as np
import pandas as pd
import torch
from PIL import Image
from torch import nn
from torch import optim
from torch.backends import cudnn
from torch.utils.data import DataLoader
from tqdm import tqdm

from config import parser_config
from datalist import MCNNDataset2 as MCNNDataset
from model import MCNN2 as MCNN
from model import weights_normal_init
from otherutils import green_print, red_print, blue_print
from test import get_image

# Best (lowest) test-set MAE observed so far; updated inside Train.test().
best_mae = 10000000000

np.set_printoptions(precision=4)  # print numpy floats with 4 decimals
# Fix all RNG seeds so training runs are reproducible.
rand_seed = 64678
if rand_seed is not None:
    np.random.seed(rand_seed)
    torch.manual_seed(rand_seed)
    torch.cuda.manual_seed(rand_seed)


# def weight_init(m):
#     # 1. Choose an initialisation scheme depending on the layer type
#     if isinstance(m, nn.Linear):
#         nn.init.xavier_normal_(m.weight)
#         nn.init.constant_(m.bias, 0)
#     # Conv2d layers get Kaiming (He) initialisation
#     elif isinstance(m, nn.Conv2d):
#         nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
#     # Batch-normalisation layers: unit scale, zero shift
#     elif isinstance(m, nn.BatchNorm2d):
#         nn.init.constant_(m.weight, 1)
#         nn.init.constant_(m.bias, 0)

def weight_init(m):
    """Initialise a module's parameters according to its layer type.

    Intended for use with ``model.apply(weight_init)``:
      - ``nn.Linear``: Xavier-normal weights, zero bias.
      - ``nn.Conv2d``: Kaiming-normal weights (fan_in, ReLU gain).
      - ``nn.BatchNorm2d``: weight set to 1, bias set to 0.
    Any other module type is left untouched.
    """
    if isinstance(m, nn.Linear):
        nn.init.xavier_normal_(m.weight)
        nn.init.constant_(m.bias, 0)
    elif isinstance(m, nn.Conv2d):
        nn.init.kaiming_normal_(m.weight, mode='fan_in', nonlinearity='relu')
    elif isinstance(m, nn.BatchNorm2d):
        # Standard BN init: unit scale, zero shift.
        # (The original had a second, unreachable duplicate of this branch.)
        nn.init.constant_(m.weight, 1)
        nn.init.constant_(m.bias, 0)

class Train(object):
    """End-to-end trainer for the MCNN crowd-counting model.

    NOTE: constructing an instance immediately runs the whole pipeline:
    data loading, optional weight restore, the train/test loop, and a
    demo prediction per epoch.
    """

    # Root of the preprocessed crowd-counting datasets.
    DATA_ROOT = "G:/datasets/crowdcounting/ProcessedData/"
    # Datasets to draw samples from. The full candidate list was
    # ['UCF-QNRF-1024x1024-mod16', 'UCF_CC_50', 'WE_blurred',
    #  'shanghaitech_part_A', 'shanghaitech_part_B']; only part_A is active.
    DATASETS = ["shanghaitech_part_A"]

    def __init__(self):
        self.args = parser_config()

        print(f"-----------{self.args.project_name}-------------")

        use_cuda = self.args.use_cuda and torch.cuda.is_available()

        self.device = torch.device('cuda' if use_cuda else "cpu")
        # pin_memory speeds up host->GPU transfers; num_workers stays 0.
        kwargs = {'num_workers': 0, "pin_memory": True} if use_cuda else {}

        lines_train = self.get_lines_train()
        lines_test = self.get_lines_test()

        self.train_dataset = MCNNDataset(data_type="train",
                                         data=lines_train)
        self.test_dataset = MCNNDataset(data_type="test",
                                        data=lines_test)

        self.train_dataloader = DataLoader(self.train_dataset, batch_size=self.args.train_batch_size, shuffle=True,
                                           **kwargs)
        self.test_dataloader = DataLoader(self.test_dataset, batch_size=self.args.test_batch_size, shuffle=True,
                                          **kwargs)

        self.model = MCNN()

        if use_cuda:
            self.model = torch.nn.DataParallel(self.model, device_ids=range(torch.cuda.device_count())).to(self.device)
            cudnn.enabled = True
            cudnn.benchmark = True  # auto-tune conv kernels for fixed-size inputs

        if self.args.resume:
            try:
                blue_print("load the weight from pretrained-weight file")
                model_dict = self.model.state_dict()
                pretrained_dict = torch.load(self.args.pretrained_weight, map_location=self.device)['model_state_dict']
                # Keep only tensors whose shape matches the current model,
                # so a partially-compatible checkpoint still loads.
                pretrained_dict = {k: v for k, v in pretrained_dict.items() if np.shape(model_dict[k]) == np.shape(v)}
                model_dict.update(pretrained_dict)
                self.model.load_state_dict(model_dict)
                green_print("Finished to load the weight")
            except Exception as e:
                red_print("can not load the weight")
                raise e
        else:
            green_print("train from beginning")
            weights_normal_init(self.model, dev=0.01)
            # BUGFIX: message previously claimed std 0.02, but dev=0.01 is used.
            green_print("initialize the weight with mean:0 std:0.01")

        self.criterion = nn.MSELoss().to(self.device)
        self.optimizer = optim.Adam(self.model.parameters(), self.args.lr)
        self.scheduler = optim.lr_scheduler.StepLR(self.optimizer, 1, 0.95)

        try:
            # NOTE(review): range(1, epochs) performs epochs-1 iterations;
            # kept as-is in case the off-by-one is intentional — confirm.
            for epoch in range(1, self.args.epochs):
                self.train(epoch)
                self.test(epoch)
                self.predict(self.DATA_ROOT + 'shanghaitech_part_A/test/img/1.jpg')
        except KeyboardInterrupt:
            # Ctrl-C: checkpoint the current weights before exiting.
            torch.save({
                'model_state_dict': self.model.state_dict(),
            }, 'weights/temp/best.pth')
            green_print("model_saved")
        torch.cuda.empty_cache()
        blue_print("finish model training")

    def train(self, epoch):
        """Run one training epoch over ``self.train_dataloader``.

        Progress, running loss, MAE and RMSE are reported on the tqdm bar.
        """
        MSE = []
        MAE = []
        total = 0
        self.model.train()
        losses = []
        pbar = tqdm(self.train_dataloader, desc=f'Train Epoch:{epoch}/{self.args.epochs}')
        for data, target in pbar:
            data, target = data.to(self.device), target.to(self.device)
            self.optimizer.zero_grad()
            outputs = self.model(data)
            loss = self.criterion(outputs, target)
            losses.append(loss.item())
            loss.backward()
            self.optimizer.step()

            # Crowd count = sum over the (predicted / ground-truth) density map.
            output_sum = torch.sum(outputs).detach().cpu().numpy()
            target_sum = torch.sum(target).cpu().numpy()
            total += len(data)
            MSE.append((target_sum - output_sum) ** 2)
            MAE.append(abs(target_sum - output_sum))

            pbar.set_description(
                f'Train Epoch:{epoch}/{self.args.epochs}'
                f'\t train_loss:{np.around(np.mean(losses), 4)}'
                f'\t lr:{self.optimizer.param_groups[0]["lr"]}'
                f'\t MAE:{np.around(np.mean(MAE), 4)}'
                f'\t MSE:{np.around(np.sqrt(np.mean(MSE)), 4)}'
                f'\t gt:{target_sum} et:{output_sum}'

            )
        # self.scheduler.step()

    def test(self, epoch):
        """Evaluate on ``self.test_dataloader``; checkpoint when MAE improves.

        Updates the module-level ``best_mae`` and saves to weights/best.pth.
        """
        MSE = []
        MAE = []
        total = 0
        self.model.eval()
        with torch.no_grad():
            pbar = tqdm(self.test_dataloader, desc=f'Test Epoch:{epoch}/{self.args.epochs}')
            for data, target in pbar:
                data, target = data.to(self.device), target.to(self.device)
                outputs = self.model(data)
                output_sum = torch.sum(outputs).cpu().numpy()
                target_sum = torch.sum(target).cpu().numpy()
                total += len(data)
                MSE.append((target_sum - output_sum) ** 2)
                MAE.append(abs(target_sum - output_sum))
                pbar.set_description(
                    f'Test Epoch:{epoch}/{self.args.epochs} '
                    f' MAE:{np.mean(MAE)}'
                    f' MSE:{np.sqrt(np.mean(MSE))}'
                    f'\t gt:{target_sum} et:{output_sum}'
                )
        global best_mae
        if best_mae > np.around(np.mean(MAE), 4):
            best_mae = np.around(np.mean(MAE), 4)
            torch.save({
                'model_state_dict': self.model.state_dict(),
            }, 'weights/best.pth')
            green_print("model saved")

    @torch.no_grad()
    def predict(self, path):
        """Run the model on a single image and save a blended density overlay.

        *path* must point to a .jpg whose ground-truth density map lives in
        a parallel 'den' directory as a .csv with the same stem.
        """
        den = pd.read_csv(path.replace('jpg', 'csv').replace('img', 'den')).values
        den = den.astype(np.float32, copy=False)
        img = Image.open(path).convert('L')

        # NOTE(review): shape[0] is rows (height), so the names w/h look
        # swapped — but only their product is used below, so the scale
        # factor is unaffected.
        w = den.shape[0]
        h = den.shape[1]

        wd_1 = img.width // 4
        ht_1 = img.height // 4
        den = cv2.resize(den, (wd_1, ht_1))
        # Rescale so the density map still sums to the original head count.
        den = den * ((w * h) / (wd_1 * ht_1))

        gt_label = np.sum(den)
        print("gt:", gt_label)
        img = Image.open(path).convert('L')
        temp = img
        img = torch.from_numpy(np.array(img, dtype=np.float32)).unsqueeze(0)
        img = img.unsqueeze(0).to(self.device)  # (1, 1, H, W) batch for the model
        output = self.model(img)
        img2 = get_image(output, temp)
        print("crowds:", torch.sum((output.cpu())).numpy())
        # Side-by-side panel: original on the left, density map on the right.
        img3 = Image.new("RGB", (img2.width * 2, img2.height), (0, 0, 0))
        img3.paste(temp, (0, 0))
        img3.paste(img2, (img2.width, 0))
        # img3.save("hstack1.jpg")
        # BUGFIX: Image.convert returns a NEW image; the original discarded
        # the results, so the RGBA conversions never took effect and
        # Image.blend could receive mismatched modes.
        temp = temp.convert('RGBA')
        img2 = img2.convert('RGBA')
        # Blend the original image with the predicted density overlay.
        img4 = Image.blend(temp, img2, 0.9)
        img4.save("blend1.jpg")

    def get_lines_train(self):
        """Return the paths of all training .jpg images."""
        return self._collect_images("train")

    def get_lines_test(self):
        """Return the paths of all test .jpg images."""
        return self._collect_images("test")

    def _collect_images(self, split):
        """Walk each configured dataset's *split* folder, gathering .jpg paths.

        Deduplicates the former copy-pasted get_lines_train/get_lines_test
        bodies; behavior is identical for both splits.
        """
        paths = []
        for dataset in self.DATASETS:
            for root, _dirs, files in os.walk(self.DATA_ROOT + dataset + "/" + split):
                for name in files:
                    if name.endswith(".jpg"):
                        paths.append(os.path.join(root, name))
        return paths


if __name__ == "__main__":
    # Entry point: constructing Train() runs the full training pipeline.
    trainer = Train()
    # MAE 121 MSE 192