# -*- coding: utf-8 -*-
"""
author:LTH
data:
"""

import os

import numpy as np
import torch
import torchvision
from torch import nn
from torch.backends import cudnn
from torch.utils import data
from torchvision import transforms
from tqdm import tqdm

from centerloss import CenterLoss
from model import Net

seed = 123

# MNIST preprocessing: PIL image -> float tensor in [0, 1] (ToTensor), then
# normalized with mean=0.5 / std=0.5 per channel (single grayscale channel).
# NOTE(review): this assignment shadows the imported ``transforms`` module;
# after this line the module is unreachable by that name — consider renaming
# the variable (would also require updating the dataset constructors below).
transforms = transforms.Compose(
    [
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.5], std=[0.5])
    ]
)


class train():
    """Train ``Net`` on MNIST, reporting accuracy and visualizing features.

    Instantiating this class immediately runs the full 200-epoch
    train/test loop as a side effect of ``__init__``.

    NOTE: despite the banner, the center-loss term is computed but NOT
    added to the optimized objective (it was commented out of the sum in
    the original code); only the NLL classification loss drives Adam.
    """

    def __init__(self, resume=False):
        """Build model, losses, optimizer and data loaders, then train.

        :param resume: when True, load "weights/best.pth" before training.
            Defaults to False, matching the original code where this
            branch was disabled with ``if False:``.
        """
        print("-----------center loss-------------")

        use_cuda = torch.cuda.is_available()

        # Seed the CPU RNG unconditionally; the original seeded it only on
        # CPU-only machines, leaving CPU-side ops unseeded under CUDA.
        torch.manual_seed(seed)
        if use_cuda:
            torch.cuda.manual_seed(seed)
            torch.cuda.manual_seed_all(seed)

        self.device = torch.device("cuda" if use_cuda else "cpu")
        self.model = Net().to(self.device)

        # Let cuDNN benchmark conv algorithms (input shape is fixed).
        if use_cuda:
            # self.model = torch.nn.DataParallel(self.model, device_ids=range(torch.cuda.device_count()))
            cudnn.enabled = True
            cudnn.benchmark = True

        if resume:
            try:
                print("load the weight from pretrained-weight file")
                model_dict = self.model.state_dict()
                checkpoint = torch.load("weights/best.pth")
                pretrained_dict = checkpoint['model_state_dict']
                # Keep only entries whose shapes match the current model so a
                # partially compatible checkpoint can still be loaded.
                pretrained_dict = {k: v for k, v in pretrained_dict.items()
                                   if np.shape(model_dict[k]) == np.shape(v)}
                model_dict.update(pretrained_dict)
                self.model.load_state_dict(model_dict, strict=True)
                print("Restoring the weight from pretrained-weight file \nFinished to load the weight")
            except Exception as e:
                print("can not load weight \n train the model from beginning")
                raise e

        # Move the criteria to the active device; the original called
        # ``.cuda()`` unconditionally, which crashes on CPU-only machines.
        # NLLLoss assumes Net emits log-probabilities — TODO confirm in model.py.
        self.lossf1 = nn.NLLLoss().to(self.device)
        self.lossf2_centerloss = CenterLoss(10, 2).to(self.device)

        self.optimizer = torch.optim.Adam([{"params": self.model.parameters()}], lr=1e-3)

        train_data = torchvision.datasets.MNIST(root=r"E:\Datasets2\MNIST_data\MNIST", download=True, train=True,
                                                transform=transforms)
        test_data = torchvision.datasets.MNIST(root=r"E:\Datasets2\MNIST_data\MNIST", download=True, train=False,
                                               transform=transforms)
        self.train_loader = data.DataLoader(dataset=train_data, shuffle=True, batch_size=512,
                                            num_workers=0)
        self.test_loader = data.DataLoader(dataset=test_data, shuffle=True, batch_size=256,
                                           num_workers=0)

        # Track the best test accuracy so "weights/best.pth" really holds the
        # best model; the original overwrote it every epoch with the latest.
        self.best_acc = 0.0

        for i in range(200):
            self.train(i)
            self.test(i)

    def train(self, epoch):
        """Run one training epoch and visualize the learned 2-D features."""
        self.model.train()
        average_loss = []
        feature_loader = []
        label_loader = []
        correct = 0
        total = 0
        pbar = tqdm(self.train_loader)
        for x, y in pbar:
            x = x.to(self.device)
            y = y.to(self.device)

            feature, output = self.model(x)

            pred = torch.argmax(output, 1)
            correct += (pred == y).sum().float()
            total += len(y)

            # Detach before stashing: the original kept live graph tensors for
            # the whole epoch, retaining every batch's autograd graph in memory.
            feature_loader.append(feature.detach())
            label_loader.append(y)

            loss_cls = self.lossf1(output, y)
            # Computed for monitoring only; CenterLoss receiving float labels
            # looks suspicious — presumably intended as class indices, verify.
            loss_center = 10 * self.lossf2_centerloss(feature, y.float())

            # Center-loss term intentionally excluded, as in the original
            # (it was commented out of the sum there).
            loss = loss_cls

            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            average_loss.append(loss.item())

            pbar.set_description(
                f'Train Epoch:{epoch} '
                f'Train Loss: {np.mean(average_loss)} '
                f'Acc:{correct/total} correct:{correct} total {total}'
            )
        feat = torch.cat(feature_loader, 0)
        labels = torch.cat(label_loader, 0)
        # ``.detach()`` on the centers guards against a grad-requiring
        # Parameter, which would make ``.numpy()`` raise.
        self.model.visualize(feat.cpu().numpy(), labels.cpu().numpy(), epoch,
                             self.lossf2_centerloss.center.detach().cpu().numpy())

    @torch.no_grad()
    def test(self, epoch):
        """Evaluate on the test set; checkpoint when accuracy improves."""
        self.model.eval()
        average_loss = []
        correct = 0
        total = 0
        pbar = tqdm(self.test_loader)
        for x, y in pbar:
            x = x.to(self.device)
            y = y.to(self.device)
            feature, output = self.model(x)

            pred = torch.argmax(output, 1)
            correct += (pred == y).sum().float()
            total += len(y)

            loss_cls = self.lossf1(output, y)
            # Mirrors training: monitored only, not part of the reported loss.
            loss_center = 1 * self.lossf2_centerloss(feature, y.float())

            # Center-loss term intentionally excluded, as in the original.
            loss = loss_cls
            average_loss.append(loss.item())
            pbar.set_description(
                f'Test Epoch:{epoch} '
                f'Acc : {correct/total}'
                f'Test Loss: {np.mean(average_loss)} '
            )

        # Save only when test accuracy improves, so "best.pth" holds the best
        # model rather than simply the latest; also make sure the target
        # directory exists (the original assumed it did).
        acc = (correct / total).item() if total else 0.0
        if acc > self.best_acc:
            self.best_acc = acc
            os.makedirs("weights", exist_ok=True)
            torch.save({"model_state_dict": self.model.state_dict()}, "weights/best.pth")


model = train()
