import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torchvision
from tensorboardX import SummaryWriter
from torch import nn
from torch.nn.functional import log_softmax,softmax
from torch.utils.data import DataLoader
from torchvision import transforms
from tqdm import tqdm

from config import parser
from model import StudentModel, ResNet18

# Global TensorBoard writer shared by the trainer's train/test logging.
writer = SummaryWriter()
# Best test accuracy observed so far; updated via `global` in train.test().
best_acc = 0


class MYLOSS(nn.Module):
    """Distillation loss with two modes.

    type="teacher": soft cross-entropy between student and teacher logits,
        both softened by temperature T (larger T flattens the softmax
        distributions, shrinking the distance between them).
    type="student": cross-entropy of student logits against one-hot hard
        labels, averaged over every element of the one-hot matrix.
    """

    def __init__(self, type="teacher", T=10, num_classes=10):
        super(MYLOSS, self).__init__()
        self.T = T
        self.type = "teacher" if type == "teacher" else "student"
        # One-hot lookup table (identity matrix). Built on CPU and moved to
        # the labels' device lazily in forward_student — the original
        # hard-coded .to('cuda'), which crashed on CPU-only machines.
        self.temp = torch.eye(num_classes)

    def forward(self, predict, target):
        if self.type == "teacher":
            # Temperature-scale both logit sets before the soft cross-entropy.
            return self.forward_teacher(predict / self.T, target / self.T)
        return self.forward_student(predict, target)

    def forward_teacher(self, predict, target):
        """Soft cross-entropy: teacher logits (target) vs student logits (predict)."""
        # Explicit dim avoids the deprecated implicit-dim behavior.
        predict = log_softmax(predict, dim=-1)
        target = softmax(target, dim=-1)
        loss = target * predict
        return -torch.mean(loss)

    def forward_student(self, predict, target):
        """Cross-entropy against integer class labels, via a one-hot lookup."""
        predict = log_softmax(predict, dim=-1)
        # Row-select one-hot vectors for each label, on the labels' device.
        one_hot = self.temp.to(target.device).index_select(0, target)
        loss = one_hot * predict
        return -torch.mean(loss)


class train(object):
    """Knowledge-distillation trainer for CIFAR-10.

    Construction does everything: parses args, builds the data loaders,
    loads the pretrained teacher, then runs the full train/test loop,
    checkpointing the student whenever test accuracy improves.
    """

    def __init__(self):
        self.args = parser.parse_args()
        print(f"-----------{self.args.project_name}-------------")

        use_cuda = self.args.use_cuda and torch.cuda.is_available()
        # Seed the CPU RNG unconditionally (the original only seeded it in the
        # non-CUDA branch, making CPU-side ops non-reproducible on GPU runs).
        torch.manual_seed(self.args.seed)
        if use_cuda:
            torch.cuda.manual_seed(self.args.seed)
            torch.cuda.manual_seed_all(self.args.seed)

        self.device = torch.device('cuda' if use_cuda else 'cpu')

        # ---- DataLoaders -------------------------------------------------
        transform_train = transforms.Compose([
            transforms.RandomCrop(32, padding=4),  # zero-pad 4px, then random 32x32 crop
            transforms.RandomHorizontalFlip(),  # flip with probability 0.5
            # Per-channel CIFAR-10 mean/std for R, G, B.
            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
        ][:2] + [
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
        ])
        transform_test = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
        ])
        trainset = torchvision.datasets.CIFAR10(root='E:/Datasets2/CIFAR-10/', train=True, download=True,
                                                transform=transform_train)
        testset = torchvision.datasets.CIFAR10(root='E:/Datasets2/CIFAR-10/', train=False, download=True,
                                               transform=transform_test)

        # shuffle=True on the training set (was missing; the test set order
        # does not matter for evaluation).
        self.train_dataloader = DataLoader(trainset, batch_size=self.args.train_batch_size, shuffle=True)
        self.test_dataloader = DataLoader(testset, batch_size=self.args.test_batch_size)

        # ---- Models ------------------------------------------------------
        self.teacher_model = ResNet18().to(self.device)
        self.student_model = StudentModel().to(self.device)

        if use_cuda:
            self.teacher_model = torch.nn.DataParallel(self.teacher_model, device_ids=range(torch.cuda.device_count()))
            self.student_model = torch.nn.DataParallel(self.student_model, device_ids=range(torch.cuda.device_count()))
            cudnn.benchmark = True

        # Load pretrained teacher weights; fall back to random init on failure.
        try:
            print("load the weight from pretrained-weight file")
            model_dict = self.teacher_model.state_dict()
            pretrained_dict = torch.load("weights/teacher/best.pth", map_location=self.device)['model_state_dict']
            # Keep only tensors whose shapes match the current model.
            pretrained_dict = {k: v for k, v in pretrained_dict.items() if np.shape(model_dict[k]) == np.shape(v)}
            model_dict.update(pretrained_dict)
            self.teacher_model.load_state_dict(model_dict)
            print("Finished to load the weight")
        except Exception:  # narrowed from a bare except (which also swallowed KeyboardInterrupt)
            print("can not load weight \n train the model from stratch")

        # ---- Loss, optimizer, LR schedule --------------------------------
        self.criterion = MYLOSS("student")   # hard-label loss for the student
        self.criterion2 = MYLOSS("teacher")  # soft-target distillation loss
        self.optimizer = optim.SGD(self.student_model.parameters(), lr=self.args.lr, momentum=self.args.momentum)
        self.scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(self.optimizer, T_max=200)

        # Run the full distillation loop, evaluating after every epoch.
        for epoch in range(1, self.args.epoches + 1):
            self.train(epoch)
            self.test(epoch)

        torch.cuda.empty_cache()
        print("finish model distilling")

    def train(self, epoch):
        """Run one distillation epoch over the training set.

        Loss = 0.6 * hard-label CE + 0.4 * temperature-softened
        soft-target loss against the frozen teacher's logits.
        """
        self.teacher_model.eval()
        self.student_model.train()
        average_loss = []
        pbar = tqdm(self.train_dataloader, desc=f'Train Epoch{epoch}/{self.args.epoches}')

        # Running accuracy counters on the training device (was hard-coded .cuda()).
        correct = torch.zeros(1).squeeze().to(self.device)
        total = torch.zeros(1).squeeze().to(self.device)

        for data, target in pbar:
            data, target = data.to(self.device), target.to(self.device)
            self.optimizer.zero_grad()  # clear parameter gradients
            output_student = self.student_model(data)
            with torch.no_grad():  # teacher is frozen; no gradients needed
                output_teacher = self.teacher_model(data)

            pred = torch.argmax(output_student, 1)
            correct += (pred == target).sum().float()
            total += len(target)
            predict_acc = (correct / total).item()  # plain float for clean logging

            loss = 0.6 * self.criterion(output_student, target) + \
                   0.4 * self.criterion2(output_student, output_teacher)
            loss.backward()
            average_loss.append(loss.item())
            self.optimizer.step()
            pbar.set_description(
                f'Train Epoch:{epoch}/{self.args.epoches} train_loss:{round(np.mean(average_loss), 4)} acc:{predict_acc} lr：{self.optimizer.param_groups[0]["lr"]}')
        self.scheduler.step()

        writer.add_scalar('train/loss', np.mean(average_loss), epoch)
        writer.add_scalar('train/acc', predict_acc, epoch)

    def test(self, epoch):
        """Evaluate the student on the test set; checkpoint on a new best accuracy."""
        global best_acc

        self.student_model.eval()
        correct = torch.zeros(1).squeeze().to(self.device)
        total = torch.zeros(1).squeeze().to(self.device)
        average_loss = []

        pbar = tqdm(self.test_dataloader,
                    desc=f'Test Epoch{epoch}/{self.args.epoches}',
                    mininterval=0.3)

        for data, target in pbar:
            data, target = data.to(self.device), target.to(self.device)
            with torch.no_grad():
                output = self.student_model(data)
                # Compute the batch loss once (was computed twice per batch).
                average_loss.append(self.criterion(output, target).item())
                pred = torch.argmax(output, 1)
                correct += (pred == target).sum().float()
                total += len(target)
                predict_acc = (correct / total).item()
            pbar.set_description(
                f'【Test Epoch】:{epoch}/{self.args.epoches} acc:{predict_acc}')

        # Log once per epoch — the original logged inside the batch loop,
        # rewriting the same epoch's scalar on every batch.
        writer.add_scalar('test/loss', np.mean(average_loss), epoch)
        writer.add_scalar('test/acc', predict_acc, epoch)

        if self.args.save and predict_acc > best_acc:
            best_acc = predict_acc

            torch.save({
                'epoch': epoch,
                'model_state_dict': self.student_model.state_dict(),
                'optimizer_state_dict': self.optimizer.state_dict(),
                'loss': round(np.mean(average_loss), 2)
            },
                "./weights/student" + f'/Epoch-{epoch}-best_acc-{best_acc}_loss_{round(np.mean(average_loss), 4)}.pth')

            print(f"save Epoch-{epoch}-best_acc-{best_acc}_loss_{round(np.mean(average_loss), 4)}.pth")


if __name__ == "__main__":
    # Constructing the trainer runs the entire distillation loop
    # (see train.__init__); nothing else needs to be called.
    train = train()
