import os

import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.optim as optim
import torchvision
from tensorboardX import SummaryWriter
from torch.utils.data import DataLoader
from torchvision.transforms import transforms
from tqdm import tqdm

from config import parser
from model import ResNet18

writer = SummaryWriter()
best_acc = 0
import torchvision



'''
Common techniques to prevent overfitting:
1. Increase the amount of training data
2. Regularization (e.g. L2 weight decay, used below)
3. Dropout
4. Data augmentation (random crop / horizontal flip, used below)
'''

class train(object):
    def __init__(self):
        self.args = parser.parse_args()
        print(f"-----------{self.args.project_name}-------------")

        use_cuda = self.args.use_cuda and torch.cuda.is_available()
        if use_cuda:
            torch.cuda.manual_seed(self.args.seed)
            torch.cuda.manual_seed_all(self.args.seed)
        else:
            torch.manual_seed(self.args.seed)

        self.device = torch.device('cuda' if use_cuda else 'cpu')


        transform_train = transforms.Compose([
            transforms.RandomCrop(32, padding=4),  # 先四周填充0，在吧图像随机裁剪成32*32
            transforms.RandomHorizontalFlip(),  # 图像一半的概率翻转，一半的概率不翻转
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),  # R,G,B每层的归一化用到的均值和方差
        ])
        transform_test = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
        ])

        trainset = torchvision.datasets.CIFAR10(root='E:/Datasets2/CIFAR-10/', train=True, download=True,
                                                transform=transform_train)  # 训练数据集
        testset = torchvision.datasets.CIFAR10(root='E:/Datasets2/CIFAR-10/', train=False, download=True,
                                               transform=transform_test)
        '''
        构造DataLoader
        '''
        # self.train_dataloader = DataLoader(Dog10Dataset(self.args.train_data_path,data_type='train'),
        #                                    batch_size=self.args.train_batch_size, shuffle=True, **train_kwargs)
        # self.test_dataloader = DataLoader(Dog10Dataset(self.args.test_data_path), batch_size=self.args.test_batch_size,
        #                                   shuffle=True, **test_kwargs)

        self.train_dataloader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True,
                                                            num_workers=0)  # 生成一个个batch进行批训练，组成batch的时候顺序打乱取
        self.test_dataloader = torch.utils.data.DataLoader(testset, batch_size=128, shuffle=False,
                                                           num_workers=0)

        self.model = ResNet18().to(self.device)

        if use_cuda:
            self.model = torch.nn.DataParallel(self.model, device_ids=range(torch.cuda.device_count()))
            cudnn.benchmark = True

        '''
        构造loss目标函数
        选择优化器
        学习率变化选择
        '''

        self.criterion = nn.CrossEntropyLoss()
        self.optimizer = optim.SGD(self.model.parameters(), lr=self.args.lr, momentum=0.9,
                                   weight_decay=0.01)#L2惩罚
        # self.optimizer=optim.Adam(self.model.parameters(),lr=self.args.lr)

        self.scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(self.optimizer, T_max=200)
        # self.scheduler=torch.optim.lr_scheduler.MultiStepLR(self.optimizer,milestones=[1,40],gamma=0.1,last_epoch=-1)
        start_epoch = 1

        if self.args.resume:
            try:
                # 加载权重文件
                train_dict = torch.load(self.args.pretrained_model, map_location=self.device)

                try:
                    # weight文件匹配
                    print("load the weight from pretrained-weight file")
                    model_dict = self.model.state_dict()
                    pretrained_dict = train_dict['model_state_dict']
                    pretrained_dict = {k: v for k, v in pretrained_dict.items() if
                                       np.shape(model_dict[k]) == np.shape(v)}
                    model_dict.update(pretrained_dict)
                    self.model.load_state_dict(model_dict)
                    print("Finished to load the weight successfully")
                except:
                    print("can not load the weight \n train the model from stratch")

                # try:
                      #optimizer文件匹配
                #     print("load the params from optimizer file")
                #     self.optimizer.load_state_dict(train_dict['optimizer_state_dict'])
                # except:
                #     print("can not load the params \n train the model from stratch")

                # try:
                      #epoch文件匹配
                #     print("load the epoch")
                #     start_epoch = train_dict['epoch']
                # except:
                #     print('can not load the epoch')
                #     start_epoch = 1

            except:
                print("can not load the pretrained model information")

        for epoch in range(start_epoch, start_epoch + self.args.epoches + 1):
            self.train(epoch)
            if epoch % 1 == 0:
                self.test(epoch)

        torch.cuda.empty_cache()
        print("finish model training")

    # 第一种蒸馏方案，先验证其可行性
    def train(self, epoch):
        self.model.train()
        average_loss = []
        pbar = tqdm(self.train_dataloader,
                    desc=f'Train Epoch{epoch}/{self.args.epoches}')
        #
        # correct = torch.zeros(1).squeeze().cuda()
        # total = torch.zeros(1).squeeze().cuda()
        correct=0
        total=0

        for data, target in pbar:
            data, target = data.to(self.device), target.to(self.device)
            self.optimizer.zero_grad()  # 模型参数梯度清零
            output = self.model(data)
            pred = torch.argmax(output, 1)
            correct += pred.eq(target).sum().item()
            total += target.size(0)
            predict_acc = correct / total
            loss = self.criterion(output, target)
            loss.backward()
            average_loss.append(loss.item())
            self.optimizer.step()
            pbar.set_description(
                f'Train Epoch:{epoch}/{self.args.epoches} train_loss:{round(np.mean(average_loss), 4)} total_acc:{predict_acc}  temp_acc:{(pred == target).sum() / output.size(0)} lr:{self.optimizer.param_groups[0]["lr"]} ')
        self.scheduler.step()
        # print("epoch:" + str(epoch) + " acc:" + str(correct) + "/" + str(total))

        writer.add_scalar('train/loss', np.mean(average_loss), epoch)
        writer.add_scalar('train/acc', predict_acc, epoch)

    def test(self, epoch):

        global best_acc

        self.model.eval()
        test_loss = 0
        # correct = torch.zeros(1).squeeze().cuda()
        # total = torch.zeros(1).squeeze().cuda()
        correct=0
        total=0
        average_loss = []

        pbar = tqdm(self.test_dataloader,
                    desc=f'【Test Epoch】:{epoch}/{self.args.epoches}',
                    mininterval=0.3)
        for data, target in pbar:
            with torch.no_grad():
                data, target = data.to(self.device), target.to(self.device)
                output = self.model(data)
                average_loss.append(self.criterion(output, target).item())
                test_loss += self.criterion(output, target).item()  # sum up batch loss
                pred = torch.argmax(output, 1)
                correct += pred.eq(target).sum().item()
                total += target.size(0)
                predict_acc = correct / total
                # correct += (pred == target).sum().float()
                # total += len(target)
                # predict_acc = correct / total
            pbar.set_description(
                f'【Test Epoch】:{epoch}/{self.args.epoches} acc:{predict_acc}')
        # print("epoch:" + str(epoch) + " acc:" + str(correct) + "/" + str(total))

        writer.add_scalar('test/loss', np.mean(average_loss), epoch)
        writer.add_scalar('test/acc', predict_acc, epoch)

        if self.args.save_model and predict_acc > best_acc:
            best_acc = predict_acc

            # torch.save({
            #     'epoch': epoch,
            #     'model_state_dict': self.model.state_dict(),
            #     'optimizer_state_dict': self.optimizer.state_dict(),
            #     'loss': round(np.mean(average_loss), 2),
            #     'acc': best_acc
            # },
            #     "./weights/student" + f'/Epoch-{epoch}-best_acc-{best_acc}_loss_{round(np.mean(average_loss), 2)}.pth')
            # 允许重复覆盖，便于以后pretrained的直接运行，而不需要每次都去对应的找相应的权重
            torch.save({
                'epoch': epoch,
                'model_state_dict': self.model.state_dict(),
                'optimizer_state_dict': self.optimizer.state_dict(),
                'loss': round(np.mean(average_loss), 2),
                'acc': best_acc
            },
                "./weights/teacher" + f'/best.pth')
            print(f"\nsave Epoch-{epoch}-best_acc-{best_acc}-loss-{round(np.mean(average_loss), 2)}.pth")


if __name__ == "__main__":
    train = train()
