import glob
import os.path
import pickle

import numpy as np
import tensorboardX
import torch
import torchvision
from torchvision import transforms
from torch.utils.data import DataLoader, Dataset
from PIL import Image
import torch.nn as nn
import torch.nn.functional as F

import cv2

from cifar10.data.dataset import MyDataset

# Dataset download page: http://www.cs.toronto.edu/~kriz/cifar.html
# Use the first CUDA GPU when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")


def unpickle(file):
    """Load one pickled CIFAR-10 batch file and return its contents.

    The official CIFAR-10 batches are pickled with byte-string keys,
    hence ``encoding='bytes'``.

    WARNING: ``pickle.load`` can execute arbitrary code when given a
    malicious file — only call this on trusted data.
    """
    # Relies on the module-level ``import pickle``; the original re-imported
    # it locally and shadowed the ``dict`` builtin with the result name.
    with open(file, 'rb') as fo:
        batch = pickle.load(fo, encoding='bytes')
    return batch


# CIFAR-10 class names in index order: the position in this list IS the
# numeric label (airplane=0, automobile=1, ..., truck=9).
# NOTE(review): 'brid' looks like a typo for 'bird', but it is kept verbatim
# because it may have to match class-directory names on disk — confirm
# against the data layout before renaming.
label_name = [
    'airplane',
    'automobile',
    'brid',
    'cat',
    'deer',
    'dog',
    'frog',
    'horse',
    'ship',
    'truck',
]

# Reverse lookup: class name -> numeric label.
label_dict = {name: index for index, name in enumerate(label_name)}

# Training-time augmentation: a random 28x28 crop plus a random horizontal
# flip, then conversion to a float tensor in [0, 1].
train_transform = transforms.Compose([
    transforms.RandomCrop(28),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
])

# Test-time preprocessing: a deterministic resize to 28x28 only — no
# augmentation, so evaluation results are reproducible.
test_transform = transforms.Compose([
    transforms.Resize((28, 28)),
    transforms.ToTensor(),
])


# vggnet要求图片格式是32*32，但是上面加载到的数据是28*28，所以要先转换
class VggNet(nn.Module):
    """A small VGG-style CNN for 10-class image classification.

    Expects ``(batch, 3, 28, 28)`` input (the transforms above produce
    28x28 crops/resizes). Four conv stages, each followed by 2x2 max
    pooling, shrink the feature map 28 -> 14 -> 7 -> 4 -> 2, ending in a
    single linear layer over the flattened ``512 * 2 * 2`` features.

    ``forward`` returns RAW LOGITS. The original version applied
    ``F.softmax`` here, but the training loop uses ``nn.CrossEntropyLoss``,
    which applies log-softmax internally — softmaxing twice flattens the
    gradients and badly hurts training. Removing it also leaves
    ``torch.max(out, dim=1)`` accuracy computations unchanged, since
    softmax is monotonic.
    """

    @staticmethod
    def _conv_block(in_channels, out_channels):
        """3x3 same-padding conv + batch norm + ReLU."""
        return nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(),
        )

    def __init__(self):
        super(VggNet, self).__init__()
        # Attribute names are kept identical to the original so existing
        # checkpoints (state_dict keys) still load.

        # Stage 1: input 28x28, 3 -> 64 channels.
        self.conv1 = self._conv_block(3, 64)
        self.max_pooling1 = nn.MaxPool2d(kernel_size=2, stride=2)

        # Stage 2: input 14x14; spatial size halved, so channels double.
        self.conv2_1 = self._conv_block(64, 128)
        self.conv2_2 = self._conv_block(128, 128)
        self.max_pooling2 = nn.MaxPool2d(kernel_size=2, stride=2)

        # Stage 3: input 7x7.
        self.conv3_1 = self._conv_block(128, 256)
        self.conv3_2 = self._conv_block(256, 256)
        # padding=1 so the odd 7x7 map pools to 4x4 instead of truncating
        # the border down to 3x3.
        self.max_pooling3 = nn.MaxPool2d(kernel_size=2, stride=2, padding=1)

        # Stage 4: input 4x4; output is (batch, 512, 2, 2).
        self.conv4_1 = self._conv_block(256, 512)
        self.conv4_2 = self._conv_block(512, 512)
        self.max_pooling4 = nn.MaxPool2d(kernel_size=2, stride=2)

        # Flattened 512 * 2 * 2 = 2048 features -> 10 class logits.
        self.fc = nn.Linear(512 * 4, 10)

    def forward(self, x):
        """Return class logits of shape ``(batch, 10)`` for input ``x``."""
        batch_size = x.size(0)
        out = self.conv1(x)
        out = self.max_pooling1(out)

        out = self.conv2_1(out)
        out = self.conv2_2(out)
        out = self.max_pooling2(out)

        out = self.conv3_1(out)
        out = self.conv3_2(out)
        out = self.max_pooling3(out)

        out = self.conv4_1(out)
        out = self.conv4_2(out)
        out = self.max_pooling4(out)

        # (batch, 512, 2, 2) -> (batch, 2048) -> (batch, 10)
        out = out.view(batch_size, -1)
        # FIX: no softmax here — CrossEntropyLoss expects raw logits.
        return self.fc(out)


def vgg():
    """Train VggNet for one epoch on the CIFAR-10 image folders, then evaluate.

    Side effects: creates a ``log`` directory for TensorBoard summaries,
    and prints per-batch loss / accuracy / learning rate to stdout.
    Expects class-sorted images under ``../data/train`` and ``../data/test``.
    """
    if not os.path.exists("log"):
        os.mkdir("log")
    writer = tensorboardX.SummaryWriter("log")
    step_n = 0

    # One sample per image file: ../data/<split>/<class_name>/<image>.
    im_train_list = glob.glob("../data/train/*/*")
    im_test_list = glob.glob("../data/test/*/*")

    train_dataset = MyDataset(im_train_list, transform=train_transform)
    test_dataset = MyDataset(im_test_list, transform=test_transform)

    train_data_loader = DataLoader(dataset=train_dataset, batch_size=6, shuffle=True, num_workers=4)
    test_data_loader = DataLoader(dataset=test_dataset, batch_size=6, shuffle=False, num_workers=4)

    print(len(train_dataset))
    print(len(test_dataset))

    net = VggNet().to(device)
    loss_func = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(net.parameters(), lr=0.01)
    # optimizer=torch.optim.SGD(net.parameters(),lr=0.01,momentum=0.9,weight_decay=5e-4)
    # StepLR: every `step_size` epochs the learning rate is multiplied by `gamma`.
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer=optimizer, step_size=5, gamma=0.9)
    epoch = 1
    for cur_epoch in range(epoch):
        net.train()  # enable training-mode batchnorm / dropout
        for i, (inputs, labels) in enumerate(train_data_loader):
            # FIX: report the current epoch, not the total epoch count.
            print("epoch:", cur_epoch)
            print("index:", i)
            inputs = inputs.to(device)
            labels = labels.to(device)
            out = net(inputs)
            loss = loss_func(out, labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            print("损失:", loss.item())
            _, pred = torch.max(out.data, dim=1)
            correct = pred.eq(labels).cpu().sum()
            print("正确率:", 100.0 * correct / len(labels))
            print("学习率:", optimizer.state_dict()["param_groups"][0]["lr"])
            writer.add_scalar("train loss", loss.item(), global_step=step_n)
            writer.add_scalar("train correct", 100.0 * correct / len(labels), global_step=step_n)
            step_n += 1
            print("*" * 100)
        scheduler.step()  # advance the learning-rate schedule once per epoch

    net.eval()  # inference-mode batchnorm / dropout
    loss_list = []
    acc_list = []
    # FIX: evaluation does not need gradients; no_grad saves memory and time.
    with torch.no_grad():
        for i, (inputs, labels) in enumerate(test_data_loader):
            print("index:", i)
            inputs = inputs.to(device)
            labels = labels.to(device)
            out = net(inputs)
            loss = loss_func(out, labels)
            print("损失:", loss.item())
            loss_list.append(loss.item())
            _, pred = torch.max(out.data, dim=1)
            # FIX: `correct` was never computed in this loop — it silently
            # reused the stale value from the last training batch.
            correct = pred.eq(labels).cpu().sum()
            print("正确率:", 100.0 * correct / len(labels))
            acc_list.append(100.0 * correct / len(labels))
            print("学习率:", optimizer.state_dict()["param_groups"][0]["lr"])
            # FIX: the test loss was logged under the "test correct" tag,
            # overwriting the accuracy curve in TensorBoard.
            writer.add_scalar("test loss", loss.item(), global_step=step_n)
            writer.add_scalar("test correct", 100.0 * correct / len(labels), global_step=step_n)
            # FIX: advance the step so successive test points don't
            # overwrite one another at a single global_step.
            step_n += 1
            print("*" * 100)
    print(np.array(loss_list).mean())
    print(np.array(acc_list).mean())


if __name__ == '__main__':
    # Data-preparation helpers (prepare_train / prepare_test) are run
    # separately when the image folders need regenerating.
    vgg()
