import glob
import os.path
import pickle

import numpy as np
import tensorboardX
import torch
import torchvision
from torchvision import transforms
from torch.utils.data import DataLoader, Dataset
from PIL import Image
import torch.nn as nn
import torch.nn.functional as F

from cifar10.data.dataset import MyDataset

# Prefer the first CUDA GPU when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

class ResBlock(nn.Module):
    """Basic residual block: two 3x3 convs plus a (possibly projected) shortcut."""

    def __init__(self, in_channel, out_channel, stride=1):
        super(ResBlock, self).__init__()
        # Main path: conv-BN-ReLU-conv-BN. Only the first conv carries the
        # stride, so stride > 1 downsamples once; the second conv keeps shape.
        self.layer = nn.Sequential(
            nn.Conv2d(in_channel, out_channel, kernel_size=3, stride=stride, padding=1),
            nn.BatchNorm2d(out_channel),
            nn.ReLU(),
            nn.Conv2d(out_channel, out_channel, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(out_channel),
        )
        # The shortcut is the identity unless the main path changes the channel
        # count or spatial size, in which case a conv+BN projection matches it.
        needs_projection = in_channel != out_channel or stride > 1
        if needs_projection:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_channel, out_channel, kernel_size=3, stride=stride, padding=1),
                nn.BatchNorm2d(out_channel),
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        # Residual addition followed by the final activation.
        return F.relu(self.layer(x) + self.shortcut(x))


class ResNet(nn.Module):
    """Small ResNet for CIFAR-10: a stem conv followed by four residual stages."""

    def __init__(self, ResBlock):
        super(ResNet, self).__init__()
        self.in_channel = 32

        # Stem: 3 -> 32 channels, spatial size unchanged.
        self.conv1 = nn.Sequential(
            nn.Conv2d(3, self.in_channel, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(self.in_channel),
            nn.ReLU(),
        )
        # Four stages; each halves the spatial size once (stride 2 on its
        # first block) while growing the channel count.
        self.layer1 = self.make_layer(ResBlock, 64, 2, 2)
        self.layer2 = self.make_layer(ResBlock, 128, 2, 2)
        self.layer3 = self.make_layer(ResBlock, 256, 2, 2)
        self.layer4 = self.make_layer(ResBlock, 512, 2, 2)

        self.fc = nn.Linear(512, 10)  # CIFAR-10 has 10 classes

    def make_layer(self, block, out_channel, stride, num_block):
        """Stack num_block blocks; only the first uses `stride`, the rest use 1.

        Bug fix: the original computed `in_stride` but then passed `stride`
        to every block, so every block downsampled instead of only the first.
        """
        layer_list = []
        for i in range(num_block):
            in_stride = stride if i == 0 else 1
            layer_list.append(block(self.in_channel, out_channel, in_stride))
            self.in_channel = out_channel
        return nn.Sequential(*layer_list)

    def forward(self, x):
        out = self.conv1(x)
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        # Global average pooling. The original F.avg_pool2d(out, 1) was a
        # no-op that only worked because the stride bug shrank features to
        # 1x1; adaptive pooling makes the head correct for any input size.
        out = F.adaptive_avg_pool2d(out, 1)
        out = out.view(out.size(0), -1)
        out = self.fc(out)
        # No softmax here: nn.CrossEntropyLoss expects raw logits.
        return out


def resNet():
    """Factory: build the 10-class ResNet composed of ResBlock units."""
    return ResNet(ResBlock)

def run():
    """Train the ResNet on CIFAR-10 images for one epoch, then evaluate it.

    Expects images under ../data/train/*/* and ../data/test/*/* (one folder
    per class) and writes TensorBoard scalars to ./log.
    """
    if not os.path.exists("log"):
        os.mkdir("log")
    writer = tensorboardX.SummaryWriter("log")
    step_n = 0

    # Random crop + horizontal flip as training augmentation; the test set
    # is only resized to the same 28x28 input resolution.
    train_transform = transforms.Compose([
        transforms.RandomCrop(28),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
    ])
    test_transform = transforms.Compose([
        transforms.Resize((28, 28)),
        transforms.ToTensor(),
    ])

    im_train_list = glob.glob("../data/train/*/*")
    im_test_list = glob.glob("../data/test/*/*")

    train_dataset = MyDataset(im_train_list, transform=train_transform)
    test_dataset = MyDataset(im_test_list, transform=test_transform)

    train_data_loader = DataLoader(dataset=train_dataset, batch_size=6, shuffle=True, num_workers=4)
    test_data_loader = DataLoader(dataset=test_dataset, batch_size=6, shuffle=False, num_workers=4)

    print(len(train_dataset))
    print(len(test_dataset))

    net = resNet().to(device)
    loss_func = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(net.parameters(), lr=0.01)
    # optimizer=torch.optim.SGD(net.parameters(),lr=0.01,momentum=0.9,weight_decay=5e-4)
    # Every step_size epochs the learning rate is multiplied by gamma.
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer=optimizer, step_size=5, gamma=0.9)
    epoch = 1
    for cur_epoch in range(epoch):
        net.train()  # training mode: BatchNorm/Dropout update their statistics
        for i, (inputs, labels) in enumerate(train_data_loader):
            # Bug fix: report the current epoch, not the total epoch count.
            print("epoch:", cur_epoch)
            print("index:", i)
            inputs = inputs.to(device)
            labels = labels.to(device)
            out = net(inputs)
            loss = loss_func(out, labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            print("损失:", loss.item())
            _, pred = torch.max(out.data, dim=1)
            correct = pred.eq(labels).cpu().sum()
            print("正确率:", 100.0 * correct / len(labels))
            print("学习率:", optimizer.state_dict()["param_groups"][0]["lr"])
            writer.add_scalar("train loss", loss.item(), global_step=step_n)
            writer.add_scalar("train correct", 100.0 * correct / len(labels), global_step=step_n)
            step_n += 1
            print("*" * 100)
        scheduler.step()  # advance the learning-rate schedule once per epoch

    net.eval()  # evaluation mode: freeze BatchNorm statistics, disable Dropout
    loss_list = []
    acc_list = []
    with torch.no_grad():  # no gradients needed during evaluation
        for i, (inputs, labels) in enumerate(test_data_loader):
            print("index:", i)
            inputs = inputs.to(device)
            labels = labels.to(device)
            out = net(inputs)
            loss = loss_func(out, labels)
            print("损失:", loss.item())
            loss_list.append(loss.item())
            _, pred = torch.max(out.data, dim=1)
            # Bug fix: the original reused `correct` left over from the last
            # training batch here; recompute it for the current test batch.
            correct = pred.eq(labels).cpu().sum()
            print("正确率:", 100.0 * correct / len(labels))
            acc_list.append(100.0 * correct / len(labels))
            print("学习率:", optimizer.state_dict()["param_groups"][0]["lr"])
            # Bug fix: the loss was logged under the "test correct" tag,
            # clobbering the accuracy curve; log it under its own tag.
            writer.add_scalar("test loss", loss.item(), global_step=step_n)
            writer.add_scalar("test correct", 100.0 * correct / len(labels), global_step=step_n)
            # Bug fix: advance the step so test scalars don't overwrite each other.
            step_n += 1
            print("*" * 100)
    print(np.array(loss_list).mean())
    print(np.array(acc_list).mean())
    writer.close()  # flush pending events to disk


if __name__ == '__main__':
    # Run training/evaluation only when executed as a script, not on import.
    run()