import glob
import os.path
import pickle

import numpy as np
import tensorboardX
import torch
import torchvision
from torchvision import transforms
from torch.utils.data import DataLoader, Dataset
from PIL import Image
import torch.nn as nn
import torch.nn.functional as F

from cifar10.data.dataset import MyDataset

# Select GPU 0 when CUDA is available, otherwise fall back to the CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")


def ConvBNRelu(in_channel, out_channel, kernel_size):
    """Build a Conv2d -> BatchNorm2d -> ReLU stack.

    Padding is ``kernel_size // 2`` so a stride-1 convolution with an
    odd kernel preserves the spatial size ("same" padding).
    """
    conv = nn.Conv2d(
        in_channel,
        out_channel,
        kernel_size=kernel_size,
        stride=1,
        padding=kernel_size // 2,
    )
    return nn.Sequential(conv, nn.BatchNorm2d(out_channel), nn.ReLU())


class BaseInception(nn.Module):
    """Inception block: four parallel branches concatenated on channels.

    Branches:
      1. 1x1 conv
      2. 1x1 reduce -> 3x3 conv
      3. 1x1 reduce -> 5x5 conv
      4. 3x3 max-pool (stride 1) -> 3x3 conv

    Every branch preserves the spatial size, so the outputs can be
    concatenated; the result has ``sum(out_channel_list)`` channels.
    """

    def __init__(self, in_channel, out_channel_list, reduce_channel_list):
        super(BaseInception, self).__init__()
        # Branch 1: plain 1x1 convolution.
        self.branch1_conv = ConvBNRelu(in_channel, out_channel_list[0], kernel_size=1)
        # Branch 2: channel reduction, then 3x3 convolution.
        self.branch2_conv1 = ConvBNRelu(in_channel, reduce_channel_list[0], kernel_size=1)
        self.branch2_conv2 = ConvBNRelu(reduce_channel_list[0], out_channel_list[1], kernel_size=3)
        # Branch 3: channel reduction, then 5x5 convolution.
        self.branch3_conv1 = ConvBNRelu(in_channel, reduce_channel_list[1], kernel_size=1)
        self.branch3_conv2 = ConvBNRelu(reduce_channel_list[1], out_channel_list[2], kernel_size=5)
        # Branch 4: stride-1 max-pool followed by a 3x3 convolution.
        self.branch4_pool = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
        self.branch4_conv = ConvBNRelu(in_channel, out_channel_list[3], kernel_size=3)

    def forward(self, x):
        """Run all four branches on `x` and concatenate along dim 1."""
        branches = [
            self.branch1_conv(x),
            self.branch2_conv2(self.branch2_conv1(x)),
            self.branch3_conv2(self.branch3_conv1(x)),
            self.branch4_conv(self.branch4_pool(x)),
        ]
        return torch.cat(branches, dim=1)


class InceptionNet(nn.Module):
    """Small Inception-style classifier with 10 output classes.

    The training pipeline feeds 3-channel 28x28 images; with that input
    the stages produce 28 -> 12 -> 12 -> 6 -> 3 spatially, ending at
    3x3x384 before the classifier head.
    """

    def __init__(self):
        super(InceptionNet, self).__init__()
        # Stem: stride-2 7x7 convolution (28x28 -> 12x12 for this input size).
        self.block1 = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(),
        )
        # 3x3 convolution keeps the spatial size and widens to 128 channels.
        self.block2 = nn.Sequential(
            nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(),
        )
        # Inception stage (4 x 64 = 256 channels) + stride-2 pool (12 -> 6).
        self.block3 = nn.Sequential(
            BaseInception(
                in_channel=128,
                out_channel_list=[64, 64, 64, 64],
                reduce_channel_list=[16, 16],
            ),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
        )
        # Inception stage (4 x 96 = 384 channels) + stride-2 pool (6 -> 3).
        self.block4 = nn.Sequential(
            BaseInception(
                in_channel=256,
                out_channel_list=[96, 96, 96, 96],
                reduce_channel_list=[32, 32],
            ),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
        )
        # 96 * 4 = 384 features once the average pool collapses 3x3 -> 1x1.
        self.fc = nn.Linear(96 * 4, 10)

    def forward(self, x):
        """Return (batch, 10) class logits for input images `x`."""
        out = x
        for stage in (self.block1, self.block2, self.block3, self.block4):
            out = stage(out)
        # NOTE(review): a 2x2/stride-2 pool over a 3x3 map yields 1x1 and
        # only averages the top-left 2x2 window, discarding the last
        # row/column — intentional? Confirm before changing.
        out = F.avg_pool2d(out, 2)
        out = out.view(out.size(0), -1)
        return self.fc(out)


def run():
    """Train InceptionNet on the CIFAR-10 image folders for one epoch,
    then evaluate on the test split.

    Reads images from ``../data/train`` and ``../data/test`` via
    ``MyDataset`` and logs loss/accuracy scalars to TensorBoard under
    ``log/``. No arguments; no return value.
    """
    if not os.path.exists("log"):
        os.mkdir("log")
    writer = tensorboardX.SummaryWriter("log")
    step_n = 0

    # Augment at train time (random crop + flip); deterministic resize at test time.
    train_transform = transforms.Compose([
        transforms.RandomCrop(28),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
    ])
    test_transform = transforms.Compose([
        transforms.Resize((28, 28)),
        transforms.ToTensor(),
    ])

    im_train_list = glob.glob("../data/train/*/*")
    im_test_list = glob.glob("../data/test/*/*")

    train_dataset = MyDataset(im_train_list, transform=train_transform)
    test_dataset = MyDataset(im_test_list, transform=test_transform)

    train_data_loader = DataLoader(dataset=train_dataset, batch_size=6, shuffle=True, num_workers=4)
    test_data_loader = DataLoader(dataset=test_dataset, batch_size=6, shuffle=False, num_workers=4)

    print(len(train_dataset))
    print(len(test_dataset))

    net = InceptionNet().to(device)
    loss_func = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(net.parameters(), lr=0.01)
    # optimizer=torch.optim.SGD(net.parameters(),lr=0.01,momentum=0.9,weight_decay=5e-4)
    # Multiply the learning rate by `gamma` once every `step_size` epochs.
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer=optimizer, step_size=5, gamma=0.9)
    epoch = 1
    for cur_epoch in range(epoch):
        net.train()  # training mode: batchnorm/dropout update their statistics
        for i, (inputs, labels) in enumerate(train_data_loader):
            # BUG FIX: report the current epoch, not the total epoch count.
            print("epoch:", cur_epoch)
            print("index:", i)
            inputs = inputs.to(device)
            labels = labels.to(device)
            out = net(inputs)
            loss = loss_func(out, labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            print("损失:", loss.item())
            _, pred = torch.max(out.data, dim=1)
            correct = pred.eq(labels).cpu().sum()
            print("正确率:", 100.0 * correct / len(labels))
            print("学习率:", optimizer.state_dict()["param_groups"][0]["lr"])
            writer.add_scalar("train loss", loss.item(), global_step=step_n)
            writer.add_scalar("train correct", 100.0 * correct / len(labels), global_step=step_n)
            step_n += 1
            print("*" * 100)
        scheduler.step()  # advance the LR schedule once per epoch

    net.eval()  # eval mode: batchnorm uses running statistics
    loss_list = []
    acc_list = []
    with torch.no_grad():  # gradients are not needed during evaluation
        for i, (inputs, labels) in enumerate(test_data_loader):
            print("index:", i)
            inputs = inputs.to(device)
            labels = labels.to(device)
            out = net(inputs)
            loss = loss_func(out, labels)
            print("损失:", loss.item())
            loss_list.append(loss.item())
            _, pred = torch.max(out.data, dim=1)
            # BUG FIX: `correct` was never computed in this loop — it silently
            # reused the stale value from the last training batch.
            correct = pred.eq(labels).cpu().sum()
            print("正确率:", 100.0 * correct / len(labels))
            acc_list.append(100.0 * correct / len(labels))
            print("学习率:", optimizer.state_dict()["param_groups"][0]["lr"])
            # BUG FIX: the loss was logged under the "test correct" tag,
            # overwriting the accuracy curve; also advance step_n so test
            # points don't all land on the same global step.
            writer.add_scalar("test loss", loss.item(), global_step=step_n)
            writer.add_scalar("test correct", 100.0 * correct / len(labels), global_step=step_n)
            step_n += 1
            print("*" * 100)
    print(np.array(loss_list).mean())
    print(np.array(acc_list).mean())
    writer.close()  # flush pending events to disk


# Script entry point: train and evaluate only when executed directly,
# not when this module is imported.
if __name__ == '__main__':
    run()
