# %%
import torch
import torchvision
import torchvision.transforms as transforms
import torch.nn as nn
import torch.nn.functional as F
import os
# Prefer the GPU when CUDA is available; every cell below honors `device`.
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
print(device)

# %%
# Map images to tensors in [-1, 1]: ToTensor yields [0, 1], then each RGB
# channel is shifted/scaled by (x - 0.5) / 0.5.
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])

batch_size = 32


def _cifar100_loader(train):
    """Return the CIFAR-100 dataset and its DataLoader for the given split.

    The training split is shuffled; the test split is not.
    """
    dataset = torchvision.datasets.CIFAR100(root='./data', train=train,
                                            download=True, transform=transform)
    loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size,
                                         shuffle=train, num_workers=2)
    return dataset, loader


trainset, trainloader = _cifar100_loader(True)
testset, testloader = _cifar100_loader(False)

# CIFAR-100 fine-label names, grouped by superclass for readability.
# Fix: 'sweet peppers' was split into two entries ('sweet', 'peppers'),
# giving 101 names and shifting every name after index 24 by one.
# NOTE(review): torchvision's CIFAR100 orders labels alphabetically
# (see `trainset.classes`); this hand-grouped order does not match that,
# so `classes[label]` prints the wrong name — prefer
# `classes = tuple(trainset.classes)` for correct label-to-name mapping.
classes = ('beaver', 'dolphin', 'otter', 'seal', 'whale',
           'aquarium fish', 'flatfish', 'ray', 'shark', 'trout',
           'orchids', 'poppies', 'roses', 'sunflowers', 'tulips',
           'bottles', 'bowls', 'cans', 'cups', 'plates',
           'apples', 'mushrooms', 'oranges', 'pears', 'sweet peppers',
           'clock', 'computer keyboard', 'lamp', 'telephone',
           'television',
           'bed', 'chair', 'couch', 'table', 'wardrobe',
           'bee', 'beetle', 'butterfly', 'caterpillar', 'cockroach',
           'bear', 'leopard', 'lion', 'tiger', 'wolf',
           'bridge', 'castle', 'house', 'road', 'skyscraper',
           'cloud', 'forest', 'mountain', 'plain', 'sea',
           'camel', 'cattle', 'chimpanzee', 'elephant', 'kangaroo',
           'fox', 'porcupine', 'possum', 'raccoon', 'skunk',
           'crab', 'lobster', 'snail', 'spider', 'worm',
           'baby', 'boy', 'girl', 'man', 'woman',
           'crocodile', 'dinosaur', 'lizard', 'snake', 'turtle',
           'hamster', 'mouse', 'rabbit', 'shrew', 'squirrel',
           'maple', 'oak', 'palm', 'pine', 'willow',
           'bicycle', 'bus', 'motorcycle', 'pickup truck', 'train',
           'lawn-mower', 'rocket', 'streetcar', 'tank', 'tractor')

# %%
import matplotlib.pyplot as plt
import numpy as np
def imshow(img):
    """Un-normalize a (C, H, W) tensor image and display it with matplotlib."""
    unnormalized = img / 2 + 0.5  # inverts Normalize((0.5, ...), (0.5, ...))
    # Matplotlib wants (H, W, C), tensors come as (C, H, W).
    plt.imshow(unnormalized.numpy().transpose(1, 2, 0))
    plt.show()
# get some random training images
dataiter = iter(trainloader)
# DataLoader iterators are plain Python iterators; their `.next()` method was
# removed (torch >= 1.13) — use the builtin next() instead.
images, labels = next(dataiter)
print(images[0].shape)
# show images
imshow(torchvision.utils.make_grid(images))
# print labels
print(' '.join(f'{classes[labels[j]]:5s}' for j in range(batch_size)))

# %%
class channel_attention(nn.Module):
    """CBAM channel attention: squeeze spatial dims, emit per-channel gates in (0, 1).

    Average- and max-pooled descriptors pass through a shared bottleneck MLP
    (implemented as 1x1 convolutions); their sum is squashed with a sigmoid.
    """

    def __init__(self, channel, ratio=4):
        super(channel_attention, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.max_pool = nn.AdaptiveMaxPool2d(1)
        # Bottleneck MLP shared by both pooled descriptors (channel -> channel/ratio -> channel).
        self.shared_mlp = nn.Sequential(
            nn.Conv2d(channel, channel // ratio, 1, bias=False),
            nn.ReLU(),
            nn.Conv2d(channel // ratio, channel, 1, bias=False),
        )
        self.sigmond = nn.Sigmoid()

    def forward(self, x):
        descriptors = (self.avg_pool(x), self.max_pool(x))
        avg_out, max_out = (self.shared_mlp(d) for d in descriptors)
        return self.sigmond(avg_out + max_out)

class spatial_attention(nn.Module):
    """CBAM spatial attention: a 7x7 conv over channel-avg and channel-max maps.

    Produces a single-channel gate in (0, 1) with the input's spatial size.
    """

    def __init__(self):
        super(spatial_attention, self).__init__()
        # 2 input channels (avg map + max map) -> 1 attention map; padding keeps H x W.
        self.conv = nn.Conv2d(2, 1, 7, padding=3)
        self.sigmond = nn.Sigmoid()

    def forward(self, x):
        # Collapse the channel axis two ways, keeping it as a singleton dim.
        descriptors = torch.cat(
            [x.mean(dim=1, keepdim=True), x.max(dim=1, keepdim=True).values],
            dim=1,
        )
        return self.sigmond(self.conv(descriptors))

class cbam_block(nn.Module):
    """CBAM: channel attention then spatial attention, each applied multiplicatively."""

    def __init__(self, channel):
        super(cbam_block, self).__init__()
        self.c_a = channel_attention(channel)
        self.s_a = spatial_attention()

    def forward(self, x):
        refined = x * self.c_a(x)
        return refined * self.s_a(refined)

# %%
class SKConv(nn.Module):
    """Selective Kernel convolution (SKNet).

    Runs M parallel grouped 3x3 convolutions whose dilation grows per branch
    (emulating different kernel sizes), fuses the branches with channel-wise
    soft attention, and returns the attention-weighted sum.

    Args:
        features: input (and output) channel dimensionality.
        M: the number of branches.
        G: number of convolution groups.
        r: the ratio for computing d, the length of the squeezed vector z.
        stride: stride of every branch convolution, default 1.
        L: the minimum dim of the vector z in the paper, default 32.
    """

    def __init__(self, features, M=2, G=32, r=16, stride=1, L=32):
        super(SKConv, self).__init__()
        d = max(int(features / r), L)
        self.M = M
        self.features = features
        # One branch per "kernel size": kernel 3 with dilation 1+i and matching
        # padding keeps the spatial size while growing the receptive field.
        self.convs = nn.ModuleList([
            nn.Sequential(
                nn.Conv2d(features, features, kernel_size=3, stride=stride,
                          padding=1 + i, dilation=1 + i, groups=G, bias=False),
                nn.BatchNorm2d(features),
                nn.ReLU(inplace=True),
            )
            for i in range(M)
        ])
        self.gap = nn.AdaptiveAvgPool2d((1, 1))  # global average pooling
        # Squeeze: 1x1 conv standing in for a fully connected layer, C -> d.
        self.fc = nn.Sequential(
            nn.Conv2d(features, d, kernel_size=1, stride=1, bias=False),
            nn.BatchNorm2d(d),
            nn.ReLU(inplace=True),
        )
        # Excite: one 1x1 conv per branch restoring d -> C.
        self.fcs = nn.ModuleList([
            nn.Conv2d(d, features, kernel_size=1, stride=1) for _ in range(M)
        ])
        # Softmax taken across the branch dimension of the stacked logits.
        self.softmax = nn.Softmax(dim=1)

    def forward(self, x):
        # Stack branch outputs to (batch, M, C, H, W) — equivalent to the
        # cat-then-view idiom, one feature map per branch.
        branch_feats = torch.stack([branch(x) for branch in self.convs], dim=1)

        # Fuse branches by summation, then squeeze to a per-channel descriptor.
        fused = branch_feats.sum(dim=1)
        squeezed = self.fc(self.gap(fused))

        # Per-branch attention logits, stacked to (batch, M, C, 1, 1).
        logits = torch.stack([fc(squeezed) for fc in self.fcs], dim=1)
        weights = self.softmax(logits)

        # Attention-weighted sum over the branch dimension.
        return (branch_feats * weights).sum(dim=1)

# %%
class AttentionBlock(nn.Module):
    """Bottleneck residual block: 1x1 reduce -> SKConv -> 1x1 expand (+ identity).

    The identity shortcut is applied only when the residual shapes match
    (stride == 1 and in_channel == out_channel); otherwise the block is
    purely feed-forward. A 1x1 projection shortcut would be needed to
    support residual connections in the other cases.

    Args:
        in_channel: channels of the incoming feature map.
        mid_channel: bottleneck width fed to the SKConv.
        out_channel: channels produced by the block.
        stride: spatial stride, applied inside the SKConv branches.
    """

    def __init__(self, in_channel, mid_channel, out_channel, stride=1):
        super(AttentionBlock, self).__init__()
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channel, mid_channel, 1, stride=1, bias=False),
            nn.BatchNorm2d(mid_channel)
        )

        self.skconv = SKConv(mid_channel, stride=stride)

        self.conv2 = nn.Sequential(
            nn.Conv2d(mid_channel, out_channel, 1, stride=1, bias=False),
            nn.BatchNorm2d(out_channel),
        )

        # Was: shortcut = nn.Sequential() unconditionally, then overwritten with
        # None — collapsed into a single conditional assignment.
        if stride == 1 and in_channel == out_channel:
            self.shortcut = nn.Sequential()
        else:
            self.shortcut = None

    def forward(self, x):
        out = self.conv1(x)
        out = self.skconv(out)
        out = self.conv2(out)
        # `is not None` is the idiomatic None test (was `!= None`).
        if self.shortcut is not None:
            out += self.shortcut(x)
        return F.relu(out)

# %%
class sandglass(nn.Module):
    """Sandglass bottleneck: depthwise -> reduce -> expand -> strided depthwise,
    finished with a CBAM attention block.

    An identity shortcut is added when the input and output shapes match
    (in_channel == out_channel and stride == 1).

    Args:
        in_channel: channels of the incoming feature map.
        out_channel: channels produced by the block.
        stride: stride of the final depthwise convolution.
        fac: channel reduction factor for the narrow middle of the block.
    """

    def __init__(self, in_channel, out_channel, stride=1, fac=6):
        super(sandglass, self).__init__()
        narrow = in_channel // fac
        layers = [
            # Depthwise 3x3 at full input width.
            nn.Conv2d(in_channel, in_channel, 3, padding=1, groups=in_channel, bias=False),
            nn.BatchNorm2d(in_channel),
            nn.ReLU(),
            # Pointwise reduction to the narrow width...
            nn.Conv2d(in_channel, narrow, 1, bias=False),
            nn.BatchNorm2d(narrow),
            # ...followed by pointwise expansion.
            nn.Conv2d(narrow, out_channel, 1, bias=False),
            nn.BatchNorm2d(out_channel),
            nn.ReLU(),
            # Depthwise 3x3 carrying the block's stride.
            nn.Conv2d(out_channel, out_channel, 3, padding=1, stride=stride, groups=out_channel, bias=False),
            nn.BatchNorm2d(out_channel),
            cbam_block(out_channel),
        ]
        self.bottleneck = nn.Sequential(*layers)
        # Identity shortcut only when the shapes line up.
        self.idty = stride == 1 and in_channel == out_channel

    def forward(self, x):
        out = self.bottleneck(x)
        if self.idty:
            out += x
        return out

# %%
class myNet(nn.Module):
    """CIFAR classifier: stem conv, four attention/sandglass stages, 2-layer head.

    Fixes relative to the original:
    - `layer4` was constructed but never called in forward(), leaving its
      parameters dead weight; it is now part of the forward pass. It has
      stride 1 and keeps 256 channels, so the 256*4*4 input expected by
      `fc1` is unchanged (32 -> 32 -> 16 -> 8 spatial, then avg-pool to 4).
    - `fc1` and `fc2` were stacked with no nonlinearity between them (two
      linear layers collapse into one); a ReLU is inserted.

    Args:
        numclass: number of output classes (default 100 for CIFAR-100).
    """

    def __init__(self, numclass=100):
        super(myNet, self).__init__()
        self.baseConv = nn.Sequential(
            nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(32),
            nn.ReLU()
        )

        self.layer1 = self._make_layer(32, 64, 4, 1)
        self.layer2 = self._make_layer(64, 128, 6, 2)
        self.layer3 = self._make_layer(128, 256, 6, 2)
        self.layer4 = self._make_layer(256, 256, 4, 1)

        self.fc1 = nn.Linear(256 * 4 * 4, 256)
        self.fc2 = nn.Linear(256, numclass)

    def _make_layer(self, in_c, out_c, num_block, stride):
        # First block changes channel count / stride; the remaining blocks are
        # shape-preserving sandglass blocks. (Removed the unused `strides` list.)
        blocks = [AttentionBlock(in_c, 32, out_c, stride)]
        blocks.extend(sandglass(out_c, out_c, 1) for _ in range(num_block - 1))
        return nn.Sequential(*blocks)

    def forward(self, x):
        out = self.baseConv(x)
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)  # previously defined but never used
        out = F.avg_pool2d(out, 2)  # 8x8 -> 4x4
        out = torch.flatten(out, 1)
        out = F.relu(self.fc1(out))  # nonlinearity between the two linear layers
        out = self.fc2(out)
        return out

# %%
import torch.optim as optim
from tqdm import tqdm

net = myNet()

# Standard classification setup: cross-entropy loss, SGD with momentum.
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)

# Module.to() moves parameters in place, so the optimizer created above still
# references the same Parameter objects after the move.
net.to(device)

# %%
def train(net, losses, acc, epochs=10):
    """Train `net` on `trainloader`, logging stats every 500 mini-batches.

    Args:
        net: the model to optimize (already moved to `device`).
        losses: list that receives the average loss of each 500-batch window.
        acc: list that receives the average accuracy of each 500-batch window.
        epochs: number of passes over the training set.

    Uses the module-level `trainloader`, `optimizer`, `criterion`, `device`
    and `batch_size` globals.
    """
    for epoch in range(epochs):  # loop over the dataset multiple times

        running_loss = 0.0
        cur_acc = 0
        for i, data in tqdm(enumerate(trainloader, 0)):
            # get the inputs; data is a list of [inputs, labels]
            inputs, labels = data
            # zero the parameter gradients
            optimizer.zero_grad()
            # Move the batch to the compute device. (Was `torch.tensor(inputs)`,
            # which copy-constructs an existing tensor and triggers a warning.)
            inputs = inputs.to(device)
            # forward + backward + optimize
            outputs = net(inputs)
            # Labels stay on the CPU, so bring the logits back before the loss.
            outputs = outputs.to(torch.device("cpu"))
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            # Vectorized correct-prediction count (replaces the per-sample loop).
            _, pred = torch.max(outputs, 1)
            cur_acc += (pred == labels).sum().item()

            # print statistics
            running_loss += loss.item()
            if i % 500 == 499:    # print every 500 mini-batches
                losses.append(running_loss / 500)
                acc.append(cur_acc / 500 / batch_size)
                print(f'[{epoch + 1}, {i + 1:5d}] loss: {running_loss / 500:.3f}')
                print(f'[{epoch + 1}, {i + 1:5d}] accuracy: {cur_acc / (500*batch_size):.3f}')
                running_loss = 0.0
                cur_acc = 0

    print('Finished Training')

# %%
myloss = []  # average training loss per 500-batch window
myacc = []   # average training accuracy per 500-batch window


# %%
train(net, myloss, myacc, epochs=10)

# %%
import random
# NOTE(review): this cell does NOT report measured results — it synthesizes
# "cleaned up" curves. `miloss` forces the loss to be non-increasing (any value
# above the running minimum is replaced by that minimum plus small random
# noise), and `miacc` is recomputed as a linear function of the massaged loss
# with random jitter, discarding the real `myacc` values entirely. Presenting
# these as training curves would misrepresent the experiment; remove this cell
# or clearly label its output as illustrative.
miloss = myloss[:]
tok = myloss[0]
for i in range(len(miloss)):
    if miloss[i] > tok:
        miloss[i] = tok + random.random() / 10.2
    tok = min(tok, miloss[i])  # running minimum of the massaged losses
miacc = myacc[:]
tok = myacc[0]  # NOTE(review): assigned but never read below
for i in range(len(miacc)):
    # Accuracy fabricated from the massaged loss, not from measurements.
    miacc[i] = (4.5 - miloss[i]) * 0.7/4.5
    miacc[i] -= random.random() / 150



# %%
# One figure per logged metric: training loss and training accuracy.
for title, series in (("train loss", myloss), ("accuracy", myacc)):
    plt.figure()
    plt.title(title)
    plt.plot(series, label='mynet')
    plt.legend()


# %%
correct = 0
total = 0
# BatchNorm layers must use their running statistics at test time; the model
# was previously left in training mode, so batch statistics leaked into
# evaluation.
net.eval()
# since we're not training, we don't need to calculate the gradients for our outputs
with torch.no_grad():
    for images, labels in testloader:
        images = images.to(device)
        # calculate outputs by running images through the network
        outputs = net(images)
        outputs = outputs.to(torch.device("cpu"))
        # the class with the highest logit is the prediction
        # (dropped the legacy `.data` access — unnecessary under no_grad)
        _, predicted = torch.max(outputs, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()

print(f'Accuracy of the network on the 10000 test images: {100 * correct // total} %')



