import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
import matplotlib.pyplot as plt

from torch.utils.data import DataLoader

# Run on the GPU when one is available, otherwise fall back to the CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Per-channel mean/std of the CIFAR-100 training set (RGB order),
# used to normalize network inputs.
mean = [0.5070751592371323, 0.48654887331495095, 0.4409178433670343]
std = [0.2673342858792401, 0.2564384629170883, 0.27615047132568404]
 
# Training-time augmentation + normalization pipeline for CIFAR-100.
_augment_steps = [
    torchvision.transforms.RandomHorizontalFlip(),   # random horizontal flip
    torchvision.transforms.RandomVerticalFlip(),     # random vertical flip
    torchvision.transforms.RandomRotation(10),       # random rotation up to 10 degrees
    torchvision.transforms.RandomResizedCrop(32),    # random crop, rescaled back to 32x32
    torchvision.transforms.ToTensor(),
    torchvision.transforms.Normalize(mean, std),
]
transforms_fn = torchvision.transforms.Compose(_augment_steps)

heads = 1
class MHSA(nn.Module):
    def __init__(self, n_dims, width=2, height=2, heads=heads):
        super(MHSA, self).__init__()
        self.heads = heads

        self.query = nn.Conv2d(n_dims, n_dims, kernel_size=1)
        self.key = nn.Conv2d(n_dims, n_dims, kernel_size=1)
        self.value = nn.Conv2d(n_dims, n_dims, kernel_size=1)

        self.rel_h = nn.Parameter(torch.randn([1, heads, n_dims // heads, 1, height]), requires_grad=True)
        self.rel_w = nn.Parameter(torch.randn([1, heads, n_dims // heads, width, 1]), requires_grad=True)

        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x):
        n_batch, C, width, height = x.size()
        q = self.query(x).view(n_batch, self.heads, C // self.heads, -1)
        k = self.key(x).view(n_batch, self.heads, C // self.heads, -1)
        v = self.value(x).view(n_batch, self.heads, C // self.heads, -1)

        content_content = torch.matmul(q.permute(0, 1, 3, 2), k)

        content_position = (self.rel_h + self.rel_w).view(1, self.heads, C // self.heads, -1).permute(0, 1, 3, 2)
        content_position = torch.matmul(content_position, q)

        energy = content_content + content_position
        attention = self.softmax(energy)

        out = torch.matmul(v, attention.permute(0, 1, 3, 2))
        out = out.view(n_batch, C, width, height)

        return out
    
class ResBlock(nn.Module):
    def __init__(self,inchannel,outchannel,stride=1):
        super(ResBlock, self).__init__()
        #定义残差块里连续的2个卷积层
        self.block_conv=nn.Sequential(
            nn.Conv2d(inchannel,outchannel,kernel_size=3,stride=stride,padding=1),
            nn.BatchNorm2d(outchannel),
            nn.ReLU(),
            # nn.MaxPool2d(2),
            nn.Conv2d(outchannel,outchannel,kernel_size=3,stride=1,padding=1),
            nn.BatchNorm2d(outchannel)
        )
 
        # shortcut 部分
        # 由于存在维度不一致的情况 所以分情况
        self.shortcut = nn.Sequential()
        if stride != 1 or inchannel != outchannel:
            self.shortcut = nn.Sequential(
                # 卷积核为1 进行升降维
                # 注意跳变时 都是stride!=1的时候 也就是每次输出信道升维的时候
                nn.Conv2d(inchannel, outchannel, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(outchannel)
            )
 
    def forward(self,x):
        out1=self.block_conv(x)
        out2=self.shortcut(x)+out1
        out2=F.relu(out2) #F.relu()是函数调用，一般使用在foreward函数里。而nn.ReLU()是模块调用，一般在定义网络层的时候使用
        return out2
    
class Bottleneck(nn.Module):
    expansion = 1

    def __init__(self, in_planes, planes, stride=1, heads=heads, mhsa=False, resolution=None):
        super(Bottleneck, self).__init__()

        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        if not mhsa:
            self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1, stride=stride, bias=False)
        else:
            self.conv2 = nn.ModuleList()
            self.conv2.append(MHSA(planes, width=int(resolution[0]), height=int(resolution[1]), heads=heads))
            if stride == 2:
                self.conv2.append(nn.AvgPool2d(2, 2))
            self.conv2 = nn.Sequential(*self.conv2)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, self.expansion * planes, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(self.expansion * planes)

        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion*planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride),
                nn.BatchNorm2d(self.expansion*planes)
            )

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += self.shortcut(x)
        out = F.relu(out)
        return out
    
class ResNet(nn.Module):
    def __init__(self, block, num_blocks, num_classes=100, resolution=(32, 32), heads=heads):
        super(ResNet, self).__init__()
        self.in_planes = 64
        self.resolution = list(resolution)

        # self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=1, stride=1, padding=0) # 64

        self.layer1 = self._make_layer(block, 64, 2, stride=1)
        self.layer2 = self._make_layer(block, 128, 2, stride=2)
        self.layer3 = self._make_layer(block, 256, 2, stride=2)
        self.layer4 = self._make_layer(block, 512, 2, stride=2, heads=heads)

        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Sequential(
            nn.Dropout(0.3), # All architecture deeper than ResNet-200 dropout_rate: 0.2
            nn.Linear(512 * block.expansion, num_classes)
        )

    def _make_layer(self, block, planes, num_blocks, stride=1, heads=heads, mhsa=False):
        strides = [stride] + [1]*(num_blocks-1)
        layers = []
        for idx, stride in enumerate(strides):
            layers.append(block(self.in_planes, planes, stride, heads, mhsa, self.resolution))
            if stride == 2:
                self.resolution[0] /= 2
                self.resolution[1] /= 2
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x):
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.maxpool(out)

        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)

        out = self.avgpool(out)
        out = torch.flatten(out, 1)
        out = self.fc(out)
        return out


def ResNet50(num_classes=100, resolution=(32, 32), heads=heads):
    """Build the BoTNet-style ResNet-50 (stage depths 3-4-6-3)."""
    stage_depths = [3, 4, 6, 3]
    return ResNet(Bottleneck, stage_depths, num_classes=num_classes, resolution=resolution, heads=heads)

if __name__ == '__main__':

    # FIX: the test set must not go through the random training
    # augmentations (flips/rotation/crop) — evaluation only needs tensor
    # conversion + normalization, otherwise test accuracy is corrupted.
    eval_transforms = torchvision.transforms.Compose([
        torchvision.transforms.ToTensor(),
        torchvision.transforms.Normalize(mean, std)
    ])

    # Datasets. Raw strings: "\F", "\d", "\C", "\B" are invalid escape
    # sequences in normal string literals (SyntaxWarning on modern Python).
    # Training set
    train_dataset = torchvision.datasets.CIFAR100(r'D:\File\深度学习\大作业\data', train=True,
                                                  transform=transforms_fn, download=True)
    # Test set
    test_dataset = torchvision.datasets.CIFAR100(r'D:\File\深度学习\大作业\data', train=False,
                                                 transform=eval_transforms, download=True)
    train_loader = DataLoader(train_dataset, batch_size=128, shuffle=True, num_workers=4)
    test_loader = DataLoader(test_dataset, batch_size=128, shuffle=False, num_workers=4)

    # Network model
    model = ResNet50()
    model.to(device)
    print(model)

    # Loss function
    loss_fn = nn.CrossEntropyLoss()
    loss_fn.to(device)

    learning_rate = 0.01
    optimizer = torch.optim.SGD(params=model.parameters(), lr=learning_rate,
                                momentum=0.9, weight_decay=0.0001)

    train_acc_list = []
    train_loss_list = []
    test_acc_list = []
    test_loss_list = []
    epochs = 200

    max_test_acc = 0.0

    for epoch in range(epochs):
        print("-----第{}轮训练开始------".format(epoch + 1))
        train_loss = 0.0
        test_loss = 0.0
        train_sum, train_cor, test_sum, test_cor = 0, 0, 0, 0

        # ---- training ----
        model.train()
        for data, target in train_loader:
            data, target = data.to(device), target.to(device)

            # Gradients accumulate across backward() calls by default,
            # so they must be cleared at every step.
            optimizer.zero_grad()
            output = model(data)
            loss = loss_fn(output, target)
            loss.backward()
            optimizer.step()

            # Accumulate this epoch's training loss.
            train_loss += loss.item()

            # The column of the largest logit is the predicted class.
            _, predicted = torch.max(output.data, 1)
            train_cor += (predicted == target).sum().item()  # correct predictions
            train_sum += target.size(0)

        # ---- evaluation ----
        model.eval()
        # FIX: no_grad was commented out — without it the eval pass builds
        # autograd graphs, wasting time and memory.
        with torch.no_grad():
            for data, target in test_loader:
                data, target = data.to(device), target.to(device)

                output = model(data)
                loss = loss_fn(output, target)
                test_loss += loss.item()
                _, predicted = torch.max(output.data, 1)
                test_cor += (predicted == target).sum().item()
                test_sum += target.size(0)

        # FIX: average over the number of batches; previously divided by
        # the last enumerate() index (len - 1), an off-by-one.
        avg_train_loss = train_loss / len(train_loader)
        avg_test_loss = test_loss / len(test_loader)
        train_acc = 100 * train_cor / train_sum
        test_acc = 100 * test_cor / test_sum

        print("Train loss:{}   Train accuracy:{}%   Test loss:{}   Test accuracy:{}%".format(
            avg_train_loss, train_acc, avg_test_loss, test_acc))

        # Checkpoint whenever the test accuracy improves.
        # FIX: the filename previously formatted `epochs` (the constant
        # total), not the epoch that achieved the best accuracy.
        if test_acc > max_test_acc:
            max_test_acc = test_acc
            torch.save(model, r"D:\File\深度学习\大作业\CIFAR100_best_model_epoch{}.pth".format(epoch + 1))

        train_loss_list.append(avg_train_loss)
        train_acc_list.append(train_acc)
        test_acc_list.append(test_acc)
        test_loss_list.append(avg_test_loss)

    # Save the final network.
    torch.save(model, r"D:\File\深度学习\大作业\BoTnet_epoch{}.pth".format(epochs))

    # Plot train/test accuracy curves.
    plt.figure()
    plt.plot(range(epochs), train_acc_list, label='Train Accuracy')
    plt.plot(range(epochs), test_acc_list, label='Test Accuracy')
    plt.legend()
    plt.title('Train and Test Accuracy')
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy')
    plt.show()