import torch
import torch.nn as nn
from torchvision import transforms, datasets
from torch.utils.data.dataloader import DataLoader
import torch.optim as optim
import torch.nn.functional as F


class my_net(nn.Module):
    """Small CNN for 64x64 RGB images with 3 output classes.

    Architecture: a two-conv backbone, three parallel 1x1 "FPN-style"
    convolutions whose outputs are summed, then two fully connected layers.

    Input:  (B, 3, 64, 64) float tensor.
    Output: (B, 3) class logits (un-normalized; pair with CrossEntropyLoss).
    """

    def __init__(self):
        super(my_net, self).__init__()
        # Backbone feature extractor:
        #   conv 3x3:   64x64x3  -> 62x62x6    ((64-3)/1 + 1 = 62)
        #   maxpool 2:  62x62x6  -> 31x31x6    (62 / 2 = 31)
        #   conv 7x7:   31x31x6  -> 25x25x24   ((31-7)/1 + 1 = 25)
        self.backbone = nn.Sequential(
            nn.Conv2d(in_channels=3, out_channels=6, kernel_size=3),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Conv2d(in_channels=6, out_channels=24, kernel_size=7),
            nn.ReLU(inplace=True),
        )

        # Three parallel 1x1 convolutions for feature fusion ("FPN" layers);
        # 1x1 kernels leave the 25x25 spatial size unchanged.
        self.conv1 = nn.Conv2d(in_channels=24, out_channels=24, kernel_size=1)
        self.conv2 = nn.Conv2d(in_channels=24, out_channels=24, kernel_size=1)
        self.conv3 = nn.Conv2d(in_channels=24, out_channels=24, kernel_size=1)

        # Fully connected head: 25 * 25 * 24 = 15000 flattened features.
        # (An earlier comment claimed 8112 — that was wrong; the layer size
        # below is the authoritative number.)
        self.fc1 = nn.Linear(15000, 120)
        self.fc2 = nn.Linear(120, 3)

    def forward(self, x):
        """Run a forward pass; see the class docstring for shapes."""
        # Backbone: (B, 3, 64, 64) -> (B, 24, 25, 25)
        c = self.backbone(x)

        # Cumulative fusion of the three 1x1 conv outputs.
        # NOTE: the original code wrapped p1/p2 in
        # F.interpolate(..., scale_factor=1, mode="nearest"), which is an
        # identity operation (no resampling at scale 1); it is dropped here
        # without changing the result.
        p1 = self.conv1(c)
        p2 = self.conv2(c) + p1
        p3 = self.conv3(c) + p2

        # Flatten per-sample: (B, 24, 25, 25) -> (B, 15000).
        # torch.flatten keeps the batch dimension explicit, unlike
        # view(-1, 15000) which silently mis-reshapes on unexpected input.
        p3 = torch.flatten(p3, 1)

        # Fully connected head: (B, 15000) -> (B, 120) -> (B, 3).
        h = self.fc1(p3)
        output = self.fc2(h)
        return output


def warm_up_lr(epoch):
    """Multiplicative LR factor for ``LambdaLR``: linear warm-up over 10 epochs.

    ``torch.optim.lr_scheduler.LambdaLR`` multiplies the optimizer's *base*
    learning rate by the value returned here, so this function must return a
    scale factor, not an absolute learning rate.  The original returned the
    lr itself (0.001), which would have collapsed the effective lr to
    ``base_lr * 0.001 = 1e-6``.

    Args:
        epoch: zero-based epoch index.

    Returns:
        Factor ramping linearly from 0.1 (epoch 0) to 1.0 (epoch 9),
        then constant 1.0.
    """
    return min(epoch + 1, 10) / 10


if __name__ == "__main__":
    # Use the GPU when available instead of hard-coding "cuda".
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Preprocessing: to tensor, then normalize each RGB channel to [-1, 1].
    # Named `transform` (singular) so it does not shadow the imported
    # `torchvision.transforms` module.
    transform = transforms.Compose(
        [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
    )

    # One training set and two independent test sets.
    trainset = datasets.ImageFolder(root="dataset/train", transform=transform)
    testset1 = datasets.ImageFolder(root="dataset/test", transform=transform)
    testset2 = datasets.ImageFolder(root="dataset/test2", transform=transform)

    print(f"标签对应的ID：{trainset.class_to_idx}")

    # Mini-batch size.
    BATCH_SIZE = 512

    train_loader = DataLoader(
        trainset, batch_size=BATCH_SIZE, shuffle=True, num_workers=8, pin_memory=True
    )
    # Evaluation order does not matter, so the test loaders skip shuffling.
    test_loader1 = DataLoader(
        testset1, batch_size=BATCH_SIZE, shuffle=False, num_workers=8, pin_memory=True
    )
    test_loader2 = DataLoader(
        testset2, batch_size=BATCH_SIZE, shuffle=False, num_workers=8, pin_memory=True
    )

    net = my_net().to(device)
    criterion = nn.CrossEntropyLoss()  # cross-entropy loss over class logits
    optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
    # scheduler = LambdaLR(optimizer, lr_lambda=warm_up_lr)

    EPOCH = 700
    PATH = "pth/model4.pth"
    # Extra checkpoints are saved only when test-set-1 accuracy (%) exceeds
    # the best seen so far; start the bar at 90%.  The original reset this
    # threshold to 90 on every evaluation and then stored a raw correct
    # *count* in it, so the "best model" tracking never worked.
    best_acc = 90.0

    for epoch in range(EPOCH):
        net.train()
        train_loss = 0.0  # accumulated loss over the epoch
        for datas, labels in train_loader:
            datas, labels = datas.to(device), labels.to(device)
            # Standard step: zero grads, forward, loss, backward, update.
            optimizer.zero_grad()
            outputs = net(datas)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            train_loss += loss.item()

        # Evaluate and checkpoint every 10 epochs.  The original ran this
        # block once per *batch* (inside the training loop) and re-saved and
        # re-loaded the model each time onto the CPU while the test batches
        # stayed wherever the loader left them; evaluating `net` directly
        # after the epoch is both correct and far cheaper.
        if (epoch + 1) % 10 == 0:
            torch.save(net.state_dict(), PATH)

            net.eval()
            correct1 = correct2 = 0
            total1 = total2 = 0
            with torch.no_grad():
                # Test set 1
                for datas1, labels1 in test_loader1:
                    datas1, labels1 = datas1.to(device), labels1.to(device)
                    _, predicted1 = torch.max(net(datas1), dim=1)
                    total1 += labels1.size(0)
                    correct1 += (predicted1 == labels1).sum().item()
                # Test set 2
                for datas2, labels2 in test_loader2:
                    datas2, labels2 = datas2.to(device), labels2.to(device)
                    _, predicted2 = torch.max(net(datas2), dim=1)
                    total2 += labels2.size(0)
                    correct2 += (predicted2 == labels2).sum().item()

            acc1 = correct1 / total1 * 100
            acc2 = correct2 / total2 * 100
            print(
                f"epoch:{epoch + 1}\taverage_loss:{train_loss / len(train_loader.dataset)}\t"
                f"correct1:{acc1}%\tcorrect2:{acc2}%"
            )
            # Keep an extra copy whenever test-set-1 accuracy hits a new high.
            if acc1 > best_acc:
                best_acc = acc1
                MAX_PATH = "pth/model_" + str(acc1) + ".pth"
                print(f"save  {MAX_PATH}")
                torch.save(net.state_dict(), MAX_PATH)
        # scheduler.step()
