
import torch
from torch import nn
from torch.nn import functional as F
from torchvision.models import resnext101_32x8d
from torch.utils.data import DataLoader
from torch import optim
from dataSet import MyDataset3


class Flatten(nn.Module):
    """Flatten every non-batch dimension: [b, 1, 1, n] -> [b, n]."""

    def __init__(self):
        super(Flatten, self).__init__()

    def forward(self, x):
        # Multiply out all dimensions after the batch axis, then reshape so
        # each sample becomes a single flat vector.
        flat_dim = 1
        for d in x.shape[1:]:
            flat_dim *= d
        return x.view(-1, flat_dim)


def main():
    """Train a ResNeXt101-based 360-way classifier.

    Trains on all images (the competition's validation set overlapped the
    training set, so no held-out split is used), logs per-epoch cross-entropy
    and the MSE between the argmax class index and the integer label
    (presumably the label encodes an angle in degrees — TODO confirm against
    MyDataset3), and checkpoints the model by best train MSE.
    """
    # Fall back to CPU so the script still runs without a GPU.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    batchsz = 16

    # Train on every image; a train/test split was dropped because the
    # final-round validation data intersected the training data.
    trainDataSet = MyDataset3("data\\all_img\\", "data\\all_label\\")
    print(len(trainDataSet))
    trainDataSet = DataLoader(trainDataSet, batch_size=batchsz, shuffle=True, num_workers=4)

    # Pretrained backbone minus its original fc head, then a flatten and a
    # fresh 360-way linear classifier.
    trained_model = resnext101_32x8d(pretrained=True)
    model = nn.Sequential(*list(trained_model.children())[:-1],     # [b, 3, H, W]    -> [b, 2048, 1, 1]
                          Flatten(),                                # [b, 2048, 1, 1] -> [b, 2048]
                          nn.Linear(2048, 360)                      # [b, 2048]       -> [b, 360]
                          ).to(device)
    print(model)
    n_params = sum(p.numel() for p in model.parameters())
    print('parameters size:', n_params)

    # Optimizer. Start directly at 1e-4 (the original constructed Adam with
    # lr=0 and overwrote it at epoch 0 — same effective schedule).
    optimizer = optim.Adam(model.parameters(), lr=1e-4, betas=(0.9, 0.999), eps=1e-09)
    # Losses: CE drives training, MSE is monitoring/model-selection only.
    criteon = nn.CrossEntropyLoss().to(device)
    mse = nn.MSELoss()

    # Step-wise learning-rate decay: epoch -> new learning rate.
    lr_milestones = {2000: 1e-5, 3000: 1e-6, 4000: 1e-7}

    bestMse = 99999     # lowest epoch MSE seen so far; used to save the best model
    for epoch in range(0, 10000):
        if epoch in lr_milestones:
            for group in optimizer.param_groups:
                group['lr'] = lr_milestones[epoch]

        model.train()

        trainStep = 0           # number of batches this epoch
        trainTotalCELoss = 0.   # summed CrossEntropyLoss over the epoch
        trainTotalMSELoss = 0.  # summed MSE over the epoch

        # Iterate over the training batches.
        for batchidx, (x, label) in enumerate(trainDataSet):
            # Move the batch to the device and run the forward pass.
            x, label = x.to(device), label.to(device)
            logits = model(x)

            # CrossEntropyLoss expects integer class targets.
            label = label.long()
            loss = criteon(logits, label)

            # Backprop + parameter update.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            trainStep += 1
            trainTotalCELoss += loss.item()

            # Monitoring only (argmax is not differentiable). Cast both sides
            # to float: nn.MSELoss raises on integer (Long) inputs, which the
            # original code fed it directly.
            with torch.no_grad():
                pred = torch.argmax(logits, dim=-1).float()
                mseloss = mse(pred, label.float())
            trainTotalMSELoss += mseloss.item()

        trainCELoss = trainTotalCELoss / trainStep
        trainMSELoss = trainTotalMSELoss / trainStep

        model.eval()  # NOTE(review): no eval loop follows; kept for parity with original

        print("epoch: ", epoch, "  trainCELoss: ", trainCELoss, "trainMSELoss: ", trainMSELoss)
        # Checkpointing: save the best-by-MSE model after a 30-epoch warm-up,
        # plus an unconditional snapshot every 10 epochs.
        if epoch > 30 and bestMse >= trainMSELoss:
            torch.save(model, ".\\savedModels\\model_epoch_{}_right_{}.pkl".format(epoch, trainMSELoss))
            bestMse = trainMSELoss
        if epoch % 10 == 0:
            torch.save(model, ".\\savedModels\\n_model_epoch_{}_right_{}.pkl".format(epoch, trainMSELoss))


# Script entry point: start training when executed directly.
if __name__ == '__main__':
    main()