from model.trainer import Trainer
from model.data_helper import *
import model.args as args
import os
from tensorboardX.summary import scalar
import utils
import torchvision
import torch.nn as nn
import torch
import time
# HOW: could the YAML contents be merged with values defined in the live code?
# HOW: accessing config.NUM_EPOCHES directly makes typos easy, and because the
# attribute name only exists inside a string there is no IDE auto-completion.

# Parse CLI args + YAML once at import time; `configs` is read by the class
# below (OUTPUT_DIM) and by the training entry point (NUM_EPOCHES).
configs = args.parse_args_and_yaml()

# print(resnet)

# https://stackoverflow.com/questions/62629114/how-to-modify-resnet-50-with-4-channels-as-input-using-pre-trained-weights-in-py

# Module-level device selection, used by ModifiedResNet.forward.
device = "cuda" if torch.cuda.is_available() else "cpu"

class ModifiedResNet(nn.Module):
    """ResNet-18 backbone with the final 1000-class FC head removed and
    replaced by a fresh ``nn.Linear(512, configs.OUTPUT_DIM)`` head.

    The module also owns its loss function and optimizer and moves itself
    to the globally selected ``device`` on construction.
    """

    def __init__(self):
        super(ModifiedResNet, self).__init__()
        # Strip the last child (the original FC classifier) off resnet18.
        self._resnet = torchvision.models.resnet18(pretrained=False)
        self.resnet_layer = nn.Sequential(*list(self._resnet.children())[:-1])
        # New fully-connected head sized for this task.
        self.Linear_layer = nn.Linear(512, configs.OUTPUT_DIM)

        # Fixed optimizer.
        # XXX: this works, but a `train_one_batch`-style hook that receives
        # these as parameters would be cleaner.
        # FIX: nn.CrossEntropyLoss was previously stored as the class itself,
        # not an instance, so `self.loss_fn(pred, target)` would have built a
        # loss object instead of computing a loss. Instantiate it.
        self.loss_fn = nn.CrossEntropyLoss()
        self.optimizer = torch.optim.SGD(self.parameters(), lr=1e-3)

        # Move parameters to the selected device. Module.to() mutates the
        # module in place, so rebinding `self` (as the old code did) is
        # unnecessary.
        print("Using {} device".format(device))
        self.to(device)

        # Flipped to True by load_checkpoint (or external training code);
        # guards inference().
        self.trained = False

    def forward(self, x):
        """Run the backbone, flatten, and apply the new FC head."""
        x = x.to(device)
        out = self.resnet_layer(x)
        # view(batch, -1): flatten the (N, 512, 1, 1) backbone output to
        # (N, 512) for the linear layer.
        out = out.view(out.size(0), -1)
        out = self.Linear_layer(out)

        return out

    def no_bn(self):
        """Put every BatchNorm submodule into eval mode (freeze running stats)."""
        def erase_bn(submodule):
            # FIX: the old check was `find("BatchNorm") == 1`, which is never
            # true ("BatchNorm2d".find("BatchNorm") == 0), so no module was
            # ever switched. find() returns -1 when absent.
            if submodule.__class__.__name__.find("BatchNorm") != -1:
                submodule.eval()
                # submodule.detach()

        self.apply(erase_bn)

    def inference(self, inputs):
        """Forward pass gated on the model having been trained or loaded."""
        assert self.trained == True, "train or load"
        # FIX: previously called the module-level global `model`, which broke
        # for any instance not bound to that name.
        return self(inputs)

    def load_checkpoint(self, checkpoint):
        """Load weights from `checkpoint`, switch to eval mode, mark trained."""
        # FIX: operate on `self`, not the module-level global `model`.
        self.load_state_dict(torch.load(checkpoint))
        self.eval()
        self.trained = True
        return


# class ModifiedResNet(nn.Module):
#     def __init__(self):
#         super(ModifiedResNet, self).__init__()
#         # self.net = torchvision.models.AlexNet(configs.OUTPUT_DIM)
#         self.net = torchvision.models.mobilenet.MobileNetV2(
#             num_classes=configs.OUTPUT_DIM)

#         # Fixed Optimizer
#         # XXX 虽然这是可行的，但是应该有一个函数类似于`train_one_batch`，接收所有参数化的内容
#         self.loss_fn = nn.CrossEntropyLoss
#         self.optimizer = torch.optim.SGD(self.parameters(), lr=1e-4)

#         # set to device
#         device = "cuda" if torch.cuda.is_available() else "cpu"
#         print("Using {} device".format(device))
#         self = self.to(device)

#         self.trained = False

#     def forward(self, x):
#         x = x.to(device)
#         out = self.net(x)

#         return out

#     def no_bn(self):
#         # !!! Erase BatchNorm!!!
#         def erase_bn(submodule):
#             if submodule.__class__.__name__.find("BatchNorm") == 1:
#                 submodule.eval()
#                 # submodule.detach()

#         self.apply(erase_bn)

#     def load_checkpoint(self, checkpoint):
#         model.load_state_dict(torch.load(checkpoint))
#         model.eval()
#         self.trained = True
#         return

if __name__ == "__main__":
    model = ModifiedResNet()
    # NOTE(review): `configs` was already populated by the module-level
    # args.parse_args_and_yaml() call; the duplicate call whose result was
    # discarded has been removed so the CLI/YAML is only parsed once.
    # HOW: turn the config into a proper object?
    # TODO: convert the relative data path into an absolute one via config?
    train_dataloader, val_dataloader, test_dataloader, idx_to_cls_dict = ImageFactory.get_iterable_image_dataloaders(
        os.path.abspath("../data/expert/label.txt"),
        batch_size=32,
        train_split_ratio=0.9,
        val_split_ratio=0.1)
    Trainer().train(model,
                    train_dataloader=train_dataloader,
                    val_dataloader=val_dataloader,
                    test_dataloader=test_dataloader,
                    num_epoches=configs.NUM_EPOCHES,
                    loss_function=nn.CrossEntropyLoss())
