from __future__ import print_function, division
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import argparse

from model.resnet import resnet18
from parser_utils.parse_args import get_config
from trainer import train_model
from logger.logger import create_logger
def parse_option():
    """Parse command-line flags and build the run configuration.

    Recognized flags:
        -c            path handed to get_config to build the configuration
        --local_rank  local rank for DistributedDataParallel (parsed here but
                      not read in this file — presumably consumed elsewhere;
                      TODO confirm)

    Unknown extra arguments are tolerated (parse_known_args) so external
    launchers can append their own flags.

    Returns:
        The config object produced by ``get_config``.
    """
    parser = argparse.ArgumentParser('Swin Transformer training and evaluation script', add_help=False)
    parser.add_argument("-c", help="add config ", default=None)
    # `required=False` is argparse's default and was dropped as redundant.
    parser.add_argument("--local_rank", type=int, help='local rank for DistributedDataParallel')

    # Bind the parsed namespace to its own name instead of reusing `config`
    # for both the argparse result and the final configuration object.
    args, _unparsed = parser.parse_known_args()

    return get_config(args.c)

def main(config):
    """Fine-tune a two-class ResNet-18 classifier driven by *config*.

    Builds the model, loss, SGD optimizer and step-decay LR scheduler, then
    delegates the training loop to ``train_model``.

    Args:
        config: configuration object providing ``lr``, ``momentum``,
            ``step_size`` and ``gamma`` (plus whatever ``create_logger``
            and ``train_model`` read from it).

    Returns:
        The trained model returned by ``train_model``.
    """
    logger = create_logger(config)

    # Prefer the first CUDA device when available, fall back to CPU.
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # Start from a ResNet-18 backbone and swap its classification head for a
    # fresh 2-way linear layer. For more classes this would generalize to
    # nn.Linear(in_features, len(class_names)).
    model = resnet18(progress=True)
    model.fc = nn.Linear(model.fc.in_features, 2)
    model = model.to(device)

    loss_fn = nn.CrossEntropyLoss()

    # Every parameter is optimized; lr and momentum come from the config.
    optimizer = optim.SGD(model.parameters(), config.lr, config.momentum)

    # Step decay: multiply the LR by config.gamma every config.step_size epochs.
    scheduler = lr_scheduler.StepLR(optimizer, config.step_size, config.gamma)

    model = train_model(model, loss_fn, optimizer, scheduler, config, logger)
    return model

if __name__ == "__main__":
    args = parse_option()
    # NOTE(review): a second create_logger(args) call was removed here —
    # main() already builds a logger from the same config (duplicate logger
    # setup risks doubled handlers/output; confirm create_logger is idempotent
    # if it must be called twice).
    main(args)