import os
import sys
import logging
import torch
import torch.optim

from torch import nn
from torch.utils.data import DataLoader, random_split
from torchvision import models

import torch.onnx

from model.utils.args import parser_init
from model.utils.unzip import unzip_file
from model.utils.search_file import walk_file

from model.utils.callback.accuracy_monitor_pytorch import accuracy

from model.utils.dataset.data_generator_pytorch import DatasetGenerator

# File extension used for pretrain/checkpoint filenames built in main().
# NOTE: the __main__ block below rebinds this to 'ckpt' before calling main().
CHECK_FILE_TYPE = 'pth'


def model_train(net, train_dts, test_dts, optimizer, loss_f, device, num_epoch, save_dir):
    """Train ``net`` for ``num_epoch`` epochs, checkpointing the best model.

    Args:
        net: the network module to train (moved to ``device`` in place).
        train_dts: iterable of ``(x, y)`` training batches (e.g. DataLoader).
        test_dts: iterable of ``(x, y)`` validation batches.
        optimizer: optimizer already bound to ``net``'s trainable parameters.
        loss_f: loss callable taking ``(y_hat, y)``.
        device: torch.device to run on.
        num_epoch: number of training epochs.
        save_dir: directory (with trailing separator) for checkpoint files.

    Side effects:
        Saves ``best.pth`` (locally and in ``save_dir``) whenever validation
        accuracy improves, and ``auto-save.pth`` every 10th epoch otherwise.
    """
    max_valid_acc = 0
    save_file_pth = save_dir + 'auto-save.pth'
    best_file_pth = save_dir + 'best.pth'
    local_best_pt = './best.pth'

    # nn.Module.to() moves parameters in place, so net_d and net are the
    # same object; saving net.state_dict() below saves the trained weights.
    net_d = net.to(device)
    for epoch in range(num_epoch):
        # Initialise per epoch so an empty loader cannot leave loss_sum
        # unbound when it is read after the loop (fixes UnboundLocalError).
        loss_sum = 0.0
        for x, y in train_dts:
            x = x.to(device)
            y = y.to(device)
            y_hat = net_d(x)
            los = loss_f(y_hat, y)
            optimizer.zero_grad()
            los.backward()
            optimizer.step()
            # Keep only the last batch's loss for the progress report.
            loss_sum = los.cpu().item()
        # Switch to eval mode for accuracy measurement (disables dropout etc.);
        # NOTE(review): assumes accuracy() handles torch.no_grad() itself — confirm.
        net_d.eval()
        acc_train = accuracy(net_d, train_dts, device)
        acc_eval = accuracy(net_d, test_dts, device)
        net_d.train()

        print('epoch:[{}/{}] Loss:{} Train Accuracy:{} Valid Accuracy:{}'.format(
            epoch, num_epoch, loss_sum, acc_train, acc_eval
        ))

        if acc_eval > max_valid_acc:
            # New best validation accuracy: snapshot to both locations.
            max_valid_acc = acc_eval
            torch.save(net.state_dict(), local_best_pt)
            torch.save(net.state_dict(), best_file_pth)
            logging.debug('save net state to {}'.format(best_file_pth))
        elif not epoch % 10 and epoch:
            # No improvement, but auto-save every 10th epoch (skipping epoch 0).
            torch.save(net.state_dict(), save_file_pth)
            logging.debug('save net state to {}'.format(save_file_pth))


def load_network(network_type, class_num, input_size, data_dir, retrain_path):
    """Build a ResNet backbone with a custom ``class_num``-way head.

    Args:
        network_type: 'resnet18' or 'resnet50'.
        class_num: number of output classes for the new fc head.
        input_size: kept for interface compatibility; not used here.
        data_dir: directory searched for '<network_type>-pretrain.pth'.
        retrain_path: path to a previously trained state dict; if it exists
            it is loaded after the head is replaced (and pretrain is skipped).

    Returns:
        The constructed torch.nn.Module.

    Raises:
        ValueError: if ``network_type`` is not a supported backbone.
    """
    logging.info('network init:{}'.format(network_type))
    # The pretrain file is expected as '<network_type>-pretrain.pth'
    # (e.g. 'resnet18-pretrain.pth') in the data dir.
    pretrain_file = network_type + '-pretrain.pth'
    if network_type == 'resnet18':
        network = models.resnet18()
    elif network_type == 'resnet50':
        network = models.resnet50()
    else:
        # Fail fast with a clear message instead of the confusing
        # NameError the unbound 'network' used to produce.
        raise ValueError('unsupported network type: {}'.format(network_type))

    # Load backbone pretrain weights only when no retrain checkpoint exists
    # (the retrain checkpoint supersedes them and is loaded below).
    if not os.path.exists(retrain_path) and os.path.exists(data_dir + pretrain_file):
        network.load_state_dict(torch.load(data_dir + pretrain_file))
        logging.info('pretrain from {}'.format(pretrain_file))

    # Optionally freeze the backbone so only the new fc head trains.
    only_train_fc = False
    if only_train_fc:
        for param in network.parameters():
            param.requires_grad_(False)

    # Replace the classification head with a deeper MLP sized for class_num.
    channel_in = network.fc.in_features

    # NOTE(review): nn.Softmax here is questionable when training with
    # CrossEntropyLoss (which applies log_softmax internally) — the double
    # softmax weakens gradients. Kept for behavior compatibility; confirm.
    network.fc = nn.Sequential(
        nn.Linear(channel_in, 1024),
        nn.ReLU(),
        nn.Linear(1024, 512),
        nn.ReLU(),
        nn.Dropout(p=0.5),
        nn.Linear(512, class_num),
        nn.Softmax(dim=1),
    )
    network.fc.requires_grad_(True)

    # Resume from the retrain checkpoint when available.
    if os.path.exists(retrain_path):
        network.load_state_dict(torch.load(retrain_path))
        logging.info('load param from {}'.format(retrain_path))
    else:
        logging.info('param dont exist in {}'.format(retrain_path))
    return network


def load_data(data_path, label_path, resize, batch_size, split=0.75, num_workers=1):
    """Build shuffled train/validation DataLoaders from the image dataset.

    Args:
        data_path: directory containing the images.
        label_path: directory/file containing the labels.
        resize: target image size passed to the dataset generator.
        batch_size: batch size for both loaders.
        split: fraction of samples used for training (default 0.75,
            matching the previous hard-coded behavior).
        num_workers: DataLoader worker processes (default 1, as before).

    Returns:
        Tuple ``(train_loader, valid_loader)``.
    """
    ds_gen = DatasetGenerator(data_path=data_path,
                              label_path=label_path,
                              resize=resize
                              )
    data_len = len(ds_gen)
    train_data_len = int(split * data_len)
    # NOTE: the split is random each run; pass a seeded generator to
    # random_split for reproducible partitions if needed.
    train_data, valid_data = random_split(dataset=ds_gen,
                                          lengths=[train_data_len, data_len - train_data_len],
                                          # generator=torch.Generator().manual_seed(0)
                                          )
    train_ds = DataLoader(dataset=train_data, batch_size=batch_size, shuffle=True, num_workers=num_workers)
    valid_ds = DataLoader(dataset=valid_data, batch_size=batch_size, shuffle=True, num_workers=num_workers)

    return train_ds, valid_ds


def main(args):
    """End-to-end pipeline: prepare data, train the network, export ONNX.

    Reads input/output locations and hyperparameters from ``args``
    (produced by parser_init), trains via model_train, saves the final
    state dict, and exports the final and best models to ONNX.
    """
    # logging init
    logging.basicConfig(level=logging.DEBUG,
                        format=r'[ResNet Train Log]%(asctime)-15s - %(levelname)s - %(message)s',
                        )

    # input dir (paths are concatenated; assumes args.data_url ends with a
    # separator — TODO confirm against the argument parser's conventions)
    data_dir = args.data_url
    image_dir = data_dir + args.image_dir
    label_dir = data_dir + args.label_dir

    # output dir
    save_dir = args.save_url + args.project + '/'
    if os.path.exists(save_dir):
        pass
    else:
        # NOTE(review): os.mkdir fails if args.save_url itself is missing;
        # os.makedirs would be more robust — confirm intended behavior.
        os.mkdir(save_dir)
        logging.info('create save direction:{}'.format(save_dir))

    # load network (retrain path is '<data_dir><pretrain>.<CHECK_FILE_TYPE>')
    net = load_network(args.net, args.class_num, args.input_size,
                       data_dir, data_dir + args.pretrain+ '.' +CHECK_FILE_TYPE)

    # Final state-dict destination, named after the project.
    PARAM_PATH = save_dir + args.project + '.' +CHECK_FILE_TYPE
    logging.info('PARAM_PATH:{}'.format(PARAM_PATH))

    # unzip data (optional): extract the archive and retarget image_dir
    # to the extraction directory; label_dir is NOT retargeted — TODO confirm.
    if args.unzip:
        zip_path = data_dir + args.unzip
        assert os.path.exists(zip_path), "file {} don't exist.".format(zip_path)
        unzip_file(zip_path, args.unzip_target)
        image_dir = args.unzip_target + args.image_dir
        walk_file(args.unzip_target)

    assert os.path.exists(image_dir), 'the path of image is not exist.'
    assert os.path.exists(label_dir), 'the path of label is not exist'

    t_dt, v_dt = load_data(data_path=image_dir,
                           label_path=label_dir,
                           resize=args.input_size,
                           batch_size=args.batch)

    # Pin GPU selection before querying availability.
    os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    device = "cuda:0" if torch.cuda.is_available() else "cpu"
    logging.debug('run in {}'.format(device))

    # Optimizer selection; only parameters with requires_grad are trained.
    # NOTE(review): 'optim' is unbound (NameError below) if args.optim is
    # neither 'adam' nor 'momentum' — confirm the parser restricts choices.
    if args.optim == 'adam':
        optim = torch.optim.Adam(filter(lambda p: p.requires_grad, net.parameters()),
                                 lr=args.lr, betas=[args.b1, args.b2], weight_decay=args.wd)
    elif args.optim == 'momentum':
        optim = torch.optim.SGD(filter(lambda p: p.requires_grad, net.parameters()),
                                lr=args.lr, momentum=args.mm, weight_decay=args.wd)

    loss = torch.nn.CrossEntropyLoss()

    logging.info('train begin:epoch {} learning rate {} batch size {}'.format(args.epochs, args.lr, args.batch))

    model_train(net, t_dt, v_dt, optim, loss, torch.device(device), args.epochs, save_dir)

    # Save the final (last-epoch) weights regardless of best accuracy.
    logging.debug('save net state to {}'.format(PARAM_PATH))
    torch.save(net.state_dict(), PARAM_PATH)
    logging.debug('train complete')

    # Export the final model to ONNX using a dummy input of the trained size.
    net.eval()
    export_input = torch.randn(1, 3, args.input_size, args.input_size, requires_grad=True).to(device)

    export_path = save_dir + args.export + '.onnx'
    torch.onnx.export(
        net,
        export_input,
        export_path
    )

    # Also export the best-accuracy checkpoint if training produced one.
    # NOTE(review): existence is checked at './best.pth' but the weights are
    # loaded from save_dir — model_train writes both, but confirm they stay
    # in sync if the local copy is removed.
    if os.path.exists('./best.pth'):
        net.load_state_dict(torch.load(save_dir + 'best.pth'))
        export_path = save_dir + args.export + '-best.onnx'
        torch.onnx.export(
            net,
            export_input,
            export_path
        )
    sys.exit(0)


if __name__ == '__main__':
    parser = parser_init()
    # Parse known command-line arguments; unrecognized ones are ignored.
    args, unknown_args = parser.parse_known_args()
    # Override the module-level default ('pth') so checkpoint/pretrain
    # filenames built in main() use the '.ckpt' extension.
    CHECK_FILE_TYPE = 'ckpt'
    main(args)
