import os
import sys
import logging
import mindspore
import mindspore.nn as nn
from mindspore import dtype, Model, export, Tensor
import mindspore.dataset as ds
from mindspore.dataset.vision import Inter
import mindspore.dataset.vision.c_transforms as cv_trans
import mindspore.dataset.transforms.c_transforms as c_trans
from mindspore.train.callback import SummaryCollector, ModelCheckpoint, CheckpointConfig, LossMonitor

import numpy as np

from model.res_net import ResNet18, ResNet50
from model.utils.args import parser_init
from model.utils.unzip import unzip_file
from model.utils.search_file import walk_file

from model.utils.callback.accuracy_monitor_mindspore import AccuracyMonitor

from model.utils.dataset.data_preprocess import random_split
from model.utils.dataset.data_generator_mindspore import DatasetGenerator


CHECK_FILE_TYPE = 'ckpt'


def load_network(network_type, class_num, input_size, retrain_path):
    """Build the requested ResNet and optionally load pretrained parameters.

    Args:
        network_type (str): one of 'resnet18' or 'resnet50'.
        class_num (int): number of output classes.
        input_size (int): input image size the network is built for.
        retrain_path (str): checkpoint path; parameters are loaded only if
            this file exists, otherwise training starts from scratch.

    Returns:
        The constructed network, with checkpoint parameters loaded when available.

    Raises:
        ValueError: if ``network_type`` is not a supported name.
    """
    if network_type == 'resnet18':
        network = ResNet18(class_num=class_num, input_size=input_size)
    elif network_type == 'resnet50':
        network = ResNet50(class_num=class_num, input_size=input_size)
    else:
        # Bug fix: the original fell through and crashed later with an
        # UnboundLocalError; fail fast with a clear message instead.
        raise ValueError('Unsupported network type: {}'.format(network_type))
    logging.info('Network init:{}'.format(network_type))

    if os.path.exists(retrain_path):
        param_dict = mindspore.load_checkpoint(retrain_path)
        mindspore.load_param_into_net(network, param_dict)
        logging.info('Load retrain params from {} into net'.format(retrain_path))
    else:
        # Bug fix: the original message had no {} placeholder, so the
        # .format(retrain_path) argument was silently dropped.
        logging.info("Retrain params {} don't exist.".format(retrain_path))
    return network


def load_data(data_path, label_path, resize, batch_size):
    """Load, split, preprocess, and batch the dataset.

    Args:
        data_path (str): directory containing the images.
        label_path (str): path to the label file/directory.
        resize (int): target square image size after resizing.
        batch_size (int): batch size for both train and validation datasets.

    Returns:
        tuple: (train_dataset, valid_dataset), both batched MindSpore datasets
        producing "image" (float32, CHW) and "label" (int32) columns.
    """
    # data load
    ds_gen = DatasetGenerator(data_path=data_path,
                              label_path=label_path,
                              # resize=resize
                              )

    # split dataset to train dataset and valid dataset (75% / 25%)
    train_gen, valid_gen = random_split(ds_gen, (0.75, 0.25))
    train_ds = ds.GeneratorDataset(train_gen, ["image", "label"], shuffle=True)
    valid_ds = ds.GeneratorDataset(valid_gen, ["image", "label"], shuffle=True)

    logging.info('Dataset init.')

    # preprocess pipeline
    # train: 1.resize  2.random horizontal flip  3.random rotation
    #        4.cast to float32  5.channels (h, w, c) -> (c, h, w)
    # valid: same, but WITHOUT the random augmentation steps — the original
    #        applied random flip/rotation to validation data too, which makes
    #        validation metrics noisy and non-reproducible.
    resize_op = cv_trans.Resize(size=(resize, resize), interpolation=Inter.BICUBIC)
    random_hor_flip_op = cv_trans.RandomHorizontalFlip()
    random_rotation_op = cv_trans.RandomRotation(10, resample=Inter.BICUBIC)
    type_cast_op_image = c_trans.TypeCast(dtype.float32)
    type_cast_op_label = c_trans.TypeCast(dtype.int32)
    HWC2CHW = cv_trans.HWC2CHW()

    train_operations = [resize_op,
                        random_hor_flip_op,
                        random_rotation_op,
                        type_cast_op_image,
                        HWC2CHW]
    valid_operations = [resize_op,
                        type_cast_op_image,
                        HWC2CHW]

    train_ds = train_ds.map(operations=train_operations,
                            input_columns="image")
    train_ds = train_ds.map(operations=type_cast_op_label, input_columns="label")
    train_ds = train_ds.batch(batch_size)

    valid_ds = valid_ds.map(operations=valid_operations,
                            input_columns="image")
    valid_ds = valid_ds.map(operations=type_cast_op_label, input_columns="label")
    valid_ds = valid_ds.batch(batch_size)

    logging.info('Dataset preprocess completed.')

    return train_ds, valid_ds


def main(args):
    """Train a ResNet classifier, save the checkpoint, and export AIR/ONNX models.

    Args:
        args: parsed command-line namespace (see ``parser_init``); expected
            attributes include data_url, save_url, project, net, class_num,
            input_size, pretrain, unzip, unzip_target, image_dir, label_dir,
            batch, optim, lr, b1, b2, wd, mm, callback, epochs, export.

    Raises:
        ValueError: for an unsupported ``args.optim`` value.
        AssertionError: when the image/label paths do not exist.

    Note:
        Exits the process with status 0 on success.
    """
    # logging init
    logging.basicConfig(level=logging.DEBUG,
                        format=r'[ResNet Train Log]%(asctime)-15s - %(levelname)s - %(message)s',
                        )

    # input
    data_dir = args.data_url
    image_dir = data_dir + args.image_dir
    label_dir = data_dir + args.label_dir

    # output — makedirs(exist_ok=True) replaces the mkdir + existence check:
    # it also creates missing parent directories, which os.mkdir could not.
    save_dir = args.save_url + args.project + '/'
    if not os.path.exists(save_dir):
        os.makedirs(save_dir, exist_ok=True)
        logging.info('Create save direction:{}'.format(save_dir))

    # load network (optionally warm-started from a pretrain checkpoint)
    net = load_network(args.net,
                       args.class_num,
                       args.input_size,
                       data_dir + args.pretrain + '.' + CHECK_FILE_TYPE)

    PARAM_PATH = save_dir + args.project + '.' + CHECK_FILE_TYPE
    logging.info('PARAM_PATH:{}'.format(PARAM_PATH))

    # unzip data if an archive name was supplied
    if args.unzip:
        zip_path = data_dir + args.unzip
        assert os.path.exists(zip_path), "file {} don't exist.".format(zip_path)
        unzip_file(zip_path, args.unzip_target)
        image_dir = args.unzip_target + args.image_dir
        walk_file(args.unzip_target)

    assert os.path.exists(image_dir), 'the path of image is not exist.'
    assert os.path.exists(label_dir), 'the path of label is not exist'

    t_dt, v_dt = load_data(data_path=image_dir,
                           label_path=label_dir,
                           resize=args.input_size,
                           batch_size=args.batch)

    # optimizer selection — fail fast on an unknown name instead of
    # crashing later with UnboundLocalError (original behavior).
    if args.optim == 'adam':
        optim = nn.Adam(net.trainable_params(),
                        learning_rate=args.lr,
                        beta1=args.b1, beta2=args.b2,
                        weight_decay=args.wd)
    elif args.optim == 'momentum':
        optim = nn.Momentum(net.trainable_params(),
                            learning_rate=args.lr,
                            momentum=args.mm,
                            weight_decay=args.wd)
    else:
        raise ValueError('Unsupported optimizer: {}'.format(args.optim))

    # build the callback list from the requested names; unknown names are
    # skipped with a warning — the original appended None, which crashes
    # inside model.train.
    callback_dict = {'LossMonitor': LossMonitor(),
                     'ModelCheckpoint': ModelCheckpoint(config=CheckpointConfig(save_checkpoint_steps=64),
                                                        directory=save_dir + 'Model/ResNet/'),
                     'SummaryCollector': SummaryCollector(summary_dir=save_dir + 'summary_dir', collect_freq=32),
                     'AccuracyMonitor': AccuracyMonitor(save_dir, v_dt, args.export),
                     }
    callback_list = []
    for item in args.callback:
        callback = callback_dict.get(item)
        if callback is None:
            logging.warning('Unknown callback {} ignored.'.format(item))
        else:
            callback_list.append(callback)

    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True,
                                            reduction='mean')

    model = Model(net, loss_fn=loss, optimizer=optim)

    logging.info('Train begin:\nepoch {}\tlearning rate {}\tbatch size {}'.format(args.epochs, args.lr, args.batch))
    model.train(args.epochs,  t_dt, callback_list, False)
    logging.info('Train complete.')

    mindspore.save_checkpoint(net, PARAM_PATH)

    # export network in inference mode
    net.set_train(False)
    # dummy NCHW input with the trained spatial size
    export_input = np.random.uniform(0.0, 1.0, size=[1, 3, args.input_size, args.input_size]).astype(np.float32)
    # export the final (last-epoch) parameters
    export(net, Tensor(export_input), file_name=save_dir + args.export, file_format='AIR')
    export(net, Tensor(export_input), file_name=save_dir + args.export, file_format='ONNX')
    # export the best parameters if the accuracy monitor saved them
    if os.path.exists('./best.ckpt'):
        best_parm = mindspore.load_checkpoint('./best.ckpt')
        mindspore.load_param_into_net(net, best_parm)
        export(net, Tensor(export_input), file_name=save_dir + args.export + '-best', file_format='AIR')
        export(net, Tensor(export_input), file_name=save_dir + args.export + '-best', file_format='ONNX')

    logging.info('Export finished.')

    sys.exit(0)


if __name__ == '__main__':
    parser = parser_init()
    # parse known args; unrecognized flags are ignored
    args, unknown_args = parser.parse_known_args()
    # NOTE: the redundant CHECK_FILE_TYPE = 'ckpt' reassignment was removed;
    # the module-level constant (same value) is already defined above.
    main(args)
