import ast
import math
import os
import argparse
from mindspore.train.callback import CheckpointConfig, TimeMonitor, LossMonitor, ModelCheckpoint
from mindspore import context, nn, Tensor, set_seed, Model
from mindspore.context import ParallelMode
from mindspore.communication.management import init, get_rank, get_group_size
from src.dataset import create_dataset_train
from src.lr_generator import get_lr
from src.model import BCNN
from mindspore.nn import Accuracy
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from src.dataset import create_dataset
import mindspore.common.dtype as mstype
from mindspore import ops
from mindspore.nn.metrics import Accuracy
from src.utils import EvalCallBack, eval_show
import argparse

# Root directory for training outputs; checkpoints go under <root>/checkpoint/.
local_output_url = './output'

if __name__ == "__main__":

    parser = argparse.ArgumentParser()
    parser.add_argument("--lr", type=float, default=1e-4)
    parser.add_argument("--momentum", type=float, default=0.9)
    parser.add_argument("--save_ckpt_epoch", type=int, default=10)
    parser.add_argument("--keep_checkpoint_max", type=int, default=1000)
    parser.add_argument("--weight_decay", type=float, default=1e-4)
    parser.add_argument("--num_epochs", type=int, default=100)
    parser.add_argument("--num_classes", type=int, default=200)
    parser.add_argument("--num_train_images", type=int, default=5994)
    parser.add_argument("--num_test_images", type=int, default=5794)
    parser.add_argument("--batch_size", type=int, default=8)
    parser.add_argument("--prefix", type=str, default=None)
    parser.add_argument("--train_all", action="store_true")
    parser.add_argument("--ckpt_path", type=str, default=None)
    parser.add_argument("--target_device", type=str, default="GPU")
    parser.add_argument("--two_phase", action="store_true")
    parser.add_argument("--save_checkpoint", action="store_true")
    parser.add_argument("--show_acc_per_epoch", type=int, default=0)
    parser.add_argument("--dataset_path", type=str, default='/home/d1/dataset/CUB_200')
    parser.add_argument("--dyn_lr", action="store_true")
    parser.add_argument("--cnn_name", type=str, default="vgg")
    parser.add_argument("--pre_trained", type=str, default=None, choices=[None, "vgg16.ckpt", "resnet101.ckpt", "densenet121.ckpt"])
    config = parser.parse_args()

    learning_rate = config.lr
    num_epochs = config.num_epochs

    num_train_images = config.num_train_images
    batch_size = config.batch_size
    # Steps per epoch; ceil so a trailing partial batch still counts as a step.
    steps_per_epoch = math.ceil(num_train_images / batch_size)

    momentum = config.momentum
    weight_decay = config.weight_decay

    if not config.prefix:
        # Derive a descriptive checkpoint prefix from the hyper-parameters.
        config.prefix = 'BCNN_lr' + str(learning_rate) + '_wd' + str(weight_decay)
        # Fix: --pre_trained defaults to None, so the unconditional
        # `config.pre_trained.split('.')` raised AttributeError on the
        # default code path. Only append the backbone tag when given,
        # e.g. "vgg16.ckpt" -> "vgg16".
        if config.pre_trained:
            config.prefix += '_' + config.pre_trained.split('.')[-2]
        if config.two_phase:
            if config.train_all:
                config.prefix += '_2p_all'
            else:
                config.prefix += '_2p_fc'
        else:
            config.prefix += '_1p_all'

    context.set_context(mode=context.PYNATIVE_MODE, save_graphs=False, device_target=config.target_device)

    if config.dyn_lr:
        # Replace the scalar LR with a per-step schedule tensor.
        learning_rate = Tensor(get_lr(global_step=0,
                                      lr_max=learning_rate,
                                      total_epochs=num_epochs,
                                      steps_per_epoch=steps_per_epoch,
                                      train_all=config.two_phase and config.train_all))
        config.prefix += '_dyn-lr'

    # Fix: honour --num_classes instead of the hard-coded 200 (default is
    # unchanged, so existing invocations behave identically).
    net = BCNN(config.num_classes, config.pre_trained, config.train_all, config.cnn_name)

    # Optionally resume/fine-tune from an existing checkpoint.
    if config.ckpt_path:
        ckpt_file = config.ckpt_path
        print("ckpt file path is {}".format(ckpt_file))
        load_param_into_net(net, load_checkpoint(ckpt_file))

    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')

    optimizer = nn.Momentum(net.trainable_params(), learning_rate=learning_rate, momentum=momentum, weight_decay=weight_decay)

    train_data_set = create_dataset(config.dataset_path, batch_size=batch_size, is_train=True)
    # Fix: TimeMonitor expects the number of steps per epoch; get_batch_size()
    # returns the per-batch sample count, not the step count.
    dataset_size = train_data_set.get_dataset_size()
    time_cb = TimeMonitor(data_size=dataset_size)

    loss_cb = LossMonitor(per_print_times=10)

    cb = [time_cb, loss_cb]

    if config.save_checkpoint:
        # Checkpoint every --save_ckpt_epoch epochs, expressed in steps.
        ckptconfig = CheckpointConfig(save_checkpoint_steps=config.save_ckpt_epoch * steps_per_epoch,
                                      keep_checkpoint_max=config.keep_checkpoint_max)
        save_checkpoint_path = os.path.join(local_output_url, "checkpoint/")

        ckpoint_cb = ModelCheckpoint(prefix=config.prefix, directory=save_checkpoint_path, config=ckptconfig)
        cb += [ckpoint_cb]

    model = Model(net, loss_fn=loss, optimizer=optimizer, metrics={"Accuracy": Accuracy()})

    if config.show_acc_per_epoch > 0:
        # Periodically evaluate on the test split during training.
        test_data_set = create_dataset(config.dataset_path, batch_size=batch_size, is_train=False)
        epoch_per_eval = {"epoch": [], "acc": []}
        eval_cb = EvalCallBack(model, test_data_set, config.show_acc_per_epoch, epoch_per_eval)
        cb += [eval_cb]

    print("train start, lr is {}, weight_decay is {}, backbone is {}, prefix is {}".format(str(config.lr), str(config.weight_decay), config.cnn_name, config.prefix))
    model.train(config.num_epochs, train_data_set, callbacks=cb, dataset_sink_mode=False)
    print("train success")

    if config.show_acc_per_epoch > 0:
        # Plot/report the collected per-epoch accuracies.
        eval_show(epoch_per_eval, config.prefix)
