import os
import argparse
import numpy as np

from mindspore import nn
from mindspore import context
from mindspore.common import set_seed
from mindspore.train.model import Model
from mindspore.train.serialization import load_checkpoint
from mindspore.communication.management import init, get_group_size, get_rank

from src.models.vit import FineTuneVit
from src.logger import get_logger
from src.datasets.dataset import get_dataset

from src.helper import parse_with_config, str2bool
from src.models.eval_engine import get_eval_engine
from src.monitors.obs_monitor import download


def context_init(args):
    """Seed RNGs and configure the MindSpore execution context.

    Sets numpy and MindSpore global seeds, then initializes either a
    data-parallel multi-device context (when ``args.use_parallel`` is true)
    or a plain single-device context.

    Args:
        args: parsed config namespace; reads ``seed``, ``use_parallel`` and
            the ``context`` dict (kwargs forwarded to ``context.set_context``).
            Mutated in-place: ``args.context["device_id"]`` is set in
            parallel mode.

    Returns:
        tuple[int, int]: ``(rank_id, device_num)`` — ``(0, 1)`` when not
        running in parallel.
    """
    # Seed both numpy and MindSpore so data shuffling and init are reproducible.
    np.random.seed(args.seed)
    set_seed(args.seed)
    rank_id = 0
    device_num = 1
    if args.use_parallel:
        init()
        # BUGFIX: os.getenv returns None when DEVICE_ID is unset, and
        # int(None) raises TypeError. Default to device 0 in that case.
        device_id = int(os.getenv('DEVICE_ID', '0'))  # 0 ~ 7
        rank_id = get_rank()  # local_rank
        device_num = get_group_size()  # world_size
        print("device_id is {}, rank_id is {}, device_num is {}".format(device_id, rank_id, device_num), flush=True)
        args.context["device_id"] = device_id
        context.set_context(**args.context)
        # Reset before configuring so stale parallel settings cannot leak in.
        context.reset_auto_parallel_context()
        context.set_auto_parallel_context(
            parallel_mode=context.ParallelMode.DATA_PARALLEL,
            device_num=device_num,
            gradients_mean=True)
    else:
        context.set_context(**args.context)

    context.set_context(reserve_class_name_in_scope=False)
    return rank_id, device_num


def main(args):
    """Run single evaluation of a fine-tuned ViT checkpoint.

    Builds the eval dataset, constructs the network and eval engine,
    optionally restores weights from ``args.use_ckpt``, then evaluates
    once and logs the resulting accuracy.

    Args:
        args: parsed config namespace (mutated: ``device_num``,
            ``local_rank`` and ``logger`` attributes are attached).
    """
    # Context / distributed setup; record rank info on args for downstream use.
    rank, world_size = context_init(args)
    args.device_num = world_size
    args.local_rank = rank
    args.logger = get_logger(args.save_dir, rank=args.local_rank)
    args.logger.info("model config: {}".format(args))

    # Build the evaluation dataset; allow the config to override sink size.
    dataset = get_dataset(args, is_train=False)
    sink_size = args.per_step_size or dataset.get_dataset_size()
    args.logger.info("Create eval dataset finish, data size:{}".format(sink_size))

    # Network + evaluation engine.
    network = FineTuneVit(batch_size=args.batch_size, patch_size=args.patch_size,
                          image_size=args.image_size, dropout=args.dropout,
                          num_classes=args.num_classes, **args.model_config)
    engine = get_eval_engine(args.eval_engine, network, dataset, args)

    # Optimizer is required by Model construction even though no training runs.
    opt = nn.AdamWeightDecay(network.trainable_params(),
                             learning_rate=1.0,
                             weight_decay=args.weight_decay,
                             beta1=args.beta1,
                             beta2=args.beta2)

    # Restore checkpoint weights when one is configured.
    if args.use_ckpt:
        ckpt_params = load_checkpoint(args.use_ckpt)
        unloaded = network.init_weights(ckpt_params)
        args.logger.info(f"===============net_not_load================{unloaded}")

    # Wrap in a Model (O3 mixed precision) and run a single evaluation pass.
    ms_model = Model(network, metrics=engine.metric, optimizer=opt,
                     eval_network=engine.eval_network,
                     loss_scale_manager=None, amp_level="O3")

    engine.set_model(ms_model)
    # equal to model._init(dataset, sink_size=per_step_size)
    engine.compile(sink_size=sink_size)
    engine.eval()
    result = engine.get_result()
    args.logger.info('accuracy={:.6f}'.format(float(result)))


if __name__ == "__main__":
    # CLI: a YAML config file drives most settings; --use_parallel toggles
    # distributed evaluation.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        '--config', default="./config/eval.yaml", help='YAML config files')
    arg_parser.add_argument(
        '--use_parallel', default=False, type=str2bool, help='use parallel config.')

    main(parse_with_config(arg_parser))
