"""SynergyNet model train."""
import os
import warnings

warnings.filterwarnings("ignore")
import os.path as osp
import sys
from pathlib import Path
import argparse
import time
import logging
from mindspore import context, nn, load_checkpoint, load_param_into_net, save_checkpoint
import mindspore.dataset as ds
import mindspore.ops as ops

from mind3d.utils.synergynet_util import AverageMeter, mkdir, str2bool
from mind3d.utils.synergynet_train_utils import benchmark_pipeline
from mind3d.utils.synergynet_builder import build_model, build_dataset
from mind3d.models.losses.synergynet_loss import Synergynet_Loss, CustomWithLossCell
from mind3d.utils.load_yaml import load_yaml
from mindspore.train.loss_scale_manager import FixedLossScaleManager



def count_parameters(model):
    """Return the total element count of *model*'s trainable parameters.

    Fixed to use the MindSpore Cell API: the original body called the PyTorch
    ``model.parameters()`` / ``p.numel()`` / ``p.requires_grad`` interface,
    which does not exist on a MindSpore ``Cell`` and would raise
    ``AttributeError``. MindSpore Cells expose ``trainable_params()`` (the
    parameters with ``requires_grad=True``) and each parameter reports its
    number of elements via the ``.size`` property.
    """
    return sum(p.size for p in model.trainable_params())


def train(opt):
    """Run the full SynergyNet training loop.

    Builds the model, optimizer and loss from *opt*, optionally resumes from a
    checkpoint, trains for the configured number of epochs, saves a checkpoint
    every epoch, and keeps the checkpoints with the best NME and best pose MAE
    as reported by ``benchmark_pipeline``.

    Args:
        opt (dict): parsed YAML configuration. Reads ``device_id``,
            ``device_target`` and the nested ``train`` section (lr bounds,
            momentum, weight decay, batch size, epochs, paths, ...).
    """
    context.set_context(max_call_depth=100000, device_id=opt['device_id'], mode=context.PYNATIVE_MODE,
                        device_target=opt['device_target'])

    # logging setup: mirror every message to the configured log file and stdout
    logging.basicConfig(
        format='[%(asctime)s] [p%(process)s] [%(pathname)s:%(lineno)d] [%(levelname)s] %(message)s',
        level=logging.INFO,
        handlers=[
            logging.FileHandler(opt['train']['log_file'], mode=opt['train']['log_mode']),
            logging.StreamHandler()
        ]
    )

    # step1: define the model structure
    model = build_model(img_size=opt['train']['img_size'], mode="train")

    # step2: optimization: loss and optimization method
    # step2.1 optim
    # NOTE(review): 2485 steps/epoch and the 80-epoch decay horizon are
    # hard-coded for the reference dataset/batch size — confirm they still
    # match the actual data loader before changing batch size or dataset.
    learning_rate = nn.cosine_decay_lr(max_lr=opt['train']['max_lr'],
                                       min_lr=opt['train']['min_lr'],
                                       total_step=2485 * 80,
                                       step_per_epoch=2485,
                                       decay_epoch=60)

    optim = nn.SGD(model.trainable_params(),
                   learning_rate=learning_rate,
                   momentum=opt['train']['momentum'],
                   weight_decay=opt['train']['weight_decay'],
                   nesterov=True)
    # step2.2 loss
    criterion = Synergynet_Loss()

    # step2.3 resume from a checkpoint when one is configured and exists
    if opt['train']['resume']:
        path = opt['train']['resume']
        if Path(path).is_file():
            logging.info(f'=> loading checkpoint {path}')
            checkpoint = load_checkpoint(path)
            # strict_load=False tolerates missing/extra parameters in the file
            load_param_into_net(model, checkpoint, strict_load=False)
            print('==> loading DONE!')
        else:
            logging.info(f'=> no checkpoint found at {path}')

    if opt['train']['test_initial']:  # if testing the performance from the initial
        logging.info('Testing from initial')
        benchmark_pipeline(opt['train']['resume'])

    # step3: wrap model + loss, then build the one-step training cell.
    # (The previous GPU / non-GPU branches were identical — set_train(True)
    # and set_train() do the same thing — so the dead branch is removed.)
    network = nn.TrainOneStepCell(CustomWithLossCell(model, criterion), optim)
    network.set_train(True)

    # step4: data
    train_dataset = build_dataset(opt, mode="train")
    train_ds = ds.GeneratorDataset(train_dataset, ["data", "target"], shuffle=True)
    train_ds = train_ds.batch(opt['train']['batch_size'], drop_remainder=True)

    print('start training')
    # Use +inf so the first benchmark result always becomes the initial "best"
    # (the old magic value 100 silently skipped saving if metrics exceeded it).
    best_mae = float('inf')
    best_nme = float('inf')
    t1 = time.time()
    total_epochs = opt['train']['epochs']
    for epoch in range(opt['train']['start_epoch'], total_epochs + 1):
        # report against the configured epoch count (was hard-coded "/ 120")
        print("this is epoch {} / {} ".format(epoch, total_epochs))
        epoch_loss = 0
        print("total batches = {}".format(train_ds.get_dataset_size()))
        start = time.time()
        for i, data in enumerate(train_ds.create_dict_iterator(), 0):
            inputs = data["data"]  # image batch, e.g. (batch, 3, 120, 120) — per original notes
            # keep only the first 62 regression targets for the loss
            target = data['target'][:, :62]
            loss = network(inputs, target)
            epoch_loss += float(loss)

            if i % 50 == 0:
                print("this is epoch {} batch {} loss={}".format(epoch, i, float(loss)))

        epoch_time = time.time() - start
        print("epoch loss : {}".format(epoch_loss))
        print("epoch time : {}".format(epoch_time))

        # checkpoint every epoch, then benchmark it for best-model tracking
        savepath = os.path.join(opt['train']['save_root'], "synergynet_model_{}.ckpt".format(epoch))
        save_checkpoint(model, savepath)
        logging.info(f'Save checkpoint to {savepath}')
        print('Saving model....')
        print("====================================")
        logging.info('\nVal[{}]'.format(epoch))
        mean_nme, pose_mae = benchmark_pipeline(savepath)
        if mean_nme < best_nme:
            best_nme = mean_nme
            save_checkpoint(model, os.path.join(opt['train']['save_root'], "synergynet_model_best_nme.ckpt"))
        if pose_mae < best_mae:
            best_mae = pose_mae
            save_checkpoint(model, os.path.join(opt['train']['save_root'], "synergynet_model_best_pose.ckpt"))
        print("====================================")

    # final evaluation of the two best checkpoints; paths now derive from
    # save_root (previously hard-coded to "./synergy_net/ckpts/", which broke
    # whenever save_root pointed elsewhere)
    logging.info('\nsynergynet_model_best_nme.ckpt Val:')
    best_nme, _ = benchmark_pipeline(os.path.join(opt['train']['save_root'], "synergynet_model_best_nme.ckpt"))
    logging.info('\nsynergynet_model_best_pose.ckpt Val:')
    _, best_mae = benchmark_pipeline(os.path.join(opt['train']['save_root'], "synergynet_model_best_pose.ckpt"))
    total_time = time.time() - t1
    logging.info('\ntotal time :{}'.format(total_time))
    print("total time : {}".format(total_time))
    print("training completed...")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='SynergyNet train')
    parser.add_argument('-opt', type=str, default='mind3d/configs/synergy_net/synergynet.yaml', help='Path to option YAML file.')
    args = parser.parse_known_args()[0]
    opt = load_yaml(args.opt)

    snapshot_dir = osp.split(opt['train']['snapshot'])[0]
    mkdir(snapshot_dir)
    
    train(opt)
