from utils import get_datasets,get_model
from easydict import EasyDict as edict
from mindformers.models.gpt2 import GPT2LMHeadModel

import mindspore as ms
import mindspore.nn as nn
from mindspore.nn import Accuracy
from mindspore.train.callback import LossMonitor
from argparse import ArgumentParser
from mindformers.trainer.config_args import ConfigArguments, OptimizerConfig, \
    RunnerConfig, LRConfig, WrapperConfig
from mindspore.nn import AdamWeightDecay, WarmUpLR, \
    DynamicLossScaleUpdateCell, TrainOneStepWithLossScaleCell
from mindspore.train.callback import LossMonitor
from mindarmour.privacy.diff_privacy import DPModel
from mindarmour.privacy.diff_privacy import NoiseMechanismsFactory
from mindarmour.privacy.diff_privacy import ClipMechanismsFactory
from mindarmour.privacy.diff_privacy import PrivacyMonitorFactory
from mindarmour.utils import LogUtil
from mindformers.trainer import Trainer
# Module-level logger shared by all training entry points.
# BUG FIX: the original initialized this block twice with conflicting TAG
# values ('train-dp-gpt2', then 'Lenet5_train' — a leftover from a LeNet
# example script). Keep a single setup with the tag matching this file.
LOGGER = LogUtil.get_instance()
LOGGER.set_level('INFO')
TAG = "train-dp-gpt2"

def set_config(trainer, cfg):
    """Push the epoch count and learning rate from *cfg* into the
    trainer's runner-config and LR-schedule sections.

    Args:
        trainer: object exposing ``config.runner_config`` and
            ``config.lr_schedule`` mapping sections.
        cfg: config object with ``epoch_size`` and ``lr`` attributes.
    """
    runner_section = trainer.config.runner_config
    lr_section = trainer.config.lr_schedule
    runner_section['epochs'] = cfg.epoch_size
    lr_section['learning_rate'] = cfg.lr

def train_dp(args):
    """Fine-tune GPT-2 with differential privacy (DP-SGD via MindArmour).

    Builds Gaussian noise/clipping mechanisms and an RDP privacy monitor,
    wraps the GPT-2 network in ``DPModel``, and trains on the text dataset.
    Checkpoints are written to ``./dp_ckpt/trained_dp_ckpt/``.

    Args:
        args: parsed CLI namespace; ``lr``, ``momentum``, ``epoch_size``,
            ``batch_size``, ``device_target`` and ``data_path`` override the
            defaults below.
    """
    # Default hyper-parameters; the common ones are overridden from the CLI.
    cfg = edict({
        'lr': 2e-6,
        'momentum': 0.9,
        'epoch_size': 100,
        'batch_size': 2,
        'save_checkpoint_steps': 200,
        'keep_checkpoint_max': 30,
        'device_target': 'GPU',
        'dataset_sink_mode': False,
        'micro_batches': 1,
        'norm_bound': 0.5,
        'initial_noise_multiplier': 0.8,
        'noise_mechanisms': 'Gaussian',
        'clip_mechanisms': 'Gaussian',

        'clip_decay_policy': 'Linear',
        'clip_learning_rate': 0.001,
        'target_unclipped_quantile': 0.9,
        'fraction_stddev': 0.01,
        'optimizer': 'Momentum',
        'target_delta': 1e-5,
        'data_path': './datasets/train-raw.txt'
    })
    # Override defaults with command-line values.
    cfg["lr"] = args.lr
    cfg["momentum"] = args.momentum
    cfg["epoch_size"] = args.epoch_size
    cfg["batch_size"] = args.batch_size
    cfg["device_target"] = args.device_target
    # BUG FIX: --data_path was parsed by the CLI but silently ignored here;
    # its default equals the hard-coded path, so this is backward compatible.
    cfg["data_path"] = args.data_path

    ms.set_context(mode=ms.GRAPH_MODE, max_call_depth=1000, device_target=cfg.device_target)

    # DP-SGD splits each batch into micro-batches for per-sample clipping,
    # so the batch size must be evenly divisible.
    if cfg.micro_batches and cfg.batch_size % cfg.micro_batches != 0:
        raise ValueError(
            "Number of micro_batches should divide evenly batch_size")
    # Gaussian noise added to clipped gradients (no noise decay).
    noise_mech = NoiseMechanismsFactory().create(cfg.noise_mechanisms,
                                                 norm_bound=cfg.norm_bound,
                                                 initial_noise_multiplier=cfg.initial_noise_multiplier,
                                                 decay_policy=None)
    # Adaptive gradient-norm clipping (linearly decayed toward the target
    # unclipped quantile).
    clip_mech = ClipMechanismsFactory().create(cfg.clip_mechanisms,
                                               decay_policy=cfg.clip_decay_policy,
                                               learning_rate=cfg.clip_learning_rate,
                                               target_unclipped_quantile=cfg.target_unclipped_quantile,
                                               fraction_stddev=cfg.fraction_stddev)
    # RDP accountant: logs the spent privacy budget every 10 steps.
    rdp_monitor = PrivacyMonitorFactory.create('rdp',
                                               num_samples=50,
                                               batch_size=cfg.batch_size,
                                               initial_noise_multiplier=cfg.initial_noise_multiplier,
                                               per_print_times=10,
                                               target_delta=cfg.target_delta, noise_decay_mode=None)
    net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
    # Tokenizer is returned by the loader but not needed during training.
    trainloader, _tokenizer = get_datasets(cfg.data_path, cfg.batch_size, is_ds=True)

    network = get_model(set_train=True, is_opt=False)
    # NOTE(review): args.beta1/args.beta2 exist on the CLI but are not wired
    # in here; the betas below are hard-coded — confirm whether that is
    # intentional before exposing them.
    lr_schedule = WarmUpLR(learning_rate=cfg.lr, warmup_steps=2000)
    net_opt = AdamWeightDecay(learning_rate=lr_schedule, eps=1e-8, beta1=0.9, beta2=0.95,
                              params=network.trainable_params())
    config_ck = ms.CheckpointConfig(save_checkpoint_steps=cfg.save_checkpoint_steps,
                                    keep_checkpoint_max=cfg.keep_checkpoint_max)
    ckpoint_cb = ms.ModelCheckpoint(prefix="checkpoint_dp_gpt2",
                                    directory='./dp_ckpt/trained_dp_ckpt/',
                                    config=config_ck)

    # DPModel performs per-micro-batch gradient clipping and noise addition
    # before the optimizer step.
    model = DPModel(micro_batches=cfg.micro_batches,
                    norm_bound=cfg.norm_bound,
                    noise_mech=noise_mech,
                    clip_mech=clip_mech,
                    network=network,
                    loss_fn=net_loss,
                    optimizer=net_opt,
                    metrics={"Accuracy": Accuracy()})

    LOGGER.info(TAG, "============== Starting Training ==============")
    model.train(cfg['epoch_size'], trainloader,
                callbacks=[ckpoint_cb, ms.LossMonitor(), rdp_monitor],
                dataset_sink_mode=cfg.dataset_sink_mode,
                )

def train_raw_1(args):
    """Fine-tune GPT-2 without differential privacy using the high-level
    MindFormers ``Trainer`` API, starting from a downloaded checkpoint.

    Args:
        args: parsed CLI namespace; ``lr``, ``momentum``, ``epoch_size``,
            ``batch_size``, ``device_target`` and ``data_path`` override the
            defaults below.
    """
    cfg = edict({
        'lr': 2e-5,
        'momentum': 0.9,
        'seq_length': 1024,
        'start_epoch': 0,
        'epoch_size': 100,
        'batch_size': 4,
        'save_checkpoint_steps': 20,
        'keep_checkpoint_max': 100,
        'device_target': 'GPU',
        'data_path': "./datasets/train-raw.txt",
        # NOTE(review): key name looks like a typo for 'check_point_path';
        # it is never read in this file — confirm before renaming.
        'chick_point_path': "./backup/ori-model/gpt2-ori.ckpt"
    })
    # Override defaults with command-line values.
    cfg["lr"] = args.lr
    cfg["momentum"] = args.momentum
    cfg["epoch_size"] = args.epoch_size
    cfg["batch_size"] = args.batch_size
    cfg["device_target"] = args.device_target
    # BUG FIX: --data_path was parsed by the CLI but silently ignored here;
    # its default equals the hard-coded path, so this is backward compatible.
    cfg["data_path"] = args.data_path

    trainloader, _tokenizer = get_datasets(path=cfg.data_path, bt=cfg.batch_size)
    trainer = Trainer(task='text_generation', model='gpt2', train_dataset=trainloader)
    # Propagate epochs / learning rate into the trainer's internal config.
    set_config(trainer, cfg)
    trainer.finetune(finetune_checkpoint="./checkpoint_download/gpt2/gpt2.ckpt")

def train_raw_2(args):
    """Fine-tune GPT-2 without differential privacy, building the network,
    optimizer and trainer config explicitly instead of by task name.

    Args:
        args: parsed CLI namespace; ``lr``, ``momentum``, ``epoch_size``,
            ``batch_size``, ``device_target`` and ``data_path`` override the
            defaults below.
    """
    cfg = edict({
        'lr': 5e-5,
        'momentum': 0.9,
        'seq_length': 1024,
        'start_epoch': 0,
        'epoch_size': 100,
        'batch_size': 2,
        'save_checkpoint_steps': 20,
        'keep_checkpoint_max': 100,
        'device_target': 'GPU',
        'data_path': "./datasets/train-raw.txt"
    })
    # Override defaults with command-line values.
    cfg["lr"] = args.lr
    cfg["momentum"] = args.momentum
    cfg["epoch_size"] = args.epoch_size
    cfg["batch_size"] = args.batch_size
    cfg["device_target"] = args.device_target
    # BUG FIX: --data_path was parsed by the CLI but silently ignored here;
    # its default equals the hard-coded path, so this is backward compatible.
    cfg["data_path"] = args.data_path
    runner_config = RunnerConfig(epochs=cfg.epoch_size, batch_size=cfg.batch_size)
    lr_schedule_config = LRConfig(lr_type='WarmUpLR', learning_rate=cfg.lr, warmup_steps=2000,
                                  end_learning_rate=0.000001)

    optim_config = OptimizerConfig(optim_type='Adam', beta1=0.9, beta2=0.95,
                                   weight_decay=0.1,
                                   learning_rate=lr_schedule_config)

    trainloader, _tokenizer = get_datasets(cfg.data_path, cfg.batch_size)
    config = ConfigArguments(seed=1234, runner_config=runner_config,
                             optimizer=optim_config)
    loss_m = LossMonitor()
    network = GPT2LMHeadModel.from_pretrained("gpt2")
    # Explicit optimizer passed to the trainer (the OptimizerConfig above is
    # also supplied via ConfigArguments — both paths are kept as in the
    # original code).
    lr_schedule = WarmUpLR(learning_rate=cfg.lr, warmup_steps=2000)
    optimizer = AdamWeightDecay(learning_rate=lr_schedule,
                                params=network.trainable_params())
    config_ck = ms.CheckpointConfig(save_checkpoint_steps=cfg.save_checkpoint_steps,
                                    keep_checkpoint_max=cfg.keep_checkpoint_max)
    ckpoint_cb = ms.ModelCheckpoint(prefix="checkpoint_gpt2",
                                    directory='./output/trained_ckpt_file/',
                                    config=config_ck)
    # NOTE(review): these callbacks are built but never handed to the
    # Trainer (the callbacks= argument below is commented out), so no
    # checkpoints/loss logs are produced by them — confirm intent.
    callbacks = [ckpoint_cb, loss_m]
    task_trainer = Trainer(task='text_generation',
                           model=network,
                           args=config,
                           optimizers=optimizer,
                           train_dataset=trainloader,
                           eval_dataset=trainloader,)
                           #callbacks=callbacks)
    task_trainer.finetune(finetune_checkpoint=False)

if __name__ == '__main__':

    parser = ArgumentParser()
    parser.add_argument("--train_model", default="train_dp", type=str,
                        help="training mode: 'train_dp' for DP-SGD training, "
                             "'train_raw_2' for explicit-config raw fine-tuning, "
                             "anything else for raw fine-tuning (train_raw_1)")
    parser.add_argument("--lr", default=1e-4, type=float, help="learning rate")
    parser.add_argument("--momentum", default=0.9, type=float, help="sgd momentum")
    parser.add_argument("--data_path", default="./datasets/train-raw.txt",
                        help="path to the training text file")
    parser.add_argument("--epoch_size", default=300, type=int, help="total number of training epochs")
    parser.add_argument("--batch_size", default=4, type=int, help="training batch size")

    parser.add_argument("--device_target", default="GPU", type=str, help="device_target:[device used]")
    parser.add_argument("--beta1", default=0.9, type=float, help="beta_1 in Adam")
    parser.add_argument("--beta2", default=0.999, type=float, help="beta_2 in Adam")

    args = parser.parse_args()
    # Dispatch on the requested training mode.
    # BUG FIX: train_raw_2 was unreachable — the original branched only on
    # 'train_dp' and routed everything else to train_raw_1. Defaults and the
    # documented values ('train_dp'/'train_raw') behave exactly as before.
    if args.train_model == "train_dp":
        train_dp(args)
    elif args.train_model == "train_raw_2":
        train_raw_2(args)
    else:
        train_raw_1(args)




