import copy
import datetime
import glob
import json
import math
import os
import time

import mindspore.common.dtype as mstype
import mindspore.communication.management as D
import mindspore.nn as nn
import numpy as np
from mindspore import Parameter, Tensor, context
from mindspore.common.initializer import initializer
from mindspore.context import ParallelMode
from mindspore.nn.wrap.cell_wrapper import (
    MicroBatchInterleaved,
    PipelineCell,
    _VirtualDatasetCell,
)
from mindspore.nn.wrap.loss_scale import DynamicLossScaleUpdateCell
from mindspore.ops import operations as P
from mindspore.train.callback import CheckpointConfig, ModelCheckpoint, TimeMonitor
from mindspore.train.model import Model
from mindspore.train.serialization import (
    load_checkpoint,
    load_distributed_checkpoint,
    load_param_into_net,
)

# Guard the mindformers import: the rest of this script is unusable without it,
# so fail immediately with an installation hint instead of a bare traceback.
try:
    from mindformers.modules.transformer import (
        TransformerOpParallelConfig,
        TransformerRecomputeConfig,
    )

except ImportError as e:
    print("Import ERROR, expect mindformers to be installed. "
          "Please refer to the page https://gitee.com/mindspore/mindformers.git to install the mindformers.")
    print("Now exit the program.")
    # Raise SystemExit explicitly (the `exit()` helper comes from the `site`
    # module and is not guaranteed in all interpreters); chain the original
    # ImportError so the real cause is visible in the traceback.
    raise SystemExit(1) from e

from dataclasses import dataclass

import mindspore
import mindspore.common.dtype as mstype
import numpy as np
from mindspore import Profiler, context, nn, ops
from mindspore.dataset import GeneratorDataset, MnistDataset, transforms, vision
from mindspore.ops.operations._inner_ops import Receive, Send

from dataset import IteratorStore
from mindformers import (
    AutoConfig,
    AutoModel,
    AutoProcessor,
    AutoTokenizer,
    MindFormerBook,
)
from mindformers.core.parallel_config import build_parallel_config
from mindformers.models import BaseConfig, BaseModel, BaseProcessor, BaseTokenizer
from mindformers.models.bloom import BloomConfig, BloomLMHeadModel
from mindformers.tools.register import ActionDict, MindFormerConfig
from ppo_models import AdaptiveKLController, PPO_model, PPOConfig
from ppo_trainer import AcceleratePPOTrainer
from src.adam import AdamWeightDecayOp
from src.callbacks import EvalCallBack, LossCallBack
from src.dataset import create_dataset
from src.metrics import PPLMetric
from src.pangu_alpha import (
    PanguAlpha_Model,
    PanguAlphaModel,
    PanGUAlphaWithLoss,
    PanGuHead,
)
from src.pangu_alpha_config import PanguAlphaConfig, set_parse
from src.pangu_alpha_wrapcell import (
    PanguAlphaTrainOneStepWithLossScaleCell,
    PanguAlphaTrainPipelineWithLossScaleCell,
)
from src.utils import FP32StateAdamWeightDecay, LearningRate, download_data, get_args
from utils import IsLastStage, get_model_config, set_pipeline_parallel_context

root_path = os.getenv("RLHF_ROOT_DIR")
# Fail fast here: the `opt` class body below concatenates root_path with a
# string at class-creation time, so an unset RLHF_ROOT_DIR would otherwise
# surface as a confusing `TypeError: unsupported operand ...` instead of
# this actionable message (the later check at the config section runs too late).
if root_path is None:
    raise ValueError("Please run `source env.sh` before running the program.")


@dataclass
class opt:
    """Static run options for this PPO/RLHF generation script.

    NOTE: the attributes are intentionally *unannotated*, so @dataclass
    registers no fields and the class acts as a plain read-only namespace
    (it is passed around as the class object itself, never instantiated).
    """
    device_target = 'Ascend'
    parallel_mode = 'semi_auto_parallel'
    full_batch = True
    enable_alltoall = False
    micro_batch_interleaved = 1
    # Learning-rate schedule knobs.
    start_lr = 1e-04
    end_lr = 1e-05
    warmup_step = 500
    decay_steps = 200000
    opt_offload = False
    optimizer = 'adam'
    # MindRecord dataset with preference-comparison samples.
    mind_dataset_dir = root_path + "/data/cvalues/cvalues_comparison_test_8192_with_pretrain.mindrecord"
    # Enable incremental (KV-cache) inference for generation.
    use_past = True
    inference_micro_size = 1


# Global MindSpore execution context for the whole script: graph mode on
# Ascend, compile cache disabled, O1 memory optimization.
_context_kwargs = dict(
    save_graphs=False,
    save_graphs_path='./graph',
    mode=context.GRAPH_MODE,
    device_target=opt.device_target,
    enable_compile_cache=False,
    compile_cache_path="./cache",
    max_call_depth=4096,
    memory_optimize_level='O1',
)
context.set_context(**_context_kwargs)

# Resolve the YAML configuration paths under the project root.
if root_path is None:
    # No placeholders are interpolated here, so a plain string literal is
    # used (the original had a pointless f-string prefix, lint F541).
    raise ValueError("Please run `source env.sh` before running the program.")
config_root_path = root_path + "/mindformers/configs/gpt2/"
# The SFT (actor), critic and reward models all share the same GPT-2 yaml.
sft_model_yaml = "run_gpt2.yaml"
reward_model_yaml = "run_gpt2.yaml"
sft_model_path = config_root_path + sft_model_yaml
critic_cfg_path = config_root_path + reward_model_yaml
reward_cfg_path = config_root_path + reward_model_yaml

# Actor (SFT) and reference model configs come from the same yaml; each
# receives its own deep copy of the resolved parallel configuration.
config = MindFormerConfig(sft_model_path)
build_parallel_config(config)
parallel_cfg = config.parallel_config

sft_model_config = AutoConfig.from_pretrained(sft_model_path)
sft_model_config.use_past = opt.use_past
sft_model_config.parallel_config = copy.deepcopy(parallel_cfg)

ref_model_config = AutoConfig.from_pretrained(sft_model_path)
ref_model_config.use_past = False
ref_model_config.parallel_config = copy.deepcopy(parallel_cfg)

# Critic model config: full (non-incremental) forward pass, own copy of the
# parallel configuration.
config = MindFormerConfig(critic_cfg_path)
build_parallel_config(config)
parallel_copy = copy.deepcopy(config.parallel_config)
critic_model_config = AutoConfig.from_pretrained(critic_cfg_path)
critic_model_config.use_past = False
critic_model_config.parallel_config = parallel_copy

# Reward model config: same recipe as the critic, sourced from the reward yaml.
config = MindFormerConfig(reward_cfg_path)
build_parallel_config(config)
parallel_copy = copy.deepcopy(config.parallel_config)
rm_model_config = AutoConfig.from_pretrained(reward_cfg_path)
rm_model_config.use_past = False
rm_model_config.parallel_config = parallel_copy


ppo_config = PPOConfig()

# Only the actor (SFT) model starts from a pretrained checkpoint; the
# reference and reward models deliberately start without one.
# NOTE(review): critic_model_config.checkpoint_name_or_path is never set
# here — presumably the trainer handles it; verify against AcceleratePPOTrainer.
ckpt_path = root_path + "/checkpoint_download/gpt2/gpt2.ckpt"
sft_model_config.checkpoint_name_or_path = ckpt_path
ref_model_config.checkpoint_name_or_path = None
rm_model_config.checkpoint_name_or_path = None

# With incremental inference every model consumes chunk_size samples per step.
if opt.use_past:
    for _cfg in (sft_model_config, ref_model_config,
                 critic_model_config, rm_model_config):
        _cfg.batch_size = ppo_config.chunk_size

# Dump the resolved configurations for all four roles.
for _tag, _cfg in (("ACT", sft_model_config),
                   ("REF", ref_model_config),
                   ("CRT", critic_model_config),
                   ("RM", rm_model_config)):
    print(f"[{_tag} Configure] is: ", _cfg, _cfg.parallel_config, flush=True)

# Assemble the PPO trainer from the four model configurations.
_trainer_kwargs = dict(
    ppo_config=ppo_config,
    sft_model_config=sft_model_config,
    ref_model_config=ref_model_config,
    critic_model_config=critic_model_config,
    rm_model_config=rm_model_config,
    opt=opt,
)
trainer = AcceleratePPOTrainer(**_trainer_kwargs)

# Both splits intentionally reuse the same prompt dataloader.
train_dataset = trainer.prompt_dataloader
test_dataset = trainer.prompt_dataloader

ppo_model = trainer.ppo_model
# Propagate the incremental-inference flag down through the policy network.
ppo_model.policy_model.model.add_flags_recursive(use_past=opt.use_past)

# Smoke-test generation: replicate one encoded prompt across the batch and
# decode whatever the policy model produces.
tokenizer_path = root_path + '/gpt2'
tokenizer = AutoTokenizer.from_pretrained(tokenizer_path)
question_1 = "请50字介绍一下郭德纲。"
qs = tokenizer.encode(question_1)
bs = sft_model_config.batch_size
# (bs, len(qs)) int32 batch: the same prompt repeated bs times.
input_ids = np.tile(np.array(qs, dtype=np.int32), (bs, 1))
print(f"input_ids shape is {input_ids.shape}", flush=True)
output_ids = ppo_model.generate(input_ids)
for idx, sample in enumerate(output_ids):
    print(f"size of output {idx} is {sample.shape}", flush=True)
    print(f"output {idx} is {tokenizer.decode(sample)}", flush=True)
    print("==============================", flush=True)
