import argparse
import os
from functools import partial
from typing import Union, Optional

import deepspeed
import torch
from datasets import load_dataset
from deepspeed.runtime.zero.stage3 import estimate_zero3_model_states_mem_needs_all_live
from peft import LoraConfig, TaskType
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer, Adafactor, pipeline, PreTrainedTokenizerBase
from transformers.utils import PaddingStrategy
from trl import PPOConfig, set_seed, AutoModelForCausalLMWithValueHead, PPOTrainer
from trl.core import LengthSampler
import sys
# Make the project root importable so sibling packages (e.g. model/) resolve
# when this file is run as a script.
o_path = os.getcwd() # current working directory of the process
sys.path.append(o_path) # append it to the module search path
# from model.chatglm.modeling_chatglm import ChatGLMForConditionalGeneration
# from model.chatglm.tokenization_chatglm import ChatGLMTokenizer

def _str2bool(value):
    """Parse a CLI boolean flag.

    A plain ``type=bool`` is a classic argparse pitfall: ``bool("False")`` is
    True because any non-empty string is truthy, so ``--early_stopping False``
    would silently enable early stopping. This parses the usual spellings.
    """
    if isinstance(value, bool):
        return value
    return value.strip().lower() in ("true", "t", "1", "yes", "y")


parser = argparse.ArgumentParser(add_help=True, description='PPO fine-tuning with TRL + DeepSpeed')
parser.add_argument('--model_name_or_path', default="hfl/chinese-roberta-wwm-ext", type=str, help='path or hub id of the policy model')
parser.add_argument('--reward_model_name_or_path', default="hfl/chinese-roberta-wwm-ext", type=str, help='path or hub id of the reward model')
parser.add_argument('--dataset_name_or_path', default="/datasets_path/lvwerra/stack-exchange-paired/data/rl", type=str, help='directory with the parquet RL dataset')
parser.add_argument('--batch_size', default=4, type=int, help='PPO rollout batch size')
parser.add_argument('--mini_batch_size', default=1, type=int, help='PPO optimization mini-batch size')
parser.add_argument('--num_epochs', default=100, type=int, help='number of training epochs')
parser.add_argument('--save_interval', default=100, type=int, help='steps between checkpoints')
parser.add_argument('--device', default="cuda" if torch.cuda.is_available() else "cpu", type=str, help='device to run on')
parser.add_argument('--save_dir', default="./save_model/", type=str, help='directory for saved checkpoints')
parser.add_argument('--local_rank', default=0, type=int, help='local rank for distributed training')
parser.add_argument('--text_column', default="Tweet text", type=str, help='name of the input text column')
parser.add_argument('--label_column', default="text_label", type=str, help='name of the label column')
parser.add_argument('--seed', default=0, type=int, help='random seed')
parser.add_argument('--steps', default=20000, type=int, help='total PPO steps')
parser.add_argument('--learning_rate', default=1.41e-5, type=float, help='optimizer learning rate')
parser.add_argument('--log_with', default=None, type=str, help='logging backend passed to PPOConfig (e.g. wandb)')
parser.add_argument('--gradient_accumulation_steps', default=4, type=int, help='gradient accumulation steps')
# Bug fix: these three used type=bool, which treats ANY non-empty string
# (including "False") as True. Use an explicit string-to-bool parser.
parser.add_argument('--optimize_cuda_cache', default=True, type=_str2bool, help='let TRL free CUDA cache between steps')
parser.add_argument('--early_stopping', default=False, type=_str2bool, help='stop a PPO step early when KL diverges')
parser.add_argument('--output_max_length', default=128, type=int, help='maximum generated response length')
parser.add_argument('--target_kl', default=0.1, type=float, help='target KL divergence for early stopping')
parser.add_argument('--ppo_epochs', default=4, type=int, help='optimization epochs per PPO batch')
parser.add_argument('--max_length', default=512, type=int, help='maximum prompt length in tokens')
parser.add_argument('--lora_alpha', default=16, type=int, help='LoRA alpha scaling factor')
# Bug fix: lora_dropout had type=int with a float default; passing any real
# dropout value (e.g. 0.1) would crash on int("0.1").
parser.add_argument('--lora_dropout', default=0.1, type=float, help='LoRA dropout probability')
parser.add_argument('--init_kl_coef', default=0.2, type=float, help='initial KL penalty coefficient')
parser.add_argument('--adap_kl_ctrl', default=True, type=_str2bool, help='use adaptive KL control')
parser.add_argument('--reward_baseline', default=0.0, type=float, help='baseline subtracted from reward scores')
parser.add_argument('--save_freq', default=None, type=int, help='save a checkpoint every N steps (None disables)')

parser = deepspeed.add_config_arguments(parser)  # add DeepSpeed's own CLI flags
args = parser.parse_args()

# args.model_name_or_path = "D:/model_path/Llama-2-7b-hf"
# args.reward_model_name_or_path = "D:\\model_path\\roberta-base"

# Load the "train" split of the RL dataset from local parquet files.
train_dataset = load_dataset("parquet",
                             split="train",
                             data_dir=args.dataset_name_or_path)

# Cap the dataset at the first 100k rows to bound training time.
train_dataset = train_dataset.select(range(100000))

tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path)
# Need to do this for gpt2, because it doesn't have an official pad token.
tokenizer.pad_token = tokenizer.eos_token


# NOTE(review): num_proc and original_columns are only consumed by the
# commented-out dataset.map() below; they are currently unused.
num_proc = 4

original_columns = train_dataset.column_names


# train_dataset = train_dataset.map(
#     preprocess_function,
#     batched=True,
#     # num_proc=num_proc,
#     remove_columns=original_columns,
# )


def collator(data, tokenizer: "PreTrainedTokenizerBase",
             padding: "Union[bool, str, PaddingStrategy]" = True,
             max_length: Optional[int] = None,
             pad_to_multiple_of: Optional[int] = None,
             return_tensors: str = "pt"):
    """Collate raw dataset rows into a padded batch of PPO prompts.

    Each row's "question" field is wrapped in the prompt template
    ``"Question: ...\\n\\nAnswer: "``, then the whole batch is tokenized and
    padded. The raw prompt strings are kept under the ``"query"`` key so the
    reward model can later score ``query + response`` text.

    Args:
        data: list of dataset rows, each a mapping with a "question" string.
        tokenizer: tokenizer used to encode the prompts.
        padding: padding strategy forwarded to the tokenizer.
        max_length: if set, prompts are truncated to this many *tokens*.
        pad_to_multiple_of: optional padding multiple forwarded to the tokenizer.
        return_tensors: tensor format of the encoding ("pt" by default).

    Returns:
        The tokenizer's batch encoding with an extra "query" entry holding
        the prompt strings.
    """
    querylist = ["Question: " + row["question"] + "\n\nAnswer: " for row in data]

    # Bug fix: the original sliced each *string* to max_length characters and
    # passed max_length without a truncation strategy, which the tokenizer
    # ignores (with a warning). Truncate at the token level instead.
    batch_encode = tokenizer.batch_encode_plus(
        querylist,
        padding=padding,
        truncation=max_length is not None,
        max_length=max_length,
        pad_to_multiple_of=pad_to_multiple_of,
        return_tensors=return_tensors,
    )
    # Keep the raw prompts in the batch so the reward pipeline can score them.
    batch_encode["query"] = querylist
    return batch_encode

# Bind the tokenizer and prompt length once so the DataLoader can invoke the
# collator with just a list of rows.
batch_collate_fn = partial(collator, tokenizer=tokenizer, max_length=args.max_length)

train_dataloader = DataLoader(
    train_dataset,
    shuffle=True,
    collate_fn=batch_collate_fn,
    batch_size=args.batch_size,
)

# set seed before initializing value head for deterministic eval
set_seed(args.seed)

# LoRA adapter configuration for the policy model.
# Bug fix: the policy is a causal LM with a value head, so the task type must
# be CAUSAL_LM — SEQ_CLS (a copy-paste from a reward-model script) targets the
# wrong module set. Also wire in the --lora_alpha / --lora_dropout CLI flags,
# which were parsed but silently ignored in favor of hard-coded values
# (note: the CLI default for lora_alpha is 16, the old hard-coded value was 32).
peft_config = LoraConfig(
    task_type=TaskType.CAUSAL_LM,
    inference_mode=False,
    r=8,
    lora_alpha=args.lora_alpha,
    lora_dropout=args.lora_dropout,
)

# PPO hyper-parameters, all driven by the CLI flags.
ppo_config = PPOConfig(
    steps=args.steps,
    model_name=args.model_name_or_path,
    learning_rate=args.learning_rate,
    log_with=args.log_with,
    batch_size=args.batch_size,
    mini_batch_size=args.mini_batch_size,
    gradient_accumulation_steps=args.gradient_accumulation_steps,
    # Bug fix: this was hard-coded to True, silently ignoring the
    # --optimize_cuda_cache flag (whose default is also True).
    optimize_cuda_cache=args.optimize_cuda_cache,
    early_stopping=args.early_stopping,
    target_kl=args.target_kl,
    ppo_epochs=args.ppo_epochs,
    seed=args.seed,
    init_kl_coef=args.init_kl_coef,
    adap_kl_ctrl=args.adap_kl_ctrl,
)

# Load the policy model with a scalar value head (required by PPO) and attach
# the LoRA adapters so only the adapter weights are trained.
model = AutoModelForCausalLMWithValueHead.from_pretrained(
    args.model_name_or_path,
    peft_config=peft_config,
)
# Mirror the tokenizer's pad choice (eos reused as pad) on the model config.
model.config.pad_token_id = tokenizer.eos_token_id

print("--------------------------------------使用deepspeed加载------------------------------------------------")
# Print a ZeRO-3 memory-requirement estimate for this model.
# NOTE(review): num_gpus_per_node=3 is hard-coded — confirm it matches the
# actual launch topology.
estimate_zero3_model_states_mem_needs_all_live(model, num_gpus_per_node=3, num_nodes=1)

# Wrap the model in a DeepSpeed engine; the engine config comes from the
# DeepSpeed CLI flags added via deepspeed.add_config_arguments above.
model, optimizer, _, _ = deepspeed.initialize(args=args, model=model,
                                              model_parameters=model.parameters())

# NOTE(review): this Adafactor immediately overwrites the optimizer returned
# by deepspeed.initialize, so the DeepSpeed-managed optimizer is discarded.
# Confirm this is intended — driving an engine-external optimizer alongside a
# DeepSpeed engine can break ZeRO optimizer-state partitioning.
optimizer = Adafactor(
    filter(lambda p: p.requires_grad, model.parameters()),  # only trainable (LoRA) params
    scale_parameter=False,
    relative_step=False,
    warmup_init=False,
    lr=ppo_config.learning_rate,
)

# 3. initialize trainer
# No dataset is passed here: batches come from our own train_dataloader below.
ppo_trainer = PPOTrainer(config=ppo_config, model=model,
                         # dataset=train_dataset,
                         tokenizer=tokenizer,
                         optimizer=optimizer)


# Sample a response length uniformly between these bounds for each generation.
output_min_length = 32
output_max_length = args.output_max_length
output_length_sampler = LengthSampler(output_min_length, output_max_length)

# Inference pipeline for the reward model.
sentiment_pipe = pipeline(
    task="sentiment-analysis",
    model=args.reward_model_name_or_path,
    # tokenizer=tokenizer,
    return_token_type_ids=False,
)
# We then define the arguments to pass to the sentiment analysis pipeline.
# We set `return_all_scores` to True to get the score for every label, not just the top one.
# NOTE(review): `return_all_scores=True` is deprecated in recent transformers
# (superseded by `top_k=None`) — confirm the installed version still accepts it.
reward_kwargs = {
    "return_all_scores": True,
    "function_to_apply": "none",  # raw scores, no softmax/sigmoid applied
    "batch_size": 16,
    "truncation": True,  # clip reward inputs to max_length tokens
    "max_length":args.max_length
}


# We then define the arguments to pass to the `generate` function. These arguments
# are passed to the `generate` function of the PPOTrainer, which is a wrapper around
# the `generate` function of the trained model.
generation_kwargs = {
    # "min_length": -1,
    "top_k": 0.0,  # NOTE(review): top_k is conventionally an int; 0 disables top-k filtering
    "top_p": 1.0,  # no nucleus filtering
    "do_sample": True,  # sample rather than greedy-decode
    "pad_token_id": tokenizer.pad_token_id,
    # NOTE(review): presumably an out-of-vocab id so generation never stops on
    # a real EOS and the sampled length is controlled by the length sampler — confirm.
    "eos_token_id": 100_000,
}
# PPO training loop. Each iteration consumes one dataloader batch (one PPO
# step): generate responses, score (query + response) with the reward
# pipeline, then run a PPO optimization step.
# Renamed the loop variable from `epoch` to `step` — it counts batches, not
# passes over the dataset.
for step, batch in tqdm(enumerate(train_dataloader)):
    if step >= ppo_config.total_ppo_epochs:
        break

    question_tensors = batch["input_ids"]

    # generate() expects a list of 1-D prompt tensors, one per example.
    response_tensors = ppo_trainer.generate(
        query_tensor=list(question_tensors),
        return_prompt=False,
        length_sampler=output_length_sampler,
    )
    batch["response"] = tokenizer.batch_decode(response_tensors, skip_special_tokens=True)

    # Compute reward score (using the sentiment analysis pipeline).
    texts = [q + r for q, r in zip(batch["query"], batch["response"])]
    pipe_outputs = sentiment_pipe(texts, **reward_kwargs)
    rewards = [torch.tensor(output[0]["score"] - args.reward_baseline) for output in pipe_outputs]

    # Run PPO step
    stats = ppo_trainer.step(list(question_tensors), response_tensors, rewards)
    ppo_trainer.log_stats(stats, batch, rewards)

    # Bug fix: the original used args.output_dir, which is never defined (the
    # CLI flag is --save_dir), so any checkpoint save raised AttributeError.
    if args.save_freq and step and step % args.save_freq == 0:
        ppo_trainer.save_pretrained(os.path.join(args.save_dir, f"step_{step}"))
