import torch
from numpy import mean
from tqdm import tqdm
from transformers import AutoTokenizer
from trl import PPOTrainer, PPOConfig, AutoModelForCausalLMWithValueHead, create_reference_model,RewardTrainer
from trl.core import respond_to_batch, LengthSampler
from torch.utils.data import Dataset
import torch.nn.utils.rnn as rnn_utils
import json
from transformers import AutoModelForSequenceClassification, AutoTokenizer, pipeline
import random
import torch.nn.functional as F

from baseconf import BASE_DISK

# --- Model & data setup ---------------------------------------------------
# Policy model: GPT-2 Chinese dialogue bot wrapped with a value head for PPO,
# plus a frozen reference copy used for the KL penalty.
# Paths are raw strings: the original plain literals ('...\model_path\...')
# relied on '\m'/'\g'/'\c' not being escape sequences, which raises
# SyntaxWarning on Python 3.12+ and breaks silently for real escapes.
gen_model = AutoModelForCausalLMWithValueHead.from_pretrained(
    BASE_DISK + r':\model_path\gpt2-dialogbot-base-chinese')
model_ref = create_reference_model(gen_model)
tokenizerOne = AutoTokenizer.from_pretrained(
    BASE_DISK + r':\model_path\gpt2-dialogbot-base-chinese',
    padding_side='left')
# BERT-style tokenizers have no EOS token; reuse [SEP] so generation can stop.
tokenizerOne.eos_token_id = tokenizerOne.sep_token_id
# tokenizerOne.max_len_single_sentence = 300  # this GPT-2 model only accepts 300 tokens
ts_texts = ["我喜欢下雨。", "我讨厌他."]
# Reward model: a 2-label (negative/positive) Chinese sentiment classifier.
cls_model = AutoModelForSequenceClassification.from_pretrained(
    BASE_DISK + r":\model_path\c2-roberta-base-finetuned-dianping-chinese", num_labels=2)
tokenizerTwo = AutoTokenizer.from_pretrained(
    BASE_DISK + r":\model_path\c2-roberta-base-finetuned-dianping-chinese")
classifier = pipeline('sentiment-analysis', model=cls_model, tokenizer=tokenizerTwo)
# Smoke test: the classifier should disagree on these two sample sentences.
print(classifier(ts_texts))

# Load the dialogue corpus (expected: a JSON array of conversations, each
# carrying a "turns" list -- see preprocess_conversation).
with open("./data/qa_final.json", "r", encoding="utf-8") as f:
    data = list(json.load(f))


def preprocess_conversation(data, tokenizer=None):
    """Build one PPO query per conversation from its FIRST turn only.

    Each conversation contributes a token-id sequence of the form
    ``[CLS] <first-turn token ids> [SEP]``.

    Args:
        data: iterable of conversations; each must have a ``"turns"`` list
            whose first element carries the utterance under the ``"text"`` key.
        tokenizer: object providing ``cls_token_id``, ``sep_token_id`` and
            ``encode``; defaults to the module-level ``tokenizerOne``
            (parameterized so the function is reusable and testable).

    Returns:
        list[list[int]]: one token-id list per conversation.
    """
    if tokenizer is None:
        tokenizer = tokenizerOne
    sep_id = tokenizer.sep_token_id
    cls_id = tokenizer.cls_token_id
    dialogue_list = []
    for conver in data:
        # Only the opening utterance is used as the prompt; later turns
        # are intentionally ignored.
        start = conver["turns"][0]
        input_ids = [cls_id]
        input_ids += tokenizer.encode(start["text"], add_special_tokens=False)
        input_ids.append(sep_id)
        dialogue_list.append(input_ids)
    return dialogue_list


# Token-id prompts ([CLS]...[SEP], one per conversation) used as PPO queries.
dialogue_list = preprocess_conversation(data)


class MyDataset(Dataset):
    """Thin Dataset over a list of token-id sequences.

    Each item is returned as a freshly-built 1-D tensor so downstream
    collation can pad variable-length sequences.
    """

    def __init__(self, data):
        self.data = data

    def __getitem__(self, index):
        return torch.tensor(self.data[index])

    def __len__(self):
        return len(self.data)


# Dataset handed to PPOTrainer, which builds its training dataloader from it.
mydataset = MyDataset(dialogue_list)


def collate_fn(batch, padding_value=None):
    """Right-pad a batch of 1-D token-id tensors to a common length.

    Args:
        batch: list of 1-D integer tensors of varying lengths.
        padding_value: id used for padding; defaults to the module-level
            tokenizer's [SEP] id, preserving the original behavior.

    Returns:
        torch.Tensor of shape (len(batch), max_len).
    """
    if padding_value is None:
        # NOTE(review): pads with [SEP] (no dedicated PAD id) and pads on the
        # RIGHT, while the tokenizer was created with padding_side='left' --
        # confirm this mismatch is intended.
        padding_value = tokenizerOne.sep_token_id
    return rnn_utils.pad_sequence(batch, batch_first=True, padding_value=padding_value)


# Sampling settings passed to ppo_trainer.generate for every rollout.
generation_kwargs = {
    "min_length": -1,  # no forced minimum generation length
    "top_k": 0.0,  # disable top-k filtering
    "top_p": 1.0,  # disable nucleus filtering => pure sampling
    "do_sample": True,
    "pad_token_id": tokenizerOne.eos_token_id,  # [SEP] doubles as pad (set above)
    "max_new_tokens": 32,

}

# PPO hyperparameters; batch_size must match what the dataloader yields.
config = PPOConfig(
    model_name="gpt2-positive",
    learning_rate=1.41e-5,
    steps=2000,
    batch_size=16,
    ratio_threshold=50,  # skip mini-batches whose PPO ratio exceeds this (loss-spike guard)
)

# Trainer wires together policy, frozen reference model, tokenizer, dataset
# and collator; it exposes .dataloader, .generate and .step used below.
ppo_trainer = PPOTrainer(
    config=config,
    model=gen_model,
    ref_model=model_ref,
    tokenizer=tokenizerOne,
    dataset=mydataset,
    data_collator=collate_fn,
    # max_length=300,
    # max_prompt_length=300,
    # max_target_length=300,
)

output_min_length = 4
output_max_length = 256
# During PPO training the model may self-chat and produce multi-turn replies,
# which cannot be trained away, so the output should be truncated accordingly;
# note the returned response_tensors must then be truncated to match.
output_length_sampler = LengthSampler(output_min_length, output_max_length)  # currently unused

rewards_list = []
# Main PPO loop: sample responses, score them with the sentiment classifier,
# then take one PPO optimization step per batch.
for epoch, batch in tqdm(enumerate(ppo_trainer.dataloader)):
    #### Get response from gpt2
    # The collated batch is a padded 2-D tensor; iterating yields row tensors.
    # Use .clone().long() rather than torch.tensor(t): re-wrapping an existing
    # tensor raises a "copy construct" UserWarning and is the wrong idiom.
    query_tensors = [t.clone().long() for t in batch]

    response_tensors = ppo_trainer.generate(
        query_tensor=query_tensors,  # expects a list of 1-D tensors
        # batch_size=1,  ## adjust according to your memory source
        return_prompt=False,
        # length_sampler=output_length_sampler,
        **generation_kwargs
    )

    responseSet = ppo_trainer.tokenizer.batch_decode(response_tensors, skip_special_tokens=True)
    print(responseSet)

    #### Get reward from sentiment model
    # NOTE(review): output["score"] is the confidence of the *predicted* label,
    # so a confidently-NEGATIVE reply also earns a high reward. To optimize for
    # positivity, the positive-class probability should be used instead -- confirm.
    pipe_outputs = classifier(responseSet)
    rewards = [torch.tensor(output["score"]) for output in pipe_outputs]

    #### Run PPO step
    stats = ppo_trainer.step(query_tensors, response_tensors, rewards)
    print("epoch{}, reward is {}".format(epoch, sum(rewards)))  # fixed "reword" typo
    rewards_list.append(mean(rewards))
