
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer
import json
import os
import sys
import argparse
from tqdm import tqdm
import numpy as np
import datasets
from datasets import load_dataset, load_from_disk
import pandas as pd

from vllm import LLM, SamplingParams

sys.path.append(
    os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
from judge_prompt.LLMBar import *
from judge_prompt.offsetbias import OffSET_JUDGE_TEMPLATE


def softmax(x1, x2):
    """Return the two-way softmax of a pair of scalar logits.

    Used to turn the judge model's log-probabilities for the verdict
    tokens " A" and " B" into a probability that A beats B.

    Args:
        x1: First logit (e.g. logprob of token " A").
        x2: Second logit (e.g. logprob of token " B").

    Returns:
        np.ndarray of shape (2,) with non-negative entries summing to 1;
        index 0 corresponds to x1, index 1 to x2.
    """
    x = np.array([x1, x2], dtype=np.float64)
    # Subtract the max before exponentiating so large logits cannot
    # overflow in np.exp (the softmax value is unchanged by the shift).
    exp_x = np.exp(x - x.max())
    return exp_x / exp_x.sum()


# --- Command-line arguments -------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument("--generation_file", type=str, default="datasets/Llama3_ultrafeedback_self-preferring_json/test-00000-of-00001_self-preferring.json", help="Path to the output generation file")
# NOTE: type=str, so the default is a string too (argparse does not apply
# `type` to defaults; the old `default=0` worked only because of str() below).
parser.add_argument("--device", type=str, default="0", help="GPU index")
parser.add_argument("--judge_template", type=str, default="LLMBar_JUDGE_TEMPLATE_PAIRWISE_REFERENCE")

parser.add_argument("--generative_pm_model", type=str, default="/home/zhaiyuanzhao/llm/Meta-Llama-3-8B-Instruct", help="Path to generative preference model")
parser.add_argument("--output_dir", type=str, default="datasets/Llama3_ultrafeedback_self-preferring_pairwise_reference_json/", help="Path to output directory")


args = parser.parse_args()

print(args)

# Pin this process to the requested GPU. Must happen before the vLLM engine
# is constructed further down.
os.environ['CUDA_VISIBLE_DEVICES'] = str(args.device)

# Resolve the judge-prompt template by name; only the pairwise-reference
# LLMBar template is supported so far.
if args.judge_template == "LLMBar_JUDGE_TEMPLATE_PAIRWISE_REFERENCE":
    judge_template = LLMBar_JUDGE_TEMPLATE_PAIRWISE_REFERENCE
else:
    raise NotImplementedError

# BUG FIX: the old code stripped only a '.parquet' suffix, so for the (default)
# JSON input the extension survived and the output was named
# '<name>.json_self-preferring.json'. splitext strips whatever extension is
# present (.json or .parquet alike).
save_file_name = os.path.splitext(os.path.basename(args.generation_file))[0] + "_self-preferring.json"
save_file_name = os.path.join(args.output_dir, save_file_name)



# --- Input data -------------------------------------------------------------
# The upstream stage emits a JSON list of records. (A parquet reader used to
# live here; the pipeline now consumes JSON directly.)
with open(args.generation_file, 'r', encoding='utf-8') as fin:
    all_data = json.load(fin)

# Convenience views over the loaded records.
prompts = [record["prompt"] for record in all_data]
candidates_responses = [record["all_generated_responses"] for record in all_data]




# --- Judge model ------------------------------------------------------------
# Build the generative preference model ("judge") with vLLM.
llm = LLM(model=args.generative_pm_model)
tokenizer = llm.get_tokenizer()
# Greedy decoding of exactly one token: we only need the top-20 logprobs of
# that first generated token to read off P(" A") vs P(" B").
sampling_params = SamplingParams(temperature=0, 
                                 max_tokens=1, 
                                 logprobs=20)

# Token ids of the verdict tokens " A" / " B" (the leading space matters for
# Llama-style tokenizers). Assumes each encodes to a single token — only the
# first id is kept; TODO confirm for the configured tokenizer.
token_index_A = tokenizer.encode_plus(" A", add_special_tokens=False)['input_ids'][0]
token_index_B = tokenizer.encode_plus(" B", add_special_tokens=False)['input_ids'][0]

# Resume support: reload any previously saved output.
# NOTE(review): num_saved is hard-coded to 0 and all_data_self_pair_judge is
# never read afterwards, so resuming is effectively a no-op — confirm whether
# num_saved should be len(all_data_self_pair_judge).
if os.path.exists(save_file_name):
    with open(save_file_name, 'r') as f:
        all_data_self_pair_judge = json.load(f)
else:
    all_data_self_pair_judge = []

num_saved = 0



# --- Main loop: pairwise judging of every candidate pair per prompt ---------
for index, data in enumerate(tqdm(all_data)):

    # Pick reference chosen/rejected responses from the earlier self-pair
    # scores (mean pairwise win-rate per candidate; higher = preferred).
    all_self_pair_scores = np.array(data['all_self_pair_scores']).mean(axis=-1)
    all_generated_responses = data["all_generated_responses"]
    sorted_indices = np.argsort(all_self_pair_scores)[::-1]  # best first
    max_two_indices = sorted_indices[:2]   # two highest-scoring candidates
    min_two_indices = sorted_indices[-2:]  # two lowest-scoring candidates

    # Reference "chosen": the SHORTER of the two best responses
    # (presumably to counter the judge's length bias — TODO confirm).
    if len(all_generated_responses[max_two_indices[0]]) < len(all_generated_responses[max_two_indices[1]]):
        reference_chosen_idx = max_two_indices[0]
    else:
        reference_chosen_idx = max_two_indices[1]

    # Reference "rejected": the single lowest-scoring response.
    reference_rejected_idx = min_two_indices[1]

    reference_chosen = data['all_generated_responses'][reference_chosen_idx]
    reference_rejected = data['all_generated_responses'][reference_rejected_idx]

    # Resume: skip entries already judged in a previous run
    # (num_saved is currently always 0 — see note at its definition).
    if index < num_saved:
        continue

    prompt = data["prompt"]
    candidates = data["all_generated_responses"]
    num_candidates = len(candidates)
    # pair_scores[i, j] = estimated probability that candidate i beats j.
    pair_scores = np.zeros((num_candidates, num_candidates))
    np.fill_diagonal(pair_scores, 0.5)  # a candidate ties with itself

    for sampling_index1 in range(num_candidates):
        for sampling_index2 in range(sampling_index1 + 1, num_candidates):
            # Judge each unordered pair in BOTH orders (A/B swapped) so the
            # judge's position bias cancels out on average.
            judge_prompt_forward = tokenizer.apply_chat_template([{'role': 'system', 'content': SYSTEM_PROMPT}, 
                                                {'role': 'user', 'content': judge_template.format(prompt=prompt, response_A=candidates[sampling_index1], response_B=candidates[sampling_index2], \
                                                                                                  reference_chosen=reference_chosen, reference_rejected=reference_rejected)}], tokenize=False, add_generation_prompt=True)
            # Steer the model so its very next token is the verdict letter.
            judge_prompt_forward += 'RESPONSE'

            judge_prompt_backward = tokenizer.apply_chat_template([{'role': 'system', 'content': SYSTEM_PROMPT}, 
                                                {'role': 'user', 'content': judge_template.format(prompt=prompt, response_A=candidates[sampling_index2], response_B=candidates[sampling_index1], \
                                                                                                  reference_chosen=reference_chosen, reference_rejected=reference_rejected)}], tokenize=False, add_generation_prompt=True)
            judge_prompt_backward += 'RESPONSE'

            judge_prompt_list = [judge_prompt_forward, judge_prompt_backward]

            generations = llm.generate(judge_prompt_list, sampling_params, use_tqdm=False)

            # Read P(" A") vs P(" B") from the first generated token's
            # top-20 logprobs. If a verdict token is absent from the top-20
            # (KeyError) or the output is malformed (IndexError /
            # AttributeError), fall back to a tie. The old bare `except:`
            # also swallowed KeyboardInterrupt/SystemExit; keep it narrow.
            try:
                logprob_A_forward = generations[0].outputs[0].logprobs[0][token_index_A].logprob
                logprob_B_forward = generations[0].outputs[0].logprobs[0][token_index_B].logprob
                score_A = softmax(logprob_A_forward, logprob_B_forward)[0]

                logprob_A_backward = generations[1].outputs[0].logprobs[0][token_index_A].logprob
                logprob_B_backward = generations[1].outputs[0].logprobs[0][token_index_B].logprob
                score_B = softmax(logprob_A_backward, logprob_B_backward)[1]
            except (KeyError, IndexError, AttributeError):
                score_A = 0.5
                score_B = 0.5

            # Symmetrized win probability: average over both orderings.
            mean_score = (score_A + score_B) / 2
            pair_scores[sampling_index1, sampling_index2] = mean_score
            pair_scores[sampling_index2, sampling_index1] = 1 - mean_score

    # Best / worst candidate by average win-rate over all opponents.
    mean_win_rates = pair_scores.mean(axis=-1)
    chosen_idx = np.argmax(mean_win_rates)
    rejected_idx = np.argmin(mean_win_rates)

    # Store chosen/rejected as chat-formatted (user, assistant) turn pairs.
    chosen = [
        {"role": "user", "content": data["prompt"]},
        {"role": "assistant", "content": data["all_generated_responses"][chosen_idx]},
    ]
    rejected = [
        {"role": "user", "content": data["prompt"]},
        {"role": "assistant", "content": data["all_generated_responses"][rejected_idx]},
    ]

    data['chosen_pairwise_reference'] = chosen
    data['rejected_pairwise_reference'] = rejected
    data['all_self_pair_scores_pairwise_reference'] = pair_scores.tolist()

# Persist the fully annotated records.
with open(save_file_name, 'w') as f:
    json.dump(all_data, f, indent=4)
print(f"Binarized outputs saved to {save_file_name}")

        # # Convert the data to Hugging Face datasets format
        # dataset = datasets.Dataset.from_list(all_data_self_pair_judge)
        # dataset.save_to_disk(os.path.join(args.output_dir))
        # print(f"Binarized dataset saved to {os.path.join(args.output_dir)}")

