
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer
import json
import os
import sys
import argparse
from tqdm import tqdm
import numpy as np
import datasets
from datasets import load_dataset, load_from_disk
import pandas as pd

from vllm import LLM, SamplingParams
import re

sys.path.append(
    os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
from judge_prompt.self_rewarding import SELF_REWARDING_POINTWISE_JUDGE_TEMPLATE


# ---------------------------------------------------------------------------
# Command-line interface for the self-rewarding judging script.
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument("--generation_file", type=str, required=True, help="Path to the output generation file") # "train-00001-of-00002.parquet"
parser.add_argument("--device", type=str, required=True, help="GPU index")

parser.add_argument("--generative_pm_model", type=str, default="/home/zhaiyuanzhao/llm/Meta-Llama-3-8B-Instruct", help="Path to generative preference model")
parser.add_argument("--output_dir", type=str, default="datasets/Llama3_ultrafeedback_self-rewarding_json/", help="Path to output directory")


args = parser.parse_args()

print(args)

# NOTE(review): the dataset directory is hard-coded and prepended to the
# user-supplied --generation_file; consider promoting it to a CLI argument.
args.generation_file = "/home/zhaiyuanzhao/llm/dataset/princeton-nlp/llama3-ultrafeedback-armorm/data/" + args.generation_file
# Must be set before the vLLM engine is constructed below so it only sees
# the requested GPU(s).
os.environ['CUDA_VISIBLE_DEVICES'] = args.device

# Pointwise judge prompt template (imported from judge_prompt.self_rewarding).
judge_template = SELF_REWARDING_POINTWISE_JUDGE_TEMPLATE

# Output path: "<parquet basename>_self-rewarding.json" inside --output_dir.
save_file_name = os.path.basename(args.generation_file).split('.parquet')[0] + "_self-rewarding.json"
save_file_name = os.path.join(args.output_dir, save_file_name)

def extract_score_list(generations):
    """Extract an integer judge score from each vLLM generation.

    Args:
        generations: sequence of vLLM request outputs; each item's
            ``outputs[0].text`` holds the judge model's completion, which is
            expected to contain a line like ``"Score: 4"``.

    Returns:
        list[int]: one entry per generation — the score when it lies in
        the valid 1..5 range, or a sentinel:
            -1  a "Score: N" match was found but N is outside 1..5,
            -2  no "Score: N" pattern could be found at all.
    """
    # Compile once, outside the loop, instead of passing the raw pattern
    # string to re.search on every iteration.
    score_pattern = re.compile(r'Score: (\d+)')
    score_list = []
    for generation in generations:
        text = generation.outputs[0].text
        match = score_pattern.search(text)
        if match is None:
            score_list.append(-2)  # judge produced no parsable score
            continue
        score = int(match.group(1))
        # Valid pointwise scores are 1-5; anything else is out of range.
        score_list.append(score if 1 <= score <= 5 else -1)
    return score_list


# Alternative loading path via HF `datasets` (kept for reference, unused):
# data_files = {
#     'train': ['data/train-00000-of-00002.parquet', 'data/train-00001-of-00002.parquet'],
#     'test':['data/test-00000-of-00001.parquet']
# }
# train_dataset= load_dataset('parquet', data_dir=args.generation_file, data_files=data_files)

# Load the generation parquet into a list of per-row dicts.
df = pd.read_parquet(args.generation_file)
all_data = df.to_dict('records')[0:]  # NOTE(review): "[0:]" is a no-op full slice

# all_data = all_data[:10]


# NOTE(review): `prompts` and `candidates_responses` are never referenced
# again in this file — the main loop re-reads these fields from each record.
prompts = [data["prompt"] for data in all_data]
candidates_responses = [data["all_generated_responses"] for data in all_data]



# Load the generative preference (judge) model with vLLM.
# temperature=0 gives greedy, deterministic judging.
llm = LLM(model=args.generative_pm_model)
tokenizer = llm.get_tokenizer()
sampling_params = SamplingParams(temperature=0, 
                                 max_tokens=256)

# Resume support: if a previous run left a partial output file, load it and
# continue from where it stopped.
if os.path.exists(save_file_name):
    with open(save_file_name, 'r') as f:
        all_data_self_pair_judge = json.load(f)
else:
    all_data_self_pair_judge = []

# Number of records already judged in a previous run (skipped in the loop).
num_saved = len(all_data_self_pair_judge)


def _save_checkpoint():
    """Persist the accumulated judged pairs to `save_file_name` as JSON."""
    with open(save_file_name, 'w') as f:
        json.dump(all_data_self_pair_judge, f, indent=4)
    print(f"Binarized outputs saved to {save_file_name}")


# Judge every candidate response pointwise, then binarize each record into a
# (chosen, rejected) preference pair.
for index, data in enumerate(tqdm(all_data)):
    # Resume support: records below `num_saved` were judged in a prior run.
    if index < num_saved:
        continue

    prompt = data["prompt"]
    candidates = data["all_generated_responses"]

    # One judge prompt per candidate, rendered through the tokenizer's chat
    # template so the judge model sees its expected conversation format.
    judge_prompt_list = [
        tokenizer.apply_chat_template(
            [{'role': 'user',
              'content': judge_template.format(query=prompt, response=candidate)}],
            tokenize=False,
            add_generation_prompt=True)
        for candidate in candidates
    ]

    generations = llm.generate(judge_prompt_list, sampling_params, use_tqdm=False)
    score_list = extract_score_list(generations)

    # Sentinels (-1 out-of-range, -2 unparsable) are excluded from selection.
    valid_scores = [score for score in score_list if score > 0]

    if not valid_scores:
        # No candidate received a valid score: fall back to a degenerate pair
        # using the first response on both sides (keeps record counts aligned).
        chosen_idx = 0
        rejected_idx = 0
    else:
        # .index() picks the FIRST candidate attaining the max/min score, so
        # ties are broken by position.
        chosen_idx = score_list.index(max(valid_scores))
        rejected_idx = score_list.index(min(valid_scores))

    chosen = [
        {"role": "user", "content": data["prompt"]},
        {"role": "assistant", "content": data["all_generated_responses"][chosen_idx]},
    ]
    rejected = [
        {"role": "user", "content": data["prompt"]},
        {"role": "assistant", "content": data["all_generated_responses"][rejected_idx]},
    ]

    all_data_self_pair_judge.append({
        'prompt_id': data['prompt_id'],
        # .tolist(): these fields presumably come back from parquet as numpy
        # arrays — TODO confirm against the dataset schema.
        'prompt': data['prompt'],
        'all_generated_responses': data['all_generated_responses'].tolist(),
        "all_self_rewarding_scores": score_list,
        'chosen': chosen,
        'rejected': rejected,
        'all_rm_scores': data['all_rm_scores'].tolist(),
    })

    # Periodic checkpoint so long runs can be resumed after interruption.
    if index % 100 == 0:
        _save_checkpoint()

# Final save: without this, every record judged after the last index
# divisible by 100 would be lost when the loop finishes.
_save_checkpoint()

