import os
import json
import torch
import transformers
from transformers.trainer_pt_utils import LabelSmoother
import math
import json

import argparse

from fastchat.conversation import get_conv_template
IGNORE_TOKEN_ID = LabelSmoother.ignore_index


def get_batch_logps(
    logits: torch.FloatTensor,
    labels: torch.LongTensor,
    average_log_prob: bool = False,
    label_pad_token_id: int = -100,
    is_encoder_decoder: bool = False,
) -> torch.FloatTensor:
    """Compute the log probabilities of the given labels under the given logits.

    Args:
        logits: Logits of the model (unnormalized). Shape: (batch_size, sequence_length, vocab_size)
        labels: Labels for which to compute the log probabilities. Label tokens with a value of label_pad_token_id are ignored. Shape: (batch_size, sequence_length)
        average_log_prob: If True, return the average log probability per (non-masked) token. Otherwise, return the sum of the log probabilities of the (non-masked) tokens.
        label_pad_token_id: Label value to treat as "ignore" (masked out of the result).
        is_encoder_decoder: If False (decoder-only model), shift so that logits at
            position t are scored against the label at position t+1.

    Returns:
        A tensor of shape (batch_size,) containing the average/sum log probabilities of the given labels under the given logits.
    """
    if logits.shape[:-1] != labels.shape:
        raise ValueError("Logits (batch and sequence length dim) and labels must have the same shape.")

    if not is_encoder_decoder:
        # Decoder-only: logits[t] predict labels[t+1], so drop the first label
        # and the last logit to align them. clone() protects the caller's tensor.
        labels = labels[:, 1:].clone()
        logits = logits[:, :-1, :]
    else:
        # Bug fix: clone here too, otherwise the pad-token replacement below
        # mutates the caller's labels tensor in place.
        labels = labels.clone()
    loss_mask = labels != label_pad_token_id

    # Replace pad positions with a valid vocab index (0) so gather() is safe;
    # their contribution is zeroed out by loss_mask below.
    labels[labels == label_pad_token_id] = 0

    per_token_logps = torch.gather(logits.log_softmax(-1), dim=2, index=labels.unsqueeze(2)).squeeze(2)

    if average_log_prob:
        # NOTE: a row with zero unmasked tokens would divide by zero (NaN),
        # same as the original behavior.
        return (per_token_logps * loss_mask).sum(-1) / loss_mask.sum(-1)
    else:
        return (per_token_logps * loss_mask).sum(-1)
    

def get_q_value(conv, policy_model, reference_model, tokenizer, thought_action_p) -> torch.FloatTensor:
    """Score a candidate response under a DPO-style implicit reward.

    The q-value is the average per-token (policy - reference) log probability
    of the response tokens, conditioned on the conversation so far.

    Args:
        conv: fastchat Conversation holding the dialogue context. Left unchanged
            on return (the candidate is appended only temporarily).
        policy_model: DPO-trained causal LM.
        reference_model: frozen reference causal LM (assumed on the same device
            as the policy model, matching how this script loads both).
        tokenizer: tokenizer shared by both models.
        thought_action_p: candidate response string to score.

    Returns:
        A tensor of shape (1,) with the average response-token log-ratio.
    """
    prompt = conv.get_prompt()
    prompt_tokens = tokenizer(prompt, return_tensors="pt")

    # Temporarily append the candidate so the template renders prompt+response.
    conv.append_message(conv.roles[1], thought_action_p)
    prompt_response = conv.get_prompt()
    # Bug fix: undo the append. Without this, repeated calls on the same conv
    # (as in the trajectory loop below) accumulate duplicate assistant turns.
    conv.messages.pop()

    prompt_response_tokens = tokenizer(prompt_response, return_tensors="pt")
    labels = prompt_response_tokens.input_ids.clone()
    # Mask the prompt portion so only response tokens contribute to the logps.
    labels[0][:len(prompt_tokens['input_ids'][0])] = IGNORE_TOKEN_ID

    with torch.no_grad():
        policy_logits = policy_model(
                prompt_response_tokens["input_ids"].to(policy_model.device),
                attention_mask=prompt_response_tokens["attention_mask"].to(policy_model.device),
            ).logits
        reference_logits = reference_model(
                prompt_response_tokens["input_ids"].to(policy_model.device),
                attention_mask=prompt_response_tokens["attention_mask"].to(policy_model.device),
            ).logits

        # DPO implicit reward: log-ratio of policy to reference per token.
        logits = policy_logits - reference_logits

        logps_value_p = get_batch_logps(
            logits,
            labels.to(policy_model.device),
            average_log_prob=True,
            is_encoder_decoder=False,
            label_pad_token_id=IGNORE_TOKEN_ID,
        )
    return logps_value_p


def parse_args(argv=None):
    """Parse command-line options for this scoring script.

    Args:
        argv: Optional list of argument strings; defaults to sys.argv[1:]
            (argparse's default), so existing callers are unaffected.

    Returns:
        argparse.Namespace with `device` and `policy_model_name_or_path`.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--device', type=str, default="cuda:7")
    parser.add_argument('--policy_model_name_or_path', type=str, default="/home/zhaiyuanzhao/FastChat/checkpoints-webshop-Phi-1_5-StepLevelVerifier-Phi3-iteration1/epoch2-chosen")
    return parser.parse_args(argv)

args = parse_args()
device = args.device
# Target context window; if it exceeds the base model's trained length,
# linear RoPE scaling is enabled below.
model_max_length=4096
policy_model_name_or_path=args.policy_model_name_or_path
# Frozen DPO reference model checkpoint (hard-coded local path).
reference_model_name_or_path="/home/zhaiyuanzhao/llm/phi-1_5"
conv_template="phi3"


# Tokenizer comes from the *policy* checkpoint; the slow tokenizer is forced.
tokenizer = transformers.AutoTokenizer.from_pretrained(
    policy_model_name_or_path,
    padding_side='right',
    use_fast=False,
)

config = transformers.AutoConfig.from_pretrained(
    policy_model_name_or_path,
    cache_dir=None,
    # trust_remote_code=trust_remote_code,
)
# Enable linear RoPE scaling when the requested context exceeds the model's
# native max_position_embeddings.
orig_ctx_len = getattr(config, "max_position_embeddings", None)
if orig_ctx_len and model_max_length > orig_ctx_len:
    scaling_factor = float(math.ceil(model_max_length / orig_ctx_len))
    config.rope_scaling = {"type": "linear", "factor": scaling_factor}
# No generation happens in this script, so the KV cache is disabled.
config.use_cache = False

# Load the DPO policy and the frozen reference model onto the same device.
dpo_policy_model = transformers.AutoModelForCausalLM.from_pretrained(
    policy_model_name_or_path,
    config=config,
    cache_dir=None,
    # trust_remote_code=trust_remote_code,
    # attn_implementation="flash_attention_2",
).to(device)

# NOTE(review): the reference model is loaded with the *policy* config —
# presumably the two checkpoints share an architecture; confirm.
dpo_reference_model = transformers.AutoModelForCausalLM.from_pretrained(
    reference_model_name_or_path,
    config=config,
    cache_dir=None,
    # trust_remote_code=trust_remote_code,
    # attn_implementation="flash_attention_2",
).to(device)


# Candidate trajectory directories: the second assignment overrides the
# first, so only the last path is actually used.
# TODO(review): delete the dead first assignment if the toggle is no longer needed.
trajectories_path = "/home/zhaiyuanzhao/ETO/outputs-test-Phi3_step-dpo-epoch2-61/Phi-3-1/webshop"
trajectories_path = '/home/zhaiyuanzhao/ETO/outputs/outputs-test-Phi3-30-54/Phi-3-mini-4k-instruct/webshop'

success_num=0  # count of reward==1 trajectories seen (all files, not only the scored ones)
failure_num=0  # count of reward==0 trajectories seen
# (dead code) earlier counting-only pass, kept commented out:
# for file in os.listdir(trajectories_path):
#     if not file.endswith('json'):
#         continue
#     with open(os.path.join(trajectories_path, file)) as f:
#         trajectory=json.load(f)   
#     if trajectory['meta']['reward']==1:
#         success_num+=1
#     if trajectory['meta']['reward']==0:
#         failure_num+=1

# Per-step q-values pooled across the first 20 success / failure trajectories.
success_q_value_list = []
failure_q_value_list = []

def _trajectory_q_values(trajectory):
    """Replay one trajectory and return the q-value of each agent turn.

    Seeds a fresh conversation with the first three turns (task prompt, first
    agent reply, first environment observation), then scores every subsequent
    agent ('gpt') turn before appending it and the following environment turn
    to the running context.
    """
    conv = get_conv_template(conv_template)
    turns = trajectory['conversations']
    conv.append_message(conv.roles[0], turns[0]['value'])
    conv.append_message(conv.roles[1], turns[1]['value'])
    conv.append_message(conv.roles[0], turns[2]['value'])
    q_values = []
    turn_idx = 3
    while turn_idx < len(turns):
        assert turns[turn_idx]['from'] == 'gpt'
        q_value = get_q_value(conv, dpo_policy_model, dpo_reference_model,
                              tokenizer, turns[turn_idx]['value'])
        q_values.append(q_value.item())
        conv.append_message(conv.roles[1], turns[turn_idx]['value'])
        conv.append_message(conv.roles[0], turns[turn_idx + 1]['value'])
        turn_idx += 2
    return q_values


# Score the first 20 successful and first 20 failed trajectories; keep
# counting the remaining ones so success_num/failure_num stay totals.
for file in os.listdir(trajectories_path):
    if not file.endswith('json'):
        continue
    with open(os.path.join(trajectories_path, file)) as f:
        trajectory = json.load(f)
    reward = trajectory['meta']['reward']
    if reward == 1:
        success_num += 1
        if success_num > 20:
            continue
        success_q_value_list.extend(_trajectory_q_values(trajectory))
    if reward == 0:
        failure_num += 1
        if failure_num > 20:
            continue
        failure_q_value_list.extend(_trajectory_q_values(trajectory))

# Persist the pooled per-step q-values as JSON for offline analysis.
for output_name, q_values in (
    ('success_q_list.json', success_q_value_list),
    ('failure_q_value_list.json', failure_q_value_list),
):
    with open(output_name, 'w') as out_file:
        json.dump(q_values, out_file)
    
        
