import os
import copy
import logging
from dataclasses import dataclass, field
from typing import Dict, Optional, Sequence, List
import json
import random
from collections import defaultdict
from accelerate import init_empty_weights,infer_auto_device_map,load_checkpoint_in_model,dispatch_model
from sklearn.metrics.pairwise import cosine_distances
from tqdm import tqdm

import torch
import torch.distributed as dist
import transformers

from torch.utils.data import Dataset
from transformers import Trainer, AutoConfig
from transformers import EvalPrediction


from model import LlamaRewardModel

from reward_datasets import TextRewardDataset, reward_data_collactor
from reward_datasets import load_text_score_dataset
from arguments import CustomTrainingArguments
from trainer import RewardModelTrainer, compute_metrics

from utils import print_rank_0, set_reward_tokenizer, merge_json_or_jsonl_data
from utils import DEFAULT_PAD_TOKEN, DEFAULT_BOS_TOKEN, DEFAULT_EOS_TOKEN, DEFAULT_UNK_TOKEN
from utils import QUERY_PROMPT, SEP_TOKEN, STRING_SEP, INFER_TMP_FILE

import argparse


def preprocess_response(response):
    """Truncate an LLM generation at the first spurious follow-up turn.

    The model sometimes keeps generating past the current response and
    emits a new "\nHuman" prompt; everything from that marker onward is
    discarded and the kept prefix is stripped of surrounding whitespace.
    """
    marker = "\nHuman"
    if marker in response:
        response = response.partition(marker)[0].strip()
    return response


def _load_json(path):
    """Load and return the parsed contents of a JSON file."""
    with open(path) as f:
        return json.load(f)


def _load_data(source_path, reference_path, gold_path, sample_names):
    """Read the source/reference/gold JSON files into per-query dicts.

    Returns ``(dict_sample, dict_reference)``, both keyed by the stringified
    ``query_id``.  ``dict_sample`` holds the query text, the cleaned sampled
    responses (one per name in *sample_names*) and the golden response;
    ``dict_reference`` holds the query, the cleaned reference responses and
    their precomputed reward scores.
    """
    dict_sample = defaultdict(dict)
    dict_reference = defaultdict(dict)

    for item in _load_json(source_path):
        query_id = str(item['query_id'])
        dict_sample[query_id]['query'] = item['query']
        for name in sample_names:
            dict_sample[query_id][name] = preprocess_response(item[name])

    for item in _load_json(reference_path):
        query_id = str(item['query_id'])
        for text_id, (text, score) in enumerate(zip(item['text'], item['scores'])):
            # Reference records store "query<SEP>response"; split once so a
            # SEP_TOKEN inside the response body is preserved.
            query, response = text.split(SEP_TOKEN, 1)
            dict_reference[query_id]['query'] = query
            dict_reference[query_id][f'sample_{text_id}'] = preprocess_response(response)
            dict_reference[query_id][f'sample_{text_id}_score'] = score

    for item in _load_json(gold_path):
        query_id = str(item['query_id'])
        dict_sample[query_id]['golden'] = preprocess_response(item['golden'])

    return dict_sample, dict_reference


def _setup_model(model_name_or_path):
    """Load the reward model and tokenizer; returns ``(model, tokenizer, device)``.

    The model is placed on GPU when available and cast to fp16.
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    print_rank_0(f"Begin loading model from {model_name_or_path}")
    model = LlamaRewardModel.from_pretrained(model_name_or_path)
    print_rank_0(f"Finished loading model from {model_name_or_path}")

    model.is_parallelizable = True
    model.model_parallel = True

    tokenizer = transformers.AutoTokenizer.from_pretrained(
        model_name_or_path,
        model_max_length=512,
        padding_side='right',
        truncation_side='left',
        use_fast=False,
    )
    model, tokenizer = set_reward_tokenizer(model=model, tokenizer=tokenizer)
    model.to(dtype=torch.float16, device=device)
    return model, tokenizer, device


def _forward(model, tokenizer, device, text):
    """Run the reward model on a single text; returns the raw model outputs.

    The outputs dict is expected to expose 'rm_embeddings' (pooled hidden
    state) and 'rm_logits' (scalar reward) — see LlamaRewardModel.
    """
    input_ids = torch.tensor(tokenizer(text).input_ids).unsqueeze(0).to(device)
    return model(
        input_ids=input_ids,
        padding_side='right',
        pooling_type='last',
    )


def align(args):
    """Build an alignment dataset pairing sampled responses with their
    nearest reference responses in reward-model embedding space.

    For each query in ``args.source_file`` one sampled response is chosen at
    random, scored by the reward model, and matched against the responses in
    ``args.reference_file`` by cosine distance between pooled embeddings.
    Records of the form {"text": [...5 entries...], "scores": [...], "type":
    "tlpo"} are written as JSON to ``args.output_path``.

    Args:
        args: parsed CLI namespace with ``source_file``, ``reference_file``,
            ``gold_file``, ``model_path`` and ``output_path`` attributes.
    """
    sample_names_1 = ['sample_0', 'sample_1', 'sample_2', 'sample_3']
    sample_names_2 = ['sample_0', 'sample_1']

    dict_sample, dict_reference = _load_data(
        args.source_file, args.reference_file, args.gold_file, sample_names_1)

    model, tokenizer, device = _setup_model(args.model_path)

    # Find the nearest reference embedding per query and build the dataset.
    with torch.no_grad():
        score_data = []
        for query_id in tqdm(dict_sample.keys()):
            sample_name = random.choice(sample_names_1)
            outputs = _forward(
                model, tokenizer, device, dict_sample[query_id][sample_name])
            # Hoist the sample embedding out of the inner loop — it does not
            # change across reference candidates.
            sample_emb = outputs['rm_embeddings'].detach().cpu().numpy()

            # cosine_distances lies in [0, 2], so 2 is a safe upper bound.
            min_distance = 2
            nearest_res = None
            nearest_score = None
            for ref_name in sample_names_2:
                ref_outputs = _forward(
                    model, tokenizer, device, dict_reference[query_id][ref_name])
                ref_emb = ref_outputs['rm_embeddings'].detach().cpu().numpy()
                # cosine_distances returns a 1x1 matrix; take the scalar.
                distance = cosine_distances(sample_emb, ref_emb)[0][0]
                if distance < min_distance:
                    nearest_res = dict_reference[query_id][ref_name]
                    nearest_score = float(
                        ref_outputs['rm_logits'].squeeze().detach().cpu().numpy())
                    min_distance = distance

            # Text list layout: [train_0, train_1, sample, positive, negative].
            # NOTE(review): '<sep>' is hard-coded here while SEP_TOKEN is used
            # for splitting above — confirm they are the same string.
            data_point = {
                "text": [dict_sample[query_id]['query']+'<sep>'+dict_reference[query_id]['sample_0'],
                         dict_sample[query_id]['query']+'<sep>'+dict_reference[query_id]['sample_1'],
                         dict_sample[query_id]['query']+'<sep>'+dict_sample[query_id][sample_name],
                         dict_reference[query_id]['query']+'<sep>'+nearest_res,
                         dict_sample[query_id]['query']+'<sep>'+dict_sample[query_id]['golden']],
                "scores": [dict_reference[query_id]['sample_0_score'],
                           dict_reference[query_id]['sample_1_score'],
                           float(outputs['rm_logits'].squeeze().detach().cpu().numpy()),
                           nearest_score,
                           -1],
                "type": "tlpo"
            }
            score_data.append(data_point)

    with open(args.output_path, 'w') as f:
        json.dump(score_data, f, ensure_ascii=False, indent=2)


if __name__ == "__main__":
    # CLI entry point: all five paths are plain string flags.
    parser = argparse.ArgumentParser()
    for flag in ("source_file", "reference_file", "gold_file",
                 "model_path", "output_path"):
        parser.add_argument(f"--{flag}", type=str)
    align(parser.parse_args())
