"""Send a test message."""
import argparse
import json
import sys

import requests

from tqdm import tqdm

from fastchat.model.model_adapter import get_conversation_template
from fastchat.conversation import get_conv_template

import numpy as np

sys.path.insert(0, "/home/zhaiyuanzhao/SimPO")
from judge_prompt.bias_reflection import *


def main(reflection_prompt):
    """Send ``reflection_prompt`` to a FastChat worker and stream the reply.

    Connection, model, and generation settings are read from the module-level
    ``args`` namespace (parsed in ``__main__``). The worker address is taken
    from ``--worker-address`` when given, otherwise resolved by asking the
    controller which worker serves ``--model-name``.

    Returns:
        The final generated text as a string ("" when the worker streams
        nothing back), or ``None`` when no worker is available.
    """
    model_name = args.model_name

    if args.worker_address:
        worker_addr = args.worker_address
    else:
        # Ask the controller to refresh its worker list, then resolve the
        # address of a worker that serves the requested model.
        controller_addr = args.controller_address
        requests.post(controller_addr + "/refresh_all_workers")
        ret = requests.post(controller_addr + "/list_models")
        models = ret.json()["models"]
        models.sort()
        print(f"Models: {models}")

        ret = requests.post(
            controller_addr + "/get_worker_address", json={"model": model_name}
        )
        worker_addr = ret.json()["address"]
        print(f"worker_addr: {worker_addr}")

    if worker_addr == "":
        print(f"No available workers for {model_name}")
        return

    # Build the conversation: an explicit template name wins over the
    # model-name-based lookup.
    if args.conv_template is not None:
        conv = get_conv_template(args.conv_template)
    else:
        conv = get_conversation_template(model_name)

    # SYSTEM_PROMPT is star-imported from judge_prompt.bias_reflection.
    conv.system_message = SYSTEM_PROMPT

    conv.append_message(conv.roles[0], reflection_prompt)
    conv.append_message(conv.roles[1], None)
    prompt = conv.get_prompt()

    headers = {"User-Agent": "FastChat Client"}
    gen_params = {
        "model": model_name,
        "prompt": prompt,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "stop": conv.stop_str,
        "stop_token_ids": conv.stop_token_ids,
        "echo": False,
    }
    response = requests.post(
        worker_addr + "/worker_generate_stream",
        headers=headers,
        json=gen_params,
        stream=True,
    )

    print(f"{conv.roles[0]}: {reflection_prompt}")
    print(f"{conv.roles[1]}: ", end="")
    # Fix: initialize before the loop so an empty stream does not raise
    # UnboundLocalError at `return output`.
    output = ""
    prev = 0
    # Each NUL-delimited chunk carries the full text so far; print only the
    # newly generated suffix to get incremental streaming output.
    for chunk in response.iter_lines(decode_unicode=False, delimiter=b"\0"):
        if chunk:
            data = json.loads(chunk.decode())
            output = data["text"].strip()
            print(output[prev:], end="", flush=True)
            prev = len(output)
    print("")
    return output


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--controller-address", type=str, default="http://0.0.0.0:21001"
    )
    parser.add_argument("--worker-address", type=str)
    parser.add_argument("--model-name", type=str, default="Mistral-SFT")
    parser.add_argument("--temperature", type=float, default=0.0)
    parser.add_argument("--max-new-tokens", type=int, default=5120)
    parser.add_argument("--conv_template", type=str, default=None)
    parser.add_argument("--self_generated_data_path", type=str, required=True)
    # parser.add_argument(
    #     "--message", type=str, default=" Cache replaced问题怎么解决？请简短回答："
    # )
    parser.add_argument("--reflection_template", type=str, required=True)
    args = parser.parse_args()

    if args.reflection_template=="REFLECTION_PROMPT_V1":
        reflection_template = REFLECTION_PROMPT_V1
    elif args.reflection_template=="REFLECTION_PROMPT_V2":
        reflection_template = REFLECTION_PROMPT_V2
    elif args.reflection_template=="REFLECTION_PROMPT_V3":
        reflection_template = REFLECTION_PROMPT_V3
    elif args.reflection_template=="REFLECTION_PROMPT_V4":
        reflection_template = REFLECTION_PROMPT_V4

    with open(args.self_generated_data_path, 'r', encoding='utf-8') as file:
        data_list = json.load(file)

    reflection_list = []

    new_data_list = []
    for data_index,data in enumerate(data_list):

        prompt = data['prompt']
   
        all_self_pair_scores = np.array(data['all_self_pair_scores'])

        # 获取最大元素的索引（展平后的一维索引）
        max_index_1d = np.argmax(all_self_pair_scores)

        # 将一维索引转换为二维索引
        max_index_2d = np.unravel_index(max_index_1d, all_self_pair_scores.shape)

        
        response_SFT = data['all_generated_responses'][max_index_2d[1]]
        response_OSP = data['all_generated_responses'][max_index_2d[0]]
        
        # token_index_A = tokenizer.encode_plus("A", add_special_tokens=False)['input_ids'][0]
        # token_index_B = tokenizer.encode_plus("B", add_special_tokens=False)['input_ids'][0]
        
        

        reflection_prompt = reflection_template.format(prompt=prompt, response_OSP=response_OSP, response_SFT=response_SFT)
        output = main(reflection_prompt)
        # print("-------------------------------")
        # reflection_list.append({"prompt": prompt,
        #                         "response_SFT": response_SFT,
        #                         "response_OSP": response_OSP,
        #                         "output": output})
        data['reflection'] = output
        new_data_list.append(data)
        print("response_SFT length: ", len(response_SFT))
        print("response_OSP length: ", len(response_OSP))
        # 将数据保存为 JSON 文件

        if data_index % 100 == 0:
            with open(args.self_generated_data_path.split('.')[0]+'_reflection_'+str(data_index)+'_sample_'+args.conv_template+'.json', 'w', encoding='utf-8') as json_file:
                json.dump(new_data_list, json_file, ensure_ascii=False, indent=4)

    
            