import asyncio
import json
import argparse
from datetime import timedelta

import torch
import numpy as np
from transformers import AutoProcessor

from megatron_datasets.tools.lmdb_read_cli import fetch_images_from_lmdb
from megatron_datasets.mm_dataset import convert_conversations, remove_bos
from megatron_datasets.qwen2vl_dataset import resize_image

from gpatch.rpc import call_once_rpc


# Matches samples in the RadGenome / PMC-VQA JSONL layout.
def parse_sft_jsonl(sammple, rm_bos, lmdb_port, min_pixels_num, max_pixels_num, processor):
    """Parse one SFT JSONL line into prompt text/ids, reference answer and images.

    Args:
        sammple: raw JSON line (str); its last conversation turn must be the
            assistant answer, which is held out as the reference.
        rm_bos: when True, strip the BOS token from the rendered prompt text.
        lmdb_port: port of the LMDB service that serves the referenced images.
        min_pixels_num / max_pixels_num: pixel bounds forwarded to resize_image
            (may be None).
        processor: HF processor providing the chat template and tokenizer.

    Returns:
        Tuple of (prompt_texts, prompt_ids, answer, imgs); imgs is a list of
        np.ndarray, empty when the sample references no images.
    """
    json_data = json.loads(sammple.strip())
    assert json_data['conversations'][-1]['role'] == "assistant"
    answer = json_data['conversations'][-1]['content']

    # Render the prompt from every turn except the final assistant answer.
    conversations = convert_conversations(json_data['conversations'][:-1])
    prompt_texts = processor.apply_chat_template(
        conversations,
        tools=None,
        tokenize=False,
        add_generation_prompt=True,
    )
    if rm_bos:
        # BUG FIX: previously called remove_bos(all_text) on an undefined name,
        # raising NameError whenever rm_bos=True; operate on the rendered prompt.
        prompt_texts = remove_bos(prompt_texts)
    prompt_ids = processor.tokenizer([prompt_texts])["input_ids"][0]

    imgs = []
    if "images" in json_data and len(json_data["images"]) > 0:
        imgs = fetch_images_from_lmdb(json_data["images"], lmdb_port)
        imgs = [
            np.array(resize_image(ele, img, min_pixels_num, max_pixels_num))
            for ele, img in zip(json_data['images'], imgs)
        ]

    return prompt_texts, prompt_ids, answer, imgs


async def run_eval_client(args, rank, world_size):
    """Evaluate this rank's shard of the JSONL eval set against the RPC servers.

    Samples are split contiguously across ranks, requests are fanned out
    round-robin over the configured generation endpoints, and replies are
    scored by exact (whitespace-stripped) string match against the reference.

    Returns:
        Tuple of (num_exact_matches, num_samples_in_shard).
    """
    with open(args.config_path, 'r') as f:
        config = json.load(f)
    endpoints = [(srv['ip'], srv['port']) for srv in config['sampler']['rpc_servers']]
    num_endpoints = len(endpoints)
    processor = AutoProcessor.from_pretrained(args.model_path)

    # Load every raw sample line, then keep only this rank's slice.
    with open(args.jsonl_path) as f:
        all_lines = f.readlines()

    total = len(all_lines)
    per_rank = (total + world_size - 1) // world_size  # ceil division covers all samples
    start = rank * per_rank
    stop = min(start + per_rank, total)
    shard = all_lines[start:stop]
    print(f"{rank=} send the sample cnt:{len(shard)}")

    pending = []
    answers = []
    for idx, line in enumerate(shard):
        host, port = endpoints[idx % num_endpoints]  # round-robin over servers
        prompt_texts, prompt_ids, answer, imgs = parse_sft_jsonl(
            line, False, args.lmdb_port, None, None, processor
        )
        request = {
            'prompt': prompt_texts,
            'prompt_token_ids': prompt_ids,
            'image': imgs,
            'sampling_params': {
                'temperature': args.temperature,
                'top_k': args.top_k,
                'top_p': args.top_p,
                'seed': args.seed,
                'max_tokens': args.max_tokens,
            },
        }
        # Coroutines are created now but only awaited together below.
        pending.append(call_once_rpc(f'http://{host}:{port}/generate', request, timeout=10 * 60))
        answers.append(answer)

    responses = await asyncio.gather(*pending)
    token_id_batches = [resp["output_token_ids"] for resp in responses]
    decoded = processor.tokenizer.batch_decode(token_id_batches, skip_special_tokens=True)
    acc_cnt = sum(
        1 for expected, got in zip(answers, decoded) if expected.strip() == got.strip()
    )

    return acc_cnt, len(answers)


def parse_args():
    """Parse the command-line options for the evaluation client."""
    parser = argparse.ArgumentParser(description="Evaluation client for image and text queries")
    # Input / model / service locations.
    for flag, msg in (
        ("--jsonl-path", "Path to the JSONL file"),
        ("--config-path", "Path to the config file"),
        ("--model-path", "Path to the model"),
    ):
        parser.add_argument(flag, type=str, help=msg)
    parser.add_argument("--lmdb-port", type=int, help="Port for LMDB")
    # Sampling parameters forwarded to the generation servers.
    for flag, cast, default in (
        ("--temperature", float, 0.),
        ("--top-k", int, 1),
        ("--top-p", float, 1.),
        ("--max-tokens", int, 2048),
        ("--seed", int, 42),
    ):
        parser.add_argument(flag, type=cast, default=default)
    return parser.parse_args()


if __name__ == '__main__':
    # Parse CLI args first so a bad invocation fails before joining the group.
    args = parse_args()

    # 8h timeout: generation on remote servers can keep ranks apart for hours.
    torch.distributed.init_process_group(backend='gloo', timeout=timedelta(seconds=8 * 60 * 60))
    rank = torch.distributed.get_rank()
    world_size = torch.distributed.get_world_size()
    if rank == 0:
        # Typo fix: message previously read "init distruibed finish".
        print("init distributed finish")

    acc_cnt, total_cnt = asyncio.run(run_eval_client(args, rank, world_size))
    print(f"{rank=} {acc_cnt=} {total_cnt=}")
    torch.distributed.barrier()
    # Gather every rank's (acc, total) pair; each slot holds a one-element list.
    gather_list = [None for _ in range(world_size)]
    torch.distributed.all_gather_object(gather_list, [(acc_cnt, total_cnt)])
    if rank == 0:
        acc_cnt = 0
        total_cnt = 0
        for ele in gather_list:
            acc_cnt += ele[0][0]
            total_cnt += ele[0][1]

        if total_cnt:
            print(f"sum: {acc_cnt=} {total_cnt=} {acc_cnt/total_cnt=}")
        else:
            # Guard the accuracy division when the eval set was empty.
            print(f"sum: {acc_cnt=} {total_cnt=} (no samples evaluated)")
    torch.distributed.barrier()
