# from llmtuner.data.template import get_template_and_fix_tokenizer
# import torch
# from vllm import LLM, SamplingParams
import random
import json
import time

import fire
from transformers import Seq2SeqTrainingArguments

from llamafactory.data import get_dataset, get_template_and_fix_tokenizer
from llamafactory.extras.constants import IGNORE_INDEX
from llamafactory.extras.misc import get_device_count
from llamafactory.extras.packages import is_pillow_available, is_vllm_available
from llamafactory.hparams import get_infer_args
from llamafactory.model import load_tokenizer

from script.evaluation import *

import re

if is_pillow_available():
    from PIL import Image
    from PIL.Image import Image as ImageObject


if is_vllm_available():
    from vllm import LLM, EngineArgs, LLMEngine, RequestOutput, SamplingParams
    from vllm.lora.request import LoRARequest

# Maximum relative distance (in utterances) between a dependent utterance and
# its linked head; also bounds the context window fed to the model at inference.
MAX_RDIS = 15
# String forms "1".."15" of the candidate relative distances.
# NOTE(review): not referenced anywhere in this file's visible code — confirm external use.
relative_distances = [ str(dis) for dis in range(1, MAX_RDIS + 1)]
# Closed label set of SDRT discourse relations the model is asked to choose from.
relations = [
    'Question-answer_pair', 'Comment', 'Acknowledgement',
    'Continuation', 'Elaboration', 'Q-Elab', 
    'Contrast', 'Explanation', 'Clarification_question', 
    'Result', 'Correction', 'Parallel', 
    'Conditional', 'Alternation', 'Narration', 'Background'
]
def load_examples(file_path):
    """Build instruction-tuning examples for SDRT parsing from a JSON corpus.

    Each instance in the file is expected to carry ``edus`` (list of dicts with
    ``speaker`` and ``text``) and ``relations`` (list of dicts with head index
    ``x``, dependent index ``y`` and label ``type``). One example is emitted per
    forward relation (``y > x``): the input is the dialogue prefix up to ``y``
    (prefixed with a synthetic root utterance), the output is
    "<linked index> <relation type>".

    Args:
        file_path: Path to the SDRT JSON corpus file.

    Returns:
        List of dicts with keys ``instruction``, ``input``, ``output``.
    """
    instruction_str = \
        "Please perform SDRT parsing to the index of linked utterance " + \
        "and select the relation between them from the following relation set <" +  " ".join(relations) + ">"

    examples = []
    with open(file_path, encoding='utf8', mode='r') as inf:
        data = json.load(inf)
        print("instance num: ", len(data))

        for inst in data:
            # Render every EDU once per instance and slice the prefix per
            # relation, instead of rebuilding all prefix strings inside the
            # relation loop (the original was quadratic per instance).
            edu_texts = [
                "<u" + str(idx) + "> " + edu['speaker'] + ": " + edu['text']
                for idx, edu in enumerate(inst['edus'])
            ]

            for rel in inst['relations']:
                cur_idx = rel['y']
                linked_idx = rel['x']
                # Keep only forward (left-to-right) arcs, as in training.
                if cur_idx <= linked_idx:
                    continue

                input_str = " ".join(["<u-1> root: root"] + edu_texts[:cur_idx + 1])
                output_str = str(linked_idx) + " " + rel['type']
                examples.append(dict(instruction=instruction_str,
                                     input=input_str,
                                     output=output_str))
    return examples

def vllm_sdrt_infer(
    model_name_or_path: str,
    adapter_name_or_path: str = None,
    dataset: str = "alpaca_en_demo",
    dataset_dir: str = "data",
    template: str = "default",
    cutoff_len: int = 2048,
    max_samples: int = None,
    vllm_config: str = "{}",
    save_name: str = "generated_predictions.jsonl",
    temperature: float = 0.95,
    top_p: float = 0.7,
    top_k: int = 50,
    max_new_tokens: int = 1024,
    repetition_penalty: float = 1.0,
    infer_dtype: str = 'auto',
    p_doc: int = 38,
):
    r"""
    Performs batch SDRT-parsing generation using the vLLM engine, which supports
    tensor parallelism, then writes per-dialogue predictions to
    ``<dataset>.out`` and runs ``evaluation`` on them.

    The ``dataset`` argument is treated as a path to a raw SDRT JSON file
    (opened directly below), not resolved through the dataset loader.

    NOTE(review): ``save_name``, ``p_doc`` and the ``training_args`` object are
    never used in this body — confirm whether they are leftovers.

    Usage: python vllm_infer.py --model_name_or_path meta-llama/Llama-2-7b-hf --template llama --dataset alpaca_en_demo
    """
    # Reuse the project's argument parsing/validation for model + generation config.
    model_args, data_args, _, generating_args = get_infer_args(
        dict(
            model_name_or_path=model_name_or_path,
            adapter_name_or_path=adapter_name_or_path,
            dataset=dataset,
            dataset_dir=dataset_dir,
            template=template,
            cutoff_len=cutoff_len,
            max_samples=max_samples,
            vllm_config=vllm_config,
            temperature=temperature,
            top_p=top_p,
            top_k=top_k,
            max_new_tokens=max_new_tokens,
            repetition_penalty=repetition_penalty,
            infer_dtype=infer_dtype,
        )
    )
    # output_dir = data_args.dataset_dir + ".out"
    # os.makedirs(output_dir, exist_ok=True)
    training_args = Seq2SeqTrainingArguments(output_dir="dummy_dir")
    tokenizer_module = load_tokenizer(model_args)
    tokenizer = tokenizer_module["tokenizer"]
    template_obj = get_template_and_fix_tokenizer(tokenizer, data_args)
    template_obj.mm_plugin.expand_mm_tokens = False  # for vllm generate

    
    # Load the raw SDRT corpus: data_args.dataset[0] is used as a file path here.
    with open(data_args.dataset[0], encoding='utf8', mode='r') as inf:
        sdrt_data = json.load(inf)

    sampling_params = SamplingParams(
        repetition_penalty=generating_args.repetition_penalty or 1.0,  # repetition_penalty must > 0
        temperature=generating_args.temperature,
        top_p=generating_args.top_p or 1.0,  # top_p must > 0
        top_k=generating_args.top_k,
        # Stop on EOS plus every special/added token so generation ends cleanly.
        stop_token_ids=[tokenizer.eos_token_id] + tokenizer.additional_special_tokens_ids  + [token_id for token_id in tokenizer.added_tokens_decoder],
        max_tokens=generating_args.max_new_tokens,
        skip_special_tokens=False,
    )
    # Only the first adapter path is used when a LoRA adapter is configured.
    if model_args.adapter_name_or_path is not None:
        lora_request = LoRARequest("default", 1, model_args.adapter_name_or_path[0])
    else:
        lora_request = None
    engine_args = {
        "model": model_args.model_name_or_path,
        "trust_remote_code": True,
        "dtype": model_args.infer_dtype,
        "tensor_parallel_size": get_device_count() or 1,
        "disable_log_stats": True,
        "enable_lora": model_args.adapter_name_or_path is not None,
        "enforce_eager": True,
    }
    # Multimodal templates need an explicit per-prompt media cap for vLLM.
    if template_obj.mm_plugin.__class__.__name__ != "BasePlugin":
        engine_args["limit_mm_per_prompt"] = {"image": 4, "video": 2}
    # User-supplied vllm_config overrides the defaults above.
    if isinstance(model_args.vllm_config, dict):
        engine_args.update(model_args.vllm_config)
    llm = LLM(**engine_args)

    instruction_str = \
        "Please perform SDRT parsing to output the arc <uy> <ux> " + \
        "and the relation <" +  " ".join(relations) + "> between the linked utterance and the last utterance."
    
    inputs = []
    start = time.time()
    # One prompt per utterance (starting from index 1): the model must link the
    # last utterance of the window to an earlier one and name the relation.
    for inst in sdrt_data: 
        edus = []
        for idx, edu in enumerate( inst['edus'] ):
            edu_text  = "<u" + str(idx) + "> " + edu['speaker']  + ": " + edu['text']
            edus.append(edu_text)

        max_step = len(inst['edus'])

        for step_idx in range(1, max_step):
            cur_idx = step_idx
            # Window the context to the last MAX_RDIS utterances before cur_idx.
            start_idx = cur_idx - MAX_RDIS

            if start_idx < 0: start_idx = 0
            input_str = " ".join(edus[start_idx: cur_idx + 1])

            content = instruction_str + input_str
            # Empty assistant turn: encode_oneturn returns the prompt token ids only.
            prompt, _ = template_obj.encode_oneturn(
                tokenizer=tokenizer, messages=[{"role": "user", "content": content}] + [{"role": "assistant", "content": ""}]
            )
            inputs.append({"prompt_token_ids": prompt, "multi_modal_data": None})
        

    results = llm.generate(inputs, sampling_params=sampling_params, lora_request=lora_request)
    preds = [result.outputs[0].text for result in results]

    start_idx = 0
    
    # Regroup the flat prediction list per dialogue: each dialogue with N EDUs
    # contributed exactly N-1 prompts above, in order.
    pred_instances = [] 
    for inst in sdrt_data:
        max_step = len(inst['edus'])
        pred_labels = preds[start_idx: start_idx + max_step - 1]
        start_idx += max_step - 1
        # Parse raw generations into validated {y, x, type} relation dicts.
        p_relations = pred2real(pred_labels)

        pred_instances.append( dict(
            edus = inst['edus'],
            id = inst['id'],
            relations = p_relations,
        ) )

    outputFile = data_args.dataset[0] + ".out"
    with open(outputFile, 'w', encoding='utf8') as out_f:
        json.dump(pred_instances, out_f, indent=4, ensure_ascii=False)
    
    print("dialogue num: %d,  parsing time = %.2f " % (len(pred_instances), float(time.time() - start)))
    # Score predictions against the gold file (from script.evaluation).
    evaluation(data_args.dataset[0], outputFile)


def pred2real(p_labels):
    """Convert raw model outputs into validated SDRT relation dicts.

    Each label is expected to look like ``"<y> <x> <relation>"`` where ``y`` is
    the dependent utterance index, ``x`` the linked (head) index, and
    ``<relation>`` the relation type. A label at position ``pos`` is kept only
    if it is fully parseable, targets utterance ``pos + 1``, and does not point
    forwards (``y >= x``); everything else is silently dropped.

    Args:
        p_labels: List of generated label strings, one per utterance index 1..N.

    Returns:
        List of dicts with keys ``y``, ``x``, ``type``.
    """
    def _first_int(token):
        # Extract the first run of digits from a token like "<u3>"; -1 if none.
        match = re.search(r'\d+', token)
        return int(match.group()) if match is not None else -1

    parsed = []
    for pos, raw in enumerate(p_labels):
        tokens = raw.split(" ")

        tgt = _first_int(tokens[0])
        src = _first_int(tokens[1]) if len(tokens) >= 2 else -1
        rel_type = tokens[2] if len(tokens) >= 3 else ""

        # Drop labels with a missing index or missing relation type.
        if tgt == -1 or src == -1 or not rel_type:
            continue
        # The pos-th prediction must target utterance pos+1 and point backwards.
        if tgt != pos + 1 or tgt < src:
            continue

        parsed.append(dict(y=tgt, x=src, type=rel_type))
    return parsed



if __name__ == "__main__":
    # Expose vllm_sdrt_infer's keyword arguments as CLI flags via python-fire.
    fire.Fire(vllm_sdrt_infer)