import numpy as np
from datasets import DatasetDict, load_dataset
from transformers import (
    AutoTokenizer,
    Seq2SeqTrainer,
    Seq2SeqTrainingArguments,
    AutoModelForSeq2SeqLM,
    DataCollatorForSeq2Seq,
    EarlyStoppingCallback,
    PreTrainedTokenizer,
    AutoConfig,
    PreTrainedModel,
)
from instruction_re.metrics import calculate_metrics
from instruction_re.utils.evaluate_utils import (
    show_classification_report,
    # parse_relation_triple_with_svo_format,
    # parse_relation_triple_with_template_format,
    # parse_relation_triple_with_triple_format,
    parse_re_task_output_to_entities_and_triples,
    parse_output_to_entities_and_triples,
    parse_output_to_quintuples,
    compute_micro_metric,
    compute_lack_new_fn_fp_tp,
    parse_simple_template_output_to_quintuples,
    parse_re_task_output_to_quintuples,
)
from instruction_re.arg_parse import get_train_args
from instruction_re.core.datatypes import (
    TaskType,
    AnswerType,
    string_list_to_task_type,
    string_to_enum,
    Entity,
    Relation,
)
from instruction_re.utils.utils import (
    load_config,
    load_json,
    set_global_seed,
    pick_nearest_sample_from_dataset,
)
from instruction_re.formatters import *
from instruction_re.utils.data_utils import *

import os

# from instruction_re.models.CopyT5 import T5RestrictedVocabWithCopy

os.environ["TOKENIZERS_PARALLELISM"] = "false"
from datetime import datetime
from peft import PeftConfig, get_peft_config, get_peft_model
from typing import Optional, Tuple
from torch import nn

os.environ["CUDA_VISIBLE_DEVICES"] = "0"


def print_model_size(model: PreTrainedModel):
    """Print the number of trainable parameters in *model*, in millions."""
    print("--> Model")
    trainable = 0
    for param in model.parameters():
        if param.requires_grad:
            trainable += param.numel()
    print(f"\n--> model has {trainable / 1e6}M params\n")


def load_tokenizer_and_model(
    model_dir: str,
    peft_config: Optional[PeftConfig] = None,
) -> Tuple[PreTrainedTokenizer, nn.Module]:
    """Load a tokenizer and a seq2seq model, optionally set up for PEFT.

    Args:
        model_dir: HuggingFace model id or local checkpoint directory.
        peft_config: Optional PEFT configuration. Only PREFIX_TUNING and
            LORA are supported.

    Returns:
        A ``(tokenizer, model)`` tuple.

    Raises:
        ValueError: if *peft_config* has an unsupported ``peft_type``.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
    if peft_config is not None:
        if peft_config.peft_type.name == "PREFIX_TUNING":
            # Prefix tuning is configured through the base model config
            # (pre_seq_len) rather than through a PEFT wrapper here.
            config = AutoConfig.from_pretrained(model_dir, trust_remote_code=True)
            config.pre_seq_len = peft_config.num_virtual_tokens
            config.use_cache = False
            model = AutoModelForSeq2SeqLM.from_pretrained(
                model_dir,
                trust_remote_code=True,
                config=config,
            )
        elif peft_config.peft_type.name == "LORA":
            model = AutoModelForSeq2SeqLM.from_pretrained(
                model_dir, trust_remote_code=True, use_cache=False
            )
            model = get_peft_model(model, peft_config)
            model.print_trainable_parameters()
        else:
            # Previously an unsupported PEFT type fell through both `if`s
            # and raised UnboundLocalError on `model`; fail fast instead.
            raise ValueError(
                f"Unsupported peft_type: {peft_config.peft_type.name}"
            )
    else:
        model = AutoModelForSeq2SeqLM.from_pretrained(
            model_dir, trust_remote_code=True, use_cache=False
        )
    print_model_size(model)
    return tokenizer, model


def main():
    """Build, evaluate and report a seq2seq relation-extraction run.

    Reads CLI args plus a YAML model config, formats the valid/test splits
    for the selected RE task, constructs a ``Seq2SeqTrainer``, evaluates on
    the test split, and writes the metrics and the run arguments into the
    run's output directory.
    """
    import json
    import shutil

    is_few_shot = False

    args = get_train_args()

    training_num = args.training_num
    context_format = args.context_format

    config = load_config(args.path_to_model_config)
    tokenizer_kwargs = dict(config["tokenizer"])
    # Copy so later mutations (e.g. output_dir below) don't leak back into
    # the loaded config object. (The original re-aliased this to the raw
    # config dict further down, silently mutating the config.)
    training_args = dict(config["training_args"])

    answer_type = string_to_enum(args.answer_template)

    set_global_seed(args.seed)

    # Load the instruction templates used to build model prompts.
    instructions = load_json(args.path_to_instructions)

    dataset_name = args.dataset_name

    data_config = get_data_config(
        args.root_data_dir,
        dataset_name,
        training_num,
        merge_dataset_dir=args.merge_dataset_dir,
        rules=args.rules,
        is_few_shot=is_few_shot,
    )

    print(data_config)

    dataset_path = os.path.join(args.root_data_dir, dataset_name)

    label2token = get_label2token_by_dataset(
        dataset_path, config["replace_labels_with_special_tokens"]
    )

    # TODO: temporary workaround for the template_order problem when
    # training on multiple datasets: "semval" uses the RE-task template.
    if "semval" in dataset_name:
        template_order = 3
    else:
        template_order = 0

    raw_datasets = load_dataset("json", data_files=data_config)

    # Pick valid_tasks by dataset: "semval" uses RE, everything else uses
    # RE_STRICT. (Fixed: the original unconditionally overwrote valid_tasks
    # with RE_STRICT, making the semval branch dead code.)
    if "semval" in dataset_name:
        valid_tasks = [TaskType.RE]
    else:
        valid_tasks = [TaskType.RE_STRICT]

    model_name = config["model"]["model_name"]

    # NOTE(review): `task_str` was referenced below but never defined in the
    # original (guaranteed NameError). Deriving it from the selected tasks
    # — confirm this matches the intended directory layout.
    task_str = "_".join(task.name for task in valid_tasks)

    now = datetime.now()
    # The output directory encodes dataset / rules / model / task /
    # augmentation plus a timestamp so runs don't collide.
    training_args["output_dir"] = (
        f"{training_args['output_dir']}/{dataset_name}/{args.rules}/{model_name}/{task_str}/{args.aug_times}/{now.strftime('%m%d_%H_%M')}"
    )

    # Snapshot the config and launch script into the output directory for
    # reproducibility. shutil.copy instead of os.system("cp ..."): portable
    # and not vulnerable to shell injection via the paths. Kept best-effort,
    # matching the old `cp` which only printed an error on failure.
    os.makedirs(training_args["output_dir"], exist_ok=True)
    for src, dst_name in (
        (args.path_to_model_config, "config.yaml"),
        ("train4hf.sh", "train4hf.sh"),
    ):
        try:
            shutil.copy(src, os.path.join(training_args["output_dir"], dst_name))
        except OSError as exc:
            print(f"warning: could not copy {src}: {exc}")

    format_kwargs = {
        "context_format": context_format,
        "answer_type": answer_type,
        "dataset_name": dataset_path,
        "replace_label_to_special_token": config["replace_labels_with_special_tokens"],
        "sep": config["sep"],
        "template_order": template_order,
    }

    valid_dataset = format_datasets_from_tasks(
        valid_tasks,
        raw_datasets["valid"],
        instructions,
        **format_kwargs,
    )

    test_dataset = format_datasets_from_tasks(
        valid_tasks,
        raw_datasets["test"],
        instructions,
        **format_kwargs,
    )

    print(valid_dataset[0])
    print("=========processing data done=========")
    print(valid_dataset.num_rows)

    dataset_dict = DatasetDict({"valid": valid_dataset, "test": test_dataset})

    # Fixed: shuffle() returns a new DatasetDict; the original discarded the
    # result, so nothing was actually shuffled.
    dataset_dict = dataset_dict.shuffle(seed=config["seed"])

    # Load model and tokenizer.
    model_path = (
        config["model"]["model_path"] if "model_path" in config["model"] else model_name
    )
    peft_config = config.get("peft_config", None)

    if peft_config is not None:
        peft_config = get_peft_config(peft_config)

    tokenizer, model = load_tokenizer_and_model(model_path, peft_config)

    relation_type_options = list(label2token.values())
    entity_labels = get_entity_labels(dataset_path)

    # Register the custom separator token if requested.
    if config["replace_sep_token"]:
        tokenizer.add_special_tokens({"sep_token": "<sep>"})
        assert tokenizer.sep_token == "<sep>", "sep token is not <sep>"

    if config["replace_labels_with_special_tokens"]:
        # Labels rendered as "<...>" are treated as special tokens and must
        # be added to the tokenizer vocabulary.
        added_tokens = filter(
            lambda o: o[0] == "<" and o[-1] == ">", relation_type_options
        )
        tokenizer.add_tokens(list(added_tokens))
    print(label2token)
    print(len(tokenizer))

    if training_args["do_train"] and config["replace_labels_with_special_tokens"]:
        model.resize_token_embeddings(len(tokenizer))

        if config["replace_special_token_embedding"]:
            # Initialise each added label token's embedding with the mean
            # input embedding of the original label text.
            embedding_layer = model.get_input_embeddings()

            for label, token in label2token.items():
                if token[0] == "<" and token[-1] == ">":
                    input_ids = tokenizer(label, return_tensors="pt")["input_ids"]
                    avg_embedding = embedding_layer(input_ids).squeeze(0).mean(dim=0)
                    token_id = tokenizer.convert_tokens_to_ids(token)
                    model.shared.weight.data[token_id] = avg_embedding

        if config["replace_special_token_embedding_with_encoder"]:
            # Alternative init: use the encoder's first output position for
            # the original label text.
            encoder = model.get_encoder()
            for label, token in label2token.items():
                if token[0] == "<" and token[-1] == ">":
                    input_ids = tokenizer(label, return_tensors="pt")["input_ids"]
                    avg_embedding = encoder(input_ids).last_hidden_state.squeeze(0)[0]
                    token_id = tokenizer.convert_tokens_to_ids(token)
                    model.shared.weight.data[token_id] = avg_embedding

    def preprocess_function(examples):
        """Tokenize (instruction, context) pairs together with the targets."""
        return tokenizer(
            text=examples["instruction"],
            text_pair=examples["context"],
            text_target=examples["labels"],
            **tokenizer_kwargs,
            padding=False,  # padding is handled by DataCollatorForSeq2Seq
        )

    tokenized_dataset = dataset_dict.map(
        preprocess_function, batched=True, remove_columns=["instruction", "context"]
    )

    if template_order in (0, 1):
        parse_fn = parse_output_to_quintuples
    elif template_order == 2:
        parse_fn = parse_simple_template_output_to_quintuples
    elif template_order == 3:
        parse_fn = parse_re_task_output_to_quintuples
    else:
        # Previously parse_fn stayed unbound for unknown orders and failed
        # later with UnboundLocalError; fail fast instead.
        raise ValueError(f"unsupported template_order: {template_order}")

    seq2seq_collator = DataCollatorForSeq2Seq(
        tokenizer=tokenizer,
        model=model,
        max_length=tokenizer_kwargs["max_length"],
        padding="max_length",
    )

    training_arguments = Seq2SeqTrainingArguments(
        **training_args,
    )

    def remove_special_tokens(text):
        """Strip the pad and eos tokens from decoded text."""
        return text.replace(tokenizer.pad_token, "").replace(tokenizer.eos_token, "")

    def clean_output(eval_preds):
        """Decode predictions/labels to text and strip special tokens.

        -100 positions (ignored by the loss) are mapped back to the pad
        token id before decoding, since they cannot be decoded.
        """
        preds, labels = eval_preds
        if isinstance(preds, tuple):
            preds = preds[0]

        preds = np.where(preds != -100, preds, tokenizer.pad_token_id)
        decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=False)
        # Remove <pad> <sep> </s> tokens.
        decoded_preds = [remove_special_tokens(p) for p in decoded_preds]

        labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
        decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=False)
        decoded_labels = [remove_special_tokens(p) for p in decoded_labels]

        return decoded_preds, decoded_labels

    def compute_metric_for_res(decoded_preds, decoded_labels):
        """Evaluate strict relation extraction.

        Args:
            decoded_preds: predicted texts.
            decoded_labels: ground-truth texts.

        Returns:
            Dict of flattened precision/recall/f1 metrics, for both the
            relation triples and the entities.
        """
        batch_preds_for_entity = [
            parse_fn(p, relation_type_options, entity_labels, tokenizer.sep_token)
            for p in decoded_preds
        ]
        batch_truths_for_entity = [
            parse_fn(p, relation_type_options, entity_labels, tokenizer.sep_token)
            for p in decoded_labels
        ]

        def extract_triples(batch_data):
            # Collapse quintuples (head, head_type, relation, tail, tail_type)
            # into triples where "name + type" forms each endpoint.
            return [
                [
                    Relation(x[0] + " " + x[1], x[3] + " " + x[4], x[2])
                    for x in pred
                ]
                for pred in batch_data
            ]

        batch_preds = extract_triples(batch_preds_for_entity)
        batch_truths = extract_triples(batch_truths_for_entity)

        epoch_metrics = calculate_metrics(
            batch_preds, batch_truths, options=relation_type_options
        )

        show_classification_report(epoch_metrics)

        eval_result = {}
        for k, v in epoch_metrics.items():
            for k1, v1 in v.items():
                eval_result[f"{k}_{k1}"] = v1

        def extract_entities(batch_data):
            # Both endpoints of every quintuple count as predicted entities.
            entities = []
            for pred in batch_data:
                row = []
                for x in pred:
                    row.append(Entity(x[0], x[1]))
                    row.append(Entity(x[3], x[4]))
                entities.append(row)
            return entities

        pred_entities = extract_entities(batch_preds_for_entity)
        truth_entities = extract_entities(batch_truths_for_entity)

        entity_metrics = calculate_metrics(
            pred_entities, truth_entities, options=entity_labels
        )

        show_classification_report(entity_metrics)

        for k, v in entity_metrics.items():
            for k1, v1 in v.items():
                eval_result[f"entity_{k}_{k1}"] = v1

        return eval_result

    def compute_metric(eval_preds):
        """Trainer hook: decode the model output, then score it."""
        decoded_preds, decoded_labels = clean_output(eval_preds)
        return compute_metric_for_res(decoded_preds, decoded_labels)

    trainer = Seq2SeqTrainer(
        model=model,
        args=training_arguments,
        # NOTE(review): the DatasetDict built above has no "train" split, so
        # the original `tokenized_dataset["train"]` always raised KeyError.
        # This script only evaluates; pass None when the split is absent.
        train_dataset=tokenized_dataset.get("train"),
        eval_dataset=tokenized_dataset["valid"],
        data_collator=seq2seq_collator,
        tokenizer=tokenizer,
        compute_metrics=compute_metric,
    )

    test_dataset_eval_result = trainer.evaluate(tokenized_dataset["test"])

    # Persist the evaluation metrics next to the run artefacts.
    save_path = os.path.join(training_args["output_dir"], "test_eval_result.json")

    test_dataset_eval_result["output_dir"] = training_args["output_dir"]

    for k, v in data_config.items():
        test_dataset_eval_result[k] = v

    with open(save_path, "w") as f:
        json.dump(test_dataset_eval_result, f, indent=4)

    # Save the CLI arguments for reproducibility.
    args_save_path = os.path.join(training_args["output_dir"], "args.json")
    with open(args_save_path, "w") as f:
        json.dump(vars(args), f, indent=4)

# Script entry point.
if __name__ == "__main__":
    main()
