import numpy as np
from datasets import DatasetDict, load_dataset
from transformers import (
    AutoTokenizer,
    Seq2SeqTrainer,
    Seq2SeqTrainingArguments,
    AutoModelForSeq2SeqLM,
    DataCollatorForSeq2Seq,
    PreTrainedTokenizer,
    AutoConfig,
    PreTrainedModel,
)

from instruction_re.arg_parse import get_train_args
from instruction_re.utils.utils import (
    load_config,
    load_json,
    set_global_seed,
)
from instruction_re.utils.data_utils import get_data_config

import os

# Silence the HuggingFace fast-tokenizers fork/parallelism warning; must be
# set before any tokenizer is used.
os.environ["TOKENIZERS_PARALLELISM"] = "false"
from datetime import datetime
from peft import PeftConfig, get_peft_config, get_peft_model
from typing import Optional, Tuple, Union
from torch import nn
from pathlib import Path

# Restrict training to the first GPU.
# NOTE(review): this is set after torch has been imported (via `from torch
# import nn` above); it should still take effect as long as CUDA was not yet
# initialized, but exporting it before launching the script is safer — confirm.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"


def print_model_size(model: nn.Module) -> int:
    """Print and return the number of trainable parameters of *model*.

    Args:
        model: Any ``nn.Module`` (e.g. a HuggingFace ``PreTrainedModel``);
            only ``.parameters()`` is used.

    Returns:
        The number of parameters with ``requires_grad=True``.
    """
    print("--> Model")
    # Only requires_grad parameters are counted, so under PEFT (LoRA/prefix
    # tuning) this reports the trainable subset, not the full model size —
    # the message states "trainable" explicitly to avoid confusion.
    trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print(f"\n--> model has {trainable_params / 1e6}M trainable params\n")
    return trainable_params


def load_tokenizer_and_model(
    model_dir: str,
    peft_config: Optional[PeftConfig] = None,
) -> Tuple[PreTrainedTokenizer, nn.Module]:
    """Load a seq2seq model and its tokenizer, optionally prepared for PEFT.

    Args:
        model_dir: HuggingFace model id or local checkpoint directory.
        peft_config: Optional PEFT configuration. ``PREFIX_TUNING`` is applied
            through the model config (``pre_seq_len``); ``LORA`` wraps the
            model with ``get_peft_model``.

    Returns:
        ``(tokenizer, model)`` tuple.

    Raises:
        ValueError: If ``peft_config`` carries an unsupported PEFT type.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
    if peft_config is None:
        model = AutoModelForSeq2SeqLM.from_pretrained(
            model_dir, trust_remote_code=True, use_cache=False
        )
    elif peft_config.peft_type.name == "PREFIX_TUNING":
        # Prefix tuning is wired through the model config rather than
        # get_peft_model: the (trust_remote_code) model consumes pre_seq_len
        # itself.
        config = AutoConfig.from_pretrained(model_dir, trust_remote_code=True)
        config.pre_seq_len = peft_config.num_virtual_tokens
        config.use_cache = False
        model = AutoModelForSeq2SeqLM.from_pretrained(
            model_dir,
            trust_remote_code=True,
            config=config,
        )
    elif peft_config.peft_type.name == "LORA":
        model = AutoModelForSeq2SeqLM.from_pretrained(
            model_dir, trust_remote_code=True, use_cache=False
        )
        model = get_peft_model(model, peft_config)
        model.print_trainable_parameters()
    else:
        # The original code fell through here with `model` unbound and crashed
        # later with UnboundLocalError; fail fast with a clear message instead.
        raise ValueError(
            f"Unsupported PEFT type: {peft_config.peft_type.name!r}; "
            "expected PREFIX_TUNING or LORA."
        )
    print_model_size(model)
    return tokenizer, model


def main():
    """Fine-tune a seq2seq model for relation-to-sentence data augmentation.

    Pipeline: parse CLI args -> load model/tokenizer config and instructions
    -> load the RE dataset -> serialize each sample's relations into a
    triple/quintuple string paired with its original sentence -> tokenize ->
    train with Seq2SeqTrainer (optionally with PEFT) and save the model.
    """

    args = get_train_args()

    config = load_config(args.path_to_model_config)
    tokenizer_kwargs = dict(config["tokenizer"])
    training_args = dict(config["training_args"])

    set_global_seed(config["seed"])

    # load instructions
    instructions = load_json(args.path_to_instructions)

    dataset_name = args.dataset_name
    data_config = get_data_config(
        args.root_data_dir,
        dataset_name,
        is_few_shot=1,
        few_shot_num=0,
    )

    print(data_config)

    raw_datasets = load_dataset("json", data_files=data_config)

    # entity_by_type = group_entity_by_type(raw_datasets["train"])

    # print(entity_by_type)
    # return

    # (Translated original note) For each training-set sample, replace
    # same-type entities in its "relations" field.
    # NOTE(review): the code below does NOT swap entities — it only
    # serializes relations to text; the entity-swap idea appears abandoned
    # (see the commented-out group_entity_by_type above).

    def gen_data_augmentation_item(data):
        # Map one raw sample to {"labels": sentence, "context": triple string}.
        # Datasets without entity-type annotations use
        # (head, relation, tail) triples.
        if dataset_name in ["ADE_corpus", "semval-RE"]:
            triples = list(
                map(
                    lambda x: (
                        x["head"]["name"],
                        x["type"],
                        x["tail"]["name"],
                    ),
                    data["relations"],
                )
            )
            triple_str = ",".join([f"({x[0]}, {x[1]}, {x[2]})" for x in triples])
        else:
            # Datasets with entity-type annotations use
            # (head, head type, relation, tail, tail type) quintuples.
            triples = list(
                map(
                    lambda x: (
                        x["head"]["name"],
                        x["head"]["type"],
                        x["type"],
                        x["tail"]["name"],
                        x["tail"]["type"],
                    ),
                    data["relations"],
                )
            )

            triple_str = ",".join(
                [f"({x[0]}, {x[1]}, {x[2]}, {x[3]}, {x[4]})" for x in triples]
            )

        return {
            "labels": data["sentence"],
            "context": triple_str,
        }

    # Generate one data-augmentation item per sample in every split.
    process_dataset = raw_datasets.map(
        gen_data_augmentation_item,
        remove_columns=["relations", "sentence"],
        load_from_cache_file=False,
        desc="generate data augmentation item",
    )

    # Prepend the data-augmentation ("DA") instruction to every sample.
    process_dataset = process_dataset.map(
        lambda x: {"instruction": instructions["DA"], **x},
        load_from_cache_file=False,
        desc="add instruction",
    )

    # Print one processed sample as a sanity check.
    print(process_dataset["train"][0])

    # return

    model_name = config["model"]["model_name"]
    # NOTE(review): this rebinds training_args to config["training_args"]
    # itself, discarding the defensive copy made above — the output_dir
    # assignment below therefore mutates the loaded config in place. Confirm
    # this is intended.
    training_args = config["training_args"]
    now = datetime.now()
    # Timestamped output directory: <base>/<dataset>/<model>/<MMDD_HH_MM>.
    training_args["output_dir"] = (
        f"{training_args['output_dir']}/{dataset_name}/{model_name}/{now.strftime('%m%d_%H_%M')}"
    )

    # NOTE(review): Dataset.shuffle returns a new dataset; the result is
    # discarded here and raw_datasets is not used again afterwards, so this
    # line is a no-op — presumably process_dataset was meant to be shuffled
    # and reassigned. Verify before relying on shuffled training order.
    raw_datasets.shuffle(seed=config["seed"])

    # load model and tokenizer
    model_path = (
        config["model"]["model_path"] if "model_path" in config["model"] else model_name
    )
    peft_config = config.get("peft_config", None)

    if peft_config is not None:
        peft_config = get_peft_config(peft_config)

    tokenizer, model = load_tokenizer_and_model(model_path, peft_config)

    print(len(tokenizer))

    # return

    def preprocess_function(examples):
        # Tokenize instruction + context as encoder input and the original
        # sentence ("labels") as the decoder target.
        instructions = examples["instruction"]
        context = examples["context"]
        labels = examples["labels"]

        return tokenizer(
            text=instructions,
            text_pair=context,
            text_target=labels,
            **tokenizer_kwargs,
            padding=False,  # padding is applied later by DataCollatorForSeq2Seq
        )

    tokenized_dataset = process_dataset.map(
        preprocess_function, batched=True, remove_columns=["instruction", "context"]
    )

    seq2seq_collator = DataCollatorForSeq2Seq(
        tokenizer=tokenizer,
        model=model,
        max_length=tokenizer_kwargs["max_length"],
        padding="max_length",
    )

    training_arguments = Seq2SeqTrainingArguments(
        **training_args,
    )

    trainer = Seq2SeqTrainer(
        model=model,
        args=training_arguments,
        train_dataset=tokenized_dataset["train"],
        # eval_dataset=tokenized_dataset["valid"],
        data_collator=seq2seq_collator,
        # tokenizer=tokenizer if peft_config.peft_type != "LORA" else None,
        tokenizer=tokenizer,
    )

    trainer.train()

    trainer.save_model(training_args["output_dir"])


if __name__ == "__main__":
    # Script entry point.
    main()
