"""
1. huggingface-cli login
2. torchrun --nnodes 1 --nproc_per_node 4 --master_addr 127.0.0.1 --master_port 29500 finetune.py
"""
import os
import json
import random
import torch
from torch.utils.data import Dataset
from transformers import (
    AutoTokenizer,
    AutoModelForCausalLM,
    TrainingArguments,
    Trainer,
    DataCollatorForSeq2Seq
)

from peft import (
    LoraConfig,
    get_peft_model,
)

import yaml
from munch import munchify


class InstructDataset(Dataset):
    """Map-style dataset over a list of instruction dicts.

    Keeps either the first ``dataset_size`` items (``reverse=False``) or the
    last ``dataset_size`` items (``reverse=True``) of ``data``.  ``callback``
    (e.g. prompt generation + tokenization) is applied lazily, one sample at
    a time, in ``__getitem__``.
    """

    def __init__(self, data, dataset_size, reverse=False, callback=None):
        assert isinstance(data, list), f"Expected data object of type list got {type(data)}"
        if reverse:
            # Bug fix: `data[-0:]` returns the WHOLE list, so dataset_size == 0
            # must map to an empty slice explicitly.
            self.data = data[-dataset_size:] if dataset_size > 0 else []
        else:
            self.data = data[:dataset_size]
        self.callback = callback
        # Tag every record of the *full* input list (not just the kept slice)
        # with its global position, preserving the original behavior.
        for idx, record in enumerate(data):
            record['index'] = idx

    def __getitem__(self, idx):
        # Defer tokenization to access time instead of construction time.
        return self.callback(self.data[idx])

    def __len__(self) -> int:
        return len(self.data)


# Load the YAML config and wrap it for attribute-style access (args.foo.bar).
with open("config.yaml", "r") as cfg_file:
    args = munchify(yaml.safe_load(cfg_file))

# Load the tokenizer for the base model (requires a prior `huggingface-cli login`).
tokenizer = AutoTokenizer.from_pretrained(args.model_config.base_model_name_or_path, use_auth_token=True)
# Pad with token id 0 (<unk> in LLaMA-family vocabularies). The original code
# assigned the int 0 to `pad_token`, but `pad_token` expects a *string* token;
# `pad_token_id` is the integer-valued attribute.
tokenizer.pad_token_id = 0


# Tokenize one prompt string using the config-driven tokenizer settings.
def tokenize(prompt, add_eos_token=True):
    """Tokenize ``prompt`` and clone the input ids as labels.

    An EOS token is appended when it is missing, the sequence was not
    truncated at max_length, and ``add_eos_token`` is True.
    """
    cfg = args.tokenizer_config
    encoded = tokenizer(
        prompt,
        truncation=cfg.truncation,
        max_length=cfg.max_length,
        padding=cfg.padding,
        return_tensors=None,  # keep plain Python lists, not tensors
    )
    ids = encoded["input_ids"]
    needs_eos = (
        add_eos_token
        and len(ids) < cfg.max_length
        and ids[-1] != tokenizer.eos_token_id
    )
    if needs_eos:
        ids.append(tokenizer.eos_token_id)
        encoded["attention_mask"].append(1)
    # Causal-LM training: labels start as a copy of the input ids.
    encoded["labels"] = ids.copy()
    return encoded


# Render one data point into an Alpaca-style prompt string.
def generate_prompt(data_point):
    """Build the text prompt for a data point.

    The "### Input:" section is included only when the data point carries a
    non-empty "input" field.
    """
    if not data_point["input"]:
        return f"""
        Below is an instruction that describes a task. Write a response that appropriately completes the request.
        ### Instruction:
        {data_point["instruction"]}
        ### Response:
        {data_point["output"]}
        """
    return f"""
        Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.
        ### Instruction:
        {data_point["instruction"]}
        ### Input:
        {data_point["input"]}
        ### Response:
        {data_point["output"]}
        """


# Generate the prompt for one data point, tokenize it, and mask labels.
def generate_and_tokenize_prompt(data_point):
    """Build, tokenize, and label-mask the full prompt for ``data_point``.

    When ``args.train_on_inputs`` is false, label positions belonging to the
    prompt (everything before the response) are set to -100 so the loss is
    computed only on the response tokens.
    """
    full_prompt = generate_prompt(data_point)
    tokenized_full_prompt = tokenize(full_prompt)

    if not args.train_on_inputs:
        # Re-render with an empty output to measure how many tokens belong
        # to the instruction/input part. (Renamed from `input`, which
        # shadowed the builtin.)
        prompt_without_output = generate_prompt({**data_point, "output": ""})
        tokenized_input = tokenize(prompt_without_output, add_eos_token=False)
        input_len = len(tokenized_input["input_ids"])
        # -100 is the index ignored by the cross-entropy loss in transformers.
        tokenized_full_prompt["labels"][:input_len] = [-100] * input_len

    # Not every tokenizer emits token_type_ids (e.g. LLaMA's does not);
    # pop with a default so a missing key is not a KeyError.
    tokenized_full_prompt.pop("token_type_ids", None)
    return tokenized_full_prompt


# read json data file
assert args.data_config.file_type == 'json', 'Invalid file type'
with open(args.data_config.data_path) as f:
    data = json.load(f)
    # Some dumps wrap the example list in an object; unwrap the configured field.
    if isinstance(data, dict):
        data = data[args.data_config.data_field]
    # NOTE(review): no seed is set, so under torchrun every rank shuffles
    # independently and may end up with a different train/val split —
    # confirm whether a shared seed should be set before shuffling.
    random.shuffle(data)
    # Train split = first train_size items; eval split = last val_size items.
    train_size = min(args.data_config.train_set_size, len(data))
    train_dataset = InstructDataset(data, train_size, callback=generate_and_tokenize_prompt)
    # Cap val_size so the head and tail slices cannot overlap.
    val_size = min(args.data_config.val_set_size, len(data) - train_size)
    eval_dataset = InstructDataset(data, val_size, reverse=True, callback=generate_and_tokenize_prompt)

# initialize distributed training config
local_rank = int(os.environ.get("LOCAL_RANK", -1))  # set by torchrun per process
world_size = int(os.environ.get("WORLD_SIZE", -1))  # total number of processes
print(f"{local_rank=}, {world_size=}")

# NOTE(review): when launched without torchrun both values stay -1 and
# init_process_group below will fail — this script assumes the torchrun
# invocation from the module docstring.
if torch.cuda.is_available():
    # NCCL backend for GPU collectives; pin this process to its own GPU.
    torch.distributed.init_process_group("nccl", rank=local_rank, world_size=world_size)
    torch.cuda.set_device(local_rank)
    # Place the entire model on this rank's GPU.
    device_map = {"": local_rank}
else:
    # Gloo backend for CPU-only runs.
    torch.distributed.init_process_group("gloo", rank=local_rank, world_size=world_size)
    device_map = {"": torch.device("cpu")}

# Load the base causal-LM weights onto this rank's device.
model = AutoModelForCausalLM.from_pretrained(
    args.model_config.base_model_name_or_path,
    load_in_8bit=False,  # full-precision load; 8-bit quantization disabled
    device_map=device_map,
    cache_dir=args.model_config.cache_dir,
    use_auth_token=True,  # requires a prior `huggingface-cli login`
)
print('Load model successfully!')

# Cast weights to fp16 on GPU.
# NOTE(review): TrainingArguments below also sets fp16=True (mixed precision);
# confirm that pre-halving the weights here is intentional.
if args.fp16 and torch.cuda.is_available():
    model.half()

# Wrap the base model with LoRA adapters (peft); config comes from config.yaml.
config = LoraConfig(
    r=args.lora_config.r,  # rank of the low-rank update matrices
    lora_alpha=args.lora_config.alpha,  # scaling factor for the LoRA update
    target_modules=args.lora_config.target_modules,  # module names to adapt
    lora_dropout=args.lora_config.dropout,
    bias=args.lora_config.bias,
    task_type=args.lora_config.task_type,  # e.g. CAUSAL_LM — taken from config
)
model = get_peft_model(model, config)

# Hugging Face Trainer configuration.
training_args = TrainingArguments(
    per_device_train_batch_size=args.micro_batch_size,
    # Derive accumulation so the effective global batch equals args.batch_size:
    # micro_batch_size * accumulation * world_size.
    # NOTE(review): assumes WORLD_SIZE is set by torchrun; world_size == -1
    # would make this negative.
    gradient_accumulation_steps=args.batch_size // args.micro_batch_size // world_size,
    warmup_steps=args.warmup_step,
    num_train_epochs=args.num_train_epochs,
    learning_rate=args.learning_rate,
    fp16=args.fp16,
    logging_steps=args.logging_steps,
    # Only evaluate when a validation set was actually configured.
    evaluation_strategy="steps" if args.data_config.val_set_size > 0 else "no",
    save_strategy="steps",
    eval_steps=args.eval_steps if args.data_config.val_set_size > 0 else None,
    save_steps=args.save_steps,
    output_dir=args.output_dir,
    save_total_limit=3,  # keep only the three most recent checkpoints
    load_best_model_at_end=True if args.data_config.val_set_size > 0 else False,
    group_by_length=args.group_by_length,
)

# Dynamically pad each batch to its longest sample, rounded to a multiple of 8.
data_collator = DataCollatorForSeq2Seq(tokenizer, pad_to_multiple_of=8, return_tensors="pt", padding=True)

trainer = Trainer(
    model=model,
    train_dataset=train_dataset,
    eval_dataset=eval_dataset,
    args=training_args,
    data_collator=data_collator
)

trainer.train()