import argparse
import copy
import dataclasses
import os

import numpy as np
import torch
import datasets
import torch.distributed as dist
import deepspeed

from dataclasses import dataclass, field
from tensorboardX import SummaryWriter
from transformers.integrations import TensorBoardCallback
from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR

from transformers import Trainer, TrainingArguments, HfArgumentParser
# Local filesystem path to the pretrained Baichuan2-13B-Chat weights;
# main() loads both the model and tokenizer from here.
BAICHUAN2_MODEL_NAME_OR_PATH = '/workspace/psycho_trainning/resources/Baichuan2-13B-chat'


@dataclass
class DataArguments:
    """Command-line data/model options, parsed via HfArgumentParser."""

    # Whether to use the customized dataset pipeline.
    customized_dataset: bool = False
    # Path to the serialized training dataset on disk.
    train_file_path: str = './data/train.txt'
    # Optional path to a checkpoint state_dict to load before training.
    load: str = ''
    # Model name; not referenced elsewhere in this file (kept for CLI compatibility).
    model_name: str = ''

class CustomizedModifiedTrainer(Trainer):
    """Trainer variant that computes a plain causal-LM loss and saves
    checkpoints by consolidating DeepSpeed ZeRO shards into a single
    ``pytorch_model.bin``.
    """

    def compute_loss(self, model, inputs, return_outputs=False):
        """Forward the batch and return the mean-reduced LM loss.

        NOTE(review): ``return_outputs`` is accepted for Trainer API
        compatibility but ignored — model outputs are never returned.
        """
        device = model.device
        # Move tensors to the model's device; the model computes the loss
        # internally from `labels` (standard HF causal-LM interface).
        loss = model(
            input_ids=inputs['input_ids'].to(device),
            labels=inputs['labels'].to(device),
        ).loss
        # .mean() collapses a per-device loss vector (e.g. under DataParallel)
        # to a scalar; it is a no-op if the loss is already scalar.
        return loss.mean()

    def _save_checkpoint(self, model, trial, metrics=None):
        """Write a checkpoint folder and update best-metric bookkeeping.

        Overrides Trainer._save_checkpoint so the weights are consolidated
        from DeepSpeed ZeRO-3 shards and written by a single rank.
        """
        checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}"

        run_dir = self._get_output_dir(trial=trial)
        output_dir = os.path.join(run_dir, checkpoint_folder)

        # Mirror HF Trainer's best-model tracking when eval metrics exist.
        if metrics is not None and self.args.metric_for_best_model is not None:
            metric_to_check = self.args.metric_for_best_model
            if not metric_to_check.startswith("eval_"):
                metric_to_check = f"eval_{metric_to_check}"
            metric_value = metrics[metric_to_check]

            # greater_is_better may be None, which falls through to np.less.
            operator = np.greater if self.args.greater_is_better else np.less
            if (self.state.best_metric is None or self.state.best_model_checkpoint is None
                    or operator(metric_value, self.state.best_metric)):
                self.state.best_metric = metric_value

                self.state.best_model_checkpoint = output_dir

        os.makedirs(output_dir, exist_ok=True)

        if self.args.should_save:
            self._rotate_checkpoints(use_mtime=True, output_dir=run_dir)

        # get state dict through deepspeed engine
        # NOTE(review): assumes DeepSpeed ZeRO stage 3 is active — this call
        # does not exist on a plain (non-DeepSpeed) wrapped model; confirm
        # the deepspeed config before reusing this trainer elsewhere.
        engine_state_dict = self.model_wrapped._zero3_consolidated_16bit_state_dict()
        # NOTE(review): gated on local_rank == 0, so in a multi-node run each
        # node's local rank 0 writes the file; if a single global writer is
        # intended, is_world_process_zero() may be the right check — verify.
        if self.args.local_rank == 0:
            torch.save(engine_state_dict, os.path.join(output_dir, "pytorch_model.bin"))
            print(f"Save model at {output_dir}")


def customized_data_collator(batch):
    """Collate variable-length features into padded input_ids/labels tensors.

    Features are emitted longest-first. Prompt ("context") positions and
    right-padding positions are masked with -100 in the labels so they do
    not contribute to the LM loss. Reads the module-level ``tokenizer`` for
    the pad token id.
    """
    lengths = [len(feature["input_ids"]) for feature in batch]
    max_len = max(lengths)

    padded_ids = []
    padded_labels = []
    for length, feature in sorted(zip(lengths, batch), key=lambda pair: pair[0], reverse=True):
        ids = feature["input_ids"]
        context_len = feature["context_len"]
        pad_count = max_len - length
        # Mask the prompt and the padding; keep only response tokens as targets.
        label_row = [-100] * context_len + ids[context_len:] + [-100] * pad_count
        id_row = ids + [tokenizer.pad_token_id] * pad_count
        padded_ids.append(torch.LongTensor(id_row))
        padded_labels.append(torch.LongTensor(label_row))

    return {
        "input_ids": torch.stack(padded_ids),
        "labels": torch.stack(padded_labels),
    }


def main():
    """Entry point: parse CLI args, build model/tokenizer, run training."""
    data_args, training_args = \
        HfArgumentParser((DataArguments, TrainingArguments)).parse_args_into_dataclasses()

    # The data collator reads the tokenizer through this module-level global.
    global tokenizer

    from src.models.baichuan2 import BaichuanForCausalLM, BaichuanTokenizer
    model = BaichuanForCausalLM.from_pretrained(BAICHUAN2_MODEL_NAME_OR_PATH)
    tokenizer = BaichuanTokenizer.from_pretrained(BAICHUAN2_MODEL_NAME_OR_PATH)

    if data_args.load:
        print("loading checkpoint...")
        # map_location="cpu" avoids materializing the checkpoint on whatever
        # GPU it was saved from; strict=False tolerates missing/extra keys.
        model.load_state_dict(torch.load(data_args.load, map_location="cpu"), strict=False)
        print("loaded model")

    model.float()
    # set gradient checkpointing to save GPU-memory
    model.gradient_checkpointing_enable()
    model.is_parallelizable = True
    model.model_parallel = True
    model.config.use_cache = (
        False  # silence the warnings. Please re-enable for inference!
    )

    print('Total trainable params number: ', sum(p.numel() for p in model.parameters() if p.requires_grad))

    # load_dataset & setup trainer
    train_dataset = datasets.load_from_disk(dataset_path=data_args.train_file_path)
    # BUG FIX: Dataset.shuffle() returns a new dataset rather than shuffling
    # in place; the original call discarded the result, so the training data
    # was never actually shuffled.
    train_dataset = train_dataset.shuffle()
    trainer = CustomizedModifiedTrainer(
        model=model,
        train_dataset=train_dataset,
        args=training_args,
        data_collator=customized_data_collator,
    )

    trainer.train()


if __name__ == '__main__':
    # Pre-declare the module-level tokenizer global that main() assigns and
    # customized_data_collator reads. BUG FIX: the original line assigned
    # `tokenize` (a typo), leaving `tokenizer` undeclared until main() ran.
    tokenizer = None
    main()
