import os

import numpy as np
import torch
import datasets
from peft import get_peft_model, LoraConfig, TaskType
from dataclasses import dataclass, field
from tensorboardX import SummaryWriter
from transformers.integrations import TensorBoardCallback
from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR
import pdb

from transformers import Trainer, TrainingArguments, HfArgumentParser
BAICHUAN2_MODEL_NAME_OR_PATH = '/workspace/psycho/resources/Baichuan2-13B-chat'
QWEN_MODEL_NAME_OR_PATH = '/workspace/psycho_trainning/resources/Qwen-14B-Chat'

@dataclass
class DataArguments:
    """Extra command-line arguments parsed alongside transformers.TrainingArguments."""
    # Whether to use a customized dataset (flag is parsed but not read in this file).
    customized_dataset: bool = field(default=False)
    # Path handed to datasets.load_from_disk() in main().
    train_file_path: str = field(default='./data/train.txt')
    # LoRA rank `r` for the PEFT LoraConfig built in main().
    lora_rank: int = field(default=8)
    # Optional checkpoint path; when non-empty, loaded with torch.load() in main().
    load: str = field(default='')
    # Model-name selector (parsed but not read in this file).
    model_name: str = field(default='')


class CustomizedModifiedTrainer(Trainer):
    """Trainer variant for LoRA fine-tuning.

    - ``compute_loss`` moves the batch tensors onto the model's device and
      returns the mean-reduced language-modeling loss.
    - ``save_model`` persists only the trainable (LoRA adapter) parameters
      as ``adapter_model.bin`` instead of the full model weights.
    """

    def compute_loss(self, model, inputs, return_outputs=False, **kwargs):
        """Return the LM loss for one batch.

        ``**kwargs`` absorbs extra arguments (e.g. ``num_items_in_batch``)
        passed by newer transformers releases, keeping this override
        forward-compatible without changing existing call sites.
        """
        device = model.device
        loss = model(
            input_ids=inputs['input_ids'].to(device),
            labels=inputs['labels'].to(device),
        ).loss
        # .mean() collapses a per-device loss vector to a scalar.
        return loss.mean()

    def save_model(self, output_dir=None, _internal_call=False):
        """Save training args and only the trainable (adapter) parameters.

        The original crashed in ``os.makedirs(None)`` when the Trainer
        invoked this without an explicit directory; fall back to the
        configured ``self.args.output_dir`` instead.
        """
        from transformers.trainer import TRAINING_ARGS_NAME

        if output_dir is None:
            output_dir = self.args.output_dir
        os.makedirs(output_dir, exist_ok=True)
        torch.save(self.args, os.path.join(output_dir, TRAINING_ARGS_NAME))
        # Only parameters with requires_grad are the LoRA adapters; the frozen
        # base model is not duplicated on disk.
        saved_params = {
            k: v.to("cpu") for k, v in self.model.named_parameters() if v.requires_grad
        }
        torch.save(saved_params, os.path.join(output_dir, "adapter_model.bin"))


def customized_data_collator(batch, tok=None):
    """Collate features into right-padded ``input_ids``/``labels`` tensors.

    Each feature is a dict with ``input_ids`` (list[int]) and ``context_len``
    (int). The prompt/context portion and all padding positions of ``labels``
    are set to -100 so cross-entropy ignores them.

    Args:
        batch: list of feature dicts (assumed non-empty).
        tok: tokenizer providing ``pad_token_id`` / ``eod_id``; defaults to
            the module-level ``tokenizer`` set up in ``main()`` so existing
            Trainer call sites are unchanged.

    Returns:
        dict with stacked LongTensors ``input_ids`` and ``labels``, sorted by
        descending sequence length.
    """
    if tok is None:
        tok = tokenizer
    len_ids = [len(feature["input_ids"]) for feature in batch]
    longest = max(len_ids)
    # Resolve the padding id once (hoisted out of the loop). Use an explicit
    # None check: the original `if not pad_token_id` wrongly rejected a valid
    # pad_token_id of 0 because 0 is falsy.
    pad_token_id = tok.pad_token_id
    if pad_token_id is None:
        pad_token_id = tok.eod_id  # Qwen tokenizers expose eod_id instead
    input_ids = []
    labels_list = []

    for ids_l, feature in sorted(zip(len_ids, batch), key=lambda x: -x[0]):
        ids = feature["input_ids"]
        context_len = feature["context_len"]
        labels = (
                [-100] * context_len + ids[context_len:] + [-100] * (longest - ids_l)
        )
        ids = ids + [pad_token_id] * (longest - ids_l)
        input_ids.append(torch.LongTensor(ids))
        labels_list.append(torch.LongTensor(labels))

    return {
        "input_ids": torch.stack(input_ids),
        "labels": torch.stack(labels_list),
    }


def main():
    """Fine-tune a causal LM with LoRA on a dataset saved to disk.

    Parses DataArguments + TrainingArguments from the command line, loads the
    Qwen model/tokenizer, wraps the model with a LoRA adapter, and trains with
    CustomizedModifiedTrainer.
    """
    # writer = SummaryWriter()
    data_args, training_args = \
        HfArgumentParser((DataArguments, TrainingArguments)).parse_args_into_dataclasses()

    # The data collator reads the tokenizer through this module-level global.
    global tokenizer

    from transformers import AutoModelForCausalLM, AutoTokenizer

    model = AutoModelForCausalLM.from_pretrained(QWEN_MODEL_NAME_OR_PATH, trust_remote_code=True)
    tokenizer = AutoTokenizer.from_pretrained(QWEN_MODEL_NAME_OR_PATH, trust_remote_code=True)

    if data_args.load:
        print("loading checkpoint...")
        # strict=False: the checkpoint may contain only a subset of the
        # model's parameters (e.g. adapter weights).
        model.load_state_dict(torch.load(data_args.load), strict=False)
        print("loaded model")

    model.float()
    # set gradient checkpointing to save GPU-memory
    model.gradient_checkpointing_enable()
    model.is_parallelizable = True
    model.model_parallel = True
    model.config.use_cache = (
        False  # silence the warnings. Please re-enable for inference!
    )

    peft_config = LoraConfig(
        task_type=TaskType.CAUSAL_LM,
        inference_mode=False,
        r=data_args.lora_rank,
        target_modules=['c_attn'],  # Qwen's fused attention projection
        lora_alpha=32,
        lora_dropout=0.1,
    )
    model = get_peft_model(model, peft_config)

    print('Total trainable params number: ', sum(p.numel() for p in model.parameters() if p.requires_grad))

    # load_dataset & setup trainer
    train_dataset = datasets.load_from_disk(dataset_path=data_args.train_file_path)
    # BUG FIX: Dataset.shuffle() is NOT in-place — it returns a new dataset.
    # The original discarded the result, so training ran on unshuffled data.
    train_dataset = train_dataset.shuffle()
    trainer = CustomizedModifiedTrainer(
        model=model,
        train_dataset=train_dataset,
        args=training_args,
        data_collator=customized_data_collator,
    )

    trainer.train()
    model.save_pretrained(training_args.output_dir)


if __name__ == '__main__':
    # Pre-declare the module-level tokenizer that customized_data_collator
    # reads; main() assigns the real instance. (The original had a typo:
    # `tokenize = None`, which was never used anywhere.)
    tokenizer = None
    main()
