import os
from dataclasses import dataclass, field
from typing import Optional

from datasets import Dataset
from loguru import logger
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    PreTrainedModel,
    PreTrainedTokenizer,
    DataCollatorForLanguageModeling,
    HfArgumentParser)
from transformers.trainer_utils import get_last_checkpoint, set_seed
from trl import (
    SFTTrainer,
    SFTConfig,
    DataCollatorForCompletionOnlyLM)
from trl import setup_chat_format as origin_setup_chat_format

from common import load_multi_datasets, EvaluationCallback

os.environ["HF_ENDPOINT"] = "https://hf-mirror.com"


"""
普通单机单/多卡训练，通过 device_map="auto" 把参数分散在各个GPU单元，GPU都是在串接执行，没有充分利用。

Congliu/Chinese-DeepSeek-R1-Distill-data-110k-SFT,shibing624/alpaca-zh 数据集下，总共19512steps, 1.86s/it， 预计训练时间10个小时。

"""

@dataclass
class TrainConfig:
    """Command-line configuration for single-node (single/multi-GPU) SFT training.

    Parsed by ``HfArgumentParser`` in the ``__main__`` block; every field
    becomes a CLI flag of the same name.
    """
    model_name_or_path: Optional[str] = field(
        default="deepseek-ai/deepseek-llm-7b-base",
        metadata={"help": "Model checkpoint for weights initialization."})
    dataset_name: Optional[str] = field(
        default="llamafactory/alpaca_gpt4_zh,llamafactory/alpaca_gpt4_en,shibing624/sharegpt_gpt4",
        metadata={"help": "Dataset names. The default names include english/chinese single/multi round corpus."})
    eval_dataset_name: Optional[str] = field(
        default=None,
        metadata={"help": "Eval dataset names."})
    output_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Output directory where the model will be saved."})
    max_seq_length: Optional[int] = field(
        default=4096,
        # FIX: help text was copy-pasted from dataset_num_proc.
        metadata={"help": "Maximum total input sequence length after tokenization."})
    dataset_num_proc: Optional[int] = field(
        default=20,
        metadata={"help": "Number of processes to use for dataset loading."})
    test_split: Optional[float] = field(
        default=0.01,
        # FIX: help text was just the field name.
        metadata={"help": "Fraction of the training data held out for evaluation when no eval dataset is given."})
    sampling: Optional[float] = field(
        default=1.0,
        metadata={"help": "Sampling mode."})
    resume_from_checkpoint: Optional[str] = field(
        default=None,
        metadata={"help": "The model checkpoint to resume training from."})
    per_device_batch_size: Optional[int] = field(
        default=2,
        metadata={
            "help": "per_device_train_batch_size and per_device_eval_batch_size"})
    gradient_accumulation_steps: Optional[int] = field(
        default=4,
        metadata={"help": "gradient_accumulation_steps"})
    epochs: Optional[int] = field(
        default=2,
        metadata={"help": "num_epochs"})
    learning_rate: float = field(
        default=5e-5,
        metadata={"help": "The initial learning rate for AdamW."})
    seed: Optional[int] = field(
        default=42,
        metadata={"help": "A seed for reproducible training."})
    eval_steps: Optional[int] = field(
        default=200,
        metadata={"help": "eval_steps"})
    save_steps: Optional[int] = field(
        default=1000,
        metadata={"help": "save_steps"})
    logging_steps: Optional[int] = field(
        default=1,
        metadata={"help": "logging_steps"})
    check_steps: Optional[int] = field(
        default=50,
        metadata={"help": "check_steps"})
    torch_dtype: Optional[str] = field(
        default="bfloat16",
        metadata={"help": "torch_dtype"})


def setup_chat_format(model: PreTrainedModel, tokenizer: PreTrainedTokenizer) -> tuple[
    PreTrainedModel, PreTrainedTokenizer, DataCollatorForLanguageModeling]:
    """
    We could use trl.setup_chat_format, but it sets pad_token = eos_token = <|im_end|>,
    which reportedly makes the model prone to repetitive replies, so we do the setup
    ourselves here without touching the original pad/eos settings.

    :param model: causal LM; its embedding matrix is resized if new tokens are added
    :param tokenizer: tokenizer that receives the pad token and/or chat template
    :return: (model, tokenizer, completion-only data collator)
    """
    # add_special_tokens returns the number of tokens actually added to the vocab.
    added_tokens = 0

    # Give the tokenizer a dedicated pad token when it would otherwise alias eos.
    if tokenizer.pad_token_id == tokenizer.eos_token_id:
        added_tokens += tokenizer.add_special_tokens({'additional_special_tokens': ['<|pad|>']})
        tokenizer.pad_token = '<|pad|>'

    if tokenizer.chat_template is None:
        logger.warning("setup chat format")
        added_tokens += tokenizer.add_special_tokens({'additional_special_tokens': ['<|im_start|>', '<|im_end|>']})
        tokenizer.chat_template = (
            "{% for message in messages %}"
            f"{{{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}}}"
            "{% endfor %}"
            "{% if add_generation_prompt %}"
            f"{{{{ '<|im_start|>assistant\n' }}}}"
            "{% else %}"
            f"{tokenizer.eos_token}"
            "{% endif %}"
        )
    else:
        logger.warning("setup chat is ignored: {}", tokenizer.chat_template)

    # BUG FIX: the original only resized embeddings inside the chat-template branch,
    # so a '<|pad|>' token added to a tokenizer that already had a chat template
    # would index past the embedding matrix. Resize whenever the vocab grew.
    if added_tokens > 0:
        model.resize_token_embeddings(len(tokenizer))

    logger.warning("tokenizer bos={} eos={} pad={}"
                       .format(tokenizer.bos_token_id, tokenizer.eos_token_id, tokenizer.pad_token_id))

    if getattr(model, "config", None) is not None:
        logger.warning("config bos={} eos={} pad={}"
                       .format(model.config.bos_token_id, model.config.eos_token_id, model.config.pad_token_id))

    if getattr(model, "generation_config", None) is not None:
        logger.warning("generation_config bos={} eos={} pad={}"
                       .format(model.generation_config.bos_token_id, model.generation_config.eos_token_id, model.generation_config.pad_token_id))

    # Propagate the pad token id into the model configs only where missing,
    # preserving any value the checkpoint already defines.
    if getattr(model, "config", None) is not None:
        if model.config.pad_token_id is None:
            model.config.pad_token_id = tokenizer.pad_token_id

    if getattr(model, "generation_config", None) is not None:
        if model.generation_config.pad_token_id is None:
            model.generation_config.pad_token_id = tokenizer.pad_token_id

    # Collator masks everything except assistant completions, so loss is
    # computed only on response tokens.
    instruction_template = "<|im_start|>user\n"
    instruction_template_ids = tokenizer.encode(instruction_template, add_special_tokens=False)
    response_template = "<|im_start|>assistant\n"
    response_template_ids = tokenizer.encode(response_template, add_special_tokens=False)
    data_collator = DataCollatorForCompletionOnlyLM(
        response_template=response_template_ids,
        instruction_template=instruction_template_ids,
        tokenizer=tokenizer,
        mlm=False)

    return model, tokenizer, data_collator


def get_checkpoint_dir(args: "TrainConfig") -> Optional[str]:
    """Pick the checkpoint directory to resume training from, or None for a fresh run.

    Preference order: an explicit, existing ``resume_from_checkpoint`` path first,
    then the last checkpoint auto-detected inside ``args.output_dir``.

    :param args: training config providing output_dir and resume_from_checkpoint
    :return: path of the checkpoint to resume from, or None
    """
    last_checkpoint_dir = None
    if os.path.isdir(args.output_dir):
        try:
            last_checkpoint_dir = get_last_checkpoint(args.output_dir)
        except Exception as exc:  # FIX: was a bare `except: pass`, which hides real bugs
            logger.warning("get_last_checkpoint failed for {}: {}", args.output_dir, exc)

    checkpoint_dir = None
    if args.resume_from_checkpoint is not None and os.path.isdir(args.resume_from_checkpoint):
        checkpoint_dir = args.resume_from_checkpoint
    elif last_checkpoint_dir is not None:
        checkpoint_dir = last_checkpoint_dir

    if checkpoint_dir is not None:
        logger.warning(f"resuming training at {checkpoint_dir}.")

    return checkpoint_dir


def load_slit_datasets(args: TrainConfig, tokenizer) -> tuple[Dataset, Dataset]:
    """Load the training corpus and return a (train_dataset, eval_dataset) pair.

    When no eval dataset is configured, a fraction (``args.test_split``) of the
    training data is held out for evaluation instead.

    NOTE(review): the name looks like a typo for ``load_split_datasets``; kept
    unchanged for backward compatibility with existing callers.

    :param args: training config (dataset names, num_proc, max_seq_length, sampling)
    :param tokenizer: tokenizer forwarded to the dataset loader
    :return: (train_dataset, eval_dataset)
    """
    dataset = load_multi_datasets(args.dataset_name, tokenizer, args.dataset_num_proc, args.max_seq_length, args.sampling)

    if args.eval_dataset_name is None:
        split_dataset = dataset.train_test_split(test_size=args.test_split)
        train_dataset = split_dataset["train"]
        eval_dataset = split_dataset["test"]
    else:
        train_dataset = dataset
        # BUG FIX: the whole `args` object was passed where the worker count
        # belongs (compare the call above) — pass args.dataset_num_proc.
        eval_dataset = load_multi_datasets(args.eval_dataset_name, tokenizer, args.dataset_num_proc, args.max_seq_length)

    return train_dataset, eval_dataset


def main(args: TrainConfig):
    """Run supervised fine-tuning end to end: load model/tokenizer, prepare
    datasets, build the SFT trainer, train (resuming if a checkpoint exists),
    and save the final model.

    :param args: parsed TrainConfig from the command line
    """
    set_seed(args.seed)

    # "none" on the CLI means: let from_pretrained choose the dtype.
    if args.torch_dtype == "none":
        args.torch_dtype = None

    logger.info("Loading model {}", args.model_name_or_path)
    model = AutoModelForCausalLM.from_pretrained(
        args.model_name_or_path,
        quantization_config=None,
        device_map="auto",  # automatically shard parameters across available GPUs
        torch_dtype=args.torch_dtype,
        trust_remote_code=True
    )

    tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path)
    # May add a pad token / chat template and returns the matching
    # completion-only collator.
    model, tokenizer, data_collator = setup_chat_format(model, tokenizer)
    logger.info(tokenizer)

    train_dataset, eval_dataset = load_slit_datasets(args, tokenizer)
    logger.warning("train dataset {}", train_dataset)
    logger.warning("eval dataset {}", eval_dataset)

    # Never exceed what the tokenizer itself supports.
    args.max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
    logger.info("Max sequence length: {}", args.max_seq_length)

    # Default output dir is derived from the model name, e.g. models/<model>_sft.
    args.output_dir = "models/" + args.model_name_or_path.split("/")[-1] + "_sft" if args.output_dir is None else args.output_dir
    logger.info("Output directory: {}", args.output_dir)

    logger.info("save_steps={}, eval_steps={}, logging_steps={}".format(args.save_steps, args.eval_steps, args.logging_steps))
    training_args = SFTConfig(
        output_dir=args.output_dir,
        num_train_epochs=args.epochs,
        max_seq_length=args.max_seq_length,
        per_device_train_batch_size=args.per_device_batch_size,
        per_device_eval_batch_size=args.per_device_batch_size,
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        learning_rate=args.learning_rate,
        # Mixed-precision flags follow the requested torch_dtype.
        bf16=args.torch_dtype == "bfloat16",
        fp16=args.torch_dtype == "float16",
        save_strategy="steps",
        save_steps=args.save_steps,
        save_total_limit=2,
        dataset_num_proc=args.dataset_num_proc,
        eval_strategy="steps",
        eval_steps=args.eval_steps,
        report_to="tensorboard",
        logging_steps=args.logging_steps,
        logging_first_step=True
    )

    trainer = SFTTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        data_collator=data_collator,
        processing_class=tokenizer,
        callbacks=[EvaluationCallback(args.check_steps)]
    )

    # Resume from an existing checkpoint when one is found, otherwise start fresh.
    trainer.train(resume_from_checkpoint=get_checkpoint_dir(args))
    trainer.save_model()


if __name__ == '__main__':
    # FIX: `(TrainConfig)` was just a parenthesized expression, not a 1-tuple;
    # pass the dataclass directly to avoid the misleading parentheses.
    parser = HfArgumentParser(TrainConfig)
    train_args = parser.parse_args_into_dataclasses()[0]
    main(train_args)
