import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import load_dataset, load_from_disk
import transformers
from transformers import (
    CONFIG_MAPPING,
    MODEL_FOR_CAUSAL_LM_MAPPING,
    AutoConfig,
    AutoModelForCausalLM,
    AutoTokenizer,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    default_data_collator,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
from modeling_bloom import BloomForCausalLM

# Module-level logger, configured by the Trainer/transformers logging setup in main().
logger = logging.getLogger(__name__)

# Every config class that supports causal language modeling, and the
# model-type identifier string each one advertises. MODEL_TYPES is the set of
# valid values for --model_type when instantiating a config from scratch.
MODEL_CONFIG_CLASSES = list(MODEL_FOR_CAUSAL_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class ModelArguments:
    """Arguments controlling which model/config/tokenizer to fine-tune.

    Parsed by ``HfArgumentParser``; each field becomes a ``--flag`` on the
    command line, with ``metadata["help"]`` as its help text.
    """

    # Hub id or local path of a pretrained model; None means train from scratch.
    model_name_or_path: Optional[str] = field(default=None)
    # Architecture key (one of MODEL_TYPES); only consulted when building a
    # fresh config because no model/config path was given.
    model_type: Optional[str] = field(default=None)
    config_name: Optional[str] = field(default=None, metadata={"help": "Pretrained config name"})
    tokenizer_name: Optional[str] = field(default=None, metadata={"help": "Pretrained tokenizer name"})
    cache_dir: Optional[str] = field(default=None, metadata={"help": "保存从huggingface下载模型的缓存目录"})
    use_fast_tokenizer: bool = field(default=True)
    # Git revision to pull from the hub (branch, tag, or commit sha).
    model_revision: str = field(default="main", metadata={"help": "branch name, tag name or commit id"})
    # Fixed: the help string previously had an unterminated backtick.
    use_auth_token: bool = field(
        default=False,
        metadata={"help": "Will use the token generated when running `transformers-cli login`"},
    )

@dataclass
class DataTrainingArguments:
    """Arguments controlling the training/evaluation data.

    Parsed by ``HfArgumentParser``; each field becomes a ``--flag`` on the
    command line, with ``metadata["help"]`` as its help text.
    """

    # Hub dataset name; if None, fall back to local train/validation files.
    dataset_name: Optional[str] = field(default=None)
    dataset_config_name: Optional[str] = field(default=None, metadata={"help": "The configuration name of the dataset"})
    train_file: Optional[str] = field(default=None)
    validation_file: Optional[str] = field(default=None)
    # Whether train_file is a datasets save_to_disk directory rather than a raw text/csv/json file.
    pickle: Optional[bool] = field(default=True, metadata={"help": "数据集是否为pickle"})
    max_train_samples: Optional[int] = field(default=None, metadata={"help": "截断训练集,取topN,用于debug"})
    # Fixed copy-paste in help text: this flag truncates the *validation* set.
    max_val_samples: Optional[int] = field(default=None, metadata={"help": "截断验证集,取topN,用于debug"})
    block_size: Optional[int] = field(default=None,
                                      metadata={"help": "tokenization之后的句子长度,默认为模型的最大输入长度"})
    overwrite_cache: bool = field(default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})
    # Percentage of the train split carved off as validation when the dataset has none.
    validation_split_percentage: Optional[int] = field(default=5, metadata={"help": "如果没有验证集，给比例进行切割"})
    preprocessing_num_workers: Optional[int] = field(default=None, metadata={"help": "预处理进程数"})

def main() -> None:
    """End-to-end causal-LM fine-tuning: parse args, prepare data/tokenizer,
    wrap the model with LoRA adapters, and run the HF Trainer."""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    # A single .json argv entry means "read every argument from that file";
    # otherwise parse normal command-line flags.
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        # A non-empty output dir with no checkpoint inside is treated as a
        # user error to avoid silently overwriting prior results.
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(f"输出目录 ({training_args.output_dir}) 已经存在了. 可以使用--overwrite_output_dir覆盖")
        elif last_checkpoint is not None:
            logger.info(
                f"从last_checkpoint:{last_checkpoint}开始训练. 如果不想这样, 改变 `--output_dir` 或者添加 `--overwrite_output_dir`")

    # Set info of the Transformers logger
    # Only rank 0 gets verbose transformers logging to avoid duplicated output
    # in distributed runs.
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # download the datasets or you can either provide your own CSV/JSON/TXT training and evaluation files
    # NOTE(review): the `datasets` object built in this branch (and the
    # `extension` computed in the else-branch) is never used afterwards —
    # training data actually comes from load_from_disk() further below.
    if data_args.dataset_name is not None:
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
        # If the hub dataset ships no validation split, carve one out of the
        # train split using validation_split_percentage.
        if "validation" not in datasets.keys():
            datasets["validation"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[:{data_args.validation_split_percentage}%]",
            )
            datasets["train"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[{data_args.validation_split_percentage}%:]",
            )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        # Infer the datasets loader name from the file extension; "txt" maps
        # to the "text" loader.
        extension = (
            data_args.train_file.split(".")[-1]
            if data_args.train_file is not None
            else data_args.validation_file.split(".")[-1]
        )
        if extension == "txt":
            extension = "text"
        # NOTE(review): loading from raw files is deliberately disabled;
        # the script currently only supports pre-tokenized datasets on disk.
        # if data_args.pickle is False:
        #     datasets = load_dataset(extension, data_files=data_files)
        # else:
        #     datasets = load_from_disk(data_args.train_file)

    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    # Config resolution order: explicit --config_name, then the model path,
    # then a from-scratch config keyed by --model_type.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")

    # Things that were changed from the huggingface file
    # Gradient checkpointing trades compute for memory; use_cache must be off
    # because KV caching is incompatible with checkpointed training.
    config.gradient_checkpointing = True
    config.use_cache = False

    tokenizer_kwargs = {
        "cache_dir": model_args.cache_dir,
        "use_fast": model_args.use_fast_tokenizer,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, truncation=True, padding=True,
                                                  **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, truncation=True, padding=True,
                                                  **tokenizer_kwargs)
    else:
        raise ValueError("没有指定tokenizer_name")

    # tokenizer.add_tokens(["<用户>", "<卫鞅>"])

    if model_args.model_name_or_path:
        model = AutoModelForCausalLM.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
        # Wrap the base model with LoRA adapters so only the low-rank
        # q_proj/v_proj updates are trained (r=8, alpha=32).
        # NOTE(review): target_modules=["q_proj", "v_proj"] assumes a
        # LLaMA-style attention naming; BLOOM uses query_key_value — confirm
        # against the intended base model.
        import peft
        model = peft.get_peft_model(model, peft.LoraConfig(task_type=peft.TaskType.CAUSAL_LM,
                                                          inference_mode=False,
                                                          r=8,
                                                          lora_alpha=32,
                                                          lora_dropout=0.1,
                                                          target_modules=["q_proj", "v_proj"]))
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForCausalLM.from_config(config)
    # Redundant with config.use_cache = False above, but harmless.
    model.config.use_cache = False
    # Resize embeddings in case the tokenizer vocabulary differs from the
    # config (e.g. if add_tokens above is re-enabled).
    model.resize_token_embeddings(len(tokenizer))

    # The training set is always a pre-tokenized dataset saved with
    # save_to_disk at --train_file; the hub-dataset branch above is ignored.
    lm_datasets = load_from_disk(data_args.train_file)
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=lm_datasets if training_args.do_train else None,
        tokenizer=tokenizer,
        # default to DataCollatorWithPadding, change it.
        data_collator=default_data_collator,
    )

    # Training
    if training_args.do_train:
        # NOTE(review): `checkpoint` is computed but intentionally not passed
        # to trainer.train() (resume_from_checkpoint is commented out), so
        # training always starts from the freshly loaded weights.
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train()  # resume_from_checkpoint=checkpoint)
        trainer.save_model()
        metrics = train_result.metrics
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()


# Script entry point; typically launched via run.sh (see note below).
if __name__ == "__main__":
    main()

# nohup bash run.sh > .log 2>&1 &
