import json
import os

from tqdm import tqdm
import torch
from datasets import load_dataset
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, Trainer, TrainingArguments, set_seed

from config import *
from fim import ConstantLengthDataset

# Seed every RNG (python/numpy/torch) so the run is reproducible.
set_seed(SEED)

# The dataset file is a single JSON array; its length is the number of
# training examples (used below to size the token-ratio estimation pass).
with open(DATASET, encoding="utf-8") as f:
    dataset_size = len(json.load(f))

# Load the training dataset in streaming mode so the file is never fully
# materialized in memory.  `load_dataset` takes the containing directory as
# `path` and the file name as `data_files`; use os.path.dirname/basename
# instead of manually splitting on os.sep, which breaks on mixed separators
# (e.g. "/"-style paths on Windows).
train_dataset = load_dataset(
    path=os.path.dirname(DATASET),
    data_files=os.path.basename(DATASET),
    streaming=True,
    split="train",
)
# Streaming datasets shuffle with a bounded reservoir buffer.
train_dataset = train_dataset.shuffle(buffer_size=5000, seed=SEED)

# Load the tokenizer matching the base model.
tokenizer = AutoTokenizer.from_pretrained(MODEL, trust_remote_code=True)

# Estimate the average character-to-token ratio over the dataset; the packed
# dataset below uses it to budget how much raw text to read per sequence.
total_characters, total_tokens = 0, 0
for _, example in tqdm(zip(range(dataset_size), iter(train_dataset)), total=dataset_size):
    text = example[DATA_COLUMN]
    total_characters += len(text)
    # len(input_ids) equals the token count; unlike BatchEncoding.tokens(),
    # it also works with slow (non-Rust) tokenizers, which trust_remote_code
    # models may ship.
    total_tokens += len(tokenizer(text)["input_ids"])
chars_per_token = total_characters / total_tokens
print(f"The character to token ratio of the dataset is: {chars_per_token:.2f}")

# Wrap the streaming dataset so it yields fixed-length packed token blocks,
# optionally applying fill-in-the-middle (FIM) transformations.
train_dataset = ConstantLengthDataset(
    tokenizer,
    train_dataset,
    infinite=True,  # True: loop over the data forever; False: a single pass.
    seq_length=SEQ_LENGTH,  # Fixed length of each emitted token block.
    chars_per_token=chars_per_token,  # Average characters per token (float), used to estimate how much raw text to pull.
    content_field=DATA_COLUMN,  # Name of the dataset field holding the text content.
    fim_rate=FIM_RATE,  # Probability of applying a FIM transformation to each sample.
    fim_spm_rate=FIM_SPM_RATE,  # Given FIM is applied, probability of picking one of the two FIM permutations (presumably SPM vs PSM — confirm in fim.py).
    seed=SEED,  # Seed for the random number generator.
)

# Load the base model with 4-bit NF4 quantization and attach LoRA adapters.
compute_dtype = getattr(torch, BNB_4BIT_COMPUTE_DTYPE)  # e.g. "bfloat16" -> torch.bfloat16
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=compute_dtype,
    bnb_4bit_use_double_quant=USE_NESTED_QUANT,  # nested quantization saves extra memory
)
# NOTE: the deprecated `load_in_8bit=False` and `use_flash_attention_2=False`
# kwargs were dropped — both were passed with their default values, and newer
# transformers releases reject/deprecate them in favor of `quantization_config`
# and `attn_implementation`.
model = AutoModelForCausalLM.from_pretrained(
    MODEL,
    quantization_config=bnb_config,
    device_map="auto",  # let accelerate place layers across available devices
    use_cache=False,  # KV cache is incompatible with gradient checkpointing
    trust_remote_code=True,
)
# Casts norms/embeddings to fp32 and enables input grads, as required for
# training on top of a k-bit quantized model.
model = prepare_model_for_kbit_training(model)
peft_config = LoraConfig(
    lora_alpha=LORA_ALPHA,
    lora_dropout=LORA_DROPOUT,
    r=LORA_R,
    bias="none",
    task_type="CAUSAL_LM",
)
model = get_peft_model(model, peft_config)  # wrap base model with LoRA adapters
model.print_trainable_parameters()  # report trainable vs. total parameter counts

# Assemble the training hyper-parameters for the HF Trainer.
_trainer_kwargs = dict(
    output_dir=OUTPUT_DIR,
    dataloader_drop_last=True,  # drop the final incomplete batch
    eval_strategy="no",  # evaluation disabled (eval_* settings below are inert)
    save_strategy="steps",
    max_steps=MAX_STEPS,
    eval_steps=EVAL_FREQ,
    save_steps=SAVE_FREQ,
    logging_steps=LOG_FREQ,
    per_device_train_batch_size=BATCH_SIZE,
    per_device_eval_batch_size=BATCH_SIZE,
    learning_rate=LR,
    lr_scheduler_type=LR_SCHEDULER_TYPE,
    warmup_steps=NUM_WARMUP_STEPS,
    gradient_accumulation_steps=GR_ACC_STEPS,
    gradient_checkpointing=True,  # trade recompute for activation memory
    fp16=FP16,
    bf16=BF16,
    weight_decay=WEIGHT_DECAY,
    include_tokens_per_second=True,  # log throughput in tokens/sec
)
training_args = TrainingArguments(**_trainer_kwargs)

# Build the trainer around the packed streaming dataset.
trainer = Trainer(model=model, args=training_args, train_dataset=train_dataset)

# Run the fine-tuning loop.
print("Training...")
trainer.train()

# Fold the LoRA adapter weights back into the base model and persist the
# merged model together with the tokenizer.
# NOTE(review): the base model was loaded 4-bit quantized; merge_and_unload()
# on a quantized model is version-sensitive in peft (may dequantize or raise)
# — confirm against the pinned peft version.
merged_model = model.merge_and_unload()
merged_model.save_pretrained(OUTPUT_DIR)
tokenizer.save_pretrained(OUTPUT_DIR)