import transformers
import datasets
import pickle
from pathlib import Path
from torchviz import make_dot

# Load the zh-cn Wikipedia dump (single JSON "train" split), then carve out
# 10% as a held-out test split; the fixed seed keeps the split reproducible.
raw_datasets = datasets.load_dataset(
    "json", data_files="dataset/wikipedia-zh-cn-20241020.json"
)["train"].train_test_split(test_size=0.1, seed=2333)
print("dataset info")
print(raw_datasets)


tokenizer = transformers.AutoTokenizer.from_pretrained("dataset/Qwen2-0.5B")
print(tokenizer)

config = transformers.AutoConfig.from_pretrained(
        "dataset/llama2",
    )
print("Model Config:")
print(config)

# BUG FIX: the llama2 config declares its own (32k) vocabulary, but all token
# ids produced below come from the Qwen2 tokenizer (~151k entries). Any id
# >= config.vocab_size would index past the embedding matrix and crash
# training, so size the embeddings to the tokenizer actually in use.
# len(tokenizer) also counts added special tokens, unlike tokenizer.vocab_size.
config.vocab_size = len(tokenizer)
# NOTE(review): the bos/eos token ids in the llama2 config may still differ
# from the Qwen2 tokenizer's special tokens — verify against both configs.

model = transformers.LlamaForCausalLM(config)  # random init: trained from scratch
model.config.use_cache = False  # KV cache is useless during training, wastes memory
model_size = sum(t.numel() for t in model.parameters())
print(f"Model Size: {model_size/1000**2:.1f}M parameters")

# Trainer hyper-parameters. Effective global batch size is
# per_device_train_batch_size * gradient_accumulation_steps (* num GPUs).
args = transformers.TrainingArguments(
    output_dir="checkpoints",
    num_train_epochs=2,  # number of training epochs
    # --- batching ---
    per_device_train_batch_size=24,  # train batch size per GPU
    per_device_eval_batch_size=24,  # eval batch size per GPU
    gradient_accumulation_steps=12,  # gradient accumulation steps
    # --- optimization ---
    optim="adamw_torch",  # AdamW optimizer
    learning_rate=5e-4,  # base learning rate
    lr_scheduler_type="cosine",  # LR decay schedule
    warmup_steps=1_000,
    weight_decay=0.1,
    bf16=True,  # bf16 training; on pre-Ampere GPUs use fp16=True instead
    # --- evaluation / logging / checkpointing ---
    eval_strategy="steps",
    eval_steps=10,
    logging_steps=10,
    save_steps=5_000,
    save_total_limit=10,
    resume_from_checkpoint=False,
)
print("Train Args:")
print(args)

context_length = 512  # fixed token length of every training example

def tokenize(element):
    """Tokenize a batch of documents into fixed-length chunks.

    Each document is split into chunks of at most ``context_length`` tokens
    (``return_overflowing_tokens=True``); only exactly-full chunks are kept,
    so the trailing partial chunk of each document is discarded and no
    padding is required at this stage.

    Args:
        element: batch dict from ``datasets.map`` with a ``"text"`` column.

    Returns:
        dict with a single ``"input_ids"`` list of full-length chunks.
    """
    outputs = tokenizer(
        element["text"],
        truncation=True,
        max_length=context_length,
        return_overflowing_tokens=True,
        return_length=True,
    )
    # Idiomatic comprehension instead of a manual append loop (ruff PERF401).
    return {
        "input_ids": [
            ids
            for length, ids in zip(outputs["length"], outputs["input_ids"])
            if length == context_length
        ]
    }

# Tokenize both splits in batches; drop the raw columns so only the
# fixed-length "input_ids" produced by tokenize() remain.
tokenized_datasets = raw_datasets.map(
    tokenize,
    batched=True,
    remove_columns=raw_datasets["train"].column_names,
)
print("tokenize dataset info")
print(tokenized_datasets)

# The collator pads batches and, with mlm=False, copies input_ids to labels
# for causal-LM training. The tokenizer has no pad token, so reuse EOS.
tokenizer.pad_token = tokenizer.eos_token
data_collator = transformers.DataCollatorForLanguageModeling(
    tokenizer, mlm=False
)

trainer = transformers.Trainer(
    model=model,
    tokenizer=tokenizer,
    args=args,
    data_collator=data_collator,
    train_dataset=tokenized_datasets["train"],
    eval_dataset=tokenized_datasets["test"],
)

# (Optional) to export the backward computation graph with torchviz:
#   make_dot(loss, params=dict(trainer.model.named_parameters())).render(
#       "backward_graph", format="png")

trainer.train()

# Save the weights AND the tokenizer so ./WikiLLM/Weight is a self-contained
# checkpoint loadable via from_pretrained — weights alone cannot be used for
# inference without the matching tokenizer.
model.save_pretrained("./WikiLLM/Weight")
tokenizer.save_pretrained("./WikiLLM/Weight")
