import os

import torch
from datasets import DatasetDict, load_dataset
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training
from transformers import (
    AutoModelForCausalLM,
    BitsAndBytesConfig,
    DataCollatorForLanguageModeling,
    Trainer,
    TrainingArguments,
)

# ModelScope modules (used in place of the Hugging Face Hub equivalents)
# from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks
from modelscope.models import Model
from modelscope import AutoTokenizer

# Disable Weights & Biases logging for this run.
os.environ["WANDB_DISABLED"] = "true"

# Model and dataset paths.
model_name = "LLM-Research/Llama-3.2-1B-Instruct"  # ModelScope model ID
# NOTE(review): dataset_path is never referenced below — the pre-split
# train/validation/test .jsonl files are loaded instead. Kept for
# backward compatibility; confirm whether it can be removed.
dataset_path = "dataset-car.json"

# Load the tokenizer via ModelScope.
# SECURITY: the original code hard-coded an access token in source control.
# Read it from the environment instead (None is acceptable for public models).
tokenizer = AutoTokenizer.from_pretrained(
    model_name,
    trust_remote_code=True,
    token=os.environ.get("MODELSCOPE_API_TOKEN"),
)
# Llama ships without a dedicated pad token; reuse EOS so padding works.
tokenizer.pad_token = tokenizer.eos_token

# Each split lives in its own JSONL file. load_dataset("json", ...) always
# exposes the loaded rows under a "train" key regardless of the file's
# actual role, hence the ["train"] indexing inside the helper.
def _load_split(path):
    # Read one pre-split JSONL file and return its rows as a Dataset.
    return load_dataset("json", data_files=path)["train"]

train_dataset = _load_split("train.jsonl")
val_dataset = _load_split("validation.jsonl")
test_dataset = _load_split("test.jsonl")

# Assemble the standard Hugging Face DatasetDict split layout.
dataset = DatasetDict({
    "train": train_dataset,
    "validation": val_dataset,
    "test": test_dataset,
})

# Tokenize the raw examples for training.
# NOTE(review): only the "instruction" field is tokenized — if the dataset
# also carries a response/"output" field, it is dropped here and the model
# would be trained on prompts only. Confirm the dataset schema.
def preprocess_function(examples):
    # Fixed-length padding to 512 tokens; longer inputs are truncated.
    return tokenizer(examples["instruction"], truncation=True, padding="max_length", max_length=512)

# Apply tokenization to every split, batched for speed.
tokenized_dataset = dataset.map(preprocess_function, batched=True)

# Load the 4-bit quantized base model from the local ModelScope cache.
# The cache directory can be overridden via the MODEL_DIR environment
# variable; the default is the path ModelScope produced on the original
# machine (kept for backward compatibility).
model_dir = os.environ.get(
    "MODEL_DIR",
    "/home/yangxing/.cache/modelscope/hub/models/LLM-Research/Llama-3___2-1B-Instruct",
)

# Passing load_in_4bit=True directly to from_pretrained is deprecated in
# recent transformers releases; quantization settings now go through an
# explicit BitsAndBytesConfig.
quantization_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.float16,
)

model = AutoModelForCausalLM.from_pretrained(
    model_dir,
    quantization_config=quantization_config,
    torch_dtype=torch.float16,
    device_map="auto",
    trust_remote_code=True,
    local_files_only=True,  # never hit the network for weights
)

# Prepare the quantized base model for k-bit fine-tuning (PEFT helper).
model = prepare_model_for_kbit_training(model)

# Low-rank adapters attached to the attention projection layers only.
lora_config = LoraConfig(
    task_type="CAUSAL_LM",
    r=16,               # rank of the low-rank update matrices
    lora_alpha=32,      # scaling factor for the adapter output
    lora_dropout=0.05,  # dropout applied on the adapter path
    bias="none",        # leave bias terms untouched
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],
)

# Wrap the base model so that only the adapter weights are trainable.
model = get_peft_model(model, lora_config)

# Training hyper-parameters.
# NOTE(review): `evaluation_strategy` was renamed to `eval_strategy` in
# recent transformers releases — confirm against the pinned version.
training_args = TrainingArguments(
    output_dir="./results",  # checkpoints and logs are written here
    overwrite_output_dir=True,
    num_train_epochs=3,
    per_device_train_batch_size=4,
    per_device_eval_batch_size=4,
    gradient_accumulation_steps=4,  # effective batch = 4 x 4 per device
    evaluation_strategy="epoch",  # evaluate once per epoch
    save_strategy="epoch",  # checkpoint once per epoch
    learning_rate=2e-5,
    weight_decay=0.01,
    warmup_steps=100,
    fp16=True,  # mixed-precision training
    logging_steps=10,
)

# Data collator for causal language modeling: mlm=False means the labels
# are copied from input_ids (next-token prediction) instead of masking.
data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)

# Assemble the Trainer with the tokenized train/validation splits.
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_dataset["train"],
    eval_dataset=tokenized_dataset["validation"],
    data_collator=data_collator,
)

# Run fine-tuning.
trainer.train()

# Persist the fine-tuned (adapter) weights and the tokenizer.
# NOTE(review): the directory name says "llama2" but the base model is
# Llama 3.2 — consider renaming; kept byte-identical here so downstream
# consumers of this path keep working.
model.save_pretrained("./car_llama2_model")
tokenizer.save_pretrained("./car_llama2_model")