import json
from datasets import Dataset
from transformers import AutoTokenizer, AutoModelForCausalLM, TrainingArguments, Trainer, DataCollatorForLanguageModeling
import torch

# 1. Load data
def load_jsonl(file_path):
    """Read a JSON-Lines file and return a list of {'text': ...} records.

    Parameters
    ----------
    file_path : str
        Path to a .jsonl file where each non-empty line is a JSON object
        containing at least a 'text' key.

    Returns
    -------
    list[dict]
        One ``{'text': str}`` dict per non-empty line, in file order.

    Raises
    ------
    json.JSONDecodeError
        If a non-empty line is not valid JSON.
    KeyError
        If a line's JSON object lacks a 'text' key.
    """
    data = []
    with open(file_path, 'r', encoding='utf-8') as f:
        for line in f:
            line = line.strip()
            if not line:
                # Skip blank lines (e.g. a trailing newline at EOF) that
                # would otherwise make json.loads raise JSONDecodeError.
                continue
            obj = json.loads(line)
            data.append({'text': obj['text']})
    return data

train_data = load_jsonl('./0721/pre-train-texts.jsonl')  # replace with the path to your data file

# 2. Build a Hugging Face Dataset
# Each element is a {'text': str} dict, matching load_jsonl's output.
dataset = Dataset.from_list(train_data)

# 3. Load tokenizer and model
# NOTE(review): AutoModel and MS_AutoTokenizer are imported here but never
# used in this file — presumably leftover; confirm before removing.
from modelscope import AutoModel, AutoTokenizer as MS_AutoTokenizer

# Load the local model through the transformers API.
# trust_remote_code=True executes custom code bundled with the checkpoint —
# only appropriate for checkpoints from a trusted source.
tokenizer = AutoTokenizer.from_pretrained('/home/llm/qwen3-1.7', trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained('/home/llm/qwen3-1.7', trust_remote_code=True)

# 4. Data preprocessing
def tokenize_function(examples):
    """Tokenize a batch of examples, truncating each text to 1024 tokens.

    `examples` is a batched dict from `Dataset.map` with a 'text' column;
    returns the tokenizer's encoding dict (input_ids, attention_mask, ...).
    """
    encoded = tokenizer(
        examples["text"],
        truncation=True,
        max_length=1024,
    )
    return encoded

# Tokenize the whole dataset in batches; drop the raw 'text' column so only
# the tokenizer's outputs remain for the collator.
tokenized_datasets = dataset.map(tokenize_function, batched=True, remove_columns=["text"])

# 5. Data collation
# mlm=False selects the causal language modeling objective (no token masking),
# which is what continued pretraining of a causal LM requires.
data_collator = DataCollatorForLanguageModeling(
    tokenizer=tokenizer,
    mlm=False,
)

# 6. Training arguments
training_args = TrainingArguments(
    output_dir="./pre-train/qwen3-1.7b-continued-pretrain",
    overwrite_output_dir=True,
    num_train_epochs=1,
    per_device_train_batch_size=1,
    save_steps=1000,
    save_total_limit=2,  # keep only the 2 most recent checkpoints on disk
    prediction_loss_only=True,
    logging_steps=10,
    learning_rate=2e-5,
    fp16=torch.cuda.is_available(),  # mixed precision only when a CUDA GPU is present
)

# 7. Trainer
# Wires together the model, training arguments, tokenized dataset, and the
# causal-LM data collator.
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_datasets,
    data_collator=data_collator,
)

# 8. Start training
trainer.train()
