from transformers import AutoModelForCausalLM, AutoTokenizer

# Base checkpoint: a Chinese-chat-tuned Llama-3.1-8B model pulled from the
# Hugging Face Hub. Both the tokenizer and model come from the same repo so
# vocabulary and weights stay in sync.
model_id = "shenzhi-wang/Llama3.1-8B-Chinese-Chat"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)


from datasets import load_dataset

# Load the fine-tuning corpus from a local JSON file. With the 'json' builder
# and a single data_files entry this yields a DatasetDict whose only split is
# 'train' (used below as dataset['train']).
# NOTE(review): the record schema of dataset.json is not visible here — the
# training step downstream assumes text records; confirm the field names.
dataset = load_dataset('json', data_files='dataset.json')

from peft import LoraConfig

# LoRA adapter configuration: train small low-rank update matrices on the
# attention projections instead of the full 8B weights.
lora_config = LoraConfig(
    # FIX: declare the task so PEFT wraps the model correctly for causal-LM
    # training (the original passed the redundant peft_type="LORA" instead —
    # LoraConfig already sets its own peft_type — and omitted task_type).
    task_type="CAUSAL_LM",
    r=8,                 # rank of the low-rank decomposition
    lora_alpha=32,       # scaling factor; effective scale is alpha / r = 4
    lora_dropout=0.1,    # dropout applied to the LoRA layers during training
    target_modules=["q_proj", "v_proj", "o_proj"],  # attention projections to adapt
)

from transformers import Trainer, TrainingArguments, DataCollatorForLanguageModeling
from peft import get_peft_model

# BUG FIX: the original built lora_config but never attached it, so the
# Trainer was full-fine-tuning all base weights and the LoRA section was dead
# code. Wrapping here trains only the adapter parameters.
model = get_peft_model(model, lora_config)


def tokenize_fn(examples):
    """Tokenize one batch of raw records for causal-LM training."""
    # assumes each JSON record has a 'text' field — TODO confirm dataset.json
    return tokenizer(examples["text"], truncation=True, max_length=512)


# BUG FIX: the Trainer cannot consume raw JSON records; tokenize up front and
# drop the original string columns so only model inputs remain.
tokenized_dataset = dataset["train"].map(
    tokenize_fn,
    batched=True,
    remove_columns=dataset["train"].column_names,
)

training_args = TrainingArguments(
    output_dir="./results",
    # BUG FIX: was evaluation_strategy="epoch", which makes Trainer raise at
    # init because no eval_dataset is supplied. Disable evaluation instead.
    evaluation_strategy="no",
    learning_rate=2e-5,
    per_device_train_batch_size=4,
    num_train_epochs=3,
    weight_decay=0.01,
)

trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_dataset,
    # mlm=False makes the collator produce shifted causal-LM labels; without
    # it the batches carry no 'labels' and training computes no loss.
    data_collator=DataCollatorForLanguageModeling(tokenizer, mlm=False),
)

trainer.train()


# Persist the trained weights and tokenizer so the inference section below can
# reload them from disk.
# NOTE(review): if the model has been wrapped with PEFT/LoRA by this point,
# save_pretrained writes only the adapter weights (plus config), not the full
# base model — reloading with AutoModelForCausalLM alone would then be wrong;
# confirm which variant is intended.
model.save_pretrained("./fine_tuned_llama3")
tokenizer.save_pretrained("./fine_tuned_llama3")


from transformers import pipeline

# Smoke test: reload the fine-tuned artifacts from disk and generate once.
fine_tuned_model = AutoModelForCausalLM.from_pretrained("./fine_tuned_llama3")
fine_tuned_tokenizer = AutoTokenizer.from_pretrained("./fine_tuned_llama3")

nlp = pipeline("text-generation", model=fine_tuned_model, tokenizer=fine_tuned_tokenizer)

# BUG FIX: this is a chat-tuned model, so the prompt must go through the chat
# template; a bare string skips the system/user role markers the model was
# trained on and degrades the reply.
prompt = fine_tuned_tokenizer.apply_chat_template(
    [{"role": "user", "content": "你是谁"}],
    tokenize=False,
    add_generation_prompt=True,
)

# BUG FIX: use max_new_tokens instead of the deprecated max_length —
# max_length counts the prompt tokens too, so a templated prompt could leave
# little or no budget for the actual answer.
output = nlp(prompt, max_new_tokens=50)
print(output)