from dis import Instruction

from unsloth import FastLanguageModel
import torch

max_seq_length = 2048   # max context length used for training/inference
dtype = None            # None lets unsloth auto-select float16/bfloat16 for the GPU
load_in_4bit = True     # quantize weights to 4-bit to cut VRAM usage

# 1. Load the base language model and its tokenizer.
# FIX: the documented keyword is `load_in_4bit` (singular); the original
# `load_in_4bits` is not a parameter of FastLanguageModel.from_pretrained
# and would raise a TypeError.
# NOTE(review): "gpt2" has a native context window of 1024 tokens;
# confirm that max_seq_length = 2048 is intended for this base model.
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name = "gpt2",
    max_seq_length = max_seq_length,
    dtype = dtype,
    load_in_4bit = load_in_4bit,
)
# 2. Attach LoRA adapters so fine-tuning updates only ~1-10% of the parameters.
# FIX: the original misspelled keyword `terget_modules` would raise a
# TypeError; the parameter is `target_modules`.
# NOTE(review): q_proj/k_proj/v_proj/o_proj/gate_proj/up_proj/down_proj are
# LLaMA-family module names; GPT-2 uses c_attn/c_proj/c_fc — confirm these
# match the base model actually loaded above.
model = FastLanguageModel.get_peft_model(
    model,
    r = 16,                              # LoRA rank (adapter dimensionality)
    target_modules = ["q_proj","k_proj", "v_proj", "o_proj","gate_proj","up_proj","down_proj"],
    lora_alpha = 16,                     # LoRA scaling factor
    lora_dropout = 0,                    # no dropout on adapter layers
    bias = "none",                       # do not train bias terms
    use_gradient_checkpointing = "unsloth",  # unsloth's memory-saving checkpointing
    random_state = 3407,                 # seed for reproducible adapter init
    use_rslora = False,                  # plain LoRA scaling, not rank-stabilized
    loftq_config = None,                 # no LoftQ quantization-aware init
)
# 3. Prompt template in the Stanford Alpaca style (instruction / input / response).
# The three `{}` placeholders are filled positionally via str.format.
# NOTE(review): the canonical Alpaca template starts with the preamble
# "Below is an instruction that describes a task..." — confirm that omitting
# it here is intentional.
alpaca_prompt = """
### Instruction:
{}

### Input:
{}

### Response:
{}
"""
# Cache the tokenizer's end-of-sequence token for use when formatting samples.
EOS_TOKEN = tokenizer.eos_token
def formatting_prompt_func(examples):
    """Convert a batch of Alpaca-style records into training text strings.

    Args:
        examples: batched mapping with "instruction", "input" and "output"
            lists (the Stanford Alpaca schema — confirm against the dataset).

    Returns:
        dict with a single "text" key holding one formatted prompt per
        record, each terminated with EOS_TOKEN so the model learns to stop.
    """
    instructions = examples["instruction"]
    inputs = examples["input"]
    # FIX: the original function ended here and implicitly returned None,
    # which would break any dataset.map(...) call. Build and return the
    # formatted texts as is standard for Alpaca-style fine-tuning.
    outputs = examples["output"]
    texts = []
    for instruction, input_, output in zip(instructions, inputs, outputs):
        # Append EOS so generation terminates instead of running on.
        texts.append(alpaca_prompt.format(instruction, input_, output) + EOS_TOKEN)
    return {"text": texts}
# 4. Launch fine-tuning from the Alpaca-format JSON file.
# NOTE(review): `train_from_json` is not part of the public unsloth /
# transformers / peft APIs — confirm this method exists in the project;
# the usual route is trl's SFTTrainer with a TrainingArguments object.
model.train_from_json(
    # data source
    train_file = "data/stanford_alpaca.json",
    # optimization schedule
    num_epochs = 3,
    batch_size = 2,
    gradient_accumulation_steps = 1,
    learning_rate = 1e-4,
    warmup_steps = 100,
    # memory / precision
    fp16 = True,
    gradient_checkpointing = True,
    # checkpointing and logging cadence
    save_steps = 100,
    save_total_limit = 10,
    logging_steps = 10,
)