# -*- coding: utf-8 -*-
# 使用QLoRA微调


from transformers import AutoTokenizer, AutoModelForMaskedLM, BitsAndBytesConfig
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training

# Path to the locally downloaded bert-base-chinese checkpoint.
model_dir = "/root/autodl-tmp/models/pretrained/google-bert/bert-base-chinese"

# Load the tokenizer.
tokenizer = AutoTokenizer.from_pretrained(model_dir)

# Configure 4-bit quantization (the core of QLoRA).
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",       # NF4 is the quant type recommended by the QLoRA paper
    bnb_4bit_use_double_quant=True,  # double quantization saves additional memory
    bnb_4bit_compute_dtype="float16" # computations run in float16
)

# Load BERT with its weights quantized to 4 bits.
model = AutoModelForMaskedLM.from_pretrained(
    model_dir,
    quantization_config=bnb_config,
    device_map="auto"
)

# Prepare the quantized model for k-bit (QLoRA) training: casts layer norms
# and the output head to fp32, enables input gradients / gradient
# checkpointing. Skipping this step makes 4-bit fine-tuning numerically
# unstable.
model = prepare_model_for_kbit_training(model)

# Configure LoRA.
# NOTE: PEFT has no dedicated task type for masked language modeling, so
# task_type is left unset (yields a generic PeftModel). The previous
# task_type="TOKEN_CLS" was incorrect for an AutoModelForMaskedLM head.
lora_config = LoraConfig(
    r=8,
    lora_alpha=32,
    lora_dropout=0.1,
    target_modules=["query", "value"]  # adapters on the attention Q/V projections
)

# Inject the LoRA adapters.
model = get_peft_model(model, lora_config)

# Report trainable (adapter) vs. frozen parameter counts, then show the model.
model.print_trainable_parameters()
print(model)