---
library_name: transformers
datasets:
- DeL-TaiseiOzaki/Tengentoppa-sft-v1.0
base_model:
- google/gemma-2-27b
---
# Usage

```python
!pip install -U bitsandbytes
!pip install -U transformers
!pip install -U accelerate
!pip install -U datasets
!pip install -U peft
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    BitsAndBytesConfig,
)
from peft import PeftModel
import torch
from tqdm import tqdm
import json
# Paste the token obtained from Hugging Face here.
HF_TOKEN = ""
# Base model and the trained LoRA adapter.
model_id = "google/gemma-2-27b"
adapter_id = "JunkAto6/gemma-2-27b-finetune"  # Specify the Hugging Face ID of the adapter you uploaded.
# QLoRA config
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)
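# NF4 4-bit quantization with bfloat16 compute stores the 27B weights at roughly
# 0.5 bytes per parameter, which is what makes single-GPU inference feasible here.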
# Load the model
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    quantization_config=bnb_config,
    device_map="auto",
    token=HF_TOKEN,
)
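# device_map="auto" lets accelerate place the model's layers across the available
# GPU(s) and CPU memory automatically.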
# Load the tokenizer
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True, token=HF_TOKEN)
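# trust_remote_code=True is not strictly required for gemma-2 (it is supported
# natively by recent transformers versions), but it is harmless here.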
# Apply the trained LoRA adapter to the base model.
model = PeftModel.from_pretrained(model, adapter_id, token=HF_TOKEN)
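# PeftModel.from_pretrained attaches the adapter on top of the quantized base
# weights; to export a standalone merged model you could call model.merge_and_unload().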
# Load the dataset.
datasets = []
with open("./elyza-tasks-100-TV_0.jsonl", "r") as f:
    item = ""
    for line in f:
        line = line.strip()
        item += line
        # Accumulate lines until the buffer closes a JSON object, so records
        # that span multiple lines are still parsed as one object.
        if item.endswith("}"):
            datasets.append(json.loads(item))
            item = ""
# Inference
results = []
for data in tqdm(datasets):
    input = data["input"]

    # Prompt template: "### 指示" = instruction, "### 回答" = response.
    # These Japanese markers are kept as-is, presumably matching the format
    # used during fine-tuning.
    prompt = f"""### 指示
{input}
### 回答
"""

    tokenized_input = tokenizer.encode(prompt, add_special_tokens=False, return_tensors="pt").to(model.device)
    attention_mask = torch.ones_like(tokenized_input)

    with torch.no_grad():
        outputs = model.generate(
            tokenized_input,
            attention_mask=attention_mask,
            max_new_tokens=100,
            do_sample=False,
            repetition_penalty=1.2,
            pad_token_id=tokenizer.eos_token_id,
        )[0]

    # Decode only the newly generated tokens, skipping the prompt.
    output = tokenizer.decode(outputs[tokenized_input.size(1):], skip_special_tokens=True)

    results.append({"task_id": data["task_id"], "input": input, "output": output})
```
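
The loop above collects the generations in `results` but never writes them out. A minimal sketch for persisting them as JSONL (the output filename is an assumption, not part of the original):

```python
# Write one JSON record per line; the filename is illustrative.
with open("./gemma-2-27b-finetune-outputs.jsonl", "w", encoding="utf-8") as f:
    for result in results:
        json.dump(result, f, ensure_ascii=False)
        f.write("\n")
```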