Inference code
This is the code for obtaining outputs on ELYZA-tasks-100-TV using the model uploaded to Hugging Face.
The jsonl file generated by this code is in a format that can be submitted as the assignment deliverable.
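For reference, each line of the output file is a single JSON object with the keys produced at the end of this script; the values below are hypothetical placeholders, not real data:

{"task_id": 0, "input": "<task input text>", "output": "<model answer>"}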

!pip install -U bitsandbytes  
!pip install -U transformers  
!pip install -U accelerate  
!pip install -U datasets  
!pip install -U peft  
!pip install ipywidgets --upgrade  
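If the runtime already had older versions of these packages loaded (e.g. on Colab), a restart may be needed after upgrading. The version check below is an optional sketch, not part of the original script:

import transformers, peft, torch
print(transformers.__version__, peft.__version__, torch.__version__)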

from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    BitsAndBytesConfig,
)
from peft import PeftModel
import torch
from tqdm import tqdm
import json

HF_TOKEN = "Hugging Face Token"  # replace with your Hugging Face access token
base_model_id = "llm-jp/llm-jp-3-13b"
adapter_id = "takatuki56/llm-jp-3-13b-finetune56_2"

QLoRA config

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)

Load model

model = AutoModelForCausalLM.from_pretrained(
    base_model_id,
    quantization_config=bnb_config,
    device_map="auto",
    token=HF_TOKEN,
)
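As an optional sanity check (my addition, not part of the original script), Transformers' get_memory_footprint() can confirm that the 4-bit load fits in memory:

print(f"Base model footprint: {model.get_memory_footprint() / 1e9:.2f} GB")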

Load tokenizer

tokenizer = AutoTokenizer.from_pretrained(base_model_id, trust_remote_code=True, token=HF_TOKEN)

model = PeftModel.from_pretrained(model, adapter_id, token=HF_TOKEN)
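Before running the full benchmark, a minimal smoke test can confirm the adapter loads and generates; this sketch assumes the same 指示/回答 prompt template used below, and the test question is hypothetical:

test_prompt = "### 指示\n日本で一番高い山は？\n### 回答\n"  # hypothetical test question
test_ids = tokenizer(test_prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
    test_out = model.generate(**test_ids, max_new_tokens=32, do_sample=False)
print(tokenizer.decode(test_out[0][test_ids.input_ids.size(1):], skip_special_tokens=True))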

datasets = []
with open("./elyza-tasks-100-TV_0.jsonl", "r") as f:
    item = ""
    for line in f:
        line = line.strip()
        item += line
        # A record may span multiple lines; accumulate until the closing brace.
        if item.endswith("}"):
            datasets.append(json.loads(item))
            item = ""
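A quick check that the file parsed correctly (ELYZA-tasks-100-TV should contain 100 tasks; treat the expected count as an assumption about your copy of the file):

print(f"Loaded {len(datasets)} tasks")  # expected: 100
print(datasets[0]["task_id"], datasets[0]["input"][:50])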

Generation loop (for Gemma-style models). Run only one of the two loops below; for this llm-jp base model, use the llm-jp loop that follows.

results = []
for data in tqdm(datasets):
    input = data["input"]
    # Dedented so the prompt contains no stray leading spaces.
    prompt = f"""### 指示
{input}
### 回答
"""

    input_ids = tokenizer(prompt, return_tensors="pt").to(model.device)
    with torch.no_grad():
        outputs = model.generate(**input_ids, max_new_tokens=512, do_sample=False, repetition_penalty=1.2)
    output = tokenizer.decode(outputs[0][input_ids.input_ids.size(1):], skip_special_tokens=True)

    results.append({"task_id": data["task_id"], "input": input, "output": output})

Generation loop (llm-jp). Note that this loop re-initializes results, so any outputs from the Gemma loop above are discarded.

results = []
for data in tqdm(datasets):
    input = data["input"]

    prompt = f"""### 指示
{input}
### 回答
"""

    tokenized_input = tokenizer.encode(prompt, add_special_tokens=False, return_tensors="pt").to(model.device)
    attention_mask = torch.ones_like(tokenized_input)
    with torch.no_grad():
        outputs = model.generate(
            tokenized_input,
            attention_mask=attention_mask,
            max_new_tokens=100,
            do_sample=False,
            repetition_penalty=1.2,
            pad_token_id=tokenizer.eos_token_id,
        )[0]
    output = tokenizer.decode(outputs[tokenized_input.size(1):], skip_special_tokens=True)

    results.append({"task_id": data["task_id"], "input": input, "output": output})

import re
jsonl_id = re.sub(".*/", "", adapter_id)  # output file name = part of adapter_id after the last "/"
with open(f"./{jsonl_id}-outputs.jsonl", 'w', encoding='utf-8') as f:
    for result in results:
        json.dump(result, f, ensure_ascii=False)  # ensure_ascii=False for handling non-ASCII characters
        f.write('\n')
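Optionally, read the file back to verify it is valid JSONL with the expected keys before submitting (a sketch, not part of the original script):

with open(f"./{jsonl_id}-outputs.jsonl", "r", encoding="utf-8") as f:
    records = [json.loads(line) for line in f]
assert all({"task_id", "input", "output"} <= set(r) for r in records)
print(f"Wrote {len(records)} records")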