Mahiro0698 committed
Commit 8c0abaa · verified · 1 Parent(s): 0ada99c

Update README.md

Files changed (1)
  1. README.md +50 -5
README.md CHANGED
@@ -26,11 +26,56 @@ This llama model was trained 2x faster with [Unsloth](https://github.com/unsloth
  Below is how to produce the output (how to obtain the answers for elyza-tasks-100-TV_0.jsonl).

- '''
- %%capture
- !pip install unsloth
- !pip uninstall unsloth -y && pip install --upgrade --no-cache-dir "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git"
- '''
+ ~~~
+ %%capture
+ !pip install unsloth
+ !pip uninstall unsloth -y && pip install --upgrade --no-cache-dir "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git"
+
+ from unsloth import FastLanguageModel
+ import torch
+ import json
+
+ model_name = "Mahiro0698/llm-jp-3-13b-it_MN_v2"
+
+ max_seq_length = 2048
+ dtype = None  # None lets Unsloth auto-detect the dtype
+ load_in_4bit = True  # load the model 4-bit quantized to save memory
+
+ model, tokenizer = FastLanguageModel.from_pretrained(
+     model_name = model_name,
+     max_seq_length = max_seq_length,
+     dtype = dtype,
+     load_in_4bit = load_in_4bit,
+     token = "your token",  # your Hugging Face access token
+ )
+ FastLanguageModel.for_inference(model)  # switch the model to inference mode
+
+ # Load the dataset.
+ # In the omnicampus development environment, drag and drop the task jsonl
+ # into the left-hand pane before running.
+ datasets = []
+ with open("/content/elyza-tasks-100-TV_0.jsonl", "r") as f:
+     item = ""
+     for line in f:
+         line = line.strip()
+         item += line
+         # A record may span multiple lines; parse once the closing brace arrives.
+         if item.endswith("}"):
+             datasets.append(json.loads(item))
+             item = ""
+
+ from tqdm import tqdm
+
+ # Inference
+ results = []
+ for dt in tqdm(datasets):
+     input = dt["input"]
+     prompt = f"""### 指示\n{input}\n### 回答\n"""
+     inputs = tokenizer([prompt], return_tensors = "pt").to(model.device)
+     # Greedy decoding (do_sample=False) with a repetition penalty.
+     outputs = model.generate(**inputs, max_new_tokens = 512, use_cache = True, do_sample=False, repetition_penalty=1.2)
+     # Keep only the text generated after the 回答 (answer) marker.
+     prediction = tokenizer.decode(outputs[0], skip_special_tokens=True).split('\n### 回答')[-1]
+     results.append({"task_id": dt["task_id"], "input": input, "output": prediction})
+
+ # Write the predictions as JSONL, one record per line.
+ with open("/content/llm-jp-3-13b-it_MN_v2_output.jsonl", 'w', encoding='utf-8') as f:
+     for result in results:
+         json.dump(result, f, ensure_ascii=False)
+         f.write('\n')
+ ~~~
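
For reference, a minimal sketch of reading the generated file back for a quick spot check; it assumes only the output path and the task_id / input / output keys written by the script above.

~~~
import json

# Read the predictions back and print the first few for a quick sanity check.
results = []
with open("/content/llm-jp-3-13b-it_MN_v2_output.jsonl", "r", encoding="utf-8") as f:
    for line in f:
        results.append(json.loads(line))

for r in results[:3]:
    print(r["task_id"], "->", r["output"][:80])
~~~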