etri-xainlp
committed
Commit: 6ec2338
1 Parent(s): 391db2b
Update README.md
README.md
CHANGED
@@ -25,6 +25,49 @@ The following hyperparameters were used during training:
- lr_scheduler_type: linear
- num_epochs: 5.0

+### Inference
+
+```python
+import torch
+from transformers import pipeline, AutoModelForCausalLM
+
+MODEL = 'etri-xainlp/polyglot-ko-12.8b-instruct'
+
+# Load the model in fp16; device=7 targets the eighth GPU, so adjust the
+# index to match your machine.
+model = AutoModelForCausalLM.from_pretrained(
+    MODEL,
+    torch_dtype=torch.float16,
+    low_cpu_mem_usage=True,
+).to(device=7, non_blocking=True)
+model.eval()
+
+pipe = pipeline(
+    'text-generation',
+    model=model,
+    tokenizer=MODEL,
+    device=7,
+)
+pipe.model.config.pad_token_id = pipe.model.config.eos_token_id
+
+def ask(x, context='', is_input_full=False):
+    # Prompt template: 질문 = question, 맥락 = context, 답변 = answer.
+    ans = pipe(
+        f"### 질문: {x}\n\n### 맥락: {context}\n\n### 답변:" if context else f"### 질문: {x}\n\n### 답변:",
+        do_sample=True,
+        max_new_tokens=2048,
+        temperature=0.9,
+        top_p=0.9,
+        return_full_text=False,
+        eos_token_id=2,
+    )
+    return ans[0]['generated_text']
+
+# Simple interactive loop; type 'q' to quit.
+while True:
+    user_input = input('prompt?: ')
+    if user_input == 'q':
+        break
+    generation = ask(user_input)
+    print("etri_ai:", generation)
+```
+
### Framework versions

- Transformers 4.30.2
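
For one-off use outside the interactive loop, the same `ask` helper from the Inference snippet can be called directly; passing a `context` string switches to the question/context/answer prompt template. A minimal sketch, assuming the snippet above has already been run (the Korean strings are illustrative placeholders, not fixed prompts):

```python
# Single call to the ask() helper defined in the Inference snippet above.
# Passing `context` fills the 맥락 (context) slot of the prompt template.
question = "한국의 수도는 어디인가요?"          # "What is the capital of Korea?" (placeholder)
background = "여기에 참고할 문서 내용을 넣는다."  # placeholder background text
print("etri_ai:", ask(question, context=background))
```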
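
For reference, the two training hyperparameters visible in the hunk above (a linear learning-rate schedule over 5 epochs) map onto `transformers.TrainingArguments` roughly as sketched below; every other value is a placeholder, not a setting taken from this model's actual training run:

```python
from transformers import TrainingArguments

# Only lr_scheduler_type and num_train_epochs come from the README above;
# output_dir is an illustrative placeholder.
training_args = TrainingArguments(
    output_dir="outputs",         # placeholder
    lr_scheduler_type="linear",   # - lr_scheduler_type: linear
    num_train_epochs=5.0,         # - num_epochs: 5.0
)
```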