ldhldh commited on
Commit
d8dd85a
•
1 Parent(s): 434f773

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -37,7 +37,7 @@ model_name = "quantumaikr/llama-2-70b-fb16-korean"
37
  #daekeun-ml/Llama-2-ko-DPO-13B
38
  #daekeun-ml/Llama-2-ko-instruct-13B
39
  #quantumaikr/llama-2-70b-fb16-korean
40
- tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False)
41
 
42
  model = None
43
  model = AutoDistributedModelForCausalLM.from_pretrained(model_name)
@@ -263,8 +263,8 @@ def chat(id, npc, text):
263
  [๋Œ€ํ™”๊ธฐ๋ก]{history[npc][id]}
264
  """
265
 
266
- #inputs = tokenizer("아", return_tensors="pt")["input_ids"]
267
- #outputs = model.generate(inputs, do_sample=True, temperature=0.6, top_p=0.75, max_new_tokens=2)
268
  #output = tokenizer.decode(outputs[0])[len(prom)+3:-1].split("<")[0].split("###")[0].replace(". ", ".\n")
269
  #output = cleanText(output)
270
  #print(tokenizer.decode(outputs[0]))
 
37
  #daekeun-ml/Llama-2-ko-DPO-13B
38
  #daekeun-ml/Llama-2-ko-instruct-13B
39
  #quantumaikr/llama-2-70b-fb16-korean
40
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
41
 
42
  model = None
43
  model = AutoDistributedModelForCausalLM.from_pretrained(model_name)
 
263
  [๋Œ€ํ™”๊ธฐ๋ก]{history[npc][id]}
264
  """
265
 
266
+ inputs = tokenizer("아", return_tensors="pt")["input_ids"]
267
+ outputs = model.generate(inputs, do_sample=True, temperature=0.6, top_p=0.75, max_new_tokens=2)
268
  #output = tokenizer.decode(outputs[0])[len(prom)+3:-1].split("<")[0].split("###")[0].replace(". ", ".\n")
269
  #output = cleanText(output)
270
  #print(tokenizer.decode(outputs[0]))