mkthoma committed
Commit d13cd66
1 Parent(s): 61aed97

app update

Files changed (1):
  app.py +4 -3
app.py CHANGED
@@ -6,9 +6,9 @@ model_path = "finetuned_phi2"
 model = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True)
 tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
 
-gen = pipeline('text-generation', model=model, tokenizer=tokenizer)
-result = gen(prompt)
-print(result[0]['generated_text'])
+# gen = pipeline('text-generation', model=model, tokenizer=tokenizer)
+# result = gen(prompt)
+# print(result[0]['generated_text'])
 
 
 def generate(prompt, history, temperature=0.3, max_new_tokens=512, top_p=0.95, repetition_penalty=1.0,):
@@ -25,6 +25,7 @@ def generate(prompt, history, temperature=0.3, max_new_tokens=512, top_p=0.95, repetition_penalty=1.0,):
         seed=42)
 
     #output = gen(prompt, **generate_kwargs)
+    gen = pipeline('text-generation', model=model, tokenizer=tokenizer)
     output = gen(prompt)
     return output[0]['generated_text']
 
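
In effect, this commit moves the pipeline construction from module level into generate(): the top-level gen/result/print lines are commented out, and gen is now built just before gen(prompt) is called. A minimal sketch of how app.py likely reads after the change, assuming the standard transformers pipeline API; the import line and the omitted generate_kwargs body are assumptions, since they are not shown in the diff:

# Sketch only: reconstructed from the diff above, not the full file.
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

model_path = "finetuned_phi2"
model = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)


def generate(prompt, history, temperature=0.3, max_new_tokens=512, top_p=0.95, repetition_penalty=1.0):
    # After this commit the pipeline is created inside the function, so it no
    # longer runs at import time (where `prompt` was presumably undefined).
    gen = pipeline('text-generation', model=model, tokenizer=tokenizer)
    # The sampling-kwargs call stays commented out in the diff; gen(prompt)
    # runs with the pipeline's default generation settings.
    # output = gen(prompt, **generate_kwargs)
    output = gen(prompt)
    return output[0]['generated_text']

Note that rebuilding the pipeline on every call works but adds per-request overhead; constructing it once at module level (without the stray gen(prompt) call that was removed here) would avoid that.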