ToletiSri committed on
Commit
d101868
1 Parent(s): ef9fa1a

Reduced examples and size to save computation

Browse files
Files changed (1) hide show
  1. app.py +2 -3
app.py CHANGED
@@ -30,15 +30,14 @@ tokenizer.pad_token = tokenizer.eos_token
30
 
31
  def inference(prompt, count):
32
  count = int(count)
33
- pipe = pipeline(task="text-generation", model=model, tokenizer=tokenizer, max_length=200)
34
  result = pipe(f"### Human: {prompt}",max_new_tokens=count)
35
  out_text = result[0]['generated_text']
36
  return out_text
37
 
38
  title = "TSAI S21 Assignment: Adaptive QLoRA training on open assist oasst1 dataset, using microsoft/phi2 model"
39
  description = "A simple Gradio interface that accepts a context and generates GPT like text "
40
- examples = [["What is a large language model?","200"],
41
- ["Explain about monopsony","200"]
42
  ]
43
 
44
 
 
30
 
31
  def inference(prompt, count):
32
  count = int(count)
33
+ pipe = pipeline(task="text-generation", model=model, tokenizer=tokenizer)
34
  result = pipe(f"### Human: {prompt}",max_new_tokens=count)
35
  out_text = result[0]['generated_text']
36
  return out_text
37
 
38
  title = "TSAI S21 Assignment: Adaptive QLoRA training on open assist oasst1 dataset, using microsoft/phi2 model"
39
  description = "A simple Gradio interface that accepts a context and generates GPT like text "
40
+ examples = [["What is a large language model?","20"]
 
41
  ]
42
 
43