nazneen commited on
Commit
9a316f2
β€’
1 Parent(s): f16da92

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -18,8 +18,8 @@ instruct_pipeline_falcon = pipeline(model="tiiuae/falcon-7b-instruct", tokenizer
18
  # NOTE(review): loads the HuggingFaceH4 llama-7b IFT checkpoint in bfloat16 with auto device
  # placement; `use_auth_token=TOKEN` implies the repo is gated — confirm TOKEN is set upstream.
  instruct_pipeline_llama = pipeline(model="HuggingFaceH4/llama-7b-ift-ds-save-test4", torch_dtype=torch.bfloat16, trust_remote_code=True, device_map="auto", use_auth_token=TOKEN)
19
 
20
def generate(query, temperature, top_p, top_k, max_new_tokens):
    """Run *query* through the Falcon and Llama instruct pipelines.

    Returns a two-element list with each pipeline's raw output
    (Falcon first, Llama second), forwarding the sampling knobs
    (temperature, top_p, top_k, max_new_tokens) unchanged to both.
    """
    # Shared generation settings so both pipelines sample identically.
    gen_kwargs = dict(
        temperature=temperature,
        top_p=top_p,
        top_k=top_k,
        max_new_tokens=max_new_tokens,
    )
    return [
        instruct_pipeline_falcon(query, **gen_kwargs),
        instruct_pipeline_llama(query, **gen_kwargs),
    ]
23
 
24
 
25
 
 
18
  # NOTE(review): same checkpoint load as the pre-change hunk — bfloat16, trust_remote_code,
  # auto device map, gated repo via TOKEN. Presumably duplicated here only by the diff view.
  instruct_pipeline_llama = pipeline(model="HuggingFaceH4/llama-7b-ift-ds-save-test4", torch_dtype=torch.bfloat16, trust_remote_code=True, device_map="auto", use_auth_token=TOKEN)
19
 
20
def generate(query, temperature, top_p, top_k, max_new_tokens):
    """Generate completions for *query* from both instruct pipelines.

    Returns a two-element list of strings: the Falcon completion
    followed by the Llama completion. Each pipeline result is a list
    of dicts; the first candidate's "generated_text" field is taken.
    """
    # Identical sampling configuration for both models.
    shared = dict(
        temperature=temperature,
        top_p=top_p,
        top_k=top_k,
        max_new_tokens=max_new_tokens,
    )
    texts = []
    for pipe in (instruct_pipeline_falcon, instruct_pipeline_llama):
        texts.append(pipe(query, **shared)[0]["generated_text"])
    return texts
23
 
24
 
25