Tonic committed on
Commit
8877ae0
1 Parent(s): 3d8814d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -4
app.py CHANGED
@@ -17,10 +17,10 @@ raven_pipeline = pipeline(
17
  device_map="auto",
18
  )
19
 
20
- @spaces.GPU
21
  def process_text(input_text: str) -> str:
22
  prompt = f"User Query: {input_text}<human_end>"
23
- result = raven_pipeline(prompt, temperature=0.001, max_new_tokens=1200, return_full_text=False, do_sample=False)[0]["generated_text"].replace("Call:", "").strip()
24
  # torch.cuda.empty_cache()
25
  return result
26
 
@@ -40,9 +40,9 @@ def main():
40
  with gr.Blocks() as demo:
41
  gr.Markdown(title)
42
  gr.Markdown(description)
43
- input_text = gr.Code( language='json', label="Input your functions then your task :")
44
  submit_button = gr.Button("Submit")
45
- output_text = gr.Code( language='json' , label="Nexus🐦‍⬛Raven")
46
  submit_button.click(process_text, inputs=input_text, outputs=output_text)
47
 
48
  demo.launch()
 
17
  device_map="auto",
18
  )
19
 
20
@spaces.GPU(enable_queue=True)
def process_text(input_text: str) -> str:
    """Run the Raven pipeline on the user's query and return the cleaned call string.

    Wraps *input_text* in the Raven prompt template, generates deterministically
    (do_sample=False, near-zero temperature), and strips the leading "Call:"
    marker from the generated text before returning it.
    """
    prompt = f"User Query: {input_text}<human_end>"
    generation = raven_pipeline(
        prompt,
        temperature=0.001,
        max_new_tokens=300,
        return_full_text=False,
        do_sample=False,
    )
    # The pipeline returns a list of dicts; take the first candidate's text.
    result = generation[0]["generated_text"].replace("Call:", "").strip()
    # torch.cuda.empty_cache()
    return result
26
 
 
40
  with gr.Blocks() as demo:
41
  gr.Markdown(title)
42
  gr.Markdown(description)
43
+ input_text = gr.Code( language='python', label="Input your functions then your task :")
44
  submit_button = gr.Button("Submit")
45
+ output_text = gr.Code( language='python' , label="Nexus🐦‍⬛Raven")
46
  submit_button.click(process_text, inputs=input_text, outputs=output_text)
47
 
48
  demo.launch()