kat33 committed
Commit ab4a091
Parent: 9fe5d5a

Update app.py

Files changed (1)
  1. app.py +7 -6
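In short, this commit threads a user-controlled max_tokens value from a new gr.Slider input through to the llama-cpp call (previously hardcoded to 33), adds the missing newline before the closing triple-quote delimiter around the question, renames the wrapped prompt copy to question1 so the original question is returned unchanged, and wraps the returned answer in gr.Markdown.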
app.py CHANGED
@@ -27,15 +27,16 @@ def download_model():
     print("Downloaded " + file)
     return file
 
-def question_answer(context, question):
+def question_answer(context, question, max_tokens):
     mfile=download_model()
     # structure the prompt to make it easier for the ai
-    question="\"\"\"\n" + question + "\"\"\"\n"
-    text=context + "\n\nQuestion: " + question + "\nPlease use markdown formatting for answer. \nAnswer:\n"
+    question1="\"\"\"\n" + question + "\n\"\"\"\n"
+    text=context + "\n\nQuestion: " + question1 + "\nPlease use markdown formatting for answer. \nAnswer:\n"
     llm = Llama(model_path=mfile)
-    output = llm(text, max_tokens=33, stop=["### Response"], echo=True)
+    output = llm(text, max_tokens=max_tokens, stop=["### Response"], echo=True)
     print(output)
-    return question,output['choices'][0]['text']
+
+    return question, gr.Markdown(output['choices'][0]['text'])
     '''
     Output is of the form:
     {
@@ -64,5 +65,5 @@ def question_answer(context, question):
     #return generator(text)
 
 
-app=gr.Interface(fn=question_answer, inputs=["text", "text"], outputs=["textbox", "text"])
+app=gr.Interface(fn=question_answer, inputs=["text", "text",gr.Slider(33, 2333)], outputs=["textbox", "text"])
 app.launch()
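For context, here is a minimal sketch of what app.py looks like after this commit. Only the diffed lines are taken from the repo; the imports and the body of download_model() do not appear in the hunks above, so the hf_hub_download call, repo id, and filename below are assumptions for illustration.

# Sketch of app.py after this commit. Imports and download_model()
# internals are assumptions; only the diffed lines come from the repo.
from huggingface_hub import hf_hub_download  # assumed download mechanism
from llama_cpp import Llama
import gradio as gr

def download_model():
    # Hypothetical repo_id/filename; the real values are outside this diff.
    file = hf_hub_download(repo_id="some-org/some-model-GGUF",
                           filename="model.gguf")
    print("Downloaded " + file)
    return file

def question_answer(context, question, max_tokens):
    mfile = download_model()
    # Wrap the question in triple quotes so the model sees it as a block
    # (the commit adds the missing newline before the closing delimiter).
    question1 = "\"\"\"\n" + question + "\n\"\"\"\n"
    text = (context + "\n\nQuestion: " + question1
            + "\nPlease use markdown formatting for answer. \nAnswer:\n")
    llm = Llama(model_path=mfile)
    # max_tokens now comes from the slider instead of the hardcoded 33.
    output = llm(text, max_tokens=max_tokens, stop=["### Response"], echo=True)
    print(output)
    return question, gr.Markdown(output['choices'][0]['text'])

# Slider bounds (33, 2333) are taken verbatim from the commit.
app = gr.Interface(fn=question_answer,
                   inputs=["text", "text", gr.Slider(33, 2333)],
                   outputs=["textbox", "text"])
app.launch()

One caveat worth flagging: the second output is still declared as a plain "text" component while the function now returns a gr.Markdown instance; whether that renders as formatted Markdown depends on the Gradio version, and declaring the output as gr.Markdown() directly would make the intent explicit.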