ManishThota committed on
Commit
a0c8a0e
1 Parent(s): 26329a9

Update app.py

Files changed (1)
  1. app.py +10 -10
app.py CHANGED
@@ -38,14 +38,14 @@ def gradio_predict(image, question, max_tokens):
     return answer
 
 
-examples = [["data/week_01_page_024.png", 'Can you explain the slide?',100],
-["data/week_03_page_091.png", 'Can you explain the slide?',100],
-["data/week_01_page_062.png", 'Are the training images labeled?',100],
-["data/week_05_page_027.png", 'What is meant by eigenvalue multiplicity?',100],
-["data/week_05_page_030.png", 'What does K represent?',100],
-["data/week_15_page_046.png", 'How are individual heterogeneous models trained?',100],
-["data/week_15_page_021.png", 'How does Bagging affect error?',100],
-["data/week_15_page_037.png", "What does the '+' and '-' represent?",100]]
+# examples = [["data/week_01_page_024.png", 'Can you explain the slide?',100],
+# ["data/week_03_page_091.png", 'Can you explain the slide?',100],
+# ["data/week_01_page_062.png", 'Are the training images labeled?',100],
+# ["data/week_05_page_027.png", 'What is meant by eigenvalue multiplicity?',100],
+# ["data/week_05_page_030.png", 'What does K represent?',100],
+# ["data/week_15_page_046.png", 'How are individual heterogeneous models trained?',100],
+# ["data/week_15_page_021.png", 'How does Bagging affect error?',100],
+# ["data/week_15_page_037.png", "What does the '+' and '-' represent?",100]]
 
 # Define the Gradio interface
 iface = gr.Interface(
@@ -54,8 +54,8 @@ iface = gr.Interface(
     gr.Textbox(label="Question", placeholder="e.g. Can you explain the slide?", scale=4),
     gr.Slider(2, 500, value=100, label="Token Count", info="Choose between 2 and 500")],
     outputs=gr.TextArea(label="Answer"),
-    examples=examples,
-    title="Sparrow - Tiny 3B | Visual Question Answering",
+    # examples=examples,
+    title="SparrowVQE - Tiny 3B | Visual Question Answering",
     description="An interactive chat model that can answer questions about images in an Academic context. \n We can input images, and the system will analyze them to provide information about their contents. I've utilized this capability by feeding slides from PowerPoint presentations used in classes and the lecture content passed as text. Consequently, the model now mimics the behavior and responses of my professors. So, if I present any PowerPoint slide, it explains it just like my professor would, further it can be personalized.",
 )
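
For context, below is a minimal sketch of how app.py reads after this commit. Only the lines in the hunks above come from the diff; the fn= argument, the gr.Image input component, the stubbed gradio_predict body, the shortened description, and the final launch() call are assumptions for illustration, not part of the actual file.

import gradio as gr

def gradio_predict(image, question, max_tokens):
    # Stub: the real app runs the SparrowVQE model on the image and question here.
    answer = f"(model answer for {question!r}, up to {max_tokens} tokens)"
    return answer

# Define the Gradio interface (the example gallery is commented out by this commit)
iface = gr.Interface(
    fn=gradio_predict,  # assumed; not shown in the diff
    inputs=[gr.Image(type="pil", label="Image"),  # assumed input component
            gr.Textbox(label="Question", placeholder="e.g. Can you explain the slide?", scale=4),
            gr.Slider(2, 500, value=100, label="Token Count", info="Choose between 2 and 500")],
    outputs=gr.TextArea(label="Answer"),
    # examples=examples,
    title="SparrowVQE - Tiny 3B | Visual Question Answering",
    description="An interactive chat model that can answer questions about images in an Academic context.",
)

if __name__ == "__main__":
    iface.launch()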