tosin2013 committed
Commit: ba9ed60
1 Parent(s): beeaade

Update app.py

Files changed (1):
  1. app.py +30 -2
app.py CHANGED
@@ -2,6 +2,9 @@ import gradio as gr
 from transformers import pipeline
 import PyPDF2
 import markdown
+import matplotlib.pyplot as plt
+import io
+import base64
 
 # Preload models
 models = {
@@ -19,6 +22,23 @@ def load_model(model_name):
         loaded_models[model_name] = pipeline("question-answering", model=models[model_name])
     return loaded_models[model_name]
 
+def generate_score_chart(score):
+    plt.figure(figsize=(6, 4))
+    plt.bar(["Confidence Score"], [score], color='skyblue')
+    plt.ylim(0, 1)
+    plt.ylabel("Score")
+    plt.title("Confidence Score")
+
+    buf = io.BytesIO()
+    plt.savefig(buf, format='png')
+    plt.close()
+    buf.seek(0)
+    return base64.b64encode(buf.getvalue()).decode()
+
+def generate_report(answer, score, score_explanation, score_chart):
+    report = f"### Answer:\n\n{answer}\n\n### Confidence Score: {score}\n\n### Score Explanation:\n\n{score_explanation}\n\n![Score Chart](data:image/png;base64,{score_chart})"
+    return report
+
 def answer_question(model_name, file, question, status):
     status = "Loading model..."
     model = load_model(model_name)
@@ -42,11 +62,17 @@ def answer_question(model_name, file, question, status):
     answer = result['answer']
     score = result['score']
 
+    # Generate the score chart
+    score_chart = generate_score_chart(score)
+
     # Explain score
     score_explanation = f"The confidence score ranges from 0 to 1, where a higher score indicates higher confidence in the answer's correctness. In this case, the score is {score:.2f}. A score closer to 1 implies the model is very confident about the answer."
 
+    # Generate the report
+    report = generate_report(answer, f"{score:.2f}", score_explanation, score_chart)
+
     status = "Model loaded"
-    return answer, f"{score:.2f}", score_explanation, status
+    return answer, f"{score:.2f}", score_explanation, score_chart, report, status
 
 # Define the Gradio interface
 with gr.Blocks() as interface:
@@ -73,6 +99,8 @@ with gr.Blocks() as interface:
         answer_output = gr.Textbox(label="Answer")
         score_output = gr.Textbox(label="Confidence Score")
         explanation_output = gr.Textbox(label="Score Explanation")
+        chart_output = gr.Image(label="Score Chart")
+        report_output = gr.Markdown(label="Report")
 
     with gr.Row():
         submit_button = gr.Button("Submit")
@@ -85,7 +113,7 @@ with gr.Blocks() as interface:
     submit_button.click(
         on_submit,
         inputs=[model_dropdown, file_input, question_input],
-        outputs=[answer_output, score_output, explanation_output, status_output]
+        outputs=[answer_output, score_output, explanation_output, chart_output, report_output, status_output]
     )
 
 if __name__ == "__main__":
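
The change above reduces to one technique: render a matplotlib bar chart of the confidence score into an in-memory PNG, base64-encode it, and embed it in a Markdown report as a data URI. The standalone sketch below exercises that flow outside of Gradio; the two helper bodies are copied from the diff, while the sample answer, score, and the report.md output path are illustrative assumptions and not part of app.py.

# Standalone sketch of the chart/report flow added in this commit.
# Helper bodies mirror the diff; sample inputs and the report.md
# output file are illustrative assumptions, not app.py code.
import base64
import io

import matplotlib
matplotlib.use("Agg")  # headless backend so the sketch runs without a display
import matplotlib.pyplot as plt


def generate_score_chart(score):
    # Render a single-bar chart of the confidence score to an in-memory PNG
    # and return it as a base64 string (same approach as the diff).
    plt.figure(figsize=(6, 4))
    plt.bar(["Confidence Score"], [score], color="skyblue")
    plt.ylim(0, 1)
    plt.ylabel("Score")
    plt.title("Confidence Score")

    buf = io.BytesIO()
    plt.savefig(buf, format="png")
    plt.close()
    buf.seek(0)
    return base64.b64encode(buf.getvalue()).decode()


def generate_report(answer, score, score_explanation, score_chart):
    # Assemble the Markdown report, embedding the chart as a data URI
    # so the image needs no separate file.
    return (
        f"### Answer:\n\n{answer}\n\n"
        f"### Confidence Score: {score}\n\n"
        f"### Score Explanation:\n\n{score_explanation}\n\n"
        f"![Score Chart](data:image/png;base64,{score_chart})"
    )


if __name__ == "__main__":
    # Example values (hypothetical; in app.py they come from the QA pipeline).
    answer, score = "Paris", 0.87
    explanation = f"The confidence score is {score:.2f}; closer to 1 means higher confidence."

    chart_b64 = generate_score_chart(score)
    report = generate_report(answer, f"{score:.2f}", explanation, chart_b64)

    # Writing the report to disk lets you open it in any Markdown viewer.
    with open("report.md", "w") as f:
        f.write(report)
    print("Wrote report.md with an embedded", len(chart_b64), "character base64 chart")

Embedding the PNG as a data URI keeps the report string self-contained, which is presumably why the commit returns the full Markdown report to the gr.Markdown output instead of saving the chart to a separate image file.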