import sqlite3
import time

import streamlit as st
from pydantic import BaseModel, Field
from llama_index.core.tools import FunctionTool
db_path = "./database/mock_qna.sqlite"
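# Implied database layout, inferred from the queries below (the actual schema
# file is not part of this module):
#   qna_tbl(id, chapter, question, option_1..option_4, correct_answer, reasoning)
#   answer_tbl(id, correct_answer, user_answer)  -- one row per answered question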
qna_question_description = """
Only trigger this when the user wants to be tested with a question.
Use this tool to extract the chapter number from the body of the input text;
the chapter number will then be used as a filtering criterion for
retrieving the right question set from the database.
The `chapter_n` argument is then passed to the function for Q&A question retrieval.
If no chapter number is specified, the user asks for a random question,
or the user has no preference over which chapter of the textbook to be tested on,
set the function argument `chapter_n` to `Chapter_0`.
"""
qna_question_data_format = """
The format of the function argument `chapter_n` looks as follows:
it should use `Chapter_` as a prefix.
Example 1: `Chapter_1` for the first chapter.
Example 2: for chapter 12 of the textbook, you should return `Chapter_12`.
Example 3: `Chapter_5` for the fifth chapter.
"""
qna_answer_description = """
Use this tool to trigger the evaluation of the user's provided input against the
correct answer of the Q&A question asked. When the user answers the
question, they can reply in natural language or give the letter of the
answer choice they think is correct.
If the user's answer is not a single letter but is contextually
closest to a particular answer choice, return the corresponding
letter A, B, C, D or Z whose answer choice is closest in meaning.
The `user_selected_answer` argument is then passed to the
function for Q&A answer evaluation.
"""
qna_answer_data_format = """
The format of the function argument `user_selected_answer` looks as follows:
it should be a single character such as `A`, `B`, `C`, `D` or `Z`.
Example 1: the user's answer is `a`; it means choice `A`.
Example 2: the user's answer is contextually closest to the 3rd answer choice; it means `C`.
Example 3: the user says the last one is the answer; it means `D`.
Example 4: if the user doesn't know the answer, it means `Z`.
"""
class Question_Model(BaseModel):
    chapter_n: str = Field(
        ...,
        # Require at least one digit (`\d+`, not `\d*`, which would also
        # accept the bare prefix `Chapter_`); `Chapter_0` is the
        # random/no-preference case.
        pattern=r'^Chapter_\d+$',
        description=qna_question_data_format,
    )

class Answer_Model(BaseModel):
    user_selected_answer: str = Field(
        ...,
        pattern=r'^[ABCDZ]$',
        description=qna_answer_data_format,
    )
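# Illustrative only (not part of the app flow): how the schemas above constrain
# the tool arguments. These example values are hypothetical.
#   Question_Model(chapter_n="Chapter_3")    # passes ^Chapter_\d+$
#   Question_Model(chapter_n="chapter 3")    # raises pydantic.ValidationError
#   Answer_Model(user_selected_answer="B")   # passes ^[ABCDZ]$
#   Answer_Model(user_selected_answer="BC")  # raises pydantic.ValidationError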
def get_qna_question(chapter_n: str) -> str:
    """Fetch an unanswered Q&A question, optionally filtered by chapter."""
    con = sqlite3.connect(db_path)
    cur = con.cursor()
    # LEFT JOIN against answer_tbl so only questions that have not been
    # answered yet are returned; parameterize the chapter filter rather than
    # interpolating it into the SQL string, to avoid SQL injection.
    sql_string = """SELECT q.id, question, option_1, option_2, option_3, option_4,
                           q.correct_answer, q.reasoning
                    FROM qna_tbl q LEFT JOIN answer_tbl a
                    ON q.id = a.id
                    WHERE a.id IS NULL"""
    params = ()
    if chapter_n != "Chapter_0":
        sql_string += " AND chapter = ?"
        params = (chapter_n,)
    # sql_string = sql_string + " ORDER BY RANDOM() LIMIT 1"
    result = cur.execute(sql_string, params).fetchone()
    con.close()
    if result is None:
        return "No unanswered questions were found for the requested chapter."
    (question_id, question, option_1, option_2,
     option_3, option_4, c_answer, reasons) = result
    qna_str = (
        "As requested, here is the retrieved question: \n"
        + "============================================= \n"
        + question.replace("\\n", "\n") + "\n"
        + "A) " + option_1 + "\n"
        + "B) " + option_2 + "\n"
        + "C) " + option_3 + "\n"
        + "D) " + option_4
    )
    # Stash the question metadata in session state so evaluate_qna_answer()
    # can grade the user's reply later in the conversation.
    st.session_state.question_id = question_id
    st.session_state.qna_answer = c_answer
    st.session_state.reasons = reasons
    return qna_str

def evaluate_qna_answer(user_selected_answer: str) -> str:
    # Default response so the `return` below cannot raise NameError when an
    # exception is caught before qna_answer_response is assigned.
    qna_answer_response = "Sorry, I could not evaluate your answer. Please request a question first."
    try:
        # Map letter choices to the numeric encoding used in the database;
        # Z / 0 means the user does not know the answer.
        answer_mapping = {"A": 1, "B": 2, "C": 3, "D": 4, "Z": 0}
        num_mapping = {v: k for k, v in answer_mapping.items()}
        user_answer_numeric = answer_mapping.get(user_selected_answer, 0)
        question_id = st.session_state.question_id
        qna_answer = st.session_state.qna_answer
        reasons = st.session_state.reasons
        qna_answer_alphabet = num_mapping.get(int(qna_answer), "ERROR")
        con = sqlite3.connect(db_path)
        cur = con.cursor()
        # Record the attempt with a parameterized INSERT to avoid SQL injection.
        cur.execute(
            "INSERT INTO answer_tbl VALUES (?, ?, ?)",
            (question_id, qna_answer, user_answer_numeric),
        )
        con.commit()
        con.close()
        # Compare numerically; the stored answer may arrive as int or str.
        if int(qna_answer) == user_answer_numeric:
            st.toast("🎯 Yummy yummy, hooray!", icon="🎉")
            time.sleep(2)
            st.toast("🎉💯 You got it right!", icon="🎉")
            time.sleep(2)
            st.toast("🔥 You are amazing! 💯💯", icon="💪")
            st.balloons()
        else:
            st.toast("🙈 Something doesn't seem right.. 😅", icon="🙈")
            time.sleep(2)
            st.toast("🥶 Are you sure..? 😬😬", icon="🙈")
            time.sleep(2)
            st.toast("🤔🤔 Nevertheless, it was a good try!! 🏋️‍♀️", icon="😉")
            st.snow()
reasoning = "" if "textbook" in reasons else "Rationale is that: " + reasons
qna_answer_response = (
f"Your selected answer is `{user_selected_answer}`, "
f"but the actual answer is `{qna_answer_alphabet}`. " + reasoning
)
except Exception as e:
print(e)
return qna_answer_response
get_qna_question_tool = FunctionTool.from_defaults(
    fn=get_qna_question,
    name="Extract_Question",
    description=qna_question_description,
    fn_schema=Question_Model,
)

evaluate_qna_answer_tool = FunctionTool.from_defaults(
    fn=evaluate_qna_answer,
    name="Evaluate_Answer",
    description=qna_answer_description,
    fn_schema=Answer_Model,
)
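
# Usage sketch (an assumption, not part of this file): the two tools above are
# meant to be handed to a LlamaIndex agent elsewhere in the app. The exact
# import paths and LLM choice depend on the installed llama-index packages.
#
# from llama_index.core.agent import ReActAgent
# from llama_index.llms.openai import OpenAI
#
# agent = ReActAgent.from_tools(
#     [get_qna_question_tool, evaluate_qna_answer_tool],
#     llm=OpenAI(model="gpt-4o-mini"),
#     verbose=True,
# )
# response = agent.chat("Test me with a question from chapter 3.")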