Add logging and error validation to nlu endpoint
Files changed:
- app.py +180 -105
- mathtext_fastapi/conversation_manager.py +6 -2
- mathtext_fastapi/curriculum_mapper.py +183 -0
- mathtext_fastapi/data/Rori_Framework_v1.xlsx +0 -0
- mathtext_fastapi/data/curriculum_framework_for_tests.xlsx +0 -0
- mathtext_fastapi/data/text2int_results.csv +6 -6
- mathtext_fastapi/global_state_manager.py +23 -0
- mathtext_fastapi/nlu.py +114 -20
- mathtext_fastapi/v2_conversation_manager.py +271 -0
- pyproject.toml +6 -3
- requirements.txt +9 -6
- scripts/bump_version.py +36 -0
- scripts/cleanpyc.sh +2 -0
- scripts/make_request.py +82 -33
- scripts/pin_requirements.py +62 -0
- scripts/quiz/__init__.py +0 -0
- scripts/quiz/data.csv +56 -0
- users.json +1 -0
app.py
CHANGED
@@ -1,33 +1,42 @@
 """FastAPI endpoint
 To run locally use 'uvicorn app:app --host localhost --port 7860'
+or
+`python -m uvicorn app:app --reload --host localhost --port 7860`
 """
 import ast
+import json
+from json import JSONDecodeError
+from logging import getLogger
+import mathactive.microlessons.num_one as num_one_quiz
+import os
 import sentry_sdk

 from fastapi import FastAPI, Request
 from fastapi.responses import JSONResponse
 from fastapi.staticfiles import StaticFiles
 from fastapi.templating import Jinja2Templates
-from mathtext.sentiment import sentiment
+# from mathtext.sentiment import sentiment
 from mathtext.text2int import text2int
-from pydantic import BaseModel
-
 from mathtext_fastapi.logging import prepare_message_data_for_logging
 from mathtext_fastapi.conversation_manager import manage_conversation_response
+from mathtext_fastapi.v2_conversation_manager import manage_conversation_response
 from mathtext_fastapi.nlu import evaluate_message_with_nlu
 from mathtext_fastapi.nlu import run_intent_classification
+from pydantic import BaseModel
+
+
+from dotenv import load_dotenv
+load_dotenv()
+
+log = getLogger(__name__)

 sentry_sdk.init(
-    dsn=
+    dsn=os.environ.get('SENTRY_DSN'),
     # Set traces_sample_rate to 1.0 to capture 100%
     # of transactions for performance monitoring.
     # We recommend adjusting this value in production,
-    traces_sample_rate=0
+    traces_sample_rate=1.0,
 )

 app = FastAPI()
@@ -57,11 +66,11 @@ def hello(content: Text = None):
     return JSONResponse(content=content)


-@app.post("/sentiment-analysis")
-def sentiment_analysis_ep(content: Text = None):
-    ml_response = sentiment(content.content)
-    content = {"message": ml_response}
-    return JSONResponse(content=content)
+# @app.post("/sentiment-analysis")
+# def sentiment_analysis_ep(content: Text = None):
+#     ml_response = sentiment(content.content)
+#     content = {"message": ml_response}
+#     return JSONResponse(content=content)


 @app.post("/text2int")
@@ -71,7 +80,40 @@ def text2int_ep(content: Text = None):
     return JSONResponse(content=content)


-@app.post("/manager")
+@app.post("/v1/manager")
+async def programmatic_message_manager(request: Request):
+    """
+    Calls conversation management function to determine the next state
+
+    Input
+    request.body: dict - message data for the most recent user response
+    {
+        "author_id": "+47897891",
+        "contact_uuid": "j43hk26-2hjl-43jk-hnk2-k4ljl46j0ds09",
+        "author_type": "OWNER",
+        "message_body": "a test message",
+        "message_direction": "inbound",
+        "message_id": "ABJAK64jlk3-agjkl2QHFAFH",
+        "message_inserted_at": "2022-07-05T04:00:34.03352Z",
+        "message_updated_at": "2023-02-14T03:54:19.342950Z",
+    }
+
+    Output
+    context: dict - the information for the current state
+    {
+        "user": "47897891",
+        "state": "welcome-message-state",
+        "bot_message": "Welcome to Rori!",
+        "user_message": "",
+        "type": "ask"
+    }
+    """
+    data_dict = await request.json()
+    context = manage_conversation_response(data_dict)
+    return JSONResponse(context)
+
+
+@app.post("/v2/manager")
 async def programmatic_message_manager(request: Request):
     """
     Calls conversation management function to determine the next state
@@ -123,172 +165,205 @@ async def evaluate_user_message_with_nlu_api(request: Request):
     {'type':'integer', 'data': '8', 'confidence': 0}
     {'type':'sentiment', 'data': 'negative', 'confidence': 0.99}
     """
+    log.info(f'Received request: {request}')
+    log.info(f'Request header: {request.headers}')
+    request_body = await request.body()
+    log.info(f'Request body: {request_body}')
+    request_body_str = request_body.decode()
+    log.info(f'Request_body_str: {request_body_str}')
+
+    try:
+        data_dict = await request.json()
+    except JSONDecodeError:
+        log.error(f'Request.json failed: {dir(request)}')
+        data_dict = {}
+    message_data = data_dict.get('message_data')
+
+    if not message_data:
+        log.error(f'Data_dict: {data_dict}')
+        message_data = data_dict.get('message', {})
     nlu_response = evaluate_message_with_nlu(message_data)
     return JSONResponse(content=nlu_response)
+
+
+@app.post("/num_one")
+async def num_one(request: Request):
+    """
+    Input:
+    {
+        "user_id": 1,
+        "message_text": 5,
+    }
+    Output:
+    {
+        'messages':
+            ["Let's", 'practice', 'counting', '', '', '46...', '47...', '48...', '49', '', '', 'After', '49,', 'what', 'is', 'the', 'next', 'number', 'you', 'will', 'count?\n46,', '47,', '48,', '49'],
+        'input_prompt': '50',
+        'state': 'question'
+    }
+    """
+    data_dict = await request.json()
+    message_data = ast.literal_eval(data_dict.get('message_data', '').get('message_body', ''))
+    user_id = message_data['user_id']
+    message_text = message_data['message_text']
+    return num_one_quiz.process_user_message(user_id, message_text)


-@app.post("/
+@app.post("/start")
 async def ask_math_question(request: Request):
-    """Generate a question
+    """Generate a question data

     Input
-    request.body: json - amount of correct and incorrect answers in the account
     {
-        'level': 'easy'
+        'difficulty': 0.1,
+        'do_increase': True | False
     }

     Output
-    context: dict - the information for the current state
     {
         'text': 'What is 1+2?',
-        'number_correct': 0,
-        'number_incorrect': 0,
-        'hints_used': 0
+        'difficulty': 0.2,
+        'question_numbers': [3, 1, 4]
     }
     """
     data_dict = await request.json()
     message_data = ast.literal_eval(data_dict.get('message_data', '').get('message_body', ''))
-    level = message_data['level']
+    difficulty = message_data['difficulty']
+    do_increase = message_data['do_increase']

-    return JSONResponse(generators.start_interactive_math(
+    return JSONResponse(generators.start_interactive_math(difficulty, do_increase))


 @app.post("/hint")
 async def get_hint(request: Request):
-    """Generate a hint
+    """Generate a hint data

     Input
-    request.body:
     {
-        'number_incorrect': 0,
-        'level': 'easy',
-        'hints_used': 0
+        'start': 5,
+        'step': 1,
+        'difficulty': 0.1
     }

     Output
-    context: dict - the information for the current state
     {
-        'text': 'What is
-        'number_correct': 0,
-        'number_incorrect': 0,
-        'level': 'easy',
-        'hints_used': 0
+        'text': 'What number is greater than 4 and less than 6?',
+        'difficulty': 0.1,
+        'question_numbers': [5, 1, 6]
     }
     """
     data_dict = await request.json()
     message_data = ast.literal_eval(data_dict.get('message_data', '').get('message_body', ''))
-    number_incorrect = message_data['number_incorrect']
-    level = message_data['level']
-    hints_used = message_data['hints_used']
+    start = message_data['start']
+    step = message_data['step']
+    difficulty = message_data['difficulty']

-    return JSONResponse(hints.generate_hint(
+    return JSONResponse(hints.generate_hint(start, step, difficulty))


-@app.post("/
-async def
-    """Generate a
+@app.post("/question")
+async def ask_math_question(request: Request):
+    """Generate a question data

     Input
-    request.body: json - level
     {
+        'start': 5,
+        'step': 1,
+        'question_num': 1  # optional
     }

     Output
-    context: dict - the information for the current state
     {
+        'question': 'What is 1+2?',
+        'start': 5,
+        'step': 1,
+        'answer': 6
     }
     """
     data_dict = await request.json()
     message_data = ast.literal_eval(data_dict.get('message_data', '').get('message_body', ''))
+    start = message_data['start']
+    step = message_data['step']
+    arg_tuple = (start, step)
+    try:
+        question_num = message_data['question_num']
+        arg_tuple += (question_num,)
+    except KeyError:
+        pass

-    return JSONResponse(questions.generate_question_data(
+    return JSONResponse(questions.generate_question_data(*arg_tuple))


-@app.post("/
-async def
-    """Generate
+@app.post("/difficulty")
+async def get_hint(request: Request):
+    """Generate a number matching difficulty

     Input
-    request.body: json - level
     {
+        'difficulty': 0.01,
+        'do_increase': True
     }

-    Output
-    {
-        "current_number": 10,
-        "ordinal_number": 2,
-        "times": 1
-    }
+    Output - value from 0.01 to 0.99 inclusively:
+        0.09
     """
     data_dict = await request.json()
     message_data = ast.literal_eval(data_dict.get('message_data', '').get('message_body', ''))
+    difficulty = message_data['difficulty']
+    do_increase = message_data['do_increase']
+
+    return JSONResponse(utils.get_next_difficulty(difficulty, do_increase))


-@app.post("/
-async def
-    """Generate a
+@app.post("/start_step")
+async def get_hint(request: Request):
+    """Generate a start and step values

     Input
-    request.body: json - level
     {
-        "times": 1
+        'difficulty': 0.01,
+        'path_to_csv_file': 'scripts/quiz/data.csv'  # optional
     }

-    Output
-        ... 1 2 3
-        1 2 3 ...
+    Output - tuple (start, step):
+        (5, 1)
     """
     data_dict = await request.json()
     message_data = ast.literal_eval(data_dict.get('message_data', '').get('message_body', ''))
+    difficulty = message_data['difficulty']
+    arg_tuple = (difficulty,)
+    try:
+        path_to_csv_file = message_data['path_to_csv_file']
+        arg_tuple += (path_to_csv_file,)
+    except KeyError:
+        pass

+    return JSONResponse(utils.get_next_difficulty(*arg_tuple))


+@app.post("/sequence")
+async def generate_question(request: Request):
+    """Generate a sequence from start, step and optional separator parameter

     Input
-    request.body: json - level
     {
+        'start': 5,
+        'step': 1,
+        'sep': ', '  # optional
     }

     Output
+        5, 6, 7
     """
     data_dict = await request.json()
     message_data = ast.literal_eval(data_dict.get('message_data', '').get('message_body', ''))
+    start = message_data['start']
+    step = message_data['step']
+    arg_tuple = (start, step)
+    try:
+        sep = message_data['sep']
+        arg_tuple += (sep,)
+    except KeyError:
+        pass
+
+    return JSONResponse(utils.convert_sequence_to_string(*arg_tuple))
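A quick way to exercise the new logging and payload handling on the /nlu endpoint is a minimal sketch that mirrors scripts/make_request.py. The local URL/port and the sample field values here are assumptions, and the printed result is only indicative of the response shape documented above.

import requests

# Assumes the app is running locally, e.g. `uvicorn app:app --host localhost --port 7860`.
# The field names follow the payload expected by payload_is_valid() in mathtext_fastapi/nlu.py;
# the concrete values are made up for illustration.
message_data = {
    "author_id": "+5555555",
    "author_type": "OWNER",
    "contact_uuid": "j43hk26-2hjl-43jk-hnk2-k4ljl46j0ds09",
    "message_body": "thirty one",
    "message_direction": "inbound",
    "message_id": "ABJAK64jlk3-agjkl2QHFAFH",
    "message_inserted_at": "2022-07-05T04:00:34.03352Z",
    "message_updated_at": "2023-04-06T10:08:23.745072Z",
}
response = requests.post(
    url="http://localhost:7860/nlu",
    json={"message_data": message_data},
)
print(response.json())  # expected to look like {'type': 'integer', 'data': ..., 'confidence': ...}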
mathtext_fastapi/conversation_manager.py
CHANGED
@@ -14,8 +14,8 @@ from mathtext_fastapi.math_subtraction_fsm import MathSubtractionFSM
 from supabase import create_client
 from transitions import Machine

-from
-from
+from mathactive.generators import start_interactive_math
+from mathactive.hints import generate_hint

 load_dotenv()

@@ -39,6 +39,7 @@ def create_text_message(message_text, whatsapp_id):
         "preview_url": False,
         "recipient_type": "individual",
         "to": whatsapp_id,
+        # FIXME: Better to use "message_type" (but be careful with refactor)
         "type": "text",
         "text": {
             "body": message_text
@@ -136,6 +137,9 @@ def manage_math_quiz_fsm(user_message, contact_uuid, type):

     # Make a completely new entry
     if fsm_check.data == []:
+        # FIXME: Try not to use the Python reserved keyword `type` as a variable name
+        # It's better to use `kind` or `convo_type` or `convo_name`
+        # And the variable `type` is not defined here so I don't understand how this is working at all.
         if type == 'addition':
             math_quiz_state_machine = MathQuizFSM()
         else:
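The FIXME comments above suggest replacing the shadowed builtin name `type`. A minimal sketch of what that rename could look like (an assumption about a possible follow-up, not code from this commit; the helper name and parameters are hypothetical):

# Hypothetical follow-up sketch: `convo_type` replaces the shadowed builtin `type`.
def choose_quiz_fsm(convo_type, addition_fsm_cls, subtraction_fsm_cls):
    """Return a fresh FSM instance for the requested microlesson kind ('addition' or 'subtraction')."""
    if convo_type == 'addition':
        return addition_fsm_cls()
    return subtraction_fsm_cls()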
mathtext_fastapi/curriculum_mapper.py
ADDED
@@ -0,0 +1,183 @@
import numpy as np
import pandas as pd
import re

from pathlib import Path


def read_and_preprocess_spreadsheet(file_name):
    """ Creates a pandas dataframe from the curriculum overview spreadsheet """
    DATA_DIR = Path(__file__).parent.parent / "mathtext_fastapi" / "data" / file_name
    script_df = pd.read_excel(DATA_DIR, engine='openpyxl')
    # Ensures the grade level columns are integers instead of floats
    script_df.columns = script_df.columns[:2].tolist() + script_df.columns[2:11].astype(int).astype(str).tolist() + script_df.columns[11:].tolist()
    script_df.fillna('', inplace=True)
    return script_df


def extract_skill_code(skill):
    """ Looks within a curricular skill description for its descriptive code

    Input
    - skill: str - a brief description of a curricular skill

    >>> extract_skill_code('A3.3.4 - Solve inequalities')
    'A3.3.4'
    >>> extract_skill_code('A3.3.2 - Graph linear equations, and identify the x- and y-intercepts or the slope of a line')
    'A3.3.2'
    """
    pattern = r'[A-Z][0-9]\.\d+\.\d+'
    result = re.search(pattern, skill)
    return result.group()


def build_horizontal_transitions(script_df):
    """ Build a list of transitional relationships within a curricular skill

    Inputs
    - script_df: pandas dataframe - an overview of the curriculum skills by grade level

    Output
    - horizontal_transitions: array of arrays - transition data with label, from state, and to state

    >>> script_df = read_and_preprocess_spreadsheet('curriculum_framework_for_tests.xlsx')
    >>> build_horizontal_transitions(script_df)
    [['right', 'N1.1.1_G1', 'N1.1.1_G2'], ['right', 'N1.1.1_G2', 'N1.1.1_G3'], ['right', 'N1.1.1_G3', 'N1.1.1_G4'], ['right', 'N1.1.1_G4', 'N1.1.1_G5'], ['right', 'N1.1.1_G5', 'N1.1.1_G6'], ['left', 'N1.1.1_G6', 'N1.1.1_G5'], ['left', 'N1.1.1_G5', 'N1.1.1_G4'], ['left', 'N1.1.1_G4', 'N1.1.1_G3'], ['left', 'N1.1.1_G3', 'N1.1.1_G2'], ['left', 'N1.1.1_G2', 'N1.1.1_G1'], ['right', 'N1.1.2_G1', 'N1.1.2_G2'], ['right', 'N1.1.2_G2', 'N1.1.2_G3'], ['right', 'N1.1.2_G3', 'N1.1.2_G4'], ['right', 'N1.1.2_G4', 'N1.1.2_G5'], ['right', 'N1.1.2_G5', 'N1.1.2_G6'], ['left', 'N1.1.2_G6', 'N1.1.2_G5'], ['left', 'N1.1.2_G5', 'N1.1.2_G4'], ['left', 'N1.1.2_G4', 'N1.1.2_G3'], ['left', 'N1.1.2_G3', 'N1.1.2_G2'], ['left', 'N1.1.2_G2', 'N1.1.2_G1']]
    """
    horizontal_transitions = []
    for index, row in script_df.iterrows():
        skill_code = extract_skill_code(row['Knowledge or Skill'])

        rightward_matches = []
        for i in range(9):
            # Grade column
            current_grade = i+1
            if row[current_grade].lower().strip() == 'x':
                rightward_matches.append(i)

        for match in rightward_matches:
            if rightward_matches[-1] != match:
                horizontal_transitions.append([
                    "right",
                    f"{skill_code}_G{match}",
                    f"{skill_code}_G{match+1}"
                ])

        leftward_matches = []
        for i in reversed(range(9)):
            current_grade = i
            if row[current_grade].lower().strip() == 'x':
                leftward_matches.append(i)

        for match in leftward_matches:
            if leftward_matches[0] != match:
                horizontal_transitions.append([
                    "left",
                    f"{skill_code}_G{match}",
                    f"{skill_code}_G{match-1}"
                ])

    return horizontal_transitions


def gather_all_vertical_matches(script_df):
    """ Build a list of transitional relationships within a grade level across skills

    Inputs
    - script_df: pandas dataframe - an overview of the curriculum skills by grade level

    Output
    - all_matches: array of arrays - represents skills at each grade level

    >>> script_df = read_and_preprocess_spreadsheet('curriculum_framework_for_tests.xlsx')
    >>> gather_all_vertical_matches(script_df)
    [['N1.1.1', '1'], ['N1.1.2', '1'], ['N1.1.1', '2'], ['N1.1.2', '2'], ['N1.1.1', '3'], ['N1.1.2', '3'], ['N1.1.1', '4'], ['N1.1.2', '4'], ['N1.1.1', '5'], ['N1.1.2', '5'], ['N1.1.1', '6'], ['N1.1.2', '6']]
    """
    all_matches = []
    columns = ['1', '2', '3', '4', '5', '6', '7', '8', '9']

    for column in columns:
        for index, value in script_df[column].iteritems():
            row_num = index + 1
            if value == 'x':
                # Extract skill code
                skill_code = extract_skill_code(
                    script_df['Knowledge or Skill'][row_num-1]
                )

                all_matches.append([skill_code, column])
    return all_matches


def build_vertical_transitions(script_df):
    """ Build a list of transitional relationships within a grade level across skills

    Inputs
    - script_df: pandas dataframe - an overview of the curriculum skills by grade level

    Output
    - vertical_transitions: array of arrays - transition data with label, from state, and to state

    >>> script_df = read_and_preprocess_spreadsheet('curriculum_framework_for_tests.xlsx')
    >>> build_vertical_transitions(script_df)
    [['down', 'N1.1.1_G1', 'N1.1.2_G1'], ['down', 'N1.1.2_G1', 'N1.1.1_G1'], ['down', 'N1.1.1_G2', 'N1.1.2_G2'], ['down', 'N1.1.2_G2', 'N1.1.1_G2'], ['down', 'N1.1.1_G3', 'N1.1.2_G3'], ['down', 'N1.1.2_G3', 'N1.1.1_G3'], ['down', 'N1.1.1_G4', 'N1.1.2_G4'], ['down', 'N1.1.2_G4', 'N1.1.1_G4'], ['down', 'N1.1.1_G5', 'N1.1.2_G5'], ['down', 'N1.1.2_G5', 'N1.1.1_G5'], ['down', 'N1.1.1_G6', 'N1.1.2_G6'], ['up', 'N1.1.2_G6', 'N1.1.1_G6'], ['up', 'N1.1.1_G6', 'N1.1.2_G6'], ['up', 'N1.1.2_G5', 'N1.1.1_G5'], ['up', 'N1.1.1_G5', 'N1.1.2_G5'], ['up', 'N1.1.2_G4', 'N1.1.1_G4'], ['up', 'N1.1.1_G4', 'N1.1.2_G4'], ['up', 'N1.1.2_G3', 'N1.1.1_G3'], ['up', 'N1.1.1_G3', 'N1.1.2_G3'], ['up', 'N1.1.2_G2', 'N1.1.1_G2'], ['up', 'N1.1.1_G2', 'N1.1.2_G2'], ['up', 'N1.1.2_G1', 'N1.1.1_G1']]
    """
    vertical_transitions = []

    all_matches = gather_all_vertical_matches(script_df)

    # Downward
    for index, match in enumerate(all_matches):
        skill = match[0]
        row_num = match[1]
        if all_matches[-1] != match:
            vertical_transitions.append([
                "down",
                f"{skill}_G{row_num}",
                f"{all_matches[index+1][0]}_G{row_num}"
            ])

    # Upward
    for index, match in reversed(list(enumerate(all_matches))):
        skill = match[0]
        row_num = match[1]
        if all_matches[0] != match:
            vertical_transitions.append([
                "up",
                f"{skill}_G{row_num}",
                f"{all_matches[index-1][0]}_G{row_num}"
            ])

    return vertical_transitions


def build_all_states(all_transitions):
    """ Creates an array with all state labels for the curriculum

    Input
    - all_transitions: list of lists - all possible up, down, left, or right transitions in curriculum

    Output
    - all_states: list - a collection of state labels (skill code and grade number)

    >>> all_transitions = [['right', 'N1.1.1_G1', 'N1.1.1_G2'], ['right', 'N1.1.1_G2', 'N1.1.1_G3'], ['right', 'N1.1.1_G3', 'N1.1.1_G4'], ['right', 'N1.1.1_G4', 'N1.1.1_G5'], ['right', 'N1.1.1_G5', 'N1.1.1_G6'], ['left', 'N1.1.1_G6', 'N1.1.1_G5'], ['left', 'N1.1.1_G5', 'N1.1.1_G4'], ['left', 'N1.1.1_G4', 'N1.1.1_G3'], ['left', 'N1.1.1_G3', 'N1.1.1_G2'], ['left', 'N1.1.1_G2', 'N1.1.1_G1'], ['right', 'N1.1.2_G1', 'N1.1.2_G2'], ['right', 'N1.1.2_G2', 'N1.1.2_G3'], ['right', 'N1.1.2_G3', 'N1.1.2_G4'], ['right', 'N1.1.2_G4', 'N1.1.2_G5'], ['right', 'N1.1.2_G5', 'N1.1.2_G6'], ['left', 'N1.1.2_G6', 'N1.1.2_G5'], ['left', 'N1.1.2_G5', 'N1.1.2_G4'], ['left', 'N1.1.2_G4', 'N1.1.2_G3'], ['left', 'N1.1.2_G3', 'N1.1.2_G2'], ['left', 'N1.1.2_G2', 'N1.1.2_G1'], ['down', 'N1.1.1_G1', 'N1.1.2_G1'], ['down', 'N1.1.2_G1', 'N1.1.1_G1'], ['down', 'N1.1.1_G2', 'N1.1.2_G2'], ['down', 'N1.1.2_G2', 'N1.1.1_G2'], ['down', 'N1.1.1_G3', 'N1.1.2_G3'], ['down', 'N1.1.2_G3', 'N1.1.1_G3'], ['down', 'N1.1.1_G4', 'N1.1.2_G4'], ['down', 'N1.1.2_G4', 'N1.1.1_G4'], ['down', 'N1.1.1_G5', 'N1.1.2_G5'], ['down', 'N1.1.2_G5', 'N1.1.1_G5'], ['down', 'N1.1.1_G6', 'N1.1.2_G6'], ['up', 'N1.1.2_G6', 'N1.1.1_G6'], ['up', 'N1.1.1_G6', 'N1.1.2_G6'], ['up', 'N1.1.2_G5', 'N1.1.1_G5'], ['up', 'N1.1.1_G5', 'N1.1.2_G5'], ['up', 'N1.1.2_G4', 'N1.1.1_G4'], ['up', 'N1.1.1_G4', 'N1.1.2_G4'], ['up', 'N1.1.2_G3', 'N1.1.1_G3'], ['up', 'N1.1.1_G3', 'N1.1.2_G3'], ['up', 'N1.1.2_G2', 'N1.1.1_G2'], ['up', 'N1.1.1_G2', 'N1.1.2_G2'], ['up', 'N1.1.2_G1', 'N1.1.1_G1']]
    >>> build_all_states(all_transitions)
    ['N1.1.1_G1', 'N1.1.1_G2', 'N1.1.1_G3', 'N1.1.1_G4', 'N1.1.1_G5', 'N1.1.1_G6', 'N1.1.2_G1', 'N1.1.2_G2', 'N1.1.2_G3', 'N1.1.2_G4', 'N1.1.2_G5', 'N1.1.2_G6']
    """
    all_states = []
    for transition in all_transitions:
        for index, state in enumerate(transition):
            if index == 0:
                continue
            if state not in all_states:
                all_states.append(state)
    return all_states


def build_curriculum_logic():
    script_df = read_and_preprocess_spreadsheet('Rori_Framework_v1.xlsx')
    horizontal_transitions = build_horizontal_transitions(script_df)
    vertical_transitions = build_vertical_transitions(script_df)
    all_transitions = horizontal_transitions + vertical_transitions
    all_states = build_all_states(all_transitions)
    return all_states, all_transitions
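A minimal usage sketch for the new module, assuming the bundled spreadsheet is present in mathtext_fastapi/data/ (the printed values are illustrative and follow the doctests above):

from mathtext_fastapi.curriculum_mapper import build_curriculum_logic

# Builds the full state and transition lists from Rori_Framework_v1.xlsx
all_states, all_transitions = build_curriculum_logic()
print(all_states[:3])      # e.g. ['N1.1.1_G1', 'N1.1.1_G2', 'N1.1.1_G3']
print(all_transitions[0])  # e.g. ['right', 'N1.1.1_G1', 'N1.1.1_G2']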
mathtext_fastapi/data/Rori_Framework_v1.xlsx
ADDED
Binary file (420 kB).

mathtext_fastapi/data/curriculum_framework_for_tests.xlsx
ADDED
Binary file (510 kB).
mathtext_fastapi/data/text2int_results.csv
CHANGED
@@ -20,10 +20,10 @@ eight oh,80.0,8.0,False
 eighty,80.0,80.0,True
 ate,8.0,1.0,False
 double eight,88.0,8.0,False
-eight three seven five three O nine,8375309.0,
+eight three seven five three O nine,8375309.0,8375319.0,False
 eight three seven five three oh nine,8375309.0,8375309.0,True
 eight three seven five three zero nine,8375309.0,8375309.0,True
-eight three seven five three oh ni-ee-ine,8375309.0,
+eight three seven five three oh ni-ee-ine,8375309.0,837530111.0,False
 two eight,28.0,16.0,False
 seven oh eleven,7011.0,77.0,False
 seven elevens,77.0,77.0,True
@@ -31,10 +31,10 @@ seven eleven,711.0,77.0,False
 ninety nine oh five,9905.0,149.0,False
 seven 0 seven 0 seven 0 seven,7070707.0,7070707.0,True
 123 hundred,123000.0,223.0,False
-5 o 5,505.0,
-15 o 5,1505.0,
-15-o 5,1505.0,
-15 o-5,1505.0,
+5 o 5,505.0,515.0,False
+15 o 5,1505.0,21.0,False
+15-o 5,1505.0,21.0,False
+15 o-5,1505.0,21.0,False
 911-thousand,911000.0,911000.0,True
 twenty-two twenty-two,2222.0,44.0,False
 twenty-two twenty-twos,484.0,44.0,False
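The columns appear to be the input text, the expected value, the value text2int actually returned, and whether they match. A quick spot check of one passing row, assuming the mathtext package from requirements.txt is installed:

from mathtext.text2int import text2int  # provided by the mathtext dependency

# The row above expects 8375309.0 for the 'oh nine' spelling of this number
print(text2int('eight three seven five three oh nine'))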
mathtext_fastapi/global_state_manager.py
ADDED
@@ -0,0 +1,23 @@
from transitions import Machine
from mathtext_fastapi.curriculum_mapper import build_curriculum_logic

all_states, all_transitions = build_curriculum_logic()

class GlobalStateManager(object):
    states = all_states

    transitions = all_transitions

    def __init__(
        self,
        initial_state='N1.1.1_G1',
    ):
        self.machine = Machine(
            model=self,
            states=GlobalStateManager.states,
            transitions=GlobalStateManager.transitions,
            initial=initial_state
        )


curriculum = GlobalStateManager()
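A small usage sketch, assuming the curriculum spreadsheet is available so build_curriculum_logic() can run at import time. The trigger names come from the transition labels built by the curriculum mapper ('right'/'left' move across grades within a skill, 'up'/'down' move across skills within a grade), which is how v2_conversation_manager uses them:

from mathtext_fastapi.global_state_manager import GlobalStateManager

gsm = GlobalStateManager(initial_state='N1.1.1_G1')
print(gsm.state)  # 'N1.1.1_G1'
gsm.right()       # advance one grade within the same skill
print(gsm.state)  # 'N1.1.1_G2'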
mathtext_fastapi/nlu.py
CHANGED
@@ -1,15 +1,32 @@
+from collections.abc import Mapping
+from logging import getLogger
+import datetime as dt
+from dateutil.parser import isoparse
+
 from fuzzywuzzy import fuzz
+from mathtext_fastapi.intent_classification import predict_message_intent
 from mathtext_fastapi.logging import prepare_message_data_for_logging
 from mathtext.sentiment import sentiment
-from mathtext.text2int import text2int
-
-
+from mathtext.text2int import text2int, TOKENS2INT_ERROR_INT
+
+log = getLogger(__name__)
+
+PAYLOAD_VALUE_TYPES = {
+    'author_id': str,
+    'author_type': str,
+    'contact_uuid': str,
+    'message_body': str,
+    'message_direction': str,
+    'message_id': str,
+    'message_inserted_at': str,
+    'message_updated_at': str,
+}


-def build_nlu_response_object(type, data, confidence):
+def build_nlu_response_object(nlu_type, data, confidence):
     """ Turns nlu results into an object to send back to Turn.io
     Inputs
+    - nlu_type: str - the type of nlu run (integer or sentiment-analysis)
     - data: str/int - the student message
     - confidence: - the nlu confidence score (sentiment) or '' (integer)

@@ -19,7 +36,11 @@ def build_nlu_response_object(type, data, confidence):
     >>> build_nlu_response_object('sentiment', 'POSITIVE', 0.99)
     {'type': 'sentiment', 'data': 'POSITIVE', 'confidence': 0.99}
     """
-    return {
+    return {
+        'type': nlu_type,
+        'data': data,
+        'confidence': confidence
+    }


 # def test_for_float_or_int(message_data, message_text):
@@ -107,6 +128,16 @@ def run_intent_classification(message_text):
         'hint',
         'next',
         'stop',
+        'tired',
+        'tomorrow',
+        'finished',
+        'help',
+        'please',
+        'understand',
+        'question',
+        'easier',
+        'easy',
+        'support'
     ]

     for command in commands:
@@ -121,6 +152,70 @@ def run_intent_classification(message_text):
         return nlu_response


+def payload_is_valid(payload_object):
+    """
+    >>> payload_is_valid({'author_id': '+5555555', 'author_type': 'OWNER', 'contact_uuid': '3246-43ad-faf7qw-zsdhg-dgGdg', 'message_body': 'thirty one', 'message_direction': 'inbound', 'message_id': 'SDFGGwafada-DFASHA4aDGA', 'message_inserted_at': '2022-07-05T04:00:34.03352Z', 'message_updated_at': '2023-04-06T10:08:23.745072Z'})
+    True
+
+    >>> payload_is_valid({"author_id": "@event.message._vnd.v1.chat.owner", "author_type": "@event.message._vnd.v1.author.type", "contact_uuid": "@event.message._vnd.v1.chat.contact_uuid", "message_body": "@event.message.text.body", "message_direction": "@event.message._vnd.v1.direction", "message_id": "@event.message.id", "message_inserted_at": "@event.message._vnd.v1.chat.inserted_at", "message_updated_at": "@event.message._vnd.v1.chat.updated_at"})
+    False
+    """
+    try:
+        isinstance(
+            isoparse(payload_object.get('message_inserted_at','')),
+            dt.datetime
+        )
+        isinstance(
+            isoparse(payload_object.get('message_updated_at','')),
+            dt.datetime
+        )
+    except ValueError:
+        return False
+    return (
+        isinstance(payload_object, Mapping) and
+        isinstance(payload_object.get('author_id'), str) and
+        isinstance(payload_object.get('author_type'), str) and
+        isinstance(payload_object.get('contact_uuid'), str) and
+        isinstance(payload_object.get('message_body'), str) and
+        isinstance(payload_object.get('message_direction'), str) and
+        isinstance(payload_object.get('message_id'), str) and
+        isinstance(payload_object.get('message_inserted_at'), str) and
+        isinstance(payload_object.get('message_updated_at'), str)
+    )
+
+
+def log_payload_errors(payload_object):
+    errors = []
+    try:
+        assert isinstance(payload_object, Mapping)
+    except Exception as e:
+        log.error(f'Invalid HTTP request payload object: {e}')
+        errors.append(e)
+    for k, typ in PAYLOAD_VALUE_TYPES.items():
+        try:
+            assert isinstance(payload_object.get(k), typ)
+        except Exception as e:
+            log.error(f'Invalid HTTP request payload object: {e}')
+            errors.append(e)
+    try:
+        assert isinstance(
+            dt.datetime.fromisoformat(payload_object.get('message_inserted_at')),
+            dt.datetime
+        )
+    except Exception as e:
+        log.error(f'Invalid HTTP request payload object: {e}')
+        errors.append(e)
+    try:
+        isinstance(
+            dt.datetime.fromisoformat(payload_object.get('message_updated_at')),
+            dt.datetime
+        )
+    except Exception as e:
+        log.error(f'Invalid HTTP request payload object: {e}')
+        errors.append(e)
+    return errors
+
+
 def evaluate_message_with_nlu(message_data):
     """ Process a student's message using NLU functions and send the result

@@ -131,20 +226,19 @@ def evaluate_message_with_nlu(message_data):
     {'type': 'sentiment', 'data': 'NEGATIVE', 'confidence': 0.9997807145118713}
     """
     # Keeps system working with two different inputs - full and filtered @event object
+    # Call validate payload
+    log.info(f'Starting evaluate message: {message_data}')
+
+    if not payload_is_valid(message_data):
+        log_payload_errors(message_data)
+        return {'type': 'error', 'data': TOKENS2INT_ERROR_INT, 'confidence': 0}
+
     try:
-        message_text = str(message_data
-    except
-            'contact_uuid': message_data['message']['_vnd']['v1']['chat']['contact_uuid'],
-            'message_body': message_data['message']['text']['body'],
-            'message_direction': message_data['message']['_vnd']['v1']['direction'],
-            'message_id': message_data['message']['id'],
-            'message_inserted_at': message_data['message']['_vnd']['v1']['chat']['inserted_at'],
-            'message_updated_at': message_data['message']['_vnd']['v1']['chat']['updated_at'],
-        }
-        message_text = str(message_data['message_body'])
+        message_text = str(message_data.get('message_body', ''))
+    except:
+        log.error(f'Invalid request payload: {message_data}')
+        # use python logging system to do this//
+        return {'type': 'error', 'data': TOKENS2INT_ERROR_INT, 'confidence': 0}

     # Run intent classification only for keywords
     intent_api_response = run_intent_classification(message_text)
@@ -154,7 +248,7 @@ def evaluate_message_with_nlu(message_data):

     number_api_resp = text2int(message_text.lower())

-    if number_api_resp ==
+    if number_api_resp == TOKENS2INT_ERROR_INT:
         # Run intent classification with logistic regression model
         predicted_label = predict_message_intent(message_text)
         if predicted_label['confidence'] > 0.01:
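A minimal sketch of the new validation path, assuming the mathtext_fastapi package and its dependencies are installed. A payload missing required keys fails payload_is_valid() and makes evaluate_message_with_nlu() short-circuit to the error object instead of raising:

from mathtext_fastapi.nlu import evaluate_message_with_nlu, payload_is_valid

# Hypothetical malformed payload: only one of the eight required string fields is present.
bad_payload = {'message_body': 'thirty one'}
print(payload_is_valid(bad_payload))           # False
print(evaluate_message_with_nlu(bad_payload))  # {'type': 'error', 'data': TOKENS2INT_ERROR_INT, 'confidence': 0}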
mathtext_fastapi/v2_conversation_manager.py
ADDED
@@ -0,0 +1,271 @@
import base64
import copy
import dill
import os
import json
import jsonpickle
import pickle
import random
import requests
import mathtext_fastapi.global_state_manager as gsm

from dotenv import load_dotenv
from mathtext_fastapi.nlu import evaluate_message_with_nlu
from mathtext_fastapi.math_quiz_fsm import MathQuizFSM
from mathtext_fastapi.math_subtraction_fsm import MathSubtractionFSM
from supabase import create_client
from transitions import Machine

from mathactive.generators import start_interactive_math
from mathactive.hints import generate_hint
from mathactive.microlessons import num_one

load_dotenv()

SUPA = create_client(
    os.environ.get('SUPABASE_URL'),
    os.environ.get('SUPABASE_KEY')
)


def pickle_and_encode_state_machine(state_machine):
    dump = pickle.dumps(state_machine)
    dump_encoded = base64.b64encode(dump).decode('utf-8')
    return dump_encoded


def manage_math_quiz_fsm(user_message, contact_uuid, type):
    fsm_check = SUPA.table('state_machines').select("*").eq(
        "contact_uuid",
        contact_uuid
    ).execute()

    # This doesn't allow for when one FSM is present and the other is empty
    """
    1
    data=[] count=None

    2
    data=[{'id': 29, 'contact_uuid': 'j43hk26-2hjl-43jk-hnk2-k4ljl46j0ds09', 'addition3': None, 'subtraction': None, 'addition':

    - but problem is there is no subtraction , but it's assuming there's a subtration

    Cases
    - make a completely new record
    - update an existing record with an existing FSM
    - update an existing record without an existing FSM
    """
    print("MATH QUIZ FSM ACTIVITY")
    print("user_message")
    print(user_message)
    # Make a completely new entry
    if fsm_check.data == []:
        if type == 'addition':
            math_quiz_state_machine = MathQuizFSM()
        else:
            math_quiz_state_machine = MathSubtractionFSM()
        messages = [math_quiz_state_machine.response_text]
        dump_encoded = pickle_and_encode_state_machine(math_quiz_state_machine)

        SUPA.table('state_machines').insert({
            'contact_uuid': contact_uuid,
            f'{type}': dump_encoded
        }).execute()
    # Update an existing record with a new state machine
    elif not fsm_check.data[0][type]:
        if type == 'addition':
            math_quiz_state_machine = MathQuizFSM()
        else:
            math_quiz_state_machine = MathSubtractionFSM()
        messages = [math_quiz_state_machine.response_text]
        dump_encoded = pickle_and_encode_state_machine(math_quiz_state_machine)

        SUPA.table('state_machines').update({
            f'{type}': dump_encoded
        }).eq(
            "contact_uuid", contact_uuid
        ).execute()
    # Update an existing record with an existing state machine
    elif fsm_check.data[0][type]:
        undump_encoded = base64.b64decode(
            fsm_check.data[0][type].encode('utf-8')
        )
        math_quiz_state_machine = pickle.loads(undump_encoded)

        math_quiz_state_machine.student_answer = user_message
        math_quiz_state_machine.correct_answer = str(math_quiz_state_machine.correct_answer)
        messages = math_quiz_state_machine.validate_answer()
        dump_encoded = pickle_and_encode_state_machine(math_quiz_state_machine)
        SUPA.table('state_machines').update({
            f'{type}': dump_encoded
        }).eq(
            "contact_uuid", contact_uuid
        ).execute()
    return messages


def retrieve_microlesson_content(context_data, user_message, microlesson, contact_uuid):
    # TODO: This is being filtered by both the local and global states, so not changing
    if microlesson == 'addition':
        messages = manage_math_quiz_fsm(user_message, contact_uuid, 'addition')

        if user_message == 'exit':
            state_label = 'exit'
        else:
            state_label = 'addition-question-sequence'

        input_prompt = messages.pop()
        message_package = {
            'messages': messages,
            'input_prompt': input_prompt,
            'state': state_label
        }
    elif context_data['local_state'] == 'addition2' or microlesson == 'addition2':
        if user_message == 'harder' or user_message == 'easier':
            user_message = ''
        message_package = num_one.process_user_message(contact_uuid, user_message)
        message_package['state'] = 'addition2'
        message_package['input_prompt'] = '?'

    elif context_data['local_state'] == 'subtraction-question-sequence' or \
        user_message == 'subtract' or \
        microlesson == 'subtraction':
        messages = manage_math_quiz_fsm(user_message, contact_uuid, 'subtraction')

        if user_message == 'exit':
            state_label = 'exit'
        else:
            state_label = 'subtraction-question-sequence'

        input_prompt = messages.pop()

        message_package = {
            'messages': messages,
            'input_prompt': input_prompt,
            'state': state_label
        }
    print("MICROLESSON CONTENT RESPONSE")
    print(message_package)
    return message_package


curriculum_lookup_table = {
    'N1.1.1_G1': 'addition',
    'N1.1.1_G2': 'addition2',
    'N1.1.2_G1': 'subtraction'
}


def lookup_local_state(next_state):
    microlesson = curriculum_lookup_table[next_state]
    return microlesson


def create_text_message(message_text, whatsapp_id):
    """ Fills a template with input values to send a text message to Whatsapp

    Inputs
    - message_text: str - the content that the message should display
    - whatsapp_id: str - the message recipient's phone number

    Outputs
    - message_data: dict - a preformatted template filled with inputs
    """
    message_data = {
        "preview_url": False,
        "recipient_type": "individual",
        "to": whatsapp_id,
        "type": "text",
        "text": {
            "body": message_text
        }
    }
    return message_data


def manage_conversation_response(data_json):
    """ Calls functions necessary to determine message and context data """
    print("V2 ENDPOINT")

    # whatsapp_id = data_json['author_id']
    message_data = data_json['message_data']
    context_data = data_json['context_data']
    whatsapp_id = message_data['author_id']
    user_message = message_data['message_body']
    print("MESSAGE DATA")
    print(message_data)
    print("CONTEXT DATA")
    print(context_data)
    print("=================")

    # nlu_response = evaluate_message_with_nlu(message_data)

    # context_data = {
    #     'contact_uuid': 'abcdefg',
    #     'current_state': 'N1.1.1_G2',
    #     'user_message': '1',
    #     'local_state': ''
    # }
    print("STEP 1")
    print(data_json)
    print(f"1: {context_data['current_state']}")
    if not context_data['current_state']:
        context_data['current_state'] = 'N1.1.1_G1'
    print(f"2: {context_data['current_state']}")

    curriculum_copy = copy.deepcopy(gsm.curriculum)
    curriculum_copy.state = context_data['current_state']
    print("STEP 2")
    if user_message == 'easier':
        curriculum_copy.left()
        next_state = curriculum_copy.state
    elif user_message == 'harder':
        curriculum_copy.right()
        next_state = curriculum_copy.state
    else:
        next_state = context_data['current_state']
    print("next_state")
    print(next_state)

    print("STEP 3")
    microlesson = lookup_local_state(next_state)

    print("microlesson")
    print(microlesson)

    microlesson_content = retrieve_microlesson_content(context_data, user_message, microlesson, context_data['contact_uuid'])

    headers = {
        'Authorization': f"Bearer {os.environ.get('TURN_AUTHENTICATION_TOKEN')}",
        'Content-Type': 'application/json'
    }

    # Send all messages for the current state before a user input prompt (text/button input request)
    for message in microlesson_content['messages']:
        data = create_text_message(message, whatsapp_id)

        # print("data")
        # print(data)

        r = requests.post(
            f'https://whatsapp.turn.io/v1/messages',
            data=json.dumps(data),
            headers=headers
        )

    print("STEP 4")
    # combine microlesson content and context_data object

    updated_context = {
        "context": {
            "contact_id": whatsapp_id,
            "contact_uuid": context_data['contact_uuid'],
            "current_state": next_state,
            "local_state": microlesson_content['state'],
            "bot_message": microlesson_content['input_prompt'],
            "user_message": user_message,
            "type": 'ask'
        }
    }
    print(updated_context)
    return updated_context
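A standalone sketch of the persistence trick used by pickle_and_encode_state_machine() above: the state machine is pickled and base64-encoded into a text column in Supabase, then decoded and unpickled on the way back out. The Dummy class here is hypothetical, standing in for MathQuizFSM:

import base64
import pickle

class Dummy:
    # Stand-in for an FSM object with some state to preserve
    state = 'question'

# Store: pickle bytes -> base64 text (safe to keep in a text column)
encoded = base64.b64encode(pickle.dumps(Dummy())).decode('utf-8')

# Load: base64 text -> pickle bytes -> object
restored = pickle.loads(base64.b64decode(encoded.encode('utf-8')))
print(restored.state)  # 'question'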
pyproject.toml
CHANGED
@@ -20,14 +20,17 @@ classifiers = [


 [tool.poetry.dependencies]
+mathactive = {git = "git@gitlab.com:tangibleai/community/mathactive.git", rev = "vlad"}
 mathtext = {git = "https://gitlab.com/tangibleai/community/mathtext", rev = "main"}
-fastapi = "0.
+fastapi = "^0.90.0"
 pydantic = "*"
-python = "^3.8
-requests = "2.27.*"
+python = "^3.8"
+requests = "2.27.*"
 sentencepiece = "0.1.*"
 supabase = "*"
 uvicorn = "0.17.*"
+pandas = "^1.5.3"
+scipy = "^1.10.1"

 [tool.poetry.group.dev.dependencies]
 pytest = "^7.2"
requirements.txt
CHANGED
@@ -3,14 +3,17 @@ en-core-web-sm @ https://github.com/explosion/spacy-models/releases/download/en_
 fuzzywuzzy
 jsonpickle
 mathtext @ git+https://gitlab.com/tangibleai/community/mathtext@main
+mathactive @ git+https://gitlab.com/tangibleai/community/mathactive@main
+fastapi
+pydantic
+requests
+sentencepiece
+openpyxl
 python-Levenshtein
-requests==2.27.*
-sentencepiece==0.1.*
 sentence-transformers
 sentry-sdk[fastapi]
 supabase
 transitions
-uvicorn
+uvicorn
+pandas
+scipy
scripts/bump_version.py
ADDED
@@ -0,0 +1,36 @@
#!/usr/bin/env python
from pathlib import Path
import re
import shutil

BASE_DIR = Path(__file__).parent.parent
PYPROJECT_PATH = BASE_DIR / 'pyproject.toml'
PATTERN = re.compile(r'(version\s*=\s*)[\'"]?(\d(\.\d+)+)[\'"]?\s*')

if __name__ == '__main__':
    verline = None
    with PYPROJECT_PATH.open() as fin:
        lines = []
        verline = None
        for line in fin:
            lines.append(line)
            if verline:
                continue
            match = PATTERN.match(line)
            if match:
                print(f'Found match.groups(): {dict(list(enumerate(match.groups())))}')
                ver = [int(x) for x in match.groups()[1].split('.')]
                print(f'  Old ver: {ver}')
                ver[-1] += 1
                print(f'  New ver: {ver}')
                ver = '.'.join([str(x) for x in ver])
                print(f'  New ver str: {ver}')
                verline = f'version = "{ver}"\n'
                print(f'  New ver line: {verline}')
                lines[-1] = verline
                print(f'  New ver line: {lines[-1]}')

    if verline:
        shutil.copy(PYPROJECT_PATH, PYPROJECT_PATH.with_suffix('.toml.bak'))
        with PYPROJECT_PATH.open('w') as fout:
            fout.writelines(lines)
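Running `python scripts/bump_version.py` from the repository root increments the last component of the `version` field in pyproject.toml and writes a pyproject.toml.bak backup first; presumably it is intended to be run before tagging a release, though the script itself takes no arguments and the workflow around it is not documented in this commit.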
scripts/cleanpyc.sh
ADDED
@@ -0,0 +1,2 @@
#!usr/bin/env bash
find . | grep -E "(/__pycache__$|\.pyc$|\.pyo$)" | xargs rm -rf
scripts/make_request.py
CHANGED
@@ -22,7 +22,11 @@ def add_message_text_to_sample_object(message_text):
     message_data = '{' + f'"author_id": "+57787919091", "author_type": "OWNER", "contact_uuid": "j43hk26-2hjl-43jk-hnk2-k4ljl46j0ds09", "message_body": "{message_text}", "message_direction": "inbound", "message_id": "4kl209sd0-a7b8-2hj3-8563-3hu4a89b32", "message_inserted_at": "2023-01-10T02:37:28.477940Z", "message_updated_at": "2023-01-10T02:37:28.487319Z"' + '}'
     # context_data = '{' + '"user":"", "state":"addition-question-sequence", "bot_message":"", "user_message":"{message_text}"' + '}'

+    # V1
+    # context_data = '{' + '"user":"", "state":"start-conversation", "bot_message":"", "user_message":"{message_text}"' + '}'

     # context_data = '{' + '"user":"", "state":"addition-question-sequence", "bot_message":"", "user_message":"{message_text}","text": "What is 2+3?","question_numbers": [4,3],"right_answer": 7,"number_correct": 2, "number_incorrect": 0, "hints_used": 0, "level": "easy"' + '}'

@@ -40,14 +44,15 @@ def add_message_text_to_sample_object(message_text):
     # """


-def run_simulated_request(endpoint,
-    print(f"Case: {
+def run_simulated_request(endpoint, sample_answer, context=None):

     if endpoint == 'sentiment-analysis' or endpoint == 'text2int' or endpoint =='intent-classification':
         request = requests.post(
             url=f'http://localhost:7860/{endpoint}',
-            json={'content':
         ).json()
     else:
         request = requests.post(
@@ -58,54 +63,98 @@ def run_simulated_request(endpoint, sample_answer, context=None):
     print(request)


 # run_simulated_request('intent-classification', 'exit')
 # run_simulated_request('intent-classification', "I'm not sure")
 # run_simulated_request('sentiment-analysis', 'I reject it')
 # run_simulated_request('text2int', 'seven thousand nine hundred fifty seven')
-run_simulated_request('nlu', 'test message')
-run_simulated_request('nlu', 'eight')
-run_simulated_request('nlu', 'is it 8')
-run_simulated_request('nlu', 'can I know how its 0.5')
 run_simulated_request('nlu', 'eight, nine, ten')
 run_simulated_request('nlu', '8, 9, 10')
 run_simulated_request('nlu', '8')
 run_simulated_request('nlu', "I don't know")
 run_simulated_request('nlu', "I don't know eight")
 run_simulated_request('nlu', "I don't 9")
-run_simulated_request('nlu', "0.2")
-run_simulated_request('nlu', 'Today is a wonderful day')
-run_simulated_request('nlu', 'IDK 5?')
 # run_simulated_request('manager', '')
 # run_simulated_request('manager', 'add')
 # run_simulated_request('manager', 'subtract')
 # })
 # run_simulated_request("hint", {
-#     'number_incorrect': 0,
-#     'level': 'easy',
-#     'hints_used': 0
 # })
-# run_simulated_request("
 # })
 # })
-# run_simulated_request("
-#     "times": 1
 # })
-# run_simulated_request("
 # })
 # run_simulated_request('manager', 'exit')
+
# context_data = '{' + '"user":"", "state":"start-conversation", "bot_message":"", "user_message":"{message_text}"' + '}'
|
27 |
+
|
28 |
+
#V2
|
29 |
+
context_data = '{' + '"contact_uuid": "j43hk26-2hjl-43jk-hnk2-k4ljl46j0ds09", "current_state":"", "local_state": "", "user_message":""' + '}'
|
30 |
|
31 |
# context_data = '{' + '"user":"", "state":"addition-question-sequence", "bot_message":"", "user_message":"{message_text}","text": "What is 2+3?","question_numbers": [4,3],"right_answer": 7,"number_correct": 2, "number_incorrect": 0, "hints_used": 0, "level": "easy"' + '}'
|
32 |
|
|
|
44 |
# """
|
45 |
|
46 |
|
47 |
+
def run_simulated_request(endpoint, sample_payload, context=None):
|
48 |
+
print(f"Case: {sample_payload}")
|
49 |
+
# Used for testing full message object - deprecated April 7
|
50 |
+
b_string = add_message_text_to_sample_object(sample_payload)
|
51 |
|
52 |
if endpoint == 'sentiment-analysis' or endpoint == 'text2int' or endpoint =='intent-classification':
|
53 |
request = requests.post(
|
54 |
url=f'http://localhost:7860/{endpoint}',
|
55 |
+
json={'content': sample_payload}
|
56 |
).json()
|
57 |
else:
|
58 |
request = requests.post(
|
|
|
63 |
print(request)
|
64 |
|
65 |
|
66 |
+
def run_full_nlu_endpoint_payload_test(sample_payload):
|
67 |
+
request = requests.post(
|
68 |
+
url=f'http://localhost:7860/nlu',
|
69 |
+
data=sample_payload
|
70 |
+
).json()
|
71 |
+
print(request)
|
72 |
+
|
73 |
+
|
74 |
+
# # Case: Wrong key
|
75 |
+
run_full_nlu_endpoint_payload_test(b'{"message": {"author_id": "@event.message._vnd.v1.chat.owner", "author_type": "@event.message._vnd.v1.author.type", "contact_uuid": "@event.message._vnd.v1.chat.contact_uuid", "message_body": "@event.message.text.body", "message_direction": "@event.message._vnd.v1.direction", "message_id": "@event.message.id", "message_inserted_at": "@event.message._vnd.v1.chat.inserted_at", "message_updated_at": "@event.message._vnd.v1.chat.updated_at"}}')
|
76 |
+
|
77 |
+
# # Case: Correct payload
|
78 |
+
run_full_nlu_endpoint_payload_test(b'{"message_data": {"author_id": "57787919091", "author_type": "OWNER", "contact_uuid": "df78gsdf78df", "message_body": "8", "message_direction": "inbound", "message_id": "dfgha789789ag9ga", "message_inserted_at": "2023-01-10T02:37:28.487319Z", "message_updated_at": "2023-01-10T02:37:28.487319Z"}}')
|
79 |
+
|
80 |
+
# # Case: Incorrect payload values
|
81 |
+
run_full_nlu_endpoint_payload_test(b'{"message_data": {"author_id": "@event.message._vnd.v1.chat.owner", "author_type": "@event.message._vnd.v1.author.type", "contact_uuid": "@event.message._vnd.v1.chat.contact_uuid", "message_body": "@event.message.text.body", "message_direction": "@event.message._vnd.v1.direction", "message_id": "@event.message.id", "message_inserted_at": "@event.message._vnd.v1.chat.inserted_at", "message_updated_at": "@event.message._vnd.v1.chat.updated_at"}}')
|
82 |
+
|
83 |
+
# Case: Wrong payload object
|
84 |
+
run_full_nlu_endpoint_payload_test(b'{"message_data": {"_vnd": {"v1": {"author": {"id": 54327547257, "name": "Jin", "type": "OWNER"}, "card_uuid": None, "chat": {"assigned_to": None, "contact_uuid": "f7889-f78dfgb798-f786ah89g7-f78f9a", "inserted_at": "2023-03-28T13:21:47.581221Z", "owner": "+43789789146", "permalink": "", "state": "OPEN", "state_reason": "Re-opened by inbound message.", "unread_count": 97, "updated_at": "2023-04-07T21:05:27.389948Z", "uuid": "dfg9a78-d76a786dghas-78d9fga789g-a78d69a9"}, "direction": "inbound", "faq_uuid": None, "in_reply_to": None, "inserted_at": "2023-04-07T21:05:27.368580Z", "labels": [], "last_status": None, "last_status_timestamp": None, "on_fallback_channel": False, "rendered_content": None, "uuid": "hf78s7s89b-789fb68d9fg-789fb789dfb-f79sfb789"}}, "from": 5475248689, "id": "SBDE4zgAAy7887sfdT35SHFS", "text": {"body": 1000}, "timestamp": 1680901527, "type": "text"}, "type": "message"}')
|
85 |
+
|
86 |
+
|
87 |
# run_simulated_request('intent-classification', 'exit')
|
88 |
# run_simulated_request('intent-classification', "I'm not sure")
|
89 |
+
# run_simulated_request('intent-classification', "easier")
|
90 |
+
# run_simulated_request('intent-classification', "easy")
|
91 |
+
# run_simulated_request('intent-classification', "harder")
|
92 |
+
# run_simulated_request('intent-classification', "hard")
|
93 |
+
# run_simulated_request('intent-classification', "hint")
|
94 |
+
# run_simulated_request('intent-classification', "hin")
|
95 |
+
# run_simulated_request('intent-classification', "hnt")
|
96 |
+
# run_simulated_request('intent-classification', "stop")
|
97 |
+
# run_simulated_request('intent-classification', "stp")
|
98 |
+
# run_simulated_request('intent-classification', "sop")
|
99 |
+
# run_simulated_request('intent-classification', "please stop")
|
100 |
# run_simulated_request('sentiment-analysis', 'I reject it')
|
101 |
# run_simulated_request('text2int', 'seven thousand nine hundred fifty seven')
|
102 |
+
# run_simulated_request('nlu', 'test message')
|
103 |
+
# run_simulated_request('nlu', 'eight')
|
104 |
+
# run_simulated_request('nlu', 'is it 8')
|
105 |
+
# run_simulated_request('nlu', 'can I know how its 0.5')
|
106 |
run_simulated_request('nlu', 'eight, nine, ten')
|
107 |
run_simulated_request('nlu', '8, 9, 10')
|
108 |
run_simulated_request('nlu', '8')
|
109 |
run_simulated_request('nlu', "I don't know")
|
110 |
run_simulated_request('nlu', "I don't know eight")
|
111 |
run_simulated_request('nlu', "I don't 9")
|
112 |
+
# run_simulated_request('nlu', "0.2")
|
113 |
+
# run_simulated_request('nlu', 'Today is a wonderful day')
|
114 |
+
# run_simulated_request('nlu', 'IDK 5?')
|
115 |
+
# run_simulated_request('nlu', 'hin')
|
116 |
+
# run_simulated_request('nlu', 'exi')
|
117 |
+
# run_simulated_request('nlu', 'easier')
|
118 |
+
# run_simulated_request('nlu', 'stp')
|
119 |
+
# run_simulated_request('nlu', '')
|
120 |
# run_simulated_request('manager', '')
|
121 |
# run_simulated_request('manager', 'add')
|
122 |
# run_simulated_request('manager', 'subtract')
|
123 |
+
|
124 |
+
# run_simulated_request("start", {
|
125 |
+
# 'difficulty': 0.04,
|
126 |
+
# 'do_increase': True
|
127 |
# })
|
128 |
# run_simulated_request("hint", {
|
129 |
+
# 'start': 5,
|
130 |
+
# 'step': 1,
|
131 |
+
# 'difficulty': 0.56 # optional
|
|
|
|
|
|
|
132 |
# })
|
133 |
+
# run_simulated_request("question", {
|
134 |
+
# 'start': 2,
|
135 |
+
# 'step': 1,
|
136 |
+
# 'question_num': 2 # optional
|
137 |
+
# })
|
138 |
+
# run_simulated_request("difficulty", {
|
139 |
+
# 'difficulty': 0.01,
|
140 |
+
# 'do_increase': False # True | False
|
141 |
# })
|
142 |
+
# Need to start with this command to populate users.json
|
143 |
+
# If users.json is not already made
|
144 |
+
# run_simulated_request("num_one", {
|
145 |
+
# "user_id": "1",
|
146 |
+
# "message_text": "",
|
147 |
# })
|
148 |
+
# run_simulated_request("num_one", {
|
149 |
+
# "user_id": "1",
|
150 |
+
# "message_text": "61",
|
|
|
151 |
# })
|
152 |
+
# run_simulated_request("sequence", {
|
153 |
+
# 'start': 2,
|
154 |
+
# 'step': 1,
|
155 |
+
# 'sep': '... '
|
156 |
# })
|
157 |
+
|
158 |
# run_simulated_request('manager', 'exit')
|
159 |
|
160 |
|
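These calls assume the FastAPI app is already running locally on port 7860, the URL hard-coded in the script. For reference, the "Correct payload" case above boils down to a single POST; this stripped-down sketch sends the same JSON body through requests' json parameter instead of a raw byte string:

# Sketch: one /nlu request equivalent to the "Correct payload" case in scripts/make_request.py.
# Assumes the API is running locally before this is executed.
import requests

payload = {
    "message_data": {
        "author_id": "57787919091",
        "author_type": "OWNER",
        "contact_uuid": "df78gsdf78df",
        "message_body": "8",
        "message_direction": "inbound",
        "message_id": "dfgha789789ag9ga",
        "message_inserted_at": "2023-01-10T02:37:28.487319Z",
        "message_updated_at": "2023-01-10T02:37:28.487319Z",
    }
}
response = requests.post("http://localhost:7860/nlu", json=payload)
print(response.json())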
scripts/pin_requirements.py
ADDED
@@ -0,0 +1,62 @@
""" Parse requirements.txt and pyproject.toml and move versions to pyproject.toml """
from pathlib import Path
import re
import sys
import toml

def get_requirement_versions(path='requirements.txt'):
    """ Read requirements.txt file and return dict of package versions """
    path = Path(path or '')
    if path.is_dir():
        path = next(iter(path.glob('**/requirements.txt')))
    reqdict = {}
    text = Path(path).open().read()
    for line in text.splitlines():
        if line.strip():
            match = re.match(r'([-_a-zA-Z0-9]+)\s*([ >=<~^,.rabc0-9]+)\s*', line)
            if match:
                name, ver = match.groups()
                reqdict[name] = ver
    return reqdict


def normalize_name(name):
    return str(name).strip().replace('_', '-').replace(' ', '-').lower()


def pin_versions(pyproject='pyproject.toml', reqdict=None, overwrite=False):
    if not reqdict or isinstance(reqdict, (str, Path)):
        reqdict = get_requirement_versions(path=reqdict)
    reqdict = {
        normalize_name(k): v for (k, v) in
        reqdict.items()
    }

    pyproj = toml.load(pyproject)
    depdict = pyproj.get('tool', {}).get('poetry', {}).get('dependencies', {})
    depdict = {
        normalize_name(k): v for (k, v) in
        depdict.items()
    }

    for name, spec in reqdict.items():
        if name in depdict:
            ver = depdict[name]
            if isinstance(ver, str) and (overwrite or ver == '*'):
                depdict[name] = spec

    pyproj['tool']['poetry']['dependencies'] = depdict
    overwrite = overwrite or (input(f'Overwrite {pyproject}?')[0].lower() == 'y')
    if overwrite:
        with open(pyproject, 'w') as stream:
            toml.dump(pyproj, stream)
    return pyproj


if __name__ == '__main__':
    path = 'requirements.txt'
    if sys.argv[1:]:
        path = sys.argv[1]
    pyproj = pin_versions(reqdict=path)
    print(toml.dumps(pyproj))
ADDED
File without changes
|
scripts/quiz/data.csv
ADDED
@@ -0,0 +1,56 @@
|
difficulty,start
0.01,1
0.02,0
0.05,5
0.07,10
0.08,14
0.1,20
0.11,22
0.13,27
0.14,28
0.16,30
0.17,32
0.18,34
0.2,37
0.21,39
0.23,42
0.25,43
0.27,46
0.3,50
0.34,57
0.35,64
0.37,78
0.39,89
0.41,100
0.44,112
0.45,130
0.48,147
0.5,164
0.52,180
0.55,195
0.58,209
0.6,223
0.61,236
0.63,248
0.64,259
0.65,271
0.67,284
0.69,296
0.7,308
0.72,321
0.73,333
0.75,346
0.78,359
0.8,370
0.81,385
0.83,399
0.84,408
0.87,420
0.88,435
0.89,447
0.9,458
0.93,469
0.94,483
0.96,494
0.97,500
0.99,513
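The table pairs a difficulty score with a start value, presumably consumed by the quiz scripts when choosing where a counting question should begin; that interpretation is an assumption based only on the column names. A minimal lookup sketch with pandas:

# Sketch: find the 'start' value whose 'difficulty' is closest to a requested difficulty.
# The difficulty -> start interpretation is an assumption drawn from the CSV header.
import pandas as pd

df = pd.read_csv("scripts/quiz/data.csv")
difficulty = 0.56  # example query value
closest = df.loc[(df["difficulty"] - difficulty).abs().idxmin()]
print(int(closest["start"]))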
users.json
ADDED
@@ -0,0 +1 @@
{"1": {"skill_score": 0.04, "state": "question", "start": 1, "stop": 1, "step": 1, "answer": 2}}
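users.json stores per-user quiz state keyed by user id; the commented num_one calls in scripts/make_request.py note that it is populated through that endpoint. A minimal sketch of inspecting it follows; the meaning of the individual fields is inferred from the key names and may not match the quiz logic exactly:

# Sketch: read the persisted quiz state; field semantics are assumptions based on the key names.
import json
from pathlib import Path

users = json.loads(Path("users.json").read_text())
for user_id, state in users.items():
    print(user_id, state["state"], state["skill_score"], state["answer"])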