Greg Thompson committed
Commit b306076
2 Parent(s): 78274fe 74e75c0

Merge staging versions

app.py CHANGED
@@ -1,11 +1,10 @@
 """FastAPI endpoint
 To run locally use 'uvicorn app:app --host localhost --port 7860'
 """
 import ast
-import scripts.quiz.generators as generators
-import scripts.quiz.hints as hints
-import scripts.quiz.questions as questions
-import scripts.quiz.utils as utils
 import sentry_sdk

 from fastapi import FastAPI, Request
@@ -18,16 +17,22 @@ from pydantic import BaseModel

 from mathtext_fastapi.logging import prepare_message_data_for_logging
 from mathtext_fastapi.conversation_manager import manage_conversation_response
 from mathtext_fastapi.nlu import evaluate_message_with_nlu
 from mathtext_fastapi.nlu import run_intent_classification

 sentry_sdk.init(
-    dsn="https://143c9ac3f429452eb036deda0e4d5aef@o1297809.ingest.sentry.io/4504896688881664",

     # Set traces_sample_rate to 1.0 to capture 100%
     # of transactions for performance monitoring.
     # We recommend adjusting this value in production,
-    traces_sample_rate=0.20,
 )

 app = FastAPI()
@@ -71,7 +76,40 @@ def text2int_ep(content: Text = None):
     return JSONResponse(content=content)


-@app.post("/manager")
 async def programmatic_message_manager(request: Request):
     """
     Calls conversation management function to determine the next state
@@ -127,168 +165,188 @@ async def evaluate_user_message_with_nlu_api(request: Request):
     message_data = data_dict.get('message_data', '')
     nlu_response = evaluate_message_with_nlu(message_data)
     return JSONResponse(content=nlu_response)


-@app.post("/question")
 async def ask_math_question(request: Request):
-    """Generate a question and return it as response along with question data

     Input
-    request.body: json - amount of correct and incorrect answers in the account
     {
-        'number_correct': 0,
-        'number_incorrect': 0,
-        'level': 'easy'
     }

     Output
-    context: dict - the information for the current state
     {
         'text': 'What is 1+2?',
-        'question_numbers': [1,2,3], #3 numbers - current number, ordinal number, times
-        'right_answer': 3,
-        'number_correct': 0,
-        'number_incorrect': 0,
-        'hints_used': 0
     }
     """
     data_dict = await request.json()
     message_data = ast.literal_eval(data_dict.get('message_data', '').get('message_body', ''))
-    right_answers = message_data['number_correct']
-    wrong_answers = message_data['number_incorrect']
-    level = message_data['level']

-    return JSONResponse(generators.start_interactive_math(right_answers, wrong_answers, level))


 @app.post("/hint")
 async def get_hint(request: Request):
-    """Generate a hint and return it as response along with hint data

     Input
-    request.body:
     {
-        'question_numbers': [1,2,3], #3 numbers - current number, ordinal number, times
-        'right_answer': 3,
-        'number_correct': 0,
-        'number_incorrect': 0,
-        'level': 'easy',
-        'hints_used': 0
     }

     Output
-    context: dict - the information for the current state
     {
-        'text': 'What is 1+2?',
-        'question_numbers': [1,2,3], #2 or 3 numbers
-        'right_answer': 3,
-        'number_correct': 0,
-        'number_incorrect': 0,
-        'level': 'easy',
-        'hints_used': 0
     }
     """
     data_dict = await request.json()
     message_data = ast.literal_eval(data_dict.get('message_data', '').get('message_body', ''))
-    question_numbers = message_data['question_numbers']
-    right_answer = message_data['right_answer']
-    number_correct = message_data['number_correct']
-    number_incorrect = message_data['number_incorrect']
-    level = message_data['level']
-    hints_used = message_data['hints_used']

-    return JSONResponse(hints.generate_hint(question_numbers, right_answer, number_correct, number_incorrect, level, hints_used))


-@app.post("/generate_question")
-async def generate_question(request: Request):
-    """Generate a bare question and return it as response

     Input
-    request.body: json - level
     {
-        'level': 'easy'
     }

     Output
-    context: dict - the information for the current state
     {
-        "question": "Let's count up by 2s. What number is next if we start from 10?
-        6 8 10 ..."
     }
     """
     data_dict = await request.json()
     message_data = ast.literal_eval(data_dict.get('message_data', '').get('message_body', ''))
-    level = message_data['level']

-    return JSONResponse(questions.generate_question_data(level)['question'])


-@app.post("/numbers_by_level")
-async def get_numbers_by_level(request: Request):
-    """Generate three numbers and return them as response

     Input
-    request.body: json - level
     {
-        'level': 'easy'
     }

-    Output
-    context: dict - three generated numbers for specified level
-    {
-        "current_number": 10,
-        "ordinal_number": 2,
-        "times": 1
-    }
     """
     data_dict = await request.json()
     message_data = ast.literal_eval(data_dict.get('message_data', '').get('message_body', ''))
-    level = message_data['level']
-    return JSONResponse(questions.generate_numbers_by_level(level))


-@app.post("/number_sequence")
-async def get_number_sequence(request: Request):
-    """Generate a number sequence

     Input
-    request.body: json - level
     {
-        "current_number": 10,
-        "ordinal_number": 2,
-        "times": 1
     }

-    Output
-    one of following strings with (numbers differ):
-    ... 1 2 3
-    1 2 3 ...
     """
     data_dict = await request.json()
     message_data = ast.literal_eval(data_dict.get('message_data', '').get('message_body', ''))
-    cur_num = message_data['current_number']
-    ord_num = message_data['ordinal_number']
-    times = message_data['times']
-    return JSONResponse(questions.generate_number_sequence(cur_num, ord_num, times))


-@app.post("/level")
-async def get_next_level(request: Request):
-    """Depending on current level and desire to level up/down return next level

     Input
-    request.body: json - level
     {
-        "current_level": "easy",
-        "level_up": True
     }

     Output
-    Literal - "easy", "medium" or "hard"
     """
     data_dict = await request.json()
     message_data = ast.literal_eval(data_dict.get('message_data', '').get('message_body', ''))
-    cur_level = message_data['current_level']
-    level_up = message_data['level_up']
-    return JSONResponse(utils.get_next_level(cur_level, level_up))

 """FastAPI endpoint
 To run locally use 'uvicorn app:app --host localhost --port 7860'
+or
+`python -m uvicorn app:app --reload --host localhost --port 7860`
 """
 import ast
+import mathactive.microlessons.num_one as num_one_quiz
 import sentry_sdk

 from fastapi import FastAPI, Request

 from mathtext_fastapi.logging import prepare_message_data_for_logging
 from mathtext_fastapi.conversation_manager import manage_conversation_response
+from mathtext_fastapi.v2_conversation_manager import manage_conversation_response
 from mathtext_fastapi.nlu import evaluate_message_with_nlu
 from mathtext_fastapi.nlu import run_intent_classification

+import os
+from dotenv import load_dotenv
+
+load_dotenv()
+
 sentry_sdk.init(
+    dsn=os.environ.get('SENTRY_DNS'),

     # Set traces_sample_rate to 1.0 to capture 100%
     # of transactions for performance monitoring.
     # We recommend adjusting this value in production,
+    traces_sample_rate=1.0,
 )

 app = FastAPI()

     return JSONResponse(content=content)


+@app.post("/v1/manager")
+async def programmatic_message_manager(request: Request):
+    """
+    Calls conversation management function to determine the next state
+
+    Input
+    request.body: dict - message data for the most recent user response
+    {
+        "author_id": "+47897891",
+        "contact_uuid": "j43hk26-2hjl-43jk-hnk2-k4ljl46j0ds09",
+        "author_type": "OWNER",
+        "message_body": "a test message",
+        "message_direction": "inbound",
+        "message_id": "ABJAK64jlk3-agjkl2QHFAFH",
+        "message_inserted_at": "2022-07-05T04:00:34.03352Z",
+        "message_updated_at": "2023-02-14T03:54:19.342950Z",
+    }
+
+    Output
+    context: dict - the information for the current state
+    {
+        "user": "47897891",
+        "state": "welcome-message-state",
+        "bot_message": "Welcome to Rori!",
+        "user_message": "",
+        "type": "ask"
+    }
+    """
+    data_dict = await request.json()
+    context = manage_conversation_response(data_dict)
+    return JSONResponse(context)
+
+
+@app.post("/v2/manager")
 async def programmatic_message_manager(request: Request):
     """
     Calls conversation management function to determine the next state

     message_data = data_dict.get('message_data', '')
     nlu_response = evaluate_message_with_nlu(message_data)
     return JSONResponse(content=nlu_response)
+
+
+@app.post("/num_one")
+async def num_one(request: Request):
+    """
+    Input:
+    {
+        "user_id": 1,
+        "message_text": 5,
+    }
+    Output:
+    {
+        'messages':
+            ["Let's", 'practice', 'counting', '', '', '46...', '47...', '48...', '49', '', '', 'After', '49,', 'what', 'is', 'the', 'next', 'number', 'you', 'will', 'count?\n46,', '47,', '48,', '49'],
+        'input_prompt': '50',
+        'state': 'question'
+    }
+    """
+    print("STEP 1")
+    data_dict = await request.json()
+    message_data = ast.literal_eval(data_dict.get('message_data', '').get('message_body', ''))
+    user_id = message_data['user_id']
+    message_text = message_data['message_text']
+    print("STEP 2")
+    return num_one_quiz.process_user_message(user_id, message_text)


+@app.post("/start")
 async def ask_math_question(request: Request):
+    """Generate a question data

     Input
     {
+        'difficulty': 0.1,
+        'do_increase': True | False
     }

     Output
     {
         'text': 'What is 1+2?',
+        'difficulty': 0.2,
+        'question_numbers': [3, 1, 4]
     }
     """
     data_dict = await request.json()
     message_data = ast.literal_eval(data_dict.get('message_data', '').get('message_body', ''))
+    difficulty = message_data['difficulty']
+    do_increase = message_data['do_increase']

+    return JSONResponse(generators.start_interactive_math(difficulty, do_increase))


 @app.post("/hint")
 async def get_hint(request: Request):
+    """Generate a hint data

     Input
     {
+        'start': 5,
+        'step': 1,
+        'difficulty': 0.1
     }

     Output
     {
+        'text': 'What number is greater than 4 and less than 6?',
+        'difficulty': 0.1,
+        'question_numbers': [5, 1, 6]
     }
     """
     data_dict = await request.json()
     message_data = ast.literal_eval(data_dict.get('message_data', '').get('message_body', ''))
+    start = message_data['start']
+    step = message_data['step']
+    difficulty = message_data['difficulty']

+    return JSONResponse(hints.generate_hint(start, step, difficulty))


+@app.post("/question")
+async def ask_math_question(request: Request):
+    """Generate a question data

     Input
     {
+        'start': 5,
+        'step': 1,
+        'question_num': 1 # optional
     }

     Output
     {
+        'question': 'What is 1+2?',
+        'start': 5,
+        'step': 1,
+        'answer': 6
     }
     """
     data_dict = await request.json()
     message_data = ast.literal_eval(data_dict.get('message_data', '').get('message_body', ''))
+    start = message_data['start']
+    step = message_data['step']
+    arg_tuple = (start, step)
+    try:
+        question_num = message_data['question_num']
+        arg_tuple += (question_num,)
+    except KeyError:
+        pass

+    return JSONResponse(questions.generate_question_data(*arg_tuple))


+@app.post("/difficulty")
+async def get_hint(request: Request):
+    """Generate a number matching difficulty

     Input
     {
+        'difficulty': 0.01,
+        'do_increase': True
     }

+    Output - value from 0.01 to 0.99 inclusively:
+    0.09
     """
     data_dict = await request.json()
     message_data = ast.literal_eval(data_dict.get('message_data', '').get('message_body', ''))
+    difficulty = message_data['difficulty']
+    do_increase = message_data['do_increase']

+    return JSONResponse(utils.get_next_difficulty(difficulty, do_increase))

+
+@app.post("/start_step")
+async def get_hint(request: Request):
+    """Generate a start and step values

     Input
     {
+        'difficulty': 0.01,
+        'path_to_csv_file': 'scripts/quiz/data.csv' # optional
     }

+    Output - tuple (start, step):
+    (5, 1)
     """
     data_dict = await request.json()
     message_data = ast.literal_eval(data_dict.get('message_data', '').get('message_body', ''))
+    difficulty = message_data['difficulty']
+    arg_tuple = (difficulty,)
+    try:
+        path_to_csv_file = message_data['path_to_csv_file']
+        arg_tuple += (path_to_csv_file,)
+    except KeyError:
+        pass
+
+    return JSONResponse(utils.get_next_difficulty(*arg_tuple))


+@app.post("/sequence")
+async def generate_question(request: Request):
+    """Generate a sequence from start, step and optional separator parameter

     Input
     {
+        'start': 5,
+        'step': 1,
+        'sep': ', ' # optional
     }

     Output
+    5, 6, 7
     """
     data_dict = await request.json()
     message_data = ast.literal_eval(data_dict.get('message_data', '').get('message_body', ''))
+    start = message_data['start']
+    step = message_data['step']
+    arg_tuple = (start, step)
+    try:
+        sep = message_data['sep']
+        arg_tuple += (sep,)
+    except KeyError:
+        pass
+
+    return JSONResponse(utils.convert_sequence_to_string(*arg_tuple))
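
For reference, a minimal sketch of how a caller might exercise one of the endpoints above. The double-wrapped payload shape (a JSON object whose message_data.message_body holds a stringified Python dict) is inferred from the ast.literal_eval(data_dict.get('message_data', '').get('message_body', '')) parsing used throughout app.py and from scripts/make_request.py further down; the host and port are assumptions, not part of this commit.

    # Illustrative sketch only (not part of this diff)
    import requests

    payload = {
        "message_data": {
            # message_body carries a stringified Python dict, hence ast.literal_eval on the server
            "message_body": "{'user_id': 1, 'message_text': 5}"
        }
    }
    response = requests.post("http://localhost:7860/num_one", json=payload)
    print(response.json())
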
mathtext_fastapi/conversation_manager.py CHANGED
@@ -14,8 +14,8 @@ from mathtext_fastapi.math_subtraction_fsm import MathSubtractionFSM
 from supabase import create_client
 from transitions import Machine

-from scripts.quiz.generators import start_interactive_math
-from scripts.quiz.hints import generate_hint

 load_dotenv()


 from supabase import create_client
 from transitions import Machine

+from mathactive.generators import start_interactive_math
+from mathactive.hints import generate_hint

 load_dotenv()

mathtext_fastapi/curriculum_mapper.py ADDED
@@ -0,0 +1,183 @@
+import numpy as np
+import pandas as pd
+import re
+
+from pathlib import Path
+
+
+def read_and_preprocess_spreadsheet(file_name):
+    """ Creates a pandas dataframe from the curriculum overview spreadsheet """
+    DATA_DIR = Path(__file__).parent.parent / "mathtext_fastapi" / "data" / file_name
+    script_df = pd.read_excel(DATA_DIR, engine='openpyxl')
+    # Ensures the grade level columns are integers instead of floats
+    script_df.columns = script_df.columns[:2].tolist() + script_df.columns[2:11].astype(int).astype(str).tolist() + script_df.columns[11:].tolist()
+    script_df.fillna('', inplace=True)
+    return script_df
+
+
+def extract_skill_code(skill):
+    """ Looks within a curricular skill description for its descriptive code
+
+    Input
+    - skill: str - a brief description of a curricular skill
+
+    >>> extract_skill_code('A3.3.4 - Solve inequalities')
+    'A3.3.4'
+    >>> extract_skill_code('A3.3.2 - Graph linear equations, and identify the x- and y-intercepts or the slope of a line')
+    'A3.3.2'
+    """
+    pattern = r'[A-Z][0-9]\.\d+\.\d+'
+    result = re.search(pattern, skill)
+    return result.group()
+
+
+def build_horizontal_transitions(script_df):
+    """ Build a list of transitional relationships within a curricular skill
+
+    Inputs
+    - script_df: pandas dataframe - an overview of the curriculum skills by grade level
+
+    Output
+    - horizontal_transitions: array of arrays - transition data with label, from state, and to state
+
+    >>> script_df = read_and_preprocess_spreadsheet('curriculum_framework_for_tests.xlsx')
+    >>> build_horizontal_transitions(script_df)
+    [['right', 'N1.1.1_G1', 'N1.1.1_G2'], ['right', 'N1.1.1_G2', 'N1.1.1_G3'], ['right', 'N1.1.1_G3', 'N1.1.1_G4'], ['right', 'N1.1.1_G4', 'N1.1.1_G5'], ['right', 'N1.1.1_G5', 'N1.1.1_G6'], ['left', 'N1.1.1_G6', 'N1.1.1_G5'], ['left', 'N1.1.1_G5', 'N1.1.1_G4'], ['left', 'N1.1.1_G4', 'N1.1.1_G3'], ['left', 'N1.1.1_G3', 'N1.1.1_G2'], ['left', 'N1.1.1_G2', 'N1.1.1_G1'], ['right', 'N1.1.2_G1', 'N1.1.2_G2'], ['right', 'N1.1.2_G2', 'N1.1.2_G3'], ['right', 'N1.1.2_G3', 'N1.1.2_G4'], ['right', 'N1.1.2_G4', 'N1.1.2_G5'], ['right', 'N1.1.2_G5', 'N1.1.2_G6'], ['left', 'N1.1.2_G6', 'N1.1.2_G5'], ['left', 'N1.1.2_G5', 'N1.1.2_G4'], ['left', 'N1.1.2_G4', 'N1.1.2_G3'], ['left', 'N1.1.2_G3', 'N1.1.2_G2'], ['left', 'N1.1.2_G2', 'N1.1.2_G1']]
+    """
+    horizontal_transitions = []
+    for index, row in script_df.iterrows():
+        skill_code = extract_skill_code(row['Knowledge or Skill'])
+
+        rightward_matches = []
+        for i in range(9):
+            # Grade column
+            current_grade = i+1
+            if row[current_grade].lower().strip() == 'x':
+                rightward_matches.append(i)
+
+        for match in rightward_matches:
+            if rightward_matches[-1] != match:
+                horizontal_transitions.append([
+                    "right",
+                    f"{skill_code}_G{match}",
+                    f"{skill_code}_G{match+1}"
+                ])
+
+        leftward_matches = []
+        for i in reversed(range(9)):
+            current_grade = i
+            if row[current_grade].lower().strip() == 'x':
+                leftward_matches.append(i)
+
+        for match in leftward_matches:
+            if leftward_matches[0] != match:
+                horizontal_transitions.append([
+                    "left",
+                    f"{skill_code}_G{match}",
+                    f"{skill_code}_G{match-1}"
+                ])
+
+    return horizontal_transitions
+
+
+def gather_all_vertical_matches(script_df):
+    """ Build a list of transitional relationships within a grade level across skills
+
+    Inputs
+    - script_df: pandas dataframe - an overview of the curriculum skills by grade level
+
+    Output
+    - all_matches: array of arrays - represents skills at each grade level
+
+    >>> script_df = read_and_preprocess_spreadsheet('curriculum_framework_for_tests.xlsx')
+    >>> gather_all_vertical_matches(script_df)
+    [['N1.1.1', '1'], ['N1.1.2', '1'], ['N1.1.1', '2'], ['N1.1.2', '2'], ['N1.1.1', '3'], ['N1.1.2', '3'], ['N1.1.1', '4'], ['N1.1.2', '4'], ['N1.1.1', '5'], ['N1.1.2', '5'], ['N1.1.1', '6'], ['N1.1.2', '6']]
+    """
+    all_matches = []
+    columns = ['1', '2', '3', '4', '5', '6', '7', '8', '9']
+
+    for column in columns:
+        for index, value in script_df[column].iteritems():
+            row_num = index + 1
+            if value == 'x':
+                # Extract skill code
+                skill_code = extract_skill_code(
+                    script_df['Knowledge or Skill'][row_num-1]
+                )
+
+                all_matches.append([skill_code, column])
+    return all_matches
+
+
+def build_vertical_transitions(script_df):
+    """ Build a list of transitional relationships within a grade level across skills
+
+    Inputs
+    - script_df: pandas dataframe - an overview of the curriculum skills by grade level
+
+    Output
+    - vertical_transitions: array of arrays - transition data with label, from state, and to state
+
+    >>> script_df = read_and_preprocess_spreadsheet('curriculum_framework_for_tests.xlsx')
+    >>> build_vertical_transitions(script_df)
+    [['down', 'N1.1.1_G1', 'N1.1.2_G1'], ['down', 'N1.1.2_G1', 'N1.1.1_G1'], ['down', 'N1.1.1_G2', 'N1.1.2_G2'], ['down', 'N1.1.2_G2', 'N1.1.1_G2'], ['down', 'N1.1.1_G3', 'N1.1.2_G3'], ['down', 'N1.1.2_G3', 'N1.1.1_G3'], ['down', 'N1.1.1_G4', 'N1.1.2_G4'], ['down', 'N1.1.2_G4', 'N1.1.1_G4'], ['down', 'N1.1.1_G5', 'N1.1.2_G5'], ['down', 'N1.1.2_G5', 'N1.1.1_G5'], ['down', 'N1.1.1_G6', 'N1.1.2_G6'], ['up', 'N1.1.2_G6', 'N1.1.1_G6'], ['up', 'N1.1.1_G6', 'N1.1.2_G6'], ['up', 'N1.1.2_G5', 'N1.1.1_G5'], ['up', 'N1.1.1_G5', 'N1.1.2_G5'], ['up', 'N1.1.2_G4', 'N1.1.1_G4'], ['up', 'N1.1.1_G4', 'N1.1.2_G4'], ['up', 'N1.1.2_G3', 'N1.1.1_G3'], ['up', 'N1.1.1_G3', 'N1.1.2_G3'], ['up', 'N1.1.2_G2', 'N1.1.1_G2'], ['up', 'N1.1.1_G2', 'N1.1.2_G2'], ['up', 'N1.1.2_G1', 'N1.1.1_G1']]
+    """
+    vertical_transitions = []
+
+    all_matches = gather_all_vertical_matches(script_df)
+
+    # Downward
+    for index, match in enumerate(all_matches):
+        skill = match[0]
+        row_num = match[1]
+        if all_matches[-1] != match:
+            vertical_transitions.append([
+                "down",
+                f"{skill}_G{row_num}",
+                f"{all_matches[index+1][0]}_G{row_num}"
+            ])
+
+    # Upward
+    for index, match in reversed(list(enumerate(all_matches))):
+        skill = match[0]
+        row_num = match[1]
+        if all_matches[0] != match:
+            vertical_transitions.append([
+                "up",
+                f"{skill}_G{row_num}",
+                f"{all_matches[index-1][0]}_G{row_num}"
+            ])
+
+    return vertical_transitions
+
+
+def build_all_states(all_transitions):
+    """ Creates an array with all state labels for the curriculum
+
+    Input
+    - all_transitions: list of lists - all possible up, down, left, or right transitions in curriculum
+
+    Output
+    - all_states: list - a collection of state labels (skill code and grade number)
+
+    >>> all_transitions = [['right', 'N1.1.1_G1', 'N1.1.1_G2'], ['right', 'N1.1.1_G2', 'N1.1.1_G3'], ['right', 'N1.1.1_G3', 'N1.1.1_G4'], ['right', 'N1.1.1_G4', 'N1.1.1_G5'], ['right', 'N1.1.1_G5', 'N1.1.1_G6'], ['left', 'N1.1.1_G6', 'N1.1.1_G5'], ['left', 'N1.1.1_G5', 'N1.1.1_G4'], ['left', 'N1.1.1_G4', 'N1.1.1_G3'], ['left', 'N1.1.1_G3', 'N1.1.1_G2'], ['left', 'N1.1.1_G2', 'N1.1.1_G1'], ['right', 'N1.1.2_G1', 'N1.1.2_G2'], ['right', 'N1.1.2_G2', 'N1.1.2_G3'], ['right', 'N1.1.2_G3', 'N1.1.2_G4'], ['right', 'N1.1.2_G4', 'N1.1.2_G5'], ['right', 'N1.1.2_G5', 'N1.1.2_G6'], ['left', 'N1.1.2_G6', 'N1.1.2_G5'], ['left', 'N1.1.2_G5', 'N1.1.2_G4'], ['left', 'N1.1.2_G4', 'N1.1.2_G3'], ['left', 'N1.1.2_G3', 'N1.1.2_G2'], ['left', 'N1.1.2_G2', 'N1.1.2_G1'], ['down', 'N1.1.1_G1', 'N1.1.2_G1'], ['down', 'N1.1.2_G1', 'N1.1.1_G1'], ['down', 'N1.1.1_G2', 'N1.1.2_G2'], ['down', 'N1.1.2_G2', 'N1.1.1_G2'], ['down', 'N1.1.1_G3', 'N1.1.2_G3'], ['down', 'N1.1.2_G3', 'N1.1.1_G3'], ['down', 'N1.1.1_G4', 'N1.1.2_G4'], ['down', 'N1.1.2_G4', 'N1.1.1_G4'], ['down', 'N1.1.1_G5', 'N1.1.2_G5'], ['down', 'N1.1.2_G5', 'N1.1.1_G5'], ['down', 'N1.1.1_G6', 'N1.1.2_G6'], ['up', 'N1.1.2_G6', 'N1.1.1_G6'], ['up', 'N1.1.1_G6', 'N1.1.2_G6'], ['up', 'N1.1.2_G5', 'N1.1.1_G5'], ['up', 'N1.1.1_G5', 'N1.1.2_G5'], ['up', 'N1.1.2_G4', 'N1.1.1_G4'], ['up', 'N1.1.1_G4', 'N1.1.2_G4'], ['up', 'N1.1.2_G3', 'N1.1.1_G3'], ['up', 'N1.1.1_G3', 'N1.1.2_G3'], ['up', 'N1.1.2_G2', 'N1.1.1_G2'], ['up', 'N1.1.1_G2', 'N1.1.2_G2'], ['up', 'N1.1.2_G1', 'N1.1.1_G1']]
+    >>> build_all_states(all_transitions)
+    ['N1.1.1_G1', 'N1.1.1_G2', 'N1.1.1_G3', 'N1.1.1_G4', 'N1.1.1_G5', 'N1.1.1_G6', 'N1.1.2_G1', 'N1.1.2_G2', 'N1.1.2_G3', 'N1.1.2_G4', 'N1.1.2_G5', 'N1.1.2_G6']
+    """
+    all_states = []
+    for transition in all_transitions:
+        for index, state in enumerate(transition):
+            if index == 0:
+                continue
+            if state not in all_states:
+                all_states.append(state)
+    return all_states
+
+
+def build_curriculum_logic():
+    script_df = read_and_preprocess_spreadsheet('Rori_Framework_v1.xlsx')
+    horizontal_transitions = build_horizontal_transitions(script_df)
+    vertical_transitions = build_vertical_transitions(script_df)
+    all_transitions = horizontal_transitions + vertical_transitions
+    all_states = build_all_states(all_transitions)
+    return all_states, all_transitions
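
A short sketch of how the helpers in this new module compose, using the test spreadsheet added in this same commit; the expected values come from the doctests above.

    # Illustrative sketch only (not part of this diff)
    from mathtext_fastapi.curriculum_mapper import (
        read_and_preprocess_spreadsheet,
        build_horizontal_transitions,
        build_vertical_transitions,
        build_all_states,
    )

    script_df = read_and_preprocess_spreadsheet('curriculum_framework_for_tests.xlsx')
    all_transitions = build_horizontal_transitions(script_df) + build_vertical_transitions(script_df)
    all_states = build_all_states(all_transitions)
    print(all_states[:2])  # ['N1.1.1_G1', 'N1.1.1_G2'] per the doctest output above
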
mathtext_fastapi/data/Rori_Framework_v1.xlsx ADDED
Binary file (420 kB).
 
mathtext_fastapi/data/curriculum_framework_for_tests.xlsx ADDED
Binary file (510 kB).
 
mathtext_fastapi/data/text2int_results.csv CHANGED
@@ -20,10 +20,17 @@ eight oh,80.0,8.0,False
 eighty,80.0,80.0,True
 ate,8.0,1.0,False
 double eight,88.0,8.0,False
 eight three seven five three O nine,8375309.0,8375329.0,False
 eight three seven five three oh nine,8375309.0,8375309.0,True
 eight three seven five three zero nine,8375309.0,8375309.0,True
 eight three seven five three oh ni-ee-ine,8375309.0,837530619.0,False
 two eight,28.0,16.0,False
 seven oh eleven,7011.0,77.0,False
 seven elevens,77.0,77.0,True
@@ -31,10 +38,17 @@ seven eleven,711.0,77.0,False
 ninety nine oh five,9905.0,149.0,False
 seven 0 seven 0 seven 0 seven,7070707.0,7070707.0,True
 123 hundred,123000.0,223.0,False
 5 o 5,505.0,525.0,False
 15 o 5,1505.0,22.0,False
 15-o 5,1505.0,22.0,False
 15 o-5,1505.0,22.0,False
 911-thousand,911000.0,911000.0,True
 twenty-two twenty-two,2222.0,44.0,False
 twenty-two twenty-twos,484.0,44.0,False

 eighty,80.0,80.0,True
 ate,8.0,1.0,False
 double eight,88.0,8.0,False
+<<<<<<< HEAD
 eight three seven five three O nine,8375309.0,8375329.0,False
 eight three seven five three oh nine,8375309.0,8375309.0,True
 eight three seven five three zero nine,8375309.0,8375309.0,True
 eight three seven five three oh ni-ee-ine,8375309.0,837530619.0,False
+=======
+eight three seven five three O nine,8375309.0,8375319.0,False
+eight three seven five three oh nine,8375309.0,8375309.0,True
+eight three seven five three zero nine,8375309.0,8375309.0,True
+eight three seven five three oh ni-ee-ine,8375309.0,837530111.0,False
+>>>>>>> 74e75c0104d51b24ef06a73810ddd2c758f557b9
 two eight,28.0,16.0,False
 seven oh eleven,7011.0,77.0,False
 seven elevens,77.0,77.0,True

 ninety nine oh five,9905.0,149.0,False
 seven 0 seven 0 seven 0 seven,7070707.0,7070707.0,True
 123 hundred,123000.0,223.0,False
+<<<<<<< HEAD
 5 o 5,505.0,525.0,False
 15 o 5,1505.0,22.0,False
 15-o 5,1505.0,22.0,False
 15 o-5,1505.0,22.0,False
+=======
+5 o 5,505.0,515.0,False
+15 o 5,1505.0,21.0,False
+15-o 5,1505.0,21.0,False
+15 o-5,1505.0,21.0,False
+>>>>>>> 74e75c0104d51b24ef06a73810ddd2c758f557b9
 911-thousand,911000.0,911000.0,True
 twenty-two twenty-two,2222.0,44.0,False
 twenty-two twenty-twos,484.0,44.0,False
mathtext_fastapi/global_state_manager.py ADDED
@@ -0,0 +1,23 @@
+from transitions import Machine
+from mathtext_fastapi.curriculum_mapper import build_curriculum_logic
+
+all_states, all_transitions = build_curriculum_logic()
+
+class GlobalStateManager(object):
+    states = all_states
+
+    transitions = all_transitions
+
+    def __init__(
+        self,
+        initial_state='N1.1.1_G1',
+    ):
+        self.machine = Machine(
+            model=self,
+            states=GlobalStateManager.states,
+            transitions=GlobalStateManager.transitions,
+            initial=initial_state
+        )
+
+
+curriculum = GlobalStateManager()
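
A sketch of how this module-level curriculum machine is driven elsewhere in this commit (see v2_conversation_manager.py below): the shared instance is deep-copied per request, positioned on the stored state, and moved with the left/right triggers that transitions generates from the curriculum tables.

    # Illustrative sketch only (not part of this diff)
    import copy
    import mathtext_fastapi.global_state_manager as gsm

    machine = copy.deepcopy(gsm.curriculum)
    machine.state = 'N1.1.1_G1'
    machine.right()         # 'harder' moves one grade up within the same skill
    print(machine.state)    # 'N1.1.1_G2', given the transitions built above
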
mathtext_fastapi/nlu.py CHANGED
@@ -1,9 +1,15 @@
 from fuzzywuzzy import fuzz
 from mathtext_fastapi.logging import prepare_message_data_for_logging
 from mathtext.sentiment import sentiment
 from mathtext.text2int import text2int
 from mathtext_fastapi.intent_classification import create_intent_classification_model, retrieve_intent_classification_model, predict_message_intent
-import re


 def build_nlu_response_object(type, data, confidence):
@@ -107,6 +113,16 @@ def run_intent_classification(message_text):
         'hint',
         'next',
         'stop',
     ]

     for command in commands:
@@ -131,20 +147,13 @@ def evaluate_message_with_nlu(message_data):
     {'type': 'sentiment', 'data': 'NEGATIVE', 'confidence': 0.9997807145118713}
     """
     # Keeps system working with two different inputs - full and filtered @event object
     try:
-        message_text = str(message_data['message_body'])
-    except KeyError:
-        message_data = {
-            'author_id': message_data['message']['_vnd']['v1']['chat']['owner'],
-            'author_type': message_data['message']['_vnd']['v1']['author']['type'],
-            'contact_uuid': message_data['message']['_vnd']['v1']['chat']['contact_uuid'],
-            'message_body': message_data['message']['text']['body'],
-            'message_direction': message_data['message']['_vnd']['v1']['direction'],
-            'message_id': message_data['message']['id'],
-            'message_inserted_at': message_data['message']['_vnd']['v1']['chat']['inserted_at'],
-            'message_updated_at': message_data['message']['_vnd']['v1']['chat']['updated_at'],
-        }
-        message_text = str(message_data['message_body'])

     # Run intent classification only for keywords
     intent_api_response = run_intent_classification(message_text)
@@ -154,7 +163,7 @@ def evaluate_message_with_nlu(message_data):

     number_api_resp = text2int(message_text.lower())

-    if number_api_resp == 32202:
         # Run intent classification with logistic regression model
         predicted_label = predict_message_intent(message_text)
         if predicted_label['confidence'] > 0.01:

+from logging import getLogger
+import re
+
 from fuzzywuzzy import fuzz
 from mathtext_fastapi.logging import prepare_message_data_for_logging
 from mathtext.sentiment import sentiment
 from mathtext.text2int import text2int
 from mathtext_fastapi.intent_classification import create_intent_classification_model, retrieve_intent_classification_model, predict_message_intent
+
+log = getLogger(__name__)
+
+ERROR_CODE = 32202


 def build_nlu_response_object(type, data, confidence):

         'hint',
         'next',
         'stop',
+        'tired',
+        'tomorrow',
+        'finished',
+        'help',
+        'please',
+        'understand',
+        'question',
+        'easier',
+        'easy',
+        'support'
     ]

     for command in commands:

     {'type': 'sentiment', 'data': 'NEGATIVE', 'confidence': 0.9997807145118713}
     """
     # Keeps system working with two different inputs - full and filtered @event object
+    log.info(f'Starting evaluate message: {message_data}')
     try:
+        message_text = str(message_data.get('message_body', ''))
+    except:
+        log.error(f'Invalid request payload: {message_data}')
+        # use python logging system to do this//
+        return {'type': 'error', 'data': ERROR_CODE, 'confidence': 0}

     # Run intent classification only for keywords
     intent_api_response = run_intent_classification(message_text)

     number_api_resp = text2int(message_text.lower())

+    if number_api_resp == ERROR_CODE:
         # Run intent classification with logistic regression model
         predicted_label = predict_message_intent(message_text)
         if predicted_label['confidence'] > 0.01:
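
The new getLogger-based messages in nlu.py only appear if the hosting application configures the standard logging module; a minimal sketch (the level and format are assumptions, not part of this diff):

    # Illustrative sketch only
    import logging

    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s %(name)s %(levelname)s %(message)s",
    )
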
mathtext_fastapi/v2_conversation_manager.py ADDED
@@ -0,0 +1,271 @@
+import base64
+import copy
+import dill
+import os
+import json
+import jsonpickle
+import pickle
+import random
+import requests
+import mathtext_fastapi.global_state_manager as gsm
+
+from dotenv import load_dotenv
+from mathtext_fastapi.nlu import evaluate_message_with_nlu
+from mathtext_fastapi.math_quiz_fsm import MathQuizFSM
+from mathtext_fastapi.math_subtraction_fsm import MathSubtractionFSM
+from supabase import create_client
+from transitions import Machine
+
+from mathactive.generators import start_interactive_math
+from mathactive.hints import generate_hint
+from mathactive.microlessons import num_one
+
+load_dotenv()
+
+SUPA = create_client(
+    os.environ.get('SUPABASE_URL'),
+    os.environ.get('SUPABASE_KEY')
+)
+
+
+def pickle_and_encode_state_machine(state_machine):
+    dump = pickle.dumps(state_machine)
+    dump_encoded = base64.b64encode(dump).decode('utf-8')
+    return dump_encoded
+
+
+def manage_math_quiz_fsm(user_message, contact_uuid, type):
+    fsm_check = SUPA.table('state_machines').select("*").eq(
+        "contact_uuid",
+        contact_uuid
+    ).execute()
+
+    # This doesn't allow for when one FSM is present and the other is empty
+    """
+    1
+    data=[] count=None
+
+    2
+    data=[{'id': 29, 'contact_uuid': 'j43hk26-2hjl-43jk-hnk2-k4ljl46j0ds09', 'addition3': None, 'subtraction': None, 'addition':
+
+    - but problem is there is no subtraction , but it's assuming there's a subtration
+
+    Cases
+    - make a completely new record
+    - update an existing record with an existing FSM
+    - update an existing record without an existing FSM
+    """
+    print("MATH QUIZ FSM ACTIVITY")
+    print("user_message")
+    print(user_message)
+    # Make a completely new entry
+    if fsm_check.data == []:
+        if type == 'addition':
+            math_quiz_state_machine = MathQuizFSM()
+        else:
+            math_quiz_state_machine = MathSubtractionFSM()
+        messages = [math_quiz_state_machine.response_text]
+        dump_encoded = pickle_and_encode_state_machine(math_quiz_state_machine)
+
+        SUPA.table('state_machines').insert({
+            'contact_uuid': contact_uuid,
+            f'{type}': dump_encoded
+        }).execute()
+    # Update an existing record with a new state machine
+    elif not fsm_check.data[0][type]:
+        if type == 'addition':
+            math_quiz_state_machine = MathQuizFSM()
+        else:
+            math_quiz_state_machine = MathSubtractionFSM()
+        messages = [math_quiz_state_machine.response_text]
+        dump_encoded = pickle_and_encode_state_machine(math_quiz_state_machine)
+
+        SUPA.table('state_machines').update({
+            f'{type}': dump_encoded
+        }).eq(
+            "contact_uuid", contact_uuid
+        ).execute()
+    # Update an existing record with an existing state machine
+    elif fsm_check.data[0][type]:
+        undump_encoded = base64.b64decode(
+            fsm_check.data[0][type].encode('utf-8')
+        )
+        math_quiz_state_machine = pickle.loads(undump_encoded)
+
+        math_quiz_state_machine.student_answer = user_message
+        math_quiz_state_machine.correct_answer = str(math_quiz_state_machine.correct_answer)
+        messages = math_quiz_state_machine.validate_answer()
+        dump_encoded = pickle_and_encode_state_machine(math_quiz_state_machine)
+        SUPA.table('state_machines').update({
+            f'{type}': dump_encoded
+        }).eq(
+            "contact_uuid", contact_uuid
+        ).execute()
+    return messages
+
+
+def retrieve_microlesson_content(context_data, user_message, microlesson, contact_uuid):
+    # TODO: This is being filtered by both the local and global states, so not changing
+    if microlesson == 'addition':
+        messages = manage_math_quiz_fsm(user_message, contact_uuid, 'addition')
+
+        if user_message == 'exit':
+            state_label = 'exit'
+        else:
+            state_label = 'addition-question-sequence'
+
+        input_prompt = messages.pop()
+        message_package = {
+            'messages': messages,
+            'input_prompt': input_prompt,
+            'state': state_label
+        }
+    elif context_data['local_state'] == 'addition2' or microlesson == 'addition2':
+        if user_message == 'harder' or user_message == 'easier':
+            user_message = ''
+        message_package = num_one.process_user_message(contact_uuid, user_message)
+        message_package['state'] = 'addition2'
+        message_package['input_prompt'] = '?'
+
+    elif context_data['local_state'] == 'subtraction-question-sequence' or \
+        user_message == 'subtract' or \
+        microlesson == 'subtraction':
+        messages = manage_math_quiz_fsm(user_message, contact_uuid, 'subtraction')
+
+        if user_message == 'exit':
+            state_label = 'exit'
+        else:
+            state_label = 'subtraction-question-sequence'
+
+        input_prompt = messages.pop()
+
+        message_package = {
+            'messages': messages,
+            'input_prompt': input_prompt,
+            'state': state_label
+        }
+    print("MICROLESSON CONTENT RESPONSE")
+    print(message_package)
+    return message_package
+
+
+curriculum_lookup_table = {
+    'N1.1.1_G1': 'addition',
+    'N1.1.1_G2': 'addition2',
+    'N1.1.2_G1': 'subtraction'
+}
+
+
+def lookup_local_state(next_state):
+    microlesson = curriculum_lookup_table[next_state]
+    return microlesson
+
+
+def create_text_message(message_text, whatsapp_id):
+    """ Fills a template with input values to send a text message to Whatsapp
+
+    Inputs
+    - message_text: str - the content that the message should display
+    - whatsapp_id: str - the message recipient's phone number
+
+    Outputs
+    - message_data: dict - a preformatted template filled with inputs
+    """
+    message_data = {
+        "preview_url": False,
+        "recipient_type": "individual",
+        "to": whatsapp_id,
+        "type": "text",
+        "text": {
+            "body": message_text
+        }
+    }
+    return message_data
+
+
+def manage_conversation_response(data_json):
+    """ Calls functions necessary to determine message and context data """
+    print("V2 ENDPOINT")
+
+    # whatsapp_id = data_json['author_id']
+    message_data = data_json['message_data']
+    context_data = data_json['context_data']
+    whatsapp_id = message_data['author_id']
+    user_message = message_data['message_body']
+    print("MESSAGE DATA")
+    print(message_data)
+    print("CONTEXT DATA")
+    print(context_data)
+    print("=================")
+
+    # nlu_response = evaluate_message_with_nlu(message_data)
+
+    # context_data = {
+    #     'contact_uuid': 'abcdefg',
+    #     'current_state': 'N1.1.1_G2',
+    #     'user_message': '1',
+    #     'local_state': ''
+    # }
+    print("STEP 1")
+    print(data_json)
+    print(f"1: {context_data['current_state']}")
+    if not context_data['current_state']:
+        context_data['current_state'] = 'N1.1.1_G1'
+    print(f"2: {context_data['current_state']}")
+
+    curriculum_copy = copy.deepcopy(gsm.curriculum)
+    curriculum_copy.state = context_data['current_state']
+    print("STEP 2")
+    if user_message == 'easier':
+        curriculum_copy.left()
+        next_state = curriculum_copy.state
+    elif user_message == 'harder':
+        curriculum_copy.right()
+        next_state = curriculum_copy.state
+    else:
+        next_state = context_data['current_state']
+    print("next_state")
+    print(next_state)
+
+    print("STEP 3")
+    microlesson = lookup_local_state(next_state)
+
+    print("microlesson")
+    print(microlesson)
+
+    microlesson_content = retrieve_microlesson_content(context_data, user_message, microlesson, context_data['contact_uuid'])
+
+    headers = {
+        'Authorization': f"Bearer {os.environ.get('TURN_AUTHENTICATION_TOKEN')}",
+        'Content-Type': 'application/json'
+    }
+
+    # Send all messages for the current state before a user input prompt (text/button input request)
+    for message in microlesson_content['messages']:
+        data = create_text_message(message, whatsapp_id)
+
+        # print("data")
+        # print(data)
+
+        r = requests.post(
+            f'https://whatsapp.turn.io/v1/messages',
+            data=json.dumps(data),
+            headers=headers
+        )
+
+    print("STEP 4")
+    # combine microlesson content and context_data object
+
+    updated_context = {
+        "context": {
+            "contact_id": whatsapp_id,
+            "contact_uuid": context_data['contact_uuid'],
+            "current_state": next_state,
+            "local_state": microlesson_content['state'],
+            "bot_message": microlesson_content['input_prompt'],
+            "user_message": user_message,
+            "type": 'ask'
+        }
+    }
+    print(updated_context)
+    return updated_context
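
For readers tracing the Supabase persistence above: state machines are stored as base64-encoded pickles via pickle_and_encode_state_machine(), and decoded inline when a record already exists. A sketch of that inverse step pulled out as a standalone helper; the function name is hypothetical and does not exist in this diff.

    # Illustrative sketch only (hypothetical helper, not in this diff)
    import base64
    import pickle

    def decode_state_machine(dump_encoded):
        """Inverse of pickle_and_encode_state_machine()."""
        return pickle.loads(base64.b64decode(dump_encoded.encode('utf-8')))
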
pyproject.toml CHANGED
@@ -20,14 +20,17 @@ classifiers = [


 [tool.poetry.dependencies]
 mathtext = {git = "https://gitlab.com/tangibleai/community/mathtext", rev = "main"}
-fastapi = "0.74.*"
 pydantic = "*"
-python = "^3.8,<3.10"
-requests = "2.27.*"
 sentencepiece = "0.1.*"
 supabase = "*"
 uvicorn = "0.17.*"

 [tool.poetry.group.dev.dependencies]
 pytest = "^7.2"


 [tool.poetry.dependencies]
+mathactive = {git = "git@gitlab.com:tangibleai/community/mathactive.git", rev = "vlad"}
 mathtext = {git = "https://gitlab.com/tangibleai/community/mathtext", rev = "main"}
+fastapi = "^0.90.0"
 pydantic = "*"
+python = "^3.8"
+requests = "2.27.*"
 sentencepiece = "0.1.*"
 supabase = "*"
 uvicorn = "0.17.*"
+pandas = "^1.5.3"
+scipy = "^1.10.1"

 [tool.poetry.group.dev.dependencies]
 pytest = "^7.2"
requirements.txt CHANGED
@@ -3,14 +3,17 @@ en-core-web-sm @ https://github.com/explosion/spacy-models/releases/download/en_
 fuzzywuzzy
 jsonpickle
 mathtext @ git+https://gitlab.com/tangibleai/community/mathtext@main
-fastapi==0.74.*
-pydantic==1.10.*
 python-Levenshtein
-requests==2.27.*
-sentencepiece==0.1.*
 sentence-transformers
 sentry-sdk[fastapi]
 supabase
 transitions
-uvicorn==0.17.*
-

 fuzzywuzzy
 jsonpickle
 mathtext @ git+https://gitlab.com/tangibleai/community/mathtext@main
+mathactive @ git+https://gitlab.com/tangibleai/community/mathactive@main
+fastapi
+pydantic
+requests
+sentencepiece
+openpyxl
 python-Levenshtein
 sentence-transformers
 sentry-sdk[fastapi]
 supabase
 transitions
+uvicorn
+pandas
+scipy
scripts/make_request.py CHANGED
@@ -22,7 +22,11 @@ def add_message_text_to_sample_object(message_text):
     message_data = '{' + f'"author_id": "+57787919091", "author_type": "OWNER", "contact_uuid": "j43hk26-2hjl-43jk-hnk2-k4ljl46j0ds09", "message_body": "{message_text}", "message_direction": "inbound", "message_id": "4kl209sd0-a7b8-2hj3-8563-3hu4a89b32", "message_inserted_at": "2023-01-10T02:37:28.477940Z", "message_updated_at": "2023-01-10T02:37:28.487319Z"' + '}'
     # context_data = '{' + '"user":"", "state":"addition-question-sequence", "bot_message":"", "user_message":"{message_text}"' + '}'

-    context_data = '{' + '"user":"", "state":"start-conversation", "bot_message":"", "user_message":"{message_text}"' + '}'

     # context_data = '{' + '"user":"", "state":"addition-question-sequence", "bot_message":"", "user_message":"{message_text}","text": "What is 2+3?","question_numbers": [4,3],"right_answer": 7,"number_correct": 2, "number_incorrect": 0, "hints_used": 0, "level": "easy"' + '}'

@@ -60,6 +64,17 @@ def run_simulated_request(endpoint, sample_answer, context=None):

 # run_simulated_request('intent-classification', 'exit')
 # run_simulated_request('intent-classification', "I'm not sure")
 # run_simulated_request('sentiment-analysis', 'I reject it')
 # run_simulated_request('text2int', 'seven thousand nine hundred fifty seven')
 run_simulated_request('nlu', 'test message')
@@ -75,37 +90,49 @@ run_simulated_request('nlu', "I don't 9")
 run_simulated_request('nlu', "0.2")
 run_simulated_request('nlu', 'Today is a wonderful day')
 run_simulated_request('nlu', 'IDK 5?')
 # run_simulated_request('manager', '')
 # run_simulated_request('manager', 'add')
 # run_simulated_request('manager', 'subtract')
-# run_simulated_request("question", {
-# 'number_correct': 0,
-# 'number_incorrect': 0,
-# 'level': 'easy'
 # })
 # run_simulated_request("hint", {
-# 'question_numbers': [1, 2, 3],
-# 'right_answer': 3,
-# 'number_correct': 0,
-# 'number_incorrect': 0,
-# 'level': 'easy',
-# 'hints_used': 0
 # })
-# run_simulated_request("generate_question", {
-# 'level': 'medium'
 # })
-# run_simulated_request("numbers_by_level", {
-# 'level': 'medium'
 # })
-# run_simulated_request("number_sequence", {
-# "current_number": 10,
-# "ordinal_number": 2,
-# "times": 1
 # })
-# run_simulated_request("level", {
-# "current_level": "hard",
-# "level_up": False
 # })
 # run_simulated_request('manager', 'exit')


     message_data = '{' + f'"author_id": "+57787919091", "author_type": "OWNER", "contact_uuid": "j43hk26-2hjl-43jk-hnk2-k4ljl46j0ds09", "message_body": "{message_text}", "message_direction": "inbound", "message_id": "4kl209sd0-a7b8-2hj3-8563-3hu4a89b32", "message_inserted_at": "2023-01-10T02:37:28.477940Z", "message_updated_at": "2023-01-10T02:37:28.487319Z"' + '}'
     # context_data = '{' + '"user":"", "state":"addition-question-sequence", "bot_message":"", "user_message":"{message_text}"' + '}'

+    # V1
+    # context_data = '{' + '"user":"", "state":"start-conversation", "bot_message":"", "user_message":"{message_text}"' + '}'
+
+    #V2
+    context_data = '{' + '"contact_uuid": "j43hk26-2hjl-43jk-hnk2-k4ljl46j0ds09", "current_state":"", "local_state": "", "user_message":""' + '}'

     # context_data = '{' + '"user":"", "state":"addition-question-sequence", "bot_message":"", "user_message":"{message_text}","text": "What is 2+3?","question_numbers": [4,3],"right_answer": 7,"number_correct": 2, "number_incorrect": 0, "hints_used": 0, "level": "easy"' + '}'


 # run_simulated_request('intent-classification', 'exit')
 # run_simulated_request('intent-classification', "I'm not sure")
+# run_simulated_request('intent-classification', "easier")
+# run_simulated_request('intent-classification', "easy")
+# run_simulated_request('intent-classification', "harder")
+# run_simulated_request('intent-classification', "hard")
+# run_simulated_request('intent-classification', "hint")
+# run_simulated_request('intent-classification', "hin")
+# run_simulated_request('intent-classification', "hnt")
+# run_simulated_request('intent-classification', "stop")
+# run_simulated_request('intent-classification', "stp")
+# run_simulated_request('intent-classification', "sop")
+# run_simulated_request('intent-classification', "please stop")
 # run_simulated_request('sentiment-analysis', 'I reject it')
 # run_simulated_request('text2int', 'seven thousand nine hundred fifty seven')
 run_simulated_request('nlu', 'test message')

 run_simulated_request('nlu', "0.2")
 run_simulated_request('nlu', 'Today is a wonderful day')
 run_simulated_request('nlu', 'IDK 5?')
+run_simulated_request('nlu', 'hin')
+run_simulated_request('nlu', 'exi')
+run_simulated_request('nlu', 'easier')
+run_simulated_request('nlu', 'stp')
+run_simulated_request('nlu', '')
 # run_simulated_request('manager', '')
 # run_simulated_request('manager', 'add')
 # run_simulated_request('manager', 'subtract')
+
+# run_simulated_request("start", {
+# 'difficulty': 0.04,
+# 'do_increase': True
 # })
 # run_simulated_request("hint", {
+# 'start': 5,
+# 'step': 1,
+# 'difficulty': 0.56 # optional
 # })
+# run_simulated_request("question", {
+# 'start': 2,
+# 'step': 1,
+# 'question_num': 2 # optional
+# })
+# run_simulated_request("difficulty", {
+# 'difficulty': 0.01,
+# 'do_increase': False # True | False
 # })
+# Need to start with this command to populate users.json
+# If users.json is not already made
+# run_simulated_request("num_one", {
+# "user_id": "1",
+# "message_text": "",
 # })
+# run_simulated_request("num_one", {
+# "user_id": "1",
+# "message_text": "61",
 # })
+# run_simulated_request("sequence", {
+# 'start': 2,
+# 'step': 1,
+# 'sep': '... '
 # })
+
 # run_simulated_request('manager', 'exit')

scripts/quiz/__init__.py ADDED
File without changes
scripts/quiz/data.csv ADDED
@@ -0,0 +1,56 @@
+difficulty,start
+0.01,1
+0.02,0
+0.05,5
+0.07,10
+0.08,14
+0.1,20
+0.11,22
+0.13,27
+0.14,28
+0.16,30
+0.17,32
+0.18,34
+0.2,37
+0.21,39
+0.23,42
+0.25,43
+0.27,46
+0.3,50
+0.34,57
+0.35,64
+0.37,78
+0.39,89
+0.41,100
+0.44,112
+0.45,130
+0.48,147
+0.5,164
+0.52,180
+0.55,195
+0.58,209
+0.6,223
+0.61,236
+0.63,248
+0.64,259
+0.65,271
+0.67,284
+0.69,296
+0.7,308
+0.72,321
+0.73,333
+0.75,346
+0.78,359
+0.8,370
+0.81,385
+0.83,399
+0.84,408
+0.87,420
+0.88,435
+0.89,447
+0.9,458
+0.93,469
+0.94,483
+0.96,494
+0.97,500
+0.99,513
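
One way this difficulty-to-start table could be consulted is a nearest-difficulty lookup; the real lookup lives in the quiz utilities, which this diff does not show, so treat this as an assumption-laden sketch.

    # Illustrative sketch only (not part of this diff)
    import pandas as pd

    df = pd.read_csv('scripts/quiz/data.csv')
    difficulty = 0.56
    # pick the row whose difficulty is closest to the requested value
    start = int(df.iloc[(df['difficulty'] - difficulty).abs().idxmin()]['start'])
    print(start)
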
users.json ADDED
@@ -0,0 +1 @@
+{"1": {"skill_score": 0.04, "state": "question", "start": 1, "stop": 1, "step": 1, "answer": 2}}