Greg Thompson committed on
Commit 9a45711
Parents: 6785aa6 d14bd4c

Merge branch 'feature-global-state' into 'feature-wormhole'


Integrate initial work developing a state tree

See merge request tangibleai/community/mathtext-fastapi!14

app.py CHANGED
@@ -17,6 +17,7 @@ from pydantic import BaseModel
 
 from mathtext_fastapi.logging import prepare_message_data_for_logging
 from mathtext_fastapi.conversation_manager import manage_conversation_response
+from mathtext_fastapi.v2_conversation_manager import manage_conversation_response
 from mathtext_fastapi.nlu import evaluate_message_with_nlu
 from mathtext_fastapi.nlu import run_intent_classification
 
@@ -56,7 +57,40 @@ def text2int_ep(content: Text = None):
     return JSONResponse(content=content)
 
 
-@app.post("/manager")
+@app.post("/v1/manager")
+async def programmatic_message_manager(request: Request):
+    """
+    Calls conversation management function to determine the next state
+
+    Input
+    request.body: dict - message data for the most recent user response
+    {
+        "author_id": "+47897891",
+        "contact_uuid": "j43hk26-2hjl-43jk-hnk2-k4ljl46j0ds09",
+        "author_type": "OWNER",
+        "message_body": "a test message",
+        "message_direction": "inbound",
+        "message_id": "ABJAK64jlk3-agjkl2QHFAFH",
+        "message_inserted_at": "2022-07-05T04:00:34.03352Z",
+        "message_updated_at": "2023-02-14T03:54:19.342950Z",
+    }
+
+    Output
+    context: dict - the information for the current state
+    {
+        "user": "47897891",
+        "state": "welcome-message-state",
+        "bot_message": "Welcome to Rori!",
+        "user_message": "",
+        "type": "ask"
+    }
+    """
+    data_dict = await request.json()
+    context = manage_conversation_response(data_dict)
+    return JSONResponse(context)
+
+
+@app.post("/v2/manager")
 async def programmatic_message_manager(request: Request):
     """
     Calls conversation management function to determine the next state
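
For reference, a minimal client-side sketch for exercising the new /v1/manager route. The base URL is a placeholder assumption (adjust to wherever the FastAPI app is served); the payload fields mirror the docstring above.

import requests

URL = "http://localhost:7860/v1/manager"  # hypothetical local address

message_data = {
    "author_id": "+47897891",
    "contact_uuid": "j43hk26-2hjl-43jk-hnk2-k4ljl46j0ds09",
    "author_type": "OWNER",
    "message_body": "a test message",
    "message_direction": "inbound",
    "message_id": "ABJAK64jlk3-agjkl2QHFAFH",
    "message_inserted_at": "2022-07-05T04:00:34.03352Z",
    "message_updated_at": "2023-02-14T03:54:19.342950Z",
}

response = requests.post(URL, json=message_data)
# Expected shape per the docstring: {"user": ..., "state": ..., "bot_message": ..., "user_message": ..., "type": ...}
print(response.json())
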
mathtext_fastapi/curriculum_mapper.py ADDED
@@ -0,0 +1,183 @@
+import numpy as np
+import pandas as pd
+import re
+
+from pathlib import Path
+
+
+def read_and_preprocess_spreadsheet(file_name):
+    """ Creates a pandas dataframe from the curriculum overview spreadsheet """
+    DATA_DIR = Path(__file__).parent.parent / "mathtext_fastapi" / "data" / file_name
+    script_df = pd.read_excel(DATA_DIR, engine='openpyxl')
+    # Ensures the grade level columns are integers instead of floats
+    script_df.columns = script_df.columns[:2].tolist() + script_df.columns[2:11].astype(int).astype(str).tolist() + script_df.columns[11:].tolist()
+    script_df.fillna('', inplace=True)
+    return script_df
+
+
+def extract_skill_code(skill):
+    """ Looks within a curricular skill description for its descriptive code
+
+    Input
+    - skill: str - a brief description of a curricular skill
+
+    >>> extract_skill_code('A3.3.4 - Solve inequalities')
+    'A3.3.4'
+    >>> extract_skill_code('A3.3.2 - Graph linear equations, and identify the x- and y-intercepts or the slope of a line')
+    'A3.3.2'
+    """
+    pattern = r'[A-Z][0-9]\.\d+\.\d+'
+    result = re.search(pattern, skill)
+    return result.group()
+
+
+def build_horizontal_transitions(script_df):
+    """ Build a list of transitional relationships within a curricular skill
+
+    Inputs
+    - script_df: pandas dataframe - an overview of the curriculum skills by grade level
+
+    Output
+    - horizontal_transitions: array of arrays - transition data with label, from state, and to state
+
+    >>> script_df = read_and_preprocess_spreadsheet('curriculum_framework_for_tests.xlsx')
+    >>> build_horizontal_transitions(script_df)
+    [['right', 'N1.1.1_G1', 'N1.1.1_G2'], ['right', 'N1.1.1_G2', 'N1.1.1_G3'], ['right', 'N1.1.1_G3', 'N1.1.1_G4'], ['right', 'N1.1.1_G4', 'N1.1.1_G5'], ['right', 'N1.1.1_G5', 'N1.1.1_G6'], ['left', 'N1.1.1_G6', 'N1.1.1_G5'], ['left', 'N1.1.1_G5', 'N1.1.1_G4'], ['left', 'N1.1.1_G4', 'N1.1.1_G3'], ['left', 'N1.1.1_G3', 'N1.1.1_G2'], ['left', 'N1.1.1_G2', 'N1.1.1_G1'], ['right', 'N1.1.2_G1', 'N1.1.2_G2'], ['right', 'N1.1.2_G2', 'N1.1.2_G3'], ['right', 'N1.1.2_G3', 'N1.1.2_G4'], ['right', 'N1.1.2_G4', 'N1.1.2_G5'], ['right', 'N1.1.2_G5', 'N1.1.2_G6'], ['left', 'N1.1.2_G6', 'N1.1.2_G5'], ['left', 'N1.1.2_G5', 'N1.1.2_G4'], ['left', 'N1.1.2_G4', 'N1.1.2_G3'], ['left', 'N1.1.2_G3', 'N1.1.2_G2'], ['left', 'N1.1.2_G2', 'N1.1.2_G1']]
+    """
+    horizontal_transitions = []
+    for index, row in script_df.iterrows():
+        skill_code = extract_skill_code(row['Knowledge or Skill'])
+
+        rightward_matches = []
+        for i in range(9):
+            # Grade column
+            current_grade = i+1
+            if row[current_grade].lower().strip() == 'x':
+                rightward_matches.append(i)
+
+        for match in rightward_matches:
+            if rightward_matches[-1] != match:
+                horizontal_transitions.append([
+                    "right",
+                    f"{skill_code}_G{match}",
+                    f"{skill_code}_G{match+1}"
+                ])
+
+        leftward_matches = []
+        for i in reversed(range(9)):
+            current_grade = i
+            if row[current_grade].lower().strip() == 'x':
+                leftward_matches.append(i)
+
+        for match in leftward_matches:
+            if leftward_matches[0] != match:
+                horizontal_transitions.append([
+                    "left",
+                    f"{skill_code}_G{match}",
+                    f"{skill_code}_G{match-1}"
+                ])
+
+    return horizontal_transitions
+
+
+def gather_all_vertical_matches(script_df):
+    """ Build a list of transitional relationships within a grade level across skills
+
+    Inputs
+    - script_df: pandas dataframe - an overview of the curriculum skills by grade level
+
+    Output
+    - all_matches: array of arrays - represents skills at each grade level
+
+    >>> script_df = read_and_preprocess_spreadsheet('curriculum_framework_for_tests.xlsx')
+    >>> gather_all_vertical_matches(script_df)
+    [['N1.1.1', '1'], ['N1.1.2', '1'], ['N1.1.1', '2'], ['N1.1.2', '2'], ['N1.1.1', '3'], ['N1.1.2', '3'], ['N1.1.1', '4'], ['N1.1.2', '4'], ['N1.1.1', '5'], ['N1.1.2', '5'], ['N1.1.1', '6'], ['N1.1.2', '6']]
+    """
+    all_matches = []
+    columns = ['1', '2', '3', '4', '5', '6', '7', '8', '9']
+
+    for column in columns:
+        for index, value in script_df[column].iteritems():
+            row_num = index + 1
+            if value == 'x':
+                # Extract skill code
+                skill_code = extract_skill_code(
+                    script_df['Knowledge or Skill'][row_num-1]
+                )
+
+                all_matches.append([skill_code, column])
+    return all_matches
+
+
+def build_vertical_transitions(script_df):
+    """ Build a list of transitional relationships within a grade level across skills
+
+    Inputs
+    - script_df: pandas dataframe - an overview of the curriculum skills by grade level
+
+    Output
+    - vertical_transitions: array of arrays - transition data with label, from state, and to state
+
+    >>> script_df = read_and_preprocess_spreadsheet('curriculum_framework_for_tests.xlsx')
+    >>> build_vertical_transitions(script_df)
+    [['down', 'N1.1.1_G1', 'N1.1.2_G1'], ['down', 'N1.1.2_G1', 'N1.1.1_G1'], ['down', 'N1.1.1_G2', 'N1.1.2_G2'], ['down', 'N1.1.2_G2', 'N1.1.1_G2'], ['down', 'N1.1.1_G3', 'N1.1.2_G3'], ['down', 'N1.1.2_G3', 'N1.1.1_G3'], ['down', 'N1.1.1_G4', 'N1.1.2_G4'], ['down', 'N1.1.2_G4', 'N1.1.1_G4'], ['down', 'N1.1.1_G5', 'N1.1.2_G5'], ['down', 'N1.1.2_G5', 'N1.1.1_G5'], ['down', 'N1.1.1_G6', 'N1.1.2_G6'], ['up', 'N1.1.2_G6', 'N1.1.1_G6'], ['up', 'N1.1.1_G6', 'N1.1.2_G6'], ['up', 'N1.1.2_G5', 'N1.1.1_G5'], ['up', 'N1.1.1_G5', 'N1.1.2_G5'], ['up', 'N1.1.2_G4', 'N1.1.1_G4'], ['up', 'N1.1.1_G4', 'N1.1.2_G4'], ['up', 'N1.1.2_G3', 'N1.1.1_G3'], ['up', 'N1.1.1_G3', 'N1.1.2_G3'], ['up', 'N1.1.2_G2', 'N1.1.1_G2'], ['up', 'N1.1.1_G2', 'N1.1.2_G2'], ['up', 'N1.1.2_G1', 'N1.1.1_G1']]
+    """
+    vertical_transitions = []
+
+    all_matches = gather_all_vertical_matches(script_df)
+
+    # Downward
+    for index, match in enumerate(all_matches):
+        skill = match[0]
+        row_num = match[1]
+        if all_matches[-1] != match:
+            vertical_transitions.append([
+                "down",
+                f"{skill}_G{row_num}",
+                f"{all_matches[index+1][0]}_G{row_num}"
+            ])
+
+    # Upward
+    for index, match in reversed(list(enumerate(all_matches))):
+        skill = match[0]
+        row_num = match[1]
+        if all_matches[0] != match:
+            vertical_transitions.append([
+                "up",
+                f"{skill}_G{row_num}",
+                f"{all_matches[index-1][0]}_G{row_num}"
+            ])
+
+    return vertical_transitions
+
+
+def build_all_states(all_transitions):
+    """ Creates an array with all state labels for the curriculum
+
+    Input
+    - all_transitions: list of lists - all possible up, down, left, or right transitions in curriculum
+
+    Output
+    - all_states: list - a collection of state labels (skill code and grade number)
+
+    >>> all_transitions = [['right', 'N1.1.1_G1', 'N1.1.1_G2'], ['right', 'N1.1.1_G2', 'N1.1.1_G3'], ['right', 'N1.1.1_G3', 'N1.1.1_G4'], ['right', 'N1.1.1_G4', 'N1.1.1_G5'], ['right', 'N1.1.1_G5', 'N1.1.1_G6'], ['left', 'N1.1.1_G6', 'N1.1.1_G5'], ['left', 'N1.1.1_G5', 'N1.1.1_G4'], ['left', 'N1.1.1_G4', 'N1.1.1_G3'], ['left', 'N1.1.1_G3', 'N1.1.1_G2'], ['left', 'N1.1.1_G2', 'N1.1.1_G1'], ['right', 'N1.1.2_G1', 'N1.1.2_G2'], ['right', 'N1.1.2_G2', 'N1.1.2_G3'], ['right', 'N1.1.2_G3', 'N1.1.2_G4'], ['right', 'N1.1.2_G4', 'N1.1.2_G5'], ['right', 'N1.1.2_G5', 'N1.1.2_G6'], ['left', 'N1.1.2_G6', 'N1.1.2_G5'], ['left', 'N1.1.2_G5', 'N1.1.2_G4'], ['left', 'N1.1.2_G4', 'N1.1.2_G3'], ['left', 'N1.1.2_G3', 'N1.1.2_G2'], ['left', 'N1.1.2_G2', 'N1.1.2_G1'], ['down', 'N1.1.1_G1', 'N1.1.2_G1'], ['down', 'N1.1.2_G1', 'N1.1.1_G1'], ['down', 'N1.1.1_G2', 'N1.1.2_G2'], ['down', 'N1.1.2_G2', 'N1.1.1_G2'], ['down', 'N1.1.1_G3', 'N1.1.2_G3'], ['down', 'N1.1.2_G3', 'N1.1.1_G3'], ['down', 'N1.1.1_G4', 'N1.1.2_G4'], ['down', 'N1.1.2_G4', 'N1.1.1_G4'], ['down', 'N1.1.1_G5', 'N1.1.2_G5'], ['down', 'N1.1.2_G5', 'N1.1.1_G5'], ['down', 'N1.1.1_G6', 'N1.1.2_G6'], ['up', 'N1.1.2_G6', 'N1.1.1_G6'], ['up', 'N1.1.1_G6', 'N1.1.2_G6'], ['up', 'N1.1.2_G5', 'N1.1.1_G5'], ['up', 'N1.1.1_G5', 'N1.1.2_G5'], ['up', 'N1.1.2_G4', 'N1.1.1_G4'], ['up', 'N1.1.1_G4', 'N1.1.2_G4'], ['up', 'N1.1.2_G3', 'N1.1.1_G3'], ['up', 'N1.1.1_G3', 'N1.1.2_G3'], ['up', 'N1.1.2_G2', 'N1.1.1_G2'], ['up', 'N1.1.1_G2', 'N1.1.2_G2'], ['up', 'N1.1.2_G1', 'N1.1.1_G1']]
+    >>> build_all_states(all_transitions)
+    ['N1.1.1_G1', 'N1.1.1_G2', 'N1.1.1_G3', 'N1.1.1_G4', 'N1.1.1_G5', 'N1.1.1_G6', 'N1.1.2_G1', 'N1.1.2_G2', 'N1.1.2_G3', 'N1.1.2_G4', 'N1.1.2_G5', 'N1.1.2_G6']
+    """
+    all_states = []
+    for transition in all_transitions:
+        for index, state in enumerate(transition):
+            if index == 0:
+                continue
+            if state not in all_states:
+                all_states.append(state)
+    return all_states
+
+
+def build_curriculum_logic():
+    script_df = read_and_preprocess_spreadsheet('Rori_Framework_v1.xlsx')
+    horizontal_transitions = build_horizontal_transitions(script_df)
+    vertical_transitions = build_vertical_transitions(script_df)
+    all_transitions = horizontal_transitions + vertical_transitions
+    all_states = build_all_states(all_transitions)
+    return all_states, all_transitions
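
As a quick orientation to how this module is consumed, a minimal sketch (assuming the bundled Rori_Framework_v1.xlsx spreadsheet is present, as above):

from mathtext_fastapi.curriculum_mapper import build_curriculum_logic

all_states, all_transitions = build_curriculum_logic()

# States are skill-and-grade labels such as 'N1.1.1_G1'; transitions are
# [trigger, source, destination] triples such as ['right', 'N1.1.1_G1', 'N1.1.1_G2']
print(len(all_states), len(all_transitions))
print(all_states[:3])
print(all_transitions[:3])
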
mathtext_fastapi/data/Rori_Framework_v1.xlsx ADDED
Binary file (420 kB).
 
mathtext_fastapi/data/curriculum_framework_for_tests.xlsx ADDED
Binary file (510 kB).
 
mathtext_fastapi/data/text2int_results.csv CHANGED
@@ -20,10 +20,10 @@ eight oh,80.0,8.0,False
 eighty,80.0,80.0,True
 ate,8.0,1.0,False
 double eight,88.0,8.0,False
-eight three seven five three O nine,8375309.0,8375329.0,False
+eight three seven five three O nine,8375309.0,8375319.0,False
 eight three seven five three oh nine,8375309.0,8375309.0,True
 eight three seven five three zero nine,8375309.0,8375309.0,True
-eight three seven five three oh ni-ee-ine,8375309.0,837530619.0,False
+eight three seven five three oh ni-ee-ine,8375309.0,837530111.0,False
 two eight,28.0,16.0,False
 seven oh eleven,7011.0,77.0,False
 seven elevens,77.0,77.0,True
@@ -31,10 +31,10 @@ seven eleven,711.0,77.0,False
 ninety nine oh five,9905.0,149.0,False
 seven 0 seven 0 seven 0 seven,7070707.0,7070707.0,True
 123 hundred,123000.0,223.0,False
-5 o 5,505.0,525.0,False
-15 o 5,1505.0,22.0,False
-15-o 5,1505.0,22.0,False
-15 o-5,1505.0,22.0,False
+5 o 5,505.0,515.0,False
+15 o 5,1505.0,21.0,False
+15-o 5,1505.0,21.0,False
+15 o-5,1505.0,21.0,False
 911-thousand,911000.0,911000.0,True
 twenty-two twenty-two,2222.0,44.0,False
 twenty-two twenty-twos,484.0,44.0,False
mathtext_fastapi/global_state_manager.py ADDED
@@ -0,0 +1,23 @@
+from transitions import Machine
+from mathtext_fastapi.curriculum_mapper import build_curriculum_logic
+
+all_states, all_transitions = build_curriculum_logic()
+
+class GlobalStateManager(object):
+    states = all_states
+
+    transitions = all_transitions
+
+    def __init__(
+        self,
+        initial_state='N1.1.1_G1',
+    ):
+        self.machine = Machine(
+            model=self,
+            states=GlobalStateManager.states,
+            transitions=GlobalStateManager.transitions,
+            initial=initial_state
+        )
+
+
+curriculum = GlobalStateManager()
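
For context, a minimal sketch of driving the shared curriculum machine. The left()/right() trigger methods are generated by the transitions library from the trigger names built in curriculum_mapper.py; v2_conversation_manager.py below uses them the same way on a deepcopy.

import copy
import mathtext_fastapi.global_state_manager as gsm

# Work on a copy so the module-level machine keeps its own state
curriculum_copy = copy.deepcopy(gsm.curriculum)
print(curriculum_copy.state)   # the initial state, 'N1.1.1_G1' by default

curriculum_copy.right()        # intended to step to the next grade of the same skill
print(curriculum_copy.state)

curriculum_copy.left()         # intended to step back to the easier grade
print(curriculum_copy.state)
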
mathtext_fastapi/nlu.py CHANGED
@@ -155,15 +155,14 @@ def evaluate_message_with_nlu(message_data):
         predicted_label = predict_message_intent(message_text)
         if predicted_label['confidence'] > 0.01:
             nlu_response = predicted_label
-            return nlu_response
-
-        # Run sentiment analysis
-        sentiment_api_resp = sentiment(message_text)
-        nlu_response = build_nlu_response_object(
-            'sentiment',
-            sentiment_api_resp[0]['label'],
-            sentiment_api_resp[0]['score']
-        )
+        else:
+            # Run sentiment analysis
+            sentiment_api_resp = sentiment(message_text)
+            nlu_response = build_nlu_response_object(
+                'sentiment',
+                sentiment_api_resp[0]['label'],
+                sentiment_api_resp[0]['score']
+            )
     else:
         nlu_response = build_nlu_response_object(
             'integer',
mathtext_fastapi/v2_conversation_manager.py ADDED
@@ -0,0 +1,196 @@
+import base64
+import copy
+import dill
+import os
+import json
+import jsonpickle
+import pickle
+import random
+import requests
+import mathtext_fastapi.global_state_manager as gsm
+
+from dotenv import load_dotenv
+from mathtext_fastapi.nlu import evaluate_message_with_nlu
+from mathtext_fastapi.math_quiz_fsm import MathQuizFSM
+from mathtext_fastapi.math_subtraction_fsm import MathSubtractionFSM
+from supabase import create_client
+from transitions import Machine
+
+from scripts.quiz.generators import start_interactive_math
+from scripts.quiz.hints import generate_hint
+
+load_dotenv()
+
+SUPA = create_client(
+    os.environ.get('SUPABASE_URL'),
+    os.environ.get('SUPABASE_KEY')
+)
+
+
+def pickle_and_encode_state_machine(state_machine):
+    dump = pickle.dumps(state_machine)
+    dump_encoded = base64.b64encode(dump).decode('utf-8')
+    return dump_encoded
+
+
+def manage_math_quiz_fsm(user_message, contact_uuid, type):
+    fsm_check = SUPA.table('state_machines').select("*").eq(
+        "contact_uuid",
+        contact_uuid
+    ).execute()
+
+    # This doesn't allow for when one FSM is present and the other is empty
+    """
+    1
+    data=[] count=None
+
+    2
+    data=[{'id': 29, 'contact_uuid': 'j43hk26-2hjl-43jk-hnk2-k4ljl46j0ds09', 'addition3': None, 'subtraction': None, 'addition':
+
+    - but the problem is there is no subtraction, yet it assumes there is a subtraction
+
+    Cases
+    - make a completely new record
+    - update an existing record with an existing FSM
+    - update an existing record without an existing FSM
+    """
+    print("MATH QUIZ FSM ACTIVITY")
+    print("user_message")
+    print(user_message)
+    # Make a completely new entry
+    if fsm_check.data == []:
+        if type == 'addition':
+            math_quiz_state_machine = MathQuizFSM()
+        else:
+            math_quiz_state_machine = MathSubtractionFSM()
+        messages = [math_quiz_state_machine.response_text]
+        dump_encoded = pickle_and_encode_state_machine(math_quiz_state_machine)
+
+        SUPA.table('state_machines').insert({
+            'contact_uuid': contact_uuid,
+            f'{type}': dump_encoded
+        }).execute()
+    # Update an existing record with a new state machine
+    elif not fsm_check.data[0][type]:
+        if type == 'addition':
+            math_quiz_state_machine = MathQuizFSM()
+        else:
+            math_quiz_state_machine = MathSubtractionFSM()
+        messages = [math_quiz_state_machine.response_text]
+        dump_encoded = pickle_and_encode_state_machine(math_quiz_state_machine)
+
+        SUPA.table('state_machines').update({
+            f'{type}': dump_encoded
+        }).eq(
+            "contact_uuid", contact_uuid
+        ).execute()
+    # Update an existing record with an existing state machine
+    elif fsm_check.data[0][type]:
+        undump_encoded = base64.b64decode(
+            fsm_check.data[0][type].encode('utf-8')
+        )
+        math_quiz_state_machine = pickle.loads(undump_encoded)
+
+        math_quiz_state_machine.student_answer = user_message
+        math_quiz_state_machine.correct_answer = str(math_quiz_state_machine.correct_answer)
+        messages = math_quiz_state_machine.validate_answer()
+        dump_encoded = pickle_and_encode_state_machine(math_quiz_state_machine)
+        SUPA.table('state_machines').update({
+            f'{type}': dump_encoded
+        }).eq(
+            "contact_uuid", contact_uuid
+        ).execute()
+    return messages
+
+
+def retrieve_microlesson_content(context_data, user_message, microlesson, contact_uuid):
+    if context_data['local_state'] == 'addition-question-sequence' or \
+            user_message == 'add' or \
+            microlesson == 'addition':
+        messages = manage_math_quiz_fsm(user_message, contact_uuid, 'addition')
+
+        if user_message == 'exit':
+            state_label = 'exit'
+        else:
+            state_label = 'addition-question-sequence'
+
+        input_prompt = messages.pop()
+        message_package = {
+            'messages': messages,
+            'input_prompt': input_prompt,
+            'state': state_label
+        }
+    elif context_data['local_state'] == 'subtraction-question-sequence' or \
+            user_message == 'subtract' or \
+            microlesson == 'subtraction':
+        messages = manage_math_quiz_fsm(user_message, contact_uuid, 'subtraction')
+
+        if user_message == 'exit':
+            state_label = 'exit'
+        else:
+            state_label = 'subtraction-question-sequence'
+
+        input_prompt = messages.pop()
+
+        message_package = {
+            'messages': messages,
+            'input_prompt': input_prompt,
+            'state': state_label
+        }
+    print("MICROLESSON CONTENT RESPONSE")
+    print(message_package)
+    return message_package
+
+
+curriculum_lookup_table = {
+    'N1.1.1_G1': 'addition',
+    'N1.1.1_G2': 'subtraction'
+}
+
+
+def lookup_local_state(next_state):
+    microlesson = curriculum_lookup_table[next_state]
+    return microlesson
+
+
+def manage_conversation_response(data_json):
+    """ Calls functions necessary to determine message and context data """
+    print("V2 ENDPOINT")
+
+    user_message = ''
+
+    # nlu_response = evaluate_message_with_nlu(message_data)
+
+    context_data = {
+        'contact_uuid': 'abcdefg',
+        'current_state': 'N1.1.1_G2',
+        'user_message': '1',
+        'local_state': ''
+    }
+    print("STEP 1")
+    if not context_data['current_state']:
+        context_data['current_state'] = 'N1.1.1_G1'
+
+    curriculum_copy = copy.deepcopy(gsm.curriculum)
+
+    print("STEP 2")
+    if context_data['user_message'] == 'easier':
+        curriculum_copy.left()
+        next_state = curriculum_copy.state
+    elif context_data['user_message'] == 'harder':
+        curriculum_copy.right()
+        next_state = curriculum_copy.state
+    else:
+        next_state = context_data['current_state']
+
+    print("STEP 3")
+    microlesson = lookup_local_state(next_state)
+
+    print("microlesson")
+    print(microlesson)
+
+    microlesson_content = retrieve_microlesson_content(context_data, context_data['user_message'], microlesson, context_data['contact_uuid'])
+
+    print("STEP 4")
+    # combine microlesson content and context_data object
+    return context_data
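
A minimal sketch of the persistence round trip used by manage_math_quiz_fsm above: the quiz FSM is pickled and base64-encoded before being written to the Supabase state_machines table, then decoded and unpickled on the next turn. MathQuizFSM stands in for either FSM class, the '8' reply is a made-up user message, and the database call itself is not shown.

import base64
import pickle

from mathtext_fastapi.math_quiz_fsm import MathQuizFSM

fsm = MathQuizFSM()

# Serialize for storage as text (what pickle_and_encode_state_machine does)
encoded = base64.b64encode(pickle.dumps(fsm)).decode('utf-8')

# On a later turn: restore the machine from the stored string and feed it the reply
restored = pickle.loads(base64.b64decode(encoded.encode('utf-8')))
restored.student_answer = '8'            # hypothetical user reply
restored.correct_answer = str(restored.correct_answer)
messages = restored.validate_answer()    # list of bot messages to send next
print(messages)
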
requirements.txt CHANGED
@@ -4,6 +4,7 @@ fuzzywuzzy
 jsonpickle
 mathtext @ git+https://gitlab.com/tangibleai/community/mathtext@main
 fastapi==0.74.*
+openpyxl
 pydantic==1.10.*
 python-Levenshtein
 requests==2.27.*
scripts/make_request.py CHANGED
@@ -69,12 +69,13 @@ def run_simulated_request(endpoint, sample_answer, context=None):
 # run_simulated_request('nlu', 'eight, nine, ten')
 # run_simulated_request('nlu', '8, 9, 10')
 # run_simulated_request('nlu', '8')
-run_simulated_request('nlu', "I don't know")
+# run_simulated_request('nlu', "I don't know")
 # run_simulated_request('nlu', "I don't know eight")
 # run_simulated_request('nlu', "I don't 9")
 # run_simulated_request('nlu', "0.2")
 # run_simulated_request('nlu', 'Today is a wonderful day')
 # run_simulated_request('nlu', 'IDK 5?')
+run_simulated_request('v2/manager', '')
 # run_simulated_request('manager', '')
 # run_simulated_request('manager', 'add')
 # run_simulated_request('manager', 'subtract')