Nitesh_Kumar committed on
Commit
57a6cc3
1 Parent(s): beac690

test my dev

.streamlit/config.toml ADDED
@@ -0,0 +1,2 @@
+ [theme]
+ base="light"
Screenshot 2024-08-07 at 1.05.24 PM.png ADDED
li_analysys.py ADDED
@@ -0,0 +1,301 @@
+
+ import streamlit as st
+ import pandas as pd
+ import seaborn as sns
+ import matplotlib.pyplot as plt
+ import numpy as np
+ from scipy.stats import zscore
+ st.set_page_config(layout="wide")
+
+ def categorize_marks(normalized_marks):
+     if normalized_marks >= 0.8:
+         return '80-100%'
+     elif normalized_marks >= 0.6:
+         return '60-80%'
+     else:
+         return '<60%'
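+
+ # Illustrative check (an added example, not part of the committed file):
+ # categorize_marks(0.80) returns '80-100%' because the comparison is >=,
+ # categorize_marks(0.79) returns '60-80%', and anything below 0.6 returns '<60%'.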
+
+ def analyze_student_performance_by_li(question_data, student_performance_data):
+     # Merge the dataframes on question number
+     merged_data = pd.merge(student_performance_data, question_data, on='question_number')
+
+     merged_data = merged_data.groupby(["student_id", "learning_indicator_id"])[["marks_obtained", "maximum_marks"]].sum().reset_index()
+     merged_data['normalized_marks'] = merged_data['marks_obtained'] / merged_data['maximum_marks']
+     # Categorize the normalized marks
+     merged_data['category'] = merged_data['normalized_marks'].apply(categorize_marks)
+
+     # Group by learning indicator ID and category, and count the number of students in each category
+     merged_data = merged_data.groupby(['learning_indicator_id', 'category']).size().unstack(fill_value=0)
+
+     # Reindex (rather than blindly relabel) the columns: unstack() orders the
+     # category columns alphabetically ('60-80%', '80-100%', '<60%'), so assigning
+     # new labels positionally would mislabel the counts; reindex also tolerates
+     # a category that never occurs.
+     merged_data = merged_data.reindex(columns=['<60%', '60-80%', '80-100%'], fill_value=0)
+     merged_data = merged_data.sort_values(['<60%', '60-80%', '80-100%'], ascending=[False, False, False]).reset_index()
+
+     # Return the per-LI category counts
+     return merged_data
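+
+ # Illustrative output shape (hypothetical numbers, not from the committed file):
+ #   learning_indicator_id  <60%  60-80%  80-100%
+ #   LI003                    20       8        7
+ #   LI001                    12      15        8
+ # Rows are sorted so the LIs with the most struggling students come first.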
+
+ def prioritize_lis(category_counts):
+     # Add a rank column based on the order of rows
+     category_counts['Rank'] = category_counts.index + 1
+
+     # Determine the number of LIs
+     total_lis = len(category_counts)
+
+     # Determine the cutoff points for high, medium, and low priority
+     high_priority_cutoff = int(total_lis * 0.3)
+     medium_priority_cutoff = int(total_lis * 0.6)
+
+     # Classify the LIs based on their rank
+     category_counts['Priority'] = 'Low'
+     category_counts.loc[category_counts['Rank'] <= high_priority_cutoff, 'Priority'] = 'High'
+     category_counts.loc[(category_counts['Rank'] > high_priority_cutoff) & (category_counts['Rank'] <= medium_priority_cutoff), 'Priority'] = 'Medium'
+
+     return category_counts
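+
+ # Worked example (illustrative, not part of the committed file): with 10 ranked
+ # LIs, high_priority_cutoff = int(10 * 0.3) = 3 and medium_priority_cutoff =
+ # int(10 * 0.6) = 6, so ranks 1-3 become 'High', 4-6 'Medium', and 7-10 'Low'.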
+
+ def mean_li_level_analysis(student_data, question_data):
+     merged_data = pd.merge(student_data, question_data, on='question_number', how="inner")
+
+     # Aggregate marks per learning indicator across all students, then normalize
+     merged_data = merged_data.groupby(['learning_indicator_id'])[['marks_obtained', 'maximum_marks']].sum().reset_index()
+     merged_data["normalised_score"] = merged_data["marks_obtained"] / merged_data["maximum_marks"]
+     merged_data = pd.merge(merged_data, question_data[["learning_indicator_id", "learning_indicator_text"]].drop_duplicates(), on="learning_indicator_id")
+     return merged_data
+
+
+ def student_level_analysis(student_data, question_data, prioritized_lis):
+     # Merge the student data with question data
+     merged_data = pd.merge(student_data, question_data, on='question_number', how="inner")
+
+     # Normalize the marks obtained for each learning indicator by each student
+     merged_data = merged_data.groupby(['student_id', 'learning_indicator_id'])[['marks_obtained', 'maximum_marks']].sum().reset_index()
+     merged_data['normalized_marks'] = merged_data['marks_obtained'] / merged_data['maximum_marks']
+     # Keep only the LIs where the student scored below 80%
+     merged_data = merged_data[merged_data["normalized_marks"] < 0.80]
+
+     # Merge with prioritized_lis to get the priority and rank
+     merged_data = pd.merge(merged_data, prioritized_lis[['learning_indicator_id', 'Rank']], on='learning_indicator_id', how='left')
+
+     # Rank the LIs for each student based on normalized marks and class-level LI priority
+     merged_data['student_rank'] = merged_data.groupby('student_id')['normalized_marks'].rank(method='dense', ascending=False)
+     merged_data = merged_data.sort_values(by=['student_id', 'student_rank', 'Rank'])
+
+     # Ensure unique ranks by adding a secondary sort by Rank
+     merged_data['unique_rank'] = merged_data.groupby('student_id').cumcount() + 1
+
+     # Create the final dataframe
+     student_ranking = merged_data.pivot(index='student_id', columns='unique_rank', values='learning_indicator_id').reset_index()
+     student_ranking.columns = ['student_id'] + [f'P{i+1}_li' for i in range(student_ranking.shape[1] - 1)]
+
+     li_text_mapping = question_data.drop_duplicates(subset='learning_indicator_id').set_index('learning_indicator_id')['learning_indicator_text']
+
+     for col in student_ranking.columns[1:]:
+         student_ranking[col] = student_ranking[col].map(li_text_mapping)
+     return student_ranking
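+
+ # Illustrative result (hypothetical values, not from the committed file): one row
+ # per student, with P1_li, P2_li, ... holding the learning-indicator text of that
+ # student's below-80% indicators in ranked order, e.g.
+ #   student_id  P1_li                       P2_li
+ #   1           Interpret statistical data  Solve linear equations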
+
+
+ def prepare_data_for_ridge_plot(student_data, question_data):
+     # Merge the DataFrames
+     merged_data = pd.merge(student_data, question_data, on='question_number', how='inner')
+
+     # Normalize the marks obtained for each learning indicator by each student
+     normalized_data = merged_data.groupby(['student_id', 'learning_indicator_id'])[['marks_obtained', 'maximum_marks']].sum().reset_index()
+     normalized_data['normalized_marks'] = normalized_data['marks_obtained'] / normalized_data['maximum_marks']
+
+     # Add learning_indicator_text to normalized_data
+     plot_data = pd.merge(normalized_data, question_data[['learning_indicator_id', 'learning_indicator_text']].drop_duplicates(), on='learning_indicator_id')
+
+     return plot_data
+
+ def calculate_logical_quantiles(data, num_quantiles=5):
+     """
+     Calculate logical quantiles for a given data set to ensure they are informative.
+
+     Parameters:
+     data (array-like): The input data for which to calculate quantiles.
+     num_quantiles (int): The number of quantiles to calculate. Default is 5.
+
+     Returns:
+     list: A list of quantile values.
+     """
+     # Ensure there are enough unique values to calculate the quantiles
+     if len(np.unique(data)) < num_quantiles:
+         # If not enough unique values, use the unique values as quantiles
+         quantiles = np.unique(data)
+     else:
+         # Calculate evenly spaced quantiles
+         quantiles = np.percentile(data, np.linspace(0, 100, num_quantiles))
+
+     return quantiles.tolist()
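+
+ # Illustrative call (an added example, not part of the committed file): with the
+ # default num_quantiles=5, np.linspace(0, 100, 5) requests the 0th, 25th, 50th,
+ # 75th and 100th percentiles (min, quartiles, max), so
+ # calculate_logical_quantiles([0, 0.2, 0.5, 0.8, 1]) returns [0.0, 0.2, 0.5, 0.8, 1.0].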
+
+ def create_ridge_plot(plot_data):
+     unique_learning_indicators = plot_data['learning_indicator_text'].unique()
+     n_indicators = len(unique_learning_indicators)
+     bandwidth = 0.5  # Adjust bandwidth for smoother graphs
+     darkgreen = '#9BC184'
+     midgreen = '#C2D6A4'
+     lightgreen = '#E7E5CB'
+     colors = [lightgreen, midgreen, darkgreen, midgreen, lightgreen]
+
+     fig, axs = plt.subplots(nrows=n_indicators, ncols=1, figsize=(10, n_indicators * 1.5), sharex=True)
+     axs = np.atleast_1d(axs)  # plt.subplots returns a bare Axes (no .flatten()) when n_indicators == 1
+
+     for i, indicator in enumerate(unique_learning_indicators):
+         # Subset the data for each learning indicator
+         subset = plot_data[plot_data['learning_indicator_text'] == indicator]
+
+         # Plot the distribution of normalized marks
+         sns.kdeplot(
+             subset['normalized_marks'],
+             fill=True,  # 'shade' is deprecated in recent seaborn; 'fill' is the replacement
+             bw_adjust=bandwidth,
+             ax=axs[i],
+             color=sns.color_palette('coolwarm', n_colors=n_indicators)[i]
+         )
+         quantiles = calculate_logical_quantiles(subset["normalized_marks"].tolist())
+
+         # Fill the band between each pair of adjacent quantiles
+         for j in range(len(quantiles) - 1):
+             axs[i].fill_between(
+                 [quantiles[j],       # lower bound
+                  quantiles[j + 1]],  # upper bound
+                 0.1,  # bottom of the band on the y-axis
+                 0.3,  # top of the band on the y-axis
+                 color=colors[j]
+             )
+         # Class mean for this LI, marked as a dot above the band
+         mean = subset['marks_obtained'].sum() / subset['maximum_marks'].sum()
+         axs[i].scatter([mean], [0.3], color='black', s=15)
+
+         # Dashed line at the global mean across all LIs
+         global_mean = plot_data['normalized_marks'].mean()
+         axs[i].axvline(global_mean, color='#525252', linestyle='--')
+
+         axs[i].set_xlim(0, 1)
+         axs[i].set_ylim(0, 3)
+
+         # Add the learning indicator text as the title
+         axs[i].set_title(indicator, loc='left', fontsize=12, fontweight='bold')
+
+         # Remove y-axis label
+         axs[i].set_ylabel('')
+
+         # Add a horizontal line for the baseline
+         axs[i].axhline(0, color='black', linewidth=1.3, linestyle='-')
+
+     # Set common labels
+     plt.xlabel('Normalized Marks', fontsize=12, fontweight='bold')
+
+     plt.tight_layout()
+     return fig
+
+ def remediation_groups(student_df, question_df, z_threshold=-1.35):
+     # Merge student performance with question data to get full marks
+     student_df["student_id"] = student_df["student_id"].astype(str)
+     merged_df = pd.merge(student_df, question_df, on='question_number')
+
+     # Apply minimum marks validation and fill NaN with 0
+     merged_df['marks_obtained'] = np.minimum(merged_df['marks_obtained'].fillna(0), merged_df['full_marks'])
+
+     # Calculate normalized scores
+     merged_df['normalized_score'] = merged_df['marks_obtained'] / merged_df['full_marks']
+
+     # Calculate z-scores for each learning indicator
+     z_scores = merged_df.groupby('learning_indicator_id')['normalized_score'].transform(zscore)
+     merged_df['z_score'] = z_scores
+
+     # Identify students needing remediation
+     remediation_df = merged_df[merged_df['z_score'] < z_threshold]
+
+     # Group by learning indicator to find students needing remediation
+     li_remediation_groups = remediation_df.groupby(['learning_indicator_id', 'learning_indicator_text'])['student_id'].apply(lambda x: ', '.join(x.unique())).reset_index()
+
+     # Identify students who don't need remediation
+     students_needing_remediation = remediation_df['student_id'].unique()
+     students_no_remediation = merged_df[~merged_df['student_id'].isin(students_needing_remediation)]['student_id'].unique()
+     no_remediation_df = pd.DataFrame(students_no_remediation, columns=['student_id'])
+
+     return li_remediation_groups, no_remediation_df
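+
+ # Threshold note (an added example, not part of the committed file): z < -1.35
+ # flags scores more than 1.35 standard deviations below the class mean for that
+ # indicator; under a normal approximation that is roughly the lowest ~9% of
+ # scores on each indicator.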
+
+ def main():
+     col_logo, col_name = st.columns([1, 3])
+     with col_logo:
+         st.image("Screenshot 2024-08-07 at 1.05.24 PM.png")
+     with col_name:
+         st.title("Learning Indicator Analysis")
+         st.subheader("Student and class remediation based on question data and student data. Upload files below.")
+
+     # Upload the dataframes
+     col_a, col_b = st.columns(2)
+     with col_a:
+         question_data_file = st.file_uploader("Upload Question Data CSV", type="csv")
+     with col_b:
+         student_performance_data_file = st.file_uploader("Upload Student Performance Data CSV", type="csv")
+     st.write("----------------------")
+
+     if question_data_file and student_performance_data_file:
+         question_data = pd.read_csv(question_data_file)
+         student_performance_data = pd.read_csv(student_performance_data_file)
+
+         # Analyze performance and prioritize LIs
+         category_counts = analyze_student_performance_by_li(question_data, student_performance_data)
+         prioritized_lis = prioritize_lis(category_counts)
+
+         # Merge with original question data to get the learning indicator text
+         prioritized_lis = pd.merge(prioritized_lis, question_data[['learning_indicator_id', 'learning_indicator_text']].drop_duplicates(), on='learning_indicator_id', how='left')
+
+         # Display the results with filters
+         st.write("Learning Indicator Analysis with Priority")
+
+         def highlight_priority(row):
+             if row.Priority == 'High':
+                 return ['background-color: red'] * len(row)
+             elif row.Priority == 'Medium':
+                 return ['background-color: yellow'] * len(row)
+             elif row.Priority == 'Low':
+                 return ['background-color: green'] * len(row)
+             else:
+                 return [''] * len(row)
+
+         col1, col2 = st.columns(2)
+         with col1:
+             st.dataframe(prioritized_lis.style.apply(highlight_priority, axis=1))
+             overall_li_level = mean_li_level_analysis(student_performance_data, question_data)
+             overall_li_level = overall_li_level.sort_values("normalised_score")
+             st.dataframe(overall_li_level)
+         with col2:
+             plt_data = prepare_data_for_ridge_plot(student_performance_data, question_data)
+             plt_fig = create_ridge_plot(plt_data)
+             st.pyplot(plt_fig)
+         st.write("---------------------------")
+         col3, col4 = st.columns(2)
+         li_remediation_groups, no_remediation_df = remediation_groups(student_performance_data, question_data)
+         with col3:
+             st.write("Student Group Remediation based on LI")
+             st.dataframe(li_remediation_groups)
+         with col4:
+             st.write("Students that are not part of group remediation")
+             st.dataframe(no_remediation_df)
+
+         # Filters for LI ID and Priority
+         li_id_filter = st.multiselect("Exclude LI ids:", prioritized_lis['learning_indicator_id'].unique())
+         priority_filter = st.multiselect("Exclude Priority:", prioritized_lis["Priority"].unique())
+         if li_id_filter:
+             prioritized_lis = prioritized_lis[~prioritized_lis["learning_indicator_id"].isin(li_id_filter)]
+             question_data = question_data[~question_data["learning_indicator_id"].isin(li_id_filter)]
+         if priority_filter:
+             li_ids_out = prioritized_lis[prioritized_lis["Priority"].isin(priority_filter)]["learning_indicator_id"].unique().tolist()
+             question_data = question_data[~question_data["learning_indicator_id"].isin(li_ids_out)]
+
+         # Button to generate student-level ranking
+         if st.button("Generate Student Level Ranking"):
+             student_ranking = student_level_analysis(student_performance_data, question_data, prioritized_lis)
+             st.write("Student Level Learning Indicator Ranking")
+             st.dataframe(student_ranking)
+
+ if __name__ == "__main__":
+     main()
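
Assuming the dependencies pinned in requirements.txt (added below) are installed via "pip install -r requirements.txt", the app can be launched with Streamlit's standard CLI as "streamlit run li_analysys.py"; question_paper.csv and students_df.csv from this commit work as sample uploads.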
make_data.ipynb ADDED
The diff for this file is too large to render.
 
question_paper.csv ADDED
@@ -0,0 +1,16 @@
+ question_number,full_marks,learning_indicator_id,learning_indicator_text
+ 1,5,LI001,Understand basic algebraic concepts
+ 2,4,LI002,Apply geometric principles
+ 3,3,LI003,Solve linear equations
+ 4,6,LI004,Interpret statistical data
+ 5,5,LI005,Analyze historical events
+ 6,4,LI006,Understand economic theories
+ 7,3,LI007,Apply scientific method
+ 8,6,LI008,Comprehend literary texts
+ 9,5,LI001,Understand basic algebraic concepts
+ 10,4,LI002,Apply geometric principles
+ 11,3,LI003,Solve linear equations
+ 12,6,LI004,Interpret statistical data
+ 13,5,LI005,Analyze historical events
+ 14,4,LI006,Understand economic theories
+ 15,3,LI007,Apply scientific method
requirements.txt ADDED
@@ -0,0 +1,73 @@
+ altair==5.3.0
+ appnope==0.1.4
+ asttokens==2.4.1
+ attrs==24.1.0
+ blinker==1.8.2
+ cachetools==5.4.0
+ certifi==2024.7.4
+ charset-normalizer==3.3.2
+ click==8.1.7
+ comm==0.2.2
+ contourpy==1.2.1
+ cycler==0.12.1
+ debugpy==1.8.3
+ decorator==5.1.1
+ exceptiongroup==1.2.2
+ executing==2.0.1
+ fonttools==4.53.1
+ gitdb==4.0.11
+ GitPython==3.1.43
+ idna==3.7
+ ipykernel==6.29.5
+ ipython==8.26.0
+ jedi==0.19.1
+ Jinja2==3.1.4
+ jsonschema==4.23.0
+ jsonschema-specifications==2023.12.1
+ jupyter_client==8.6.2
+ jupyter_core==5.7.2
+ kiwisolver==1.4.5
+ markdown-it-py==3.0.0
+ MarkupSafe==2.1.5
+ matplotlib==3.9.1
+ matplotlib-inline==0.1.7
+ mdurl==0.1.2
+ nest-asyncio==1.6.0
+ numpy==2.0.1
+ packaging==24.1
+ pandas==2.2.2
+ parso==0.8.4
+ pexpect==4.9.0
+ pillow==10.4.0
+ platformdirs==4.2.2
+ prompt_toolkit==3.0.47
+ protobuf==5.27.3
+ psutil==6.0.0
+ ptyprocess==0.7.0
+ pure_eval==0.2.3
+ pyarrow==17.0.0
+ pydeck==0.9.1
+ Pygments==2.18.0
+ pyparsing==3.1.2
+ python-dateutil==2.9.0.post0
+ pytz==2024.1
+ pyzmq==26.1.0
+ referencing==0.35.1
+ requests==2.32.3
+ rich==13.7.1
+ rpds-py==0.19.1
+ scipy==1.14.0
+ seaborn==0.13.2
+ six==1.16.0
+ smmap==5.0.1
+ stack-data==0.6.3
+ streamlit==1.37.0
+ tenacity==8.5.0
+ toml==0.10.2
+ toolz==0.12.1
+ tornado==6.4.1
+ traitlets==5.14.3
+ typing_extensions==4.12.2
+ tzdata==2024.1
+ urllib3==2.2.2
+ wcwidth==0.2.13
students_df.csv ADDED
@@ -0,0 +1,526 @@
+ question_number,student_id,marks_obtained,maximum_marks
+ 1,1,3,5
+ 2,1,1,4
+ 3,1,0,3
+ 4,1,6,6
+ 5,1,1,5
+ 6,1,1,4
+ 7,1,3,3
+ 8,1,6,6
+ 9,1,4,5
+ 10,1,4,4
+ 11,1,3,3
+ 12,1,2,6
+ 13,1,5,5
+ 14,1,0,4
+ 15,1,0,3
+ 1,2,2,5
+ 2,2,4,4
+ 3,2,0,3
+ 4,2,4,6
+ 5,2,2,5
+ 6,2,1,4
+ 7,2,3,3
+ 8,2,1,6
+ 9,2,4,5
+ 10,2,1,4
+ 11,2,3,3
+ 12,2,3,6
+ 13,2,5,5
+ 14,2,0,4
+ 15,2,2,3
+ 1,3,4,5
+ 2,3,1,4
+ 3,3,1,3
+ 4,3,4,6
+ 5,3,0,5
+ 6,3,1,4
+ 7,3,3,3
+ 8,3,6,6
+ 9,3,3,5
+ 10,3,0,4
+ 11,3,1,3
+ 12,3,2,6
+ 13,3,2,5
+ 14,3,3,4
+ 15,3,3,3
+ 1,4,3,5
+ 2,4,4,4
+ 3,4,0,3
+ 4,4,6,6
+ 5,4,2,5
+ 6,4,2,4
+ 7,4,2,3
+ 8,4,1,6
+ 9,4,3,5
+ 10,4,3,4
+ 11,4,0,3
+ 12,4,4,6
+ 13,4,1,5
+ 14,4,4,4
+ 15,4,3,3
+ 1,5,4,5
+ 2,5,1,4
+ 3,5,1,3
+ 4,5,6,6
+ 5,5,1,5
+ 6,5,3,4
+ 7,5,0,3
+ 8,5,0,6
+ 9,5,3,5
+ 10,5,0,4
+ 11,5,2,3
+ 12,5,3,6
+ 13,5,0,5
+ 14,5,3,4
+ 15,5,2,3
+ 1,6,3,5
+ 2,6,2,4
+ 3,6,3,3
+ 4,6,4,6
+ 5,6,4,5
+ 6,6,2,4
+ 7,6,2,3
+ 8,6,4,6
+ 9,6,1,5
+ 10,6,3,4
+ 11,6,0,3
+ 12,6,3,6
+ 13,6,5,5
+ 14,6,1,4
+ 15,6,3,3
+ 1,7,4,5
+ 2,7,0,4
+ 3,7,3,3
+ 4,7,2,6
+ 5,7,4,5
+ 6,7,2,4
+ 7,7,3,3
+ 8,7,1,6
+ 9,7,2,5
+ 10,7,2,4
+ 11,7,1,3
+ 12,7,4,6
+ 13,7,3,5
+ 14,7,1,4
+ 15,7,1,3
+ 1,8,0,5
+ 2,8,2,4
+ 3,8,0,3
+ 4,8,2,6
+ 5,8,3,5
+ 6,8,4,4
+ 7,8,1,3
+ 8,8,3,6
+ 9,8,3,5
+ 10,8,1,4
+ 11,8,3,3
+ 12,8,3,6
+ 13,8,4,5
+ 14,8,3,4
+ 15,8,0,3
+ 1,9,3,5
+ 2,9,2,4
+ 3,9,2,3
+ 4,9,4,6
+ 5,9,1,5
+ 6,9,0,4
+ 7,9,0,3
+ 8,9,5,6
+ 9,9,2,5
+ 10,9,2,4
+ 11,9,0,3
+ 12,9,2,6
+ 13,9,3,5
+ 14,9,1,4
+ 15,9,2,3
+ 1,10,4,5
+ 2,10,0,4
+ 3,10,3,3
+ 4,10,2,6
+ 5,10,1,5
+ 6,10,1,4
+ 7,10,3,3
+ 8,10,4,6
+ 9,10,0,5
+ 10,10,2,4
+ 11,10,2,3
+ 12,10,1,6
+ 13,10,0,5
+ 14,10,2,4
+ 15,10,2,3
+ 1,11,4,5
+ 2,11,3,4
+ 3,11,1,3
+ 4,11,4,6
+ 5,11,2,5
+ 6,11,0,4
+ 7,11,2,3
+ 8,11,2,6
+ 9,11,3,5
+ 10,11,4,4
+ 11,11,0,3
+ 12,11,3,6
+ 13,11,2,5
+ 14,11,3,4
+ 15,11,3,3
+ 1,12,3,5
+ 2,12,1,4
+ 3,12,3,3
+ 4,12,5,6
+ 5,12,2,5
+ 6,12,2,4
+ 7,12,1,3
+ 8,12,2,6
+ 9,12,2,5
+ 10,12,1,4
+ 11,12,3,3
+ 12,12,6,6
+ 13,12,0,5
+ 14,12,0,4
+ 15,12,2,3
+ 1,13,3,5
+ 2,13,1,4
+ 3,13,3,3
+ 4,13,2,6
+ 5,13,5,5
+ 6,13,4,4
+ 7,13,3,3
+ 8,13,6,6
+ 9,13,1,5
+ 10,13,2,4
+ 11,13,0,3
+ 12,13,6,6
+ 13,13,0,5
+ 14,13,2,4
+ 15,13,3,3
+ 1,14,3,5
+ 2,14,2,4
+ 3,14,1,3
+ 4,14,1,6
+ 5,14,0,5
+ 6,14,4,4
+ 7,14,0,3
+ 8,14,0,6
+ 9,14,5,5
+ 10,14,1,4
+ 11,14,0,3
+ 12,14,0,6
+ 13,14,0,5
+ 14,14,0,4
+ 15,14,1,3
+ 1,15,0,5
+ 2,15,2,4
+ 3,15,2,3
+ 4,15,0,6
+ 5,15,0,5
+ 6,15,0,4
+ 7,15,2,3
+ 8,15,2,6
+ 9,15,3,5
+ 10,15,4,4
+ 11,15,0,3
+ 12,15,3,6
+ 13,15,4,5
+ 14,15,2,4
+ 15,15,2,3
+ 1,16,3,5
+ 2,16,0,4
+ 3,16,0,3
+ 4,16,5,6
+ 5,16,0,5
+ 6,16,3,4
+ 7,16,1,3
+ 8,16,5,6
+ 9,16,0,5
+ 10,16,2,4
+ 11,16,2,3
+ 12,16,4,6
+ 13,16,4,5
+ 14,16,1,4
+ 15,16,1,3
+ 1,17,1,5
+ 2,17,2,4
+ 3,17,1,3
+ 4,17,3,6
+ 5,17,4,5
+ 6,17,4,4
+ 7,17,3,3
+ 8,17,0,6
+ 9,17,0,5
+ 10,17,2,4
+ 11,17,2,3
+ 12,17,4,6
+ 13,17,1,5
+ 14,17,0,4
+ 15,17,3,3
+ 1,18,3,5
+ 2,18,2,4
+ 3,18,2,3
+ 4,18,5,6
+ 5,18,3,5
+ 6,18,1,4
+ 7,18,0,3
+ 8,18,5,6
+ 9,18,2,5
+ 10,18,3,4
+ 11,18,3,3
+ 12,18,2,6
+ 13,18,3,5
+ 14,18,4,4
+ 15,18,3,3
+ 1,19,0,5
+ 2,19,1,4
+ 3,19,3,3
+ 4,19,5,6
+ 5,19,2,5
+ 6,19,3,4
+ 7,19,2,3
+ 8,19,1,6
+ 9,19,1,5
+ 10,19,3,4
+ 11,19,2,3
+ 12,19,4,6
+ 13,19,0,5
+ 14,19,2,4
+ 15,19,3,3
+ 1,20,1,5
+ 2,20,1,4
+ 3,20,1,3
+ 4,20,5,6
+ 5,20,4,5
+ 6,20,0,4
+ 7,20,2,3
+ 8,20,4,6
+ 9,20,4,5
+ 10,20,4,4
+ 11,20,0,3
+ 12,20,3,6
+ 13,20,2,5
+ 14,20,0,4
+ 15,20,2,3
+ 1,21,2,5
+ 2,21,4,4
+ 3,21,3,3
+ 4,21,3,6
+ 5,21,2,5
+ 6,21,0,4
+ 7,21,1,3
+ 8,21,2,6
+ 9,21,3,5
+ 10,21,2,4
+ 11,21,3,3
+ 12,21,3,6
+ 13,21,3,5
+ 14,21,0,4
+ 15,21,0,3
+ 1,22,2,5
+ 2,22,2,4
+ 3,22,1,3
+ 4,22,6,6
+ 5,22,3,5
+ 6,22,2,4
+ 7,22,0,3
+ 8,22,6,6
+ 9,22,5,5
+ 10,22,1,4
+ 11,22,3,3
+ 12,22,3,6
+ 13,22,1,5
+ 14,22,3,4
+ 15,22,0,3
+ 1,23,5,5
+ 2,23,2,4
+ 3,23,2,3
+ 4,23,3,6
+ 5,23,2,5
+ 6,23,0,4
+ 7,23,0,3
+ 8,23,3,6
+ 9,23,5,5
+ 10,23,2,4
+ 11,23,1,3
+ 12,23,6,6
+ 13,23,1,5
+ 14,23,1,4
+ 15,23,3,3
+ 1,24,5,5
+ 2,24,4,4
+ 3,24,0,3
+ 4,24,6,6
+ 5,24,4,5
+ 6,24,1,4
+ 7,24,2,3
+ 8,24,0,6
+ 9,24,3,5
+ 10,24,1,4
+ 11,24,3,3
+ 12,24,0,6
+ 13,24,2,5
+ 14,24,2,4
+ 15,24,0,3
+ 1,25,5,5
+ 2,25,3,4
+ 3,25,1,3
+ 4,25,0,6
+ 5,25,0,5
+ 6,25,4,4
+ 7,25,2,3
+ 8,25,5,6
+ 9,25,1,5
+ 10,25,4,4
+ 11,25,1,3
+ 12,25,3,6
+ 13,25,0,5
+ 14,25,1,4
+ 15,25,1,3
+ 1,26,3,5
+ 2,26,3,4
+ 3,26,2,3
+ 4,26,1,6
+ 5,26,1,5
+ 6,26,3,4
+ 7,26,1,3
+ 8,26,6,6
+ 9,26,3,5
+ 10,26,1,4
+ 11,26,1,3
+ 12,26,4,6
+ 13,26,2,5
+ 14,26,0,4
+ 15,26,2,3
+ 1,27,3,5
+ 2,27,4,4
+ 3,27,2,3
+ 4,27,6,6
+ 5,27,1,5
+ 6,27,3,4
+ 7,27,3,3
+ 8,27,4,6
+ 9,27,3,5
+ 10,27,1,4
+ 11,27,1,3
+ 12,27,5,6
+ 13,27,4,5
+ 14,27,0,4
+ 15,27,0,3
+ 1,28,5,5
+ 2,28,3,4
+ 3,28,0,3
+ 4,28,4,6
+ 5,28,2,5
+ 6,28,1,4
+ 7,28,3,3
+ 8,28,5,6
+ 9,28,5,5
+ 10,28,1,4
+ 11,28,3,3
+ 12,28,0,6
+ 13,28,4,5
+ 14,28,4,4
+ 15,28,2,3
+ 1,29,4,5
+ 2,29,3,4
+ 3,29,2,3
+ 4,29,4,6
+ 5,29,1,5
+ 6,29,3,4
+ 7,29,2,3
+ 8,29,2,6
+ 9,29,1,5
+ 10,29,1,4
+ 11,29,2,3
+ 12,29,0,6
+ 13,29,3,5
+ 14,29,4,4
+ 15,29,2,3
+ 1,30,0,5
+ 2,30,4,4
+ 3,30,3,3
+ 4,30,3,6
+ 5,30,1,5
+ 6,30,0,4
+ 7,30,1,3
+ 8,30,6,6
+ 9,30,1,5
+ 10,30,4,4
+ 11,30,1,3
+ 12,30,6,6
+ 13,30,4,5
+ 14,30,1,4
+ 15,30,0,3
+ 1,31,3,5
+ 2,31,3,4
+ 3,31,3,3
+ 4,31,0,6
+ 5,31,1,5
+ 6,31,1,4
+ 7,31,2,3
+ 8,31,5,6
+ 9,31,0,5
+ 10,31,3,4
+ 11,31,2,3
+ 12,31,1,6
+ 13,31,0,5
+ 14,31,3,4
+ 15,31,3,3
+ 1,32,0,5
+ 2,32,0,4
+ 3,32,1,3
+ 4,32,4,6
+ 5,32,2,5
+ 6,32,1,4
+ 7,32,3,3
+ 8,32,0,6
+ 9,32,3,5
+ 10,32,4,4
+ 11,32,0,3
+ 12,32,1,6
+ 13,32,2,5
+ 14,32,4,4
+ 15,32,3,3
+ 1,33,4,5
+ 2,33,2,4
+ 3,33,3,3
+ 4,33,5,6
+ 5,33,5,5
+ 6,33,3,4
+ 7,33,0,3
+ 8,33,5,6
+ 9,33,5,5
+ 10,33,4,4
+ 11,33,0,3
+ 12,33,4,6
+ 13,33,5,5
+ 14,33,1,4
+ 15,33,1,3
+ 1,34,3,5
+ 2,34,3,4
+ 3,34,3,3
+ 4,34,4,6
+ 5,34,3,5
+ 6,34,4,4
+ 7,34,1,3
+ 8,34,5,6
+ 9,34,5,5
+ 10,34,2,4
+ 11,34,0,3
+ 12,34,2,6
+ 13,34,4,5
+ 14,34,2,4
+ 15,34,2,3
+ 1,35,1,5
+ 2,35,1,4
+ 3,35,1,3
+ 4,35,6,6
+ 5,35,3,5
+ 6,35,1,4
+ 7,35,3,3
+ 8,35,4,6
+ 9,35,3,5
+ 10,35,1,4
+ 11,35,2,3
+ 12,35,0,6
+ 13,35,2,5
+ 14,35,1,4
+ 15,35,0,3