togokah committed
Commit 42785cf
1 Parent(s): 2609fac
pages/2_👨‍🏫_Начало_работы.py CHANGED
@@ -194,7 +194,6 @@ if st.session_state.get('-LOGGED_IN_BOOL-'):
     logs=LOGS,
     logs_d=LOGS_D,
     progress=PROGRESS_BAR,
-    progress_d=PROGRESS_BAR_D,
     progress_s=PROGRESS_BAR_S,
     level=CEFR_TEXT_LEVEL,
     tw_mode_automatic_mode=TARGET_WORDS_MODE,
pages/4_📝_Онлайн-тест.py CHANGED
@@ -38,7 +38,7 @@ if st.session_state.get('-ONLINE_TEST_READY-') and st.session_state.get('-LOGGED
       "Неуместные дистракторы": ''}
      for i in range(len(tasks))]),
     num_rows="fixed",
-    height=40*len_answers,
+    height=45*len_answers,
     use_container_width=True)
   COMMENTS = ONLINE_TEST.text_input(label='**Прокомментировать**',
                                     placeholder='Напишите комментарий')
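The only change here is the row-height multiplier for the answers editor, from 40 to 45 px per row. Below is a minimal sketch of the same `st.data_editor` sizing pattern, assuming `len_answers` counts the displayed rows; the sample columns and row count are illustrative, not the app's real task data:

```python
import pandas as pd
import streamlit as st

# Hypothetical answer table standing in for the app's per-task rows.
answers = pd.DataFrame([{"Ответ": "", "Неуместные дистракторы": ""} for _ in range(10)])
len_answers = len(answers)

st.data_editor(
    answers,
    num_rows="fixed",          # cells are editable, but rows cannot be added or removed
    height=45 * len_answers,   # this commit bumps the per-row multiplier from 40 to 45 px
    use_container_width=True,
)
```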
utilities_language_bert/esp_main_workflow_bert.py CHANGED
@@ -22,7 +22,6 @@ def main_workflow(
         logs: ST_WIDGETS,
         logs_d: ST_WIDGETS,
         progress: st_progress,
-        progress_d: st_progress,
         progress_s: st_progress,
         level: str,
         tw_mode_automatic_mode: str,
@@ -40,7 +39,6 @@ def main_workflow(
     :param logs: widget to output logs to
     :param logs_d: show how many distractors already processed
     :param progress: progress bar
-    :param progress_d: distractors progress bar
     :param progress_s: sentences progress bar
     :param target_words: how target words are chosen: by user or automatically
     :param tw_mode_automatic_mode:
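The unused distractors progress bar is removed end to end: `progress_d=PROGRESS_BAR_D` disappears from the call in pages/2_👨‍🏫_Начало_работы.py above, and the matching parameter and `:param` line disappear here, keeping the call site and the signature in sync. A minimal sketch of the trimmed interface, with `ST_WIDGETS` and `st_progress` treated as opaque stand-in aliases and the parameter list abbreviated to what the diff shows:

```python
from typing import Any

ST_WIDGETS = Any     # stand-in for the app's Streamlit widget alias
st_progress = Any    # stand-in for the Streamlit progress-bar type

def main_workflow(logs: ST_WIDGETS,
                  logs_d: ST_WIDGETS,
                  progress: st_progress,
                  progress_s: st_progress,   # progress_d no longer exists; callers must drop the keyword too
                  level: str,
                  tw_mode_automatic_mode: str) -> None:
    """Abbreviated docstring mirroring the diff.

    :param logs: widget to output logs to
    :param logs_d: show how many distractors already processed
    :param progress: progress bar
    :param progress_s: sentences progress bar
    :param tw_mode_automatic_mode: how target words are chosen
    """
```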
utilities_language_bert/esp_sentence_bert.py CHANGED
@@ -73,7 +73,7 @@ class SENTENCE:
                 'sentence_text': self.original,
                 'original_text': f'{original_token1.text} {original_token2.text}',
                 'lemma': token[0],
-                'pos': 'phrase',
+                'pos': ('phrase', 'phrase'),
                 'gender': tags.get('Gender'),
                 'tags': tags,
                 'position_in_sentence': self.original.find(original_token1.text),
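For two-word phrases the record now carries one POS tag per token, `('phrase', 'phrase')`, instead of a single string, presumably so code that reads the tag per token position handles phrases the same way as single words. A minimal, self-contained sketch of the resulting dict shape; the sentence, tokens, and tags below are illustrative, only the `'pos'` value mirrors the diff:

```python
# Illustrative values standing in for self.original, the two spaCy tokens, and their tags.
sentence_text = 'Me gusta la comida rápida.'
original_token1_text, original_token2_text = 'comida', 'rápida'
token = ('comida rápida', None)          # hypothetical (lemma, payload) pair
tags = {'Gender': 'Fem'}

phrase_record = {
    'sentence_text': sentence_text,
    'original_text': f'{original_token1_text} {original_token2_text}',
    'lemma': token[0],
    'pos': ('phrase', 'phrase'),         # one tag per token of the two-word phrase
    'gender': tags.get('Gender'),
    'tags': tags,
    'position_in_sentence': sentence_text.find(original_token1_text),
}
```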
utilities_language_general/esp_utils.py CHANGED
@@ -186,7 +186,7 @@ def get_distractors_from_model_bert(model, text_with_masked_task: str, lemma: st
     try:
         if distractor_minimum:
             bert_candidates = [token for token in model(text_with_masked_task,
-                                                        top_k=max_num_distractors + 20,
+                                                        top_k=max_num_distractors + 100,
                                                         targets=list(distractor_minimum))]
         else:
             bert_candidates = [token for token in model(text_with_masked_task, top_k=max_num_distractors + 100)]
@@ -197,7 +197,8 @@ def get_distractors_from_model_bert(model, text_with_masked_task: str, lemma: st
             continue
         if candidate['token_str'].isalpha():
             candidate_morph = nlp(candidate['token_str'])[0]
-            inflected_candidates.append((f"{candidate_morph.lemma_}_{candidate_morph.text}_{candidate_morph.pos_}", candidate['score']))
+            inflected_candidates.append((f"{candidate_morph.lemma_}_{candidate_morph.text}_{candidate_morph.pos_}",
+                                         candidate['score']))
     except KeyError:
         return None
     for candidate_distractor in inflected_candidates:
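With this change both branches over-generate the same number of candidates (`top_k=max_num_distractors + 100`) whether or not `distractor_minimum` restricts the vocabulary, and the long tuple append is simply wrapped across two lines. A minimal sketch of the same candidate-generation pattern with a 🤗 Transformers fill-mask pipeline and spaCy; the checkpoint names, sample sentence, and `distractor_minimum` set are placeholder assumptions, not necessarily what the app loads:

```python
import spacy
from transformers import pipeline

nlp = spacy.load('es_core_news_sm')                                        # assumed Spanish spaCy model
model = pipeline('fill-mask', model='dccuchile/bert-base-spanish-wwm-cased')  # assumed Spanish BERT checkpoint

text_with_masked_task = f'Quiero un {model.tokenizer.mask_token} de agua.'
max_num_distractors = 4
distractor_minimum = {'vaso', 'litro', 'poco'}     # hypothetical restricted vocabulary

# Over-generate by 100 in both branches so later filtering still has enough candidates.
if distractor_minimum:
    bert_candidates = model(text_with_masked_task,
                            top_k=max_num_distractors + 100,
                            targets=list(distractor_minimum))
else:
    bert_candidates = model(text_with_masked_task, top_k=max_num_distractors + 100)

inflected_candidates = []
for candidate in bert_candidates:
    if candidate['token_str'].isalpha():
        candidate_morph = nlp(candidate['token_str'])[0]
        # Keep lemma, surface form, and POS together with the model's score,
        # mirroring the wrapped append in the diff.
        inflected_candidates.append((f'{candidate_morph.lemma_}_{candidate_morph.text}_{candidate_morph.pos_}',
                                     candidate['score']))
```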