a-v-bely committed on
Commit
e5cd1a4
1 Parent(s): 94004b3
utilities_cookies/encrypted_cookie_manager.py CHANGED
@@ -1,10 +1,10 @@
1
  import os
2
  import base64
3
  import streamlit as st
4
- from typing import Tuple, Optional, MutableMapping
5
  from cryptography import fernet
6
  from cryptography.fernet import Fernet
7
  from cryptography.hazmat.primitives import hashes
 
8
  from utilities_cookies.cookie_manager import CookieManager
9
  from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
10
 
 
1
  import os
2
  import base64
3
  import streamlit as st
 
4
  from cryptography import fernet
5
  from cryptography.fernet import Fernet
6
  from cryptography.hazmat.primitives import hashes
7
+ from typing import Tuple, Optional, MutableMapping
8
  from utilities_cookies.cookie_manager import CookieManager
9
  from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
10
 
utilities_database/user_database_widgets.py CHANGED
@@ -91,8 +91,8 @@ class LogIn:
91
 
92
  if login_submit_button:
93
  authenticate_user_check = db_utils.check_usr_pass(user_log_in_database=user_login_table,
94
- user_name=user_name,
95
- password=password)
96
 
97
  if not authenticate_user_check:
98
  st.error("Неверное имя пользователя или пароль!")
 
91
 
92
  if login_submit_button:
93
  authenticate_user_check = db_utils.check_usr_pass(user_log_in_database=user_login_table,
94
+ user_name=user_name,
95
+ password=password)
96
 
97
  if not authenticate_user_check:
98
  st.error("Неверное имя пользователя или пароль!")
utilities_language_general/esp_utils.py CHANGED
@@ -113,7 +113,6 @@ def get_distractors_from_model(doc, model, scaler, classifier, pos_dict:dict, ta
113
 
114
  distractors = []
115
  query = lemma if '_' in lemma else f'{lemma}_{pos}'
116
- raw_lemma = query
117
  lemma = '_'.join(lemma.split('_')[::2])
118
  if model.has_index_for(query):
119
  candidates = model.most_similar(query, topn=max_num_distractors + 100)
@@ -128,7 +127,7 @@ def get_distractors_from_model(doc, model, scaler, classifier, pos_dict:dict, ta
128
  if candidate[0].count('_') == 1 and pos != 'phrase':
129
  distractor_lemma, distractor_pos = candidate[0].split('_')
130
  decision = make_decision(doc, model_type='w2v', model=model, scaler=scaler, classifier=classifier, pos_dict=pos_dict,
131
- level=level_name, target_lemma=raw_lemma, target_text=target_text, target_pos=pos, target_position=lemma_index,
132
  substitute_lemma=distractor_lemma, substitute_pos=distractor_pos)
133
  distractor_similarity = candidate[1]
134
  candidate_gender = get_tags(distractor_lemma).get('Gender')
@@ -161,7 +160,7 @@ def get_distractors_from_model(doc, model, scaler, classifier, pos_dict:dict, ta
161
  distractor_lemma = f'{d1_lemma}_{d2_lemma}'
162
  distractor_similarity = candidate[1]
163
  decision = make_decision(doc, model_type='w2v', model=model, scaler=scaler, classifier=classifier, pos_dict=pos_dict,
164
- level=level_name, target_lemma=raw_lemma, target_text=target_text, target_pos=pos, target_position=lemma_index,
165
  substitute_lemma=candidate[0], substitute_pos=d_pos)
166
  condition = (((d1_pos == pos or d2_pos == pos)
167
  or (COMBINE_POS['phrase'][level_name].get(d_pos) is not None and COMBINE_POS['phrase'][level_name].get(pos) is not None
 
113
 
114
  distractors = []
115
  query = lemma if '_' in lemma else f'{lemma}_{pos}'
 
116
  lemma = '_'.join(lemma.split('_')[::2])
117
  if model.has_index_for(query):
118
  candidates = model.most_similar(query, topn=max_num_distractors + 100)
 
127
  if candidate[0].count('_') == 1 and pos != 'phrase':
128
  distractor_lemma, distractor_pos = candidate[0].split('_')
129
  decision = make_decision(doc, model_type='w2v', model=model, scaler=scaler, classifier=classifier, pos_dict=pos_dict,
130
+ level=level_name, target_lemma=query, target_text=target_text, target_pos=pos, target_position=lemma_index,
131
  substitute_lemma=distractor_lemma, substitute_pos=distractor_pos)
132
  distractor_similarity = candidate[1]
133
  candidate_gender = get_tags(distractor_lemma).get('Gender')
 
160
  distractor_lemma = f'{d1_lemma}_{d2_lemma}'
161
  distractor_similarity = candidate[1]
162
  decision = make_decision(doc, model_type='w2v', model=model, scaler=scaler, classifier=classifier, pos_dict=pos_dict,
163
+ level=level_name, target_lemma=query, target_text=target_text, target_pos=pos, target_position=lemma_index,
164
  substitute_lemma=candidate[0], substitute_pos=d_pos)
165
  condition = (((d1_pos == pos or d2_pos == pos)
166
  or (COMBINE_POS['phrase'][level_name].get(d_pos) is not None and COMBINE_POS['phrase'][level_name].get(pos) is not None