RMakushkin committed
Commit 03f8214
1 Parent(s): 2152801

Update func.py

Files changed (1)
  1. func.py +27 -15
func.py CHANGED
@@ -15,24 +15,36 @@ def filter_by_ganre(df: pd.DataFrame, ganre_list: list):
     filt_ind = filtered_df.index.to_list()
     return filt_ind
 
-def mean_pooling(model_output, attention_mask):
-    token_embeddings = model_output['last_hidden_state']
-    input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
-    sum_embeddings = torch.sum(token_embeddings * input_mask_expanded, 1)
-    sum_mask = torch.clamp(input_mask_expanded.sum(1), min=1e-9)
-    return sum_embeddings / sum_mask
+# def mean_pooling(model_output, attention_mask):
+#     token_embeddings = model_output['last_hidden_state']
+#     input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
+#     sum_embeddings = torch.sum(token_embeddings * input_mask_expanded, 1)
+#     sum_mask = torch.clamp(input_mask_expanded.sum(1), min=1e-9)
+#     return sum_embeddings / sum_mask
 
-def recommendation(filt_ind: list, embeddings: np.array, user_text: str, n=10):
-    token_user_text = tokenizer(user_text, return_tensors='pt', padding='max_length', truncation=True, max_length=512)
-    user_embeddings = torch.Tensor().to(device)
+# def recommendation(filt_ind: list, embeddings: np.array, user_text: str, n=10):
+#     token_user_text = tokenizer(user_text, return_tensors='pt', padding='max_length', truncation=True, max_length=512)
+#     user_embeddings = torch.Tensor().to(device)
+#     model.to(device)
+#     model.eval()
+#     with torch.no_grad():
+#         batch = {k: v.to(device) for k, v in token_user_text.items()}
+#         outputs = model(**batch)
+#         user_embeddings = torch.cat([user_embeddings, mean_pooling(outputs, batch['attention_mask'])])
+#     user_embeddings = user_embeddings.cpu().numpy()
+#     cosine_similarities = cosine_similarity(embeddings[filt_ind], user_embeddings.reshape(1, -1))
+#     df_res = pd.DataFrame(cosine_similarities.ravel(), columns=['cos_sim']).sort_values('cos_sim', ascending=False)
+#     dict_topn = df_res.iloc[:n, :].cos_sim.to_dict()
+#     return dict_topn
+def recommendation(filt_ind: list, embeddings: np.array, user_text: str, n=10):
+    tokens = tokenizer(user_text, return_tensors="pt", padding=True, truncation=True)
     model.to(device)
     model.eval()
     with torch.no_grad():
-        batch = {k: v.to(device) for k, v in token_user_text.items()}
-        outputs = model(**batch)
-        user_embeddings = torch.cat([user_embeddings, mean_pooling(outputs, batch['attention_mask'])])
-    user_embeddings = user_embeddings.cpu().numpy()
-    cosine_similarities = cosine_similarity(embeddings[filt_ind], user_embeddings.reshape(1, -1))
+        tokens = {key: value.to(model.device) for key, value in tokens.items()}
+        outputs = model(**tokens)
+    user_embedding = outputs.last_hidden_state.mean(dim=1).squeeze().cpu().detach().numpy()
+    cosine_similarities = cosine_similarity(embeddings[filt_ind], user_embedding.reshape(1, -1))
     df_res = pd.DataFrame(cosine_similarities.ravel(), columns=['cos_sim']).sort_values('cos_sim', ascending=False)
     dict_topn = df_res.iloc[:n, :].cos_sim.to_dict()
-    return dict_topn
+    return dict_topn
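
Note on the change: the removed mean_pooling averaged only over real tokens via the attention mask, while the new code takes an unmasked mean over all token positions. Because recommendation now tokenizes a single text with padding=True (which adds no padding to a one-sequence batch), the two agree here; they diverge on padded batches. A standalone sketch with toy tensors illustrating the difference:

import torch

def masked_mean(last_hidden_state, attention_mask):
    # The removed approach: zero out padding positions, average over real tokens only.
    mask = attention_mask.unsqueeze(-1).expand(last_hidden_state.size()).float()
    return torch.sum(last_hidden_state * mask, 1) / torch.clamp(mask.sum(1), min=1e-9)

# Toy batch, hidden size 3: sequence 0 has 2 real tokens + 2 padding, sequence 1 has 4 real tokens.
hidden = torch.randn(2, 4, 3)
attention_mask = torch.tensor([[1, 1, 0, 0], [1, 1, 1, 1]])

plain = hidden.mean(dim=1)                    # new code's pooling: padding included
masked = masked_mean(hidden, attention_mask)  # old code's pooling: padding excluded
print(torch.allclose(plain[1], masked[1]))    # True: sequence 1 has no padding
print(torch.allclose(plain[0], masked[0]))    # False: sequence 0's padding skews the plain mean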
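
For context, a minimal self-contained version of the new recommendation flow. The checkpoint name below is a placeholder for illustration (the repo's actual model, tokenizer, and device are defined elsewhere in func.py), and all other calls are standard transformers / scikit-learn API:

import numpy as np
import pandas as pd
import torch
from sklearn.metrics.pairwise import cosine_similarity
from transformers import AutoModel, AutoTokenizer

MODEL_NAME = "sentence-transformers/all-MiniLM-L6-v2"  # placeholder checkpoint
device = "cuda" if torch.cuda.is_available() else "cpu"
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModel.from_pretrained(MODEL_NAME).to(device).eval()

def recommendation(filt_ind: list, embeddings: np.ndarray, user_text: str, n=10):
    # Tokenize the single query text; padding=True adds nothing for one sequence.
    tokens = tokenizer(user_text, return_tensors="pt", padding=True, truncation=True)
    tokens = {key: value.to(device) for key, value in tokens.items()}
    with torch.no_grad():
        outputs = model(**tokens)
    # Unmasked mean pooling over tokens, as in the committed code.
    user_embedding = outputs.last_hidden_state.mean(dim=1).squeeze().cpu().numpy()
    # Rank the pre-filtered corpus embeddings by cosine similarity to the query.
    cosine_similarities = cosine_similarity(embeddings[filt_ind], user_embedding.reshape(1, -1))
    df_res = pd.DataFrame(cosine_similarities.ravel(), columns=['cos_sim']).sort_values('cos_sim', ascending=False)
    return df_res.iloc[:n, :].cos_sim.to_dict()

One caveat worth noting: the returned dict is keyed by positions within filt_ind (pd.DataFrame assigns a fresh RangeIndex), so a caller maps a key k back to the original row via filt_ind[k].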