Geraldine committed on
Commit b407a17
1 Parent(s): b6402a7

Update README.md

Files changed (1)
  1. README.md +25 -28
README.md CHANGED
@@ -14,7 +14,7 @@ The parameters passed in the URL request are:

https://api.archives-ouvertes.fr/search/UNIV-COTEDAZUR/?q=docType_s:ART&fq=abstract_s:[%22%22%20TO%20*]&fq=domain_s:*shs*&fq=submittedDateY_i:[2020%20TO%202023]&fl=doiId_s,uri_s,title_s,subTitle_s,authFullName_s,producedDate_s,journalTitle_s,journalPublisher_s,abstract_s,domain_s,openAccess_bool

- The embeddings column stores the embeddings of the "combined" column values converted in vectors with the sentence-transformers/all-MiniLM-L6-v2 embeddinsg model.
+ The embeddings corpus stores the embeddings of the "combined" column values, converted into vectors with the sentence-transformers/all-MiniLM-L6-v2 embeddings model.

## Metadata extraction

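For context before the next hunk: `total_count` (see `print(total_count)` in the hunk header below) is the total number of records matched by the query above. A minimal sketch of how it could be obtained, assuming the HAL search API returns the standard Solr `response.numFound` count (this helper is not part of the commit):

```
import requests

url = "https://api.archives-ouvertes.fr/search/UNIV-COTEDAZUR/?q=docType_s:ART&fq=abstract_s:[%22%22%20TO%20*]&fq=domain_s:*shs*&fq=submittedDateY_i:[2020%20TO%202023]&fl=doiId_s,uri_s,title_s,subTitle_s,authFullName_s,producedDate_s,journalTitle_s,journalPublisher_s,abstract_s,domain_s,openAccess_bool"

# Ask for zero rows: we only need the total number of matching records
# (numFound is the standard Solr result count; assumed here for the HAL API)
response = requests.get(f"{url}&rows=0&wt=json")
total_count = response.json()["response"]["numFound"]
print(total_count)
```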
@@ -31,54 +31,51 @@ print(total_count)

# Loop over the records and get metadata
step = 500
- appended_data = []
+ dfs = []
for i in range(1, int(total_count), int(step)):
-     url = f"{url}&rows={step}&start={i}&wt=csv"
-     df = pd.read_csv(url, encoding="utf-8")
-     appended_data.append(df)
- appended_data = pd.concat(appended_data)
+     page_url = f"{url}&rows={step}&start={i}&wt=csv"
+     df = pd.read_csv(page_url, encoding="utf-8")
+     dfs.append(df)
+ df = pd.concat(dfs)

# dedup
- appended_data = appended_data.drop_duplicates(subset=['uri_s'])
+ df = df.drop_duplicates(subset=['uri_s'])

- appended_data.shape
+ df.shape
# returns 2405
- ```
-
- ## Add embeddings (CPU)
-
- ### Solution 1 : with HF Inference API
-
+
+ # New column of concatenated textual data
+ df["combined"] = df.title_s + ". " + df.subTitle_s + ". " + df.abstract_s
+
+ # Save
+ df.to_csv("hal_data.csv", index=False, encoding="utf-8")
```
- import requests
- import json
- from typing import Optional, List, Dict, Any
-
- HF_TOKEN = "<hf_token>"
- model_id = "sentence-transformers/all-MiniLM-L6-v2"
-
- embeddings_api_url = f"https://api-inference.huggingface.co/pipeline/feature-extraction/{model_id}"
- headers = {"Authorization": f"Bearer {HF_TOKEN}"}
-
- def embeddings_query(text:str) -> List:
-     response = requests.post(embeddings_api_url, headers=headers, json={"inputs": text, "options":{"wait_for_model":True}})
-     return response.json()
-
- df = appended_data.replace(np.nan, '')
- df['embeddings'] = df.combined.apply(lambda x:embeddings_query(x.strip()))
+
+ ## Create embeddings with sentence-transformers library (CPU)
+
```
-
- ### Solution 2 : with sentence-transformers library
-
- ```
from sentence_transformers import SentenceTransformer
+ import pickle

model_id = "sentence-transformers/all-MiniLM-L6-v2"
embedder = SentenceTransformer(model_id)

- def embeddings_query(text:str) -> List:
-     return embedder.encode(text,convert_to_tensor=True)
-
- df['embeddings'] = df.combined.apply(lambda x:embeddings_query(x.strip().to_list()))
+ # Test on a simple piece of text
+ embedder.encode(["The Ecology of Fear and Climate"], convert_to_tensor=True)
+
+ # Create the corpus embeddings
+ corpus_embeddings = embedder.encode(df.combined.to_list(), show_progress_bar=True)
+
+ # Save
+ with open("hal_embeddings.pkl", "wb") as fOut:
+     pickle.dump(corpus_embeddings, fOut)
+
+ # Reload
+ with open("hal_embeddings.pkl", "rb") as fIn:
+     corpus_embeddings = pickle.load(fIn)

```
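One caveat on the new `combined` column: the removed Solution 1 code blanked out missing values with `replace(np.nan, '')` before embedding, while the updated snippet concatenates `title_s`, `subTitle_s` and `abstract_s` directly, so a missing subtitle would turn the whole `combined` value into NaN. A minimal guard in the same spirit (not part of the commit):

```
# Mirror the replace(np.nan, '') step from the removed Solution 1 code:
# blank out missing values so string concatenation cannot produce NaN
df = df.fillna("")
df["combined"] = df.title_s + ". " + df.subTitle_s + ". " + df.abstract_s
```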
 
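Once reloaded, `corpus_embeddings` can be queried directly; a minimal usage sketch with the `util.semantic_search` helper from sentence-transformers, assuming `df` is the deduplicated dataframe saved above so that `corpus_id` indexes its rows (the query string is only an example):

```
from sentence_transformers import SentenceTransformer, util

embedder = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")

# Embed a free-text query and rank the corpus by cosine similarity
query_embedding = embedder.encode("climate change and public policy", convert_to_tensor=True)
hits = util.semantic_search(query_embedding, corpus_embeddings, top_k=5)[0]

for hit in hits:
    # corpus_id is the row position in df.combined / df.uri_s
    print(df.iloc[hit["corpus_id"]].uri_s, round(hit["score"], 3))
```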