update readme
Browse files
README.md
CHANGED
@@ -1327,15 +1327,17 @@ for doc in docs:
|
|
1327 |
Note: depending on the language, the download can be quite large.
|
1328 |
|
1329 |
## Search
|
1330 |
-
A full search example:
|
1331 |
```python
|
1332 |
-
#Run: pip install cohere datasets
|
1333 |
from datasets import load_dataset
|
1334 |
-
import
|
1335 |
import cohere
|
|
|
1336 |
|
1337 |
-
co = cohere.Client(
|
1338 |
lang = "simple"
|
|
|
1339 |
|
1340 |
#Load at max 1000 chunks + embeddings
|
1341 |
max_docs = 1000
|
@@ -1350,20 +1352,20 @@ for doc in docs_stream:
|
|
1350 |
if len(docs) >= max_docs:
|
1351 |
break
|
1352 |
|
1353 |
-
doc_embeddings =
|
1354 |
|
1355 |
-
query = 'Who
|
1356 |
response = co.embed(texts=[query], model='embed-multilingual-v3.0', input_type="search_query")
|
1357 |
query_embedding = response.embeddings
|
1358 |
-
query_embedding =
|
1359 |
|
1360 |
# Compute dot score between query embedding and document embeddings
|
1361 |
-
dot_scores =
|
1362 |
-
|
1363 |
|
1364 |
# Print results
|
1365 |
print("Query:", query)
|
1366 |
-
for doc_id in
|
1367 |
print(docs[doc_id]['title'])
|
1368 |
print(docs[doc_id]['text'])
|
1369 |
print(docs[doc_id]['url'], "\n")
|
|
|
1327 |
Note: depending on the language, the download can be quite large.
|
1328 |
|
1329 |
## Search
|
1330 |
+
A full search example (on the first 1,000 paragraphs):
|
1331 |
```python
|
1332 |
+
#Run: pip install cohere datasets numpy
|
1333 |
from datasets import load_dataset
|
1334 |
+
import numpy as np
|
1335 |
import cohere
|
1336 |
+
import os
|
1337 |
|
1338 |
+
co = cohere.Client("YOUR_COHERE_API_KEY") # Add your cohere API key from www.cohere.com
|
1339 |
lang = "simple"
|
1340 |
+
top_k = 3
|
1341 |
|
1342 |
#Load at max 1000 chunks + embeddings
|
1343 |
max_docs = 1000
|
|
|
1352 |
if len(docs) >= max_docs:
|
1353 |
break
|
1354 |
|
1355 |
+
doc_embeddings = np.asarray(doc_embeddings)
|
1356 |
|
1357 |
+
query = 'Who was Alan Turing'
|
1358 |
response = co.embed(texts=[query], model='embed-multilingual-v3.0', input_type="search_query")
|
1359 |
query_embedding = response.embeddings
|
1360 |
+
query_embedding = np.asarray(query_embedding)
|
1361 |
|
1362 |
# Compute dot score between query embedding and document embeddings
|
1363 |
+
dot_scores = np.matmul(query_embedding, doc_embeddings.transpose())[0]
|
1364 |
+
top_k_hits = np.argpartition(dot_scores, -top_k)[-top_k:]
|
1365 |
|
1366 |
# Print results
|
1367 |
print("Query:", query)
|
1368 |
+
for doc_id in top_k_hits:
|
1369 |
print(docs[doc_id]['title'])
|
1370 |
print(docs[doc_id]['text'])
|
1371 |
print(docs[doc_id]['url'], "\n")
|