Update files from the datasets library (from 1.9.0)
Release notes: https://github.com/huggingface/datasets/releases/tag/1.9.0
- README.md +1 -0
- code_search_net.py +2 -2
README.md CHANGED
@@ -31,6 +31,7 @@ task_categories:
 task_ids:
 - language-modeling
 paperswithcode_id: codesearchnet
+pretty_name: CodeSearchNet
 ---
 
 # Dataset Card for CodeSearchNet corpus
code_search_net.py CHANGED
@@ -199,9 +199,9 @@ class CodeSearchNet(datasets.GeneratorBasedBuilder):
         for file_id_, filepath in enumerate(filepaths):
             with open(filepath, encoding="utf-8") as f:
                 for row_id_, row in enumerate(f):
-                    # Key of the example =
+                    # Key of the example = file_id + row_id,
                     # to ensure all examples have a distinct key
-                    id_ = file_id_
+                    id_ = f"{file_id_}_{row_id_}"
                     data = json.loads(row)
                     yield id_, {
                         "repository_name": data["repo"],
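For context, a `datasets` builder's `_generate_examples` yields `(key, example)` pairs, and the keys are expected to be distinct within a split. The standalone sketch below (illustrative only, with hypothetical data, not the dataset script itself) shows why keying on `file_id_` alone collides as soon as a file contains more than one row, and why the composite `f"{file_id_}_{row_id_}"` key stays unique.

# Minimal sketch of the key change above (hypothetical JSON-lines data).

def old_keys(files):
    """Old scheme: one key per file, repeated for every row in that file."""
    for file_id_, rows in enumerate(files):
        for _row in rows:
            yield file_id_

def new_keys(files):
    """New scheme: composite key, unique per (file, row) pair."""
    for file_id_, rows in enumerate(files):
        for row_id_, _row in enumerate(rows):
            yield f"{file_id_}_{row_id_}"

files = [['{"repo": "a"}', '{"repo": "b"}'], ['{"repo": "c"}']]  # two files, 2 + 1 rows

old = list(old_keys(files))   # [0, 0, 1] -> key 0 is duplicated
new = list(new_keys(files))   # ['0_0', '0_1', '1_0'] -> all distinct
assert len(set(old)) < len(old)
assert len(set(new)) == len(new)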