point to github instead of HF
Browse files
- dataset_infos.json +1 -1
- duvel.py +1 -1
dataset_infos.json
CHANGED
@@ -1 +1 @@
|
|
1 |
-
{"default": {"description": "This dataset was created to identity oligogenic variant combinations, i.e. relation between several genes and their mutations, causing genetic diseases in scientific articles written in english. At the moment, it contains only digenic variant combinations, i.e. relations between two genes and at least two variants. The dataset is intended for binary relation extraction where the entities are masked within the text.\n", "citation": "@InProceedings{huggingface:dataset,\ntitle = {A great new dataset},\nauthor={huggingface, Inc.\n},\nyear={2020}\n}\n", "homepage": "https://github.com/cnachteg/DUVEL", "license": "cc-by-nc-sa-4.0", "features": {"sentence": {"dtype": "string", "id": null, "_type": "Value"}, "pmcid": {"dtype": "int32", "id": null, "_type": "Value"}, "gene1": {"dtype": "string", "id": null, "_type": "Value"}, "gene2": {"dtype": "string", "id": null, "_type": "Value"}, "variant1": {"dtype": "string", "id": null, "_type": "Value"}, "variant2": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": [0, 1], "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "task_templates": [{"task": "text-classification", "text_column": "sentence", "label_column": "label"}], "builder_name": "duvel", "config_name": "default", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 5134410, "num_examples": 6553, "dataset_name": "duvel"}, "validation": {"name": "validation", "num_bytes": 155767, "num_examples": 200, "dataset_name": "duvel"}, "test": {"name": "test", "num_bytes": 1310400, "num_examples": 1689, "dataset_name": "duvel"}}, "download_checksums": {"data/train.csv": {"num_bytes": 5023919, "checksum": "7a648d8de3a3bd08076042c2f1d9068f1726334465d0bffcbf9afba52002b098"}, "data/validation.csv": {"num_bytes": 152438, "checksum": 
"120d07603f066cc59fc24fc6bf9e03a6b347dfbbefdeb2eea84fc7b150dff09a"}, "data/test.csv": {"num_bytes": 1281931, "checksum": "76bd20610f9000b2107f166ffa8e5ba54bf7867b98228b1b7257e55ddfb08f9c"}}, "download_size": 6458288, "post_processing_size": null, "dataset_size": 6600577, "size_in_bytes": 13058865}}
|
|
|
1 |
+
{"default": {"description": "This dataset was created to identity oligogenic variant combinations, i.e. relation between several genes and their mutations, causing genetic diseases in scientific articles written in english. At the moment, it contains only digenic variant combinations, i.e. relations between two genes and at least two variants. The dataset is intended for binary relation extraction where the entities are masked within the text.\n", "citation": "@InProceedings{huggingface:dataset,\ntitle = {A great new dataset},\nauthor={huggingface, Inc.\n},\nyear={2020}\n}\n", "homepage": "https://github.com/cnachteg/DUVEL", "license": "cc-by-nc-sa-4.0", "features": {"sentence": {"dtype": "string", "id": null, "_type": "Value"}, "pmcid": {"dtype": "int32", "id": null, "_type": "Value"}, "gene1": {"dtype": "string", "id": null, "_type": "Value"}, "gene2": {"dtype": "string", "id": null, "_type": "Value"}, "variant1": {"dtype": "string", "id": null, "_type": "Value"}, "variant2": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": [0, 1], "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "task_templates": [{"task": "text-classification", "text_column": "sentence", "label_column": "label"}], "builder_name": "duvel", "config_name": "default", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 5134410, "num_examples": 6553, "dataset_name": "duvel"}, "validation": {"name": "validation", "num_bytes": 155767, "num_examples": 200, "dataset_name": "duvel"}, "test": {"name": "test", "num_bytes": 1310400, "num_examples": 1689, "dataset_name": "duvel"}}, "download_checksums": {"https://raw.githubusercontent.com/cnachteg/DUVEL/main/data/train.csv": {"num_bytes": 5023919, "checksum": "7a648d8de3a3bd08076042c2f1d9068f1726334465d0bffcbf9afba52002b098"}, 
"https://raw.githubusercontent.com/cnachteg/DUVEL/main/data/validation.csv": {"num_bytes": 152438, "checksum": "120d07603f066cc59fc24fc6bf9e03a6b347dfbbefdeb2eea84fc7b150dff09a"}, "https://raw.githubusercontent.com/cnachteg/DUVEL/main/data/test.csv": {"num_bytes": 1281931, "checksum": "76bd20610f9000b2107f166ffa8e5ba54bf7867b98228b1b7257e55ddfb08f9c"}}, "download_size": 6458288, "post_processing_size": null, "dataset_size": 6600577, "size_in_bytes": 13058865}}
|
duvel.py
CHANGED
@@ -44,7 +44,7 @@ _LICENSE = "cc-by-nc-sa-4.0"
|
|
44 |
|
45 |
# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
|
46 |
# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
|
47 |
-
_URL = ""
|
48 |
_URLS = {
|
49 |
"train": _URL + "data/train.csv",
|
50 |
"dev": _URL + "data/validation.csv",
|
|
|
44 |
|
45 |
# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
|
46 |
# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
|
47 |
+
_URL = "https://raw.githubusercontent.com/cnachteg/DUVEL/main/"
|
48 |
_URLS = {
|
49 |
"train": _URL + "data/train.csv",
|
50 |
"dev": _URL + "data/validation.csv",
|