MicPie committed on
Commit
0c195d6
1 Parent(s): c7e168b

Delete adaptable_5k.py

Browse files
Files changed (1) hide show
  1. adaptable_5k.py +0 -100
adaptable_5k.py DELETED
@@ -1,100 +0,0 @@
1
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
- """This loads the AdapTable-5k dataset."""
15
-
16
- import json
17
- import os
18
- import pandas as pd
19
-
20
- import datasets
21
-
22
-
23
# TODO: Add BibTeX citation
# BibTeX entry surfaced through `datasets.DatasetInfo.citation` (placeholder fields still TODO).
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {TODO: UPDATE TITLE HERE},
author={TODO: BUT AUTHORS HERE},
year={2020}
}
"""

# TODO: Update description
# Human-readable summary surfaced through `datasets.DatasetInfo.description`.
_DESCRIPTION = """\
The AdapTable dataset consists of tables that naturally occur on the web, that are formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. For more details please see the accompanying dataset card.
"""

# TODO: Add a link to an official homepage for the dataset here
_HOMEPAGE = ""

_LICENSE = "Apache 2.0"

# Single JSONL file holding every example (one JSON object per line),
# downloaded in `_split_generators` and parsed in `_generate_examples`.
_URL = "https://huggingface.co/datasets/MicPie/adaptable_5k/resolve/main/data/adaptable_5k.jsonl"

# Module-level logger, per `datasets` convention.
logger = datasets.logging.get_logger(__name__)
45
-
46
-
47
class AdapTable5k(datasets.GeneratorBasedBuilder):
    # TODO: Update docs:
    """
    The AdapTable dataset consists of tables that naturally occur on the web, that are formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. For more details please see the accompanying dataset card.
    """

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        """Return the dataset metadata: feature schema, description, homepage,
        license, and citation."""
        features = datasets.Features(
            {
                "task": datasets.Value("string"),
                "input": datasets.Value("string"),
                "output": datasets.Value("string"),
                # FIX: the original declared `Sequence([Value("string")])`.
                # In `datasets`, a bare list `[feature]` is itself a list
                # feature, so wrapping it in `Sequence` declares a list of
                # lists of strings. `options` is a flat list of strings per
                # example (see `_generate_examples`), so declare it directly.
                "options": datasets.Sequence(datasets.Value("string")),
                "pageTitle": datasets.Value("string"),
                "outputColName": datasets.Value("string"),
                "url": datasets.Value("string"),
                "wdcFile": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            # FIX: `_HOMEPAGE` was defined at module level but never used.
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the single JSONL data file and expose it as the TRAIN split.

        Returns a one-element list of `SplitGenerator`s whose `gen_kwargs`
        feed `_generate_examples`.
        """
        data_dir = dl_manager.download_and_extract(_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": data_dir},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yield `(key, example)` pairs, one per line of the JSONL file.

        The key combines the example's task name with the line index so it
        stays unique across the file.
        """
        with open(filepath, encoding="utf-8") as f:
            for i, row in enumerate(f):
                data = json.loads(row)
                key = f"{data['task']}_{i}"
                yield key, {
                    "task": data["task"],
                    "input": data["input"],
                    "output": data["output"],
                    "options": data["options"],
                    "pageTitle": data["pageTitle"],
                    "outputColName": data["outputColName"],
                    "url": data["url"],
                    "wdcFile": data["wdcFile"],
                }