MicPie committed on
Commit
81dfea8
1 Parent(s): cf70d3b

Delete adaptable_full.py

Browse files
Files changed (1) hide show
  1. adaptable_full.py +0 -97
adaptable_full.py DELETED
@@ -1,97 +0,0 @@
1
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
- """This loads the AdapTable-full dataset."""
15
-
16
- import json
17
- import os
18
- import pandas as pd
19
-
20
- import datasets
21
-
22
-
23
- _CITATION = """\
24
- @misc{https://ethanperez.net/adaptable,
25
- author = {Chan, Jun Shern and Pieler, Michael and Jao, Jonathan and Scheurer, Jérémy and Perez, Ethan},
26
- title = {Exploring Few-Shot Adaptation of Language Models with Tables},
27
- publisher = {arXiv},
28
- year = {2022},
29
- }
30
- """
31
-
32
- _DESCRIPTION = """\
33
- The AdapTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. For more details please see the accompanying dataset card.
34
- """
35
-
36
- _HOMEPAGE = "https://ethanperez.net/adaptable"
37
-
38
- _LICENSE = "Apache 2.0"
39
-
40
- _URL = "https://huggingface.co/datasets/MicPie/adaptable_full/resolve/main/data/adaptable_full.jsonl"
41
-
42
# Module-level logger obtained through the datasets library's logging helper.
logger = datasets.logging.get_logger(__name__)
43
-
44
-
45
class AdapTableFull(datasets.GeneratorBasedBuilder):
    """
    The AdapTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. For more details please see the accompanying dataset card.
    """

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        """Return the dataset metadata: features schema, description, homepage,
        license and citation."""
        features = datasets.Features(
            {
                "task": datasets.Value("string"),
                "input": datasets.Value("string"),
                "output": datasets.Value("string"),
                # NOTE(review): Sequence([...]) with a list wrapper is the
                # unusual form; Sequence(datasets.Value("string")) is the
                # conventional way to declare a list of strings — kept as-is
                # to preserve the published schema, but worth confirming.
                "options": datasets.Sequence([datasets.Value("string")]),
                "pageTitle": datasets.Value("string"),
                "outputColName": datasets.Value("string"),
                "url": datasets.Value("string"),
                "wdcFile": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            # Fix: _HOMEPAGE was defined at module level but never wired into
            # DatasetInfo, so the dataset carried no homepage metadata.
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the single JSONL file and expose it as the TRAIN split."""
        # download_and_extract returns the local path of the downloaded file
        # (renamed from `data_dir`: it is a file path, not a directory).
        filepath = dl_manager.download_and_extract(_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": filepath},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yield (key, example) pairs, one per line of the JSONL file.

        Keys combine the task name with the line index so they are unique
        across the file.
        """
        with open(filepath, encoding="utf-8") as f:
            for i, row in enumerate(f):
                data = json.loads(row)
                key = f"{data['task']}_{i}"
                yield key, {
                    "task": data["task"],
                    "input": data["input"],
                    "output": data["output"],
                    "options": data["options"],
                    "pageTitle": data["pageTitle"],
                    "outputColName": data["outputColName"],
                    "url": data["url"],
                    "wdcFile": data["wdcFile"],
                }