Upload 3 files
- config.py +117 -0
- dataset-script.py +161 -0
- dataset-train.py +91 -0
config.py
ADDED
@@ -0,0 +1,117 @@
import argparse
import sys
import torch
from multiprocessing import cpu_count


class Config:
    def __init__(self):
        self.device = "cuda:0"
        self.is_half = True
        self.n_cpu = 0
        self.gpu_name = None
        self.gpu_mem = None
        (
            self.python_cmd,
            self.listen_port,
            self.colab,
            self.noparallel,
            self.noautoopen,
            self.api,
        ) = self.arg_parse()
        self.x_pad, self.x_query, self.x_center, self.x_max = self.device_config()

    @staticmethod
    def arg_parse() -> tuple:
        exe = sys.executable or "python"
        parser = argparse.ArgumentParser()
        parser.add_argument("--port", type=int, default=7865, help="Listen port")
        parser.add_argument("--pycmd", type=str, default=exe, help="Python command")
        parser.add_argument("--colab", action="store_true", help="Launch in colab")
        parser.add_argument(
            "--noparallel", action="store_true", help="Disable parallel processing"
        )
        parser.add_argument(
            "--noautoopen",
            action="store_true",
            help="Do not open in browser automatically",
        )
        parser.add_argument("--api", action="store_true", help="Launch with api")
        cmd_opts = parser.parse_args()

        # Fall back to the default port if the requested one is out of range.
        cmd_opts.port = cmd_opts.port if 0 <= cmd_opts.port <= 65535 else 7865

        return (
            cmd_opts.pycmd,
            cmd_opts.port,
            cmd_opts.colab,
            cmd_opts.noparallel,
            cmd_opts.noautoopen,
            cmd_opts.api,
        )

    # MPS is only available in nightly PyTorch (for now) and on macOS 12.3+.
    # Guard with `getattr` and attempt a tensor transfer for compatibility.
    @staticmethod
    def has_mps() -> bool:
        if getattr(torch.backends, "mps", None) is None or not torch.backends.mps.is_available():
            return False
        try:
            torch.zeros(1).to(torch.device("mps"))
            return True
        except Exception:
            return False

    def device_config(self) -> tuple:
        if torch.cuda.is_available():
            i_device = int(self.device.split(":")[-1])
            self.gpu_name = torch.cuda.get_device_name(i_device)
            # 16-series and 10-series cards (and P40s) have poor fp16 support.
            if (
                ("16" in self.gpu_name and "V100" not in self.gpu_name.upper())
                or "P40" in self.gpu_name.upper()
                or "1060" in self.gpu_name
                or "1070" in self.gpu_name
                or "1080" in self.gpu_name
            ):
                print("Found GPU", self.gpu_name, ", forcing fp32")
                self.is_half = False
            else:
                print("Found GPU", self.gpu_name)
            self.gpu_mem = int(
                torch.cuda.get_device_properties(i_device).total_memory
                / 1024
                / 1024
                / 1024
                + 0.4
            )
        elif self.has_mps():
            print("No supported NVIDIA GPU found, using MPS instead")
            self.device = "mps"
            self.is_half = False
        else:
            print("No supported NVIDIA GPU found, using CPU instead")
            self.device = "cpu"
            self.is_half = False

        if self.n_cpu == 0:
            self.n_cpu = cpu_count()

        if self.is_half:
            # Window sizes for roughly 6 GB of VRAM (fp16)
            x_pad = 3
            x_query = 10
            x_center = 60
            x_max = 65
        else:
            # Window sizes for roughly 5 GB of VRAM (fp32)
            x_pad = 1
            x_query = 6
            x_center = 38
            x_max = 41

        if self.gpu_mem is not None and self.gpu_mem <= 4:
            x_pad = 1
            x_query = 5
            x_center = 30
            x_max = 32

        return x_pad, x_query, x_center, x_max
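A minimal usage sketch for the class above (assuming the file is importable as config.py; the application that would normally consume these values is out of scope here):

from config import Config

cfg = Config()  # parses CLI flags and probes the hardware once at startup
print(cfg.device, cfg.is_half)  # e.g. "cuda:0 True" on a supported GPU
print(cfg.x_pad, cfg.x_query, cfg.x_center, cfg.x_max)  # inference window sizes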
dataset-script.py
ADDED
@@ -0,0 +1,161 @@
# Imports required by the snippets below.
import json
from typing import List

import datasets
from datasets import load_dataset

logger = datasets.logging.get_logger(__name__)

load_dataset("path/to/my_dataset")

datasets.Features(
    {
        "id": datasets.Value("string"),
        "title": datasets.Value("string"),
        "context": datasets.Value("string"),
        "question": datasets.Value("string"),
        "answers": datasets.Sequence(
            {
                "text": datasets.Value("string"),
                "answer_start": datasets.Value("int32"),
            }
        ),
    }
)

def _info(self):
    return datasets.DatasetInfo(
        description=_DESCRIPTION,
        features=datasets.Features(
            {
                "id": datasets.Value("string"),
                "title": datasets.Value("string"),
                "context": datasets.Value("string"),
                "question": datasets.Value("string"),
                "answers": datasets.features.Sequence(
                    {
                        "text": datasets.Value("string"),
                        "answer_start": datasets.Value("int32"),
                    }
                ),
            }
        ),
        # No default supervised_keys (as we have to pass both question
        # and context as input).
        supervised_keys=None,
        homepage="https://rajpurkar.github.io/SQuAD-explorer/",
        citation=_CITATION,
    )

class SuperGlueConfig(datasets.BuilderConfig):
    """BuilderConfig for SuperGLUE."""

    def __init__(self, features, data_url, citation, url, label_classes=("False", "True"), **kwargs):
        """BuilderConfig for SuperGLUE.

        Args:
          features: *list[string]*, list of the features that will appear in the
            feature dict. Should not include "label".
          data_url: *string*, url to download the zip file from.
          citation: *string*, citation for the data set.
          url: *string*, url for information about the data set.
          label_classes: *list[string]*, the list of classes for the label if the
            label is present as a string. Non-string labels will be cast to either
            'False' or 'True'.
          **kwargs: keyword arguments forwarded to super.
        """
        # Version history:
        # 1.0.2: Fixed non-determinism in ReCoRD.
        # 1.0.1: Change from the pre-release trial version of SuperGLUE (v1.9) to
        #        the full release (v2.0).
        # 1.0.0: S3 (new shuffling, sharding and slicing mechanism).
        # 0.0.2: Initial version.
        super(SuperGlueConfig, self).__init__(version=datasets.Version("1.0.2"), **kwargs)
        self.features = features
        self.label_classes = label_classes
        self.data_url = data_url
        self.citation = citation
        self.url = url

class SuperGlue(datasets.GeneratorBasedBuilder):
    """The SuperGLUE benchmark."""

    BUILDER_CONFIGS = [
        SuperGlueConfig(
            name="boolq",
            description=_BOOLQ_DESCRIPTION,
            features=["question", "passage"],
            data_url="https://dl.fbaipublicfiles.com/glue/superglue/data/v2/BoolQ.zip",
            citation=_BOOLQ_CITATION,
            url="https://github.com/google-research-datasets/boolean-questions",
        ),
        ...
        ...
        SuperGlueConfig(
            name="axg",
            description=_AXG_DESCRIPTION,
            features=["premise", "hypothesis"],
            label_classes=["entailment", "not_entailment"],
            data_url="https://dl.fbaipublicfiles.com/glue/superglue/data/v2/AX-g.zip",
            citation=_AXG_CITATION,
            url="https://github.com/rudinger/winogender-schemas",
        ),
    ]

from datasets import load_dataset
dataset = load_dataset('super_glue', 'boolq')

class NewDataset(datasets.GeneratorBasedBuilder):

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="first_domain", version=VERSION, description="This part of my dataset covers a first domain"),
        datasets.BuilderConfig(name="second_domain", version=VERSION, description="This part of my dataset covers a second domain"),
    ]

    DEFAULT_CONFIG_NAME = "first_domain"

    _URL = "https://rajpurkar.github.io/SQuAD-explorer/dataset/"
    _URLS = {
        "train": _URL + "train-v1.1.json",
        "dev": _URL + "dev-v1.1.json",
    }

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        urls_to_download = self._URLS
        downloaded_files = dl_manager.download_and_extract(urls_to_download)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
        ]

    def _generate_examples(self, filepath):
        """This function returns the examples in the raw (text) form."""
        logger.info("generating examples from = %s", filepath)
        with open(filepath) as f:
            squad = json.load(f)
            for article in squad["data"]:
                title = article.get("title", "").strip()
                for paragraph in article["paragraphs"]:
                    context = paragraph["context"].strip()
                    for qa in paragraph["qas"]:
                        question = qa["question"].strip()
                        id_ = qa["id"]

                        answer_starts = [answer["answer_start"] for answer in qa["answers"]]
                        answers = [answer["text"].strip() for answer in qa["answers"]]

                        # Features currently used are "context", "question", and "answers".
                        # Others are extracted here for the ease of future expansions.
                        yield id_, {
                            "title": title,
                            "context": context,
                            "question": question,
                            "id": id_,
                            "answers": {"answer_start": answer_starts, "text": answers},
                        }

datasets-cli test datasets/<your-dataset-folder> --save_infos --all_configs
datasets-cli dummy_data datasets/<your-dataset-folder> --auto_generate
datasets-cli dummy_data datasets/<your-dataset-folder>

==============================DUMMY DATA INSTRUCTIONS==============================
- In order to create the dummy data for my-dataset, please go into the folder './datasets/my-dataset/dummy/1.1.0' with *cd ./datasets/my-dataset/dummy/1.1.0* .

- Please create the following dummy data files 'dummy_data/TREC_10.label, dummy_data/train_5500.label' from the folder './datasets/my-dataset/dummy/1.1.0'

- For each of the splits 'train, test', make sure that one or more of the dummy data files provide at least one example

- If the method *_generate_examples(...)* includes multiple *open()* statements, you might have to create other files in addition to 'dummy_data/TREC_10.label, dummy_data/train_5500.label'. In this case please refer to the *_generate_examples(...)* method

- After all dummy data files are created, they should be zipped recursively to 'dummy_data.zip' with the command *zip -r dummy_data.zip dummy_data/*

- You can now delete the folder 'dummy_data' with the command *rm -r dummy_data*

- To get the folder 'dummy_data' back for further changes to the dummy data, simply unzip dummy_data.zip with the command *unzip dummy_data.zip*

- Make sure you have created the file 'dummy_data.zip' in './datasets/my-dataset/dummy/1.1.0'
===================================================================================
RUN_SLOW=1 pytest tests/test_dataset_common.py::LocalDatasetTest::test_load_real_dataset_<your_dataset_name>
RUN_SLOW=1 pytest tests/test_dataset_common.py::LocalDatasetTest::test_load_dataset_all_configs_<your_dataset_name>
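Before running the test commands above, the loading script can also be exercised directly (a sketch; my_dataset.py is a stand-in name for a script like the one above):

from datasets import load_dataset

# Point load_dataset at the local script; both splits from _split_generators are built.
dataset = load_dataset("path/to/my_dataset.py")
print(dataset["train"][0])  # inspect one generated example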
dataset-train.py
ADDED
@@ -0,0 +1,91 @@
my_dataset_repository/
├── README.md
├── data.csv
└── holdout.csv

---
configs:
- config_name: default
  data_files:
  - split: train
    path: "data.csv"
  - split: test
    path: "holdout.csv"
---

my_dataset_repository/
├── README.md
├── data/
│   ├── abc.csv
│   └── def.csv
└── holdout/
    └── ghi.csv

---
configs:
- config_name: default
  data_files:
  - split: train
    path:
    - "data/abc.csv"
    - "data/def.csv"
  - split: test
    path: "holdout/ghi.csv"
---

---
configs:
- config_name: default
  data_files:
  - split: train
    path: "data/*.csv"
  - split: test
    path: "holdout/*.csv"
---
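All three layouts above resolve to the same two named splits, so loading is uniform (a sketch; my_dataset_repository is the placeholder repo name used throughout):

from datasets import load_dataset

# "train" and "test" come from the split names declared in the YAML.
train = load_dataset("my_dataset_repository", split="train")
test = load_dataset("my_dataset_repository", split="test")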
from datasets import load_dataset

main_data = load_dataset("my_dataset_repository", "main_data")
additional_data = load_dataset("my_dataset_repository", "additional_data")

---
configs:
- config_name: tab
  data_files: "main_data.csv"
  sep: "\t"
- config_name: comma
  data_files: "additional_data.csv"
  sep: ","
---

---
configs:
- config_name: main_data
  data_files: "main_data.csv"
  default: true
---
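Flagging a config with default: true changes what an unqualified call returns (a sketch, reusing the placeholder names above):

from datasets import load_dataset

# Equivalent to load_dataset("my_dataset_repository", "main_data"),
# since main_data is marked as the default config.
data = load_dataset("my_dataset_repository")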
my_dataset_repository/
├── README.md
└── data/
    ├── train-00000-of-00003.csv
    ├── train-00001-of-00003.csv
    ├── train-00002-of-00003.csv
    ├── test-00000-of-00001.csv
    ├── random-00000-of-00003.csv
    ├── random-00001-of-00003.csv
    └── random-00002-of-00003.csv

my_dataset_repository/
├── README.md
├── train_0.csv
├── train_1.csv
├── train_2.csv
├── train_3.csv
├── test_0.csv
└── test_1.csv

my_dataset_repository/
├── README.md
└── data/
    ├── train/
    │   ├── shard_0.csv
    │   ├── shard_1.csv
    │   ├── shard_2.csv
    │   └── shard_3.csv
    └── test/
        ├── shard_0.csv
        └── shard_1.csv
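For the sharded layouts, splits named train and test are inferred from the file-name prefixes, so loading needs no extra configuration for them; a custom split such as random generally still needs an explicit data_files entry in the YAML (a sketch):

from datasets import load_dataset

dataset = load_dataset("my_dataset_repository")
print(dataset)  # DatasetDict with the inferred "train" and "test" splits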