zpn committed
Commit 0c000f7
Parent: 63e7ac3

train_test_split (#1)


- feat: dataloading script (d2a5d425462eb420f1e663e9443cba35df1dd36b)

Files changed (2):
  1. GRCh38.py +85 -0
  2. README.md +16 -0
GRCh38.py ADDED
@@ -0,0 +1,85 @@
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import json
+ import random
+
+ import datasets
+
+ _DESCRIPTION = """\
+ All autosomal and sex chromosome sequences from the GRCh38/hg38 reference assembly, totaling 3.2 billion nucleotides.
+ """
+
+ _HOMEPAGE = "https://www.ncbi.nlm.nih.gov/assembly/GCF_000001405.26"
+
+ FILES = ["intervals.jsonl"]
+
+
+ class Grch38(datasets.GeneratorBasedBuilder):
+     """All autosomal and sex chromosome sequences from the GRCh38/hg38 reference assembly, totaling 3.2 billion nucleotides."""
+
+     VERSION = datasets.Version("1.1.0")
+
+     # A single default configuration; add entries here if more configurations are needed later.
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(
+             version=VERSION,
+             description="All autosomal and sex chromosome sequences from the GRCh38/hg38 reference assembly, totaling 3.2 billion nucleotides.",
+         )
+     ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             # This description appears on the dataset page.
+             description=_DESCRIPTION,
+             # The columns of the dataset and their types.
+             features=datasets.Features(
+                 {
+                     "chr": datasets.Value("string"),
+                     "description": datasets.Value("string"),
+                     "seq": datasets.Value("string"),
+                     "split": datasets.Value("string"),
+                 }
+             ),
+             # Homepage of the dataset for documentation.
+             homepage=_HOMEPAGE,
+         )
+
+     def _split_generators(self, dl_manager):
+         downloaded_files = dl_manager.download(FILES)
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # These kwargs are passed to _generate_examples.
+                 gen_kwargs={"filename": downloaded_files[0]},
+             ),
+         ]
+
+     # Method parameters are unpacked from `gen_kwargs` as given in `_split_generators`.
+     def _generate_examples(self, filename):
+         # The key is for legacy reasons (tfds) and is not important in itself,
+         # but must be unique for each example.
+         with open(filename) as jsonl_file:
+             for row, line in enumerate(jsonl_file):
+                 data = json.loads(line)
+
+                 # Tag roughly 5% of rows as validation. This is hacky, but it keeps
+                 # the loader compatible with streaming mode; note the assignment is
+                 # non-deterministic unless random.seed() is called beforehand.
+                 split = "valid" if random.random() < 0.05 else "train"
+
+                 yield row, {
+                     "chr": data["chr"],
+                     "description": data["description"],
+                     "seq": data["seq"],
+                     "split": split,
+                 }
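
For a quick smoke test of this loader, the dataset can be streamed and the two subsets recovered from the embedded `split` column. The sketch below is illustrative only, not part of the commit; the repo id `zpn/GRCh38` is a placeholder for wherever this dataset is actually published on the Hub.

from datasets import load_dataset

# Streaming keeps the ~3 GB JSONL from being fully materialized before iteration.
ds = load_dataset("zpn/GRCh38", split="train", streaming=True)  # placeholder repo id

# The script tags roughly 5% of rows "valid"; filter to recover each subset.
train_ds = ds.filter(lambda ex: ex["split"] == "train")
valid_ds = ds.filter(lambda ex: ex["split"] == "valid")

example = next(iter(valid_ds))
print(example["chr"], example["description"], len(example["seq"]))

Because the split tag is drawn at iteration time, re-streaming the dataset reshuffles which rows land in `valid`.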
README.md CHANGED
@@ -1,3 +1,19 @@
  ---
  license: mit
+ dataset_info:
+   features:
+   - name: chr
+     dtype: string
+   - name: description
+     dtype: string
+   - name: seq
+     dtype: string
+   - name: split
+     dtype: string
+   splits:
+   - name: train
+     num_bytes: 3158692879
+     num_examples: 510445
+   download_size: 3166859999
+   dataset_size: 3158692879
  ---
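
A closing note on the approach recorded in this commit: `_generate_examples` assigns splits with an unseeded `random.random()`, so the train/valid partition differs between passes over the data. One deterministic alternative, sketched here as a hypothetical (this is not what the commit implements), is to derive the tag from the row index:

import random

# Hypothetical helper: seeding a per-row RNG makes the split a pure function
# of (seed, row), stable across runs and across streaming re-iterations.
def assign_split(row: int, valid_fraction: float = 0.05, seed: int = 0) -> str:
    rng = random.Random(seed * 1_000_003 + row)
    return "valid" if rng.random() < valid_fraction else "train"

assert assign_split(7) == assign_split(7)  # same row, same split, every run

Swapping this in for the `random.random()` call would make the `split` column reproducible without breaking streaming.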