Quoron committed
Commit 3ae01c7
1 Parent(s): 381a617

Convert dataset to Parquet (#1)


- Convert dataset to Parquet (047aac91755e7d47bdff9c19640156d471ce5872)
- Delete data file (ae1caf1d84c4fcd851142dfa0baedeb34e157697)
- Delete loading script (a411ac5c1d64ee7be5812e5c02d78833c96d2335)
- Delete data file (dafe707d0037d9b2b96fdbf85e8abf36db243092)
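With the data now stored as Parquet shards, the dataset loads directly through the Hub without executing the deleted script. A minimal sketch, assuming the repo id is "Quoron/EEG-semantic-text-relevance" (inferred from this page; adjust to the actual Hub path):

from datasets import load_dataset

# "data" is the single config declared in the README metadata below.
ds = load_dataset("Quoron/EEG-semantic-text-relevance", "data", split="train")
print(ds)             # expected: 23,270 rows with the features listed in the card
print(ds[0]["word"])  # the word shown for one word-level EEG trial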

EEG-semantic-text-relevance.py DELETED
@@ -1,157 +0,0 @@
-import datasets
-import numpy as np
-import pandas as pd
-
-# TODO: Add BibTeX citation
-# Find for instance the citation on arxiv or on the dataset repo/website
-_CITATION = """\
-@InProceedings{Submitted to ICLR 2025,
-title = {An EEG dataset of word-level brain responses for
-semantic text relevance},
-author={},
-year={2024}
-}
-"""
-
-# You can copy an official description
-_DESCRIPTION = """\
-A dataset containing 23,270 time-locked (0.7s) word-level EEG
-recordings acquired from participants who read both text that was
-semantically relevant and irrelevant to self-selected topics.
-"""
-
-_HOMEPAGE = "https://anonymous.4open.science/r/EEG-semantic-text-relevance-651D"
-
-_LICENSE = "apache-2.0"
-
-_URLS = {
-    "data": {
-        "eeg": "./data/cleanedEEG.npy",
-        "metadata": "./data/metadataForCleanedEEG.pkl"
-    }
-}
-
-
-class EEGSemanticTextRelevance(datasets.GeneratorBasedBuilder):
-    """
-    A dataset containing 23,270 time-locked (0.7s) word-level EEG
-    recordings acquired from 15 participants who read both text that was
-    semantically relevant and irrelevant to self-selected topics."""
-
-    VERSION = datasets.Version("1.1.0")
-
-    # This is an example of a dataset with multiple configurations.
-    # If you don't want/need to define several sub-sets in your dataset,
-    # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
-
-    # If you need to make complex sub-parts in the datasets with configurable options
-    # You can create your own builder configuration class to store attribute, inheriting from datasets.BuilderConfig
-    # BUILDER_CONFIG_CLASS = MyBuilderConfig
-
-    # You will be able to load one or the other configurations in the following list with
-    # data = datasets.load_dataset('my_dataset', 'first_domain')
-    # data = datasets.load_dataset('my_dataset', 'second_domain')
-    BUILDER_CONFIGS = [
-        datasets.BuilderConfig(name="data", version=VERSION,
-                               description="Load the preprocessed (data) EEG data"),
-    ]
-
-    DEFAULT_CONFIG_NAME = "data"  # It's not mandatory to have a default configuration. Just use one if it make sense.
-
-    def _info(self):
-        # TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
-        if self.config.name == "data":  # This is the name of the configuration selected in BUILDER_CONFIGS above
-            features = datasets.Features(
-                {
-                    "event": datasets.Value("int64"),
-                    "word": datasets.Value("string"),
-                    "topic": datasets.Value("string"),
-                    "selected_topic": datasets.Value("string"),
-                    "semantic_relevance": datasets.Value("int64"),
-                    "interestingness": datasets.Value("int64"),
-                    "pre-knowledge": datasets.Value("int64"),
-                    "sentence_number": datasets.Value("int64"),
-                    "participant": datasets.Value("string"),
-                    "eeg": datasets.Array2D(shape=(32, 2001), dtype="float64"),
-                    # These are the features of your dataset like images, labels ...
-                }
-            )
-        else:  # This is an example to show how to have different features for "first_domain" and "second_domain"
-            raise ValueError("Not implemented.")
-
-        return datasets.DatasetInfo(
-            # This is the description that will appear on the datasets page.
-            description=_DESCRIPTION,
-            # This defines the different columns of the dataset and their types
-            features=features,
-            # Here we define them above because they are different between the two configurations
-            # If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
-            # specify them. They'll be used if as_supervised=True in builder.as_dataset.
-            # supervised_keys=("sentence", "label"),
-            # Homepage of the dataset for documentation
-            homepage=_HOMEPAGE,
-            # License for the dataset if available
-            license=_LICENSE,
-            # Citation for the dataset
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
-        # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
-
-        # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
-        # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
-        # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
-        urls = _URLS[self.config.name]
-        # data_dir = dl_manager.download_and_extract(urls)
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={
-                    "filepath_eeg": urls["eeg"],
-                    "filepath_metadata": urls["metadata"],
-                },
-            ),
-            # datasets.SplitGenerator(
-            #     name=datasets.Split.VALIDATION,
-            #     # These kwargs will be passed to _generate_examples
-            #     gen_kwargs={
-            #         "filepath": os.path.join(data_dir, "dev.jsonl"),
-            #         "split": "dev",
-            #     },
-            # ),
-            # datasets.SplitGenerator(
-            #     name=datasets.Split.TEST,
-            #     # These kwargs will be passed to _generate_examples
-            #     gen_kwargs={
-            #         "filepath": os.path.join(data_dir, "test.jsonl"),
-            #         "split": "test"
-            #     },
-            # ),
-        ]
-
-    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
-    def _generate_examples(self, filepath_eeg, filepath_metadata):
-        # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
-        # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
-        eeg_data = np.load(filepath_eeg)
-        metadata = pd.read_pickle(filepath_metadata)
-        for key, row in metadata.iterrows():
-            if self.config.name == "data":
-                # Yields examples as (key, example) tuples
-                yield key, {
-                    "event": row["event"],
-                    "word": row["word"],
-                    "topic": row["topic"],
-                    "selected_topic": row["selected_topic"],
-                    "semantic_relevance": row["semantic_relevance"],
-                    "interestingness": row["interestingness"],
-                    "pre-knowledge": row["pre-knowledge"],
-                    "sentence_number": row["sentence_number"],
-                    "participant": row["participant"],
-                    "eeg": eeg_data[key],
-                }
-            else:
-                raise ValueError("Not implemented.")
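Deleting the script also removes custom code execution at load time: newer releases of the datasets library either require trust_remote_code=True for script-backed repos or drop script support entirely. For contrast, a sketch of the old, script-based call, under the same assumed repo id as above:

from datasets import load_dataset

# Old path, no longer applicable after this commit; the script read
# cleanedEEG.npy and metadataForCleanedEEG.pkl and paired them by row index.
ds = load_dataset("Quoron/EEG-semantic-text-relevance", "data",
                  split="train", trust_remote_code=True)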
README.md CHANGED
@@ -1,3 +1,43 @@
 ---
 license: apache-2.0
+dataset_info:
+  config_name: data
+  features:
+  - name: event
+    dtype: int64
+  - name: word
+    dtype: string
+  - name: topic
+    dtype: string
+  - name: selected_topic
+    dtype: string
+  - name: semantic_relevance
+    dtype: int64
+  - name: interestingness
+    dtype: int64
+  - name: pre-knowledge
+    dtype: int64
+  - name: sentence_number
+    dtype: int64
+  - name: participant
+    dtype: string
+  - name: eeg
+    dtype:
+      array2_d:
+        shape:
+        - 32
+        - 2001
+        dtype: float64
+  splits:
+  - name: train
+    num_bytes: 11925180913
+    num_examples: 23270
+  download_size: 11927979870
+  dataset_size: 11925180913
+configs:
+- config_name: data
+  data_files:
+  - split: train
+    path: data/train-*
+  default: true
 ---
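The card metadata above fixes the eeg feature as a (32, 2001) float64 Array2D, i.e. 32 channels by 2001 time samples per word-level epoch. A quick sanity check, again assuming the repo id inferred from this page:

import numpy as np
from datasets import load_dataset

ds = load_dataset("Quoron/EEG-semantic-text-relevance", "data", split="train")
eeg = np.asarray(ds[0]["eeg"])  # Array2D decodes to nested lists; cast to ndarray
assert eeg.shape == (32, 2001)  # matches the shape declared in the README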
data/{metadataForCleanedEEG.pkl → train-00000-of-00024.parquet} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:26d563cd715c89fc925a4932bf1cf22af34cbe657921d637db647ec39c414aff
-size 1818442
+oid sha256:213df2d4738344c52eacc5124a6893117f6dbdc18f49b98ccfb947d39e5a9b38
+size 497284247
data/{cleanedEEG.npy → train-00001-of-00024.parquet} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9411041a08732f2f479930fbd583378bcc0a5601694cfce898a1c28348f02302
-size 11920197248
+oid sha256:f76efe05356f79c8e0bae1a630a7e2f3d7333f27f149b249ea6bbb9d59be49db
+size 497284013
data/train-00002-of-00024.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:15092e1fecc1c189d3a25bd38444617cd303ba09bcd1007811c018d536c3e074
+size 497060154
data/train-00003-of-00024.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7a135b6e47a58f65e0f01399a260ae94dbf1840dd9d621d29c8ff62355a81135
+size 496996991
data/train-00004-of-00024.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2bf9eccd579aff0491ca73aa0a48f4261fd7265da68176db059be28896649318
+size 497284491
data/train-00005-of-00024.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c00c4347617d3d24e55020d7df5c884a863430c81e382efa2d9c925168519f52
+size 497284596
data/train-00006-of-00024.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a2663678bb16533c78750ded4670fd95674586d17c6573151b88e51ea160feae
+size 497284105
data/train-00007-of-00024.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ecf0f304e8d7bb7f219805b3bd0f39acf4d6c3e7b0d8aa2aca6d48db3995912a
+size 497285071
data/train-00008-of-00024.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:68616833fe04608761cbd037c613c36cdd6c3b0cfa662080824baf043e8c77b0
+size 497284759
data/train-00009-of-00024.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b2d9a8720380e478ec48988d3126bde4a46d586b2c81f3794f514f3d39fee65e
+size 496921032
data/train-00010-of-00024.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:865b41d2af4ad3b1035bbeaad9e93fc71317dd9a09b237812b4cc6caaa0ccc73
+size 497284487
data/train-00011-of-00024.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6b45ee1af824dd8fddc4c09eee809ea3597edf6116ff99f6a3657885eec492d0
+size 497284166
data/train-00012-of-00024.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:64803778e84cb1f9e9d191ba8fbbb8e0c5f83921320e2d95c2be66d8d86c07ad
+size 497284376
data/train-00013-of-00024.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d6cdad7aef9a38995cd5114df9be8621fa3ed0bf39efa5207f3e3b0177178b2c
+size 497284767
data/train-00014-of-00024.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:354f58e62ce4ed9f9dc7ad06f7cf61116ef17625377924ac6ef406fe63a3c86b
+size 496530194
data/train-00015-of-00024.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:498536e72711a22df325b82fcbcc46eb6dfffa8b179634791e3a0a95e09beb18
+size 496772299
data/train-00016-of-00024.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:41a6e02613a5e849f45f03672d42f274324d9a175436a220d6e02e05a2f00442
+size 496772533
data/train-00017-of-00024.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:408f44909b52112f06cf2d1f680a81304b4c519b8d34f165ed7998ca5d5d546f
+size 496772528
data/train-00018-of-00024.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:85773ee1f7a35763ca2b14c3daf3919df6ec2c0b72422671f01da71a8a7102cc
+size 496772412
data/train-00019-of-00024.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:77ab3e557844cb21cc188bfee22cc4a83cfabddfca263874d4882eb6d3084b30
+size 496772135
data/train-00020-of-00024.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:34701c69f4ce6e851d185fedf36384637e49b4a2f72bd4d4901147a20bfb8a7d
+size 496772395
data/train-00021-of-00024.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f554717b2acbae742e637f093b8e1fd53d9502aae36592704d0625e134f96d4a
+size 496772307
data/train-00022-of-00024.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e2d3a40916bfbcbd168861b854a61e8cc5d440d8996b069cb0ff4272295448e4
+size 496163531
data/train-00023-of-00024.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9c0d81462a0bb71b92af48a41b02af9e2341306a7505722300f7dca5bcbd875c
+size 496772281