Quoron committed · Commit a411ac5 (verified) · 1 parent: 047aac9

Delete loading script

Files changed (1):
  1. EEG-semantic-text-relevance.py +0 -157
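
With the loading script removed, `datasets` no longer executes repository code to build this dataset; commits like this one typically accompany a conversion of the underlying files to a format the Hub serves natively (e.g. Parquet). A minimal sketch of loading the dataset after this change, assuming a repository id of Quoron/EEG-semantic-text-relevance (inferred from the committer name, not stated on this page):

from datasets import load_dataset

# Hypothetical repo id; replace with the dataset's actual Hub path.
ds = load_dataset("Quoron/EEG-semantic-text-relevance", split="train")
print(ds[0]["word"], ds[0]["semantic_relevance"])
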
EEG-semantic-text-relevance.py DELETED
@@ -1,157 +0,0 @@
- import datasets
- import numpy as np
- import pandas as pd
-
- # TODO: Add BibTeX citation
- # Find for instance the citation on arxiv or on the dataset repo/website
- _CITATION = """\
- @InProceedings{Submitted to ICLR 2025,
- title = {An EEG dataset of word-level brain responses for
- semantic text relevance},
- author={},
- year={2024}
- }
- """
-
- # You can copy an official description
- _DESCRIPTION = """\
- A dataset containing 23,270 time-locked (0.7s) word-level EEG
- recordings acquired from participants who read both text that was
- semantically relevant and irrelevant to self-selected topics.
- """
-
- _HOMEPAGE = "https://anonymous.4open.science/r/EEG-semantic-text-relevance-651D"
-
- _LICENSE = "apache-2.0"
-
- _URLS = {
-     "data": {
-         "eeg": "./data/cleanedEEG.npy",
-         "metadata": "./data/metadataForCleanedEEG.pkl"
-     }
- }
-
-
- class EEGSemanticTextRelevance(datasets.GeneratorBasedBuilder):
-     """
-     A dataset containing 23,270 time-locked (0.7s) word-level EEG
-     recordings acquired from 15 participants who read both text that was
-     semantically relevant and irrelevant to self-selected topics."""
-
-     VERSION = datasets.Version("1.1.0")
-
-     # This is an example of a dataset with multiple configurations.
-     # If you don't want/need to define several sub-sets in your dataset,
-     # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
-
-     # If you need to make complex sub-parts in the datasets with configurable options
-     # You can create your own builder configuration class to store attribute, inheriting from datasets.BuilderConfig
-     # BUILDER_CONFIG_CLASS = MyBuilderConfig
-
-     # You will be able to load one or the other configurations in the following list with
-     # data = datasets.load_dataset('my_dataset', 'first_domain')
-     # data = datasets.load_dataset('my_dataset', 'second_domain')
-     BUILDER_CONFIGS = [
-         datasets.BuilderConfig(name="data", version=VERSION,
-                                description="Load the preprocessed (data) EEG data"),
-     ]
-
-     DEFAULT_CONFIG_NAME = "data"  # It's not mandatory to have a default configuration. Just use one if it make sense.
-
-     def _info(self):
-         # TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
-         if self.config.name == "data":  # This is the name of the configuration selected in BUILDER_CONFIGS above
-             features = datasets.Features(
-                 {
-                     "event": datasets.Value("int64"),
-                     "word": datasets.Value("string"),
-                     "topic": datasets.Value("string"),
-                     "selected_topic": datasets.Value("string"),
-                     "semantic_relevance": datasets.Value("int64"),
-                     "interestingness": datasets.Value("int64"),
-                     "pre-knowledge": datasets.Value("int64"),
-                     "sentence_number": datasets.Value("int64"),
-                     "participant": datasets.Value("string"),
-                     "eeg": datasets.Array2D(shape=(32, 2001), dtype="float64"),
-                     # These are the features of your dataset like images, labels ...
-                 }
-             )
-         else:  # This is an example to show how to have different features for "first_domain" and "second_domain"
-             raise ValueError("Not implemented.")
-
-         return datasets.DatasetInfo(
-             # This is the description that will appear on the datasets page.
-             description=_DESCRIPTION,
-             # This defines the different columns of the dataset and their types
-             features=features,
-             # Here we define them above because they are different between the two configurations
-             # If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
-             # specify them. They'll be used if as_supervised=True in builder.as_dataset.
-             # supervised_keys=("sentence", "label"),
-             # Homepage of the dataset for documentation
-             homepage=_HOMEPAGE,
-             # License for the dataset if available
-             license=_LICENSE,
-             # Citation for the dataset
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
-         # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
-
-         # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
-         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
-         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
-         urls = _URLS[self.config.name]
-         # data_dir = dl_manager.download_and_extract(urls)
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 # These kwargs will be passed to _generate_examples
-                 gen_kwargs={
-                     "filepath_eeg": urls["eeg"],
-                     "filepath_metadata": urls["metadata"],
-                 },
-             ),
-             # datasets.SplitGenerator(
-             #     name=datasets.Split.VALIDATION,
-             #     # These kwargs will be passed to _generate_examples
-             #     gen_kwargs={
-             #         "filepath": os.path.join(data_dir, "dev.jsonl"),
-             #         "split": "dev",
-             #     },
-             # ),
-             # datasets.SplitGenerator(
-             #     name=datasets.Split.TEST,
-             #     # These kwargs will be passed to _generate_examples
-             #     gen_kwargs={
-             #         "filepath": os.path.join(data_dir, "test.jsonl"),
-             #         "split": "test"
-             #     },
-             # ),
-         ]
-
-     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
-     def _generate_examples(self, filepath_eeg, filepath_metadata):
-         # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
-         # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
-         eeg_data = np.load(filepath_eeg)
-         metadata = pd.read_pickle(filepath_metadata)
-         for key, row in metadata.iterrows():
-             if self.config.name == "data":
-                 # Yields examples as (key, example) tuples
-                 yield key, {
-                     "event": row["event"],
-                     "word": row["word"],
-                     "topic": row["topic"],
-                     "selected_topic": row["selected_topic"],
-                     "semantic_relevance": row["semantic_relevance"],
-                     "interestingness": row["interestingness"],
-                     "pre-knowledge": row["pre-knowledge"],
-                     "sentence_number": row["sentence_number"],
-                     "participant": row["participant"],
-                     "eeg": eeg_data[key],
-                 }
-             else:
-                 raise ValueError("Not implemented.")
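
For anyone who still has the raw files the script pointed at, the deleted `_generate_examples` reduces to a few lines of plain Python. A standalone sketch, assuming `./data/cleanedEEG.npy` (presumably shaped (n_events, 32, 2001), given the `Array2D` feature above) and `./data/metadataForCleanedEEG.pkl` are available relative to the working directory, as in the script's `_URLS`:

import numpy as np
import pandas as pd

# Paths copied from the deleted script's _URLS; adjust to wherever the raw files live.
eeg_data = np.load("./data/cleanedEEG.npy")
metadata = pd.read_pickle("./data/metadataForCleanedEEG.pkl")

# One word-level example per metadata row, keyed by row index exactly as the script did.
examples = (
    {**row.to_dict(), "eeg": eeg_data[key]}  # eeg: 32 channels x 2001 samples
    for key, row in metadata.iterrows()
)
print(next(examples)["word"])
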