jeanpoll committed
Commit 79b7d5e
1 Parent(s): f4da552

adding new zipped data files

Files changed (4):
  1. .gitattributes +2 -0
  2. data/test.zip +3 -0
  3. data/train.zip +3 -0
  4. wikiner_fr.py +56 -59
.gitattributes CHANGED
@@ -17,3 +17,5 @@
 data/dataset_dict.json filter=lfs diff=lfs merge=lfs -text
 data/test filter=lfs diff=lfs merge=lfs -text
 data/train filter=lfs diff=lfs merge=lfs -text
+data/test.zip filter=lfs diff=lfs merge=lfs -text
+data/train.zip filter=lfs diff=lfs merge=lfs -text
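
The two added lines route the new zip archives through Git LFS, so the repository stores small pointer files instead of the archives themselves. They are what a command like `git lfs track data/test.zip data/train.zip` would append. A minimal sketch of that append-if-missing behaviour in Python (the helper `track_with_lfs` is hypothetical, not part of this commit):

    from pathlib import Path

    def track_with_lfs(pattern: str, attributes_file: str = ".gitattributes") -> None:
        # Append a Git LFS tracking rule for `pattern` unless it is already present.
        rule = f"{pattern} filter=lfs diff=lfs merge=lfs -text"
        path = Path(attributes_file)
        existing = path.read_text().splitlines() if path.exists() else []
        if rule not in existing:
            with path.open("a") as f:
                f.write(rule + "\n")

    track_with_lfs("data/test.zip")
    track_with_lfs("data/train.zip")
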
data/test.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eb7bab57150a9a64ad9a59e60f351bca97805199b9dbd322b37de3fcb4019f51
+size 30521828
data/train.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2c71f9c61dc301430c33856a12466f0fa1fcd2d0e58fa55467994e3841cac53d
+size 70366252
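
Both archives are committed as Git LFS v1 pointers: three "key value" lines giving the spec version, the SHA-256 of the real content, and its size in bytes (about 30 MB for test.zip, 70 MB for train.zip). An illustrative sketch of reading such a pointer (`parse_lfs_pointer` is hypothetical, not part of the repo):

    def parse_lfs_pointer(text: str) -> dict:
        # Split the 'key value' lines of a Git LFS v1 pointer into a dict.
        fields = dict(line.strip().split(" ", 1) for line in text.strip().splitlines())
        assert fields["version"].startswith("https://git-lfs.github.com/spec/v1")
        return {"oid": fields["oid"], "size": int(fields["size"])}

    pointer = """version https://git-lfs.github.com/spec/v1
    oid sha256:eb7bab57150a9a64ad9a59e60f351bca97805199b9dbd322b37de3fcb4019f51
    size 30521828
    """
    print(parse_lfs_pointer(pointer))  # {'oid': 'sha256:eb7bab…', 'size': 30521828}
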
wikiner_fr.py CHANGED
@@ -12,7 +12,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-"""TODO: Add a description here."""
+"""Wikiner dataset for NER in french language """
 
 from __future__ import absolute_import, division, print_function
 
@@ -23,25 +23,40 @@ import os
 import datasets
 
 
+
+_NER_LABEL_NAMES = [
+    "O",
+    "LOC",
+    "PER",
+    "MISC",
+    "ORG"
+]
+
+
+
 # TODO: Add BibTeX citation
 # Find for instance the citation on arxiv or on the dataset repo/website
 _CITATION = """\
 @InProceedings{huggingface:dataset,
-title = {A great new dataset},
-authors={huggingface, Inc.
-},
-year={2020}
+title = {Wikiner dataset for NER task in french},
+authors={Created by Nothman et al. at 2013},
+year={2013}
 }
 """
 
+
+
+
+
+
 # TODO: Add description of the dataset here
 # You can copy an official description
 _DESCRIPTION = """\
-This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
+Dataset can be used to train on NER task for french langugage.
 """
 
 # TODO: Add a link to an official homepage for the dataset here
-_HOMEPAGE = ""
+_HOMEPAGE = "https://www.sciencedirect.com/science/article/pii/S0004370212000276?via%3Dihub"
 
 # TODO: Add the licence for the dataset here if you can find it
 _LICENSE = ""
@@ -50,16 +65,14 @@ _LICENSE = ""
 # The HuggingFace dataset library don't host the datasets but only point to the original files
 # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
 _URLs = {
-    'first_domain': "https://huggingface.co/great-new-dataset-first_domain.zip",
-    'second_domain': "https://huggingface.co/great-new-dataset-second_domain.zip",
+    # "NER": "https://huggingface.co/datasets/Jean-Baptiste/wikiner_fr/tree/main/data"
 }
 
 
 # TODO: Name of the dataset usually match the script name with CamelCase instead of snake_case
-class NewDataset(datasets.GeneratorBasedBuilder):
-    """TODO: Short description of my dataset."""
+class WikinerFr(datasets.GeneratorBasedBuilder):
 
-    VERSION = datasets.Version("1.1.0")
+    VERSION = datasets.Version("1.0.0")
 
     # This is an example of a dataset with multiple configurations.
     # If you don't want/need to define several sub-sets in your dataset,
@@ -73,32 +86,24 @@ class NewDataset(datasets.GeneratorBasedBuilder):
     # data = datasets.load_dataset('my_dataset', 'first_domain')
    # data = datasets.load_dataset('my_dataset', 'second_domain')
     BUILDER_CONFIGS = [
-        datasets.BuilderConfig(name="first_domain", version=VERSION, description="This part of my dataset covers a first domain"),
-        datasets.BuilderConfig(name="second_domain", version=VERSION, description="This part of my dataset covers a second domain"),
+        datasets.BuilderConfig(name="NER", version="0.0.1", description="Dataset for entity recognition")
     ]
 
-    DEFAULT_CONFIG_NAME = "first_domain"  # It's not mandatory to have a default configuration. Just use one if it make sense.
+    DEFAULT_CONFIG_NAME = "NER"  # It's not mandatory to have a default configuration. Just use one if it make sense.
 
     def _info(self):
         # TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
-        if self.config.name == "first_domain":  # This is the name of the configuration selected in BUILDER_CONFIGS above
-            features = datasets.Features(
-                {
-                    "sentence": datasets.Value("string"),
-                    "option1": datasets.Value("string"),
-                    "answer": datasets.Value("string")
-                    # These are the features of your dataset like images, labels ...
-                }
-            )
-        else:  # This is an example to show how to have different features for "first_domain" and "second_domain"
+        if self.config.name == "NER":  # This is the name of the configuration selected in BUILDER_CONFIGS above
             features = datasets.Features(
                 {
-                    "sentence": datasets.Value("string"),
-                    "option2": datasets.Value("string"),
-                    "second_domain_answer": datasets.Value("string")
-                    # These are the features of your dataset like images, labels ...
+                    "id": datasets.Sequence(feature=datasets.Value("int32")),
+                    "ner_tags": datasets.Sequence(
+                        feature=datasets.ClassLabel(num_classes=len(_NER_LABEL_NAMES), names=_NER_LABEL_NAMES)
+                    ),
+                    "tokens": datasets.Sequence(feature=datasets.Value("string")),
                 }
             )
+
         return datasets.DatasetInfo(
             # This is the description that will appear on the datasets page.
             description=_DESCRIPTION,
@@ -107,7 +112,7 @@ class NewDataset(datasets.GeneratorBasedBuilder):
             # If there's a common (input, target) tuple from the features,
             # specify them here. They'll be used if as_supervised=True in
             # builder.as_dataset.
-            supervised_keys=None,
+            supervised_keys=("id", "ner_tags"),
             # Homepage of the dataset for documentation
             homepage=_HOMEPAGE,
             # License for the dataset if available
@@ -124,14 +129,20 @@ class NewDataset(datasets.GeneratorBasedBuilder):
         # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs
         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
-        my_urls = _URLs[self.config.name]
-        data_dir = dl_manager.download_and_extract(my_urls)
+
+        if self.config.data_dir:
+            data_dir = self.config.data_dir
+        else:
+            my_urls = _URLs[self.config.name]
+            data_dir = dl_manager.download_and_extract(my_urls)
+
+
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepath": os.path.join(data_dir, "train.jsonl"),
+                    "filepath": os.path.join(data_dir, "train.zip"),
                     "split": "train",
                 },
             ),
@@ -139,18 +150,10 @@ class NewDataset(datasets.GeneratorBasedBuilder):
                 name=datasets.Split.TEST,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepath": os.path.join(data_dir, "test.jsonl"),
+                    "filepath": os.path.join(data_dir, "test.zip"),
                     "split": "test"
                 },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={
-                    "filepath": os.path.join(data_dir, "dev.jsonl"),
-                    "split": "dev",
-                },
-            ),
+            )
         ]
 
     def _generate_examples(
@@ -159,19 +162,13 @@ class NewDataset(datasets.GeneratorBasedBuilder):
         """ Yields examples as (key, example) tuples. """
         # This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
         # The `key` is here for legacy reason (tfds) and is not important in itself.
-
-        with open(filepath, encoding="utf-8") as f:
-            for id_, row in enumerate(f):
-                data = json.loads(row)
-                if self.config.name == "first_domain":
-                    yield id_, {
-                        "sentence": data["sentence"],
-                        "option1": data["option1"],
-                        "answer": "" if split == "test" else data["answer"],
-                    }
-                else:
-                    yield id_, {
-                        "sentence": data["sentence"],
-                        "option2": data["option2"],
-                        "second_domain_answer": "" if split == "test" else data["second_domain_answer"],
-                    }
+        print(filepath)
+        # with open(filepath, encoding="utf-8") as f:
+        #     for id_, row in enumerate(f):
+        #         data = json.loads(row)
+        #         if self.config.name == "NER":
+        #             yield id_, {
+        #                 "sentence": data["sentence"],
+        #                 "option1": data["option1"],
+        #                 "answer": "" if split == "test" else data["answer"],
+        #             }
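
As committed, `_generate_examples` is still a stub: it only prints the file path and leaves the template body commented out, so the script cannot yet yield examples end to end. A sketch of what a working body could look like, under the assumption (hypothetical, since the archive layout is not shown in this commit) that each zip holds a JSON-lines file named after its split, with "id", "tokens" and "ner_tags" keys matching the features declared in _info():

    # Module-level imports; drop-in replacement for the method body above.
    import json
    import zipfile

    def _generate_examples(self, filepath, split):
        """ Yields examples as (key, example) tuples. """
        # Read straight from the zip passed in gen_kwargs (e.g. .../train.zip).
        with zipfile.ZipFile(filepath) as archive:
            # Hypothetical member name; the real archive layout is not shown here.
            with archive.open(f"{split}.jsonl") as f:
                for id_, row in enumerate(f):
                    data = json.loads(row)
                    yield id_, {
                        "id": data["id"],
                        "tokens": data["tokens"],
                        "ner_tags": data["ner_tags"],
                    }

Once a real body is in place, the dataset would load with `datasets.load_dataset("Jean-Baptiste/wikiner_fr")`; at this commit the stub produces no examples, so the script is not yet loadable.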