system HF staff committed on
Commit cd83ae2
Parent: ab8179b

Update files from the datasets library (from 1.16.0)


Release notes: https://github.com/huggingface/datasets/releases/tag/1.16.0

Files changed (3):
  1. README.md +1 -0
  2. dummy/1.0.1/dummy_data.zip +2 -2
  3. norec.py +39 -36
README.md CHANGED
@@ -1,4 +1,5 @@
 ---
+pretty_name: NoReC
 annotations_creators:
 - expert-generated
 language_creators:
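For context: `pretty_name` is a field in the dataset card's YAML front matter that gives the dataset a human-readable display name on the Hub (here `NoReC`, the Norwegian Review Corpus).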
dummy/1.0.1/dummy_data.zip CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:29102914e9e34179e41feca385b9af8f7feebceaa9722482c80a43c4eaa37c5b
-size 19217
+oid sha256:566085645c954b68377e7a79ce9ea694cbca4aefee955600632f160a82f658f6
+size 19489
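Note that `dummy_data.zip` is stored with Git LFS, so the diff only touches the pointer file: the `oid` and `size` lines record the SHA-256 digest and byte size of the new zip contents.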
norec.py CHANGED
@@ -13,9 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# Lint as: python3
-import glob
-import os
+import tarfile
 
 import conllu
 
@@ -41,9 +39,6 @@ NoReC was created as part of the SANT project (Sentiment Analysis for Norwegian
 """
 
 _URL = "https://www.mn.uio.no/ifi/english/research/projects/sant/data/norec/norec-1.0.1.tar.gz"
-_TRAIN = "conllu/train"
-_DEV = "conllu/dev"
-_TEST = "conllu/test"
 
 
 class Norec(datasets.GeneratorBasedBuilder):
@@ -95,52 +90,60 @@ class Norec(datasets.GeneratorBasedBuilder):
         )
 
     def _split_generators(self, dl_manager):
-        path = dl_manager.download_and_extract(_URL)
-        sub_path = os.path.join(path, "norec", "conllu.tar.gz")
-        conllu_path = dl_manager.extract(sub_path)
+        archive = dl_manager.download(_URL)
+        subarchive_path = "norec/conllu.tar.gz"
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 gen_kwargs={
-                    "datapath": os.path.join(conllu_path, "conllu", "train"),
-                    "path": path,
+                    "data_dir": "conllu/train",
+                    "subarchive_path": subarchive_path,
+                    "files": dl_manager.iter_archive(archive),
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION,
                 gen_kwargs={
-                    "datapath": os.path.join(conllu_path, "conllu", "dev"),
-                    "path": path,
+                    "data_dir": "conllu/dev",
+                    "subarchive_path": subarchive_path,
+                    "files": dl_manager.iter_archive(archive),
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
                 gen_kwargs={
-                    "datapath": os.path.join(conllu_path, "conllu", "test"),
-                    "path": path,
+                    "data_dir": "conllu/test",
+                    "subarchive_path": subarchive_path,
+                    "files": dl_manager.iter_archive(archive),
                 },
             ),
         ]
 
-    def _generate_examples(self, datapath, path):
-        conllu_files = sorted(glob.glob(os.path.join(datapath, "*.conllu")))
+    def _generate_examples(self, data_dir, subarchive_path, files):
         counter = 0
-        for cf in conllu_files:
-            with open(cf, "r", encoding="utf-8") as data_file:
-                tokenlist = list(conllu.parse_incr(data_file))
-                for sent in tokenlist:
-                    res = {
-                        "idx": sent.metadata["sent_id"],
-                        "text": sent.metadata["text"],
-                        "tokens": [str(token["form"]) for token in sent],
-                        "lemmas": [str(token["lemma"]) for token in sent],
-                        "pos_tags": [str(token["upostag"]) for token in sent],
-                        "xpos_tags": [str(token["xpostag"]) for token in sent],
-                        "feats": [str(token["feats"]) for token in sent],
-                        "head": [str(token["head"]) for token in sent],
-                        "deprel": [str(token["deprel"]) for token in sent],
-                        "deps": [str(token["deps"]) for token in sent],
-                        "misc": [str(token["misc"]) for token in sent],
-                    }
-                    yield counter, res
-                    counter += 1
+        for path, f in files:
+            if path == subarchive_path:
+                stream = tarfile.open(fileobj=f, mode="r|*")
+                for tarinfo in stream:
+                    file_path = tarinfo.name
+                    if file_path.startswith(data_dir) and file_path.endswith(".conllu"):
+                        data = stream.extractfile(tarinfo).read().decode("utf-8")
+                        for sent in conllu.parse(data):
+                            res = {
+                                "idx": sent.metadata["sent_id"],
+                                "text": sent.metadata["text"],
+                                "tokens": [str(token["form"]) for token in sent],
+                                "lemmas": [str(token["lemma"]) for token in sent],
+                                "pos_tags": [str(token["upostag"]) for token in sent],
+                                "xpos_tags": [str(token["xpostag"]) for token in sent],
+                                "feats": [str(token["feats"]) for token in sent],
+                                "head": [str(token["head"]) for token in sent],
+                                "deprel": [str(token["deprel"]) for token in sent],
+                                "deps": [str(token["deps"]) for token in sent],
+                                "misc": [str(token["misc"]) for token in sent],
+                            }
+                            yield counter, res
+                            counter += 1
+                stream.members = []
+                del stream
+                break
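The substance of this change is a switch from eager extraction to streaming: instead of `download_and_extract` followed by `glob` over files on disk, `dl_manager.iter_archive(archive)` yields `(path, file_object)` pairs for the outer tarball without unpacking it, and the nested `conllu.tar.gz` is then read member by member with `tarfile` in sequential (`"r|*"`) mode. Below is a minimal standalone sketch of the same nested-archive pattern, outside the `datasets` builder machinery; the local path `norec-1.0.1.tar.gz` is a placeholder for a downloaded copy of `_URL`, and the split directory is hard-coded for illustration.

import tarfile

import conllu  # third-party CoNLL-U parser, same one the loader uses

ARCHIVE = "norec-1.0.1.tar.gz"      # placeholder: local copy of the outer tarball
SUBARCHIVE = "norec/conllu.tar.gz"  # nested tarball inside the outer one
DATA_DIR = "conllu/train"           # split directory inside the nested tarball

with tarfile.open(ARCHIVE, mode="r:gz") as outer:
    for member in outer:
        if member.name != SUBARCHIVE:
            continue
        # Open the nested tarball directly from the outer archive's file
        # object; "r|*" reads it sequentially and auto-detects compression.
        inner = tarfile.open(fileobj=outer.extractfile(member), mode="r|*")
        for tarinfo in inner:
            if tarinfo.name.startswith(DATA_DIR) and tarinfo.name.endswith(".conllu"):
                data = inner.extractfile(tarinfo).read().decode("utf-8")
                for sent in conllu.parse(data):
                    print(sent.metadata["sent_id"], sent.metadata["text"])
        break

Two details of the committed code are worth noting. `stream.members = []` followed by `del stream` is a memory optimization: as `tarfile` iterates a stream it accumulates every `TarInfo` in `stream.members`, so clearing that list before dropping the stream releases the memory. The trailing `break` stops iterating the outer archive once the nested tarball has been consumed, since no other member is needed.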