Languages: Greek
Multilinguality: monolingual
Size Categories: 10K<n<100K
Language Creators: found
Annotations Creators: found
Source Datasets: original
mariosasko committed
Commit 02459b5
1 Parent(s): 82380de

Streaming support

Files changed (2):
  1. README.md +0 -0
  2. greek_legal_code.py +29 -16
README.md CHANGED
The diff for this file is too large to render. See raw diff
greek_legal_code.py CHANGED
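In short, the change to the loading script: dl_manager.download_and_extract together with os.path.join and open() is replaced by dl_manager.download plus dl_manager.iter_archive, which yields (path, file object) pairs straight from the downloaded archive, so _generate_examples no longer needs the archive extracted to disk and the script also works when the dataset is loaded with streaming=True. A usage sketch follows the diff.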
@@ -19,11 +19,9 @@ a collection of Greek legislative documents classified into multi-level (from br
 
 
 import json
-import os
 
 import datasets
 
-
 _CITATION = """\
 @inproceedings{papaloukas-etal-2021-glc,
     title = "Multi-granular Legal Topic Classification on Greek Legislation",
@@ -2817,7 +2815,10 @@ class GreekLegalCode(datasets.GeneratorBasedBuilder):
 
     def _info(self):
         features = datasets.Features(
-            {"text": datasets.Value("string"), "label": datasets.ClassLabel(names=self.config.label_names)}
+            {
+                "text": datasets.Value("string"),
+                "label": datasets.ClassLabel(names=self.config.label_names),
+            }
         )
         return datasets.DatasetInfo(
             # This is the description that will appear on the datasets page.
@@ -2835,45 +2836,57 @@ class GreekLegalCode(datasets.GeneratorBasedBuilder):
             # Citation for the dataset
             citation=_CITATION,
             # Task template
-            task_templates=[datasets.tasks.TextClassification(text_column="text", label_column="label")],
+            task_templates=[
+                datasets.tasks.TextClassification(
+                    text_column="text", label_column="label"
+                )
+            ],
         )
 
     def _split_generators(self, dl_manager):
         """Returns SplitGenerators."""
-        data_dir = dl_manager.download_and_extract(URL)
+        archive = dl_manager.download(URL)
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepath": os.path.join(data_dir, "train.jsonl"),
+                    "archive": dl_manager.iter_archive(archive),
                     "split": "train",
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
                 # These kwargs will be passed to _generate_examples
-                gen_kwargs={"filepath": os.path.join(data_dir, "test.jsonl"), "split": "test"},
+                gen_kwargs={
+                    "archive": dl_manager.iter_archive(archive),
+                    "split": "test",
+                },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepath": os.path.join(data_dir, "dev.jsonl"),
+                    "archive": dl_manager.iter_archive(archive),
                     "split": "dev",
                 },
             ),
         ]
 
     def _generate_examples(
-        self, filepath, split  # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+        self,
+        archive,
+        split,  # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
     ):
         """Yields examples as (key, example) tuples."""
 
-        with open(filepath, encoding="utf-8") as f:
-            for id_, row in enumerate(f):
-                data = json.loads(row)
-                yield id_, {
-                    "text": data["text"],
-                    "label": data[self.config.label_type],
-                }
+        idx = 0
+        for path, f in archive:
+            if path == split + ".jsonl":
+                for line in f:
+                    data = json.loads(line)
+                    yield idx, {
+                        "text": data["text"],
+                        "label": data[self.config.label_type],
+                    }
+                    idx += 1
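A minimal way to exercise the new path (not part of this commit): the sketch below assumes the dataset's "volume" configuration name and a datasets version that still runs this loading script.

# Hedged smoke test; the "volume" config name is an assumption, not taken from this diff.
from datasets import load_dataset

# Streaming: the archive is downloaded but not extracted; examples are read
# lazily through dl_manager.iter_archive in _generate_examples.
streamed = load_dataset("greek_legal_code", "volume", split="train", streaming=True)
print(next(iter(streamed)))  # expected keys: "text", "label"

# Regular (non-streaming) load of the same split for comparison.
regular = load_dataset("greek_legal_code", "volume", split="train")
print(regular[0])

In both cases "label" comes back as the ClassLabel integer id defined by self.config.label_names.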