Datasets:

Languages:
Hebrew
Multilinguality:
monolingual
Size Categories:
n<1K
Language Creators:
crowdsourced
Annotations Creators:
no-annotation
Source Datasets:
original
Tags:
License:
imvladikon committed on
Commit
c59a4e1
1 Parent(s): 8354883
data/kneset16.tar.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fd7c14c3a33fb72c9600801bca2546e3e6ab3a2858e26f4a229d634ed41e9fad
3
+ size 85944068
data/kneset17.tar.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7802d68f4e091759390e79af4b552e3215fc4c577910744505efd7474a5f8a91
3
+ size 53181817
data/knesset_tagged.tar.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3814d76bd0d1f1532d49aa9cdb8d8b2993ed520110b2c76e435494de6e7b98ae
3
+ size 495548087
knesset_meetings_corpus.py ADDED
@@ -0,0 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import glob
import os
from functools import partial

import datasets

# Single version shared by every builder config below.
VERSION = datasets.Version("0.0.1")

# One subset per archive shipped under data/: two plain-text transcript
# collections and one POS-tagged XML collection.
SUBSET_NAMES = [
    "kneset16",
    "kneset17",
    "knesset_tagged",
]
14
+
15
+
16
class KnessetMeetingsCorpus(datasets.GeneratorBasedBuilder):
    """Knesset meetings corpus.

    Transcriptions of Knesset (Israeli parliament) meetings. Each builder
    config corresponds to one tarball under ``data/``: plain-text
    transcripts (``kneset16``, ``kneset17``) or tagged XML
    (``knesset_tagged``).
    """

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name=name, version=VERSION,
                               description=f"{name} meetings corpus")
        for name in SUBSET_NAMES
    ]

    def _info(self):
        """Return dataset metadata: features, homepage and citation."""
        return datasets.DatasetInfo(
            description="A corpus of transcriptions of Knesset (Israeli parliament) meetings between January 2004 and November 2005",
            features=datasets.Features(
                {
                    "path": datasets.Value("string"),
                    "text": datasets.Value("string"),
                }
            ),
            homepage="https://zenodo.org/record/2707356",
            citation="""TODO""",
        )

    def _split_generators(self, dl_manager):
        """Download/extract this config's archive and declare the train split.

        The original wrapped the call in ``partial(lambda split: ...)``
        where the ``split`` argument was ignored; a direct call is
        equivalent and clearer.
        """
        root_path = dl_manager.download_and_extract(
            f"data/{self.config.name}.tar.gz")

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "root_path": root_path,
                    "split": "train",
                    "subset_name": self.config.name,
                },
            )
        ]

    def _generate_examples(self, root_path, split, subset_name):
        """Yield ``(uid, example)`` pairs for one subset.

        For ``knesset_tagged`` only the XML file path is emitted and
        ``text`` is ``None``; for the plain-text subsets the transcript
        text is read and the path of the matching ``.docx`` is reported.
        ``sorted()`` makes example order deterministic across filesystems.
        """
        data_folder = os.path.join(root_path, subset_name)
        if subset_name == "knesset_tagged":
            for xml_file in sorted(glob.glob(f"{data_folder}/16/*.xml")):
                uid = os.path.splitext(os.path.basename(xml_file))[0]
                yield uid, {
                    "path": xml_file,
                    "text": None,
                }
        else:
            for txt_file in sorted(glob.glob(f"{data_folder}/txt/*.txt")):
                uid = os.path.splitext(os.path.basename(txt_file))[0]
                docx_file = os.path.join(data_folder, "docx", f"{uid}.docx")
                # Context manager closes the handle (the original leaked it)
                # and UTF-8 is explicit rather than the platform default —
                # assumed correct for these Hebrew transcripts; TODO confirm.
                with open(txt_file, "r", encoding="utf-8") as f:
                    text = f.read()
                yield uid, {
                    "path": docx_file,
                    "text": text,
                }