Divyanshu committed on
Commit 16fdc7c
1 Parent(s): e2fa4c4
.history/IE-SemParse_20230707232622.py ADDED
@@ -0,0 +1,162 @@
+# coding=utf-8
+
+
+# Lint as: python3
+"""IndicXNLI: The Cross-Lingual NLI Corpus for Indic Languages."""
+
+
+import os
+import json
+
+import pandas as pd
+
+import datasets
+
+from datasets import DownloadManager
+
+
+_CITATION = """\
+@misc{aggarwal2023evaluating,
+      title={Evaluating Inter-Bilingual Semantic Parsing for Indian Languages},
+      author={Divyanshu Aggarwal and Vivek Gupta and Anoop Kunchukuttan},
+      year={2023},
+      eprint={2304.13005},
+      archivePrefix={arXiv},
+      primaryClass={cs.CL}
+}
+"""
+
+_DESCRIPTION = """\
+IE-SemParse is an Inter-bilingual Seq2seq Semantic parsing dataset for 11 distinct Indian languages
+"""
+
+_LANGUAGES = (
+    'hi',
+    'bn',
+    'mr',
+    'as',
+    'ta',
+    'te',
+    'or',
+    'ml',
+    'pa',
+    'gu',
+    'kn'
+)
+
+
+_DATASETS = (
+    'iTOP',
+    'indic-atis',
+    'indic-TOP'
+)
+
+
+_URL = "https://huggingface.co/datasets/Divyanshu/IE-SemParse/resolve/main/"
+
+
+class IESemParseConfig(datasets.BuilderConfig):
+    """BuilderConfig for IE-SemParse."""
+
+    def __init__(self, dataset: str, language: str, **kwargs):
+        """BuilderConfig for IE-SemParse.
+
+        Args:
+          language: One of hi, bn, mr, as, ta, te, or, ml, pa, gu, kn
+          **kwargs: keyword arguments forwarded to super.
+        """
+        super(IESemParseConfig, self).__init__(**kwargs)
+
+        self.dataset = dataset
+        self.language = language
+        self.languages = _LANGUAGES
+        self.datasets = _DATASETS
+
+        self._URLS = [os.path.join(
+            _URL, "unfiltered_data", dataset, f"{language}.json")]
+
+
+class IESemParse(datasets.GeneratorBasedBuilder):
+    """IE-SemParse: Inter-Bilingual Semantic Parsing Dataset for Indic Languages. Version 1.0."""
+
+    VERSION = datasets.Version("1.0.0", "")
+    BUILDER_CONFIG_CLASS = IESemParseConfig
+    BUILDER_CONFIGS = [
+        IESemParseConfig(
+            name=f"{dataset}_{language}",
+            language=language,
+            dataset=dataset,
+            version=datasets.Version("1.0.0", ""),
+            description=f"Plain text import of IE-SemParse for the {language} language for {dataset} dataset",
+        )
+        for language, dataset in zip(_LANGUAGES, _DATASETS)
+    ]
+
+    def _info(self):
+        dl_manager = datasets.DownloadManager()
+
+        urls_to_download = self.config._URLS
+
+        filepath = dl_manager.download_and_extract(urls_to_download)[0]
+
+        with open(filepath, "r") as f:
+            data = json.load(f)
+            data = data[list(data.keys())[0]]
+
+        features = datasets.Features(
+            {k: datasets.Value("string") for k in data[0].keys()}
+        )
+
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=features,
+            # No default supervised_keys (as we have to pass both premise
+            # and hypothesis as input).
+            supervised_keys=None,
+            homepage="https://github.com/divyanshuaggarwal/IE-SemParse",
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager):
+        urls_to_download = self.config._URLS
+
+        downloaded_file = dl_manager.download_and_extract(urls_to_download)[0]
+
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                gen_kwargs={
+                    "split_key": "train",
+                    "file_path": downloaded_file,
+                    "data_format": "IE-SemParse"
+                },
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.TEST,
+                gen_kwargs={
+                    "split_key": "test",
+                    "files": downloaded_file,
+                    "data_format": "IE-SemParse"
+                },
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.VALIDATION,
+                gen_kwargs={
+                    "split_key": "val",
+                    "files": downloaded_file,
+                    "data_format": "IE-SemParse"
+                },
+            ),
+        ]
+
+    def _generate_examples(self, data_format, filepath):
+        """This function returns the examples in the raw (text) form."""
+
+        with open(filepath, "r") as f:
+            data = json.load(f)
+            data = data[list(data.keys())[0]]
+
+        for idx, row in enumerate(data):
+            yield idx, {
+                k: v for k, v in data.items()
+            }
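For reference, a minimal usage sketch of the builder added above. The repository id comes from _URL in the script; the "iTOP_hi" config name is inferred from the f"{dataset}_{language}" naming in BUILDER_CONFIGS and is an assumption, not something this commit verifies.

# Usage sketch only; repo id taken from _URL, config name "iTOP_hi" inferred
# from BUILDER_CONFIGS (f"{dataset}_{language}").
from datasets import load_dataset

ds = load_dataset("Divyanshu/IE-SemParse", "iTOP_hi")

print(ds)              # DatasetDict with train, validation and test splits
print(ds["train"][0])  # a single example; every field is declared as a string feature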
IE-SemParse.py CHANGED
@@ -54,6 +54,7 @@ _DATASETS = (
 
 _URL = "https://huggingface.co/datasets/Divyanshu/IE-SemParse/resolve/main/"
 
+
 class IESemParseConfig(datasets.BuilderConfig):
     """BuilderConfig for IE-SemParse."""
 
@@ -65,13 +66,14 @@ class IESemParseConfig(datasets.BuilderConfig):
           **kwargs: keyword arguments forwarded to super.
         """
         super(IESemParseConfig, self).__init__(**kwargs)
-
+
         self.dataset = dataset
         self.language = language
         self.languages = _LANGUAGES
         self.datasets = _DATASETS
-
-        self._URLS = [os.path.join(_URL, "unfiltered_data", dataset, f"{language}.json")]
+
+        self._URLS = [os.path.join(
+            _URL, "unfiltered_data", dataset, f"{language}.json")]
 
 
 class IESemParse(datasets.GeneratorBasedBuilder):
@@ -83,7 +85,7 @@ class IESemParse(datasets.GeneratorBasedBuilder):
         IESemParseConfig(
             name=f"{dataset}_{language}",
             language=language,
-            dataset = dataset,
+            dataset=dataset,
             version=datasets.Version("1.0.0", ""),
             description=f"Plain text import of IE-SemParse for the {language} language for {dataset} dataset",
         )
@@ -92,20 +94,19 @@ class IESemParse(datasets.GeneratorBasedBuilder):
 
     def _info(self):
         dl_manager = datasets.DownloadManager()
-
+
         urls_to_download = self.config._URLS
-
+
         filepath = dl_manager.download_and_extract(urls_to_download)[0]
-
+
         with open(filepath, "r") as f:
             data = json.load(f)
             data = data[list(data.keys())[0]]
-
-
+
         features = datasets.Features(
-            { k: datasets.Value("string") for k in data.keys()}
+            {k: datasets.Value("string") for k in data[0].keys()}
         )
-
+
         return datasets.DatasetInfo(
             description=_DESCRIPTION,
             features=features,
@@ -118,34 +119,33 @@ class IESemParse(datasets.GeneratorBasedBuilder):
 
     def _split_generators(self, dl_manager):
         urls_to_download = self.config._URLS
-
+
         downloaded_file = dl_manager.download_and_extract(urls_to_download)[0]
 
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 gen_kwargs={
-                    "split_key": "train",
-                    "file_path": downloaded_file,
-                    "data_format": "IE-SemParse"
-                }
-                ,
+                    "split_key": "train",
+                    "file_path": downloaded_file,
+                    "data_format": "IE-SemParse"
+                },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
                 gen_kwargs={
-                    "split_key": "test",
-                    "files": downloaded_file,
-                    "data_format": "IE-SemParse"
-                },
+                    "split_key": "test",
+                    "files": downloaded_file,
+                    "data_format": "IE-SemParse"
+                },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION,
                 gen_kwargs={
-                    "split_key": "val",
-                    "files": downloaded_file,
-                    "data_format": "IE-SemParse"
-                },
+                    "split_key": "val",
+                    "files": downloaded_file,
+                    "data_format": "IE-SemParse"
+                },
             ),
         ]
 
@@ -158,5 +158,5 @@ class IESemParse(datasets.GeneratorBasedBuilder):
 
         for idx, row in enumerate(data):
             yield idx, {
-                k:v for k,v in data.items()
+                k: v for k, v in data.items()
             }