Diwank Singh committed on
Commit
b47751a
•
1 Parent(s): 3591636

change data format to HDF5


Signed-off-by: Diwank Singh <diwank.singh@gmail.com>
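
The notebooks added below all write their splits into a single `data.h5` per dataset with `DataFrame.to_hdf` and read them back with `pandas.read_hdf` under the `train`, `test`, and `eval` keys. A minimal consumption sketch, assuming a local checkout with `git lfs pull` already run and the `tables` (PyTables) package installed; the path is just one of the new files:

```python
import pandas as pd  # HDF5 I/O additionally requires the `tables` package

# Assumed path: one of the data.h5 files added in this commit.
path = "data/crowd_transliteration/data.h5"

# Each file stores three DataFrames under fixed keys.
splits = {key: pd.read_hdf(path, key) for key in ("train", "eval", "test")}
for name, frame in splits.items():
    print(name, frame.shape)
```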

.gitattributes CHANGED
@@ -1,3 +1,4 @@
+*.h5 filter=lfs diff=lfs merge=lfs -text
 *.txt filter=lfs diff=lfs merge=lfs -text
 *.json filter=lfs diff=lfs merge=lfs -text
 *.xml filter=lfs diff=lfs merge=lfs -text
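
For reference, the rule added above is exactly what `git lfs track "*.h5"` appends, so the new HDF5 dumps are stored through Git LFS just like the existing .txt, .json, and .xml data files.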
data/crowd_transliteration/{crowd_transliterations.hi-en.txt → data.h5} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2e45d29dec243dea84f0d233e453bc14efe48d795da4258e64aba9144d36365f
-size 380351
+oid sha256:3526e48f077b7dfafbdedcbbed739675cceaacef2a31ab651802afc4404300e8
+size 3634483
data/crowd_transliteration/process.ipynb ADDED
@@ -0,0 +1,178 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 1,
6
+ "id": "bed45d12-7681-4ba4-9c89-48a3515704e2",
7
+ "metadata": {},
8
+ "outputs": [
9
+ {
10
+ "name": "stdout",
11
+ "output_type": "stream",
12
+ "text": [
13
+ "File ‘crowd_transliterations.hi-en.txt’ already there; not retrieving.\n",
14
+ "\n"
15
+ ]
16
+ }
17
+ ],
18
+ "source": [
19
+ "!wget -nc https://raw.githubusercontent.com/chsasank/indic-transliteration/master/data/crowd_transliterations.hi-en.txt"
20
+ ]
21
+ },
22
+ {
23
+ "cell_type": "code",
24
+ "execution_count": 3,
25
+ "id": "965589a9-c62e-4659-a6bc-6f0a2bad5d19",
26
+ "metadata": {},
27
+ "outputs": [
28
+ {
29
+ "data": {
30
+ "text/plain": [
31
+ "Index(['target_hinglish', 'source_hindi'], dtype='object')"
32
+ ]
33
+ },
34
+ "execution_count": 3,
35
+ "metadata": {},
36
+ "output_type": "execute_result"
37
+ }
38
+ ],
39
+ "source": [
40
+ "import pandas as pd\n",
41
+ "\n",
42
+ "df = pd.read_csv(\"./crowd_transliterations.hi-en.txt\", sep='\\t', names=[\"target_hinglish\", \"source_hindi\"])\n",
43
+ "df.columns"
44
+ ]
45
+ },
46
+ {
47
+ "cell_type": "markdown",
48
+ "id": "b5c7c7c7-b9a6-4ea2-a5ef-edaf982ae0ad",
49
+ "metadata": {},
50
+ "source": [
51
+ "### Required columns\n",
52
+ "- target_hinglish\n",
53
+ "- source_hindi\n",
54
+ "- parallel_english\n",
55
+ "- annotations\n",
56
+ "- raw_input\n",
57
+ "- alternates\n",
58
+ "\n",
59
+ "> For **crowd_transliterations.hi-en.txt**, only `target_hinglish` and `source_hindi` are valid"
60
+ ]
61
+ },
62
+ {
63
+ "cell_type": "code",
64
+ "execution_count": 5,
65
+ "id": "b54fdd52-1ab0-4c84-89e5-0bcb8fcbfbeb",
66
+ "metadata": {},
67
+ "outputs": [],
68
+ "source": [
69
+ "# Add empty columns\n",
70
+ "df[\"raw_input\"] = \\\n",
71
+ " df[\"parallel_english\"] = \\\n",
72
+ " df[\"alternates\"] = \\\n",
73
+ " df[\"annotations\"] = None\n",
74
+ "\n",
75
+ "# Split dataset\n",
76
+ "from sklearn.model_selection import train_test_split\n",
77
+ "_train_eval_df, test_df = train_test_split(df, test_size=0.1)\n",
78
+ "train_df, eval_df = train_test_split(_train_eval_df, test_size=0.1)"
79
+ ]
80
+ },
81
+ {
82
+ "cell_type": "code",
83
+ "execution_count": 6,
84
+ "id": "6e804366-34cd-45c7-b3c6-46b7b8c1b420",
85
+ "metadata": {},
86
+ "outputs": [
87
+ {
88
+ "name": "stdout",
89
+ "output_type": "stream",
90
+ "text": [
91
+ "Requirement already satisfied: tables in /opt/conda/lib/python3.7/site-packages (3.7.0)\n",
92
+ "Requirement already satisfied: numexpr>=2.6.2 in /opt/conda/lib/python3.7/site-packages (from tables) (2.8.1)\n",
93
+ "Requirement already satisfied: numpy>=1.19.0 in /opt/conda/lib/python3.7/site-packages (from tables) (1.19.5)\n",
94
+ "Requirement already satisfied: packaging in /opt/conda/lib/python3.7/site-packages (from tables) (21.3)\n",
95
+ "Requirement already satisfied: pyparsing!=3.0.5,>=2.0.2 in /opt/conda/lib/python3.7/site-packages (from packaging->tables) (3.0.6)\n"
96
+ ]
97
+ },
98
+ {
99
+ "name": "stderr",
100
+ "output_type": "stream",
101
+ "text": [
102
+ "/opt/conda/lib/python3.7/site-packages/pandas/core/generic.py:2718: PerformanceWarning: \n",
103
+ "your performance may suffer as PyTables will pickle object types that it cannot\n",
104
+ "map directly to c-types [inferred_type->mixed,key->block0_values] [items->Index(['target_hinglish', 'source_hindi', 'raw_input', 'parallel_english',\n",
105
+ " 'alternates', 'annotations'],\n",
106
+ " dtype='object')]\n",
107
+ "\n",
108
+ " encoding=encoding,\n"
109
+ ]
110
+ }
111
+ ],
112
+ "source": [
113
+ "!pip install tables\n",
114
+ "\n",
115
+ "# Save to HDF5 file\n",
116
+ "train_df.to_hdf(\"./data.h5\", \"train\", complevel=9)\n",
117
+ "test_df.to_hdf(\"./data.h5\", \"test\", complevel=9)\n",
118
+ "eval_df.to_hdf(\"./data.h5\", \"eval\", complevel=9)"
119
+ ]
120
+ },
121
+ {
122
+ "cell_type": "code",
123
+ "execution_count": 7,
124
+ "id": "3298f2f3-3e21-478e-b027-947c992f880d",
125
+ "metadata": {},
126
+ "outputs": [],
127
+ "source": [
128
+ "# Confirm that everything worked as expected\n",
129
+ "\n",
130
+ "# Load from HDF5 file\n",
131
+ "_train_df = pd.read_hdf(\"./data.h5\", \"train\")\n",
132
+ "_test_df = pd.read_hdf(\"./data.h5\", \"test\")\n",
133
+ "_eval_df = pd.read_hdf(\"./data.h5\", \"eval\")\n",
134
+ "\n",
135
+ "assert (len(_train_df) == len(train_df)) == \\\n",
136
+ " (len(_eval_df) == len(eval_df)) == \\\n",
137
+ " (len(_test_df) == len(test_df))"
138
+ ]
139
+ },
140
+ {
141
+ "cell_type": "code",
142
+ "execution_count": 8,
143
+ "id": "60461121-bed5-4ba0-ba7d-dd46256c62e3",
144
+ "metadata": {},
145
+ "outputs": [],
146
+ "source": [
147
+ "!rm crowd_transliterations.hi-en.txt"
148
+ ]
149
+ }
150
+ ],
151
+ "metadata": {
152
+ "environment": {
153
+ "kernel": "python3",
154
+ "name": "managed-notebooks.m87",
155
+ "type": "gcloud",
156
+ "uri": "gcr.io/deeplearning-platform-release/base-cu110:latest"
157
+ },
158
+ "kernelspec": {
159
+ "display_name": "Python (Local)",
160
+ "language": "python",
161
+ "name": "local-base"
162
+ },
163
+ "language_info": {
164
+ "codemirror_mode": {
165
+ "name": "ipython",
166
+ "version": 3
167
+ },
168
+ "file_extension": ".py",
169
+ "mimetype": "text/x-python",
170
+ "name": "python",
171
+ "nbconvert_exporter": "python",
172
+ "pygments_lexer": "ipython3",
173
+ "version": "3.7.12"
174
+ }
175
+ },
176
+ "nbformat": 4,
177
+ "nbformat_minor": 5
178
+ }
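Every process.ipynb in this commit follows the same recipe: fetch the raw source, coerce it to the shared six-column schema, split it, and write all splits into one data.h5 with pandas. A condensed sketch of that shared pattern (column names and split sizes are taken from the notebooks; the input file is the crowd-sourced transliteration list used above):

```python
import pandas as pd
from sklearn.model_selection import train_test_split

SCHEMA = ["target_hinglish", "source_hindi", "parallel_english",
          "annotations", "raw_input", "alternates"]

# Illustrative input: the tab-separated transliteration pairs downloaded above.
df = pd.read_csv("crowd_transliterations.hi-en.txt", sep="\t",
                 names=["target_hinglish", "source_hindi"])

# Fill in the schema columns this particular source does not provide.
for col in SCHEMA:
    if col not in df.columns:
        df[col] = None

# Two-stage 0.1/0.1 split, as in the notebooks (roughly 81/9/10 overall).
_train_eval_df, test_df = train_test_split(df, test_size=0.1)
train_df, eval_df = train_test_split(_train_eval_df, test_size=0.1)

# One HDF5 file, one key per split (requires the `tables` package).
for key, split in {"train": train_df, "test": test_df, "eval": eval_df}.items():
    split.to_hdf("data.h5", key, complevel=9)
```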
data/hindi_romanized_dump/{hi_rom.txt → data.h5} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:964952e12f06aee34c9a689636bd56bca6541b78558975f7ee15b277a9d31ca4
-size 517324149
+oid sha256:c67ea7c1fd0400f01f2be2779af5d8d112936cf99ba99c11fa12c73fb95873c5
+size 579369992
data/hindi_romanized_dump/process.ipynb ADDED
@@ -0,0 +1,204 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 2,
6
+ "id": "bed45d12-7681-4ba4-9c89-48a3515704e2",
7
+ "metadata": {},
8
+ "outputs": [
9
+ {
10
+ "name": "stdout",
11
+ "output_type": "stream",
12
+ "text": [
13
+ "File ‘hi_rom.txt.xz’ already there; not retrieving.\n",
14
+ "\n",
15
+ "hi_rom.txt.xz (1/1)\n",
16
+ "xz: hi_rom.txt: File exists\n"
17
+ ]
18
+ }
19
+ ],
20
+ "source": [
21
+ "!wget -nc http://data.statmt.org/cc-100/hi_rom.txt.xz\n",
22
+ "!xz -d -v hi_rom.txt.xz\n",
23
+ "!rm hi_rom.txt.xz"
24
+ ]
25
+ },
26
+ {
27
+ "cell_type": "markdown",
28
+ "id": "b5c7c7c7-b9a6-4ea2-a5ef-edaf982ae0ad",
29
+ "metadata": {
30
+ "tags": []
31
+ },
32
+ "source": [
33
+ "### Required columns\n",
34
+ "- target_hinglish\n",
35
+ "- source_hindi\n",
36
+ "- parallel_english\n",
37
+ "- annotations\n",
38
+ "- raw_input\n",
39
+ "- alternates\n",
40
+ "\n",
41
+ "> For **hi_rom.txt**, only `target_hinglish` is valid"
42
+ ]
43
+ },
44
+ {
45
+ "cell_type": "code",
46
+ "execution_count": 7,
47
+ "id": "965589a9-c62e-4659-a6bc-6f0a2bad5d19",
48
+ "metadata": {},
49
+ "outputs": [
50
+ {
51
+ "name": "stdout",
52
+ "output_type": "stream",
53
+ "text": [
54
+ "Requirement already satisfied: clean-text[gpl] in /opt/conda/lib/python3.7/site-packages (0.6.0)\n",
55
+ "Requirement already satisfied: tqdm in /opt/conda/lib/python3.7/site-packages (4.62.3)\n",
56
+ "Requirement already satisfied: emoji<2.0.0,>=1.0.0 in /opt/conda/lib/python3.7/site-packages (from clean-text[gpl]) (1.6.3)\n",
57
+ "Requirement already satisfied: ftfy<7.0,>=6.0 in /opt/conda/lib/python3.7/site-packages (from clean-text[gpl]) (6.1.1)\n",
58
+ "Requirement already satisfied: unidecode<2.0.0,>=1.1.1 in /opt/conda/lib/python3.7/site-packages (from clean-text[gpl]) (1.3.2)\n",
59
+ "Requirement already satisfied: wcwidth>=0.2.5 in /opt/conda/lib/python3.7/site-packages (from ftfy<7.0,>=6.0->clean-text[gpl]) (0.2.5)\n"
60
+ ]
61
+ },
62
+ {
63
+ "name": "stderr",
64
+ "output_type": "stream",
65
+ "text": [
66
+ "10251114it [20:30, 8333.31it/s] \n"
67
+ ]
68
+ }
69
+ ],
70
+ "source": [
71
+ "!pip install clean-text[gpl] tqdm\n",
72
+ "import pandas as pd\n",
73
+ "from tqdm import tqdm\n",
74
+ "from cleantext import clean as clean_\n",
75
+ "\n",
76
+ "clean = lambda x: clean_(\n",
77
+ " x, \n",
78
+ " no_line_breaks=True, no_punct=True,\n",
79
+ " no_urls=True, replace_with_url=\"<|url|>\",\n",
80
+ " no_emoji=True, no_phone_numbers=True, replace_with_phone_number=\"<|phonenumber|>\",\n",
81
+ " no_emails=True, replace_with_email=\"<|email|>\",\n",
82
+ " no_currency_symbols=True, replace_with_currency_symbol=\"\", )\n",
83
+ "\n",
84
+ "with open(\"./hi_rom.txt\", 'r') as file:\n",
85
+ " df = pd.DataFrame(\n",
86
+ " [(clean(line), None, None, None, None, None) for line in tqdm(file)],\n",
87
+ " columns=[\"target_hinglish\", \"source_hindi\", \"parallel_english\", \"annotations\", \"raw_input\", \"alternates\"] )"
88
+ ]
89
+ },
90
+ {
91
+ "cell_type": "code",
92
+ "execution_count": 8,
93
+ "id": "b54fdd52-1ab0-4c84-89e5-0bcb8fcbfbeb",
94
+ "metadata": {},
95
+ "outputs": [],
96
+ "source": [
97
+ "# Split dataset\n",
98
+ "from sklearn.model_selection import train_test_split\n",
99
+ "_train_eval_df, test_df = train_test_split(df, test_size=0.1)\n",
100
+ "train_df, eval_df = train_test_split(_train_eval_df, test_size=0.1)"
101
+ ]
102
+ },
103
+ {
104
+ "cell_type": "code",
105
+ "execution_count": 9,
106
+ "id": "6e804366-34cd-45c7-b3c6-46b7b8c1b420",
107
+ "metadata": {},
108
+ "outputs": [
109
+ {
110
+ "name": "stdout",
111
+ "output_type": "stream",
112
+ "text": [
113
+ "Collecting tables\n",
114
+ " Using cached tables-3.7.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (5.9 MB)\n",
115
+ "Requirement already satisfied: numpy>=1.19.0 in /opt/conda/lib/python3.7/site-packages (from tables) (1.19.5)\n",
116
+ "Collecting numexpr>=2.6.2\n",
117
+ " Using cached numexpr-2.8.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (379 kB)\n",
118
+ "Requirement already satisfied: packaging in /opt/conda/lib/python3.7/site-packages (from tables) (21.3)\n",
119
+ "Requirement already satisfied: pyparsing!=3.0.5,>=2.0.2 in /opt/conda/lib/python3.7/site-packages (from packaging->tables) (3.0.6)\n",
120
+ "Installing collected packages: numexpr, tables\n",
121
+ "Successfully installed numexpr-2.8.1 tables-3.7.0\n"
122
+ ]
123
+ },
124
+ {
125
+ "name": "stderr",
126
+ "output_type": "stream",
127
+ "text": [
128
+ "/opt/conda/lib/python3.7/site-packages/pandas/core/generic.py:2718: PerformanceWarning: \n",
129
+ "your performance may suffer as PyTables will pickle object types that it cannot\n",
130
+ "map directly to c-types [inferred_type->mixed,key->block0_values] [items->Index(['target_hinglish', 'source_hindi', 'parallel_english', 'annotations',\n",
131
+ " 'raw_input', 'alternates'],\n",
132
+ " dtype='object')]\n",
133
+ "\n",
134
+ " encoding=encoding,\n"
135
+ ]
136
+ }
137
+ ],
138
+ "source": [
139
+ "!pip install tables\n",
140
+ "\n",
141
+ "# Save to HDF5 file\n",
142
+ "train_df.to_hdf(\"./data.h5\", \"train\", complevel=9)\n",
143
+ "test_df.to_hdf(\"./data.h5\", \"test\", complevel=9)\n",
144
+ "eval_df.to_hdf(\"./data.h5\", \"eval\", complevel=9)"
145
+ ]
146
+ },
147
+ {
148
+ "cell_type": "code",
149
+ "execution_count": 10,
150
+ "id": "3298f2f3-3e21-478e-b027-947c992f880d",
151
+ "metadata": {},
152
+ "outputs": [],
153
+ "source": [
154
+ "# Confirm that everything worked as expected\n",
155
+ "\n",
156
+ "# Load from HDF5 file\n",
157
+ "_train_df = pd.read_hdf(\"./data.h5\", \"train\")\n",
158
+ "_test_df = pd.read_hdf(\"./data.h5\", \"test\")\n",
159
+ "_eval_df = pd.read_hdf(\"./data.h5\", \"eval\")\n",
160
+ "\n",
161
+ "assert (len(_train_df) == len(train_df)) == \\\n",
162
+ " (len(_eval_df) == len(eval_df)) == \\\n",
163
+ " (len(_test_df) == len(test_df))"
164
+ ]
165
+ },
166
+ {
167
+ "cell_type": "code",
168
+ "execution_count": 11,
169
+ "id": "60461121-bed5-4ba0-ba7d-dd46256c62e3",
170
+ "metadata": {},
171
+ "outputs": [],
172
+ "source": [
173
+ "!rm hi_rom.txt"
174
+ ]
175
+ }
176
+ ],
177
+ "metadata": {
178
+ "environment": {
179
+ "kernel": "python3",
180
+ "name": "managed-notebooks.m87",
181
+ "type": "gcloud",
182
+ "uri": "gcr.io/deeplearning-platform-release/base-cu110:latest"
183
+ },
184
+ "kernelspec": {
185
+ "display_name": "Python (Local)",
186
+ "language": "python",
187
+ "name": "local-base"
188
+ },
189
+ "language_info": {
190
+ "codemirror_mode": {
191
+ "name": "ipython",
192
+ "version": 3
193
+ },
194
+ "file_extension": ".py",
195
+ "mimetype": "text/x-python",
196
+ "name": "python",
197
+ "nbconvert_exporter": "python",
198
+ "pygments_lexer": "ipython3",
199
+ "version": "3.7.12"
200
+ }
201
+ },
202
+ "nbformat": 4,
203
+ "nbformat_minor": 5
204
+ }
data/hindi_xlit/HiEn_ann1_train.json DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:1c92b3c8e955dcc3574223b4fc0516dfb3df1205131b3ad0fef00e6bd35745b4
-size 2993734
data/hindi_xlit/HiEn_ann1_valid.json DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:4c9336adbabcfcd8602eee9866f8193e4d38300d933217fe3eb77ffd6b993f4c
-size 335141
data/hindi_xlit/data.h5 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c79a4fab51fc5303da465270be9994a13aa803d04eb7c452ac488239d47eadcf
+size 10892954
data/hindi_xlit/process.ipynb ADDED
@@ -0,0 +1,200 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 3,
6
+ "id": "bed45d12-7681-4ba4-9c89-48a3515704e2",
7
+ "metadata": {},
8
+ "outputs": [
9
+ {
10
+ "name": "stdout",
11
+ "output_type": "stream",
12
+ "text": [
13
+ "--2022-03-05 12:25:22-- https://github.com/AI4Bharat/IndianNLP-Transliteration/releases/download/DATA/Hindi_Xlit_dataset.zip\n",
14
+ "Resolving github.com (github.com)... 140.82.114.3\n",
15
+ "Connecting to github.com (github.com)|140.82.114.3|:443... connected.\n",
16
+ "HTTP request sent, awaiting response... 302 Found\n",
17
+ "Location: https://objects.githubusercontent.com/github-production-release-asset-2e65be/231321785/14c95280-01a2-11eb-921f-4221081fa4b2?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAIWNJYAX4CSVEH53A%2F20220305%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20220305T122522Z&X-Amz-Expires=300&X-Amz-Signature=ef0c94bb0f3602f5edbca49df20bb64a477fe34bfec15b5ee78b43f9be4da4e6&X-Amz-SignedHeaders=host&actor_id=0&key_id=0&repo_id=231321785&response-content-disposition=attachment%3B%20filename%3DHindi_Xlit_dataset.zip&response-content-type=application%2Foctet-stream [following]\n",
18
+ "--2022-03-05 12:25:22-- https://objects.githubusercontent.com/github-production-release-asset-2e65be/231321785/14c95280-01a2-11eb-921f-4221081fa4b2?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAIWNJYAX4CSVEH53A%2F20220305%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20220305T122522Z&X-Amz-Expires=300&X-Amz-Signature=ef0c94bb0f3602f5edbca49df20bb64a477fe34bfec15b5ee78b43f9be4da4e6&X-Amz-SignedHeaders=host&actor_id=0&key_id=0&repo_id=231321785&response-content-disposition=attachment%3B%20filename%3DHindi_Xlit_dataset.zip&response-content-type=application%2Foctet-stream\n",
19
+ "Resolving objects.githubusercontent.com (objects.githubusercontent.com)... 185.199.108.133, 185.199.110.133, 185.199.111.133, ...\n",
20
+ "Connecting to objects.githubusercontent.com (objects.githubusercontent.com)|185.199.108.133|:443... connected.\n",
21
+ "HTTP request sent, awaiting response... 200 OK\n",
22
+ "Length: 609266 (595K) [application/octet-stream]\n",
23
+ "Saving to: ‘Hindi_Xlit_dataset.zip’\n",
24
+ "\n",
25
+ "Hindi_Xlit_dataset. 100%[===================>] 594.99K --.-KB/s in 0.04s \n",
26
+ "\n",
27
+ "2022-03-05 12:25:23 (13.8 MB/s) - ‘Hindi_Xlit_dataset.zip’ saved [609266/609266]\n",
28
+ "\n",
29
+ "Archive: ./Hindi_Xlit_dataset.zip\n",
30
+ " inflating: HiEn_ann1_test.json \n",
31
+ " inflating: HiEn_ann1_train.json \n",
32
+ " inflating: HiEn_ann1_valid.json \n",
33
+ " inflating: legalcode.txt \n"
34
+ ]
35
+ }
36
+ ],
37
+ "source": [
38
+ "!wget -nc https://github.com/AI4Bharat/IndianNLP-Transliteration/releases/download/DATA/Hindi_Xlit_dataset.zip\n",
39
+ "!unzip -n ./Hindi_Xlit_dataset.zip\n",
40
+ "!rm ./legalcode.txt ./Hindi_Xlit_dataset.zip"
41
+ ]
42
+ },
43
+ {
44
+ "cell_type": "markdown",
45
+ "id": "b5c7c7c7-b9a6-4ea2-a5ef-edaf982ae0ad",
46
+ "metadata": {},
47
+ "source": [
48
+ "### Required columns\n",
49
+ "- target_hinglish\n",
50
+ "- source_hindi\n",
51
+ "- parallel_english\n",
52
+ "- annotations\n",
53
+ "- raw_input\n",
54
+ "- alternates\n",
55
+ "\n",
56
+ "> For **HiEn_ann1**, only `target_hinglish` and `source_hindi` are valid"
57
+ ]
58
+ },
59
+ {
60
+ "cell_type": "code",
61
+ "execution_count": 4,
62
+ "id": "965589a9-c62e-4659-a6bc-6f0a2bad5d19",
63
+ "metadata": {},
64
+ "outputs": [],
65
+ "source": [
66
+ "import pandas as pd\n",
67
+ "import json\n",
68
+ "\n",
69
+ "with open(\"./HiEn_ann1_train.json\", 'r') as f:\n",
70
+ " train_data = json.load(f)\n",
71
+ "\n",
72
+ "with open(\"./HiEn_ann1_valid.json\", 'r') as f:\n",
73
+ " eval_data = json.load(f)\n",
74
+ "\n",
75
+ "with open(\"./HiEn_ann1_test.json\", 'r') as f:\n",
76
+ " test_data = json.load(f)\n",
77
+ "\n",
78
+ "train_df = pd.DataFrame(\n",
79
+ " [(source_hindi, target_hinglish, None, None, None, None) \n",
80
+ " for source_hindi, values in train_data.items() \n",
81
+ " for target_hinglish in values ], \n",
82
+ " columns=[\"source_hindi\", \"target_hinglish\", \"parallel_english\", \"annotations\", \"raw_input\", \"alternates\"] )\n",
83
+ "\n",
84
+ "eval_df = pd.DataFrame(\n",
85
+ " [(source_hindi, target_hinglish, None, None, None, None) \n",
86
+ " for source_hindi, values in eval_data.items() \n",
87
+ " for target_hinglish in values ], \n",
88
+ " columns=[\"source_hindi\", \"target_hinglish\", \"parallel_english\", \"annotations\", \"raw_input\", \"alternates\"] )\n",
89
+ "\n",
90
+ "test_df = pd.DataFrame(\n",
91
+ " [(source_hindi, target_hinglish, None, None, None, None) \n",
92
+ " for source_hindi, values in test_data.items() \n",
93
+ " for target_hinglish in values ], \n",
94
+ " columns=[\"source_hindi\", \"target_hinglish\", \"parallel_english\", \"annotations\", \"raw_input\", \"alternates\"] )\n"
95
+ ]
96
+ },
97
+ {
98
+ "cell_type": "code",
99
+ "execution_count": 5,
100
+ "id": "6e804366-34cd-45c7-b3c6-46b7b8c1b420",
101
+ "metadata": {},
102
+ "outputs": [
103
+ {
104
+ "name": "stdout",
105
+ "output_type": "stream",
106
+ "text": [
107
+ "Collecting tables\n",
108
+ " Using cached tables-3.7.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (5.9 MB)\n",
109
+ "Requirement already satisfied: numpy>=1.19.0 in /opt/conda/lib/python3.7/site-packages (from tables) (1.19.5)\n",
110
+ "Requirement already satisfied: packaging in /opt/conda/lib/python3.7/site-packages (from tables) (21.3)\n",
111
+ "Collecting numexpr>=2.6.2\n",
112
+ " Using cached numexpr-2.8.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (379 kB)\n",
113
+ "Requirement already satisfied: pyparsing!=3.0.5,>=2.0.2 in /opt/conda/lib/python3.7/site-packages (from packaging->tables) (3.0.6)\n",
114
+ "Installing collected packages: numexpr, tables\n",
115
+ "Successfully installed numexpr-2.8.1 tables-3.7.0\n"
116
+ ]
117
+ },
118
+ {
119
+ "name": "stderr",
120
+ "output_type": "stream",
121
+ "text": [
122
+ "/opt/conda/lib/python3.7/site-packages/pandas/core/generic.py:2718: PerformanceWarning: \n",
123
+ "your performance may suffer as PyTables will pickle object types that it cannot\n",
124
+ "map directly to c-types [inferred_type->mixed,key->block0_values] [items->Index(['source_hindi', 'target_hinglish', 'parallel_english', 'annotations',\n",
125
+ " 'raw_input', 'alternates'],\n",
126
+ " dtype='object')]\n",
127
+ "\n",
128
+ " encoding=encoding,\n"
129
+ ]
130
+ }
131
+ ],
132
+ "source": [
133
+ "!pip install tables\n",
134
+ "\n",
135
+ "# Save to hdfs files\n",
136
+ "train_df.to_hdf(\"./data.h5\", \"train\", complevel=9)\n",
137
+ "test_df.to_hdf(\"./data.h5\", \"test\", complevel=9)\n",
138
+ "eval_df.to_hdf(\"./data.h5\", \"eval\", complevel=9)"
139
+ ]
140
+ },
141
+ {
142
+ "cell_type": "code",
143
+ "execution_count": 6,
144
+ "id": "3298f2f3-3e21-478e-b027-947c992f880d",
145
+ "metadata": {},
146
+ "outputs": [],
147
+ "source": [
148
+ "# Confirm that everything worked as expected\n",
149
+ "\n",
150
+ "# Load from hdfs files\n",
151
+ "_train_df = pd.read_hdf(\"./data.h5\", \"train\")\n",
152
+ "_test_df = pd.read_hdf(\"./data.h5\", \"test\")\n",
153
+ "_eval_df = pd.read_hdf(\"./data.h5\", \"eval\")\n",
154
+ "\n",
155
+ "assert (len(_train_df) == len(train_df)) == \\\n",
156
+ " (len(_eval_df) == len(eval_df)) == \\\n",
157
+ " (len(_test_df) == len(test_df))"
158
+ ]
159
+ },
160
+ {
161
+ "cell_type": "code",
162
+ "execution_count": 7,
163
+ "id": "60461121-bed5-4ba0-ba7d-dd46256c62e3",
164
+ "metadata": {},
165
+ "outputs": [],
166
+ "source": [
167
+ "!rm HiEn_ann1_test.json\n",
168
+ "!rm HiEn_ann1_train.json\n",
169
+ "!rm HiEn_ann1_valid.json"
170
+ ]
171
+ }
172
+ ],
173
+ "metadata": {
174
+ "environment": {
175
+ "kernel": "python3",
176
+ "name": "managed-notebooks.m87",
177
+ "type": "gcloud",
178
+ "uri": "gcr.io/deeplearning-platform-release/base-cu110:latest"
179
+ },
180
+ "kernelspec": {
181
+ "display_name": "Python (Local)",
182
+ "language": "python",
183
+ "name": "local-base"
184
+ },
185
+ "language_info": {
186
+ "codemirror_mode": {
187
+ "name": "ipython",
188
+ "version": 3
189
+ },
190
+ "file_extension": ".py",
191
+ "mimetype": "text/x-python",
192
+ "name": "python",
193
+ "nbconvert_exporter": "python",
194
+ "pygments_lexer": "ipython3",
195
+ "version": "3.7.12"
196
+ }
197
+ },
198
+ "nbformat": 4,
199
+ "nbformat_minor": 5
200
+ }
data/hinge/.ipynb_checkpoints/eval_synthetic-checkpoint.csv DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:01179fcb3c0dfbf45cddd0c89cd40a867676668d10b5cdea29f6f2a765d2ec82
-size 67597
data/{fire2013/HindiEnglish_FIRE2013_AnnotatedDev.txt → hinge/data.h5} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6251057ea0c2e2e1f2daa9d96a0726f4bb713762296b3eb96ed6dfbab0b5dba6
-size 54391
+oid sha256:f4a0a15fa9c69d5a705707c73d1fea20abf9882fad18219579e4660a0fd213ea
+size 5243006
data/hinge/eval_human.csv DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:a48b0adeaa5a65f097ff644f695b20ef6227ef7ff46bd0c573b84186130ff49c
-size 139425
data/hinge/eval_human.pkl DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:a48b0adeaa5a65f097ff644f695b20ef6227ef7ff46bd0c573b84186130ff49c
-size 139425
data/hinge/eval_synthetic.csv DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:6d6c862f0d2654b08a20a60cd4d4d68b041da9c3c0029fd91e52e9fd3cabe240
-size 176109
data/hinge/process.ipynb ADDED
@@ -0,0 +1,169 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 3,
6
+ "id": "bed45d12-7681-4ba4-9c89-48a3515704e2",
7
+ "metadata": {},
8
+ "outputs": [
9
+ {
10
+ "name": "stdout",
11
+ "output_type": "stream",
12
+ "text": [
13
+ "Please request data from https://codalab.lisn.upsaclay.fr/competitions/1688\n",
14
+ "We only need eval_synthetic.csv and train_synthetic.csv\n",
15
+ "eval_synthetic.csv process.ipynb train_synthetic.csv\n"
16
+ ]
17
+ }
18
+ ],
19
+ "source": [
20
+ "print(\"Please request data from https://codalab.lisn.upsaclay.fr/competitions/1688\")\n",
21
+ "print(\"We only need eval_synthetic.csv and train_synthetic.csv\")\n",
22
+ "!ls ."
23
+ ]
24
+ },
25
+ {
26
+ "cell_type": "markdown",
27
+ "id": "b5c7c7c7-b9a6-4ea2-a5ef-edaf982ae0ad",
28
+ "metadata": {},
29
+ "source": [
30
+ "### Required columns\n",
31
+ "- target_hinglish\n",
32
+ "- source_hindi\n",
33
+ "- parallel_english\n",
34
+ "- annotations\n",
35
+ "- raw_input\n",
36
+ "- alternates\n",
37
+ "\n",
38
+ "> For **HingE**, only `target_hinglish`, `parallel_english` and `source_hindi` are valid"
39
+ ]
40
+ },
41
+ {
42
+ "cell_type": "code",
43
+ "execution_count": 14,
44
+ "id": "965589a9-c62e-4659-a6bc-6f0a2bad5d19",
45
+ "metadata": {},
46
+ "outputs": [],
47
+ "source": [
48
+ "import pandas as pd\n",
49
+ "\n",
50
+ "train_df = pd.read_csv(\"./train_synthetic.csv\", names=[\"parallel_english\", \"source_hindi\", \"target_hinglish\"], header=0, usecols=[0, 1, 2])\n",
51
+ "_test_eval_df = pd.read_csv(\"./eval_synthetic.csv\", names=[\"parallel_english\", \"source_hindi\", \"target_hinglish\"], header=0, usecols=[0, 1, 2])\n",
52
+ "\n",
53
+ "# Add empty columns\n",
54
+ "train_df[\"raw_input\"] = \\\n",
55
+ " train_df[\"alternates\"] = \\\n",
56
+ " train_df[\"annotations\"] = None\n",
57
+ "\n",
58
+ "_test_eval_df[\"raw_input\"] = \\\n",
59
+ " _test_eval_df[\"alternates\"] = \\\n",
60
+ " _test_eval_df[\"annotations\"] = None\n",
61
+ "\n",
62
+ "# Split dataset\n",
63
+ "from sklearn.model_selection import train_test_split\n",
64
+ "eval_df, test_df = train_test_split(_test_eval_df, test_size=0.5)"
65
+ ]
66
+ },
67
+ {
68
+ "cell_type": "code",
69
+ "execution_count": 15,
70
+ "id": "6e804366-34cd-45c7-b3c6-46b7b8c1b420",
71
+ "metadata": {},
72
+ "outputs": [
73
+ {
74
+ "name": "stdout",
75
+ "output_type": "stream",
76
+ "text": [
77
+ "Collecting tables\n",
78
+ " Using cached tables-3.7.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (5.9 MB)\n",
79
+ "Collecting numexpr>=2.6.2\n",
80
+ " Using cached numexpr-2.8.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (379 kB)\n",
81
+ "Requirement already satisfied: packaging in /opt/conda/lib/python3.7/site-packages (from tables) (21.3)\n",
82
+ "Requirement already satisfied: numpy>=1.19.0 in /opt/conda/lib/python3.7/site-packages (from tables) (1.19.5)\n",
83
+ "Requirement already satisfied: pyparsing!=3.0.5,>=2.0.2 in /opt/conda/lib/python3.7/site-packages (from packaging->tables) (3.0.6)\n",
84
+ "Installing collected packages: numexpr, tables\n",
85
+ "Successfully installed numexpr-2.8.1 tables-3.7.0\n"
86
+ ]
87
+ },
88
+ {
89
+ "name": "stderr",
90
+ "output_type": "stream",
91
+ "text": [
92
+ "/opt/conda/lib/python3.7/site-packages/pandas/core/generic.py:2718: PerformanceWarning: \n",
93
+ "your performance may suffer as PyTables will pickle object types that it cannot\n",
94
+ "map directly to c-types [inferred_type->mixed,key->block0_values] [items->Index(['parallel_english', 'source_hindi', 'target_hinglish', 'raw_input',\n",
95
+ " 'alternates', 'annotations'],\n",
96
+ " dtype='object')]\n",
97
+ "\n",
98
+ " encoding=encoding,\n"
99
+ ]
100
+ }
101
+ ],
102
+ "source": [
103
+ "!pip install tables\n",
104
+ "\n",
105
+ "# Save to HDF5 file\n",
106
+ "train_df.to_hdf(\"./data.h5\", \"train\", complevel=9)\n",
107
+ "test_df.to_hdf(\"./data.h5\", \"test\", complevel=9)\n",
108
+ "eval_df.to_hdf(\"./data.h5\", \"eval\", complevel=9)"
109
+ ]
110
+ },
111
+ {
112
+ "cell_type": "code",
113
+ "execution_count": 16,
114
+ "id": "3298f2f3-3e21-478e-b027-947c992f880d",
115
+ "metadata": {},
116
+ "outputs": [],
117
+ "source": [
118
+ "# Confirm that everything worked as expected\n",
119
+ "\n",
120
+ "# Load from HDF5 file\n",
121
+ "_train_df = pd.read_hdf(\"./data.h5\", \"train\")\n",
122
+ "_test_df = pd.read_hdf(\"./data.h5\", \"test\")\n",
123
+ "_eval_df = pd.read_hdf(\"./data.h5\", \"eval\")\n",
124
+ "\n",
125
+ "assert (len(_train_df) == len(train_df)) == \\\n",
126
+ " (len(_eval_df) == len(eval_df)) == \\\n",
127
+ " (len(_test_df) == len(test_df))"
128
+ ]
129
+ },
130
+ {
131
+ "cell_type": "code",
132
+ "execution_count": 17,
133
+ "id": "60461121-bed5-4ba0-ba7d-dd46256c62e3",
134
+ "metadata": {},
135
+ "outputs": [],
136
+ "source": [
137
+ "!rm eval_synthetic.csv\n",
138
+ "!rm train_synthetic.csv"
139
+ ]
140
+ }
141
+ ],
142
+ "metadata": {
143
+ "environment": {
144
+ "kernel": "python3",
145
+ "name": "managed-notebooks.m87",
146
+ "type": "gcloud",
147
+ "uri": "gcr.io/deeplearning-platform-release/base-cu110:latest"
148
+ },
149
+ "kernelspec": {
150
+ "display_name": "Python (Local)",
151
+ "language": "python",
152
+ "name": "local-base"
153
+ },
154
+ "language_info": {
155
+ "codemirror_mode": {
156
+ "name": "ipython",
157
+ "version": 3
158
+ },
159
+ "file_extension": ".py",
160
+ "mimetype": "text/x-python",
161
+ "name": "python",
162
+ "nbconvert_exporter": "python",
163
+ "pygments_lexer": "ipython3",
164
+ "version": "3.7.12"
165
+ }
166
+ },
167
+ "nbformat": 4,
168
+ "nbformat_minor": 5
169
+ }
data/hinge/train_human.csv DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:8fe6085bbcb39c014f79faaac540ce7fa03d054924a600f340247f9ebb0dcf21
-size 675953
data/hinge/train_human.pkl DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:8fe6085bbcb39c014f79faaac540ce7fa03d054924a600f340247f9ebb0dcf21
-size 675953
data/hinge/train_synthetic.csv DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:1e7b09167b56515a617a90f8b83c61711df9e2e3796557f7c54dc4f7e036a666
-size 1265593
data/{fire2013/HindiEnglish_FIRE2013_Test_GT.txt → hinglish_norm/data.h5} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c31a05d3884a680f9507b7101d1c7d33fb62d870e568ce25cb5b763574c90aae
-size 29326
+oid sha256:ebc7cf76907c37dc6f726b69a6c8532628680fcba5ae1cb13609707180beb3ec
+size 4837335
data/hinglish_norm/hinglishNorm_trainSet.json DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:cf622e18d943de8fc28a869b0a7d76076676b83238ebbd61f96850ef86c2f337
-size 2370544
data/hinglish_norm/process.ipynb ADDED
@@ -0,0 +1,201 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 1,
6
+ "id": "bed45d12-7681-4ba4-9c89-48a3515704e2",
7
+ "metadata": {},
8
+ "outputs": [
9
+ {
10
+ "name": "stdout",
11
+ "output_type": "stream",
12
+ "text": [
13
+ "File ‘hinglishNorm.json’ already there; not retrieving.\n",
14
+ "\n"
15
+ ]
16
+ }
17
+ ],
18
+ "source": [
19
+ "!wget -nc https://raw.githubusercontent.com/piyushmakhija5/hinglishNorm/master/dataset/hinglishNorm.json"
20
+ ]
21
+ },
22
+ {
23
+ "cell_type": "code",
24
+ "execution_count": 2,
25
+ "id": "965589a9-c62e-4659-a6bc-6f0a2bad5d19",
26
+ "metadata": {},
27
+ "outputs": [
28
+ {
29
+ "data": {
30
+ "text/plain": [
31
+ "Index(['id', 'inputText', 'tags', 'normalizedText'], dtype='object')"
32
+ ]
33
+ },
34
+ "execution_count": 2,
35
+ "metadata": {},
36
+ "output_type": "execute_result"
37
+ }
38
+ ],
39
+ "source": [
40
+ "import pandas as pd\n",
41
+ "\n",
42
+ "df = pd.read_json(\"./hinglishNorm.json\")\n",
43
+ "df.columns"
44
+ ]
45
+ },
46
+ {
47
+ "cell_type": "markdown",
48
+ "id": "b5c7c7c7-b9a6-4ea2-a5ef-edaf982ae0ad",
49
+ "metadata": {},
50
+ "source": [
51
+ "### Required columns\n",
52
+ "- target_hinglish\n",
53
+ "- source_hindi\n",
54
+ "- parallel_english\n",
55
+ "- annotations\n",
56
+ "- raw_input\n",
57
+ "- alternates\n",
58
+ "\n",
59
+ "> For **hinglishNorm**, only `target_hinglish`, `raw_input` and `annotations` are valid\n",
60
+ "\n",
61
+ "### Mappings\n",
62
+ "- `normalizedText` _=>_ `target_hinglish`\n",
63
+ "- `inputText` _=>_ `raw_input`\n",
64
+ "- `tags` _=>_ `annotations` (after json.loads)"
65
+ ]
66
+ },
67
+ {
68
+ "cell_type": "code",
69
+ "execution_count": 3,
70
+ "id": "b54fdd52-1ab0-4c84-89e5-0bcb8fcbfbeb",
71
+ "metadata": {},
72
+ "outputs": [],
73
+ "source": [
74
+ "# Add empty columns\n",
75
+ "df[\"source_hindi\"] = \\\n",
76
+ " df[\"parallel_english\"] = \\\n",
77
+ " df[\"alternates\"] = None\n",
78
+ "\n",
79
+ "# Remove unnecessary columns\n",
80
+ "df = df.drop(\"id\", axis=1)\n",
81
+ "\n",
82
+ "# Rename columns\n",
83
+ "df = df.rename(columns={\n",
84
+ " \"normalizedText\": \"target_hinglish\", \n",
85
+ " \"inputText\": \"raw_input\", \n",
86
+ " \"tags\": \"annotations\", })\n",
87
+ "\n",
88
+ "# Parse annotations json\n",
89
+ "import json\n",
90
+ "df[\"annotations\"] = df[\"annotations\"].map(lambda x: json.loads(x.replace(\"'\", '\"')))\n",
91
+ "\n",
92
+ "# Split dataset\n",
93
+ "from sklearn.model_selection import train_test_split\n",
94
+ "_train_eval_df, test_df = train_test_split(df, test_size=0.1)\n",
95
+ "train_df, eval_df = train_test_split(_train_eval_df, test_size=0.1)"
96
+ ]
97
+ },
98
+ {
99
+ "cell_type": "code",
100
+ "execution_count": 5,
101
+ "id": "6e804366-34cd-45c7-b3c6-46b7b8c1b420",
102
+ "metadata": {},
103
+ "outputs": [
104
+ {
105
+ "name": "stdout",
106
+ "output_type": "stream",
107
+ "text": [
108
+ "Collecting tables\n",
109
+ " Downloading tables-3.7.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (5.9 MB)\n",
110
+ "     |████████████████████████████████| 5.9 MB 4.9 MB/s \n",
111
+ "\u001b[?25hRequirement already satisfied: numpy>=1.19.0 in /opt/conda/lib/python3.7/site-packages (from tables) (1.19.5)\n",
112
+ "Requirement already satisfied: packaging in /opt/conda/lib/python3.7/site-packages (from tables) (21.3)\n",
113
+ "Collecting numexpr>=2.6.2\n",
114
+ " Downloading numexpr-2.8.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (379 kB)\n",
115
+ "     |████████████████████████████████| 379 kB 73.4 MB/s \n",
116
+ "\u001b[?25hRequirement already satisfied: pyparsing!=3.0.5,>=2.0.2 in /opt/conda/lib/python3.7/site-packages (from packaging->tables) (3.0.6)\n",
117
+ "Installing collected packages: numexpr, tables\n",
118
+ "Successfully installed numexpr-2.8.1 tables-3.7.0\n"
119
+ ]
120
+ },
121
+ {
122
+ "name": "stderr",
123
+ "output_type": "stream",
124
+ "text": [
125
+ "/opt/conda/lib/python3.7/site-packages/pandas/core/generic.py:2718: PerformanceWarning: \n",
126
+ "your performance may suffer as PyTables will pickle object types that it cannot\n",
127
+ "map directly to c-types [inferred_type->mixed,key->block0_values] [items->Index(['raw_input', 'annotations', 'target_hinglish', 'source_hindi',\n",
128
+ " 'parallel_english', 'alternates'],\n",
129
+ " dtype='object')]\n",
130
+ "\n",
131
+ " encoding=encoding,\n"
132
+ ]
133
+ }
134
+ ],
135
+ "source": [
136
+ "!pip install tables\n",
137
+ "\n",
138
+ "# Save to hdfs files\n",
139
+ "train_df.to_hdf(\"./data.h5\", \"train\", complevel=9)\n",
140
+ "test_df.to_hdf(\"./data.h5\", \"test\", complevel=9)\n",
141
+ "eval_df.to_hdf(\"./data.h5\", \"eval\", complevel=9)"
142
+ ]
143
+ },
144
+ {
145
+ "cell_type": "code",
146
+ "execution_count": 6,
147
+ "id": "c8908455-c76f-4ee2-9608-b215f6fafa7c",
148
+ "metadata": {},
149
+ "outputs": [],
150
+ "source": [
151
+ "# Confirm that everything worked as expected\n",
152
+ "\n",
153
+ "# Load from hdfs files\n",
154
+ "_train_df = pd.read_hdf(\"./data.h5\", \"train\")\n",
155
+ "_test_df = pd.read_hdf(\"./data.h5\", \"test\")\n",
156
+ "_eval_df = pd.read_hdf(\"./data.h5\", \"eval\")\n",
157
+ "\n",
158
+ "assert (len(_train_df) == len(train_df)) == \\\n",
159
+ " (len(_eval_df) == len(eval_df)) == \\\n",
160
+ " (len(_test_df) == len(test_df))"
161
+ ]
162
+ },
163
+ {
164
+ "cell_type": "code",
165
+ "execution_count": 7,
166
+ "id": "60461121-bed5-4ba0-ba7d-dd46256c62e3",
167
+ "metadata": {},
168
+ "outputs": [],
169
+ "source": [
170
+ "!rm hinglishNorm.json"
171
+ ]
172
+ }
173
+ ],
174
+ "metadata": {
175
+ "environment": {
176
+ "kernel": "python3",
177
+ "name": "managed-notebooks.m87",
178
+ "type": "gcloud",
179
+ "uri": "gcr.io/deeplearning-platform-release/base-cu110:latest"
180
+ },
181
+ "kernelspec": {
182
+ "display_name": "Python (Local)",
183
+ "language": "python",
184
+ "name": "local-base"
185
+ },
186
+ "language_info": {
187
+ "codemirror_mode": {
188
+ "name": "ipython",
189
+ "version": 3
190
+ },
191
+ "file_extension": ".py",
192
+ "mimetype": "text/x-python",
193
+ "name": "python",
194
+ "nbconvert_exporter": "python",
195
+ "pygments_lexer": "ipython3",
196
+ "version": "3.7.12"
197
+ }
198
+ },
199
+ "nbformat": 4,
200
+ "nbformat_minor": 5
201
+ }
data/news2018/NEWS2018_M-EnHi_dev.xml DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:754d4174f291a0f6209f6ddccca4a7af04700f6bfd4d9b83ffb24c0711d4aaa3
-size 127866
data/news2018/NEWS2018_M-EnHi_trn.xml DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:32a0f41ebe7baa635d0fc2ada5feb1ad93e18a03a22dea8f7bc015cab5adc0b6
-size 1681958
data/news2018/NEWS2018_M-EnHi_tst.xml DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:57db5526c30ec34cd707ed365558231283788b34be4a77ae2b807f2867e6be30
-size 60532
data/{hindi_xlit/HiEn_ann1_test.json → news2018/data.h5} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8e8f8b749849ed6c69220b46da33a27f7a2f7e97e053935b834c5e2ae68600e7
-size 338964
+oid sha256:692631290d13d664e4ea55f1fbfb6d6138dd7836c08dea658fabf36ed70beda1
+size 3820798
data/news2018/process.ipynb ADDED
@@ -0,0 +1,188 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 1,
6
+ "id": "bed45d12-7681-4ba4-9c89-48a3515704e2",
7
+ "metadata": {},
8
+ "outputs": [
9
+ {
10
+ "name": "stdout",
11
+ "output_type": "stream",
12
+ "text": [
13
+ "Please request data from http://workshop.colips.org/news2018/terms&conditions_msri.html\n"
14
+ ]
15
+ }
16
+ ],
17
+ "source": [
18
+ "print(\"Please request data from http://workshop.colips.org/news2018/terms&conditions_msri.html\")"
19
+ ]
20
+ },
21
+ {
22
+ "cell_type": "markdown",
23
+ "id": "b5c7c7c7-b9a6-4ea2-a5ef-edaf982ae0ad",
24
+ "metadata": {},
25
+ "source": [
26
+ "### Required columns\n",
27
+ "- target_hinglish\n",
28
+ "- source_hindi\n",
29
+ "- parallel_english\n",
30
+ "- annotations\n",
31
+ "- raw_input\n",
32
+ "- alternates\n",
33
+ "\n",
34
+ "> For **NEWS2018**, only `target_hinglish` and `source_hindi` are valid"
35
+ ]
36
+ },
37
+ {
38
+ "cell_type": "code",
39
+ "execution_count": 7,
40
+ "id": "965589a9-c62e-4659-a6bc-6f0a2bad5d19",
41
+ "metadata": {},
42
+ "outputs": [
43
+ {
44
+ "name": "stdout",
45
+ "output_type": "stream",
46
+ "text": [
47
+ "Requirement already satisfied: lxml in /opt/conda/lib/python3.7/site-packages (4.8.0)\n"
48
+ ]
49
+ }
50
+ ],
51
+ "source": [
52
+ "!pip install lxml\n",
53
+ "import pandas as pd\n",
54
+ "\n",
55
+ "train_df = pd.read_xml(\"./NEWS2018_M-EnHi_trn.xml\", names=[\"target_hinglish\", \"source_hindi\"], elems_only=True)\n",
56
+ "test_df = pd.read_xml(\"./NEWS2018_M-EnHi_tst.xml\", names=[\"target_hinglish\", \"source_hindi\"], elems_only=True)\n",
57
+ "eval_df = pd.read_xml(\"./NEWS2018_M-EnHi_dev.xml\", names=[\"target_hinglish\", \"source_hindi\"], elems_only=True)\n",
58
+ "\n",
59
+ "# Add empty columns\n",
60
+ "train_df[\"raw_input\"] = \\\n",
61
+ " train_df[\"parallel_english\"] = \\\n",
62
+ " train_df[\"alternates\"] = \\\n",
63
+ " train_df[\"annotations\"] = None\n",
64
+ "\n",
65
+ "# Add empty columns\n",
66
+ "test_df[\"raw_input\"] = \\\n",
67
+ " test_df[\"parallel_english\"] = \\\n",
68
+ " test_df[\"alternates\"] = \\\n",
69
+ " test_df[\"annotations\"] = None\n",
70
+ "\n",
71
+ "# Add empty columns\n",
72
+ "eval_df[\"raw_input\"] = \\\n",
73
+ " eval_df[\"parallel_english\"] = \\\n",
74
+ " eval_df[\"alternates\"] = \\\n",
75
+ " eval_df[\"annotations\"] = None\n"
76
+ ]
77
+ },
78
+ {
79
+ "cell_type": "code",
80
+ "execution_count": 8,
81
+ "id": "6e804366-34cd-45c7-b3c6-46b7b8c1b420",
82
+ "metadata": {},
83
+ "outputs": [
84
+ {
85
+ "name": "stdout",
86
+ "output_type": "stream",
87
+ "text": [
88
+ "Collecting tables\n",
89
+ " Using cached tables-3.7.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (5.9 MB)\n",
90
+ "Requirement already satisfied: numpy>=1.19.0 in /opt/conda/lib/python3.7/site-packages (from tables) (1.19.5)\n",
91
+ "Collecting numexpr>=2.6.2\n",
92
+ " Using cached numexpr-2.8.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (379 kB)\n",
93
+ "Requirement already satisfied: packaging in /opt/conda/lib/python3.7/site-packages (from tables) (21.3)\n",
94
+ "Requirement already satisfied: pyparsing!=3.0.5,>=2.0.2 in /opt/conda/lib/python3.7/site-packages (from packaging->tables) (3.0.6)\n",
95
+ "Installing collected packages: numexpr, tables\n",
96
+ "Successfully installed numexpr-2.8.1 tables-3.7.0\n"
97
+ ]
98
+ },
99
+ {
100
+ "name": "stderr",
101
+ "output_type": "stream",
102
+ "text": [
103
+ "/opt/conda/lib/python3.7/site-packages/pandas/core/generic.py:2718: PerformanceWarning: \n",
104
+ "your performance may suffer as PyTables will pickle object types that it cannot\n",
105
+ "map directly to c-types [inferred_type->mixed,key->block0_values] [items->Index(['target_hinglish', 'source_hindi', 'raw_input', 'parallel_english',\n",
106
+ " 'alternates', 'annotations'],\n",
107
+ " dtype='object')]\n",
108
+ "\n",
109
+ " encoding=encoding,\n",
110
+ "/opt/conda/lib/python3.7/site-packages/pandas/core/generic.py:2718: PerformanceWarning: \n",
111
+ "your performance may suffer as PyTables will pickle object types that it cannot\n",
112
+ "map directly to c-types [inferred_type->mixed,key->block0_values] [items->Index(['target_hinglish', 'raw_input', 'parallel_english', 'alternates',\n",
113
+ " 'annotations'],\n",
114
+ " dtype='object')]\n",
115
+ "\n",
116
+ " encoding=encoding,\n"
117
+ ]
118
+ }
119
+ ],
120
+ "source": [
121
+ "!pip install tables\n",
122
+ "\n",
123
+ "# Save to HDF5 file\n",
124
+ "train_df.to_hdf(\"./data.h5\", \"train\", complevel=9)\n",
125
+ "test_df.to_hdf(\"./data.h5\", \"test\", complevel=9)\n",
126
+ "eval_df.to_hdf(\"./data.h5\", \"eval\", complevel=9)"
127
+ ]
128
+ },
129
+ {
130
+ "cell_type": "code",
131
+ "execution_count": 11,
132
+ "id": "3298f2f3-3e21-478e-b027-947c992f880d",
133
+ "metadata": {},
134
+ "outputs": [],
135
+ "source": [
136
+ "# Confirm that everything worked as expected\n",
137
+ "\n",
138
+ "# Load from HDF5 file\n",
139
+ "_train_df = pd.read_hdf(\"./data.h5\", \"train\")\n",
140
+ "_test_df = pd.read_hdf(\"./data.h5\", \"test\")\n",
141
+ "_eval_df = pd.read_hdf(\"./data.h5\", \"eval\")\n",
142
+ "\n",
143
+ "assert (len(_train_df) == len(train_df)) == \\\n",
144
+ " (len(_eval_df) == len(eval_df)) == \\\n",
145
+ " (len(_test_df) == len(test_df))"
146
+ ]
147
+ },
148
+ {
149
+ "cell_type": "code",
150
+ "execution_count": 12,
151
+ "id": "60461121-bed5-4ba0-ba7d-dd46256c62e3",
152
+ "metadata": {},
153
+ "outputs": [],
154
+ "source": [
155
+ "!rm NEWS2018_M-EnHi_trn.xml\n",
156
+ "!rm NEWS2018_M-EnHi_tst.xml\n",
157
+ "!rm NEWS2018_M-EnHi_dev.xml"
158
+ ]
159
+ }
160
+ ],
161
+ "metadata": {
162
+ "environment": {
163
+ "kernel": "python3",
164
+ "name": "managed-notebooks.m87",
165
+ "type": "gcloud",
166
+ "uri": "gcr.io/deeplearning-platform-release/base-cu110:latest"
167
+ },
168
+ "kernelspec": {
169
+ "display_name": "Python (Local)",
170
+ "language": "python",
171
+ "name": "local-base"
172
+ },
173
+ "language_info": {
174
+ "codemirror_mode": {
175
+ "name": "ipython",
176
+ "version": 3
177
+ },
178
+ "file_extension": ".py",
179
+ "mimetype": "text/x-python",
180
+ "name": "python",
181
+ "nbconvert_exporter": "python",
182
+ "pygments_lexer": "ipython3",
183
+ "version": "3.7.12"
184
+ }
185
+ },
186
+ "nbformat": 4,
187
+ "nbformat_minor": 5
188
+ }
hinglish-dump.py CHANGED
@@ -37,9 +37,7 @@ _URLS = {
     ])),
     "hinge": list(map(lambda x: f"{_HOMEPAGE}/resolve/main/data/{x}" , [
         "hinge/eval_human.csv",
-        "hinge/eval_human.pkl",
         "hinge/train_human.csv",
-        "hinge/train_human.pkl",
         "hinge/train_synthetic.csv",
         "hinge/eval_synthetic.csv",
     ])),
@@ -56,7 +54,7 @@
 config_names = _URLS.keys()
 version = datasets.Version("1.0.0")
 
-class HinglishDumpDataset(datasets.GeneratorBasedBuilder):
+class HinglishDumpDataset(datasets.DatasetBuilder):
     """Raw merged dump of Hinglish (hi-EN) datasets."""
 
     VERSION = version
@@ -66,6 +64,8 @@ class HinglishDumpDataset(datasets.GeneratorBasedBuilder):
         datasets.BuilderConfig(name=subset, version=version, description=f"Config for {subset}")
         for subset in config_names
     ]
+
+    DEFAULT_CONFIG_NAME = None
 
     def _info(self):
 
@@ -81,7 +81,8 @@ class HinglishDumpDataset(datasets.GeneratorBasedBuilder):
         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
 
-        data_dir = dl_manager.download_and_extract(_URLS)
+        data_dir = self.data_dir = dl_manager.download_and_extract(_URLS)
+
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
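
hinglish-dump.py is consumed through the Hugging Face `datasets` library, with subsets selected by config name (the keys of `_URLS`). A hedged usage sketch; the hub repo id below is a placeholder, and it assumes the builder still implements everything `load_dataset` needs after the base-class change above:

```python
from datasets import load_dataset

# "hinge" is one of the config names derived from _URLS; the repo id is illustrative.
dump = load_dataset("diwank/hinglish-dump", "hinge")
print(dump)
```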