{ "cells": [ { "cell_type": "code", "execution_count": 3, "id": "bed45d12-7681-4ba4-9c89-48a3515704e2", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "--2022-03-05 12:25:22-- https://github.com/AI4Bharat/IndianNLP-Transliteration/releases/download/DATA/Hindi_Xlit_dataset.zip\n", "Resolving github.com (github.com)... 140.82.114.3\n", "Connecting to github.com (github.com)|140.82.114.3|:443... connected.\n", "HTTP request sent, awaiting response... 302 Found\n", "Location: https://objects.githubusercontent.com/github-production-release-asset-2e65be/231321785/14c95280-01a2-11eb-921f-4221081fa4b2?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAIWNJYAX4CSVEH53A%2F20220305%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20220305T122522Z&X-Amz-Expires=300&X-Amz-Signature=ef0c94bb0f3602f5edbca49df20bb64a477fe34bfec15b5ee78b43f9be4da4e6&X-Amz-SignedHeaders=host&actor_id=0&key_id=0&repo_id=231321785&response-content-disposition=attachment%3B%20filename%3DHindi_Xlit_dataset.zip&response-content-type=application%2Foctet-stream [following]\n", "--2022-03-05 12:25:22-- https://objects.githubusercontent.com/github-production-release-asset-2e65be/231321785/14c95280-01a2-11eb-921f-4221081fa4b2?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAIWNJYAX4CSVEH53A%2F20220305%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20220305T122522Z&X-Amz-Expires=300&X-Amz-Signature=ef0c94bb0f3602f5edbca49df20bb64a477fe34bfec15b5ee78b43f9be4da4e6&X-Amz-SignedHeaders=host&actor_id=0&key_id=0&repo_id=231321785&response-content-disposition=attachment%3B%20filename%3DHindi_Xlit_dataset.zip&response-content-type=application%2Foctet-stream\n", "Resolving objects.githubusercontent.com (objects.githubusercontent.com)... 185.199.108.133, 185.199.110.133, 185.199.111.133, ...\n", "Connecting to objects.githubusercontent.com (objects.githubusercontent.com)|185.199.108.133|:443... connected.\n", "HTTP request sent, awaiting response... 200 OK\n", "Length: 609266 (595K) [application/octet-stream]\n", "Saving to: ‘Hindi_Xlit_dataset.zip’\n", "\n", "Hindi_Xlit_dataset. 
{ "cell_type": "markdown", "id": "b5c7c7c7-b9a6-4ea2-a5ef-edaf982ae0ad", "metadata": {}, "source": [ "### Required columns\n", "- target_hinglish\n", "- source_hindi\n", "- parallel_english\n", "- annotations\n", "- raw_input\n", "- alternates\n", "\n", "> For **HiEn_ann1**, only `target_hinglish` and `source_hindi` carry real values; the remaining columns are kept as `None` placeholders so every split shares the same schema" ] },
{ "cell_type": "code", "execution_count": 4, "id": "965589a9-c62e-4659-a6bc-6f0a2bad5d19", "metadata": {}, "outputs": [], "source": [ "import pandas as pd\n", "import json\n", "\n", "COLUMNS = [\"source_hindi\", \"target_hinglish\", \"parallel_english\",\n", "           \"annotations\", \"raw_input\", \"alternates\"]\n", "\n", "def load_split(path):\n", "    \"\"\"Expand one JSON split into one DataFrame row per\n", "    (Hindi word, Hinglish romanization) pair; unused columns are None.\"\"\"\n", "    with open(path, 'r') as f:\n", "        data = json.load(f)\n", "    rows = [(source_hindi, target_hinglish, None, None, None, None)\n", "            for source_hindi, values in data.items()\n", "            for target_hinglish in values]\n", "    return pd.DataFrame(rows, columns=COLUMNS)\n", "\n", "train_df = load_split(\"./HiEn_ann1_train.json\")\n", "eval_df = load_split(\"./HiEn_ann1_valid.json\")\n", "test_df = load_split(\"./HiEn_ann1_test.json\")" ] },
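{ "cell_type": "markdown", "id": "c2e3f4a5-b6c7-4d8e-9f0a-1b2c3d4e5f6a", "metadata": {}, "source": [ "A small optional sanity check: confirm the split sizes and preview a few of the expanded rows before anything is written to disk." ] },
{ "cell_type": "code", "execution_count": null, "id": "d3f4a5b6-c7d8-4e9f-8a1b-2c3d4e5f6a7b", "metadata": {}, "outputs": [], "source": [ "# Row counts per split, then a short preview of the expanded rows.\n", "print(len(train_df), len(eval_df), len(test_df))\n", "train_df.head()" ] },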
{ "cell_type": "code", "execution_count": 5, "id": "6e804366-34cd-45c7-b3c6-46b7b8c1b420", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Collecting tables\n", " Using cached tables-3.7.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (5.9 MB)\n", "Requirement already satisfied: numpy>=1.19.0 in /opt/conda/lib/python3.7/site-packages (from tables) (1.19.5)\n", "Requirement already satisfied: packaging in /opt/conda/lib/python3.7/site-packages (from tables) (21.3)\n", "Collecting numexpr>=2.6.2\n", " Using cached numexpr-2.8.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (379 kB)\n", "Requirement already satisfied: pyparsing!=3.0.5,>=2.0.2 in /opt/conda/lib/python3.7/site-packages (from packaging->tables) (3.0.6)\n", "Installing collected packages: numexpr, tables\n", "Successfully installed numexpr-2.8.1 tables-3.7.0\n" ] },
{ "name": "stderr", "output_type": "stream", "text": [ "/opt/conda/lib/python3.7/site-packages/pandas/core/generic.py:2718: PerformanceWarning: \n", "your performance may suffer as PyTables will pickle object types that it cannot\n", "map directly to c-types [inferred_type->mixed,key->block0_values] [items->Index(['source_hindi', 'target_hinglish', 'parallel_english', 'annotations',\n", " 'raw_input', 'alternates'],\n", " dtype='object')]\n", "\n", " encoding=encoding,\n" ] } ],
"source": [ "!pip install tables\n", "\n", "# Save each split into a single compressed HDF5 file. PyTables has to pickle\n", "# the all-object columns, hence the PerformanceWarning below.\n", "train_df.to_hdf(\"./data.h5\", \"train\", complevel=9)\n", "test_df.to_hdf(\"./data.h5\", \"test\", complevel=9)\n", "eval_df.to_hdf(\"./data.h5\", \"eval\", complevel=9)" ] },
{ "cell_type": "code", "execution_count": 6, "id": "3298f2f3-3e21-478e-b027-947c992f880d", "metadata": {}, "outputs": [], "source": [ "# Confirm that everything worked as expected\n", "\n", "# Load the splits back from the HDF5 file\n", "_train_df = pd.read_hdf(\"./data.h5\", \"train\")\n", "_test_df = pd.read_hdf(\"./data.h5\", \"test\")\n", "_eval_df = pd.read_hdf(\"./data.h5\", \"eval\")\n", "\n", "# Each reloaded split must contain exactly as many rows as its source.\n", "assert len(_train_df) == len(train_df)\n", "assert len(_eval_df) == len(eval_df)\n", "assert len(_test_df) == len(test_df)" ] },
{ "cell_type": "code", "execution_count": 7, "id": "60461121-bed5-4ba0-ba7d-dd46256c62e3", "metadata": {}, "outputs": [], "source": [ "# Remove the extracted JSON files; the data now lives in data.h5\n", "!rm HiEn_ann1_test.json\n", "!rm HiEn_ann1_train.json\n", "!rm HiEn_ann1_valid.json" ] } ],
"metadata": { "environment": { "kernel": "python3", "name": "managed-notebooks.m87", "type": "gcloud", "uri": "gcr.io/deeplearning-platform-release/base-cu110:latest" }, "kernelspec": { "display_name": "Python (Local)", "language": "python", "name": "local-base" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.7.12" } }, "nbformat": 4, "nbformat_minor": 5 }