{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# !pip install datasets\n",
    "# !pip install huggingface_hub"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Authenticate with the Hugging Face Hub so the push_to_hub() calls below work.\n",
    "from huggingface_hub import notebook_login\n",
    "notebook_login()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import json\n",
    "\n",
    "def process_json_files(directory):\n",
    "    \"\"\"Recursively rewrite every .json file under `directory`.\n",
    "\n",
    "    If a file's top-level object contains an \"examples\" key, the file is\n",
    "    overwritten in place with just that payload; otherwise a message is\n",
    "    printed and the file is left untouched.\n",
    "    \"\"\"\n",
    "    for root, _, files in os.walk(directory):\n",
    "        for file in files:\n",
    "            if not file.endswith(\".json\"):\n",
    "                continue\n",
    "            file_path = os.path.join(root, file)\n",
    "            with open(file_path, 'r', encoding='utf-8') as f:\n",
    "                data = json.load(f)\n",
    "\n",
    "            if \"examples\" in data:\n",
    "                # Unwrap: keep only the examples payload.\n",
    "                with open(file_path, 'w', encoding='utf-8') as f:\n",
    "                    json.dump(data[\"examples\"], f, indent=4)\n",
    "            else:\n",
    "                print(f\"No 'examples' found in {file_path}\")\n",
    "\n",
    "# Replace 'your_directory' with the path to the directory containing your JSON files\n",
    "process_json_files('./xquad_in/')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from datasets import load_dataset, DatasetDict\n",
    "\n",
    "languages = [\"hi\",\"kn\",\"ta\",\"te\",\"mr\",\"ml\",\"gu\"]\n",
    "\n",
    "# Build a test/dev DatasetDict per language from the local flores xx->en\n",
    "# JSON files and push each one to the Hub as its own config.\n",
    "for language in languages:\n",
    "    test_file_name = f\"./flores_in/flores_{language}_en_test.json\"\n",
    "    test_dataset = load_dataset(\"json\", data_files=test_file_name)\n",
    "\n",
    "    dev_file_name = f\"./flores_in/flores_{language}_en_dev.json\"\n",
    "    # BUG FIX: this previously reloaded the *test* file (data_files=file_name),\n",
    "    # so the published \"dev\" split silently duplicated the test data.\n",
    "    dev_dataset = load_dataset(\"json\", data_files=dev_file_name)\n",
    "\n",
    "    # load_dataset(\"json\") exposes the loaded data under the \"train\" key.\n",
    "    final_dataset = DatasetDict({\n",
    "        \"test\": test_dataset[\"train\"],\n",
    "        \"dev\": dev_dataset[\"train\"]\n",
    "    })\n",
    "    final_dataset.push_to_hub(\"Cognitive-Lab/GoogleIndicGenBench_flores_xxen_in\", f\"{language}\")\n",
    "    print(f\"completed {language}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "hi\n",
    "kn\n",
    "ta\n",
    "te\n",
    "mr\n",
    "ml\n",
    "gu"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from datasets import load_dataset, DatasetDict\n",
    "\n",
    "languages = [\"hi\",\"kn\",\"ta\",\"te\",\"mr\",\"ml\",\"gu\"]\n",
    "\n",
    "# Assemble train/test/dev splits for each language from the local crosssum\n",
    "# JSON files and push them to the Hub, one config per language.\n",
    "for language in languages:\n",
    "    split_datasets = {}\n",
    "    for split in (\"train\", \"test\", \"dev\"):\n",
    "        split_file = f\"./crosssum_in/crosssum_english-{language}_{split}.json\"\n",
    "        # load_dataset(\"json\") always exposes the loaded data under \"train\".\n",
    "        split_datasets[split] = load_dataset(\"json\", data_files=split_file)[\"train\"]\n",
    "\n",
    "    final_dataset = DatasetDict(split_datasets)\n",
    "    final_dataset.push_to_hub(\"Cognitive-Lab/GoogleIndicGenBench_crosssum_in\", f\"{language}\")\n",
    "    print(f\"completed {language}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from datasets import load_dataset, DatasetDict\n",
    "\n",
    "languages = [\"hi\",\"kn\",\"ta\",\"te\",\"mr\",\"ml\",\"gu\"]\n",
    "\n",
    "# Assemble train/test/dev splits for each language from the local xorqa\n",
    "# JSON files and push them to the Hub, one config per language.\n",
    "for language in languages:\n",
    "    split_datasets = {}\n",
    "    for split in (\"train\", \"test\", \"dev\"):\n",
    "        split_file = f\"./xorqa_in/xorqa_{language}_{split}.json\"\n",
    "        # load_dataset(\"json\") always exposes the loaded data under \"train\".\n",
    "        split_datasets[split] = load_dataset(\"json\", data_files=split_file)[\"train\"]\n",
    "\n",
    "    final_dataset = DatasetDict(split_datasets)\n",
    "    final_dataset.push_to_hub(\"Cognitive-Lab/GoogleIndicGenBench_xorqa_in\", f\"{language}\")\n",
    "    print(f\"completed {language}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from datasets import load_dataset, DatasetDict\n",
    "\n",
    "languages = [\"hi\",\"kn\",\"ta\",\"te\",\"mr\",\"ml\",\"gu\"]\n",
    "\n",
    "# Assemble train/test/dev splits for each language from the local xquad\n",
    "# JSON files and push them to the Hub, one config per language.\n",
    "for language in languages:\n",
    "    split_datasets = {}\n",
    "    for split in (\"train\", \"test\", \"dev\"):\n",
    "        split_file = f\"./xquad_in/xquad_{language}_{split}.json\"\n",
    "        # load_dataset(\"json\") always exposes the loaded data under \"train\".\n",
    "        split_datasets[split] = load_dataset(\"json\", data_files=split_file)[\"train\"]\n",
    "\n",
    "    final_dataset = DatasetDict(split_datasets)\n",
    "    final_dataset.push_to_hub(\"Cognitive-Lab/GoogleIndicGenBench_xquad_in\", f\"{language}\")\n",
    "    print(f\"completed {language}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_indic_language(language_code: str) -> str:\n",
    "    \"\"\"Map an Indic ISO 639-1 language code to its English language name.\n",
    "\n",
    "    Raises KeyError if the code is not one of the supported languages.\n",
    "    \"\"\"\n",
    "    language_code_mapping = {\"hi\": \"hindi\",\n",
    "                             \"kn\": \"kannada\",\n",
    "                             \"ta\": \"tamil\",\n",
    "                             \"te\": \"telugu\",  # BUG FIX: was misspelled \"telgu\"\n",
    "                             \"mr\": \"marathi\",\n",
    "                             \"ml\": \"malayalam\",\n",
    "                             \"gu\": \"gujarati\"}\n",
    "    return language_code_mapping[language_code]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sanity check the code->name mapping.\n",
    "get_indic_language(\"hi\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 51,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Quick local sanity check: load one language's train split for inspection.\n",
    "train_file_name = f\"./xquad_in/xquad_hi_train.json\"\n",
    "train_dataset = load_dataset(\"json\" ,data_files=train_file_name)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 58,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "मे और टेलर\n",
      "ऐब्सलूट ग्रेटेस्ट\n",
      "विजेता क्रिस ऐलेन और रनर-अप ऐडम लैंबर्ट\n",
      "16 नवंबर\n",
      "दीवान शायरी\n",
      "फ़ारसी शायरी\n",
      "छंद संरचना\n",
      "मसनवी\n",
      "ग़ज़लें या कसीदे. ग़ज़लें इस पारंपरिक कला का सबसे बड़ा ख़ज़ाना हैं.\n",
      "क्वीन + पॉल रॉजर्स\n",
      "2008\n",
      "द कॉज़मोस रॉक्स\n",
      "3,50,000\n",
      "HIV/एड्स\n",
      "बेबी\n",
      "मैनचेस्टर स्मॉल-स्केल एक्सपेरिमेंटल मशीन\n",
      "विक्टोरिया यूनिवर्सिटी ऑफ़ मैनचेस्टर\n",
      "फ़्रेडरिक सी॰ विलियम्स, टॉम किलबर्न, और जेफ़ टूटिल\n",
      "21 जून, 1948\n",
      "फ़्रेमवर्क कन्वेंशन फ़ॉर द प्रोटेक्शन ऑफ़ नैशनल माइनॉरिटीज़\n",
      "15,000\n",
      "10 लाख\n",
      "30%\n",
      "2%\n",
      "शहीद हुए सैनिकों के अवशेषों को स्वेदश लौटाना\n",
      "4,167\n",
      "नैशनल मेमोरियल सेमेटेरी ऑफ़ द पैसिफ़िक\n",
      "220\n",
      "डिजिटल मॉन्सटर्स\n",
      "वर्चुअल पेट वाले खिलौने, ऐनमे, मांगा, वीडियो गेम, फ़िल्में, और एक ट्रेडिंग कार्ड गेम\n",
      "यह एक दूसरी दुनिया है, जो पृथ्वी के अलग-अलग कम्यूनिकेशन के नेटवर्क से बनती है\n",
      "\"डिजिडेस्टिंड\" या \"टेमर्स\"\n",
      "डिजिटल दुनिया के ताने-बाने को बर्बाद करना\n",
      "1989 से 1992 तक\n",
      "ऐडलेड\n",
      "मेलबर्न\n",
      "केनेट\n",
      "पलाज़िक\n",
      "80 लाख किलोमीटर\n",
      "मेक्स शीयरवॉटर\n",
      "50 साल\n",
      "14,000 कि॰मी॰\n",
      "इराक की सेना ने\n",
      "साल 1982 के मध्य में\n",
      "संयुक्त राष्ट्र\n",
      "1988\n",
      "11,000–16,000\n",
      "कैरेज हाउस\n",
      "सपनों की जगह\n",
      "मोहॉक\n",
      "ऑनेररी चीफ़\n",
      "ओनेंनडागा\n",
      "एचएमएस कंबरलैंड\n",
      "तीन महीने\n",
      "पहले विश्व युद्ध में\n",
      "साथी अधिकारियों ने\n",
      "ग्रहणी में अल्सर की वजह से\n",
      "वेनेटाई\n",
      "विस्चुला नदी के पूर्व में\n",
      "वेनेटाई\n",
      "रोमन काल में\n",
      "Photo Gallery\n",
      "2.60\n",
      "Facebook\n",
      "अलग से इंस्टॉल करना पड़ता है\n",
      "चेहरे के हाव-भाव\n",
      "कोइने ग्रीक\n",
      "उत्तर-पूर्व अफ़्रीका\n",
      "दक्षिण एशिया\n",
      "दक्षिण एशिया\n",
      "दक्षिण-पश्चिम एशिया\n",
      "उत्तर-पूर्व अफ़्रीका\n",
      "फ़ारसी साम्राज्य\n",
      "एशिया और अफ़्रीका\n",
      "कोइने ग्रीक\n",
      "प्रीमियर वन जाबाओ\n",
      "जियो मैकेनिक्स\n",
      "बचाव से जुड़े काम का\n",
      "50,000\n",
      "90 मिनट के बाद\n",
      "10\n",
      "50,000\n",
      "भूकंप के केंद्र के पास होने की वजह से\n",
      "विएना\n",
      "1968\n",
      "विएना कन्वेंशन के रोड साइन ऐंड सिग्नल 1968 के तहत\n",
      "खतरे और चेतावनी\n",
      "दिन के समय\n",
      "हॉल बी॰ वॉलिस\n",
      "मैक्सवेल एंडरसन\n",
      "1971\n",
      "रिचर्ड बर्टन\n",
      "रूस्टर कॉगबर्न\n",
      "एफ़बीआई की जांच करने वाला सिटिज़न कमीशन\n",
      "कई फ़ाइलें निकाली गई थीं\n",
      "कोइनटेलप्रो प्रोग्राम\n",
      "आम नागरिकों की ज़िंदगियों की जांच से जुड़ी जानकारी\n",
      "पूरा देश \"हिल\" गया था\n",
      "इटली\n",
      "वेनिस\n",
      "नए फ़्रांस\n",
      "भद्दे पहनावे\n",
      "पोप ग्रेगरी XVI\n"
     ]
    }
   ],
   "source": [
    "# Spot-check: print the first gold answer of every training example.\n",
    "for example in train_dataset[\"train\"]:\n",
    "    print(example[\"answers\"][0][\"text\"])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "llm-venv",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.0"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
