{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "\n",
    "# for i in range(3):\n",
    "#     url = f'https://f000.backblazeb2.com/file/malay-dataset/summary/results-semi-{i}.json'\n",
    "#     os.system(f'wget {url}')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "# from glob import glob\n",
    "\n",
    "# files = glob('results-semi-*.json')\n",
    "# files"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "https://malaya-dataset.s3-ap-southeast-1.amazonaws.com/summary/cnn/translated-0-5000.json\n",
      "https://malaya-dataset.s3-ap-southeast-1.amazonaws.com/summary/cnn/cnn-news-translated-5000-10000.json\n",
      "https://malaya-dataset.s3-ap-southeast-1.amazonaws.com/summary/cnn/translated-10000-20000.json\n",
      "https://malaya-dataset.s3-ap-southeast-1.amazonaws.com/summary/cnn/translated-20000-30000.json\n",
      "https://malaya-dataset.s3-ap-southeast-1.amazonaws.com/summary/cnn/cnn-news-translated-30000-40000.json\n",
      "https://malaya-dataset.s3-ap-southeast-1.amazonaws.com/summary/cnn/cnn-news-translated-50000-60000.json\n",
      "https://malaya-dataset.s3-ap-southeast-1.amazonaws.com/summary/cnn/cnn-news-translated-60000-70000.json\n",
      "https://malaya-dataset.s3-ap-southeast-1.amazonaws.com/summary/cnn/cnn-news-translated-70000-80000.json\n",
      "https://malaya-dataset.s3-ap-southeast-1.amazonaws.com/summary/cnn/cnn-news-translated-80000-90000.json\n",
      "https://malaya-dataset.s3-ap-southeast-1.amazonaws.com/summary/cnn/cnn-news-translated-90000-100000.json\n"
     ]
    }
   ],
   "source": [
    "from tqdm import tqdm\n",
    "\n",
    "links = \"\"\"\n",
    "1. part1, https://malaya-dataset.s3-ap-southeast-1.amazonaws.com/summary/cnn/translated-0-5000.json\n",
    "2. part2, https://malaya-dataset.s3-ap-southeast-1.amazonaws.com/summary/cnn/cnn-news-translated-5000-10000.json\n",
    "3. part3, https://malaya-dataset.s3-ap-southeast-1.amazonaws.com/summary/cnn/translated-10000-20000.json\n",
    "4. part4, https://malaya-dataset.s3-ap-southeast-1.amazonaws.com/summary/cnn/translated-20000-30000.json\n",
    "5. part5, https://malaya-dataset.s3-ap-southeast-1.amazonaws.com/summary/cnn/cnn-news-translated-30000-40000.json\n",
    "6. part6, https://malaya-dataset.s3-ap-southeast-1.amazonaws.com/summary/cnn/cnn-news-translated-50000-60000.json\n",
    "7. part7, https://malaya-dataset.s3-ap-southeast-1.amazonaws.com/summary/cnn/cnn-news-translated-60000-70000.json\n",
    "8. part8, https://malaya-dataset.s3-ap-southeast-1.amazonaws.com/summary/cnn/cnn-news-translated-70000-80000.json\n",
    "9. part9, https://malaya-dataset.s3-ap-southeast-1.amazonaws.com/summary/cnn/cnn-news-translated-80000-90000.json\n",
    "10. part10, https://malaya-dataset.s3-ap-southeast-1.amazonaws.com/summary/cnn/cnn-news-translated-90000-100000.json\n",
    "\"\"\"\n",
    "\n",
    "# parse '<n>. <part>, <url>' lines into download URLs and local filenames\n",
    "urls, cnn = [], []\n",
    "for line in links.split('\\n'):\n",
    "    if len(line):\n",
    "        urls.append(line.split(', ')[1])\n",
    "        cnn.append(line.split('/')[-1])\n",
    "\n",
    "# download every part\n",
    "for url in urls:\n",
    "    print(url)\n",
    "    os.system(f'wget {url}')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 81/81 [00:38<00:00,  2.13it/s]\n"
     ]
    }
   ],
   "source": [
    "links = \"\"\"\n",
    "1. summary/multinews/translated-multinews-0.json\n",
    "2. summary/multinews/translated-multinews-10500.json\n",
    "3. summary/multinews/translated-multinews-11200.json\n",
    "4. summary/multinews/translated-multinews-11900.json\n",
    "5. summary/multinews/translated-multinews-12600.json\n",
    "6. summary/multinews/translated-multinews-13300.json\n",
    "7. summary/multinews/translated-multinews-1400.json\n",
    "8. summary/multinews/translated-multinews-14000.json\n",
    "9. summary/multinews/translated-multinews-14700.json\n",
    "10. summary/multinews/translated-multinews-15400.json\n",
    "11. summary/multinews/translated-multinews-16100.json\n",
    "12. summary/multinews/translated-multinews-16800.json\n",
    "13. summary/multinews/translated-multinews-17500.json\n",
    "14. summary/multinews/translated-multinews-18200.json\n",
    "15. summary/multinews/translated-multinews-18900.json\n",
    "16. summary/multinews/translated-multinews-19600.json\n",
    "17. summary/multinews/translated-multinews-20300.json\n",
    "18. summary/multinews/translated-multinews-2100.json\n",
    "19. summary/multinews/translated-multinews-21000.json\n",
    "20. summary/multinews/translated-multinews-21700.json\n",
    "21. summary/multinews/translated-multinews-22400.json\n",
    "22. summary/multinews/translated-multinews-23100.json\n",
    "23. summary/multinews/translated-multinews-23800.json\n",
    "24. summary/multinews/translated-multinews-24500.json\n",
    "25. summary/multinews/translated-multinews-25200.json\n",
    "26. summary/multinews/translated-multinews-25900.json\n",
    "27. summary/multinews/translated-multinews-26600.json\n",
    "28. summary/multinews/translated-multinews-27300.json\n",
    "29. summary/multinews/translated-multinews-2800.json\n",
    "30. summary/multinews/translated-multinews-28000.json\n",
    "31. summary/multinews/translated-multinews-28700.json\n",
    "32. summary/multinews/translated-multinews-29400.json\n",
    "33. summary/multinews/translated-multinews-30100.json\n",
    "34. summary/multinews/translated-multinews-30800.json\n",
    "35. summary/multinews/translated-multinews-31500.json\n",
    "36. summary/multinews/translated-multinews-32200.json\n",
    "37. summary/multinews/translated-multinews-32900.json\n",
    "38. summary/multinews/translated-multinews-33600.json\n",
    "39. summary/multinews/translated-multinews-34300.json\n",
    "40. summary/multinews/translated-multinews-3500.json\n",
    "41. summary/multinews/translated-multinews-35000.json\n",
    "42. summary/multinews/translated-multinews-35700.json\n",
    "43. summary/multinews/translated-multinews-36400.json\n",
    "44. summary/multinews/translated-multinews-37100.json\n",
    "45. summary/multinews/translated-multinews-37800.json\n",
    "46. summary/multinews/translated-multinews-38500.json\n",
    "47. summary/multinews/translated-multinews-39200.json\n",
    "48. summary/multinews/translated-multinews-39900.json\n",
    "49. summary/multinews/translated-multinews-40600.json\n",
    "50. summary/multinews/translated-multinews-41300.json\n",
    "51. summary/multinews/translated-multinews-4200.json\n",
    "52. summary/multinews/translated-multinews-42000.json\n",
    "53. summary/multinews/translated-multinews-42700.json\n",
    "54. summary/multinews/translated-multinews-43400.json\n",
    "55. summary/multinews/translated-multinews-44100.json\n",
    "56. summary/multinews/translated-multinews-44800.json\n",
    "57. summary/multinews/translated-multinews-45500.json\n",
    "58. summary/multinews/translated-multinews-46200.json\n",
    "59. summary/multinews/translated-multinews-46900.json\n",
    "60. summary/multinews/translated-multinews-47600.json\n",
    "61. summary/multinews/translated-multinews-48300.json\n",
    "62. summary/multinews/translated-multinews-4900.json\n",
    "63. summary/multinews/translated-multinews-49000.json\n",
    "64. summary/multinews/translated-multinews-49700.json\n",
    "65. summary/multinews/translated-multinews-50400.json\n",
    "66. summary/multinews/translated-multinews-51100.json\n",
    "67. summary/multinews/translated-multinews-51800.json\n",
    "68. summary/multinews/translated-multinews-52500.json\n",
    "69. summary/multinews/translated-multinews-53200.json\n",
    "70. summary/multinews/translated-multinews-53900.json\n",
    "71. summary/multinews/translated-multinews-54600.json\n",
    "72. summary/multinews/translated-multinews-55300.json\n",
    "73. summary/multinews/translated-multinews-5600.json\n",
    "74. summary/multinews/translated-multinews-56000.json\n",
    "75. summary/multinews/translated-multinews-6300.json\n",
    "76. summary/multinews/translated-multinews-700.json\n",
    "77. summary/multinews/translated-multinews-7000.json\n",
    "78. summary/multinews/translated-multinews-7700.json\n",
    "79. summary/multinews/translated-multinews-8400.json\n",
    "80. summary/multinews/translated-multinews-9100.json\n",
    "81. summary/multinews/translated-multinews-9800.json\n",
    "\"\"\"\n",
    "\n",
    "urls, multinews = [], []\n",
    "for i in links.split('\\n'):\n",
    "    if len(i):\n",
    "        urls.append('https://malaya-dataset.s3-ap-southeast-1.amazonaws.com/' + i.split('. ')[1])\n",
    "        multinews.append(i.split('/')[-1])\n",
    "        \n",
    "for url in tqdm(urls):\n",
    "    os.system(f'wget {url}')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "# links = \"\"\"\n",
    "# 1. part1, https://malaya-dataset.s3-ap-southeast-1.amazonaws.com/summary/gigawords/translated-0.json\n",
    "# 2. part2, https://malaya-dataset.s3-ap-southeast-1.amazonaws.com/summary/gigawords/translated-100000.json\n",
    "# 3. part3, https://malaya-dataset.s3-ap-southeast-1.amazonaws.com/summary/gigawords/translated-200000.json\n",
    "# 4. part4, https://malaya-dataset.s3-ap-southeast-1.amazonaws.com/summary/gigawords/translated-300000.json\n",
    "# 5. part5, https://malaya-dataset.s3-ap-southeast-1.amazonaws.com/summary/gigawords/translated-400000.json\n",
    "# 6. part6, https://malaya-dataset.s3-ap-southeast-1.amazonaws.com/summary/gigawords/translated-500000.json\n",
    "# 7. part7, https://malaya-dataset.s3-ap-southeast-1.amazonaws.com/summary/gigawords/translated-600000.json\n",
    "# 8. part8, https://malaya-dataset.s3-ap-southeast-1.amazonaws.com/summary/gigawords/translated-700000.json\n",
    "# 9. part9, https://malaya-dataset.s3-ap-southeast-1.amazonaws.com/summary/gigawords/translated-800000.json\n",
    "# 10. part10, https://malaya-dataset.s3-ap-southeast-1.amazonaws.com/summary/gigawords/translated-900000.json\n",
    "# 11. part11, https://malaya-dataset.s3-ap-southeast-1.amazonaws.com/summary/gigawords/translated-1000000.json\n",
    "# \"\"\"\n",
    "\n",
    "# urls, gigawords = [], []\n",
    "# for i in links.split('\\n'):\n",
    "#     if len(i):\n",
    "#         urls.append(i.split('. ')[1])\n",
    "#         gigawords.append(i.split('/')[-1])\n",
    "\n",
    "# # for url in tqdm(urls):\n",
    "# #     os.system(f'wget {url}')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "import re\n",
    "from unidecode import unidecode\n",
    "from malaya.text.rules import normalized_chars\n",
    "\n",
    "def filter_news(string):\n",
    "    \"\"\"Return True if article text looks like scraper junk (JS warnings / untranslated French).\"\"\"\n",
    "    string = string.lower()\n",
    "    # 'javascript' alone subsumes 'javascript is disabled' and 'requires javascript'\n",
    "    return 'javascript' in string or 'président' in string\n",
    "\n",
    "def make_cleaning(s, c_dict):\n",
    "    s = s.translate(c_dict)\n",
    "    return s\n",
    "\n",
    "def transformer_textcleaning(string):\n",
    "    \"\"\"\n",
    "    use by any transformer model before tokenization\n",
    "    \"\"\"\n",
    "    string = unidecode(string)\n",
    "    string = ' '.join(\n",
    "        [make_cleaning(w, normalized_chars) for w in string.split()]\n",
    "    )\n",
    "    string = re.sub('\\(dot\\)', '.', string)\n",
    "    string = (\n",
    "        re.sub(re.findall(r'\\<a(.*?)\\>', string)[0], '', string)\n",
    "        if (len(re.findall(r'\\<a (.*?)\\>', string)) > 0)\n",
    "        and ('href' in re.findall(r'\\<a (.*?)\\>', string)[0])\n",
    "        else string\n",
    "    )\n",
    "    string = re.sub(\n",
    "        r'\\w+:\\/{2}[\\d\\w-]+(\\.[\\d\\w-]+)*(?:(?:\\/[^\\s/]*))*', ' ', string\n",
    "    )\n",
    "    string = string.replace('\\n', ' ').replace('\\t', ' ')\n",
    "    string = re.sub(r'[ ]+', ' ', string).strip().split()\n",
    "    string = [w for w in string if w[0] != '@']\n",
    "    return ' '.join(string)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 10/10 [01:29<00:00,  8.99s/it]\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "(80196, 80196)"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "import json\n",
    "from glob import glob\n",
    "from tqdm import tqdm\n",
    "\n",
    "before, after = [], []\n",
    "total = 0\n",
    "\n",
    "for news in tqdm(cnn):\n",
    "    with open(news) as fopen:\n",
    "        data = json.load(fopen)\n",
    "    total += len(data)\n",
    "    for i in range(len(data)):\n",
    "        l = data[i][:-1]\n",
    "        r = data[i][-1]\n",
    "        l = ' '.join(l)\n",
    "        r = r.replace('<SPPPPLIIIT>', '.')\n",
    "        r = '. '.join([k.strip() for k in r.split('.') if len(k.strip())])\n",
    "        r = transformer_textcleaning(r)\n",
    "        l = transformer_textcleaning(l)\n",
    "        if len(r) > 10 and len(l) > 10:\n",
    "            before.append(l)\n",
    "            after.append(r)        \n",
    "\n",
    "len(before), len(after)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "test = 10000\n",
    "test_before = before[-test:]\n",
    "before = before[:-test]\n",
    "test_after = after[-test:]\n",
    "after = after[:-test]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "with open('test-set-cnn.json', 'w') as fopen:\n",
    "    json.dump({'X': test_before, 'Y': test_after}, fopen)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "batches = []\n",
    "batch = 50000\n",
    "for i in range(0, len(before), batch):\n",
    "    index = min(i + batch, len(before))\n",
    "    x = before[i: index]\n",
    "    y = after[i: index]\n",
    "    batches.append((x, y))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "import tensorflow as tf\n",
    "\n",
    "# write one TSV per batch; enumerate/zip avoid shadowing the loop index\n",
    "# and clobbering the module-level before/after lists\n",
    "for batch_no, (batch_x, batch_y) in enumerate(batches):\n",
    "    filename = f't5-data/cnn-summarization-{batch_no}.tsv'\n",
    "    with tf.io.gfile.GFile(filename, 'w') as outfile:\n",
    "        for x, y in zip(batch_x, batch_y):\n",
    "            outfile.write('%s\\t%s\\n' % (x, y))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 81/81 [03:58<00:00,  2.94s/it]\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "(55787, 55787)"
      ]
     },
     "execution_count": 13,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "def remove_incomplete(string):\n",
    "    \"\"\"Drop a trailing sentence fragment (< 10 chars) and terminate with '.'.\"\"\"\n",
    "    parts = string.split('.')\n",
    "    # guard len > 1 so a single short fragment is not reduced to a bare '.'\n",
    "    if len(parts) > 1 and len(parts[-1]) < 10:\n",
    "        parts = parts[:-1]\n",
    "    return '.'.join(parts) + '.'\n",
    "\n",
    "before, after = [], []\n",
    "total = 0\n",
    "\n",
    "for news in tqdm(multinews):\n",
    "    with open(news) as fopen:\n",
    "        data = json.load(fopen)\n",
    "    total += len(data)\n",
    "    for i in range(len(data)):\n",
    "        l = data[i][:-1]\n",
    "        r = data[i][-1]\n",
    "        l = ' '.join(l)\n",
    "        r = r.replace('<SPPPPLIIIT>', '.')\n",
    "        r = '. '.join([k.strip() for k in r.split('.') if len(k.strip())])\n",
    "        r = remove_incomplete(transformer_textcleaning(r))\n",
    "        l = transformer_textcleaning(l)\n",
    "        if len(r) > 10 and len(l) > 10:\n",
    "            before.append(l)\n",
    "            after.append(r)\n",
    "        \n",
    "        \n",
    "len(before), len(after)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "test = 5000\n",
    "test_before = before[-test:]\n",
    "before = before[:-test]\n",
    "test_after = after[-test:]\n",
    "after = after[:-test]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [],
   "source": [
    "batches = []\n",
    "batch = 40000\n",
    "for i in range(0, len(before), batch):\n",
    "    index = min(i + batch, len(before))\n",
    "    x = before[i: index]\n",
    "    y = after[i: index]\n",
    "    batches.append((x, y))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "# write one TSV per batch; enumerate/zip avoid shadowing the loop index\n",
    "# and clobbering the module-level before/after lists\n",
    "for batch_no, (batch_x, batch_y) in enumerate(batches):\n",
    "    filename = f't5-data/multinews-summarization-{batch_no}.tsv'\n",
    "    with tf.io.gfile.GFile(filename, 'w') as outfile:\n",
    "        for x, y in zip(batch_x, batch_y):\n",
    "            outfile.write('%s\\t%s\\n' % (x, y))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [],
   "source": [
    "with open('test-set-multinews.json', 'w') as fopen:\n",
    "    json.dump({'X': test_before, 'Y': test_after}, fopen)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 11/11 [01:14<00:00,  6.75s/it]\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "(1090815, 1090815)"
      ]
     },
     "execution_count": 17,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# before, after = [], []\n",
    "# total = 0\n",
    "\n",
    "# for news in tqdm(gigawords):\n",
    "#     with open(news) as fopen:\n",
    "#         data = json.load(fopen)\n",
    "#     for i in range(len(data)):\n",
    "#         splitted = data[i].split('<>')\n",
    "#         if len(splitted) != 2:\n",
    "#             continue\n",
    "#         l, r = splitted\n",
    "#         l = l.strip()\n",
    "#         r = r.strip()\n",
    "#         if len(l) > 10 and len(r) > 5:\n",
    "#             before.append(transformer_textcleaning(l))\n",
    "#             after.append(transformer_textcleaning(r))\n",
    "        \n",
    "# len(before), len(after)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [],
   "source": [
    "# test = 100000\n",
    "# test_before = before[-test:]\n",
    "# before = before[:-test]\n",
    "# test_after = after[-test:]\n",
    "# after = after[:-test]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [],
   "source": [
    "# batches = []\n",
    "# batch = 400000\n",
    "# for i in range(0, len(before), batch):\n",
    "#     index = min(i + batch, len(before))\n",
    "#     x = before[i: index]\n",
    "#     y = after[i: index]\n",
    "#     batches.append((x, y))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [],
   "source": [
    "# for i in range(len(batches)):\n",
    "#     before = batches[i][0]\n",
    "#     after = batches[i][1]\n",
    "#     filename = f't5-data/gigawords-summarization-{i}.tsv'\n",
    "#     with tf.io.gfile.GFile(filename, 'w') as outfile:\n",
    "#         for i in range(len(before)):\n",
    "#             outfile.write('%s\\t%s\\n' % (before[i], after[i]))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [],
   "source": [
    "# with open('test-set-gigawords.json', 'w') as fopen:\n",
    "#     json.dump({'X': test_before, 'Y': test_after}, fopen)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# import json\n",
    "# import tensorflow as tf\n",
    "# import itertools\n",
    "\n",
    "# filename = 'summary.tsv'\n",
    "# with tf.io.gfile.GFile(filename, 'w') as outfile:\n",
    "#     for file in files:\n",
    "#         with open(file) as fopen:\n",
    "#             data = json.load(fopen)\n",
    "        \n",
    "#         merged = list(itertools.chain(*data))\n",
    "        \n",
    "#         print(file, len(merged))\n",
    "        \n",
    "#         for i in range(len(merged)):\n",
    "#             l, r = transformer_textcleaning(merged[i][0]), transformer_textcleaning(merged[i][1])\n",
    "#             outfile.write('%s\\t%s\\n' % (l, r))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "dict_keys(['text', 'title'])"
      ]
     },
     "execution_count": 25,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "with open('../news/populate-news.json') as fopen:\n",
    "    data = json.load(fopen)\n",
    "data.keys()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [],
   "source": [
    "before = data['text']\n",
    "after = data['title']\n",
    "\n",
    "test = 10000\n",
    "test_before = before[-test:]\n",
    "before = before[:-test]\n",
    "test_after = after[-test:]\n",
    "after = after[:-test]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {},
   "outputs": [],
   "source": [
    "batches = []\n",
    "batch = 40000\n",
    "for i in range(0, len(before), batch):\n",
    "    index = min(i + batch, len(before))\n",
    "    x = before[i: index]\n",
    "    y = after[i: index]\n",
    "    batches.append((x, y))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {},
   "outputs": [],
   "source": [
    "# write one TSV per batch; enumerate/zip avoid shadowing the loop index\n",
    "# and clobbering the module-level before/after lists\n",
    "for batch_no, (batch_x, batch_y) in enumerate(batches):\n",
    "    filename = f't5-data/news-title-{batch_no}.tsv'\n",
    "    with tf.io.gfile.GFile(filename, 'w') as outfile:\n",
    "        for x, y in zip(batch_x, batch_y):\n",
    "            outfile.write('%s\\t%s\\n' % (x, y))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "metadata": {},
   "outputs": [],
   "source": [
    "with open('test-set-news.json', 'w') as fopen:\n",
    "    json.dump({'X': test_before, 'Y': test_after}, fopen)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.8"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
