{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Force CPU-only execution: hide all CUDA devices from any GPU-aware\n",
     "# library imported later in this notebook.\n",
     "import os\n",
     "os.environ['CUDA_VISIBLE_DEVICES'] = ''"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Raw dataset source (download kept commented out for provenance):\n",
     "# !wget https://huggingface.co/datasets/mesolitica/malaysian-news/resolve/main/news-2022-11-18.json"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "import json\n",
    "import malaya\n",
    "from tqdm import tqdm\n",
    "from unidecode import unidecode"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "import re\n",
    "\n",
    "def cleaning(string):\n",
    "    splitted = malaya.text.function.split_into_sentences(string)\n",
    "    if not len(splitted):\n",
    "        splitted = '. '.join([k.strip() for k in string.split('.') if len(k.strip())])\n",
    "    if splitted[0][0] == '-':\n",
    "        splitted[0] = splitted[0].replace('- ','')\n",
    "    points = [f'{no + 1}. {s}' for no, s in enumerate(splitted)]\n",
    "    points = ' '.join(points)\n",
    "    return points\n",
    "\n",
    "def simple_cleaning(string):\n",
    "    return re.sub(r'[ ]+', ' ', unidecode(string).replace('\\n', ' ').replace('--', ' ').replace('/', ' ')).strip()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{\"title\": \"Pelancong ke M'sia nak tengok Mat, bukan Doraemon\", \"url\": \"https://www.malaysiakini.com/letters/473507\", \"authors\": [], \"top-image\": \"https://i.malaysiakini.com/777/13e94b92eae8996a850667bb8c2c06a2.jpg\", \"text\": \"Saya pertama kali menonton watak Doraemon di televisyen sekitar tahun 1993, ketika sedang dalam latihan amali di sebuah kilang di Sungai Way.\\n\\nSaya dalam satu rombongan ke Cameron Highlands bersama-sama pekerja kilang dan kami menginap di sebuah banglo di sana. Sebaik sampai, kami berehat dan menonton televisyen beramai-ramai di ruang tamu dan itulah pertama kali menyaksikan watak kartun itu. Sebelumnya, saya hanya mendengar dari cakap-cakap orang sahaja.\\n\\nMaknanya, Doraemon ini sudah masuk ke negara kita begitu lama. Sebelum tahun 1993, ianya telah bertapak sebagai satu watak kegemaran kanak-kanak (dan sebilangan orang dewasa juga).\\n\\nIa seperti juga watak Ultraman, telah menjadi topik perbualan sejak dari waktu itu lagi. Menariknya, Ultraman telah bertukar ganti, - dari Ultraman Ace ke Ultraman Tiga dan entah apa lagi wataknya. Namun Doraemon tetap sama, wataknya dan juga jalan ceritanya. Sudah sebati dengan sebahagian rakyat Malaysia yang membesar dengan cerita tersebut.\\n\\nDoraemon tentunya jauh lebih awal dari Upin dan Ipin, malahan juga lebih awal dari Kluangman serta rasanya kalau tak silap juga lebih awal dari animasi Kampung Boy yang sebelum itu hanya wujud dalam bentuk lukisan. Di zaman kita belum mampu mempunyai animasi sendiri, Doraemon telah mengisi kekosongan tersebut sebagai penghibur khayalan kita.\\n\\nTetapi adakah kerana watak itu telah lama bertapak di sini, kita boleh mengakuinya sebagai satu warisan negara lebih daripada watak tempatan yang wujud lebih kemudian darinya? Serta bolehkah kita perlihatkan kepada orang luar sebagai satu ikon tempatan yang mampu menarik orang luar?\\n\\nSudah tentu tidak. Berpuluh tahun dia di sini, dia tetap asing. 
Dia tetap produk orang lain yang bukan kita. Itu perlu kita akui.\\n\\nApabila karakter Doraemon diangkat sebagai satu daripada agenda dalam mempromosikan Tahun Melawat Malaysia 2020, banyak pihak yang tidak bersetuju dan ia berasas. Kenapa kita perlu ambil satu karakter asing di saat kita punya karakter sendiri yang mampu dipasarkan ke luar? Tidak dapat dipastikan bagaimanakah cara mereka berfikir.\\n\\nBarangkali ada segolongan tertentu cuba buat duit mudah melalui keadaan ini. Rasanya bukan hanya seorang dua yang berfikiran begini, malah tentunya ramai lagi. Tidak logik untuk kita mengetengahkan satu watak asing untuk menarik orang luar ke negara kita. Tak logik.\\n\\nProgram Tahun Melawat Malaysia 2020 sebenarnya telah banyak terpalit dengan kontroversi, dengan logo yang katanya bertaraf sampah dan promosi yang entah apa-apa. Kini ditambah lagi dengan isu watak Doraemon dan kegilaan menteri untuk bawa kumpulan K-Pop untuk mengadakan konsert di sini.\\n\\nAdakah kita masih perlu bergantung kepada kreativiti luar setelah lebih 60 tahun merdeka? Adakah kita tak punya watak-watak kita sendiri, sama ada dari sudut animasi atau penghibur?\\n\\nTak mungkin pihak berkenaan tak nampak tentang isu ini. Tetapi mungkin sebab terlalu banyak berpolitik, sehingga 'kerja' yang patut dijalankan, terabai dan tidak dilakukan sepenuh hati. Amat malang sekali.\\n\\nSecara jujur, watak Mat dalam Kampung Boy lebih sesuai untuk diangkat dalam program sempena Tahun Melawat Malaysia. Watak kanak-kanak yang menjalani kehidupan seharian di kampung, bermain, mandi sungai dan pelbagai aktiviti lagi itu jelas lebih sesuai dan lebih layak.\\n\\nPelancong datang ke sini bukan untuk melihat kecanggihan teknologi, atau berhibur dengan muzik dan sebagainya kerana mereka mendapat perkara sebegitu yang lebih maju di tempat asal mereka. Mereka datang untuk mengenal budaya kita, dan melihat sendiri kehidupan yang tak pernah mereka rasai.\\n\\nKita masih punya masa untuk mengubah keadaan. 
Masa yang ada harus digunakan secara bijak untuk memastikan program yang dirancang ini mendatangkan manafaat kepada rakyat seluruhnya.\\n\\nTahun Melawat Malaysia 2020 seharusnya menjadi satu program yang menaikkan nama Malaysia di mata dunia, seterusnya membawa masuk lebih banyak peluang untuk memperbaiki status ekonomi rakyat.\\n\\nKerajaan sepatutnya melakukan semuanya ini, dengan meletakkan rakyat sebagai pemegang taruh terpenting, bukannya sebagai pelanggan perniagaan pihak tertentu yang akan mengaut keuntungan tanpa mempedulikan apa akan jadi kepada rakyat.\\n\\nWAN ZAMZAHIDI WAN ZAHID ialah seorang penulis, penceramah dan konsultan perniagaan bebas.\\n\\nTulisan ini tidak semestinya mencerminkan pendirian rasmi Malaysiakini.\", \"keyword\": [\"kampung\", \"msia\", \"tengok\", \"doraemon\", \"ultraman\", \"animasi\", \"nak\", \"pelancong\", \"watak\", \"karakter\", \"melawat\", \"wujud\", \"wataknya\", \"2020\", \"mat\"], \"summary\": \"Saya pertama kali menonton watak Doraemon di televisyen sekitar tahun 1993, ketika sedang dalam latihan amali di sebuah kilang di Sungai Way.\\nIa seperti juga watak Ultraman, telah menjadi topik perbualan sejak dari waktu itu lagi.\\nDi zaman kita belum mampu mempunyai animasi sendiri, Doraemon telah mengisi kekosongan tersebut sebagai penghibur khayalan kita.\\nApabila karakter Doraemon diangkat sebagai satu daripada agenda dalam mempromosikan Tahun Melawat Malaysia 2020, banyak pihak yang tidak bersetuju dan ia berasas.\\nKini ditambah lagi dengan isu watak Doraemon dan kegilaan menteri untuk bawa kumpulan K-Pop untuk mengadakan konsert di sini.\", \"news\": \"Malaysiakini (caruman)\", \"date\": \"04-23-2019\", \"language\": \"INDONESIA\", \"semisupervised-summaries\": [\"Doraemon telah bertapak sebagai satu watak kegemaran kanak-kanak. Watak ini telah menjadi topik perbualan sejak dari masa ke masa lagi. 
Tetapi adakah kerana watak itu telah lama bertapak di sini, kita boleh mengakuinya sebagai warisan negara.\", \"Watak Doraemon telah bertapak sebagai watak kegemaran kanak-kanak. Watak ini telah menjadi topik perbualan sejak dari masa itu lagi. Watak Mat dalam Kampung Boy lebih sesuai untuk diangkat dalam program Tahun Melawat Malaysia 2020.\"]}\r\n",
      "{\"title\": \"Seni latte 3D\", \"url\": \"https://www.hmetro.com.my/node/155492\", \"authors\": [], \"top-image\": \"https://assets.hmetro.com.my/images/articles/senlatte_field_image_listing_featured.var_1469912795.jpg\", \"text\": \"Seorang barista di Jepun mencipta hasil seni menarik dan comel hanya menggunakan buih susu dalam latte dihidangkannya.\\n\\nKazuki Yamamoto, 29, dari wilayah Okayama mencipta pelbagai haiwan kecil yang nampak realistik ketika bekerja di sebuah restoran, sebelum hasil seninya itu mula mendapat perhatian di seluruh dunia.\\n\\nKerja tangannya yang paling popular seekor kucing yang cuba masuk dari satu cawan ke cawan lain untuk menangkap ikan.\\n\\nDia yang kini pengurus sebuah kafe di Harajyuku juga membuat buih berbentuk arnab dan kucing yang lain, dianggap pelanggannya sebagai \\u2018terlalu comel sehingga tidak tergamak untuk meminumnya.\\u2019\\n\\n\\u201cGambar kucing menangkap ikan menjadi popular selepas tersebar di Internet sehingga saya mendapat tawaran untuk menghadiri pelbagai acara dan bekerja di luar negara.\\n\\n\\u201cSelepas itulah saya menerima tawaran kerja di kafe ini sekarang. Paling penting untuk rekaan saya adalah menggunakan krim yang lembut dan susu berkualiti tinggi.\\n\\n\\u201cSaya tidak menafikan teknik ini boleh ditiru, namun tiada sesiapa mampu meniru idea saya yang tersendiri ini. 
Bagi saya seni latte 3D ini mungkin mudah dimusnahkan, tetapi melihatnya menggembirakan pelanggan saya,\\u201d katanya.\", \"keyword\": [\"seni\", \"mencipta\", \"susu\", \"kafe\", \"3d\", \"comel\", \"popular\", \"latte\", \"cawan\", \"menangkap\", \"kucing\"], \"summary\": \"Seorang barista di Jepun mencipta hasil seni menarik dan comel hanya menggunakan buih susu dalam latte dihidangkannya.\\nKerja tangannya yang paling popular seekor kucing yang cuba masuk dari satu cawan ke cawan lain untuk menangkap ikan.\\nPaling penting untuk rekaan saya adalah menggunakan krim yang lembut dan susu berkualiti tinggi.\\n\\u201cSaya tidak menafikan teknik ini boleh ditiru, namun tiada sesiapa mampu meniru idea saya yang tersendiri ini.\\nBagi saya seni latte 3D ini mungkin mudah dimusnahkan, tetapi melihatnya menggembirakan pelanggan saya,\\u201d katanya.\", \"news\": \"Harian Metro\", \"date\": \"07-30-2016\", \"language\": \"INDONESIA\", \"semisupervised-summaries\": [\"Kazuki Yamamoto mencipta haiwan kecil yang kelihatan realistik ketika bekerja di sebuah restoran. Dia juga membuat buih berbentuk arnab dan kucing yang lain. Dia menerima tawaran untuk menghadiri pelbagai acara dan bekerja di luar negara.\", \"Kazuki Yamamoto, 29, dari wilayah Okayama mencipta pelbagai haiwan kecil yang kelihatan realistik ketika bekerja di sebuah restoran. Hasil seninya mula mendapat perhatian di seluruh dunia.\"]}\r\n"
     ]
    }
   ],
   "source": [
     "# Peek at the last two JSONL records to see the schema before parsing.\n",
     "!tail -n 2 populate-news.json.semisupervised"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn.feature_extraction.text import CountVectorizer\n",
    "from malaya.text.vectorizer import SkipGramCountVectorizer\n",
    "\n",
    "stopwords = malaya.text.function.get_stopwords()\n",
    "bow = CountVectorizer(\n",
    "    ngram_range = (1, 4),\n",
    "    stop_words = stopwords,\n",
    "    lowercase = False,\n",
    ")\n",
    "\n",
    "stopwords = malaya.text.function.get_stopwords()\n",
    "skip_bow = SkipGramCountVectorizer(\n",
    "    ngram_range = (1, 4),\n",
    "    stop_words = stopwords,\n",
    "    lowercase = False,\n",
    "    skip = 2\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "81717it [00:05, 15792.67it/s]\n"
     ]
    }
   ],
   "source": [
     "# Accumulators. NOTE(review): `rejected` (too-short articles) is rebound to a\n",
     "# pronoun *set* by a later cell, so inspect it before running that cell.\n",
     "before, after = [], []\n",
     "\n",
     "count = 0\n",
     "rejected = []\n",
     "languages = []\n",
     "accepted = []\n",
     "para = []\n",
     "# Outlet tokens treated as Malaysian sources.\n",
     "malaysian_news = {\n",
     "    'kosmo',\n",
     "    'hmetro',\n",
     "    'malaymail',\n",
     "    'projekmm',\n",
     "    'bharian',\n",
     "    'utusan',\n",
     "    'astroawani',\n",
     "    'themalaysianinsight',\n",
     "    'malaysiakini',\n",
     "    'bernama'\n",
     "}\n",
     "\n",
     "def reject(data):\n",
     "    \"\"\"Return True when `data` looks like non-Malaysian content.\n",
     "\n",
     "    NOTE(review): this function is never called in the loop below, and a later\n",
     "    cell redefines `reject` with a different (sentence-level) signature.\n",
     "    \"\"\"\n",
     "    if data['news'] in malaysian_news:\n",
     "        return False\n",
     "    if any([n in data['top-image'] for n in malaysian_news]):\n",
     "        return False\n",
     "    if any([n in data['url'] for n in malaysian_news]):\n",
     "        return False\n",
     "    if 'com.my' in data['top-image']:\n",
     "        return False\n",
     "    if data['language'] == 'malay':\n",
     "        return False\n",
     "    # NOTE(review): the two checks below are redundant — any record reaching\n",
     "    # this point returns True regardless.\n",
     "    if 'Siaran Pers' in data['news']:\n",
     "        return True\n",
     "    if '.id' in data['news']:\n",
     "        return True\n",
     "    \n",
     "    return True\n",
     "\n",
     "with open('populate-news.json.semisupervised') as fopen:\n",
     "    for l in tqdm(fopen):\n",
     "        data = json.loads(l)\n",
     "        # Collapse runs of spaces in the article body before length checks.\n",
     "        text = re.sub(r'[ ]+', ' ', data['text']).strip()\n",
     "        # Drop paywall / no-JS placeholder pages.\n",
     "        if 'kindly register' in text.lower() or 'disabled in your browser' in text.lower():\n",
     "            continue\n",
     "        # Articles under 30 words are kept aside in `rejected` for inspection.\n",
     "        if len(text.split()) < 30:\n",
     "            rejected.append(data)\n",
     "            continue\n",
     "            \n",
     "        if data['language'] == 'ENGLISH':\n",
     "            continue\n",
     "            \n",
     "        accepted.append(data)\n",
     "            \n",
     "        \n",
     "#         befores = data['semisupervised-summaries']\n",
     "#         for b in befores:\n",
     "#             b = cleaning(b)\n",
     "#             if len(b.split()) < 3:\n",
     "#                 continue\n",
     "#             before.append(b)\n",
     "#             after.append(text)\n",
     "            \n",
     "#         if count == 10:\n",
     "#             break\n",
     "            \n",
     "        count += 1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "79560"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Number of articles that survived filtering.\n",
     "len(accepted)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'title': 'Ibu saudara Haziq terharu sokongan rakyat Malaysia',\n",
       " 'url': 'https://www.themalaysianinsight.com/bahasa/s/142491',\n",
       " 'news': 'themalaysianinsight',\n",
       " 'language': 'malay',\n",
       " 'top-image': 'https://www.themalaysianinsight.com/resources/stories_images/142491/perhimpunanan_solidarity_kedamaian_03__full.jpg',\n",
       " 'text': 'WARIS keluarga Allahyarham Muhammad Haziq Mohd Tarmizi, 17, yang terkorban dalam tragedi tembakan di Christchurch, New Zealand, pada 15 Mac lepas, melahirkan rasa terharu akan sokongan diberi rakyat Malaysia semasa perhimpunan Solidariti Kedamaian.\\n\\nZarina Shuib , ibu saudara Muhammad Haziq, memanjatkan kesyukuran kepada Allah SWT kerana berkesempatan menyertai rakyat Malaysia dalam perhimpunan itu di Kuala Lumpur hari ini.',\n",
       " 'date': '2019-03-23T03:52:02',\n",
       " 'date_utc': '2019-03-22T19:52:02',\n",
       " 'semisupervised-summaries': ['Perhimpunan Solidariti Kedamaian () adalah perhimpunan solidariti yang diadakan di Kuala Lumpur, Malaysia, pada 15 Mac 2013.',\n",
       "  'WARIS keluarga Muhammad Haziq Mohd Tarmizi, 17, yang meninggal dunia dalam tragedi Christchurch, New Zealand, melahirkan rasa terharu dengan sokongan yang diberikan rakyat Malaysia semasa perhimpunan Solidariti Kedamaian.']}"
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Inspect the first surviving record.\n",
     "accepted[0]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 60,
   "metadata": {},
   "outputs": [],
   "source": [
     "# First/second-person Malay pronouns marking subjective sentences.\n",
     "# NOTE(review): this rebinds `rejected`, which an earlier cell used as a list\n",
     "# of too-short articles.\n",
     "rejected = {'saya', 'awak', 'kami', 'dia', 'mereka', 'anda', 'kita'}\n",
     "\n",
     "tokenize = malaya.tokenizer.Tokenizer().tokenize\n",
     "\n",
     "def headline(string, length = 300):\n",
     "    \"\"\"Take leading pronoun-free sentences until roughly `length` characters.\n",
     "\n",
     "    NOTE(review): the pronoun filter here is a substring test, so 'dia' also\n",
     "    matches inside words like 'media' — unlike the token-based reject() below;\n",
     "    confirm this is intended.\n",
     "    \"\"\"\n",
     "    splitted = malaya.text.function.split_into_sentences(string)\n",
     "    splitted = [s for s in splitted if all([r not in s for r in rejected])]\n",
     "    selected, index, results = '', 0, []\n",
     "    while len(selected) <= length and index < len(splitted):\n",
     "        selected += splitted[index]\n",
     "        results.append(splitted[index])\n",
     "        index += 1\n",
     "    return ' '.join(results)\n",
     "\n",
     "def reject(string):\n",
     "    \"\"\"Drop sentences containing any of the pronouns (token-based, lowercased).\n",
     "\n",
     "    NOTE(review): shadows the record-level reject() defined in an earlier cell.\n",
     "    \"\"\"\n",
     "    splitted = malaya.text.function.split_into_sentences(string)\n",
     "    splitted = [s for s in splitted if not len(set(tokenize(s.lower())) & rejected)]\n",
     "    return ' '.join(splitted)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 57,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Sample sentence kept for quick manual checks of the helpers above.\n",
     "s = 'IBU negara Korea Selatan, Seoul menduduki tempat ketujuh dalam senarai bandar raya paling mahal untuk didiami pada tahun ini, lapor agensi berita Yonhap.'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 58,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "['KUALA LUMPUR, 26 Mac (Bernama) -- Malaysia akan sentiasa bertegas dan vokal dalam isu yang melibatkan penindasan ke atas etnik Rohingya di Myanmar dan konsisten untuk membantu pelarian tersebut daripada terus dinafikan hak mereka, kata Menteri Luar Datuk Saifuddin Abdullah.', 'Beliau berkata tiga perkara utama yang akan diteruskan kerajaan ialah menegakkan keadilan dan membawa mereka yang terlibat dalam pelanggaran hak asasi manusa itu ke pengadilan dan diadili mengikut undang-undang antarabangsa, pemberian status kewarganegaraan Myanmar kepada etnik Rohingya dan membawa pulang mereka yang melarikan diri ke Bangladesh untuk pulang ke Myanmar\".', 'Malaysia memainkan peranan yang besar khususnya di Hospital Cox\\'s Bazar, Bangladesh dan kita juga membantu mereka di dalam negara\".', 'Semuanya telah kita laksanakan tetapi isu ini masih juga belum selesai.', 'Kita bimbang jika ia tidak diselesaikan, ia semakin berpanjangan dan lebih ramai (Etnik Rohingya di Myanmar) yang akan melarikan diri dan lebih ramai yang akan dibunuh, \" katanya......']\n",
      "KUALA LUMPUR, 26 Mac (Bernama) -- Malaysia akan sentiasa bertegas dan vokal dalam isu yang melibatkan penindasan ke atas etnik Rohingya di Myanmar dan konsisten untuk membantu pelarian tersebut daripada terus dinafikan hak mereka, kata Menteri Luar Datuk Saifuddin Abdullah. {'mereka'}\n",
      "Beliau berkata tiga perkara utama yang akan diteruskan kerajaan ialah menegakkan keadilan dan membawa mereka yang terlibat dalam pelanggaran hak asasi manusa itu ke pengadilan dan diadili mengikut undang-undang antarabangsa, pemberian status kewarganegaraan Myanmar kepada etnik Rohingya dan membawa pulang mereka yang melarikan diri ke Bangladesh untuk pulang ke Myanmar\". {'mereka'}\n",
      "Malaysia memainkan peranan yang besar khususnya di Hospital Cox's Bazar, Bangladesh dan kita juga membantu mereka di dalam negara\". {'kita', 'mereka'}\n",
      "Semuanya telah kita laksanakan tetapi isu ini masih juga belum selesai. {'kita'}\n",
      "Kita bimbang jika ia tidak diselesaikan, ia semakin berpanjangan dan lebih ramai (Etnik Rohingya di Myanmar) yang akan melarikan diri dan lebih ramai yang akan dibunuh, \" katanya...... {'kita'}\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "'KUALA LUMPUR, 26 Mac (Bernama) -- Malaysia akan sentiasa bertegas dan vokal dalam isu yang melibatkan penindasan ke atas etnik Rohingya di Myanmar dan konsisten untuk membantu pelarian tersebut daripada terus dinafikan hak mereka, kata Menteri Luar Datuk Saifuddin Abdullah. Beliau berkata tiga perkara utama yang akan diteruskan kerajaan ialah menegakkan keadilan dan membawa mereka yang terlibat dalam pelanggaran hak asasi manusa itu ke pengadilan dan diadili mengikut undang-undang antarabangsa, pemberian status kewarganegaraan Myanmar kepada etnik Rohingya dan membawa pulang mereka yang melarikan diri ke Bangladesh untuk pulang ke Myanmar\". Malaysia memainkan peranan yang besar khususnya di Hospital Cox\\'s Bazar, Bangladesh dan kita juga membantu mereka di dalam negara\". Semuanya telah kita laksanakan tetapi isu ini masih juga belum selesai. Kita bimbang jika ia tidak diselesaikan, ia semakin berpanjangan dan lebih ramai (Etnik Rohingya di Myanmar) yang akan melarikan diri dan lebih ramai yang akan dibunuh, \" katanya......'"
      ]
     },
     "execution_count": 58,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "reject(accepted[i]['text'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "import random"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(['Hospital Pulau Pinang mangsa',\n",
       "  'Pulau Pinang mangsa menafikan',\n",
       "  'mangsa laporan polis mangsa',\n",
       "  'kelmarin Christopher individu nama'],\n",
       " ['Ketua Polis Daerah Barat Daya',\n",
       "  '000 menerusi nombor akaun CIMB',\n",
       "  'individu nama Lee Song',\n",
       "  'mangsa menafikan mengenali individu',\n",
       "  'nama Christopher Anak Anor',\n",
       "  'disambungkan menerusi telefon',\n",
       "  'PULAU PINANG 23 Mac'])"
      ]
     },
     "execution_count": 14,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# RAKE with the custom 1-4-gram vectorizer vs. RAKE with default settings;\n",
     "# top_k is randomised in [3, 10] — presumably to vary sample sizes, TODO confirm.\n",
     "keywords = malaya.keyword.extractive.rake(accepted[2]['text'], \n",
     "                                                      vectorizer = bow, \n",
     "                                                      top_k = random.randint(3, 10))\n",
     "# Keep only the phrase component (k[1]) of each returned item.\n",
     "keywords = [k[1] for k in keywords]\n",
     "keywords_rake = malaya.keyword.extractive.rake(accepted[2]['text'], atleast = 1,\n",
     "                                          top_k = random.randint(3, 10))\n",
     "keywords_rake = [k[1] for k in keywords_rake]\n",
     "keywords, keywords_rake"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Reject-list for keyword filtering. Despite the name it mixes month names\n",
     "# (English / Malay / abbreviations) with news-outlet tokens and 'com';\n",
     "# candidate keywords containing any of these tokens are dropped in the\n",
     "# RAKE-filtering cell further down.\n",
     "months = {\n",
     "    'january',\n",
     "    'jan',\n",
     "    'januari',\n",
     "    'february',\n",
     "    'feb',\n",
     "    'februari',\n",
     "    'march',\n",
     "    'mac',\n",
     "    'april',\n",
     "    'apr',\n",
     "    'may',\n",
     "    'mei',\n",
     "    'june',\n",
     "    'jun',\n",
     "    'july',\n",
     "    'julai',\n",
     "    'august',\n",
     "    'ogos',\n",
     "    'aug',\n",
     "    'september',\n",
     "    'sep',\n",
     "    'october',\n",
     "    'oktober',\n",
     "    'oct',\n",
     "    'november',\n",
     "    'nov',\n",
     "    'december',\n",
     "    'disember',\n",
     "    'dec',\n",
     "    'utusan',\n",
     "    'malaysiakini',\n",
     "    'astroawani',\n",
     "    'bernama',\n",
     "    'com',\n",
     "}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "from malaya.text.rouge import postprocess_summary, filter_rouge, _get_word_ngrams, _rouge_clean, cal_rouge\n",
    "from malaya.text.function import split_into_sentences\n",
    "\n",
    "def filtering_rouge(summary, contents, start = 0.15, increment = 0.05, break_at = 120, n = 1):\n",
    "    filtered = contents[:]\n",
    "    i = 0\n",
    "    while len(filtered.split()) > break_at:\n",
    "        filtered = filter_rouge(summary, filtered, n = n, threshold = start + increment * i)\n",
    "        i += 1\n",
    "    return filtered"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(['Hospital Pulau Pinang mangsa',\n",
       "  'Pulau Pinang mangsa menafikan',\n",
       "  'mangsa laporan polis mangsa',\n",
       "  'kelmarin Christopher individu nama'],)"
      ]
     },
     "execution_count": 17,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "i = 2\n",
     "# Pull up to 50 candidate phrases with the custom vectorizer.\n",
     "keywords = malaya.keyword.extractive.rake(accepted[i]['text'], \n",
     "                                                  vectorizer = bow, \n",
     "                                                  top_k = 50)\n",
     "# Keep cleaned multi-word phrases longer than 10 chars whose tokens avoid the\n",
     "# `months` reject-set, then clip to a random 3-10 of them.\n",
     "keywords = [simple_cleaning(k[1]) for k in keywords if len(k[1].split()) > 1 and len(k[1]) > 10 \\\n",
     "                and len(set(k[1].lower().replace('-', '').split()) & months) == 0]\n",
     "keywords = keywords[:random.randint(3, 10)]\n",
     "# NOTE(review): the trailing comma makes the cell display a 1-tuple.\n",
     "keywords, "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 61,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  0%|▏                                     | 274/79560 [00:02<10:00, 132.05it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  0%|▏                                     | 381/79560 [00:03<10:40, 123.55it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  1%|▎                                     | 608/79560 [00:04<09:35, 137.28it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n",
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  1%|▍                                     | 933/79560 [00:07<11:19, 115.76it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  2%|▌                                    | 1293/79560 [00:10<10:57, 119.01it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n",
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  3%|█▏                                   | 2453/79560 [00:21<10:18, 124.76it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  4%|█▎                                   | 2793/79560 [00:23<10:32, 121.30it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  4%|█▌                                    | 3374/79560 [00:29<16:11, 78.46it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  5%|█▋                                    | 3639/79560 [00:32<13:03, 96.85it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n",
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  5%|█▊                                   | 3849/79560 [00:34<11:24, 110.65it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  6%|██▏                                  | 4630/79560 [00:43<11:49, 105.64it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  6%|██▏                                  | 4727/79560 [00:44<11:11, 111.48it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  7%|██▌                                  | 5637/79560 [00:53<09:05, 135.57it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  8%|██▊                                  | 6078/79560 [00:57<09:06, 134.44it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  9%|███▏                                 | 6831/79560 [01:03<09:12, 131.66it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  9%|███▎                                 | 7000/79560 [01:04<10:05, 119.76it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  9%|███▎                                 | 7228/79560 [01:06<10:13, 117.95it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 10%|███▊                                 | 8270/79560 [01:16<09:55, 119.64it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 11%|████                                 | 8632/79560 [01:19<10:30, 112.43it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 13%|████▋                                | 9977/79560 [01:32<10:05, 114.89it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 14%|████▉                               | 11029/79560 [01:43<08:10, 139.58it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 14%|█████                               | 11267/79560 [01:44<09:06, 125.06it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 15%|█████▎                              | 11654/79560 [01:47<08:53, 127.32it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 15%|█████▎                              | 11683/79560 [01:48<08:41, 130.14it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 15%|█████▎                              | 11828/79560 [01:49<09:42, 116.35it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 15%|█████▍                              | 12046/79560 [01:51<09:38, 116.70it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 16%|█████▋                              | 12516/79560 [01:55<10:58, 101.85it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 17%|██████                              | 13291/79560 [02:04<10:32, 104.80it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 18%|██████▎                             | 13953/79560 [02:09<08:03, 135.82it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 18%|██████▎                             | 14042/79560 [02:10<10:07, 107.84it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 18%|██████▎                             | 14078/79560 [02:10<09:58, 109.38it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 18%|██████▍                             | 14142/79560 [02:11<08:57, 121.80it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 18%|██████▍                             | 14193/79560 [02:11<09:33, 113.91it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 20%|███████▏                            | 15787/79560 [02:27<08:24, 126.39it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 21%|███████▍                            | 16450/79560 [02:32<07:53, 133.28it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 21%|███████▌                            | 16833/79560 [02:35<09:11, 113.75it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 21%|███████▋                            | 17019/79560 [02:37<09:51, 105.73it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 23%|████████▍                           | 18681/79560 [02:52<08:06, 125.25it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 24%|████████▋                           | 19092/79560 [02:56<09:22, 107.56it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 24%|████████▉                            | 19331/79560 [02:58<10:11, 98.45it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 25%|█████████                           | 19965/79560 [03:05<09:30, 104.47it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 25%|█████████                           | 20004/79560 [03:06<08:24, 117.99it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n",
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 26%|█████████▌                          | 21062/79560 [03:14<06:44, 144.68it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 27%|█████████▊                          | 21691/79560 [03:20<08:32, 112.87it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n",
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 28%|██████████                          | 22116/79560 [03:23<07:57, 120.34it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 28%|██████████                          | 22197/79560 [03:24<07:41, 124.31it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 28%|██████████                          | 22275/79560 [03:25<07:47, 122.50it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 28%|██████████▏                         | 22472/79560 [03:26<07:32, 126.03it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 32%|███████████▊                         | 25345/79560 [03:57<12:51, 70.28it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 32%|███████████▌                        | 25419/79560 [03:57<07:57, 113.44it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 33%|████████████                        | 26597/79560 [04:10<07:12, 122.38it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 34%|████████████▏                       | 26865/79560 [04:13<08:21, 105.04it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 39%|██████████████▍                      | 30987/79560 [05:03<10:00, 80.90it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 43%|███████████████▊                     | 33992/79560 [05:42<09:08, 83.12it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 46%|█████████████████▏                   | 36901/79560 [06:20<08:47, 80.90it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 47%|█████████████████▍                   | 37466/79560 [06:27<09:01, 77.76it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 48%|█████████████████▎                  | 38235/79560 [06:37<06:27, 106.66it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 51%|██████████████████▎                 | 40597/79560 [07:06<06:23, 101.58it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 56%|████████████████████▊                | 44695/79560 [07:58<08:03, 72.08it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 61%|██████████████████████▌              | 48464/79560 [08:44<05:56, 87.20it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 67%|████████████████████████▋            | 53097/79560 [09:39<05:07, 86.08it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 73%|██████████████████████████▉          | 57977/79560 [10:40<04:08, 86.93it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 80%|█████████████████████████████▍       | 63253/79560 [11:46<03:06, 87.26it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 81%|████████████████████████████▉       | 64066/79560 [11:56<02:30, 102.75it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 84%|██████████████████████████████▎     | 67048/79560 [12:31<01:14, 167.35it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 88%|███████████████████████████████▌    | 69800/79560 [13:04<01:29, 109.24it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 88%|███████████████████████████████▌    | 69849/79560 [13:05<01:26, 111.98it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 90%|█████████████████████████████████▎   | 71756/79560 [13:26<01:42, 75.89it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 94%|█████████████████████████████████▉  | 74950/79560 [14:03<00:40, 113.07it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 94%|██████████████████████████████████▉  | 75101/79560 [14:04<00:52, 85.63it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 96%|███████████████████████████████████▌ | 76374/79560 [14:18<00:34, 92.28it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 97%|███████████████████████████████████▋ | 76779/79560 [14:23<00:28, 96.07it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 99%|████████████████████████████████████▋| 78945/79560 [14:48<00:06, 97.15it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "empty vocabulary; perhaps the documents only contain stop words\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|█████████████████████████████████████| 79560/79560 [14:55<00:00, 88.85it/s]\n"
     ]
    }
   ],
   "source": [
    "before, after = [], []\n",
    "for i in tqdm(range(len(accepted))):\n",
    "    h = reject(accepted[i]['text'])\n",
    "    try:\n",
    "\n",
    "        keywords = malaya.keyword.extractive.rake(h, vectorizer = bow, top_k = 100)\n",
    "        keywords = [simple_cleaning(k[1]) for k in keywords if len(k[1].split()) > 1 and len(k[1]) > 10 \\\n",
    "                        and len(set(k[1].lower().replace('-', '').split()) & months) == 0]\n",
    "        keywords = keywords[:random.randint(3, 10)]\n",
    "        \n",
    "        r = filtering_rouge('. '.join(keywords), h, n = 2, start = 0.01)\n",
    "        \n",
    "        if len(r.split()) > 20:\n",
    "            before.append(keywords)\n",
    "            after.append(r)\n",
    "        \n",
    "        keywords_rake = malaya.keyword.extractive.rake(h, top_k = 100)\n",
    "        keywords_rake = [simple_cleaning(k[1]) for k in keywords_rake if len(k[1].split()) > 1 and len(k[1]) > 10 \\\n",
    "                        and len(set(k[1].lower().replace('-', '').split()) & months) == 0]\n",
    "        keywords_rake = keywords_rake[:random.randint(3, 10)]\n",
    "        r = filtering_rouge('. '.join(keywords_rake), h, n = 2, start = 0.01)\n",
    "        \n",
    "        if len(r.split()) > 20:\n",
    "            before.append(keywords_rake)\n",
    "            after.append(r)\n",
    "        \n",
    "    except Exception as e:\n",
    "        # print(h, accepted[i]['text'])\n",
    "        print(e)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 62,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(155967, 155967)"
      ]
     },
     "execution_count": 62,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "len(before), len(after)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 64,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(['Perdana Menteri Pakistan Imran Khan menyifatkan rakan sejawatnya Pertana Menteri Malaysia',\n",
       "  'krisis ekonomi melanda Asia Tenggara',\n",
       "  'meningkatkan taraf hidup rakyatnya',\n",
       "  'tenaga kerja warganegara Pakistan',\n",
       "  'isu-isu membabitkan dunia Islam',\n",
       "  'pengganas menembak mati',\n",
       "  'peluang-peluang pekerjaan diwujudkan',\n",
       "  'umat Islam dilayan',\n",
       "  '3 bilion umat Islam',\n",
       "  'menjejaskan dunia Islam'],\n",
       " ': Perdana Menteri Pakistan Imran Khan menyifatkan rakan sejawatnya Pertana Menteri Malaysia Tun Dr Mahathir Mohamad sebagai negarawan Islam yang telah merubah Malaysia menjadi sebuah model teladan untuk dunia Islam. Beliau berkata Pakistan berbangga dengan cara Malaysia membangun dan meningkatkan taraf hidup rakyatnya di bawah kepimpinan Dr Mahathir\". Seperti memerangi keganasan, bagaimana ia telah menjejaskan dunia Islam. Bagaimana umat Islam dilayan disebabkan perjuangan politik yang mengelirukan dan disengajakan\". Kini tenaga kerja warganegara Pakistan sudah mula pergi ke Malaysia... peluang-peluang pekerjaan diwujudkan agar tenaga kerja dari Pakistan akan pergi ke Malaysia,\" kata beliau. Semasa krisis ekonomi melanda Asia Tenggara pada 1998, Malaysia adalah satu-satunya negara yang berjaya keluar daripada krisis itu menggunakan jalan penyelesaiannya sendiri.')"
      ]
     },
     "execution_count": 64,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "before[100], after[100]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 65,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Persist the paired keyword/text dataset for downstream use.\n",
     "with open('keywords-headline.json', 'w') as fopen:\n",
     "    json.dump({'before': before, 'after': after}, fopen)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.10"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
