{ "cells": [ { "cell_type": "code", "execution_count": 1, "id": "22f19553", "metadata": {}, "outputs": [], "source": [ "from bs4 import BeautifulSoup\n", "import os\n", "import pandas\n", "from datetime import datetime, timedelta\n", "from elasticsearch import Elasticsearch\n", "from elasticsearch_dsl import Search\n", "from tqdm import tqdm\n", "import requests\n", "import json\n", "from tika import parser\n", "import re\n", "from unidecode import unidecode\n", "from tqdm import tqdm\n", "from glob import glob\n", "import json\n", "import openai\n", "import time" ] }, { "cell_type": "code", "execution_count": 6, "id": "a223ea7f", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "['01012024', '02012024', '03012024', '04012024', '05012024', '06012024', '07012024', '08012024', '09012024', '10012024', '11012024', '12012024', '13012024', '14012024', '15012024', '16012024', '17012024', '18012024', '19012024', '20012024', '21012024', '22012024', '23012024', '24012024', '25012024', '26012024', '27012024', '28012024', '29012024', '30012024', '31012024', '01022024', '02022024', '03022024', '04022024', '05022024', '06022024', '07022024', '08022024', '09022024', '10022024', '11022024', '12022024', '13022024', '14022024', '15022024', '16022024', '17022024', '18022024', '19022024', '20022024', '21022024', '22022024', '23022024', '24022024', '25022024', '26022024', '27022024', '28022024', '29022024', '01032024', '02032024', '03032024', '04032024', '05032024', '06032024', '07032024', '08032024', '09032024', '10032024', '11032024', '12032024', '13032024', '14032024', '15032024', '16032024', '17032024', '18032024', '19032024', '20032024', '21032024', '22032024', '23032024', '24032024', '25032024', '26032024', '27032024', '28032024', '29032024', '30032024', '31032024', '01042024', '02042024', '03042024', '04042024', '05042024', '06042024', '07042024', '08042024', '09042024', '10042024', '11042024', '12042024', '13042024', '14042024', '15042024', '16042024', '17042024', '18042024', '19042024', '20042024', '21042024', '22042024', '23042024', '24042024', '25042024', '26042024', '27042024', '28042024', '29042024', '30042024', '01052024', '02052024', '03052024', '04052024', '05052024', '06052024', '07052024', '08052024', '09052024', '10052024', '11052024', '12052024', '13052024', '14052024', '15052024', '16052024', '17052024', '18052024', '19052024', '20052024', '21052024', '22052024', '23052024', '24052024', '25052024', '26052024', '27052024', '28052024', '29052024', '30052024', '31052024', '01062024', '02062024', '03062024', '04062024', '05062024', '06062024', '07062024', '08062024', '09062024', '10062024', '11062024', '12062024', '13062024', '14062024', '15062024', '16062024', '17062024', '18062024', '19062024', '20062024', '21062024', '22062024', '23062024', '24062024', '25062024', '26062024', '27062024', '28062024', '29062024', '30062024', '01072024', '02072024', '03072024', '04072024', '05072024', '06072024', '07072024', '08072024', '09072024', '10072024', '11072024', '12072024', '13072024', '14072024', '15072024', '16072024', '17072024', '18072024', '19072024', '20072024', '21072024', '22072024', '23072024', '24072024', '25072024', '26072024', '27072024', '28072024', '29072024', '30072024', '31072024', '01082024', '02082024', '03082024', '04082024', '05082024', '06082024', '07082024', '08082024', '09082024', '10082024', '11082024', '12082024', '13082024', '14082024', '15082024', '16082024', '17082024', '18082024', '19082024', '20082024', '21082024', '22082024', '23082024', '24082024', '25082024', '26082024', '27082024']\n" ] } ], "source": [ "TIKA_HOST = os.environ.get('TIKA_HOST', 'http://localhost:9998')\n", "\n", "directory = 'pdf_temp'\n", "index = 'hansard'\n", "\n", "def cleaning(string):\n", " string = unidecode(string).replace('\\t', ' ').replace('\\r', ' ').replace('\\n', ' ').replace('_', ' ')\n", " string = string.replace(' -', '-').replace(' ANYA', 'ANYA').replace('ki ta', 'kita').replace('s aya', 'saya')\n", " string = string.replace('m enjadi', 'menjadi').replace('meno lak', 'menolak')\n", " try:\n", " soup = BeautifulSoup(string, 'lxml')\n", " string = soup.text\n", " except:\n", " pass\n", " return re.sub(r'[ ]+', ' ', string).strip()\n", "\n", "def download(url, file_name):\n", " with open(file_name, \"wb\") as file:\n", " response = requests.get(url)\n", " file.write(response.content)\n", "\n", "os.makedirs(directory, exist_ok = True)\n", "latest_date = datetime.strptime('2024-01-01', '%Y-%m-%d')\n", "\n", "startDate = latest_date\n", "endDate = latest_date + timedelta(days=30 * 8)\n", "datesRange = pandas.date_range(startDate,endDate-timedelta(days=1),freq='d')\n", "datesRange = [d.strftime('%d%m%Y') for d in datesRange]" ] }, { "cell_type": "code", "execution_count": 4, "id": "6cb663dd", "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████| 240/240 [00:47<00:00, 5.05it/s]\n" ] } ], "source": [ "for d in tqdm(datesRange):\n", " filename = f'DR-{d}.pdf'\n", " output_filename = os.path.join(directory, filename)\n", " if os.path.exists(output_filename) and os.path.getsize(output_filename) > 1000:\n", " print(f'{output_filename} exists, skip.')\n", " continue\n", " url = f'https://www.parlimen.gov.my/files/hindex/pdf/{filename}'\n", " try:\n", " download(url, output_filename)\n", " except Exception as e:\n", " print(e)" ] }, { "cell_type": "code", "execution_count": 8, "id": "74ca1c07", "metadata": {}, "outputs": [], "source": [ "pdfs = sorted(glob(f'{directory}/*.pdf'))\n", "print(pdfs)\n", "\n", "print('writing hansard-temp.jsonl')\n", "with open('hansard-temp.jsonl', 'w') as fopen:\n", " for file in tqdm(pdfs):\n", "\n", " raw_xml = parser.from_file(file, f'{TIKA_HOST}/tika', xmlContent=True)\n", " body = raw_xml['content'].split('')[1].split('')[0]\n", " body_without_tag = body.replace(\"

\", \"\").replace(\"

\", \"\").replace(\"
\", \"\").replace(\"
\",\"\").replace(\"

\",\"\")\n", " text_pages = body_without_tag.split(\"\"\"

\"\"\")[1:]\n", "\n", " for i, t in enumerate(text_pages):\n", " r = re.findall(r'DR[\\. ]\\s*[0-3]?[0-9].[0-3]?[0-9].(?:[0-9]{2})?[0-9]{2}\\s+\\d+\\b', t)\n", " r_ = re.findall(r'\\d+\\s+DR[\\. ]\\s*[0-3]?[0-9].[0-3]?[0-9].(?:[0-9]{2})?[0-9]{2}\\b', t)\n", " found = True\n", " if len(r):\n", " no_page = r[0].split()[-1]\n", " elif len(r_):\n", " no_page = r_[0].split()[0]\n", " else:\n", " found = False\n", "\n", " if not found:\n", " continue\n", "\n", " splitted = t.split('\\n \\n')\n", " splitted = [s for s in splitted if len(s.strip()) > 1]\n", " if len(splitted) < 3:\n", " splitted = t.split('\\n\\n')\n", " splitted = [cleaning(s) for s in splitted[1:]]\n", " splitted = [s + '.' if s[-1] not in '.;:,' else s for s in splitted if len(s)]\n", " splitted = ' '.join(splitted)\n", " date = datetime.strptime(file, f'{directory}/DR-%d%m%Y.pdf').strftime('%Y-%m-%d')\n", " \n", " if not len(splitted):\n", " print(f, i, t)\n", "\n", " d = {\n", " 'original': t,\n", " 'cleaned': splitted,\n", " 'no_page': int(no_page),\n", " 'actual_no_page': i + 1,\n", " 'date': date,\n", " 'url': f'https://www.parlimen.gov.my/files/hindex/{file}'.replace('/pdf_temp', '/pdf')\n", " }\n", " fopen.write(f'{json.dumps(d)}\\n')" ] }, { "cell_type": "code", "execution_count": null, "id": "fa47c63b", "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.8.10" } }, "nbformat": 4, "nbformat_minor": 5 }