File size: 9,308 Bytes
f840d29
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "22f19553",
   "metadata": {},
   "outputs": [],
   "source": [
    "from bs4 import BeautifulSoup\n",
    "import os\n",
    "import pandas\n",
    "from datetime import datetime, timedelta\n",
    "from elasticsearch import Elasticsearch\n",
    "from elasticsearch_dsl import Search\n",
    "from tqdm import tqdm\n",
    "import requests\n",
    "import json\n",
    "from tika import parser\n",
    "import re\n",
    "from unidecode import unidecode\n",
    "from glob import glob\n",
    "import openai\n",
    "import time"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "a223ea7f",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "['01012024', '02012024', '03012024', '04012024', '05012024', '06012024', '07012024', '08012024', '09012024', '10012024', '11012024', '12012024', '13012024', '14012024', '15012024', '16012024', '17012024', '18012024', '19012024', '20012024', '21012024', '22012024', '23012024', '24012024', '25012024', '26012024', '27012024', '28012024', '29012024', '30012024', '31012024', '01022024', '02022024', '03022024', '04022024', '05022024', '06022024', '07022024', '08022024', '09022024', '10022024', '11022024', '12022024', '13022024', '14022024', '15022024', '16022024', '17022024', '18022024', '19022024', '20022024', '21022024', '22022024', '23022024', '24022024', '25022024', '26022024', '27022024', '28022024', '29022024', '01032024', '02032024', '03032024', '04032024', '05032024', '06032024', '07032024', '08032024', '09032024', '10032024', '11032024', '12032024', '13032024', '14032024', '15032024', '16032024', '17032024', '18032024', '19032024', '20032024', '21032024', '22032024', '23032024', '24032024', '25032024', '26032024', '27032024', '28032024', '29032024', '30032024', '31032024', '01042024', '02042024', '03042024', '04042024', '05042024', '06042024', '07042024', '08042024', '09042024', '10042024', '11042024', '12042024', '13042024', '14042024', '15042024', '16042024', '17042024', '18042024', '19042024', '20042024', '21042024', '22042024', '23042024', '24042024', '25042024', '26042024', '27042024', '28042024', '29042024', '30042024', '01052024', '02052024', '03052024', '04052024', '05052024', '06052024', '07052024', '08052024', '09052024', '10052024', '11052024', '12052024', '13052024', '14052024', '15052024', '16052024', '17052024', '18052024', '19052024', '20052024', '21052024', '22052024', '23052024', '24052024', '25052024', '26052024', '27052024', '28052024', '29052024', '30052024', '31052024', '01062024', '02062024', '03062024', '04062024', '05062024', '06062024', '07062024', '08062024', '09062024', '10062024', '11062024', '12062024', '13062024', '14062024', 
'15062024', '16062024', '17062024', '18062024', '19062024', '20062024', '21062024', '22062024', '23062024', '24062024', '25062024', '26062024', '27062024', '28062024', '29062024', '30062024', '01072024', '02072024', '03072024', '04072024', '05072024', '06072024', '07072024', '08072024', '09072024', '10072024', '11072024', '12072024', '13072024', '14072024', '15072024', '16072024', '17072024', '18072024', '19072024', '20072024', '21072024', '22072024', '23072024', '24072024', '25072024', '26072024', '27072024', '28072024', '29072024', '30072024', '31072024', '01082024', '02082024', '03082024', '04082024', '05082024', '06082024', '07082024', '08082024', '09082024', '10082024', '11082024', '12082024', '13082024', '14082024', '15082024', '16082024', '17082024', '18082024', '19082024', '20082024', '21082024', '22082024', '23082024', '24082024', '25082024', '26082024', '27082024']\n"
     ]
    }
   ],
   "source": [
    "TIKA_HOST = os.environ.get('TIKA_HOST', 'http://localhost:9998')\n",
    "\n",
    "directory = 'pdf_temp'\n",
    "index = 'hansard'\n",
    "\n",
    "def cleaning(string):\n",
    "    \"\"\"Normalize extracted hansard text.\n",
    "\n",
    "    Transliterates to ASCII, flattens whitespace characters, repairs common\n",
    "    PDF-extraction word splits, strips any residual HTML, and collapses\n",
    "    repeated spaces. Returns the cleaned string.\n",
    "    \"\"\"\n",
    "    string = unidecode(string).replace('\\t', ' ').replace('\\r', ' ').replace('\\n', ' ').replace('_', ' ')\n",
    "    # Repair frequent extraction artifacts: spurious spaces inside Malay words.\n",
    "    string = string.replace(' -', '-').replace(' ANYA', 'ANYA').replace('ki ta', 'kita').replace('s aya', 'saya')\n",
    "    string = string.replace('m enjadi', 'menjadi').replace('meno lak', 'menolak')\n",
    "    try:\n",
    "        soup = BeautifulSoup(string, 'lxml')\n",
    "        string = soup.text\n",
    "    except Exception:\n",
    "        # Best-effort HTML stripping; keep the raw text if parsing fails.\n",
    "        pass\n",
    "    return re.sub(r'[ ]+', ' ', string).strip()\n",
    "\n",
    "def download(url, file_name):\n",
    "    \"\"\"Download `url` to `file_name`, raising on HTTP errors.\n",
    "\n",
    "    Fetch before opening the file so a failed request does not leave an\n",
    "    empty file behind; raise_for_status prevents saving a 404 error page\n",
    "    as a corrupt .pdf.\n",
    "    \"\"\"\n",
    "    response = requests.get(url, timeout=60)\n",
    "    response.raise_for_status()\n",
    "    with open(file_name, 'wb') as file:\n",
    "        file.write(response.content)\n",
    "\n",
    "os.makedirs(directory, exist_ok = True)\n",
    "latest_date = datetime.strptime('2024-01-01', '%Y-%m-%d')\n",
    "\n",
    "# Candidate sitting dates: every day for ~8 months from latest_date,\n",
    "# formatted as ddmmYYYY to match the hansard PDF naming scheme.\n",
    "startDate = latest_date\n",
    "endDate = latest_date + timedelta(days=30 * 8)\n",
    "datesRange = pandas.date_range(startDate,endDate-timedelta(days=1),freq='d')\n",
    "datesRange = [d.strftime('%d%m%Y') for d in datesRange]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "6cb663dd",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 240/240 [00:47<00:00,  5.05it/s]\n"
     ]
    }
   ],
   "source": [
    "for d in tqdm(datesRange):\n",
    "    filename = f'DR-{d}.pdf'\n",
    "    output_filename = os.path.join(directory, filename)\n",
    "    # Skip files already fetched; files under ~1 KB are likely saved error\n",
    "    # pages, so they are retried rather than skipped.\n",
    "    if os.path.exists(output_filename) and os.path.getsize(output_filename) > 1000:\n",
    "        print(f'{output_filename} exists, skip.')\n",
    "        continue\n",
    "    # BUG FIX: the URL previously contained a literal placeholder instead of\n",
    "    # interpolating the per-date filename, so every request hit the same path.\n",
    "    url = f'https://www.parlimen.gov.my/files/hindex/pdf/{filename}'\n",
    "    try:\n",
    "        download(url, output_filename)\n",
    "    except Exception as e:\n",
    "        # Most dates are not sitting days, so failed downloads are expected;\n",
    "        # log and continue.\n",
    "        print(e)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "74ca1c07",
   "metadata": {},
   "outputs": [],
   "source": [
    "pdfs = sorted(glob(f'{directory}/*.pdf'))\n",
    "print(pdfs)\n",
    "\n",
    "print('writing hansard-temp.jsonl')\n",
    "with open('hansard-temp.jsonl', 'w') as fopen:\n",
    "    for file in tqdm(pdfs):\n",
    "\n",
    "        # Tika returns XHTML; keep only the <body> and strip block-level tags,\n",
    "        # then split on page divs so each element is one PDF page.\n",
    "        raw_xml = parser.from_file(file, f'{TIKA_HOST}/tika', xmlContent=True)\n",
    "        body = raw_xml['content'].split('<body>')[1].split('</body>')[0]\n",
    "        body_without_tag = body.replace(\"<p>\", \"\").replace(\"</p>\", \"\").replace(\"<div>\", \"\").replace(\"</div>\",\"\").replace(\"<p />\",\"\")\n",
    "        text_pages = body_without_tag.split(\"\"\"<div class=\"page\">\"\"\")[1:]\n",
    "\n",
    "        for i, t in enumerate(text_pages):\n",
    "            # The printed page number appears either as 'DR. d.m.yyyy <page>'\n",
    "            # or as '<page> DR. d.m.yyyy' in the page header.\n",
    "            r = re.findall(r'DR[\\. ]\\s*[0-3]?[0-9].[0-3]?[0-9].(?:[0-9]{2})?[0-9]{2}\\s+\\d+\\b', t)\n",
    "            r_ = re.findall(r'\\d+\\s+DR[\\. ]\\s*[0-3]?[0-9].[0-3]?[0-9].(?:[0-9]{2})?[0-9]{2}\\b', t)\n",
    "            found = True\n",
    "            if len(r):\n",
    "                no_page = r[0].split()[-1]\n",
    "            elif len(r_):\n",
    "                no_page = r_[0].split()[0]\n",
    "            else:\n",
    "                found = False\n",
    "\n",
    "            # Pages without a recognizable header (covers, dividers) are skipped.\n",
    "            if not found:\n",
    "                continue\n",
    "\n",
    "            splitted = t.split('\\n \\n')\n",
    "            splitted = [s for s in splitted if len(s.strip()) > 1]\n",
    "            if len(splitted) < 3:\n",
    "                splitted = t.split('\\n\\n')\n",
    "            # Drop the header chunk, clean each paragraph, and make sure every\n",
    "            # paragraph ends with punctuation before joining them.\n",
    "            splitted = [cleaning(s) for s in splitted[1:]]\n",
    "            splitted = [s + '.' if s[-1] not in  '.;:,' else s for s in splitted if len(s)]\n",
    "            splitted = ' '.join(splitted)\n",
    "            date = datetime.strptime(file, f'{directory}/DR-%d%m%Y.pdf').strftime('%Y-%m-%d')\n",
    "            \n",
    "            if not len(splitted):\n",
    "                # BUG FIX: `f` was undefined (NameError on any empty page);\n",
    "                # the loop variable is `file`.\n",
    "                print(file, i, t)\n",
    "\n",
    "            d = {\n",
    "                'original': t,\n",
    "                'cleaned': splitted,\n",
    "                'no_page': int(no_page),\n",
    "                'actual_no_page': i + 1,\n",
    "                'date': date,\n",
    "                'url': f'https://www.parlimen.gov.my/files/hindex/{file}'.replace('/pdf_temp', '/pdf')\n",
    "            }\n",
    "            fopen.write(f'{json.dumps(d)}\\n')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "fa47c63b",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.10"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}