wirthual committed on
Commit
6293f3a
1 Parent(s): aa03200

Upload QuestionAndAnswerExtraction.ipynb


Add Jupyter notebook used to create the first version of the dataset.

Files changed (1)
  1. QuestionAndAnswerExtraction.ipynb +317 -0
QuestionAndAnswerExtraction.ipynb ADDED
@@ -0,0 +1,317 @@
+ {
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "f31245e6",
+ "metadata": {},
+ "source": [
+ "### Download entries from DIP-Bundestag and store them in a CSV for further processing\n",
+ "\n",
+ "See https://search.dip.bundestag.de/api/v1/swagger-ui/ for the API reference. We only request documents of type Antwort within the provided start and end dates."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "8af55e90",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import requests\n",
+ "import pandas as pd\n",
+ "from pathlib import Path\n",
+ "from tqdm import tqdm\n",
+ "\n",
+ "DIP_URL = \"https://search.dip.bundestag.de/api/v1/drucksache\"\n",
+ "DIP_TOKEN = \"rgsaY4U.oZRQKUHdJhF9qguHMkwCGIoLaqEcaHjYLF\"\n",
+ "\n",
+ "START_DATE = \"2015-05-07\"\n",
+ "END_DATE = \"2023-07-09\"\n",
+ "\n",
+ "REQUEST_URL = f\"{DIP_URL}?f.drucksachetyp=Antwort&f.datum.start={START_DATE}&f.datum.end={END_DATE}&format=json&apikey={DIP_TOKEN}\"\n",
+ "\n",
+ "docs = []\n",
+ "res = requests.get(REQUEST_URL)\n",
+ "r_json = res.json()\n",
+ "old_cursor = r_json[\"cursor\"]\n",
+ "\n",
+ "# The API pages its results via a cursor: passing the cursor of the previous\n",
+ "# response returns the next page; the last page returns the same cursor again.\n",
+ "while True:\n",
+ "    for doc in tqdm(r_json[\"documents\"]):\n",
+ "        docs.append(doc)\n",
+ "    res = requests.get(f\"{REQUEST_URL}&cursor={old_cursor}\")\n",
+ "    r_json = res.json()\n",
+ "    new_cursor = r_json[\"cursor\"]\n",
+ "    if new_cursor == old_cursor:\n",
+ "        print(\"Found same cursor. No new results.\")\n",
+ "        break\n",
+ "    old_cursor = new_cursor\n",
+ "\n",
+ "df = pd.DataFrame.from_records(docs)\n",
+ "print(f\"Extracted {len(df)} entries.\")\n",
+ "df.to_csv(\"raw_entries.csv\")"
+ ]
+ },
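+ {
+ "cell_type": "markdown",
+ "id": "b2f1c0aa",
+ "metadata": {},
+ "source": [
+ "A quick way to sanity-check the endpoint is to let requests encode the query string itself. This is only a sketch of the same request as above; it assumes the constants from the previous cell."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "c3d4e5f6",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Sketch: the same request as above, with requests building the query string.\n",
+ "# Reuses DIP_URL, DIP_TOKEN, START_DATE and END_DATE from the previous cell.\n",
+ "params = {\n",
+ "    \"f.drucksachetyp\": \"Antwort\",\n",
+ "    \"f.datum.start\": START_DATE,\n",
+ "    \"f.datum.end\": END_DATE,\n",
+ "    \"format\": \"json\",\n",
+ "    \"apikey\": DIP_TOKEN,\n",
+ "}\n",
+ "page = requests.get(DIP_URL, params=params).json()\n",
+ "print(list(page.keys()))  # expect at least 'documents' and 'cursor'"
+ ]
+ },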
+ {
+ "cell_type": "markdown",
+ "id": "58b0055f",
+ "metadata": {},
+ "source": [
+ "### Read back the CSV written in the previous step and download the PDF associated with each entry"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "1bf8044e",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import ast\n",
+ "\n",
+ "df = pd.read_csv(\"raw_entries.csv\")\n",
+ "\n",
+ "def download_file(download_path: Path, doc_id: str, url: str) -> bool:\n",
+ "    r = requests.get(url, allow_redirects=True)\n",
+ "    if r.status_code != 200:\n",
+ "        print(f\"Got status {r.status_code} for document {doc_id} and url {url}\")\n",
+ "        return False\n",
+ "    with open(download_path / f\"{doc_id}.pdf\", 'wb') as f:\n",
+ "        f.write(r.content)\n",
+ "    return True\n",
+ "\n",
+ "download_path = Path(\"./downloads2/\")\n",
+ "download_path.mkdir(exist_ok=True)\n",
+ "\n",
+ "for i, row in df.iterrows():\n",
+ "    # to_csv stringified the nested 'fundstelle' dict, so parse it back\n",
+ "    # with ast.literal_eval (a safe alternative to eval).\n",
+ "    pdf_url = ast.literal_eval(row[\"fundstelle\"])[\"pdf_url\"]\n",
+ "    success = download_file(download_path, row[\"id\"], pdf_url)\n",
+ "    df.at[i, \"download_success\"] = success\n",
+ "\n",
+ "df.to_csv(\"entries_with_download_status.csv\")"
+ ]
+ },
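+ {
+ "cell_type": "markdown",
+ "id": "7e8f9a0b",
+ "metadata": {},
+ "source": [
+ "Downloading thousands of PDFs sequentially is slow. A minimal sketch of a parallel variant using a thread pool, assuming `download_file`, `download_path` and `df` from the cell above:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "0b1c2d3e",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Sketch: parallelize the downloads with a thread pool. Assumes\n",
+ "# download_file, download_path and df from the previous cell.\n",
+ "from concurrent.futures import ThreadPoolExecutor\n",
+ "\n",
+ "def download_row(row):\n",
+ "    pdf_url = ast.literal_eval(row[\"fundstelle\"])[\"pdf_url\"]\n",
+ "    return download_file(download_path, row[\"id\"], pdf_url)\n",
+ "\n",
+ "with ThreadPoolExecutor(max_workers=10) as pool:\n",
+ "    rows = (row for _, row in df.iterrows())\n",
+ "    df[\"download_success\"] = list(pool.map(download_row, rows))"
+ ]
+ },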
+ {
+ "cell_type": "markdown",
+ "id": "15959ea4",
+ "metadata": {},
+ "source": [
+ "### Extract the text from the downloaded PDFs"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "7417bc4e",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import pandas as pd\n",
+ "import fitz\n",
+ "import re\n",
+ "from pathlib import Path\n",
+ "from dehyphen import FlairScorer\n",
+ "from dehyphen import format\n",
+ "from tqdm import tqdm\n",
+ "\n",
+ "# Page regions (in points) clipped away so headers and footers are ignored.\n",
+ "HEADER_HEIGHT = 78\n",
+ "FOOTER_HEIGHT = 70\n",
+ "\n",
+ "# Font sizes that identify question and answer text in the Bundestag layout.\n",
+ "QUESTION_FONT_SIZE = 9.609999656677246\n",
+ "ANSWER_FONT_SIZE = 10.678000450134277\n",
+ "BULLET_POINT_ANSWER_SIZE = 6.0\n",
+ "\n",
+ "OUTPUT_PATH = 'raw_text_blocks.csv'\n",
+ "\n",
+ "scorer = FlairScorer(lang=\"de\")\n",
+ "pattern = r'^\\s*\\d+\\.\\s*'  # a question number ('1. ') at the start of the string\n",
+ "\n",
+ "\n",
+ "def process_text_block(block: dict, pdf_path: Path, remove_q_numbers: bool = False):\n",
+ "    txt = []\n",
+ "    font = None\n",
+ "\n",
+ "    for line in block.get(\"lines\", []):\n",
+ "        for span in line[\"spans\"]:\n",
+ "            span_txt = span[\"text\"]\n",
+ "            span_font = span[\"font\"]\n",
+ "            span_font_size = span[\"size\"]\n",
+ "            if span_txt == \"\" or span_txt.isspace():\n",
+ "                continue\n",
+ "            # Classify the whole block by its first non-empty span.\n",
+ "            if font is None:\n",
+ "                span_type = \"Unknown\"\n",
+ "                if span_font_size == QUESTION_FONT_SIZE:\n",
+ "                    span_type = \"Question\"\n",
+ "                    if remove_q_numbers:\n",
+ "                        span_txt = re.sub(pattern, \"\", span_txt)\n",
+ "                elif span_font_size in [BULLET_POINT_ANSWER_SIZE, ANSWER_FONT_SIZE]:\n",
+ "                    span_type = \"Answer\"\n",
+ "                font = (span_font, span_font_size, span_type)\n",
+ "            txt.append(span_txt)\n",
+ "\n",
+ "    if len(txt) > 1:\n",
+ "        # Re-join multi-line blocks and remove line-break hyphenation.\n",
+ "        txt_joined = \"\\n\".join(txt)\n",
+ "        txt_formatted = format.text_to_format(txt_joined)\n",
+ "        txt_dehyphenated = scorer.dehyphen(txt_formatted)\n",
+ "        txt = format.format_to_text(txt_dehyphenated)\n",
+ "    else:\n",
+ "        txt = \"\" if len(txt) == 0 else txt[0]\n",
+ "    txt = txt.strip()\n",
+ "    if font is not None:\n",
+ "        result = {\"file\": pdf_path.name, \"txt\": txt, \"font\": font[0], \"size\": font[1], \"type\": font[2]}\n",
+ "    else:\n",
+ "        result = {\"file\": pdf_path.name, \"txt\": \"Error\", \"font\": \"Error\", \"size\": \"Error\", \"type\": \"Error\"}\n",
+ "    return result\n",
+ "\n",
+ "\n",
+ "# Resume support: skip files already present in the output CSV.\n",
+ "processed = []\n",
+ "if Path(OUTPUT_PATH).exists():\n",
+ "    df = pd.read_csv(OUTPUT_PATH, sep=\"|\")\n",
+ "    processed = df[\"file\"].values\n",
+ "\n",
+ "res = []\n",
+ "for pdf_path in tqdm(Path(\"./downloads2\").glob(\"*.pdf\"), desc=\"docs\"):\n",
+ "\n",
+ "    if pdf_path.name in processed:\n",
+ "        print(f\"Found pdf in df: {pdf_path}\")\n",
+ "        continue\n",
+ "\n",
+ "    doc = fitz.open(pdf_path)  # open a document\n",
+ "\n",
+ "    for i, page in enumerate(doc):  # iterate the document pages\n",
+ "        res_raw = page.get_text(\"dict\", clip=[0, HEADER_HEIGHT, page.rect.width, page.rect.height - FOOTER_HEIGHT])\n",
+ "        blocks = res_raw[\"blocks\"]  # blocks on page\n",
+ "\n",
+ "        for block in blocks:\n",
+ "            try:\n",
+ "                block_res = process_text_block(block, pdf_path)\n",
+ "                if block_res[\"type\"] != \"Unknown\" and block_res[\"type\"] != \"Error\":\n",
+ "                    res.append(block_res)\n",
+ "            except Exception as e:\n",
+ "                print(str(e), block)\n",
+ "\n",
+ "    # Flush in batches so progress survives interruptions.\n",
+ "    if len(res) > 100:\n",
+ "        df = pd.DataFrame.from_dict(res)\n",
+ "        df.to_csv(OUTPUT_PATH, mode='a', header=not Path(OUTPUT_PATH).exists(), index=False, sep=\"|\")\n",
+ "        res = []\n",
+ "\n",
+ "# Flush any remaining blocks that did not fill a full batch.\n",
+ "if res:\n",
+ "    df = pd.DataFrame.from_dict(res)\n",
+ "    df.to_csv(OUTPUT_PATH, mode='a', header=not Path(OUTPUT_PATH).exists(), index=False, sep=\"|\")"
+ ]
+ },
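+ {
+ "cell_type": "markdown",
+ "id": "4f5a6b7c",
+ "metadata": {},
+ "source": [
+ "The font-size constants above are specific to the Bundestag layout. A small sketch for recalibrating them on a sample document: count how much text each span size carries; the question and answer sizes should dominate."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "5d6e7f80",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Sketch: tally span font sizes in one downloaded PDF to calibrate the\n",
+ "# QUESTION_FONT_SIZE / ANSWER_FONT_SIZE constants. Uses the first PDF\n",
+ "# found in ./downloads2 as an example.\n",
+ "from collections import Counter\n",
+ "\n",
+ "sizes = Counter()\n",
+ "sample = next(Path(\"./downloads2\").glob(\"*.pdf\"))\n",
+ "doc = fitz.open(sample)\n",
+ "for page in doc:\n",
+ "    for block in page.get_text(\"dict\")[\"blocks\"]:\n",
+ "        for line in block.get(\"lines\", []):\n",
+ "            for span in line[\"spans\"]:\n",
+ "                sizes[span[\"size\"]] += len(span[\"text\"])\n",
+ "print(sizes.most_common(10))"
+ ]
+ },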
+ {
+ "cell_type": "markdown",
+ "id": "c9d709c4",
+ "metadata": {},
+ "source": [
+ "### Transform the raw text into question/answer pairs\n",
+ "\n",
+ "Rows are grouped per file into runs of consecutive blocks of the same type; each Question run followed by an Answer run forms one pair."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "98cb494b",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df_f = pd.read_csv(\"./raw_text_blocks.csv\", sep=\"|\")\n",
+ "print(len(df_f))\n",
+ "files = df_f.groupby('file')\n",
+ "\n",
+ "pairs = []\n",
+ "for fname, group in files:\n",
+ "\n",
+ "    # Group consecutive rows of the same type (see the idiom demo below).\n",
+ "    runs = iter(group.groupby((group.type != group.type.shift()).cumsum()))\n",
+ "\n",
+ "    try:\n",
+ "        while True:\n",
+ "            elem1 = next(runs)\n",
+ "            if set(elem1[1].type.values) != {\"Question\"}:\n",
+ "                print(\"Broken pairing: expected a question run.\")\n",
+ "                continue\n",
+ "            elem2 = next(runs)\n",
+ "            if set(elem2[1].type.values) != {\"Answer\"}:\n",
+ "                print(\"Broken pairing: expected an answer run.\")\n",
+ "                continue\n",
+ "\n",
+ "            pair = {}\n",
+ "            pair[\"question\"] = \"\\n\".join(list(elem1[1].txt.values))\n",
+ "            pair[\"answer\"] = \"\\n\".join(list(elem2[1].txt.values))\n",
+ "            pair[\"doc_id\"] = fname.split(\".\")[0]  # file name without the .pdf extension\n",
+ "            pairs.append(pair)\n",
+ "    except StopIteration:\n",
+ "        pass\n",
+ "\n",
+ "df_res = pd.DataFrame.from_records(pairs)\n",
+ "df_res.to_csv(\"final.csv\")"
+ ]
+ },
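+ {
+ "cell_type": "markdown",
+ "id": "a1b2c3d4",
+ "metadata": {},
+ "source": [
+ "A tiny self-contained illustration of the shift/cumsum idiom used above: comparing a series with its shifted self marks the start of each run, and the cumulative sum turns those marks into one group id per run of consecutive equal values."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "e5f60718",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Demo of the run-grouping idiom: one group id per run of equal values.\n",
+ "s = pd.Series([\"Question\", \"Question\", \"Answer\", \"Question\", \"Answer\", \"Answer\"])\n",
+ "run_id = (s != s.shift()).cumsum()\n",
+ "print(run_id.tolist())  # [1, 1, 2, 3, 4, 4]"
+ ]
+ }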
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.10.6"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+ }