{ "cells": [ { "cell_type": "markdown", "id": "f31245e6", "metadata": {}, "source": [ "### Download entries from DIP-Bundestag and put them in a csv to further process them.\n", "\n", "See https://search.dip.bundestag.de/api/v1/swagger-ui/ for the API reference. We only request documents of the type antwort and based on the start and end date provided." ] }, { "cell_type": "code", "execution_count": null, "id": "8af55e90", "metadata": {}, "outputs": [], "source": [ "import requests\n", "from pprint import pprint\n", "import pandas as pd\n", "from pathlib import Path\n", "from pprint import pprint\n", "from tqdm import tqdm\n", "from concurrent.futures import ThreadPoolExecutor\n", "\n", "DIP_URL = \"https://search.dip.bundestag.de/api/v1/drucksache\"\n", "DIP_TOKEN = \"rgsaY4U.oZRQKUHdJhF9qguHMkwCGIoLaqEcaHjYLF\"\n", "\n", "START_DATE = \"2015-05-07\"\n", "END_DATE = \"2023-07-09\"\n", "\n", "REQUEST_URL = f\"{DIP_URL}?f.drucksachetyp=Antwort&f.datum.start={START_DATE}&f.datum.end={END_DATE}&format=json&apikey={DIP_TOKEN}\"\n", "\n", "df = pd.DataFrame()\n", "docs = []\n", "res = requests.get(REQUEST_URL)\n", "r_json = res.json()\n", "old_cursor = r_json[\"cursor\"]\n", "\n", "with ThreadPoolExecutor(max_workers=10) as pool:\n", " count = 0\n", " while True:\n", " for doc in tqdm(r_json[\"documents\"]):\n", " docs.append(doc)\n", " doc_id = doc[\"id\"]\n", " doc_number = doc[\"fundstelle\"][\"dokumentnummer\"]\n", " url = doc[\"fundstelle\"][\"pdf_url\"] \n", " count += 1\n", " res = requests.get(f\"{REQUEST_URL}&cursor={old_cursor}\")\n", " r_json = res.json()\n", " new_cursor = r_json[\"cursor\"]\n", " if new_cursor == old_cursor:\n", " print(\"Found same cursor. No new results.\")\n", " break\n", " old_cursor = new_cursor\n", "\n", "\n", "df = df.from_records(docs)\n", "print(f\"Extracted {len(df)} entries.\")\n", "df.to_csv(\"raw_entries.csv\")" ] }, { "cell_type": "markdown", "id": "58b0055f", "metadata": {}, "source": [ "### Read back csv written in previous step, and download the associated PDF with each entry" ] }, { "cell_type": "code", "execution_count": null, "id": "1bf8044e", "metadata": {}, "outputs": [], "source": [ "\n", "df = pd.read_csv(\"raw_entries.csv\")\n", "\n", "def download_file(download_path:Path,doc_id:str,url:str) -> None:\n", " r = requests.get(url, allow_redirects=True)\n", " if r.status_code != 200:\n", " print(f\"Got status {r.status_code} for url {doc_id} and {url}\")\n", " return False\n", " with open(download_path / f\"{doc_id}.pdf\", 'wb') as f:\n", " f.write(r.content)\n", " \n", " return True\n", "\n", "download_path = Path(\"./downloads2/\")\n", "download_path.mkdir(exist_ok=True)\n", "\n", "\n", "for i,row in df.iterrows():\n", " pdf_url = eval(row[\"fundstelle\"])[\"pdf_url\"]\n", " success = download_file(download_path,row[\"id\"],pdf_url)\n", " df.at[i,\"download_success\"]=success\n", "\n", "df.to_csv(\"entries_with_download_status.csv\")" ] }, { "cell_type": "markdown", "id": "15959ea4", "metadata": {}, "source": [ "\n", "### Extract the text out of the downloaded pdfs" ] }, { "cell_type": "code", "execution_count": null, "id": "7417bc4e", "metadata": {}, "outputs": [], "source": [ "import pandas as pd\n", "import sys\n", "import pdftotext\n", "import fitz\n", "import re\n", "from pathlib import Path\n", "from dehyphen import FlairScorer\n", "from dehyphen import format\n", "from tqdm import tqdm\n", "\n", "HEADER_HEIGHT = 78\n", "FOOTER_HEIGHT = 70\n", "\n", "QUESTION_FONT_SIZE = 9.609999656677246\n", "ANSWER_FONT_SIZE = 10.678000450134277\n", 
"BULLET_POINT_ANSWER_SIZE = 6.0\n", "\n", "OUTPUT_PATH = 'raw_text_blocks.csv'\n", "\n", "scorer = FlairScorer(lang=\"de\")\n", "pattern = r'^\\s*\\d+\\.\\s*' # Matches a number followed by a dot and a space at the beginning of the string\n", "\n", "\n", "def process_text_block(block:dict = {},pdf_path:Path = None,remove_q_numbers: bool = False):\n", " txt = []\n", " font = None\n", "\n", " for line in block.get(\"lines\", []):\n", " for span in line[\"spans\"]:\n", " span_txt = span[\"text\"]\n", " span_font = span[\"font\"]\n", " span_font_size = span[\"size\"]\n", " if span_txt==\"\" or span_txt.isspace():\n", " #print(f\"Found empty string or only spaces in document {pdf_path}\")\n", " continue\n", " if font is None:\n", " span_type = \"Unknown\"\n", " if span_font_size == QUESTION_FONT_SIZE:\n", " span_type = \"Question\"\n", " if remove_q_numbers:\n", " span_txt = re.sub(pattern,\"\",span_txt)\n", " elif span_font_size in [BULLET_POINT_ANSWER_SIZE,ANSWER_FONT_SIZE]:\n", " span_type = \"Answer\"\n", " font = (span_font, span_font_size, span_type)\n", " txt.append(span_txt)\n", "\n", " if len(txt) > 1:\n", " txt_joined = \"\\n\".join(txt)\n", " txt_formatted = format.text_to_format(txt_joined)\n", " txt_dehyphenated = scorer.dehyphen(txt_formatted)\n", " txt = format.format_to_text(txt_dehyphenated)\n", " else:\n", " if len(txt) == 0:\n", " txt = \"\"\n", " else:\n", " txt = txt[0]\n", " txt = txt.strip()\n", " if font is not None:\n", " result = {\"file\": pdf_path.name, \"txt\": txt, \"font\": font[0], \"size\": font[1], \"type\": font[2]}\n", " else:\n", " result = {\"file\": pdf_path.name, \"txt\": \"Error\", \"font\": \"Error\", \"size\":\"Error\", \"type\": \"Error\"}\n", " return result\n", "\n", "\n", "processed = []\n", "if Path(OUTPUT_PATH).exists():\n", " df = pd.read_csv(OUTPUT_PATH,sep=\"|\")\n", " processed = df[\"file\"].values\n", "else:\n", " df = pd.DataFrame()\n", "\n", "res = []\n", "for pdf_path in tqdm(Path(\"./downloads2\").glob(\"*.pdf\"),desc=\"docs\"):\n", "\n", " if pdf_path.name in processed:\n", " print(f\"Found pdf in df: {pdf_path}\")\n", " continue\n", "\n", " doc = fitz.open(pdf_path) # open a document\n", "\n", " for i,page in enumerate(doc): # iterate the document pages\n", " #page.draw_rect([0,HEADER_HEIGHT,page.rect.width,page.rect.height - FOOTER_HEIGHT])\n", " res_raw = page.get_text(\"dict\",clip = [0,HEADER_HEIGHT,page.rect.width,page.rect.height - FOOTER_HEIGHT])\n", " blocks = res_raw[\"blocks\"] # blocks on page\n", "\n", " for block in blocks:\n", " try:\n", " block_res = process_text_block(block,pdf_path)\n", " if block_res[\"type\"] != \"Unknown\" and block_res[\"type\"] != \"Error\":\n", " res.append(block_res)\n", " except Exception as e:\n", " print(str(e),block)\n", "\n", "\n", " #print(len(res))\n", " if len(res) > 100:\n", " df = pd.DataFrame.from_dict(res)\n", "\n", " df.to_csv(OUTPUT_PATH, mode='a', header=not Path(OUTPUT_PATH).exists(),index=False,sep=\"|\")\n", " df = pd.DataFrame()\n", " res = []\n" ] }, { "cell_type": "markdown", "id": "c9d709c4", "metadata": {}, "source": [ "### Transform raw text into question / answer tuples" ] }, { "cell_type": "code", "execution_count": null, "id": "98cb494b", "metadata": {}, "outputs": [], "source": [ "df_f = pd.read_csv(\"./raw_text_blocks.csv\",sep=\"|\")\n", "print(len(df_f))\n", "files = df_f.groupby('file')\n", "\n", "pairs = []\n", "for i,group in files:\n", "\n", " i = iter(group.groupby([(group.type != group.type.shift()).cumsum()]))\n", "\n", " try:\n", " while True:\n", " elem1 
    "            elem1 = next(runs)\n",
    "            if set(elem1[1].type.values) != {\"Question\"}:\n",
    "                print(f\"Skipping run in {name}: expected questions\")\n",
    "                continue\n",
    "            elem2 = next(runs)\n",
    "            if set(elem2[1].type.values) != {\"Answer\"}:\n",
    "                print(f\"Skipping run in {name}: expected answers\")\n",
    "                continue\n",
    "\n",
    "            pair = {}\n",
    "            pair[\"question\"] = \"\\n\".join(list(elem1[1].txt.values))\n",
    "            pair[\"answer\"] = \"\\n\".join(list(elem2[1].txt.values))\n",
    "            pair[\"doc_id\"] = name.split(\".\")[0]\n",
    "            pairs.append(pair)\n",
    "    except StopIteration:\n",
    "        pass\n",
    "\n",
    "df_res = pd.DataFrame.from_records(pairs)\n",
    "df_res.to_csv(\"final.csv\")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}