{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import random\n",
    "import shutil\n",
    "from pathlib import Path\n",
    "\n",
    "import arxiv\n",
    "import pandas as pd\n",
    "import pymupdf\n",
    "from tqdm.contrib.concurrent import thread_map"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Root directory for downloaded PDFs, one subfolder per arXiv category\n",
    "PAPERS_OUTPUT_PATH = Path(\"papers\")\n",
    "\n",
    "# Target number of papers to keep per category\n",
    "PAPERS_PER_CATEGORY = 20\n",
    "\n",
    "# Root directory for first-page PNG renders, mirroring the papers/ layout\n",
    "IMAGES_OUTPUT_PATH = Path(\"images\")\n",
    "\n",
    "# Root directory for the final train/test image dataset\n",
    "DATASET_OUTPUT_PATH = Path(\"dataset\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Read the list of target arXiv categories, one per line\n",
    "categories = Path(\"categories.txt\").read_text().strip().split(\"\\n\")\n",
    "\n",
    "print(f\"Found {len(categories)} categories: {categories}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def download_paper(args):\n",
    "    \"\"\"Download one paper's PDF into its category folder.\n",
    "\n",
    "    ``args`` is a ``(paper, category)`` tuple (thread_map passes a single\n",
    "    argument). Returns the saved PDF path, or None when the download fails.\n",
    "    \"\"\"\n",
    "    paper, category = args\n",
    "    target_dir = PAPERS_OUTPUT_PATH / category\n",
    "    try:\n",
    "        return paper.download_pdf(dirpath=target_dir)\n",
    "    except Exception as e:\n",
    "        # Best-effort: report the failure and keep going with other papers\n",
    "        print(f\"Error downloading paper {paper.title}: {e}\")\n",
    "        return None\n",
    "\n",
    "\n",
    "def process_category(category):\n",
    "    \"\"\"Download papers for one arXiv category until the quota is met.\n",
    "\n",
    "    Existing PDFs under papers/<category> count toward PAPERS_PER_CATEGORY;\n",
    "    only the shortfall is fetched, so the cell is safe to re-run.\n",
    "    \"\"\"\n",
    "    print(\"Downloading papers for\", category)\n",
    "\n",
    "    # Create category directory\n",
    "    (PAPERS_OUTPUT_PATH / category).mkdir(parents=True, exist_ok=True)\n",
    "\n",
    "    # Count the number of papers already downloaded\n",
    "    existing_files = len(list(PAPERS_OUTPUT_PATH.glob(f\"{category}/*.pdf\")))\n",
    "\n",
    "    # Calculate number of papers to download\n",
    "    papers_needed = PAPERS_PER_CATEGORY - existing_files\n",
    "\n",
    "    # Skip if we already have enough papers\n",
    "    if papers_needed <= 0:\n",
    "        print(f\"Skipping {category} as we already have {existing_files} papers\")\n",
    "        return\n",
    "\n",
    "    # Download papers; over-fetch so shuffling still has enough candidates\n",
    "    arxiv_client = arxiv.Client()\n",
    "\n",
    "    search = arxiv.Search(\n",
    "        query=f\"cat:{category}\",\n",
    "        max_results=max(100, papers_needed * 2),\n",
    "        sort_by=arxiv.SortCriterion.Relevance,\n",
    "    )\n",
    "\n",
    "    results = list(arxiv_client.results(search))\n",
    "\n",
    "    # Shuffle the results randomly and take only the first papers_needed\n",
    "    random.shuffle(results)\n",
    "    results = results[:papers_needed]\n",
    "\n",
    "    # Create list of (paper, category) tuples for download_paper function\n",
    "    download_args = [(paper, category) for paper in results]\n",
    "\n",
    "    download_results = thread_map(\n",
    "        download_paper,\n",
    "        download_args,\n",
    "        max_workers=1,  # NOTE(review): looks intentionally serial (rate limiting?) — confirm\n",
    "        chunksize=1,\n",
    "        desc=f\"Downloading papers for {category}\",\n",
    "    )\n",
    "\n",
    "    # download_paper returns None on failure, so count only real successes\n",
    "    # (the original reported len(download_results), which included failures)\n",
    "    successful = sum(1 for r in download_results if r is not None)\n",
    "    print(f\"Downloaded {successful} papers for {category}\")\n",
    "\n",
    "\n",
    "# Run downloads one category at a time\n",
    "for category in categories:\n",
    "    process_category(category)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sanity check: every category should now hold its full quota of PDFs\n",
    "assert len(set(PAPERS_OUTPUT_PATH.glob(\"**/*.pdf\"))) == PAPERS_PER_CATEGORY * len(categories)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def process_pdf(pdf):\n",
    "    \"\"\"Render the first page of ``pdf`` to a 300-dpi RGB PNG.\n",
    "\n",
    "    Output goes to images/<category>/<stem>.png; already-rendered PDFs are\n",
    "    skipped, and unopenable PDFs are deleted so a re-run can re-download them.\n",
    "    \"\"\"\n",
    "    image_output_dir = IMAGES_OUTPUT_PATH / pdf.parent.name\n",
    "    image_output_dir.mkdir(parents=True, exist_ok=True)\n",
    "    image_output_path = image_output_dir / f\"{pdf.stem}.png\"\n",
    "\n",
    "    if image_output_path.exists():\n",
    "        return\n",
    "\n",
    "    try:\n",
    "        doc = pymupdf.open(pdf)\n",
    "    except pymupdf.FileDataError:\n",
    "        # Delete the pdf if it's corrupted\n",
    "        print(f\"Deleting corrupted PDF: {pdf}\")\n",
    "        os.remove(pdf)\n",
    "        return\n",
    "\n",
    "    # Close the document even if rendering fails (the original never closed it,\n",
    "    # leaking a handle per PDF)\n",
    "    try:\n",
    "        page = doc.load_page(0)\n",
    "        pixmap = page.get_pixmap(dpi=300, colorspace=pymupdf.csRGB)\n",
    "        pixmap.save(image_output_path)\n",
    "    finally:\n",
    "        doc.close()\n",
    "\n",
    "\n",
    "def process_pdfs():\n",
    "    \"\"\"Render the first page of every downloaded PDF to a PNG.\"\"\"\n",
    "    pdf_paths = sorted(PAPERS_OUTPUT_PATH.glob(\"**/*.pdf\"))\n",
    "\n",
    "    print(f\"Found {len(pdf_paths)} PDFs\")\n",
    "\n",
    "    thread_map(process_pdf, pdf_paths, max_workers=8, chunksize=1, desc=\"Processing PDFs\")\n",
    "\n",
    "\n",
    "process_pdfs()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sanity check: one rendered PNG should exist per downloaded PDF\n",
    "assert len(set(IMAGES_OUTPUT_PATH.glob(\"**/*.png\"))) == PAPERS_PER_CATEGORY * len(categories)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def generate_dataset():\n",
    "    \"\"\"Copy images into a train/test layout and write labels.csv.\n",
    "\n",
    "    Per category, the first 5 images (sorted by filename) go to train and\n",
    "    the next 5 to test; labels.csv records document path, label, and split.\n",
    "    \"\"\"\n",
    "    # Create dataset directories (use the configured constant instead of\n",
    "    # re-hardcoding Path(\"dataset\") as the original did)\n",
    "    train_dir = DATASET_OUTPUT_PATH / \"train\"\n",
    "    test_dir = DATASET_OUTPUT_PATH / \"test\"\n",
    "\n",
    "    train_dir.mkdir(parents=True, exist_ok=True)\n",
    "    test_dir.mkdir(parents=True, exist_ok=True)\n",
    "\n",
    "    # Initialize lists to store CSV data\n",
    "    csv_data = []\n",
    "\n",
    "    # Process each category\n",
    "    for category_dir in IMAGES_OUTPUT_PATH.glob(\"*\"):\n",
    "        if not category_dir.is_dir():\n",
    "            continue\n",
    "\n",
    "        category = category_dir.name\n",
    "        image_files = sorted(category_dir.glob(\"*.png\"))\n",
    "\n",
    "        # Take 5 images for train and 5 for test\n",
    "        train_images = image_files[:5]\n",
    "        test_images = image_files[5:10]\n",
    "\n",
    "        # Copy images and collect CSV data\n",
    "        for img_path in train_images:\n",
    "            dest = train_dir / img_path.name\n",
    "            shutil.copy2(img_path, dest)\n",
    "            csv_data.append({\"document\": str(dest), \"label\": category, \"is_train\": 1})\n",
    "\n",
    "        for img_path in test_images:\n",
    "            dest = test_dir / img_path.name\n",
    "            shutil.copy2(img_path, dest)\n",
    "            csv_data.append({\"document\": str(dest), \"label\": category, \"is_train\": 0})\n",
    "\n",
    "    # Write the CSV once after all categories are collected (the original\n",
    "    # rebuilt and rewrote labels.csv inside the loop on every iteration)\n",
    "    df = pd.DataFrame(csv_data)\n",
    "    df.to_csv(\"labels.csv\", index=False)\n",
    "\n",
    "\n",
    "generate_dataset()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Optional: Compress Images\n",
    "\n",
    "- Install [`pngquant`](https://pngquant.org/).\n",
    "- Run the following commands to compress the images:\n",
    "    ```bash\n",
    "    cd dataset\n",
    "    pngquant --quality=65-80 --force --ext=.png **/*.png\n",
    "    ```\n",
    "\n",
    "## Optional: Build a tarball\n",
    "\n",
    "```bash\n",
    "tar -czf multimodal-vision-finetuning.tar.gz dataset/ labels.csv\n",
    "```"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": ".venv",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
