{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "ename": "ModuleNotFoundError",
     "evalue": "No module named 'requests_ip_rotator'",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mModuleNotFoundError\u001b[0m                       Traceback (most recent call last)",
      "\u001b[1;32m/Users/jaywang/Documents/Programs/phd/23-supernova/metadata/parse-table.ipynb Cell 1\u001b[0m line \u001b[0;36m1\n\u001b[1;32m     <a href='vscode-notebook-cell:/Users/jaywang/Documents/Programs/phd/23-supernova/metadata/parse-table.ipynb#W0sZmlsZQ%3D%3D?line=10'>11</a>\u001b[0m \u001b[39mfrom\u001b[39;00m \u001b[39mPIL\u001b[39;00m \u001b[39mimport\u001b[39;00m Image\n\u001b[1;32m     <a href='vscode-notebook-cell:/Users/jaywang/Documents/Programs/phd/23-supernova/metadata/parse-table.ipynb#W0sZmlsZQ%3D%3D?line=11'>12</a>\u001b[0m \u001b[39mfrom\u001b[39;00m \u001b[39mtime\u001b[39;00m \u001b[39mimport\u001b[39;00m sleep\n\u001b[0;32m---> <a href='vscode-notebook-cell:/Users/jaywang/Documents/Programs/phd/23-supernova/metadata/parse-table.ipynb#W0sZmlsZQ%3D%3D?line=12'>13</a>\u001b[0m \u001b[39mfrom\u001b[39;00m \u001b[39mrequests_ip_rotator\u001b[39;00m \u001b[39mimport\u001b[39;00m ApiGateway, EXTRA_REGIONS, ALL_REGIONS\n\u001b[1;32m     <a href='vscode-notebook-cell:/Users/jaywang/Documents/Programs/phd/23-supernova/metadata/parse-table.ipynb#W0sZmlsZQ%3D%3D?line=13'>14</a>\u001b[0m \u001b[39mfrom\u001b[39;00m \u001b[39mtqdm\u001b[39;00m \u001b[39mimport\u001b[39;00m tqdm\n",
      "\u001b[0;31mModuleNotFoundError\u001b[0m: No module named 'requests_ip_rotator'"
     ]
    }
   ],
   "source": [
    "import numpy as np\n",
    "import re\n",
    "import requests\n",
    "import io\n",
    "import yaml\n",
    "\n",
    "from glob import glob\n",
    "from os.path import exists, join, basename\n",
    "from bs4 import BeautifulSoup\n",
    "from shutil import copyfileobj, copyfile\n",
    "from PIL import Image\n",
    "from time import sleep\n",
    "from requests_ip_rotator import ApiGateway, EXTRA_REGIONS, ALL_REGIONS\n",
    "from tqdm import tqdm"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Package Thumbnail Screenshot"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 169,
   "metadata": {},
   "outputs": [],
    "source": [
     "# Load the exported HTML table of interesting packages and parse it\n",
     "with open('./Interesting.html', 'r') as fp:\n",
     "    content = fp.read()\n",
     "    soup = BeautifulSoup(content, 'html.parser')\n",
     "\n",
     "# The first <table> holds the package metadata; one <tr> per package\n",
     "table = soup.find('table')\n",
     "rows = table.find_all('tr')\n"
    ]
  },
  {
   "cell_type": "code",
   "execution_count": 171,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Starting API gateways in 10 regions.\n",
      "Using 10 endpoints with name 'https://lh3.googleusercontent.com - IP Rotate API' (10 new).\n"
     ]
    }
   ],
    "source": [
     "# Start rotating API gateway endpoints for the Google image host\n",
     "# (per the cell output: 10 AWS regions; avoids per-IP rate limiting)\n",
     "gateway = ApiGateway(\"https://lh3.googleusercontent.com\")\n",
     "gateway.start()\n",
     "\n",
     "# Route all requests to this host through the rotating gateway\n",
     "session = requests.Session()\n",
     "session.mount(\"https://lh3.googleusercontent.com\", gateway)"
    ]
  },
  {
   "cell_type": "code",
   "execution_count": 188,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Deleting gateways for site 'https://lh3.googleusercontent.com'.\n",
      "Deleted 10 endpoints with for site 'https://lh3.googleusercontent.com'.\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "['tsm1w4s5fl',\n",
       " 'nyneblenn4',\n",
       " 'gczc70jn89',\n",
       " 'tw3a1u6385',\n",
       " 'p0tbo4h6md',\n",
       " 'glmj64pmcl',\n",
       " '6elbecj32k',\n",
       " '60z8214b31',\n",
       " 'co86x63i1a',\n",
       " 'wikndn9lqi']"
      ]
     },
     "execution_count": 188,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
    "source": [
     "# Tear down the gateway endpoints so no cloud resources are left running\n",
     "gateway.shutdown()"
    ]
  },
  {
   "cell_type": "code",
   "execution_count": 189,
   "metadata": {},
   "outputs": [],
   "source": [
    "# for i, row in enumerate(columns):\n",
    "#     print(i, row)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 187,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "105it [00:00, 59586.24it/s]\n"
     ]
    }
   ],
   "source": [
    "existing_files = set([basename(f).replace('.webp', '') for f in glob('./thumbnails/*.webp')])\n",
    "\n",
    "for i, row in tqdm(enumerate(rows)):\n",
    "\n",
    "    columns = row.find_all('td')\n",
    "    if len(columns) < 1:\n",
    "        continue\n",
    "\n",
    "    name = columns[0]\n",
    "    if name is None:\n",
    "        continue\n",
    "\n",
    "    name = name.text.lower().replace(' ', '-')\n",
    "    name = name.split('.')[0]\n",
    "\n",
    "    if name in existing_files or name == 'module' or name == '':\n",
    "        continue\n",
    "\n",
    "    # Remove the size query from image_src\n",
    "    image_src = columns[19].find('img').get('src')\n",
    "    image_src = re.sub(r'^(.*)=w\\d+-h\\d+$', r'\\1', image_src)\n",
    "\n",
    "    # Save the image as webp\n",
    "    response = session.get(image_src, stream=True)\n",
    "\n",
    "    if response.status_code == 200:\n",
    "        image = Image.open(io.BytesIO(response.content))\n",
    "        image_file_path = f'./thumbnails/{name}.webp'\n",
    "        image.save(image_file_path, lossless=False, quality=80)\n",
    "        sleep(0.5)\n",
    "    else:\n",
    "        print(response.status_code, response, name)"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Paper Thumbnail Screenshot"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
    "source": [
     "# Load the exported HTML table of papers; one <tr> per paper\n",
     "with open('./Papers.html', 'r') as fp:\n",
     "    content = fp.read()\n",
     "    soup = BeautifulSoup(content, 'html.parser')\n",
     "\n",
     "table = soup.find('table')\n",
     "rows = table.find_all('tr')"
    ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Starting API gateways in 10 regions.\n",
      "Using 10 endpoints with name 'https://lh3.googleusercontent.com - IP Rotate API' (10 new).\n"
     ]
    }
   ],
   "source": [
    "gateway = ApiGateway(\"https://lh3.googleusercontent.com\")\n",
    "gateway.start()\n",
    "\n",
    "session = requests.Session()\n",
    "session.mount(\"https://lh3.googleusercontent.com\", gateway)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 196,
   "metadata": {},
   "outputs": [],
   "source": [
    "# row = rows[5]\n",
    "# columns = row.find_all('td')\n",
    "# for i, r in enumerate(columns):\n",
    "#     print(i, r)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 67/67 [00:01<00:00, 42.24it/s]\n"
     ]
    }
   ],
   "source": [
    "existing_files = set([basename(f).replace('.webp', '') for f in glob('./thumbnails/*.webp')])\n",
    "\n",
    "for i, row in tqdm(enumerate(rows), total=len(rows)):\n",
    "    columns = row.find_all('td')\n",
    "\n",
    "    if len(columns) < 1:\n",
    "        continue\n",
    "\n",
    "    name = columns[0]\n",
    "    if name is None:\n",
    "        continue\n",
    "\n",
    "    name = name.text.lower().replace(' ', '-')\n",
    "    name = name.split('.')[0]\n",
    "\n",
    "    if name in existing_files or name == 'name' or name == '':\n",
    "        continue\n",
    "\n",
    "    # Remove the size query from image_src\n",
    "    image = columns[20]\n",
    "\n",
    "    if image is None:\n",
    "        continue\n",
    "\n",
    "    image_src = image.find('img').get('src')\n",
    "    image_src = re.sub(r'^(.*)=w\\d+-h\\d+$', r'\\1', image_src)\n",
    "\n",
    "    # Save the image as webp\n",
    "    response = session.get(image_src, stream=True)\n",
    "\n",
    "    if response.status_code == 200:\n",
    "        image = Image.open(io.BytesIO(response.content))\n",
    "        image_file_path = f'./thumbnails/{name}.webp'\n",
    "        image.save(image_file_path, lossless=False, quality=80)\n",
    "        sleep(0.5)\n",
    "    else:\n",
    "        print(response.status_code, response, name)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Deleting gateways for site 'https://lh3.googleusercontent.com'.\n",
      "Deleted 10 endpoints with for site 'https://lh3.googleusercontent.com'.\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "['t5gq2nzgqj',\n",
       " 'pxdvfoojgk',\n",
       " 'snm98cm2b8',\n",
       " 'xqzoehu7bk',\n",
       " 'xpf9r3psp3',\n",
       " 'kjvtedlnal',\n",
       " 'vw3jd29zx9',\n",
       " 'bwggs60n79',\n",
       " 'nxxinxp6wl',\n",
       " 'io9kh261t5']"
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
    "source": [
     "# Tear down the gateway endpoints so no cloud resources are left running\n",
     "gateway.shutdown()"
    ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Parse BibTex"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "225"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "with open('./notebook-va.bib', 'r') as fp:\n",
    "    lines = [l for l in fp.readlines()]\n",
    "\n",
    "bibtex_entries = []\n",
    "cur_entry = ''\n",
    "for line in lines:\n",
    "    if line == '\\n':\n",
    "       bibtex_entries.append(cur_entry)\n",
    "       cur_entry = ''\n",
    "\n",
    "    else:\n",
    "        cur_entry += line\n",
    "\n",
    "bibtex_entries.append(cur_entry)\n",
    "\n",
    "# Create a dictionary that maps bibtex key to the entry\n",
    "bibtex_dict = {}\n",
    "\n",
    "for i, entry in enumerate(bibtex_entries):\n",
    "    first_line = entry.split('\\n')[0]\n",
    "    key = re.sub(r'^@.+{(.+),.*$', r'\\1', first_line)\n",
    "\n",
    "    # Also parse the year\n",
    "    year = int(re.sub(r'^.*(\\d{4})[a-z]?$', r'\\1', key))\n",
    "\n",
    "    bibtex_dict[key] = {\n",
    "        'bibtex': entry,\n",
    "        'year': year\n",
    "    }\n",
    "\n",
    "len(bibtex_entries)"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Parse Meta Data (Packages + Papers)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
    "source": [
     "# Re-parse both exported HTML tables so packages and papers can be merged\n",
     "with open('./Interesting.html', 'r') as fp:\n",
     "    content = fp.read()\n",
     "    soup = BeautifulSoup(content, 'html.parser')\n",
     "\n",
     "package_table = soup.find('table')\n",
     "package_rows = package_table.find_all('tr')\n",
     "\n",
     "with open('./Papers.html', 'r') as fp:\n",
     "    content = fp.read()\n",
     "    soup = BeautifulSoup(content, 'html.parser')\n",
     "\n",
     "paper_table = soup.find('table')\n",
     "paper_rows = paper_table.find_all('tr')\n",
     "\n",
     "def create_new_entry():\n",
     "    \"\"\"Return a blank metadata entry with every field initialized.\"\"\"\n",
     "    return {\n",
     "        'name': '',\n",
     "        'nameDisplay': '',\n",
     "        'githubURL': '',\n",
     "        'paperURL': '',\n",
     "        'otherURLs': [],\n",
     "        'description': '',\n",
     "        'bibtex': '',\n",
     "        'bibtexKey': '',\n",
     "        'sourceType': '',\n",
     "        'releaseYear': 0,\n",
     "        'communication': '',\n",
     "        'materials': [],\n",
     "        'layouts': [],\n",
     "        'supportedNotebooks': [],\n",
     "        'modularity': '',\n",
     "        'user': '',\n",
     "        'implementation': '',\n",
     "    }"
    ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "entries = []\n",
    "\n",
    "# Parse papers first\n",
    "for i, row in tqdm(enumerate(paper_rows), disable=True):\n",
    "\n",
    "    columns = row.find_all('td')\n",
    "    if columns is None or len(columns) == 0:\n",
    "        continue\n",
    "\n",
    "    entry = create_new_entry()\n",
    "    entry['sourceType'] = 'paper'\n",
    "\n",
    "    # Get the package name\n",
    "    name = columns[0]\n",
    "    display_name = name.text.split('.')[0]\n",
    "    lower_name = display_name.lower().replace(' ', '-')\n",
    "\n",
    "    entry['name'] = lower_name\n",
    "    entry['nameDisplay'] = display_name\n",
    "\n",
    "    if lower_name is None or lower_name == '' or lower_name == 'name':\n",
    "        continue\n",
    "\n",
    "    # Get the paper url\n",
    "    tags = columns[2]\n",
    "    tags = tags.find_all('a', recursive=True)\n",
    "    urls = [tag.get('href') for tag in tags]\n",
    "\n",
    "    for url in urls:\n",
    "        if entry['paperURL'] == '':\n",
    "            entry['paperURL'] = url\n",
    "\n",
    "    # Get the description\n",
    "    description = columns[4].text\n",
    "    entry['description'] = description\n",
    "\n",
    "    # Get the github url\n",
    "    tag = columns[5]\n",
    "    tag = tag.find('a', recursive=True)\n",
    "    if tag is not None:\n",
    "        entry['githubURL'] = tag.get('href')\n",
    "\n",
    "    # Get the bibtex\n",
    "    bibtex_key = columns[3].text\n",
    "\n",
    "    try:\n",
    "        entry['bibtex'] = bibtex_dict[bibtex_key]['bibtex']\n",
    "        entry['bibtexKey'] = bibtex_key\n",
    "        entry['releaseYear'] = bibtex_dict[bibtex_key]['year']\n",
    "\n",
    "    except:\n",
    "        print(bibtex_key)\n",
    "\n",
    "    # Get the platform\n",
    "    platforms = [p.replace(' ', '') for p in columns[10].text.split(',')]\n",
    "    entry['supportedNotebooks'] = platforms\n",
    "\n",
    "    # Get the user\n",
    "    user = columns[13].text\n",
    "    entry['user'] = user\n",
    "\n",
    "    # Get the implementation detail\n",
    "    implementation = columns[15].text\n",
    "    entry['implementation'] = implementation\n",
    "\n",
    "    # Get the data types\n",
    "    materials = [p.replace(' ', '') for p in columns[16].text.split(',')]\n",
    "    entry['materials'] = materials\n",
    "\n",
    "    # Get the communication style\n",
    "    communication = columns[17].text\n",
    "    entry['communication'] = communication\n",
    "\n",
    "    # Get the modularity style\n",
    "    modularity = columns[18].text\n",
    "    entry['modularity'] = modularity\n",
    "\n",
    "    # Get the layout style\n",
    "    layout = columns[19].text\n",
    "    entry['layouts'] = [layout]\n",
    "\n",
    "    entries.append(entry)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Duplicate:  interpretml\n",
      "Duplicate:  visual-auditor\n",
      "Duplicate:  pipelineprofiler\n",
      "Duplicate:  vizseq\n",
      "Duplicate:  what-if-tool\n",
      "no bibtex: ToyplotInteractivePlotting2022\n"
     ]
    }
   ],
   "source": [
    "# Prase packages\n",
    "existing_names = set([entry['name'] for entry in entries])\n",
    "\n",
    "for i, row in tqdm(enumerate(package_rows), disable=True):\n",
    "\n",
    "    columns = row.find_all('td')\n",
    "    if columns is None or len(columns) == 0:\n",
    "        continue\n",
    "\n",
    "    entry = create_new_entry()\n",
    "    entry['sourceType'] = 'package'\n",
    "\n",
    "    # Get the package name\n",
    "    name = columns[0]\n",
    "    display_name = name.text.split('.')[0]\n",
    "    display_name = display_name.replace('_', '-')\n",
    "    lower_name = display_name.lower().replace(' ', '-')\n",
    "\n",
    "    # skip duplicates from papers\n",
    "    if lower_name in existing_names:\n",
    "        print('Duplicate: ', lower_name)\n",
    "        continue\n",
    "\n",
    "    entry['name'] = lower_name\n",
    "    entry['nameDisplay'] = display_name\n",
    "\n",
    "    if lower_name is None or lower_name == '' or lower_name == 'module':\n",
    "        continue\n",
    "\n",
    "    # Get the package urls\n",
    "    tags = columns[3]\n",
    "    tags = tags.find_all('a', recursive=True)\n",
    "    urls = [tag.get('href') for tag in tags]\n",
    "\n",
    "    for url in urls:\n",
    "        if 'github.com' in url:\n",
    "            if entry['githubURL'] == '':\n",
    "                entry['githubURL'] = url\n",
    "            else:\n",
    "                entry['otherURLs'].append(url)\n",
    "        else:\n",
    "            entry['otherURLs'].append(url)\n",
    "\n",
    "    # Get the description\n",
    "    description = columns[4].text\n",
    "    entry['description'] = description\n",
    "\n",
    "    # Get the bibtex\n",
    "    bibtex_key = columns[5].text\n",
    "\n",
    "    try:\n",
    "        entry['bibtex'] = bibtex_dict[bibtex_key]['bibtex']\n",
    "        entry['bibtexKey'] = bibtex_key\n",
    "        entry['releaseYear'] = bibtex_dict[bibtex_key]['year']\n",
    "\n",
    "    except:\n",
    "        print('no bibtex:', bibtex_key)\n",
    "\n",
    "    # Get the platform\n",
    "    platforms = [p.replace(' ', '') for p in columns[9].text.split(',')]\n",
    "    entry['supportedNotebooks'] = platforms\n",
    "\n",
    "    # Get the user\n",
    "    user = columns[12].text\n",
    "    entry['user'] = user\n",
    "\n",
    "    # Get the implementation detail\n",
    "    implementation = columns[14].text\n",
    "    entry['implementation'] = implementation\n",
    "\n",
    "    # Get the data types\n",
    "    materials = [p.replace(' ', '') for p in columns[15].text.split(',')]\n",
    "    entry['materials'] = materials\n",
    "\n",
    "    # Get the communication style\n",
    "    communication = columns[16].text\n",
    "    entry['communication'] = communication\n",
    "\n",
    "    # Get the modularity style\n",
    "    modularity = columns[17].text\n",
    "    entry['modularity'] = modularity\n",
    "\n",
    "    # Get the layout style\n",
    "    layout = columns[18].text\n",
    "    entry['layouts'] = [layout]\n",
    "\n",
    "    entries.append(entry)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [],
    "source": [
     "# Write each entry as its own single-item YAML list, separated by blank lines\n",
     "with open('./resources/supernova.yaml', 'w+') as fp:\n",
     "    for entry in entries:\n",
     "        fp.write(yaml.dump([entry]))\n",
     "        fp.write('\\n')"
    ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Generate Table"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "161\n"
     ]
    }
   ],
   "source": [
    "entries = yaml.safe_load(open('./resources/supernova.yaml', 'r'))\n",
    "print(len(entries))\n",
    "entries.sort(key=lambda x: x['name'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [],
   "source": [
    "latex_table = \"\"\n",
    "\n",
    "# icon_map = {\n",
    "#     \"scientist\": \"icon-user-scientist-alt\",\n",
    "#     \"educator\": \"icon-user-student-alt\",\n",
    "#     \"data scientist\": \"icon-user-data-alt\",\n",
    "#     \"monolithic\": \"icon-mono\",\n",
    "#     \"modular\": \"icon-modular\",\n",
    "#     \"on-demand\": \"icon-display-demand\",\n",
    "#     \"always-on\": \"icon-display-always\",\n",
    "#     \"runtime\": \"icon-data-runtime\",\n",
    "#     \"code\": \"icon-data-text\",\n",
    "#     \"external\": \"icon-data-external\",\n",
    "#     \"no\": \"icon-comm-no\",\n",
    "#     \"one-way\": \"icon-comm-one\",\n",
    "#     \"two-way\": \"icon-comm-two\",\n",
    "#     \"jupyter\": \"icon-plat-jupyter\",\n",
    "#     \"jupyter-lab\": \"icon-plat-jupyter-lab\",\n",
    "#     \"lab\": \"icon-plat-lab\",\n",
    "#     \"all\": \"icon-plat-all\",\n",
    "#     \"custom\": \"icon-impl-custom\",\n",
    "#     \"html\": \"icon-impl-html\",\n",
    "#     \"ipywidget\": \"icon-impl-widget\",\n",
    "#     \"extension\": \"icon-impl-extension\",\n",
    "#     \"other-package\": \"icon-impl-package\",\n",
    "#     \"nova\": \"icon-impl-nova\",\n",
    "# }\n",
    "\n",
    "icon_map = {\n",
    "    \"scientist\": \"icon-user-scientist-alt-c\",\n",
    "    \"educator\": \"icon-user-student-alt-c\",\n",
    "    \"data scientist\": \"icon-user-data-alt-c\",\n",
    "    \"monolithic\": \"icon-mono-c\",\n",
    "    \"modular\": \"icon-modular-c\",\n",
    "    \"on-demand\": \"icon-display-demand-c\",\n",
    "    \"always-on\": \"icon-display-always-c\",\n",
    "    \"runtime\": \"icon-data-runtime-c\",\n",
    "    \"code\": \"icon-data-text-c\",\n",
    "    \"external\": \"icon-data-external-c\",\n",
    "    \"no\": \"icon-comm-no-c\",\n",
    "    \"one-way\": \"icon-comm-one-c\",\n",
    "    \"two-way\": \"icon-comm-two-c\",\n",
    "    \"jupyter\": \"icon-plat-jupyter\",\n",
    "    \"jupyter-lab\": \"icon-plat-jupyter-lab\",\n",
    "    \"lab\": \"icon-plat-lab\",\n",
    "    \"all\": \"icon-plat-all\",\n",
    "    \"custom\": \"icon-impl-custom\",\n",
    "    \"html\": \"icon-impl-html\",\n",
    "    \"ipywidget\": \"icon-impl-widget\",\n",
    "    \"extension\": \"icon-impl-extension\",\n",
    "    \"other-package\": \"icon-impl-package\",\n",
    "    \"nova\": \"icon-impl-nova\",\n",
    "}\n",
    "\n",
    "for i, entry in enumerate(entries):\n",
    "    cur_row = \"\"\n",
    "\n",
    "    if i % 2 == 1:\n",
    "        cur_row += \"\\\\rowcolor{tablerowcolor}\\n\"\n",
    "\n",
    "    # Name\n",
    "    cur_row += f'{entry[\"nameDisplay\"]}~\\citeapp{{{entry[\"bibtexKey\"]}}} & '\n",
    "\n",
    "    # User\n",
    "    user = entry[\"user\"]\n",
    "    cur_row += f\"\\inlinetablefig{{{icon_map[user]}}} & \"\n",
    "\n",
    "    # Communication\n",
    "    comm = entry[\"communication\"]\n",
    "    cur_row += f\"\\inlinetablefig{{{icon_map[comm]}}} & \"\n",
    "\n",
    "    # Data\n",
    "    for data in entry[\"materials\"]:\n",
    "        cur_row += f\"\\inlinetablefig{{{icon_map[data]}}} \"\n",
    "    cur_row += \"&\"\n",
    "\n",
    "    # Context\n",
    "    layout = entry[\"layouts\"][0]\n",
    "    cur_row += f\"\\inlinetablefig{{{icon_map[layout]}}} & \"\n",
    "\n",
    "    # Modularity\n",
    "    modularity = entry[\"modularity\"]\n",
    "    cur_row += f\"\\inlinetablefig{{{icon_map[modularity]}}} & \"\n",
    "\n",
    "    # Platforms\n",
    "    platforms = entry[\"supportedNotebooks\"]\n",
    "    if len(platforms) == 3:\n",
    "        cur_row += f'\\inlinetablefig{{{icon_map[\"all\"]}}} & '\n",
    "    elif len(platforms) == 2:\n",
    "        cur_row += f'\\inlinetablefig{{{icon_map[\"jupyter-lab\"]}}} & '\n",
    "    else:\n",
    "        platform = platforms[0]\n",
    "        cur_row += f\"\\inlinetablefig{{{icon_map[platform]}}} & \"\n",
    "\n",
    "    # Implementations\n",
    "    implementation = entry[\"implementation\"]\n",
    "    cur_row += f\"\\inlinetablefig{{{icon_map[implementation]}}} \\\\\\\\ \\n\\n\"\n",
    "\n",
    "    latex_table += cur_row\n",
    "\n",
    "with open('./resources/latex-table.txt', 'w') as fp:\n",
    "    fp.write(latex_table)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "entries = yaml.safe_load(open('./../src/data/supernova.yaml', 'r'))\n",
    "for e in entries:\n",
    "    e['thumbnail'] = e['name']+'.webp'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
    "source": [
     "# Write the updated entries back, one single-item YAML list per entry\n",
     "with open('./../src/data/supernova.yaml', 'w') as fp:\n",
     "    for entry in entries:\n",
     "        fp.write(yaml.dump([entry]))\n",
     "        fp.write('\\n')"
    ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "gam",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.13"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
