{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "anti-FOLR1 antibody safety information pool size 11937\n",
      "anti-FOLR1 antibody adverse events pool size 11939\n",
      "anti-FOLR1 antibody side effects pool size 11939\n",
      "anti-FOLR1 antibody safety assessment pool size 11939\n"
     ]
    }
   ],
   "source": [
    "from duckduckgo_search import DDGS\n",
    "from dotenv import dotenv_values\n",
    "from datetime import datetime\n",
    "import sqlite3\n",
    "import pandas as pd\n",
    "import os\n",
    "import pickle\n",
    "\n",
    "config = dotenv_values(\".env\")\n",
    "conn = sqlite3.connect(config['webdb'])\n",
    "cursor = conn.cursor()\n",
    "\n",
    "# Create the results table if it does not already exist; href is the\n",
    "# PRIMARY KEY so duplicate pages are also rejected at the DB level.\n",
    "cursor.execute('''CREATE TABLE IF NOT EXISTS webpages\n",
    "             (title TEXT, href TEXT PRIMARY KEY, body TEXT, add_date TEXT)''')\n",
    "\n",
    "# Resume support: reload the set of already-seen hrefs if a checkpoint exists.\n",
    "# NOTE(review): pickle.load can execute arbitrary code from untrusted files;\n",
    "# acceptable here only because this checkpoint is written by this notebook.\n",
    "if os.path.exists(config['temp_file_path']):\n",
    "    with open(config['temp_file_path'], 'rb') as f:\n",
    "        href_pool = pickle.load(f)\n",
    "else:\n",
    "    href_pool = set()\n",
    "\n",
    "# Same resume logic for the set of queries already issued.\n",
    "# BUG FIX: the original re-assigned query_pool = set() just below, which\n",
    "# threw away the persisted pool and defeated this resume logic.\n",
    "if os.path.exists(config['query_pool_path']):\n",
    "    with open(config['query_pool_path'], 'rb') as f:\n",
    "        query_pool = pickle.load(f)\n",
    "else:\n",
    "    query_pool = set()\n",
    "\n",
    "target_df = pd.read_csv(config['targetfile'])\n",
    "current_datetime = datetime.now().strftime('%Y-%m-%d')\n",
    "query_templates = pd.read_csv(config['query_template'])['template']\n",
    "\n",
    "# BUG FIX: was target_df.iloc.iterrows() -- .iloc is an indexer and has no\n",
    "# iterrows(); iterate the DataFrame directly.\n",
    "for i, row in target_df.iterrows():\n",
    "    target = row['GENE symbol']\n",
    "    for template in query_templates:\n",
    "        query = template.format(target=target)\n",
    "        # Skip already-processed queries BEFORE hitting the search API;\n",
    "        # the original searched first and wasted a network call per skip.\n",
    "        if query in query_pool:\n",
    "            continue\n",
    "        df = pd.DataFrame(DDGS(proxy=config['proxy']).text(query, max_results=100))\n",
    "        query_pool.add(query)\n",
    "        with open(config['query_pool_path'], 'wb') as f:\n",
    "            pickle.dump(query_pool, f)\n",
    "        # Guard against an empty result set (no 'href' column -> KeyError).\n",
    "        if df.empty or 'href' not in df.columns:\n",
    "            continue\n",
    "        df = df[~df['href'].isin(href_pool)]\n",
    "        df['add_date'] = current_datetime\n",
    "        href_pool.update(set(df['href']))  # remember newly seen hrefs\n",
    "        try:\n",
    "            df.to_sql('webpages', conn, if_exists='append', index=False)\n",
    "        except sqlite3.Error:  # was a bare except:; best-effort on DB errors (e.g. PK collision)\n",
    "            continue\n",
    "        print(query,'pool size',len(href_pool))\n",
    "        # Checkpoint the updated href_pool so an interrupted run can resume.\n",
    "        with open(config['temp_file_path'], 'wb') as f:\n",
    "            pickle.dump(href_pool, f)\n",
    "# Close the database connection\n",
    "conn.close()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "base",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.7"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
