{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "26ec378c-1ee6-4873-9b70-89f4bebf97e0",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Requirement already satisfied: requests in e:\\anaconda3-2024.02-1\\lib\\site-packages (2.31.0)\n",
      "Requirement already satisfied: charset-normalizer<4,>=2 in e:\\anaconda3-2024.02-1\\lib\\site-packages (from requests) (2.0.4)\n",
      "Requirement already satisfied: idna<4,>=2.5 in e:\\anaconda3-2024.02-1\\lib\\site-packages (from requests) (3.4)\n",
      "Requirement already satisfied: urllib3<3,>=1.21.1 in e:\\anaconda3-2024.02-1\\lib\\site-packages (from requests) (2.0.7)\n",
      "Requirement already satisfied: certifi>=2017.4.17 in e:\\anaconda3-2024.02-1\\lib\\site-packages (from requests) (2024.2.2)\n",
      "Collecting Workbook\n",
      "  Downloading workbook-1.1.tar.gz (2.0 kB)\n",
      "  Preparing metadata (setup.py): started\n",
      "  Preparing metadata (setup.py): finished with status 'done'\n",
      "Collecting xlutils (from Workbook)\n",
      "  Downloading xlutils-2.0.0-py2.py3-none-any.whl.metadata (3.4 kB)\n",
      "Collecting xlwt (from Workbook)\n",
      "  Downloading xlwt-1.3.0-py2.py3-none-any.whl.metadata (3.5 kB)\n",
      "Collecting xlrd>=0.7.2 (from xlutils->Workbook)\n",
      "  Downloading xlrd-2.0.1-py2.py3-none-any.whl.metadata (3.4 kB)\n",
      "Downloading xlutils-2.0.0-py2.py3-none-any.whl (55 kB)\n",
      "   ---------------------------------------- 0.0/55.1 kB ? eta -:--:--\n",
      "   ------- -------------------------------- 10.2/55.1 kB ? eta -:--:--\n",
      "   ---------------------- ----------------- 30.7/55.1 kB 435.7 kB/s eta 0:00:01\n",
      "   ---------------------------------------- 55.1/55.1 kB 407.4 kB/s eta 0:00:00\n",
      "Downloading xlwt-1.3.0-py2.py3-none-any.whl (99 kB)\n",
      "   ---------------------------------------- 0.0/100.0 kB ? eta -:--:--\n",
      "   ------------------------ --------------- 61.4/100.0 kB 1.7 MB/s eta 0:00:01\n",
      "   ------------------------------------ --- 92.2/100.0 kB 1.7 MB/s eta 0:00:01\n",
      "   -------------------------------------- 100.0/100.0 kB 823.3 kB/s eta 0:00:00\n",
      "Downloading xlrd-2.0.1-py2.py3-none-any.whl (96 kB)\n",
      "   ---------------------------------------- 0.0/96.5 kB ? eta -:--:--\n",
      "   ---------------------------------------- 96.5/96.5 kB 2.7 MB/s eta 0:00:00\n",
      "Building wheels for collected packages: Workbook\n",
      "  Building wheel for Workbook (setup.py): started\n",
      "  Building wheel for Workbook (setup.py): finished with status 'done'\n",
      "  Created wheel for Workbook: filename=workbook-1.1-py3-none-any.whl size=2386 sha256=1fee16dabe2499b02fe371c5d23c30b9b40b132b9fd261c1dd8219efd2abeb2d\n",
      "  Stored in directory: c:\\users\\32691\\appdata\\local\\pip\\cache\\wheels\\4c\\4f\\e8\\82dd05c4dc7da1a5957f7c46118e239e6b0591a83b3e5c42e5\n",
      "Successfully built Workbook\n",
      "Installing collected packages: xlwt, xlrd, xlutils, Workbook\n",
      "Successfully installed Workbook-1.1 xlrd-2.0.1 xlutils-2.0.0 xlwt-1.3.0\n"
     ]
    }
   ],
   "source": [
    "# Install dependencies into this kernel's environment (%pip targets the\n",
    "# active kernel, unlike !pip which may hit a different interpreter).\n",
    "# NOTE: the PyPI package \"Workbook\" is NOT needed -- the Workbook class\n",
    "# used below comes from openpyxl.\n",
    "%pip install requests beautifulsoup4 openpyxl\n",
    "\n",
    "from bs4 import BeautifulSoup\n",
    "import requests\n",
    "import pandas as pd\n",
    "import re\n",
    "from openpyxl import Workbook\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "08cfa59f-cc96-44fb-901f-ba4f2efbc998",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "https://sci.upc.edu.cn/2016/0926/c6899a96638/page.htm\n",
      "https://sci.upc.edu.cn/2019/0513/c14558a206193/page.htm\n",
      "https://www.phb123.com/city/GDP/\n"
     ]
    }
   ],
   "source": [
    "# Read the three target URLs (one per line) from info.txt.\n",
    "# Explicit encoding avoids platform-dependent defaults on Windows.\n",
    "with open('info.txt', 'r', encoding='utf-8') as file:\n",
    "    lines = [line.strip() for line in file if line.strip()]\n",
    "\n",
    "# Fail fast with a clear message instead of an opaque IndexError.\n",
    "assert len(lines) >= 3, f'info.txt must contain 3 URLs, got {len(lines)}'\n",
    "first_url, second_url, third_url = lines[:3]\n",
    "print(first_url)\n",
    "print(second_url)\n",
    "print(third_url)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "2b801479-edc9-4f20-9a61-7c61ac896940",
   "metadata": {},
   "outputs": [],
   "source": [
    "# One output workbook holding three sheets, one per scraped page.\n",
    "workbook = Workbook()\n",
    "second_sheet = workbook.create_sheet(title=\"宋允全\")\n",
    "third_sheet = workbook.create_sheet(title=\"GDP\")\n",
    "# The default (active) sheet stays first in the book; rename it.\n",
    "first_sheet = workbook.active\n",
    "first_sheet.title = \"陈华\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "07be66f5-a4ef-4b06-84b8-98e462e4a678",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Shared desktop User-Agent for all three requests.\n",
    "UA = \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36 Edg/122.0.0.0\"\n",
    "\n",
    "def fetch(url, cookie):\n",
    "    \"\"\"GET `url` with a browser-like header and return a utf-8 decoded Response.\n",
    "\n",
    "    The timeout prevents the notebook from hanging forever on a dead host.\n",
    "    \"\"\"\n",
    "    sess = requests.session()\n",
    "    response = sess.get(url, headers={\"User-Agent\": UA, \"Cookie\": cookie}, timeout=30)\n",
    "    response.encoding = 'utf-8'  # force decoding; these pages are UTF-8\n",
    "    return response\n",
    "\n",
    "# NOTE(review): hard-coded session cookies expire and should not be committed;\n",
    "# consider loading them from a config file or environment variable.\n",
    "first_response = fetch(first_url, \"JSESSIONID=29DB1B37459E74837C14147FC7C34F3E\")\n",
    "second_response = fetch(second_url, \"JSESSIONID=F11B0C675DCCCDBBC24F837A5577F6E4\")\n",
    "third_response = fetch(third_url, \"Hm_lvt_bbb87091aa9f5e84eda2a6042eb4842b=1712672452; Hm_lpvt_bbb87091aa9f5e84eda2a6042eb4842b=1712674938; Hm_lvt_e6412cfc059a1c0a7d30b1915c762c86=1712672452; Hm_lpvt_e6412cfc059a1c0a7d30b1915c762c86=1712674938\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2f874f93-8a68-44a4-9e0f-1e06a95e9823",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Verify all three pages loaded. Raising (instead of exit(), which does not\n",
    "# reliably stop later cells in Jupyter) aborts execution with a clear error.\n",
    "for name, resp in [('first', first_response), ('second', second_response), ('third', third_response)]:\n",
    "    if resp.status_code != 200:\n",
    "        raise RuntimeError(f'Failed to get the {name} page: {resp.status_code}')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0e02391b-bbb4-4f12-bd42-07ef93fab0db",
   "metadata": {},
   "outputs": [],
   "source": [
    "# --- Page 1: copy the 'MsoNormalTable' table into the first sheet ---\n",
    "first_soup = BeautifulSoup(first_response.text, 'html.parser')\n",
    "\n",
    "# find() returns None when the table is absent; fail with a clear message\n",
    "# instead of a cryptic AttributeError on the later find_all().\n",
    "specific_table = first_soup.find('table', class_='MsoNormalTable')\n",
    "if specific_table is None:\n",
    "    raise ValueError('Table with class MsoNormalTable not found on page 1')\n",
    "\n",
    "# Write every <td>/<th> cell's text into the sheet, preserving the grid\n",
    "# layout (openpyxl rows/columns are 1-based, matching enumerate(start=1)).\n",
    "for first_row_index, first_tr in enumerate(specific_table.find_all('tr'), start=1):\n",
    "    for first_col_index, first_td in enumerate(first_tr.find_all(['td', 'th']), start=1):\n",
    "        first_sheet.cell(row=first_row_index, column=first_col_index, value=first_td.get_text())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d15a579b-368f-40bf-8d70-82597944fe87",
   "metadata": {},
   "outputs": [],
   "source": [
    "# --- Page 2: copy the 'article' table into the second sheet ---\n",
    "second_soup = BeautifulSoup(second_response.text, 'html.parser')\n",
    "\n",
    "# find() returns None when the table is absent; fail with a clear message\n",
    "# instead of a cryptic AttributeError on the later find_all().\n",
    "second_specific_table = second_soup.find('table', class_='article')\n",
    "if second_specific_table is None:\n",
    "    raise ValueError('Table with class article not found on page 2')\n",
    "\n",
    "# Write every <td>/<th> cell's text into the sheet, preserving the grid.\n",
    "for second_row_index, second_tr in enumerate(second_specific_table.find_all('tr'), start=1):\n",
    "    for second_col_index, second_td in enumerate(second_tr.find_all(['td', 'th']), start=1):\n",
    "        second_sheet.cell(row=second_row_index, column=second_col_index, value=second_td.get_text())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "23ee003a-d8c1-4014-8ecc-54fa7a40306b",
   "metadata": {},
   "outputs": [],
   "source": [
    "# --- Page 3: copy the 'table-ui' GDP ranking table into the third sheet ---\n",
    "third_soup = BeautifulSoup(third_response.text, 'html.parser')\n",
    "\n",
    "# find() returns None when the table is absent; fail with a clear message\n",
    "# instead of a cryptic AttributeError on the later find_all().\n",
    "third_specific_table = third_soup.find('table', class_='table-ui')\n",
    "if third_specific_table is None:\n",
    "    raise ValueError('Table with class table-ui not found on page 3')\n",
    "\n",
    "# Write every <td>/<th> cell's text into the sheet, preserving the grid.\n",
    "for third_row_index, third_tr in enumerate(third_specific_table.find_all('tr'), start=1):\n",
    "    for third_col_index, third_td in enumerate(third_tr.find_all(['td', 'th']), start=1):\n",
    "        third_sheet.cell(row=third_row_index, column=third_col_index, value=third_td.get_text())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "bf0cd2fc-1e60-40c2-abd6-3211c119bb53",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Persist all three sheets to disk; reuse the variable so the filename\n",
    "# is defined in exactly one place.\n",
    "excel_file = 'write.xlsx'\n",
    "workbook.save(excel_file)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f41355a0-ffdd-409c-8fc3-bb8396305450",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Confirm where the scraped tables were written (uses `excel_file` from the save cell).\n",
    "print(f'Data has been written to {excel_file}')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "38e2f402-257a-4176-9b98-e56c9667f00e",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "677a39dd-9c58-4dbe-a9c0-cf18355d9631",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
