{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import requests\n",
    "from bs4 import BeautifulSoup\n",
    "import json\n",
    "import time\n",
    "import datetime\n",
    "from config import *\n",
    "\n",
    "\n",
    "def get_html(url, data, timeout=60):\n",
    "    '''Fetch a page and return its HTML source.\n",
    "\n",
    "    :param url: URL to request\n",
    "    :param data: query-string parameters for the request\n",
    "    :param timeout: seconds before the request is aborted; without a\n",
    "        timeout, requests can hang forever on a dead connection\n",
    "    :return: response body as text\n",
    "    '''\n",
    "    # ``data`` was previously passed positionally; requests binds the\n",
    "    # second positional argument to ``params``, so make that explicit.\n",
    "    response = requests.get(url, params=data, timeout=timeout)\n",
    "    return response.text\n",
    "\n",
    "\n",
    "def parse_html(html):\n",
    "    '''Parse the court-session table out of one result page.\n",
    "\n",
    "    :param html: HTML source of a search-result page\n",
    "    :return: generator yielding one list of 9 stripped cell texts per row\n",
    "    '''\n",
    "    soup = BeautifulSoup(html, 'lxml')\n",
    "\n",
    "    table = soup.find(\"table\", attrs={\"id\": \"report\"})\n",
    "    # The first <tr> is the header; every following sibling is a data row.\n",
    "    trs = table.find(\"tr\").find_next_siblings()\n",
    "    for tr in trs:\n",
    "        tds = tr.find_all(\"td\")\n",
    "        # Each data row carries 9 columns; collect them in order\n",
    "        # (replaces nine copy-pasted indexing lines).\n",
    "        yield [tds[i].text.strip() for i in range(9)]\n",
    "\n",
    "def write_to_file(content):\n",
    "    '''\n",
    "    :param content:要写入文件的内容\n",
    "    '''\n",
    "    with open(\"result.txt\",'a',encoding=\"utf-8\") as f:\n",
    "        f.write(json.dumps(content,ensure_ascii=False)+\"\\n\")\n",
    "\n",
    "\n",
    "def get_page_nums():\n",
    "    '''Query today's search results once and derive the page count.\n",
    "\n",
    "    The site reports a total record count; results are paginated at 15\n",
    "    records per page, so the count is converted via ceiling division.\n",
    "\n",
    "    :return: total number of result pages for today\n",
    "    '''\n",
    "    base_url = \"http://www.hshfy.sh.cn/shfy/gweb/ktgg_search_content.jsp?\"\n",
    "    # date.today() is equivalent to date.fromtimestamp(time.time()).\n",
    "    date_time = datetime.date.today()\n",
    "    data = {\n",
    "        \"pktrqks\": date_time,\n",
    "        \"ktrqjs\": date_time,\n",
    "    }\n",
    "    # Retry until the anti-crawler \"system busy\" page goes away.\n",
    "    while True:\n",
    "        html = get_html(base_url, data)\n",
    "        soup = BeautifulSoup(html, 'lxml')\n",
    "        if soup.body.text.strip() == \"系统繁忙\":\n",
    "            print(\"系统繁忙，登录太频繁，ip被封锁\")\n",
    "            # ERROR_SLEEP_TIME comes from the star import of config.\n",
    "            time.sleep(ERROR_SLEEP_TIME)\n",
    "        else:\n",
    "            break\n",
    "    res = soup.find(\"div\", attrs={\"class\": \"meneame\"})\n",
    "\n",
    "    # The <strong> tag holds the total record count; 15 records/page.\n",
    "    total_records = int(res.find('strong').text)\n",
    "    page_nums = (total_records + 14) // 15  # ceiling division\n",
    "    print(\"总页数：\", page_nums)\n",
    "    return page_nums\n",
    "\n",
    "\n",
    "def main():\n",
    "    '''Endlessly crawl today's court-session listings.\n",
    "\n",
    "    Each pass re-reads today's date and page count (both change at\n",
    "    midnight), walks every result page, appends the rows to\n",
    "    result.txt, then sleeps before the next pass.\n",
    "    '''\n",
    "    base_url = \"http://www.hshfy.sh.cn/shfy/gweb/ktgg_search_content.jsp?\"\n",
    "    while True:\n",
    "        # Recompute per pass: the date rolls over at midnight and the\n",
    "        # page count differs from day to day (it was previously fetched\n",
    "        # only once, before the loop, and went stale).\n",
    "        page_nums = get_page_nums()\n",
    "        date_time = datetime.date.today()\n",
    "        page_num = 1\n",
    "        data = {\n",
    "            \"pktrqks\": date_time,\n",
    "            \"ktrqjs\": date_time,\n",
    "            \"pagesnum\": page_num\n",
    "        }\n",
    "        while page_num <= page_nums:\n",
    "            print(data)\n",
    "            # Retry this page until the anti-crawler block lifts.\n",
    "            while True:\n",
    "                html = get_html(base_url, data)\n",
    "                soup = BeautifulSoup(html, 'lxml')\n",
    "                if soup.body.text.strip() == \"系统繁忙\":\n",
    "                    print(\"系统繁忙，登录太频繁，ip被封锁\")\n",
    "                    time.sleep(ERROR_SLEEP_TIME)\n",
    "                else:\n",
    "                    break\n",
    "            for row in parse_html(html):\n",
    "                write_to_file(row)\n",
    "            print(\"爬取完第【%s】页,总共【%s】页\" % (page_num, page_nums))\n",
    "            page_num += 1\n",
    "            data[\"pagesnum\"] = page_num\n",
    "            time.sleep(1)  # polite delay between pages\n",
    "        # The inner loop has no break, so its old while/else branch\n",
    "        # always ran; a plain statement is equivalent.\n",
    "        print(\"爬取完毕\")\n",
    "        print(\"开始休眠.......\")\n",
    "        time.sleep(SLEEP_TIME)\n",
    "\n",
    "\n",
    "if __name__ == '__main__':\n",
    "    main()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.1"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
