{
 "metadata": {
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.2-final"
  },
  "orig_nbformat": 2,
  "kernelspec": {
   "name": "python39164bit3e81fb3f53ff41368dd31efd247a2ba2",
   "display_name": "Python 3.9.1 64-bit"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2,
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import requests\n",
    "from lxml import etree\n",
    "import re\n",
    "import urllib\n",
    "import os\n",
    "import zipfile\n",
    "import pandas as pd\n",
    "from sqlalchemy import create_engine\n",
    "import threading as thi"
   ]
  },
  {
   "source": [
     "## 解压函数和状态写入函数 (Unzip helper and status-writing functions)"
   ],
   "cell_type": "markdown",
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "def zip_unpack(projectname):\n",
    "    try:\n",
    "        os.mkdir('./Project-files')  # 存放解压后的文件\n",
    "    except:\n",
    "        pass\n",
    "\n",
    "    # 拼接文件路径\n",
    "    i = os.path.join('./Projectzipfiles', projectname+'.zip')\n",
    "    try:\n",
    "        zip_filecontents = zipfile.ZipFile(i, 'r')  # 读取压缩文件\n",
    "    except:\n",
    "        print('Unpack failed: %s' % projectname)\n",
    "        return None\n",
    "    # 遍历所有压缩包并查看文件\n",
    "    for file in zip_filecontents.namelist():\n",
    "        try:\n",
    "            files_name = file.encode('cp437').decode('gbk')  # 防止乱码\n",
    "            zip_filecontents.extract(file, './Project-files/%s' % projectname)\n",
    "            file = './Project-files/%s/%s' % (projectname, file)\n",
    "            files_name = './Project-files/%s/%s' % (projectname, files_name)\n",
    "            os.rename(file, files_name)\n",
    "        except:\n",
    "            # uncompress_failure += 1\n",
    "            # print('Fail to uncompress this file: %s'%projectname)\n",
    "            continue\n",
    "    t2 = thi.Thread(target=write_status, args=([projectname]))\n",
    "    t2.setDaemon(False)\n",
    "    t2.start()\n",
    "\n",
    "\n",
    "def write_status(projectname):\n",
    "    if re.match('(\\w){4}', projectname):\n",
    "        status = re.match('(\\w){4}', projectname).group()\n",
    "        # 写入txt\n",
    "        fileswrite = open('./Project-files/%s/status.txt' % projectname, 'w')\n",
    "        fileswrite.write(status)\n",
    "        fileswrite.close()"
   ]
  },
  {
   "source": [
     "## 数据获取 (Data retrieval)"
   ],
   "cell_type": "markdown",
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 创建文件夹以存放招标文件\n",
    "os.mkdir('./Projectzipfiles')\n",
    "\n",
    "# 监控总下载数量\n",
    "download_success = 0\n",
    "download_failure = 0\n",
    "\n",
    "for i in range(1,382):\n",
    "    url = 'http://ecp.sgcc.com.cn/ecp1.0/project_list.jsp?site=global&column_code=014001001&project_type=1&company_id=&status=&project_name=&pageNo=%d'%i\n",
    "    header = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.82 Safari/537.36'}\n",
    "\n",
    "    # 国家电网不需要担心反爬，所以headers可加可不加\n",
    "    reponse = requests.get(url = url, headers = header) \n",
    "    # 查看返回数据\n",
    "    # reponse.text\n",
    "\n",
    "    '''\n",
    "    http://ecp.sgcc.com.cn/ecp1.0/html/project/014001001/9900000000000075434.html\n",
    "    http://ecp.sgcc.com.cn/ecp1.0/html/project/014001001/9900000000000075383.html\n",
    "    http://ecp.sgcc.com.cn/ecp1.0/html/project/014001001/9900000000000075306.html\n",
    "    http://ecp.sgcc.com.cn/ecp1.0/html/project/014001001/9900000000000075325.html\n",
    "    # 从以上url可以看出，之后最后几位数字不一样，在网页查找中可以看见这些数字属于“a/onclick/showProjectDetail\"\n",
    "    '''\n",
    "\n",
    "    # 用xpath进行提取\n",
    "    xpathtree = etree.HTML(reponse.text)\n",
    "    res = xpathtree.xpath('//tr[@align=\"left\"]/td[@class=\"black40\"]/a')\n",
    "    # print(res, len(res))\n",
    "\n",
    "    # 监控每页下载数量\n",
    "    per_page_succ = 0\n",
    "\n",
    "    # 对res对象进行遍历提取onclick属性\n",
    "    for resi in res:\n",
    "        try:\n",
    "            detail = resi.xpath('./@onclick')[0]\n",
    "            # print(detail)\n",
    "            # 再利用正则表达式进行最后几位数的提取\n",
    "            patterns = 'showProjectDetail\\((.*?),\\'(.*?)\\'\\);'\n",
    "            # 正则表达式中的\"\\\"表示转义，将作为匹配表达式的部分识别的部分转化为纯字符串\n",
    "            # 正则表达式中有两个匹配组，group的作用就是选择所需要的组\n",
    "            num1 = re.search(patterns, detail).group(1)\n",
    "            num2 = re.search(patterns, detail).group(2)\n",
    "            # 正则表达式中的\\'(.*?)\\'已经用转义符将引号去掉了\n",
    "            # print(num2)\n",
    "\n",
    "            # 二级页面提取\n",
    "            url2 = 'http://ecp.sgcc.com.cn/ecp1.0/html/project/014001001/{}.html'.format(num2)\n",
    "            # 或者这样也可：url2 = 'http://ecp.sgcc.com.cn/ecp1.0/html/project/014001001/%s.html'%num2\n",
    "            # print(url2)\n",
    "\n",
    "            reponse2 = requests.get(url = url2, headers = header) \n",
    "            # print(reponse2.text)\n",
    "            xpathtree2 = etree.HTML(reponse2.text)\n",
    "            res2 = xpathtree2.xpath('//tr/td/text()')[1]\n",
    "            \n",
    "            # 项目招标状态\n",
    "            proj_status = re.search('(\\S+.*?)', res2).group(1)\n",
    "            # print(proj_status)\n",
    "\n",
    "            # 项目名称\n",
    "            proj_name = etree.HTML(reponse2.text).xpath('//tr/td[2]/text()')[2]\n",
    "            # print(proj_name)\n",
    "            proj_name = proj_status + '-' + proj_name # 为每个项目对应上招标状态\n",
    "\n",
    "            # 项目招标文件下载\n",
    "            install_part_url = etree.HTML(reponse2.text).xpath('//tr/td/a/@href')[0] # 该步骤获取的url只是下载链接的后半部分\n",
    "            # print(install_part_url)\n",
    "            original_part = 'http://ecp.sgcc.com.cn'\n",
    "            install_url = original_part + install_part_url # 拼接获得完整下载链接\n",
    "            # print(install_url)\n",
    "        except:\n",
    "            print('Oops! Some problems occured in page %d'%i)\n",
    "            continue\n",
    "\n",
    "        # 使用urllib进行下载\n",
    "        # filename如果不变的话，则每一次下载都会默认覆盖前一个下载，所以此处用前面得到的proj_name（20个）作为文件名\n",
    "        try:\n",
    "            urllib.request.urlretrieve(url=install_url, filename='./Projectzipfiles/%s.zip'%proj_name)\n",
    "            download_success += 1\n",
    "            per_page_succ += 1\n",
    "            \n",
    "            t1 = thi.Thread(target=zip_unpack, args=([projectname]))\n",
    "            t1.setDaemon(False)\n",
    "            t1.start()\n",
    "\n",
    "        except:\n",
    "            download_failure += 1\n",
    "            # print(\"This URL couldn't be downloaded: \",url2)\n",
    "            continue # 即使下载失败则直接跳过，继续后面的下载\n",
    "    print('%s files in page %s have been downloaded successfully'%(per_page_succ, i))\n",
    "    print('All files in page %d have been downloaded successfully'%i)\n",
    "    print('%s files have been downloaded totally'%download_success)\n",
    "\n",
    "print('Download finished! Files downloaded successfully: ', download_success)\n",
    "print('Download finished! Fail to download: ', download_failure)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ]
}