{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": false
   },
   "outputs": [],
   "source": [
    "# -*- coding: utf-8 -*-\n",
    "import os\n",
    "import time\n",
    "import datetime\n",
    "from bs4 import BeautifulSoup\n",
    "from fake_useragent import UserAgent\n",
    "from selenium import webdriver\n",
    "import pandas as pd\n",
    "import re\n",
    "import requests\n",
    "import threading\n",
    "import random\n",
    "import pymysql\n",
    "from sqlalchemy import create_engine\n",
    "from toolz import apply\n",
    "from fake_useragent import UserAgent\n",
    "import string\n",
    "import zipfile\n",
    "# Abuyun proxy server (tunnel endpoint).\n",
    "# NOTE(review): hardcoded credentials below — move them to environment\n",
    "# variables or a secrets store before sharing this notebook.\n",
    "proxyHost = \"http-cla.abuyun.com\"\n",
    "proxyPort = \"9030\"\n",
    "\n",
    "# Proxy tunnel authentication credentials.\n",
    "proxyUser = \"H48H4ND4877422QC\"\n",
    "proxyPass = \"39C66075BCF8B818\"\n",
    "    \n",
    "#location = os.getcwd() + '/fake_useragent.json'\n",
    "ua = UserAgent()\n",
    "# Build the request headers; fake_useragent supplies a random User-Agent.\n",
    "headers = {\n",
    "    'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',\n",
    "    'accept-encoding': 'gzip, deflate, br',\n",
    "    'accept-language': 'zh-CN,zh;q=0.9',\n",
    "    # Fixed: this header was written as 'cache - control': 'max - age = 0';\n",
    "    # the stray spaces make it an invalid HTTP header that servers ignore.\n",
    "    'cache-control': 'max-age=0',\n",
    "    'referer': 'https://www.zhipin.com/',\n",
    "    'sec-fetch-mode': 'navigate',\n",
    "    'sec-fetch-site': 'same-origin',\n",
    "    'sec-fetch-user': '?1',\n",
    "    'upgrade-insecure-requests': '1',\n",
    "    'user-agent': ua.random,\n",
    "    'X-Requested-With': 'XMLHttpRequest'\n",
    "}\n",
    "# Locks serializing access to the shared CSV (mutex1) and log file (mutex2).\n",
    "mutex1 = threading.Lock()\n",
    "mutex2 = threading.Lock()\n",
    "# NOTE(review): hardcoded absolute local paths — parameterize (e.g. via a\n",
    "# DATA_DIR constant and pathlib) before running on another machine.\n",
    "data_out_path = 'C:/Users/xuzhen/boss_py/boss_position_detail.csv'\n",
    "log_out_path = 'C:/Users/xuzhen/boss_py/boss_spider_log.txt'\n",
    "url_in_path = 'C:/Users/xuzhen/boss_py/new_list.txt'\n",
    "executable_path = r'C:\\Program Files (x86)\\Google\\Chrome\\Application\\chromedriver.exe'\n",
    "# Local proxy endpoints (only used if the commented options below are enabled).\n",
    "PROXY = {\n",
    "    'http': 'http://' + '127.0.0.1:1080',\n",
    "    'https': 'https://' + '127.0.0.1:1080'\n",
    "    }\n",
    "# MySQL database name used by the read/write helpers below.\n",
    "db = 'boss'\n",
    "\n",
    "def create_proxy_auth_extension(proxy_host, proxy_port,\n",
    "                               proxy_username, proxy_password,\n",
    "                               scheme='http', plugin_path=None):\n",
    "    \"\"\"Build a Chrome extension (zip) that configures and authenticates\n",
    "    the Abuyun HTTP proxy tunnel.\n",
    "\n",
    "    Returns the path of the generated plugin zip; by default it is written\n",
    "    to D:/ with the credentials embedded in the file name.\n",
    "    \"\"\"\n",
    "    if plugin_path is None:\n",
    "        plugin_path = r'D:/{}_{}@http-cla.abuyun.com_9030.zip'.format(proxy_username, proxy_password)\n",
    "\n",
    "    # Minimal Manifest-V2 extension requesting proxy + webRequest permissions.\n",
    "    manifest_json = \"\"\"\n",
    "    {\n",
    "        \"version\": \"1.0.0\",\n",
    "        \"manifest_version\": 2,\n",
    "        \"name\": \"Abuyun Proxy\",\n",
    "        \"permissions\": [\n",
    "            \"proxy\",\n",
    "            \"tabs\",\n",
    "            \"unlimitedStorage\",\n",
    "            \"storage\",\n",
    "            \"<all_urls>\",\n",
    "            \"webRequest\",\n",
    "            \"webRequestBlocking\"\n",
    "        ],\n",
    "        \"background\": {\n",
    "            \"scripts\": [\"background.js\"]\n",
    "        },\n",
    "        \"minimum_chrome_version\":\"22.0.0\"\n",
    "    }\n",
    "    \"\"\"\n",
    "\n",
    "    # background.js pins a fixed proxy server and answers its authentication\n",
    "    # challenge with the tunnel credentials.\n",
    "    background_js = string.Template(\n",
    "        \"\"\"\n",
    "        var config = {\n",
    "            mode: \"fixed_servers\",\n",
    "            rules: {\n",
    "                singleProxy: {\n",
    "                    scheme: \"${scheme}\",\n",
    "                    host: \"${host}\",\n",
    "                    port: parseInt(${port})\n",
    "                },\n",
    "                bypassList: [\"foobar.com\"]\n",
    "            }\n",
    "          };\n",
    "\n",
    "        chrome.proxy.settings.set({value: config, scope: \"regular\"}, function() {});\n",
    "\n",
    "        function callbackFn(details) {\n",
    "            return {\n",
    "                authCredentials: {\n",
    "                    username: \"${username}\",\n",
    "                    password: \"${password}\"\n",
    "                }\n",
    "            };\n",
    "        }\n",
    "\n",
    "        chrome.webRequest.onAuthRequired.addListener(\n",
    "            callbackFn,\n",
    "            {urls: [\"<all_urls>\"]},\n",
    "            ['blocking']\n",
    "        );\n",
    "        \"\"\"\n",
    "    ).substitute(\n",
    "        host=proxy_host,\n",
    "        port=proxy_port,\n",
    "        username=proxy_username,\n",
    "        password=proxy_password,\n",
    "        scheme=scheme,\n",
    "    )\n",
    "\n",
    "    # Package both files into a zip that Chrome can load as an extension.\n",
    "    with zipfile.ZipFile(plugin_path, 'w') as zp:\n",
    "        zp.writestr(\"manifest.json\", manifest_json)\n",
    "        zp.writestr(\"background.js\", background_js)\n",
    "\n",
    "    return plugin_path\n",
    "\n",
    "# proxy_auth_plugin_path = create_proxy_auth_extension(\n",
    "#     proxy_host=proxyHost,\n",
    "#     proxy_port=proxyPort,\n",
    "#     proxy_username=proxyUser,\n",
    "#     proxy_password=proxyPass)\n",
    "\n",
    "# option = webdriver.ChromeOptions()\n",
    "\n",
    "# option.add_argument(\"--start-maximized\")\n",
    "# option.add_extension(proxy_auth_plugin_path)\n",
    "\n",
    "# driver = webdriver.Chrome(chrome_options=option)\n",
    "\n",
    "# driver.get(\"http://test.abuyun.com\")\n",
    "\n",
    "# Append a DataFrame to the shared local CSV file.\n",
    "def to_local_csv(df, path):\n",
    "    \"\"\"Append ``df`` to the CSV at ``path`` (no header/index, UTF-8 BOM).\n",
    "\n",
    "    The file is shared by several crawler threads, so the write is guarded\n",
    "    by ``mutex1``.  The ``with`` block guarantees the lock is released even\n",
    "    if ``to_csv`` raises (the old acquire/release pair could deadlock).\n",
    "    \"\"\"\n",
    "    with mutex1:\n",
    "        df.to_csv(path, mode='a', index=False, header=False, sep=',', encoding='utf_8_sig')\n",
    "\n",
    "\n",
    "# Minimal homemade log helper used to monitor the crawl.\n",
    "def save_log(text):\n",
    "    \"\"\"Append ``text`` as one line to the shared log file (thread-safe).\"\"\"\n",
    "    # Hold the lock for the whole open/write so the lock is always released,\n",
    "    # even when the file cannot be opened or written (the old code leaked\n",
    "    # the lock on exception).\n",
    "    with mutex2:\n",
    "        with open(log_out_path, 'a', encoding='UTF-8') as f:\n",
    "            f.write(text + '\\n')\n",
    "\n",
    "\n",
    "# Produce the list of index-page URLs to crawl.\n",
    "def read_url():\n",
    "    \"\"\"Return the crawl worklist; URLs are sourced from MySQL (an earlier\n",
    "    version read them from the text file at ``url_in_path``).\"\"\"\n",
    "    return read_mysql(db)\n",
    "\n",
    "# url_list: URLs to crawl; split_num: number of worker threads.\n",
    "# No stable proxy pool, so keep the thread count small (5 was stable).\n",
    "def split_url(url_list, split_num):\n",
    "    \"\"\"Partition ``url_list`` into ``split_num`` chunks, crawl each chunk in\n",
    "    its own thread, and block until every worker finishes.\"\"\"\n",
    "    thread_list = []\n",
    "    # Ceiling division: how many URLs each thread must handle.\n",
    "    list_size = (len(url_list) // split_num) if (len(url_list) % split_num == 0) else ((len(url_list) // split_num) + 1)\n",
    "    for i in range(split_num):\n",
    "        # Slice out this worker's share of the URLs.\n",
    "        url_list_split = url_list[\n",
    "                         i * list_size:(i + 1) * list_size if len(url_list) > (i + 1) * list_size else len(url_list)]\n",
    "        thread = threading.Thread(target=get_driver, args=(url_list_split,))\n",
    "        # Fixed: ``Thread.setName``/``getName`` are deprecated since Python\n",
    "        # 3.10 — use the ``name`` attribute instead.\n",
    "        thread.name = \"线程\" + str(i)\n",
    "        thread_list.append(thread)\n",
    "        thread.start()\n",
    "        print(thread.name + \"开始运行\")\n",
    "    # Wait for all workers so the caller observes a completed crawl.\n",
    "    for _item in thread_list:\n",
    "        _item.join()\n",
    "\n",
    "\n",
    "    \n",
    "def get_driver(url_list_split):\n",
    "    \"\"\"Worker body for one crawler thread.\n",
    "\n",
    "    Builds a Chrome driver that routes traffic through the Abuyun proxy\n",
    "    plugin, verifies the tunnel with a test request, then crawls every\n",
    "    index-page URL in ``url_list_split``, saving results to CSV and\n",
    "    logging progress.  Closes the driver when the list is exhausted.\n",
    "    \"\"\"\n",
    "    # Abuyun proxy setup: generate the auth plugin for this driver instance.\n",
    "    proxy_auth_plugin_path = create_proxy_auth_extension(\n",
    "    proxy_host=proxyHost,\n",
    "    proxy_port=proxyPort,\n",
    "    proxy_username=proxyUser,\n",
    "    proxy_password=proxyPass)\n",
    "\n",
    "    options = webdriver.ChromeOptions()\n",
    "    # Route Chrome through the Abuyun proxy extension.\n",
    "    #options.add_argument(\"--start-maximized\")\n",
    "    options.add_extension(proxy_auth_plugin_path)\n",
    "    \n",
    "    options.add_argument('--no-sandbox')\n",
    "    # Hide the automation banner so sites are less likely to detect Selenium.\n",
    "    options.add_experimental_option('excludeSwitches', ['enable-automation'])\n",
    "    #options.add_argument(\"--headless\")\n",
    "    #options.add_argument('--proxy-server=socks5://localhost:1080')\n",
    "\n",
    "    \n",
    "    #options.add_argument('--proxy-server=%s' % PROXY)\n",
    "#     chrome = webdriver.Chrome(chrome_options=chrome_options)\n",
    "#     chrome.get(\"http://whatismyipaddress.com\")\n",
    "    driver = webdriver.Chrome(executable_path=executable_path, options=options)\n",
    "    driver.maximize_window()\n",
    "    \n",
    "    #response_test = requests.get('http://httpbin.org/get', proxies={'http': 'http://127.0.0.1:1080','https': 'https://127.0.0.1:1080'})\n",
    "    #print('ip test:',response_test.text)\n",
    "    \n",
    "    # Target page used to verify the proxy tunnel works.\n",
    "    targetUrl = \"http://test.abuyun.com\"\n",
    "    #targetUrl = \"http://proxy.abuyun.com/switch-ip\"\n",
    "    #targetUrl = \"http://proxy.abuyun.com/current-ip\"\n",
    "\n",
    "#     # proxy server (example placeholders)\n",
    "#     proxyHost = \"http-cla.abuyun.com\"\n",
    "#     proxyPort = \"9030\"\n",
    "\n",
    "#     # proxy tunnel credentials (example placeholders)\n",
    "#     proxyUser = \"H01234567890123C\"\n",
    "#     proxyPass = \"0123456789012345\"\n",
    "\n",
    "    proxyMeta = \"http://%(user)s:%(pass)s@%(host)s:%(port)s\" % {\n",
    "      \"host\" : proxyHost,\n",
    "      \"port\" : proxyPort,\n",
    "      \"user\" : proxyUser,\n",
    "      \"pass\" : proxyPass,\n",
    "    }\n",
    "\n",
    "    proxies = {\n",
    "        \"http\"  : proxyMeta,\n",
    "        \"https\" : proxyMeta,\n",
    "    }\n",
    "\n",
    "    # NOTE(review): this verification request runs once per worker thread\n",
    "    # and goes through requests (not the browser).\n",
    "    resp = requests.get(targetUrl, proxies=proxies)\n",
    "\n",
    "    print(resp.status_code)\n",
    "    print(resp.text)         \n",
    "    \n",
    "    \n",
    "    time.sleep(random.random()*5)\n",
    "    # Current thread name, used to monitor worker progress.\n",
    "    thr_name = threading.current_thread().name\n",
    "    count = len(url_list_split)\n",
    "    num = 0\n",
    "    for position_url in url_list_split:\n",
    "        num += 1\n",
    "        page_url = position_url \n",
    "        print('page_url:', page_url)\n",
    "        try:\n",
    "            # Crawl the index page and all of its job detail pages.\n",
    "            time.sleep(random.random()*5)\n",
    "            position_detail_list = get_detail(driver, page_url,[])\n",
    "            #print(position_detail_list)\n",
    "            # Convert the collected list into a DataFrame.\n",
    "            \n",
    "            df = pd.DataFrame(position_detail_list)\n",
    "#             print('position_detail_list:')\n",
    "#             print(df)\n",
    "            # Persist this page's results to the shared CSV.\n",
    "            to_local_csv(df, data_out_path)\n",
    "            print(position_url + '保存成功  ' + thr_name + '共需处理' + str(count) + '个网址,已保存' + str(num) + '个。')\n",
    "            save_log(position_url + '保存成功  ' + thr_name + '  ' + datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"))\n",
    "        except Exception as e:\n",
    "            print(position_url + '保存失败\\n' + str(e))\n",
    "            failed_to_sql(page_url)\n",
    "            save_log(position_url + '保存失败\\n' + str(e))\n",
    "    # All URLs processed: release the browser.\n",
    "    driver.close()\n",
    "    print(thr_name + \"执行完毕\")\n",
    "\n",
    "\n",
    "# Core routine: load one index page, then visit and parse each job on it.\n",
    "def get_detail(driver, page_url, position_detail_list):\n",
    "    \"\"\"Crawl the index page at ``page_url`` with ``driver``.\n",
    "\n",
    "    Extracts every job entry (li.item) from the page, follows each job's\n",
    "    detail link and parses it via ``parse_one_page``.  Returns the list of\n",
    "    (url, data-index, data-itemid, data-lid, href, ka) tuples, or\n",
    "    ``position_detail_list`` unchanged when the index page fails.\n",
    "    \"\"\"\n",
    "    print('valid page_url: ############\\n', page_url)\n",
    "    print(page_url.strip())\n",
    "    page_url = page_url.strip()\n",
    "    time.sleep(random.random()*5)\n",
    "    \n",
    "    try:\n",
    "        time.sleep(5)\n",
    "        driver.get(page_url)\n",
    "        # Both timeouts must be set for page-load control to take effect.\n",
    "        driver.set_page_load_timeout(10)\n",
    "        driver.set_script_timeout(10)\n",
    "        \n",
    "        time.sleep(3)\n",
    "        bs = BeautifulSoup(driver.page_source, \"html.parser\")\n",
    "        print('*'*100)\n",
    "        job_details = []\n",
    "        job_list = bs.find_all(name='li',attrs={\"class\":\"item\"})\n",
    "\n",
    "        data_index = []\n",
    "        data_itemid = []\n",
    "        data_lid = []\n",
    "        href = []\n",
    "        ka = []\n",
    "        page_num = 0\n",
    "        url_list = []\n",
    "        # Collect the tracking attributes and link of every job list entry.\n",
    "        for i in range(0, len(job_list)):\n",
    "            print('job list details:')\n",
    "            data_index.append(job_list[i].a['data-index'])\n",
    "            data_itemid.append(job_list[i].a['data-itemid'])\n",
    "            data_lid.append(job_list[i].a['data-lid'])\n",
    "            href.append(job_list[i].a['href'])\n",
    "            ka.append(job_list[i].a['ka'])\n",
    "            url_list.append('https://m.zhipin.com' + href[i])\n",
    "            print('data-index{}: {}'.format(i,data_index[i]))\n",
    "            print('data-itemid{}: {}'.format(i,data_itemid[i]))\n",
    "            print('data-lid{}: {}'.format(i,data_lid[i]))\n",
    "            print('href{}: {}'.format(i,href[i]))\n",
    "            print('ka{}: {}'.format(i,ka[i]))\n",
    "            print(page_num)\n",
    "            page_num += 1\n",
    "        job_details = list(zip(url_list, data_index, data_itemid, data_lid, href, ka))\n",
    "        print('#'*100,'job list details done!')\n",
    "        print(job_details)\n",
    "\n",
    "        print('\\nfirst one in job list:')\n",
    "        print(job_list[0])\n",
    "        print('\\nnum of jobs on this page:', len(url_list))\n",
    "\n",
    "        for job_url in url_list:\n",
    "            # Fetch and parse one job detail page.\n",
    "            print('job_url:########\\n',job_url)\n",
    "            job_url = job_url.strip()\n",
    "            print('valid job_url:########\\n',job_url)\n",
    "            \n",
    "            time.sleep(random.random()*5)\n",
    "            driver.get(job_url)\n",
    "            time.sleep(3)\n",
    "            bs_detail = BeautifulSoup(driver.page_source, \"html.parser\")\n",
    "            parse_one_page(bs_detail,job_url)\n",
    "            time.sleep(1)\n",
    "            # Fixed: a driver.close() here closed the browser window after\n",
    "            # the FIRST detail page, making every later driver.get() in this\n",
    "            # loop fail.  The caller (get_driver) owns the driver and closes\n",
    "            # it once the whole URL list is processed.\n",
    "        return(job_details)\n",
    "    except Exception as e:\n",
    "        # Record the failing index page; return the input list unchanged so\n",
    "        # callers still receive a list (old code implicitly returned None).\n",
    "        save_log(page_url + 'index_page保存失败\\n' + str(e))\n",
    "        return position_detail_list\n",
    "    # Reference: https://blog.csdn.net/qq_28053189/article/details/70500266\n",
    "    # 去掉各种括号\n",
    "def clean(s):\n",
    "    s = re.sub(u\"\\\\<.*?\\\\>|\\\\{.*?}|\\\\[.*?]\", \"\", str(s))\n",
    "    return s\n",
    "# Drop newline entries and clean each remaining tag.\n",
    "def reg_newline(tags_):\n",
    "    \"\"\"Return ``tags_`` minus bare newline entries, with ``clean()`` applied\n",
    "    to every surviving element.\"\"\"\n",
    "    tags = []\n",
    "    # Fixed: removed a stray debug ``print(tags)`` that printed an empty\n",
    "    # list on every call and flooded the output.\n",
    "    for i in tags_:\n",
    "        if i != '\\n':\n",
    "            i = clean(i)\n",
    "            tags.append(i)\n",
    "    return tags\n",
    "# 去掉多余空格\n",
    "def remove_spaces(para): \n",
    "    k = ''\n",
    "    for i in para:\n",
    "        if i not in ['\\n','[',']']:\n",
    "            j = i.replace(' ','')\n",
    "            k += j\n",
    "    return k\n",
    "\n",
    "\n",
    "def parse_one_page(bs_detail,job_url):\n",
    "    \"\"\"Parse one job detail page (already loaded into ``bs_detail``).\n",
    "\n",
    "    Extracts salary, title, requirements, tags, update time, HR name, JD,\n",
    "    company and address fields, prints progress for each, writes the row\n",
    "    to MySQL via ``save_in_mysql`` and returns a single-column DataFrame\n",
    "    of the collected values.\n",
    "    \"\"\"\n",
    "    print('#'*100)\n",
    "    # This URL is a job detail page.\n",
    "    print('I\\'m ready to parse every single job!')\n",
    "    print('inspect soup object and navigate through:')\n",
    "    print('\\n')\n",
    "    #file2 = soup2.find_all('div')\n",
    "    soup2= bs_detail\n",
    "    details = soup2.find('h1', {'class' : 'name'})\n",
    "    # Salary range: strip bracketed markup from the first child of the h1.\n",
    "    salary_range = re.sub(u\"\\\\<.*?\\\\>|\\\\{.*?}|\\\\[.*?]\", \"\", str(details.contents[0]))\n",
    "    print(salary_range)\n",
    "    print('Salary range complete! ##############################\\n')\n",
    "\n",
    "    # Job title.\n",
    "    position = details.contents[1]\n",
    "    print(position)\n",
    "    print('Position complete! ##############################\\n')\n",
    "\n",
    "    # Job requirements (location, years of experience, degree).\n",
    "    #soup2.find_all('p')[2].contents\n",
    "    location = soup2.find_all('p')[2].contents[0]\n",
    "    yrs_experience = soup2.find_all('p')[2].contents[2]\n",
    "    degree = soup2.find_all('p')[2].contents[4]\n",
    "\n",
    "    print(location, yrs_experience, degree)\n",
    "    print('Location/year/degree complete! ##############################\\n')\n",
    "\n",
    "    # Skill tags.\n",
    "    # div = soup2.find_all('div')\n",
    "    # type(div)\n",
    "    # print(div[7])\n",
    "\n",
    "\n",
    "    #soup2.find(name='div',attrs={\"class\":\"job-tags\"}).text.replace('\\n',' ')\n",
    "    tags = soup2.find(name='div',attrs={\"class\":\"job-tags\"}).text.replace('\\n',' ').strip()\n",
    "    print(tags)\n",
    "    print('Skills tag complete! ##############################\\n')\n",
    "\n",
    "    # Posting / update time.\n",
    "    updated_time = soup2.find(name='div',attrs={\"class\":\"time\"}).contents[0].split(':')[1].strip()\n",
    "    print(updated_time)\n",
    "    print('Update time complete! ##############################\\n')\n",
    "\n",
    "    # HR name (BeautifulSoup is slower than a regex but more convenient).\n",
    "    HR = soup2.find_all(name='div',attrs={\"class\":\"name\"})[0]# attrs given as a dict\n",
    "    print(HR.contents[0])\n",
    "    HR = HR.contents[0]\n",
    "    print('HR name complete! ##############################\\n')\n",
    "\n",
    "    # JD (job description).\n",
    "    JD = soup2.find_all(name='div',attrs={\"class\":\"text\"})# attrs given as a dict\n",
    "    JD = clean(JD)\n",
    "    JD = remove_spaces(JD)\n",
    "    JD = JD.strip()\n",
    "    print(JD)\n",
    "    print('JD complete! ##############################\\n')\n",
    "\n",
    "    # Company introduction.\n",
    "    # Note: this is not the complete text — the full version is on the company\n",
    "    # detail page, which is not worth a second request.\n",
    "    company_intro = soup2.find_all(name='p',attrs={\"class\":\"detail-text show-switch four-lines\"})# attrs given as a dict\n",
    "    company_intro = clean(company_intro[0])\n",
    "    print(company_intro)\n",
    "    print('company_intro complete! ##############################\\n')\n",
    "\n",
    "    business_info = soup2.find('h4')\n",
    "    #business_info = business_info_\n",
    "    # NOTE(review): the result of this strip() chain is discarded — probably\n",
    "    # meant to be assigned; confirm intent.\n",
    "    str(business_info).strip('[').strip(']')\n",
    "    business_info = clean(business_info)\n",
    "    print(business_info)\n",
    "    print('business_info complete! ##############################\\n')\n",
    "\n",
    "    business_detail_ = soup2.find_all(name='div',attrs={\"class\":\"business-detail\"})# attrs given as a dict\n",
    "    business_detail_ = clean(business_detail_)\n",
    "    business_detail = ''\n",
    "    # Replace newlines/brackets with spaces while concatenating characters.\n",
    "    for i in business_detail_:\n",
    "        if i != '\\n' and i != '[' and i != ']':\n",
    "            #i.strip()\n",
    "            business_detail += i\n",
    "        else:\n",
    "            i = ' '\n",
    "            business_detail += i\n",
    "    business_detail = business_detail.strip()\n",
    "    print(business_detail)\n",
    "    print('business_detail complete! ##############################\\n')\n",
    "\n",
    "    # Exact workplace address.\n",
    "    address_ = soup2.find_all(name='div',attrs={\"class\":\"location-address\"})# attrs given as a dict\n",
    "    #type(address_)\n",
    "    address = address_[0]\n",
    "    address = clean(address)\n",
    "    print(address)\n",
    "    print('address complete! ##############################\\n')\n",
    "\n",
    "    # Collect every field of this job posting into one list.\n",
    "    jobs_in_one_page = [job_url, salary_range, position, location, yrs_experience, degree, tags, updated_time, JD,\\\n",
    "                            company_intro, business_info, business_detail, address]\n",
    "    columns=['job_url', 'salary_range', 'position', 'location', 'yrs_experience',\\\n",
    "            'degree', 'tags', 'updated_time', 'JD', 'company_intro', 'business_info',\\\n",
    "            'business_detail', 'address']\n",
    "    #job2df = list(zip(columns, jobs_in_one_page))\n",
    "    print('jobs_in_one_page:\\n', jobs_in_one_page)\n",
    "    # NOTE(review): DataFrame(list) yields a single-column frame (one field\n",
    "    # per row) — used only for printing; the MySQL insert persists the row.\n",
    "    tbl = pd.DataFrame(jobs_in_one_page)\n",
    "    print('DataFrame'*10)\n",
    "    print(tbl)\n",
    "    print('!'*100)\n",
    "    #write_to_sql(tbl)\n",
    "    save_in_mysql(db,job_url, salary_range, position, location, yrs_experience, degree, tags, updated_time, JD, company_intro, business_info, business_detail, address)\n",
    "    return(tbl)\n",
    "\n",
    "def read_mysql(db):\n",
    "    \"\"\"Read the list-page URLs to crawl from MySQL.\n",
    "\n",
    "    Only page-1 index URLs are selected so that non-existent deeper pages\n",
    "    are skipped during testing.  Returns a Series of ``main_url`` values.\n",
    "    \"\"\"\n",
    "    # NOTE(review): hardcoded credentials — move to env vars/config.\n",
    "    conn = pymysql.connect(\n",
    "        host='localhost',\n",
    "        user='root',\n",
    "        password='root@SQL',\n",
    "        port=3306,\n",
    "        charset = 'utf8',  \n",
    "        db = 'boss')\n",
    "    sql = \"SELECT `main_url` FROM index_url WHERE main_url LIKE '%page=1'\"\n",
    "    # Fixed: the old code ran the query twice (cursor.execute + read_sql)\n",
    "    # and passed ``columns=``, which read_sql ignores for raw SQL queries.\n",
    "    df = pd.read_sql(sql, conn, index_col=None)\n",
    "    # Fixed: close the connection instead of leaking it.\n",
    "    conn.close()\n",
    "    print(df)\n",
    "    return df['main_url']\n",
    "\n",
    "def delete_nul_mysql(page):\n",
    "    \"\"\"Delete a non-existent list-page URL from the ``index_url`` table.\n",
    "\n",
    "    NOTE(review): ``page2del`` is built from pieces of ``page`` (the first\n",
    "    two whitespace-separated parts plus the third character of their\n",
    "    concatenation) — confirm this matches the real URL format.\n",
    "    \"\"\"\n",
    "    conn = pymysql.connect(\n",
    "        host='localhost',\n",
    "        user='root',\n",
    "        password='root@SQL',\n",
    "        port=3306,\n",
    "        charset = 'utf8',  \n",
    "        db = 'boss')\n",
    "    cursor = conn.cursor()\n",
    "    page1 = page.split()\n",
    "    page2 = page1[0] + page1[1] \n",
    "    page_num = page2[2]\n",
    "    page2del = page2 + page_num\n",
    "    # Fixed: the old statement compared main_url with the literal string\n",
    "    # 'page2del', so it never matched anything; use a parameterized query\n",
    "    # (also avoids SQL injection).\n",
    "    sql = 'DELETE FROM index_url WHERE main_url = %s'\n",
    "    cursor.execute(sql, (page2del,))\n",
    "    conn.commit()\n",
    "    # Fixed: close the connection instead of leaking it.\n",
    "    conn.close()\n",
    "\n",
    "def write_to_sql(tbl):\n",
    "    \"\"\"Append ``tbl`` to the ``zhipin`` table via SQLAlchemy.\n",
    "\n",
    "    'append' adds to the existing table, which must already have a header.\n",
    "    \"\"\"\n",
    "    from urllib.parse import quote_plus\n",
    "    db = 'boss'\n",
    "    # Fixed: the password 'root@SQL' contains '@', which must be\n",
    "    # percent-encoded or SQLAlchemy parses 'SQL@localhost' as the host.\n",
    "    engine = create_engine('mysql+pymysql://root:{0}@localhost:3306/{1}?charset=utf8'.format(quote_plus('root@SQL'), db))\n",
    "    try:\n",
    "        tbl.to_sql('zhipin',con = engine,if_exists='append',index=True)\n",
    "    except Exception as e:\n",
    "        print(e)    \n",
    "\n",
    "def failed_to_sql(failed_url):\n",
    "    \"\"\"Record a URL that failed to crawl in the ``failed_index`` table.\n",
    "\n",
    "    Errors are printed rather than raised so one bad row does not stop the\n",
    "    crawl.\n",
    "    \"\"\"\n",
    "    try:\n",
    "        conn = pymysql.connect(\n",
    "            host='localhost',\n",
    "            user='root',\n",
    "            password='root@SQL',\n",
    "            port=3306,\n",
    "            charset = 'utf8',  \n",
    "            db = 'boss')\n",
    "        cursor = conn.cursor()\n",
    "        # Parameterized insert; pymysql escapes the value.\n",
    "        sql = \"INSERT INTO failed_index (failed_url) VALUES (%s)\"\n",
    "        cursor.execute(sql, (failed_url,))\n",
    "        # Fixed: without commit() the INSERT was rolled back when the\n",
    "        # connection was garbage-collected, so failures were never saved.\n",
    "        conn.commit()\n",
    "        conn.close()\n",
    "    except Exception as e:\n",
    "        print(e)    \n",
    "        \n",
    "def save_in_mysql(db,job_url, salary_range, position, location, yrs_experience, degree, tags, updated_time, JD, company_intro, business_info, business_detail, address):\n",
    "    \"\"\"Insert one parsed job posting into the ``zhipin`` table.\n",
    "\n",
    "    All field arguments are passed through as query parameters; errors are\n",
    "    printed rather than raised so one bad row does not stop the crawl.\n",
    "    \"\"\"\n",
    "    try:\n",
    "        conn = pymysql.connect(\n",
    "            host='localhost',\n",
    "            user='root',\n",
    "            password='root@SQL',\n",
    "            port=3306,\n",
    "            charset = 'utf8',  \n",
    "            db = 'boss')\n",
    "        cursor = conn.cursor()        \n",
    "        # Fixed: removed an unused SQLAlchemy engine created on every call\n",
    "        # (its URL was also broken by the unescaped '@' in the password).\n",
    "        sql = 'INSERT INTO zhipin(job_url, salary_range, position, location, yrs_experience, degree, tags, updated_time, JD, company_intro, business_info, business_detail, address)\\\n",
    "        VALUES (%s, %s , %s, %s, %s, %s, %s, %s, %s , %s, %s, %s, %s)'   # parameterized INSERT statement\n",
    "        print(sql)\n",
    "        cursor.execute(sql, (job_url, salary_range, position, location, yrs_experience, degree, tags, updated_time, JD, company_intro, business_info, business_detail, address))\n",
    "        conn.commit()\n",
    "        # Fixed: close the connection instead of leaking one per insert.\n",
    "        conn.close()\n",
    "    except Exception as e:\n",
    "        print('sql writing error:')\n",
    "        print(e)\n",
    "\n",
    "def main():\n",
    "    \"\"\"Entry point: fetch the index-page worklist and crawl it.\"\"\"\n",
    "    # Single worker thread; raise with care (no stable proxy pool).\n",
    "    split_url(read_url(), 1)\n",
    "\n",
    "\n",
    "# Script entry: log and print crawl start/end times plus total duration.\n",
    "if __name__ == '__main__':\n",
    "    start = datetime.datetime.now()\n",
    "    save_log('爬取开始：' + start.strftime(\"%Y-%m-%d %H:%M:%S\"))\n",
    "    print('爬取开始：' + start.strftime(\"%Y-%m-%d %H:%M:%S\"))\n",
    "    main()\n",
    "    end = datetime.datetime.now()\n",
    "    save_log('爬取结束：' + end.strftime(\"%Y-%m-%d %H:%M:%S\") + '\\n' + '总共用时' + str((end - start).seconds) + '秒')\n",
    "    print('爬取结束：' + end.strftime(\"%Y-%m-%d %H:%M:%S\"))\n",
    "    print('总共用时' + str((end - start).seconds))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Scratch/prototype cell: parses a locally saved detail page with the same\n",
    "# steps as parse_one_page() in the first cell.  NOTE(review): depends on\n",
    "# clean() and remove_spaces() defined in the first cell (hidden-state\n",
    "# dependency) and on a hardcoded absolute path.\n",
    "import os\n",
    "import time\n",
    "import datetime\n",
    "from bs4 import BeautifulSoup\n",
    "from fake_useragent import UserAgent\n",
    "from selenium import webdriver\n",
    "import pandas as pd\n",
    "import re\n",
    "import threading\n",
    "import random\n",
    "\n",
    "from toolz import apply\n",
    "from fake_useragent import UserAgent\n",
    "\n",
    "# Locally saved copy of a job detail page used as the parsing fixture.\n",
    "page = r'C:\\Users\\xuzhen\\boss_py\\mobile_detail_page_selenium_no_login_jiankewang.html'\n",
    "#page = r'C:\\Users\\xuzhen\\boss_py\\index_response_not_login.html'\n",
    "\n",
    "with open(page, encoding=\"utf-8\") as fp:\n",
    "    soup2 = BeautifulSoup(fp)\n",
    "\n",
    "\n",
    "print('#'*100)\n",
    "# This URL is a job detail page.\n",
    "print('I\\'m ready to parse every single job!\\n')\n",
    "\n",
    "print('\\ninspect soup object and navigate through:')\n",
    "print('\\n')\n",
    "#file2 = soup2.find_all('div')\n",
    "details = soup2.find('h1', {'class' : 'name'})\n",
    "salary_range = re.sub(u\"\\\\<.*?\\\\>|\\\\{.*?}|\\\\[.*?]\", \"\", str(details.contents[0]))\n",
    "print(salary_range)\n",
    "print('Salary range complete! ##############################\\n')\n",
    "\n",
    "# Job title.\n",
    "position = details.contents[1]\n",
    "print(position)\n",
    "print('Position complete! ##############################\\n')\n",
    "\n",
    "# Job requirements (location, years of experience, degree).\n",
    "#soup2.find_all('p')[2].contents\n",
    "location = soup2.find_all('p')[2].contents[0]\n",
    "yrs_experience = soup2.find_all('p')[2].contents[2]\n",
    "degree = soup2.find_all('p')[2].contents[4]\n",
    "\n",
    "print(location, yrs_experience, degree)\n",
    "print('Location/year/degree complete! ##############################\\n')\n",
    "\n",
    "# Skill tags.\n",
    "# div = soup2.find_all('div')\n",
    "# type(div)\n",
    "# print(div[7])\n",
    "\n",
    "\n",
    "#soup2.find(name='div',attrs={\"class\":\"job-tags\"}).text.replace('\\n',' ')\n",
    "tags = soup2.find(name='div',attrs={\"class\":\"job-tags\"}).text.replace('\\n',' ')\n",
    "print(tags)\n",
    "print('Skills tag complete! ##############################\\n')\n",
    "\n",
    "# Posting / update time.\n",
    "updated_time = soup2.find(name='div',attrs={\"class\":\"time\"}).contents[0]\n",
    "print(updated_time)\n",
    "print('Update time complete! ##############################\\n')\n",
    "\n",
    "# HR name (BeautifulSoup is slower than a regex but more convenient).\n",
    "HR = soup2.find_all(name='div',attrs={\"class\":\"name\"})[0]# attrs given as a dict\n",
    "print(HR.contents[0])\n",
    "HR = HR.contents[0]\n",
    "print('HR name complete! ##############################\\n')\n",
    "\n",
    "# JD (job description).\n",
    "JD = soup2.find_all(name='div',attrs={\"class\":\"text\"})# attrs given as a dict\n",
    "JD = clean(JD)\n",
    "JD = remove_spaces(JD)\n",
    "JD = JD.strip()\n",
    "print(JD)\n",
    "print('JD complete! ##############################\\n')\n",
    "\n",
    "# Company introduction.\n",
    "# Note: not the complete text — the full version is on the company detail\n",
    "# page, which is not worth a second request.\n",
    "company_intro = soup2.find_all(name='p',attrs={\"class\":\"detail-text show-switch four-lines\"})# attrs given as a dict\n",
    "company_intro = clean(company_intro[0])\n",
    "print(company_intro)\n",
    "print('company_intro complete! ##############################\\n')\n",
    "\n",
    "business_info = soup2.find('h4')\n",
    "#business_info = business_info_\n",
    "# NOTE(review): this strip() result is discarded — probably meant to be\n",
    "# assigned; confirm intent.\n",
    "str(business_info).strip('[').strip(']')\n",
    "business_info = clean(business_info)\n",
    "print(business_info)\n",
    "print('business_info complete! ##############################\\n')\n",
    "\n",
    "business_detail_ = soup2.find_all(name='div',attrs={\"class\":\"business-detail\"})# attrs given as a dict\n",
    "business_detail_ = clean(business_detail_)\n",
    "business_detail = ''\n",
    "for i in business_detail_:\n",
    "    if i != '\\n' and i != '[' and i != ']':\n",
    "        #i.strip()\n",
    "        business_detail += i\n",
    "    else:\n",
    "        i = ' '\n",
    "        business_detail += i\n",
    "business_detail = business_detail.strip()\n",
    "print(business_detail)\n",
    "print('business_detail complete! ##############################\\n')\n",
    "\n",
    "# Exact workplace address.\n",
    "address_ = soup2.find_all(name='div',attrs={\"class\":\"location-address\"})# attrs given as a dict\n",
    "#type(address_)\n",
    "address = address_[0]\n",
    "address = clean(address)\n",
    "print(address)\n",
    "print('address complete! ##############################\\n')\n",
    "# NOTE(review): bare expression — only a cell's final expression is shown,\n",
    "# so this line has no effect here.\n",
    "soup2\n",
    "\n",
    "# Collect all fields for this job into one list.\n",
    "jobs_in_one_page = [salary_range, position, location, yrs_experience, degree, tags, updated_time, JD,\\\n",
    "                        company_intro, business_info, business_detail, address]\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# -*- coding: utf-8 -*-\n",
    "# Scratch cell: duplicates the first cell's imports and proxy config to test\n",
    "# the Abuyun proxy plugin in isolation.\n",
    "import os\n",
    "import time\n",
    "import datetime\n",
    "from bs4 import BeautifulSoup\n",
    "from fake_useragent import UserAgent\n",
    "from selenium import webdriver\n",
    "import pandas as pd\n",
    "import re\n",
    "import requests\n",
    "import threading\n",
    "import random\n",
    "import pymysql\n",
    "from sqlalchemy import create_engine\n",
    "from toolz import apply\n",
    "from fake_useragent import UserAgent\n",
    "import string\n",
    "import zipfile\n",
    "# Abuyun proxy server endpoint.\n",
    "proxyHost = \"http-cla.abuyun.com\"\n",
    "proxyPort = \"9030\"\n",
    "\n",
    "# Proxy tunnel credentials.  NOTE(review): hardcoded secrets — move to\n",
    "# environment variables before sharing.\n",
    "proxyUser = \"H48H4ND4877422QC\"\n",
    "proxyPass = \"39C66075BCF8B818\"\n",
    "    \n",
    "\n",
    "# NOTE(review): duplicate of create_proxy_auth_extension defined in the\n",
    "# first cell — whichever cell runs last shadows the other definition.\n",
    "def create_proxy_auth_extension(proxy_host, proxy_port,\n",
    "                               proxy_username, proxy_password,\n",
    "                               scheme='http', plugin_path=None):\n",
    "    \"\"\"Build a Chrome extension (zip) that configures and authenticates\n",
    "    the Abuyun HTTP proxy tunnel; returns the plugin zip path.\"\"\"\n",
    "    if plugin_path is None:\n",
    "        plugin_path = r'D:/{}_{}@http-cla.abuyun.com_9030.zip'.format(proxy_username, proxy_password)\n",
    "\n",
    "    # Minimal Manifest-V2 extension requesting proxy + webRequest permissions.\n",
    "    manifest_json = \"\"\"\n",
    "    {\n",
    "        \"version\": \"1.0.0\",\n",
    "        \"manifest_version\": 2,\n",
    "        \"name\": \"Abuyun Proxy\",\n",
    "        \"permissions\": [\n",
    "            \"proxy\",\n",
    "            \"tabs\",\n",
    "            \"unlimitedStorage\",\n",
    "            \"storage\",\n",
    "            \"<all_urls>\",\n",
    "            \"webRequest\",\n",
    "            \"webRequestBlocking\"\n",
    "        ],\n",
    "        \"background\": {\n",
    "            \"scripts\": [\"background.js\"]\n",
    "        },\n",
    "        \"minimum_chrome_version\":\"22.0.0\"\n",
    "    }\n",
    "    \"\"\"\n",
    "\n",
    "    # background.js pins a fixed proxy server and answers its authentication\n",
    "    # challenge with the tunnel credentials.\n",
    "    background_js = string.Template(\n",
    "        \"\"\"\n",
    "        var config = {\n",
    "            mode: \"fixed_servers\",\n",
    "            rules: {\n",
    "                singleProxy: {\n",
    "                    scheme: \"${scheme}\",\n",
    "                    host: \"${host}\",\n",
    "                    port: parseInt(${port})\n",
    "                },\n",
    "                bypassList: [\"foobar.com\"]\n",
    "            }\n",
    "          };\n",
    "\n",
    "        chrome.proxy.settings.set({value: config, scope: \"regular\"}, function() {});\n",
    "\n",
    "        function callbackFn(details) {\n",
    "            return {\n",
    "                authCredentials: {\n",
    "                    username: \"${username}\",\n",
    "                    password: \"${password}\"\n",
    "                }\n",
    "            };\n",
    "        }\n",
    "\n",
    "        chrome.webRequest.onAuthRequired.addListener(\n",
    "            callbackFn,\n",
    "            {urls: [\"<all_urls>\"]},\n",
    "            ['blocking']\n",
    "        );\n",
    "        \"\"\"\n",
    "    ).substitute(\n",
    "        host=proxy_host,\n",
    "        port=proxy_port,\n",
    "        username=proxy_username,\n",
    "        password=proxy_password,\n",
    "        scheme=scheme,\n",
    "    )\n",
    "\n",
    "    # Package both files into a zip that Chrome can load as an extension.\n",
    "    with zipfile.ZipFile(plugin_path, 'w') as zp:\n",
    "        zp.writestr(\"manifest.json\", manifest_json)\n",
    "        zp.writestr(\"background.js\", background_js)\n",
    "\n",
    "    return plugin_path\n",
    "\n",
    "# Smoke test: launch Chrome with the proxy extension and check our IP.\n",
    "proxy_auth_plugin_path = create_proxy_auth_extension(\n",
    "    proxy_host=proxyHost,\n",
    "    proxy_port=proxyPort,\n",
    "    proxy_username=proxyUser,\n",
    "    proxy_password=proxyPass)\n",
    "\n",
    "option = webdriver.ChromeOptions()\n",
    "\n",
    "option.add_argument(\"--start-maximized\")\n",
    "option.add_extension(proxy_auth_plugin_path)\n",
    "\n",
    "# Fixed: the ``chrome_options=`` keyword was deprecated and then removed in\n",
    "# Selenium 4; pass the options object via ``options=`` instead.\n",
    "driver = webdriver.Chrome(options=option)\n",
    "driver.get('https://www.baidu.com/s?wd=ip')\n",
    "\n",
    "#driver.get(\"http://test.abuyun.com\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "pyecharts",
   "language": "python",
   "name": "pyecharts"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.10"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
