# -*- coding: UTF-8 -*-
# Project : weeklyTask
# File : weeklyInspection.py
# IDE : PyCharm
# Author : 博科（鑫贝西）田聪
# Date : 2021/10/30 13:17
import copy
import random
import re

import requests
from urllib.parse import urljoin
from lxml import etree
import datetime
import time

import pandas as pd
import numpy as np
from tqdm import tqdm

# One entry per monitored site. Unknown date-format keys are ignored by
# dateFormat(), so a row with a bad key simply counts nothing.
Characters = [
    # Columns: site title, owner name, Baidu index count, home page URL, news page URL,
    # extraction type, list-item xpath, date xpath, date-format key ('null' = use the
    # extracted text verbatim), next-page-link xpath ('' = no pagination).
    ['二氧化碳培养箱', '李晓杰', 0, 'http://pyx.gx-biosensor.com', 'http://pyx.gx-biosensor.com/a/xinwenzixun/', 'xpath',
     '//ul[@class="news"]/li', './span/text()', 'null', '//div[@class="paging"]/a[@class="next"]/@href'],
    ['高芯箱体设备', '李晓杰', 1, 'http://xt.gx-biosensor.com', 'http://xt.gx-biosensor.com/xinwenzhongxin/', 'xpath',
     '//div[@class="new_inside"]/ul/li', './/div[@class="news_time"]/span/text()|.//div[@class="news_time"]/p/text()',
     'xt', '//div[@class="page_list"]/a[last()-1]/@href'],
    ['黄疸仪', '尹玉清', 3, 'http://hdy.gx-biosensor.com', 'http://hdy.gx-biosensor.com/news/', 'xpath',
     '//div[@class="txtimglist"]/div[@class="item"]',
     './/div[@class="time fr"]/span[2]/text()|.//div[@class="time fr"]/span[@class="day"]/text()', 'xt',
     '//div[@class="pglist"]/a[last()-1]/@href'],
    ['实验室', '尹玉清', 9, 'http://sys.gx-biosensor.com', 'http://sys.gx-biosensor.com/xinwen/', 'xpath',
     '//div[@class="w-News-list-in clearfix"]/ul/li', './/div[@class="date"]//text()', 'null',
     '//div[@class="pagination"]/li[last()-1]/a/@href'],
    ['通风柜', '潘晓静', 0, 'http://tfg.gx-biosensor.com', 'http://tfg.gx-biosensor.com/a/xinwen/', 'xpath',
     '//div[@class="list container"]/ul/li', './div[@class="fr"]/span/text()', 'null',
     '//div[@class="pages"]/ul/a[last()-1]/@href'],
    ['康复器械', '张雯雯', 0, 'http://kfcp.gx-biosensor.com', 'http://kfcp.gx-biosensor.com/a/xinwen/', 'xpath',
     '//div[@class="NewsList"]/ul/li', './/span/text()', 'null', '//div[@class="pagea"]/a[last()-1]/@href'],
    ['PCR实验室', '韩鑫鑫', 0, 'http://pcr.gx-biosensor.com', 'http://pcr.gx-biosensor.com/a/xinwenzhongxin/', 'xpath',
     '//ul[@class="list-5"]/li', './/h4/span/text()', 'null', ''],  # no next-page link; revisit later
    ['核酸提取仪', '李浩', 0, 'http://hstqy.gx-biosensor.com', 'http://hstqy.gx-biosensor.com/a/xinwenzhongxin/', 'xpath',
     '//ul[@class="new_inside"]/li', './/p[@class="news_date"]/i/text()|.//p[@class="news_date"]/span/text()', 'xt',
     '//div[@class="page_list"]/a[last()-1]/@href'],  # no next-page link; revisit later
    ['发热门诊', '高悦', 0, 'http://frmz.gx-biosensor.com', 'http://frmz.gx-biosensor.com/html/news/', 'xpath',
     '//ul[@class="news_list"]/li', './p[@class="newslisttime"]/span/text()', 'frmz',
     '//div[@class="page"]/li[last()-1]/a/@href'],
    ['经皮黄疸仪', '高悦', 0, 'http://jphdy.gx-biosensor.com/', 'http://jphdy.gx-biosensor.com/a/news/', 'xpath',
     '//ul[@class="news-list"]/li', './span/text()', 'null', ''],  # no next-page link; revisit later
    ['核酸采样亭', '范春馨', 0, 'http://cyt.gx-biosensor.com', 'http://cyt.gx-biosensor.com/a/xinwen/', 'xpath',
     '//ul[@class="news_list"]/li', './a[@class="venoboxframe"]/@href', 'cyt',
     '//div[@class="fenye"]/ul/a[last()-1]/@href'],
    ['纯水机与超纯水机', '张延浕', 0, 'http://csj.gx-biosensor.com/', 'http://csj.gx-biosensor.com/a/xinwenzhongxin/', 'xpath',
     '//ul[@class="new_inside"]/li', './/p[@class="news_date"]/span/text()|.//p[@class="news_date"]/i/text()', 'xt',
     '//div[@class="page_list"]/a[last()-1]/@href'],
    ['离心机', '孙庆磊', 0, 'http://lxj.gx-biosensor.com/', 'http://lxj.gx-biosensor.com/a/xinwenxinxi/', 'xpath',
     '//div[@class="articleList"]/ul/div', './/div[@class="art_time"]/span/text()', 'lxj',
     '//div[@class="pages"]/li[last()-1]/a/@href'],
    ['移液器', '王铭阳', 0, 'http://yyq.gx-biosensor.com', 'http://yyq.gx-biosensor.com/a/xinwen/', 'xpath',
     '//div[@class="NewsList"]/ul/li', './div[@class="news_title"]/a/@href', 'yyq',
     '//div[@class="pagea"]/a[last()-1]/@href'],
    ['负压隔离舱', '刘金召', 0, 'http://fyglc.gx-biosensor.com/', 'http://fyglc.gx-biosensor.com/a/xinwendongtai/', 'xpath',
     '//div[@class="xzCont"]/ul/li', './h5/a/@href', 'fyqlc', '//div[@class="uls fr"]/a[@class="nextBtn tBtn"]/@href'],
    # no next-page link; revisit later
    ['酶免工作站', '王鹏', 0, 'http://mmgzz.gx-biosensor.com/', 'http://mmgzz.gx-biosensor.com/a/xinwendongtai/', 'xpath',
     '//div[@class="txtimglist"]/div[@class="item"]', './/span[@class="time"]/text()', 'null',
     '//div[@class="pglist"]/li[last()-1]/a/@href'],
    ['灭菌器', '潘闯', 0, 'http://mjq.gx-biosensor.com', 'http://mjq.gx-biosensor.com/a/news/', 'xpath',
     '//ul[@class="wul105"]/li', './/span[@class="data block"]/text()', 'mjg',
     '//div[@class="pagess"]/ul/li[last()-1]/a/@href'],
    ['低温保存箱', '刘东帅', 0, 'http://dwbcx.gx-biosensor.com', 'http://dwbcx.gx-biosensor.com/xinwen/', 'xpath',
     '//ul[@class="news_list"]/li', './/a[@class="venoboxframe"]/@href', 'dwbcx',
     '//div[@class="fenye"]/ul/a[last()-1]/@href'],
    ['高压蒸汽灭菌器', '刘东帅', 0, 'http://gyzqmjq.gx-biosensor.com/', 'http://gyzqmjq.gx-biosensor.com/xwzx/', 'xpath',
     '//ul[@class="right_new"]/li', './span/text()', 'null',
     ''],  # no next-page link; revisit later
    ['化学品安全储存柜', '李彦喜', 0, 'http://ccg.gx-biosensor.com', 'http://ccg.gx-biosensor.com/xwzx/', 'xpath',
     '//ul[@class="right_new"]/li', './/span[@class="news_time"]/text()', 'null',
     '//div[@class="dede_pages"]/ul/li[last()-1]/a/@href'],
    ['空气洁净屏', '陈昊莹', 0, 'http://kqjjp.gx-biosensor.com', 'http://kqjjp.gx-biosensor.com/a/xingyexinwen/', 'xpath',
     '//div[@class="articles"]/div', './/span[@class="article-create"]/text()', 'kqjjp', ''],  # no next-page link; revisit later
    ['空气消毒机', '张恒辉', 0, 'http://kqxdj.gx-biosensor.com', 'http://kqxdj.gx-biosensor.com/a/xinwen/', 'xpath',
     '//div[@class="list"]/ul/li', './/span/text()', 'mjg', '//div[@class="pagination"]/li[last()-1]/a/@href'],
    ['洁净工作台', '张恒辉', 0, 'http://jjgzt.gx-biosensor.com/', 'http://jjgzt.gx-biosensor.com/a/xinwenzhongxin/', 'xpath',
     '//ul[@class="new_inside"]/li', './/p[@class="news_date"]/span/text()|.//p[@class="news_date"]/i/text()', 'xt',
     '//div[@class="page_list"]/a[last()-1]/@href'],  # no next-page link; revisit later
    ['洁净工作台', '王超', 0, 'http://cjgzt.gx-biosensor.com', 'http://cjgzt.gx-biosensor.com/a/xinwen/', 'xpath',
     '//div[@class="NewsList"]/ul/li', './/div[@class="news_title"]/span/text()', 'null',
     '//div[@class="pagea"]/a[last()-1]/@href'],  # no next-page link; revisit later
    ['博科自动洗板机', '田承仟', 0, 'http://blqct.gx-biosensor.com/', 'http://blqct.gx-biosensor.com/a/xinwen/', 'xpath',
     '//div[@class="news"]/ul/li', './/span/text()', 'null', '//div[@class="pagination"]/li[last()-1]/a/@href'],
    ['除颤仪', '王学峰', 0, 'http://ccy.gx-biosensor.com', 'http://ccy.gx-biosensor.com/a/xinwenzhongxin/', 'xpath',
     '//ul[@class="list-5"]/li', './/h4/span/text()', 'null', '//div[@class="mpagination"]/li[last()-1]/a/@href'],
    ['负压隔离舱', '刘倩', 0, 'http://fuyagelicang.gx-biosensor.com', 'http://fuyagelicang.gx-biosensor.com/a/xinwendongtai/',
     'xpath', '//div[@class="news2_list wow_list"]/ul/li',
     './/div[@class="date"]/span/text()|.//div[@class="date"]/label/text()', 'xt', '//a[@class="next"]/@href'],
    ['过滤器更换', '刘倩', 0, 'http://glq.gx-biosensor.com/', 'http://glq.gx-biosensor.com/a/xinwen/', 'xpath',
     '//ul[@class="xw"]/li', './/span/text()', 'null', '//ul[@class="pagelist"]/li[3]/a/@href'],
]
# Pool of mobile User-Agent strings; htmlGet() picks one at random per request
# so the crawl does not present a single fingerprint to the target sites.
USERLIST = [
    'Mozilla/5.0 (Linux; U; Android 8.1.0; zh-cn; BLA-AL00 Build/HUAWEIBLA-AL00) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/57.0.2987.132 MQQBrowser/8.9 Mobile Safari/537.36',

    'Mozilla/5.0 (Linux; Android 8.1; PAR-AL00 Build/HUAWEIPAR-AL00; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/57.0.2987.132 MQQBrowser/6.2 TBS/044304 Mobile Safari/537.36 MicroMessenger/6.7.3.1360(0x26070333) NetType/WIFI Language/zh_CN Process/tools',

    'Mozilla/5.0 (Linux; Android 8.1.0; ALP-AL00 Build/HUAWEIALP-AL00; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/63.0.3239.83 Mobile Safari/537.36 T7/10.13 baiduboxapp/10.13.0.11 (Baidu; P1 8.1.0)',

    'Mozilla/5.0 (Linux; Android 8.1; EML-AL00 Build/HUAWEIEML-AL00; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.143 Crosswalk/24.53.595.0 XWEB/358 MMWEBSDK/23 Mobile Safari/537.36 MicroMessenger/6.7.2.1340(0x2607023A) NetType/4G Language/zh_CN',

    'Mozilla/5.0 (Linux; U; Android 8.0.0; zh-CN; MHA-AL00 Build/HUAWEIMHA-AL00) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/57.0.2987.108 UCBrowser/12.1.4.994 Mobile Safari/537.36',

    'Mozilla/5.0 (Linux; Android 8.0; MHA-AL00 Build/HUAWEIMHA-AL00; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/57.0.2987.132 MQQBrowser/6.2 TBS/044304 Mobile Safari/537.36 MicroMessenger/6.7.3.1360(0x26070333) NetType/NON_NETWORK Language/zh_CN Process/tools',

    'Mozilla/5.0 (Linux; U; Android 8.0.0; zh-CN; MHA-AL00 Build/HUAWEIMHA-AL00) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/40.0.2214.89 UCBrowser/11.6.4.950 UWS/2.11.1.50 Mobile Safari/537.36 AliApp(DingTalk/4.5.8) com.alibaba.android.rimet/10380049 Channel/227200 language/zh-CN',

    'Mozilla/5.0 (Linux; U; Android 8.1.0; zh-CN; EML-AL00 Build/HUAWEIEML-AL00) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/57.0.2987.108 UCBrowser/11.9.4.974 UWS/2.13.1.48 Mobile Safari/537.36 AliApp(DingTalk/4.5.11) com.alibaba.android.rimet/10487439 Channel/227200 language/zh-CN',

    'Mozilla/5.0 (Linux; U; Android 8.1.0; zh-CN; EML-TL00 Build/HUAWEIEML-TL00) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/57.0.2987.108 UCBrowser/11.9.4.974 UWS/2.14.0.13 Mobile Safari/537.36 AliApp(TB/7.10.4) UCBS/2.11.1.1 TTID/227200@taobao_android_7.10.4 WindVane/8.3.0 1080X2244',

    'Mozilla/5.0 (Linux; U; Android 4.1.2; zh-cn; HUAWEI MT1-U06 Build/HuaweiMT1-U06) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30 baiduboxapp/042_2.7.3_diordna_8021_027/IEWAUH_61_2.1.4_60U-1TM+IEWAUH/7300001a/91E050E40679F078E51FD06CD5BF0A43%7C544176010472968/1',

    'Mozilla/5.0 (Linux; Android 8.0; MHA-AL00 Build/HUAWEIMHA-AL00; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/57.0.2987.132 MQQBrowser/6.2 TBS/044304 Mobile Safari/537.36 MicroMessenger/6.7.3.1360(0x26070333) NetType/4G Language/zh_CN Process/tools',
    'Mozilla/5.0 (iPhone; CPU iPhone OS 12_0 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/16A366 MicroMessenger/6.7.3(0x16070321) NetType/WIFI Language/zh_CN',

    'Mozilla/5.0 (iPhone; CPU iPhone OS 12_0 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/16A366 MicroMessenger/6.7.3(0x16070321) NetType/WIFI Language/zh_HK',

    'Mozilla/5.0 (iPhone; CPU iPhone OS 11_2 like Mac OS X) AppleWebKit/604.3.5 (KHTML, like Gecko) Version/11.0 MQQBrowser/8.8.2 Mobile/15B87 Safari/604.1 MttCustomUA/2 QBWebViewType/1 WKType/1',

    'Mozilla/5.0 (iPhone 6s; CPU iPhone OS 11_4_1 like Mac OS X) AppleWebKit/604.3.5 (KHTML, like Gecko) Version/11.0 MQQBrowser/8.3.0 Mobile/15B87 Safari/604.1 MttCustomUA/2 QBWebViewType/1 WKType/1',

    'Mozilla/5.0 (iPhone; CPU iPhone OS 10_1 like Mac OS X) AppleWebKit/602.2.14 (KHTML, like Gecko) Version/10.0 MQQBrowser/8.8.2 Mobile/14B72c Safari/602.1 MttCustomUA/2 QBWebViewType/1 WKType/1',

    'Mozilla/5.0 (iPhone; CPU iPhone OS 11_0_2 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Mobile/15A421 wxwork/2.5.8 MicroMessenger/6.3.22 Language/zh',

    'Mozilla/5.0 (iPhone; CPU iPhone OS 11_4_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15G77 wxwork/2.5.1 MicroMessenger/6.3.22 Language/zh',

    'Mozilla/5.0 (iPhone; CPU iPhone OS 10_1_1 like Mac OS X) AppleWebKit/602.2.14 (KHTML, like Gecko) Version/10.0 MQQBrowser/8.8.2 Mobile/14B100 Safari/602.1 MttCustomUA/2 QBWebViewType/1 WKType/1',

    'Mozilla/5.0 (Linux; U; Android 8.0.0; zh-cn; Mi Note 2 Build/OPR1.170623.032) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/61.0.3163.128 Mobile Safari/537.36 XiaoMi/MiuiBrowser/10.1.1',

    'Mozilla/5.0 (Linux; U; Android 7.0; zh-cn; MI 5s Build/NRD90M) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/61.0.3163.128 Mobile Safari/537.36 XiaoMi/MiuiBrowser/10.2.2',

    'Mozilla/5.0 (Linux; Android 8.0.0; MI 6 Build/OPR1.170623.027; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/63.0.3239.83 Mobile Safari/537.36 T7/10.13 baiduboxapp/10.13.0.11 (Baidu; P1 8.0.0)',

    'Mozilla/5.0 (Linux; U; Android 8.0.0; zh-CN; MI 5 Build/OPR1.170623.032) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/57.0.2987.108 UCBrowser/11.8.9.969 Mobile Safari/537.36',

    'Mozilla/5.0 (Linux; Android 8.0.0; MI 6 Build/OPR1.170623.027) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/62.0.3202.84 Mobile Safari/537.36 Maxthon/3235',

    'Mozilla/5.0 (Linux; U; Android 8.1.0; zh-cn; Mi Note 3 Build/OPM1.171019.019) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/61.0.3163.128 Mobile Safari/537.36 XiaoMi/MiuiBrowser/10.0.2',

    'Mozilla/5.0 (Linux; Android 6.0.1; OPPO A57 Build/MMB29M; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/48.0.2564.116 Mobile Safari/537.36 T7/9.1 baidubrowser/7.18.21.0 (Baidu; P1 6.0.1)',

    'Mozilla/5.0 (Linux; Android 6.0.1; OPPO A57 Build/MMB29M; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/63.0.3239.83 Mobile Safari/537.36 T7/10.13 baiduboxapp/10.13.0.10 (Baidu; P1 6.0.1)',

    'Mozilla/5.0 (Linux; U; Android 8.1.0; zh-CN; vivo Y85 Build/OPM1.171019.011) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/57.0.2987.108 UCBrowser/11.9.6.976 Mobile Safari/537.36',

    'Mozilla/5.0 (Linux; Android 5.1.1; OPPO R9 Plustm A Build/LMY47V; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/63.0.3239.83 Mobile Safari/537.36 T7/10.12 baiduboxapp/10.12.0.12 (Baidu; P1 5.1.1)',

    'Mozilla/5.0 (Linux; Android 7.1.1; OPPO R11 Build/NMF26X; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/63.0.3239.83 Mobile Safari/537.36 T7/10.13 baiduboxapp/10.13.0.11 (Baidu; P1 7.1.1)',

    'Mozilla/5.0 (Linux; Android 5.1.1; vivo X6S A Build/LMY47V; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/57.0.2987.132 MQQBrowser/6.2 TBS/044207 Mobile Safari/537.36 MicroMessenger/6.7.3.1340(0x26070332) NetType/4G Language/zh_CN Process/tools',

    'Mozilla/5.0 (Linux; Android 8.1.0; PACM00 Build/O11019; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/63.0.3239.83 Mobile Safari/537.36 T7/10.13 baiduboxapp/10.13.0.11 (Baidu; P1 8.1.0)',

    'Mozilla/5.0 (Linux; Android 7.1.1; vivo X20A Build/NMF26X; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/57.0.2987.132 MQQBrowser/6.2 TBS/044304 Mobile Safari/537.36 MicroMessenger/6.7.2.1340(0x2607023A) NetType/WIFI Language/zh_CN',

    'Mozilla/5.0 (Linux; Android 8.1.0; vivo Y71A Build/OPM1.171019.011; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/63.0.3239.83 Mobile Safari/537.36 T7/10.13 baiduboxapp/10.13.0.11 (Baidu; P1 8.1.0)',

]


def htmlGet(url, timeout=10):
    """Fetch *url* with a randomly chosen mobile User-Agent.

    :param url: absolute URL to request.
    :param timeout: seconds before the request is aborted. New, defaulted
        parameter: requests has no built-in timeout, so without one a
        single stalled server would hang the whole crawl forever.
    :return: ``requests.Response`` with its encoding set from the body.
    :raises requests.RequestException: on network failure or timeout.
    """
    headers = {
        'User-Agent': random.choice(USERLIST)
    }
    response = requests.get(
        url=url,
        headers=headers,
        timeout=timeout
    )
    # The monitored sites use a mix of encodings; trust content sniffing.
    response.encoding = response.apparent_encoding
    return response


def get_every_day(begin_date, end_date):
    """Build the per-day skeleton for the inspection period.

    :param begin_date: start date, inclusive, as 'YYYY-MM-DD'.
    :param end_date: end date, inclusive, as 'YYYY-MM-DD'.
    :return: tuple of two dicts keyed by 'YYYY-MM-DD' (insertion-ordered
        from begin to end): the first maps each day to 0 (post counters),
        the second maps each day to its Chinese weekday name.
    :raises ValueError: if either date string is not 'YYYY-MM-DD'.
    """
    # %w yields 0 for Sunday, hence the 0 -> 星期天 entry.
    week = {1: "星期一", 2: "星期二", 3: "星期三", 4: "星期四", 5: "星期五", 6: "星期六", 0: "星期天"}
    counts = {}
    weekdays = {}
    day = datetime.datetime.strptime(begin_date, "%Y-%m-%d")
    end = datetime.datetime.strptime(end_date, "%Y-%m-%d")
    while day <= end:
        key = day.strftime("%Y-%m-%d")
        counts[key] = 0
        # Read the weekday directly instead of round-tripping the formatted
        # string through strptime again (the original re-parsed it).
        weekdays[key] = week[int(day.strftime('%w'))]
        day += datetime.timedelta(days=1)
    return counts, weekdays


def dateFormat(l, dateformat, datexpath, _date, url):
    """Extract the publish date of one news-list node and tally it.

    :param l: lxml element for one entry of a site's article list.
    :param dateformat: per-site key selecting how the raw xpath result is
        normalised to match ``_date``'s keys ('null' = use it verbatim;
        an unknown key leaves ``_date`` untouched).
    :param datexpath: xpath, relative to *l*, yielding the date text (or,
        for the detail-page formats, the article link).
    :param _date: {'YYYY-MM-DD': count} dict, updated in place.
    :param url: list-page URL used to resolve relative article links for
        the formats that must fetch the detail page.
    :return: the updated ``_date`` dict.
    """
    # Raw strings: '\d' in a plain literal is an invalid escape sequence
    # (SyntaxWarning on Python 3.12+); compiled once, reused by 5 branches.
    dash_date = re.compile(r'(\d{4}-\d{1,2}-\d{1,2})')
    dt = None
    if dateformat == 'null':
        dt = l.xpath(datexpath)[0]
    elif dateformat == 'kqjjp':
        # Site prints a two-digit year; prepend the century.
        dt = f'20{l.xpath(datexpath)[0]}'
    elif dateformat == 'xt':
        parts = l.xpath(datexpath)  # [day, 'YYYY-MM']
        dt = f'{parts[1]}-{parts[0]}'
    elif dateformat == 'frmz':
        dt = l.xpath(datexpath)[0].replace('/', '-')
    elif dateformat == 'lxj':
        parts = l.xpath(datexpath)
        dt = f'{parts[1]}-{parts[0]}'.replace('/', '-')
    elif dateformat == 'mjg':
        dt = dash_date.findall(l.xpath(datexpath)[0])[0]
    elif dateformat in ('cyt', 'yyq', 'dwbcx', 'fyqlc'):
        # The list page carries no date: follow the article link and pull
        # the date off the detail page (one extra HTTP request per entry).
        detail_xpath = {
            'cyt': '//div[@class="show_t"]/h5/text()',
            'yyq': '//div[@class="news_other"]/text()',
            'dwbcx': '//div[@class="show_t"]/p/text()',
            'fyqlc': '//div[@class="n_tit"]/span[last()]/text()',
        }[dateformat]
        detail_url = urljoin(url, l.xpath(datexpath)[0])
        page = etree.HTML(htmlGet(detail_url).text)
        raw = page.xpath(detail_xpath)[0]
        if dateformat == 'yyq':
            dt = re.findall(r'(\d{4}/\d{1,2}/\d{1,2})', raw)[0].replace('/', '-')
        else:
            dt = dash_date.findall(raw)[0]
    # Single counting step replaces the nine copy-pasted ones; dates outside
    # the inspected period are simply ignored.
    if dt is not None and dt in _date:
        _date[dt] += 1
    return _date


def _collect_dates(page_url, listxpath, datexpath, dateformat, nexturl, _date):
    """Count article dates on *page_url* and every paginated follow-up page.

    Fetches the page, tallies each list entry via :func:`dateFormat`, then
    follows the site's next-page link (``nexturl`` xpath) until it is
    missing or repeats.  Returns the updated counter dict.
    """
    response = htmlGet(page_url)
    HTML = etree.HTML(response.text)
    for node in HTML.xpath(listxpath):
        _date = dateFormat(node, dateformat, datexpath, _date, page_url)
    try:
        if len(nexturl) > 0:
            seen = []
            # Bug fix: relative next-links are now joined against the page
            # they appear on (the duplicated jishuwenzhang branch used to
            # join them against the main news URL).
            next_link = urljoin(page_url, HTML.xpath(nexturl)[0])
            # Stop when the link repeats (last page often links to itself).
            while next_link and next_link not in seen:
                seen.append(next_link)
                response = htmlGet(next_link)
                HTML = etree.HTML(response.text)
                try:
                    next_link = urljoin(page_url, HTML.xpath(nexturl)[0])
                except IndexError:
                    next_link = False
                finally:
                    for node in HTML.xpath(listxpath):
                        _date = dateFormat(node, dateformat, datexpath, _date, page_url)
    except IndexError:
        print(f"出现错误：下一页不存在\r\n出现错误的网址为：{page_url}")
    return _date


def main(begin_date, end_date):
    """Crawl every site in ``Characters``, count news posts per day between
    *begin_date* and *end_date* (both 'YYYY-MM-DD', inclusive), and export
    one row per site to ``YYYYMMDD.xlsx`` in the working directory.
    """
    items = []
    # date: {'YYYY-MM-DD': 0} counters; week: {'YYYY-MM-DD': weekday name}
    date, week = get_every_day(begin_date, end_date)

    for character in tqdm(Characters):
        _date = copy.deepcopy(date)  # fresh per-site counters
        title = character[0]      # site name
        name = character[1]       # person responsible
        baidu = character[2]      # Baidu index count
        index = character[3]      # site home page
        url = character[4]        # news-list URL
        # character[5] is the extraction type (always 'xpath' today); unused.
        listxpath = character[6]  # xpath for the article-list nodes
        datexpath = character[7]  # xpath for the date (or article link)
        dateformat = character[8]  # dateFormat() dispatch key
        nexturl = character[9]    # next-page-link xpath ('' = no pagination)
        item = {
            '网站名称': title,
            '姓名': name,
            '百度收录': baidu,
            '网站网址': index,
        }
        if listxpath:
            _date = _collect_dates(url, listxpath, datexpath, dateformat, nexturl, _date)
        # This one site keeps part of its news under a second section;
        # crawl that too (deduplicates the previously copy-pasted loop).
        if index == 'http://jjgzt.gx-biosensor.com/':
            _date = _collect_dates('http://jjgzt.gx-biosensor.com/a/jishuwenzhang/',
                                   listxpath, datexpath, dateformat, nexturl, _date)

        item.update(_date)
        # One unit of "fine" per day without a post.
        itemadded = {
            '罚款': 0,
            '其他问题': ''
        }
        for day in _date:
            if _date.get(day) == 0:
                itemadded['罚款'] += 1
        item.update(itemadded)
        # Rewrite date keys as '星期X\r\nMM.DD' column headers for the sheet.
        _item = {}
        for key in item:
            if week.get(key):
                newkey = re.findall(r"-(\d{1,2}-\d{1,2})", key)[0].replace("-", ".")
                _item[f'{week.get(key)}\r\n{newkey}'] = item.get(key)
            else:
                _item[key] = item.get(key)

        items.append(_item)
    print('导出完成')
    df = pd.DataFrame(items)
    newdate = datetime.datetime.now().strftime('%Y%m%d')
    # index=False: the deprecated index=0 relied on int-as-bool coercion.
    df.to_excel(f'{newdate}.xlsx', index=False)
    # NOTE(review): appears intended to keep the console window open after
    # the export -- confirm before removing.
    time.sleep(10000)


if __name__ == '__main__':
    # Prompt for the inspection period (inclusive), e.g. 2021-10-25 / 2021-10-31.
    startdate = input('输入开始日期 年-月-日：\r\n')
    enddate = input('输入结束日期 年-月-日：\r\n')
    print('\r\n')
    main(startdate, enddate)
    # NOTE(review): appears intended to keep the console window open after
    # the run finishes -- confirm before removing.
    time.sleep(10000000)
