# coding=utf-8


from lxml.html import tostring
from lxml import etree
import datetime
from selenium import webdriver
import time
import pinyin
import re
import pandas
from sqlalchemy import create_engine
from random import sample, shuffle, randint, choice
from selenium.webdriver import ActionChains
import mysql.connector
import argparse
from qichacha_log import get_logger
import logging
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.common.proxy import Proxy, ProxyType
import fake_useragent
from fix_proxy import tunnels
import requests
import json
import os
from threading import Thread

# ---------------------------------------------------------------------------
# Runtime configuration, derived from this script's own filename.
# ---------------------------------------------------------------------------
selfname = os.path.basename(__file__)
# Debug mode unless the filename contains "production".
debug_mode = False if "production" in selfname else True
# Run the browser headless only in production; show the window when debugging.
config_headless = False if debug_mode else True
# NOTE(review): both branches are the same value — one of them was presumably
# meant to be a separate debug database name; confirm the intent.
db_name_config = "yinda8888" if debug_mode else "yinda8888"
# Whether outbound requests.get calls are routed through a proxy.
proxy_config = True
# Look-back window in days used when checking for recently scraped records.
task_from = 5
# Parsing strategy per table title (drives the branches in
# parse_partical_table): "纯竖向" = header row + data rows,
# "横向成双" = alternating label/value cells on each row, and the
# "横向成双需忽略rowspan" variant additionally drops a leading rowspan cell.
dict_styles = {"工商信息": "横向成双", "企业基本信息": "横向成双", "股权出质": "纯竖向", "企业资产状况信息": "横向成双", "行政许可": "纯竖向", "社保信息": "横向成双需忽略rowspan", "商标信息": "纯竖向", "专利信息": "纯竖向", "软件著作权": "纯竖向", "行政处罚": "纯竖向", "动产抵押": "纯竖向", "网站或网店信息": "纯竖向", "股东（发起人）出资信息": "纯竖向", "股东信息": "纯竖向", "主要人员": "纯竖向", "变更记录": "纯竖向"}
# Category page title -> list of section tables expected on that page.
config_dict_goal = {"法人机构简介": ["内容"], "企业年报": ["企业基本信息", "网站或网店信息", "股东（发起人）出资信息", "企业资产状况信息", "社保信息"], "基本信息": ["工商信息", "股东信息", "主要人员", "变更记录"], "经营状况": ["行政许可", "地块公示", "购地信息", "土地转让"], "经营风险": ["行政处罚", "股权出质", "动产抵押", "土地抵押", "经营异常"], "知识产权": ["商标信息", "专利信息", "软件著作权"]}
# config_dict_goal = {"知识产权": ["商标信息", "专利信息", "软件著作权"]}
# Section title -> the navigation "data-pos" category label it belongs to.
data_pos_goal_pairs = {"内容": " ", "商标信息": "商标信息", "专利信息": "专利信息", "软件著作权": "软件著作权", "行政处罚": "行政处罚", "股权出质": "股权出质", "动产抵押": "动产抵押", "土地抵押": "土地抵押", "经营异常": "经营异常", "工商信息": "工商信息", "股东信息": "股东信息", "主要人员": "主要人员", "变更记录": "变更记录", "企业基本信息": "年度报告", "网站或网店信息": "年度报告", "股东（发起人）出资信息": "年度报告", "企业资产状况信息": "年度报告", "社保信息": "年度报告", "行政许可": "行政许可", "地块公示": "地块公示", "购地信息": "购地信息", "土地转让": "土地转让"}
# from pymongo import MongoClient
# mongo_cli = MongoClient('localhost', 27017)
# mongo_db = mongo_cli.yinda8888


# ---------------------------------------------------------------------------
# Command-line arguments for the MySQL connection (defaults fit a local box).
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser(description='qichacha srawler 命令行使用方法')
parser.add_argument("-db", help="database  默认'{}',没有的话爬虫将新建一个, 使用新的数据库，爬虫就会建新的表".format(db_name_config), default="{}".format(db_name_config), dest="dbname")
parser.add_argument("-host", help="HOST 是 ip , 默认 'localhost'", default="localhost", dest="host")
parser.add_argument("-user", help="USER 是 用户名 , 默认 'root' ", default="root", dest="user")
parser.add_argument("-password", help="PASSWORD 是密码 默认 'root' ", default="root", dest="password")
parser.add_argument("-port", help="PORT 是端口 默认 3306", default=3306, dest="port")

args_cmd = parser.parse_args()
# Bootstrap connection (no schema selected) used only to create the database.
init_con = mysql.connector.connect(host=args_cmd.host, user=args_cmd.user, password=args_cmd.password)
init_cursor = init_con.cursor()
init_cursor.execute('CREATE DATABASE IF NOT EXISTS `{}` CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;'.format(args_cmd.dbname))
"""第一件事情是从数据库里面读出所有列名 覆盖 拼音汉字对照表  读入pandas dataframe
"""

init_cursor.close()
# SQLAlchemy engine used for all subsequent reads and writes.
eng = create_engine('mysql+mysqlconnector://{user}:{password}@{host}:{port}/{dbname}'.format(dbname=args_cmd.dbname, port=args_cmd.port, host=args_cmd.host, password=args_cmd.password, user=args_cmd.user))
# Bookkeeping tables: pinyin<->hanzi column-name map, scrape-target log,
# failure log, name-mismatch log, task queue, task-done list, progress
# tracker and a raw page-source cache.
eng.execute('CREATE TABLE IF NOT EXISTS `pinyinhanziduizhaobiao` (hanzi VARCHAR (50), pinyin VARCHAR(255), xinxileibie VARCHAR(255), biaoming VARCHAR(255), biaoming_pinyin VARCHAR(255)) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;')
eng.execute('CREATE TABLE IF NOT EXISTS `paqumubiaojilubiao` (farenjigouquancheng VARCHAR (50), xinxileibie VARCHAR(255), biaoming VARCHAR(255), nianbaosuoshunianfen VARCHAR(255) DEFAULT "-", biaocaozuogengxinshijian DATETIME DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;')
eng.execute('CREATE TABLE IF NOT EXISTS `paqushibaibiao` (farenjigouquancheng VARCHAR (50), biaoming VARCHAR(255), biaocaozuogengxinshijian DATETIME DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;')
eng.execute('CREATE TABLE IF NOT EXISTS `tijiaomingchengyichangbiao` (tijiaomingcheng VARCHAR (50), sousuojieguo VARCHAR(255), biaocaozuogengxinshijian DATETIME DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;')
eng.execute('CREATE TABLE IF NOT EXISTS `task_qichacha` (farenjigouquancheng VARCHAR (50), biaocaozuogengxinshijian DATETIME DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;')
eng.execute('CREATE TABLE IF NOT EXISTS `task_close` (farenjigouquancheng VARCHAR (50), biaocaozuogengxinshijian DATETIME DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;')
eng.execute('CREATE TABLE IF NOT EXISTS `caijijindubiao` (farenjigouquancheng VARCHAR (50), caijijindu VARCHAR(50) DEFAULT "0", biaocaozuogengxinshijian DATETIME DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;')
eng.execute('CREATE TABLE IF NOT EXISTS `pagesources` (farenjigouquancheng VARCHAR (50), pagetitle  VARCHAR(255), pagesource LONGTEXT, biaocaozuogengxinshijian DATETIME DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;')

# Cached subset of the pinyin/hanzi column-name mapping.
pinyin_char_pairs = pandas.read_sql_table("pinyinhanziduizhaobiao", con=eng, columns=('hanzi', 'pinyin', 'xinxileibie'))

# Full mapping table kept in memory so known column-name pairs are not
# re-inserted (see pandas_insert_table_mysql).
df_piny_char_quadro = pandas.read_sql_table("pinyinhanziduizhaobiao", con=eng)
df_piny_char_quadro.drop_duplicates(inplace=True)

FORMATTER = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
scrawl_bug_logger = get_logger("qichacha")

# Local fake_useragent cache file, so no network fetch is needed at startup.
location = 'fake_useragent_0.1.10.json'

ua = fake_useragent.UserAgent(path=location)

def sleep():
    """Pause for a random 2.5-5.5 seconds to throttle requests."""
    duration = randint(25, 55) * 0.1
    time.sleep(duration)


def switch_window(driv, now):
    """Switch *driv* away from the window handle *now*.

    Walks every open window handle and switches to each one that is not
    the current handle, leaving the driver focused on the last
    non-current window.
    """
    for candidate in driv.window_handles:
        if candidate == now:
            continue
        driv.switch_to_window(candidate)


def strip_st(tex):
    """Normalise whitespace in a scraped cell text.

    Collapses doubled whitespace and doubled newlines, strips trailing
    spaces/colons/newlines, then replaces remaining newlines with spaces
    so the value is safe to insert into the database.
    """
    collapsed = re.sub(r'\n\n', '\n', re.sub(r'\s\s', ' ', tex))
    # drop every trailing space, full-width colon and newline
    trimmed = re.sub(r'[\s：\n]+$', '', collapsed)
    # flatten any surviving newline into a space
    return re.sub(r'\n', ' ', trimmed)


def get_pinyin(strr1):
    """Turn a (Chinese) label into a pinyin identifier, capped at 64 chars.

    Non-word characters are stripped first; an all-symbol input is logged
    before the assert fires. The cap keeps the result usable as part of a
    MySQL column/table name.
    """
    assert len(strr1) > 0
    cleaned = re.sub(r'\W', '', strr1)
    if not cleaned:
        scrawl_bug_logger.info("非法字符，是:++++++`{}`++++++".format(strr1))
    assert len(cleaned) > 0
    romanized = pinyin.get(cleaned, format='strip')
    assert len(romanized) > 0
    if len(romanized) < 64:
        return romanized
    return romanized[:64]


# todo: extraction previously had a bug parsing 企業變更信息 (see fix below)
def extract_text_td(xml):
    """Extract the visible text of a table-cell (or similar) lxml element.

    Serialises the element, collects every text fragment between tags and
    strips whitespace from each. If the cell itself contains a <div>, all
    fragments are joined into one string; otherwise the first fragment is
    returned, or "-" when the cell is empty.
    """
    source_xml = tostring(xml, encoding='utf8', method='html').decode('utf8')
    res = re.findall(r'(?<=>)[^<>]+(?=<)', source_xml)
    res_filtered = [re.sub(r"\s+", "", x) for x in res if not re.match(r"^\s+$", x)]
    # BUG FIX: the original used xpath("//div"), which in lxml is absolute
    # (it searches the whole document, not this element), so ANY div on the
    # page forced the join branch for every cell. ".//div" restricts the
    # test to descendants of this cell, matching the comment's intent.
    if len(xml.xpath(".//div")) > 0:
        # the cell contains a div: return all of its text joined together
        result = "".join(res_filtered)
        return result
    return res_filtered[0] if len(res_filtered) > 0 else "-"


def parse_partical_table(table, co_name, page_title, table_name):
    """Parse one lxml <table> element and persist the cell matrix via
    pandas_insert_table_mysql().

    The layout strategy comes from dict_styles[table_name]:
      * "纯竖向"               -- header row followed by data rows
      * "横向成双"             -- alternating label/value cells on each row
      * "横向成双需忽略rowspan" -- same, but drops a leading rowspan cell
        (only used for the 社保信息 table)
    Logs and returns early when the table has no usable rows.
    """
    tr_list = [x for x in table.xpath('.//tr') if extract_text_td(x) != "-"]  # every non-empty row of the table
    table_array = []
    if len(tr_list) == 0:
        scrawl_bug_logger.info("the table array is NONE !!! nothing in the table co_name{}   page_title{}  table_name{}".format(co_name, page_title, table_name))
        return
    # Plain vertical layout: row 0 is the header, the remaining rows are data.
    if dict_styles[table_name] == "纯竖向":
        for index, tr_row in enumerate(tr_list):
            list_td = tr_row.xpath('./child::*')
            list_td_tex = [strip_st(extract_text_td(x)) for x in list_td]
            if index == 0:
                # every header cell must contain at least one word character
                assert min([len(re.sub(r'\W', '', x)) for x in list_td_tex]) > 0
            table_array.append(list_td_tex)
        pandas_insert_table_mysql(table_array, co_name, page_title, table_name)

    # Horizontal label/value pairs without rowspan cells: even-indexed cells
    # are labels, odd-indexed cells are the matching values.
    elif dict_styles[table_name] == "横向成双":
        table_column = []
        table_vals = []
        for index, tr_row in enumerate(tr_list):
            list_td = tr_row.xpath('./child::*')
            list_td_tex = [strip_st(extract_text_td(x)) for x in list_td]
            table_column += list_td_tex[0::2]
            table_vals += list_td_tex[1::2]
            assert len(table_column) == len(table_vals)
        table_array = [table_column, table_vals]
        pandas_insert_table_mysql(table_array, co_name, page_title, table_name)

    # Social-security table: a 3-cell row starts with a rowspan cell that is
    # not a label, so drop it before pairing labels with values.
    elif dict_styles[table_name] == "横向成双需忽略rowspan" and table_name == "社保信息":
        column = []
        vals = []
        for index, tr_row in enumerate(tr_list):
            list_td = tr_row.xpath('./child::*')
            list_td_tex_raw = [strip_st(extract_text_td(x)) for x in list_td]
            list_td_tex = list_td_tex_raw[1:] if len(list_td_tex_raw) == 3 else list_td_tex_raw
            column += list_td_tex[0::2]
            vals += list_td_tex[1::2]
            assert len(column) == len(vals)
        table_array = [column, vals]
        pandas_insert_table_mysql(table_array, co_name, page_title, table_name)


def pandas_insert_table_mysql(table_array1, co_name, page_tile, table_name):
    """Persist a parsed table (row 0 = headers, remaining rows = data) to MySQL.

    Headers are transliterated to pinyin and used as MySQL column names;
    previously unseen hanzi/pinyin pairs are appended to the
    `pinyinhanziduizhaobiao` mapping table. If the destination table exists
    but lacks some of the columns, they are added with ALTER TABLE and the
    insert is retried once.

    Raises a bare Exception when the parsed array is malformed or the
    ALTER TABLE repair fails (callers treat any Exception as scrape failure).
    """
    global df_piny_char_quadro  # in-memory cache of known hanzi/pinyin pairs
    # Strip the trailing "关联N家公司…" suffix and trailing digits that
    # qichacha appends to shareholder / person names.
    table_array2 = [[re.sub(r'\s+.关联\d+家公司[^畀]+$', '', str(x)) for x in y] for y in table_array1]
    table_array = [[re.sub(r'\s+\d+$', '', x) for x in y] for y in table_array2]
    try:
        # every header must be non-empty, and all identifying args present
        assert min([len(x) for x in table_array[0]]) > 0
        assert len(co_name) > 0
        assert len(page_tile) > 0
        assert len(table_name) > 0
    except Exception as e:
        scrawl_bug_logger.info("表格解析逻辑有问题 co_name:{} ---- page_tile:{} ---- table_name:{} ---- error:{} ---- table_array: {}  ".format(co_name, page_tile, table_name, e, str(table_array)))
        raise Exception
    # Annual-report page titles carry the year; drop it for the table name.
    pagename = re.sub(r'\d+', '', page_tile) if "企业年报" in page_tile else page_tile
    table_name_pinyin = get_pinyin(pagename) + "_" + get_pinyin(table_name)
    cols_pinyin = [get_pinyin(x) for x in table_array[0]]
    df = pandas.DataFrame(data=table_array[1:], columns=cols_pinyin)
    df["farenjigouquancheng"] = co_name
    df["biaocaozuogengxinshijian"] = pandas.to_datetime("now")

    if "企业年报" in page_tile:
        df["nianbaosuoshunianfen"] = re.findall(r'\d+', page_tile)[0]
    # Record any hanzi/pinyin column pairs that are not yet in the mapping.
    pairs_char_pinyin = [table_array[0], cols_pinyin]
    pinyin_char_pair = list(map(list, zip(*pairs_char_pinyin)))
    df_tem_pinyin = pandas.DataFrame(data=pinyin_char_pair, columns=["hanzi", "pinyin"])
    df_tem_pinyin["biaoming"] = table_name
    df_tem_pinyin["biaoming_pinyin"] = table_name_pinyin
    df_tem_pinyin["xinxileibie"] = page_tile
    df_noin = df_tem_pinyin[~df_tem_pinyin.isin(df_piny_char_quadro).all(axis=1)].copy()
    if len(df_noin.index) > 0:
        df_noin.drop_duplicates(inplace=True)
        df_noin.to_sql('pinyinhanziduizhaobiao', con=eng, if_exists='append', index=False)
        # BUG FIX: DataFrame.append returns a NEW frame; the original call
        # discarded the result, so the in-memory cache never learned the new
        # pairs and they were re-inserted on every call. Rebind the
        # module-level cache (hence the `global` declaration above).
        df_piny_char_quadro = df_piny_char_quadro.append(df_noin, sort=True)
    """ 如果数据库已有的列名包含了此次输入所需 就直接插入
    """
    try:
        df.to_sql(table_name_pinyin, con=eng, if_exists='append', index=False)
        scrawl_bug_logger.info("成功插入一张表格 co_name: {} ---- page_tile: {} ---- table_name: {}".format(co_name, page_tile, table_name))
    except Exception as e:
        scrawl_bug_logger.info("pandas插入mysql出错 尝试增加列  co_name:{} ---- page_tile:{} ---- table_name:{} ---- e:{} ---- table_array:{} ".format(co_name, page_tile, table_name, e, str(table_array)))
        # The insert failed, most likely because the existing table is missing
        # columns: read the table's current columns via DESCRIBE.
        sql_chk_columns = "DESCRIBE `{}`".format(table_name_pinyin)
        df_chk_columns = pandas.read_sql_query(sql_chk_columns, con=eng)
        column_list = df_chk_columns['Field'].tolist()
        additive_cols_pinyin = [x for x in cols_pinyin if x not in column_list]
        try:
            assert len(additive_cols_pinyin) > 0
            cols2add_re = ['ADD COLUMN `' + x + '` TEXT' for x in additive_cols_pinyin]
            format_cols2add = ', '.join(cols2add_re)
            sql_add_cols = "ALTER TABLE `" + table_name_pinyin + "` " + format_cols2add + ', ALGORITHM=INPLACE;'
            eng.execute(sql_add_cols)
        except Exception as e:
            scrawl_bug_logger.info("添加新的column报错 co_name:{} ---- page_tile:{} ---- table_name:{} ---- e:{} ---- table_array:{} ".format(co_name, page_tile, table_name, e, str(table_array)))
            raise Exception
        sleep()
        df.to_sql(table_name_pinyin, con=eng, if_exists='append', index=False)
        scrawl_bug_logger.info("成功插入一张表格 co_name: {} ---- page_tile: {} ---- table_name: {}".format(co_name, page_tile, table_name))


#   Reworked scrape logic: page_title no longer includes the year.
def parse_page_2mysql(driverr, target_v1, page_title, df_exists_records, proxy):
    """Parse one category page of a company and persist every goal table.

    driverr           -- selenium driver already showing the category page
    target_v1         -- full legal name of the company being scraped
    page_title        -- category title, a key of config_dict_goal
    df_exists_records -- already-scraped tables (from chk_eixists_tables)
    proxy             -- proxy address for the auxiliary requests.get calls

    Annual-report pages are handled specially (one sub-table per year);
    all other categories locate each goal table by its title and delegate
    to parse_partical_table(). Raises a bare Exception when the page looks
    like an anti-scraping block.
    """
    goals_config = config_dict_goal[re.sub(r"\s", "", page_title)]
    source_page_goal_2scrawl = []
    page_source = driverr.page_source
    html = etree.HTML(page_source)
    if "企业年报" in page_title:

        # Collect the report years listed in the annual-report navigation div.
        annual_div = html.xpath("//h2[contains(text(),'企业年报')]/parent::a/following-sibling::div")
        annual_years = list(set(re.findall(r'\d\d\d\d(?=年)', tostring(annual_div[0], method='html', encoding='utf-8').decode('utf-8'))))
        annual_years.sort(key=lambda k: int(k), reverse=True)

        if len(annual_years) == 0:
            scrawl_bug_logger.info("这个公司这个大类没有信息，各个表都不存在 target {}  page_title {}".format(target_v1, page_title))
            eng.execute("INSERT INTO `paqumubiaojilubiao` (farenjigouquancheng, xinxileibie, biaoming) VALUES (%s, %s, %s)", (target_v1, page_title, "0"))
            return
        # Cache the raw page source once per company.
        if df_exists_records["biaoming"].loc[df_exists_records["biaoming"] == "pagesources"].empty:
            eng.execute("INSERT INTO `pagesources` (farenjigouquancheng, pagetitle, pagesource) VALUES (%s, %s, %s)", (target_v1, page_title, page_source))
        if "qiyenianbao_gongburiqi" not in df_exists_records["biaoming"].values:
            # Record the publication date of each year's report.
            divs_press_date = html.xpath("//div[contains(text(), '公布')]")
            ann_press_dates = [extract_text_td(x) for x in divs_press_date]
            pair_rpt_press = list(zip(annual_years, ann_press_dates))
            df_press_date = pandas.DataFrame(data=pair_rpt_press, columns=["nianbaosuoshunianfen", "gongburiqi"])
            df_press_date["farenjigouquancheng"] = target_v1
            df_press_date["biaocaozuogengxinshijian"] = pandas.to_datetime("now")
            df_press_date.to_sql("qiyenianbao_gongburiqi", con=eng, if_exists='append', index=False)

        index_dict = {}
        for tab in goals_config:
            ele_page_divs = html.xpath("//*[contains(@class, 'title') and contains(text(),'{}')]/parent::*/following-sibling::table[1]/parent::div".format(tab))
            ele_ranks = []
            if len(ele_page_divs) > 0:
                # Extract each page div's numeric id to index it against the
                # year list; note the id can be two digits.
                for ele in ele_page_divs:
                    ele_source = tostring(ele, method='html', encoding='utf-8').decode('utf-8')
                    rank = int(re.findall(r'(?<=id=")\d+', ele_source)[0])
                    ele_ranks.append(rank)
                ele_ranks.sort()
                list_year_tab = [annual_years[x] for x in ele_ranks]
                index_dict[tab] = list_year_tab
        df_arr = []
        # Walk every (table, year) combination and record it as a scrape target.
        for key, vlist in index_dict.items():
            for yea in vlist:
                df_arr.append([target_v1, "企业年报", key, yea])
        # The next (commented) line was meant to verify annual-report presence
        # but crashed for companies that never published a report.
        # annual_page_count = max([len(index_dict[x]) for x in index_dict.keys()])
        df_annual_tabs = pandas.DataFrame(data=df_arr, columns=['farenjigouquancheng', 'xinxileibie', 'biaoming', 'nianbaosuoshunianfen'])
        df_annual_tabs.to_sql("paqumubiaojilubiao", con=eng, if_exists='append', index=False)

        # Parse each goal table for every year that is not already recorded.
        for tab in goals_config:
            table_tabs = html.xpath("//*[contains(@class, 'title') and contains(text(),'{}')]/parent::*/following-sibling::table[1]".format(tab))
            if len(table_tabs) > 0:
                for year, table_ele in list(zip(index_dict[tab], table_tabs)):
                    table_name_pinyin = "qiyenianbao_" + get_pinyin(tab)
                    if df_exists_records[["biaoming", "nianbaosuoshunianfen"]].loc[(df_exists_records["biaoming"] == table_name_pinyin) & (df_exists_records["nianbaosuoshunianfen"] == year)].empty:
                        parse_partical_table(table_ele, target_v1, year + "企业年报", tab)
    else:
        # Cache the raw page source once per company.
        if df_exists_records["biaoming"].loc[df_exists_records["biaoming"] == "pagesources"].empty:
            eng.execute("INSERT INTO `pagesources` (farenjigouquancheng, pagetitle, pagesource) VALUES (%s, %s, %s)", (target_v1, page_title, page_source))
        # Probe which goal tables actually exist on this page.
        for tab in goals_config:
            try:
                if re.search(r"经营异常", tab):
                    divs_follow = html.xpath("//*[contains(@class, 'title') and contains(text(),'{}')]/parent::*/following-sibling::div".format(tab))[0]
                else:

                    table = html.xpath("//*[contains(@class, 'title') and contains(text(),'{}')]/ancestor::section//table[{}]".format(tab, 2 if "工商信息" in tab else 1))[0]
            except Exception as _:
                # If the page title does not even match the company, assume an
                # anti-scraping block and raise.
                htmltitle = re.sub(r"\W", "", extract_text_td(html.xpath("//title")[0]))
                if not re.search(re.sub(r"\W", "", target_v1), htmltitle):
                    scrawl_bug_logger.info("真的反爬了? html title: {}  target {}  page_title {} table_name {}".format(htmltitle, target_v1, page_title, tab))
                    raise Exception
                else:
                    scrawl_bug_logger.info("这个表不存在 target {}  page_title {} table_name {}".format(target_v1, page_title, tab))
                    continue
            # NOTE(review): tab is appended twice, so the loop below parses and
            # records each table two times per visit. This also inflates the
            # frequency count that judge_full_scrawled() relies on — confirm
            # whether the duplication is intentional before removing it.
            source_page_goal_2scrawl.append(tab)
            source_page_goal_2scrawl.append(tab)

        if len(source_page_goal_2scrawl) == 0:
            scrawl_bug_logger.info("这大类各表都不存在 target {}  page_title {}".format(target_v1, page_title))
            eng.execute("INSERT INTO `paqumubiaojilubiao` (farenjigouquancheng, xinxileibie, biaoming) VALUES (%s, %s, %s)", (target_v1, page_title, "0"))

        else:
            df_pqmb = pandas.DataFrame(data=source_page_goal_2scrawl, columns=["biaoming"])
            df_pqmb["xinxileibie"] = page_title
            df_pqmb["farenjigouquancheng"] = target_v1
            df_pqmb["nianbaosuoshunianfen"] = "-"
            df_pqmb.to_sql("paqumubiaojilubiao", con=eng, if_exists='append', index=False)

        for tab in source_page_goal_2scrawl:
            table_name_pinyin = get_pinyin(page_title) + "_" + get_pinyin(tab)
            # Gate: skip tables that already have a record.
            if df_exists_records["biaoming"].loc[df_exists_records["biaoming"] == table_name_pinyin].any():
                continue

            try:
                xpath_pattern = "//*[contains(@class, 'title') and contains(text(),'{}')]".format(tab)
                tcaption_h3 = html.xpath(xpath_pattern)[0]
            except Exception as _:
                continue
            if re.search(r"地块公示|购地信息|土地转让|土地抵押", tab):
                # Land details: extract via "//*[contains(text(), '地块公示详情')]/parent::div/following::table" and visible is _displayed

                land_table_tr = html.xpath("//*[contains(text(), '{}')]/ancestor::section//table[1]//tr".format(tab))
                text_1st_tr_land = [extract_text_td(x) for x in land_table_tr[0].xpath("./child::*")]
                indx_xingzhengqu = text_1st_tr_land.index("行政区")
                for tr in land_table_tr[1:]:
                    # Each row links to a JSON detail endpoint; fetch it with
                    # requests (optionally through the proxy) and flatten it.
                    xingzhengqu = extract_text_td(tr.xpath("./child::*")[indx_xingzhengqu])
                    tr_source = tostring(tr, method='html', encoding='utf-8').decode('utf-8')
                    land_details = re.findall(r"land.+Detail\([^)]+", tr_source)[0]
                    head, id_land = re.findall(r"\w+", land_details)
                    if proxy_config:
                        http_proxy = proxy
                        https_proxy = proxy
                        proxy_dict = {"http": http_proxy, "https": https_proxy}
                        r = requests.get("https://www.qichacha.com/company_{}?id={}".format(head, id_land), headers={'User-agent': ua.random}, proxies=proxy_dict)
                    else:
                        r = requests.get("https://www.qichacha.com/company_{}?id={}".format(head, id_land), headers={'User-agent': ua.random})

                    rj = json.loads(r.text)
                    table_array = [['farenjigouquancheng'], [target_v1]]
                    for k, v in rj['data'].items():
                        if type(v) == dict:
                            table_array[0].append(k)
                            table_array[1].append(str(v['Name']) if v['Name'] != '' else "-")
                        else:
                            table_array[0].append(k)
                            table_array[1].append(str(v) if v != '' else "-")
                    table_array[0].append("yemianxingzhengqu")
                    table_array[1].append(xingzhengqu)
                    pandas_insert_table_mysql(table_array, target_v1, page_title, tab)

            elif re.search(r"经营异常", tab):
                # Abnormal-operation entries are plain divs, not a table.
                divs_table = html.xpath("//*[contains(text(), '经营异常') and contains(@class, 'title')]/parent::div/following-sibling::div")
                table_array = [["xuhao", "neirong"]]
                for sub_div in divs_table:
                    source_sub = tostring(sub_div, encoding='utf8', method='html').decode('utf8')
                    source_sub = re.sub(r"<[^<>]+>", "", source_sub)
                    source_sub = re.sub(r"\s+", " ", source_sub)
                    xuhao = re.match(r'^\s+\d+\s+', source_sub).group()
                    neirong = re.sub(r"(?=\s列入)|(?=\s作出)", "\n", source_sub[len(xuhao):])
                    table_array.append([xuhao, neirong])
                    pandas_insert_table_mysql(table_array, target_v1, page_title, "经营异常")
            else:
                # scrawl_bug_logger.info("bug高发区域   now  {}".format(tab))  # xpath could not locate 商标信息 here
                table_2scr = html.xpath("//*[contains(@class, 'title') and contains(text(),'{}')]/ancestor::section//table[{}]".format(tab, 2 if "工商信息" in tab else 1))[0]
                parse_partical_table(table_2scr, target_v1, page_title, tab)


def scrawl(target_v, driver11, proxy):
    """Search qichacha for *target_v*, open its company page and scrape it.

    target_v  -- company legal name to search for
    driver11  -- selenium driver (a fresh search tab is opened in it)
    proxy     -- proxy address forwarded to parse_page_2mysql

    Raises a bare Exception when the site blocks scraping, when the search
    result does not match the submitted name (after logging it to
    `tijiaomingchengyichangbiao`), or when a page assertion fails.
    """
    driver11.get("https://www.qichacha.com/search?key={}".format(target_v))
    sleep()
    if "您的操作过于频繁，验证后再操作" in driver11.page_source:
        scrawl_bug_logger.info("企查查反爬，报错：操作过于频繁+-+-{}+-+-".format(target_v))
        raise Exception

    """ 找到第一个搜索结果 或者如果没有搜索结果"""
    search_pagesource = driver11.page_source
    search_html = etree.HTML(search_pagesource)
    # The hit-count banner ("小查为您找到 N 条结果") tells us if anything matched.
    xpath_countold_div = "//*[contains(text(), '小查为您找到')]/parent::*"
    countold_div = search_html.xpath(xpath_countold_div)
    countold_div_source = tostring(countold_div[0], encoding='utf8', method='html').decode('utf8')
    text_danger = re.findall(r"\d+", re.sub("\s", "", "".join(re.findall(r"(?<=>)[^<>]+(?=<)", countold_div_source))))[0]
    if text_danger != "0":
        # Build an xpath that requires every word of the name to appear in the
        # first result link, then click it.
        target_splits = re.split(r"\W", target_v)
        contain_query = ["contains(text(),'{}')".format(x) for x in target_splits]
        xpath_first = "//section//table//*[{}][1]/ancestor::a[1]".format(" and ".join(contain_query))
        em1 = search_html.xpath(xpath_first)
        target_link_text = "搜索结构不能精确匹配"
        if len(em1) > 0:
            driver11.find_element_by_xpath(xpath_first).click()
            em1source = tostring(em1[0], encoding='utf8', method='html').decode('utf8')
            # Compare the clicked link's text with the submitted name; record a
            # mismatch in the name-anomaly table, continue only on exact match.
            target_link_text = ''.join(re.findall(r"[^a-z0-9\"'_/\sA-Z><.=]+", em1source))
            target_link_text_strip = re.sub("\W", "", target_link_text)
            target_v_strip = re.sub("\W", "", target_v)
            bool_com_exists = True if re.match(target_v_strip, target_link_text_strip) else False
        else:
            bool_com_exists = False
    else:
        bool_com_exists = False
        target_link_text = "搜索结果为空"
    if not bool_com_exists:
        eng.execute('INSERT INTO `tijiaomingchengyichangbiao` (tijiaomingcheng, sousuojieguo) VALUES (%s, %s)', (target_v, target_link_text))
        # NOTE(review): "(target_v)" is NOT a one-tuple — the parens are a
        # no-op, so a bare string is passed as the parameter set. Compare the
        # correct tuple form used elsewhere; likely should be "(target_v,)".
        eng.execute("INSERT INTO `task_close` (farenjigouquancheng) VALUES (%s)", (target_v))

        scrawl_bug_logger.info("插入名称异常表成功 客户提交名称：{}  搜索结果：{}".format(target_v, target_link_text))
        raise Exception

    sleep()
    sleep()
    # The result link opened a new tab; close the search tab and switch over.
    now = driver11.current_window_handle
    driver11.close()
    sleep()
    switch_window(driver11, now)
    sleep()

    # assert re.search(r'{}'.format(re.sub(r'\W', '', target_v)), re.sub(r"\W", "", driver11.title))
    # The company home page is also parsed with lxml.
    main_page_source = driver11.page_source
    html_main = etree.HTML(main_page_source)
    # todo: the home-page nav has many related items that need fuzzy matching;
    # a json mapping would keep the details complete
    # data-pos //div[contains(@class, 'company-nav-items')]//a/parent::*/preceding-sibling::*
    data_pos_list = html_main.xpath("//a[@data-pos]")
    # features_present = html_main.xpath("//*[contains(@class, 'company-nav')]/div/a")
    # Sanity check: a real company page shows '工商信息' more than once.
    assert len(html_main.xpath("//*[contains(text(),'工商信息')]")) > 1
    eng.execute("INSERT INTO `paqumubiaojilubiao` (farenjigouquancheng, xinxileibie, biaoming) VALUES (%s, %s, %s)", (target_v, "法人机构简介", "内容"))
    df_exists_records = chk_eixists_tables(target_v)
    list_keys_goals = list(config_dict_goal.keys())
    # Build one xpath over all goal tables, compare the nav entries against the
    # already-scraped data, and visit the categories missing the most first.
    xpath_contains_list = []
    for goal_list in config_dict_goal.values():
        xpath_contains_list += goal_list
    xpath_query = "//a[{}]".format(" or ".join(["contains(text(),'{}')".format(x) for x in xpath_contains_list]))
    a_posdata_elements = html_main.xpath(xpath_query)  # strip digits from the link text to compare with scraped data
    text_pos_data_a = [re.sub(r"\d", "", extract_text_td(x)) for x in a_posdata_elements]
    a_posdata_title_dict = {x[0]: x[1] for x in re.findall(r"(\w+)': '(\w+)", str([{v: k for v in config_dict_goal[k]} for k in ('知识产权', '经营风险', '基本信息', '经营状况')]))}
    biaoming_pinyin_list = [get_pinyin(a_posdata_title_dict[x]) + "_" + get_pinyin(x) for x in text_pos_data_a]
    df_pos_recorded = df_exists_records['biaoming'].loc[df_exists_records['biaoming'].isin(biaoming_pinyin_list)]
    table_pinyin_pos_empty = [x.split("_")[0] for x in biaoming_pinyin_list if x not in df_pos_recorded.values]
    table_pinyin_pos_empty.append("qiyenianbao")
    list_keys_goals_sorted = sorted(list_keys_goals, key=lambda k: table_pinyin_pos_empty.count(get_pinyin(k)), reverse=True)

    # Breadth-first: compare every potential task with existing records and
    # visit whichever category is the most incomplete.
    for title in list_keys_goals_sorted:
        if title == "法人机构简介" or len(config_dict_goal[title]) == 0:
            continue
        # The nav badge shows the record count; "0" means nothing to scrape.
        span_subr = html_main.xpath("//*[text()='{}']/following-sibling::span".format(title))[0]
        if extract_text_td(span_subr) != "0":
            switch2_category_page(driver11, target_v, title)
            scrawl_bug_logger.info("现在开始爬取target_v:{}--------title:{}".format(target_v, title))
            html_source_page = driver11.page_source
            html_sub_page = etree.HTML(html_source_page)
            # On the 基本信息 page, also scrape the company intro blurb once.
            if title == "基本信息" and "farenjigoujianjie_neirong" not in df_exists_records["biaoming"].values and "法人机构简介" in list_keys_goals:
                intro_content = html_sub_page.xpath("//h1/parent::div/parent::div")[0]  # //div[contains(@class, 'panel padder')]
                source_intro = tostring(intro_content, encoding="utf8", method="html").decode("utf8")
                text_source_intro = re.sub(r"<[^<>]+>", "", source_intro)
                text_source_intro = re.sub(r"\s+", " ", text_source_intro)
                # list_intro_col = [["法人代表", "法人机构全称", "曾用名", "经营状态", "电话", "官网", "邮箱", "地址", "简介"], []]
                list_intro_col = [["法人代表", "法人机构全称", "电话", "官网", "邮箱", "简介"], []]
                farendaibiao_td = html_sub_page.xpath("//*[contains(text(), '工商信息')]/ancestor::section//table[1]//tr[2]/child::*[1]")
                farendaibiao_name = extract_text_td(farendaibiao_td[0])
                list_intro_col[1].append(farendaibiao_name)
                list_intro_col[1].append(target_v)

                # list_intro_col[1].append(re.findall(r"正常|注销|在业|存续", text_source_intro)[0] if re.search(r"正常|注销|在业|存续", text_source_intro) else "无")
                list_intro_col[1].append(re.findall(r"(?<=电话：)\s?\s?[^\n\s]+", text_source_intro)[0] if re.search(r"电话：", text_source_intro) else "无")
                list_intro_col[1].append(re.findall(r'(?<=href=")\s?\s?[^\n\s"]+', re.findall(r'<a.+href="http.+data-original-title="进入官网"', source_intro)[0])[0] if not re.search(r"官网：[^暂无]{,70}暂无", text_source_intro) else "暂无")
                list_intro_col[1].append(re.findall(r"(?<=邮箱：)\s?\s?[^\n\s]+", text_source_intro)[0] if re.search(r"邮箱：", text_source_intro) else "无")
                # The blurb may be truncated behind a "查看详情" link; if so, dig
                # out the longest matching detail div instead.
                if re.search(r"查看详情", source_intro):
                    target_splits = re.split("\W", re.findall(r"(?<=简介：)[^…]+", text_source_intro)[0])
                    contain_query = ["contains(text(),'{}')".format(x) for x in target_splits if len(x) > 0]
                    xiangqing_div_xpath = "//*[{}]/parent::div".format(" and ".join(contain_query))
                    xiangqing_divs = html_sub_page.xpath(xiangqing_div_xpath)
                    text_xiangqing_divs = [re.sub(r"\s", "", re.sub(r"<[^<>]+>", "", tostring(i, encoding='utf8', method='html').decode('utf8'))) for i in xiangqing_divs]
                    list_xiangqing = sorted(text_xiangqing_divs, key=lambda k: len(k), reverse=True)
                    xiangqing = list_xiangqing[0]
                elif re.search(r"简介：", text_source_intro):
                    xiangqing = re.findall(r"(?<=简介：)\s?\s?[^\n\s]+", text_source_intro)[0]
                else:
                    xiangqing = "无"
                list_intro_col[1].append(xiangqing)
                pandas_insert_table_mysql(list_intro_col, target_v, "法人机构简介", "内容")
                # if "基本信息" in list_keys_goals:
                #     parse_page_2mysql(driver11, target_v, "基本信息", df_exists_records, proxy)

            parse_page_2mysql(driver11, target_v, title, df_exists_records, proxy)
        else:
            scrawl_bug_logger.info("这大类各表都不存在 target {}  page_title {}".format(target_v, title))
            eng.execute("INSERT INTO `paqumubiaojilubiao` (farenjigouquancheng, xinxileibie, biaoming) VALUES (%s, %s, %s)", (target_v, title, "0"))


# todo: walks every table and lists the target's already-scraped records
def chk_eixists_tables(targetv):
    """Collect rows already scraped for *targetv* within the last `task_from` days.

    Scans every existing data table and returns a DataFrame with columns
    (farenjigouquancheng, biaoming, nianbaosuoshunianfen). Tables that are
    not annual-report tables have no year column, so "-" is filled in to
    keep the schema uniform.
    """
    all_table = list_all_table_exists()
    date_start = (datetime.datetime.today() - datetime.timedelta(task_from)).strftime("%Y-%m-%d")
    df_all_exists = pandas.DataFrame(columns=["farenjigouquancheng", "biaoming", "nianbaosuoshunianfen"])
    for table in all_table:
        # BUG FIX: the date literal must be quoted. Unquoted, MySQL parses
        # e.g. 2020-01-05 as integer subtraction (= 2014) and the recency
        # filter silently matches far too much. The sibling query in
        # judge_full_scrawled() already quotes it correctly.
        sql_chk_exists = "SELECT * FROM {} WHERE farenjigouquancheng = '{}' AND biaocaozuogengxinshijian >= '{}'".format(table, targetv, date_start)
        df_table_exists = pandas.read_sql_query(sql_chk_exists, con=eng)
        if len(df_table_exists.index) > 0:
            df_table_exists["biaoming"] = table
            if "qiyenianbao" not in table:
                # no year column on non-annual tables; normalise with "-"
                df_table_exists["nianbaosuoshunianfen"] = "-"
            df_all_exists = df_all_exists.append(df_table_exists[["farenjigouquancheng", "biaoming", "nianbaosuoshunianfen"]], ignore_index=True, sort=True)
    return df_all_exists


#   写一个函数判断爬取目标表 / dict goal / task-close 的各个关系 决定何时写入task_close 输入是dictgoal 其他自己查
def judge_full_scrawled(target_v):
    """Check crawl completeness for *target_v* and close the task when done.

    Side effects:
    - posts a WeChat (ftqq) alarm when the task has been pending > 4 hours;
    - when all six info categories have records in `paqumubiaojilubiao` and
      every sub-table was visited more than twice, inserts into `task_close`,
      records progress in `caijijindubiao`, and logs sub-tables that never
      produced rows to `paqushibaibiao` (with another alarm).
    """
    # How long has this task been sitting since it was entered?
    sql_chk_task_start = "select biaocaozuogengxinshijian from `task_qichacha` where farenjigouquancheng = '{}'".format(target_v)
    df_task_start = pandas.read_sql_query(sql_chk_task_start, con=eng)
    sss = df_task_start['biaocaozuogengxinshijian'].values[0]
    ss = pandas.to_datetime('now').to_datetime64()
    delta = (ss - sss).astype('timedelta64[s]')
    seconds_total = delta.item().total_seconds()
    if seconds_total > 14400:  # 4 hours
        data_alarm = {"text": "alarm!!!!!", "desp": "{}\n任务迟迟没有完成已经超过四个小时".format(target_v)}
        alarm_url = "https://sc.ftqq.com/SCU32672T726352c49500580872ebed22f0150cec5ba5f09211f9c.send"
        r = requests.post(alarm_url, data=data_alarm)
    # A task is closable when every product-required top category is covered;
    # check `paqumubiaojilubiao` for coverage counts (annual-report entries
    # additionally carry their report year).
    date_start = (datetime.datetime.today() - datetime.timedelta(task_from)).strftime("%Y-%m-%d")
    read_paqumb = "SELECT * FROM `paqumubiaojilubiao` WHERE farenjigouquancheng = '{}' AND biaocaozuogengxinshijian >= '{}' ".format(target_v, date_start)
    df_paqujilu = pandas.read_sql_query(read_paqumb, con=eng)

    if len(set(df_paqujilu["xinxileibie"])) == 6:
        # visit frequency per (category, table, report-year) triple
        df_freq = df_paqujilu.groupby(["xinxileibie", "biaoming", "nianbaosuoshunianfen"]).size().reset_index(name='frequence')
        progress_percentage = round(len(set(df_freq["biaoming"])) * 100 / 25)
        eng.execute("insert into `caijijindubiao` (farenjigouquancheng, caijijindu) values (%s, %s)", (target_v, progress_percentage))
        if min(df_freq['frequence']) > 2:
            # Every sub-table has been visited >2 times: close the task even
            # if some tables never yielded data.
            # BUG FIX: params must be a 1-tuple — "(target_v)" is just a
            # parenthesised string (same tuple form as the inserts elsewhere).
            eng.execute("INSERT INTO `task_close` (farenjigouquancheng) VALUES (%s)", (target_v,))
            # Any goal table without a matching record counts as a failure.
            df_freq["table_name_pinyin"] = df_freq[["xinxileibie", "biaoming"]].apply(lambda x: get_pinyin("_".join(x)), axis=1)
            empty_record = []
            for tabname, year in df_freq[["table_name_pinyin", "nianbaosuoshunianfen"]].values:
                if "0" in tabname:
                    continue
                sql_chk_exist = "SELECT * FROM `{}` WHERE farenjigouquancheng = '{}' AND biaocaozuogengxinshijian >= '{}' {}".format(tabname, target_v, date_start, " AND nianbaosuoshunianfen = '{}'".format(year) if "qiyenianbao" in tabname else "")
                df_exist = pandas.read_sql_query(sql_chk_exist, con=eng)
                if len(df_exist.index) == 0:
                    empty_record.append(tabname)
            if len(empty_record) > 0:
                df_empty_rec = pandas.DataFrame(data=list(set(empty_record)), columns=["biaoming"])
                df_empty_rec["farenjigouquancheng"] = target_v
                df_empty_rec.to_sql('paqushibaibiao', con=eng, if_exists='append', index=False)
                eng.execute("insert into `caijijindubiao` (farenjigouquancheng, caijijindu) values (%s, %s)", (target_v, "70"))
                data_alarm = {"text": "bug!!!!!", "desp": "{}的如下失败：\n{}".format(target_v, "\n".join(list(set(empty_record))))}
                alarm_url = "https://sc.ftqq.com/SCU32672T726352c49500580872ebed22f0150cec5ba5f09211f9c.send"
                r = requests.post(alarm_url, data=data_alarm)
                if r.status_code == 200:
                    scrawl_bug_logger.info("向方糖发送报警一次，200，desp：{}".format(data_alarm["desp"]))
            else:
                eng.execute("insert into `caijijindubiao` (farenjigouquancheng, caijijindu) values (%s, %s)", (target_v, "100"))


def super_iter_xpath(drivv, path):
    """Poll *drivv* for elements matching XPath *path*, up to 60 attempts
    spaced 0.1 s apart.

    Returns the first matching element, or 0 if nothing appeared in time.
    A 0.1 s pause follows every query, including the successful one.
    """
    for _ in range(60):
        matches = drivv.find_elements_by_xpath(xpath=path)
        time.sleep(0.1)
        if matches:
            return matches[0]
    return 0


def switch2_category_page(driver11, target_v, title):
    """Navigate to the category tab named *title* on the company page.

    Tries clicking the <h2> text button first, then falls back to the
    nav-bar href scraped from the page source. Raises when neither works.
    Finally asserts the tab is highlighted (blue) to confirm the switch.
    """
    try:
        scroll_randomly(driver11)
        super_iter_xpath(driver11, "//h2[text()='{}']".format(title)).click()
    except Exception as e:
        scrawl_bug_logger.info("大类页面按钮text()方法没找到 +-+-{}+-+-{}\n {}".format(target_v, title, e))
        try:
            pagesource = driver11.page_source
            href = re.findall(r'(?<={}\'\)\"\shref=\"/)[^"]+'.format(title), pagesource)[0]
            # BUG FIX: the predicate bracket was never closed —
            # "//*[contains(@href,'x')" is invalid XPath and made this
            # fallback path always fail.
            navi_header_btn = super_iter_xpath(driver11, "//*[contains(@href,'{}')]".format(href))
            ActionChains(driver11).move_to_element(navi_header_btn).click(navi_header_btn).perform()
            sleep()
        except Exception as e:
            scrawl_bug_logger.info(" 大类页面按钮 href方法 也没找到  +-+-{}+-+-{}\n {}".format(target_v, title, e))
            raise Exception
    sleep()
    # the active tab turns blue; verify we actually switched
    highlight_btn = super_iter_xpath(driver11, "//h2[text()='{}']".format(title))
    highlighted_btn_color = highlight_btn.value_of_css_property("color")
    assert highlighted_btn_color == 'rgba(18, 139, 237, 1)'
    scroll_randomly(driver11)


def list_all_table_exists():
    """Return the names of all base tables in the configured schema,
    excluding two bookkeeping tables that never hold crawl results."""
    chk_table_names = "select table_name from information_schema.tables where table_schema='{}' and table_type='base table';".format(db_name_config)
    excluded = ("tijiaomingchengyichangbiao", "pinyinhanziduizhaobiao")
    names = pandas.read_sql_query(chk_table_names, con=eng).iloc[:, 0]
    return [name for name in names if name not in excluded]


def check_nums_value_exists(xinxileibie1, biaoming1, target_v):
    """Count recent rows for *target_v* in the table derived from the
    category / sub-table names (converted to pinyin).

    Returns 0 when the derived table does not exist, otherwise the number
    of rows updated within the last `task_from` days.
    """
    cleaned_biaoming = re.sub(r'\W|[\d\s]+$|\n|\s|^\d+', '', biaoming1)
    cleaned_leibie = re.sub(r'\s|\n', '', xinxileibie1)
    table_name_pinyin = get_pinyin(cleaned_leibie) + "_" + get_pinyin(cleaned_biaoming)
    if table_name_pinyin not in list_all_table_exists():
        return 0
    target_v = re.sub(r"\s|\n", "", target_v)
    date_start = (datetime.datetime.today() - datetime.timedelta(task_from)).strftime("%Y-%m-%d")
    # NOTE(review): values are interpolated straight into SQL — assumes the
    # cleanup above strips anything that could break the quoting; verify.
    sql_check_exists = "SELECT  * FROM `{}` WHERE farenjigouquancheng = '{}'  AND biaocaozuogengxinshijian >= '{}'".format(table_name_pinyin, target_v, date_start)
    return len(pandas.read_sql_query(sql_check_exists, con=eng).index)


# todo 更改一下 录入年报表之前验证是否存在
# todo 更改一下 录入年报表之前验证是否存在
def chek_year_col_val_exists(target_v, biaoming, year):
    """Return 1 when the annual-report table for *biaoming* already holds
    a recent row for *target_v* in *year*, else 0 (also 0 when the table
    does not exist yet)."""
    table_na_pinyin = "qiyenianbao_" + get_pinyin(biaoming)
    annual_tables = [t for t in list_all_table_exists() if t.startswith("qiyenianbao")]
    if table_na_pinyin not in annual_tables:
        return 0
    date_start = (datetime.datetime.today() - datetime.timedelta(task_from)).strftime("%Y-%m-%d")
    sql_check_exists = "SELECT  * FROM `{}` WHERE farenjigouquancheng = '{}' AND nianbaosuoshunianfen = '{}' AND biaocaozuogengxinshijian >= '{}'".format(table_na_pinyin, target_v, year, date_start)
    matched = pandas.read_sql_query(sql_check_exists, con=eng)
    return 0 if len(matched.index) == 0 else 1


def scroll_randomly(drivv):
    """Hover over a couple of visible child <div>s, chosen at random, to
    mimic human mouse movement."""
    visible = [el for el in drivv.find_elements_by_xpath("./child::div") if el.is_displayed()]
    pick_count = min(2, round(len(visible) * 0.6))
    for target in sample(visible, pick_count):
        ActionChains(drivv).move_to_element(target).perform()
        sleep()


def scraw_task():
    """Main crawl loop: poll the task table forever, filter out malformed
    company names, and fan out up to 8 crawler threads per round."""
    while True:
        date_start_task = (datetime.datetime.today() - datetime.timedelta(task_from)).strftime("%Y-%m-%d-%H-%M")
        read_task_coms = "SELECT * FROM `task_qichacha` WHERE biaocaozuogengxinshijian >= '{}' ".format(date_start_task)
        read_closed_coms = "SELECT * FROM `task_close` WHERE biaocaozuogengxinshijian >= '{}' ".format(date_start_task)
        df_task2do = pandas.read_sql_query(read_task_coms, con=eng)
        if len(df_task2do.index) == 0:
            scrawl_bug_logger.info("len(set_mubiao)==0，暂时没任务可做，sleep100秒")
            time.sleep(100)
            continue
        df_closed_task = pandas.read_sql_query(read_closed_coms, con=eng)
        # BUG FIX: sort_values returns a new frame; the original discarded
        # the result, leaving the tasks unsorted.
        df_task2do = df_task2do.sort_values(by=['biaocaozuogengxinshijian'], ascending=False)

        # pending = submitted minus already-closed
        set_mubiao = set(df_task2do["farenjigouquancheng"])
        set_close = set(df_closed_task["farenjigouquancheng"])
        task_2do_list = [x for x in set_mubiao if x not in set_close]

        # First filter: close targets whose name is not pure Chinese
        # (plus full-width parens) or is too short to be a company name.
        task_2do_entity = []
        for x in task_2do_list:
            if not re.match(r"^[\s\u4e00-\u9fa5（）]+$", x) or len(x) < 6:
                eng.execute("INSERT INTO `tijiaomingchengyichangbiao` (tijiaomingcheng, sousuojieguo) VALUES (%s, %s)", (x, "无搜索结果"))
                # BUG FIX: params must be a 1-tuple, not a parenthesised string
                eng.execute("INSERT INTO `task_close` (farenjigouquancheng) VALUES (%s)", (x,))
            else:
                task_2do_entity.append(x)

        # BUG FIX: the original spawned threads from the UNFILTERED list,
        # re-crawling names it had just rejected; use the filtered list.
        len_task_list = len(task_2do_entity)
        if len_task_list == 0:
            scrawl_bug_logger.info("任务都已经完成了，sleep100秒")
            time.sleep(100)
            continue
        batch_size = min(len_task_list, 8)  # at most 8 concurrent crawlers
        proxy_list = sample(tunnels, batch_size)
        task_list = task_2do_entity if batch_size == len_task_list else sample(task_2do_entity, batch_size)
        thread_list = [Thread(target=start_scrawl_thread, args=(task_list[i], proxy_list[i],)) for i in range(batch_size)]
        for thr in thread_list:
            thr.start()
        for thr in thread_list:
            thr.join()


def start_scrawl_thread(target, pxy):
    """Crawl *target* in a fresh Chrome instance routed through proxy *pxy*.

    In debug mode exceptions propagate to the caller; in production they
    are caught and logged so one failed target does not kill the thread.
    """
    judge_full_scrawled(target)
    chrom_path = 'predict_model/chromedriver'
    options = webdriver.ChromeOptions()
    options.add_argument('lang=zh_CN.UTF-8')
    # BUG FIX: the original passed "'user-agent'='...'", which is not a valid
    # Chrome switch and is ignored; the correct form is --user-agent=<value>.
    options.add_argument("--user-agent={}".format(ua.random))
    options.add_argument('--ignore-certificate-errors')

    if config_headless:
        options.add_argument('headless')
    if proxy_config:
        capabilities = DesiredCapabilities.CHROME
        prox = Proxy()
        prox.proxy_type = ProxyType.MANUAL
        prox.ssl_proxy = pxy
        prox.http_proxy = pxy
        prox.add_to_capabilities(capabilities)
        driver = webdriver.Chrome(chrom_path, chrome_options=options, desired_capabilities=capabilities)
    else:
        driver = webdriver.Chrome(chrom_path, chrome_options=options)
    driver.delete_all_cookies()

    def _crawl_once():
        # one crawl attempt; shared by the debug and production paths
        scrawl_bug_logger.info("++++++++++++++++++++{} 开始爬取+++++++++++++++++++++++".format(target))
        scrawl(target, driver, pxy)
        driver.quit()
        if proxy_config:
            scrawl_bug_logger.info("$$$$$$$$$$$${} 完成爬取， 没有出现意外的异常使用的代理是 {} ".format(target, pxy))
        else:
            scrawl_bug_logger.info("$$$$$$$$$$$${} 完成爬取， 没有出现意外的异常".format(target))

    if debug_mode:
        _crawl_once()
    else:
        try:
            _crawl_once()
        except Exception as e:
            driver.quit()
            scrawl_bug_logger.info("爬取出现异常：  +-+-{}+-+-{} 使用的代理是：{}".format(target, e, pxy))


if __name__ == "__main__":
    # entry point: run the polling crawl loop forever
    scraw_task()
