import json
import math
import os
import random
import re
import sys
import time
import warnings

import facade
import pymysql
import requests
from pymongo import MongoClient
from pymongo.errors import DuplicateKeyError
from xjlibrary.configread import MyConfigParser
from xjlibrary.mLog.mylogger import BaseLogFilter
from xjlibrary.mdatetime.mtime import MDateTime
from xjlibrary.mprocesspoll.MThreadingRun import MThreadingRun
from xjlibrary.our_file_dir import BaseDir

# Absolute directory of this script; added to sys.path so the sibling
# module base_sipo can be imported below.
curPath = BaseDir.get_file_dir_absolute(__file__)
sys.path.append(curPath)

from base_sipo import BaseSipo

# MySQL/Mongo connection settings live next to this script in db.ini.
configfile = BaseDir.get_new_path(curPath, "db.ini")

# NOTE (translation of the string below): cookies clash between threads,
# so single-threaded execution is currently used while debugging the logic.
"""
多线程中cookie打架 现在使用单线程进行逻辑排查
"""


class LogFilter(BaseLogFilter):
    """Log filter for the "_ajax" log: keep only SQL/ajax related records."""

    # Substrings that mark a record as relevant to the "_ajax" log.
    _KEYWORDS = ("IsAjaxAndJsonData", "errSql", "ExeSqlMany", "ExeSqlToDB")

    def __init__(self):
        super(LogFilter, self).__init__(name='', logname="_ajax")

    def filter(self, record):
        """Accept the record iff its message is a str containing a keyword."""
        message = record.msg
        if not isinstance(message, str):
            return False
        return any(keyword in message for keyword in self._KEYWORDS)


class SearchDown(BaseSipo):
    """
    Search by publication date, then download every result page for that
    date range and parse it.
    """

    def __init__(self):
        # Per-process time-rotating log file, filtered to SQL/ajax records.
        self.filelogger = facade.get_timerotatingfilelogger(
            BaseDir.get_new_path(curPath, "logs", "step2" + str(os.getpid())), LogFilter())
        super().__init__(self.filelogger)
        # Base form payload for the search POST; searchExp is rebuilt per run.
        self.search_data = {
            # e.g. 公开（公告）号=(CN+ OR HK+ OR MO+ OR TW+) AND 公开（公告）日=20181123
            # — paging such a query can occasionally return no data
            "searchCondition.searchExp": "公开（公告）日=20180501 AND 公开（公告）号=(CN+ OR HK+ OR MO+ OR TW+)",
            # "searchCondition.searchExp": '(公开（公告）日=20181123) AND 公开国家/地区/组织=(HK OR MO OR TW OR (发明类型=("I" OR "U" OR "D") AND 公开国家/地区/组织=(CN)))',
            "searchCondition.dbId": "VDB",
            "searchCondition.searchType": "Sino_foreign",
            "searchCondition.extendInfo['MODE']": "MODE_TABLE",
            "searchCondition.extendInfo['STRATEGY']": "STRATEGY_CALCULATE",
            "searchCondition.originalLanguage": "",
            "searchCondition.targetLanguage": "",
            "wee.bizlog.modulelevel": "0200201",
            "resultPagination.limit": "12"
        }
        self.header = {
            "Accept": "application/json, text/javascript, */*; q=0.01",
            "Accept-Encoding": "gzip, deflate",
            "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
            "Connection": "keep-alive",
            "Host": "pss-system.gov.cn",
            "Origin": "http://pss-system.cnipa.gov.cn",
            "Pragma": "no-cache",
            "Referer": "http://pss-system.cnipa.gov.cn/sipopublicsearch/patentsearch/tableSearch-showTableSearchIndex.shtml",
            "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.62 Safari/537.36",
            "X-Requested-With": "XMLHttpRequest"
        }
        # URL for the initial search request
        self.url = "http://pss-system.cnipa.gov.cn/sipopublicsearch/patentsearch/executeTableSearch0529-executeCommandSearch.shtml"
        # URL for paging through search results
        self.url2 = "http://pss-system.cnipa.gov.cn/sipopublicsearch/patentsearch/showSearchResult-startWa.shtml"

        self.proxys = {
            # "http": "192.168.30.176:8119",
            # "https": "192.168.30.176:8119"
        }
        self.connection = MongoClient(
            "mongodb://xujiangrw:vipdatacenter@192.168.30.171:27017",
            # If True and the server runs without journaling, block until the
            # server has synced all data files to disk.
            fsync=False
        )
        self.db = self.connection['cnipa']
        # Paging form payload; populated by down_first_page.
        self.downpage_value_dicts = {}
        self.datestart = ""
        self.dateend = ""
        self.cf = MyConfigParser(configfile).set_keep_keys_case().read_config()
        self.ipaddr = ""

    def set_result_queue(self, queue):
        # Queue drained by deal_resilt; SQL work items are pushed onto it.
        self.result_queue = queue

    def disasters(self, datestart, dateend):
        """
        Build the search expression with the region list in random order
        (original note: "shuffle the order").

        :param datestart: start date (yyyymmdd) or "" for an open-ended start
        :param dateend: end date (yyyymmdd) or "" for a single-day search
        :return: (searchExp string, shuffled region list)
        """
        list2 = []
        list1 = ["CN+", "HK+", "MO+", "TW+"]
        random.shuffle(list1)
        stringa = " OR ".join(list1)
        # Never send the canonical order — presumably to vary the request
        # fingerprint; TODO confirm the intent.
        if stringa == "CN+ OR HK+ OR MO+ OR TW+":
            stringa = "HK+ OR MO+ OR TW+ OR CN+"

        if datestart == "":
            list2.append("公开（公告）日<={}".format(dateend))
        elif dateend == "":
            list2.append("公开（公告）日={}".format(datestart))
        else:
            list2.append("公开（公告）日<={}".format(dateend))
            list2.append("公开（公告）日>={}".format(datestart))
        list2.append("公开（公告）号=({})".format(stringa))
        # random.shuffle(list2)
        searchExp = " AND ".join(list2)
        self.logger.info("searchExp　搜索的结构是{}".format(searchExp))
        return searchExp, list1

    def set_data(self, datestart, dateend):
        """
        Set the search date range and rebuild searchExp accordingly.

        :param datestart: start date or ""
        :param dateend: end date or ""
        """
        self.datestart = datestart
        self.dateend = dateend
        searchExp, list1 = self.disasters(datestart, dateend)
        self.search_data["searchCondition.searchExp"] = searchExp
        # self.search_data[
        #     "searchCondition.searchExp"] = '(公开（公告）日={}) AND 公开国家/地区/组织=(HK OR MO OR TW OR (发明类型=("I" OR "U" OR "D") AND 公开国家/地区/组织=(CN)))'.format(
        #     date)

    def down_search_page_3(self, sn, proxys, search_data):
        """
        POST the search request (original note: "download in three ways").

        :param sn: requests session carrying the login cookies
        :param proxys: proxy mapping forwarded to the request
        :param search_data: form payload for the search
        :return: (True, response) on success; ("IsAjaxAndJsonData", response)
                 when the anti-crawl marker is present; (False, response) on error
        """
        self.filelogger.info("下载第一页 解析出共有多少页:" + self.datestart)
        # self.header["X-Forwarded-For"] = self.localVal.ipaddr
        BoolResult, errString, r = facade.BaseRequestPost(self.url,
                                                          sn=sn,
                                                          proxies=proxys,
                                                          data=search_data,
                                                          endstring="",
                                                          mark="searchResultRecord",
                                                          allow_redirects=True,
                                                          headers=self.header,
                                                          timeout=(30, 60))
        if BoolResult:
            dicts = json.loads(r.text)
            print(dicts)
            # Total number of hits reported by the server.
            totalCount = dicts["resultPagination"]["totalCount"]
            # 0 / -1 means there is no data for this date.
            if totalCount == "0" or totalCount == 0 or totalCount == '-1' or totalCount == -1:
                self.logger.info("判断到没有数据")
                # NOTE(review): both branches return the same value; the caller
                # re-checks totalCount itself, so this branch only logs.
                return True, r
            else:
                return True, r
        else:
            self.logger.info("下载错误 错误原因" + errString)
            if r and r.text.find("IsAjaxAndJsonData") > -1:
                self.logger.info("IsAjaxAndJsonData")
                return "IsAjaxAndJsonData", r
            self.logger.info("请求出错" + errString)
            return False, r

    def down_first_page(self, sn, proxys):
        """
        Download the first result page and work out how many pages exist.

        Queues INSERTs for the `page` table (one row per page still to be
        downloaded) and parses/stores page 1 directly.

        :return: (bool, message)
        """
        results, r = self.down_search_page_3(sn, proxys, self.search_data)
        if results == "IsAjaxAndJsonData":
            return False, "IsAjaxAndJsonData"
        elif results is False:
            return False, "下载错误"
        if r:
            dicts = json.loads(r.text)
            self.logger.info("down_search_page_3　text is *******************")
            self.logger.info(dicts)
            self.logger.info("down_search_page_3　text is end*******************")
            # Total number of hits reported by the server.
            totalCount = dicts["resultPagination"]["totalCount"]
            self.logger.info("totalCount is {}".format(totalCount))
            # 0 / -1 means no data for this date: record an already-done row.
            if totalCount == "0" or totalCount == 0 or totalCount == '-1' or totalCount == -1:
                sql = "insert ignore into `page` (`pub_date`,`page`,`allcount`,`parameter`,`stat`) value ('%s','%s','%s','%s',1)"
                sql = sql % (self.datestart + ":" + self.dateend, "0", "0", "")
                self.logger.info(sql)
                # self.mysqlutils.ExeSqlToDB(sql, errExit=True)
                self.result_queue.put(("one", sql))
                return True, ""
            # There is data: build the form payload the paging requests need.
            searchKeywords = dicts["searchResultDTO"]["dealSearchKeywords"]
            print(searchKeywords)
            self.downpage_value_dicts["searchCondition.searchKeywords"] = ",".join(searchKeywords)

            strategy = dicts["searchCondition"]["strategy"]
            if strategy == "null":
                strategy = ""
            self.downpage_value_dicts["searchCondition.strategy"] = strategy
            self.downpage_value_dicts["searchCondition.resultMode"] = "SEARCH_MODE"
            self.downpage_value_dicts["searchCondition.targetLanguage"] = ""
            self.downpage_value_dicts["searchCondition.literatureSF"] = dicts["searchResultDTO"]["literatureSF"]
            self.downpage_value_dicts["searchCondition.dbId"] = ""
            self.downpage_value_dicts["searchCondition.executableSearchExp"] = dicts["searchResultDTO"][
                "executableSearchExp"]
            self.downpage_value_dicts["searchCondition.searchExp"] = dicts["searchCondition"]["searchExp"]
            self.downpage_value_dicts["resultPagination.limit"] = "12"
            self.downpage_value_dicts["resultPagination.sumLimit"] = "10"
            self.downpage_value_dicts["resultPagination.start"] = "36"
            self.downpage_value_dicts["resultPagination.totalCount"] = str(totalCount)
            self.downpage_value_dicts["searchCondition.sortFields"] = "-APD, +PD"
            self.downpage_value_dicts["searchCondition.searchType"] = "Sino_foreign"
            self.downpage_value_dicts["searchCondition.originalLanguage"] = ""
            self.downpage_value_dicts["searchCondition.extendInfo['MODE']"] = "MODE_TABLE"
            self.downpage_value_dicts["searchCondition.extendInfo['STRATEGY']"] = "STRATEGY_CALCULATE"

            # Everything fits on one page (12 results per page): queue it done.
            if int(totalCount) <= 12:
                sql = "insert ignore into `page` (`pub_date`,`page`,`allcount`,`parameter`,`stat`) value ('%s','%s','%s','%s',1)"
                self.downpage_value_dicts["resultPagination.start"] = str(0)
                jsonmsg = json.dumps(self.downpage_value_dicts, ensure_ascii=False)
                sql = sql % (self.datestart + ":" + self.dateend, "1", totalCount, pymysql.escape_string(jsonmsg))
                # self.mysqlutils.ExeSqlToDB(sql, errExit=True)
                self.result_queue.put(("one", sql))
                return True, ""

            # Queue one pending `page` row per remaining page (2..N).
            listvalue = []
            for i in range(2, int(math.ceil(int(totalCount) / 12) + 1)):
                self.downpage_value_dicts["resultPagination.start"] = str((i - 1) * 12)
                jsonmsg = json.dumps(self.downpage_value_dicts, ensure_ascii=False)
                listvalue.append((self.datestart + ":" + self.dateend, str(i), totalCount, jsonmsg))
            sql = "insert ignore into `page` (`pub_date`,`page`,`allcount`,`parameter`) value (%s,%s,%s,%s)"
            # self.mysqlutils.ExeSqlMany(sql, listvalue, errExit=True)
            self.result_queue.put(("many", sql, listvalue))
            # Parse the current response — it already is page 1, so no extra
            # download is needed for the first page.
            self.para_page_html(r, "1")
            try:
                self.db.pagehtml.insert({"date": self.datestart + ":" + self.dateend, "page": "1", "html": r.text})
            except DuplicateKeyError as e:
                warnings.warn(str(e))
            return True, ""
        else:
            self.logger.info("判断到r无值")
            return False, ""

    def para_page_html(self, r, page):
        """
        Parse one page of search-result JSON and queue an INSERT per record.

        :param r: response whose body is the result JSON
        :param page: page number (string), used for context only
        :return: True if at least one record was parsed, False otherwise
        """
        count = 0
        dicts = json.loads(r.text)
        listsipo = dicts["searchResultDTO"]["searchResultRecord"]
        for onetable in listsipo:
            count += 1
            requestsid = onetable["fieldMap"]["ID"]
            # Strip the <FONT> highlight tags the server wraps around matches.
            app_no = onetable["fieldMap"]["AP"].replace("<FONT>", "").replace("</FONT>", "")
            app_date = onetable["fieldMap"]["APD"].replace("<FONT>", "").replace("</FONT>", "")
            pub_no = onetable["fieldMap"]["PN"].replace("<FONT>", "").replace("</FONT>", "")
            pub_date = onetable["fieldMap"]["PD"].replace("<FONT>", "").replace("</FONT>", "")
            title = onetable["fieldMap"]["TIVIEW"]
            author = onetable["fieldMap"]["INVIEW"]
            cpnum = onetable["fieldMap"]["CPNUM"]
            msg = json.dumps(onetable)
            # Dates appear dot-separated (e.g. yyyy.mm.dd) — normalise them.
            app_date = app_date.replace(".", "")
            pub_date = pub_date.replace(".", "")
            sql = "insert ignore into `article`(rawid,app_no,pub_no,app_date,pub_date,json_info,cpnum) values('%s','%s','%s','%s','%s','%s','%s')"
            msg = pymysql.escape_string(msg)
            sql = sql % (requestsid, app_no, pub_no, app_date, pub_date, msg, cpnum)

            # self.mysqlutils.ExeSqlToDB(sql)
            self.result_queue.put(("one", sql))
            #
            # dicts = {"requestsid": requestsid,
            #          "app_no": app_no,
            #          "app_date": app_date,
            #          "pub_no": pub_no,
            #          "region": pub_no[:2],
            #          "pub_date": pub_date,
            #          "title": title,
            #          "author": author,
            #          "date": self.date,
            #          "page": page,
            #          "cpnum": cpnum,
            #          "stat": 0}
            # try:
            #     # 存入 pub_all_id
            #     self.db.pub_all_id.insert(dicts)
            # except DuplicateKeyError as e:
            #     warnings.warn(str(e))
            # dicts["pageidinfo"] = msg
            # del dicts["date"]
            # del dicts["page"]
            # dicts["downdate"] = getDateTime()
            # try:
            #     # 每条数据存入 pageidjson pageidjson的数据更全
            #     self.db.pageidjson.insert(dicts)
            # except DuplicateKeyError as e:
            #     warnings.warn(str(e))
        if count == 0:
            self.logger.info(listsipo)
            self.logger.info("下载的数据有误 将状态写为-1")
            return False
        return True

    def down_page(self, pub_date, page, para, sn, proxys, threadval):
        """
        Download one result page and parse it.

        :param pub_date: "start:end" date-range key as stored in `page`
        :param page: page number to fetch
        :param para: JSON-encoded form payload stored with the page row
        :param sn: requests session
        :param proxys: proxy mapping
        :param threadval: per-thread state; is_datedownpage toggles paging mode
        :return: (bool, message)
        """
        self.datestart = pub_date.split(":")[0]
        self.dateend = pub_date.split(":")[1]
        # time.sleep(2)
        datedownpage = json.loads(para)
        # Only to shuffle the order of the expression terms.
        ############################################################
        searchExp, list1 = self.disasters(self.datestart, self.dateend)
        datedownpage["searchCondition.literatureSF"] = searchExp
        datedownpage["searchCondition.searchExp"] = searchExp
        VDB = "VDB:(({}))".format(searchExp)
        VDB = VDB.replace("公开（公告）日", "PD")
        list2 = []
        for value in list1:
            list2.append("UNI_PN='{}'".format(value.replace("+", "%")))
        Strings = "({})".format(" OR ".join(list2))
        pattern = re.compile(r'(公开（公告）号=\(.*\))')
        VDB = re.sub(pattern, Strings, VDB)
        datedownpage["searchCondition.executableSearchExp"] = VDB
        # list3 = []
        # for value in list1:
        #     value = value.replace("+", "")
        #     list3.append("[%s][ ]{0,}[%s][ ]{0,}[ ]{0,}" % (value[0], value[1]))
        # if self.datestart:
        #     list3.append("[%s][ ]{0,}[%s][ ]{0,}[%s][ ]{0,}[%s][ ]{0,}[.][ ]{0,}[%s][ ]{0,}[%s][ ]{0,}[.][ ]{0,}[%s][ ]{0,}[%s][ ]{0,}" % tuple(self.datestart))
        # if self.dateend:
        #     list3.append("[%s][ ]{0,}[%s][ ]{0,}[%s][ ]{0,}[%s][ ]{0,}[.][ ]{0,}[%s][ ]{0,}[%s][ ]{0,}[.][ ]{0,}[%s][ ]{0,}[%s][ ]{0,}" % tuple(self.dateend))
        # datedownpage["searchCondition.searchKeywords"] = ",".join(list3)
        #################################################################over
        # print("***************************************************************")
        # print(datedownpage)
        # print("****************************************************************")
        # sys.exit(-1)
        if datedownpage["searchCondition.strategy"] in [None, 'None', '', 'null']:
            datedownpage["searchCondition.strategy"] = ""
        self.logger.info(proxys)
        # self.header["X-Forwarded-For"] = self.localVal.ipaddr
        if not hasattr(threadval, "is_datedownpage"):
            threadval.is_datedownpage = 0
        elif threadval.is_datedownpage:
            self.logger.info("设置为翻页模式*************")
            datedownpage["searchCondition.executableSearchExp"] = ""
            datedownpage["searchCondition.dbId"] = "VDB"
            datedownpage["resultPagination.totalCount"] = "-1"
        else:
            pass
        print(datedownpage)
        BoolResult, errString, r = facade.BaseRequestPost(self.url2,
                                                          sn=sn,
                                                          proxies=proxys,
                                                          data=datedownpage,
                                                          endstring="",
                                                          mark="searchResultRecord",
                                                          allow_redirects=True,
                                                          headers=self.header,
                                                          timeout=(30, 60))
        self.logger.info("***当前的 threadval.is_datedownpage 的值是{}".format(threadval.is_datedownpage))
        if BoolResult:
            # "非法索引" ("illegal index") marks an unfetchable page: stat=-2.
            if r.text.find("非法索引") != -1:
                self.logger.info(datedownpage)
                self.logger.error("出现非法索引,翻页失败")
                sql = "update `page` set stat=-2 where `page`='{}' and pub_date='{}' ".format(page, pub_date)
                # self.mysqlutils.ExeSqlToDB(sql)
                self.result_queue.put(("one", sql))
                return True, "出现非法索引,翻页失败"
            resultbool = self.para_page_html(r, str(page))
            try:
                # Store each page's raw body in the pagehtml collection.
                self.db.pagehtml.insert({"date": pub_date, "page": page, "html": r.text})
            except DuplicateKeyError as e:
                warnings.warn(str(e))
            stat = 1
            if not resultbool:
                # Parse failed: flip the paging-mode flag and mark the row -1.
                threadval.is_datedownpage = threadval.is_datedownpage ^ 1
                stat = -1
            sql = "update `page` set stat={} where `page`='{}' and pub_date='{}' ".format(stat, page, pub_date)
            # self.mysqlutils.ExeSqlToDB(sql)
            self.result_queue.put(("one", sql))
            return True, ""
        else:
            self.logger.info("down_page 下载失败 消息为{}".format(errString))
            if r is not None:
                if r.status_code == 404:
                    return False, "404"
                elif r.text.find("IsAjaxAndJsonData") > -1:
                    return False, "IsAjaxAndJsonData"
                else:
                    self.logger.error(r.text)
            return False, errString

    def selectDB(self):
        """Return all pending page rows (stat=0)."""
        sql = "select pub_date,`page`,parameter from `page` where stat=0"
        rows = self.mysqlutils.SelectFromDB(sql)
        return rows

    def select_date(self):
        """Return the earliest pub_date present in `page` (single row)."""
        sql = "select pub_date from `page` GROUP BY pub_date ORDER BY pub_date ASC limit 1"
        rows = self.mysqlutils.SelectFromDBFetchOne(sql)
        return rows

    def func(self, n, threadval):
        """
        Dispatch one task tuple.

        :param n: ("down_search_page", start, end) or
                  ("down_page", pub_date, page, parameter)
        :param threadval: per-thread state (sn, proxys, cookie bookkeeping)
        :return: True on success / retryable outcome, False on failure
        """
        self.threadval = threadval
        # time.sleep(3)
        sn = threadval.sn
        proxys = threadval.proxys
        if n[0] == "down_search_page":
            # A brand-new date: run the search and enumerate its pages.
            """
            这个是新的日期
            """
            self.logger.info("down_search_page")
            self.set_data(n[1], n[2])
            resultbool = self.down_search_page(sn, proxys)
            if not resultbool:
                return False
            # Download the first page of the new date to count the pages.
            boolresult, msg = self.down_first_page(sn, proxys)
            self.logger.info(msg)
            if not boolresult:
                return False
            sql = "update `needdownpage` set stat=1 where `startdate`='{}' and `enddate`='{}'".format(n[1], n[2])
            self.result_queue.put(("one", sql))
        elif n[0] == "down_page":
            # Paging task.  down_page may come back with "IsAjaxAndJsonData"
            # even though the login phase succeeded (see note string below).
            """
            这个是翻页
            down_page 有可能 出现 IsAjaxAndJsonData_{"generalSearchItemList":null,"resultPagination":null,"resultQC":null,"searchCondition":null,"searchMenuUrl":null,"searchMode":null,"searchResultDTO":null}
            且在登陆阶段正常
            """
            time.sleep(2)
            self.logger.info("down_page")
            boolresult, msg = self.down_page(n[1], n[2], n[3], sn, proxys, threadval)
            if msg == "404":
                results = self.down_search_page(sn, proxys)
                if not results:
                    self.logger.error("遭遇404后搜索失败，程序逻辑需要修复")
                else:
                    self.logger.info("搜索成功 虽然本次会跳出循环但下次应该会成功")
                return True
            elif msg == "IsAjaxAndJsonData":
                # Status code is 200 but the payload is empty — apparently an
                # anti-crawl throttle per user; it usually recovers the
                # following day.  Count the incident against the user.
                self.logger.error(
                    "IsAjaxAndJsonData proxys:{},user:{},cookies:{}".format(proxys,
                                                                            threadval.rows["user"],
                                                                            threadval.rows["_id"]))
                sql = "update `user` set isajxcount=isajxcount+1,`isajxupdatetime`='{}' where `username`='{}'".format(
                    MDateTime.get_msec_time()[0], threadval.rows["user"])
                self.result_queue.put(("one", sql))
                return False
            elif not boolresult:
                self.logger.error(msg)
                return False
            else:
                return True
        else:
            self.logger.info("队列出错")
            return False

    def deal_resilt(self, result):
        """
        Execute one queued SQL work item on MySQL.

        :param result: ("one", sql) or ("many", sql, values)
        :raises Exception: when the tuple tag is unrecognised
        """
        if result[0] == "one":
            self.mysqlutils.ExeSqlToDB(result[1])
        elif result[0] == "many":
            sql = result[1]
            values = result[2]
            self.mysqlutils.ExeSqlMany(sql, values)
        else:
            raise Exception("sql有情况没被判断到{}".format(result))


"""
********************** 多进程分布式代码 (multi-process distributed-worker code) **********************
"""
from multiprocessing.managers import BaseManager


# Create a QueueManager mirroring the one in task_master.py:
class QueueManager(BaseManager):
    """BaseManager subclass used purely as a namespace for the remote queues."""


class NodeTask(object):
    """Worker-side endpoint of the distributed task/result queues.

    Connects to the QueueManager server started by task_master.py and
    exposes its task and result queue proxies.
    """

    def __init__(self):
        self.register()
        # Machine running task_master.py.
        self.server_addr = '192.168.30.123'
        print('Connect to server %s...' % self.server_addr)
        # Port and authkey must match task_master.py exactly.
        address = (self.server_addr, 5001)
        self.m = QueueManager(address=address, authkey=b'abc')
        self.task = None
        self.result = None

    def register(self):
        # Only the names are registered here — the queues themselves live on
        # the server, so no callable is supplied.
        for queue_name in ('get_task_queue', 'get_result_queue'):
            QueueManager.register(queue_name)

    def conn(self):
        # Establish the network connection to the manager server.
        self.m.connect()

    def set_task_result_obj(self):
        # Fetch the remote queue proxies from the connected manager.
        self.task = self.m.get_task_queue()
        self.result = self.m.get_result_queue()


class SearchThreadRun(MThreadingRun):
    """Thread-pool runner wiring SearchDown to the distributed task queues."""

    def __init__(self, num):
        self.down = SearchDown()
        self.args = None
        super(SearchThreadRun, self).__init__(num, self.down.filelogger)
        self.down.set_result_queue(self.thread_pool.result_queue)
        self.down.localVal = self.thread_pool.localVal
        # Connect to the distributed task master and grab its queue proxies.
        self.node = NodeTask()
        self.node.conn()
        self.node.set_task_result_obj()
        self.thread_pool.set_is_static_max(True)

    def getTask(self, *args, **kwargs):
        """Fetch up to 100 pending page rows from MySQL (hard-coded date key)."""
        sql = "select pub_date,`page`,parameter from `page` where stat<=0 and pub_date=':19850101' ORDER BY pub_date ASC limit 100"
        rows = self.down.mysqlutils.SelectFromDB(sql)
        return rows

    def setTask(self, results=None, *args, **kwargs):
        """Queue a down_page job for every fetched row, then pause briefly."""
        for row in results:
            n = ("down_page", row[0], row[1], row[2])
            self.add_job(self.func, n, *args, **kwargs)
        time.sleep(10)

    def dealresult(self, *args, **kwargs):
        """Drain accumulated SQL work items into MySQL via deal_resilt."""
        for result in self.results:
            self.down.deal_resilt(result)

    def check_is_need_cookie(self):
        """
        Return True when any thread is waiting for a cookie, or when the
        pool is still below its maximum worker count.
        """
        for threadname in self.thread_pool.thread_pool_dicts:
            thread = self.thread_pool.thread_pool_dicts[threadname]["thread"]
            if thread.threadval.is_sleep:
                return True
        if self.thread_pool.get_thread_num() < self.thread_pool.max_workers:
            return True
        return False

    def setProxy(self, proxysList=None):
        # Grow the pool by one thread; each new thread acquires its own cookie.
        self.thread_pool.add_thread(1)
        time.sleep(30)

    def thread_pool_hook(self, thread_pool_dicts, thread, args, kwargs) -> dict:
        # Initialise threadval only for threads belonging to the main pool.
        if thread_pool_dicts is self.thread_pool.thread_pool_dicts:
            self.init_threadval(thread)
        return {}

    def fun(self, threadval, *args, **kwargs):
        """
        Per-thread worker: (re)acquire a cookie/proxy when needed, then run
        one task via SearchDown.func, retiring the cookie after its quota.
        """
        self.logger.info("获取一个需要请求的数据")
        # Once a cookie has used up its task quota, is_sleep is set True.
        # NOTE(review): the original comment said 10 tasks, but the code
        # below checks count > 30 — confirm the intended quota.
        if threadval.is_sleep:
            self.logger.info("cookie 到达上限 等待新的cookie")
            boolresult, msg = self.down.set_cookie_from_db(updatetime="searchupdatetime",
                                                           source="kuaidaili",
                                                           is_time=True,
                                                           timesleep=60 * 3)
            self.logger.info(msg)
            if not boolresult:
                # msg values here: "代理为空"/"cookie表为空" (no proxy / empty
                # cookie table) → short wait; an int → sleep that many seconds.
                if msg == "代理为空" or msg == "cookie表为空":
                    time.sleep(20)
                elif isinstance(msg, int):
                    self.logger.info("开始睡眠")
                    time.sleep(msg)
                return
            self.logger.info("开始设置新的cookies")
            # Deletion is allowed from inside the thread — this marks a point
            # at which the thread may safely be removed.
            threadval.sn = requests.session()
            threadval.proxys = self.down.localVal.proxys
            threadval.cookies = self.down.localVal.cookies
            threadval.user = msg["user"]
            threadval.cookies_id = msg["_id"]
            threadval.rows = msg
            threadval.sn.cookies.update(threadval.cookies)
            self.down.set_ipaddr(threadval.proxys)
            threadval.count = 0
            threadval.is_sleep = False
        # Must not be deleted while a task is in flight.
        threadval.thread_delete = False
        self.logger.info("当前cookie已获取{},共30个".format(threadval.count))
        if threadval and threadval.cookies and threadval.proxys:
            if args and args[0]:
                n = args[0]
                if threadval.count > 30:
                    # Quota exhausted: drop the session and wait for a cookie.
                    self.logger.info("判断到count 大于指定数量")
                    threadval.sn = None
                    threadval.is_sleep = True
                    threadval.count = 0
                    return
                if threadval.sn:
                    resultsbool = self.down.func(n, threadval)
                    if not resultsbool:
                        boolresult, msg = self.down.is_login_and_ip(threadval.sn, threadval.proxys, threadval.cookies)
                        self.logger.info("检查cookie登陆情况的msg:{}".format(msg))
                        if not boolresult:
                            # Login check failed: mark the cookie dead in Mongo.
                            self.down.db.cookies.update(
                                {"_id": threadval.rows["_id"], "source": threadval.rows["source"]},
                                {"$set": {"stat": -1,
                                          "searchupdatetime": MDateTime.get_now_datetime(
                                              struct_time=time.localtime(time.time()))}})
                            threadval.is_sleep = True
                    threadval.count += 1
                else:
                    # No live session: rebuild it from the stored cookies.
                    threadval.sn = requests.session()
                    threadval.sn.cookies.update(threadval.cookies)
                    self.down.set_ipaddr(threadval.proxys)
                    resultsbool = self.down.func(n, threadval)
                    if not resultsbool:
                        boolresult, msg = self.down.is_login_and_ip(threadval.sn, threadval.proxys, threadval.cookies)
                        if not boolresult:
                            self.logger.info("检查cookie登陆情况的msg:{}".format(msg))
                            self.down.db.cookies.update(
                                {"_id": threadval.rows["_id"], "source": threadval.rows["source"]},
                                {"$set": {"stat": -1,
                                          "searchupdatetime": MDateTime.get_now_datetime(
                                              struct_time=time.localtime(time.time()))}})
                    threadval.count += 1
            else:
                self.logger.info("任务不存在")
        else:
            self.logger.info("cookie 或代理不存在")

    def init_threadval(self, thread):
        """
        Initialise the thread manager's threadval object: every thread starts
        in the "needs a cookie" state.
        """
        thread.threadval.is_sleep = True


if __name__ == "__main__":
    # Start with zero workers; threads are added later via setProxy().
    down = SearchThreadRun(0)
    down.thread_pool.set_work_queue(5)
    # NOTE(review): is_static_max is toggled False -> True around
    # set_max_workers(1); presumably the False window allows the max-worker
    # change to take effect — confirm against MThreadingRun's semantics.
    down.thread_pool.set_is_static_max(False)
    down.thread_pool.set_max_workers(1)
    down.thread_pool.set_is_static_max(True)
    down.run()
