import copy
import json
import pickle
import sys
import time
import warnings

import click
import facade
import requests
from pymongo import MongoClient
from pymongo.errors import DuplicateKeyError
from xjlibrary.configread import MyConfigParser
from xjlibrary.mdatetime.mtime import getTodayDate, getDateTime, MDateTime
from xjlibrary.our_file_dir import BaseDir, ImageFile
from xjlibrary.tools.BaseIp import get_ip

curPath = BaseDir.get_file_dir_absolute(__file__)
upPath = BaseDir.get_upper_dir(curPath, -1)
configfile = BaseDir.get_new_path(upPath, "db.ini")
topPath = BaseDir.get_upper_dir(upPath, -2)
coverPath = BaseDir.get_new_path(topPath, "download", "sipogov", "download", "cover")
BaseDir.create_dir(coverPath)
cookiedir = BaseDir.get_new_path(upPath, "cookie")
import sys

sys.path.append(upPath)
from Step1_login import login_once


class DownDetail(object):
    """Download CNIPA (pss-system.cnipa.gov.cn) patent detail data.

    For each pending row in the ``cnipa`` MongoDB database this class
    fetches the abstract, full text, cover image and related info
    (legal status, patent family, citations) through a logged-in
    ``requests`` session whose cookies are produced by ``Step1_login``.
    """

    def __init__(self):
        self.logger = facade.get_streamlogger()
        # db.ini lives one directory above this file; keep key case as-is.
        self.cf = MyConfigParser(configfile).set_keep_keys_case().read_config()
        self.mysqlutils = facade.MysqlUtiles(configfile,
                                             "db",
                                             logger=self.logger)
        self.connection = MongoClient(
            "mongodb://xujiangrw:vipdatacenter@192.168.30.171:27017",
            # If True and the server is running without journaling, block
            # until the server has synced all data files to disk.
            fsync=False
        )
        self.db = self.connection['cnipa']
        # Proxy dict for requests ({"http": ..., "https": ...}) or None.
        self.proxy = None
        # Credentials of the account this worker logs in with.
        self.userloginname = ""
        self.j_username = ""
        self.j_password = ""
        self.sn = requests.session()

        # Detail page: basic info and abstract.
        self.url2 = "http://pss-system.cnipa.gov.cn/sipopublicsearch/patentsearch/viewAbstractInfo0529-viewAbstractInfo.shtml"
        self.postdata2 = {
            "nrdAn": "TW105212609",
            "cid": "TWM532100105212609",
            "sid": "TWM532100105212609",
            "wee.bizlog.modulelevel": "0201101"
        }
        self.header2 = {
            "Accept": "application/json, text/javascript, */*; q=0.01",
            "Accept-Encoding": "gzip, deflate",
            "Accept-Language": "zh-CN,zh;q=0.9",
            "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
            "Host": "www.pss-system.gov.cn",
            "Origin": "http://pss-system.cnipa.gov.cn",
            "Proxy-Connection": "keep-alive",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36",
            "X-Requested-With": "XMLHttpRequest"
        }
        # Full-text download.
        self.url3 = "http://pss-system.cnipa.gov.cn/sipopublicsearch/patentsearch/showFullText0529-viewFullText.shtml"
        self.postdata3 = {
            "nrdAn": "TW105212609",
            "cid": "TWM532100105212609",
            "sid": "TWM532100105212609",
        }
        self.header3 = {
            "Accept": "application/json, text/javascript, */*; q=0.01",
            "Accept-Encoding": "gzip, deflate",
            "Accept-Language": "zh-CN,zh;q=0.9",
            "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
            "Host": "www.pss-system.gov.cn",
            "Origin": "http://pss-system.cnipa.gov.cn",
            # "Proxy-Connection": "keep-alive",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36",
            "X-Requested-With": "XMLHttpRequest"
        }
        # This request takes no POST parameters; it reuses header3.
        self.url4 = "http://pss-system.cnipa.gov.cn/sipopublicsearch/patentsearch/showViewList-showViewList.shtml"

        # Family patents from the detail page (getting ``fn`` first requires
        # a url4 request; uses header3).
        self.url5 = "http://pss-system.cnipa.gov.cn/sipopublicsearch/patentsearch/showPatentInfo0405-showPatentInfo.shtml"
        self.postdata5 = {
            "literaInfo.nrdAn": "",  # e.g. HK15105078
            "literaInfo.nrdPn": "",  # e.g. HK1202221A2
            "literaInfo.fn": ""  # e.g. 54196616
        }

        # Resolve figure/image URLs (shares header3).
        self.url7 = "http://pss-system.cnipa.gov.cn/sipopublicsearch/patentsearch/retrieveUrls.shtml"
        self.postdata7 = {
            "figureUrl": "3964135814",
            "rids[0]": "3964135814",
            "wee.bizlog.modulelevel": "0201203"
        }

        # Paging for citation documents: the server returns only 5 per
        # request (uses header3).
        self.url8 = "http://pss-system.cnipa.gov.cn/sipopublicsearch/patentsearch/showQuotationLitera-queryPatcitList.shtml"
        self.postdata8 = {
            "quotationQC.nrdAn": "CN201110364999",
            "quotationQC.nrdPn": "CN102445619A",
            "wee.bizlog.modulelevel": "0201702",
            "papagination.start": "5"
        }

        # Cover-image URL of the record currently being processed.
        self.imageurl = ""

    def down_url2(self):
        """Download the detail page: basic info and abstract.

        :return: (True, response) on success, (False, response) otherwise.
        """
        BoolResult, errString, r = facade.BaseRequestPost(self.url2,
                                                          sn=self.sn,
                                                          data=self.postdata2,
                                                          endstring="",
                                                          proxies=self.proxy,
                                                          mark="abstractItemList",
                                                          allow_redirects=True,
                                                          headers=self.header2,
                                                          timeout=(30, 60))
        if BoolResult:
            self.logger.info("下载摘要成功")
            return True, r
        else:
            self.logger.info("下载摘要失败")
            return False, r

    def down_url3(self):
        """Download the full text.

        :return: (True, response) on success, (False, response) otherwise.
        """
        BoolResult, errString, r = facade.BaseRequestPost(self.url3,
                                                          sn=self.sn,
                                                          data=self.postdata3,
                                                          endstring="",
                                                          proxies=self.proxy,
                                                          mark="fullTextDTO",
                                                          allow_redirects=True,
                                                          headers=self.header3,
                                                          timeout=(30, 60))
        if BoolResult:
            self.logger.info("下载全文成功")
            return True, r
        else:
            self.logger.info("下载全文失败")
            return False, r

    def down_url4(self):
        """Download the "show view list" page (GET apparently works too).

        This is a prerequisite for downloading the other info: one key
        (``fn``) from its response is needed by :meth:`down_url5`.

        :return: (True, response) on success, (False, response) otherwise.
        """
        BoolResult, errString, r = facade.BaseRequestPost(self.url4,
                                                          sn=self.sn,
                                                          endstring="",
                                                          proxies=self.proxy,
                                                          mark="literaInfo",
                                                          allow_redirects=True,
                                                          headers=self.header3,
                                                          timeout=(30, 60))
        if BoolResult:
            self.logger.info("下载showviewlist成功")
            return True, r
        else:
            self.logger.info("下载showviewlist失败")
            return False, r

    def down_url5(self):
        """Download related info: legal status, patent family, etc.

        :return: (True, response) on success, (False, response) otherwise.
        """
        header = copy.deepcopy(self.header3)
        # NOTE(review): this endpoint appears to require an explicit
        # Content-Length header; requests would normally compute it itself.
        header["Content-Length"] = "{0}".format(self.get_content_length(self.postdata5))
        BoolResult, errString, r = facade.BaseRequestPost(self.url5,
                                                          sn=self.sn,
                                                          data=self.postdata5,
                                                          endstring="",
                                                          proxies=self.proxy,
                                                          mark="lawStateList",
                                                          allow_redirects=True,
                                                          headers=header,
                                                          timeout=(30, 60))
        if BoolResult:
            self.logger.info("下载法律状态等其他信息成功")
            return True, r
        else:
            self.logger.info("下载法律状态等其他信息失败")
            return False, r

    def down_search_page(self):
        """Issue one "page used" request after login.

        Historically a search was needed before other requests became
        valid on a fresh login (a plain search turned out to be
        ineffective, so the old search-based implementation was dropped).

        :return: True on success, False otherwise.
        """
        url = "http://pss-system.cnipa.gov.cn/sipopublicsearch/patentsearch/pageIsUesd-pageUsed.shtml"
        BoolResult, errString, r = facade.BaseRequestPost(url,
                                                          sn=self.sn,
                                                          proxies=self.proxy,
                                                          endstring="",
                                                          mark="IP",
                                                          allow_redirects=True,
                                                          headers=self.header3,
                                                          timeout=(30, 60))
        if BoolResult:
            self.logger.info("搜索成功")
            return True
        else:
            self.logger.info("搜索失败")
            return False

    def down_url7(self):
        """Fetch the URL(s) of the cover image.

        :return: (True, response) on success, (False, response) otherwise.
        """
        BoolResult, errString, r = facade.BaseRequestPost(self.url7,
                                                          sn=self.sn,
                                                          data=self.postdata7,
                                                          endstring="",
                                                          proxies=self.proxy,
                                                          mark="figureUrls",
                                                          allow_redirects=True,
                                                          headers=self.header3,
                                                          timeout=(30, 60))
        if BoolResult:
            self.logger.info("down_url7 图片地址下载成功")
            return True, r
        else:
            self.logger.info("down_url7 图片地址下载失败")
            self.logger.info(self.postdata7)
            return False, r

    def down_image_cover(self, requestsid):
        """Download ``self.imageurl`` and save it as <coverPath>/<requestsid>.jpg.

        :param requestsid: record id used as the image file name.
        :return: True on success, False otherwise.
        """
        BoolResult, errString, r = facade.BaseRequest(self.imageurl,
                                                      sn=self.sn,
                                                      endstring="",
                                                      proxies=self.proxy,
                                                      mark="",
                                                      allow_redirects=True,
                                                      timeout=(30, 60))
        if BoolResult:
            self.logger.info("下载图片成功")
            BaseDir.single_write_wb_file(r.content, BaseDir.get_new_path(coverPath, requestsid + ".jpg"))
            return True
        else:
            self.logger.info("下载图片失败")
            return False

    def get_content_length(self, data):
        """Compute the Content-Length for a form-encoded POST body.

        ``key1=val1&key2=val2...``: one '=' per pair plus one '&' between
        pairs gives ``2 * len(keys) - 1`` separators, plus the raw length
        of all keys and values.

        NOTE(review): this ignores URL-encoding of keys/values — presumably
        fine for the ASCII-only payloads used here; verify before reuse.

        :param data: dict of str -> str form fields.
        :return: body length as an int.
        """
        length = len(data.keys()) * 2 - 1
        total = ''.join(list(data.keys()) + list(data.values()))
        length += len(total)
        return length

    def selectdb(self):
        """Fetch up to 100 pending rows (stat == 0) from MongoDB.

        :return: a pymongo cursor over ``pageidjson`` documents.
        """
        return self.db.pageidjson.find({"stat": 0}).limit(100)

    def set_cookie(self):
        """Load login cookies from disk into the session.

        Cookie files are keyed by proxy IP, date and user name; the
        plain file is preferred over the ``_login`` variant. If neither
        exists the session is left unchanged (no automatic re-login).
        """
        ip = get_ip(self.proxy)
        cookiepath = BaseDir.get_new_path(cookiedir, ip + "_" + str(
            getTodayDate()) + "_" + self.userloginname + ".txt")
        cookiepath_login = BaseDir.get_new_path(cookiedir,
                                                ip + "_" + str(
                                                    getTodayDate()) + "_" + self.userloginname + "_login.txt")

        if BaseDir.is_file_exists(cookiepath):
            self.logger.info("set cookie:" + cookiepath)
            with open(cookiepath, "rb") as f:
                self.sn.cookies.update(pickle.load(f))
        elif BaseDir.is_file_exists(cookiepath_login):
            self.logger.info("set cookie:" + cookiepath_login)
            with open(cookiepath_login, "rb") as f:
                self.sn.cookies.update(pickle.load(f))

    def login(self):
        """Run the login routine and load the resulting cookies.

        On an "IPError" the IP is considered banned and the worker sleeps
        30 minutes hoping the proxy rotates; on "await" the captcha
        system asks for a wait (caller should consider sleeping).
        """
        result, msg = login_once(self.userloginname, self.j_username, self.j_password, self.proxy, self.logger)
        if not result:
            if msg == "IPError":
                print("ip被封了 休息30分钟看是否切换ip")
                time.sleep(30 * 60)
            if msg == "await":
                print("当前验证码系统需要等待,建议sleep")

        self.set_cookie()

    def set_user(self):
        """Claim the least-recently-used active account for this process.

        Picks the active (stat=1) user with the oldest ``createtime``,
        bumps its ``createtime`` so other workers pick a different one,
        and stores the credentials on this instance.
        """
        sql = "select `username`,`j_username`,`j_password` from `user` where stat=1 order by `createtime` ASC limit 1"
        row = self.mysqlutils.SelectFromDBFetchOne(sql)
        # NOTE(review): assumes at least one stat=1 user row exists;
        # ``row`` being None would raise here — confirm table contents.
        sql = "update `user` set `createtime`='{}' where `username`='{}'".format(
            MDateTime.get_msec_time()[0], row[0])
        self.mysqlutils.ExeSqlToDB(sql)
        self.userloginname = row[0]
        self.j_username = row[1]
        self.j_password = row[2]

    def set_proxys(self, proxy=None):
        """Set the proxy dict used for all requests (None disables it).

        :param proxy: requests-style proxy mapping or None.
        """
        self.proxy = proxy

    def is_login(self):
        """Check whether the current cookies are still valid.

        Loads cookies, fetches the portal index page and looks for the
        "please log in" marker.

        :return: True if logged in, False otherwise (including when the
                 request itself fails).
        """
        self.set_cookie()
        url = "http://pss-system.cnipa.gov.cn/sipopublicsearch/portal/uiIndex.shtml"
        header = {
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
            "Accept-Encoding": "gzip, deflate",
            "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
            "Cache-Control": "no-cache",
            "Host": "www.pss-system.gov.cn",
            "Pragma": "no-cache",
            "Proxy-Connection": "keep-alive",
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36"
        }
        BoolResult, errString, r = facade.BaseRequest(url,
                                                      sn=self.sn,
                                                      proxies=self.proxy,
                                                      mark="欢迎访问专利检索及分析",
                                                      allow_redirects=True,
                                                      headers=header,
                                                      timeout=(30, 60))
        if BoolResult:
            if r.text.find("请登录") > -1:
                # The page asks for credentials: cookies are invalid.
                print("登陆是失败的，cookie 无效")
                return False
            else:
                print("登陆是成功的，cookie有效")
                return True
        # Fix: previously fell through and implicitly returned None when
        # the request failed; return an explicit False for callers that
        # branch on the result.
        return False

    def search_detail(self, id):
        """Run the "jump to view" search for one record id.

        Empirically, issuing this search can clear the 417 error that
        other endpoints sometimes return.

        :param id: record id used in the VDB search expression.
        :return: (True, response) on success, (False, response) otherwise.
        """
        url = "http://pss-system.cnipa.gov.cn/sipopublicsearch/patentsearch/showViewList-jumpToView.shtml"
        header = {
            # "Accept": "application/json, text/javascript, */*; q=0.01",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
            "Accept-Encoding": "gzip, deflate",
            "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
            "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
            # "Cache-Control": "no-cache",
            "Host": "www.pss-system.gov.cn",
            # "Pragma": "no-cache",
            # "Referer": "http://pss-system.cnipa.gov.cn/sipopublicsearch/patentsearch/showViewList-jumpToView.shtml",
            # "Proxy-Connection": "keep-alive",
            "Origin": "http://pss-system.cnipa.gov.cn",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36",
            "X-Requested-With": "XMLHttpRequest"
        }
        datas = {
            "viewQC.viewLiteraQCList[0].srcCnName": "检索式:申请（专利权）人=(北京交通大学 or %22Beijing Jiaotong Univ%22 or %22Beijing Jiaotong University%22 or %22Univ Beijing Jiaotong%22 or %22University Beijing Jiaotong%22)",
            "viewQC.viewLiteraQCList[0].srcEnName": "SearchStatement:申请（专利权）人=(北京交通大学 or %22Beijing Jiaotong Univ%22 or %22Beijing Jiaotong University%22 or %22Univ Beijing Jiaotong%22 or %22University Beijing Jiaotong%22)",
            "viewQC.viewLiteraQCList[0].searchStrategy": "",
            "viewQC.viewLiteraQCList[0].searchCondition.executableSearchExp": "VDB:(ID='{}')".format(id),
            "viewQC.searchKeywords[0]": "[北][ ]{0,}[京][ ]{0,}[交][ ]{0,}[通][ ]{0,}[大][ ]{0,}[学][ ]{0,}",
            "viewQC.searchKeywords[1]": "[ ]{0,}[U][ ]{0,}[n][ ]{0,}[i][ ]{0,}[v][ ]{0,}[e][ ]{0,}[r][ ]{0,}[s][ ]{0,}[i][ ]{0,}[t][ ]{0,}[y][ ]{0,}[ ]{0,}[B][ ]{0,}[e][ ]{0,}[i][ ]{0,}[j][ ]{0,}[i][ ]{0,}[n][ ]{0,}[g][ ]{0,}[ ]{0,}[J][ ]{0,}[i][ ]{0,}[a][ ]{0,}[o][ ]{0,}[t][ ]{0,}[o][ ]{0,}[n][ ]{0,}[g][ ]{0,}[ ]{0,}",
            "viewQC.searchKeywords[2]": "[ ]{0,}[U][ ]{0,}[n][ ]{0,}[i][ ]{0,}[v][ ]{0,}[ ]{0,}[B][ ]{0,}[e][ ]{0,}[i][ ]{0,}[j][ ]{0,}[i][ ]{0,}[n][ ]{0,}[g][ ]{0,}[ ]{0,}[J][ ]{0,}[i][ ]{0,}[a][ ]{0,}[o][ ]{0,}[t][ ]{0,}[o][ ]{0,}[n][ ]{0,}[g][ ]{0,}[ ]{0,}",
            "viewQC.searchKeywords[3]": "[ ]{0,}[B][ ]{0,}[e][ ]{0,}[i][ ]{0,}[j][ ]{0,}[i][ ]{0,}[n][ ]{0,}[g][ ]{0,}[ ]{0,}[J][ ]{0,}[i][ ]{0,}[a][ ]{0,}[o][ ]{0,}[t][ ]{0,}[o][ ]{0,}[n][ ]{0,}[g][ ]{0,}[ ]{0,}[U][ ]{0,}[n][ ]{0,}[i][ ]{0,}[v][ ]{0,}[e][ ]{0,}[r][ ]{0,}[s][ ]{0,}[i][ ]{0,}[t][ ]{0,}[y][ ]{0,}[ ]{0,}",
            "viewQC.searchKeywords[4]": "[ ]{0,}[B][ ]{0,}[e][ ]{0,}[i][ ]{0,}[j][ ]{0,}[i][ ]{0,}[n][ ]{0,}[g][ ]{0,}[ ]{0,}[J][ ]{0,}[i][ ]{0,}[a][ ]{0,}[o][ ]{0,}[t][ ]{0,}[o][ ]{0,}[n][ ]{0,}[g][ ]{0,}[ ]{0,}[U][ ]{0,}[n][ ]{0,}[i][ ]{0,}[v][ ]{0,}[ ]{0,}",
            "viewQC.viewLiteraQCList[0].searchCondition.sortFields": "-APD,+PD",
            "viewQC.needSearch": "true",
            "viewQC.type": "SEARCH",
            "wee.bizlog.modulelevel": "0200604"
        }
        BoolResult, errString, r = facade.BaseRequestPost(url,
                                                          sn=self.sn,
                                                          headers=header,
                                                          proxies=self.proxy,
                                                          data=datas,
                                                          endstring="",
                                                          allow_redirects=True,
                                                          mark="",
                                                          timeout=(30, 60))
        if BoolResult:
            print("解决417问题")
            return True, r
        else:
            return False, r

    def set_post5(self, fn, row):
        """Fill postdata5 from ``fn`` and the MongoDB row."""
        self.postdata5["literaInfo.fn"] = fn
        self.postdata5["literaInfo.nrdAn"] = row["app_no"]
        self.postdata5["literaInfo.nrdPn"] = row["pub_no"]

    def set_post2(self, row):
        """Fill postdata2 (abstract request) from the MongoDB row."""
        self.postdata2["nrdAn"] = row["app_no"]
        self.postdata2["cid"] = row["requestsid"]
        self.postdata2["sid"] = row["requestsid"]

    def set_post7(self, figureRid):
        """Fill postdata7 (image-URL request) with the figure rid."""
        self.postdata7["figureUrl"] = figureRid
        self.postdata7["rids[0]"] = figureRid

    def set_post3(self, row):
        """Fill postdata3 (full-text request) from the MongoDB row."""
        self.postdata3["nrdAn"] = row["app_no"]
        self.postdata3["cid"] = row["requestsid"]
        self.postdata3["sid"] = row["requestsid"]

    def insertabs(self, dicts):
        """Insert an abstract/full-text document; duplicates only warn."""
        try:
            self.db.absfull.insert(dicts)
        except DuplicateKeyError as e:
            warnings.warn(str(e))

    def insertpatent(self, dicts):
        """Insert a patent-info document; duplicates only warn."""
        try:
            self.db.patentinfo.insert(dicts)
        except DuplicateKeyError as e:
            warnings.warn(str(e))

    def deal_patentinfo(self, dicts):
        """Derive counters and a follow-up flag from the patentinfo JSON.

        Parses ``dicts["patentinfo"]`` and copies family (ccount),
        citation (pcount) and legal-status (tcount) counts plus the
        application/publication numbers onto ``dicts``. ``stat`` is set
        to 0 (more downloading needed) when there are more than 5
        citations or legal entries, or when ``cpnum`` is positive;
        otherwise 1 (done).

        :param dicts: mutable result document; modified in place.
        :return: the same dict, for convenience.
        """
        jsondicts = json.loads(dicts["patentinfo"])
        # Patent family count.
        cognation_count = jsondicts["cognation_count"]
        # Citation count.
        patcit_count = jsondicts["patcit_count"]
        # Legal-status entry count.
        totalCount = jsondicts["lawStatePagination"]["totalCount"]
        app_no = jsondicts["literaInfo"]["nrdAn"]
        pub_no = jsondicts["literaInfo"]["nrdPn"]
        stat = 1
        if int(patcit_count) > 5 or int(totalCount) > 5 or int(dicts["cpnum"]) > 0:
            stat = 0
        dicts["ccount"] = int(cognation_count)
        dicts["pcount"] = int(patcit_count)
        dicts["tcount"] = int(totalCount)
        dicts["app_no"] = app_no
        dicts["pub_no"] = pub_no
        dicts["lawmsg"] = ""
        dicts["cmsg"] = ""
        dicts["pmsg"] = ""
        dicts["cpmsg"] = ""
        dicts["stat"] = stat
        return dicts


def printr(r):
    """Best-effort debug print of a response body.

    ``r`` may be None, an int status code, or anything else lacking a
    usable ``text`` attribute; any failure is deliberately ignored.
    """
    try:
        print(r.text)
    except Exception:
        # Fix: a bare ``except:`` would also swallow KeyboardInterrupt /
        # SystemExit; Exception keeps the best-effort behaviour without
        # masking interpreter-exit signals.
        pass


@click.command()
@click.option('--proxy', default='proxy5', type=str, help='请输入使用的代理配置文件的opt_key')
# @click.option('--user', default='userc', type=str, help='请输入用户的sesc')
def start(proxy):
    down = DownDetail()
    down.set_user()
    if proxy == "":
        proxy = None
    else:
        proxy = down.cf.get_value("proxy", proxy)
        proxy = {
            "http": proxy,
            "https": proxy
        }
    # proxy = {
    #     "http": "192.168.30.179:9999",
    #     "https": "192.168.30.179:9999"
    # }
    down.set_proxys(proxy)
    down.set_cookie()
    status = 0
    while True:
        if status != 0:
            time.sleep(30 * 60)
        status += 1
        if down.is_login():
            rows = down.selectdb()
            count = 0
            for row in rows:
                time.sleep(30)
                count += 1
                dicts = {}
                dicts1 = {}
                dicts["requestsid"] = row["requestsid"]
                dicts1["requestsid"] = row["requestsid"]
                results, r = down.search_detail(row["requestsid"])
                time.sleep(2)
                if not results:
                    if r.status_code == 404:
                        down.logger.info("遭遇404错误开始搜索")
                        results = down.down_search_page()
                        if not results:
                            down.logger.info("遭遇404后搜索失败，程序逻辑需要修复")
                            sys.exit(-1)
                        else:
                            down.logger.info("搜索成功 虽然本次会跳出循环但下次应该会成功")
                    else:
                        down.login()
                        time.sleep(2)
                    down.logger.info("解决417问题失败,跳出本次for循环")
                    continue
                boolresult, r = down.down_url4()
                time.sleep(2)
                if boolresult:
                    dicts["showviewlist"] = r.text
                    fn = json.loads(r.text)["viewLiteraDTOList"][0]["literaInfo"]["fn"]
                    down.logger.info("fn 的值为:" + fn)
                    # 设置post参数
                    down.set_post5(fn, row)
                    # 下载周边
                    boolresult, r = down.down_url5()
                    time.sleep(2)
                    if not boolresult:
                        down.logger.info("下载法律状态等周边信息失败,跳出循环")
                        printr(r)
                        continue
                    dicts1["patentinfo"] = r.text
                    # 设置参数
                    down.set_post2(row)
                    # 下载abs
                    boolresult, r = down.down_url2()
                    time.sleep(2)
                    if not boolresult:
                        print("下载摘要失败，跳出循环")
                        continue
                    dicts["abs"] = r.text
                    figureRid = json.loads(r.text)["abstractInfoDTO"]["figureRid"]
                    if figureRid == "" or figureRid == "null" or figureRid == "None":
                        down.logger.info("没有图片 不下载")
                        dicts["imageurl"] = ""
                    else:
                        # 设置参数
                        down.set_post7(figureRid)
                        # 下载abs
                        boolresult, r = down.down_url7()
                        time.sleep(2)
                        if not boolresult:
                            down.logger.info("下载图片地址失败")
                            printr(r)

                            continue
                        url = json.loads(r.text)["figureUrls"][0]
                        down.imageurl = "http://pss-system.cnipa.gov.cn/sipopublicsearch" + url
                        # 下载图片
                        boolresult = down.down_image_cover(row["requestsid"])
                        if not boolresult:
                            down.logger.info("下载图片失败本次循环退出")
                            printr(r)
                            continue
                        dicts["imageurl"] = down.imageurl
                    # 设置参数
                    down.set_post3(row)
                    # 下载全文
                    boolresult, r = down.down_url3()
                    time.sleep(2)
                    if not boolresult:
                        down.logger.info("下载全文失败")
                        printr(r)
                        if r is not None:
                            printr(r.status_code)
                            if r.status_code == 417:
                                print("经过实验 发现确实存在该情况 比如 公开号 US8855085B2 US2015078434A1")
                                dicts["fulltxt"] = "-1"
                            else:
                                print("r not is 417")
                                continue
                        else:
                            print("r is None")
                            continue
                    else:
                        dicts["fulltxt"] = r.text
                    now = getDateTime()
                    dicts["downdate"] = now
                    down.logger.info("开始保存数据")
                    down.insertabs(dicts)
                    dicts1["downdate"] = now
                    down.insertpatent(dicts1)
                    down.logger.info("开始更新状态")
                    objid = row["_id"]
                    down.db.pageidjson.update({'_id': objid}, {"$set": {'stat': 1}})
                else:
                    down.logger.info("请求失败 该步骤为了获取pn请求法律状态等")
                    time.sleep(2)
                    if not down.is_login():
                        down.login()
                        time.sleep(2)
        else:
            down.login()
            time.sleep(2)


#
# if __name__ == "__main__":
#     start()


"""
**********************多进程分布式代码**************************
"""
import queue
import time
from multiprocessing.managers import BaseManager

# 创建类似的QueueManager:
import click


class QueueManager(BaseManager):
    """Client-side manager; queue accessors are registered at runtime."""


class NodeTask(object):
    """Worker-side handle on the master's task and result queues.

    Registers the queue names, points a :class:`QueueManager` at the
    machine running ``task_master.py``, and exposes the two queues as
    ``self.task`` / ``self.result`` once :meth:`set_task_result_obj`
    has been called after :meth:`conn`.
    """

    def __init__(self):
        """Register queue names and prepare (but do not open) the connection."""
        self.register()
        # Address of the machine running task_master.py.
        self.server_addr = '192.168.30.171'
        print(f'Connect to server {self.server_addr}...')
        # Port and authkey must match task_master.py exactly.
        self.m = QueueManager(address=(self.server_addr, 5000), authkey=b'abc')
        self.task = None
        self.result = None

    def register(self):
        """Register queue accessor names only — the queues live on the master."""
        for queue_name in ('get_task_queue', 'get_result_queue'):
            QueueManager.register(queue_name)

    def conn(self):
        """Open the network connection to the master."""
        self.m.connect()

    def set_task_result_obj(self):
        """Bind proxies for the remote task and result queues."""
        self.task = self.m.get_task_queue()
        self.result = self.m.get_result_queue()


@click.command()
@click.option('--proxy', default='proxy5', type=str, help='请输入使用的代理配置文件的opt_key')
# @click.option('--user', default='userc', type=str, help='请输入用户的sesc')
def task_distributed(proxy):
    """Distributed worker loop: consume patent request ids from the
    master's task queue and download the full detail set for each.

    For every id pulled from the queue this:
      1. skips it when a result already exists in ``absfull``,
      2. fetches the detail page, re-running the search on a 404,
      3. downloads the view list, legal-status info, abstract, cover
         image and full text,
      4. stores the documents via ``insertabs`` / ``insertpatent`` and
         flags the id as done (``stat=1``) in the tracking collections.

    :param proxy: opt_key of a ``[proxy]`` entry in the config file;
        an empty string disables the proxy entirely.
    """
    node = NodeTask()
    node.conn()
    node.set_task_result_obj()
    down = DownDetail()
    down.set_user()
    if proxy == "":
        proxy = None
    else:
        # Resolve the opt_key to an address and use it for both schemes.
        proxy = down.cf.get_value("proxy", proxy)
        proxy = {
            "http": proxy,
            "https": proxy
        }
    down.set_proxys(proxy)
    down.set_cookie()
    while True:
        try:
            row = node.task.get()
            resultrow = down.db.absfull.find_one({"requestsid": row["requestsid"]})
            # Already downloaded: just flag the id as done and move on.
            if resultrow:
                print("存在 不需要重复下载")
                requestsid = row["requestsid"]
                down.db.pageidjson.update({'requestsid': requestsid}, {"$set": {'stat': 1}})
                down.db.pub_all_id_school.update({'requestsid': requestsid}, {"$set": {'stat': 1}})
                continue
            dicts = {}
            dicts1 = {}
            dicts["requestsid"] = row["requestsid"]
            dicts1["requestsid"] = row["requestsid"]
            results, r = down.search_detail(row["requestsid"])
            if not results:
                if r is not None and r.status_code == 404:
                    down.logger.info("遭遇404错误开始搜索")
                    results = down.down_search_page()
                    if not results:
                        # Search after a 404 should always work; a failure
                        # here means the recovery logic itself is broken.
                        down.logger.info("遭遇404后搜索失败，程序逻辑需要修复")
                        sys.exit(-1)
                    else:
                        down.logger.info("搜索成功 虽然本次会跳出循环但下次应该会成功")
                else:
                    if not down.is_login():
                        down.logger.info("不是404错误 重新登录")
                        down.login()
                        continue
            boolresult, r = down.down_url4()
            if boolresult:
                dicts["showviewlist"] = r.text
                # Extract the "fn" token needed by the follow-up request.
                # The response is not always well-formed JSON, so guard the
                # parse narrowly instead of a bare except (which would also
                # swallow KeyboardInterrupt/SystemExit).
                try:
                    fn = json.loads(r.text)["viewLiteraDTOList"][0]["literaInfo"]["fn"]
                except (KeyError, IndexError, TypeError, ValueError):
                    down.logger.info(r.text)
                    continue
                down.logger.info("fn 的值为:" + fn)
                # Set POST parameters for the legal-status request.
                down.set_post5(fn, row)
                # Download surrounding info (legal status etc.).
                boolresult, r = down.down_url5()
                if not boolresult:
                    down.logger.info("下载法律状态等周边信息失败,跳出循环")
                    printr(r)
                    continue
                dicts1["patentinfo"] = r.text
                # Set POST parameters for the abstract request.
                down.set_post2(row)
                # Download the abstract.
                boolresult, r = down.down_url2()
                if not boolresult:
                    print("下载摘要失败，跳出循环")
                    continue
                dicts["abs"] = r.text
                figureRid = json.loads(r.text)["abstractInfoDTO"]["figureRid"]
                print(figureRid)
                # The service encodes "no image" in several string forms.
                if not figureRid or figureRid == "" or figureRid == "null" or figureRid == "None":
                    down.logger.info("没有图片 不下载")
                    dicts["imageurl"] = ""
                else:
                    # Set POST parameters for the figure-URL request.
                    down.set_post7(figureRid)
                    # Resolve the cover-image URL.
                    boolresult, r = down.down_url7()
                    if not boolresult:
                        down.logger.info("下载图片地址失败")
                        printr(r)
                        continue
                    url = json.loads(r.text)["figureUrls"][0]
                    down.imageurl = "http://pss-system.cnipa.gov.cn/sipopublicsearch" + url
                    down.logger.info("***image url is: %s" % down.imageurl)
                    # Download the cover image itself.
                    boolresult = down.down_image_cover(row["requestsid"])
                    if not boolresult:
                        down.logger.info("下载图片失败本次循环退出")
                        printr(r)
                        continue
                    dicts["imageurl"] = down.imageurl
                # Set POST parameters for the full-text request.
                down.set_post3(row)
                # Download the full text.
                boolresult, r = down.down_url3()
                if not boolresult:
                    down.logger.info("下载全文失败")
                    printr(r)
                    if r is not None:
                        printr(r.status_code)
                        # 417 means the document legitimately has no full
                        # text (e.g. US8855085B2, US2015078434A1): record -1.
                        if r.status_code == 417:
                            print("经过实验 发现确实存在该情况 比如 公开号 US8855085B2 US2015078434A1")
                            dicts["fulltxt"] = "-1"
                        else:
                            print("r not is 417")
                            continue
                    else:
                        print("r is None")
                        continue
                else:
                    dicts["fulltxt"] = r.text
                now = getDateTime()
                dicts["downdate"] = now
                down.logger.info("开始保存数据")
                down.insertabs(dicts)
                dicts1["downdate"] = now
                dicts1["cpnum"] = row["cpnum"]
                dicts1 = down.deal_patentinfo(dicts1)
                down.insertpatent(dicts1)
                down.logger.info("开始更新状态")
                requestsid = row["requestsid"]
                down.db.pageidjson.update({'requestsid': requestsid}, {"$set": {'stat': 1}})
                down.db.pub_all_id_school.update({'requestsid': requestsid}, {"$set": {'stat': 1}})
            else:
                down.logger.info("请求失败 该步骤为了获取pn请求法律状态等")
                printr(r)
                # time.sleep(2)
                if not down.is_login():
                    down.login()
                    # time.sleep(2)
                    if not down.is_login():
                        # Still not logged in after retrying: assume the
                        # account is banned and switch to another one.
                        down.set_user()

        except queue.Empty:
            print('task queue is empty.')


if __name__ == "__main__":
    task_distributed()
