import json
import math
import multiprocessing
import os
import re
import sys
import time
import traceback
from ctypes import windll, c_char_p
from multiprocessing import Process
from urllib import parse

import facade
import pymysql
import requests
from xjlibrary.mdatetime.mtime2 import MDateTimeUtils
from xjlibrary.our_file_dir import BaseDir
from xjlibrary.tools.BaseUrl import BaseUrl

# Resolve the directory containing this script, then walk two levels up to the
# project root and build the JSON download directory for "AN" records.
# NOTE(review): dirPath is unused by the active code paths below -- the lines
# that wrote into it are commented out inside down_one()/DownOnePage().
curPath = BaseDir.get_file_dir_absolute(__file__)
TopPath = BaseDir.get_upper_dir(curPath, -2)
dirPath = BaseDir.get_new_path(TopPath, "download", "EI", "download", "json", "AN")


class DiscernCode(object):
    """
    Wrapper around the YunDaMa (yundama.com) captcha-recognition service,
    driven through its native Windows DLL via ctypes.
    """

    def __init__(self, logger):
        # Software ID and key: developer revenue-share credentials, obtained
        # from the developer console ("My Software" page).
        self.appId = 6249
        self.appKey = b'439aa4305465df04545dad067bb31751'
        # Load the 64-bit YunDaMa DLL (Windows only; must be on the DLL path).
        self.YDMApi = windll.LoadLibrary('yundamaAPI-x64')
        # Note: this is an ordinary member account, not a developer account.
        # Registration: http://www.yundama.com/index/reg/user
        # Developers can contact support for free debugging credits.
        # self.username = b'xujiang'
        # self.password = b'xujiang1994323'
        self.username = b'office'
        self.password = b'officeHelper$123'
        # Captcha type code, e.g. 1004 = 4 alphanumeric characters; pricing
        # differs per type.  Fill in accurately or recognition accuracy
        # suffers.  Full list: http://www.yundama.com/price.html
        self.codetype = 3006
        # Pre-allocate a 30-byte buffer to receive the recognition result.
        self.result = c_char_p(b"                              ")
        # Captcha image path is passed per call instead of stored here.
        # self.filename = jpgpath.encode('utf-8')
        self.logger = logger
        # Recognition timeout, in seconds.
        self.timeout = 60

    def easy_decode_by_path(self, jpgpath):
        """Recognize the captcha image at *jpgpath* via the one-shot API.

        Returns (True, str(result_bytes)) on success -- note the value still
        carries the "b'...'" repr wrapper, which the caller strips -- or
        (False, "") on failure.
        """
        self.logger.info("正在开始一键识别{}".format(jpgpath))
        # One-shot recognition: no need to call YDM_SetAppInfo / YDM_Login;
        # suited for script use.
        captchaId = self.YDMApi.YDM_EasyDecodeByPath(self.username,
                                                     self.password,
                                                     self.appId,
                                                     self.appKey,
                                                     jpgpath,
                                                     self.codetype,
                                                     self.timeout,
                                                     self.result)

        self.logger.info("一键识别：验证码ID：%d，识别结果：%s" % (captchaId, self.result.value))
        # A positive captcha ID signals success; anything else is a failure.
        if int(captchaId) > 0:
            return True, str(self.result.value)
        else:
            self.logger.error("识别失败 请检查原因")
            return False, ""


class DownIndexByAN(object):
    """
    Crawl Engineering Village search results by accession-number (AN)
    prefix; also downloads each record's cited-by count and EID.
    """
    # Usable proxy endpoints (institutional access routes).  __main__ spawns
    # one worker process per entry.
    listip = [
        "192.168.30.176:8207",  # Jinan University
        "192.168.30.176:8012",  # Chongqing University
        "192.168.30.176:8021",  # Fujian academic digital library
        "192.168.30.36:8041",  ## Southeast University
        "192.168.30.176:8076",  # Lanzhou University of Technology
        "192.168.30.36:8082",  ## Southwest Univ. of Science and Technology
        "192.168.30.176:8165",  # Nanjing Tech University
        "192.168.30.176:8171",  # Chongqing Jiaotong University (868)
        "192.168.30.36:8182",  ## Southwest Univ. of Science and Technology
        "192.168.30.176:8195",  # Inner Mongolia University Library
        "192.168.30.176:8243",  # Inner Mongolia University Library
        "192.168.30.176:8031",  # Guizhou Institute of Technology

    ]

    def __init__(self):
        """Set up DB access, HTTP session, request templates and rate-limit state."""
        # Database config lives next to this script.
        self.configfile = BaseDir.get_new_path(curPath, "db.ini")
        self.logger = facade.get_streamlogger()
        self.mysqlutils = facade.MysqlUtiles(self.configfile, "db", logger=self.logger)
        # 8207 8082 8012
        # self.Proxies = {
        #     "http": "192.168.30.176:8012",
        #     "https": "192.168.30.176:8012"  # key refers to the target site's scheme
        # }
        self.year_floor = None
        self.year_ceil = None
        # Accession-number prefixes queued for download.
        self.ac_list = []
        self.NumPerPage = 100  # results per page (original comment said 25; the value used is 100)
        self.NumAllPage = 5200  # hard cap on total records per prefix
        self.sn = requests.Session()
        self.UserAgent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.106 Safari/537.36'
        self.sn.headers['User-Agent'] = self.UserAgent
        self.init_form()
        # Pretend the last captcha attempt was 5 minutes ago so the first
        # discern_code() call is allowed immediately.
        self.discern_code_time = int(time.time()) - 300
        self.Proxies = None  # set later via set_proxy()

    def set_proxy(self, proxy):
        """Route both HTTP and HTTPS traffic through *proxy* (host:port).

        The dict keys name the scheme of the target site, per the
        `requests` proxies convention.
        """
        self.Proxies = {scheme: proxy for scheme in ("http", "https")}

    def init_form(self):
        """Build the static header dicts and query-form templates reused by
        every request: plain headers, JSON-API headers, form-POST headers,
        the expert-search form, and the 100-results-per-page form."""
        # Minimal headers for simple GETs.
        self.Headers = {
            'Accept': '*/*',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.106 Safari/537.36',
        }
        # Headers for the JSON search API.
        self.headers = {
            "Accept": "application/json, text/javascript, */*; q=0.01",
            "Accept-Encoding": "gzip, deflate",
            "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
            "Connection": "keep-alive",
            "Content-Type": "application/json",
            "Host": "www.engineeringvillage.com",
            "Referer": "https://www.engineeringvillage.com/search/expert.url",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.62 Safari/537.36",
            "X-NewRelic-ID": "VQQAUldRCRAFUFFQBwgCUQ==",
            "X-Requested-With": "XMLHttpRequest"
        }
        # Same as self.headers but for form-encoded POSTs (cited-by endpoint).
        self.headers2 = {
            "Accept": "application/json, text/javascript, */*; q=0.01",
            "Accept-Encoding": "gzip, deflate",
            "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
            "Connection": "keep-alive",
            "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
            "Host": "www.engineeringvillage.com",
            "Referer": "https://www.engineeringvillage.com/search/expert.url",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.62 Safari/537.36",
            "X-NewRelic-ID": "VQQAUldRCRAFUFFQBwgCUQ==",
            "X-Requested-With": "XMLHttpRequest"
        }
        self.UserAgent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.106 Safari/537.36'
        # Millisecond timestamp reused as the "_" cache-buster parameter.
        self.t = int(round(time.time() * 1000))
        # Expert-search form; searchWord1 is overwritten per prefix in down_one().
        self.Form = {
            "usageOrigin": "searchform",
            "usageZone": "expertsearch",
            "editSearch": "",
            "isFullJsonResult": "true",
            "angularReq": "true",
            "CID": "searchSubmit",
            "searchtype": "Expert",
            "origin": "searchform",
            "category": "expertsearch",
            "searchWord1": "((1994*) WN AN)",
            "allDb": "1",
            "database": "1",
            "yearselect": "yearrange",
            "startYear": "1884",
            "endYear": "2020",
            "updatesNo": "1",  # "4",
            "sort": "yr",  # "sort": "relevance",  yr = newest first, relevance = relevance ranking
            "autostem": "true",
            "searchStartTimestamp": str(int(round(time.time() * 1000))),
            "_": str(self.t)
        }

        # Form to switch an existing search to 100 results per page;
        # SEARCHID is replaced with the live value in down_one().
        self.FormPage100 = {
            "pageSizeVal": "100",
            "SEARCHID": "7cfe8a66d4934a6385e4120b3aa5210c",
            "sortsort": "relevance",
            "sortdir": "dw",
            "angularReq": "true",
            "isFullJsonResult": "false",
            "usageOrigin": "searchresults",
            "usageZone": "resultsperpagetop",
            "_": str(int(round(time.time() * 1000)))
        }

    def init_organ_list(self):
        """
        Seed the work list with 1-character accession-number prefixes.

        Reads the distinct first characters of article.AccessionNumber,
        upserts each non-empty prefix into `accnumcn`, and appends it to
        ``self.ac_list``.

        Fix: the original guarded the DB insert against empty prefixes but
        still appended "" to ac_list, which later produced a degenerate
        "((*) WN AN)" search in down_one(); empty prefixes are now skipped
        entirely.
        """
        self.logger.info('InitOrganList ...')
        sql = "SELECT left(AccessionNumber, 1) as ac FROM `article` GROUP BY ac"
        rows = self.mysqlutils.SelectFromDB(sql)
        for row in rows:
            ac = row[0]
            if not ac:
                # Blank prefix: nothing meaningful to search for.
                continue
            sql = "replace into accnumcn(`AN`) values ('{}')".format(ac)
            self.mysqlutils.ExeSqlToDB(sql)
            self.ac_list.append(ac)
        self.logger.info('code_list size:%d' % len(self.ac_list))

    def down_all(self):
        """Download every prefix currently queued in ``self.ac_list``.

        Any failure (including SystemExit raised inside down_one) is logged
        as a traceback and the loop moves on to the next prefix.
        """
        for prefix in self.ac_list:
            try:
                self.down_one(prefix)
            except:  # noqa: E722 -- deliberately swallows everything, incl. SystemExit
                traceback.print_exc()
        self.logger.info('Game Over! Good Boy!')

    def discern_code(self, jpgpath):
        """
        Recognize a captcha image via the YunDaMa service, rate-limited to
        one attempt every five minutes.

        :param jpgpath: utf-8 encoded path (bytes) of the captcha image
        :return: (True, cleaned code string) on success, (False, "") otherwise
        """
        elapsed = int(time.time()) - self.discern_code_time
        if elapsed <= 60 * 5:
            # Too soon since the last attempt -- back off for a minute.
            self.logger.info("时间还没到休息60秒 差时为{} 应该相隔5分钟".format(int(time.time()) - self.discern_code_time))
            time.sleep(60)
            return False, ""
        boolresult, code = DiscernCode(self.logger).easy_decode_by_path(jpgpath)
        BaseDir.single_add_file("./code.txt", "***code is :{}\n".format(code))
        self.discern_code_time = int(time.time())
        if not boolresult:
            return False, ""
        # str(bytes) yields "b'xyz'" -- strip the repr wrapper.
        return True, code.replace("b'", "").replace("'", "")

    def down_one(self, ac):
        """
        Download all search results for one accession-number prefix *ac*.

        Flow: run an expert search for "((ac*) WN AN) AND (china WN ALL)",
        solving a captcha challenge when the server answers HTTP 400;
        persist every result row into `articlenew`; POST the reshaped
        cited-by payload to fetch citation counts and EIDs; if more than one
        100-result page exists, record the remaining pages in `accnumpagecn`
        and fetch them via DownOnePage().  On completion the prefix is
        marked done (accnumcn.stat = 1).

        :param ac: accession-number prefix, e.g. "2019"
        :return: True on full success, False on failure, None on "no results"
        """
        self.logger.info('DownOneOrgan %s, %s ...' % (ac, repr(self.Proxies)))

        # If the output dir exists the prefix was downloaded before -- though
        # possibly only partially, if a previous run was interrupted.
        # outDir = os.path.join(dirPath, ac)
        # if os.path.exists(outDir):
        #     return

        def init_home():
            # Re-establish the session: fetch the home page and, when routed
            # to the institutional access-selection page, submit the
            # candidate-path authentication form with the embedded CSRF token.
            url = r'https://www.engineeringvillage.com/home.url'
            header = {
                "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3",
                "Accept-Encoding": "gzip, deflate",
                "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
                "Host": "www.engineeringvillage.com",
                "Upgrade-Insecure-Requests": "1",
                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36"
            }
            BoolResult, errString, r = facade.BaseRequest(url,
                                                          sn=self.sn,
                                                          headers=header,
                                                          verify=False,
                                                          proxies=self.Proxies,
                                                          timeout=60)
            self.logger.info(errString)
            if not BoolResult:
                return False
            self.logger.info("首页访问完成")
            print(r.url)
            BaseDir.single_write_file(r.text, "./test.html")
            # The access-choice page is detected by this institutional label.
            if r.text.find("Chongqing University Library, General Access") > -1:
                obj = re.search(r'csrfToken\.value = "(.*?)"', r.text)
                csrfToken = obj.group(1)
                print(csrfToken)
                url = "https://www.engineeringvillage.com/customer/authenticate.url"
                postdate = {
                    "path_choice": "13022631",
                    "remember_path_flag": "true",
                    "origin": "pathChoice",
                    "zone": "main",
                    "auth_type": "CANDIDATE_PATH",
                    "csrfToken": csrfToken
                }
                BoolResult, errString, r = facade.BaseRequestPost(url=url,
                                                                  sn=self.sn,
                                                                  data=postdate,
                                                                  proxies=self.Proxies,
                                                                  allow_redirects=True,
                                                                  endstring="",
                                                                  verify=False,
                                                                  timeout=60)
                if BoolResult:
                    print("suceess")
                else:
                    print("failed")
                    sys.exit(-1)

        form = self.Form
        # 5846789  17977686
        form['searchWord1'] = "((({}*) WN AN) AND (china WN ALL))".format(ac)
        # form['startYear'] = str(year)
        #         # form['endYear'] = str(year)
        url = r'https://www.engineeringvillage.com/search/submit.url'
        url = url + "?" + BaseUrl.dicts_to_url(form)
        BoolResult, errString, r = facade.BaseRequest(url=url,
                                                      sn=self.sn,
                                                      headers=self.headers,
                                                      proxies=self.Proxies,
                                                      allow_redirects=True,
                                                      endstring="",
                                                      verify=False,
                                                      timeout=60)
        self.logger.info(errString)

        # self.logger.info(r.status_code)
        if errString == "request":
            # Transport-level failure: give up on this prefix for now.
            return False
        # if r and r.status_code == 200:
        #
        if not BoolResult:
            if r.status_code == 400:
                # HTTP 400 carries a captcha challenge: download the image,
                # solve it via discern_code(), verify, then retry the search.
                imagetextenc = json.loads(r.text)["imagetextenc"]
                url = "https://www.engineeringvillage.com/rest/captcha/image?imagetextenc={}".format(imagetextenc)
                BoolResult, errString, r = facade.BaseRequest(url=url,
                                                              sn=self.sn,
                                                              proxies=self.Proxies,
                                                              allow_redirects=True,
                                                              endstring="",
                                                              timeout=60)
                if BoolResult:
                    # One captcha file per worker process (keyed by pid).
                    jpgpath = BaseDir.get_new_path(curPath, str(os.getpid()) + ".jpg")
                    BaseDir.single_write_wb_file(r.content, jpgpath)
                else:
                    print("获取验证码失败")
                    return False
                # code = input("please input code:")
                jpgpath = BaseDir.get_new_path(curPath, str(os.getpid()) + ".jpg")
                jpgpath = jpgpath.encode('utf-8')
                bools, code = self.discern_code(jpgpath)
                if not bools:
                    return False
                url = "https://www.engineeringvillage.com/rest/captcha/verify?imagetextenc={}&userentry={}".format(
                    imagetextenc, code)
                BoolResult, errString, r = facade.BaseRequest(url=url,
                                                              sn=self.sn,
                                                              proxies=self.Proxies,
                                                              allow_redirects=True,
                                                              endstring="",
                                                              timeout=60)
                if BoolResult:
                    # Captcha accepted -- re-issue the original search.
                    url = r'https://www.engineeringvillage.com/search/submit.url'
                    url = url + "?" + BaseUrl.dicts_to_url(form)
                    BoolResult, errString, r = facade.BaseRequest(url=url,
                                                                  sn=self.sn,
                                                                  proxies=self.Proxies,
                                                                  allow_redirects=True,
                                                                  endstring="",
                                                                  timeout=60)
                    if not BoolResult:
                        print("验证后失败")
                        return False
                else:
                    print("验证失败")
                    return False
            else:
                return False
        self.logger.info("搜索成功")
        print(r.url)
        BaseDir.single_write_file(r.text, "./test2.html")
        try:
            # The SEARCHID query parameter identifies this result set for all
            # subsequent paging requests.
            param_dict = parse.parse_qs(parse.urlparse(r.url).query)
            SEARCHID = param_dict['SEARCHID'][0]
        except:
            # No SEARCHID in the redirect URL -> session looks stale;
            # re-authenticate and abandon this attempt.
            init_home()
            return False

        if not json.loads(r.text)["results"]:
            self.logger.info("没有 results 退出")
            # Nothing to download: mark the prefix finished.
            sql = "update accnumcn set stat = 1 where AN='{}'".format(ac)
            self.mysqlutils.ExeSqlToDB(sql)
            return

        # Total number of hits reported by the search.
        searchnum = json.loads(r.text)["pagenav"]["resultscount"]
        # Total number of result pages.
        pagecount = json.loads(r.text)["pagenav"]["pagecount"]
        self.logger.info("25: 第一次搜索每页25的数量和页数:总量为{},页数为{}".format(searchnum, pagecount))
        self.logger.info("获取数据长度{}".format(len(json.loads(r.text)["results"])))
        sql = "update accnumcn set allnum = {} where AN='{}'".format(searchnum, ac)
        self.mysqlutils.ExeSqlToDB(sql)
        # outDir = os.path.join(dirPath, ac)
        # if not os.path.exists(outDir):
        #     os.makedirs(outDir)

        # Fewer than one full 25-result page: this response already holds
        # everything, so persist it and fetch cited-by data directly.
        if len(json.loads(r.text)["results"]) < 25:
            # pathfile = os.path.join(outDir, str(1) + '.json')
            # BaseDir.single_write_wb_file(r.content, pathfile)
            listcite = []
            for onedate in json.loads(r.text)["results"]:
                listcite.append(onedate["citedby"])
                # Upsert the article row (docid / doi / citemsg / country).
                sql = "INSERT INTO articlenew (`docid`,`AccessionNumber`,`doi`,`citemsg`,`country`) VALUES ('{docid}','{AccessionNumber}','{doi}','{citemsg}','{country}') ON DUPLICATE KEY UPDATE docid='{docid}',`doi`='{doi}',citemsg='{citemsg}',`country`='{country}'".format(
                    docid=onedate["doc"]["docid"],
                    AccessionNumber=onedate["accnum"],
                    citemsg=pymysql.escape_string(json.dumps(onedate["citedby"], ensure_ascii=False)),
                    doi=pymysql.escape_string(onedate["doi"]),
                    country='CN')
                self.mysqlutils.ExeSqlToDB(sql)
            url = "https://www.engineeringvillage.com/toolsinscopus/citedbycount.url"
            sessionid = json.loads(r.text)["searchMetaData"]["searchesEntity"]["sessionId"]
            listcite2 = []
            # Reshape each citedby dict into the form the Scopus cited-by
            # endpoint expects (rename md5/first* keys, attach session id).
            for dicts in listcite:
                if not dicts:
                    continue
                dicts["security"] = dicts["md5"]
                dicts["vol"] = dicts["firstvolume"]
                dicts["issue"] = dicts["firstissue"]
                dicts["page"] = dicts["firstpage"]
                dicts["sid"] = sessionid
                del dicts["md5"]
                del dicts["firstissue"]
                del dicts["firstvolume"]
                del dicts["firstpage"]
                del dicts["doiencoded"]
                listcite2.append(dicts)
            # NOTE(review): naive quote swap to fake JSON -- breaks if any
            # value contains an apostrophe.
            strings = str(listcite2).replace("'", "\"")
            if not strings or strings == '[]':
                sql = "update accnumcn set stat = 1 where AN='{}'".format(ac)
                self.mysqlutils.ExeSqlToDB(sql)
                return
            postdate = {
                "citedby": strings
            }
            self.logger.info("postdate is:" + str(postdate))

            BoolResult, errString, r = facade.BaseRequestPost(url,
                                                              sn=self.sn,
                                                              proxies=self.Proxies,
                                                              headers=self.headers2,
                                                              data=postdate,
                                                              allow_redirects=True,
                                                              endstring="",
                                                              timeout=60)
            if BoolResult:
                # Store cited-by count, EID and SID per accession number.
                for onedate in json.loads(r.text)["results"]:
                    count = onedate["COUNT"]
                    eid = onedate["EID"]
                    accnumber = onedate["ID"]
                    sid = onedate["SID"]
                    citedtime = MDateTimeUtils.get_today_date_strings()
                    sql = "update articlenew set `cited_cnt`='{}',`eid`='{}',`sid`='{}',citedtime='{}' where AccessionNumber='{}'".format(
                        count, eid, sid, citedtime, accnumber)
                    self.mysqlutils.ExeSqlToDB(sql)
            else:
                print("下载eid失败")
                sys.exit(-1)
            sql = "update accnumcn set stat = 1 where AN='{}'".format(ac)
            self.mysqlutils.ExeSqlToDB(sql)
            return

        # Switch the result list to 100 records per page.
        self.FormPage100["SEARCHID"] = SEARCHID
        self.FormPage100["_"] = str(self.t)
        url = "https://www.engineeringvillage.com/search/results/quick.url"
        url = url + "?" + BaseUrl.dicts_to_url(self.FormPage100)
        BoolResult, errString, r = facade.BaseRequest(url=url,
                                                      sn=self.sn,
                                                      proxies=self.Proxies,
                                                      headers=self.headers,
                                                      allow_redirects=True,
                                                      endstring="",
                                                      timeout=60)
        self.logger.info(errString)
        if not BoolResult:
            return False

        searchnum = json.loads(r.text)["pagenav"]["resultscount"]
        pagecount = json.loads(r.text)["pagenav"]["pagecount"]
        self.logger.info("100: 转化搜索每页100的数量和页数:总量为{},页数为{}".format(searchnum, pagecount))
        # pathfile = os.path.join(outDir, str(1) + '.json')
        # BaseDir.single_write_wb_file(r.content, pathfile)
        # Single 100-result page: same persist-and-fetch-citations logic as
        # the <25 branch above (duplicated code).
        if len(json.loads(r.text)["results"]) < 100:
            listcite = []
            for onedate in json.loads(r.text)["results"]:
                listcite.append(onedate["citedby"])
                sql = "INSERT INTO articlenew (`docid`,`AccessionNumber`,`doi`,`citemsg`,`country`) VALUES ('{docid}','{AccessionNumber}','{doi}','{citemsg}','{country}') ON DUPLICATE KEY UPDATE docid='{docid}',`doi`='{doi}',citemsg='{citemsg}',`country`='{country}'".format(
                    docid=onedate["doc"]["docid"],
                    AccessionNumber=onedate["accnum"],
                    citemsg=pymysql.escape_string(json.dumps(onedate["citedby"], ensure_ascii=False)),
                    doi=pymysql.escape_string(onedate["doi"]),
                    country='CN')
                self.mysqlutils.ExeSqlToDB(sql)
            url = "https://www.engineeringvillage.com/toolsinscopus/citedbycount.url"
            sessionid = json.loads(r.text)["searchMetaData"]["searchesEntity"]["sessionId"]
            listcite2 = []
            print(listcite)
            for dicts in listcite:
                if not dicts:
                    continue
                dicts["security"] = dicts["md5"]
                dicts["vol"] = dicts["firstvolume"]
                dicts["issue"] = dicts["firstissue"]
                dicts["page"] = dicts["firstpage"]
                dicts["sid"] = sessionid
                del dicts["md5"]
                del dicts["firstissue"]
                del dicts["firstvolume"]
                del dicts["firstpage"]
                del dicts["doiencoded"]
                listcite2.append(dicts)
            strings = str(listcite2).replace("'", "\"")
            postdate = {
                "citedby": strings
            }
            self.logger.info("postdate is:" + str(postdate))

            BoolResult, errString, r = facade.BaseRequestPost(url,
                                                              sn=self.sn,
                                                              proxies=self.Proxies,
                                                              headers=self.headers2,
                                                              data=postdate,
                                                              allow_redirects=True,
                                                              endstring="",
                                                              timeout=60)
            if BoolResult:
                for onedate in json.loads(r.text)["results"]:
                    count = onedate["COUNT"]
                    eid = onedate["EID"]
                    accnumber = onedate["ID"]
                    sid = onedate["SID"]
                    citedtime = MDateTimeUtils.get_today_date_strings()
                    sql = "update articlenew set `cited_cnt`='{}',`eid`='{}',`sid`='{}',citedtime='{}' where AccessionNumber='{}'".format(
                        count, eid, sid, citedtime, accnumber)
                    self.mysqlutils.ExeSqlToDB(sql)
            else:
                print("下载eid失败")
                sys.exit(-1)
            sql = "update accnumcn set stat = 1 where AN='{}'".format(ac)
            self.mysqlutils.ExeSqlToDB(sql)
            return

        # Cap paging at NumAllPage / NumPerPage (5200 / 100 = 52 pages).
        if pagecount > int(math.ceil(self.NumAllPage / self.NumPerPage)):
            pagecount = math.ceil(self.NumAllPage / self.NumPerPage)  # assumed maximum page number
        # Record every remaining page as pending work in accnumpagecn.
        for page in range(2, int(pagecount) + 1):
            sql = "insert ignore into accnumpagecn (AN,allnum,`page`) values ('{}','{}','{}')".format(ac, searchnum,
                                                                                                      str(page))
            self.mysqlutils.ExeSqlToDB(sql)

        for PAGE in range(2, int(pagecount) + 1):  #
            # Skip pages already marked downloaded (stat=1) by a previous run.
            sql = "select * from accnumpagecn where `page`='{}' and `AN`='{}' and stat=1".format(str(PAGE), ac)
            rows = self.mysqlutils.SelectFromDB(sql)
            if len(rows) > 0:
                self.logger.info("存在 跳过")
                continue

            if not self.DownOnePage(SEARCHID, pagecount, PAGE, ac):  # download failed: stop this session
                break
        sql = "update accnumcn set stat = 1 where AN='{}'".format(ac)
        self.mysqlutils.ExeSqlToDB(sql)
        return True

    def DownOnePage(self, SEARCHID, pagecount, PAGE, ac):
        """
        Download result page *PAGE* of the search identified by *SEARCHID*,
        persist its rows into `articlenew`, fetch cited-by counts/EIDs, and
        mark the page done in `accnumpagecn`.

        :param SEARCHID: server-side search identifier obtained in down_one()
        :param pagecount: total page count (used for logging only)
        :param PAGE: page number (>= 2; page 1 is handled inside down_one())
        :param ac: accession-number prefix the page belongs to
        :return: True to continue paging, False to abort this session
        """
        self.logger.info('DownOnePage ' + str(PAGE) + '/' + str(pagecount) + ' ...')

        BaseUrls = "https://www.engineeringvillage.com/search/results/expert.url"
        FormPagenumber = {
            "navigator": "NEXT",
            "SEARCHID": SEARCHID,
            "database": "1",
            "angularReq": "true",
            "isFullJsonResult": "false",
            "usageOrigin": "searchresults",
            # 1-based offset of the first record on this page (100 per page).
            "COUNT": str((int(PAGE) - 1) * 100 + 1),
            "usageZone": "nextpage",
            "_": str(self.t)
        }

        url = BaseUrls + "?" + BaseUrl.dicts_to_url(FormPagenumber)

        BoolResult, errString, r = facade.BaseRequest(url=url,
                                                      sn=self.sn,
                                                      proxies=self.Proxies,
                                                      endstring="",
                                                      timeout=60)
        self.logger.info(errString)
        if not BoolResult:
            return False

        # Write to file
        # outPathFile = os.path.join(outDir, str(PAGE) + '.json')
        # self.logger.info('outPathFile:' + outPathFile)
        # BaseDir.single_write_wb_file(r.content, outPathFile)
        # Keep the page response: `r` is overwritten by the cited-by POST,
        # but r1 is still needed for the final page-length check.
        r1 = r
        listcite = []
        for onedate in json.loads(r.text)["results"]:
            listcite.append(onedate["citedby"])
            # Upsert the article row (docid / doi / citemsg / country).
            sql = "INSERT INTO articlenew (`docid`,`AccessionNumber`,`doi`,`citemsg`,`country`) VALUES ('{docid}','{AccessionNumber}','{doi}','{citemsg}','{country}') ON DUPLICATE KEY UPDATE docid='{docid}',`doi`='{doi}',citemsg='{citemsg}',`country`='{country}'".format(
                docid=onedate["doc"]["docid"],
                AccessionNumber=onedate["accnum"],
                citemsg=pymysql.escape_string(json.dumps(onedate["citedby"], ensure_ascii=False)),
                doi=pymysql.escape_string(onedate["doi"]),
                country='CN')
            self.mysqlutils.ExeSqlToDB(sql)
        url = "https://www.engineeringvillage.com/toolsinscopus/citedbycount.url"
        sessionid = json.loads(r.text)["searchMetaData"]["searchesEntity"]["sessionId"]
        listcite2 = []
        # Reshape each citedby dict for the Scopus cited-by endpoint.
        for dicts in listcite:
            if not dicts:
                continue
            try:
                dicts["security"] = dicts["md5"]
            except:
                # Missing "md5" key -> malformed entry; abort the whole page.
                return False
            # NOTE(review): duplicate of the assignment inside the try above.
            dicts["security"] = dicts["md5"]
            dicts["vol"] = dicts["firstvolume"]
            dicts["issue"] = dicts["firstissue"]
            dicts["page"] = dicts["firstpage"]
            dicts["sid"] = sessionid
            del dicts["md5"]
            del dicts["firstissue"]
            del dicts["firstvolume"]
            del dicts["firstpage"]
            del dicts["doiencoded"]
            listcite2.append(dicts)
        # NOTE(review): naive quote swap to fake JSON -- breaks if any value
        # contains an apostrophe.
        strings = str(listcite2).replace("'", "\"")
        postdate = {
            "citedby": strings
        }
        self.logger.info("postdate is:" + str(postdate))

        BoolResult, errString, r = facade.BaseRequestPost(url,
                                                          sn=self.sn,
                                                          proxies=self.Proxies,
                                                          headers=self.headers2,
                                                          data=postdate,
                                                          allow_redirects=True,
                                                          endstring="",
                                                          timeout=60)
        if BoolResult:

            try:
                json.loads(r.text)["results"]
            except:
                # Response is not the expected JSON.  52 is the capped last
                # page (5200 / 100); it is presumably rejected by the server,
                # so mark it done to avoid retrying forever -- TODO confirm.
                if PAGE == 52:
                    sql = "update accnumpagecn set stat=1 where `page`='{}' and `AN`='{}'".format(str(PAGE), ac)
                    self.mysqlutils.ExeSqlToDB(sql)
                print("解析错误")
                return True
            # Store cited-by count, EID and SID per accession number.
            for onedate in json.loads(r.text)["results"]:
                count = onedate["COUNT"]
                eid = onedate["EID"]
                accnumber = onedate["ID"]
                sid = onedate["SID"]
                citedtime = MDateTimeUtils.get_today_date_strings()
                sql = "update articlenew set `cited_cnt`='{}',`eid`='{}',`sid`='{}',citedtime='{}' where AccessionNumber='{}'".format(
                    count, eid, sid, citedtime, accnumber)
                self.mysqlutils.ExeSqlToDB(sql)
            sql = "update accnumpagecn set stat=1 where `page`='{}' and `AN`='{}'".format(str(PAGE), ac)
            self.mysqlutils.ExeSqlToDB(sql)
        else:
            print("下载eid失败")
            return False

        # A short (non-full) page means this was the final page; returning
        # False stops the paging loop in down_one().
        if len(json.loads(r1.text)["results"]) < 100:
            return False

        return True

    def select_db(self):
        """Refill ``self.ac_list`` with up to 1000 unfinished prefixes (stat=0)."""
        query = "select AN from accnumcn where stat=0 limit 1000"
        self.ac_list.clear()
        self.ac_list.extend(record[0] for record in self.mysqlutils.SelectFromDB(query))

    def select_stat_db(self):
        """
        Refill ``self.ac_list`` with 50 random unfinished prefixes (stat=0).
        """
        query = "select AN from accnumcn where stat=0 ORDER BY RAND() limit 50"
        self.ac_list.clear()
        for record in self.mysqlutils.SelectFromDB(query):
            self.ac_list.append(record[0])

    def run(self):
        """Full pass: build the 1-char prefix work list, then download each prefix."""
        for step in (self.init_organ_list, self.down_all):
            step()

    def update_db(self):
        """
        Grow the prefix tree: every finished prefix (stat=1) whose result
        count exceeds 5000 is split into one-character-longer child
        prefixes; when nothing needs splitting, (re)seed the table with
        1-character prefixes.
        """

        def seed(prefix):
            # Queue a non-empty prefix for download; ignore duplicates.
            if prefix != "":
                sql = "insert ignore into accnumcn(`AN`) values ('{}')".format(prefix)
                self.mysqlutils.ExeSqlToDB(sql)

        sql = "select allnum,AN from accnumcn where stat=1 and allnum > 5000;"
        rows = self.mysqlutils.SelectFromDB(sql)
        if rows:
            # Subdivide each oversized prefix one character deeper.
            for row in rows:
                num = len(row[1])
                sql = "SELECT left(AccessionNumber, %s) as ac FROM `article` WHERE left(AccessionNumber, %s) = '%s' group by ac"
                sql = sql % (num + 1, num, row[1])
                for child in self.mysqlutils.SelectFromDB(sql):
                    seed(child[0])
                # stat=2 marks "already subdivided".
                sql = "update accnumcn set stat=2 where AN='{}'".format(row[1])
                self.mysqlutils.ExeSqlToDB(sql)
        else:
            sql = "SELECT left(AccessionNumber, 1) as ac FROM `article` group by ac"
            for child in self.mysqlutils.SelectFromDB(sql):
                seed(child[0])
        self.logger.info('code_list size:%d' % len(self.ac_list))

    def jsonerr(self):
        """
        Repair records whose `citemsg` was truncated by the varchar column
        length limit (value no longer ends with '}'): re-open every finished
        ancestor prefix of the first broken accession number and flag its
        articles for re-download.  Exits the process when nothing is broken.
        """
        sql = "SELECT AccessionNumber FROM `articlenew` WHERE stat=0 and `citemsg` NOT LIKE '%}' ORDER BY `AccessionNumber`  LIMIT 0,1000"
        rows = self.mysqlutils.SelectFromDB(sql)
        if not rows:
            print("完成")
            sys.exit(-1)
        accnum = rows[0][0]
        # Walk every proper prefix of the broken accession number.
        for i in range(1, len(accnum)):
            donwpage = accnum[:i]
            sql = "select * from accnumcn where AN='{}' and stat=1 ".format(donwpage)
            if not self.mysqlutils.SelectFromDB(sql):
                continue
            # Re-open the finished prefix and invalidate its articles.
            sql = "update accnumcn set stat=0 where AN='{}'".format(donwpage)
            self.mysqlutils.ExeSqlToDB(sql)
            sql = "update articlenew set stat = -1 where `AccessionNumber` LIKE '{}%'".format(donwpage)
            self.mysqlutils.ExeSqlToDB(sql)


def poolrun(proxy, queue):
    """Worker process: bind one proxy, then drain prefixes from *queue* forever.

    Errors (including SystemExit from down_one) are printed and the loop
    keeps going.
    """
    worker = DownIndexByAN()
    worker.set_proxy(proxy)
    while True:
        try:
            worker.down_one(queue.get())
        except:  # noqa: E722 -- never let a worker die
            traceback.print_exc()


def setTask(queue):
    """Producer process: keep subdividing prefixes and feeding unfinished
    ones (stat=0) into *queue*; naps 30s whenever the table is drained."""
    producer = DownIndexByAN()
    while True:
        producer.update_db()
        producer.select_db()
        if not producer.ac_list:
            time.sleep(30)
            continue
        for prefix in producer.ac_list:
            queue.put(prefix)


if __name__ == "__main__":
    # When freezing with pyinstaller on Windows, multiprocessing needs
    # multiprocessing.freeze_support() at the entry point, otherwise the
    # main process loops forever.
    # multiprocessing.freeze_support()
    with multiprocessing.Manager() as manager:
        queue = manager.Queue(300)
        # One producer feeding the queue, plus one consumer per proxy endpoint.
        workers = [Process(target=setTask, args=(queue,))]
        workers += [Process(target=poolrun, args=(proxy, queue)) for proxy in DownIndexByAN.listip]
        for worker in workers:
            worker.start()
        for worker in workers:
            worker.join()

#
# if __name__ == "__main__":
#     down = DownIndexByAN()
#     down.set_proxy("192.168.30.176:8012")
#     while True:
#         try:
#             down.update_db()
#             # down.select_stat_db()
#             down.select_db()
#             if len(down.ac_list) == 0:
#                 break
#             down.down_all()
#         except:
#             traceback.print_exc()
#             print("出现错误")
#             time.sleep(60)
