import json
from multiprocessing import Manager, Process

import math
import os
import re
import sys
import time
import traceback
from urllib import parse

import facade
import pymysql
import requests
from vip.DiscernCode import DiscernCode
from xjlibrary.mdatetime.mtime2 import MDateTimeUtils
from xjlibrary.our_file_dir import BaseDir
from xjlibrary.tools.BaseUrl import BaseUrl

# Resolve key directories relative to this file's location.
curPath = BaseDir.get_file_dir_absolute(__file__)
TopPath = BaseDir.get_upper_dir(curPath, -3)
dirPath = BaseDir.get_new_path(TopPath, "download", "EI", "download", "json", "AN")

insertpath = os.path.dirname(os.path.abspath(__file__))
# Make the parent directory importable so that `globalproxy` below resolves.
sys.path.insert(0, BaseDir.get_upper_dir(curPath, -1))

from globalproxy import proxy_list


class DownIndexByAN(object):
    """
    Downloader keyed by accession-number (AN) prefix.

    Besides the search-result index, this code also downloads the
    cited-by count and the EID for each record.
    """

    def __init__(self):
        # Database connection settings live in db.ini next to this file.
        self.configfile = BaseDir.get_new_path(curPath, "db.ini")
        self.logger = facade.get_streamlogger()
        self.mysqlutils = facade.MysqlUtiles(self.configfile, "db", logger=self.logger)
        # 8207 8082 8012  (candidate proxy ports)
        # self.Proxies = {
        #     "http": "192.168.30.176:8207",
        #     "https": "192.168.30.176:8207"  # the key names the target site's protocol
        # }
        self.year_floor = None
        self.year_ceil = None
        # Work items: (AN prefix, known local record count) tuples.
        self.ac_list = []
        self.NumPerPage = 100  # page size after switching results to 100/page (site default is 25/page)
        self.NumAllPage = 5200  # hard cap on records fetched per prefix
        self.init_sn()
        self.init_form()
        # Timestamp of the last captcha recognition, initialised 5 minutes in
        # the past so the first recognition attempt is allowed immediately.
        self.discern_code_time = int(time.time()) - 300

    def init_sn(self):
        """Create a brand-new requests session.

        After a large number of requests the site starts returning assorted
        errors; starting a fresh session works around that.
        """
        self.UserAgent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.106 Safari/537.36'
        session = requests.Session()
        session.headers['User-Agent'] = self.UserAgent
        self.sn = session

    def set_proxy(self, proxy):
        """Route both HTTP and HTTPS traffic through *proxy*.

        :param proxy: "host:port" string of the proxy server.
        """
        # The dict key names the protocol of the target site.
        self.Proxies = dict.fromkeys(("http", "https"), proxy)

    def init_form(self):
        """
        Initialise the request headers and the various search forms.
        :return:
        """
        # Minimal header set for plain GET requests.
        self.Headers = {
            'Accept': '*/*',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.106 Safari/537.36',
        }
        # Headers for JSON (XHR) requests against the search endpoints.
        self.headers = {
            "Accept": "application/json, text/javascript, */*; q=0.01",
            "Accept-Encoding": "gzip, deflate",
            "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
            "Connection": "keep-alive",
            "Content-Type": "application/json",
            "Host": "www.engineeringvillage.com",
            "Referer": "https://www.engineeringvillage.com/search/expert.url",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.62 Safari/537.36",
            "X-NewRelic-ID": "VQQAUldRCRAFUFFQBwgCUQ==",
            "X-Requested-With": "XMLHttpRequest"
        }
        # Same as above but for form-encoded POST requests.
        self.headers2 = {
            "Accept": "application/json, text/javascript, */*; q=0.01",
            "Accept-Encoding": "gzip, deflate",
            "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
            "Connection": "keep-alive",
            "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
            "Host": "www.engineeringvillage.com",
            "Referer": "https://www.engineeringvillage.com/search/expert.url",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.62 Safari/537.36",
            "X-NewRelic-ID": "VQQAUldRCRAFUFFQBwgCUQ==",
            "X-Requested-With": "XMLHttpRequest"
        }
        self.UserAgent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.106 Safari/537.36'
        # Millisecond timestamp reused as the "_" cache-buster parameter.
        self.t = int(round(time.time() * 1000))
        # Expert-search submission form; searchWord1 is overwritten per prefix
        # by down_one().
        self.Form = {
            "usageOrigin": "searchform",
            "usageZone": "expertsearch",
            "editSearch": "",
            "isFullJsonResult": "true",
            "angularReq": "true",
            "CID": "searchSubmit",
            "searchtype": "Expert",
            "origin": "searchform",
            "category": "expertsearch",
            "searchWord1": "((1994*) WN AN)",
            "allDb": "1",
            "database": "1",
            "yearselect": "yearrange",
            "startYear": "1884",
            "endYear": "2020",
            "updatesNo": "1",  # "4",
            "sort": "yr",  # "sort": "relevance",  "yr" = newest first, "relevance" = sort by relevance
            "autostem": "true",
            "searchStartTimestamp": str(int(round(time.time() * 1000))),
            "_": str(self.t)
        }

        # Form used to switch an existing search session to 100 results/page;
        # SEARCHID is overwritten with the live session id before use.
        self.FormPage100 = {
            "pageSizeVal": "100",
            "SEARCHID": "7cfe8a66d4934a6385e4120b3aa5210c",
            "sortsort": "relevance",
            "sortdir": "dw",
            "angularReq": "true",
            "isFullJsonResult": "false",
            "usageOrigin": "searchresults",
            "usageZone": "resultsperpagetop",
            "_": str(int(round(time.time() * 1000)))
        }

    # def init_ac_list(self):
    #     """
    #     accnum 记录着以谁开头的搜索表达式
    #     如果 accnum表中没有任何数据 调用该函数初始化
    #     如果有就没必要调用
    #     :return:
    #     """
    #     # 取第一位
    #     sql = "SELECT left(AccessionNumber, 1) as ac FROM `articlenew` GROUP BY ac"
    #     rows = self.mysqlutils.SelectFromDB(sql)
    #     for row in rows:
    #         if row[0] != "":
    #             sql = "insert ignore into accnum(`AN`) values ('{}')".format(row[0])
    #             self.mysqlutils.ExeSqlToDB(sql)
    #         ac = row[0]
    #         self.ac_list.append(ac)
    #     self.logger.info('ac_list size:%d' % len(self.ac_list))

    def down_all(self):
        """Process every (prefix, count) work item queued in ``self.ac_list``.

        A failure on one item is printed and does not stop the remaining items.
        """
        for prefix, known_count in self.ac_list:
            try:
                self.down_one(prefix, known_count)
            except:
                traceback.print_exc()
        self.logger.info('Game Over! Good Boy!')

    def discern_code(self, jpgpath):
        """Recognise a captcha image via the cloud captcha-solving service.

        Recognition attempts are throttled to one every five minutes; when
        called too early the worker sleeps 60 seconds and reports failure.

        :param jpgpath: path of the captcha image to recognise.
        :return: (True, code) on success, (False, "") otherwise.
        """
        elapsed = int(time.time()) - self.discern_code_time
        if elapsed <= 60 * 5:
            # Too soon since the last attempt: back off instead of solving.
            self.logger.info("时间还没到休息60秒 差时为{} 应该相隔5分钟".format(elapsed))
            time.sleep(60)
            return False, ""
        boolresult, code = DiscernCode(self.logger, codetype=3006).discern_code(jpgpath)
        # Keep an audit trail of every recognised code.
        BaseDir.single_add_file("./code.txt", "***code is :{}\n".format(code))
        self.discern_code_time = int(time.time())
        if boolresult:
            return True, code
        return False, ""

    def init_home(self):
        """
        Refresh session cookies by re-requesting the home page.

        After long download sessions the site starts erroring; visiting the
        home page again renews the cookies.  If the institutional path-choice
        page is shown, the remembered candidate path is re-authenticated.

        :return: False when the home page request fails; otherwise None.
        """
        url = r'https://www.engineeringvillage.com/home.url'
        header = {
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3",
            "Accept-Encoding": "gzip, deflate",
            "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
            "Host": "www.engineeringvillage.com",
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36"
        }
        BoolResult, errString, r = facade.BaseRequest(url,
                                                      sn=self.sn,
                                                      headers=header,
                                                      verify=False,
                                                      proxies=self.Proxies,
                                                      timeout=60)
        self.logger.info(errString)
        if not BoolResult:
            return False
        self.logger.info("首页访问完成")
        print(r.url)
        BaseDir.single_write_file(r.text, "./test.html")
        # Institutional access page detected: extract the CSRF token and
        # confirm the remembered access path.
        if r.text.find("Chongqing University Library, General Access") > -1:
            obj = re.search(r'csrfToken\.value = "(.*?)"', r.text)
            csrfToken = obj.group(1)
            print(csrfToken)
            url = "https://www.engineeringvillage.com/customer/authenticate.url"
            postdate = {
                "path_choice": "13022631",
                "remember_path_flag": "true",
                "origin": "pathChoice",
                "zone": "main",
                "auth_type": "CANDIDATE_PATH",
                "csrfToken": csrfToken
            }
            BoolResult, errString, r = facade.BaseRequestPost(url=url,
                                                              sn=self.sn,
                                                              data=postdate,
                                                              proxies=self.Proxies,
                                                              allow_redirects=True,
                                                              endstring="",
                                                              verify=False,
                                                              timeout=60)
            if BoolResult:
                print("suceess")
            else:
                # Authentication failure is unrecoverable for this worker.
                print("failed")
                sys.exit(-1)

    def discern(self, r, form):
        """
        Handle a captcha challenge, then replay the original search request.

        :param r: response whose JSON body carries ``imagetextenc``.
        :param form: search form to resubmit once the captcha is solved.
        :return: True when the captcha was solved and the replayed search
                 succeeded, False otherwise.
        """
        imagetextenc = json.loads(r.text)["imagetextenc"]
        # Fetch the captcha image for this challenge.
        url = "https://www.engineeringvillage.com/rest/captcha/image?imagetextenc={}".format(imagetextenc)
        BoolResult, errString, r = facade.BaseRequest(url=url,
                                                      sn=self.sn,
                                                      proxies=self.Proxies,
                                                      allow_redirects=True,
                                                      endstring="",
                                                      timeout=60)
        # One captcha image per worker process, keyed by pid (computed once;
        # the original recomputed the identical path twice).
        jpgpath = BaseDir.get_new_path(curPath, str(os.getpid()) + ".jpg")
        if BoolResult:
            BaseDir.single_write_wb_file(r.content, jpgpath)
        else:
            print("获取验证码失败")
            return False
        bools, code = self.discern_code(jpgpath)
        if not bools:
            return False
        # Submit the recognised code for verification.
        url = "https://www.engineeringvillage.com/rest/captcha/verify?imagetextenc={}&userentry={}".format(
            imagetextenc, code)
        BoolResult, errString, r = facade.BaseRequest(url=url,
                                                      sn=self.sn,
                                                      proxies=self.Proxies,
                                                      allow_redirects=True,
                                                      endstring="",
                                                      timeout=60)
        if not BoolResult:
            print("验证失败")
            return False
        # Captcha accepted: replay the original search request.
        url = r'https://www.engineeringvillage.com/search/submit.url'
        url = url + "?" + BaseUrl.dicts_to_url(form)
        BoolResult, errString, r = facade.BaseRequest(url=url,
                                                      sn=self.sn,
                                                      proxies=self.Proxies,
                                                      allow_redirects=True,
                                                      endstring="",
                                                      timeout=60)
        if not BoolResult:
            print("验证后失败")
            return False
        # BUG FIX: the original fell through and returned None here, so
        # down_one()'s `return self.discern(r, form)` reported failure even
        # after a fully successful verification.
        return True

    def down_one(self, ac, allnum):
        """
        Search one accession-number prefix and persist every hit.

        Submits an expert search for ``(({ac}*) WN AN)``, stores each
        result row into ``articlenew``, fetches cited-by counts / EIDs via
        the Scopus tool endpoint, and pages through the remaining results.

        :param ac: accession-number prefix, e.g. "1994".
        :param allnum: record count already stored locally for this prefix
                       (may be None).
        :return: True when the prefix was fully processed; False/None on
                 failure or when the prefix was finished early.
        """
        self.logger.info('DownOne %s, %s ...' % (ac, repr(self.Proxies)))

        form = self.Form
        form['searchWord1'] = "(({}*) WN AN)".format(ac)

        url = r'https://www.engineeringvillage.com/search/submit.url'
        url = url + "?" + BaseUrl.dicts_to_url(form)
        BoolResult, errString, r = facade.BaseRequest(url=url,
                                                      sn=self.sn,
                                                      headers=self.headers,
                                                      proxies=self.Proxies,
                                                      allow_redirects=True,
                                                      endstring="",
                                                      verify=False,
                                                      timeout=60)
        self.logger.info(errString)
        if errString == "request":
            # download error
            return False
        if not BoolResult:
            if r.status_code == 400:
                # captcha challenge
                return self.discern(r, form)
            else:
                return False
        self.logger.info("搜索成功")
        print(r.url)
        BaseDir.single_write_file(r.text, "./test2.html")
        try:
            # SEARCHID in the redirect URL identifies this search session.
            param_dict = parse.parse_qs(parse.urlparse(r.url).query)
            SEARCHID = param_dict['SEARCHID'][0]
        except:
            # No SEARCHID: cookies are probably stale, refresh via home page.
            self.init_home()
            return False

        if not json.loads(r.text)["results"]:
            self.logger.info("没有 results 退出")
            sql = "update accnum set stat = 1 where AN='{}'".format(ac)
            self.mysqlutils.ExeSqlToDB(sql)
            return

        # Total number of hits reported by the search.
        searchnum = json.loads(r.text)["pagenav"]["resultscount"]
        self.logger.info(searchnum)
        self.logger.info("searchnum:allnum is {}:{}".format(searchnum, allnum))
        if not allnum:
            allnum = 0
        if int(searchnum) <= int(allnum):
            # Everything for this prefix is already stored locally.
            self.logger.info("搜索量小于等于本地总量")
            sql = "update accnum set stat = 1 where AN='{}'".format(ac)
            self.mysqlutils.ExeSqlToDB(sql)
            return

        sql = "update accnum set allnum = {} where AN='{}'".format(searchnum, ac)
        self.mysqlutils.ExeSqlToDB(sql)

        if int(searchnum) >= 5000:
            # Too many hits: mark for further prefix refinement (update_db).
            self.logger.info("总量大于5000 值为1等待继续细分")
            sql = "update accnum set stat = 1 where AN='{}'".format(ac)
            self.mysqlutils.ExeSqlToDB(sql)
            return

        # Total number of result pages.
        pagecount = json.loads(r.text)["pagenav"]["pagecount"]

        self.logger.info("25: 第一次搜索每页25的数量和页数:总量为{},页数为{}".format(searchnum, pagecount))
        self.logger.info("获取数据长度{}".format(len(json.loads(r.text)["results"])))

        if len(json.loads(r.text)["results"]) < 25:
            # Fewer than one default page of hits: handle inline, no paging.
            self.logger.info("本页小于25 直接处理不需翻页")
            listcite = []
            for onedate in json.loads(r.text)["results"]:
                listcite.append(onedate["citedby"])
                sql = "INSERT INTO articlenew (`docid`,`AccessionNumber`,`doi`,`citemsg`) VALUES ('{docid}','{AccessionNumber}','{doi}','{citemsg}') ON DUPLICATE KEY UPDATE docid='{docid}',`doi`='{doi}',citemsg='{citemsg}'".format(
                    docid=onedate["doc"]["docid"],
                    AccessionNumber=onedate["accnum"],
                    citemsg=pymysql.escape_string(json.dumps(onedate["citedby"], ensure_ascii=False)),
                    doi=pymysql.escape_string(onedate["doi"]))
                self.mysqlutils.ExeSqlToDB(sql)
            url = "https://www.engineeringvillage.com/toolsinscopus/citedbycount.url"
            sessionid = json.loads(r.text)["searchMetaData"]["searchesEntity"]["sessionId"]
            # Reshape each citedby dict into the payload the Scopus
            # cited-by-count endpoint expects.
            listcite2 = []
            for dicts in listcite:
                if not dicts:
                    continue
                dicts["security"] = dicts["md5"]
                dicts["vol"] = dicts["firstvolume"]
                dicts["issue"] = dicts["firstissue"]
                dicts["page"] = dicts["firstpage"]
                dicts["sid"] = sessionid
                del dicts["md5"]
                del dicts["firstissue"]
                del dicts["firstvolume"]
                del dicts["firstpage"]
                del dicts["doiencoded"]
                listcite2.append(dicts)
            strings = str(listcite2).replace("'", "\"")
            if not strings or strings == '[]':
                sql = "update accnum set stat = 1 where AN='{}'".format(ac)
                self.mysqlutils.ExeSqlToDB(sql)
                return
            postdate = {
                "citedby": strings
            }
            self.logger.info("postdate is:" + str(postdate))

            BoolResult, errString, r = facade.BaseRequestPost(url,
                                                              sn=self.sn,
                                                              proxies=self.Proxies,
                                                              headers=self.headers2,
                                                              data=postdate,
                                                              allow_redirects=True,
                                                              endstring="",
                                                              timeout=60)
            if BoolResult:
                for onedate in json.loads(r.text)["results"]:
                    count = onedate["COUNT"]
                    eid = onedate["EID"]
                    accnumber = onedate["ID"]
                    sid = onedate["SID"]
                    citedtime = MDateTimeUtils.get_today_date_strings()
                    sql = "update articlenew set `cited_cnt`='{}',`eid`='{}',`sid`='{}',citedtime='{}' where AccessionNumber='{}'".format(
                        count, eid, sid, citedtime, accnumber)
                    self.mysqlutils.ExeSqlToDB(sql)
            else:
                print("下载eid失败")
                sys.exit(-1)
            sql = "update accnum set stat = 1 where AN='{}'".format(ac)
            self.mysqlutils.ExeSqlToDB(sql)
            return

        # Switch the result page size to 100.
        self.FormPage100["SEARCHID"] = SEARCHID
        self.FormPage100["_"] = str(self.t)
        url = "https://www.engineeringvillage.com/search/results/quick.url"
        url = url + "?" + BaseUrl.dicts_to_url(self.FormPage100)
        BoolResult, errString, r = facade.BaseRequest(url=url,
                                                      sn=self.sn,
                                                      proxies=self.Proxies,
                                                      headers=self.headers,
                                                      allow_redirects=True,
                                                      endstring="",
                                                      timeout=60)
        self.logger.info(errString)
        if not BoolResult:
            return False

        searchnum = json.loads(r.text)["pagenav"]["resultscount"]
        pagecount = json.loads(r.text)["pagenav"]["pagecount"]
        self.logger.info("100: 转化搜索每页100的数量和页数:总量为{},页数为{}".format(searchnum, pagecount))
        templen = len(json.loads(r.text)["results"])
        if templen <= 100:
            # Persist page 1 before walking the remaining pages.
            self.logger.info("处理第一页")
            listcite = []
            # BaseDir.single_add_file("./sql.txt", "第1页\n")
            for onedate in json.loads(r.text)["results"]:
                listcite.append(onedate["citedby"])
                sql = "INSERT INTO articlenew (`docid`,`AccessionNumber`,`doi`,`citemsg`) VALUES ('{docid}','{AccessionNumber}','{doi}','{citemsg}') ON DUPLICATE KEY UPDATE docid='{docid}',`doi`='{doi}',citemsg='{citemsg}'".format(
                    docid=onedate["doc"]["docid"],
                    AccessionNumber=onedate["accnum"],
                    citemsg=pymysql.escape_string(json.dumps(onedate["citedby"], ensure_ascii=False)),
                    doi=pymysql.escape_string(onedate["doi"]))
                # BaseDir.single_add_file("./sql.txt",sql+"\n")
                self.mysqlutils.ExeSqlToDB(sql)
            url = "https://www.engineeringvillage.com/toolsinscopus/citedbycount.url"
            sessionid = json.loads(r.text)["searchMetaData"]["searchesEntity"]["sessionId"]
            listcite2 = []
            print(listcite)
            for dicts in listcite:
                if not dicts:
                    continue
                dicts["security"] = dicts["md5"]
                dicts["vol"] = dicts["firstvolume"]
                dicts["issue"] = dicts["firstissue"]
                dicts["page"] = dicts["firstpage"]
                dicts["sid"] = sessionid
                del dicts["md5"]
                del dicts["firstissue"]
                del dicts["firstvolume"]
                del dicts["firstpage"]
                del dicts["doiencoded"]
                listcite2.append(dicts)
            strings = str(listcite2).replace("'", "\"")
            postdate = {
                "citedby": strings
            }
            self.logger.info("postdate is:" + str(postdate))

            BoolResult, errString, r = facade.BaseRequestPost(url,
                                                              sn=self.sn,
                                                              proxies=self.Proxies,
                                                              headers=self.headers2,
                                                              data=postdate,
                                                              allow_redirects=True,
                                                              endstring="",
                                                              timeout=60)
            if BoolResult:
                for onedate in json.loads(r.text)["results"]:
                    count = onedate["COUNT"]
                    eid = onedate["EID"]
                    accnumber = onedate["ID"]
                    sid = onedate["SID"]
                    citedtime = MDateTimeUtils.get_today_date_strings()
                    sql = "update articlenew set `cited_cnt`='{}',`eid`='{}',`sid`='{}',citedtime='{}' where AccessionNumber='{}'".format(
                        count, eid, sid, citedtime, accnumber)
                    self.mysqlutils.ExeSqlToDB(sql)
            else:
                print("下载eid失败")
                sys.exit(-1)
            if templen < 100:
                # A partial first page means there are no further pages.
                sql = "update accnum set stat = 1 where AN='{}'".format(ac)
                self.mysqlutils.ExeSqlToDB(sql)
                print(len(json.loads(r.text)["results"]))
                self.logger.info("小于100 跳过")
                return

        if pagecount > int(math.ceil(self.NumAllPage / self.NumPerPage)):
            pagecount = math.ceil(self.NumAllPage / self.NumPerPage)  # assumed maximum page number
        # Record every remaining page as a pending work item.
        for page in range(2, int(pagecount) + 1):
            sql = "insert ignore into accnumpage (AN,allnum,`page`) values ('{}','{}','{}')".format(ac, searchnum,
                                                                                                    str(page))
            self.mysqlutils.ExeSqlToDB(sql)

        for PAGE in range(2, int(pagecount) + 1):
            sql = "select * from accnumpage where `page`='{}' and `AN`='{}' and stat=1".format(str(PAGE), ac)
            rows = self.mysqlutils.SelectFromDB(sql)
            if len(rows) > 0:
                # Page already finished in a previous run.
                self.logger.info("存在 跳过")
                continue

            if not self.DownOnePage(SEARCHID, pagecount, PAGE, ac):  # download failed; stop this session
                break
        sql = "update accnum set stat = 1 where AN='{}'".format(ac)
        self.mysqlutils.ExeSqlToDB(sql)
        return True

    def DownOnePage(self, SEARCHID, pagecount, PAGE, ac):
        """
        Download one result page (100 records) of an existing search session.

        :param SEARCHID: search-session id returned by the initial submit.
        :param pagecount: total number of pages for this search.
        :param PAGE: 1-based page number to fetch (callers pass >= 2).
        :param ac: accession-number prefix being processed.
        :return: True to continue with the next page, False to abort the session.
        """
        self.logger.info('DownOnePage ' + str(PAGE) + '/' + str(pagecount) + ' ...')

        BaseUrls = "https://www.engineeringvillage.com/search/results/expert.url"
        FormPagenumber = {
            "navigator": "NEXT",
            "SEARCHID": SEARCHID,
            "database": "1",
            "angularReq": "true",
            "isFullJsonResult": "false",
            "usageOrigin": "searchresults",
            "COUNT": str((int(PAGE) - 1) * 100 + 1),  # 1-based index of the first record on this page
            "usageZone": "nextpage",
            "_": str(self.t)
        }

        url = BaseUrls + "?" + BaseUrl.dicts_to_url(FormPagenumber)

        BoolResult, errString, r = facade.BaseRequest(url=url,
                                                      sn=self.sn,
                                                      proxies=self.Proxies,
                                                      endstring="",
                                                      timeout=60)
        self.logger.info(errString)
        if not BoolResult:
            return False
        # Keep the page response around; `r` is reused for the POST below.
        r1 = r
        listcite = []
        # BaseDir.single_add_file("./sql.txt", "第{}页\n".format(PAGE))
        for onedate in json.loads(r.text)["results"]:
            listcite.append(onedate["citedby"])
            sql = "INSERT INTO articlenew (`docid`,`AccessionNumber`,`doi`,`citemsg`) VALUES ('{docid}','{AccessionNumber}','{doi}','{citemsg}') ON DUPLICATE KEY UPDATE docid='{docid}',`doi`='{doi}',citemsg='{citemsg}'".format(
                docid=onedate["doc"]["docid"],
                AccessionNumber=onedate["accnum"],
                citemsg=pymysql.escape_string(json.dumps(onedate["citedby"], ensure_ascii=False)),
                doi=pymysql.escape_string(onedate["doi"]))
            self.mysqlutils.ExeSqlToDB(sql)
            # BaseDir.single_add_file("./sql.txt", sql + "\n")
        url = "https://www.engineeringvillage.com/toolsinscopus/citedbycount.url"
        sessionid = json.loads(r.text)["searchMetaData"]["searchesEntity"]["sessionId"]
        # Reshape each citedby dict into the payload the Scopus
        # cited-by-count endpoint expects.
        listcite2 = []
        for dicts in listcite:
            if not dicts:
                continue
            try:
                # NOTE(review): this assignment is repeated just below; the
                # try/except only probes for a missing "md5" key.
                dicts["security"] = dicts["md5"]
            except:
                return False
            dicts["security"] = dicts["md5"]
            dicts["vol"] = dicts["firstvolume"]
            dicts["issue"] = dicts["firstissue"]
            dicts["page"] = dicts["firstpage"]
            dicts["sid"] = sessionid
            del dicts["md5"]
            del dicts["firstissue"]
            del dicts["firstvolume"]
            del dicts["firstpage"]
            del dicts["doiencoded"]
            listcite2.append(dicts)
        strings = str(listcite2).replace("'", "\"")
        postdate = {
            "citedby": strings
        }
        self.logger.info("postdate is:" + str(postdate))

        BoolResult, errString, r = facade.BaseRequestPost(url,
                                                          sn=self.sn,
                                                          proxies=self.Proxies,
                                                          headers=self.headers2,
                                                          data=postdate,
                                                          allow_redirects=True,
                                                          endstring="",
                                                          timeout=60)
        if BoolResult:
            try:
                json.loads(r.text)["results"]
            except:
                # NOTE(review): 52 appears to be the last page under the
                # 5200-record cap (NumAllPage / NumPerPage) — confirm.
                if PAGE == 52:
                    sql = "update accnumpage set stat=1 where `page`='{}' and `AN`='{}'".format(str(PAGE), ac)
                    self.mysqlutils.ExeSqlToDB(sql)
                self.logger.info("解析错误")
                return True
            for onedate in json.loads(r.text)["results"]:
                count = onedate["COUNT"]
                eid = onedate["EID"]
                accnumber = onedate["ID"]
                sid = onedate["SID"]
                citedtime = MDateTimeUtils.get_today_date_strings()
                sql = "update articlenew set `cited_cnt`='{}',`eid`='{}',`sid`='{}',citedtime='{}' where AccessionNumber='{}'".format(
                    count, eid, sid, citedtime, accnumber)
                self.mysqlutils.ExeSqlToDB(sql)
            sql = "update accnumpage set stat=1 where `page`='{}' and `AN`='{}'".format(str(PAGE), ac)
            self.mysqlutils.ExeSqlToDB(sql)
        else:
            print("下载eid失败")
            return False

        # A partial page means this was the last one.
        if len(json.loads(r1.text)["results"]) < 100:
            return False

        return True

    def select_db(self):
        """Reload ``self.ac_list`` with up to 1000 pending (AN, allnum) rows."""
        self.ac_list.clear()
        sql = "select AN,allnum from accnum where stat=0 order by AN ASC limit 1000"
        for record in self.mysqlutils.SelectFromDB(sql):
            self.ac_list.append((record[0], record[1]))

    def run(self):
        """Entry point: drain all queued work items."""
        # self.init_ac_list()
        self.down_all()

    def update_db(self):
        """
        Split accession-number prefixes that matched too many records.

        This refinement was originally driven by the legacy ``article``
        table (its first initialisation is already done); it now relies on
        the new ``articlenew`` table instead.
        :return:
        """
        # Find prefixes that still need further splitting (>5000 hits).
        sql = "select allnum,AN from accnum where stat=1 and allnum > 5000;"
        rows = self.mysqlutils.SelectFromDB(sql)
        # Refine each oversized prefix by extending it by one character.
        if rows:
            for row in rows:
                num = len(row[1])
                sql = "SELECT left(AccessionNumber, %s) as ac FROM `articlenew` WHERE left(AccessionNumber, %s) = '%s' group by ac"
                sql = sql % (num + 1, num, row[1])
                rows2 = self.mysqlutils.SelectFromDB(sql)
                for row1 in rows2:
                    if row1[0] != "":
                        sql = "insert ignore into accnum(`AN`) values ('{}')".format(row1[0])
                        self.mysqlutils.ExeSqlToDB(sql)
                # stat=2 marks this prefix as already refined.
                sql = "update accnum set stat=2 where AN='{}'".format(row[1])
                self.mysqlutils.ExeSqlToDB(sql)
        else:
            # No oversized prefixes: (re)seed accnum with 1-character prefixes.
            sql = "SELECT left(AccessionNumber, 1) as ac FROM `articlenew` group by ac"
            rows2 = self.mysqlutils.SelectFromDB(sql)
            for row1 in rows2:
                if row1[0] != "":
                    sql = "insert ignore into accnum(`AN`) values ('{}')".format(row1[0])
                    self.mysqlutils.ExeSqlToDB(sql)
        # NOTE(review): ac_list is not modified in this method, so this logs
        # a stale size.
        self.logger.info('ac_list size:%d' % len(self.ac_list))

    def select_stat_db(self):
        """Refill ``self.ac_list`` with up to 50 random pending AN prefixes.

        Pending means stat=0 in the ``accnum`` table.
        :return:
        """
        self.ac_list.clear()
        sql = "select AN from accnum where stat=0 ORDER BY RAND() limit 50"
        self.ac_list.extend(record[0] for record in self.mysqlutils.SelectFromDB(sql))

    def jsonerr(self):
        """
        Repair data lost to the database varchar length limit.

        Takes the first unfinished accession number, re-opens the shortest
        already-finished prefix covering it (stat back to 0) and flags the
        affected ``articlenew`` rows (stat=-1) for re-download.
        :return: None; exits the process when nothing is left to repair.
        """
        # sql = "SELECT AccessionNumber FROM `articlenew` WHERE stat=0 and `citemsg` NOT LIKE '%}' ORDER BY `AccessionNumber` LIMIT 0,1000"
        sql = "SELECT AccessionNumber FROM `article` WHERE stat=0 ORDER BY `AccessionNumber` LIMIT 0,1000"
        rows = self.mysqlutils.SelectFromDB(sql)
        if rows:
            accnum = rows[0][0]
            # Walk ever-longer prefixes until one is found that was already
            # marked finished, then re-open it.
            for i in range(1, len(accnum)):
                donwpage = accnum[:i]
                sql = "select * from accnum where AN='{}' and stat=1 ".format(donwpage)
                rowsis = self.mysqlutils.SelectFromDB(sql)
                if rowsis:
                    sql = "update accnum set stat=0 where AN='{}'".format(donwpage)
                    self.mysqlutils.ExeSqlToDB(sql)
                    sql = "update articlenew set stat = -1 where `AccessionNumber` LIKE '{}%'".format(donwpage)
                    self.mysqlutils.ExeSqlToDB(sql)
                    break
        else:
            print("完成")
            sys.exit(-1)


def setTask(queue):
    """
    Producer loop for the worker pool: keeps the shared queue topped up.

    :param queue: multiprocessing queue the workers consume from.
    :return: never returns.
    """
    down = DownIndexByAN()
    while True:
        # Re-split any prefixes whose hit count exceeds 5000 records,
        # then reload the pending work items.
        down.update_db()
        down.select_db()
        if not down.ac_list:
            time.sleep(30)
            continue
        for args in down.ac_list:
            queue.put(args)


def poolrun(proxy, queue):
    """
    Worker loop: pull (AN, allnum) tasks off the queue and download them.

    :param proxy: proxy address this worker routes its traffic through.
    :param queue: shared task queue fed by :func:`setTask`.
    :return: never returns.
    """
    down = DownIndexByAN()
    down.set_proxy(proxy)
    while True:
        try:
            task_an, task_allnum = queue.get()
            down.down_one(task_an, task_allnum)
        except:
            traceback.print_exc()


if __name__ == "__main__":
    # When using multiprocessing on Windows, multiprocessing.freeze_support()
    # must be called at the entry point; otherwise an exe packaged with
    # pyinstaller ends up with the main process looping forever.
    # multiprocessing.freeze_support()
    Process_list = []
    with Manager() as manager:
        queue = manager.Queue(300)
        # One producer process fills the queue ...
        Process_list.append(Process(target=setTask, args=(queue,)))
        # ... and one worker process per configured proxy drains it.
        for proxy in proxy_list:
            Process_list.append(Process(target=poolrun, args=(proxy, queue)))
        for process in Process_list:
            process.start()
        for process in Process_list:
            process.join()

# if __name__ == "__main__":
#     down = DownIndexByAN()
#     down.set_proxy("192.168.30.176:8171")
#     # down.select_db()
#     # down.down_all()
#     while True:
#         try:
#             down.update_db()
#             down.select_stat_db()
#             if len(down.ac_list) == 0:
#                 break
#             down.select_db()
#             down.down_all()
#         except:
#             traceback.print_exc()
#             print("出现错误")
#             time.sleep(60)

# if __name__ == "__main__":
#     down = DownIndexByAN()
#     while True:
#         down.jsonerr()
