import json
import os
import sys
import time
import facade
import requests
from xjlibrary.our_file_dir.base_dir import BaseDir

# Resolve filesystem locations via the project's BaseDir helper:
# curPath  — directory containing this script
# TopPath  — an ancestor directory of curPath (argument -2; exact level
#            depends on BaseDir.get_upper_dir semantics — TODO confirm)
# dirPath  — output directory for downloaded Excel files
curPath = BaseDir.get_file_dir_absolute(__file__)
TopPath = BaseDir.get_upper_dir(curPath, -2)
dirPath = BaseDir.get_new_path(TopPath, "download", "EI", "download", "excel")

# Database config lives next to this script; "db" is the INI section name.
configfile = BaseDir.get_new_path(curPath, "db.ini")
logger = facade.get_streamlogger()
mysqlutils = facade.MysqlUtiles(configfile, "db", logger=logger)

# Shared HTTP session (recreated per batch in main()) and a millisecond
# timestamp used as the "_" cache-buster parameter of the search request.
sn = requests.Session()
t = int(round(time.time() * 1000))
# All traffic is routed through this local proxy.
proxy = {
    "http": "192.168.30.36:8041",
    "https": "192.168.30.36:8041"
}

import urllib.parse as parse


def url2Dict(url):
    """Parse the query string of *url* into a flat dict.

    parse_qs maps each key to a list of values; only the first value
    per key is kept.
    """
    query_map = parse.parse_qs(parse.urlparse(url).query)
    return {key: values[0] for key, values in query_map.items()}


# Default browser-like headers sent with the initial search requests
# (Chrome 67 user agent; host pinned to www.engineeringvillage.com).
BaseHeader = {"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
              "Accept-Encoding": "gzip, deflate, br",
              "Accept-Language": "zh-CN,zh;q=0.9",
              "Connection": "keep-alive",
              "Host": "www.engineeringvillage.com",
              "Upgrade-Insecure-Requests": "1",
              "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) "
                            "Chrome/67.0.3396.62 Safari/537.36",
              }


def init():
    """Open an Engineering Village session and submit the fixed "China" quick search.

    Returns:
        (SEARCHID, sessionId) tuple on success; False when the search
        response cannot be parsed (caller must handle this).
    Exits the process when either HTTP request fails.
    """
    global sn

    base_search_url = "https://www.engineeringvillage.com/search/quick.url"
    feature = "search-menu-contents"
    # Warm-up GET: establishes session cookies before the actual search.
    ok, strins, r = facade.BaseRequest(base_search_url,
                                       sn=sn,
                                       mark=feature,
                                       headers=BaseHeader,
                                       proxies=proxy,
                                       verify=False,
                                       timeout=(30, 60))
    if not ok:
        sys.exit("初始化请求失败")

    search_china_data = {
        "usageOrigin": "searchform",
        "usageZone": "quicksearch",
        "editSearch": "",
        "isFullJsonResult": "true",
        "angularReq": "true",
        "CID": "searchSubmit",
        "searchtype": "Quick",
        "origin": "searchform",
        "category": "quicksearch",
        "section1": "CO",
        "searchWord1": "China",
        "allDb": "1",
        "database": "1",
        "yearselect": "yearrange",
        "startYear": "1884",
        "endYear": "2018",
        "updatesNo": "1",
        "language": "NO-LIMIT",
        "doctype": "NO-LIMIT",
        "sort": "relevance",
        "treatmentType": "NO-LIMIT",
        "searchStartTimestamp": str(int(round(time.time() * 1000))),
        "_": str(t)
    }

    # urlencode both joins and percent-escapes the parameters; the previous
    # hand-built "k=v&" concatenation performed no escaping at all.
    search_url = ("https://www.engineeringvillage.com/search/submit.url?"
                  + parse.urlencode(search_china_data))
    ok, strins, r = facade.BaseRequest(search_url,
                                       sn=sn,
                                       proxies=proxy,
                                       headers=BaseHeader,
                                       mark="results",
                                       endstring=None,
                                       verify=False)
    if not ok:
        sys.exit("搜索请求失败")
    print(r.text)
    try:
        sessionId = json.loads(r.text)["searchMetaData"]["searchesEntity"]["sessionId"]
    except (ValueError, KeyError, TypeError):
        # Malformed JSON or unexpected structure — signal failure to caller
        # instead of crashing (was a bare except).
        return False
    SEARCHID = url2Dict(r.url)["SEARCHID"]
    print(sessionId)
    print(SEARCHID)
    return SEARCHID, sessionId


def commitlistcpx(SEARCHID, paradic):
    """POST the selected search results into the download basket.

    Args:
        SEARCHID: search id returned by init(), used in the Referer header
            and the post body.
        paradic: mapping of result handle (1-based index) -> docid.
    Exits the process when the request fails.
    """
    global sn

    # Endpoint that marks records for later bulk download.
    urlselect = "https://www.engineeringvillage.com/basket/mark.url"
    header = {"Accept": "*/*",
              "Accept-Encoding": "gzip, deflate",
              "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
              "Connection": "keep-alive",
              "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
              "Host": "www.engineeringvillage.com",
              "Origin": "https://www.engineeringvillage.com",
              "Referer": "https://www.engineeringvillage.com/search/quick.url?SEARCHID={}&COUNT=1&usageOrigin=&usageZone=".format(
                  SEARCHID),
              "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.62 Safari/537.36",
              "X-NewRelic-ID": "VQQAUldRCRAFUFFQBwgCUQ==",
              "X-Requested-With": "XMLHttpRequest"
              }

    # Parallel handle/docid lists built straight from the mapping
    # (dict preserves insertion order, so the pairs stay aligned).
    postData = {
        "handle": [str(key) for key in paradic],
        "docid": list(paradic.values()),
        "searchid": SEARCHID
    }

    ok, strins, r = facade.BaseRequestPost(urlselect,
                                           sn=sn,
                                           data=postData,
                                           mark="status",
                                           headers=header,
                                           endstring=None,
                                           proxies=proxy,
                                           verify=False)
    # Check success BEFORE touching the response: on failure r may be
    # unusable and the old print(r.text)-first order crashed here.
    if not ok:
        sys.exit("搜索请求失败")
    print(r.text)


def getcsrfToken():
    """Fetch the CSRF token required by the download-submit endpoint.

    Sends a JSON PATCH (tunneled through POST via X-HTTP-Method-Override)
    to the session-settings endpoint and extracts "csrfToken" from the
    JSON response.

    Returns:
        The csrfToken string.
    Exits the process when the request fails.
    """
    global sn
    url = "https://www.engineeringvillage.com/rest/sessionsettings/0?overrideMethod=PATCH"
    header = {"Accept": "application/json, text/javascript, */*; q=0.01",
              "Accept-Encoding": "gzip, deflate, br",
              "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
              "Connection": "keep-alive",
              "Content-Type": "application/json",
              "Host": "www.engineeringvillage.com",
              "Origin": "https://www.engineeringvillage.com",
              "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.62 Safari/537.36",
              "X-HTTP-Method-Override": "PATCH",
              "X-NewRelic-ID": "VQQAUldRCRAFUFFQBwgCUQ==",
              "X-Requested-With": "XMLHttpRequest"
              }

    payload = {"dlRemoveSelected": "false", "dlSummary": "false"}
    ok, strins, r = facade.BaseRequestPost(url,
                                           sn=sn,
                                           data=json.dumps(payload),
                                           mark="csrfToken",
                                           headers=header,
                                           endstring="",
                                           proxies=proxy,
                                           verify=False)

    if not ok:
        sys.exit("csrfToken请求失败")
    return json.loads(r.text)["csrfToken"]


def writeexcel(sessionId, csrfToken, num):
    """Download the marked records as an Excel file into dirPath.

    Args:
        sessionId: search session id from init().
        csrfToken: token from getcsrfToken().
        num: record count, used only in the output filename.

    The file is saved as "<unix_timestamp>_<num>.xlsx".  NOTE(review):
    the HTTP status is not checked, so an error page would be written
    to the .xlsx file as-is — confirm whether that is acceptable.
    """
    global sn
    submitdata = {
        "sessionid": sessionId,
        "docidlist": "",
        "folderid": "",
        "database": "1",
        "baseaddress": "www.engineeringvillage.com",
        "sortBy": "",
        "sortDirn": "",
        "outputLocation": "mypc",
        "downloadformat": "excel",
        "csrfToken": csrfToken,
        "displayformat": "detailed",
        "filenameprefix": "Engineering_village",
        "usageOrigin": "searchresults"
    }

    url = "https://www.engineeringvillage.com/delivery/download/submit.url"
    header = {"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
              "Accept-Encoding": "gzip, deflate, br",
              "Accept-Language": "zh-CN,zh;q=0.9",
              "Connection": "keep-alive",
              "Content-Type": "application/x-www-form-urlencoded",
              "Host": "www.engineeringvillage.com",
              "Origin": "https://www.engineeringvillage.com",
              "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.62 Safari/537.36",
              "Upgrade-Insecure-Requests": "1",
              }
    r = sn.post(url, proxies=proxy, data=submitdata, headers=header, verify=False)
    # exist_ok avoids the check-then-create race of the old
    # os.path.exists()/os.makedirs() pair.
    os.makedirs(dirPath, exist_ok=True)
    filepath = os.path.join(dirPath, str(int(time.time())) + "_" + str(num) + ".xlsx")
    with open(filepath, 'wb') as f:
        f.write(r.content)


# Fetch the next batch of pending rows from the database.
def InsertIntoDbFromList():
    """Return up to 500 (AccessionNumber, docid) rows still pending (stat=0).

    Exits the process when the SELECT fails (SelectFromDB returns None).
    """
    sSql = "select `AccessionNumber`,`docid` from `articlenew` where `stat`=0 LIMIT 500"
    result = mysqlutils.SelectFromDB(sSql)
    if result is not None:
        return result
    print("select出现错误 请检查")
    sys.exit(-1)


# 更新数据库: mark records as processed.
def Updatelist(list):
    """Set `stat`=2 for every AccessionNumber in *list*.

    Args:
        list: iterable of AccessionNumber values (name kept for
            backward compatibility — it shadows the builtin).

    Empty input is a no-op.  Exits the process when the UPDATE fails.
    """
    ids = [str(v) for v in list]
    if not ids:
        # "IN ()" is invalid SQL — nothing to update anyway.
        return
    # Build the IN clause explicitly: the old format(tuple(...)) emitted
    # a trailing comma for single-element input ("('x',)"), which is
    # invalid SQL.  Single quotes in values are doubled for escaping.
    in_clause = ", ".join("'{}'".format(v.replace("'", "''")) for v in ids)
    sSql = "update `articlenew` set `stat`=2 where AccessionNumber in ({})".format(in_clause)
    result = mysqlutils.ExeSqlToDB(sSql)
    if not result:
        print("更新语句执行错误")
        sys.exit(-1)


def main():
    """Batch loop: init search -> fetch 500 pending rows -> mark results ->
    download Excel -> flag rows done.  Stops after the first batch smaller
    than 500 rows (i.e. the last page of pending work).
    """
    global sn
    more = True
    while more:
        sn = requests.Session()
        result = init()
        # init() returns False when the search response can't be parsed;
        # the old code unpacked it unconditionally and crashed with a
        # TypeError instead of retrying.
        if not result:
            print("初始化失败，5秒后重试")
            time.sleep(5)
            continue
        SEARCHID, sessionId = result
        rows = InsertIntoDbFromList()
        num = len(rows)
        if num < 500:
            more = False
        # rows are (AccessionNumber, docid); handles are 1-based indices.
        paradic = {idx: row[1] for idx, row in enumerate(rows, start=1)}
        idlist = [row[0] for row in rows]
        commitlistcpx(SEARCHID, paradic)
        csrfToken = getcsrfToken()
        writeexcel(sessionId, csrfToken, num)
        Updatelist(idlist)
        time.sleep(5)


if __name__ == "__main__":
    main()
