import os
import sys
import time

import facade
import xlrd
from xjlibrary.mprocesspoll.MThreadingRun import MThreadingRun
from xjlibrary.myredis.myredisclient import getDataFromRedis
from xjlibrary.our_file_dir.base_dir import BaseDir

# Resolve working directories relative to this file: the excel exports and
# the downloaded doi html pages live under <top>/download/EI/download/.
curPath = BaseDir.get_file_dir_absolute(__file__)
TopPath = BaseDir.get_upper_dir(curPath, -2)
dirPath = BaseDir.get_new_path(TopPath, "download", "EI", "download", "excel")
doidirPath = BaseDir.get_new_path(TopPath, "download", "EI", "download", "doi")

# Shared module-level DB handle and logger; db.ini sits next to this script
# and its "db" section holds the connection settings.
configfile = BaseDir.get_new_path(curPath, "db.ini")
logger = facade.get_streamlogger()
mysqlutils = facade.MysqlUtiles(configfile, "db", logger=logger)


# Update database status
def ExeSqlLists(sql, values):
    """
    Execute one parameterized SQL statement for every tuple in *values*
    (executemany-style batch update). Returns nothing.
    :param sql: SQL statement with %s placeholders
    :param values: list of parameter tuples, one per row
    :return: None
    """
    mysqlutils.ExeSqlMany(sql, values)


#
# def readExcelDataByName(fileName, sheetName):
#     table = None
#     errorMsg = None
#     try:
#         data = xlrd.open_workbook(fileName)
#         table = data.sheet_by_name(sheetName)
#     except Exception as msg:
#         errorMsg = msg
#     return table, errorMsg


def readExcelDataByIndex(fileName, sheetIndex):
    """Open an excel workbook and return (sheet, error).

    On success returns the sheet at *sheetIndex* and an empty error
    string; on any failure returns None together with the caught
    exception in place of the error string.
    """
    try:
        workbook = xlrd.open_workbook(fileName)
        return workbook.sheet_by_index(sheetIndex), ""
    except Exception as exc:
        return None, exc


def getColumnIndex(table, columnName):
    """Return the zero-based index of the header cell (row 0) equal to
    *columnName*, or None when no header matches."""
    for idx in range(table.ncols):
        if table.cell_value(0, idx) == columnName:
            return idx
    return None


def getDOI(filePath):
    """Read the first sheet of *filePath* and batch-update `article`,
    writing each DOI against its accession number.

    Exits the process with -1 when the workbook cannot be read. Rows
    whose DOI cell is empty or holds the literal header text "DOI" are
    skipped.
    """
    sheet, err = readExcelDataByIndex(filePath, 0)
    if err:
        print(err)
        sys.exit(-1)
    dois = sheet.col_values(getColumnIndex(sheet, "DOI"))
    acc_numbers = sheet.col_values(getColumnIndex(sheet, "Accession number"))
    params = [
        (str(doi.strip()), str(acc))
        for acc, doi in zip(acc_numbers, dois)
        if doi and doi.strip() != "DOI"
    ]
    sql = "update `article` set `DOI`=%s where AccessionNumber=%s"
    ExeSqlLists(sql, params)
    params.clear()


def SelectProxy():
    """Fetch the current proxy list from redis.

    The former DB-backed `proxy_pool` query is retired; the proxy ring
    now comes from the redis instance configured in db.ini.
    """
    return getDataFromRedis(curPath, 'db.ini')


def Selectdoi():
    """Claim up to 10000 unprocessed DOIs for download.

    Selects rows with cc_stat=0 and a non-empty doi, immediately marks
    those rows cc_stat=-1 so concurrent workers do not pick them up
    again, and returns the raw selected rows.

    NOTE(review): the IN-clause is assembled by string formatting; a doi
    containing a single quote would break the statement. Switch to a
    parameterized query if the mysqlutils API supports it.

    :return: rows from SelectFromDB (sequence of 1-tuples of doi strings)
    """
    sSql = "SELECT `doi` FROM `article` where cc_stat=0 and doi !='' limit 10000"
    rows = mysqlutils.SelectFromDB(sSql)
    tasks_list = [one_task[0] for one_task in rows] if rows else []
    if tasks_list:
        # Build the IN list explicitly: str(tuple(...)) on a single
        # element would emit a trailing comma ("('x',)") that MySQL
        # rejects, which is why the old code special-cased len == 1.
        in_clause = ", ".join("'{}'".format(doi) for doi in tasks_list)
        Sql = "update article set cc_stat=-1 where doi in ({})".format(in_clause)
        mysqlutils.ExeSqlToDB(Sql)
    print("select success")
    return rows


def save_file(name, r):
    """Persist the response body *r.text* as <doidirPath>/<name>.html.

    *name* is sanitized (slashes and other directory-special characters
    replaced) so it becomes a single path component. An already-existing
    file is left untouched.

    :param name: identifier (typically a doi) used as the filename stem
    :param r: response object exposing a .text attribute
    """
    name = name.replace("/", "~").replace("\\", "~")
    name = BaseDir.replace_dir_special_string(name)
    # BUG FIX: the old '"{}\\{}.html"' literal hard-coded a backslash
    # separator, which on non-Windows systems became part of the file
    # name and never matched the "/"-joined path that donw_doi_run
    # checks with os.path.exists. os.path.join is portable.
    outfile = os.path.join(doidirPath, "{}.html".format(name))
    os.makedirs(doidirPath, exist_ok=True)
    if os.path.exists(outfile):
        return
    with open(outfile, mode='w', encoding='utf-8') as f:
        f.write(r.text)
    print(outfile)


def donw_doi_run(threadval, list_proxy, doi):
    """Worker body: fetch the plu.mx artifact page for *doi* through the
    proxy ring and record the outcome on threadval.result_queue.

    Queue codes observed in this function:
      1  -> done (saved, already cached on disk, or the server said 404)
      -2 -> request failed with no response object at all
      -3 -> page body contains "NOT_FOUND" (needs the alternate download path)

    NOTE(review): when a response arrives whose status is not 404 and
    whose body lacks "NOT_FOUND", nothing is queued, so that doi stays
    at cc_stat=-1 — confirm this is intentional.
    """
    result_queue = threadval.result_queue
    print(doi)
    if not doi:
        return
    # url = "https://plu.mx/plum/a/?doi=" + doi
    url = "https://plu.mx/api/v1/artifact/doi/{}".format(doi)
    # Sanitize the doi into a single filename component, mirroring save_file.
    name = doi.replace("/", "~").replace("\\", "~")
    name = BaseDir.replace_dir_special_string(name)
    outfile = "{}/{}.html".format(doidirPath, name)
    if not os.path.exists(doidirPath):
        os.makedirs(doidirPath)
    if os.path.exists(outfile):
        # Already downloaded on a previous run: count it as done.
        print("文件存在:" + outfile)
        result_queue.put((doi, 1))
        return
    # Referer mimics the human-facing page for the same doi.
    Headers = {
        "Referer": "https://plu.mx/plum/a/?doi={}".format(doi),
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36"
    }
    # Proxy-rotating GET; presumably returns (success_flag, error_text,
    # response) — TODO confirm against facade.MProxyRequest.
    BoolResult, errString, r = facade.MProxyRequest(url,
                                                    endstring="",
                                                    retrynum=2,
                                                    Feature="sort_count",
                                                    HEADERS=Headers,
                                                    proxyRingList=list_proxy,
                                                    verify=False)

    if BoolResult:
        save_file(doi, r)
        result_queue.put((doi, 1))
    else:
        if r:
            if r.status_code == 404:
                # 404 is treated as final: nothing to fetch for this doi.
                result_queue.put((doi, 1))
            elif r.text.find("NOT_FOUND") > -1:
                result_queue.put((doi, -3))
                print("没有数据需要换方式下载")
            print("下载文件失败,请检查")
        else:
            # No response at all: release the doi for a retry pass.
            result_queue.put((doi, -2))


def UpdateDbFromList(ListSqls):
    """Group worker results by status code and write each group back
    with one `UPDATE article SET cc_stat=... WHERE doi IN (...)`.

    :param ListSqls: iterable of (doi, cc_stat) tuples produced by the
        download workers
    """
    groups = {}
    for doi, value in ListSqls:
        # BUG FIX: the old code initialised a brand-new status key with
        # an EMPTY list and never appended that first doi, silently
        # dropping one doi per status group. setdefault keeps every doi.
        groups.setdefault(value, []).append(doi)
    for status, dois in groups.items():
        # Build the IN list explicitly: str(tuple(...)) on one element
        # yields a trailing comma MySQL rejects (the old code padded the
        # list with a bogus "test" doi to dodge that).
        in_clause = ", ".join("'{}'".format(doi) for doi in dois)
        sql = "update `article` set cc_stat={} where `doi` in ({})".format(status, in_clause)
        mysqlutils.ExeSqlToDB(sql)


class DOIThreadRun(MThreadingRun):
    """Thread-pool driver wiring the DOI download pipeline into the
    project's MThreadingRun framework: getTask pulls claimed DOIs from
    the DB, fun downloads each one, dealresult writes statuses back.
    """

    def __init__(self, num):
        # num: worker-thread count, forwarded to the base pool.
        super(DOIThreadRun, self).__init__(num)

    def is_break(self):
        # Always True — presumably tells the framework it may stop when
        # the task source runs dry; confirm against MThreadingRun.
        return True

    def getTask(self, *args, **kwargs):
        # Claim the next batch of DOIs (also flips them to cc_stat=-1).
        rows = Selectdoi()
        return rows

    def setTask(self, results=None, *args, **kwargs):
        # No rows claimed: back off before the framework polls again.
        if not results:
            time.sleep(10)
            return
        for doi in results:
            # Enqueue one download job per doi (rows are 1-tuples).
            # NOTE(review): self.func is presumably bound by the base
            # class to fun() below — confirm against MThreadingRun.
            self.add_job(self.func, doi[0])

    def dealresult(self, *args, **kwargs):
        # Flush accumulated (doi, status) results back to the DB.
        UpdateDbFromList(self.results)

    def setProxy(self, proxysList=None):
        # Refresh the proxy ring from redis, then throttle refreshes.
        rows = SelectProxy()
        MThreadingRun.setProxy(self, rows)
        time.sleep(10)

    def thread_pool_hook(self, thread_pool_dicts, thread, *args, **kwargs) -> dict:
        """
        Hook function, may be overridden; mainly meant for overriding
        the dicts part inside. This subclass contributes nothing.
        :return: empty dict
        """
        return {}

    def fun(self, threadval, *args, **kwargs):
        # Worker entry: args[0] is the doi queued by setTask.
        doi = args[0]
        donw_doi_run(threadval, self.list_proxy, doi)


def main():
    """Close out doi-less rows, then run the 40-thread citation downloader.

    Step 1 of the old pipeline (walking the excel exports and writing
    DOIs into `article` via getDOI) is retired — the doi column is
    already populated — so only step 2 remains: fetch citation counts
    over the network for every pending doi.
    """
    print(sys.path)
    # Rows with no doi can never be fetched: zero their count and mark done.
    sql = "update `article` set `cited_cnt`='0',cc_stat=1 where `doi`='' and cc_stat=0"
    mysqlutils.ExeSqlToDB(sql)
    runner = DOIThreadRun(40)
    runner.run()


# Script entry point.
if __name__ == "__main__":
    main()
