import datetime
import json
import os
import sys
import time

import facade
import xlrd
from collections import OrderedDict
import codecs

from xjlibrary.our_file_dir.base_dir import BaseDir

# Resolve paths relative to this script: Excel inputs are read from
# download/EI/download/excel and JSON-lines output goes to .../bigjson.
curPath = BaseDir.get_file_dir_absolute(__file__)
TopPath = BaseDir.get_upper_dir(curPath, -2)
sPath = BaseDir.get_new_path(TopPath, "download", "EI", "download", "excel")
sPath2 = BaseDir.get_new_path(TopPath, "download", "EI", "download", "bigjson")
BaseDir.create_dir(sPath2)  # make sure the output directory exists before writing
configfile = BaseDir.get_new_path(curPath, "db.ini")
# Shared stream logger and MySQL helper (reads the "db" section of db.ini);
# used by SelectFromDB() below.
logger = facade.get_streamlogger()
mysqlutils = facade.MysqlUtiles(configfile, "db", logger=logger)


def SelectFromDB(sql):
    """Execute *sql* through the shared MySQL helper and return the rows."""
    return mysqlutils.SelectFromDB(sql)


def getColumnIndex(table, columnName):
    """Return the 0-based index of the header cell (row 0) whose value
    equals *columnName*, or None when no header matches."""
    return next(
        (idx for idx in range(table.ncols)
         if table.cell_value(0, idx) == columnName),
        None,
    )


def exceltojson(filePath):
    """Convert one downloaded Excel workbook into JSON-lines records.

    Reads sheet 0, looks up each row's Accession number in the `articlenew`
    table to attach `ei_cc` (cited count) and `rawid` (docid), then appends
    one JSON object per row to the current big_json file under sPath2.
    Rotates the output file name (module-global ``filename``) once the file
    exceeds ~1.1 GB.

    Args:
        filePath: path of the .xls/.xlsx workbook to convert.

    Side effects:
        Appends to / rotates files under sPath2; may reassign the module
        global ``filename``; calls sys.exit(1) when a DB row exists but has
        no docid (kept from the original batch semantics).
    """
    global filename
    # Skip workbooks xlrd cannot parse instead of crashing the whole batch.
    # Was a bare `except:`; narrow it and report the reason.
    try:
        wb = xlrd.open_workbook(filePath)
    except Exception as e:
        print("打开文件失败")
        print(e)
        return

    sh = wb.sheet_by_index(0)
    title = sh.row_values(0)

    acc_col = getColumnIndex(sh, "Accession number")
    if acc_col is None:
        # Previously sh.col_values(None) raised a confusing TypeError here.
        print("no Accession number column: {}".format(filePath))
        return
    accnumberlist = sh.col_values(acc_col)

    # xlrd may return non-string cells (e.g. floats); str() first so
    # .strip() cannot raise AttributeError.
    listvalues = [str(accnumber).strip() for accnumber in accnumberlist]
    # Pad with a dummy value so a single-element tuple does not render as
    # "('x',)" whose trailing comma would break the SQL IN (...) clause.
    listvalues.append("test")
    # NOTE(review): values are interpolated straight into the statement; an
    # accession number containing a quote would break it.  Parameterize if
    # the facade API supports it.
    sql = "select `cited_cnt`,`docid`,`doi`,`AccessionNumber` from `articlenew` where `AccessionNumber` in {}".format(
        str(tuple(listvalues)))
    rows = SelectFromDB(sql)
    if not rows:
        print("数据库中没有对应的数据，请在上一步完成更新")
        return
    # Map AccessionNumber -> (cited_cnt, docid, doi).
    doidic = {row[3]: (row[0], row[1], row[2]) for row in rows}

    print(sh.nrows)
    for rownum in range(1, sh.nrows):
        rowvalue = sh.row_values(rownum)
        single = OrderedDict(zip(title, rowvalue))
        if not single["Accession number"]:
            print(" no  single[Accession number]")
            continue
        # Was a bare `except:`; only a missing key is the expected failure.
        try:
            (cited_cnt, docid, doi) = doidic[single["Accession number"]]
        except KeyError:
            print("出现错误")
            return
        if not docid:
            print("数据库中没有对应的数据，请在上一步完成更新1")
            sys.exit(1)
        single["ei_cc"] = cited_cnt
        single["rawid"] = docid
        single["DOWNDate"] = str(datetime.datetime.now().strftime('%Y%m%d'))
        j = json.dumps(single, ensure_ascii=False)
        big_path = BaseDir.get_new_path(sPath2, filename)
        # Re-resolve and re-open per row because `filename` can rotate once
        # the file grows past the size cap below.
        with codecs.open(big_path, "a", "utf-8") as f:
            f.write(j + "\n")
        if BaseDir.get_file_size(big_path) > 1100000000:
            filename = str(int(time.time())) + ".big_json"



def main():
    """Walk every file under sPath and convert each one to big_json output."""
    global filename
    # Seed the rotating output file name for this run.
    filename = str(int(time.time())) + ".big_json"
    done = 0
    for root, _dirs, names in os.walk(sPath):
        for name in names:
            full_path = os.path.join(root, name)
            print(full_path)
            exceltojson(full_path)
            done += 1
            print("完成文件个数{}".format(done))


if __name__ == "__main__":
    # Script entry point: convert every downloaded EI Excel file to big_json.
    main()