import logging
import os
import re

import facade
from bs4 import BeautifulSoup
from xjlibrary.our_file_dir import BaseDir

# Absolute directory of this script; db.ini is expected to live next to it.
curPath = BaseDir.get_file_dir_absolute(__file__)
# Project root: two directory levels above this file.
TopPath = BaseDir.get_upper_dir(curPath, -2)
# Directory holding the volume-listing HTML files saved by the previous step.
sPath = BaseDir.get_new_path(TopPath, "download", "cell", "download", "volume")

nCount = 0  # NOTE(review): never read in this file — candidate for removal
ListSqls = []  # pending INSERT statements; flushed in batches (see GetFileName)
# Shared MySQL helper configured from the db.ini section "db".
mysqlutils = facade.MysqlUtiles(BaseDir.get_new_path(curPath, "db.ini"), "db", facade.get_streamlogger())


# # Database connection (superseded by the `mysqlutils` facade above)
# def MajorDbConnect():
#     return MySqlDbConnect(curPath, "db.ini")


def _lookup_volume(voldict, year):
    """Return the volume for *year*, falling back to the following year.

    Volume boundaries do not always align with calendar years, so an issue
    dated `year` may belong to the volume registered under `year + 1`.
    Raises KeyError when neither year is known.
    """
    try:
        return voldict[year]
    except KeyError:
        return voldict[str(int(year) + 1)]


def GetFileName(filePath):
    """Parse one saved volume-listing HTML file and queue issue INSERT SQL.

    Extracts a year -> volume map from the "volume-header" <li> elements,
    then walks every <a> tag, resolves its volume via the issue year, and
    appends an upsert statement to the module-level `ListSqls` queue.
    Flushes the queue to the database once it reaches 100 statements.

    :param filePath: path to the downloaded HTML file; the journal name is
                     taken from the file name (without extension).
    """
    filepathlist = filePath.split(os.sep)
    filename = filepathlist.pop()
    filename = filename.split(os.extsep)[0]

    # Hoist the patterns out of the loops; raw strings avoid invalid-escape
    # warnings on \d.
    vol_re = re.compile(r"Volume (\d+) \((\d{4})\)")
    issue_year_re = re.compile(r"(\d{4})\s?Issue")
    any_year_re = re.compile(r"(\d{4})\s?")

    # `with` closes the file handle — the original leaked it by passing the
    # open() result straight to BeautifulSoup.
    with open(filePath, 'r', encoding='GB18030') as fh:
        soup = BeautifulSoup(fh, "lxml")

    # Map publication year -> volume number, e.g. "Volume 12 (2004)".
    voldict = {}
    for vol in soup.find_all("li", class_="group volume-header"):
        voltext = "".join(vol.stripped_strings)
        match = vol_re.search(voltext)
        if match is None:
            continue  # skip malformed headers instead of crashing
        volume, year = match.groups()
        voldict[year] = volume

    a_all_tag = soup.find_all("a")
    num = len(a_all_tag)  # total link count, stored with every row
    for a_tag in a_all_tag:
        href = a_tag["href"]
        title = "".join(a_tag.stripped_strings)
        print(title)
        try:
            # Preferred: a year immediately preceding the word "Issue".
            year = issue_year_re.search(title).group(1)
            vol = _lookup_volume(voldict, year)
        except (AttributeError, KeyError):
            # Fallback: the first 4-digit run anywhere in the title.
            # (AttributeError = regex miss; KeyError = year not in voldict.)
            year = any_year_re.search(title).group(1)
            vol = _lookup_volume(voldict, year)
        print(vol)
        # Strip the "In Progress" suffix that the site appends to open issues.
        if title.find("In ProgressAn") > -1:
            title = title.split("In ProgressAn")[0]
        # NOTE(review): SQL built by string interpolation from scraped HTML —
        # a title containing a quote will break/inject; switch to a
        # parameterized API on MysqlUtiles if one is available.
        sql = "INSERT INTO `issue` (`journal_name`, `Issue_text`,`vol`,`url`,`num`) VALUES('{name}','{Issue}','{vol}','{url}', {num}) on DUPLICATE key update `stat`=0".format(
            name=filename, Issue=title, url=href, num=num, vol=vol)
        print(sql)
        ListSqls.append(sql)
    # Flush in batches so the queue never grows unbounded across files.
    if len(ListSqls) >= 100:
        InsertIntoDbFromList()


# 插入数据库
def InsertIntoDbFromList():
    global ListSqls
    # 数据库连接
    # conn = MajorDbConnect()
    # ExeSqlList(ListSqls, conn)
    mysqlutils.ExeSqlListToDB(ListSqls)
    ListSqls = list()


def main(logger1: logging.Logger = None):
    """Parse every saved HTML file under sPath and flush the SQL queue.

    :param logger1: optional logger, stored in the module-level ``logger``
                    global (progress is still reported via print).
    """
    global logger
    logger = logger1
    filenum = 0
    for filenum, filePath in enumerate(BaseDir.get_dir_all_files(sPath), start=1):
        print(filePath)
        GetFileName(filePath)
        print("完成解析文件数量{}".format(filenum))
    print(len(ListSqls))
    InsertIntoDbFromList()

"""
解析上一步保存的html文件 解析出卷 期 以及下一级的url地址等信息
更新到issue表 并把stat置0
"""
if __name__ == "__main__":
    main()
