import sys

import facade
from bs4 import BeautifulSoup
from xjlibrary.database_moudel.simple.sqlite3cloent import sqliteEscape
from xjlibrary.our_file_dir.base_dir import BaseDir

# Resolve paths relative to this script: curPath is the script's own directory,
# TopPath is two directory levels above it (presumably the project root --
# TODO confirm against the repository layout).
curPath = BaseDir.get_file_dir_absolute(__file__)
TopPath = BaseDir.get_upper_dir(curPath, -2)
# Database connection settings live next to the script in db.ini.
configfile = BaseDir.get_new_path(curPath, "db.ini")
# Directory containing the downloaded JSTOR category pages to be parsed.
sPath = BaseDir.get_new_path(TopPath, "download", "jstorbook", "download", "home")
logger = facade.get_streamlogger()
# MySQL helper configured from the "db" section of db.ini.
mysqlutils = facade.MysqlUtiles(configfile, "db", logger=logger)
# Pending INSERT statements, flushed to the DB in batches of 100 (see parahtml).
ListSql = []


def parahtml(filePath):
    """Parse one downloaded JSTOR category page and queue book INSERTs.

    Extracts (url, bookname) pairs from the single <tbody> of the page and
    appends `INSERT IGNORE` statements to the module-level ListSql buffer,
    flushing to MySQL every 100 statements and once more at the end.

    Exits the whole process (sys.exit) on structural surprises: more than
    one <tbody>, or a row whose url/title could not be extracted.
    """
    global ListSql
    # Use a context manager so the file handle is closed; the original
    # passed an open file object straight to BeautifulSoup and leaked it.
    with open(filePath, 'r', encoding='utf-8') as fp:
        soup = BeautifulSoup(fp, "lxml")
    # Category name is taken from the file name (without extension).
    charName = BaseDir.get_filename_not_extsep(filePath)
    tbody_tag = soup.find_all("tbody")
    if len(tbody_tag) != 1:
        sys.exit("理论上应该只会出现一个，如果出现多个的情况请检查")
    tr_all_tag = tbody_tag[0].find_all("tr")
    # Total row count of this page, stored alongside every row as `num`.
    num = len(tr_all_tag)
    for tr_tag in tr_all_tag:
        td_tag = tr_tag.find_all("td")
        titletext = "".join(td_tag[0].stripped_strings)
        # Empty-category marker page: nothing to import.
        if titletext.strip() == "There is currently no content available in JSTOR under this category. Try viewing another category.":
            return
        # Guard against cells without an <a>: td_tag[0].a is None there and
        # subscripting it raised TypeError before reaching the error branch.
        a_tag = td_tag[0].a
        href = a_tag["href"] if a_tag is not None else None
        if href and titletext:
            # Escape href as well as the title: both are interpolated into
            # the SQL string, and only the title was escaped before.
            sql = "INSERT IGNORE INTO `book` (`url`,`bookname`,`charName`,`num`) values('{url}','{journalName}','{charName}',{num})"
            sql = sql.format(url=sqliteEscape(href), journalName=sqliteEscape(titletext), charName=charName, num=num)
            ListSql.append(sql)
            if len(ListSql) >= 100:
                mysqlutils.ExeSqlListToDB(ListSql)
                ListSql.clear()
        else:
            print("解析url和期刊名出现错误")
            sys.exit(-1)
    # Flush the remainder AND clear it: the original left the tail in
    # ListSql, so the next call re-executed the same statements (only
    # masked by INSERT IGNORE).
    if ListSql:
        mysqlutils.ExeSqlListToDB(ListSql)
        ListSql.clear()


def main():
    """Walk the download directory and parse every category page in it."""
    page_files = BaseDir.get_dir_all_files(sPath)
    for page_file in page_files:
        parahtml(page_file)


# Script entry point: only run the import when executed directly.
if __name__ == "__main__":
    main()
