"""
翻页依赖于上一页的参数  如果某个之母的页数不对
删除该字母的所有html网页 重新下载
"""

from bs4 import BeautifulSoup
from xjlibrary.database_moudel.simple.mysqlclient import MySqlDbConnect, ExeSqlList
from xjlibrary.database_moudel.simple.sqlite3cloent import sqliteEscape
from xjlibrary.our_file_dir.base_dir import BaseDir

# Absolute directory containing this script (also where db.ini is looked up).
curPath = BaseDir.get_file_dir_absolute(__file__)
# Ancestor directory of curPath; presumably two levels up — TODO confirm
# against BaseDir.get_upper_dir's semantics for the -2 argument.
TopPath = BaseDir.get_upper_dir(curPath, -2)
# Directory holding the downloaded per-letter journal index HTML pages.
sPath = BaseDir.get_new_path(TopPath, "download", "dqlib", "download", "home")
# Module-level buffer of pending INSERT statements, flushed in batches by InsertSql().
ListSql = []


# Database connection helper.
def MajorDbConnect():
    """Open and return a MySQL connection configured by the db.ini file
    located in this script's directory (curPath)."""
    config_file = "db.ini"
    return MySqlDbConnect(curPath, config_file)


def InsertSql():
    """Flush the buffered SQL statements in the global ListSql to MySQL.

    Executes every buffered statement over a single connection
    (errExit=True makes ExeSqlList abort the process on failure),
    then resets the buffer to an empty list.
    """
    # Fix: the original also declared `global nCount`, but no `nCount`
    # exists anywhere in this file — the declaration was dead and misleading.
    global ListSql
    if not ListSql:
        # Nothing buffered (e.g. the final flush in __main__ after an exact
        # multiple of 100 rows) — skip opening a pointless connection.
        return
    conn = MajorDbConnect()
    # errExit=True: ExeSqlList exits on error, so the counts are informational.
    # NOTE(review): conn is never closed here — confirm ExeSqlList (or the
    # connection object's finalizer) releases it.
    success, failed = ExeSqlList(ListSql, conn, errExit=True)
    ListSql = list()


def parahtml(filePath):
    """Parse one downloaded letter-index HTML page and buffer journal rows.

    Reads the total page count for the letter from the second "anpager"
    div, then for each "orisearchunit" entry buffers an `INSERT IGNORE`
    statement into the global ListSql.  The buffer is flushed via
    InsertSql() once it reaches 100 statements.

    :param filePath: path to the HTML file; its basename (without
        extension) is the letter name stored in the `charName` column.
    """
    global ListSql
    # Bug fix: the original called get_filename_not_extsep(filepath) — the
    # lowercase *global* loop variable from __main__ — instead of the
    # filePath parameter.  It only worked by accident of that global name.
    charName = BaseDir.get_filename_not_extsep(filePath)
    # Use a context manager so the file handle is closed deterministically;
    # the original leaked the handle returned by open().  BeautifulSoup
    # parses eagerly on construction, so closing afterwards is safe.
    with open(filePath, 'r', encoding='utf-8') as fp:
        soup = BeautifulSoup(fp, "lxml")

    pager_divs = soup.find_all("div", class_="anpager")
    b_tags = pager_divs[1].find_all("b")
    # Total page count for this letter, shown in bold inside the pager.
    num = int("".join(b_tags[0].stripped_strings))

    list_div = soup.find_all("div", class_="orisearchlist")[0]
    # Renamed the loop variable (was `div_tag`, shadowing the pager list above).
    for unit in list_div.find_all("div", class_="orisearchunit"):
        href = unit.h2.a["href"]
        title = "".join(unit.h2.a.stripped_strings)
        # NOTE(review): SQL built by string formatting — `title` goes through
        # sqliteEscape, but `href` and `charName` are interpolated raw.
        # Prefer parameterized queries if the MySQL driver supports them.
        sql = "INSERT IGNORE INTO `journal` (`url`,`journalName`,`charName`,`num`) values('{url}','{journalName}','{charName}',{num})"
        sql = sql.format(url=href, journalName=sqliteEscape(title), charName=charName, num=num)
        ListSql.append(sql)
    # Flush in batches of 100 to bound memory and amortize connection cost.
    if len(ListSql) >= 100:
        InsertSql()


if __name__ == "__main__":
    for filepath in BaseDir.get_dir_all_files(sPath):
        parahtml(filepath)
    InsertSql()
