"""
翻页依赖于上一页的参数  如果某个之母的页数不对
删除该字母的所有html网页 重新下载
"""

from bs4 import BeautifulSoup
from xjlibrary.database_moudel.simple.mysqlclient import MySqlDbConnect, ExeSqlList
from xjlibrary.our_file_dir.base_dir import BaseDir

# Resolve the source directory: two levels above this file, then
# download/dqlib/download/issue — where the downloaded HTML pages live.
curPath = BaseDir.get_file_dir_absolute(__file__)
TopPath = BaseDir.get_upper_dir(curPath, -2)
sPath = BaseDir.get_new_path(TopPath, "download", "dqlib", "download", "issue")
# Module-level buffer of pending INSERT statements; parahtml() appends to it
# and InsertSql() flushes and resets it (batched every 100 statements).
ListSql = []


# Database connection factory
def MajorDbConnect():
    """Open a MySQL connection configured by the db.ini next to this file."""
    config_file = "db.ini"
    return MySqlDbConnect(curPath, config_file)


def InsertSql():
    """Flush the buffered statements in ListSql to MySQL and reset the buffer.

    Opens a fresh connection per flush. With errExit=True, ExeSqlList is
    expected to abort on a failing statement, so the success/failure counts
    it returns were unused and are no longer captured.
    """
    # Removed dead `global nCount` — nCount is defined nowhere in this module.
    global ListSql
    conn = MajorDbConnect()
    ExeSqlList(ListSql, conn, errExit=True)
    ListSql = []


def _sql_escape(text):
    """Escape backslashes and single quotes for embedding in a SQL string literal.

    NOTE(review): the original called an undefined ``sqliteEscape``; the
    resulting NameError was swallowed by a bare ``except`` on every row, so
    no statement was ever queued. This helper restores the evident intent
    (quote-doubling, as the name suggested). Parameterized queries would be
    safer if ExeSqlList ever supports them.
    """
    return text.replace("\\", "\\\\").replace("'", "''")


def parahtml(filePath):
    """Parse one downloaded issue-list HTML page and queue INSERT statements.

    filePath: path of an HTML file under sPath. Its basename (without
    extension, presumably the letter/character name) is stored in the
    `filename` column. Appends to the module-level ListSql buffer and
    flushes it via InsertSql() once it reaches 100 statements.
    """
    global ListSql
    print(filePath)
    # BUG fix: the original read the module-global ``filepath`` (set by the
    # __main__ loop) instead of this function's ``filePath`` parameter.
    charName = BaseDir.get_filename_not_extsep(filePath)
    # Use a context manager so the file handle is closed (original leaked it).
    with open(filePath, 'r', encoding='utf-8') as fp:
        soup = BeautifulSoup(fp, "lxml")
    div_all_tag = soup.find_all("div", class_="maglistbox")
    dl_all_tag = div_all_tag[0].find_all("dl")
    num = len(dl_all_tag)
    for dl in dl_all_tag:
        classtext = "".join(dl.dt.span.stripped_strings)
        try:
            href = dl.dd.span.a['href']
            title = dl.dd.span.a['title']
        except (AttributeError, KeyError, TypeError):
            # Entry has no article link — skip it. Narrowed from a bare
            # ``except`` that also hid genuine bugs such as NameError.
            print("不存在文章  跳过")
            continue
        # BUG fix: the template had a literal '(unknown)' where {filename}
        # belonged — .format() was passed filename=charName but never used it.
        Sql = ("INSERT IGNORE `archive` (`url`,`filename`,`title`,`classtext`,`num`) "
               "value ('{url}','{filename}','{title}','{classtext}',{num})")
        Sql = Sql.format(url=href, filename=charName,
                         title=_sql_escape(title),
                         classtext=_sql_escape(classtext), num=num)
        ListSql.append(Sql)
    # Flush in batches of 100 to bound memory use.
    if len(ListSql) >= 100:
        InsertSql()


if __name__ == "__main__":
    for filepath in BaseDir.get_dir_all_files(sPath):
        parahtml(filepath)
    InsertSql()
