"""
翻页依赖于上一页的参数  如果某个之母的页数不对
删除该字母的所有html网页 重新下载
"""

from bs4 import BeautifulSoup
from xjlibrary.mysqlmoudel.simple.mysqlclient import MySqlDbConnect, ExeSqlList
from xjlibrary.tools.BaseFile import BaseFile
from xjlibrary.tools.filename import getDirAllFiles, get_filename_not_extsep

# Absolute directory of this script; also where db.ini is expected to live.
curPath = BaseFile.get_path_absolute(__file__)
# Project root, two levels above this script.
TopPath = BaseFile.get_top_path(curPath, -2)
# Directory holding the downloaded per-letter index HTML pages.
sPath = BaseFile.getNewPath(TopPath, "download", "tianfang", "download", "home")
# Pending INSERT statements, flushed in batches by InsertSql().
ListSql = []


# Database connection helper.
def MajorDbConnect():
    """Open and return a MySQL connection using db.ini located next to this script."""
    connection = MySqlDbConnect(curPath, "db.ini")
    return connection


def InsertSql():
    """Flush all SQL statements queued in the module-level ListSql to the DB.

    Opens a connection, executes every queued statement (errExit=True makes
    a failed statement abort the run), then resets the queue.

    Fixes: removed the `global nCount` declaration — nCount is never defined
    or used anywhere in this file; dropped the unused `success, failed`
    result binding.
    """
    global ListSql
    conn = MajorDbConnect()
    # NOTE(review): assumes ExeSqlList takes ownership of (and closes) the
    # connection — confirm, otherwise a connection leaks on every flush.
    ExeSqlList(ListSql, conn, errExit=True)
    ListSql = []


def parahtml(filePath):
    """Parse one downloaded letter-index HTML page and queue book INSERTs.

    For each `div.tit` entry under `div.content > div.right`, extracts the
    book URL, title, and chapter count, builds an INSERT IGNORE statement,
    and appends it to the module-level ListSql. Flushes to the database via
    InsertSql() once 100 statements have accumulated.

    :param filePath: path of the HTML file to parse; its basename (without
        extension) is stored in the `filename` column.
    """
    global ListSql
    print(filePath)
    # Bug fix: previously read the module-global `filepath` (set only by the
    # __main__ loop) instead of the parameter, so calling this function
    # standalone raised NameError / used a stale value.
    charName = get_filename_not_extsep(filePath)
    # Bug fix: close the file handle deterministically instead of leaking it.
    with open(filePath, 'r', encoding='utf-8') as fp:
        soup = BeautifulSoup(fp, "lxml")
    div_tag = soup.find("div", class_="content")
    right_div = div_tag.find("div", class_="right")
    tit_all_div = right_div.find_all("div", class_="tit")
    for tit in tit_all_div:
        title = "".join(tit.p.b.stripped_strings)
        chapternum = "".join(tit.p.span.stripped_strings)
        href = tit.a["href"]
        # Bug fix: the template hard-coded a literal in the `filename` column
        # while `filename=charName` was passed to .format() and silently
        # ignored; interpolate {filename} so the column gets the page name.
        # SECURITY(review): values are interpolated directly into SQL — a
        # title containing a single quote breaks the statement. Prefer
        # parameterized queries if ExeSqlList supports them.
        Sql = "INSERT IGNORE INTO `book` (`url`,`bookname`,`chapternum`,`filename`) VALUES ('{url}','{bookname}','{chapternum}','{filename}')"
        Sql = Sql.format(url=href, bookname=title, chapternum=chapternum, filename=charName)
        ListSql.append(Sql)
    if len(ListSql) >= 100:
        InsertSql()


if __name__ == "__main__":
    for filepath in getDirAllFiles(sPath):
        parahtml(filepath)
    InsertSql()
