"""
翻页依赖于上一页的参数  如果某个字母的页数不对
删除该字母的所有html网页 重新下载
"""
import os
import sys

from bs4 import BeautifulSoup
from xjlibrary.mrequest.baserequest import BaseRequest
from xjlibrary.mysqlmoudel.simple.mysqlclient import MySqlDbConnect, ExeSqlList, SelctSqlFromDB, ExeSqlToDB
from xjlibrary.tools.BaseFile import BaseFile, SingleWriteFile

# Root URL of the internal audiobook site; relative hrefs from the DB are joined onto it.
BaseUrl = "http://10.38.48.163/tingbook/"
# Browser-like headers so the server treats requests as coming from a regular Chrome client.
HEADERS = {
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
    "Accept-Encoding": "gzip, deflate",
    "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
    "Host": "10.38.48.163",
    "Proxy-Connection": "keep-alive",
    "Upgrade-Insecure-Requests": "1",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.62 Safari/537.36"
}
# Buffer of INSERT statements; filled by para_home() and flushed to MySQL by InsertSql().
ListSql = []
# Directory downloaded pages are written to: <two levels up>/download/tianfang/download/home
curPath = BaseFile.get_path_absolute(__file__)
TopPath = BaseFile.get_top_path(curPath, -2)
sPath = BaseFile.getNewPath(TopPath, "download", "tianfang", "download", "home")

# HTTP/HTTPS proxy every request is routed through.
proxyss = {
    "http": "192.168.30.176:8033",
    "https": "192.168.30.176:8033",
}


def requesthome(url):
    """Fetch *url* through the configured proxy and return the response object.

    Aborts the whole script with exit code -1 when the request fails,
    since every later step depends on the page being available.
    """
    ok, err_msg, response = BaseRequest(url, mark="right", headers=HEADERS, proxies=proxyss, timeout=45)
    if ok:
        return response
    print("请检查失败原因:" + err_msg)
    sys.exit(-1)


def MajorDbConnect():
    """Open and return a MySQL connection configured by the db.ini beside this script."""
    connection = MySqlDbConnect(curPath, "db.ini")
    return connection


def InsertSql():
    """Flush the buffered INSERT statements in ListSql to MySQL, then clear the buffer.

    Statements run through ExeSqlList, which exits the process on error
    (errExit=True).  A fresh connection is opened per flush.
    """
    # Fix: the original declared `global nCount`, a name that is never defined
    # or used anywhere in this file, and discarded the (success, failed) result.
    global ListSql
    if not ListSql:
        # Nothing buffered — avoid a pointless DB connection (e.g. the final
        # flush in __main__ when para_home() produced no leftover rows).
        return
    conn = MajorDbConnect()
    ExeSqlList(ListSql, conn, errExit=True)
    ListSql = []


def SelectFromDB():
    """Return (url, title) rows for every `home` entry not yet downloaded (stat=0)."""
    query = "select `url`,`title` from `home` where `stat`=0"
    connection = MajorDbConnect()
    return SelctSqlFromDB(query, connection)


def UpdateSql(sql):
    """Execute a single SQL statement on a fresh connection, exiting on failure."""
    ExeSqlToDB(sql, MajorDbConnect(), errExit=True)


def para_home(r):
    """Parse a listing page and buffer one INSERT per book found on it.

    Each div.box inside the first div.content holds an <h1><a> whose href
    is the detail-page URL and whose text is the book title.  Buffered
    statements are flushed once 100 have accumulated.

    :param r: HTTP response object whose .text is the listing page HTML.
    """
    soup = BeautifulSoup(r.text, "lxml")
    div_all_tag = soup.find_all("div", class_="content")
    div_box_tag = div_all_tag[0].find_all("div", class_="box")
    for box in div_box_tag:
        href = box.h1.a['href']
        title = "".join(box.h1.a.stripped_strings)
        # Fix: titles/hrefs containing ' or \ previously broke the SQL
        # statement (injection-shaped bug).  Escape them for the single-quoted
        # literal.  (Parameterized queries would be safer, but ExeSqlList
        # consumes raw SQL strings.)
        safe_url = href.replace("\\", "\\\\").replace("'", "''")
        safe_title = title.replace("\\", "\\\\").replace("'", "''")
        Sql = "Insert IGNORE `home` (`url`,`title`) values ('{url}','{title}')"
        ListSql.append(Sql.format(url=safe_url, title=safe_title))
    if len(ListSql) >= 100:
        InsertSql()


def savefile(r, filePath):
    """Persist the HTML body of response *r* at *filePath*."""
    html = r.text
    SingleWriteFile(html, filePath)


def start():
    """Download the detail page for every pending row and mark it done.

    For each (url, title) row with stat=0: skip it when the target file is
    already on disk, otherwise fetch the page, save it, and set stat=1.
    """
    # Create the output directory once, up front (the original re-checked it
    # on every iteration).
    os.makedirs(sPath, exist_ok=True)
    for row in SelectFromDB():
        filePath = os.path.join(sPath, row[1] + ".html")
        # Fix: check for an existing file BEFORE issuing the HTTP request —
        # the original fetched the page first and then discarded the response,
        # re-downloading every already-saved page on each run.
        if os.path.exists(filePath):
            print("文件存在" + filePath)
            continue
        r = requesthome(BaseUrl + row[0])
        savefile(r, filePath)
        print("save success")
        sql = "update `home` set `stat`=1 where `url`='{}'".format(row[0])
        UpdateSql(sql)


def init():
    """Reset every row's stat flag to 0 so all pages are treated as pending again."""
    UpdateSql("update `home` set `stat`=0")


if __name__ == "__main__":
    # Used when refreshing: reset all stat flags so every page is re-processed.
    init()
    start()
    # Flush any INSERT statements still buffered (fewer than the 100-row batch).
    InsertSql()
