"""
翻页依赖于上一页的参数  如果某个之母的页数不对
删除该字母的所有html网页 重新下载
"""
import os
import re
import sys
import time

from bs4 import BeautifulSoup
from xjlibrary.mrequest.baserequest import BaseRequest, BaseRequestPost
from xjlibrary.mysqlmoudel.simple.mysqlclient import MySqlDbConnect, ExeSqlList, SelctSqlFromDB, ExeSqlToDB
from xjlibrary.tools.BaseFile import BaseFile, SingleWriteFile

baseurl = "http://10.38.48.163/tingbook/"
HEADERS = {
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
    "Accept-Encoding": "gzip, deflate",
    "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
    "Host": "10.38.48.163",
    "Proxy-Connection": "keep-alive",
    "Upgrade-Insecure-Requests": "1",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.62 Safari/537.36"
}

HEADERSPOST = {
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
    "Accept-Encoding": "gzip, deflate",
    "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
    "Content-Type": "application/x-www-form-urlencoded",
    "Host": "10.38.48.163",
    "Proxy-Connection": "keep-alive",
    "Upgrade-Insecure-Requests": "1",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.62 Safari/537.36"
}
ListSql = []
curPath = BaseFile.get_path_absolute(__file__)
TopPath = BaseFile.get_top_path(curPath, -2)
sPath = BaseFile.getNewPath(TopPath, "download", "tianfang", "download", "book")

proxyss = {
    "http": "192.168.30.176:8033",
    "https": "192.168.30.176:8033",
}


def requesthome(url):
    """GET *url* and return the response; terminate the program on failure.

    Delegates to BaseRequest with the shared proxy/header configuration;
    mark="table" requires that string to appear in the fetched page.
    """
    ok, err, resp = BaseRequest(url, mark="table", headers=HEADERS, proxies=proxyss, timeout=45)
    if ok:
        return resp
    print("请检查失败原因:" + err)
    sys.exit(-1)


# Database connection helper.
def MajorDbConnect():
    """Open a MySQL connection configured by db.ini next to this script."""
    conn = MySqlDbConnect(curPath, "db.ini")
    return conn


def InsertSql():
    """Flush the buffered INSERT statements in ListSql to the database.

    Executes every queued statement (errExit=True makes ExeSqlList abort
    the program on error) and then resets the module-level buffer.

    Fix: the old `global nCount` declared a name that is never defined
    anywhere in this file, and the success/failed counts were unpacked
    but never used.
    """
    global ListSql
    conn = MajorDbConnect()
    ExeSqlList(ListSql, conn, errExit=True)
    ListSql = list()


def SelectFromDB():
    sql = "select `url`,`bookname` from `book` where `stat`=0"
    conn = MajorDbConnect()
    rows = SelctSqlFromDB(sql, conn)
    return rows


def UpdateSql(sql):
    """Execute a single UPDATE statement, aborting the program on error."""
    ExeSqlToDB(sql, MajorDbConnect(), errExit=True)


def para_home(r):
    """Parse a catalogue page and queue an INSERT for every book entry.

    r: response object whose .text is the listing-page HTML. For each
    div.box inside div.content, take the first h1>a link and title, queue
    an `Insert IGNORE` into the module-level ListSql buffer, and flush the
    buffer once it holds 100+ statements.

    Fix: the href/title were interpolated into the SQL string verbatim, so
    any single quote in a scraped title broke the statement. Quotes are
    now doubled ('' is the SQL-standard escape inside a quoted literal).
    """
    soup = BeautifulSoup(r.text, "lxml")
    content_divs = soup.find_all("div", class_="content")
    for box in content_divs[0].find_all("div", class_="box"):
        href = box.h1.a['href']
        title = "".join(box.h1.a.stripped_strings)
        Sql = "Insert IGNORE `home` (`url`,`title`) values ('{url}','{title}')"
        Sql = Sql.format(url=href.replace("'", "''"), title=title.replace("'", "''"))
        ListSql.append(Sql)
    if len(ListSql) >= 100:
        InsertSql()


def savefile(r, filePath):
    """Write the response body (r.text) to filePath via SingleWriteFile."""
    SingleWriteFile(r.text, filePath)


def get_page_num(r):
    """Extract the total page count from a book's first listing page.

    The pager sits in a centered div; the second-to-last pager link
    carries the highest page number.
    """
    soup = BeautifulSoup(r.text, "lxml")
    pager_divs = soup.find_all("div", class_="", attrs={"style": " text-align:center;"})
    links = pager_divs[0].find_all("a", class_=re.compile("pagerLink_UcfarPager"))
    last_page_text = "".join(links[-2].stripped_strings)
    return int(last_page_text)

def getPostData(r):
    """Pull the ASP.NET form state needed to POST for subsequent pages.

    Returns the (__VIEWSTATE, __VIEWSTATEGENERATOR) hidden-input values
    of the page in *r*.
    """
    soup = BeautifulSoup(r.text, "lxml")
    viewstate = soup.find_all("input", id="__VIEWSTATE")[0]["value"]
    generator = soup.find_all("input", id="__VIEWSTATEGENERATOR")[0]["value"]
    return viewstate, generator


def requestJournalsPost(url, data):
    """POST *data* to *url*, retrying up to 3 times.

    Returns the response on success. After three failures the book is
    marked stat=2 in the DB (kept alive for later inspection/retry rather
    than killing the whole run) and None is returned.
    """
    ok, err, resp = False, "", None
    for _ in range(3):
        ok, err, resp = BaseRequestPost(url, data=data, mark="table", proxies=proxyss, headers=HEADERSPOST, timeout=45)
        if ok:
            return resp
        print("请求失败  现在重请求:" + err)
        time.sleep(3)

    print("请检查失败原因:" + err)
    # Flag the failure in the DB instead of exiting the program.
    relative_url = url.replace(baseurl, "")
    UpdateSql("update `book` set stat=2 where `url`='{}'".format(relative_url))
    return None


def get_page(allpage, r, filename, url):
    """Download pages 2..allpage of one book by replaying the ASP.NET form.

    Each POST needs the __VIEWSTATE of the *previous* page, so `r` is
    chained from response to response across iterations.

    Fix: when requestJournalsPost gave up (returned None, marking the
    book stat=2), the old code still fed that None into getPostData on
    the next iteration and crashed with AttributeError. Paging for this
    book now stops instead. The makedirs check is also hoisted out of
    the loop (sPath is loop-invariant).
    """
    if not os.path.exists(sPath):
        os.makedirs(sPath)
    for page in range(2, allpage + 1):
        filePath = os.path.join(sPath, filename + "_" + str(page) + ".html")
        __VIEWSTATE, __VIEWSTATEGENERATOR = getPostData(r)
        pageData = {
            '__VIEWSTATE': __VIEWSTATE,
            '__VIEWSTATEGENERATOR': __VIEWSTATEGENERATOR,
            'UcfarPager2': '',
            'UcfarPager1': str(page)
        }
        r = requestJournalsPost(url, data=pageData)
        if r is None:
            # No response means no VIEWSTATE for the next page; the book
            # was already marked stat=2 inside requestJournalsPost.
            break
        savefile(r, filePath)
        print("letter is:{}, all page is:{}, now page is {}".format(filename, allpage, page))


def start():
    """Download every pending book (stat=0): page 1 via GET, the rest via POST.

    Books whose first page already exists on disk are skipped, which makes
    the run resumable.

    Fix: the final update used to set stat=1 unconditionally, clobbering a
    stat=2 failure flag written by requestJournalsPost while paging; the
    added `and `stat`=0` guard leaves failed books marked as failed.
    """
    for row in SelectFromDB():
        relative_url, bookname = row[0], row[1]
        url = baseurl + relative_url
        if not os.path.exists(sPath):
            os.makedirs(sPath)
        filePath = os.path.join(sPath, bookname + "_1.html")
        if os.path.exists(filePath):
            print("文件存在" + filePath)
            continue
        r = requesthome(url)
        savefile(r, filePath)
        page = get_page_num(r)
        print("filename is:{}, all page is:{}".format(bookname, page))
        if page > 1:
            get_page(page, r, bookname, url)
        print("save success")
        sql = "update `book` set `stat`=1 where `url`='{}' and `stat`=0".format(relative_url)
        UpdateSql(sql)


def init():
    """Reset every book to stat=0 so the whole catalogue is re-downloaded."""
    UpdateSql("update `book` set `stat`=0")


if __name__ == "__main__":
    # 更新时使用
    init()
    start()
    InsertSql()
