"""
翻页依赖于上一页的参数  如果某个之母的页数不对
删除该字母的所有html网页 重新下载
"""
import os
import re
import string
import sys

from bs4 import BeautifulSoup
from xjlibrary.mrequest.baserequest import BaseRequest, BaseRequestPost
from xjlibrary.our_file_dir.base_dir import BaseDir

BaseUrl = "http://dqlib.vip.qikan.com/text/TextSerach.aspx?l="
HEADERS = {
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
    "Accept-Encoding": "gzip, deflate",
    "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
    "Cache-Control": "no-cache",
    "Host": "dqlib.vip.qikan.com",
    "Pragma": "no-cache",
    "Proxy-Connection": "keep-alive",
    "Upgrade-Insecure-Requests": "1",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.62 Safari/537.36"
}

HEADERSPOST = {
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
    "Accept-Encoding": "gzip, deflate",
    "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
    "Cache-Control": "no-cache",
    "Content-Type": "application/x-www-form-urlencoded",
    "Host": "dqlib.vip.qikan.com",
    "Pragma": "no-cache",
    "Proxy-Connection": "keep-alive",
    "Upgrade-Insecure-Requests": "1",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.62 Safari/537.36"
}

curPath = BaseDir.get_path_absolute(__file__)
TopPath = BaseDir.get_upper_dir(curPath, -2)
sPath = BaseDir.get_new_path(TopPath, "download", "dqlib", "download", "home")


def requestJournals(url):
    """GET *url* with the shared HEADERS; exit the process on any failure.

    Returns the response object produced by the project's BaseRequest helper.
    """
    ok, err_msg, response = BaseRequest(url, mark="orisearchlist", headers=HEADERS, timeout=45)
    if ok:
        return response
    print("请检查失败原因:" + err_msg)
    sys.exit(-1)


def requestJournalsPost(url, data):
    """POST *data* to *url* with HEADERSPOST; exit the process on any failure.

    Returns the response object produced by the project's BaseRequestPost helper.
    """
    ok, err_msg, response = BaseRequestPost(url, data=data, mark="orisearchlist", headers=HEADERSPOST, timeout=45)
    if ok:
        return response
    print("请检查失败原因:" + err_msg)
    sys.exit(-1)


def savefile(r, filePath):
    """Write the response body of *r* to *filePath* via the project file helper."""
    html_text = r.text
    BaseDir.single_write_file(html_text, filePath)


def get_page_num(r):
    """Parse the total page count out of a search-results response.

    Reads the second <b> element inside the second div.anpager block of the
    page's HTML and converts its text to an int.
    """
    soup = BeautifulSoup(r.text, "lxml")
    pager_divs = soup.find_all("div", class_="anpager")
    bold_tags = pager_divs[1].find_all("b")
    count_text = "".join(bold_tags[1].stripped_strings)
    return int(count_text)


def getPostData(r):
    """Extract the ASP.NET postback parameters needed to fetch the next page.

    Parses the current results page (``r.text``) and returns the two values
    the server requires for a paging POST:

    - ``__EVENTTARGET``: the control name taken from the ``doPostBack`` href
      of the third link in the third ``div.anpager`` block.
    - ``__VIEWSTATE``: the hidden view-state value.

    Returns:
        tuple: ``(event_target, viewstate)`` as strings.

    Raises:
        AttributeError: if the pager href does not match the expected
            ``doPostBack('target','page'`` pattern (``re.search`` returns None).
        IndexError: if the expected pager links or hidden input are missing.
    """
    soup = BeautifulSoup(r.text, "lxml")
    div_all_tag = soup.find_all("div", class_="anpager")
    a_all_tag = div_all_tag[2].find_all("a")
    href = a_all_tag[2]["href"]
    print(href)
    # Raw string: the original non-raw pattern relied on invalid escape
    # sequences ("\(" and "\d"), which raise SyntaxWarning on Python 3.12+
    # and are slated to become errors.
    searchobj = re.search(r"doPostBack\('(.*?)','(\d+)'", href)
    event_target = searchobj.group(1)
    input_tag = soup.find_all("input", id="__VIEWSTATE")
    viewstate = input_tag[0]["value"]
    return event_target, viewstate


def get_page(allpage, r, letter):
    """Download pages 2..allpage for *letter* via sequential ASP.NET postbacks.

    Each POST's parameters come from the previous page's response, so pages
    must be requested strictly in order; *r* starts as the page-1 response
    and is replaced after every request.
    """
    url = BaseUrl + letter
    for page in range(2, allpage + 1):
        if not os.path.exists(sPath):
            os.makedirs(sPath)
        filePath = os.path.join(sPath, letter + "_" + str(page) + ".html")
        event_target, viewstate = getPostData(r)
        payload = {
            '__EVENTTARGET': event_target,
            '__EVENTARGUMENT': str(page),
            '__VIEWSTATE': viewstate,
            'r1': '1',
            'slt': '1'
        }
        r = requestJournalsPost(url, data=payload)
        savefile(r, filePath)
        print("letter is:{}, all page is:{}, now page is {}".format(letter, allpage, page))


def start():
    """Crawl the search results for the "0" bucket and every letter A-Z.

    The "0" bucket is always re-downloaded; lettered buckets are skipped
    when their first page already exists on disk (cheap resume support).
    This asymmetry matches the original behavior.
    """
    _download_letter("0", skip_existing=False)
    for letter in string.ascii_uppercase:
        _download_letter(letter, skip_existing=True)
    print("请求完成")


def _download_letter(letter, skip_existing):
    """Fetch and save page 1 for *letter*, then any remaining pages.

    When *skip_existing* is true and page 1 is already on disk, the letter
    is skipped entirely (its follow-up pages are assumed present too).
    """
    filePath = os.path.join(sPath, letter + "_1.html")
    if skip_existing and os.path.exists(filePath):
        print("文件存在" + filePath)
        return
    if not os.path.exists(sPath):
        os.makedirs(sPath)
    url = BaseUrl + letter
    r = requestJournals(url)
    savefile(r, filePath)
    page = get_page_num(r)
    print("letter is:{}, all page is:{}".format(letter, page))
    if page > 1:
        # Pages 2..N need postback params scraped from the page-1 response.
        get_page(page, r, letter)


if __name__ == "__main__":
    start()
