"""
广东科技职业技术学院 ebmbook  ebm图书
http://10.10.177.1:8081 
需要广东科技职业技术学院的代理才能访问
首页 url http://10.10.177.1:8081/search.aspx
通过标题 A-Z进行下载  只有列表里面的信息 没有下一级信息
"""
import json
import string
import sys

from parsel import Selector

import facade
import requests
from xjlibrary.mdatetime.mtime2 import MDateTimeUtils

from xjlibrary.our_file_dir import BaseDir

# Resolve the output directory for downloaded list pages:
# <project top>/download/ebmbook_mirrorgdit/download/html
curpath = BaseDir.get_file_dir_absolute(__file__)
TopPath = BaseDir.get_upper_dir(curpath, -2)  # two levels above this file
dirPath = BaseDir.get_new_path(TopPath, "download", "ebmbook_mirrorgdit", "download", "html")
BaseDir.create_dir(dirPath)  # ensure the directory exists before any writes


class DownList(object):
    """Crawl the campus EBM library (http://10.10.177.1:8081) book lists.

    Iterates search keys A-Z, fetches every result page of each letter and
    stores each page as one JSON line ({"downdate": ..., "html": ...}) in
    ``dirPath``.  Pages >= 2 require an ASP.NET pager postback, so the
    hidden form fields (__VIEWSTATE etc.) are carried between requests.
    Requires the campus proxy (192.168.30.176:8017) to reach the host.
    """

    def __init__(self):
        self.sn = requests.Session()
        # Search URL template; {} is filled with one letter A-Z.
        self.url = "http://10.10.177.1:8081/search.aspx?k={}"
        # Campus proxy needed to reach the 10.10.177.1 host.
        self.proxy = {
            "http": "192.168.30.176:8017",
            "https": "192.168.30.176:8017"
        }
        self.logger = facade.get_streamlogger()
        # Headers for the referer-less warm-up requests (home / search page).
        self.headers1 = {
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3",
            "Accept-Encoding": "gzip, deflate",
            "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
            "Host": "10.10.177.1:8081",
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.131 Safari/537.36"
        }
        # Headers for the list-page requests; Referer is updated per letter.
        self.headers = {
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3",
            "Accept-Encoding": "gzip, deflate",
            "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
            "Host": "10.10.177.1:8081",
            "Referer": "http://10.10.177.1:8081/search.aspx",
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.131 Safari/537.36"
        }
        # Per-letter pagination state, populated by down_list().
        self.allpage = 0   # total page count for the current letter
        self.ascii = ""    # current search letter
        # ASP.NET hidden-field state required for the page-2+ postbacks.
        # Initialized here so the attributes always exist even before the
        # first list page has been parsed (originally EVENTVALIDATION and
        # VIEWSTATEGENERATOR were only created inside down_list()).
        self.VIEWSTATE = ""
        self.EVENTVALIDATION = ""
        self.VIEWSTATEGENERATOR = ""

    def _warmup_get(self, url, page_desc):
        """GET one warm-up page through the shared session.

        :param url: absolute URL to fetch
        :param page_desc: short page label used in log messages
        :return: True on success, False otherwise
        """
        BoolResult, errString, r = facade.BaseRequest(url,
                                                      sn=self.sn,
                                                      headers=self.headers1,
                                                      proxies=self.proxy,
                                                      timeout=(30, 60))
        if BoolResult:
            self.logger.info(page_desc + "访问成功")
            return True
        # Include the error detail instead of discarding errString.
        self.logger.info(page_desc + "访问失败: %s", errString)
        return False

    def requests_home(self):
        """Fetch the site home page to initialize session cookies."""
        return self._warmup_get("http://10.10.177.1:8081/", "首页")

    def search_requests(self):
        """Fetch the advanced-search page (search.aspx)."""
        return self._warmup_get("http://10.10.177.1:8081/search.aspx", "高级搜索页")

    def _extract_form_state(self, select):
        """Refresh the ASP.NET hidden-field state from a parsed page.

        :param select: parsel.Selector over the page HTML
        """
        self.VIEWSTATE = select.xpath('//*[@id="__VIEWSTATE"]/@value').get()
        self.EVENTVALIDATION = select.xpath('//*[@id="__EVENTVALIDATION"]/@value').get()
        self.VIEWSTATEGENERATOR = select.xpath('//*[@id="__VIEWSTATEGENERATOR"]/@value').get()

    @staticmethod
    def _parse_last_page(atag):
        """Extract the total page count from the pager's "Last" link href.

        Example: "javascript:__doPostBack('AspNetPager','37')" -> 37.

        :param atag: href string, or None/"" when there is no "Last" link
        :return: total page count (1 for a single-page result)
        """
        if not atag:
            return 1
        return int(atag.replace("javascript:__doPostBack('AspNetPager','", "").replace("')", ""))

    def _save_html(self, html, filename):
        """Persist one result page as a single JSON line under dirPath.

        :param html: raw page HTML
        :param filename: output file name (e.g. "A_1.html")
        """
        record = {
            "downdate": MDateTimeUtils.get_today_date_strings(),
            "html": html
        }
        BaseDir.single_write_file(json.dumps(record, ensure_ascii=False) + "\n",
                                  BaseDir.get_new_path(dirPath, filename))

    def down_list(self):
        """Crawl page 1 for every letter A-Z, then the remaining pages.

        Saves each page-1 result and primes the ASP.NET form state needed
        for the POST-based pagination of pages >= 2.
        """
        # "letter" instead of "ascii": don't shadow the builtin ascii().
        for letter in string.ascii_uppercase:
            self.ascii = letter
            url = self.url.format(letter)
            BoolResult, errString, r = facade.BaseRequest(url,
                                                          sn=self.sn,
                                                          mark='book_listPage',
                                                          headers=self.headers,
                                                          proxies=self.proxy,
                                                          timeout=(30, 60))
            if not BoolResult:
                # Originally failed letters were skipped silently; log them.
                self.logger.info("字母 %s 列表页请求失败: %s", letter, errString)
                continue
            # Parse total page count and the postback parameters.
            select = Selector(text=r.text)
            self._extract_form_state(select)
            atag = select.xpath('//*[@id="AspNetPager"]/a[contains(text(),"Last")]/@href').get()
            self.allpage = self._parse_last_page(atag)
            self._save_html(r.text, letter + "_1.html")
            self.down_all_page()

    def down_all_page(self):
        """Download pages 2..allpage for the current letter.

        Skips pages whose output file already exists; aborts the whole run
        (sys.exit) on a download failure so it can be investigated.
        """
        for page in range(2, self.allpage + 1):
            filename = self.ascii + "_" + str(page) + ".html"
            if BaseDir.is_file_exists(BaseDir.get_new_path(dirPath, filename)):
                self.logger.info(filename + "文件存在跳过")
                continue
            if not self.down_one_page(page, filename):
                self.logger.info("下载失败 检查原因")
                sys.exit(-1)

    def down_one_page(self, page, filename):
        """POST the ASP.NET pager postback for one result page and save it.

        :param page: 1-based page number to request
        :param filename: output file name under dirPath
        :return: True on success, False otherwise
        """
        url = self.url.format(self.ascii)
        postdata = {
            "__EVENTTARGET": "AspNetPager",
            "__EVENTARGUMENT": str(page),
            "__VIEWSTATE": self.VIEWSTATE,
            "__VIEWSTATEGENERATOR": self.VIEWSTATEGENERATOR,
            "__EVENTVALIDATION": self.EVENTVALIDATION,
            "top$searchtext": "Search for eBooks and authors",
            "txtkey": "",
            "classification": "ALL"
        }
        self.headers["Referer"] = "http://10.10.177.1:8081/search.aspx?k={}".format(self.ascii)
        BoolResult, errString, r = facade.BaseRequestPost(url,
                                                          sn=self.sn,
                                                          data=postdata,
                                                          mark='book_listPage',
                                                          headers=self.headers,
                                                          proxies=self.proxy,
                                                          timeout=(30, 60))
        if not BoolResult:
            self.logger.info("下载失败: %s", errString)
            return False
        # Each response carries fresh hidden-field values for the next POST.
        self._extract_form_state(Selector(text=r.text))
        self._save_html(r.text, filename)
        return True


if __name__ == "__main__":
    down = DownList()
    down.requests_home()
    down.search_requests()
    down.down_list()
