# coding:utf-8
# Botu (博图) category list-page downloader
import os
import string

import facade
from bs4 import BeautifulSoup
from xjlibrary.our_file_dir import BaseDir

# Resolve the output directory relative to this file:
# <two levels above this file>/download/gzlg_botu/download/list
curpath = BaseDir.get_file_dir_absolute(__file__)
TopPath = BaseDir.get_upper_dir(curpath, -2)
dirPath = BaseDir.get_new_path(TopPath, "download", "gzlg_botu", "download", "list")
# Ensure the directory exists before any page is written.
BaseDir.create_dir(dirPath)


class DownList(object):
    """Download the paginated category list pages (A-Z) and save each page
    as an HTML file under ``dirPath``.

    Two hosts are used: ``222.198.130.68`` (to read the page count) and
    ``www.cnbooksearch.com`` (to download each listing page), each with its
    own proxy and session cookie.
    """

    def __init__(self):
        self.logger = facade.get_streamlogger()
        # Proxy used for the list-page downloads (www.cnbooksearch.com).
        self.proxy = {
            "http": "192.168.30.176:8031",
            "https": "192.168.30.176:8031"
        }

        # Proxy used for the page-count requests (222.198.130.68).
        self.proxy1 = {
            "http": "192.168.30.176:8012",
            "https": "192.168.30.176:8012"
        }

        # Headers (incl. session cookie) for www.cnbooksearch.com.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.108 Safari/537.36',
            'Cookie': 'ASP.NET_SessionId=phlgo5anzkdmhzcrzhnytaag',
            'Host': 'www.cnbooksearch.com',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'
        }
        # Headers (incl. session cookie) for 222.198.130.68.
        self.headers1 = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.108 Safari/537.36',
            'Cookie': 'ASP.NET_SessionId=wnmlwinh2oyrcdq3nwuu3i45',
            'Host': '222.198.130.68',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'
        }
        self.url = "http://www.cnbooksearch.com/Categories.aspx?name="

    def select(self):
        """Walk every category A-Z: look up its page count, then download
        each page. Categories whose page count cannot be determined are
        skipped instead of crashing."""
        for name in string.ascii_uppercase:
            url_name = self.url + name
            all_page = self.get_page(name)
            self.logger.info(all_page)
            # BUG FIX: get_page() used to return None on a failed request,
            # which made down() crash on range(None). Skip such categories.
            if not all_page:
                continue
            self.down(name, url_name, all_page)

    def get_page(self, name):
        """Return the number of listing pages for category ``name``.

        Parses the pagination block (``div#xulie2``); its anchor count minus
        two (presumably prev/next links — verify against the live markup)
        is the page count. Returns 0 on request failure or when the
        pagination block is absent, so callers can iterate safely.
        """
        url = "http://222.198.130.68/Categories.aspx?name="
        url_name = url + name
        BoolResult, errString, r = facade.BaseRequest(url_name,
                                                      proxies=self.proxy1,
                                                      headers=self.headers1,
                                                      timeout=(30, 60))
        if not BoolResult:
            self.logger.info("请求出错")
            return 0

        soup = BeautifulSoup(r.content, 'lxml')
        div = soup.find('div', id="xulie2")
        # BUG FIX: soup.find() returns None when the div is missing; the
        # original code would raise AttributeError on div.find_all.
        if div is None:
            return 0
        all_page = div.find_all('a')
        # Clamp so a short anchor list never yields a negative count.
        return max(len(all_page) - 2, 0)

    def down(self, name, url, all_page):
        """Download pages 0..all_page-1 of category ``name`` from ``url``."""
        for page in range(all_page):
            url2 = url + '&author=&page=' + str(page)
            self.down_one(name, page, url2)

    def down_one(self, name, page, url):
        """Download one listing page to ``dirPath`` as ``<name>_<page>.html``.

        Returns True when the file already exists or was written
        successfully, False on a failed request.
        """
        file = os.path.join(dirPath, name + "_" + str(page) + ".html")
        if os.path.exists(file):
            self.logger.info(file + "文件存在")
            return True

        BoolResult, errString, r = facade.BaseRequest(url,
                                                      proxies=self.proxy,
                                                      mark="Cs_main",
                                                      headers=self.headers,
                                                      timeout=(30, 60))
        if BoolResult:
            BaseDir.single_write_file(r.text, file)
            self.logger.info(name + ": 第" + str(page) + "下载成功")
            return True
        else:
            self.logger.info("下载页失败")
            return False

if __name__ == "__main__":
    down = DownList()
    down.select()
