import json
import string
import sys
import time

import facade
from parsel import Selector
from xjlibrary.our_file_dir import BaseDir
from xjlibrary.tools.BaseUrl import BaseUrl

# Resolve the project root and prepare the two output directories:
# `filepath` for regular result pages, `filepath2` for TechStreet pages.
curPath = BaseDir.get_file_dir_absolute(__file__)
topPath = BaseDir.get_upper_dir(curPath, -2)
filepath = BaseDir.get_new_path(topPath, "download", "wanfang_bz", "download", "page")
filepath2 = BaseDir.get_new_path(topPath, "download", "wanfang_bz", "download", "pagetechstreet")
for _out_dir in (filepath, filepath2):
    BaseDir.create_dir(_out_dir)


class DownSearch(object):
    """Download Wanfang (万方) standards search-result pages to local HTML files.

    Three crawl strategies share the same page-by-page download helpers:

    * ``down_para_home`` / ``down_first_page`` -- follow every category link
      found on the standards navigation page.
    * ``down_A_Z`` -- query each class code A..Z.
    * ``down_TechStreet_A_Z`` -- query A..Z restricted to the 科睿唯安
      (TechStreet) source database.

    The site refuses to page past 5000 records for a single query, so any
    query reporting more than that is re-split per publication year:
    ``get_year`` fills ``self.dictsyear`` and the ``*_year`` helpers page
    through each year separately.  Result pages are written under the
    module-level ``filepath`` / ``filepath2`` directories; pages whose file
    already exists are skipped, so an interrupted crawl is resumable.
    """

    def __init__(self):
        # Standards category navigation page (crawl entry point).
        self.url = "http://www.wanfangdata.com.cn/navigations/standards.do"
        self.BaseUrl = "http://www.wanfangdata.com.cn"
        self.logger = facade.get_streamlogger()
        self.header = {
            "Referer": "http://www.wanfangdata.com.cn/navigations/standards.do",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36"
        }
        self.seturl = set()          # relative category URLs found by down_para_home()
        self.count = 0               # running total of result records seen
        self.dictsyear = {}          # year -> record count, (re)filled by get_year()
        self.filename = ""           # file name of the page currently being saved
        self.filesavepath = ""       # full path of the page currently being saved
        self.pageurl = ""            # URL of the result page currently being fetched
        self.pageurlTechStreet = ""

    def down_para_home(self):
        """Download the category home page and collect every category href
        into ``self.seturl``.

        :return: None
        """
        BoolResult, errString, r = facade.BaseRequest(self.url,
                                                      headers=self.header,
                                                      timeout=(30, 60),
                                                      mark="lib_Tab3"
                                                      )
        if BoolResult:
            selector = Selector(text=r.text)
            results = selector.xpath('//*[contains(@id, "con_one_")]/ul/li//@href').getall()
            self.seturl.update(results)
        else:
            # A failed download here is most likely a captcha challenge page.
            self.logger.info("下载首页失败，请检查是否因为有验证码")

    def down_first_page(self):
        """Download every category URL collected in ``self.seturl``.

        :return: None
        """
        for url in self.seturl:
            time.sleep(1)  # throttle: one request per second
            url = self.BaseUrl + url
            BoolResult, errString, r = facade.BaseRequest(url,
                                                          headers=self.header,
                                                          timeout=(30, 60),
                                                          mark="ResultBlock",
                                                          endstring=""
                                                          )
            if BoolResult:
                selector = Selector(text=r.text)
                allnum = selector.xpath('//*[@id="here"]/div[3]/div[4]/div[3]/span/text()').get()
                print(allnum)
                if allnum is None:
                    # Result-count node missing (empty result set or layout
                    # change): skip, mirroring the guard used in down_A_Z.
                    continue
                # Pagination cap: the site only exposes the first 5000
                # records of a query, so bigger queries are re-split per year.
                if int(allnum) > 5000:
                    self.down_b_asscii(url, 5000)
                    searchWord = selector.xpath('//*[@id="resourceSearchWord"]/@value').get()
                    self.get_year(searchWord.replace(":", "%3A"))
                    self.down_b_asscii_year(url)
                else:
                    self.down_b_asscii(url, allnum)
                self.count += int(allnum)
                print(self.count)
            else:
                print("下载每个选项的首页失败")
                if r:
                    print(r.text)

    def down_b_asscii_year(self, url):
        """Re-download an over-large category query split per year.

        Must be called after ``get_year`` has filled ``self.dictsyear``.
        Each year is paged independently (50 records per page, capped at
        5000 records per year).

        :param url: category search URL whose query string carries the
            original ``searchWord``.
        :return: None
        """
        dicts = BaseUrl.urlQuery2Dict(url)
        search_word = dicts["searchWord"]
        for year, value in self.dictsyear.items():
            # Even a single year can exceed the site's pagination cap.
            if int(value) > 5000:
                value = 5000
            for page in range(1, (int(value) - 1) // 50 + 2):
                self.pageurl = "http://www.wanfangdata.com.cn/search/searchList.do?beetlansyId=aysnsearch&searchType=standards&pageSize=50&page={}&searchWord={}&order=correlation&showType=detail&isCheck=check&isHit=&isHitUnit=&facetField=$issue_date02:{}&facetName={}:$issue_date02&firstAuthor=false&rangeParame=&navSearchType=standards"
                self.pageurl = self.pageurl.format(page, search_word, year, year)
                self.filename = "{}_{}_{}.html".format(search_word, year, page)
                self.filesavepath = BaseDir.get_new_path(filepath, BaseDir.new_title(self.filename))
                if BaseDir.is_file_exists(self.filesavepath):
                    print("文件存在")
                    continue
                self.down_one_page()

    def down_b_asscii(self, url, allnum):
        """Page through one category query, 50 records per page.

        :param url: category search URL whose query string carries the
            ``searchWord``.
        :param allnum: record count to page through (int or numeric str).
        :return: None
        """
        dicts = BaseUrl.urlQuery2Dict(url)
        asscii = dicts["searchWord"]
        for page in range(1, (int(allnum) - 1) // 50 + 2):
            self.pageurl = "http://www.wanfangdata.com.cn/search/searchList.do?beetlansyId=aysnsearch&searchType=standards&pageSize=50&page={}&searchWord={}&order=correlation&showType=detail&isCheck=check&isHit=&isHitUnit=&firstAuthor=false&rangeParame=&navSearchType=standards"
            self.pageurl = self.pageurl.format(page, asscii)
            self.filename = "{}_{}.html".format(asscii, page)
            self.filesavepath = BaseDir.get_new_path(filepath, BaseDir.new_title(self.filename))
            if BaseDir.is_file_exists(self.filesavepath):
                print("文件存在")
                continue
            self.down_one_page()

    def down_A_Z(self):
        """Crawl every class_code A..Z query, splitting per year when the
        result count exceeds the 5000-record pagination cap.

        :return: None
        """
        for assci in string.ascii_uppercase:
            url = "http://www.wanfangdata.com.cn/search/searchList.do?searchType=standards&searchWord=class_code:{}"
            time.sleep(1)  # throttle: one request per second
            url = url.format(assci)
            BoolResult, errString, r = facade.BaseRequest(url,
                                                          headers=self.header,
                                                          timeout=(30, 60),
                                                          endstring=""
                                                          )
            if BoolResult:
                selector = Selector(text=r.text)
                allnum = selector.xpath('//*[@id="here"]/div[3]/div[4]/div[3]/span/text()').get()
                print(assci)
                print(allnum)
                if allnum is None:
                    continue
                if int(allnum) > 5000:
                    # Too big to page directly -- split the query per year.
                    self.get_year("(class_code%3A{})".format(assci))
                    self.down_asscii_year(assci)
                else:
                    self.down_asscii(assci, allnum)
                self.count += int(allnum)
                print(self.count)
            else:
                print("下载每个选项的首页失败")
                if r:
                    print(r.text)
            # FIX: a stray debug ``sys.exit(-1)`` used to sit here at the end
            # of the loop body, aborting the whole program after the first
            # letter "A"; removed so the crawl covers all of A..Z.

    def down_asscii(self, asscii, allnum):
        """Page through one class_code query (``allnum`` total records).

        :param asscii: single class-code letter A..Z.
        :param allnum: record count reported by the search page.
        :return: None
        """
        for page in range(1, (int(allnum) - 1) // 50 + 2):
            self.pageurl = "http://www.wanfangdata.com.cn/search/searchList.do?beetlansyId=aysnsearch&searchType=standards&pageSize=50&page={}&searchWord=class_code%3A{}&order=correlation&showType=detail&isCheck=check&isHit=&isHitUnit=&firstAuthor=false&rangeParame=&navSearchType=standards"
            self.pageurl = self.pageurl.format(page, asscii)
            # NOTE(review): the "TechStreet_" prefix looks historical -- these
            # are plain class-code pages saved under `filepath`; kept as-is so
            # already-downloaded files are still recognized and skipped.
            self.filename = "TechStreet_{}_{}.html".format(asscii, page)
            self.filesavepath = BaseDir.get_new_path(filepath, self.filename)
            if BaseDir.is_file_exists(self.filesavepath):
                print("文件存在")
                continue
            self.down_one_page()

    def down_asscii_year(self, accssii):
        """Page through one over-large class_code query split per year.

        Must be called after ``get_year`` has filled ``self.dictsyear``.

        :param accssii: single class-code letter A..Z.
        :return: None
        """
        for year, value in self.dictsyear.items():
            # Even a single year can exceed the site's pagination cap.
            if int(value) > 5000:
                value = 5000
            for page in range(1, (int(value) - 1) // 50 + 2):
                self.pageurl = "http://www.wanfangdata.com.cn/search/searchList.do?beetlansyId=aysnsearch&searchType=standards&pageSize=50&page={}&searchWord=class_code%3A{}&order=correlation&showType=detail&isCheck=check&isHit=&isHitUnit=&facetField=$issue_date02:{}&facetName={}:$issue_date02&firstAuthor=false&rangeParame=&navSearchType=standards"
                self.pageurl = self.pageurl.format(page, accssii, year, year)
                self.filename = "{}_{}_{}.html".format(accssii, year, page)
                self.filesavepath = BaseDir.get_new_path(filepath, BaseDir.new_title(self.filename))
                if BaseDir.is_file_exists(self.filesavepath):
                    print("文件存在")
                    continue
                self.down_one_page()

    def down_one_page(self):
        """Fetch ``self.pageurl`` and write the HTML to ``self.filesavepath``.

        :return: None
        """
        time.sleep(1)  # throttle: one request per second
        BoolResult, errString, r = facade.BaseRequest(self.pageurl,
                                                      headers=self.header,
                                                      timeout=(15, 30),
                                                      endstring="",
                                                      mark="ResultBlock"
                                                      )
        if BoolResult:
            print(self.filesavepath)
            BaseDir.single_write_file(r.text, self.filesavepath)
        else:
            print("下载某一页失败")

    def get_year(self, searchWord):
        """Fetch per-year record counts for *searchWord* into ``self.dictsyear``.

        Posts to the facet endpoint and stores every non-zero
        ``{year: count}`` pair; the dict is cleared first so stale entries
        from a previous query never leak through.  Exits the process on a
        failed request (fail fast -- every caller depends on this data).

        :param searchWord: URL-encoded search expression.
        :return: None
        """
        url = "http://www.wanfangdata.com.cn/search/navigation.do"
        postdata = {
            "searchType": "standards",
            "searchWord": searchWord,
            "facetField": "$issue_date02",
            "isHit": "",
            "startYear": "",
            "endYear": "",
            "limit": "11",
            "hqfwfacetField": "",
            "navSearchType": "",
            "single": "true",
            "bindFieldLimit": "{}"
        }
        header = {
            "Accept": "application/json, text/javascript, */*; q=0.01",
            "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
            "Host": "www.wanfangdata.com.cn",
            "Origin": "http://www.wanfangdata.com.cn",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36",
            "X-Requested-With": "XMLHttpRequest"
        }

        BoolResult, errString, r = facade.BaseRequestPost(url,
                                                          data=postdata,
                                                          headers=header,
                                                          timeout=(30, 60),
                                                          mark="facetTree",
                                                          endstring="")
        if BoolResult:
            self.dictsyear.clear()
            dicts = json.loads(r.text)
            for yeardicts in dicts["facetTree"]:
                if int(yeardicts["count"]) != 0:
                    self.dictsyear[yeardicts["value"]] = yeardicts["count"]
            print("请求年成功并解析到字典")

        else:
            print("请求出错 请检查")
            sys.exit(-1)

    def down_TechStreet_A_Z(self):
        """Crawl A..Z restricted to the 科睿唯安 (TechStreet) source database
        -- mainly foreign-language standards, unlike the Chinese A..Z crawl.

        :return: None
        """
        for assci in string.ascii_uppercase:
            url = "http://www.wanfangdata.com.cn/search/searchList.do?searchType=standards&pageSize=&searchWord={}&facetField=$source_db:科睿唯安&pageSize=50"
            time.sleep(1)  # throttle: one request per second
            url = url.format(assci)
            BoolResult, errString, r = facade.BaseRequest(url,
                                                          headers=self.header,
                                                          timeout=(30, 60),
                                                          endstring=""
                                                          )
            if BoolResult:
                selector = Selector(text=r.text)
                allnum = selector.xpath('//*[@id="here"]/div[3]/div[4]/div[3]/span/text()').get()
                print(assci)
                print(allnum)
                if allnum is None:
                    continue
                # Pagination cap -- split per year when over 5000 records.
                if int(allnum) > 5000:
                    self.get_year("({})*%24source_db%3A%E7%A7%91%E7%9D%BF%E5%94%AF%E5%AE%89".format(assci))
                    self.down_asscii_year_techstreet(assci)
                else:
                    # FIX: previously called self.down_asscii, which pages the
                    # UNFILTERED class_code query and writes to the wrong
                    # directory; down_asscii_techstreet carries the source_db
                    # facet and saves under filepath2 (it was otherwise dead
                    # code).
                    self.down_asscii_techstreet(assci, allnum)
                self.count += int(allnum)
                print(self.count)
            else:
                print("下载每个选项的首页失败")
                if r:
                    print(r.text)

    def down_asscii_year_techstreet(self, accssii):
        """Page through one over-large TechStreet query split per year.

        Must be called after ``get_year`` has filled ``self.dictsyear``.

        :param accssii: single search letter A..Z.
        :return: None
        """
        for year, value in self.dictsyear.items():
            # Even a single year can exceed the site's pagination cap.
            if int(value) > 5000:
                value = 5000
            for page in range(1, (int(value) - 1) // 50 + 2):
                self.pageurl = "http://www.wanfangdata.com.cn/search/searchList.do?beetlansyId=aysnsearch&searchType=standards&pageSize=50&page={}&searchWord={}&order=correlation&showType=detail&facetField=$source_db:科睿唯安*$issue_date02:{}&facetName=*{}:$issue_date02&firstAuthor=false&navSearchType=standards"
                self.pageurl = self.pageurl.format(page, accssii, year, year)
                self.filename = "techstreet_{}_{}_{}.html".format(accssii, year, page)
                self.filesavepath = BaseDir.get_new_path(filepath2, self.filename)
                if BaseDir.is_file_exists(self.filesavepath):
                    print("文件存在")
                    continue
                self.down_one_page()

    def down_asscii_techstreet(self, asscii, allnum):
        """Page through one TechStreet query (``allnum`` total records).

        :param asscii: single search letter A..Z.
        :param allnum: record count reported by the search page.
        :return: None
        """
        for page in range(1, (int(allnum) - 1) // 50 + 2):
            self.pageurl = "http://www.wanfangdata.com.cn/search/searchList.do?beetlansyId=aysnsearch&searchType=standards&pageSize=50&page={}&searchWord={}&order=correlation&showType=detail&isCheck=check&facetField=$source_db:科睿唯安&firstAuthor=false&navSearchType=standards"
            self.pageurl = self.pageurl.format(page, asscii)
            self.filename = "{}_{}.html".format(asscii, page)
            self.filesavepath = BaseDir.get_new_path(filepath2, self.filename)
            if BaseDir.is_file_exists(self.filesavepath):
                print("文件存在")
                continue
            self.down_one_page()


def _main():
    """Run every crawl phase in sequence: TechStreet A-Z, class-code A-Z,
    then the category-navigation crawl."""
    crawler = DownSearch()
    crawler.down_TechStreet_A_Z()
    crawler.down_A_Z()

    crawler.down_para_home()
    crawler.down_first_page()


if __name__ == "__main__":
    _main()
