import json
import re

import facade
import parsel
from xjlibrary.our_file_dir import BaseDir

# Absolute directory containing this script (resolved via the project helper).
curPath = BaseDir.get_file_dir_absolute(__file__)
# Path to the database settings file; presumably "db.ini" sits next to this
# module — it is handed to facade.MysqlUtiles in DownBooks.__init__.
configfile = BaseDir.get_new_path(curPath, "db.ini")


class DownBooks(object):
    """Crawl the SPIE Digital Library eBook listing into the ``books`` table.

    Flow: ``down_books_list`` fetches listing page 1, reads ``ResultsCount``
    to work out the page count, stores page 1, then fetches pages 2..N via
    ``down_page``.  Each page's items are serialized to JSON and bulk-inserted
    by the shared ``_store_books`` helper.
    """

    # Captures the JSON blob passed to the page's inline
    # DisplayResults([...][0]); javascript call.  Raw string fixes the
    # invalid "\(" escape sequences of the original non-raw pattern, and
    # compiling once avoids repeating the pattern in two methods.
    _DISPLAY_RESULTS_RE = re.compile(r"DisplayResults\(\[(.*?)\]\[0\]\);")

    # Listing page size requested in every URL (pageSize=100).
    PAGE_SIZE = 100

    def __init__(self):
        # MySQL helper configured from the db.ini next to this module.
        self.mysqlutils = facade.MysqlUtiles(configfile,
                                             "db",
                                             logger=facade.get_streamlogger())

    @classmethod
    def _parse_display_results(cls, html):
        """Return the dict embedded in DisplayResults(...), or None if absent.

        The original code called ``searchObj.group(1)`` unconditionally and
        died with AttributeError whenever the marker was missing.
        """
        match = cls._DISPLAY_RESULTS_RE.search(html)
        if match is None:
            return None
        return json.loads(match.group(1))

    @staticmethod
    def _extract_link_map(results_html):
        """Map DOI (last two href path segments) -> [href, cover image url].

        ``results_html`` is the server-rendered snippet in which each book
        row is a div whose class contains 'EBookFilterRow'.
        """
        selector = parsel.Selector(text=results_html)
        link_map = {}
        for div in selector.xpath("//div[contains(@class,'EBookFilterRow')]"):
            href = div.xpath("./div[1]/a/@href").get()
            image = div.xpath("./div[1]/a/img/@src").get()
            if not href:
                # A row without a link cannot yield a DOI key; skip instead
                # of crashing on None.split().
                continue
            doi = "/".join(href.split("/")[-2:])
            link_map[doi] = [href, image]
        return link_map

    def _store_books(self, items, link_map):
        """Serialize each item dict to JSON and bulk-insert into ``books``.

        Shared by ``para_html`` and ``para_html2``; the original duplicated
        these ~40 lines verbatim in both methods.
        """
        rows = []
        for obj in items:
            author_links = obj["AuthorEditorLinks"]
            parts = author_links.split("|")
            author = parts[0]
            # Original indexed [1] unconditionally -> IndexError when the
            # field holds no "|" separator.
            authorid = parts[1] if len(parts) > 1 else ""
            # Books missing from the rendered HTML get null link/cover
            # instead of a KeyError.
            href, image = link_map.get(obj["DOI"], (None, None))
            record = {
                "rowid": obj["UniqueId"],
                "Abstract": obj["Abstract"],
                "AuthorEditorLinks": author_links,
                "author": author,
                "authorid": authorid,
                "ISBN_ePub": obj["ISBN_ePub"],
                "ISBN_Kindle": obj["ISBN_Kindle"],
                "ISBN_PDF": obj["ISBN_PDF"],
                "PublicationDateTime": obj["PublicationDateTime"],
                "PublisherName": obj["PublisherName"],
                "Title": obj["Title"],
                "DOI": obj["DOI"],
                # "herf" key kept, typo and all: it is the persisted schema.
                "herf": href,
                "image": image,
                "Keywords": obj["Keywords"]
            }
            rows.append((obj["UniqueId"], image, json.dumps(record), obj["Title"]))
        sql = "insert ignore into books (uid,coverurl,`json`,title) values (%s,%s,%s,%s)"
        self.mysqlutils.ExeSqlMany(sql, rows)

    def down_books_list(self):
        """Fetch listing page 1, store it, then walk the remaining pages."""
        headers = {
            "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
            "accept-encoding": "gzip, deflate",
            "accept-language": "zh-CN,zh;q=0.9",
            "cache-control": "no-cache",
            "pragma": "no-cache",
            "upgrade-insecure-requests": "1",
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36"
        }
        url = "https://www.spiedigitallibrary.org/eBooks?all=1&pageSize=100"
        BoolResult, errString, r = facade.BaseRequest(url,
                                                      headers=headers,
                                                      mark="DisplayResults",
                                                      allow_redirects=True,
                                                      verify=False,
                                                      timeout=60)
        if not BoolResult:
            print("下载错误")
            return
        dicts = self._parse_display_results(r.text)
        if dicts is None:
            # Marker present per BaseRequest's mark= check but not parseable.
            print("下载错误")
            return
        all_books_num = int(dicts["ResultsCount"])
        self.para_html(r.text)
        # Ceiling division; the original int(n/100)+1 requested one extra,
        # empty page whenever the count was an exact multiple of 100.
        pages = -(-all_books_num // self.PAGE_SIZE)
        print("总页数为{}".format(pages))
        for page in range(2, pages + 1):
            print(page)
            self.down_page(page)

    def down_page(self, page):
        """Fetch listing page *page* via the AJAX endpoint and store it."""
        url = ("https://www.spiedigitallibrary.org/searchajax/eBook/results"
               "?all=1&pageSize=100&page={}").format(page)
        header = {
            "accept": "*/*",
            "accept-encoding": "gzip, deflate",
            "accept-language": "zh-CN,zh;q=0.9",
            "cache-control": "no-cache",
            "pragma": "no-cache",
            "referer": "https://www.spiedigitallibrary.org/eBooks?all=1&pageSize=100",
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36",
            "x-requested-with": "XMLHttpRequest"
        }
        BoolResult, errString, r = facade.BaseRequest(url,
                                                      mark="Items",
                                                      endstring="",
                                                      allow_redirects=True,
                                                      verify=False,
                                                      headers=header,
                                                      timeout=60)
        if BoolResult:
            self.para_html2(r.text)
        else:
            print("下载页失败")

    def para_html2(self, jsonmsg):
        """Parse one AJAX page payload (a JSON string) and persist its books."""
        data = json.loads(jsonmsg)
        self._store_books(data["Items"], self._extract_link_map(data["ResultsHTML"]))

    def para_html(self, html):
        """Parse the first (full HTML) listing page and persist its books."""
        # Debug copy of the raw page, kept from the original implementation.
        BaseDir.single_write_file(html, "./test.html")
        data = self._parse_display_results(html)
        if data is None:
            print("下载错误")
            return
        self._store_books(data["Items"], self._extract_link_map(data["ResultsHTML"]))


if __name__ == "__main__":
    down = DownBooks()
    down.down_books_list()
