"""
IP波士顿国际出版社电子期刊
"""
import facade
import requests
from parsel import Selector
from xjlibrary.our_file_dir import BaseDir

# Directory containing this script; db.ini next to it supplies the MySQL settings.
curpath = BaseDir.get_file_dir_absolute(__file__)
# Presumably the project root two levels up — unused in this file; TODO confirm before removing.
TopPath = BaseDir.get_upper_dir(curpath, -2)
configfile = BaseDir.get_new_path(curpath, "db.ini")


class DoownList(object):
    """Scrape the International Press publication listing and store it in MySQL.

    The listing page lays its items out in a table where every row carries two
    entries: the first in td[1]/td[2] (cover link / title) and the second in
    td[3]/td[4].  Each entry is either a journal or a book series, which is
    told apart by whether "books" appears in its URL.
    """

    def __init__(self):
        self.logger = facade.get_streamlogger()
        # Connection parameters come from the db.ini placed next to this script.
        self.mysqlutils = facade.MysqlUtiles(configfile, "db", logger=self.logger)
        self.url = "https://www.intlpress.com/site/pub/pages/journals/_home/_default/index.php"
        self.sn = requests.Session()
        self.headers = {
            "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3",
            "accept-encoding": "gzip, deflate",
            "accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
            "upgrade-insecure-requests": "1",
            "sec-fetch-mode": "navigate",
            "sec-fetch-site": "none",
            "sec-fetch-user": "?1",
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.131 Safari/537.36"
        }

    def downlist(self):
        """Fetch the listing page; parse it on success, dump the body to ./err.html on failure."""
        BoolResult, errString, r = facade.BaseRequest(self.url,
                                                      sn=self.sn,
                                                      mark="list_itemswithimages",
                                                      headers=self.headers,
                                                      timeout=(30, 60))
        if BoolResult:
            self.logger.info("开始解析")
            self.parahtml(r.text)
        else:
            # NOTE(review): assumes facade.BaseRequest still returns a response
            # object on failure — confirm r is never None here.
            BaseDir.single_write_file(r.text, "./err.html")
            self.logger.info("下载失败")

    @staticmethod
    def _extract_item(tr, link_td, title_td):
        """Extract one (jid, url, title, coverurl, types) tuple from a table row.

        link_td holds the <a><img> cover link, title_td the title paragraph.
        Returns None when the cell has no link (e.g. a padding cell in the
        final row), instead of crashing on the missing href.
        """
        url = tr.xpath('.//td[%d]/a/@href' % link_td).get()
        if not url:
            return None
        coverurl = tr.xpath('.//td[%d]/a/img/@src' % link_td).get()
        title = tr.xpath(".//td[%d]/div/a/p/text()" % title_td).get()
        if url.find("books") > -1:
            types = "books"
            jid = url.replace("site/pub/pages/books/_home/series/", "").replace("/index.php", "")
        else:
            types = "journals"
            jid = url.replace("site/pub/pages/journals/items/", "").replace("/_home/_main/index.php", "")
        return (jid, url, title, coverurl, types)

    def parahtml(self, html):
        """Parse the listing table and hand all extracted rows to insertsql."""
        select = Selector(text=html)
        tabletag = select.xpath('//*[@id="list"]/table')
        trlisttag = tabletag.xpath('.//tr')
        Listdata = []
        for tr in trlisttag:
            # Each <tr> carries two entries: (td1, td2) and (td3, td4).
            for link_td, title_td in ((1, 2), (3, 4)):
                item = self._extract_item(tr, link_td, title_td)
                if item is not None:
                    Listdata.append(item)
        self.insertsql(Listdata)

    def insertsql(self, Listdata):
        """REPLACE the scraped rows into the journal table (jid acts as the key)."""
        sql = "replace into journal (jid,url,title,coverurl,types) values (%s,%s,%s,%s,%s)"
        self.mysqlutils.ExeSqlMany(sql, Listdata)


def main():
    """Entry point: scrape the listing page and persist it."""
    DoownList().downlist()

if __name__ == "__main__":
    """
    这里面有两本是图书 不属于期刊不需要下
    """
    main()
