"""
这里有两本特殊的book 需要单独一个来下载
并将其添加到ebook 和ebookinfo
添加到ebook是为了下载图片和统计图书本数时方便
"""

# 这两个是特殊的  没有在两大类中
import json
import random
import time

import pymysql
from bs4 import BeautifulSoup

from facade.loggerfacade import get_streamlogger
from facade.mysqlfacade import MysqlUtiles
from xjlibrary.mrequest.baserequest import USER_AGENTS, BaseRequest
from xjlibrary.our_file_dir import BaseDir
from xjlibrary.tools.BaseString import BaseString

# One User-Agent is picked at import time and reused for every request.
HEADERS = {'User-Agent': random.choice(USER_AGENTS)}

# DB connection settings come from db.ini located next to this script.
curPath = BaseDir.get_file_dir_absolute(__file__)
configfile = BaseDir.get_new_path(curPath, "db.ini")

# The two special books (site-relative ISBN URLs) that fall outside the two
# main categories and therefore must be fetched individually.
listurl = ["/isbn/9780841239999", "/isbn/9780841230460"]

BookBaseUrl = "https://pubs.acs.org"

# Maps the config keys MysqlUtiles expects to the key names used in db.ini.
keytransformdicts = {"port": "db_port", "host": "db_host", "passwd": "db_pw", "user": "db_user",
                     "db": "db_name",
                     "chartset": "db_charset"}

logger = get_streamlogger()
# NOTE(review): constructing MysqlUtiles here presumably opens the DB
# connection as an import-time side effect — confirm before importing this
# module from elsewhere.
myutil = MysqlUtiles(configfile, "db", keytransformdicts=keytransformdicts, logger=logger)


def getBookInfo(r, url1):
    """Parse an ACS book landing page and upsert its metadata.

    Extracts title, subtitle, publication date, ISBNs, DOI, authors with
    their affiliation markers, publisher and sponsor information from the
    HTML in ``r``, then writes one row each into `ebookinfo` (full JSON
    metadata) and `ebook` (cover image URL + stat flag) using
    INSERT ... ON DUPLICATE KEY UPDATE upserts.

    Parameters:
        r: response object whose ``.text`` attribute holds the page HTML.
        url1: site-relative book URL (e.g. "/isbn/9780841239999"); stored
            verbatim in both tables.
    """
    bs = BeautifulSoup(r.text, "lxml")
    meta1_first = bs.find("div", class_="meta1").find_all("div")[0]

    sub_tag = bs.find("div", class_="subtitles")
    substrings = sub_tag.get_text().strip() if sub_tag else ""

    date_tag = bs.find("div", class_="epubdate")
    date = date_tag.get_text().strip() if date_tag else ""

    isbn13 = eisbn = doi = ""
    for div in bs.find("div", class_="meta2").find_all("div"):
        strings = div.get_text().strip()
        if "ISBN13" in strings:
            isbn13 = strings
        if "eISBN" in strings:
            eisbn = strings
        if "DOI" in strings:
            doi = strings

    # Footnote marker -> institution name, filled from the affiliations div.
    institutions = {}
    authors = ""
    # BUG FIX: the original tested ``tag.find("Editor") != -1``.  On a bs4
    # Tag, ``find`` searches for a *tag named* "Editor" and returns a Tag or
    # None — never -1 — so the condition was always true.  Check the rendered
    # text instead.
    if "Editor" in meta1_first.get_text():
        author_parts = []
        author = ""
        key = ""
        for tag in meta1_first.contents:
            if tag.name == "span":
                author = tag.get_text().strip()
                print("author is :" + author)
            if tag.name == "sup":
                # Accumulate multiple affiliation markers as "1,2,...".
                key = tag.get_text().strip() if not key else key + "," + tag.get_text().strip()
                print("key is :" + key)
            if tag == ", ":
                # A bare ", " string node separates one author from the next.
                if key or author:
                    author_parts.append(author + "[" + key + "]" if key else author)
                author = ""
                key = ""
        if key or author:  # flush the trailing author (no separator after it)
            author_parts.append(author + "[" + key + "]" if key else author)
        authors = ";".join(author_parts)

        aff_tag = bs.find("div", class_="affiliations")
        if aff_tag:
            for tag in aff_tag.contents:
                # ROBUSTNESS FIX: .contents also yields NavigableStrings
                # (e.g. newlines) whose ``.sup`` access would raise
                # AttributeError in the original loop.
                sup = getattr(tag, "sup", None)
                if sup is None:
                    continue
                institutions[sup.get_text()] = tag.contents[1].strip()

    # Publisher / copyright holder (renamed from ``copyright``, which
    # shadowed the builtin).
    publisher = bs.find("span", class_="NLM_publisher-name").get_text().strip()

    # Sponsoring division(s), with the boilerplate label stripped.
    spon = bs.find("div", class_="sponsors")
    spon = spon.get_text().strip() if spon else ""
    spon = spon.replace("Sponsoring Divisions", "").replace(":", "").strip()

    author_institution = ";".join(
        "[" + num + "]" + name for num, name in institutions.items())

    title = bs.find("h1", class_="noMargin").get_text().strip()
    jsondicts = {
        "copyright": publisher,
        "spon": spon,
        "author": authors,
        "author_institution": author_institution,
        "substrings": substrings,
        "date": date,
        "isbn13": isbn13,
        "eisbn": eisbn,
        "vol": "",
        "issn": "",
        "eissn": "",
    }
    jsonmsg = json.dumps(jsondicts, ensure_ascii=False)

    # CONSISTENCY FIX: escape every interpolated string.  The original
    # escaped only ``title``, hand-escaped quotes in the JSON payload, and
    # left url/doi/img unescaped entirely.
    esc = pymysql.escape_string
    sql = "insert into `ebookinfo` (`title`,`url`,`doi`,`jsonmsg`) values ('%s','%s','%s','%s')" \
          " on DUPLICATE key update `url`='%s',`jsonmsg`='%s'" % (
              esc(title), esc(url1), esc(doi), esc(jsonmsg), esc(url1), esc(jsonmsg))
    myutil.ExeSqlToDB(sql, errExit=True)

    img = bs.find("img", class_="book-cover-img")["src"]
    sql = "insert into `ebook` (`title`,`url`,`coverurl`,`doi`,`stat`) values ('%s','%s','%s','%s',%d) on " \
          " DUPLICATE key update `url`='%s',`coverurl`='%s'" % (
              esc(title), esc(url1), esc(img), esc(doi), 1, esc(url1), esc(img))
    myutil.ExeSqlToDB(sql, errExit=True)


def down_page(url1, max_retries=10):
    """Download one book page (with bounded retries) and store its metadata.

    Parameters:
        url1: site-relative URL, appended to ``BookBaseUrl``.
        max_retries: maximum number of attempts before giving up.  The
            original retried forever in a tight loop with no pause, which
            hammers the server on a persistently failing URL.
    """
    url = BookBaseUrl + url1
    for attempt in range(1, max_retries + 1):
        BoolResult, errString, r = BaseRequest(url,
                                               mark="bookBox",
                                               headers=HEADERS,
                                               verify=False,
                                               timeout=(30, 60))
        if BoolResult:
            getBookInfo(r, url1)
            return
        logger.error(errString)
        time.sleep(2)  # brief backoff so a failing endpoint is not hammered
    logger.error("giving up on %s after %d attempts" % (url, max_retries))


if __name__ == "__main__":
    for url in listurl:
        down_page(url)
