"""
成都信息工程大学图书馆地址
http://www.lib.cuit.edu.cn/
AMS美国气象学会会刊
https://journals.ametsoc.org/
需要走代理
xujiang
"""
import os
import sys

import facade
from bs4 import BeautifulSoup
from xjlibrary.our_file_dir import BaseDir

nCount = 0  # row counter; declared for InsertIntoDbFromList but never incremented
ListSqls = []  # buffer of SQL statements, flushed to the DB in batches
list_failed = []  # collector for failed items (currently unused)
curPath = os.path.dirname(os.path.abspath(__file__))  # directory of this script
configfile = BaseDir.get_new_path(curPath, "db.ini")  # DB config file next to the script
logger = facade.get_streamlogger()  # console logger from the project facade
# "db" presumably names the section inside db.ini — TODO confirm against facade.MysqlUtiles
mysqlutils = facade.MysqlUtiles(configfile, "db", logger)


BaseUrl = "https://journals.ametsoc.org/"
# Browser-like request headers so the journal site serves the normal HTML page.
HEADERS = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
           'Accept-Encoding': 'gzip, deflate, br', 'Connection': 'keep-alive',
           'Accept-Language': 'zh-CN,zh;q=0.9', 'Cache-Control': 'max-age=0',
           'Host': 'journals.ametsoc.org',
           'Upgrade-Insecure-Requests': '1',
           'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) '
                         'Chrome/66.0.3359.139 Safari/537.36', }

# Proxy settings, currently disabled (the module docstring notes the site needs a proxy).
# Proxies = {
#     'http': '192.168.30.176:8184',
#     # 'http':'162.105.138.192:8092',
#     'https': '192.168.30.176:8184'  # the key refers to the target site's scheme
# }


def get_url(url):
    """Download *url* via the project's request facade and return the response.

    Sends the shared browser-like HEADERS with TLS verification disabled
    and a 45-second timeout. On any request failure the error reason is
    logged and the whole process exits with status -1 (fail-fast script).
    """
    ok, err_msg, response = facade.BaseRequest(
        url, headers=HEADERS, timeout=45, verify=False)
    if not ok:
        logger.error("请求失败原因: %s" % err_msg)
        sys.exit(-1)
    return response


def _sql_escape(value):
    """Escape backslashes and single quotes so *value* is safe inside a
    single-quoted MySQL string literal (`''` is the standard SQL escape)."""
    return value.replace("\\", "\\\\").replace("'", "''")


def souphtml(r):
    """Parse the AMS journal listing page and queue upsert statements.

    For each ``<li>`` inside the ``ul.grid`` listing, extracts the journal
    title, cover image URL, journal URL, ISSN, eISSN and issue count, then
    appends an ``INSERT ... ON DUPLICATE KEY UPDATE`` statement to the
    module-level ``ListSqls`` buffer, flushing to the database every 1000
    rows and once more at the end.

    :param r: response object whose ``.text`` holds the listing HTML.
    """
    # Module-level SQL buffer shared with InsertIntoDbFromList().
    global ListSqls
    soup = BeautifulSoup(r.text, 'lxml')
    ul_tag = soup.find_all('ul', class_="grid")
    li_tag = ul_tag[0].find_all("li")
    print(len(li_tag))
    for li in li_tag:
        div_tag = li.find('div', class_='SmallCover')
        imag_tag = div_tag.find('img')
        title = imag_tag['title']
        image_url = imag_tag['src']
        div_tag = li.find('div', class_='journalMeta')
        url = div_tag.find("a")['href']
        tbody = div_tag.find("tbody")
        # Guard against entries without an ISSN table (old code crashed on None).
        td_tag = tbody.find_all("td") if tbody is not None else []
        strong_tag = div_tag.find("strong")
        ISSN = ''
        eISSN = ''
        num = ''
        # Narrow exceptions: only missing <td>/<strong> elements are expected;
        # the old bare ``except:`` also hid genuine bugs (e.g. NameError).
        try:
            ISSN = ''.join(td_tag[0].stripped_strings)
        except (IndexError, AttributeError):
            pass
        try:
            eISSN = ''.join(td_tag[1].stripped_strings)
        except (IndexError, AttributeError):
            pass
        try:
            num = "".join(strong_tag.stripped_strings)
        except AttributeError:
            pass
        print(title, image_url, ISSN, eISSN, num)
        # Upsert: new rows are inserted; on a duplicate key the image URL is
        # refreshed and `stat` reset to 0. Values are escaped so titles
        # containing quotes no longer break the statement (previous bug).
        sql = ("INSERT INTO ams_journal(`name`, `url`, `imageurl`, `ISSN`,`eISSN`,`num`) "
               "VALUES('{name}', '{url}', '{imageurl}', '{ISSN}','{eISSN}','{num}') "
               "on DUPLICATE key update `imageurl`='{imageurl}',`stat`=0").format(
            name=_sql_escape(title), url=_sql_escape(url),
            imageurl=_sql_escape(image_url), ISSN=_sql_escape(ISSN),
            eISSN=_sql_escape(eISSN), num=_sql_escape(num))
        ListSqls.append(sql)
        # Flush in batches so the buffer never grows unboundedly.
        if len(ListSqls) > 1000:
            InsertIntoDbFromList()
    InsertIntoDbFromList()


# 插入数据库
def InsertIntoDbFromList():
    global nCount, ListSqls
    # conn = MajorDbConnect()
    # success, failed = ExeSqlList(ListSqls, conn)
    mysqlutils.ExeSqlListToDB(ListSqls)
    ListSqls = list()


def main():
    """Fetch the AMS journals landing page and scrape it into the database."""
    response = get_url(BaseUrl)  # download the listing page (exits on failure)
    souphtml(response)           # parse it and persist the journal rows


# Script entry point: scrape the AMS journal listing into MySQL.
if __name__ == "__main__":
    main()
