"""
成都信息工程大学图书馆地址
http://www.lib.cuit.edu.cn/
AMS美国气象学会会刊
https://journals.ametsoc.org/
需要走代理
xujiang
"""
import os
import sys

import facade
from xjlibrary.our_file_dir import BaseDir

# Resolve paths relative to this script's location.
curPath = BaseDir.get_file_dir_absolute(__file__)
# Project top-level directory, two levels up from this file.
TopPath = BaseDir.get_upper_dir(curPath, -2)
# Output directory for the downloaded volume HTML pages.
sPath = BaseDir.get_new_path(TopPath, "download", "cdxg_ams", "download", "volume")
# MySQL connection settings are read from db.ini next to this script ("db" section).
configfile = BaseDir.get_new_path(curPath, "db.ini")
logger = facade.get_streamlogger()
mysqlutils = facade.MysqlUtiles(configfile, "db", logger)

# nCount: number of pages downloaded during this run.
# ListSqls: UPDATE statements accumulated here and executed in one batch in main().
# list_failed: declared for failure tracking, but never appended to in this file.
nCount = 0
ListSqls = []
list_failed = []

# Base URL of the AMS journals site; the relative urls stored in the
# ams_journal table are joined onto this.
BaseUrl = "https://journals.ametsoc.org"
# Browser-like request headers so the site serves the normal HTML pages.
HEADERS = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
           'Accept-Encoding': 'gzip, deflate, br', 'Connection': 'keep-alive',
           'Accept-Language': 'zh-CN,zh;q=0.9', 'Cache-Control': 'max-age=0',
           'Host': 'journals.ametsoc.org',
           'Upgrade-Insecure-Requests': '1',
           'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) '
                         'Chrome/66.0.3359.139 Safari/537.36', }

# Outbound proxy (the module docstring says the site must be reached via proxy).
# NOTE(review): Proxies is defined but never passed to facade.BaseRequest in
# this file — confirm whether the facade picks it up elsewhere.
Proxies = {
    'http': '192.168.30.176:8184',
    # 'http':'162.105.138.192:8092',  # alternate proxy, kept for reference
    'https': '192.168.30.176:8184'  # the key is the target site's URL scheme
}


def get_url(url):
    """GET *url* through facade.BaseRequest and return the response object.

    Terminates the whole script with ``sys.exit(-1)`` when the request
    fails or the page looks incomplete, so a broken download is never
    saved and marked as done.

    :param url: absolute URL of a journal volume page
    :return: the response object (exposes raw bytes as ``.content``)
    """
    BoolResult, errString, r = facade.BaseRequest(url,
                                                  headers=HEADERS,
                                                  timeout=30,
                                                  verify=False)
    if not BoolResult:
        logger.error("出现错误，请检查")  # "an error occurred, please check"
        sys.exit(-1)
    # Sanity check: a fully rendered volume page contains the active-tab
    # marker element; its absence means a truncated or blocked response.
    # NOTE(review): the original condition (`not ...find(...) == -1`) exited
    # when the marker WAS present, contradicting the "not find" message —
    # fixed to exit when it is absent; confirm against a live page.
    if '<li class="active" aria-selected="true">' not in r.content.decode('utf-8'):
        print('not find 特征值')
        sys.exit(-1)

    return r


def save_file(name, r, out_dir=None):
    """Write the body of response *r* to ``<out_dir>/<name>.html``.

    Decodes the raw bytes as UTF-8 first and falls back to GB18030 when
    the body is not valid UTF-8. Does nothing if the target file already
    exists (keeps the previously downloaded copy).

    :param name: file name without extension
    :param r: response object exposing raw bytes as ``r.content``
    :param out_dir: target directory; defaults to the module-level sPath
    """
    target_dir = sPath if out_dir is None else out_dir
    outfile = "{}/{}.html".format(target_dir, name)
    if os.path.exists(outfile):
        return
    # Decode BEFORE opening the file so a failed UTF-8 decode does not
    # leave an empty file behind; catch only the decode error instead of
    # the original bare `except:` which hid genuine I/O failures.
    try:
        text = r.content.decode("utf-8")
        encoding = "utf-8"
    except UnicodeDecodeError:
        text = r.content.decode("GB18030")
        encoding = "GB18030"
    with open(outfile, mode='w', encoding=encoding) as f:
        f.write(text)


def get_list_url(rows):
    """Download every journal volume page listed in *rows*.

    :param rows: iterable of ``(name, url)`` pairs from ams_journal; the
        url is relative to BaseUrl and also serves as the DB key. The DB
        name column is ignored — the file name is derived from the url.

    Side effects: saves each page under sPath, increments the module
    counter nCount, and appends one ``stat=1`` UPDATE to ListSqls for
    every page that is present on disk afterwards.
    """
    global nCount
    count = len(rows)
    for _, url in rows:
        urlid = url  # raw relative url: key used in the UPDATE statements
        # File name is the second-to-last path segment of the url.
        name = url.split("/")[-2]
        full_url = BaseUrl + url
        outfile = "{}/{}.html".format(sPath, name)
        # Make sure the output directory exists (race-free variant of the
        # original exists()/makedirs() pair).
        os.makedirs(sPath, exist_ok=True)
        # The UPDATE is identical whether the file already existed or was
        # just downloaded, so build it once. urlid comes from our own DB,
        # but note the string interpolation is not injection-safe.
        sql = "update ams_journal set stat=1 where url='{}'".format(urlid)
        if os.path.exists(outfile):
            print("{}文件存在".format(outfile))
            ListSqls.append(sql)
            continue
        r = get_url(full_url)
        # Persist the page, then record progress.
        save_file(name, r)
        nCount += 1
        ListSqls.append(sql)
        print("完成{ncount}个，总共{count}个".format(ncount=nCount, count=count))



def SelectListFromDB():
    """Fetch all (name, url) rows from the ams_journal table.

    :return: row sequence as produced by mysqlutils.SelectFromDB
    """
    # The original declared `global nCount, ListSqls` here, but this
    # function assigns neither — the declaration was a no-op and is gone.
    sSql = "SELECT `name`,`url` FROM `ams_journal`"
    return mysqlutils.SelectFromDB(sSql)


def main():
    """Download every volume page listed in ams_journal, then mark the
    processed rows as done in one DB batch.

    Maintenance notes (translated from the original author):
    1. Clear the download directory before re-running an update.
    2. After an update, verify the file count matches the row count of
       the ams_journal table.
    """
    # Fetch the volume urls from the journal table and download them;
    # get_list_url fills the module-level ListSqls as it goes.
    rows = SelectListFromDB()
    get_list_url(rows)
    # Execute all accumulated UPDATE statements in a single batch.
    mysqlutils.ExeSqlListToDB(ListSqls)


if __name__ == "__main__":
    main()
