import logging
import os
import traceback

import facade
import requests
from bs4 import BeautifulSoup
from xjlibrary.mrequest import baserequest
from xjlibrary.our_file_dir import BaseDir
from xjlibrary.tools.BaseUrl import BaseUrl

# Disable insecure-request warnings
# NOTE(review): comment only -- no urllib3.disable_warnings() call actually
# follows, yet POSTs below are sent with verify=False; confirm whether the
# suppression call was dropped by mistake.


curPath = BaseDir.get_file_dir_absolute(__file__)
TopPath = BaseDir.get_upper_dir(curPath, -2)
# Decade/volume archive pages are saved under <top>/download/cell/download/volume
sPath = BaseDir.get_new_path(TopPath, "download", "cell", "download", "volume")
BaseDir.create_dir(sPath)

absurl = []
BaseUrls = "https://www.cell.com"
# Headers used for the plain GET of each journal's /archive page.
HEADERS = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
           'Accept-Encoding': 'gzip, deflate, br', 'Connection': 'keep-alive',
           'Accept-Language': 'zh-CN,zh;q=0.9', 'Cache-Control': 'max-age=0',
           'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) '
                         'Chrome/66.0.3359.139 Safari/537.36', }
# Shared DB helper; connection settings come from db.ini next to this file.
mysqlutils = facade.MysqlUtiles(BaseDir.get_new_path(curPath, "db.ini"), "db", facade.get_streamlogger())

# Headers for the form-encoded POST to the /pb/widgets/loi/issues endpoint.
HEADERSPOST = {
    # ":authority": "www.cell.com",
    # ":method": "POST",
    # ":path": "/pb/widgets/loi/issues",
    # ":scheme": "https",
    "accept": "text/plain, */*; q=0.01",
    "accept-encoding": "gzip, deflate, br",
    "accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
    # "cache-control": "no-cache",
    # "pragma": "no-cache",
    # "referer": "https://www.cell.com/cell-host-microbe/archive",
    "content-type": "application/x-www-form-urlencoded; charset=UTF-8",
    "origin": "https://www.cell.com",
    "user-agent": "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.31 (KHTML, like Gecko) Chrome/26.0.1410.43 Safari/537.31",
    "x-requested-with": "XMLHttpRequest"
}
# Running count of journal rows processed so far (updated in get_list_url).
nCount = 0
# NOTE(review): ListSqls is declared global in SelectListFromDB but never
# written anywhere in this file -- appears to be dead state.
ListSqls = []


# def MajorDbConnect():
#     return MySqlDbConnect(curPath, "db.ini")


def SelectListFromDB():
    """Fetch all journals that have not been processed yet (stat=0).

    Returns:
        The rows of (name, url) tuples returned by mysqlutils.SelectFromDB;
        falsy when there is nothing left to do.
    """
    # Removed: unused `global ListSqls` declaration (never assigned here)
    # and dead commented-out connection code.
    sSql = "SELECT `name`,`url` FROM `journal` WHERE `stat`=0"
    return mysqlutils.SelectFromDB(sSql)


def UpdateSql(sql):
    """Execute *sql* against the journal DB and print a notice on success."""
    ok, _ = mysqlutils.ExeSqlToDB(sql)
    if ok:
        print("执行更新success")


# 测试文字输出到文本
def output(value, files='log.txt'):
    f = open(files, 'w', encoding='utf-8')
    f.write(value)
    f.close()


def save_file(outfile, r):
    """Persist the response body to *outfile* as GB18030 text.

    Does nothing when *outfile* already exists, so re-runs never
    overwrite previously downloaded pages.
    """
    if os.path.exists(outfile):
        return
    text = r.content.decode("GB18030")
    with open(outfile, mode='w', encoding='GB18030') as fp:
        fp.write(text)


def souphtml(r):
    """Extract the per-decade archive parameters from an archive page.

    Pulls three things out of the HTML:
      * the page's pbContext meta value (required by the issues POST),
      * the journalCode from the <form action> query string,
      * the decade URLs: the single "expanded" link plus every
        "collapsed" link.

    :param r: response object whose .text holds the archive page HTML
    :return: (pbContext, journalCode, url_list); ('', '', []) when the
             page has no expanded decade link.
    """
    soup = BeautifulSoup(r.text, 'lxml')
    form_action = soup.find("form")["action"]
    content = soup.find("meta", attrs={"name": "pbContext"})["content"]
    journalCode = BaseUrl.urlQuery2Dict(form_action)["journalCode"]

    expanded = soup.find("a", class_="expanded")
    if not expanded:
        return '', '', []

    listurl = [expanded["href"]]
    listurl.extend(a["href"] for a in soup.find_all("a", class_="collapsed"))
    return content, journalCode, listurl


def get_list_url(rows):
    """Download the per-decade issue lists for every journal in *rows*.

    For each (name, url) row:
      1. Rewrite the journal URL to its /archive page and GET it.
      2. Parse pbContext / journalCode / decade URLs via souphtml().
      3. POST to the loi/issues widget endpoint for each decade and save
         the returned HTML under sPath as "<name>_<decade>.html".
      4. Mark the journal row stat=1 on success, stat=-1 on fetch failure.

    :param rows: iterable of (name, url) tuples from the journal table
    """
    global absurl, nCount
    count = len(rows)
    sn = requests.session()
    for name, url in rows:
        urlsql = url
        # .../<journal>/<page>  ->  .../<journal>/archive
        list_url = url.split("/")
        list_url.pop()
        list_url.append("archive")
        url = BaseUrls + "/".join(list_url)
        BoolResult, errString, r = baserequest.BaseRequest(url,
                                                           sn=sn,
                                                           headers=HEADERS,
                                                           timeout=(30, 60))
        if not r or not BoolResult:
            # NOTE(review): the url value comes from our own DB, but this is
            # still string-built SQL -- prefer a parameterized query if the
            # mysqlutils API supports one.
            sql = "update `journal` set `stat`=-1 where `url`='{}'".format(urlsql)
            UpdateSql(sql)
            nCount = nCount + 1
            continue

        content, journalCode, urllist = souphtml(r)
        for url in urllist:
            decade = BaseUrl.urlQuery2Dict(url)["decade"]

            outfile = "{}/{}.html".format(sPath, name + "_" + decade)
            if os.path.exists(outfile):
                print("{}文件存在".format(outfile))
                sql = "update `journal` set `stat`=1 where `url`='{}'".format(urlsql)
                UpdateSql(sql)
                nCount = nCount + 1
                continue
            widget = BaseUrl.urlQuery2Dict(url)["widget"]
            posturl = "https://www.cell.com/pb/widgets/loi/issues"
            pageData = {
                "pbContext": content,
                "widgetId": widget,
                "journalCode": journalCode,
                "decade": decade
            }
            print(pageData)
            BoolResult, errString, r = baserequest.BaseRequestPost(posturl,
                                                                   sn=sn,
                                                                   data=pageData,
                                                                   endstring="",
                                                                   headers=HEADERSPOST,
                                                                   verify=False,
                                                                   timeout=(30, 60))
            if not BoolResult:
                print(errString)
                print("下载失败")
                # BUG FIX: r can be None when the POST fails entirely; the
                # old code unconditionally read r.status_code and crashed
                # with AttributeError instead of reporting the failure.
                if r is not None:
                    print(r.status_code)
                    print(r.content)
                    print(r.text)
                return
            if not os.path.exists(sPath):
                os.makedirs(sPath)
            if r:
                save_file(outfile, r)
        nCount = nCount + 1
        print("完成{ncount}个，总共{count}个，绝对路径{abs}个".format(ncount=nCount, count=count, abs=len(absurl)))
        sql = "update `journal` set `stat`=1 where `url`='{}'".format(urlsql)
        UpdateSql(sql)


def main(logger1: logging.Logger = None):
    """Process journal batches until no row with stat=0 remains.

    Each batch failure is printed and the loop continues with the next
    batch rather than aborting the whole crawl.

    :param logger1: optional logger, stored in the module-level `logger`
                    global; may be None.
    """
    global logger
    logger = logger1
    while True:
        rows = SelectListFromDB()
        if not rows:
            break
        try:
            get_list_url(rows)
        except Exception:
            traceback.print_exc()


"""
根据每本刊的url下载 进去不是直接的期列表 需要通过 Archive 这个地方点进去
所以先下载 解析出 archive的url 再访问 然后保存到文件  保存下来的页面有
期和卷,时间等的信息
"""
if __name__ == "__main__":
    main()
