import logging
import sys

import facade
from bs4 import BeautifulSoup
from xjlibrary.mrequest import baserequest
from xjlibrary.our_file_dir import BaseDir

# Absolute directory containing this script; used below to locate db.ini.
curPath = BaseDir.get_file_dir_absolute(__file__)

nCount = 0  # counter; referenced via `global` in InsertIntoDbFromList but never updated in this file
ListSqls = []  # queue of SQL statement strings, flushed to the DB by InsertIntoDbFromList()
list_failed = []  # "name:url" strings for nav entries missing a title or href
BaseUrl = "https://www.cell.com"
# Browser-like request headers so the site serves the normal desktop homepage.
HEADERS = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
           'Accept-Encoding': 'gzip, deflate, br', 'Connection': 'keep-alive',
           'Accept-Language': 'zh-CN,zh;q=0.9', 'Cache-Control': 'max-age=0',
           'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36', }
# NOTE(review): connects to the database at import time, reading db.ini from
# this script's directory — importing this module has side effects.
mysqlutils = facade.MysqlUtiles(BaseDir.get_new_path(curPath, "db.ini"), "db", facade.get_streamlogger())


def get_url(url):
    """Fetch *url* through baserequest.BaseRequest and return the response.

    On failure the whole process is terminated with exit code -1, so callers
    may assume the returned response object is valid.
    """
    ok, err_string, response = baserequest.BaseRequest(
        url,
        verify=False,
        headers=HEADERS,
        timeout=(30, 60),
    )
    if not ok:
        print("请求失败 检查")
        sys.exit(-1)
    print("首页下载成功")
    return response


def down_and_parse(url):
    """Download a journal-family page and queue INSERTs for its journals.

    Called from souphtml() for the special "Molecular Therapy Family" page,
    whose own nav menu lists the family's journals.  The first four entries
    of menu group 0 are taken as journals; for each one an
    INSERT ... ON DUPLICATE KEY UPDATE `stat`=0 statement is appended to the
    module-level ``ListSqls`` queue (executed later by InsertIntoDbFromList).
    """
    r = get_url(url)
    soup = BeautifulSoup(r.content.decode("utf-8"), "lxml")
    nav_tag = soup.find('nav', attrs={"role": "navigation"})
    ul_tag = nav_tag.find("ul", class_="menubar rlist--inline")
    li_tag_0 = ul_tag.find("li", id="menu-item-main-menu-0")
    li_all = li_tag_0.ul.find_all("li", attrs={"tabindex": "-1"})
    # Only the first four entries of this menu are journals of the family.
    li_all = li_all[:4]
    for li_tag_url in li_all:
        url = li_tag_url.a["href"]
        title = li_tag_url.a["title"]
        # Escape single quotes so a title/href containing ' cannot break
        # (or inject into) the SQL statement executed later.
        sql = "INSERT INTO journal(`name`, `url`) VALUES('{name}', '{url}') on DUPLICATE key update `stat`=0".format(
            name=title.replace("'", "''"), url=url.replace("'", "''"))
        ListSqls.append(sql)


def souphtml(r):
    """Parse the cell.com homepage nav bar and queue journal SQL statements.

    Walks menu groups 1-3 of the top navigation and, for every journal link,
    appends an INSERT ... ON DUPLICATE KEY UPDATE statement to the
    module-level ``ListSqls``:
      * "Molecular Therapy Family": stored with stat=0, and its page is
        additionally downloaded/parsed via down_and_parse() because it lists
        four more journals.
      * absolute "http..." links: stored with stat=2 (external site, no
        further crawling needed).
      * all other entries with both a title and href: stored with stat=0
        (to be crawled).
    Entries missing a title or href are recorded in ``list_failed``.
    """
    # Start parsing the downloaded homepage.
    soup = BeautifulSoup(r.text, 'lxml')
    nav_tag = soup.find('nav', attrs={"role": "navigation"})
    ul_tag = nav_tag.find("ul", class_="menubar rlist--inline")
    li_tag_1 = ul_tag.find("li", id="menu-item-main-menu-1")
    li_tag_2 = ul_tag.find("li", id="menu-item-main-menu-2")
    li_tag_3 = ul_tag.find("li", id="menu-item-main-menu-3")
    allnum = 0
    for li in [li_tag_1, li_tag_2, li_tag_3]:
        li_all = li.ul.find_all("li", attrs={"tabindex": "-1"})
        for li_tag_url in li_all:
            url = li_tag_url.a["href"]
            title = li_tag_url.a["title"]
            print(url)
            print(title)
            # Escape single quotes so titles/hrefs containing ' cannot break
            # (or inject into) the SQL statements built below.
            safe_title = title.replace("'", "''")
            safe_url = url.replace("'", "''")

            if title == "Molecular Therapy Family":
                sql = "INSERT INTO journal(`name`, `url`, `stat`, `explain`) VALUES('{name}', '{url}',{stat},'{explain}') on DUPLICATE key update `stat`={stat},`explain`='{explain}'".format(
                    name=safe_title, url=safe_url, stat=0, explain="本页特殊，进入页面后有4本刊，需要单独解析,但现在该url本身也是一本刊了")
                ListSqls.append(sql)
                # Download and parse this special page (it lists 4 journals).
                down_and_parse(BaseUrl + url)
                continue
            if url.find("http") != -1:
                # Absolute URL leaves this site: record it with stat=2 so it
                # is never crawled.
                sql = "INSERT INTO journal(`name`, `url`, `stat`, `explain`) VALUES('{name}', '{url}',{stat},'{explain}') on DUPLICATE key update `stat`={stat},`explain`='{explain}' ".format(
                    name=safe_title, url=safe_url, stat=2, explain="跳出本网站，不需要解析")
                ListSqls.append(sql)
                continue
            # NOTE: "STAR Protocols" and "Cell Reports Physical Science" used
            # to be special-cased here with stat=2 (no issues inside them);
            # that handling is no longer needed.
            if title and url:
                sql = "INSERT INTO journal(`name`, `url`) VALUES('{name}', '{url}') on DUPLICATE key update `stat`=0 ".format(
                    name=safe_title, url=safe_url)
                ListSqls.append(sql)
                allnum += 1
            else:
                # Incomplete entry: keep it for later inspection.
                string = "{name}:{url}".format(name=title, url=url)
                list_failed.append(string)
                print(list_failed)


# Flush the queued statements into the database.
def InsertIntoDbFromList():
    """Execute every SQL statement queued in ``ListSqls``, then reset the queue.

    Uses the module-level ``mysqlutils`` connection created at import time.
    (Removed an unused ``global nCount`` declaration: nCount was never read
    or written inside this function.)
    """
    global ListSqls
    mysqlutils.ExeSqlListToDB(ListSqls)
    ListSqls = list()


def main(logger1: logging.Logger = None):
    """Entry point: download the cell.com homepage, parse the journal links
    from its nav bar, and write the resulting SQL statements to the DB.

    The optional *logger1* is stored in the module-level ``logger`` global
    for use elsewhere.
    """
    global logger
    logger = logger1
    homepage = get_url(BaseUrl)
    souphtml(homepage)
    InsertIntoDbFromList()


"""
下载首页，解析出刊的url 将有效的刊状态置0 不需要下载的置2 
"""
if __name__ == "__main__":
    main()
