"""
根据类别获取期刊名以及期刊url
"""
import random
import traceback

import facade
import math
import requests
from bs4 import BeautifulSoup
from xjlibrary.configread import MyConfigParser
from xjlibrary.myredis.myredisclient import getDataFromRedis
from xjlibrary.our_file_dir.base_dir import BaseDir

# Escape text for embedding in a single-quoted SQL string literal.
def dealSqlTextFunc(text):
    """Double up single quotes so *text* is safe inside a SQL '...' literal.

    Non-string input (including None) collapses to '' — callers rely on
    this to turn missing dict values into empty SQL strings.
    """
    return text.replace("'", "''") if isinstance(text, str) else ''

# Safe key lookup that tolerates non-dict containers.
def dealDictKVFunc(key, dict_temp):
    """Return ``dict_temp[key]`` when *dict_temp* is a dict holding *key*.

    Returns None when *dict_temp* is not a dict or the key is absent.
    """
    if isinstance(dict_temp, dict):
        return dict_temp.get(key)
    return None

# Directory containing this file, and the db.ini config expected next to it.
curPath = BaseDir.get_file_dir_absolute(__file__)
configfile = BaseDir.get_new_path(curPath, "db.ini")


class DownJournal(object):
    """Crawl journal names/URLs per subject category from tandfonline.com.

    Workflow: read pending categories from the ``subject`` table, page
    through each category's title-search listing, save the journals found
    into the ``journal`` table, and track per-category progress
    (downpage/maxpage/alljournalnum/stat/failcount) in ``subject``.
    """

    def __init__(self):
        self.logger = facade.get_streamlogger()
        self.mysqlutils = facade.MysqlUtiles(configfile, "db", logger=self.logger)
        self.req_error_log = BaseDir.get_new_path(curPath, "log", "reqArticleErrorLog.log")
        self.HOME_URL = 'https://www.tandfonline.com'
        self.USER_AGENT_LIST = [
            'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36',
            'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko',
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 Edge/16.16299'
        ]
        self.HEADERS = {
            'referer': 'https://www.tandfonline.com',
            # One user-agent picked at random per process run.
            'user-agent': random.choice(self.USER_AGENT_LIST)
        }
        # Number of journals requested per listing page.
        self.page_size = 50

        self.LIST_FORMAT_URL = 'https://www.tandfonline.com/topic/{ssid}?content=title&target=titleSearch&pageSize={pagesize}&subjectTitle=&startPage={pagenum}'

        self.init_sn()

        # db.ini parsed into nested dicts; ini_dicts["other"]["is_all"]
        # switches between full-refresh ("true") and incremental modes.
        self.ini_dicts = MyConfigParser().set_config_path(
            configfile).get_configparser().set_keep_keys_case().read_config_string(
            'utf-8').conf_to_dict()

    def init_sn(self):
        """(Re)create the HTTP session and request the home page to obtain
        session cookies before hitting listing pages."""
        self.session = requests.Session()
        self.session.get(self.HOME_URL, headers=self.HEADERS)

    def get_work(self):
        """Fetch pending categories from the ``subject`` table.

        :return: rows of (catid, downpage, maxpage, alljournalnum) for
            categories not yet finished (stat=0) with fewer than 9 failures
        """
        sql_str = "SELECT catid,downpage,maxpage,alljournalnum FROM `subject` WHERE stat=0 AND failcount < 9"
        result_raw = self.mysqlutils.SelectFromDB(sql_str)
        return result_raw

    def getStartOrNextListPageUrl(self, ssid, page_num=0):
        """Build the listing URL for one page of a category.

        :param ssid: subject (category) id
        :param page_num: zero-based page number
        :return: fully formatted listing URL
        """
        return self.LIST_FORMAT_URL.format(ssid=ssid, pagenum=page_num, pagesize=self.page_size)

    def parseOnelistPage(self, html):
        """Parse one listing page.

        :param html: HTML text of the listing page
        :return: (list of {'name', 'url'} dicts for this page,
                  max page count for the category, total journal count)
        """
        soup = BeautifulSoup(html, 'html.parser')
        names_content = soup.find('ol', {'class': 'browse-results'})
        # Total journal count lives in the second "search-tab-counts" span,
        # wrapped in parentheses, e.g. "(123)".
        spantag = soup.find_all("span", class_="search-tab-counts")
        alljournalnum = int("".join(spantag[1].stripped_strings).replace("(", "").replace(")", ""))
        journal_names_list = names_content.find_all('a', {'class': 'ref'})
        one_page_result = []
        for item in journal_names_list:
            one_page_result.append({
                'name': item.get_text(),
                'url': item['href'],
            })

        # The page exposes no "has next" flag, so derive the page count
        # from the total number of journals.
        max_page_num = math.ceil(int(alljournalnum) / self.page_size)
        return one_page_result, max_page_num, alljournalnum

    def getJournalList(self, ssid, first_url, alljournalnumsql):
        """Download and process a category starting from its first page.

        :param ssid: subject id
        :param first_url: URL of the category's first listing page
        :param alljournalnumsql: journal count previously stored in the DB;
            in incremental mode an unchanged count means nothing to do
        :return: True when the whole category was crawled successfully
        """
        self.init_sn()
        try:
            BoolResult, errString, r = facade.BaseRequest(first_url,
                                                          sn=self.session,
                                                          mark='searchResultContainer',
                                                          timeout=30,
                                                          verify=False)
            if BoolResult:
                one_page_result, max_page_num, alljournalnum = self.parseOnelistPage(r.text)
                # Incremental mode: skip categories whose journal count has
                # not changed since the last run.
                if not self.ini_dicts["other"]["is_all"] == "true":
                    if alljournalnum == alljournalnumsql:
                        return True
                # Save page 1's journals, then record progress in `subject`.
                self.save2DB(one_page_result, ssid)
                self.updatenum2DB(alljournalnum, ssid, max_page_num, 1)
            else:
                max_page_num, one_page_result = 0, []
                self.updatenum2DB(0, ssid, max_page_num, 0)
                return False

            if max_page_num == 1:  # single-page category: already done
                return True
            return self.get_page_list(1, ssid, alljournalnum, max_page_num)
        except Exception:
            print(traceback.format_exc())
            return False

    def get_page_list(self, startpage, ssid, alljournalnum, max_page_num):
        """Crawl pages [startpage, max_page_num) of a category.

        :param startpage: zero-based page to resume from
        :return: True when the final page was reached, False on any failure
        """
        page = 0  # keeps the completion check defined for an empty range
        for page in range(startpage, max_page_num):
            print("{}总量为{};总页码为{};当前页码{}".format(ssid, alljournalnum, max_page_num, page))
            next_page_url = self.getStartOrNextListPageUrl(ssid=ssid, page_num=page)
            BoolResult, errString, r = facade.BaseRequest(next_page_url,
                                                          sn=self.session,
                                                          mark='searchResultContainer',
                                                          timeout=30,
                                                          verify=False)
            if not BoolResult:
                return False
            # max_page_num/alljournalnum are refreshed from every page, so
            # the completion check below uses the latest server-side count
            # (the loop bounds themselves were fixed when range() was built).
            one_page_result, max_page_num, alljournalnum = self.parseOnelistPage(r.text)
            self.save2DB(one_page_result, ssid)
            # "null" => leave the stored alljournalnum untouched.
            self.updatenum2DB("null", ssid, max_page_num, page + 1)
        if page + 1 == max_page_num:
            return True
        else:
            return False

    def control(self):
        """Main driver: resume unfinished categories, then (re)crawl each."""
        catid_list = self.get_work()
        for each_id, downpage, maxpage, alljournalnum in catid_list:
            if downpage:
                if downpage == maxpage:
                    self.updateDBSubject(each_id, True)
                else:
                    # Resume from the last page recorded in the DB.
                    self.init_sn()
                    status = self.get_page_list(downpage - 1, each_id, alljournalnum, maxpage)
                    self.updateDBSubject(each_id, status)

            # Crawl the category from its first listing page.
            first_url = self.getStartOrNextListPageUrl(each_id)
            status = self.getJournalList(each_id, first_url, alljournalnum)

            self.updateDBSubject(each_id, status)
        if not self.ini_dicts["other"]["is_all"] == "true":
            # Incremental run finished: reset all categories for the next pass.
            sql = "UPDATE subject SET stat=0,failcount=0"
            self.mysqlutils.ExeSqlToDB(sql)

    def save2DB(self, journal_dict_list, ssid):
        """Insert journal rows into the ``journal`` table.

        Full mode ("is_all" == "true") uses REPLACE so existing rows are
        refreshed; incremental mode uses INSERT IGNORE to keep existing rows.
        :param journal_dict_list: list of {'name': ..., 'url': ...} dicts
        :param ssid: subject id stored in the subcatid column
        """
        # The SQL template only depends on the run mode — hoist it out of the loop.
        if self.ini_dicts["other"]["is_all"] == "true":
            sql_str = "replace INTO journal(`url`,`jid`,`name`,`subcatid`) VALUES('{url}','{jid}','{name}','{subcatid}')"
        else:
            sql_str = "insert ignore INTO journal(`url`,`jid`,`name`,`subcatid`) VALUES('{url}','{jid}','{name}','{subcatid}')"
        SqlList = []
        for item in journal_dict_list:
            # Journal id = the URL path with its fixed route pieces stripped.
            jid = item['url'].replace('/loi/', '').replace('/current', '').replace('/toc/', '')
            Sql = sql_str.format(url=dealSqlTextFunc(dealDictKVFunc('url', item)), jid=jid,
                                 name=dealSqlTextFunc(dealDictKVFunc('name', item)),
                                 subcatid=ssid)
            SqlList.append(Sql)

        self.mysqlutils.ExeSqlListToDB(SqlList)

    def updatenum2DB(self, allnum, catid, max_page_num, downpagenum):
        """Update crawl progress for a category in the ``subject`` table.

        :param allnum: total journal count, or the sentinel string "null"
            to leave the stored alljournalnum untouched
        :param catid: subject id
        :param max_page_num: total page count for the category
        :param downpagenum: number of pages downloaded so far
        """
        if allnum == "null":
            Sql = "update `subject` set maxpage={maxpage},downpage={downpage} where catid='{catid}'".format(
                catid=catid, maxpage=max_page_num, downpage=downpagenum)
        else:
            Sql = "update `subject` set `alljournalnum`={alljournalnum},maxpage={maxpage},downpage={downpage} where  catid='{catid}'".format(
                catid=catid, alljournalnum=allnum, maxpage=max_page_num, downpage=downpagenum)

        self.mysqlutils.ExeSqlToDB(Sql)

    def updateDBSubject(self, subject_id, status):
        """Mark a category done (stat=1) or bump its failure counter.

        Bug fix: the failure branch previously omitted the ``% subject_id``
        substitution, so the literal string '%s' was used as catid, no row
        was ever updated, and failing categories retried forever.
        """
        if status:
            Sql = "UPDATE subject SET stat=1 WHERE catid='%s'" % subject_id
        else:
            Sql = "UPDATE subject SET failcount=failcount+1 WHERE catid='%s'" % subject_id
        self.mysqlutils.ExeSqlToDB(Sql)

    def get_proxy(self):
        """Fetch proxy settings from Redis using the db.ini configuration."""
        return getDataFromRedis(curPath, 'db.ini')


def main():
    """Entry point: build the downloader and run the full crawl."""
    DownJournal().control()


if __name__ == '__main__':
    # Used when updating the journal catalogue.
    main()
