"""
通过期获取文章，获取卷和期合并在了第三步
"""
import json
import random
import re
import time
import traceback
from urllib import parse

import facade
import requests
from bs4 import BeautifulSoup
from xjlibrary.configread import MyConfigParser
from xjlibrary.mprocesspoll.MThreadingRun import MThreadingRun
from xjlibrary.myredis.myredisclient import getDataFromRedis
from xjlibrary.our_file_dir import BaseDir

# Absolute directory containing this script; db.ini is expected to sit next to it.
curPath = BaseDir.get_file_dir_absolute(__file__)
configfile = BaseDir.get_new_path(curPath, "db.ini")

# Extract a value from a dict by key (PEP 8 E731: a def instead of an assigned lambda).
def dealDictValueFunc(key, temp_dict, default=''):
    """Return ``temp_dict[key]`` when possible, else ``default``.

    :param key: dictionary key to look up
    :param temp_dict: candidate mapping; any non-dict value yields ``default``
    :param default: value returned on a miss ('' keeps old call sites working)
    :return: the mapped value or ``default``
    """
    if isinstance(temp_dict, dict) and key in temp_dict:
        return temp_dict[key]
    return default


class DownIssue(object):
    """Download per-issue article metadata from tandfonline.com.

    Each issue page is parsed for article DOI / page range / publish date /
    ISSN, and the results are turned into INSERT/UPDATE SQL strings that the
    caller pushes onto a result queue for batched execution.
    """

    def __init__(self):
        self.logger = facade.get_streamlogger()
        self.mysqlutils = facade.MysqlUtiles(configfile, "db", logger=self.logger)
        # Log-file path for failed article requests (path only; no writes here).
        self.req_error_log = BaseDir.get_new_path(curPath, "log", "reqArticleErrorLog.log")
        # db.ini parsed into a plain dict, preserving key case.
        self.ini_dicts = MyConfigParser().set_config_path(
            configfile).get_configparser().set_keep_keys_case().read_config_string(
            'utf-8').conf_to_dict()
        self.USER_AGENT_LIST = [
            'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36',
            'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko',
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 Edge/16.16299'
        ]
        self.HEADERS = {
            'referer': 'https://www.tandfonline.com',
            # One user-agent is picked per DownIssue instance and reused.
            'user-agent': random.choice(self.USER_AGENT_LIST)
        }
        self.BASE_URL = 'https://www.tandfonline.com'

    def init_sn(self):
        """Create a fresh requests session carrying the default headers.

        A long-lived session can have its connection closed by the remote
        server, so a new session is created per task.
        """
        self.session = requests.Session()
        self.session.headers = self.HEADERS

    def get_list_url_run(self, threadval, ringproxy, urltuple):
        """Fetch one issue page and queue the resulting SQL statements.

        :param threadval: thread-local object exposing ``result_queue``
        :param ringproxy: proxy ring handed through to the request helper
        :param urltuple: (issue_url, volume, jname, jid, year, issue, pages)
            row as selected by :meth:`getTasksFromDB`
        """
        result_queue = threadval.result_queue
        self.init_sn()
        each_url = urltuple[0]
        articles = self.parseArticleItems(self.session, each_url, ringproxy)
        if articles:
            for article in articles:
                dictmsg = {
                    "vol": urltuple[1],
                    "jname": urltuple[2],
                    "jid": urltuple[3],
                    "date": urltuple[4],
                    "issue": urltuple[5],
                    "issn": article["issn"],
                    "page": article["page"],
                    "datetime": article["datetime"],
                    "doi": article["doi"],
                }
                jsonmsg = json.dumps(dictmsg)
                # NOTE(review): SQL is built by string formatting -- only the
                # single quotes in jsonmsg are escaped; doi/issuurl are not.
                # Safe only while DOIs/urls never contain quotes; parameterized
                # queries would be preferable if ExeSqlListToDB supported them.
                Sql = "INSERT INTO article(`doi`, `jsonmsg`,`num`,`issuurl`) VALUES('{doi}', '{jsonmsg}','{num}','{issuurl}') on DUPLICATE key update `num`={num},`jsonmsg`='{jsonmsg}',`issuurl`='{issuurl}'"
                Sql = Sql.format(doi=article['doi'], jsonmsg=jsonmsg.replace("'", "\\'"), num=article['num'],
                                 issuurl=each_url)
                result_queue.put(Sql)
            # Mark the issue done only after every article row has been queued.
            Sql = "update vol set stat=1 where issue_url = '{}'".format(each_url)
            result_queue.put(Sql)
        else:
            # Count the failure; rows reaching failcount >= 5 are not retried.
            Sql = "update vol set failcount=failcount+1 where issue_url = '{}'".format(each_url)
            result_queue.put(Sql)

    def parseArticleItems(self, session, issue_url, ringproxy):
        """Fetch an issue page and extract per-article metadata.

        :param session: requests session to use
        :param issue_url: issue path, appended to ``self.BASE_URL``
        :param ringproxy: proxy ring for ``facade.MRequestSn``
        :return: list of dicts with keys doi/issn/page/datetime/num/issuurl,
                 or ``None`` when the request or parsing fails
        """
        try:
            BoolResult, errString, r = facade.MRequestSn(session,
                                                         self.BASE_URL + issue_url,
                                                         HEADERS=self.HEADERS,
                                                         timeout=30,
                                                         proxyRingList=ringproxy,
                                                         verify=False)
            if not BoolResult:
                # Was `return False`; None keeps the failure sentinel consistent
                # with every other branch (callers only truth-test the result).
                return None
            bsObj = BeautifulSoup(r.text, 'html.parser')
            articleContent = bsObj.find('div', {'class': 'tocContent'})
            if articleContent is None:
                print("articleContent is None")
                return None
            article_list = articleContent.find_all('td', {'valign': 'top'})
            if not article_list:
                print("article_list is None")
                return None
            article_doi_list = []
            num = len(article_list)
            for article in article_list:  # 2018-06-15: extraction rules updated
                article_dict = {'page': '', 'datetime': '', 'issn': '',
                                'num': num, 'issuurl': issue_url}
                try:
                    article_doi = article.find('li', {'class': 'value', 'data': True})['data']
                except (TypeError, KeyError):  # tag missing -> fall back to the article link
                    article_url = article.find("a", {'class': 'ref nowrap', 'href': True})["href"]
                    article_doi = article_url.replace("/doi/full/", "")
                article_dict['doi'] = article_doi
                pageTag = article.find("div", class_="tocPageRange maintextleft")
                if pageTag:
                    page = "".join(pageTag.stripped_strings).replace("Pages:", "").replace("Page:", "").strip()
                    article_dict['page'] = page
                dateTag = article.find("span", class_="maintextleft")
                if dateTag:
                    # Usually two strings; the publish date is the last one.
                    # If three or more ever appear, this needs rework.
                    for text in dateTag.stripped_strings:
                        article_dict['datetime'] = text
                linkout_tag = article.find('a', attrs={'href': re.compile(r'^/servlet/linkout')})
                if linkout_tag:
                    parameter_str = parse.unquote(linkout_tag['href'].split('?')[-1])
                    parameter_dict = parse.parse_qs(parameter_str)
                    # parse_qs maps each key to a LIST of values. Previously a
                    # missing 'issn' key produced ''[-1] -> IndexError, which
                    # the outer except turned into dropping the whole page.
                    issn_values = dealDictValueFunc('issn', parameter_dict)
                    if issn_values:
                        article_dict['issn'] = issn_values[-1]
                article_doi_list.append(article_dict)
            return article_doi_list
        except Exception:
            # Narrowed from a bare except so KeyboardInterrupt still propagates.
            print(traceback.format_exc())
            return None

    def getTasksFromDB(self):
        """Select up to 1000 pending issue rows: stat=0 first, then stat=-1 retries."""
        Sql = "SELECT issue_url,volume,jname,jid,`year`,issue,pages FROM vol WHERE stat = 0 AND failcount < 5 limit 1000"
        result = self.mysqlutils.SelectFromDB(Sql)
        if not result:
            # Fall back to previously-failed rows that still have retries left.
            Sql = "SELECT issue_url,volume,jname,jid,`year`,issue,pages FROM vol WHERE stat = -1 AND failcount < 5 limit 1000"
            result = self.mysqlutils.SelectFromDB(Sql)
        return result


class IssueThreadRun(MThreadingRun):
    """Thread-pool runner that feeds issue-url tasks to a DownIssue worker."""

    def __init__(self, num):
        super(IssueThreadRun, self).__init__(num)
        self.down = DownIssue()

    def getTask(self, *args, **kwargs):
        """Pull the next batch of pending issue rows from the database."""
        return self.down.getTasksFromDB()

    def setTask(self, results=None, *args, **kwargs):
        """Queue one job per issue row; idle for 30s when nothing is pending."""
        if not results:
            time.sleep(30)
            return
        for url_task in results:
            # One job per page/row.
            self.add_job(self.func, url_task)

    def dealresult(self, *args, **kwargs):
        """Flush the accumulated SQL statements to the database in one batch."""
        self.down.mysqlutils.ExeSqlListToDB(self.results)

    def setProxy(self, proxysList=None):
        """Refresh the proxy pool from redis, then pause before the next refresh."""
        proxy_pool = list(getDataFromRedis(curPath, 'db.ini'))
        MThreadingRun.setProxy(self, proxy_pool)
        time.sleep(30)

    def is_break(self):
        return True

    def thread_pool_hook(self, thread_pool_dicts, thread, args, kwargs) -> dict:
        return {}

    def fun(self, threadval, *args, **kwargs):
        """Worker body: process a single issue-url task tuple."""
        self.down.get_list_url_run(threadval, self.list_proxy, args[0])


def main():
    """Entry point: run the issue downloader with 40 worker threads."""
    IssueThreadRun(40).run()


if __name__ == '__main__':
    main()
