#!/usr/bin/env python 
# -*- coding: utf-8 -*- 
# @Time : 2018-06-09 15:09 
# @Author : Leo
# @File : Step6_downloadArticleDetail_grequests.py
import json
import os
import random
import socket
import threading
import time
from urllib import parse

import facade
import requests
from bs4 import BeautifulSoup
from xjlibrary.mdatetime.mtime import getTodayDate
from xjlibrary.mprocesspoll.MThreadingRun import MThreadingRun
from xjlibrary.myredis.myredisclient import getDataFromRedis
from xjlibrary.our_file_dir.base_dir import BaseDir

# logging.basicConfig(level=logging.INFO,  # minimum level emitted
#                     format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
#                     datefmt='%d %b %H:%M:%S')

# Resolve the output directory <project root>/download/TandfJournal/bigjson
# and make sure it exists before any worker thread writes to it.
cur_path = BaseDir.get_file_dir_absolute(__file__)
TopPath = BaseDir.get_upper_dir(cur_path, -3)
filePath = BaseDir.get_new_path(TopPath, "download", "TandfJournal", "bigjson")
BaseDir.create_dir(filePath)
IPADDR = socket.gethostbyname(socket.gethostname())  # this host's primary IP
PID = os.getpid()  # worker process id

# Shared DB client + logger, configured from db.ini next to this script.
configfile = BaseDir.get_new_path(cur_path, "db.ini")
logger = facade.get_streamlogger()
mysqlutils = facade.MysqlUtiles(configfile, "db", logger=logger)

USER_AGENT_LIST = [
    'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 Edge/16.16299'
]
HEADERS = {
    'referer': 'https://www.tandfonline.com',
    # NOTE(review): random.choice runs once at import time, so every request
    # made by this process shares a single user-agent — confirm intended.
    'user-agent': random.choice(USER_AGENT_LIST)
}
# URL of the article abstract page
BASE_ABS_DOI_URL = 'https://www.tandfonline.com/doi/abs/{doi}'
# URL of the references page
BASE_REF_DOI_URL = 'https://www.tandfonline.com/doi/ref/{doi}?scroll=top'
# URL of the permissions tab (access required)
BASE_PERM_DOI_URL = 'https://www.tandfonline.com/doi/abs/{doi}?tab=permissions&scroll=top'
# URL of the cited-by page (open access)
BASE_CITA_DOI_URL = 'https://www.tandfonline.com/doi/citedby/{doi}?scroll=top&needAccess=true'


#
# # Database connection (legacy helper, superseded by the shared mysqlutils)
# def MajorDbConnect():
#     return MySqlDbConnect(cur_path, "TandfJournalConfig.ini")


def getTasksFromDB():
    """
    Fetch a batch of pending article DOIs from the database.

    Selects up to 100000 DOIs whose download has not succeeded (stat = 0)
    and that have failed fewer than 3 times, in random order.

    :return: list of DOI strings (empty when no rows matched)
    """
    sql = ("SELECT doi FROM article WHERE stat = 0 AND failcount < 3 "
           "ORDER BY RAND() LIMIT 100000")
    rows = mysqlutils.SelectFromDB(sql)
    # The original's `if rows` inside the comprehension was redundant:
    # iterating an empty result set already yields an empty list.
    return [row[0] for row in rows]


def setBaseUrl(doi):
    """
    Build the four tandfonline page URLs for one article DOI.

    :param doi: article DOI
    :return: tuple (abs_url, ref_url, perm_url, cita_url)
    """
    templates = (BASE_ABS_DOI_URL, BASE_REF_DOI_URL,
                 BASE_PERM_DOI_URL, BASE_CITA_DOI_URL)
    return tuple(template.format(doi=doi) for template in templates)


# Read the proxy-IP pool rows from the database
def proxyIpFromDB():
    """Return every row of the proxy_pool table (one proxy per row)."""
    return mysqlutils.SelectFromDB("SELECT proxy FROM proxy_pool")


def reqUrl(session, url, proxylist):
    """
    Generic GET helper built on facade.MRequestSn.

    :param session: requests session to reuse
    :param url: target URL
    :param proxylist: ring of proxies to rotate through
    :return: response body text on success, otherwise None
    """
    ok, _err, resp = facade.MRequestSn(sn=session,
                                       pageUrl=url,
                                       HEADERS=HEADERS,
                                       proxyRingList=proxylist)
    return resp.text if ok else None


def cleanUpHtmlDoc(raw_html_doc):
    """
    Strip characters that would break one-record-per-line storage.

    Replaces carriage returns, newlines and NUL bytes with spaces.

    :param raw_html_doc: raw HTML text
    :return: sanitized HTML text
    """
    # One C-level pass via translate instead of three chained .replace() calls.
    return raw_html_doc.translate(
        str.maketrans({"\r": " ", "\n": " ", "\0": " "}))


def getArticleAbsPage(session, url, proxy):
    """
    Download and sanitize the abstract page for one article.

    :param session: requests session
    :param url: abs page URL
    :param proxy: proxy ring list
    :return: cleaned HTML text, or None when the request failed
    """
    logger.info('ABS:' + url)
    abs_html = reqUrl(session, url, proxy)
    # reqUrl returns None on failure; the original passed that straight into
    # cleanUpHtmlDoc and crashed with AttributeError.
    if abs_html is None:
        return None
    return cleanUpHtmlDoc(abs_html)


def getArticleRefPage(session, url, proxy):
    """
    Download and sanitize the references page for one article.

    :param session: requests session
    :param url: ref page URL
    :param proxy: proxy ring list
    :return: cleaned HTML text, or None when the request failed
    """
    logger.info('Ref:' + url)
    ref_html = reqUrl(session, url, proxy)
    # reqUrl returns None on failure; the original passed that straight into
    # cleanUpHtmlDoc and crashed with AttributeError.
    if ref_html is None:
        return None
    return cleanUpHtmlDoc(ref_html)


def parseJurnalPermInfo(perm_html_doc):
    """
    Parse the permissions page and extract article metadata (title, authors,
    pages, publisher, publication date, ...) from the query string of the
    <a class="permissions"> link.

    :param perm_html_doc: permissions page HTML, or None
    :return: dict of article fields, or None when nothing could be parsed
    """
    if perm_html_doc is None:
        return None
    bsObj = BeautifulSoup(perm_html_doc, 'html.parser')
    issn_href = bsObj.find('a', {'class': 'permissions'})
    # find() returns None when the anchor is missing; the original relied on
    # a bare except to swallow the resulting TypeError (and anything else).
    # Guard explicitly instead so real bugs are not silenced.
    if issn_href is None or not issn_href.get('href'):
        return None
    parameter_str = parse.unquote(issn_href['href'].split('?')[-1])
    parameter_dict = parse.parse_qs(parameter_str)
    if not parameter_dict:
        return None
    # parse_qs maps each key to a list of values; join them for storage.
    article_info_dict = {key: ';'.join(values)
                         for key, values in parameter_dict.items()}
    logger.debug('Books Info: %s' % article_info_dict)
    return article_info_dict


def getArticlePermPage(session, url, proxy):
    """
    Fetch the permissions page and extract article metadata from it.

    :param session: requests session
    :param url: permissions page URL
    :param proxy: proxy ring list
    :return: dict of article fields ({} on any failure)
    """
    logger.info('Perm:' + url)
    perm_html = reqUrl(session, url, proxy)
    if not perm_html:
        return {}
    parsed = parseJurnalPermInfo(perm_html)
    return {} if parsed is None else parsed


def parseJurnalCitaInfo(cita_html_doc):
    """
    Parse the cited-by page and extract article metadata (title, authors,
    pages, publisher, publication date, ...) from the query string of the
    <a class="rightslink"> link.

    :param cita_html_doc: cited-by page HTML, or None
    :return: dict of article fields, or None when nothing could be parsed
    """
    if cita_html_doc is None:
        return None
    bsObj = BeautifulSoup(cita_html_doc, 'html.parser')
    issn_href = bsObj.find('a', {'class': 'rightslink', 'href': True})
    # find() returns None when the anchor is missing; the original relied on
    # a bare except to swallow the resulting TypeError (and anything else).
    # Guard explicitly instead so real bugs are not silenced.
    if issn_href is None or not issn_href.get('href'):
        return None
    parameter_str = parse.unquote(issn_href['href'].split('?')[-1])
    parameter_dict = parse.parse_qs(parameter_str)
    if not parameter_dict:
        return None
    # parse_qs maps each key to a list of values; join them for storage.
    article_info_dict = {key: ';'.join(values)
                         for key, values in parameter_dict.items()}
    logger.debug('Books Info: %s' % article_info_dict)
    return article_info_dict


def getArticleCitaPage(session, url, proxy):
    """
    Fetch the cited-by page and extract article metadata from it.

    :param session: requests session
    :param url: cited-by page URL
    :param proxy: proxy ring list
    :return: dict of article fields ({} on any failure)
    """
    logger.info('Cita:' + url)
    cita_html = reqUrl(session, url, proxy)
    if not cita_html:
        return {}
    parsed = parseJurnalCitaInfo(cita_html)
    return {} if parsed is None else parsed


# On multi-NIC hosts, pick the local IP that starts with the given prefix
def GetLocalIPByPrefix(prefix):
    """Return the last local IP starting with *prefix*, or '' if none match."""
    matches = [addr
               for addr in socket.gethostbyname_ex(socket.gethostname())[2]
               if addr.startswith(prefix)]
    # Preserve the original semantics: the LAST matching address wins.
    return matches[-1] if matches else ''


def todoOneArticleDoi(threadval, session, doi, jsonmsg, proxy):
    """
    Process one article DOI: download the abs/ref/perm (or cita) pages and
    either write the merged record to a big_json file (stat=2) or queue a
    failcount UPDATE for the failing stage.

    :param threadval: thread context carrying result_queue (SQL sink)
    :param session: ignored; a fresh session is created per task (see below)
    :param doi: article DOI
    :param jsonmsg: extra JSON payload stored alongside the record
    :param proxy: proxy ring list
    """
    result_queue = threadval.result_queue
    # Recreate the session for every task so a single long-lived connection
    # is not dropped by the server.
    session = requests.Session()
    logger.info('Start processing one doi: %s' % doi)
    abs_url, ref_url, perm_url, cita_url = setBaseUrl(doi)

    # NOTE(review): these UPDATEs are built by string interpolation; DOIs come
    # from our own DB, but a parameterized API would be safer if available.
    print("开始请求abs_html")
    abs_html = getArticleAbsPage(session, abs_url, proxy)
    if not abs_html:
        print("abs 请求失败 进行返回")
        sql = "UPDATE article SET failcount=failcount+1,`explain`='%s' WHERE doi='%s'" % ("abs", doi)
        result_queue.put(sql)
        # The message says "returning" but the original fell through and kept
        # requesting (and could even save a record with absHtml=None).
        return

    print("开始请求 ref_html")
    ref_html = getArticleRefPage(session, ref_url, proxy)
    if not ref_html:
        print("ref 请求失败 进行返回")
        sql = "UPDATE article SET failcount=failcount+1,`explain`='%s' WHERE doi='%s'" % ("ref", doi)
        result_queue.put(sql)
        return  # same fall-through defect as the abs stage

    print("开始请求per_dict")
    perm_dict = getArticlePermPage(session, perm_url, proxy)
    if not perm_dict:  # no permissions page: fall back to the cited-by page
        cita_dict = getArticleCitaPage(session, cita_url, proxy)
        perm_dict.update(cita_dict)
    # Fixed: the original printed "*********".format(perm_dict), which has no
    # placeholder and silently dropped the value.
    print("********* {}".format(perm_dict))
    if perm_dict:
        print("开始保存数据")
        one_article = dict()
        one_article['doi'] = doi
        one_article['down_date'] = getTodayDate()
        one_article['absHtml'] = abs_html
        one_article['refHtml'] = ref_html
        one_article.update(perm_dict)
        one_article['jsonmsg'] = jsonmsg
        one_article_dumps = json.dumps(one_article, ensure_ascii=False)

        nodeIP = GetLocalIPByPrefix('192.168.')
        nowDate = time.strftime('%Y%m%d', time.localtime())
        # One output file per (date, node, process, thread) so concurrent
        # writers never share a file handle.
        outPathFile = os.path.join(filePath,
                                   '%s_%s_%d_%d.big_json' % (
                                       nowDate, nodeIP, os.getpid(), threading.get_ident())
                                   )
        # exist_ok avoids the exists()/makedirs() race when 40 threads
        # reach this point at the same time.
        os.makedirs(filePath, exist_ok=True)
        print("out file is %s " % outPathFile)
        write2BigJson(outPathFile, one_article_dumps)
        Sql = "UPDATE article SET stat=2 WHERE doi='%s'" % doi
        result_queue.put(Sql)
    else:
        print("perm_dict 请求失败 进行返回没有 perm_dict")
        # NOTE(review): stat stays -1 on failure, so failed tasks are never
        # re-selected by UpdateTaskPool (stat=0 filter) — confirm an external
        # job resets them.
        sql = "UPDATE article SET failcount=failcount+1,`explain`='%s' WHERE doi='%s'" % ('perm', doi)
        result_queue.put(sql)


def write2BigJson(file_path, article_line):
    """Append one serialized article record, newline-terminated, to the file."""
    with open(file=file_path, mode="a", encoding="utf-8") as out:
        out.write("%s\n" % article_line)
    logger.info('Write to the big_json successfully!')


# Refresh the proxy pool (proxies come from Redis)
def UpdateProxyPool():
    """Return the current proxy list read from Redis."""
    proxy_pool = list(getDataFromRedis(cur_path, "TandfJournalConfig.ini"))
    print('****** ProxyPool: %d' % len(proxy_pool))
    return proxy_pool


# Refresh the task pool (claim a batch of tasks from the database)
def UpdateTaskPool():
    """
    Claim up to 300 pending articles.

    Selects rows with stat=0 and failcount<4, then marks them stat=-1 so
    other workers skip them.

    :return: DB rows of (doi, jsonmsg); empty when nothing is pending
    """
    print('UpdateTaskPool ...')
    sql = "SELECT `doi`,`jsonmsg` FROM article WHERE stat=0 AND failcount<4 ORDER BY `explain`  limit 300;"
    rows = mysqlutils.SelectFromDB(sql)
    dois = [row[0] for row in rows]
    if not dois:
        return rows
    # Build the IN list explicitly. The original interpolated str(tuple(dois))
    # and padded single-element batches with a dummy "test" doi to dodge the
    # one-tuple trailing comma; this handles any batch size directly.
    # NOTE(review): still string-built SQL — safe only while DOIs contain no
    # quotes; prefer a parameterized API if mysqlutils offers one.
    in_clause = ",".join("'%s'" % doi for doi in dois)
    mysqlutils.ExeSqlToDB("update article set stat=-1 where doi in (%s)" % in_clause)
    return rows


# Flush a batch of SQL statements to the database
def InsertIntoDbFromList(ListSqls):
    # Thin wrapper over the shared mysqlutils client; executes every statement
    # in ListSqls (the UPDATEs accumulated from the workers' result_queue).
    mysqlutils.ExeSqlListToDB(ListSqls)


class ArchiveThreadRun(MThreadingRun):
    """Thread-pool runner wiring the article downloader into MThreadingRun."""

    def __init__(self, num):
        super().__init__(num)

    def getTask(self, *args, **kwargs):
        """Claim a batch of (doi, jsonmsg) rows from the database."""
        return UpdateTaskPool()

    def setTask(self, results=None, *args, **kwargs):
        """Queue one job per claimed row; back off 30s when nothing pending."""
        if not results:
            time.sleep(30)
            return
        for row in results:
            self.add_job(self.func, "", row[0], row[1])

    def dealresult(self, *args, **kwargs):
        """Flush the accumulated SQL statements to the database."""
        InsertIntoDbFromList(self.results)

    def setProxy(self, proxysList=None):
        """Refresh the proxy list from Redis (the argument is ignored)."""
        MThreadingRun.setProxy(self, getDataFromRedis(cur_path, 'db.ini'))
        time.sleep(10)

    def is_break(self):
        return True

    def thread_pool_hook(self, thread_pool_dicts, thread, args, kwargs) -> dict:
        return {}

    def fun(self, threadval, *args, **kwargs):
        """Worker entry: unpack (session, doi, jsonmsg) and process one DOI."""
        session, doi, jsonmsg = args
        todoOneArticleDoi(threadval, session, doi, jsonmsg, self.list_proxy)


def main():
    """Spin up the 40-thread download runner."""
    ArchiveThreadRun(40).run()


if __name__ == "__main__":
    main()
