#!/usr/bin/env python 
# -*- coding: utf-8 -*- 
# @Time : 2018-06-14 9:49 
# @Author : Leo
# @File : countJournal.py 

"""
获取Tandfjournal的杂志总数量
"""

import requests
import pymysql
import os
import configparser
import logging
from requests.adapters import HTTPAdapter
from bs4 import BeautifulSoup

# Path to the crawler's INI config file, resolved against the current working
# directory — NOTE(review): not against this script's location, so the script
# must be launched from its own directory for this to resolve; confirm.
config_path = os.path.join('..', 'TandfJournalConfig.ini')

conf = configparser.ConfigParser()
# read() silently ignores a missing file; the conf.get() calls below would
# then raise configparser.NoSectionError.
conf.read(config_path, encoding='utf-8')

# MySQL connection settings from the [DB_CONFIG] section of the INI file.
host = conf.get('DB_CONFIG', 'host')
user = conf.get('DB_CONFIG', 'user')
passwd = conf.get('DB_CONFIG', 'passwd')
port = int(conf.get('DB_CONFIG', 'port'))  # INI values are strings
db = conf.get('DB_CONFIG', 'db')

# Site root (also used to warm up the session) and the per-category
# title-search listing URL template ({catid} is filled in per category).
BASE_URL = 'https://www.tandfonline.com'
LIST_FORMAT_URL = 'https://www.tandfonline.com/topic/{catid}?startPage=&content=title&target=titleSearch'

# Default headers sent on every request: referer plus a desktop-Chrome UA.
HEADERS = {
    'referer': 'https://www.tandfonline.com',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36'
}


# Fetch the category IDs from the database.
def getIDFromDB(conn):
    """Fetch all category ids from the ``subject`` table.

    :param conn: an open DB-API connection (pymysql)
    :return: list of catid values, with NULL entries filtered out
    """
    cursor = conn.cursor()
    try:
        cursor.execute("SELECT catid FROM subject")
        # Each row is a 1-tuple; keep the value, dropping NULL catids.
        # (The original tested the row tuple itself against None, which
        # never holds for rows returned by fetchall(), so NULLs leaked in.)
        return [row[0] for row in cursor.fetchall() if row[0] is not None]
    finally:
        cursor.close()  # close even if execute/fetch raises


# Fetch one random proxy IP from the proxy pool in the database.
def proxyIpFromDB():
    """Pick one random proxy from the ``proxy_pool`` table.

    :return: a requests-style proxies dict, or {} when the pool is empty
    """
    proxy_conn = pymysql.connect(host=host, user=user, passwd=passwd, db=db, charset='utf8mb4', port=port)
    logging.info('Getting proxy ip from DB....')
    try:
        cursor = proxy_conn.cursor()
        try:
            cursor.execute("SELECT proxy FROM proxy_pool ORDER BY RAND() LIMIT 1")
            proxy_pool = cursor.fetchall()
        finally:
            cursor.close()
    finally:
        # The original leaked one DB connection per call.
        proxy_conn.close()
    if proxy_pool:
        # NOTE(review): the https entry uses an 'https://' scheme, i.e. it
        # assumes the pooled proxies themselves speak TLS — confirm; plain
        # HTTP proxies are usually written as 'http://' for both keys.
        return {'http': 'http://' + proxy_pool[0][0],
                'https': 'https://' + proxy_pool[0][0]}
    return {}


def reqUrl(session, url):
    """Generic GET helper with one proxied retry on a non-200 status.

    :param session: requests.Session carrying the shared headers/cookies
    :param url: the URL to fetch
    :return: the Response object on HTTP 200, otherwise None
             (the original docstring claimed ``resp.text`` — it returns
             the Response itself; callers access ``.text`` on it)
    """
    try:
        resp = session.get(url, timeout=10)
        print('HTTP STATUS CODE: %s' % resp.status_code)
        if resp.status_code == 200:
            return resp
        # Non-200: retry once through a random proxy from the DB pool.
        resp = session.get(url, timeout=20, proxies=proxyIpFromDB())
        print('HTTP STATUS CODE WITH PROXY: %s' % resp.status_code)
        if resp.status_code == 200:
            return resp
        return None
    except Exception:
        # Was a bare ``except:`` (also swallowed KeyboardInterrupt /
        # SystemExit). Kept broad because proxyIpFromDB can raise DB errors
        # too, but now logs the traceback instead of hiding the cause.
        logging.exception('Request url failed: %s', url)
        return None


def getJournalCount(session, cat_url):
    """Scrape the article and journal counts for one category page.

    :param session: requests.Session to fetch with
    :param cat_url: category listing URL (LIST_FORMAT_URL with catid filled)
    :return: (article_count, journal_count) as ints, or None on failure
    """
    print(cat_url)
    resp = reqUrl(session, cat_url)
    if resp is None:
        return None
    bsObj = BeautifulSoup(resp.text, 'html.parser')
    count_tags = bsObj.findAll('span', {'class': 'search-tab-counts'})
    # Both tab counters are required: [0] articles, [1] journals. The
    # original only checked truthiness and raised IndexError when exactly
    # one span was present.
    if len(count_tags) < 2:
        return None
    # Counter text is wrapped in one leading/trailing character (parens),
    # stripped by [1:-1]; presumably large counts carry thousands
    # separators, so drop commas before int() — TODO confirm on site.
    article_count = count_tags[0].get_text()[1:-1].replace(',', '')
    journal_count = count_tags[1].get_text()[1:-1].replace(',', '')
    print(article_count)
    print(journal_count)
    return int(article_count), int(journal_count)


def start():
    """Entry point: total article/journal counts over every category id.

    Builds one warmed-up session, reads the category ids from the DB,
    scrapes each category's counters and prints the grand totals.
    """
    session = requests.Session()
    session.headers = HEADERS
    session.mount('http://', HTTPAdapter(max_retries=3))  # retry each request up to 3 times
    session.mount('https://', HTTPAdapter(max_retries=3))
    session.get(BASE_URL)  # warm up the session (cookies etc.) before scraping
    conn = pymysql.connect(host=host, user=user, passwd=passwd, db=db, charset='utf8mb4', port=port)
    try:
        catid_list = getIDFromDB(conn=conn)
    finally:
        # The original never closed the connection.
        conn.close()
    logging.info('Get %s fields from the database.' % len(catid_list))
    article_all, journal_all = 0, 0
    for each_catid in catid_list:
        first_url = LIST_FORMAT_URL.format(catid=each_catid)
        counts = getJournalCount(session, first_url)
        if counts is None:
            # getJournalCount returns None on request/parse failure; the
            # original crashed with TypeError when unpacking it.
            logging.warning('No counts for catid %s', each_catid)
            continue
        article_count, journal_count = counts
        article_all += article_count
        journal_all += journal_count
    print('各类别文章总数为：%s，期刊总数目为：%s' % (article_all, journal_all))


# Run the crawler only when executed as a script (not on import).
if __name__ == '__main__':
    start()
