#!/usr/bin/env python 
# -*- coding: utf-8 -*- 
# @Time : 2018-04-19 17:27 
# @Author : Woolei
# @File : Step1_OneStep2GetData.py


"""
根据书籍的类别下载
请求页面为类别的列表页面，列表页面中的返回信息就已经包括了该书籍的所有信息
请求方式post，Ajax返回json数据
https://www.taylorfrancis.com/
"""

import datetime
import json
import logging
import os
import random
import time
import traceback
from multiprocessing.pool import Pool

import facade
import requests
from urllib3.exceptions import InsecureRequestWarning
from xjlibrary.mrequest.baserequest import BaseRequestPost
from xjlibrary.our_file_dir import BaseDir

# --- Module-level setup (runs on import) ---
# Resolve the output directory for the raw JSON dumps and make sure it exists.
curPath = BaseDir.get_file_dir_absolute(__file__)
TopPath = BaseDir.get_upper_dir(curPath, -2)  # presumably climbs two directory levels — verify against xjlibrary
dirPath = BaseDir.get_new_path(TopPath, "download", "TandBook", "download", "big_json")
BaseDir.create_dir(dirPath)
# MySQL helper configured from db.ini next to this script (section "db1").
configfile = BaseDir.get_new_path(curPath, "db.ini")
mysqlutils = facade.MysqlUtiles(configfile,
                                "db1",
                                logger=facade.get_streamlogger())
# File logger used by the request helpers below (module `logging` is also used elsewhere).
logger = facade.get_filelogger("./logs")
# Requests are made with verify=False, so silence the insecure-request warnings.
requests.urllib3.disable_warnings(InsecureRequestWarning)
# All subject categories: code -> display name.
# Each code is substituted into the payload's categories.code.raw filter to
# crawl one subject at a time (see setPayloadField).
SUBJECTS_DICT = {
    'SCAS': 'Area Studies',
    'SCAR': 'Arts',
    'SCBE': 'Behavioral Sciences',
    'SCBS': 'Bioscience',
    'SCBU': 'Built Environment',
    'SCCS': 'Communication Studies',
    'SCCM': 'Computer Science',
    'SCDS': 'Development Studies',
    'SCEA': 'Earth Sciences',
    'SCEB': 'Economics, Finance, Business & Industry',
    'SCED': 'Education',
    'SCEC': 'Engineering & Technology',
    'SCAG': 'Environment & Agriculture',
    'SCFS': 'Food Science & Technology',
    'SCGE': 'Geography',
    'SCHS': 'Health and Social Care',
    'SCHU': 'Humanities',
    'SCIF': 'Information Science',
    'SCLA': 'Language & Literature',
    'SCLW': 'Law',
    'SCMA': 'Mathematics & Statistics',
    'SCME': 'Medicine, Dentistry, Nursing & Allied Health',
    'SCAH': 'Museum and Heritage Studies',
    'SCPC': 'Physical Sciences',
    'SCPI': 'Politics & International Relations',
    'SCRF': 'Reference & Information Science',
    'SCSN': 'Social Sciences',
    'SCSL': 'Sports and Leisure',
    'SCSP': 'Tourism, Hospitality and Events'
}
# POST payload template: each request returns 10 records (limit=10).
# NOTE: setPayloadField() mutates this dict IN PLACE before every request:
# the two datePublication timestamps, the categories.code.raw slot and
# `offset` are rewritten. Safe per-process under multiprocessing, but not
# thread-safe.
PAYLOAD_DATA = {"keyword": "", "limit": 10, "offset": 0, "scoreOffset": 0.5,
                "sortCriteria": [{"type": "relevance", "order": "desc", "fields": ["_score", "datePublication"]}],
                "outputFields": ["categories", "classifications", "coverImages", "dacKey", "datePublication",
                                 "description", "edition", "firstPublishedOn", "formats.bindingStyle",
                                 "formats.bindingStyleCode", "formats.coverImages", "formats.datePublication",
                                 "formats.isbn13", "formats.isbnPdf", "formats.isbnEpub3", "formats.isbnEpub",
                                 "formats.isbnMobi", "formats.licensedEntities", "formats.status", "formats.statusCode",
                                 "formats.versionType", "formats.versionTypeCode", "imprint", "isbn13", "meta.abstract",
                                 "meta.contributors", "meta.doi", "meta.pdfSize", "meta.span", "meta.subtitle",
                                 "meta.title", "originators", "pages", "pdfSize", "subjectGroup", "subtitle", "title"],
                "fieldConfig": [{"name": "formats.isbn13", "boost": "40"}, {"name": "formats.isbnPdf", "boost": "40"},
                                {"name": "formats.isbnPdfFree", "boost": "40"},
                                {"name": "formats.isbnEpub3", "boost": "40"},
                                {"name": "formats.isbnEpub", "boost": "40"},
                                {"name": "formats.isbnMobi", "boost": "40"}, {"name": "formats.isbnDx", "boost": "40"},
                                {"name": "formats.classifications.classifications.stringValue", "boost": "40"},
                                {"name": "formats.keywords", "boost": "20"}, {"name": "keywords", "boost": "20"}],
                "filter": {"must": [{"range": {"datePublication": {"lte": "2018-07-27T15:23:41+08:00"}}}, {
                    "nested": {"path": "formats", "query": {"bool": {
                        "must": [{"term": {"formats.versionTypeCode.raw": "EBK"}},
                                 {"terms": {"formats.statusCode.raw": ["LFB", "VGR", "PLZ", "IHST", "WNN"]}}]}}}}, {
                                        "bool": {"should": [{"term": {"formats.isbn13.raw": ""}},
                                                            {"term": {"formats.isbnPdf.raw": ""}},
                                                            {"term": {"formats.isbnPdfFree.raw": ""}},
                                                            {"term": {"formats.isbnEpub3.raw": ""}},
                                                            {"term": {"formats.isbnEpub.raw": ""}},
                                                            {"term": {"formats.isbnMobi.raw": ""}},
                                                            {"term": {"formats.isbnDx.raw": ""}},
                                                            {"term": {"formats.keywords.raw": ""}}]}},
                                    {"terms": {"categories.code.raw": ["SCAS"]}}], "must_not": [],
                           "should": [{"exists": {"field": "formats.licensedEntities.raw"}}, {
                               "nested": {"path": "formats", "query": {"bool": {
                                   "must": [{"term": {"formats.versionTypeCode.raw": "EBK"}}, {
                                       "terms": {"formats.statusCode.raw": ["LFB", "VGR", "PLZ", "IHST", "WNN"]}}],
                                   "must_not": [{"terms": {
                                       "formats.classifications.classifications.code.raw": ["DRMY", "EBRRTL"]}}]}}}}]},
                "aggs": [{"fieldName": "datePublication",
                          "ranges": [{"key": "Upcoming", "from": "2018-04-13T15:23:41+08:00",
                                      "to": "2018-07-27T15:23:41+08:00"}], "type": "custom_date_range"},
                         {"fieldName": "categories.code", "type": "terms"},
                         {"fieldName": "imprint", "type": "terms", "limit": 3},
                         {"fieldName": "originators.originators.name.full", "type": "terms", "limit": 4}],
                "customAggs": {"datePublication": {"date_range": {"field": "datePublication", "ranges": [
                    {"key": "Older", "to": "2017-04-13T15:23:41+08:00"}]}},
                               "categoriesCode": {"terms": {"field": "categories.code.raw", "size": 10}},
                               "imprint": {"terms": {"field": "imprint.raw", "size": 3}},
                               "originators.originators.name.full": {
                                   "terms": {"field": "originators.originators.name.full.raw", "size": 4}}}}

# Home page: fetched only to harvest the `_token` cookie (see updateHeaders).
HOME_URL = "https://www.taylorfrancis.com/"
# NOTE(review): original comment said a richer (since removed) header set
# caused abnormal request responses; only these three headers are sent.
HEADERS = {
    'content-type': 'application/json',
    'origin': 'https://www.taylorfrancis.com',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36'
}

# Search endpoint that receives the PAYLOAD_DATA POST.
API_URL = 'https://api.taylorandfrancis.com/v1/search/title'

# Request counters; under multiprocessing each Pool worker keeps its own copies.
success_count = 0
fail_count = 0


# Compute "now" and "now + look-ahead window" for the POST payload filter.
def getTimeStamp(days=105):
    """
    Build the two timestamp strings used in the payload.

    BUG FIX: the old docstring claimed "150 days later" while the code adds
    105 days; the window is now an explicit parameter defaulting to 105, so
    existing callers are unaffected.

    :param days: size of the look-ahead window in days (default 105)
    :return: tuple (now, now + days), both formatted as
             '%Y-%m-%dT%H:%M:%S+08:00' (the +08:00 suffix is a fixed
             literal appended to local time, not a real tz conversion)
    """
    fmt = '%Y-%m-%dT%H:%M:%S+08:00'
    now = datetime.datetime.now()
    later = now + datetime.timedelta(days=days)
    return now.strftime(fmt), later.strftime(fmt)


# Build the JSON payload string for one (subject, offset) request.
def setPayloadField(subject, offset=0):
    """
    Fill the shared PAYLOAD_DATA template for one request and serialize it.

    NOTE(review): mutates module-level PAYLOAD_DATA in place. Each Pool
    worker process owns its own copy, so this is safe under multiprocessing
    but would race under threads.

    :param subject: subject category code (a key of SUBJECTS_DICT)
    :param offset: paging offset (10 results per page)
    :return: JSON string on success, False on any failure
    """
    try:
        now_time_stamp, later_time_stamp = getTimeStamp()
        PAYLOAD_DATA['filter']['must'][-1]['terms']['categories.code.raw'][0] = subject
        PAYLOAD_DATA['filter']['must'][0]['range']['datePublication']['lte'] = later_time_stamp
        PAYLOAD_DATA['aggs'][0]['ranges'][0]['from'] = now_time_stamp
        PAYLOAD_DATA['customAggs']['datePublication']['date_range']['ranges'][0]['to'] = now_time_stamp
        PAYLOAD_DATA['offset'] = offset
        return json.dumps(PAYLOAD_DATA)
    except Exception:
        # BUG FIX: the original concatenated the int `offset` onto a str
        # (', offset:' + offset), raising TypeError inside this handler and
        # crashing the worker instead of returning False. Use %-formatting.
        logger.error('[13]>> Set payload filed ERROR, Subject:%s, offset:%s' % (subject, offset))
        return False


# Refresh the session's idtoken header after every 10 requests.
def updateHeaders(session, req_count):
    """
    Refresh the session's Authorization header once it is stale.

    When `req_count` exceeds 10, fetch HOME_URL (up to 4 attempts) to
    obtain the `_token` cookie and install it on `session` as
    `Authorization: idtoken <token>`.

    :param session: the requests.Session being reused by the caller
    :param req_count: number of requests already made with the current token
    :return: 0 when the token was refreshed (caller resets its counter),
             otherwise `req_count` unchanged
    """
    if req_count <= 10:
        return req_count
    for _ in range(4):  # same 4 attempts as the original 1..4 counter loop
        try:
            # verify=False: the site's SSL certificate is not validated
            resp = requests.get(HOME_URL, verify=False)
        except requests.RequestException:
            # narrowed from a bare except: only network errors are expected here
            print("请求header失败")
            continue
        if resp.status_code != 200:
            print("请求header失败")
            continue
        try:
            token = resp.cookies['_token']
        except KeyError:
            print("获取_tokan失败")
            continue
        session.headers.update({'Authorization': 'idtoken ' + token})
        return 0
    return req_count


def write2bigJson(json_text, subject_name):
    """
    Append `json_text` as one line to <dirPath>/<subject_name>.big_json.

    :param json_text: serialized JSON for one result page
    :param subject_name: subject code, used as the file name stem
    """
    # FIX: exist_ok=True replaces the racy exists()-then-makedirs() pair —
    # another worker process could create the directory between the two calls.
    os.makedirs(dirPath, exist_ok=True)
    file_path = os.path.join(dirPath, subject_name + ".big_json")
    with open(file_path, 'a', encoding='utf-8') as f:
        f.write(json_text + '\n')
        logging.debug('[12]>> Writing into a filed %s.big_json' % subject_name)


# POST the payload to the search API and validate the response envelope.
def post2API(session, payload_str):
    """
    POST `payload_str` to API_URL and return the parsed response on success.

    Sleeps 0-4s first to throttle requests. Updates the module-level
    success/fail counters (per worker process under multiprocessing).

    :param session: requests.Session carrying the Authorization header
    :param payload_str: JSON string built by setPayloadField
    :return: parsed response dict when metadata.status == "Success",
             otherwise False
    """
    time.sleep(random.random() * 4)  # random 0-4s delay
    global success_count, fail_count
    BoolResult, errString, r = BaseRequestPost(API_URL,
                                               sn=session,
                                               data=payload_str,
                                               headers=HEADERS,
                                               timeout=60,
                                               endstring="",
                                               verify=False)
    if not BoolResult:
        fail_count += 1
        logger.error('[5]>> Request API ERROR, Add Failed Payload into log')
        return False
    try:
        data_json = json.loads(r.text)
    except ValueError:
        # BUG FIX: a 200 response with a non-JSON body used to raise an
        # uncaught exception and kill the worker; count it as a failure.
        fail_count += 1
        logger.error('[5]>> Request API ERROR, response is not JSON')
        return False
    # BUG FIX: the original indexed data_json['metadata'] directly and
    # raised KeyError when the key was absent; fall back to {} instead.
    metadata = data_json.get('metadata') or {}
    if metadata.get('status') == "Success":
        success_count += 1
        return data_json
    return False


# Fetch all book-list pages for one subject (worker function for Pool.map).
def getBookInfo(para):
    """
    Crawl every list page for one subject, resuming from a saved offset.

    The outer `while True` exists so a fresh session/token can be created
    after the inner paging loop bails out (req_count > 5); the function only
    returns via the two `break`s when the subject is exhausted or the offset
    reaches the API's 19990 paging cap. Progress (allnum/offset) is written
    back to the `tbook` table so an interrupted crawl can resume.

    :param para: (subject_code, start_offset) tuple supplied by start()
    """
    subject = para[0]
    offset = para[1]
    while True:
        session = requests.Session()
        session.headers = HEADERS
        # 11 > 10 forces updateHeaders to fetch a fresh idtoken immediately
        updateHeaders(session, 11)
        req_count = 0  # requests made with the current token
        global success_count, fail_count
        payload_str = setPayloadField(subject, offset=offset)
        if payload_str:
            result = post2API(session, payload_str)
            req_count += 1
            if result and result['data']:
                write2bigJson(json.dumps(result), subject)
                # decide whether a next page exists
                total_books = result["data"]['productCount']
                if offset + 10 >= int(total_books):
                    break
                # the API cannot page past offset 19990 (see note at EOF)
                if offset >= 19990:
                    break
                sql = "Update `tbook` set `allnum`={},`offset`={} where `subject`='{}'".format(int(total_books),
                                                                                               int(offset),
                                                                                               str(subject))

                mysqlutils.ExeSqlToDB(sql)
                logging.warning('[4]>> Current Subject has %s Pages' % total_books)
                if total_books and total_books > 10:
                    ListSql = []
                    while True:
                        offset += 10
                        payload_str_more = setPayloadField(subject, offset=offset)
                        req_count += 1
                        # returns 0 after a token refresh, resetting the counter
                        req_count = updateHeaders(session, req_count)
                        result_more = post2API(session, payload_str_more)
                        if result_more and result_more["data"]['resultSet']:
                            write2bigJson(json.dumps(result_more), subject)
                            logging.info('[7]>> Already got more book info, Current Page: %s' % int(offset / 10 + 1))
                            sql = "Update `tbook` set `allnum`={},`offset`={} where `subject`='{}'".format(
                                int(total_books),
                                int(offset),
                                str(subject))
                            # batch progress updates: flushed every 11 rows
                            ListSql.append(sql)
                            if len(ListSql) > 10:
                                mysqlutils.ExeSqlListToDB(ListSql)
                                ListSql.clear()
                            # success_count += 1
                        else:
                            break
                        logging.warning('[8]>> Success %s, Fail Count %s, Will UpdateHeaders: %s' % (
                            success_count, fail_count, 10 - req_count))
                        # bail out to the outer loop for a new session/token
                        if req_count > 5:
                            break
                        if offset > (total_books - 10):
                            logging.info('[9]>> Current Subject API End')
                            break
                    # flush any remaining batched progress updates
                    if len(ListSql) > 0:
                        mysqlutils.ExeSqlListToDB(ListSql)
                        ListSql.clear()
            else:
                # writeFailStr2Log(payload_str)
                logging.error('[10]>> result is Error')
                # fail_count += 1
        else:
            # NOTE(review): payload_str is False here, so this str + bool
            # concatenation raises TypeError — latent bug to fix.
            logging.error('[11]>> Post payload ERROR >>> ' + payload_str)


def start(pool):
    """
    Load per-subject crawl progress from the `tbook` table, skip subjects
    that are already finished, and fan the remaining (subject, offset)
    pairs out to the worker pool.

    :param pool: a multiprocessing Pool; closed and joined before returning
    """
    sql = "select `allnum`,`offset`,`subject` from `tbook` ORDER BY `allnum` DESC "
    # rows = SelectFromDBall(sql)
    tasks = []
    for allnum, saved_offset, subject in mysqlutils.SelectFromDB(sql):
        start_offset = int(saved_offset)
        # +10 because the final page's offset never gets written back
        finished = allnum != 0 and int(allnum) <= start_offset + 10
        if finished:
            print(subject + "存在，跳过")
        else:
            tasks.append((subject, start_offset))
    print(tasks)
    pool.map(getBookInfo, tasks)
    pool.close()
    pool.join()

    logging.warning('[3]>> Crawl End, Success:%s, Fail:%s' % (success_count, fail_count))
    # checkFailLog(session, conn=conn)
    # checkFailLog(session, conn=conn)


# Known issue: the API refuses to page past offset 19990.
# Subject categories are hard-coded in SUBJECTS_DICT above; if categories
# are added or removed later, update that dict.
if __name__ == '__main__':
    # Run the crawl forever, rebuilding the pool after each pass or crash.
    while True:
        try:
            pool = Pool(6)  # 6 worker processes
            start(pool)
            print("start over")
            time.sleep(10)
        except Exception:
            # BUG FIX: the bare `except:` also swallowed KeyboardInterrupt
            # and SystemExit, making the script impossible to stop with
            # Ctrl-C; only real errors are logged-and-retried now.
            print(traceback.format_exc())
            time.sleep(10)
