#!/usr/bin/env python 
# -*- coding: utf-8 -*- 
# @Time : 2018-04-21 11:23 
# @Author : Woolei
# @File : Step3_ParseBigJson_废弃.py

import json
import logging
import os
import shutil
import time
from multiprocessing import Pool

from bs4 import BeautifulSoup
from xjlibrary.database_moudel.simple.mysqlclient import MySqlDbConnect, ExeSqlToDB, SelctSqlFromDB
from xjlibrary.our_file_dir import BaseDir

# Resolve the big_json download directory relative to this script's location.
curPath = BaseDir.get_file_dir_absolute(__file__)
TopPath = BaseDir.get_upper_dir(curPath, -2)
dirPath = BaseDir.get_new_path(TopPath, "download", "TandBook", "download", "big_json")

# Root logger configuration shared by every function in this module.
logging.basicConfig(level=logging.INFO,  # lowest level that gets emitted
                    format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
                    datefmt='%a, %d %b %Y %H:%M:%S')


# Database connection factory.
def MajorDbConnect():
    """Open a fresh MySQL connection configured by the db.ini next to this script."""
    config_dir = curPath
    return MySqlDbConnect(config_dir, "db.ini")


# Field-cleaning helpers. These were originally lambdas bound to names
# (PEP 8 E731); converting them to defs keeps the exact same callables
# while giving them docstrings and tracebacks with real names.

def filter_func(key, json_field):
    """Return json_field[key] when the key exists and its value is truthy, else ''."""
    return json_field[key] if key in json_field and json_field[key] else ''


def dealtext_func(text):
    """Normalise a text field: strip, drop newlines, double single quotes for SQL."""
    return str(text).strip().replace('\n', '').replace("'", "''")


def dealdate_func(date):
    """Return the year part of a 'YYYY-MM-DD...' string, '' for empty input."""
    return str(date).split('-')[0] if date else ''


def dealdateC_func(date):
    """Return the date part of an ISO timestamp as 'YYYYMMDD', '' for empty input."""
    return str(date).split('T')[0].replace('-', '') if date else ''


def dealIdentifier_func(isbn_set):
    """Join an isbn set into one ';'-separated identifier field, '' for empty/None."""
    return ';'.join(isbn_set) if isbn_set else ''


def isbnFilter_func(isbn):
    """Return a 13-character isbn unchanged; anything else becomes the all-zero isbn.

    BUGFIX: the original fallback was the integer literal 0000000000000 — which
    is just int 0 — not the 13-zero *string* used everywhere else in this module
    (see dealIsbnSetFunc below).
    """
    return isbn if len(str(isbn)) == 13 else '0000000000000'


def dealSqlQuote_func(text):
    """Double single quotes so the value can be embedded in SQL; non-str becomes ''."""
    return text.replace(r"'", r"''") if isinstance(text, str) else ''


def dealIsbnSetFunc(isbn):
    """Return a 13-char isbn suffixed with '+', or the all-zero isbn with '+'."""
    return str(isbn) + '+' if len(str(isbn)) == 13 else '0000000000000+'


# dealSqlQuote_func = lambda text: text if isinstance(text, str) else ''

# Copy the previous update's DB3 file to a new, date-stamped DB3 path.
def copyPreDB32NewDB3(pre_db3_path, new_db3_out):
    """
    Copy the previous DB3 file into a new file named <prefix><YYYYMMDD>.db3.

    :param pre_db3_path: path of the previous DB3 file, including the file name
    :param new_db3_out: output directory for the new DB3 file (no file name)
    :return: False when the source is missing, otherwise the new DB3 path
    """
    if not os.path.exists(pre_db3_path):
        # BUGFIX: the original log line contained a '%s' placeholder but never
        # supplied the argument, so the offending path was never logged.
        logging.error('<%s> is not exists!', pre_db3_path)
        return False
    # os.path.basename honours the platform separator; the old hard-coded
    # split on '\\' only worked for Windows-style paths.
    pre_db3_name = os.path.basename(pre_db3_path)
    new_db3_name = pre_db3_name.split('_')[0] + str(getTodayDate()) + '.db3'
    new_db3_path = os.path.join(new_db3_out, new_db3_name)
    logging.info('Copying DB3<%s ==> %s> file...' % (pre_db3_name, new_db3_name))
    shutil.copyfile(pre_db3_path, new_db3_path)
    logging.info('Copying files is done.')
    return new_db3_path


# Collect the paths of every big_json file.
def getBigJsonPath():
    """
    :return: list of full paths of every *.big_json file directly under dirPath
    """
    if not os.path.exists(dirPath):
        return []
    candidates = (os.path.join(dirPath, name) for name in os.listdir(dirPath))
    return [path for path in candidates
            if path.endswith('.big_json') and not os.path.isdir(path)]


# Today's date stamp.
def getTodayDate():
    """Return today's local date as a 'YYYYMMDD' string."""
    return time.strftime('%Y%m%d')


# # 根据isbn码保存该书的其他isbn码
# def saveISBNSet2DB(isbn, other_isbn_set):
#     other_isbn_str = ';'.join(other_isbn_set)
#     Sql = "INSERT IGNORE INTO cover(isbn13, otherIsbn) VALUES('%s','%s')" % (isbn, other_isbn_str)
#     conn = MajorDbConnect()
#     ExeSqlToDB(Sql, conn)


# Save an isbn's candidate cover-url string into the MySQL cover table.
def saveCoverUrl2DB(book_isbn, cover_url_set):
    """
    Store the candidate cover urls for one isbn in the `cover` table.

    Updates the existing row when the isbn is present but has no usable
    cover yet, inserts a new row when the isbn is unknown, and does nothing
    when a usable cover already exists.

    :param book_isbn: the book's isbn13 string
    :param cover_url_set: set of cover image urls, stored '**'-joined
    """
    cover_url_str = '**'.join(cover_url_set)
    Select_Sql = "SELECT isbn13,ava_url FROM cover WHERE isbn13='%s'" % book_isbn
    conn = MajorDbConnect()
    item_result = SelctSqlFromDB(Select_Sql, conn)
    if item_result and not item_result[0][1]:  # isbn exists in cover but has no usable cover url yet
        # NOTE(review): the check reads ava_url while the update writes url —
        # presumably url holds candidates and ava_url the verified one; confirm.
        Update_Sql = "UPDATE cover SET url='%s' WHERE isbn13='%s'" % (cover_url_str, book_isbn)
        conn = MajorDbConnect()
        updatebool, rows = ExeSqlToDB(Update_Sql, conn)
        if updatebool:
            if rows > 0:
                # Fresh candidate urls arrived: reset the failure counter.
                Update_Sql = "UPDATE cover SET failcount=0 WHERE isbn13='%s'" % book_isbn
                conn = MajorDbConnect()
                ExeSqlToDB(Update_Sql, conn)
        logging.info('Update cover.url:%s' % book_isbn)
    elif not item_result:  # isbn not present yet: insert a new row with the urls
        Insert_Sql = "INSERT IGNORE INTO cover(isbn13, url) VALUES('%s','%s')" % (book_isbn, cover_url_str)
        conn = MajorDbConnect()
        ExeSqlToDB(Insert_Sql, conn)
        logging.info('Insert cover.url:%s' % book_isbn)


# # 判断DB3中是否已存在该条数据
# def isExistField(db3_conn, pisbn_set, eisbn_set):
#     Sql = "SELECT * FROM modify_title_info_zt WHERE identifier_pisbn like '%{isbn}%' OR identifier_eisbn like '%{isbn}%'"
#     cursor = db3_conn.cursor()
#     all_isbn_set = pisbn_set | eisbn_set
#     # print(all_isbn_set)
#     for each in all_isbn_set:
#         Sql = Sql.format(isbn=each)
#         cursor.execute(Sql)
#         result = cursor.fetchall()
#         # 如果存在该数据字段，返回该字段的rawid
#         if result and result[0][1]:
#             return result[0][1]
#     # 若不存在该字段，说明是新数据，则返回None
#     cursor.close()
#     return None


# Check whether this record already exists in the database.
def isExistField(pisbn_set, eisbn_set):
    """
    Check whether a row with any of the given ISBNs already exists in allfields.

    :param pisbn_set: set of print-ISBN strings
    :param eisbn_set: set of e-ISBN strings
    :return: the rawid of the existing row, or None when the data is new
    """
    # A match on either ISBN column counts, so query with the union of both.
    all_isbn_set = pisbn_set | eisbn_set
    # Each ISBN is normalised (invalid lengths become thirteen zeros) and the
    # pieces are joined with '|' to form one REGEXP alternation for the query.
    isbn_values_str = '|'.join(map(dealIsbnSetFunc, all_isbn_set))
    Sql = "SELECT * FROM allfields WHERE identifier_pisbn REGEXP '{isbn_set}' OR identifier_eisbn REGEXP '{isbn_set}'"
    Sql = Sql.format(isbn_set=isbn_values_str)
    # The original left debug print()s here that dumped every SQL statement to
    # stdout; use debug-level logging instead so normal runs stay quiet.
    logging.debug('isExistField SQL: %s', Sql)
    conn = MajorDbConnect()
    result = SelctSqlFromDB(Sql, conn)
    # An existing row: return its rawid (column index 1).
    if result and result[0][1]:
        return result[0][1]
    # No match: this is new data.
    return None


# Update an existing record or insert the newly-parsed one.
def updateOrInsertField(book_unique_id, book_eisbn_set, book_pisbn_set, cover_url_set, lngid, book_isbn, title,
                        title_sub, title_edition, creator, publisher, description,
                        provider_subject, page, date, date_created, language, country, provider, provider_url, typeCode,
                        medium, batch):
    """
    Write one parsed book into the `allfields` table and refresh its covers.

    When book_unique_id is given the existing row is updated, and the batch
    column is refreshed only when the update actually changed something;
    otherwise a new row is inserted (INSERT IGNORE). Finally the cover urls
    are pushed to the cover table via saveCoverUrl2DB.

    NOTE(review): all SQL here is built by string formatting. The values pass
    through the quote-doubling helpers upstream, but parameterised queries
    would be the safe choice — verify inputs cannot carry raw quotes.
    """
    # print(batch)
    provider_id = provider + "@" + book_isbn
    identifier_eisbn = dealIdentifier_func(book_eisbn_set)
    identifier_pisbn = dealIdentifier_func(book_pisbn_set)
    if book_unique_id:  # the row already exists: update it in place
        Sql = "UPDATE allfields SET title='{title}', title_sub='{title_sub}', title_edition='{title_edition}', identifier_pisbn='{identifier_pisbn}', identifier_eisbn='{identifier_eisbn}', creator='{creator}', publisher='{publisher}', description='{description}', provider_subject='{provider_subject}', page='{page}', `date`='{date}',date_created='{date_created}', `language`='{language}', country='{country}', provider='{provider}', provider_url='{provider_url}', `type`='{type}', medium='{medium}' WHERE rawid='{rawid}'"
        Sql = Sql.format(rawid=book_unique_id, title=title, title_sub=title_sub, title_edition=title_edition,
                         identifier_pisbn=identifier_pisbn, identifier_eisbn=identifier_eisbn,
                         creator=creator, publisher=publisher, description=description,
                         provider_subject=provider_subject, page=page, date=date, date_created=date_created,
                         language=language, country=country, provider=provider,
                         provider_url=provider_url, type=typeCode, medium=medium)
        conn = MajorDbConnect()
        resultbool, rows = ExeSqlToDB(Sql, conn)
        # rows > 0 means the UPDATE changed something, so stamp the new batch.
        if resultbool:
            if rows > 0:
                sql = "update allfields set batch='{batch}'WHERE rawid='{rawid}'"
                sql = sql.format(rawid=book_unique_id, batch=batch)
                conn = MajorDbConnect()
                resultbool, rows = ExeSqlToDB(sql, conn)
                # Message: "update failed; check and insert this statement by
                # hand so no data is lost" (kept verbatim for operators).
                assert resultbool, ValueError("更新失败,请检查，并手动将该句插入数据库 保证数据不会漏")

    else:  # no existing row: insert the new record
        book_isbn_set = book_eisbn_set | book_pisbn_set
        # NOTE(review): a set union is never None, so this branch always runs.
        if book_isbn_set is not None:
            identifier_eisbn = dealIdentifier_func(book_eisbn_set)
            identifier_pisbn = dealIdentifier_func(book_pisbn_set)

            Sql_filed = "INSERT IGNORE INTO allfields(lngid, rawid, title, title_sub, title_edition, identifier_pisbn, identifier_eisbn, creator, publisher, `description`, provider_subject, `page`, `date`,date_created, `language`, country, provider, provider_url,provider_id, type, `medium`, batch)"
            Sql_values = " VALUES('%s', '%s','%s', '%s','%s','%s', '%s','%s', '%s','%s','%s','%s', '%s','%s', '%s','%s', '%s','%s', '%s','%s','%s','%s')" % (
                lngid, book_isbn, title, title_sub, title_edition, identifier_pisbn, identifier_eisbn, creator,
                publisher, description, provider_subject, page, date,
                date_created, language, country, provider, provider_url, provider_id, typeCode, medium, batch)
            Sql = Sql_filed + Sql_values
            conn = MajorDbConnect()
            ExeSqlToDB(Sql, conn)
    # Always refresh the cover urls for this isbn in MySQL.
    saveCoverUrl2DB(book_isbn, cover_url_set)


# Extract the ISBNs and cover image urls from one book's format list.
_ISBN_KEYS = ('isbnPdf', 'isbn13', 'isbnMobi')  # preference order within one format


def _collectIsbns(format_entry):
    """Return every non-empty ISBN of one format entry, dashes removed, in preference order."""
    return [format_entry[key].replace('-', '') for key in _ISBN_KEYS
            if key in format_entry and format_entry[key]]


def getISBNandCover(formats_json):
    """
    Extract ISBNs and cover image urls from the 'formats' list of one book.

    :param formats_json: list of format dicts for a single book (may be None)
    :return: (primary_isbn, cover_url_set, eisbn_set, pisbn_set); the primary
             ISBN prefers e-book formats over print formats, '' when none found
    """
    eisbn_list = []
    pisbn_list = []
    img_cover_list = []
    if formats_json is None:
        return '', set(), set(), set()
    for each in formats_json:
        # Cover images can appear on any format entry; collect them all.
        if each.get('coverImages'):
            img_cover_list.extend(each['coverImages'])
        version = each.get('versionTypeCode')
        # E-book formats (EBK / AEBK) feed the e-ISBN pool...
        if version in ('EBK', 'AEBK'):
            eisbn_list.extend(_collectIsbns(each))
        # ...while hardback / paperback formats feed the print-ISBN pool.
        # (The original had three copy-pasted branches doing exactly this.)
        elif version in ('HARD', 'PAPE'):
            pisbn_list.extend(_collectIsbns(each))
    isbn_list = eisbn_list + pisbn_list  # e-book ISBNs take precedence
    if not isbn_list:
        return '', set(), set(), set()
    return isbn_list[0], set(img_cover_list), set(eisbn_list), set(pisbn_list)


# Build the subject string from the category keywords.
def getCategoriesToSubjects(categories):
    """
    Join the text of every level-1 category into one ';'-separated string.

    :param categories: list of category dicts with 'level' and 'text' keys,
                       or any falsy value
    :return: ';'-joined texts of the level-1 categories, '' when none
    """
    if not categories:
        return ''
    # Only top-level (level == 1) categories with a non-empty text count;
    # a single dict.get per key replaces the original double lookups.
    subject_list = [each_sub.get('text') for each_sub in categories
                    if each_sub.get('level') == 1 and each_sub.get('text')]
    return ';'.join(subject_list)


# Build the author-name string.
def getAutherName(author_info):
    """
    Build a ';'-joined author-name string from an 'originators' json field.

    :param author_info: list whose first element holds an 'originators' list
    :return: de-duplicated author names joined with ';', '' when unavailable
    """
    def _pick(mapping, key):
        # Same contract as the module-level filter_func: the value when the
        # key exists and is truthy, '' otherwise.
        return mapping[key] if key in mapping and mapping[key] else ''

    # BUGFIX: the original indexed author_info[0] without checking the list
    # was non-empty, raising IndexError on [].
    if not (isinstance(author_info, list) and author_info):
        return ''
    originators = _pick(author_info[0], 'originators')
    if not (originators and isinstance(originators, list)):
        return ''
    author_list = []
    for each_author in originators:
        inner_name = _pick(each_author, 'name')
        if not inner_name:
            # BUGFIX: the original tested `inner_name is not None`, which is
            # always true (_pick returns ''), so a missing name produced a
            # bogus lone ' ' entry from '' first + ' ' + '' last.
            continue
        author_name = _pick(inner_name, 'full')  # 'full' may be absent
        if not author_name:
            # Fall back to joining the first and last name parts.
            author_name = _pick(inner_name, 'first') + ' ' + _pick(inner_name, 'last')
        author_list.append(author_name)
    # dict.fromkeys de-duplicates while keeping a deterministic, first-seen
    # order (the original used set(), whose join order was arbitrary).
    return ';'.join(dict.fromkeys(author_list))


# Extract the plain-text abstract.
def getDescription(description_info):
    """Strip html markup from a description string and return the plain text."""
    if not (isinstance(description_info, str) and description_info):
        return ''
    return BeautifulSoup(description_info, 'html.parser').get_text()


# Extract the remaining descriptive fields of one book.
def bookOtherInfo(each_book):
    """
    Pull the descriptive fields out of one book's json record.

    :param each_book: json dict for a single book
    :return: 12-tuple of cleaned strings in the order (title, title_sub,
             title_edition, page, date, date_created, publisher,
             provider_subject, creator, description, language, country)
    """
    # Publication date: prefer 'firstPublishedOn', then 'datePublication',
    # then a fixed 1900 placeholder when neither is present.
    date_info = (filter_func('firstPublishedOn', each_book)
                 or filter_func('datePublication', each_book)
                 or '1900-00-00T00:00:00.000Z')
    fields = (
        filter_func('title', each_book),                                   # title
        filter_func('subtitle', each_book),                                # subtitle
        filter_func('edition', each_book),                                 # edition
        filter_func('pages', each_book),                                   # page count
        dealdate_func(date_info),                                          # publication year
        dealdateC_func(date_info),                                         # publication date YYYYMMDD
        filter_func('imprint', each_book),                                 # publisher
        getCategoriesToSubjects(categories=filter_func('categories', each_book)),  # subject class
        getAutherName(author_info=filter_func('originators', each_book)),  # authors
        getDescription(filter_func('description', each_book)),             # abstract
        'EN',                                                              # language is fixed
        'UK',                                                              # country is fixed
    )
    # Every field goes through the same text normalisation before returning.
    return tuple(dealtext_func(field) for field in fields)


# Build the company-mandated constant fields.
def setCompanyInfo(isbn):
    """
    Build the fixed, company-defined fields for one book.

    :param isbn: the book's primary isbn string
    :return: (lngid, provider, provider_url, typeCode, medium, batch)
    """
    provider = 'tandfbook'
    return (
        'TANDF_TS_' + isbn,                                         # lngid
        provider,
        provider + '@https://www.taylorfrancis.com/books/' + isbn,  # provider_url
        1,                                                          # typeCode: book
        2,                                                          # medium: digital
        getTodayDate() + '00',                                      # batch label
    )


# Parse a single line of a big_json file.
def parseOneLine(line):
    """
    Parse one json line (a batch of roughly 10 books) and yield per-book tuples.

    :param line: one json-encoded line from a .big_json file
    :yield: 22-tuple with every field updateOrInsertField expects
    """
    # BUGFIX: json.loads() no longer accepts an 'encoding' argument — it was
    # ignored on Python 3 and removed outright in 3.9; the line is already str.
    line_json = json.loads(line)
    for each_book in line_json['data']['resultSet']:
        # book_isbn is one string; the other three values are sets.
        book_isbn, cover_url_set, book_eisbn_set, book_pisbn_set = getISBNandCover(each_book['formats'])
        # Union of e-book and print ISBNs for this book.
        book_isbn_set = book_eisbn_set | book_pisbn_set
        # NOTE(review): a set union is never None, so this test is always true;
        # kept as-is so books without any ISBN still flow through (the caller
        # skips the DB write for them but still logs the line).
        if book_isbn_set is not None:
            (title, title_sub, title_edition, page, date, date_created, publisher,
             provider_subject, creator, description, language, country) = bookOtherInfo(each_book)
            lngid, provider, provider_url, typeCode, medium, batch = setCompanyInfo(book_isbn)
            yield (book_eisbn_set, book_pisbn_set, cover_url_set, lngid, book_isbn, title,
                   title_sub, title_edition, creator, publisher, description, provider_subject,
                   page, date, date_created, language, country, provider, provider_url,
                   typeCode, medium, batch)


def runOneBigJson(big_json_path):
    """
    Parse one big_json file line by line and store every book in the database.

    :param big_json_path: full path of the .big_json file to process
    :return: None
    """
    subject = big_json_path.split('.')[0]
    # BUGFIX: use a context manager so the file handle is always closed
    # (the original open() was never closed), and enumerate() instead of a
    # manual readline()/counter loop.
    with open(big_json_path, encoding='utf-8') as file:
        for count, line in enumerate(file, start=1):
            for each_book_info in parseOneLine(line):
                # Unpack the per-book field tuple produced by parseOneLine.
                (book_eisbn_set, book_pisbn_set, cover_url_set, lngid, book_isbn, title,
                 title_sub, title_edition, creator, publisher, description, provider_subject,
                 page, date, date_created, language, country, provider, provider_url,
                 typeCode, medium, batch) = each_book_info
                # Some books carry no ISBN at all; only those with one are stored.
                if book_isbn:
                    book_unique_id = isExistField(book_pisbn_set, book_eisbn_set)
                    # isExistField decides between update (rawid) and insert (None).
                    updateOrInsertField(book_unique_id, book_eisbn_set, book_pisbn_set,
                                        cover_url_set, lngid, book_isbn, title, title_sub,
                                        title_edition, creator, publisher, description,
                                        provider_subject, page, date, date_created, language,
                                        country, provider, provider_url, typeCode, medium, batch)
                logging.info('Current Subject: %s, Item Success line: %s,id: %s',
                             subject, count, lngid)
    return None


def start():
    """
    Parse every big_json file using a pool of worker processes.

    :return: None
    """
    big_json_list = getBigJsonPath()
    pool = Pool(6)  # number of parser processes
    try:
        pool.map(runOneBigJson, big_json_list)
    finally:
        # BUGFIX: the original never closed the pool; close + join makes sure
        # every worker is reaped before the function returns.
        pool.close()
        pool.join()


if __name__ == '__main__':
    # Script entry point: launch the multiprocess parsing run.
    start()
