
import os
import zipfile

from bs4 import BeautifulSoup
from lxml import etree
import re

# Lenient XML parser: recover=True tolerates slightly malformed EPUB
# metadata, no_network=True prevents fetching external entities.
RECOVER_PARSER = etree.XMLParser(recover=True, no_network=True)
# Namespace map for Dublin Core metadata lookups in the OPF package file.
NAMESPACES = {
    'dc': 'http://purl.org/dc/elements/1.1/',
}
# Matches newline / carriage-return characters (stripped from chapter text).
pattern = re.compile('[\n\r]')


class Book(object):
    """Minimal EPUB reader.

    Extracts title, author, the chapter list, the plain text of the first
    chapter and one cover image from an ``.epub`` archive.
    """

    def __init__(self, book_name=None):
        # Always define the attribute so open() can safely test it even
        # when no name was passed here (the original left it undefined,
        # making a bare Book().open() raise AttributeError instead of the
        # intended error).
        self.book_name = book_name
        if book_name:
            self.open(book_name)

    def fromstring(self, raw, parser=RECOVER_PARSER):
        """Parse raw XML leniently (recover mode tolerates bad markup).

        :param raw: raw bytes/str of an XML document
        :return: the lxml root element
        """
        return etree.fromstring(raw, parser=parser)

    def read_doc_props(self, raw):
        """Populate ``self.title`` and ``self.author`` from the OPF XML.

        :param raw: raw bytes/str of the OPF (package) document
        :raises IndexError: if the document lacks dc:title or dc:creator
        """
        root = self.fromstring(raw)
        self.title = root.xpath(
            '//dc:title', namespaces={'dc': NAMESPACES['dc']})[0].text
        self.author = root.xpath(
            '//dc:creator', namespaces={'dc': NAMESPACES['dc']})[0].text

    def open(self, book_name=None):
        """Open the EPUB archive and populate the book's attributes.

        Sets ``title``, ``author``, ``chapters`` (list of
        ``(label, src)`` tuples), ``content`` (first chapter's plain
        text) and ``img`` (cover image bytes).

        :param book_name: path to the .epub file; defaults to the name
            given at construction time.
        :raises Exception: if no book name was ever supplied.
        """
        if book_name:
            self.book_name = book_name
        if not self.book_name:
            raise Exception('Book id not set')

        self.f = zipfile.ZipFile(self.book_name, 'r')
        try:
            content = self.f.read('META-INF/container.xml')
            soup = BeautifulSoup(content, "xml")

            # container.xml points at the OPF package file.
            oebps = soup.findAll('rootfile')[0]['full-path']
            print("ops filename path:" + str(oebps))
            # ZIP member names always use '/' as the separator, never
            # os.sep (the original used os.sep, which breaks on Windows).
            folder = oebps.rfind('/')
            # Folder prefix containing the OEBPS content ('' if at root).
            self.oebps_folder = '' if folder == -1 else oebps[:folder + 1]

            oebps_content = self.f.read(oebps)
            self.read_doc_props(oebps_content)

            opf_bs = BeautifulSoup(oebps_content, "xml")
            # Resolve the NCX (table of contents) path from the manifest.
            ncx = opf_bs.findAll('item', {'id': 'ncx'})[0]
            ncx = self.oebps_folder + ncx['href']

            ncx_bs = BeautifulSoup(self.f.read(ncx), "xml")

            # Each navPoint yields a (chapter label, content src) pair.
            self.chapters = [(nav.navLabel.text, nav.content['src']) for
                             nav in ncx_bs.findAll('navMap')[0].findAll('navPoint')]
            # First chapter's text; the src may carry a '#fragment' that
            # is not part of the archive member name. Newlines stripped.
            content = self.f.read(
                self.oebps_folder + self.chapters[0][1].split('#')[0])
            content = BeautifulSoup(content, "xml").get_text(' ', True)
            self.content = pattern.sub('', content)
            # NOTE(review): the cover image name is hard-coded and only
            # works for EPUBs shipping this exact file — consider reading
            # the cover id from the OPF <metadata> instead.
            self.img = self.f.read(self.oebps_folder + 'Image00003.jpg')
        finally:
            # Release the archive even if any parsing step above raised
            # (the original leaked the ZipFile on error).
            self.f.close()


if __name__ == '__main__':
    # Demo pipeline: parse an EPUB, archive its cover via GridFS, store the
    # book metadata as a MongoDB document, then index the chapter text in
    # Elasticsearch and run a sample full-text query.
    import pymongo
    import json
    import datetime
    from gridfs import GridFS
    from elasticsearch import Elasticsearch

    book = Book('pass/Vue.js项目实战7667382682782067598.epub')
    print(book.oebps_folder)
    print(book.title)
    print(book.author)
    print(book.chapters)

    # --- MongoDB ---
    # NOTE(review): credentials are hard-coded; move them to environment
    # variables or a config file before deploying.
    client = pymongo.MongoClient('host.docker.internal:27017')
    client.admin.authenticate('root', '^&*yt345', mechanism='SCRAM-SHA-1')
    dblist = client.list_database_names()
    if "testdb" in dblist:
        print("数据库已存在！")
    else:
        print('数据库不存在')
    # Start from a clean slate for this demo run.
    client.testdb.books.drop()
    client.testdb.epub.drop()

    # File storage: put the cover bytes into GridFS.
    # NOTE(review): this stores book.img (a cover JPEG) under an '.epub'
    # filename — confirm whether the whole EPUB file was meant to be stored.
    fs = GridFS(client.testdb, 'epub')
    fs.put(book.img, filename=book.title + '.epub')

    # Document storage. Renamed the result variable from `re` (which
    # shadowed the `re` module imported at the top of the file) and the
    # inserted id from `id` (which shadowed the builtin).
    doc = {
        'title': book.title,
        'author': book.author,
        'cover': 'http://files.epubee.com/getCover.ashx?fpath=ae/aed29f9c55896ab32b914051f4d97ead_s.jpg',
        'filename': str(int(datetime.datetime.now().timestamp())) + book.title,
    }
    insert_result = client.testdb.books.insert_one(doc)
    # inserted_id is the documented way to get the new _id (equivalent to
    # reading the '_id' key insert_one adds to the passed dict).
    doc_id = str(insert_result.inserted_id)

    # --- Elasticsearch ---
    es = Elasticsearch(
        ['host.docker.internal:9200'],
        http_auth=('elastic', 'changeme')
    )
    # Drop and recreate the demo index (missing index / already-exists
    # errors are ignored so reruns are idempotent).
    es.indices.delete('testindex', ignore=[400, 404])
    result = es.indices.create(index='testindex', ignore=400)
    print(result)
    # Mapping: analyse title/content with the IK Chinese tokenizer; the id
    # field is stored but not indexed (only returned with hits).
    mapping = {
        'properties': {
            'title': {
                'type': 'text',
                'analyzer': 'ik_max_word',
                'search_analyzer': 'ik_max_word'
            },
            'content': {
                'type': 'text',
                'analyzer': 'ik_max_word',
                'search_analyzer': 'ik_max_word'
            },
            'id': {
                'type': 'text',
                'index': False
            }
        }
    }
    result = es.indices.put_mapping(index='testindex', body=mapping, ignore=400)
    print(result)
    # Index the book's first-chapter text, linked back to the Mongo doc.
    es_doc = {
        'title': book.title,
        'content': book.content,
        'id': doc_id,
    }
    result = es.index(index='testindex', body=es_doc)
    print(result)

    # Sample full-text query; only return each hit's title and id.
    dsl = {
        'query': {
            'match': {
                'content': '图灵'
            }
        }
    }
    result = es.search(index='testindex', body=dsl, filter_path=[
        'hits.hits._source.title', 'hits.hits._source.id'])
    print(json.dumps(result, indent=2, ensure_ascii=False))

    client.close()
    es.close()
