#!/usr/bin/env python
# coding: utf-8
import logging, sys, getopt, mistune, re, collections, hashlib, os, meilisearch
from bs4 import BeautifulSoup

# Global logging setup: "timestamp - LEVEL - message", INFO and above.
LOG_FORMAT = "%(asctime)s - %(levelname)s - %(message)s"
logging.basicConfig(format=LOG_FORMAT, level=logging.INFO)

# Copy one entry from one mapping into another, skipping None values.
def copyItem(src, dest, key):
    """Copy src[key] into dest[key] unless the value is None.

    Uses a plain subscript lookup — callers pass a defaultdict whose
    default is None, so missing keys read as None rather than raising.
    """
    # `is not None` (identity) instead of `!= None` per PEP 8.
    if src[key] is not None:
        dest[key] = src[key]

# Is this tag name an HTML heading tag?
def isHeader(tag):
    """Return True if *tag* is a heading tag name like 'h1'..'h9'."""
    # `is not None` (identity) instead of `!= None` per PEP 8.
    return re.match(r'^h\d$', tag) is not None

# Is this tag name a <route> element?
def isRoute(tag):
    """Return True if *tag* names a route element (case-insensitive)."""
    lowered = tag.lower()
    return lowered == 'route'

# Convert a stringified list literal like "['a', 'b']" into a Python list.
def strToList(s):
    """Split *s* on its bracket/quote/comma delimiters and drop the empty
    fragments the split leaves at the edges."""
    pieces = re.split(r'''\s*\[\s*['"]\s*|\s*['"]\s*,\s*['"]\s*|\s*['"]\s*\]\s*''', s)
    return [piece for piece in pieces if piece.strip()]

# Parse markdown content into a flat list of heading / route records.
def parseContent(content):
    """Render *content* (markdown) to HTML and walk the top-level tags.

    Returns a list of dicts: headings become {'type': 'h<n>', 'text': ...};
    <route> tags become {'type': 'route'} plus 'path'/'example' attributes,
    optional 'params' parsed from the ':paramsdesc' attribute, and 'text'
    holding the tag re-serialized as a bare <div>.
    """
    result = []
    hContent = mistune.html(content)
    soup = BeautifulSoup(hContent, "lxml")
    for tag in soup.body.contents:
        name = str(tag.name)
        if isHeader(name):
            # Headings: only the text is kept (used later as the title stack).
            result.append({'type': name, 'text': str(tag.string)})
        elif isRoute(name):
            # Routes: lift the interesting attributes into the record.
            data = {'type': 'route'}
            # defaultdict so missing attributes read as None (copyItem relies on it).
            attrs = collections.defaultdict(lambda: None, tag.attrs)
            copyItem(attrs, data, 'path')
            copyItem(attrs, data, 'example')
            # Hoist the lookup; `is not None` instead of `!= None` per PEP 8.
            paramsDesc = attrs[':paramsdesc']
            if paramsDesc is not None and paramsDesc.strip() != '':
                data['params'] = strToList(paramsDesc)
            # Strip all attributes and demote <route> to a plain <div>
            # so the stored text is clean HTML.
            for kx in list(tag.attrs.keys()):
                del tag[kx]
            tag.name = 'div'
            data['text'] = str(tag)
            result.append(data)
    return result

# Extract the host (first path segment) from a route path.
def getHost(path):
    """Return the first segment of *path*: '/foo/bar' -> 'foo', '/foo' -> 'foo'."""
    sep = path.find('/', 1)
    return path[1:sep] if sep >= 0 else path[1:]

# Transform parsed records into searchable documents.
# NOTE(fix): the former module-level shared sha256 object accumulated every
# previously hashed path into each digest, so a route's id depended on the
# processing order instead of on its own path. A fresh hasher is now created
# per route, making the id a pure function of the path.
def handleDatas(datas):
    """Turn the flat record list from parseContent into search documents.

    Heading records maintain a stack of the enclosing titles (depth-first
    by heading level); each route record becomes one document carrying
    that title stack, a host derived from its path, and an id that is the
    sha256 hex digest of the path alone.
    """
    result = []
    buf = []  # stack of currently-open heading records
    for data in datas:
        t = data['type']
        if t == 'route':
            # Fresh hasher per route: id depends only on this path.
            data['id'] = hashlib.sha256(data['path'].encode('utf8')).hexdigest()
            data['host'] = getHost(data['path'])
            data['titles'] = [d['text'] for d in buf]
            del data['type']
            result.append(data)
        else:
            # Depth-first: pop headings of equal or deeper level before
            # pushing this one ('h3' >= 'h2' compares correctly as strings
            # for single-digit levels).
            while len(buf) > 0 and buf[-1]['type'] >= t:
                buf.pop()
            buf.append(data)
    return result

# Front-matter patterns: a "routes" page must begin with
#   ---
#   pageClass: routes
#   ---
l1Re = r'^---\s*$'
l2Re = r'^\s*pageClass:\s*routes\s*$'
l3Re = r'^---\s*$'
# Does the string match the regular expression (anchored at the start)?
def matchRe(pattern, s):
    """Return True if *s* matches *pattern* from its beginning."""
    # `is not None` (identity) instead of `!= None` per PEP 8.
    return re.match(pattern, s) is not None

# Parse one markdown file into search documents.
def parseMd(mdFile):
    """Read *mdFile*; if its front matter marks it as a "routes" page,
    return the list of search documents, otherwise return None."""
    with open(mdFile, 'r', encoding='utf8') as f:
        # A routes page opens with three exact front-matter lines.
        headerLines = [f.readline() for _ in range(3)]
        isRoutesPage = (matchRe(l1Re, headerLines[0])
                        and matchRe(l2Re, headerLines[1])
                        and matchRe(l3Re, headerLines[2]))
        if not isRoutesPage:
            return None
        # Parse the remaining markdown into records, then into documents.
        return handleDatas(parseContent(f.read()))

# Initialise the documents index (drop any old one, then recreate).
def initStore(client, indexId):
    """Delete any existing *indexId* index, recreate it with 'id' as the
    primary key, and configure its searchable/displayed attributes.

    Returns the index handle.
    """
    index = client.index(indexId)
    try:
        index.delete()
        logging.info('已删除旧的文档库')
    except Exception:
        # Narrowed from a bare `except:`; deletion is best-effort —
        # the index may simply not exist yet.
        pass
    client.create_index(indexId, {'primaryKey': 'id'})
    index.update_searchable_attributes(['host', 'titles'])
    index.update_displayed_attributes(["path", 'example', 'titles', 'params', 'text'])
    return index

# Initialise the tags index (drop any old one, then recreate).
def initTagsStore(client, indexId):
    """Delete any existing *indexId* index and recreate it with 'id' as
    the primary key and no searchable attributes.

    Returns the index handle.
    """
    index = client.index(indexId)
    try:
        index.delete()
        logging.info('已删除旧的标签库')
    except Exception:
        # Narrowed from a bare `except:`; deletion is best-effort —
        # the index may simply not exist yet.
        pass
    client.create_index(indexId, {'primaryKey': 'id'})
    index.update_searchable_attributes([])
    return index

# Query the status of an asynchronous index update.
def getUpdateStatus(index, updateId):
    """Return {'ok': True, 'number': n} when the update is processed or
    still enqueued, otherwise {'ok': False, 'error': ...}."""
    status = index.get_update_status(updateId)
    if status['status'] in ('processed', 'enqueued'):
        return {'ok': True, 'number': status['type']['number']}
    return {'ok': False, 'error': status['error']}
def usage():
    """Print the command-line help text to stdout."""
    helpText = '''Usage: doc-store.py [OPTIONS]
Options:
    -d 文档根目录, 默认: docs
    -s MeiliSearch URL, 默认: http://localhost:7700
    -p MeiliSearch 密码, 默认: None(无密码)
'''
    print(helpText)

if __name__ == '__main__':
    # Defaults, overridable via the command-line options (see usage()).
    # Renamed from `dir`, which shadowed the builtin.
    docsDir = 'docs'
    url = 'http://localhost:7700'
    pwd = None
    try:
        opts, _ = getopt.getopt(sys.argv[1:], 'hd:s:p:')
    except getopt.GetoptError:
        usage()
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            usage()
            sys.exit()
        elif opt == '-d':
            docsDir = arg
        elif opt == '-s':
            url = arg
        elif opt == '-p':
            pwd = arg

    client = meilisearch.Client(url, pwd)
    index = initStore(client, 'rss')
    tagIndex = initTagsStore(client, 'tags')
    seenTags = set()  # dedup helper for `tags`
    tags = []
    for fileName in filter(lambda name: name.lower().endswith('.md'), os.listdir(docsDir)):
        logging.info('正在处理文件: ' + fileName)
        docs = parseMd(os.path.join(docsDir, fileName))
        # Truthiness covers both None (not a routes page) and empty lists.
        if docs:
            # Collect each document's first-level title (deduplicated,
            # order-preserving) for the tag store.
            # NOTE(review): assumes every document has a non-empty 'titles'
            # list — a route appearing before any heading would break this.
            for doc in docs:
                tag = doc['titles'][0]
                if tag not in seenTags:
                    tags.append(tag)
                    seenTags.add(tag)
            update = index.add_documents(docs)
            updateStatus = getUpdateStatus(index, update['updateId'])
            if updateStatus['ok']:
                logging.info('已添加 {} 条文档'.format(updateStatus['number']))
            else:
                # logging.warn is a deprecated alias of logging.warning.
                logging.warning('添加文档失败: ' + updateStatus['error'])
    if len(tags) > 0:
        logging.info('正在存储标签')
        update = tagIndex.add_documents([{'id': 1, 'items': tags}])
        updateStatus = getUpdateStatus(tagIndex, update['updateId'])
        if not updateStatus['ok']:
            logging.error('添加标签失败: ' + updateStatus['error'])