import json
import os

from bs4 import BeautifulSoup
from elasticsearch import Elasticsearch
from elasticsearch import helpers

# Elasticsearch client for a local single-node cluster.
# NOTE(review): older elasticsearch-py versions accept a bare 'host:port'
# string; 8.x clients require a full URL like 'http://127.0.0.1:9200' —
# confirm against the installed client version.
es = Elasticsearch('127.0.0.1:9200')
# Target index both ingestion paths below write into.
index_name = 'mysql-manual'


# Parse each HTML file, extract the article title and body text, index each
# document into Elasticsearch, then dump all extracted records to a JSON file.
def parse_html_and_insert():
    """Walk ./mysql-manual/, index every .html file into ES, and write the
    extracted records to html-content.json.

    Each record is a dict with keys: id (running integer, unique across the
    whole walk), title, filename, content.
    """
    html_obj_list = []
    # Running counter so ids are unique across directories and count only
    # .html files (enumerate(filenames) would reset per directory and also
    # number non-HTML files).
    doc_id = 0
    for parent, dirnames, filenames in os.walk("./mysql-manual/", followlinks=True):
        for filename in filenames:
            if not filename.endswith('.html'):
                continue
            file_path = os.path.join(parent, filename)
            with open(file_path, 'rt', encoding='utf-8') as f:
                data = f.read()
            # Name the parser explicitly: letting bs4 auto-pick is
            # non-deterministic across installs and emits a warning.
            soup = BeautifulSoup(data, 'html.parser')
            body_node = soup.select_one('#docs-body')
            title_node = soup.select_one('.title')
            if body_node is None or title_node is None:
                # Skip malformed pages instead of crashing the whole run
                # with an IndexError on select(...)[0].
                print('skip (missing #docs-body or .title):', file_path)
                continue
            html_obj = {
                'id': doc_id,
                'title': title_node.text,
                'filename': filename,
                'content': body_node.text,
            }
            insert_result = es.index(index=index_name, body=html_obj)
            print(doc_id, filename, insert_result)
            html_obj_list.append(html_obj)
            doc_id += 1
    # Write UTF-8 explicitly: ensure_ascii=False produces non-ASCII output,
    # which would fail on platforms whose default encoding is not UTF-8.
    with open('html-content.json', 'wt', encoding='utf-8') as f:
        json.dump(html_obj_list, f, ensure_ascii=False)


# Read the pre-processed JSON file and bulk-insert its records into ES.
def insert_json_file():
    """Load html-content.json and bulk-index its records into `index_name`.

    Uses helpers.bulk, which batches requests and is far faster than
    per-document es.index() calls.
    """
    with open('html-content.json', 'rt', encoding='utf-8') as f:
        # Parse straight from the stream; no need for read() + loads().
        html_obj_list = json.load(f)
    # NOTE(review): "_type" is deprecated since ES 7 and rejected by ES 8;
    # kept here for compatibility with the cluster this was written against.
    actions = [
        {
            "_index": index_name,
            "_type": "_doc",
            "_source": item,
        }
        for item in html_obj_list
    ]
    helpers.bulk(es, actions)


parse_html_and_insert()
