# Glob pattern selecting the Confluence HTML exports (GITV space) to convert.
file_dir_name = '/data/datasets/customs/Confluence/GITV/*.html'
import os
import glob
from bs4 import BeautifulSoup
import json
import re


def get_singel_data(file_path):
    """Extract one instruction/input/output record from a Confluence HTML export.

    The page ``<title>`` text (minus the ``"yunwei : "`` space prefix) becomes
    the instruction; the concatenated text of all ``<p>`` tags becomes the
    output.  The "input" field is always empty (Alpaca-style record layout).
    All spaces are removed and runs of newlines are collapsed — the content is
    Chinese, so stripping every space is intentional.

    :param file_path: path to a single exported ``.html`` page
    :return: dict with keys ``"instruction"``, ``"input"`` and ``"output"``
    """
    with open(file_path, 'r', encoding='utf-8') as page:
        html_content = page.read()

    # Pin the built-in parser explicitly.  The original feature name 'html'
    # selects whichever installed parser supports HTML (lxml vs html.parser),
    # which makes the output environment-dependent and triggers warnings.
    soup = BeautifulSoup(html_content, 'html.parser')

    # Page title -> instruction; drop the Confluence space prefix.
    instruction = "".join(
        title.get_text().replace("yunwei : ", "")
        for title in soup.find_all('title')
    )
    # Every paragraph's text -> output, one per line.
    output = "".join("\n" + p.get_text() for p in soup.find_all('p'))
    # Always empty; renamed from `input`, which shadowed the builtin.
    input_text = ""

    def _clean(text):
        # Remove all spaces (Chinese text) and collapse blank lines.
        return re.sub(r'\n+', '\n', text.replace(' ', ''))

    instruction = _clean(instruction)
    input_text = _clean(input_text)
    output = _clean(output)

    # Progress/diagnostic output: rough size of the generated record.
    print(len(instruction) + len(output))
    return {
        "instruction": instruction,
        "input": input_text,
        "output": output
    }


all_list = []

# Convert every exported page, filtering weekly reports ("周报"): a record is
# dropped only when "周报" appears in BOTH its instruction and its output.
# NOTE(review): the `or` keeps pages whose title alone mentions 周报 — if the
# intent is to drop ANY weekly report, this should be `and`; confirm with the
# data owner before changing.
for path in glob.glob(file_dir_name):
    # Parse each file exactly once; the original called get_singel_data()
    # up to three times per file, re-reading and re-parsing the HTML.
    record = get_singel_data(path)
    if "周报" not in record["instruction"] or "周报" not in record["output"]:
        all_list.append(record)

with open('wiki_data.json', 'w', encoding='utf-8') as out_file:
    json.dump(all_list, out_file, ensure_ascii=False, indent=4)
