import pandas as pd
import numpy as np
import re
from jieba import cut_for_search

# Load the full preprocessed HTML document data (title, description,
# content, etc.) produced by the earlier preprocessing step.
data = pd.read_csv("../data/data_with_otherinfos.csv")

# Punctuation and special characters to drop from the segmented tokens.
# NOTE(review): this is a plain string, so `word not in punctuations` below
# is a substring test — exact for single-character tokens, and effectively
# a no-op for multi-character words. Confirm this is intended.
punctuations = '＂＃＄/■★─◎◆●▲％＆＇（）)(-①②③④⑤⑥＊＋，－／：；＜＝＞＠［＼］＾＿｀｛｜｝～｟｠｢｣､\u3000、〃〈〉《》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘’‛“”„‟…‧﹏﹑﹔·！？｡。'

def segment_html(data=data):
    """Segment the title, description and content of every document.

    Uses jieba's search-engine mode (``cut_for_search``) on the three text
    columns and strips punctuation/space tokens.

    Parameters
    ----------
    data : pandas.DataFrame, optional
        Rows must expose ``url``, ``title``, ``description``,
        ``date_timestamp``, ``content`` and ``editor`` via attribute
        access. Defaults to the module-level ``data`` (bound at
        definition time — reassigning the global later has no effect).

    Returns
    -------
    list[list]
        One list per document: ``[url, title, description,
        date_timestamp, content, editor]`` with the three text fields
        replaced by space-joined segmented tokens.
    """

    def _segment_text(text):
        # Missing values must become "" — the old code did str(NaN),
        # which yields the truthy string "nan" and put a spurious "nan"
        # token into the output (and an unguarded NaN title would have
        # crashed cut_for_search).
        if pd.isna(text):
            return ""
        tokens = cut_for_search(str(text))
        # Keep the original substring-based punctuation filter semantics.
        return ' '.join(
            word for word in tokens
            if word not in punctuations and word != ' '
        )

    segmented_results = []
    total_docs = len(data)
    for index in range(total_docs):
        doc_info = data.iloc[index]
        print(f"Processing document {index + 1} of {total_docs}...")
        segmented_results.append([
            doc_info.url,
            _segment_text(doc_info.title),
            _segment_text(doc_info.description),
            doc_info.date_timestamp,
            _segment_text(doc_info.content),
            doc_info.editor,
        ])

    return segmented_results

# Segment all HTML documents and collect the per-document rows.
segmented_results = segment_html()
# NOTE(review): rows are [url, title, description, date_timestamp,
# content, editor] — assumes data.columns has exactly these six columns
# in this order; verify against the preprocessing output.
segmented_df = pd.DataFrame(segmented_results, columns=data.columns)

# Replace any remaining missing values with empty strings.
segmented_df = segmented_df.fillna("")

# Persist the segmented data to CSV (no index column).
segmented_df.to_csv("../data/segmented.csv", index=False)
# Bare expression — displays the DataFrame when run in a notebook.
segmented_df