# -*- coding: utf-8 -*-
"""
@Time    : 2025/2/27 15:02 
@Author  : ZhangShenao 
@File    : data_process.py
@Desc    : 数据收集与预处理
"""

import xml.etree.ElementTree as Et
from typing import List

import jieba

stop_words = ("的", "了", "在", "是", "我", "有", "和", "就")  # Stop-word list; adjust to match the actual business domain.


def pre_process(xml_path: str) -> List[List[str]]:
    """
    Collect and preprocess corpus data.

    Parses the XML corpus file, extracts the text of the <article>
    element under each <RECORD>, segments each article with jieba,
    and filters out stop words.

    :param xml_path: path to the XML-formatted corpus file
    :return: list of tokenized articles (one list of words per article)
    :raises xml.etree.ElementTree.ParseError: if the file is not well-formed XML
    """

    # Parse the file into an XML tree.
    tree = Et.parse(xml_path)
    root = tree.getroot()

    # Collect the text of every <article> tag. findtext() returns None
    # (instead of raising AttributeError, as find(...).text would) when
    # a RECORD has no <article> child or the element is empty.
    texts = [record.findtext('article') for record in root.findall('RECORD')]
    print(f"原始语料数量: {len(texts)}")

    # Membership tests against a set are O(1); against the stop_words
    # tuple they are O(n) per token inside the hot loop below.
    stop_set = set(stop_words)

    # Tokenize each article and drop stop words; skip missing articles.
    processed_texts = []
    for text in texts:
        if text is not None:
            words = jieba.cut(text)
            processed_texts.append([word for word in words if word not in stop_set])

    print(f"数据预处理完成\n预处理后的语料数量: {len(processed_texts)}")
    return processed_texts
