import os
import re
import nltk
from docx import Document
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import logging

# Configure logging: timestamped INFO-level messages to stderr.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')


# Download the NLTK data packages required by tokenization/stopword removal.
def download_nltk_data():
    """Ensure the NLTK resources this module needs are installed locally.

    Checks each required resource individually and downloads only the ones
    that are missing.  (A single try/except over both ``find`` calls would
    re-download *both* packages when only one is absent, and would skip the
    second check entirely once the first raises.)
    """
    # (lookup path used by nltk.data.find, package name used by nltk.download)
    required = (
        ('tokenizers/punkt', 'punkt'),
        ('corpora/stopwords', 'stopwords'),
    )
    for lookup_path, package in required:
        try:
            nltk.data.find(lookup_path)
        except LookupError:
            nltk.download(package)


# Module-level side effect: make sure NLTK data is present before any
# tokenization happens (may hit the network on first run).
download_nltk_data()


def read_docx(docx_path):
    """Extract the plain text of a Word (.docx) file.

    Paragraph texts are joined with newlines.  Any failure (missing file,
    corrupt document, ...) is logged and an empty string is returned so
    callers can proceed best-effort.
    """
    try:
        document = Document(docx_path)
        return '\n'.join(paragraph.text for paragraph in document.paragraphs)
    except Exception as e:
        logging.error(f"Error reading DOCX file: {e}")
        return ""


def preprocess_text(text):
    """Lowercase, strip punctuation, tokenize, and drop English stop words.

    The character filter keeps CJK ideographs (U+4E00–U+9FA5), ASCII
    lowercase letters, digits, and whitespace, so mixed Chinese/English
    input survives the cleanup.  Tokens are rejoined with single spaces.

    NOTE(review): word_tokenize does not segment contiguous Chinese text;
    a dedicated Chinese tokenizer/stop-word list would be needed for that —
    confirm against the expected input.
    """
    # Lowercase first so the a-z class below matches all Latin letters.
    text = text.lower()

    # Remove punctuation/special characters (keep CJK, letters, digits, spaces).
    text = re.sub(r'[^\u4e00-\u9fa5a-z0-9\s]', '', text)

    # Tokenize on the cleaned text.
    words = word_tokenize(text)

    # Build the English stop-word set once and cache it on the function —
    # the original rebuilt it from the corpus on every call.
    if not hasattr(preprocess_text, '_stop_words'):
        preprocess_text._stop_words = set(stopwords.words('english'))
    stop_words = preprocess_text._stop_words

    filtered_words = [word for word in words if word not in stop_words]

    return ' '.join(filtered_words)


def convert_and_preprocess(docx_path, txt_path):
    """Read a DOCX file, preprocess its text, and write the result to a TXT file.

    Parameters:
        docx_path: path of the input Word document.
        txt_path:  path of the UTF-8 text file to write.

    Never raises: any failure is logged (with traceback) and swallowed,
    matching the module's best-effort style.
    """
    try:
        logging.info(f"Reading DOCX file: {docx_path}")
        text = read_docx(docx_path)

        logging.info("Preprocessing text...")
        processed_text = preprocess_text(text)

        logging.info(f"Writing to TXT file: {txt_path}")
        with open(txt_path, 'w', encoding='utf-8') as file:
            file.write(processed_text)
        logging.info("Conversion and preprocessing completed successfully.")
    except Exception as e:
        # logging.exception records the full traceback, not just the message.
        logging.exception(f"An error occurred: {e}")


if __name__ == "__main__":
    '''
    import argparse

    parser = argparse.ArgumentParser(description="Convert DOCX to preprocessed TXT.")
    parser.add_argument('input_file', type=str, help="Path to the input DOCX file.")
    parser.add_argument('output_file', type=str, help="Path to the output TXT file.")
    args = parser.parse_args()

    convert_and_preprocess(args.input_file, args.output_file)
    '''
    docx_file_path = 'test.docx'
    txt_output_path = 'output_preprocessed.txt'
    convert_and_preprocess(docx_file_path, txt_output_path)
