import re
from bs4 import BeautifulSoup
import jieba
import sys
import os
import logging

# Module-level logger; handler/level configuration is left to the application.
logger = logging.getLogger(__name__)
# Load a custom jieba user dictionary shipped alongside this module so that
# domain-specific terms are segmented as single tokens.
# NOTE(review): assumes jieba_dict.txt exists next to this file — a missing
# file raises at import time; confirm this is intended.
current_dir = os.path.dirname(os.path.abspath(__file__))
jieba.load_userdict(os.path.join(current_dir, "jieba_dict.txt"))


def remove_special_chars(text):
    """Strip every character that is not a CJK ideograph, ASCII letter, digit, or whitespace."""
    # Keep CJK unified ideographs (U+4E00..U+9FA5), ASCII letters, digits, whitespace.
    disallowed = re.compile(r"[^\u4e00-\u9fa5a-zA-Z0-9\s]")
    return disallowed.sub("", text)


def remove_extra_spaces(text):
    """Collapse every run of whitespace (spaces, tabs, newlines) to a single space and trim the ends."""
    words = text.split()
    return " ".join(words)


# Stop words dropped during segmentation: common Chinese function words plus
# API-documentation boilerplate terms specific to this project's corpus.
# Written as a set literal (no redundant set([...]) wrapper); the original
# list contained duplicate entries ("了", "在", "到", "没有"), removed here.
stop_words = {
    "的",
    "了",
    "在",
    "是",
    "我",
    "有",
    "和",
    "就",
    "不",
    "人",
    "都",
    "一",
    "一个",
    "上",
    "也",
    "很",
    "到",
    "说",
    "要",
    "去",
    "你",
    "会",
    "着",
    "没有",
    "看",
    "好",
    "自己",
    "这",
    "期间",
    "后",
    "任何",
    "情况下",
    "手机",
    "系统",
    "侧",
    "工具",
    "统计",
    "一直",
    "上涨",
    "中",
    "近",
    "获取",
    "信息",
    "API",
    "URL",
    "Method",
    "请求",
    "参数",
    "类型",
    "必填",
    "描述",
    "响应",
    "示例",
    "错误",
    "使用",
    "通过",
    "可以",
    "轻松",
    "分析",
    "数据",
    "为",
    "业务",
    "决策",
    "提供",
    "有力",
    "支持",
    "想",
    "知道",
    "内",
    "开发者",
    "逗号",
    "句号",
    "GET",
    "YYYYMMDD",
    "例如",
    "分钟",
}


def segment_text_and_remove_stop_words(text):
    """Cut *text* into words with jieba, drop stop words, and return the words space-joined."""
    word_tokens = jieba.lcut(text)
    logger.debug(f"word-tokens:\n{word_tokens}")
    filtered_text = []
    for word in word_tokens:
        if word in stop_words:
            continue
        filtered_text.append(word)
    logger.debug(f"filtered:\n{filtered_text}")
    return " ".join(filtered_text)


def replace_numbers(text):
    """Substitute every run of consecutive digits with the placeholder token "[NUM]"."""
    digit_run = re.compile(r"\d+")
    return digit_run.sub("[NUM]", text)


def remove_urls_and_emails(text):
    """Replace http(s) URLs with "[URL]" and e-mail-like tokens with "[EMAIL]"."""
    # URLs first, then anything that still looks like user@host.
    without_urls = re.sub(r"http\S+", "[URL]", text)
    return re.sub(r"\S+@\S+", "[EMAIL]", without_urls)


def remove_html_tags(text):
    """Parse *text* as HTML and return only the visible text content (tags stripped)."""
    return BeautifulSoup(text, "html.parser").get_text()


def remove_pinyin(text):
    """Remove parenthesized annotations (assumed to be pinyin readings).

    Handles both ASCII parentheses ``(...)`` and the full-width CJK
    parentheses ``（...）`` that Chinese text normally uses; the original
    pattern only matched the ASCII form, so annotations in real Chinese
    input were never removed.
    """
    # The negated character classes stop at the first matching closer, so
    # consecutive annotations are removed independently (non-greedy).
    return re.sub(r"\([^)]*\)|（[^）]*）", "", text)


def clean_text(text):
    """Run the full normalization pipeline over *text* and return cleaned text.

    Ordering matters: the steps that key off punctuation (HTML tags,
    URLs/e-mails, parenthesized pinyin) must run BEFORE
    remove_special_chars, which strips the ``< > : / @ ( )`` characters
    those patterns match on.  The original ordering ran
    remove_special_chars first, making the URL/e-mail and pinyin steps
    guaranteed no-ops, and never called remove_html_tags at all.
    """
    text = remove_html_tags(text)
    text = remove_urls_and_emails(text)
    text = remove_pinyin(text)
    # Strips remaining punctuation; note the [URL]/[EMAIL] placeholders lose
    # their brackets here and become plain URL/EMAIL tokens.
    text = remove_special_chars(text)
    text = remove_extra_spaces(text)
    text = segment_text_and_remove_stop_words(text)
    # Last, so the brackets in "[NUM]" are not stripped by the steps above.
    text = replace_numbers(text)
    return text


def main():
    """CLI entry point: read the UTF-8 text file named in argv[1] and print the cleaned text.

    The original sent both the usage message and the program output to
    ``logger.debug``; with no logging configuration anywhere in this script,
    that is silenced by default and the tool produced no output at all.
    """
    if len(sys.argv) != 2:
        # Usage errors belong on stderr with a non-zero exit status.
        print("Usage: python clean_text.py <filename>", file=sys.stderr)
        sys.exit(1)
    filename = sys.argv[1]
    with open(filename, "r", encoding="utf-8") as file:
        text = file.read()
    cleaned_text = clean_text(text)
    # Program output goes to stdout so it can be piped or redirected.
    print(cleaned_text)


if __name__ == "__main__":
    main()
