import re
import logging
import os
from pathlib import Path
from collections import defaultdict

# -------------------------- 1. 日志配置（记录清洗过程与统计） --------------------------
# Log line layout: timestamp with millisecond suffix, level, message.
LOG_FORMAT = "%(asctime)s.%(msecs)03d - %(levelname)s: %(message)s"
LOG_DATEFMT = "%Y-%m-%d %H:%M:%S"

# Configure the root logger once at import time (INFO and above).
logging.basicConfig(format=LOG_FORMAT, datefmt=LOG_DATEFMT, level=logging.INFO)


# -------------------------- 2. 动态路径解析（非硬编码，适配项目结构） --------------------------
def get_knowledge_paths():
    """
    Resolve the Knowledge_Base.txt input path and the cleaned output path.

    Layout assumed by this project:
    - input:  <project root>/data/Knowledge_Base.txt
    - output: <project root>/data/cleaned_Knowledge_Base.txt
    where <project root> is the parent of the directory containing this script.

    Returns:
        tuple[Path, Path]: (input_path, output_path)

    Raises:
        FileNotFoundError: if the input file does not exist.
    """
    # 1. Directory holding this script (src/), fully resolved.
    src_dir = Path(__file__).parent.resolve()
    # 2. Project root is the parent of src/.
    project_root = src_dir.parent.resolve()
    # 3. The data directory under the project root.
    data_dir = project_root / "data"
    # 4. Create data/ if missing.  mkdir(parents=True, exist_ok=True) is
    # race-free, unlike the original exists()-then-os.makedirs sequence
    # (which could raise if another process created the directory in between).
    if not data_dir.exists():
        data_dir.mkdir(parents=True, exist_ok=True)
        logging.warning(f"data目录不存在，已自动创建：{data_dir}")

    # 5. Build the input/output file paths.
    input_path = data_dir / "Knowledge_Base.txt"
    output_path = data_dir / "cleaned_Knowledge_Base.txt"

    # 6. Fail fast when the source knowledge base is absent.
    if not input_path.exists():
        raise FileNotFoundError(f"Knowledge_Base.txt 未找到！路径：{input_path}")

    return input_path, output_path


# -------------------------- 3. 通信领域通用规则（非硬编码，适配所有数据） --------------------------
class TelecomCleanRules:
    """Telecom-domain cleaning rules: term unification, format fixing, redundancy removal.

    Public attributes (kept stable for callers):
        term_unify_map:  variant spelling -> canonical term.
        full2half_map:   full-width -> half-width character mapping.
        core_keywords:   telecom keywords that mark a Chinese run as meaningful.
        invalid_pattern: regex matching whitespace/punctuation-only text.
    """

    def __init__(self):
        # 3.1 Term-unification rules (key: variant to normalize, value: canonical term).
        self.term_unify_map = self._build_term_unify_map()
        # Precompiled alternation over all variants, longest first so "DBm" wins
        # over "DB".  The alphanumeric lookarounds keep replacements word-bounded,
        # fixing the original bug where plain substring replace corrupted ordinary
        # words (e.g. "feedback" -> "feedBack" via the "db" -> "dB" rule).
        alternation = "|".join(
            re.escape(v) for v in sorted(self.term_unify_map, key=len, reverse=True)
        )
        self._term_pattern = re.compile(
            r"(?<![A-Za-z0-9])(?:" + alternation + r")(?![A-Za-z0-9])"
        )
        # 3.2 Full-width -> half-width mapping (kept as a dict for compatibility),
        # plus a translation table so fix_format converts in a single C-level pass.
        self.full2half_map = {
            "。": ".", "，": ",", "；": ";", "：": ":", "／": "/",
            "（": "(", "）": ")", "－": "-", "％": "%", "１": "1", "２": "2",
            "３": "3", "４": "4", "５": "5", "６": "6", "７": "7", "８": "8", "９": "9", "０": "0"
        }
        self._full2half_table = str.maketrans(self.full2half_map)
        # 3.3 Core telecom keywords used to decide that a Chinese run is valid text
        # (prevents the garbled-text filter from deleting meaningful sentences).
        self.core_keywords = [
            # Equipment vendors
            "华为", "中兴", "诺基亚", "爱立信", "三星", "大唐",
            # Hardware modules
            "UBBP", "VBP", "RRU", "BBU", "AAU", "CPRI", "SFP",
            # Technology standards
            "5G", "NR", "4G", "LTE", "FDD", "TDD", "GSM", "CDMA", "WCDMA",
            # KPI metrics
            "掉话率", "切换成功率", "接入成功率", "覆盖率", "RSRP", "RSRQ", "SINR", "CQI",
            # Specification parameters
            "信道带宽", "噪声系数", "解调门限", "发射功率", "接收灵敏度", "天线增益",
            # Units
            "dBm", "dB", "MHz", "GHz", "km", "mm", "m", "W", "Mbps", "Gbps"
        ]
        # 3.4 Invalid-text detector: matches empty / whitespace / punctuation-only text.
        self.invalid_pattern = re.compile(r'^[\s\.\,\;\:\*\/\-\(\)\=\%]*$')

    def _build_term_unify_map(self):
        """Build the variant -> canonical-term mapping covering known spellings."""
        term_map = {}
        # 3.4.1 Technology-standard casing (lower-case and upper-case variants).
        tech_terms = [("5g", "5G"), ("nr", "NR"), ("4g", "4G"), ("lte", "LTE"), ("fdd", "FDD"), ("tdd", "TDD")]
        for variant, standard in tech_terms:
            term_map[variant] = standard
            # Upper-case variant; for already-canonical spellings this is a no-op entry.
            term_map[variant.upper()] = standard

        # 3.4.2 Board/module names: "UBBP-D9"/"UBBP_D9" -> "UBBPd9", etc.
        term_map["UBBP-D9"] = "UBBPd9"
        term_map["UBBP_D9"] = "UBBPd9"
        term_map["VBP-D3"] = "VBPd3"
        term_map["VBP_D3"] = "VBPd3"

        # 3.4.3 Unit casing: "DBm" -> "dBm", "db" -> "dB", "MHZ" -> "MHz", ...
        unit_terms = [("DBm", "dBm"), ("Dbm", "dBm"), ("db", "dB"), ("DB", "dB"), ("MHZ", "MHz"), ("GHZ", "GHz")]
        for variant, standard in unit_terms:
            term_map[variant] = standard

        return term_map

    def unify_terms(self, text):
        """Replace term variants in *text* with their canonical spelling."""
        # Single pass, word-bounded, longest variant wins (see __init__).
        text = self._term_pattern.sub(lambda m: self.term_unify_map[m.group(0)], text)
        # Normalize antenna specs such as "1T 1R" -> "1T1R".
        text = re.sub(r'(\d+[TR])\s+(\d+[TR])', r'\1\2', text)
        return text

    def fix_format(self, text):
        """Fix formatting: width conversion, broken decimals, separator spacing."""
        # 3.4.1 Full-width -> half-width in one pass.
        text = text.translate(self._full2half_table)
        # 3.4.2 Repair decimals split by a stray space: "19. 92" -> "19.92".
        text = re.sub(r'(\d+)\. (\d+)', r'\1.\2', text)
        # 3.4.3 Insert a space after sentence separators so tokens don't stick
        # together.  The digit-aware split keeps decimal points intact — the
        # original blanket rule re-broke "19.92" into "19. 92" right after
        # having repaired it in the previous step.
        text = re.sub(r'\.(?=[a-zA-Z\u4e00-\u9fa5])', '. ', text)   # '.' before letter/CJK
        text = re.sub(r'(?<!\d)\.(?=\d)', '. ', text)               # '.' before digit, unless decimal
        text = re.sub(r'([;,])(?=[a-zA-Z0-9\u4e00-\u9fa5])', r'\1 ', text)  # after ';' / ','
        text = re.sub(r'\:(?=[a-zA-Z0-9])', r': ', text)            # after ':'
        # 3.4.4 Collapse runs of whitespace.
        text = re.sub(r'\s+', ' ', text).strip()
        return text

    def remove_redundancy(self, text):
        """Drop redundant text: punctuation-only lines and keyword-free Chinese noise."""
        # 3.4.1 Reject pure whitespace/punctuation lines outright.
        if self.invalid_pattern.match(text):
            return ""

        # 3.4.2 A run of >= 6 consecutive Chinese characters containing none of
        # the core keywords is treated as garbled/noise text and removed.
        def _keep(run):
            return len(run) < 6 or any(kw in run for kw in self.core_keywords)

        cleaned = re.sub(
            r'[\u4e00-\u9fa5]+',
            lambda m: m.group(0) if _keep(m.group(0)) else "",
            text,
        )
        # Re-check: stripping noise may leave only punctuation behind.
        return "" if self.invalid_pattern.match(cleaned) else cleaned


# -------------------------- 4. 核心清洗流程 --------------------------
def clean_knowledge_base():
    """
    Run the full cleaning pipeline over the knowledge base.

    Steps: resolve paths dynamically -> load cleaning rules -> read the raw
    file -> clean line by line (unify terms, fix format, drop redundancy) ->
    de-duplicate -> write the cleaned result and log statistics.

    Raises:
        FileNotFoundError: propagated from get_knowledge_paths() when the
            input file is missing.
    """
    # 4.1 Resolve input/output paths.
    input_path, output_path = get_knowledge_paths()
    logging.info(f"开始清洗知识库：{input_path}")

    # 4.2 Cleaning rules (stateless across lines; build once).
    clean_rules = TelecomCleanRules()

    # 4.3 Read, de-duplicate, and clean line by line.
    cleaned_lines = []
    seen_lines = set()   # raw (stripped) lines already processed, for de-duplication
    total_count = 0
    valid_count = 0
    duplicate_count = 0
    invalid_count = 0

    with open(input_path, "r", encoding="utf-8", errors="ignore") as f_in:
        for line_num, raw_line in enumerate(f_in, 1):
            total_count = line_num
            raw_line_stripped = raw_line.strip()

            # 4.3.1 Skip blank lines.
            if not raw_line_stripped:
                invalid_count += 1
                continue

            # 4.3.2 Skip exact duplicates of a line already seen.
            if raw_line_stripped in seen_lines:
                duplicate_count += 1
                continue
            seen_lines.add(raw_line_stripped)

            # 4.3.3 Cleaning pipeline: unify terms -> fix format -> drop redundancy.
            cleaned = clean_rules.remove_redundancy(
                clean_rules.fix_format(clean_rules.unify_terms(raw_line_stripped))
            )

            # 4.3.4 Keep only lines that survived cleaning.
            if cleaned:
                cleaned_lines.append(cleaned)
                valid_count += 1
            else:
                invalid_count += 1

            # 4.3.5 Progress report every 1000 lines.
            if line_num % 1000 == 0:
                logging.info(
                    f"进度：第{line_num}行，有效文本{valid_count}条，重复{duplicate_count}条，无效{invalid_count}条")

    # 4.4 Persist the cleaned result.
    with open(output_path, "w", encoding="utf-8") as f_out:
        f_out.write("\n".join(cleaned_lines))

    # 4.5 Final statistics.  Guard the denominator: the original version raised
    # ZeroDivisionError when the input file was empty (total_count == 0).
    denominator = total_count or 1
    logging.info(f"\n清洗完成！结果保存至：{output_path}")
    logging.info(f"总处理行数：{total_count}")
    logging.info(f"有效文本数：{valid_count}（占比{valid_count / denominator * 100:.2f}%）")
    logging.info(f"重复文本数：{duplicate_count}（占比{duplicate_count / denominator * 100:.2f}%）")
    logging.info(f"无效文本数：{invalid_count}（占比{invalid_count / denominator * 100:.2f}%）")


# -------------------------- 5. 执行清洗 --------------------------
if __name__ == "__main__":
    try:
        clean_knowledge_base()
    except Exception as e:
        # logging.exception == logging.error(..., exc_info=True): logs the
        # message at ERROR level together with the full traceback.
        logging.exception(f"清洗过程异常：{str(e)}")
