import logging
import os
import re
import time
from typing import Dict, List, Optional, Tuple

import pandas as pd

# Configure module-level logging (timestamp - level - message).
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)


class HardExampleMiner:
    """Hard-example miner: identifies high-value training samples.

    Each question is labelled with a difficulty level derived from (a) the
    number of correct options and (b) how many easily-confused telecom term
    pairs co-occur in the option text.  The "高难度" (hard) subset is then
    extracted into its own CSV for focused training.
    """

    def __init__(self, config_path: Optional[str] = None):
        """Initialize the miner.

        Args:
            config_path: Optional path to a configuration file.  Currently
                unused — ``_load_config`` returns a built-in default.
        """
        self.config = self._load_config(config_path)
        self.data_dir = self._get_data_dir()
        # Frequently-confused telecom term pairs used for difficulty scoring.
        self.confusing_pairs = self._load_confusing_pairs()
        logger.info(f"加载了 {len(self.confusing_pairs)} 个易混术语对")

    def _get_data_dir(self) -> str:
        """Resolve the data directory, creating a default one if needed.

        Resolution order: a hard-coded machine-specific path, then paths
        inferred from the script location / CWD, then a freshly created
        ``../data`` next to the script.
        """
        # NOTE(review): machine-specific absolute path; consider moving this
        # into configuration or an environment variable.
        specified_dir = r"D:\sjysds\pythonProject1\data"
        if os.path.exists(specified_dir):
            return specified_dir

        # Infer candidate locations relative to this script and the CWD.
        script_dir = os.path.dirname(os.path.abspath(__file__))
        possible_paths = [
            os.path.join(script_dir, "..", "data"),
            os.path.join(script_dir, "..", "..", "data"),
            os.path.join(os.getcwd(), "data"),
        ]

        for path in possible_paths:
            abs_path = os.path.abspath(path)
            if os.path.exists(abs_path):
                return abs_path

        # Nothing found: create and use the default location.
        default_dir = os.path.abspath(os.path.join(script_dir, "..", "data"))
        os.makedirs(default_dir, exist_ok=True)
        return default_dir

    def _load_config(self, config_path: Optional[str]) -> Dict:
        """Return the configuration dictionary.

        ``config_path`` is currently ignored; a hard-coded default (GBK in,
        GBK out) is always returned.
        """
        return {
            "input_encoding": "GBK",
            "output_encoding": "GBK"
        }

    def get_data_path(self, relative_path: str) -> str:
        """Return an absolute path under the data directory.

        Absolute inputs are returned unchanged; relative inputs are joined
        onto ``self.data_dir``.
        """
        if os.path.isabs(relative_path):
            return relative_path
        return os.path.join(self.data_dir, relative_path)

    def _load_confusing_pairs(self) -> List[Tuple[str, str]]:
        """Return the predefined list of frequently-confused telecom term pairs.

        A simplified static list is used instead of extracting pairs from a
        knowledge base.
        """
        base_pairs = [
            ("SC-FDMA", "OFDMA"), ("RRU", "BBU"), ("RSRP", "RSRQ"),
            ("掉话率", "切换成功率"), ("接入成功率", "切换成功率"),
            ("TDD", "FDD"), ("LTE", "NR"), ("4G", "5G"),
            ("天线增益", "功率增益"), ("信道带宽", "传输带宽"),
            ("上行", "下行"), ("发射功率", "接收灵敏度"),
            ("误码率", "误帧率"), ("信噪比", "信号强度"),
            ("宏基站", "微基站"), ("室内覆盖", "室外覆盖"),
            ("硬切换", "软切换"), ("同频", "异频"),
        ]
        return base_pairs

    def detect_confusing_terms_fast(self, option_text: str) -> int:
        """Count confusing term pairs that co-occur in ``option_text``.

        Returns the number of pairs where BOTH terms appear, capped at 2
        (counting stops early once the high-difficulty threshold is reached).
        NaN/empty input yields 0.
        """
        if pd.isna(option_text) or not option_text:
            return 0

        option_text = str(option_text)
        confusing_count = 0

        for term1, term2 in self.confusing_pairs:
            if term1 in option_text and term2 in option_text:
                confusing_count += 1
                # 2 co-occurring pairs already means "hard" — stop early.
                if confusing_count >= 2:
                    break

        return confusing_count

    def calculate_difficulty(self, correct_count: int, confusing_count: int) -> str:
        """Map (correct option count, confusing pair count) to a difficulty label.

        Rules: 3+ correct answers → 高难度; no correct answers → 简单;
        otherwise the confusing-term density decides (2+ → 高难度,
        1 → 中等, 0 → 简单).
        """
        if correct_count >= 3:
            return "高难度"
        if correct_count <= 0:
            return "简单"
        # 1 or 2 correct answers: difficulty driven by confusing-term density.
        # (The original duplicated identical branches for counts 1 and 2.)
        if confusing_count >= 2:
            return "高难度"
        if confusing_count == 1:
            return "中等"
        return "简单"

    def label_difficulty_batch(self, df: pd.DataFrame) -> pd.DataFrame:
        """Label every row of ``df`` with a difficulty level.

        Adds two columns to a copy of ``df``: ``confusing_count`` and
        ``difficulty``.  Rows that fail to process are labelled 简单/0 and
        logged as warnings.  Returns the new DataFrame.
        """
        logger.info("开始批量标注难度...")
        start_time = time.time()

        difficulties = []
        confusing_counts = []

        # Option columns whose text is scanned for confusing term pairs.
        option_columns = [f'opt_{opt}' for opt in ['A', 'B', 'C', 'D', 'E']]

        # enumerate() gives a guaranteed-integer position; the DataFrame's
        # label index may be non-integer, which would break `idx % 500`.
        for pos, (idx, row) in enumerate(df.iterrows()):
            if pos % 500 == 0:
                logger.info(f"处理进度: {pos}/{len(df)}")

            try:
                # Number of correct options; default to 1 when missing/NaN.
                correct_count = row.get('correct_count', 1)
                if pd.isna(correct_count):
                    correct_count = 1
                # Coerce floats/strings (e.g. "2") read from CSV to int.
                correct_count = int(correct_count)

                # Concatenate all present option texts into one search string.
                option_texts = []
                for col in option_columns:
                    if col in row and pd.notna(row[col]):
                        option_texts.append(str(row[col]))

                option_text = " ".join(option_texts)

                confusing_count = self.detect_confusing_terms_fast(option_text)
                confusing_counts.append(confusing_count)

                difficulty = self.calculate_difficulty(correct_count, confusing_count)
                difficulties.append(difficulty)

            except Exception as e:
                logger.warning(f"第{idx}条数据标注失败: {e}")
                difficulties.append("简单")
                confusing_counts.append(0)

        # Attach both result columns to a copy (the input is not mutated).
        df = df.copy()
        df['confusing_count'] = confusing_counts
        df['difficulty'] = difficulties

        logger.info(f"难度标注完成，耗时: {time.time() - start_time:.2f}秒")
        return df

    def safe_read_csv(self, file_path: str) -> pd.DataFrame:
        """Read a CSV trying GBK, then UTF-8, then GBK with bad bytes dropped.

        Raises whatever ``pd.read_csv`` raises for non-decoding failures
        (missing file, parse errors, ...).
        """
        try:
            df = pd.read_csv(file_path, encoding='GBK')
            logger.info("成功用GBK编码读取文件")
            return df
        except UnicodeDecodeError:
            pass

        try:
            df = pd.read_csv(file_path, encoding='utf-8')
            logger.info("成功用UTF-8编码读取文件")
            return df
        except UnicodeDecodeError:
            # Last resort: decode as GBK, silently dropping undecodable bytes.
            # BUGFIX: the original passed errors='ignore', which read_csv does
            # not accept (TypeError); the keyword is encoding_errors (pandas>=1.3).
            df = pd.read_csv(file_path, encoding='GBK', encoding_errors='ignore')
            logger.warning("使用GBK编码并忽略错误读取文件")
            return df

    def mine_hard_examples(self, input_file: str = "train_augmented_with_knowledge.csv",
                           output_file: str = "train_with_difficulty.csv",
                           hard_output_file: str = "train_hard.csv"):
        """Run the full mining pipeline.

        Reads ``input_file``, labels every row with a difficulty, saves the
        labelled data to ``output_file`` and the 高难度 subset to
        ``hard_output_file`` (both GBK-encoded).  Returns
        ``(labelled_df, hard_df)``.

        Raises:
            FileNotFoundError: if the input file does not exist.
        """
        input_path = self.get_data_path(input_file)

        if not os.path.exists(input_path):
            raise FileNotFoundError(f"输入文件未找到: {input_path}")

        logger.info(f"开始读取文件: {input_path}")
        df = self.safe_read_csv(input_path)
        logger.info(f"成功加载数据: {len(df)} 条")

        df = self.label_difficulty_batch(df)

        # Log the difficulty distribution for a quick sanity check.
        difficulty_stats = df['difficulty'].value_counts()
        logger.info("难度分布统计:")
        for level, count in difficulty_stats.items():
            percentage = count / len(df) * 100
            logger.info(f"  {level}: {count}条 ({percentage:.1f}%)")

        # Persist the fully labelled dataset.
        output_path = self.get_data_path(output_file)
        df.to_csv(output_path, encoding='GBK', index=False)
        logger.info(f"带难度标注的数据已保存: {output_path}")

        # Persist only the hard subset.
        hard_df = df[df['difficulty'] == "高难度"].copy()
        hard_output_path = self.get_data_path(hard_output_file)
        hard_df.to_csv(hard_output_path, encoding='GBK', index=False)
        logger.info(f"难例数据已保存: {hard_output_path} (共{len(hard_df)}条)")

        return df, hard_df


def main():
    """Entry point: run hard-example mining and print a short summary."""
    miner = HardExampleMiner()

    try:
        logger.info("开始难例挖掘...")
        full_df, hard_df = miner.mine_hard_examples()

        print(f"\n处理完成!")
        print(f"总数据量: {len(full_df)} 条")
        print(f"难例数据量: {len(hard_df)} 条")
        # Guard against an empty dataset: the original divided by len(full_df)
        # unconditionally, turning a 0-row input into a misleading
        # "难例挖掘失败: division by zero" report.
        if len(full_df) > 0:
            print(f"难例比例: {len(hard_df) / len(full_df) * 100:.1f}%")

    except Exception as e:
        logger.error(f"难例挖掘失败: {e}")
        import traceback
        traceback.print_exc()


if __name__ == "__main__":
    main()