import ast
import json
import logging
import os
import re
from typing import List, Dict, Tuple

import numpy as np
import pandas as pd

# Configure module-wide logging: INFO level, timestamped "time - level - message" lines.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)


class TestDataProcessor:
    """End-to-end processor for the multiple-choice test set.

    Pipeline: load the raw CSV, standardize column names, clean the
    question/option text, split options into per-letter columns, attach
    retrieved background knowledge, and save the enriched CSV.
    """

    def __init__(self, data_dir: str = None):
        # Default to the project's local data directory when none is given.
        self.data_dir = data_dir or r"D:\sjysds\pythonProject1\data"
        self.text_cleaner = TextCleaner()
        # Created lazily by initialize_knowledge_retriever().
        self.knowledge_retriever = None

    def get_data_path(self, relative_path: str) -> str:
        """Resolve a path against the data directory; absolute paths pass through."""
        if os.path.isabs(relative_path):
            return relative_path
        return os.path.join(self.data_dir, relative_path)

    def initialize_knowledge_retriever(self):
        """Initialize the knowledge retriever, falling back to TF-IDF on any failure."""
        try:
            # Prefer the project's dedicated retriever when it is importable.
            from src.train_preprocessing.KnowledgeRetriever import KnowledgeRetriever
            self.knowledge_retriever = KnowledgeRetriever()
            logger.info("知识检索器初始化成功")
        except ImportError:
            logger.warning("无法导入KnowledgeRetriever，将使用TF-IDF备选方案")
            self.knowledge_retriever = TFIDFKnowledgeRetriever(self.data_dir)
        except Exception as e:
            logger.error(f"知识检索器初始化失败: {e}")
            self.knowledge_retriever = TFIDFKnowledgeRetriever(self.data_dir)

    def load_test_data(self, test_file: str = "test.csv") -> pd.DataFrame:
        """Load the test CSV, trying encodings common for Chinese data.

        Args:
            test_file: file name (or absolute path) of the test set.

        Returns:
            The raw test-set DataFrame.

        Raises:
            FileNotFoundError: if the file is missing.
        """
        test_path = self.get_data_path(test_file)

        if not os.path.exists(test_path):
            raise FileNotFoundError(f"测试集文件未找到: {test_path}")

        # Try encodings in order; GBK first since the data originates on zh-CN Windows.
        encodings = ['GBK', 'utf-8', 'GB2312']
        for encoding in encodings:
            try:
                df = pd.read_csv(test_path, encoding=encoding)
                logger.info(f"成功用 {encoding} 编码读取测试集，共{len(df)}条记录")
                return df
            except UnicodeDecodeError:
                continue

        # Last resort: decode as GBK, dropping undecodable bytes.
        # BUG FIX: pandas.read_csv has no `errors` keyword (the original call
        # raised TypeError). Pass an already-decoded text handle opened with
        # errors='ignore' instead, which works on every pandas version.
        with open(test_path, encoding='GBK', errors='ignore') as f:
            df = pd.read_csv(f)
        logger.warning("使用GBK编码并忽略错误读取测试集")
        return df

    def standardize_test_format(self, df: pd.DataFrame) -> pd.DataFrame:
        """Rename raw columns to the internal schema and ensure an id column."""
        field_mapping = {
            'index': 'test_question_id',
            'Question': 'raw_question',
            'Options': 'raw_options'
        }

        # Only rename columns actually present in the input.
        existing_mapping = {k: v for k, v in field_mapping.items() if k in df.columns}
        df_standardized = df.rename(columns=existing_mapping)

        # Guarantee a 1-based question id even when the source had none.
        if 'test_question_id' not in df_standardized.columns:
            df_standardized['test_question_id'] = range(1, len(df_standardized) + 1)

        return df_standardized

    def clean_text_fields(self, df: pd.DataFrame) -> pd.DataFrame:
        """Clean the question text and parse/clean the options cell."""
        df_clean = df.copy()

        # Clean question text; missing values become empty strings.
        df_clean['cleaned_question'] = df_clean['raw_question'].apply(
            lambda x: self.text_cleaner.clean_general_text(x) if pd.notna(x) else ""
        )

        def clean_options(options_str):
            """Parse one options cell into a list of cleaned option strings."""
            # Lists first: pd.isna() on a list returns an array, not a bool,
            # which would make the `if` below raise ValueError.
            if isinstance(options_str, list):
                return [self.text_cleaner.clean_option_text(opt) for opt in options_str]
            if pd.isna(options_str):
                return []
            try:
                if isinstance(options_str, str):
                    # Stringified Python list, e.g. "['A. x', 'B. y']".
                    if options_str.startswith('[') and options_str.endswith(']'):
                        # SECURITY FIX: ast.literal_eval instead of eval, so a
                        # malicious CSV cell cannot execute arbitrary code.
                        options_list = ast.literal_eval(options_str)
                        if isinstance(options_list, list):
                            return [self.text_cleaner.clean_option_text(str(opt)) for opt in options_list]
                    # Any other string: treat the whole cell as one option.
                    return [self.text_cleaner.clean_option_text(options_str)]
                else:
                    return [self.text_cleaner.clean_option_text(str(options_str))]
            except (ValueError, SyntaxError, TypeError):
                # Unparseable cell -> no options (best-effort, as before).
                return []

        df_clean['cleaned_options_list'] = df_clean['raw_options'].apply(clean_options)

        return df_clean

    def split_options(self, df: pd.DataFrame) -> pd.DataFrame:
        """Expand cleaned_options_list into opt_A..opt_E columns plus a count."""
        df_split = df.copy()

        # Ensure the five option columns exist (empty string by default).
        opt_columns = ['opt_A', 'opt_B', 'opt_C', 'opt_D', 'opt_E']
        for col in opt_columns:
            if col not in df_split.columns:
                df_split[col] = ''

        # Fill the option columns from the parsed list (at most five).
        for idx, options_list in df_split['cleaned_options_list'].items():
            if isinstance(options_list, list):
                for i, opt_content in enumerate(options_list[:5]):
                    if i < len(opt_columns):
                        df_split.at[idx, opt_columns[i]] = str(opt_content) if pd.notna(opt_content) else ''

        # Number of options actually present (capped at five).
        df_split['option_count'] = df_split['cleaned_options_list'].apply(
            lambda x: min(len(x), 5) if isinstance(x, list) else 0
        )

        return df_split

    def associate_knowledge(self, df: pd.DataFrame) -> pd.DataFrame:
        """Attach retrieved knowledge to each row and build the augmented input.

        Adds three columns: relevant_knowledge_ids (JSON list of unit ids),
        relevant_knowledge (concatenated knowledge text for the question) and
        augmented_input (question + all retrieved knowledge).
        """
        if self.knowledge_retriever is None:
            self.initialize_knowledge_retriever()

        df_knowledge = df.copy()

        # New columns default to None so failed rows stay explicitly empty.
        df_knowledge['relevant_knowledge_ids'] = None
        df_knowledge['relevant_knowledge'] = None
        df_knowledge['augmented_input'] = None

        logger.info("开始关联知识...")

        # Positional counter for progress logging (the index may not be 0-based).
        for pos, (idx, row) in enumerate(df_knowledge.iterrows()):
            if pos % 100 == 0:
                logger.info(f"知识关联进度: {idx}/{len(df_knowledge)}")

            try:
                question = row.get('cleaned_question', '')
                if not question or pd.isna(question):
                    continue

                # Knowledge for the question itself (top 3 units).
                knowledge_ids, question_knowledge = self.knowledge_retriever.retrieve_relevant_knowledge(
                    question, top_k=3)

                # One knowledge unit per non-empty option.
                option_knowledges = []
                for opt in ['A', 'B', 'C', 'D', 'E']:
                    opt_col = f'opt_{opt}'
                    if opt_col in row and pd.notna(row[opt_col]):
                        option_text = str(row[opt_col])
                        if option_text.strip():
                            opt_ids, opt_knowledge = self.knowledge_retriever.retrieve_relevant_knowledge(
                                option_text, top_k=1)
                            if opt_knowledge:
                                option_knowledges.append(f"选项{opt}相关知识: {opt_knowledge}")

                option_knowledge_text = "；".join(option_knowledges)

                # Combine question-level and option-level knowledge.
                all_knowledge = question_knowledge
                if option_knowledge_text:
                    all_knowledge += f"；{option_knowledge_text}"

                # The augmented input fed to the downstream model.
                augmented_input = f"问题：{question}；相关知识：{all_knowledge}"

                df_knowledge.at[idx, 'relevant_knowledge_ids'] = json.dumps(knowledge_ids, ensure_ascii=False)
                df_knowledge.at[idx, 'relevant_knowledge'] = question_knowledge
                df_knowledge.at[idx, 'augmented_input'] = augmented_input

            except Exception as e:
                # Best-effort per row: log and move on so one bad row
                # doesn't abort the whole batch.
                logger.error(f"第{idx}条数据知识关联失败: {e}")
                continue

        return df_knowledge

    def process_test_data(self, test_file: str = "test.csv",
                          output_file: str = "test_processed.csv") -> pd.DataFrame:
        """Run the full test-set pipeline and save the result.

        Args:
            test_file: input test-set file name.
            output_file: output file name.

        Returns:
            The fully processed DataFrame.
        """
        logger.info("开始处理测试集...")

        # 1. Load raw data.
        df = self.load_test_data(test_file)
        # 2. Standardize column names.
        df = self.standardize_test_format(df)
        # 3. Clean text fields.
        df = self.clean_text_fields(df)
        # 4. Split options into per-letter columns.
        df = self.split_options(df)
        # 5. Attach retrieved knowledge.
        df = self.associate_knowledge(df)

        # 6. Persist (GBK to match the input's encoding).
        output_path = self.get_data_path(output_file)
        df.to_csv(output_path, encoding='GBK', index=False)
        logger.info(f"测试集处理完成，已保存至: {output_path}")

        # 7. Log summary statistics.
        self._print_processing_stats(df)

        return df

    def _print_processing_stats(self, df: pd.DataFrame):
        """Log summary statistics and up to two example rows."""
        logger.info("\n测试集处理统计:")
        logger.info(f"总题数: {len(df)}")
        logger.info(f"有效问题数: {df['cleaned_question'].notna().sum()}")

        if 'relevant_knowledge' in df.columns:
            knowledge_coverage = df['relevant_knowledge'].notna().sum() / len(df) * 100
            logger.info(f"知识覆盖率: {knowledge_coverage:.1f}%")

        # Show up to two example rows (the unused column list was removed).
        logger.info("\n处理结果示例:")
        if len(df) > 0:
            for idx, row in df.head(2).iterrows():
                logger.info(f"\n问题ID: {row.get('test_question_id', 'N/A')}")
                logger.info(f"问题: {row.get('cleaned_question', '')}")
                if 'relevant_knowledge' in row and pd.notna(row['relevant_knowledge']):
                    logger.info(f"相关知识: {row['relevant_knowledge'][:100]}...")


# 备用的TF-IDF知识检索器（如果KnowledgeRetriever不可用）
class TFIDFKnowledgeRetriever:
    """Fallback TF-IDF knowledge retriever (used when KnowledgeRetriever is unavailable)."""

    def __init__(self, data_dir: str):
        self.data_dir = data_dir
        # Retrieval state; stays None/empty when _initialize() fails so that
        # retrieve_relevant_knowledge degrades to an empty result.
        self.vectorizer = None
        self.knowledge_vectors = None
        self.knowledge_unit_ids = []
        self.unit_id_to_content = {}
        self._initialize()

    def _initialize(self):
        """Load knowledge_units.jsonl and build the TF-IDF index over core contents."""
        try:
            # Imported lazily so this module still imports without scikit-learn.
            from sklearn.feature_extraction.text import TfidfVectorizer

            knowledge_file = os.path.join(self.data_dir, "knowledge_units.jsonl")
            if not os.path.exists(knowledge_file):
                logger.warning("知识库文件未找到，无法进行知识检索")
                return

            knowledge_units = []
            with open(knowledge_file, 'r', encoding='utf-8') as f:
                for line in f:
                    try:
                        unit = json.loads(line.strip())
                        # Touch both required keys before accepting the unit,
                        # so a partial record can't poison the index below
                        # (the old code appended first, then a later KeyError
                        # in the comprehension aborted the whole init).
                        self.unit_id_to_content[unit['unit_id']] = unit['core_content']
                        knowledge_units.append(unit)
                    except (json.JSONDecodeError, KeyError):
                        # Skip malformed JSON lines or units missing required keys.
                        continue

            self.knowledge_unit_ids = [unit['unit_id'] for unit in knowledge_units]
            knowledge_contents = [unit['core_content'] for unit in knowledge_units]

            # Build the TF-IDF matrix (vocabulary capped at 5000 terms).
            self.vectorizer = TfidfVectorizer(max_features=5000)
            self.knowledge_vectors = self.vectorizer.fit_transform(knowledge_contents)

            logger.info(f"TF-IDF检索器初始化完成，加载{len(knowledge_units)}个知识单元")

        except Exception as e:
            logger.error(f"TF-IDF检索器初始化失败: {e}")

    def retrieve_relevant_knowledge(self, query: str, top_k: int = 3) -> Tuple[List[str], str]:
        """Return (unit_ids, concatenated_text) for the top_k most similar units.

        Returns ([], "") when the index was never built or retrieval fails.
        """
        if self.vectorizer is None or self.knowledge_vectors is None:
            return [], ""

        try:
            from sklearn.metrics.pairwise import cosine_similarity

            # Vectorize the query and rank all knowledge units by cosine similarity.
            query_vec = self.vectorizer.transform([query])
            similarities = cosine_similarity(query_vec, self.knowledge_vectors)

            # Indices of the top_k most similar units, best first.
            top_indices = similarities.argsort()[0][-top_k:][::-1]

            relevant_ids = []
            knowledge_texts = []

            for i in top_indices:
                if i < len(self.knowledge_unit_ids):
                    unit_id = self.knowledge_unit_ids[i]
                    relevant_ids.append(unit_id)
                    knowledge_texts.append(self.unit_id_to_content.get(unit_id, ""))

            return relevant_ids, "；".join(knowledge_texts)

        except Exception as e:
            logger.warning(f"TF-IDF检索失败: {e}")
            return [], ""


# 文本清洗器（复用之前的实现）
class TextCleaner:
    """文本清洗器"""

    @staticmethod
    def clean_general_text(text: str) -> str:
        """通用文本清洗"""
        if pd.isna(text):
            return ""
        text = str(text).strip()
        # 简单的清理逻辑
        text = re.sub(r'\s+', ' ', text)
        return text

    @staticmethod
    def clean_option_text(option: str) -> str:
        """选项文本清洗"""
        cleaned = TextCleaner.clean_general_text(option)
        # 去除选项前缀
        cleaned = re.sub(r'^[A-E][:.、\s]*', '', cleaned)
        return cleaned


def main():
    """Entry point: process the test set and preview a few augmented inputs."""
    processor = TestDataProcessor()

    try:
        result_df = processor.process_test_data(
            test_file="test.csv",
            output_file="test_processed.csv"
        )

        print(f"\n测试集处理完成!")
        print(f"处理后的数据量: {len(result_df)} 条")

        # Preview up to three augmented inputs when the column is present.
        if len(result_df) > 0 and 'augmented_input' in result_df.columns:
            print("\n增强输入示例:")
            preview = result_df['augmented_input'].head(3)
            for pos, value in enumerate(preview, start=1):
                if pd.notna(value):
                    print(f"\n示例 {pos}:")
                    print(f"{value[:200]}...")

    except Exception as e:
        logger.error(f"测试集处理失败: {e}")
        import traceback
        traceback.print_exc()


# Script entry point: run the full test-set processing pipeline.
if __name__ == "__main__":
    main()
