import argparse
import json
import logging
import os
import re
from typing import Dict, List, Optional, Tuple

import chardet
import numpy as np
import pandas as pd

# Configure module-wide logging: timestamped INFO-level messages to stderr.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)


class KnowledgeRetriever:
    """Knowledge retriever: semantically links questions to a knowledge base.

    Retrieval uses a Sentence-BERT model plus a FAISS inner-product index
    when both are available, and transparently falls back to a TF-IDF /
    cosine-similarity index otherwise.
    """

    def __init__(self, config_path: Optional[str] = None):
        """Initialize configuration, data directory and empty retrieval state.

        Args:
            config_path: Optional path to a JSON file whose keys override
                the built-in default configuration.
        """
        self.config = self._load_config(config_path)
        self.data_dir = self._get_data_dir()
        self.model = None             # SentenceTransformer, or None => TF-IDF fallback
        self.knowledge_index = None   # FAISS index built over knowledge vectors
        self.knowledge_vectors = None  # dense ndarray (FAISS) or sparse matrix (TF-IDF)
        self.knowledge_units = []     # raw knowledge-unit dicts from the JSONL file
        self.unit_id_to_content = {}  # unit_id -> core_content lookup

    def _get_data_dir(self) -> str:
        """Locate the data directory, creating a default one when none exists.

        Tries a hard-coded Windows path first, then several locations
        relative to this script and the current working directory.
        """
        specified_dir = r"D:\sjysds\pythonProject1\data"
        if os.path.exists(specified_dir):
            return specified_dir

        script_dir = os.path.dirname(os.path.abspath(__file__))
        possible_paths = [
            os.path.join(script_dir, "..", "data"),
            os.path.join(script_dir, "..", "..", "data"),
            os.path.join(os.getcwd(), "data"),
        ]

        for path in possible_paths:
            abs_path = os.path.abspath(path)
            if os.path.exists(abs_path):
                return abs_path

        # Nothing found: create and use <script_dir>/../data as a fallback.
        default_dir = os.path.abspath(os.path.join(script_dir, "..", "data"))
        os.makedirs(default_dir, exist_ok=True)
        return default_dir

    def _load_config(self, config_path: Optional[str]) -> Dict:
        """Build the runtime configuration.

        Starts from built-in defaults; if ``config_path`` names a readable
        JSON file, its keys override the defaults.

        Args:
            config_path: Optional path to a JSON config file.

        Returns:
            The merged configuration dict.
        """
        default_config = {
            "model_path": os.path.join(r"D:\sjysds\pythonProject1", "models", "all-MiniLM-L6-v2"),
            "question_top_k": 3,
            "option_top_k": 1,
            "normalize_vectors": True,
            "batch_size": 32,
            "input_encoding": "GBK",    # encoding assumed for input files
            "output_encoding": "utf-8"  # encoding used for output files
        }

        # Bug fix: the config_path argument was previously accepted but
        # silently ignored; honor it as a best-effort JSON override.
        if config_path and os.path.exists(config_path):
            try:
                with open(config_path, 'r', encoding='utf-8') as f:
                    default_config.update(json.load(f))
            except (OSError, json.JSONDecodeError) as e:
                logger.warning(f"读取配置文件失败: {e}")

        return default_config

    def get_data_path(self, relative_path: str) -> str:
        """Resolve ``relative_path`` under the data directory.

        Absolute paths are returned unchanged; otherwise the parent
        directory of the resolved path is created on demand.
        """
        if os.path.isabs(relative_path):
            return relative_path

        absolute_path = os.path.join(self.data_dir, relative_path)

        # Make sure the parent directory exists so callers can write to it.
        dir_path = os.path.dirname(absolute_path)
        if dir_path and not os.path.exists(dir_path):
            os.makedirs(dir_path, exist_ok=True)

        return absolute_path

    def detect_encoding(self, file_path: str) -> str:
        """Detect a file's text encoding with chardet.

        Falls back to the configured input encoding when detection fails,
        returns no encoding, or reports confidence below 0.7.
        """
        try:
            with open(file_path, 'rb') as f:
                raw_data = f.read(10000)  # sample the first 10000 bytes
                result = chardet.detect(raw_data)
                encoding = result['encoding']
                confidence = result['confidence']

                logger.info(f"检测到文件编码: {encoding} (置信度: {confidence:.2f})")

                # Robustness: chardet may return encoding=None; treat that,
                # like low confidence, as "use the configured encoding".
                if not encoding or confidence < 0.7:
                    encoding = self.config['input_encoding']
                    logger.info(f"使用配置的编码: {encoding}")

                return encoding
        except Exception as e:
            logger.warning(f"编码检测失败: {e}, 使用默认编码: {self.config['input_encoding']}")
            return self.config['input_encoding']

    def clean_special_characters(self, text: str) -> str:
        """Strip control and non-whitelisted characters from ``text``.

        Keeps CJK ideographs, ASCII alphanumerics, CJK/full-width symbol
        blocks, common ASCII punctuation and curly quotes; collapses runs
        of whitespace to single spaces. NaN/None inputs become "".
        """
        if pd.isna(text):
            return ""

        text = str(text)
        # Remove C0/C1 control characters (and DEL).
        text = re.sub(r'[\x00-\x1f\x7f-\x9f]', '', text)
        # Drop anything outside the whitelist of safely encodable characters.
        text = re.sub(
            r'[^\u4e00-\u9fa5\u0030-\u0039\u0041-\u005a\u0061-\u007a\u3000-\u303f\uff00-\uffef\s\.\,\;\:\-\+\(\)\[\]\{\}\u201c\u201d\u2018\u2019]',
            '', text)
        # Collapse consecutive whitespace into single spaces.
        text = re.sub(r'\s+', ' ', text).strip()
        return text

    def safe_read_csv(self, file_path: str) -> pd.DataFrame:
        """Read a CSV file, trying several encodings before giving up.

        As a last resort, the file is decoded as latin-1 (errors ignored),
        cleaned line by line, written to a temporary UTF-8 file and re-read.

        Raises:
            Exception: if the file cannot be read by any strategy.
        """
        encodings_to_try = [
            self.config['input_encoding'],  # configured encoding first
            'GBK',
            'GB2312',
            'utf-8',
            'latin-1'
        ]

        for encoding in encodings_to_try:
            try:
                logger.info(f"尝试用 {encoding} 编码读取文件...")
                df = pd.read_csv(file_path, encoding=encoding)
                logger.info(f"成功用 {encoding} 编码读取文件")
                return df
            except Exception as e:
                # Covers UnicodeDecodeError and parser errors alike
                # (the two original except clauses were identical).
                logger.warning(f"{encoding} 编码读取失败: {e}")
                continue

        # All encodings failed: decode permissively, clean, and retry.
        temp_path = self.get_data_path("temp_cleaned.csv")
        try:
            logger.warning("所有编码尝试失败，尝试二进制读取并清理...")
            with open(file_path, 'rb') as f:
                content = f.read()

            # Bug fix: clean line by line so CSV row structure survives
            # (clean_special_characters collapses ALL whitespace, which
            # previously flattened the whole file onto one line).
            lines = content.decode('latin-1', errors='ignore').splitlines()
            cleaned_content = "\n".join(self.clean_special_characters(ln) for ln in lines)

            with open(temp_path, 'w', encoding='utf-8') as f:
                f.write(cleaned_content)

            df = pd.read_csv(temp_path, encoding='utf-8')
            logger.info("通过二进制清理方式成功读取文件")
            return df

        except Exception as e:
            raise Exception(f"无法读取文件 {file_path}: {e}")
        finally:
            # Bug fix: remove the temp file even when the re-read fails.
            if os.path.exists(temp_path):
                os.remove(temp_path)

    def load_knowledge_base(self) -> List[Dict]:
        """Load structured knowledge units from data/knowledge_units.jsonl.

        Malformed JSON lines and blank lines are skipped. Also populates
        the ``unit_id_to_content`` lookup used to render search results.

        Raises:
            FileNotFoundError: if the JSONL file is missing.
            Exception: on any other read failure.
        """
        knowledge_file = self.get_data_path("knowledge_units.jsonl")
        if not os.path.exists(knowledge_file):
            raise FileNotFoundError(f"知识库文件未找到: {knowledge_file}")

        knowledge_units = []
        try:
            with open(knowledge_file, 'r', encoding='utf-8') as f:
                for line_num, line in enumerate(f, 1):
                    line = line.strip()
                    if not line:
                        continue  # skip blank lines instead of parsing them
                    try:
                        unit = json.loads(line)
                    except json.JSONDecodeError:
                        continue  # tolerate malformed lines
                    # Sanitize content before it is embedded or concatenated.
                    if 'core_content' in unit:
                        unit['core_content'] = self.clean_special_characters(unit['core_content'])
                    knowledge_units.append(unit)
                    self.unit_id_to_content[unit['unit_id']] = unit['core_content']

            logger.info(f"成功加载 {len(knowledge_units)} 个知识单元")
            return knowledge_units

        except Exception as e:
            raise Exception(f"加载知识库失败: {e}")

    def initialize_retriever(self):
        """Initialize the embedding model, knowledge base and search index.

        Tries a local Sentence-BERT model (downloading on first use); on
        any failure ``self.model`` stays None and TF-IDF is used later.
        Reuses a previously saved FAISS index when one exists on disk.

        Raises:
            ImportError: if sentence-transformers is not installed.
        """
        try:
            from sentence_transformers import SentenceTransformer
        except ImportError:
            raise ImportError("请安装所需库: pip install sentence-transformers")

        model_path = self.config['model_path']
        if not os.path.exists(model_path):
            logger.info(f"模型路径不存在，创建目录: {model_path}")
            os.makedirs(model_path, exist_ok=True)

            try:
                self.model = SentenceTransformer('all-MiniLM-L6-v2',
                                                 cache_folder=model_path)
            except Exception:  # bug fix: was a bare except (caught SystemExit etc.)
                logger.warning("无法下载模型，使用TF-IDF作为备选方案")
                self.model = None
                return
        else:
            try:
                self.model = SentenceTransformer(model_path)
            except Exception:  # bug fix: was a bare except
                logger.warning("加载本地模型失败，使用TF-IDF作为备选方案")
                self.model = None
                return

        # Load the knowledge units the index is built over.
        self.knowledge_units = self.load_knowledge_base()

        # Reuse a previously built index when both artifacts are present.
        index_path = self.get_data_path("semantic_index.faiss")
        vectors_path = self.get_data_path("knowledge_vectors.npy")

        if os.path.exists(index_path) and os.path.exists(vectors_path):
            try:
                import faiss
                self.knowledge_index = faiss.read_index(index_path)
                self.knowledge_vectors = np.load(vectors_path)
                # Bug fix: the cached index does not store unit ids, so the
                # id list that search results map through must be rebuilt;
                # previously this path left knowledge_unit_ids undefined.
                self.knowledge_unit_ids = [unit['unit_id'] for unit in self.knowledge_units]
                logger.info("加载预构建的语义索引")
                return
            except Exception:  # bug fix: was a bare except
                logger.warning("加载预构建索引失败，重新构建")

        # No reusable index: build one from scratch.
        self.build_semantic_index()

    def build_semantic_index(self):
        """Encode all knowledge units and build (and persist) a FAISS index."""
        if self.model is None:
            # No embedding model available: fall back to TF-IDF.
            self.build_tfidf_index()
            return

        logger.info("开始构建语义索引...")

        knowledge_contents = [unit['core_content'] for unit in self.knowledge_units]
        self.knowledge_unit_ids = [unit['unit_id'] for unit in self.knowledge_units]

        logger.info("生成知识向量...")
        self.knowledge_vectors = self.model.encode(
            knowledge_contents,
            batch_size=self.config['batch_size'],
            show_progress_bar=True,
            convert_to_numpy=True
        )

        # L2-normalize so inner product equals cosine similarity.
        if self.config['normalize_vectors']:
            from sklearn.preprocessing import normalize
            self.knowledge_vectors = normalize(self.knowledge_vectors, axis=1, norm='l2')

        import faiss
        dimension = self.knowledge_vectors.shape[1]
        self.knowledge_index = faiss.IndexFlatIP(dimension)  # exact inner-product search
        self.knowledge_index.add(self.knowledge_vectors)

        # Persist artifacts so later runs can skip re-encoding.
        index_path = self.get_data_path("semantic_index.faiss")
        vectors_path = self.get_data_path("knowledge_vectors.npy")

        faiss.write_index(self.knowledge_index, index_path)
        np.save(vectors_path, self.knowledge_vectors)

        logger.info("语义索引构建完成")

    def build_tfidf_index(self):
        """Build a TF-IDF index over the knowledge base (fallback path)."""
        from sklearn.feature_extraction.text import TfidfVectorizer

        logger.info("使用TF-IDF构建语义索引...")

        # Load the knowledge base only if it has not been loaded yet
        # (initialize_retriever may have bailed out before loading it).
        if not self.knowledge_units:
            self.knowledge_units = self.load_knowledge_base()
        knowledge_contents = [unit['core_content'] for unit in self.knowledge_units]
        self.knowledge_unit_ids = [unit['unit_id'] for unit in self.knowledge_units]

        self.vectorizer = TfidfVectorizer(max_features=5000, stop_words=None)
        self.knowledge_vectors = self.vectorizer.fit_transform(knowledge_contents)

        logger.info("TF-IDF索引构建完成")

    def retrieve_relevant_knowledge_tfidf(self, query: str, top_k: int = 3) -> Tuple[List[str], str]:
        """Retrieve the top-k knowledge units for ``query`` via TF-IDF.

        Returns:
            (unit ids, matched knowledge texts joined with "；").
        """
        # Lazily build the TF-IDF index on first use.
        if not hasattr(self, 'vectorizer'):
            self.build_tfidf_index()

        cleaned_query = self.clean_special_characters(query)

        query_vec = self.vectorizer.transform([cleaned_query])

        from sklearn.metrics.pairwise import cosine_similarity
        similarities = cosine_similarity(query_vec, self.knowledge_vectors)

        # Indices of the top-k most similar units, best first.
        top_indices = similarities.argsort()[0][-top_k:][::-1]

        relevant_ids = []
        knowledge_texts = []

        for idx in top_indices:
            unit_id = self.knowledge_unit_ids[idx]
            relevant_ids.append(unit_id)
            knowledge_texts.append(self.unit_id_to_content.get(unit_id, ""))

        concatenated_knowledge = "；".join(knowledge_texts)
        return relevant_ids, concatenated_knowledge

    def retrieve_relevant_knowledge(self, query: str, top_k: int = 3) -> Tuple[List[str], str]:
        """Retrieve the top-k knowledge units most relevant to ``query``.

        Uses the Sentence-BERT/FAISS index when available; any failure on
        that path falls back to TF-IDF retrieval.

        Returns:
            (unit ids, matched knowledge texts joined with "；").
        """
        cleaned_query = self.clean_special_characters(query)

        # Bug fix: __init__ always sets the knowledge_index attribute, so
        # hasattr() was always True; test the value instead.
        if self.model is not None and self.knowledge_index is not None:
            try:
                query_vector = self.model.encode([cleaned_query], convert_to_numpy=True)

                # Match the normalization applied to the indexed vectors.
                if self.config['normalize_vectors']:
                    from sklearn.preprocessing import normalize
                    query_vector = normalize(query_vector, axis=1, norm='l2')

                similarities, indices = self.knowledge_index.search(query_vector, top_k)

                relevant_ids = []
                knowledge_texts = []

                # Bug fix: FAISS pads with -1 when fewer than top_k vectors
                # exist, so reject negative as well as out-of-range indices.
                for idx in indices[0]:
                    if 0 <= idx < len(self.knowledge_unit_ids):
                        unit_id = self.knowledge_unit_ids[idx]
                        relevant_ids.append(unit_id)
                        knowledge_texts.append(self.unit_id_to_content.get(unit_id, ""))

                concatenated_knowledge = "；".join(knowledge_texts)
                return relevant_ids, concatenated_knowledge

            except Exception as e:
                logger.warning(f"Sentence-BERT检索失败: {e}, 使用TF-IDF")

        # Fallback path.
        return self.retrieve_relevant_knowledge_tfidf(cleaned_query, top_k)

    def augment_train_data(self, train_file: str = "train_with_labels.csv",
                           output_file: str = "train_augmented_with_knowledge.csv") -> pd.DataFrame:
        """Augment training data with retrieved knowledge and save as CSV.

        For each row, retrieves knowledge for the question and for every
        non-empty option, then fills the new columns
        relevant_knowledge_ids / relevant_knowledge / option_knowledge /
        augmented_input. Rows that fail are logged and skipped.

        Args:
            train_file: Input CSV file name (relative to the data dir).
            output_file: Output CSV file name (relative to the data dir).

        Returns:
            The augmented DataFrame.

        Raises:
            FileNotFoundError: if the training CSV is missing.
        """
        train_path = self.get_data_path(train_file)
        if not os.path.exists(train_path):
            raise FileNotFoundError(f"训练数据未找到: {train_path}")

        train_df = self.safe_read_csv(train_path)
        logger.info(f"加载训练数据: {len(train_df)} 条")

        # Sanitize all free-text columns up front.
        text_columns = ['cleaned_question', 'opt_A', 'opt_B', 'opt_C', 'opt_D', 'opt_E']
        for col in text_columns:
            if col in train_df.columns:
                train_df[col] = train_df[col].apply(lambda x: self.clean_special_characters(x) if pd.notna(x) else x)

        self.initialize_retriever()

        # New output columns.
        train_df['relevant_knowledge_ids'] = None
        train_df['relevant_knowledge'] = None
        train_df['option_knowledge'] = None
        train_df['augmented_input'] = None

        # Consistency fix: use the configured top-k values instead of the
        # hard-coded 3 and 1 (the defaults are identical, so behavior with
        # the default config is unchanged).
        question_top_k = self.config['question_top_k']
        option_top_k = self.config['option_top_k']

        for idx, row in train_df.iterrows():
            if idx % 100 == 0:
                logger.info(f"处理进度: {idx}/{len(train_df)}")

            try:
                question = row.get('cleaned_question', '')
                if not question or pd.isna(question):
                    continue

                # Knowledge related to the question itself.
                knowledge_ids, question_knowledge = self.retrieve_relevant_knowledge(
                    question, top_k=question_top_k)

                # Knowledge related to each non-empty answer option.
                option_knowledges = []
                for opt in ['A', 'B', 'C', 'D', 'E']:
                    opt_col = f'opt_{opt}'
                    if opt_col in row and pd.notna(row[opt_col]):
                        option_text = str(row[opt_col])
                        if option_text.strip():
                            opt_ids, opt_knowledge = self.retrieve_relevant_knowledge(
                                option_text, top_k=option_top_k)
                            if opt_knowledge:
                                option_knowledges.append(f"选项{opt}相关知识: {opt_knowledge}")

                option_knowledge_text = "；".join(option_knowledges)

                # Combine question and option knowledge into one string.
                all_knowledge = question_knowledge
                if option_knowledge_text:
                    all_knowledge += f"；{option_knowledge_text}"

                augmented_input = f"问题：{question}；相关知识：{all_knowledge}"

                train_df.at[idx, 'relevant_knowledge_ids'] = json.dumps(knowledge_ids, ensure_ascii=False)
                train_df.at[idx, 'relevant_knowledge'] = question_knowledge
                train_df.at[idx, 'option_knowledge'] = option_knowledge_text
                train_df.at[idx, 'augmented_input'] = self.clean_special_characters(augmented_input)

            except Exception as e:
                # Best effort per row: log and move on.
                logger.error(f"处理第{idx}条数据失败: {e}")
                continue

        # Persist the augmented data with the configured output encoding.
        output_path = self.get_data_path(output_file)
        train_df.to_csv(output_path, encoding=self.config['output_encoding'], index=False)
        logger.info(f"增强数据保存至: {output_path}")

        return train_df


def main():
    """CLI entry point: augment training data with retrieved knowledge."""
    arg_parser = argparse.ArgumentParser(description='训练数据知识增强')
    arg_parser.add_argument('--train-file', default='train_with_labels.csv', help='训练数据文件名')
    arg_parser.add_argument('--output-file', default='train_augmented_with_knowledge.csv', help='输出文件名')
    cli_args = arg_parser.parse_args()

    retriever = KnowledgeRetriever()

    try:
        logger.info(f"使用数据目录: {retriever.data_dir}")

        # Run the augmentation pipeline with the CLI-selected file names.
        result_df = retriever.augment_train_data(
            train_file=cli_args.train_file,
            output_file=cli_args.output_file,
        )

        # Print the first two augmented rows as a sanity check.
        print("\n增强数据示例:")
        for _, record in result_df.head(2).iterrows():
            print(f"\n问题: {record['cleaned_question']}")
            if pd.notna(record['relevant_knowledge']):
                print(f"相关知识: {record['relevant_knowledge'][:100]}...")

    except Exception as e:
        logger.error(f"知识关联失败: {e}")
        import traceback
        traceback.print_exc()


if __name__ == "__main__":
    # Verify the chardet dependency before running. NOTE(review): the
    # top-of-file `import chardet` already fails first if the package is
    # missing, so this check is a belt-and-braces guard.
    try:
        import chardet
    except ImportError:
        print("请安装chardet库: pip install chardet")
        # Bug fix: exit() is the interactive site-module helper and is not
        # guaranteed to exist; SystemExit is the reliable equivalent.
        raise SystemExit(1)

    main()