"""
数据预处理模块
用于处理小学数学题目数据，包括数据加载、清洗、格式标准化、数据集划分和数据增强等功能
支持Excel(.xlsx)和CSV格式的试卷数据
"""

import pandas as pd
import numpy as np
from typing import List, Dict, Any, Tuple, Union
from pathlib import Path
import re
from difflib import SequenceMatcher
from sklearn.model_selection import train_test_split
import uuid

class DataPreprocessor:
    """Preprocessing pipeline for elementary-school math exam data.

    Handles loading (.xlsx workbooks and grouped .csv files), cleaning,
    table merging, train/val/test splitting and (stub) data augmentation.
    """

    def __init__(self, data_path: str):
        """Create a preprocessor rooted at *data_path*.

        Args:
            data_path: path of a data file or of a directory of exams.
        """
        self.data_path = Path(data_path)
        # The four raw tables; all populated by load_data().
        self.exam_info = self.questions = self.knowledge_points = self.tags = None

    def load_data(self) -> None:
        """
        加载试卷数据，支持Excel和CSV格式
        """
        if self.data_path.is_file():
            # 根据文件扩展名选择加载方法
            if self.data_path.suffix.lower() == '.xlsx':
                self._load_single_excel(self.data_path)
            elif self.data_path.suffix.lower() == '.csv':
                self._load_single_csv_group(self.data_path)
        else:
            self._load_directory(self.data_path)

    def _load_single_excel(self, file_path: Path) -> None:
        """Read the four standard sheets of one Excel workbook into the
        corresponding instance attributes.

        Args:
            file_path: path of the ``.xlsx`` workbook; must contain sheets
                named ``exam_info``, ``questions``, ``knowledge_points``
                and ``tags``.
        """
        workbook = pd.ExcelFile(file_path)
        for sheet_name in ('exam_info', 'questions', 'knowledge_points', 'tags'):
            setattr(self, sheet_name, pd.read_excel(workbook, sheet_name))

    def _load_single_csv_group(self, base_path: Path) -> None:
        """
        加载一组相关的CSV文件

        Args:
            base_path: CSV文件的基础路径
        """
        # 构建相关CSV文件的路径
        base_name = base_path.stem
        dir_path = base_path.parent

        exam_info_path = dir_path / f"{base_name}_试卷信息.csv"
        questions_path = dir_path / f"{base_name}_题目内容.csv"
        knowledge_points_path = dir_path / f"{base_name}_知识点关联.csv"
        tags_path = dir_path / f"{base_name}_标签关联.csv"

        # 加载各个CSV文件
        try:
            self.exam_info = pd.read_csv(exam_info_path)
            self.questions = pd.read_csv(questions_path)
            self.knowledge_points = pd.read_csv(knowledge_points_path)
            self.tags = pd.read_csv(tags_path)
            print(f"成功加载CSV文件组：{base_name}")
        except Exception as e:
            print(f"加载CSV文件时出错：{str(e)}")
            raise

    def _load_directory(self, dir_path: Path) -> None:
        """
        加载目录中的所有试卷文件

        Args:
            dir_path: 目录路径
        """
        # 初始化空的DataFrame
        self.exam_info = pd.DataFrame()
        self.questions = pd.DataFrame()
        self.knowledge_points = pd.DataFrame()
        self.tags = pd.DataFrame()

        # 处理Excel文件
        for excel_file in dir_path.glob('*.xlsx'):
            try:
                temp_exam_info = pd.read_excel(excel_file, 'exam_info')
                temp_questions = pd.read_excel(excel_file, 'questions')
                temp_knowledge_points = pd.read_excel(excel_file, 'knowledge_points')
                temp_tags = pd.read_excel(excel_file, 'tags')

                self._concat_data(temp_exam_info, temp_questions, temp_knowledge_points, temp_tags)
            except Exception as e:
                print(f"处理Excel文件 {excel_file} 时出错：{str(e)}")

        # 处理CSV文件组
        csv_bases = set()
        for csv_file in dir_path.glob('*.csv'):
            base_name = csv_file.stem
            if '_' in base_name:
                csv_bases.add(base_name.rsplit('_', 1)[0])

        for base_name in csv_bases:
            try:
                temp_exam_info = pd.read_csv(dir_path / f"{base_name}_试卷信息.csv")
                temp_questions = pd.read_csv(dir_path / f"{base_name}_题目内容.csv")
                temp_knowledge_points = pd.read_csv(dir_path / f"{base_name}_知识点关联.csv")
                temp_tags = pd.read_csv(dir_path / f"{base_name}_标签关联.csv")

                self._concat_data(temp_exam_info, temp_questions, temp_knowledge_points, temp_tags)
            except Exception as e:
                print(f"处理CSV文件组 {base_name} 时出错：{str(e)}")

    def _concat_data(self, exam_info: pd.DataFrame, questions: pd.DataFrame,
                    knowledge_points: pd.DataFrame, tags: pd.DataFrame) -> None:
        """
        合并数据到主DataFrame

        Args:
            exam_info: 试卷信息DataFrame
            questions: 题目内容DataFrame
            knowledge_points: 知识点关联DataFrame
            tags: 标签关联DataFrame
        """
        self.exam_info = pd.concat([self.exam_info, exam_info])
        self.questions = pd.concat([self.questions, questions])
        self.knowledge_points = pd.concat([self.knowledge_points, knowledge_points])
        self.tags = pd.concat([self.tags, tags])

    def process_example_paper(self) -> None:
        """Run the demo pipeline on the bundled example paper, if present.

        Loads the CSV group under ``./文件示例``, cleans it and writes the
        split datasets to ``./文件示例/处理后的试卷``; prints a notice and
        returns if the example file is missing.
        """
        example_path = Path("./文件示例/这是一份试卷_试卷信息.csv")
        if not example_path.exists():
            print("未找到示例试卷文件")
            return

        print("开始处理示例试卷...")
        self._load_single_csv_group(example_path)
        self.clean_data()

        output_dir = Path("./文件示例/处理后的试卷")
        self.save_processed_data(str(output_dir))
        print(f"示例试卷处理完成，结果保存在：{output_dir}")

    def clean_data(self) -> None:
        """
        数据清洗
        """
        print("开始数据清洗...")

        # 清洗试卷信息
        self.exam_info = self._clean_exam_info(self.exam_info)
        print("试卷信息清洗完成")

        # 清洗题目数据
        self.questions = self._clean_questions(self.questions)
        print("题目数据清洗完成")

        # 清洗知识点数据
        self.knowledge_points = self._clean_knowledge_points(self.knowledge_points)
        print("知识点数据清洗完成")

        # 清洗标签数据
        self.tags = self._clean_tags(self.tags)
        print("标签数据清洗完成")

    def _clean_exam_info(self, exam_info_df: pd.DataFrame) -> pd.DataFrame:
        """
        清洗试卷信息数据

        Args:
            exam_info_df: 试卷信息DataFrame

        Returns:
            清洗后的试卷信息DataFrame
        """
        # 删除重复的试卷信息
        exam_info_df = exam_info_df.drop_duplicates(subset=['exam_id'])

        # 标准化年级信息
        grade_mapping = {
            '一年级': '一年级', '1年级': '一年级', '一': '一年级',
            '二年级': '二年级', '2年级': '二年级', '二': '二年级',
            '三年级': '三年级', '3年级': '三年级', '三': '三年级',
            '四年级': '四年级', '4年级': '四年级', '四': '四年级',
            '五年级': '五年级', '5年级': '五年级', '五': '五年级',
            '六年级': '六年级', '6年级': '六年级', '六': '六年级'
        }
        exam_info_df['grade'] = exam_info_df['grade'].str.strip().map(grade_mapping)

        # 标准化学期信息
        semester_mapping = {
            '上学期': '上学期', '上': '上学期', '第一学期': '上学期',
            '下学期': '下学期', '下': '下学期', '第二学期': '下学期'
        }
        exam_info_df['semester'] = exam_info_df['semester'].str.strip().map(semester_mapping)

        # 标准化考试类型
        exam_type_mapping = {
            '期中考试': '期中考试', '期中': '期中考试',
            '期末考试': '期末考试', '期末': '期末考试',
            '单元测试': '单元测试', '单元': '单元测试',
            '月考': '月考'
        }
        exam_info_df['exam_type'] = exam_info_df['exam_type'].str.strip().map(exam_type_mapping)

        return exam_info_df

    def _clean_questions(self, questions_df: pd.DataFrame) -> pd.DataFrame:
        """
        清洗题目数据

        Args:
            questions_df: 题目DataFrame

        Returns:
            清洗后的题目DataFrame
        """
        # 删除必要字段为空的行
        questions_df = questions_df.dropna(subset=['question_id', 'exam_id', 'stem', 'answer'])

        # 标准化题目类型
        question_type_mapping = {
            '单选题': '单选题', '单选': '单选题', '选择题': '单选题',
            '填空题': '填空题', '填空': '填空题',
            '解答题': '解答题', '计算题': '解答题', '应用题': '解答题'
        }
        questions_df['type'] = questions_df['type'].str.strip().map(question_type_mapping)

        # 处理选项字段（仅对选择题）
        questions_df['options'] = questions_df.apply(
            lambda row: self._standardize_options(row['options']) if row['type'] == '单选题' else '',
            axis=1
        )

        # 标准化答案格式
        questions_df['answer'] = questions_df.apply(
            lambda row: self._standardize_answer(row['answer'], row['type']),
            axis=1
        )

        # 标准化解题步骤格式
        questions_df['solution'] = questions_df['solution'].apply(self._standardize_solution)

        # 标准化公式格式
        questions_df['key_formulas'] = questions_df['key_formulas'].apply(self._standardize_formulas)

        # 标准化难度等级
        questions_df['difficulty'] = pd.to_numeric(questions_df['difficulty'], errors='coerce')
        questions_df['difficulty'] = questions_df['difficulty'].clip(1, 5)

        # 删除重复题目
        questions_df = self._remove_similar_questions(questions_df)

        return questions_df

    def _standardize_options(self, options: str) -> str:
        """Normalise an options string to ``A.xxx;B.yyy;...`` form.

        Accepts ``;``, ``；`` or newline as separator. Options already
        prefixed ``A.``..``D.`` are kept as-is; any other prefix style is
        stripped and a letter is assigned by position. Missing/empty input
        yields ''; on any unexpected error the input is returned unchanged.

        Args:
            options: raw options string.

        Returns:
            The normalised options string.
        """
        if pd.isna(options) or not options:
            return ''

        try:
            # Pick the first separator that actually occurs.
            for sep in (';', '；', '\n'):
                if sep in options:
                    raw_parts = options.split(sep)
                    break
            else:
                raw_parts = [options]

            normalized = []
            for part in raw_parts:
                part = part.strip()
                if re.match(r'^[A-D]\.', part):
                    normalized.append(part)
                else:
                    # Strip any other letter-prefix style, then re-letter
                    # sequentially (A, B, C, ...).
                    body = re.sub(r'^[A-D][\.\、\s]*', '', part)
                    normalized.append(f"{chr(65 + len(normalized))}.{body}")

            return ';'.join(normalized)
        except Exception as e:
            print(f"选项标准化出错: {str(e)}")
            return options

    def _standardize_answer(self, answer: str, question_type: str) -> str:
        """Normalise an answer according to its question type.

        Single-choice (单选题): keep only letters A-D, uppercased.
        Fill-in (填空题): collapse whitespace runs to single spaces.
        Other types: returned stripped but otherwise untouched.
        Missing answers become ''.

        Args:
            answer: raw answer value.
            question_type: canonical question type.

        Returns:
            The normalised answer string.
        """
        if pd.isna(answer):
            return ''

        text = str(answer).strip()
        if question_type == '单选题':
            return re.sub(r'[^A-Da-d]', '', text).upper()
        if question_type == '填空题':
            return re.sub(r'\s+', ' ', text)
        return text

    def _standardize_solution(self, solution: str) -> str:
        """Normalise a solution into numbered steps, one per line.

        Blank lines are dropped; lines not already starting with "N." or
        "N、" get a sequential number prepended. Missing input becomes ''.

        Args:
            solution: raw solution text.

        Returns:
            Newline-joined numbered steps.
        """
        if pd.isna(solution):
            return ''

        numbered = []
        for raw_line in solution.split('\n'):
            line = raw_line.strip()
            if not line:
                continue
            if re.match(r'^\d+[\.\、]', line):
                numbered.append(line)
            else:
                numbered.append(f"{len(numbered) + 1}.{line}")

        return '\n'.join(numbered)

    def _standardize_formulas(self, formulas: str) -> str:
        """Normalise a formula list to ';'-joined trimmed entries.

        Accepts ';' or '；' as separator (';' takes precedence); empty
        entries are dropped and missing input becomes ''.

        Args:
            formulas: raw formula string.

        Returns:
            The normalised formula string.
        """
        if pd.isna(formulas):
            return ''

        if ';' in formulas:
            parts = formulas.split(';')
        elif '；' in formulas:
            parts = formulas.split('；')
        else:
            parts = [formulas]

        return ';'.join(p.strip() for p in parts if p.strip())

    def _clean_knowledge_points(self, knowledge_points_df: pd.DataFrame) -> pd.DataFrame:
        """
        清洗知识点数据

        Args:
            knowledge_points_df: 知识点DataFrame

        Returns:
            清洗后的知识点DataFrame
        """
        # 删除空值
        knowledge_points_df = knowledge_points_df.dropna(subset=['question_id', 'knowledge_point'])

        # 标准化知识点名称
        knowledge_points_df['knowledge_point'] = knowledge_points_df['knowledge_point'].str.strip()

        # 标准化子知识点
        knowledge_points_df['sub_point'] = knowledge_points_df['sub_point'].str.strip()

        # 标准化重要程度
        knowledge_points_df['importance'] = pd.to_numeric(knowledge_points_df['importance'], errors='coerce')
        knowledge_points_df['importance'] = knowledge_points_df['importance'].clip(1, 5)

        return knowledge_points_df

    def _clean_tags(self, tags_df: pd.DataFrame) -> pd.DataFrame:
        """
        清洗标签数据

        Args:
            tags_df: 标签DataFrame

        Returns:
            清洗后的标签DataFrame
        """
        # 删除空值
        tags_df = tags_df.dropna(subset=['question_id', 'tag'])

        # 标准化标签名称
        tags_df['tag'] = tags_df['tag'].str.strip()

        # 标准化标签类别
        category_mapping = {
            '题型': '题型', '类型': '题型',
            '能力': '能力', '能力要求': '能力',
            '应用场景': '应用场景', '场景': '应用场景'
        }
        tags_df['category'] = tags_df['category'].str.strip().map(category_mapping)

        return tags_df

    def merge_data(self) -> pd.DataFrame:
        """
        合并所有表格数据为完整的题目信息

        Returns:
            合并后的DataFrame
        """
        # 合并题目和试卷信息
        merged_data = pd.merge(self.questions, self.exam_info, on='exam_id', how='left')

        # 合并知识点信息
        knowledge_points_grouped = self.knowledge_points.groupby('question_id').agg({
            'knowledge_point': lambda x: ';'.join(x),
            'sub_point': lambda x: ';'.join(x[~pd.isna(x)]),
            'importance': 'mean'
        }).reset_index()
        merged_data = pd.merge(merged_data, knowledge_points_grouped, on='question_id', how='left')

        # 合并标签信息
        tags_grouped = self.tags.groupby('question_id').agg({
            'tag': lambda x: ';'.join(x),
            'category': lambda x: ';'.join(pd.unique(x))
        }).reset_index()
        merged_data = pd.merge(merged_data, tags_grouped, on='question_id', how='left')

        return merged_data

    def split_dataset(self,
                     data: pd.DataFrame,
                     train_ratio: float = 0.7,
                     val_ratio: float = 0.15) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:
        """Split *data* into train / validation / test sets, stratified by
        grade and difficulty.

        Args:
            data: merged question-level dataset.
            train_ratio: fraction assigned to the training set.
            val_ratio: fraction assigned to the validation set; the test
                set receives the remaining ``1 - train_ratio - val_ratio``.

        Returns:
            (train, validation, test) DataFrames.
        """
        def strata(df: pd.DataFrame) -> pd.Series:
            # Combined grade/difficulty label used for stratified sampling.
            return df['grade'] + '_' + df['difficulty'].astype(str)

        train_data, holdout = train_test_split(
            data,
            train_size=train_ratio,
            stratify=strata(data),
            random_state=42
        )

        # Fraction of the holdout that becomes validation data.
        val_fraction = val_ratio / (1 - train_ratio)
        val_data, test_data = train_test_split(
            holdout,
            train_size=val_fraction,
            stratify=strata(holdout),
            random_state=42
        )

        return train_data, val_data, test_data

    def save_processed_data(self, output_dir: str) -> None:
        """Merge the cleaned tables, split them and write ``train.xlsx``,
        ``val.xlsx`` and ``test.xlsx`` under *output_dir*.

        The directory is created (including parents) if missing.

        Args:
            output_dir: destination directory for the three workbooks.
        """
        output_path = Path(output_dir)
        output_path.mkdir(parents=True, exist_ok=True)

        merged_data = self.merge_data()
        train_data, val_data, test_data = self.split_dataset(merged_data)

        for name, frame in (('train', train_data),
                            ('val', val_data),
                            ('test', test_data)):
            frame.to_excel(output_path / f'{name}.xlsx', index=False)

    def process_pipeline(self, output_dir: str) -> None:
        """End-to-end pipeline: load, clean, then persist the split datasets.

        Args:
            output_dir: directory that receives the processed files.
        """
        self.load_data()
        self.clean_data()
        self.save_processed_data(output_dir)

    def augment_data(self, data: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Expand each question into three variants: the original, a
        number-substituted copy and a rephrased copy.

        NOTE: both transformation helpers are currently identity stubs, so
        each item simply appears three times in the result.

        Args:
            data: list of question records.

        Returns:
            The augmented list (three entries per input item).
        """
        augmented = []
        for item in data:
            augmented.extend([
                item,
                self._replace_numbers(item),
                self._rephrase_question(item),
            ])
        return augmented

    def _replace_numbers(self, item: Dict[str, Any]) -> Dict[str, Any]:
        """Numeric-substitution augmentation — currently an identity stub.

        Intended behaviour (not yet implemented):
        1. detect the numbers appearing in the question text;
        2. pick a replacement range appropriate to the question type;
        3. keep the question structure unchanged.

        Args:
            item: original question record.

        Returns:
            The item unchanged (the same object, no copy is made).
        """
        # TODO: implement the numeric-substitution logic described above.
        return item

    def _rephrase_question(self, item: Dict[str, Any]) -> Dict[str, Any]:
        """Rephrasing augmentation — currently an identity stub.

        Intended behaviour (not yet implemented):
        1. analyse the question structure;
        2. substitute synonyms;
        3. reorder the phrasing.

        Args:
            item: original question record.

        Returns:
            The item unchanged (the same object, no copy is made).
        """
        # TODO: implement the rephrasing logic described above.
        return item

    def _remove_similar_questions(self, df: pd.DataFrame, similarity_threshold: float = 0.9) -> pd.DataFrame:
        """
        删除相似题目

        Args:
            df: 题目DataFrame
            similarity_threshold: 相似度阈值

        Returns:
            删除相似题目后的DataFrame
        """
        # 创建题干相似度矩阵
        stems = df['stem'].tolist()
        n = len(stems)
        similar_pairs = []

        for i in range(n):
            for j in range(i + 1, n):
                similarity = SequenceMatcher(None, stems[i], stems[j]).ratio()
                if similarity > similarity_threshold:
                    similar_pairs.append((i, j))

        # 标记要删除的题目
        to_remove = set()
        for i, j in similar_pairs:
            # 保留较新的题目（根据题目ID判断）
            if df.iloc[i]['question_id'] < df.iloc[j]['question_id']:
                to_remove.add(i)
            else:
                to_remove.add(j)

        # 删除重复题目
        return df.drop(df.index[list(to_remove)])
