import re
from typing import Union, Tuple
from loguru import logger
import pandas as pd
import numpy as np
from datetime import datetime
from collections import defaultdict
import json


class EnhancedDataCleaner():

    def __init__(self):
        self._init_logger()
        self.original_sample = None  # 新增属性

    def _init_logger(self):
        """初始化日志记录系统"""
        self.logs = []
        self.stats = defaultdict(int)
        self.start_time = datetime.now()

    def log_step(self, title: str, details: str = ""):
        """记录处理步骤"""
        timestamp = datetime.now().strftime("%H:%M:%S")
        self.logs.append({
            "time": timestamp,
            "title": title,
            "details": details
        })
    def process_data(self, df):
        """Run the enhanced cleaning pipeline; return (cleaned_df, features, clean_log)."""
        cleaned_df, clean_log = self.clean_data(df)
        # Automatic feature extraction is disabled for now:
        # features = self.extract_features(cleaned_df)
        features = "自动识别数据特征"
        return cleaned_df, features, clean_log
           
    def extract_features(self, df):
        # 识别数据类型
        numeric_cols = df.select_dtypes(include=['number']).columns.tolist()
        categorical_cols = df.select_dtypes(include=['object', 'category']).columns.tolist()
        temporal_cols = self.identify_date_columns(df)
        
        # 识别特征关系
        correlations = ""
        
        return {
            "numeric_cols": numeric_cols,
            "categorical_cols": categorical_cols,
            "temporal_cols": temporal_cols,
            "correlations": correlations,
            "all_columns": df.columns.tolist(),
            "row_count": df.shape[0]
        }
    def _generate_report(self, original_shape: tuple, cleaned_df: pd.DataFrame) -> str:
        """Build the JSON cleaning report: shapes, metrics, step log, data samples."""
        rows_before, cols_before = original_shape
        report = {
            "original_shape": {"rows": rows_before, "columns": cols_before},
            "cleaned_shape": {
                "rows": cleaned_df.shape[0],
                "columns": cleaned_df.shape[1],
            },
            "metrics": {
                "memory_usage_mb": cleaned_df.memory_usage(deep=True).sum() / 1024**2,
                "missing_values_filled": self.stats.get('missing_filled', 0),
                "invalid_records_removed": self.stats.get('invalid_amount_records', 0),
            },
            # Shallow copy keeps the step log JSON-serialisable.
            "steps": list(self.logs),
            "sample_data": {
                "original_sample": self._get_sample_data(cleaned_df, original=True),
                "cleaned_sample": self._get_sample_data(cleaned_df),
            },
        }

        # Serialise with the project's helper instead of json.dumps.
        from app.datatk.data_help import DataHelp
        return DataHelp.to_json(report)

    def _get_sample_data(self, df: pd.DataFrame, original: bool = False) -> dict:
        """获取数据样本（前3行+随机3行）"""
        if original:
            sample = self.original_sample
        else:
            sample = pd.concat([df.head(3), df.sample(3)]
                               ) if len(df) > 5 else df

         # 转换时间类型字段为字符串
        datetime_cols = sample.select_dtypes(
            include=['datetime64[ns]']).columns
        for col in datetime_cols:
            sample[col] = sample[col].dt.strftime('%Y-%m-%d %H:%M:%S')
        return {
            "columns": sample.columns.tolist(),
            "data": sample.replace({np.nan: None}).to_dict(orient='records')
        }

    def log_metric(self, category: str, value: int):
        """记录统计指标"""
        self.stats[category] += value

    def process_time_columns(self, df: pd.DataFrame, time_column: str = "date") -> pd.DataFrame:
        """
         将时间字段拆分为年、月、日等独立列，并保留原始字段
        """
        # 1. 确保原始时间为datetime类型
        df[time_column] = pd.to_datetime(df[time_column], errors="coerce")

        # 2. 提取年、月、日等
        df[time_column+"_年"] = df[time_column].dt.year
        df[time_column+"_月"] = df[time_column].dt.month
        df[time_column+"_日"] = df[time_column].dt.day
        df[time_column+"_小时"] = df[time_column].dt.hour  # 可选

        # 3. 处理异常值（如无法解析的日期），不能要,容易出错
        # df = df.dropna(subset=[time_column])

        return df

    def clean_data(self, df: pd.DataFrame) -> Tuple[pd.DataFrame, str]:
        """Run the enhanced cleaning pipeline.

        Features:
        1. Automatic dtype inference and conversion
        2. Smart missing-value handling
        3. Cross-field logic validation (currently disabled)
        4. Lightweight outlier detection

        Args:
            df: Raw input frame.

        Returns:
            (cleaned_df, report): the cleaned frame plus a JSON report string.
            On any exception, the partially-cleaned frame is returned together
            with a report that includes the failure step.
        """
        self._init_logger()
        original_shape = df.shape
        self.original_sample = df.head(3)  # keep a raw-input sample for the report

        try:
            # ====== preprocessing ======
            df = df.convert_dtypes()  # infer the best pandas dtypes

            self.log_step(
                "1. 数据概览",
                f"原始维度: {original_shape}\n"
                f"内存用量: {df.memory_usage(deep=True).sum() / 1024**2:.2f} MB\n"
                f"字段类型分布:\n{df.dtypes.value_counts().to_markdown()}"
            )

            # ====== core cleaning steps ======
            df = self._preprocess(df)          # basic structural cleaning
            df = self._clean_columns(df)       # per-field cleaning by column-name pattern
            df = self._handle_missing(df)      # missing-value imputation
            # df = self._validate_logic(df)      # cross-field validation, currently disabled

            # ====== post-processing ======
            report = self._generate_report(original_shape, df)

            return df, report

        except Exception as e:
            # Best effort: log the failure and still return a report.
            self.log_step("清洗异常", f"{type(e).__name__}: {str(e)}")
            return df, self._generate_report(original_shape, df)

    def _preprocess(self, df: pd.DataFrame) -> pd.DataFrame:
        """基础预处理"""
        # 1. 列名标准化
        df.columns = (
            df.columns.str.strip()
            .str.replace(r'[\s_]+', '_', regex=True)  # 空格/下划线规范化
            .str.lower()                              # 统一小写
        )

        # 2. 删除完全空值的列
        empty_cols = df.columns[df.isna().all()]
        if not empty_cols.empty:
            df = df.drop(columns=empty_cols)
            self.log_step(
                "删除空列",
                f"移除列: {', '.join(empty_cols)}\n"
                f"保留列数: {df.shape[1]}"
            )
            self.log_metric('empty_columns_removed', len(empty_cols))

        # 3. 去重（保留最后出现的副本）
        dup_count = df.duplicated().sum()
        if dup_count > 0:
            df = df.drop_duplicates(keep='last')
            self.log_step(
                "删除重复记录",
                f"删除行数: {dup_count}\n"
                f"样例数据:\n{df.sample(2).to_markdown()}"
            )
            self.log_metric('duplicates_removed', dup_count)

        return df.reset_index(drop=True)

    def _handle_missing(self, df: pd.DataFrame) -> pd.DataFrame:
        """Impute missing values per column: median (numeric), now (datetime), '未知' (string)."""
        missing_stats = df.isna().sum()
        total_cells = df.size

        for col in df.columns:
            col_missing = missing_stats[col]
            if not col_missing:
                continue

            dtype = df[col].dtype

            # Pick a fill strategy based on the column dtype.
            if pd.api.types.is_numeric_dtype(dtype):
                fill_value, method = df[col].median(), "中位数"
            elif pd.api.types.is_datetime64_any_dtype(dtype):
                fill_value, method = pd.Timestamp.now(), "当前时间"
            elif pd.api.types.is_string_dtype(dtype):
                fill_value, method = "未知", "'未知'"
            else:
                continue  # leave other dtypes untouched

            df[col] = df[col].fillna(fill_value)
            self.log_step(
                f"填充缺失值 [{col}]",
                f"填充方式: {method}\n"
                f"填充数量: {col_missing} ({col_missing/len(df):.1%})"
            )
            self.log_metric('missing_filled', col_missing)

        self.log_step(
            "缺失值统计",
            f"总缺失值: {missing_stats.sum()}/{total_cells} ({missing_stats.sum()/total_cells:.1%})\n"
            f"各列详情:\n{missing_stats[missing_stats > 0].to_markdown()}"
        )
        return df

    def _validate_logic(self, df: pd.DataFrame) -> pd.DataFrame:
        """跨字段逻辑校验"""
        # 示例规则：订单金额需>=0
        if {'amount', 'price'}.issubset(df.columns):
            invalid = df.query("amount < 0 or price < 0")
            if not invalid.empty:
                df = df[(df['amount'] >= 0) & (df['price'] >= 0)]
                self.log_step(
                    "金额逻辑校验",
                    f"移除负值记录: {len(invalid)}\n"
                    f"样例数据:\n{invalid.sample(2).to_markdown()}"
                )
                self.log_metric('invalid_amount_records', len(invalid))

        # 可在此添加更多业务规则...
        return df

        """优化数据类型减少内存占用"""
        original_memory = df.memory_usage(deep=True).sum()

        # 数值类型优化
        for col in df.select_dtypes(include='number'):
            col_min = df[col].min()
            col_max = df[col].max()

            # 整数类型优化
            if pd.api.types.is_integer_dtype(df[col]):
                if col_min >= 0:
                    df[col] = pd.to_numeric(df[col], downcast='unsigned')
                else:
                    df[col] = pd.to_numeric(df[col], downcast='integer')
            # 浮点类型优化
            else:
                df[col] = pd.to_numeric(df[col], downcast='float')

        # 分类类型优化
        for col in df.select_dtypes(include='object'):
            if df[col].nunique() / len(df) < 0.5:  # 基数较低时转category
                df[col] = df[col].astype('category')

        optimized_memory = df.memory_usage(deep=True).sum()
        self.log_step(
            "类型优化",
            f"内存节省: {(original_memory - optimized_memory)/1024**2:.2f} MB\n"
            f"优化后类型分布:\n{df.dtypes.value_counts().to_markdown()}"
        )
        return df

    def _clean_columns(self, df: pd.DataFrame) -> pd.DataFrame:
        """基于字段命名模式的智能清洗"""

        # 配置字段识别规则（可扩展）
        FIELD_RULES = {
            # 键: 处理类型
            # 值: (匹配关键词列表, 处理函数)
            'date': (
                ['date', 'time', '时间', '日期'],
                self._clean_date_column
            ),
            'phone': (
                ['phone', 'mobile', '电话', '手机'],
                self._clean_phone_column
            ),
            'email': (
                ['email', 'mail', '邮箱'],
                self._validate_email_column
            ),
            'amount': (
                ['amount', 'price', '金额', '价格', '成本'],
                self._clean_amount_column
            ),
            'id': (
                ['身份证'],
                self._validate_id_column
            )
        }

        # 遍历所有字段处理规则
        for col in df.columns:
            # 获取字段的清洗配置
            clean_type, func = self._detect_clean_type(col, FIELD_RULES)

            if clean_type:
                # 执行指定清洗函数
                try:
                    df = func(df, col)
                except Exception as e:
                    self.log_step(f"清洗异常 - {col}",
                                  f"类型: {clean_type}\n错误详情: {str(e)}")

        return df

    def _detect_clean_type(self, column_name: str, rules: dict) -> tuple:
        """智能检测字段清洗类型"""
        lower_name = column_name.lower()

        for clean_type, (keywords, _) in rules.items():
            # 匹配规则：字段名包含任一关键词即触发
            if any(kw in lower_name for kw in keywords):
                return (clean_type, rules[clean_type][1])

        return (None, None)

    # ======= Type-specific cleaning methods =======

    def _clean_date_column(self, df: pd.DataFrame, col: str) -> pd.DataFrame:
        """通用时间字段处理"""
        # 保存原始值的副本，以便在转换失败时使用
        original_values = df[col].copy()
        
        # 首先尝试自动推断格式（不指定format参数）
        try:
            df[col] = pd.to_datetime(df[col], errors='ignore')
        except:
            pass
        
        # 如果还有未成功转换的值，尝试使用更多的显式格式
        date_patterns = [
            '%Y-%m-%d', '%Y/%m/%d',     # ISO格式
            '%Y年%m月%d日', '%Y年%m月%d号', # 中文格式
            '%d-%b-%y', '%d/%m/%Y',     # 其他常见格式
            '%m/%d/%Y', '%m-%d-%Y',     # 美式日期格式
            '%Y%m%d', '%Y.%m.%d',       # 紧凑格式和点分隔格式
            '%b %d, %Y', '%B %d, %Y',   # 月份名称格式
            '%d %b %Y', '%d %B %Y',     # 欧式格式
            '%Y-%m-%d %H:%M', '%Y/%m/%d %H:%M',  # 带时间
            '%Y-%m-%d %H:%M:%S', '%Y/%m/%d %H:%M:%S',  # 带秒
            '%Y-%m-%dT%H:%M:%S', '%Y-%m-%dT%H:%M:%SZ'  # ISO 8601格式
        ]

        for fmt in date_patterns:
            # 只处理尚未成功转换的部分（仍然是object类型的）
            mask = pd.isna(df[col])
            if any(mask) and hasattr(df[col], 'dt') is False:
                try:
                    parsed = pd.to_datetime(original_values[mask], format=fmt, errors='coerce')
                    df.loc[mask, col] = parsed
                except:
                    continue
        
        # 最后，将所有NaT值替换回原始值
        mask = pd.isna(df[col])
        if any(mask):
            df.loc[mask, col] = original_values[mask]
        
        return df

    def _clean_phone_column(self, df: pd.DataFrame, col: str) -> pd.DataFrame:
        """电话号码统一处理"""

        # 联系方式需要处理成字符串
        df[col] = df[col].astype(str).str.split('.', expand=True)[0]

        # 清理非数字字符
        cleaned = (
            df[col]
            .astype(str)
            .str.replace(r'\D+', '', regex=True)
            .replace('nan', np.nan)
        )

        # 有效性验证（11位且有效号段）
        valid_phones = cleaned.str.match(r'^1[3-9]\d{9}$', na=False)
        df[col] = cleaned.where(valid_phones, np.nan)

        # 统计
        invalid_count = len(df) - valid_phones.sum()
        self.log_step(f"手机号处理 [{col}]",
                      f"有效号码率：{valid_phones.mean():.1%}\n"
                      f"典型无效值："
                      f"{cleaned[~valid_phones].dropna().head(3).tolist()}")
        self.log_metric('invalid_phones', invalid_count)

        return df

    def _validate_email_column(self, df: pd.DataFrame, col: str) -> pd.DataFrame:
        """邮箱验证逻辑"""
        # RFC 5322 标准正则（简化版）
        email_regex = r'^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$'
        invalid_emails = ~df[col].str.match(email_regex, na=False)

        df[col] = df[col].where(~invalid_emails, np.nan)

        # 统计记录
        self.log_step(f"邮箱验证 [{col}]",
                      f"无效邮箱数量：{invalid_emails.sum()}\n"
                      f"样例问题："
                      f"{df.loc[invalid_emails, col].head(3).tolist()}")
        self.log_metric('invalid_emails', invalid_emails.sum())

        return df

    def _clean_amount_column(self, df: pd.DataFrame, col: str) -> pd.DataFrame:
        """
        清理金额列，处理带有"万"、"亿"等单位的数值
        仅当需要单位转换时才修改原始值
        
        参数:
            df (pd.DataFrame): 待清理的数据框
            col (str): 需要清理的列名
            
        返回:
            pd.DataFrame: 清理后的数据框
        """
        try:
            # 创建数据框的副本，避免修改原始数据
            cleaned_df = df.copy()
            
            # 检查是否是数值相关的列
            sample_data = df[col].dropna().head(100)  # 取前100个非空值样本
            if len(sample_data) == 0:
                return cleaned_df  # 如果全是空值，直接返回副本
            
            # 检查是否包含数值特征
            def has_numeric_characteristics(x):
                if pd.api.types.is_numeric_dtype(type(x)):
                    return True
                if isinstance(x, str):
                    # 检查是否包含数字或金额相关字符
                    return bool(re.search(r'[0-9]|万|亿|千|百|元', x))
                return False
            
            # 检查样本数据是否包含数值特征
            numeric_samples = sample_data.apply(has_numeric_characteristics)
            if not numeric_samples.any():
                return cleaned_df  # 如果没有数值特征，直接返回副本
            
            # 如果已经是数值类型，不做任何转换，保留原始精度
            if pd.api.types.is_numeric_dtype(df[col]):
                unit_note = "已是数值类型，保留原始精度"
                # 仅计算统计信息但不修改数据
                amount_filter = (df[col] < 0) | (df[col] > 1e8)
                outliers = df[col][amount_filter]
                self.log_step(f"金额字段 [{col}]",
                            f"{unit_note}\n"
                            f"异常值数量：{len(outliers)}\n"
                            f"最大值：{df[col].max()}")
                self.log_metric('amount_outliers', len(outliers))
                return cleaned_df
            
            # 处理字符串类型的金额
            if df[col].dtype == 'object':
                # 创建一个掩码来标记需要转换的行
                needs_conversion = pd.Series(False, index=df.index)
                
                # 将所有值转换为字符串
                str_series = cleaned_df[col].astype(str)
                
                # 创建一个新的Series来存储处理后的值
                processed_series = cleaned_df[col].copy()
                
                # 处理"万"单位
                wan_mask = str_series.str.contains('万', na=False)
                if wan_mask.any():
                    wan_values = str_series[wan_mask].str.replace('万', '').str.replace(',', '')
                    # 只对含"万"的值进行转换
                    processed_series.loc[wan_mask] = pd.to_numeric(wan_values, errors='coerce') * 10000
                    needs_conversion = needs_conversion | wan_mask
                    unit_note = "检测到'万'单位，已换算为元"

                # 处理"亿"单位
                yi_mask = str_series.str.contains('亿', na=False)
                if yi_mask.any():
                    yi_values = str_series[yi_mask].str.replace('亿', '').str.replace(',', '')
                    # 只对含"亿"的值进行转换
                    processed_series.loc[yi_mask] = pd.to_numeric(yi_values, errors='coerce') * 100000000
                    needs_conversion = needs_conversion | yi_mask
                    unit_note = "检测到'亿'单位，已换算为元"
                    
                # 处理其他数值（不含单位的字符串表示数值）
                other_mask = ~(wan_mask | yi_mask)
                if other_mask.any():
                    other_values = str_series[other_mask].str.replace(',', '')
                    # 仅处理包含数字的值
                    number_mask = other_values.str.contains(r'\d', na=False)
                    if number_mask.any():
                        # 先检查哪些确实需要转换（字符串表示的数值）
                        numeric_mask = other_mask & number_mask
                        # 对于这些值，只有在不是有效数字的情况下才进行转换
                        # 不过，我们保持原始的精度
                        processed_series.loc[numeric_mask] = pd.to_numeric(other_values[number_mask], errors='coerce')
                        needs_conversion = needs_conversion | numeric_mask
                    unit_note = "处理了非单位数值表示"

                # 仅更新需要转换的行，保留其他行的原始值
                cleaned_df.loc[needs_conversion, col] = processed_series.loc[needs_conversion]
                
                # 统计量计算
                converted_count = needs_conversion.sum()
                conversion_ratio = round(converted_count / len(df) * 100, 2) if len(df) > 0 else 0
                
                # 检测异常值
                if pd.api.types.is_numeric_dtype(cleaned_df[col]):
                    amount_filter = (cleaned_df[col] < 0) | (cleaned_df[col] > 1e8)
                    outliers = cleaned_df[col][amount_filter]
                    outlier_count = len(outliers)
                else:
                    outlier_count = 0
                
                self.log_step(f"金额清洗 [{col}]",
                          f"{unit_note}\n"
                          f"转换行数：{converted_count} ({conversion_ratio}%)\n"
                          f"异常值数量：{outlier_count}")
                self.log_metric('amount_conversions', converted_count)
                self.log_metric('amount_outliers', outlier_count)
                
                return cleaned_df
                
        except Exception as e:
            logger.error(f"清理金额列 {col} 时出错: {str(e)}")
            return df  # 发生错误时返回原始数据框

    def _clean_amount_columnold(self, df: pd.DataFrame, col: str) -> pd.DataFrame:
        """金额字段标准化"""
        # 提取数字和符号
        # 需要判断 df[col]是否为数字类型
        leaned = (
            df[col].astype(str)
            .str.replace(r'[^\d.-]', '', regex=True)  # 留数字、点和负号
            .apply(pd.to_numeric, errors='coerce')
        )
            # 自动单位检测（万/亿）
        if df[col].str.contains('万').any():
            cleaned *= 10000
            unit_note = "检测到'万'单位，已换算为元"
        elif df[col].str.contains('亿').any():
            cleaned *= 100000000
            unit_note = "检测到'亿'单位，已换算为元"
        else:
            unit_note = "无单位换算"

        df[col] = cleaned

        # 记录异常值（负数或超出合理范围）
        amount_filter = (df[col] < 0) | (df[col] > 1e8)
        outliers = df[col][amount_filter]
        self.log_step(f"金额清洗 [{col}]",
                    f"{unit_note}\n"
                    f"异常值数量：{len(outliers)}\n"
                    f"最大值：{cleaned.max():.2f}")
        self.log_metric('amount_outliers', len(outliers))

        return df

    def _validate_id_column(self, df: pd.DataFrame, col: str) -> pd.DataFrame:
        """身份证校验增强版"""
        # 简单格式验证
        def validate_id(id_num):
            if pd.isna(id_num):
                return False
            s = str(id_num)
            # 检查长度
            if len(s) not in [15, 18]:
                return False
            # 18位需校验码
            if len(s) == 18:
                factors = [7, 9, 10, 5, 8, 4, 2, 1, 6, 3, 7, 9, 10, 5, 8, 4, 2]
                check_dict = {i: c for i, c in enumerate('10X98765432')}
                try:
                    total = sum(int(s[i]) * factors[i] for i in range(17))
                    return s[-1].upper() == check_dict[total % 11]
                except:
                    return False
            return True

        valid_mask = df[col].apply(validate_id)
        invalid_count = (~valid_mask).sum()

        self.log_step(f"证件校验 [{col}]",
                      f"有效证件比例：{valid_mask.mean():.1%}\n"
                      f"长度错误：{df[col][df[col].str.len().isin([15,18]) == False].count()}")
        self.log_metric('invalid_ids', invalid_count)

        return df

    def identify_date_columns(self, df):
        """
        Identify date/time-like columns in the frame.

        Four heuristics are applied in order: existing datetime dtypes,
        parse-testing string columns on a small random sample, column-name
        keywords (cross-checked against the values), and UNIX-timestamp value
        ranges. Results are de-duplicated at the end.

        Args:
            df (pd.DataFrame): frame to analyse

        Returns:
            list: names of columns judged to hold dates (order not guaranteed,
            because of the final set() pass)
        """
        date_cols = []
        
        # 1. Columns that already have a datetime dtype.
        date_cols.extend(df.select_dtypes(include=['datetime64', 'datetime']).columns.tolist())
        
        # 2. String columns: sample up to 10 non-null values and try parsing them.
        for col in df.select_dtypes(include=['object']).columns:
            # Skip columns that are mostly empty (>50% NaN).
            if df[col].isna().sum() / len(df) > 0.5:
                continue
                
            # NOTE(review): .sample() makes this check non-deterministic between runs.
            sample_values = df[col].dropna().sample(min(10, df[col].count())).tolist()
            
            if not sample_values:
                continue
                
            # Count how many sampled values parse as dates.
            success_count = 0
            for val in sample_values:
                try:
                    # Let pandas try its common formats.
                    result = pd.to_datetime(val, errors='raise')
                    success_count += 1
                except (ValueError, TypeError):
                    pass
                    
            # Treat as a date column when at least 70% of the sample parses.
            if success_count / len(sample_values) >= 0.7:
                date_cols.append(col)
                
        # 3. Column-name keyword heuristics.
        date_keywords = ['日期', '时间', 'date', 'time', 'year', '年', '月', 'month', 
                        'day', '日', 'created', 'updated', 'timestamp']
        
        for col in df.columns:
            col_lower = str(col).lower()
            # Name contains a date keyword and the column is not already identified.
            if col not in date_cols and any(keyword in col_lower for keyword in date_keywords):
                # Cross-check the values before accepting the name hint.
                try:
                    # Numeric columns in a plausible year range count as year columns.
                    if df[col].dtype.kind in 'if':
                        values = df[col].dropna()
                        if values.empty:
                            continue
                        
                        # Plausible-year test: 1900..2100 with a span under 200.
                        min_val, max_val = values.min(), values.max()
                        if 1900 <= min_val <= max_val <= 2100 and max_val - min_val < 200:
                            date_cols.append(col)
                            continue
                            
                    # Otherwise the whole column must parse as datetimes.
                    pd.to_datetime(df[col], errors='raise')
                    date_cols.append(col)
                except (ValueError, TypeError):
                    pass
        
        # 4. Detect numeric columns that hold UNIX timestamps.
        for col in df.select_dtypes(include=['int64', 'float64']).columns:
            if col in date_cols:
                continue
                
            # UNIX timestamps show up as large integer values.
            sample = df[col].dropna()
            if sample.empty:
                continue
                
            min_val, max_val = sample.min(), sample.max()
            
            # Second-resolution timestamps (~1998-2033 range).
            if 9e8 < min_val < max_val < 2e9:
                date_cols.append(col)
            # Millisecond-resolution timestamps (same era).
            elif 9e11 < min_val < max_val < 2e12:
                date_cols.append(col)
        
        # De-duplicate and return.
        return list(set(date_cols))

    def _interpret_correlation_strength(self, coefficient):
        """
        解释相关系数的强度
        
        参数:
            coefficient (float): 相关系数值
            
        返回:
            str: 相关性强度描述
        """
        abs_coef = abs(coefficient)
        
        if abs_coef < 0.1:
            return "几乎无相关"
        elif abs_coef < 0.3:
            return "弱相关"
        elif abs_coef < 0.5:
            return "中等相关"
        elif abs_coef < 0.7:
            return "较强相关"
        elif abs_coef < 0.9:
            return "强相关"
        else:
            return "极强相关"

    def _detect_multicollinearity(self, significant_pairs):
        """
        Detect multicollinearity among correlated variable pairs.

        Builds a network of variables linked by |coefficient| >= 0.7 and
        reports groups of three or more mutually high-correlated variables.

        Args:
            significant_pairs (list): dicts with "column1", "column2" and
                "coefficient" keys describing significantly correlated pairs

        Returns:
            dict: {"detected": bool, "groups": [{"variables": [...],
                   "relations": [...]}, ...]}
        """
        # Threshold above which a pair counts as highly correlated.
        HIGH_CORRELATION_THRESHOLD = 0.7
        
        # Adjacency map: variable -> list of its highly correlated partners.
        variable_network = {}
        
        for pair in significant_pairs:
            if abs(pair["coefficient"]) >= HIGH_CORRELATION_THRESHOLD:
                col1, col2 = pair["column1"], pair["column2"]
                
                # Record the edge in both directions.
                variable_network.setdefault(col1, []).append({
                    "column": col2,
                    "coefficient": pair["coefficient"]
                })
                
                variable_network.setdefault(col2, []).append({
                    "column": col1,
                    "coefficient": pair["coefficient"]
                })
        
        # Collect groups of variables forming a multicollinear cluster.
        multicollinearity_groups = []
        processed_vars = set()
        
        for var, related_vars in variable_network.items():
            # A cluster needs a hub with at least two high-correlation partners.
            if var in processed_vars or len(related_vars) < 2:
                continue
                
            # Candidate group: the hub plus all of its partners.
            collinear_group = [var]
            collinear_relations = []
            
            for related in related_vars:
                collinear_group.append(related["column"])
                collinear_relations.append({
                    "var1": var,
                    "var2": related["column"],
                    "coefficient": related["coefficient"]
                })
            
            # Also record high correlations between the partners themselves.
            for i, var1 in enumerate(collinear_group[1:], 1):
                for var2 in collinear_group[i+1:]:
                    # Look up the coefficient for this partner pair.
                    for pair in significant_pairs:
                        if ((pair["column1"] == var1 and pair["column2"] == var2) or
                            (pair["column1"] == var2 and pair["column2"] == var1)):
                            if abs(pair["coefficient"]) >= HIGH_CORRELATION_THRESHOLD:
                                collinear_relations.append({
                                    "var1": var1,
                                    "var2": var2,
                                    "coefficient": pair["coefficient"]
                                })
            
            # Report only networks of >=3 variables with >=3 high-corr relations.
            if len(set(collinear_group)) >= 3 and len(collinear_relations) >= 3:
                multicollinearity_groups.append({
                    "variables": list(set(collinear_group)),
                    "relations": collinear_relations
                })
                processed_vars.update(collinear_group)
        
        return {
            "detected": len(multicollinearity_groups) > 0,
            "groups": multicollinearity_groups
        }

    def generate_data_summary(self, df: pd.DataFrame) -> dict:
        """
        生成数据摘要信息，包括：
        1. 基本信息（行数、列数、内存使用）
        2. 每列的数据类型和统计信息
        3. 缺失值分析
        4. 数值列统计
        5. 分类列分布
        6. 异常值检测
        7. 数据质量评分

        参数:
            df (pd.DataFrame): 要分析的数据框

        返回:
            dict: 包含数据摘要信息的字典
        """
        try:
            # 基本信息统计
            memory_usage_mb = round(df.memory_usage(deep=True).sum() / 1024 / 1024, 2)  # 转换为MB并保留2位小数
            duplicate_rows = df.duplicated().sum()
            duplicate_ratio = round(duplicate_rows / len(df) * 100, 2) if len(df) > 0 else 0

            summary = {
                "基本信息": {
                    "总行数": len(df),
                    "总列数": len(df.columns),
                    "内存占用(MB)": memory_usage_mb,
                    "数值型列数": len(df.select_dtypes(include=['number']).columns),
                    "类别型列数": len(df.select_dtypes(include=['object', 'category']).columns),
                    "日期型列数": len(df.select_dtypes(include=['datetime64']).columns),
                    "重复行数": duplicate_rows,
                    "重复行比例(%)": duplicate_ratio
                },
                "列信息": {},
                "数据质量": {
                    "完整度": 0,
                    "准确度": 0,
                    "一致性": 0,
                    "总分": 0
                }
            }

            completeness_scores = []
            accuracy_scores = []
            consistency_scores = []

            # 遍历每一列生成详细信息
            for column in df.columns:
                null_count = df[column].isnull().sum()
                null_ratio = round(null_count / len(df) * 100, 2) if len(df) > 0 else 0
                
                col_info = {
                    "数据类型": str(df[column].dtype),
                    "空值数量": null_count,
                    "空值比例(%)": null_ratio
                }

                # 计算列的完整度分数 (0-100)
                completeness = 100 - null_ratio
                completeness_scores.append(completeness)

                # 对数值类型列添加统计信息
                if pd.api.types.is_numeric_dtype(df[column]):
                    numeric_stats = df[column].describe()
                    
                    # 计算异常值界限
                    Q1 = float(numeric_stats["25%"])
                    Q3 = float(numeric_stats["75%"])
                    IQR = Q3 - Q1
                    lower_bound = Q1 - 1.5 * IQR
                    upper_bound = Q3 + 1.5 * IQR
                    
                    # 检测异常值
                    outliers = df[(df[column] < lower_bound) | (df[column] > upper_bound)][column]
                    outliers_count = len(outliers)
                    outliers_ratio = round(outliers_count / len(df) * 100, 2) if len(df) > 0 else 0
                    
                    col_info.update({
                        "统计量": {
                            "最小值": round(float(numeric_stats["min"]), 2) if not pd.isna(numeric_stats["min"]) else None,
                            "最大值": round(float(numeric_stats["max"]), 2) if not pd.isna(numeric_stats["max"]) else None,
                            "平均值": round(float(numeric_stats["mean"]), 2) if not pd.isna(numeric_stats["mean"]) else None,
                            "中位数": round(float(df[column].median()), 2) if not pd.isna(df[column].median()) else None,
                            "标准差": round(float(numeric_stats["std"]), 2) if not pd.isna(numeric_stats["std"]) else None,
                            "25分位数": round(float(numeric_stats["25%"]), 2) if not pd.isna(numeric_stats["25%"]) else None,
                            "75分位数": round(float(numeric_stats["75%"]), 2) if not pd.isna(numeric_stats["75%"]) else None
                        },
                        "异常值": {
                            "数量": outliers_count,
                            "比例(%)": outliers_ratio,
                            "下界": round(lower_bound, 2),
                            "上界": round(upper_bound, 2)
                        },
                        "分布特征": {
                            "偏度": round(float(df[column].skew()), 2),  # 添加偏度
                            "峰度": round(float(df[column].kurtosis()), 2)  # 添加峰度
                        }
                    })
                    
                    # 计算数值列的准确度分数 (基于异常值比例)
                    accuracy = 100 - outliers_ratio
                    accuracy_scores.append(accuracy)
                
                # 对分类或字符串类型列添加分布信息
                else:
                    value_counts = df[column].value_counts()
                    top_5_counts = value_counts.head(5).to_dict()
                    # 确保字典的键是字符串类型
                    top_5_counts = {str(k): int(v) for k, v in top_5_counts.items()}
                    
                    # 计算类别分布的均匀度
                    if len(value_counts) > 0:
                        entropy = -(value_counts / len(df) * np.log2(value_counts / len(df))).sum()
                        max_entropy = np.log2(len(value_counts))
                        uniformity = round((entropy / max_entropy * 100) if max_entropy > 0 else 100, 2)
                    else:
                        uniformity = 0
                    
                    unique_count = df[column].nunique()
                    unique_ratio = round(unique_count / len(df) * 100, 2) if len(df) > 0 else 0
                    
                    col_info.update({
                        "类别统计": {
                            "唯一值数量": unique_count,
                            "唯一值比例(%)": unique_ratio,
                            "最频繁值": str(value_counts.index[0]) if not value_counts.empty else None,
                            "最频繁值占比(%)": round(value_counts.iloc[0] / len(df) * 100, 2) if not value_counts.empty else 0,
                            "分布均匀度(%)": uniformity
                        },
                        "前五值分布": top_5_counts
                    })
                    
                    # 计算分类列的准确度分数 (基于唯一值比例和分布均匀度)
                    accuracy = (uniformity + (100 - unique_ratio)) / 2
                    accuracy_scores.append(accuracy)

                # 对日期类型列添加时间范围信息
                if pd.api.types.is_datetime64_any_dtype(df[column]):
                    date_series = df[column].dropna()
                    if not date_series.empty:
                        col_info.update({
                            "时间范围": {
                                "最早日期": date_series.min().strftime('%Y-%m-%d %H:%M:%S'),
                                "最晚日期": date_series.max().strftime('%Y-%m-%d %H:%M:%S'),
                                "时间跨度(天)": (date_series.max() - date_series.min()).days
                            },
                            "时间分布": {
                                "年份分布": date_series.dt.year.value_counts().to_dict(),
                                "月份分布": date_series.dt.month.value_counts().to_dict(),
                                "星期分布": date_series.dt.dayofweek.value_counts().to_dict()
                            }
                        })

                summary["列信息"][column] = col_info

            # # 添加相关性分析（仅对数值列）
            # numeric_cols = df.select_dtypes(include=['number']).columns
            # if len(numeric_cols) >= 2:
            #     correlation_matrix = df[numeric_cols].corr()
            #     # 找出高相关性的列对（相关系数绝对值 > 0.7）
            #     high_correlations = []
            #     for i in range(len(numeric_cols)):
            #         for j in range(i + 1, len(numeric_cols)):
            #             corr = correlation_matrix.iloc[i, j]
            #             if abs(corr) > 0.7:
            #                 high_correlations.append({
            #                     "列1": numeric_cols[i],
            #                     "列2": numeric_cols[j],
            #                     "相关系数": round(float(corr), 3),
            #                     "相关强度": "强正相关" if corr > 0.7 else "强负相关"
            #                 })
                
            #     if high_correlations:
            #         summary["相关性分析"] = {
            #             "高相关对数量": len(high_correlations),
            #             "详细信息": high_correlations
            #         }
                    
            #         # 计算一致性分数 (基于相关性)
            #         total_pairs = len(numeric_cols) * (len(numeric_cols) - 1) / 2
            #         if total_pairs > 0:
            #             consistency = 100 - (len(high_correlations) / total_pairs * 100)
            #             consistency_scores.append(consistency)

            # 计算总体数据质量分数
            if completeness_scores:
                summary["数据质量"]["完整度"] = round(sum(completeness_scores) / len(completeness_scores), 2)
            if accuracy_scores:
                summary["数据质量"]["准确度"] = round(sum(accuracy_scores) / len(accuracy_scores), 2)
            # if consistency_scores:
            #     summary["数据质量"]["一致性"] = round(sum(consistency_scores) / len(consistency_scores), 2)
            
            # 计算总分
            quality_scores = [score for score in summary["数据质量"].values() if score > 0]
            if quality_scores:
                summary["数据质量"]["总分"] = round(sum(quality_scores) / len(quality_scores), 2)

            return summary

        except Exception as e:
            logger.error(f"生成数据摘要时发生错误: {str(e)}")
            raise Exception(f"生成数据摘要失败: {str(e)}")