{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "63269847",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Silence all warnings so library deprecation notices do not clutter the notebook output\n",
    "import warnings\n",
    "warnings.filterwarnings('ignore')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "d732d640",
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import gc\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import re\n",
    "import time\n",
    "from scipy import stats\n",
    "import matplotlib.pyplot as plt\n",
    "import category_encoders as ce\n",
    "import networkx as nx\n",
    "import pickle\n",
    "import lightgbm as lgb\n",
    "import catboost as cat\n",
    "import xgboost as xgb\n",
    "from datetime import timedelta\n",
    "from gensim.models import Word2Vec\n",
    "from io import StringIO\n",
    "from tqdm import tqdm\n",
    "from lightgbm import LGBMClassifier\n",
    "from lightgbm import log_evaluation, early_stopping\n",
    "from sklearn.metrics import roc_curve\n",
    "from scipy.stats import chi2_contingency, pearsonr\n",
    "from sklearn.preprocessing import StandardScaler, OneHotEncoder, LabelEncoder\n",
    "from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\n",
    "from sklearn.feature_extraction import FeatureHasher\n",
    "from sklearn.model_selection import StratifiedKFold, KFold, train_test_split, GridSearchCV\n",
    "from category_encoders import TargetEncoder\n",
    "from sklearn.decomposition import TruncatedSVD\n",
    "from autogluon.tabular import TabularDataset, TabularPredictor, FeatureMetadata\n",
    "from autogluon.features.generators import AsTypeFeatureGenerator, BulkFeatureGenerator, DropUniqueFeatureGenerator, FillNaFeatureGenerator, PipelineFeatureGenerator\n",
    "from autogluon.features.generators import CategoryFeatureGenerator, IdentityFeatureGenerator, AutoMLPipelineFeatureGenerator\n",
    "from autogluon.common.features.types import R_INT, R_FLOAT\n",
    "from autogluon.core.metrics import make_scorer"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "98168ca1",
   "metadata": {},
   "source": [
    "# 数据导入"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "e218df05",
   "metadata": {},
   "source": [
    "## 通用导入函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "11381d8a",
   "metadata": {},
   "outputs": [],
   "source": [
    "def load_data_from_directory(directory):\n",
    "    \"\"\"\n",
    "    Load every CSV file in a directory, exposing each one as a global DataFrame.\n",
    "\n",
    "    Parameters:\n",
    "    - directory: path of the directory that holds the CSV files\n",
    "\n",
    "    Returns:\n",
    "    - list of the created dataset variable names (<file stem>_data)\n",
    "    \"\"\"\n",
    "    dataset_names = []\n",
    "    # sorted() makes the load order deterministic; os.listdir order is OS-dependent\n",
    "    for filename in sorted(os.listdir(directory)):\n",
    "        if filename.endswith(\".csv\"):\n",
    "            dataset_name = os.path.splitext(filename)[0] + '_data'  # file stem becomes the variable name\n",
    "            file_path = os.path.join(directory, filename)  # full path of the CSV file\n",
    "            # NOTE: injecting into globals() is convenient in a notebook but hides data lineage\n",
    "            globals()[dataset_name] = pd.read_csv(file_path)\n",
    "            dataset_names.append(dataset_name)\n",
    "            print(f\"数据集 {dataset_name} 已加载为 DataFrame\")\n",
    "\n",
    "    return dataset_names"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "29ef15ba",
   "metadata": {},
   "source": [
    "## 导入数据集"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "2a0a7091",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "数据集 AGET_PAY_data 已加载为 DataFrame\n",
      "数据集 ASSET_data 已加载为 DataFrame\n",
      "数据集 CCD_TR_DTL_data 已加载为 DataFrame\n",
      "数据集 MB_PAGEVIEW_DTL_data 已加载为 DataFrame\n",
      "数据集 MB_QRYTRNFLW_data 已加载为 DataFrame\n",
      "数据集 MB_TRNFLW_data 已加载为 DataFrame\n",
      "数据集 NATURE_data 已加载为 DataFrame\n",
      "数据集 PROD_HOLD_data 已加载为 DataFrame\n",
      "数据集 TARGET_data 已加载为 DataFrame\n",
      "数据集 TR_APS_DTL_data 已加载为 DataFrame\n",
      "数据集 TR_IBTF_data 已加载为 DataFrame\n",
      "数据集 TR_TPAY_data 已加载为 DataFrame\n"
     ]
    }
   ],
   "source": [
    "# Load all CSVs under ../DATA; each becomes a global <name>_data DataFrame\n",
    "train_load_dt = '../DATA'\n",
    "train_data_name = load_data_from_directory(train_load_dt)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "ec9d41a0",
   "metadata": {},
   "source": [
    "# 数据探查与清洗通用方法"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "41b7484f",
   "metadata": {},
   "source": [
    "## 1. 基础信息探查"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "2d1f8365",
   "metadata": {},
   "source": [
    "### 1.1 优化后的字段类型识别辅助函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "dd41166b",
   "metadata": {},
   "outputs": [],
   "source": [
    "def is_text(series):\n",
    "    \"\"\"\n",
    "    Decide whether a Series holds MD5-hash text values (32 lowercase hex chars).\n",
    "\n",
    "    Parameters:\n",
    "    - series: pandas Series\n",
    "\n",
    "    Returns:\n",
    "    - bool: True when >90% of a sample matches the MD5 pattern\n",
    "    \"\"\"\n",
    "    if series.dtype != 'object':\n",
    "        return False\n",
    "    \n",
    "    # Drop missing values before sampling\n",
    "    non_null = series.dropna()\n",
    "    if len(non_null) == 0:\n",
    "        return False\n",
    "    \n",
    "    # An MD5 digest is exactly 32 characters drawn from 0-9 and a-f.\n",
    "    # Sample at most 1000 values so the check stays fast on large columns.\n",
    "    sample_size = min(1000, len(non_null))\n",
    "    sample = non_null.sample(n=sample_size, random_state=42)\n",
    "    \n",
    "    md5_pattern = re.compile(r'^[0-9a-f]{32}$')\n",
    "    \n",
    "    def is_md5_string(s):\n",
    "        # BUGFIX: the old int(s, 16) + islower() check rejected digests made up\n",
    "        # only of digits, because str.islower() is False without cased characters.\n",
    "        return bool(md5_pattern.match(str(s)))\n",
    "    \n",
    "    # Classify the column as MD5 text when over 90% of the sample matches\n",
    "    md5_ratio = sample.apply(is_md5_string).sum() / len(sample)\n",
    "    return md5_ratio > 0.9\n",
    "\n",
    "\n",
    "def is_date_field(col_name, series):\n",
    "    \"\"\"\n",
    "    Decide whether a column is a date field.\n",
    "\n",
    "    Parameters:\n",
    "    - col_name: column name\n",
    "    - series: pandas Series\n",
    "\n",
    "    Returns:\n",
    "    - bool: True when the column looks like a date field\n",
    "    \"\"\"\n",
    "    # Already a datetime dtype\n",
    "    if pd.api.types.is_datetime64_any_dtype(series):\n",
    "        return True\n",
    "    \n",
    "    # Only name-matched columns are probed further (any string containing 'DATE'\n",
    "    # also contains 'DAT', so the first test is redundant but harmless)\n",
    "    if 'DATE' in col_name.upper() or 'DAT' in col_name.upper():\n",
    "        # Case 1: integer columns encoding 8-digit dates such as 20250731\n",
    "        if pd.api.types.is_integer_dtype(series):\n",
    "            non_null = series.dropna()\n",
    "            if len(non_null) > 0:\n",
    "                # All values must fall in a plausible date range (19000101-21001231)\n",
    "                min_val = non_null.min()\n",
    "                max_val = non_null.max()\n",
    "                if 19000101 <= min_val <= 21001231 and 19000101 <= max_val <= 21001231:\n",
    "                    # Additionally require >90% of a head sample to be 8-digit numbers\n",
    "                    sample = non_null.head(100)\n",
    "                    is_8digit = (sample >= 10000000) & (sample <= 99999999)\n",
    "                    if is_8digit.mean() > 0.9:\n",
    "                        return True\n",
    "        \n",
    "        # Case 2: object columns holding 8-digit date strings such as \"20250731\"\n",
    "        elif series.dtype == 'object':\n",
    "            non_null = series.dropna()\n",
    "            if len(non_null) > 0:\n",
    "                sample = non_null.head(100)\n",
    "                # >90% of the head sample must be 8-digit strings\n",
    "                is_8digit = sample.astype(str).str.match(r'^\\d{8}$').mean() > 0.9\n",
    "                if is_8digit:\n",
    "                    return True\n",
    "                # ...or ISO-style date strings such as \"2025-04-07\"\n",
    "                is_date_format = sample.astype(str).str.match(r'^\\d{4}-\\d{2}-\\d{2}$').mean() > 0.9\n",
    "                if is_date_format:\n",
    "                    return True\n",
    "    \n",
    "    return False\n",
    "\n",
    "\n",
    "def identify_field_type(col, df, primary_key='CUST_NO'):\n",
    "    \"\"\"\n",
    "    Identify the business type of a column (tuned for MD5-anonymised data).\n",
    "\n",
    "    Rules, in priority order:\n",
    "    1. Primary key: the CUST_NO column\n",
    "    2. Date: name contains DATE/DAT with 8-digit ints / date strings / datetime dtype\n",
    "       (checked before the numeric rule because dates may be stored as int)\n",
    "    3. Category code: name ends with _CD or _IND (also before the numeric rule)\n",
    "    4. Numeric: any remaining int/float column\n",
    "    5. Text: object column whose values look like MD5 hashes\n",
    "    6. Everything else of object/categorical dtype: category\n",
    "\n",
    "    Parameters:\n",
    "    - col: column name\n",
    "    - df: DataFrame\n",
    "    - primary_key: name of the primary-key column\n",
    "\n",
    "    Returns:\n",
    "    - str: field-type label\n",
    "    \"\"\"\n",
    "    if col == primary_key:\n",
    "        return '🔑主键'\n",
    "    \n",
    "    # Date check must precede the numeric check: date columns may be int-typed\n",
    "    if is_date_field(col, df[col]):\n",
    "        return '📅日期型'\n",
    "    \n",
    "    # Code columns (_CD/_IND) may also be int-typed, so they come before numeric too\n",
    "    if col.endswith('_CD') or col.endswith('_IND'):\n",
    "        return '🏷️类别型'\n",
    "    \n",
    "    if pd.api.types.is_numeric_dtype(df[col]):\n",
    "        return '📊数值型'\n",
    "    \n",
    "    # Remaining object/categorical columns\n",
    "    if pd.api.types.is_object_dtype(df[col]) or pd.api.types.is_categorical_dtype(df[col]):\n",
    "        non_null_data = df[col].dropna()\n",
    "        \n",
    "        if len(non_null_data) == 0:\n",
    "            return '🏷️类别型'\n",
    "        \n",
    "        # MD5-anonymised columns are labelled as text\n",
    "        if is_text(df[col]):\n",
    "            return '🔐文本型'\n",
    "        \n",
    "        # BUGFIX: the previous unique-count branch returned '🏷️类别型' in both\n",
    "        # arms, so the nunique() computation was dead work and has been removed.\n",
    "        return '🏷️类别型'\n",
    "    \n",
    "    return '❓未知'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "dc5b7601",
   "metadata": {},
   "outputs": [],
   "source": [
    "def explore_basic_info(df, dataset_name=\"数据集\", primary_key='CUST_NO'):\n",
    "    \"\"\"\n",
    "    Print a basic profile of a dataset (tuned for MD5-anonymised data).\n",
    "\n",
    "    Parameters:\n",
    "    - df: pandas DataFrame to profile\n",
    "    - dataset_name: display name of the dataset\n",
    "    - primary_key: primary-key column (default 'CUST_NO'; excluded from field stats)\n",
    "\n",
    "    Returns:\n",
    "    - dict with shape, dtype counts, memory usage and per-type column lists\n",
    "    \"\"\"\n",
    "    print(\"=\"*80)\n",
    "    print(f\"📊 【{dataset_name}】基础信息探查\")\n",
    "    print(\"=\"*80)\n",
    "    \n",
    "    # 1. Shape\n",
    "    n_rows, n_cols = df.shape\n",
    "    print(f\"\\n1️⃣  数据形状:\")\n",
    "    print(f\"   ├─ 行数（样本数）: {n_rows:,}\")\n",
    "    print(f\"   └─ 列数（特征数）: {n_cols}\")\n",
    "    \n",
    "    # 2. Dtype distribution\n",
    "    dtype_counts = df.dtypes.value_counts()\n",
    "    print(f\"\\n2️⃣  数据类型分布:\")\n",
    "    for dtype, count in dtype_counts.items():\n",
    "        print(f\"   ├─ {dtype}: {count} 列 ({count/n_cols*100:.1f}%)\")\n",
    "    \n",
    "    # 3. Memory usage\n",
    "    memory_usage = df.memory_usage(deep=True).sum()\n",
    "    memory_mb = memory_usage / 1024 / 1024\n",
    "    print(f\"\\n3️⃣  内存占用:\")\n",
    "    print(f\"   └─ 总内存: {memory_mb:.2f} MB\")\n",
    "    \n",
    "    # Classify every column once and reuse the result in sections 4 and 5\n",
    "    # (previously identify_field_type ran twice per column)\n",
    "    field_types = {col: identify_field_type(col, df, primary_key) for col in df.columns}\n",
    "    \n",
    "    # 4. Per-column details\n",
    "    print(f\"\\n4️⃣  列信息详情:\")\n",
    "    print(f\"   {'列名':<30} {'字段类型':<15} {'数据类型':<15} {'非空数':<10} {'唯一值数':<10} {'唯一率':<8} {'内存(KB)':<10}\")\n",
    "    print(f\"   {'-'*115}\")\n",
    "    \n",
    "    for col in df.columns:\n",
    "        field_type = field_types[col]\n",
    "        col_dtype = str(df[col].dtype)\n",
    "        non_null = df[col].count()\n",
    "        unique_count = df[col].nunique()\n",
    "        unique_ratio = unique_count / len(df)\n",
    "        col_memory = df[col].memory_usage(deep=True) / 1024\n",
    "        \n",
    "        print(f\"   {col:<30} {field_type:<15} {col_dtype:<15} {non_null:<10} {unique_count:<10} {unique_ratio:<8.2%} {col_memory:<10.2f}\")\n",
    "    \n",
    "    # 5. Field-type summary (primary key excluded)\n",
    "    analysis_cols = [col for col in df.columns if col != primary_key]\n",
    "    \n",
    "    numeric_cols = []\n",
    "    date_cols = []\n",
    "    categorical_cols = []\n",
    "    md5_cols = []\n",
    "    \n",
    "    for col in analysis_cols:\n",
    "        field_type = field_types[col]\n",
    "        \n",
    "        if '数值型' in field_type:\n",
    "            numeric_cols.append(col)\n",
    "        elif '日期型' in field_type:\n",
    "            date_cols.append(col)\n",
    "        # BUGFIX: identify_field_type labels MD5 columns '🔐文本型', never 'MD5',\n",
    "        # so the old check for 'MD5' left md5_cols permanently empty and text\n",
    "        # columns were dropped from this summary entirely\n",
    "        elif '文本型' in field_type:\n",
    "            md5_cols.append(col)\n",
    "        elif '类别型' in field_type:\n",
    "            categorical_cols.append(col)\n",
    "    \n",
    "    print(f\"\\n5️⃣  字段类型统计（排除主键 {primary_key}）:\")\n",
    "    print(f\"   ├─ 数值型字段: {len(numeric_cols)} 个\")\n",
    "    if len(numeric_cols) > 0:\n",
    "        print(f\"   │  └─ {', '.join(numeric_cols[:5])}{'...' if len(numeric_cols) > 5 else ''}\")\n",
    "    \n",
    "    print(f\"   ├─ 日期型字段: {len(date_cols)} 个\")\n",
    "    if len(date_cols) > 0:\n",
    "        print(f\"   │  └─ {', '.join(date_cols[:5])}{'...' if len(date_cols) > 5 else ''}\")\n",
    "    \n",
    "    print(f\"   ├─ 类别型字段: {len(categorical_cols)} 个\")\n",
    "    if len(categorical_cols) > 0:\n",
    "        print(f\"   │  └─ {', '.join(categorical_cols[:5])}{'...' if len(categorical_cols) > 5 else ''}\")\n",
    "    \n",
    "    print(f\"   └─ 文本型字段: {len(md5_cols)} 个\")\n",
    "    if len(md5_cols) > 0:\n",
    "        print(f\"      └─ {', '.join(md5_cols[:5])}{'...' if len(md5_cols) > 5 else ''}\")\n",
    "    \n",
    "    print(\"\\n\" + \"=\"*80)\n",
    "    \n",
    "    return {\n",
    "        'shape': (n_rows, n_cols),\n",
    "        'dtypes': dtype_counts.to_dict(),\n",
    "        'memory_mb': memory_mb,\n",
    "        'primary_key': primary_key,\n",
    "        'numeric_cols': numeric_cols,\n",
    "        'date_cols': date_cols,\n",
    "        'categorical_cols': categorical_cols,\n",
    "        'md5_cols': md5_cols,\n",
    "        'analysis_cols': analysis_cols\n",
    "    }"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "8697e3d4",
   "metadata": {},
   "source": [
    "## 2. 数据质量分析"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "4c355d7d",
   "metadata": {},
   "source": [
    "### 2.1 优化后的统计特征分析"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "9d66ac72",
   "metadata": {},
   "outputs": [],
   "source": [
    "def analyze_text_features(df, dataset_name=\"数据集\", primary_key='CUST_NO'):\n",
    "    \"\"\"\n",
    "    Analyse the characteristics of text-like columns.\n",
    "\n",
    "    Parameters:\n",
    "    - df: pandas DataFrame to analyse\n",
    "    - dataset_name: display name of the dataset\n",
    "    - primary_key: primary-key column (default 'CUST_NO'; excluded from the analysis)\n",
    "\n",
    "    Returns:\n",
    "    - dict of per-column text statistics, or None when no text column is found\n",
    "    \"\"\"\n",
    "    # Detect text-like columns among the non-key object/categorical columns\n",
    "    text_cols = []\n",
    "    analysis_cols = [col for col in df.columns if col != primary_key]\n",
    "    \n",
    "    for col in analysis_cols:\n",
    "        if pd.api.types.is_object_dtype(df[col]) or pd.api.types.is_categorical_dtype(df[col]):\n",
    "            non_null_data = df[col].dropna()\n",
    "            if len(non_null_data) == 0:\n",
    "                continue\n",
    "            \n",
    "            unique_ratio = df[col].nunique() / len(df)\n",
    "            avg_length = non_null_data.astype(str).str.len().mean()\n",
    "            \n",
    "            # Text criterion: unique ratio > 50% AND average length > 10 characters\n",
    "            if unique_ratio > 0.5 and avg_length > 10:\n",
    "                text_cols.append(col)\n",
    "    \n",
    "    if len(text_cols) == 0:\n",
    "        print(f\"⚠️  【{dataset_name}】无文本型字段（已排除主键 {primary_key}）\")\n",
    "        return None\n",
    "    \n",
    "    print(\"=\"*100)\n",
    "    print(f\"📝 【{dataset_name}】文本型特征分析 ({len(text_cols)} 个字段，已排除主键 {primary_key})\")\n",
    "    print(\"=\"*100)\n",
    "    \n",
    "    text_info = {}\n",
    "    \n",
    "    for col in text_cols:\n",
    "        print(f\"\\n🔹 字段: {col}\")\n",
    "        print(f\"   {'-'*90}\")\n",
    "        \n",
    "        # Basic statistics\n",
    "        n_unique = df[col].nunique()\n",
    "        n_missing = df[col].isnull().sum()\n",
    "        missing_rate = n_missing / len(df) * 100\n",
    "        \n",
    "        # Text-length statistics\n",
    "        text_lengths = df[col].dropna().astype(str).str.len()\n",
    "        min_len = text_lengths.min()\n",
    "        max_len = text_lengths.max()\n",
    "        avg_len = text_lengths.mean()\n",
    "        median_len = text_lengths.median()\n",
    "        \n",
    "        print(f\"   唯一值数: {n_unique:,} ({n_unique/len(df)*100:.2f}%) | 缺失数: {n_missing:,} ({missing_rate:.2f}%)\")\n",
    "        print(f\"   文本长度: 最小={min_len:.0f}, 最大={max_len:.0f}, 平均={avg_len:.1f}, 中位数={median_len:.0f}\")\n",
    "        \n",
    "        # Top-10 most frequent values\n",
    "        word_counts = df[col].value_counts()\n",
    "        print(f\"\\n   Top 10 高频文本:\")\n",
    "        print(f\"   {'文本内容':<60} {'频次':<15} {'占比(%)':<15}\")\n",
    "        print(f\"   {'-'*95}\")\n",
    "        \n",
    "        for i, (val, count) in enumerate(word_counts.head(10).items()):\n",
    "            ratio = count / len(df) * 100\n",
    "            val_str = str(val)[:57] + \"...\" if len(str(val)) > 60 else str(val)\n",
    "            print(f\"   {val_str:<60} {count:<15,} {ratio:<15.2f}\")\n",
    "        \n",
    "        # Empty and single-character values (astype(str) turns NaN into 'nan',\n",
    "        # so empty_count only catches genuinely blank strings)\n",
    "        empty_count = (df[col].astype(str).str.strip() == '').sum()\n",
    "        single_char_count = (df[col].dropna().astype(str).str.len() == 1).sum()\n",
    "        \n",
    "        if empty_count > 0:\n",
    "            print(f\"\\n   ⚠️  空文本数: {empty_count} ({empty_count/len(df)*100:.2f}%)\")\n",
    "        if single_char_count > 0:\n",
    "            print(f\"   ⚠️  单字符文本数: {single_char_count} ({single_char_count/len(df)*100:.2f}%)\")\n",
    "        \n",
    "        text_info[col] = {\n",
    "            'n_unique': n_unique,\n",
    "            'n_missing': n_missing,\n",
    "            'missing_rate': missing_rate,\n",
    "            'min_length': min_len,\n",
    "            'max_length': max_len,\n",
    "            'avg_length': avg_len,\n",
    "            'median_length': median_len,\n",
    "            'empty_count': empty_count,\n",
    "            'single_char_count': single_char_count,\n",
    "            'top_values': word_counts.head(10).to_dict()\n",
    "        }\n",
    "    \n",
    "    print(\"\\n\" + \"=\"*100)\n",
    "    \n",
    "    return text_info"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "f88ca0ca",
   "metadata": {},
   "outputs": [],
   "source": [
    "def analyze_data_quality(df, dataset_name=\"数据集\", primary_key='CUST_NO'):\n",
    "    \"\"\"\n",
    "    Analyse data quality: missing values, duplicates, outliers, zero/negative values.\n",
    "\n",
    "    Parameters:\n",
    "    - df: pandas DataFrame to analyse\n",
    "    - dataset_name: display name of the dataset\n",
    "    - primary_key: primary-key column name (default 'CUST_NO')\n",
    "      NOTE(review): unlike the sibling helpers, this parameter is currently unused here\n",
    "\n",
    "    Returns:\n",
    "    - dict with missing-value stats, duplicate counts and IQR outlier info\n",
    "    \"\"\"\n",
    "    print(\"=\"*80)\n",
    "    print(f\"🔍 【{dataset_name}】数据质量分析\")\n",
    "    print(\"=\"*80)\n",
    "    \n",
    "    n_rows = len(df)\n",
    "    \n",
    "    # 1. Missing values\n",
    "    print(f\"\\n1️⃣  缺失值分析:\")\n",
    "    missing_stats = pd.DataFrame({\n",
    "        '缺失数': df.isnull().sum(),\n",
    "        '缺失率(%)': (df.isnull().sum() / n_rows * 100).round(2)\n",
    "    })\n",
    "    missing_stats = missing_stats[missing_stats['缺失数'] > 0].sort_values('缺失率(%)', ascending=False)\n",
    "    \n",
    "    if len(missing_stats) > 0:\n",
    "        print(f\"   ⚠️  发现 {len(missing_stats)} 个字段存在缺失值:\")\n",
    "        print(f\"\\n   {'字段名':<30} {'缺失数':<12} {'缺失率(%)':<12}\")\n",
    "        print(f\"   {'-'*60}\")\n",
    "        for col, row in missing_stats.iterrows():\n",
    "            print(f\"   {col:<30} {int(row['缺失数']):<12} {row['缺失率(%)']:<12.2f}\")\n",
    "    else:\n",
    "        print(f\"   ✅ 无缺失值\")\n",
    "    \n",
    "    # 2. Duplicate rows\n",
    "    print(f\"\\n2️⃣  重复值分析:\")\n",
    "    n_duplicates = df.duplicated().sum()\n",
    "    duplicate_rate = n_duplicates / n_rows * 100\n",
    "    \n",
    "    if n_duplicates > 0:\n",
    "        print(f\"   ⚠️  重复行数: {n_duplicates:,} ({duplicate_rate:.2f}%)\")\n",
    "    else:\n",
    "        print(f\"   ✅ 无重复行\")\n",
    "    \n",
    "    # 3. Unique values per column\n",
    "    print(f\"\\n3️⃣  唯一值分析:\")\n",
    "    print(f\"   {'字段名':<30} {'唯一值数':<15} {'唯一率(%)':<15} {'数据类型':<15}\")\n",
    "    print(f\"   {'-'*80}\")\n",
    "    \n",
    "    for col in df.columns:\n",
    "        unique_count = df[col].nunique()\n",
    "        unique_rate = unique_count / n_rows * 100\n",
    "        dtype = str(df[col].dtype)\n",
    "        \n",
    "        # Flag notable cardinalities: constants, unique identifiers, low cardinality\n",
    "        marker = \"\"\n",
    "        if unique_count == 1:\n",
    "            marker = \"⚠️ 常量\"\n",
    "        elif unique_count == n_rows:\n",
    "            marker = \"🔑 唯一标识\"\n",
    "        elif unique_rate < 1:\n",
    "            marker = \"📊 低基数\"\n",
    "        \n",
    "        print(f\"   {col:<30} {unique_count:<15} {unique_rate:<15.2f} {dtype:<15} {marker}\")\n",
    "    \n",
    "    # 4. Outlier detection for numeric columns (1.5*IQR rule)\n",
    "    numeric_cols = df.select_dtypes(include=[np.number]).columns\n",
    "    \n",
    "    if len(numeric_cols) > 0:\n",
    "        print(f\"\\n4️⃣  数值型字段异常值检测（IQR法）:\")\n",
    "        print(f\"   {'字段名':<30} {'异常值数':<15} {'异常率(%)':<15}\")\n",
    "        print(f\"   {'-'*65}\")\n",
    "        \n",
    "        outlier_info = {}\n",
    "        for col in numeric_cols:\n",
    "            Q1 = df[col].quantile(0.25)\n",
    "            Q3 = df[col].quantile(0.75)\n",
    "            IQR = Q3 - Q1\n",
    "            lower_bound = Q1 - 1.5 * IQR\n",
    "            upper_bound = Q3 + 1.5 * IQR\n",
    "            \n",
    "            outliers = ((df[col] < lower_bound) | (df[col] > upper_bound)).sum()\n",
    "            outlier_rate = outliers / n_rows * 100\n",
    "            \n",
    "            outlier_info[col] = {\n",
    "                'count': outliers,\n",
    "                'rate': outlier_rate,\n",
    "                'bounds': (lower_bound, upper_bound)\n",
    "            }\n",
    "            \n",
    "            # Only columns that actually contain outliers are printed\n",
    "            if outliers > 0:\n",
    "                print(f\"   {col:<30} {outliers:<15} {outlier_rate:<15.2f}\")\n",
    "    else:\n",
    "        outlier_info = {}\n",
    "    \n",
    "    # 5. Zero and negative values in numeric columns\n",
    "    if len(numeric_cols) > 0:\n",
    "        print(f\"\\n5️⃣  零值和负值分析:\")\n",
    "        print(f\"   {'字段名':<30} {'零值数':<12} {'零值率(%)':<12} {'负值数':<12} {'负值率(%)':<12}\")\n",
    "        print(f\"   {'-'*90}\")\n",
    "        \n",
    "        for col in numeric_cols:\n",
    "            zero_count = (df[col] == 0).sum()\n",
    "            zero_rate = zero_count / n_rows * 100\n",
    "            negative_count = (df[col] < 0).sum()\n",
    "            negative_rate = negative_count / n_rows * 100\n",
    "            \n",
    "            if zero_count > 0 or negative_count > 0:\n",
    "                print(f\"   {col:<30} {zero_count:<12} {zero_rate:<12.2f} {negative_count:<12} {negative_rate:<12.2f}\")\n",
    "    \n",
    "    print(\"\\n\" + \"=\"*80)\n",
    "    \n",
    "    return {\n",
    "        'missing_stats': missing_stats.to_dict() if len(missing_stats) > 0 else {},\n",
    "        'n_duplicates': n_duplicates,\n",
    "        'duplicate_rate': duplicate_rate,\n",
    "        'outlier_info': outlier_info\n",
    "    }"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "24fe52fe",
   "metadata": {},
   "source": [
    "## 3. 统计特征分析"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "2c9b16a8",
   "metadata": {},
   "outputs": [],
   "source": [
    "def analyze_numeric_features(df, dataset_name=\"数据集\"):\n",
    "    \"\"\"\n",
    "    Analyse the statistical properties of numeric columns.\n",
    "\n",
    "    Parameters:\n",
    "    - df: pandas DataFrame to analyse\n",
    "    - dataset_name: display name of the dataset\n",
    "\n",
    "    Returns:\n",
    "    - DataFrame of per-column statistics, or None when no numeric column exists\n",
    "    \"\"\"\n",
    "    numeric_cols = df.select_dtypes(include=[np.number]).columns\n",
    "    \n",
    "    if len(numeric_cols) == 0:\n",
    "        print(f\"⚠️  【{dataset_name}】无数值型字段\")\n",
    "        return None\n",
    "    \n",
    "    print(\"=\"*100)\n",
    "    print(f\"📈 【{dataset_name}】数值型特征统计分析 ({len(numeric_cols)} 个字段)\")\n",
    "    print(\"=\"*100)\n",
    "    \n",
    "    # Base describe() statistics, one row per column\n",
    "    stats_df = df[numeric_cols].describe().T\n",
    "    \n",
    "    # Extra indicators: missing/zero counts and distribution-shape measures\n",
    "    stats_df['缺失数'] = df[numeric_cols].isnull().sum()\n",
    "    stats_df['缺失率(%)'] = (stats_df['缺失数'] / len(df) * 100).round(2)\n",
    "    stats_df['零值数'] = (df[numeric_cols] == 0).sum()\n",
    "    stats_df['零值率(%)'] = (stats_df['零值数'] / len(df) * 100).round(2)\n",
    "    stats_df['偏度'] = df[numeric_cols].skew().round(2)\n",
    "    stats_df['峰度'] = df[numeric_cols].kurtosis().round(2)\n",
    "    \n",
    "    # Fixed column order for display\n",
    "    cols_order = ['count', 'mean', 'std', 'min', '25%', '50%', '75%', 'max', \n",
    "                  '缺失数', '缺失率(%)', '零值数', '零值率(%)', '偏度', '峰度']\n",
    "    stats_df = stats_df[cols_order]\n",
    "    \n",
    "    # Full-width text dump of the statistics table\n",
    "    print(\"\\n📊 详细统计信息:\")\n",
    "    print(stats_df.to_string())\n",
    "    \n",
    "    # Distribution summary with a transformation hint per column\n",
    "    print(f\"\\n📌 分布特征总结:\")\n",
    "    print(f\"   {'字段名':<30} {'分布特征':<40} {'建议':<30}\")\n",
    "    print(f\"   {'-'*105}\")\n",
    "    \n",
    "    for col in numeric_cols:\n",
    "        skewness = df[col].skew()\n",
    "        kurtosis_val = df[col].kurtosis()\n",
    "        \n",
    "        # Classify by skewness magnitude and sign\n",
    "        # NOTE(review): a constant column yields NaN skewness, which falls through\n",
    "        # to the left-skew branch below — consider a dedicated NaN case\n",
    "        if abs(skewness) < 0.5:\n",
    "            dist_type = \"✅ 近似正态分布\"\n",
    "            suggestion = \"无需转换\"\n",
    "        elif skewness > 0.5:\n",
    "            dist_type = f\"⚠️  右偏分布 (偏度={skewness:.2f})\"\n",
    "            suggestion = \"考虑对数/Box-Cox转换\"\n",
    "        else:\n",
    "            dist_type = f\"⚠️  左偏分布 (偏度={skewness:.2f})\"\n",
    "            suggestion = \"考虑平方/指数转换\"\n",
    "        \n",
    "        print(f\"   {col:<30} {dist_type:<40} {suggestion:<30}\")\n",
    "    \n",
    "    print(\"\\n\" + \"=\"*100)\n",
    "    \n",
    "    return stats_df\n",
    "\n",
    "\n",
    "def analyze_categorical_features(df, dataset_name=\"数据集\", top_n=10):\n",
    "    \"\"\"\n",
    "    Profile the distribution of categorical columns.\n",
    "\n",
    "    Parameters:\n",
    "    - df: pandas DataFrame to analyse\n",
    "    - dataset_name: display name of the dataset\n",
    "    - top_n: how many of the most frequent categories to show\n",
    "\n",
    "    Returns:\n",
    "    - dict of per-column statistics, or None when no categorical column exists\n",
    "    \"\"\"\n",
    "    categorical_cols = df.select_dtypes(include=['object', 'category']).columns\n",
    "    if len(categorical_cols) == 0:\n",
    "        print(f\"⚠️  【{dataset_name}】无类别型字段\")\n",
    "        return None\n",
    "    \n",
    "    print(\"=\"*100)\n",
    "    print(f\"📊 【{dataset_name}】类别型特征分析 ({len(categorical_cols)} 个字段)\")\n",
    "    print(\"=\"*100)\n",
    "    \n",
    "    results = {}\n",
    "    n_rows = len(df)\n",
    "    \n",
    "    for col in categorical_cols:\n",
    "        print(f\"\\n🔹 字段: {col}\")\n",
    "        print(f\"   {'-'*90}\")\n",
    "        \n",
    "        # Headline numbers for the column\n",
    "        n_unique = df[col].nunique()\n",
    "        n_missing = df[col].isnull().sum()\n",
    "        missing_rate = n_missing / n_rows * 100\n",
    "        \n",
    "        print(f\"   唯一值数: {n_unique:,} | 缺失数: {n_missing:,} ({missing_rate:.2f}%)\")\n",
    "        \n",
    "        # Frequency table, most common first\n",
    "        value_counts = df[col].value_counts()\n",
    "        \n",
    "        print(f\"\\n   Top {min(top_n, len(value_counts))} 高频类别:\")\n",
    "        print(f\"   {'类别':<40} {'频次':<15} {'占比(%)':<15}\")\n",
    "        print(f\"   {'-'*75}\")\n",
    "        \n",
    "        for val, count in value_counts.head(top_n).items():\n",
    "            ratio = count / n_rows * 100\n",
    "            # Truncate long category labels so the table stays aligned\n",
    "            if len(str(val)) > 40:\n",
    "                val_str = str(val)[:37] + \"...\"\n",
    "            else:\n",
    "                val_str = str(val)\n",
    "            print(f\"   {val_str:<40} {count:<15,} {ratio:<15.2f}\")\n",
    "        \n",
    "        # Count categories rarer than 0.1% of the rows\n",
    "        low_freq_threshold = n_rows * 0.001\n",
    "        low_freq_count = (value_counts < low_freq_threshold).sum()\n",
    "        \n",
    "        if low_freq_count > 0:\n",
    "            print(f\"\\n   ⚠️  低频类别数（<0.1%）: {low_freq_count}\")\n",
    "        \n",
    "        results[col] = {\n",
    "            'n_unique': n_unique,\n",
    "            'n_missing': n_missing,\n",
    "            'missing_rate': missing_rate,\n",
    "            'top_values': value_counts.head(top_n).to_dict(),\n",
    "            'low_freq_count': low_freq_count\n",
    "        }\n",
    "    \n",
    "    print(\"\\n\" + \"=\"*100)\n",
    "    \n",
    "    return results"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "5680a651",
   "metadata": {},
   "source": [
    "## 4. 数据清洗操作"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "ecb16ca0",
   "metadata": {},
   "outputs": [],
   "source": [
    "def handle_missing_values(df, numeric_strategy='median', categorical_strategy='mode', \n",
    "                         fill_value=None, dataset_name=\"数据集\", inplace=False):\n",
    "    \"\"\"\n",
    "    处理缺失值的通用方法\n",
    "    \n",
    "    参数:\n",
    "    - df: pandas DataFrame，待处理的数据集\n",
    "    - numeric_strategy: str，数值型缺失值处理策略 ['mean', 'median', 'mode', 'zero', 'forward', 'backward', 'custom']\n",
    "    - categorical_strategy: str，类别型缺失值处理策略 ['mode', 'unknown', 'forward', 'backward', 'custom']\n",
    "    - fill_value: 自定义填充值（当strategy='custom'时使用）\n",
    "    - dataset_name: str，数据集名称\n",
    "    - inplace: bool，是否原地修改\n",
    "    \n",
    "    返回:\n",
    "    - DataFrame: 处理后的数据集（如果inplace=False）\n",
    "    \"\"\"\n",
    "    if not inplace:\n",
    "        df = df.copy()\n",
    "    \n",
    "    print(\"=\"*80)\n",
    "    print(f\"🔧 【{dataset_name}】缺失值处理\")\n",
    "    print(\"=\"*80)\n",
    "    \n",
    "    # 记录处理前的缺失情况\n",
    "    missing_before = df.isnull().sum().sum()\n",
    "    \n",
    "    if missing_before == 0:\n",
    "        print(\"✅ 数据集无缺失值，无需处理\")\n",
    "        return df if not inplace else None\n",
    "    \n",
    "    print(f\"\\n处理前缺失值总数: {missing_before:,}\")\n",
    "    \n",
    "    # 数值型字段处理\n",
    "    numeric_cols = df.select_dtypes(include=[np.number]).columns\n",
    "    numeric_missing = df[numeric_cols].isnull().sum()\n",
    "    numeric_missing_cols = numeric_missing[numeric_missing > 0].index\n",
    "    \n",
    "    if len(numeric_missing_cols) > 0:\n",
    "        print(f\"\\n📊 数值型字段处理 (策略: {numeric_strategy}):\")\n",
    "        for col in numeric_missing_cols:\n",
    "            missing_count = df[col].isnull().sum()\n",
    "            \n",
    "            if numeric_strategy == 'mean':\n",
    "                fill_val = df[col].mean()\n",
    "                df[col].fillna(fill_val, inplace=True)\n",
    "            elif numeric_strategy == 'median':\n",
    "                fill_val = df[col].median()\n",
    "                df[col].fillna(fill_val, inplace=True)\n",
    "            elif numeric_strategy == 'mode':\n",
    "                fill_val = df[col].mode()[0] if not df[col].mode().empty else 0\n",
    "                df[col].fillna(fill_val, inplace=True)\n",
    "            elif numeric_strategy == 'zero':\n",
    "                df[col].fillna(0, inplace=True)\n",
    "                fill_val = 0\n",
    "            elif numeric_strategy == 'forward':\n",
    "                df[col].fillna(method='ffill', inplace=True)\n",
    "                fill_val = \"前向填充\"\n",
    "            elif numeric_strategy == 'backward':\n",
    "                df[col].fillna(method='bfill', inplace=True)\n",
    "                fill_val = \"后向填充\"\n",
    "            elif numeric_strategy == 'custom' and fill_value is not None:\n",
    "                df[col].fillna(fill_value, inplace=True)\n",
    "                fill_val = fill_value\n",
    "            \n",
    "            if numeric_strategy not in ['forward', 'backward']:\n",
    "                print(f\"   ├─ {col}: {missing_count} 个缺失值 → 填充值: {fill_val:.2f if isinstance(fill_val, float) else fill_val}\")\n",
    "            else:\n",
    "                print(f\"   ├─ {col}: {missing_count} 个缺失值 → {fill_val}\")\n",
    "    \n",
    "    # 类别型字段处理\n",
    "    categorical_cols = df.select_dtypes(include=['object', 'category']).columns\n",
    "    categorical_missing = df[categorical_cols].isnull().sum()\n",
    "    categorical_missing_cols = categorical_missing[categorical_missing > 0].index\n",
    "    \n",
    "    if len(categorical_missing_cols) > 0:\n",
    "        print(f\"\\n📝 类别型字段处理 (策略: {categorical_strategy}):\")\n",
    "        for col in categorical_missing_cols:\n",
    "            missing_count = df[col].isnull().sum()\n",
    "            \n",
    "            if categorical_strategy == 'mode':\n",
    "                fill_val = df[col].mode()[0] if not df[col].mode().empty else 'unknown'\n",
    "                df[col].fillna(fill_val, inplace=True)\n",
    "            elif categorical_strategy == 'unknown':\n",
    "                df[col].fillna('unknown', inplace=True)\n",
    "                fill_val = 'unknown'\n",
    "            elif categorical_strategy == 'forward':\n",
    "                df[col].fillna(method='ffill', inplace=True)\n",
    "                fill_val = \"前向填充\"\n",
    "            elif categorical_strategy == 'backward':\n",
    "                df[col].fillna(method='bfill', inplace=True)\n",
    "                fill_val = \"后向填充\"\n",
    "            elif categorical_strategy == 'custom' and fill_value is not None:\n",
    "                df[col].fillna(fill_value, inplace=True)\n",
    "                fill_val = fill_value\n",
    "            \n",
    "            print(f\"   ├─ {col}: {missing_count} 个缺失值 → 填充值: {fill_val}\")\n",
    "    \n",
    "    # 记录处理后的缺失情况\n",
    "    missing_after = df.isnull().sum().sum()\n",
    "    print(f\"\\n处理后缺失值总数: {missing_after:,}\")\n",
    "    print(f\"✅ 成功处理 {missing_before - missing_after:,} 个缺失值\")\n",
    "    \n",
    "    print(\"=\"*80)\n",
    "    \n",
    "    return df if not inplace else None\n",
    "\n",
    "\n",
    "def remove_duplicates(df, subset=None, keep='first', dataset_name=\"数据集\", inplace=False):\n",
    "    \"\"\"\n",
    "    删除重复行\n",
    "    \n",
    "    参数:\n",
    "    - df: pandas DataFrame，待处理的数据集\n",
    "    - subset: list，用于判断重复的列（None表示所有列）\n",
    "    - keep: str，保留策略 ['first', 'last', False]\n",
    "    - dataset_name: str，数据集名称\n",
    "    - inplace: bool，是否原地修改\n",
    "    \n",
    "    返回:\n",
    "    - DataFrame: 处理后的数据集（如果inplace=False）\n",
    "    \"\"\"\n",
    "    if not inplace:\n",
    "        df = df.copy()\n",
    "    \n",
    "    print(\"=\"*80)\n",
    "    print(f\"🔧 【{dataset_name}】重复值处理\")\n",
    "    print(\"=\"*80)\n",
    "    \n",
    "    n_before = len(df)\n",
    "    n_duplicates = df.duplicated(subset=subset, keep=keep).sum()\n",
    "    \n",
    "    if n_duplicates == 0:\n",
    "        print(\"✅ 数据集无重复行\")\n",
    "    else:\n",
    "        print(f\"\\n发现重复行: {n_duplicates:,} ({n_duplicates/n_before*100:.2f}%)\")\n",
    "        df.drop_duplicates(subset=subset, keep=keep, inplace=True)\n",
    "        n_after = len(df)\n",
    "        print(f\"删除后行数: {n_before:,} → {n_after:,}\")\n",
    "        print(f\"✅ 成功删除 {n_before - n_after:,} 行重复数据\")\n",
    "    \n",
    "    print(\"=\"*80)\n",
    "    \n",
    "    return df if not inplace else None\n",
    "\n",
    "\n",
    "def handle_outliers(df, columns=None, method='iqr', threshold=1.5, strategy='cap', \n",
    "                   dataset_name=\"数据集\", inplace=False):\n",
    "    \"\"\"\n",
    "    处理异常值\n",
    "    \n",
    "    参数:\n",
    "    - df: pandas DataFrame，待处理的数据集\n",
    "    - columns: list，需要处理异常值的列（None表示所有数值型列）\n",
    "    - method: str，异常值检测方法 ['iqr', 'zscore']\n",
    "    - threshold: float，阈值（IQR法默认1.5，Z-score法默认3）\n",
    "    - strategy: str，处理策略 ['cap'(截断), 'remove'(删除), 'nan'(设为缺失)]\n",
    "    - dataset_name: str，数据集名称\n",
    "    - inplace: bool，是否原地修改\n",
    "    \n",
    "    返回:\n",
    "    - DataFrame: 处理后的数据集（如果inplace=False）\n",
    "    \"\"\"\n",
    "    if not inplace:\n",
    "        df = df.copy()\n",
    "    \n",
    "    print(\"=\"*80)\n",
    "    print(f\"🔧 【{dataset_name}】异常值处理\")\n",
    "    print(\"=\"*80)\n",
    "    \n",
    "    if columns is None:\n",
    "        columns = df.select_dtypes(include=[np.number]).columns.tolist()\n",
    "    \n",
    "    print(f\"\\n检测方法: {method.upper()} | 阈值: {threshold} | 处理策略: {strategy}\")\n",
    "    print(f\"\\n{'字段名':<30} {'异常值数':<15} {'处理方式':<30}\")\n",
    "    print(\"-\"*80)\n",
    "    \n",
    "    total_outliers = 0\n",
    "    \n",
    "    for col in columns:\n",
    "        if col not in df.columns:\n",
    "            continue\n",
    "            \n",
    "        # 检测异常值\n",
    "        if method == 'iqr':\n",
    "            Q1 = df[col].quantile(0.25)\n",
    "            Q3 = df[col].quantile(0.75)\n",
    "            IQR = Q3 - Q1\n",
    "            lower_bound = Q1 - threshold * IQR\n",
    "            upper_bound = Q3 + threshold * IQR\n",
    "            outlier_mask = (df[col] < lower_bound) | (df[col] > upper_bound)\n",
    "        elif method == 'zscore':\n",
    "            z_scores = np.abs(stats.zscore(df[col].dropna()))\n",
    "            outlier_mask = z_scores > threshold\n",
    "        else:\n",
    "            print(f\"⚠️  未知的检测方法: {method}\")\n",
    "            continue\n",
    "        \n",
    "        n_outliers = outlier_mask.sum()\n",
    "        total_outliers += n_outliers\n",
    "        \n",
    "        if n_outliers > 0:\n",
    "            # 处理异常值\n",
    "            if strategy == 'cap':\n",
    "                if method == 'iqr':\n",
    "                    df.loc[df[col] < lower_bound, col] = lower_bound\n",
    "                    df.loc[df[col] > upper_bound, col] = upper_bound\n",
    "                    action = f\"截断至 [{lower_bound:.2f}, {upper_bound:.2f}]\"\n",
    "                else:\n",
    "                    action = \"Z-score截断\"\n",
    "            elif strategy == 'remove':\n",
    "                df = df[~outlier_mask]\n",
    "                action = \"删除异常行\"\n",
    "            elif strategy == 'nan':\n",
    "                df.loc[outlier_mask, col] = np.nan\n",
    "                action = \"设为缺失值\"\n",
    "            \n",
    "            print(f\"{col:<30} {n_outliers:<15} {action:<30}\")\n",
    "    \n",
    "    if total_outliers == 0:\n",
    "        print(\"✅ 未检测到异常值\")\n",
    "    else:\n",
    "        print(f\"\\n✅ 共处理 {total_outliers:,} 个异常值\")\n",
    "    \n",
    "    print(\"=\"*80)\n",
    "    \n",
    "    return df if not inplace else None\n",
    "\n",
    "\n",
    "def optimize_dtypes(df, dataset_name=\"数据集\", inplace=False):\n",
    "    \"\"\"\n",
    "    优化数据类型以减少内存占用\n",
    "    \n",
    "    参数:\n",
    "    - df: pandas DataFrame，待优化的数据集\n",
    "    - dataset_name: str，数据集名称\n",
    "    - inplace: bool，是否原地修改\n",
    "    \n",
    "    返回:\n",
    "    - DataFrame: 优化后的数据集（如果inplace=False）\n",
    "    \"\"\"\n",
    "    if not inplace:\n",
    "        df = df.copy()\n",
    "    \n",
    "    print(\"=\"*80)\n",
    "    print(f\"⚡ 【{dataset_name}】数据类型优化\")\n",
    "    print(\"=\"*80)\n",
    "    \n",
    "    memory_before = df.memory_usage(deep=True).sum() / 1024 / 1024\n",
    "    print(f\"\\n优化前内存占用: {memory_before:.2f} MB\")\n",
    "    \n",
    "    # 整型优化\n",
    "    int_cols = df.select_dtypes(include=['int64']).columns\n",
    "    for col in int_cols:\n",
    "        col_min = df[col].min()\n",
    "        col_max = df[col].max()\n",
    "        \n",
    "        if col_min >= 0:\n",
    "            if col_max <= 255:\n",
    "                df[col] = df[col].astype('uint8')\n",
    "            elif col_max <= 65535:\n",
    "                df[col] = df[col].astype('uint16')\n",
    "            elif col_max <= 4294967295:\n",
    "                df[col] = df[col].astype('uint32')\n",
    "        else:\n",
    "            if col_min >= -128 and col_max <= 127:\n",
    "                df[col] = df[col].astype('int8')\n",
    "            elif col_min >= -32768 and col_max <= 32767:\n",
    "                df[col] = df[col].astype('int16')\n",
    "            elif col_min >= -2147483648 and col_max <= 2147483647:\n",
    "                df[col] = df[col].astype('int32')\n",
    "    \n",
    "    # 浮点型优化\n",
    "    float_cols = df.select_dtypes(include=['float64']).columns\n",
    "    for col in float_cols:\n",
    "        df[col] = df[col].astype('float32')\n",
    "    \n",
    "    # 类别型优化（对于唯一值较少的object类型）\n",
    "    object_cols = df.select_dtypes(include=['object']).columns\n",
    "    for col in object_cols:\n",
    "        num_unique = df[col].nunique()\n",
    "        num_total = len(df)\n",
    "        if num_unique / num_total < 0.5:  # 唯一值比例小于50%\n",
    "            df[col] = df[col].astype('category')\n",
    "    \n",
    "    memory_after = df.memory_usage(deep=True).sum() / 1024 / 1024\n",
    "    memory_saved = memory_before - memory_after\n",
    "    memory_saved_pct = memory_saved / memory_before * 100\n",
    "    \n",
    "    print(f\"优化后内存占用: {memory_after:.2f} MB\")\n",
    "    print(f\"✅ 节省内存: {memory_saved:.2f} MB ({memory_saved_pct:.1f}%)\")\n",
    "    \n",
    "    print(\"=\"*80)\n",
    "    \n",
    "    return df if not inplace else None"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "253e82bf",
   "metadata": {},
   "source": [
    "## 5. 可视化分析"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "17ede510",
   "metadata": {},
   "outputs": [],
   "source": [
    "def visualize_numeric_distribution(df, columns=None, ncols=3, figsize=(15, 4), dataset_name=\"数据集\"):\n",
    "    \"\"\"\n",
    "    Plot histogram + KDE panels for numeric features, with mean/median markers.\n",
    "    \n",
    "    Parameters:\n",
    "    - df: pandas DataFrame to visualize\n",
    "    - columns: columns to plot (None = every numeric column)\n",
    "    - ncols: number of panels per row\n",
    "    - figsize: (width, height) of a single row of panels\n",
    "    - dataset_name: label used in the printed header\n",
    "    \"\"\"\n",
    "    if columns is None:\n",
    "        columns = df.select_dtypes(include=[np.number]).columns.tolist()\n",
    "    \n",
    "    if not columns:\n",
    "        print(f\"⚠️  【{dataset_name}】无数值型字段可视化\")\n",
    "        return\n",
    "    \n",
    "    n_features = len(columns)\n",
    "    nrows = -(-n_features // ncols)  # ceiling division\n",
    "    \n",
    "    fig, axes = plt.subplots(nrows, ncols, figsize=(figsize[0], figsize[1] * nrows))\n",
    "    axes = [axes] if nrows == 1 and ncols == 1 else axes.flatten()\n",
    "    \n",
    "    print(f\"📊 【{dataset_name}】数值型特征分布可视化 ({n_features} 个字段)\")\n",
    "    \n",
    "    for ax, col in zip(axes, columns):\n",
    "        series = df[col].dropna()\n",
    "        \n",
    "        # Density-normalized histogram with a KDE overlay (KDE needs >= 2 points).\n",
    "        ax.hist(series, bins=50, alpha=0.6, color='skyblue', edgecolor='black', density=True)\n",
    "        if len(series) > 1:\n",
    "            series.plot(kind='kde', ax=ax, color='red', linewidth=2)\n",
    "        \n",
    "        # Reference lines for the central tendency.\n",
    "        mean_val = series.mean()\n",
    "        median_val = series.median()\n",
    "        ax.axvline(mean_val, color='green', linestyle='--', linewidth=1.5, label=f'均值: {mean_val:.2f}')\n",
    "        ax.axvline(median_val, color='orange', linestyle='--', linewidth=1.5, label=f'中位数: {median_val:.2f}')\n",
    "        \n",
    "        ax.set_title(f'{col}\\n偏度: {series.skew():.2f} | 峰度: {series.kurtosis():.2f}', fontsize=10)\n",
    "        ax.set_xlabel('')\n",
    "        ax.set_ylabel('密度')\n",
    "        ax.legend(fontsize=8)\n",
    "        ax.grid(True, alpha=0.3)\n",
    "    \n",
    "    # Blank out any unused panels in the grid.\n",
    "    for ax in axes[n_features:]:\n",
    "        ax.axis('off')\n",
    "    \n",
    "    plt.tight_layout()\n",
    "    plt.show()\n",
    "\n",
    "\n",
    "def visualize_boxplot(df, columns=None, ncols=3, figsize=(15, 4), dataset_name=\"数据集\"):\n",
    "    \"\"\"\n",
    "    Draw per-column box plots and report 1.5*IQR outlier counts in each title.\n",
    "    \n",
    "    Parameters:\n",
    "    - df: pandas DataFrame to visualize\n",
    "    - columns: columns to plot (None = every numeric column)\n",
    "    - ncols: number of panels per row\n",
    "    - figsize: (width, height) of a single row of panels\n",
    "    - dataset_name: label used in the printed header\n",
    "    \"\"\"\n",
    "    if columns is None:\n",
    "        columns = df.select_dtypes(include=[np.number]).columns.tolist()\n",
    "    \n",
    "    if not columns:\n",
    "        print(f\"⚠️  【{dataset_name}】无数值型字段可视化\")\n",
    "        return\n",
    "    \n",
    "    n_features = len(columns)\n",
    "    nrows = -(-n_features // ncols)  # ceiling division\n",
    "    \n",
    "    fig, axes = plt.subplots(nrows, ncols, figsize=(figsize[0], figsize[1] * nrows))\n",
    "    axes = [axes] if nrows == 1 and ncols == 1 else axes.flatten()\n",
    "    \n",
    "    print(f\"📦 【{dataset_name}】箱线图异常值检测 ({n_features} 个字段)\")\n",
    "    \n",
    "    for ax, col in zip(axes, columns):\n",
    "        series = df[col].dropna()\n",
    "        bp = ax.boxplot(series, vert=True, patch_artist=True)\n",
    "        \n",
    "        # Styling: light-blue box, bold red median line.\n",
    "        bp['boxes'][0].set_facecolor('lightblue')\n",
    "        bp['boxes'][0].set_alpha(0.7)\n",
    "        bp['medians'][0].set_color('red')\n",
    "        bp['medians'][0].set_linewidth(2)\n",
    "        \n",
    "        # Count points outside the 1.5*IQR fences for the title annotation.\n",
    "        q1 = series.quantile(0.25)\n",
    "        q3 = series.quantile(0.75)\n",
    "        spread = q3 - q1\n",
    "        low_fence = q1 - 1.5 * spread\n",
    "        high_fence = q3 + 1.5 * spread\n",
    "        n_outliers = ((series < low_fence) | (series > high_fence)).sum()\n",
    "        outlier_pct = n_outliers / len(series) * 100\n",
    "        \n",
    "        ax.set_title(f'{col}\\n异常值: {n_outliers} ({outlier_pct:.1f}%)', fontsize=10)\n",
    "        ax.set_ylabel('值')\n",
    "        ax.grid(True, alpha=0.3, axis='y')\n",
    "    \n",
    "    # Blank out any unused panels in the grid.\n",
    "    for ax in axes[n_features:]:\n",
    "        ax.axis('off')\n",
    "    \n",
    "    plt.tight_layout()\n",
    "    plt.show()\n",
    "\n",
    "\n",
    "def visualize_correlation_matrix(df, method='pearson', figsize=(12, 10), dataset_name=\"数据集\"):\n",
    "    \"\"\"\n",
    "    Render a lower-triangle correlation heatmap and list highly correlated\n",
    "    feature pairs (|r| > 0.7).\n",
    "    \n",
    "    Parameters:\n",
    "    - df: pandas DataFrame to visualize\n",
    "    - method: correlation method, one of ['pearson', 'spearman', 'kendall']\n",
    "    - figsize: overall figure size\n",
    "    - dataset_name: label used in the printed header and plot title\n",
    "    \"\"\"\n",
    "    numeric_cols = df.select_dtypes(include=[np.number]).columns\n",
    "    \n",
    "    if len(numeric_cols) < 2:\n",
    "        print(f\"⚠️  【{dataset_name}】数值型字段少于2个，无法计算相关性\")\n",
    "        return\n",
    "    \n",
    "    print(f\"🔥 【{dataset_name}】特征相关性热图 (方法: {method})\")\n",
    "    \n",
    "    corr_matrix = df[numeric_cols].corr(method=method)\n",
    "    \n",
    "    plt.figure(figsize=figsize)\n",
    "    \n",
    "    import seaborn as sns\n",
    "    # Mask the upper triangle so each pair appears exactly once.\n",
    "    mask = np.triu(np.ones_like(corr_matrix, dtype=bool))\n",
    "    \n",
    "    sns.heatmap(corr_matrix, mask=mask, annot=True, fmt='.2f', \n",
    "                cmap='coolwarm', center=0, square=True, linewidths=0.5,\n",
    "                cbar_kws={\"shrink\": 0.8}, vmin=-1, vmax=1)\n",
    "    \n",
    "    plt.title(f'{dataset_name} - 特征相关性矩阵 ({method.upper()})', fontsize=14, pad=20)\n",
    "    plt.tight_layout()\n",
    "    plt.show()\n",
    "    \n",
    "    # Text report: enumerate the strongly correlated pairs.\n",
    "    print(f\"\\n🔍 高相关性特征对 (|相关系数| > 0.7):\")\n",
    "    print(f\"   {'特征1':<30} {'特征2':<30} {'相关系数':<15}\")\n",
    "    print(f\"   {'-'*80}\")\n",
    "    \n",
    "    high_corr_pairs = []\n",
    "    cols = corr_matrix.columns\n",
    "    for i, feat1 in enumerate(cols):\n",
    "        for j in range(i + 1, len(cols)):\n",
    "            corr_val = corr_matrix.iloc[i, j]\n",
    "            if abs(corr_val) > 0.7:\n",
    "                feat2 = cols[j]\n",
    "                high_corr_pairs.append((feat1, feat2, corr_val))\n",
    "                print(f\"   {feat1:<30} {feat2:<30} {corr_val:<15.3f}\")\n",
    "    \n",
    "    if not high_corr_pairs:\n",
    "        print(\"   ✅ 无高相关性特征对\")\n",
    "\n",
    "\n",
    "def visualize_categorical_distribution(df, columns=None, top_n=10, ncols=2, \n",
    "                                       figsize=(15, 5), dataset_name=\"数据集\"):\n",
    "    \"\"\"\n",
    "    Plot horizontal frequency bars for categorical features (top-N categories).\n",
    "    \n",
    "    Parameters:\n",
    "    - df: pandas DataFrame to visualize\n",
    "    - columns: columns to plot (None = every object/category column)\n",
    "    - top_n: number of most frequent categories shown per column\n",
    "    - ncols: number of panels per row\n",
    "    - figsize: (width, height) of a single row of panels\n",
    "    - dataset_name: label used in the printed header\n",
    "    \"\"\"\n",
    "    if columns is None:\n",
    "        columns = df.select_dtypes(include=['object', 'category']).columns.tolist()\n",
    "    \n",
    "    if not columns:\n",
    "        print(f\"⚠️  【{dataset_name}】无类别型字段可视化\")\n",
    "        return\n",
    "    \n",
    "    n_features = len(columns)\n",
    "    nrows = -(-n_features // ncols)  # ceiling division\n",
    "    \n",
    "    fig, axes = plt.subplots(nrows, ncols, figsize=(figsize[0], figsize[1] * nrows))\n",
    "    axes = [axes] if nrows == 1 and ncols == 1 else axes.flatten()\n",
    "    \n",
    "    print(f\"📊 【{dataset_name}】类别型特征分布可视化 ({n_features} 个字段)\")\n",
    "    \n",
    "    for ax, col in zip(axes, columns):\n",
    "        top_counts = df[col].value_counts().head(top_n)\n",
    "        \n",
    "        top_counts.plot(kind='barh', ax=ax, color='skyblue', edgecolor='black')\n",
    "        \n",
    "        ax.set_title(f'{col}\\n唯一值数: {df[col].nunique()}', fontsize=10)\n",
    "        ax.set_xlabel('频次')\n",
    "        ax.set_ylabel('')\n",
    "        ax.grid(True, alpha=0.3, axis='x')\n",
    "        \n",
    "        # Annotate each bar with its raw count.\n",
    "        for i, (val, count) in enumerate(top_counts.items()):\n",
    "            ax.text(count, i, f' {count:,}', va='center', fontsize=8)\n",
    "    \n",
    "    # Blank out any unused panels in the grid.\n",
    "    for ax in axes[n_features:]:\n",
    "        ax.axis('off')\n",
    "    \n",
    "    plt.tight_layout()\n",
    "    plt.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "505c6044",
   "metadata": {},
   "source": [
    "## 6. 完整数据探查流程"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "555ce883",
   "metadata": {},
   "outputs": [],
   "source": [
    "def comprehensive_data_exploration(df, dataset_name=\"数据集\", primary_key='CUST_NO', enable_visualization=True):\n",
    "    \"\"\"\n",
    "    One-shot exploration pipeline that runs every profiling step in order.\n",
    "    \n",
    "    NOTE(review): relies on explore_basic_info, analyze_data_quality,\n",
    "    analyze_numeric_features, analyze_categorical_features, analyze_text_features\n",
    "    and the visualize_* helpers defined in earlier cells of this notebook.\n",
    "    \n",
    "    Parameters:\n",
    "    - df: pandas DataFrame to explore\n",
    "    - dataset_name: label used throughout the printed report\n",
    "    - primary_key: primary-key column name (default 'CUST_NO'); excluded from feature plots\n",
    "    - enable_visualization: whether to render plots (disable for text-heavy tables)\n",
    "    \n",
    "    Returns:\n",
    "    - dict: analysis results keyed by stage ('basic_info', 'quality', ...)\n",
    "    \"\"\"\n",
    "    print(\"\\n\" + \"🚀\"*40)\n",
    "    print(f\"{'='*80}\")\n",
    "    print(f\"{'开始完整数据探查流程':^80}\")\n",
    "    print(f\"{'='*80}\")\n",
    "    print(\"🚀\"*40 + \"\\n\")\n",
    "    \n",
    "    results = {}\n",
    "    \n",
    "    # 1. Basic information profiling\n",
    "    print(\"\\n\" + \"►\"*40)\n",
    "    results['basic_info'] = explore_basic_info(df, dataset_name, primary_key)\n",
    "    \n",
    "    # 2. Data quality analysis\n",
    "    print(\"\\n\" + \"►\"*40)\n",
    "    results['quality'] = analyze_data_quality(df, dataset_name, primary_key)\n",
    "    \n",
    "    # 3. Numeric feature statistics\n",
    "    print(\"\\n\" + \"►\"*40)\n",
    "    results['numeric_stats'] = analyze_numeric_features(df, dataset_name, primary_key)\n",
    "    \n",
    "    # 4. Categorical feature analysis\n",
    "    print(\"\\n\" + \"►\"*40)\n",
    "    results['categorical_stats'] = analyze_categorical_features(df, dataset_name, primary_key)\n",
    "    \n",
    "    # 5. Text feature analysis\n",
    "    print(\"\\n\" + \"►\"*40)\n",
    "    results['text_stats'] = analyze_text_features(df, dataset_name, primary_key)\n",
    "    \n",
    "    # 6. Visualization\n",
    "    if enable_visualization:\n",
    "        print(\"\\n\" + \"►\"*40)\n",
    "        print(\"📊 开始可视化分析...\")\n",
    "        \n",
    "        # Numeric columns, excluding the primary key\n",
    "        numeric_cols = [col for col in df.select_dtypes(include=[np.number]).columns if col != primary_key]\n",
    "        if len(numeric_cols) > 0:\n",
    "            # Distribution plots\n",
    "            if len(numeric_cols) <= 9:  # cap on plotted fields\n",
    "                visualize_numeric_distribution(df, numeric_cols[:9], dataset_name=dataset_name)\n",
    "                visualize_boxplot(df, numeric_cols[:9], dataset_name=dataset_name)\n",
    "            else:\n",
    "                print(f\"   ⚠️  数值型字段过多({len(numeric_cols)}个)，只显示前9个\")\n",
    "                visualize_numeric_distribution(df, numeric_cols[:9], dataset_name=dataset_name)\n",
    "                visualize_boxplot(df, numeric_cols[:9], dataset_name=dataset_name)\n",
    "            \n",
    "            # Correlation heatmap (skipped when the matrix would be too large)\n",
    "            if len(numeric_cols) >= 2 and len(numeric_cols) <= 20:\n",
    "                visualize_correlation_matrix(df[numeric_cols], dataset_name=dataset_name)\n",
    "            elif len(numeric_cols) > 20:\n",
    "                print(f\"   ⚠️  数值型字段过多({len(numeric_cols)}个)，相关性矩阵过大，跳过\")\n",
    "        \n",
    "        # Categorical columns (primary key and text-type columns excluded)\n",
    "        categorical_cols = []\n",
    "        text_cols = results['basic_info'].get('text_cols', [])\n",
    "        for col in df.select_dtypes(include=['object', 'category']).columns:\n",
    "            if col != primary_key and col not in text_cols:\n",
    "                categorical_cols.append(col)\n",
    "        \n",
    "        if len(categorical_cols) > 0:\n",
    "            # Categorical distribution plots\n",
    "            if len(categorical_cols) <= 6:\n",
    "                visualize_categorical_distribution(df, categorical_cols[:6], dataset_name=dataset_name)\n",
    "            else:\n",
    "                print(f\"   ⚠️  类别型字段过多({len(categorical_cols)}个)，只显示前6个\")\n",
    "                visualize_categorical_distribution(df, categorical_cols[:6], dataset_name=dataset_name)\n",
    "    \n",
    "    # 7. Report summary\n",
    "    print(\"\\n\" + \"=\"*80)\n",
    "    print(f\"📋 【{dataset_name}】数据探查报告摘要\")\n",
    "    print(\"=\"*80)\n",
    "    \n",
    "    print(f\"\\n✅ 数据规模: {results['basic_info']['shape'][0]:,} 行 × {results['basic_info']['shape'][1]} 列\")\n",
    "    print(f\"✅ 内存占用: {results['basic_info']['memory_mb']:.2f} MB\")\n",
    "    print(f\"✅ 主键字段: {primary_key}\")\n",
    "    print(f\"✅ 数值型字段: {len(results['basic_info']['numeric_cols'])} 个\")\n",
    "    print(f\"✅ 类别型字段: {len(results['basic_info']['categorical_cols'])} 个\")\n",
    "    print(f\"✅ 文本型字段: {len(results['basic_info']['text_cols'])} 个\")\n",
    "    print(f\"✅ 日期型字段: {len(results['basic_info']['datetime_cols'])} 个\")\n",
    "    \n",
    "    if results['quality']['n_duplicates'] > 0:\n",
    "        print(f\"⚠️  重复行: {results['quality']['n_duplicates']:,} ({results['quality']['duplicate_rate']:.2f}%)\")\n",
    "    else:\n",
    "        print(f\"✅ 无重复行\")\n",
    "    \n",
    "    if len(results['quality']['missing_stats']) > 0:\n",
    "        print(f\"⚠️  存在缺失值的字段: {len(results['quality']['missing_stats'])} 个\")\n",
    "    else:\n",
    "        print(f\"✅ 无缺失值\")\n",
    "    \n",
    "    if results['quality']['outlier_info']:\n",
    "        total_outliers = sum([info['count'] for info in results['quality']['outlier_info'].values()])\n",
    "        print(f\"⚠️  异常值总数: {total_outliers:,}\")\n",
    "    else:\n",
    "        print(f\"✅ 无明显异常值\")\n",
    "    \n",
    "    print(\"\\n\" + \"=\"*80)\n",
    "    print(\"🎉 数据探查流程完成！\")\n",
    "    print(\"=\"*80 + \"\\n\")\n",
    "    \n",
    "    return results"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "8d1d2038",
   "metadata": {},
   "source": [
    "# 数据探查"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a8a32d05",
   "metadata": {},
   "source": [
    "## 示例1: 完整探查流程（推荐使用）"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "83394140",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Assumes the tables were already loaded via the earlier load_data_from_directory\n",
    "# step, i.e. variables such as A_nature_data / A_asset_data exist in the kernel.\n",
    "\n",
    "# Full exploration of the natural-attributes table\n",
    "if 'A_nature_data' in globals():\n",
    "    nature_results = comprehensive_data_exploration(\n",
    "        A_nature_data, \n",
    "        dataset_name=\"自然属性信息表(NATURE)\",\n",
    "        primary_key='CUST_NO',  # primary-key column\n",
    "        enable_visualization=True\n",
    "    )\n",
    "\n",
    "# Explore the page-view detail table (contains text-type columns)\n",
    "if 'A_pageview_dtl_data' in globals():\n",
    "    pageview_results = comprehensive_data_exploration(\n",
    "        A_pageview_dtl_data,\n",
    "        dataset_name=\"掌银页面访问明细表(MB_PAGEVIEW_DTL)\",\n",
    "        primary_key='CUST_NO',\n",
    "        enable_visualization=False  # many text columns; skip automatic plotting\n",
    "    )"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "fcfda23b",
   "metadata": {},
   "source": [
    "## 示例2: 单独使用各个方法"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "68a78b40",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Each analysis step can also be called on its own when only one view is needed.\n",
    "\n",
    "# Example: basic info only (field types are detected automatically)\n",
    "if 'A_asset_data' in globals():\n",
    "    basic_info = explore_basic_info(A_asset_data, \"资产信息表(ASSET)\", primary_key='CUST_NO')\n",
    "\n",
    "# Example: data-quality analysis only\n",
    "if 'A_asset_data' in globals():\n",
    "    quality_report = analyze_data_quality(A_asset_data, \"资产信息表(ASSET)\", primary_key='CUST_NO')\n",
    "\n",
    "# Example: numeric-feature analysis only (primary key excluded automatically)\n",
    "if 'A_asset_data' in globals():\n",
    "    numeric_stats = analyze_numeric_features(A_asset_data, \"资产信息表(ASSET)\", primary_key='CUST_NO')\n",
    "\n",
    "# Example: text-feature analysis (e.g. the page-view table)\n",
    "if 'A_pageview_dtl_data' in globals():\n",
    "    text_stats = analyze_text_features(A_pageview_dtl_data, \"掌银页面访问明细表\", primary_key='CUST_NO')\n",
    "\n",
    "# Example: distribution plots only\n",
    "if 'A_asset_data' in globals():\n",
    "    # numeric columns excluding the primary key\n",
    "    numeric_cols = [col for col in A_asset_data.select_dtypes(include=[np.number]).columns if col != 'CUST_NO']\n",
    "    visualize_numeric_distribution(A_asset_data, columns=numeric_cols[:9], dataset_name=\"资产信息表(ASSET)\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "243837ca",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "================================================================================\n",
      "📊 【掌银页面访问明细表】基础信息探查\n",
      "================================================================================\n",
      "\n",
      "1️⃣  数据形状:\n",
      "   ├─ 行数（样本数）: 372,196\n",
      "   └─ 列数（特征数）: 5\n",
      "\n",
      "2️⃣  数据类型分布:\n",
      "   ├─ object: 5 列 (100.0%)\n",
      "\n",
      "3️⃣  内存占用:\n",
      "   └─ 总内存: 150.15 MB\n",
      "\n",
      "4️⃣  列信息详情:\n",
      "   列名                             字段类型            数据类型            非空数        唯一值数       唯一率      内存(KB)    \n",
      "   -------------------------------------------------------------------------------------------------------------------\n",
      "   OPERATION_DATE                 📅日期型            object          372196     88         0.02%    24352.79  \n",
      "   CUST_NO                        🔑主键             object          372196     2753       0.74%    32349.19  \n",
      "   PAGE_TITLE                     🔐文本型            object          372196     988        0.27%    32349.19  \n",
      "   REFERRER_TITLE                 🔐文本型            object          372196     992        0.27%    32349.19  \n",
      "   CUST_NO                        🔑主键             object          372196     2753       0.74%    32349.19  \n",
      "   PAGE_TITLE                     🔐文本型            object          372196     988        0.27%    32349.19  \n",
      "   REFERRER_TITLE                 🔐文本型            object          372196     992        0.27%    32349.19  \n",
      "   MODEL_NAME                     🔐文本型            object          372196     105        0.03%    32349.19  \n",
      "\n",
      "5️⃣  字段类型统计（排除主键 CUST_NO）:\n",
      "   ├─ 数值型字段: 0 个\n",
      "   ├─ 日期型字段: 1 个\n",
      "   │  └─ OPERATION_DATE\n",
      "   ├─ 类别型字段: 0 个\n",
      "   └─ 文本型字段: 0 个\n",
      "\n",
      "================================================================================\n",
      "   MODEL_NAME                     🔐文本型            object          372196     105        0.03%    32349.19  \n",
      "\n",
      "5️⃣  字段类型统计（排除主键 CUST_NO）:\n",
      "   ├─ 数值型字段: 0 个\n",
      "   ├─ 日期型字段: 1 个\n",
      "   │  └─ OPERATION_DATE\n",
      "   ├─ 类别型字段: 0 个\n",
      "   └─ 文本型字段: 0 个\n",
      "\n",
      "================================================================================\n"
     ]
    }
   ],
   "source": [
    "# Real-data exploration example (runs only when the table is loaded).\n",
    "# Adjust the variable name to your environment, e.g. the page-view table below.\n",
    "if 'MB_PAGEVIEW_DTL_data' in globals():\n",
    "    pageView_info = explore_basic_info(MB_PAGEVIEW_DTL_data, \"掌银页面访问明细表\", primary_key='CUST_NO')"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "8ef55a68",
   "metadata": {},
   "source": [
    "## 示例3: 数据清洗操作"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "cf1d340f",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Data-cleaning walkthrough: missing values -> duplicates -> outliers -> dtypes.\n",
    "\n",
    "if 'A_asset_data' in globals():\n",
    "    # 1. Handle missing values (original frame left untouched)\n",
    "    asset_cleaned = handle_missing_values(\n",
    "        A_asset_data, \n",
    "        numeric_strategy='median',      # fill numeric columns with the median\n",
    "        categorical_strategy='mode',    # fill categorical columns with the mode\n",
    "        dataset_name=\"资产信息表\",\n",
    "        inplace=False                   # return a new DataFrame\n",
    "    )\n",
    "    \n",
    "    # 2. Drop duplicated rows\n",
    "    asset_cleaned = remove_duplicates(\n",
    "        asset_cleaned,\n",
    "        subset=None,                    # duplicates judged on all columns\n",
    "        keep='first',                   # keep the first occurrence\n",
    "        dataset_name=\"资产信息表\",\n",
    "        inplace=False\n",
    "    )\n",
    "    \n",
    "    # 3. Treat outliers\n",
    "    asset_cleaned = handle_outliers(\n",
    "        asset_cleaned,\n",
    "        method='iqr',                   # IQR detection\n",
    "        threshold=1.5,                  # 1.5x IQR fences\n",
    "        strategy='cap',                 # clip values to the fences\n",
    "        dataset_name=\"资产信息表\",\n",
    "        inplace=False\n",
    "    )\n",
    "    \n",
    "    # 4. Optimize dtypes to reduce memory\n",
    "    asset_optimized = optimize_dtypes(\n",
    "        asset_cleaned,\n",
    "        dataset_name=\"资产信息表\",\n",
    "        inplace=False\n",
    "    )\n",
    "    \n",
    "    print(f\"\\n✅ 清洗完成！\")\n",
    "    print(f\"   原始数据: {A_asset_data.shape}\")\n",
    "    print(f\"   清洗后: {asset_optimized.shape}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "c22082e8",
   "metadata": {},
   "source": [
    "---\n",
    "\n",
    "# 方法特性说明\n",
    "\n",
    "## ✨ 核心优势\n",
    "\n",
    "1. **高度通用性**\n",
    "   - 所有方法仅针对单个DataFrame设计\n",
    "   - 可应用于任何pandas DataFrame\n",
    "   - 参数化设计，灵活配置\n",
    "\n",
    "2. **专业性**\n",
    "   - 完整的统计分析（偏度、峰度、相关性等）\n",
    "   - 多种异常值检测方法（IQR、Z-score）\n",
    "   - 丰富的可视化支持\n",
    "\n",
    "3. **易用性**\n",
    "   - 清晰的输出格式和进度提示\n",
    "   - 一键执行完整流程\n",
    "   - 支持单独调用各个功能\n",
    "\n",
    "4. **工程化**\n",
    "   - 内存优化功能\n",
    "   - 数据类型自动转换\n",
    "   - 详细的日志输出\n",
    "\n",
    "## 📋 方法清单\n",
    "\n",
    "### 探查方法\n",
    "- `explore_basic_info()` - 基础信息探查\n",
    "- `analyze_data_quality()` - 数据质量分析\n",
    "- `analyze_numeric_features()` - 数值型特征分析\n",
    "- `analyze_categorical_features()` - 类别型特征分析\n",
    "\n",
    "### 清洗方法\n",
    "- `handle_missing_values()` - 缺失值处理\n",
    "- `remove_duplicates()` - 重复值删除\n",
    "- `handle_outliers()` - 异常值处理\n",
    "- `optimize_dtypes()` - 数据类型优化\n",
    "\n",
    "### 可视化方法\n",
    "- `visualize_numeric_distribution()` - 数值分布图\n",
    "- `visualize_boxplot()` - 箱线图\n",
    "- `visualize_correlation_matrix()` - 相关性热图\n",
    "- `visualize_categorical_distribution()` - 类别分布图\n",
    "\n",
    "### 综合方法\n",
    "- `comprehensive_data_exploration()` - 完整探查流程\n",
    "\n",
    "## 💡 使用建议\n",
    "\n",
    "1. **初次探查**：使用`comprehensive_data_exploration()`获取全面了解\n",
    "2. **针对性分析**：根据需要单独调用特定方法\n",
    "3. **清洗流程**：先探查→发现问题→针对性清洗→再次验证\n",
    "4. **可视化**：对于大数据集，建议关闭自动可视化，手动选择关键字段"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "starcup",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.18"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
