{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "63269847",
   "metadata": {},
   "outputs": [],
   "source": [
    "import warnings\n",
    "warnings.filterwarnings('ignore')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "d732d640",
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import gc\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import re\n",
    "import time\n",
    "from scipy import stats\n",
    "import matplotlib.pyplot as plt\n",
    "import category_encoders as ce\n",
    "import networkx as nx\n",
    "import pickle\n",
    "import lightgbm as lgb\n",
    "import catboost as cat\n",
    "import xgboost as xgb\n",
    "from datetime import timedelta\n",
    "from gensim.models import Word2Vec\n",
    "from io import StringIO\n",
    "from tqdm import tqdm\n",
    "from lightgbm import LGBMClassifier\n",
    "from lightgbm import log_evaluation, early_stopping\n",
    "from sklearn.metrics import roc_curve\n",
    "from scipy.stats import chi2_contingency, pearsonr\n",
    "from sklearn.preprocessing import StandardScaler, OneHotEncoder, LabelEncoder\n",
    "from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\n",
    "from sklearn.feature_extraction import FeatureHasher\n",
    "from sklearn.model_selection import StratifiedKFold, KFold, train_test_split, GridSearchCV\n",
    "from category_encoders import TargetEncoder\n",
    "from sklearn.decomposition import TruncatedSVD\n",
    "from autogluon.tabular import TabularDataset, TabularPredictor, FeatureMetadata\n",
    "from autogluon.features.generators import AsTypeFeatureGenerator, BulkFeatureGenerator, DropUniqueFeatureGenerator, FillNaFeatureGenerator, PipelineFeatureGenerator\n",
    "from autogluon.features.generators import CategoryFeatureGenerator, IdentityFeatureGenerator, AutoMLPipelineFeatureGenerator\n",
    "from autogluon.common.features.types import R_INT, R_FLOAT\n",
    "from autogluon.core.metrics import make_scorer"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "98168ca1",
   "metadata": {},
   "source": [
    "# 数据导入"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "e218df05",
   "metadata": {},
   "source": [
    "## 通用导入函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "11381d8a",
   "metadata": {},
   "outputs": [],
   "source": [
    "def load_data_from_directory(directory):\n",
    "    \"\"\"\n",
    "    遍历目录加载所有CSV文件，将其作为独立的DataFrame变量\n",
    "\n",
    "    参数:\n",
    "    - directory: 输入的数据路径\n",
    "    \n",
    "    返回:\n",
    "    - 含有数据集名称的列表\n",
    "    \"\"\"\n",
    "    dataset_names = []\n",
    "    for filename in os.listdir(directory):\n",
    "        if filename.endswith(\".csv\"):\n",
    "            dataset_name = os.path.splitext(filename)[0] + '_data' # 获取文件名作为变量名\n",
    "            file_path = os.path.join(directory, filename)  # 完整的文件路径\n",
    "            globals()[dataset_name] = pd.read_csv(file_path)  # 将文件加载为DataFrame并赋值给全局变量\n",
    "            dataset_names.append(dataset_name)\n",
    "            print(f\"数据集 {dataset_name} 已加载为 DataFrame\")\n",
    "\n",
    "    return dataset_names"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "29ef15ba",
   "metadata": {},
   "source": [
    "## 导入数据集"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "2a0a7091",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "数据集 AGET_PAY_data 已加载为 DataFrame\n",
      "数据集 ASSET_data 已加载为 DataFrame\n",
      "数据集 CCD_TR_DTL_data 已加载为 DataFrame\n",
      "数据集 MB_PAGEVIEW_DTL_data 已加载为 DataFrame\n",
      "数据集 MB_QRYTRNFLW_data 已加载为 DataFrame\n",
      "数据集 MB_TRNFLW_data 已加载为 DataFrame\n",
      "数据集 NATURE_data 已加载为 DataFrame\n",
      "数据集 PROD_HOLD_data 已加载为 DataFrame\n",
      "数据集 TARGET_data 已加载为 DataFrame\n",
      "数据集 TARGET_VALID_data 已加载为 DataFrame\n",
      "数据集 TR_APS_DTL_data 已加载为 DataFrame\n",
      "数据集 TR_IBTF_data 已加载为 DataFrame\n",
      "数据集 TR_TPAY_data 已加载为 DataFrame\n"
     ]
    }
   ],
   "source": [
    "train_load_dt = '../DATA'\n",
    "train_data_name = load_data_from_directory(train_load_dt)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "527a55d6",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "📊 已加载的数据集:\n",
      "  - AGET_PAY_data\n",
      "  - ASSET_data\n",
      "  - CCD_TR_DTL_data\n",
      "  - MB_PAGEVIEW_DTL_data\n",
      "  - MB_QRYTRNFLW_data\n",
      "  - MB_TRNFLW_data\n",
      "  - NATURE_data\n",
      "  - PROD_HOLD_data\n",
      "  - TARGET_data\n",
      "  - TARGET_VALID_data\n",
      "  - TR_APS_DTL_data\n",
      "  - TR_IBTF_data\n",
      "  - TR_TPAY_data\n",
      "\n",
      "================================================================================\n",
      "示例1: NATURE_data (自然属性信息表)\n",
      "================================================================================\n",
      "   DATA_DAT                           CUST_NO  NTRL_CUST_SEX_CD  \\\n",
      "0  20250731  5374eec4c3ab060cfde53ecf09950a06                 2   \n",
      "1  20250731  e26cde597f38d4787d0db271fb880032                 1   \n",
      "\n",
      "   NTRL_CUST_AGE  NTRL_RANK_CD  NTRL_SEAN_ACTV_IND  \n",
      "0           33.0             2                   1  \n",
      "1           36.0             2                   1  \n",
      "\n",
      "列名: ['DATA_DAT', 'CUST_NO', 'NTRL_CUST_SEX_CD', 'NTRL_CUST_AGE', 'NTRL_RANK_CD', 'NTRL_SEAN_ACTV_IND']\n",
      "\n",
      "================================================================================\n",
      "示例2: PROD_HOLD_data (产品持有信息表)\n",
      "================================================================================\n",
      "   DATA_DAT                           CUST_NO  DP_IND  IL_IND  DCARD_IND  \\\n",
      "0  20250731  360127c123787d478f70691590418ac4       1       0          1   \n",
      "1  20250731  5282d6b513aa8e8fbdcd44d80581f4cb       1       0          1   \n",
      "\n",
      "   CCARD_IND  FNCG_IND  FUND_IND  BOND_IND  INSUR_IND  METAL_IND  PAY_IND  \\\n",
      "0          0         0         0         0          0          0        0   \n",
      "1          0         0         0         0          0          0        0   \n",
      "\n",
      "   EBNK_IND  MB_IND  MS_IND  TDPT_PAY_ALI_IND  TDPT_PAY_WCHT_IND  \n",
      "0         1       1       1                 0                  1  \n",
      "1         1       1       1                 0                  1  \n",
      "\n",
      "列名: ['DATA_DAT', 'CUST_NO', 'DP_IND', 'IL_IND', 'DCARD_IND', 'CCARD_IND', 'FNCG_IND', 'FUND_IND', 'BOND_IND', 'INSUR_IND', 'METAL_IND', 'PAY_IND', 'EBNK_IND', 'MB_IND', 'MS_IND', 'TDPT_PAY_ALI_IND', 'TDPT_PAY_WCHT_IND']\n",
      "\n",
      "================================================================================\n",
      "示例3: TR_APS_DTL_data (活期交易表)\n",
      "================================================================================\n",
      "   APSDTRDAT                           CUST_NO  \\\n",
      "0   20250402  3abac600050b2b3ad8876a1caf85beb9   \n",
      "1   20250402  3abac600050b2b3ad8876a1caf85beb9   \n",
      "\n",
      "                          APSDTRCOD  APSDTRAMT  \\\n",
      "0  566a1fdfd622806c20378b970c4cbff3    60000.0   \n",
      "1  566a1fdfd622806c20378b970c4cbff3     2000.0   \n",
      "\n",
      "                            APSDABS                         APSDTRCHL  \n",
      "0  acaf665ffd5ef2fe03b0daaa12d79aab  f1811258c561f96461a243415727b1f5  \n",
      "1  acaf665ffd5ef2fe03b0daaa12d79aab  f1811258c561f96461a243415727b1f5  \n",
      "\n",
      "列名: ['APSDTRDAT', 'CUST_NO', 'APSDTRCOD', 'APSDTRAMT', 'APSDABS', 'APSDTRCHL']\n",
      "\n",
      "================================================================================\n",
      "示例4: MB_PAGEVIEW_DTL_data (掌银页面访问明细表)\n",
      "================================================================================\n",
      "  OPERATION_DATE                           CUST_NO  \\\n",
      "0     2025-04-07  864a14a62ffffbc4741d365ea5a08278   \n",
      "1     2025-04-07  864a14a62ffffbc4741d365ea5a08278   \n",
      "\n",
      "                         PAGE_TITLE                    REFERRER_TITLE  \\\n",
      "0  dc127d306179477fef4f3a9378dc550b  0e5c9561153e8b3fd936b94a5641c8e1   \n",
      "1  0e5c9561153e8b3fd936b94a5641c8e1  c5b386b7a6348a2f1ba70f2259fb827e   \n",
      "\n",
      "                         MODEL_NAME  \n",
      "0  c5b386b7a6348a2f1ba70f2259fb827e  \n",
      "1  c5b386b7a6348a2f1ba70f2259fb827e  \n",
      "\n",
      "列名: ['OPERATION_DATE', 'CUST_NO', 'PAGE_TITLE', 'REFERRER_TITLE', 'MODEL_NAME']\n"
     ]
    }
   ],
   "source": [
    "# 查看数据集结构示例\n",
    "print(\"📊 已加载的数据集:\")\n",
    "for name in train_data_name:\n",
    "    print(f\"  - {name}\")\n",
    "\n",
    "print(\"\\n\" + \"=\"*80)\n",
    "print(\"示例1: NATURE_data (自然属性信息表)\")\n",
    "print(\"=\"*80)\n",
    "print(NATURE_data.head(2))\n",
    "print(f\"\\n列名: {list(NATURE_data.columns)}\")\n",
    "\n",
    "print(\"\\n\" + \"=\"*80)\n",
    "print(\"示例2: PROD_HOLD_data (产品持有信息表)\")\n",
    "print(\"=\"*80)\n",
    "print(PROD_HOLD_data.head(2))\n",
    "print(f\"\\n列名: {list(PROD_HOLD_data.columns)}\")\n",
    "\n",
    "print(\"\\n\" + \"=\"*80)\n",
    "print(\"示例3: TR_APS_DTL_data (活期交易表)\")\n",
    "print(\"=\"*80)\n",
    "print(TR_APS_DTL_data.head(2))\n",
    "print(f\"\\n列名: {list(TR_APS_DTL_data.columns)}\")\n",
    "\n",
    "print(\"\\n\" + \"=\"*80)\n",
    "print(\"示例4: MB_PAGEVIEW_DTL_data (掌银页面访问明细表)\")\n",
    "print(\"=\"*80)\n",
    "print(MB_PAGEVIEW_DTL_data.head(2))\n",
    "print(f\"\\n列名: {list(MB_PAGEVIEW_DTL_data.columns)}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "ec9d41a0",
   "metadata": {},
   "source": [
    "# 数据探查与清洗通用方法\n",
    "\n",
    "本部分将构建一套完整的数据探查与清洗方法体系，所有方法均针对单个数据集设计，具有高度通用性和可复用性。\n",
    "\n",
    "## 核心功能模块：\n",
    "1. **基础信息探查**：数据形状、类型、内存占用\n",
    "2. **数据质量分析**：缺失值、重复值、异常值检测\n",
    "3. **统计特征分析**：数值型/类别型变量分布\n",
    "4. **数据清洗操作**：缺失值处理、异常值处理、类型转换\n",
    "5. **可视化分析**：分布图、箱线图、相关性热图"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a0a443f8",
   "metadata": {},
   "source": [
    "## 📌 重要说明\n",
    "\n",
    "### 字段类型识别规则(基于MD5脱敏后的数据特征)\n",
    "\n",
    "本套方法能够自动识别以下字段类型:\n",
    "\n",
    "#### 🔐 数据脱敏规则说明\n",
    "1. **主键**: `CUST_NO` - 使用MD5脱敏(32位小写十六进制字符串)\n",
    "2. **日期字段**: `DATA_DAT`/`DATE`/`APSDTRDAT`/`OPERATION_DATE`等 - **未脱敏**(保留原格式)\n",
    "3. **数值字段**: 金额(AMT)、余额(BAL)、笔数(CNT)、年龄(AGE)等 - **未脱敏**\n",
    "4. **类别代码**: 以`_CD`或`_IND`结尾的字段 - **未脱敏**\n",
    "   - 如: `NTRL_CUST_SEX_CD`(性别代码)、`DP_IND`(存款标识)\n",
    "5. **其他字段**: 交易码、摘要、页面标题等 - **MD5脱敏**\n",
    "\n",
    "#### 🎯 字段类型自动识别逻辑\n",
    "\n",
    "1. **🔑 主键字段**: `CUST_NO` - 将从统计分析中自动排除\n",
    "2. **\udcc5 日期型字段**: \n",
    "   - 列名包含`DATE`/`DAT`且为8位数字字符串(如\"20250731\")\n",
    "   - 或已解析为datetime64类型\n",
    "3. **\ud83d📊 数值型字段**: \n",
    "   - numeric类型(int/float)\n",
    "   - 用于金额、余额、笔数、年龄等统计分析\n",
    "4. **🏷️ 类别型字段**: \n",
    "   - 列名以`_CD`或`_IND`结尾(类别代码/标识字段)\n",
    "   - 或唯一值数量≤50且占比<50%\n",
    "5. **\udd10 MD5脱敏字段**(类别型):\n",
    "   - object类型且所有非空值均为32位小写十六进制字符串\n",
    "   - 如交易码、渠道码、摘要等经过MD5处理的字段\n",
    "   - 虽然唯一值多,但本质上是类别型特征\n",
    "\n",
    "#### 📋 实际数据示例\n",
    "\n",
    "##### `NATURE_data` (自然属性信息表)\n",
    "- `CUST_NO` → 🔑主键(MD5脱敏)\n",
    "- `DATA_DAT` → 📅日期型(未脱敏: \"20250731\")\n",
    "- `NTRL_CUST_SEX_CD` → 🏷️类别型(性别代码,未脱敏: 1/2)\n",
    "- `NTRL_CUST_AGE` → 📊数值型(年龄,未脱敏: 33.0)\n",
    "- `NTRL_RANK_CD` → 🏷️类别型(等级代码,未脱敏: 1/2/3)\n",
    "- `NTRL_SEAN_ACTV_IND` → 🏷️类别型(季活标识,未脱敏: 0/1)\n",
    "\n",
    "##### `TR_APS_DTL_data` (活期交易表)\n",
    "- `CUST_NO` → 🔑主键(MD5脱敏)\n",
    "- `APSDTRDAT` → 📅日期型(未脱敏: \"20250402\")\n",
    "- `APSDTRAMT` → 📊数值型(交易金额,未脱敏: 60000.0)\n",
    "- `APSDTRCOD` → 🔐MD5类别型(交易码,MD5脱敏: \"566a1fdf...\")\n",
    "- `APSDABS` → 🔐MD5类别型(摘要,MD5脱敏: \"acaf665f...\")\n",
    "- `APSDTRCHL` → 🔐MD5类别型(渠道,MD5脱敏: \"f1811258...\")\n",
    "\n",
    "##### `MB_PAGEVIEW_DTL_data` (掌银页面访问表)\n",
    "- `CUST_NO` → 🔑主键(MD5脱敏)\n",
    "- `OPERATION_DATE` → 📅日期型(未脱敏: \"2025-04-07\")\n",
    "- `PAGE_TITLE` → 🔐MD5类别型(页面标题,MD5脱敏: \"dc127d30...\")\n",
    "- `REFERRER_TITLE` → 🔐MD5类别型(上一页面,MD5脱敏: \"0e5c9561...\")\n",
    "- `MODEL_NAME` → 🔐MD5类别型(模块名称,MD5脱敏: \"c5b386b7...\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "41b7484f",
   "metadata": {},
   "source": [
    "## 1. 基础信息探查"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "2d1f8365",
   "metadata": {},
   "source": [
    "### 🔧 优化后的字段类型识别辅助函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "id": "dd41166b",
   "metadata": {},
   "outputs": [],
   "source": [
    "def is_md5_hash(series):\n",
    "    \"\"\"\n",
    "    判断series是否全部为MD5哈希值(32位小写十六进制字符串)\n",
    "    \n",
    "    参数:\n",
    "    - series: pandas Series\n",
    "    \n",
    "    返回:\n",
    "    - bool: 是否为MD5哈希值\n",
    "    \"\"\"\n",
    "    if series.dtype != 'object':\n",
    "        return False\n",
    "    \n",
    "    # 去除空值\n",
    "    non_null = series.dropna()\n",
    "    if len(non_null) == 0:\n",
    "        return False\n",
    "    \n",
    "    # 检查是否所有值都是32位小写十六进制字符串\n",
    "    # MD5的特征: 32个字符, 只包含0-9和a-f\n",
    "    sample_size = min(1000, len(non_null))  # 采样检查以提高性能\n",
    "    sample = non_null.sample(n=sample_size, random_state=42)\n",
    "    \n",
    "    def is_md5_string(s):\n",
    "        s_str = str(s)\n",
    "        if len(s_str) != 32:\n",
    "            return False\n",
    "        try:\n",
    "            int(s_str, 16)  # 尝试按16进制解析\n",
    "            return s_str.islower()  # 检查是否全小写\n",
    "        except ValueError:\n",
    "            return False\n",
    "    \n",
    "    # 如果90%以上的样本都是MD5格式,判定为MD5字段\n",
    "    md5_ratio = sample.apply(is_md5_string).sum() / len(sample)\n",
    "    return md5_ratio > 0.9\n",
    "\n",
    "\n",
    "def is_date_field(col_name, series):\n",
    "    \"\"\"\n",
    "    判断字段是否为日期型字段\n",
    "    \n",
    "    参数:\n",
    "    - col_name: 列名\n",
    "    - series: pandas Series\n",
    "    \n",
    "    返回:\n",
    "    - bool: 是否为日期型字段\n",
    "    \"\"\"\n",
    "    # 如果已经是datetime类型\n",
    "    if pd.api.types.is_datetime64_any_dtype(series):\n",
    "        return True\n",
    "    \n",
    "    # 如果列名包含DATE或DAT\n",
    "    if 'DATE' in col_name.upper() or 'DAT' in col_name.upper():\n",
    "        # 情况1: 检查int类型是否为8位日期数字(如20250731)\n",
    "        if pd.api.types.is_integer_dtype(series):\n",
    "            non_null = series.dropna()\n",
    "            if len(non_null) > 0:\n",
    "                # 检查是否所有值都在合理的日期范围内(19000101-21001231)\n",
    "                min_val = non_null.min()\n",
    "                max_val = non_null.max()\n",
    "                if 19000101 <= min_val <= 21001231 and 19000101 <= max_val <= 21001231:\n",
    "                    # 进一步检查是否都是8位数\n",
    "                    sample = non_null.head(100)\n",
    "                    is_8digit = (sample >= 10000000) & (sample <= 99999999)\n",
    "                    if is_8digit.mean() > 0.9:\n",
    "                        return True\n",
    "        \n",
    "        # 情况2: 检查object类型是否为8位数字字符串(如\"20250731\")\n",
    "        elif series.dtype == 'object':\n",
    "            non_null = series.dropna()\n",
    "            if len(non_null) > 0:\n",
    "                sample = non_null.head(100)\n",
    "                # 检查是否为8位数字\n",
    "                is_8digit = sample.astype(str).str.match(r'^\\d{8}$').mean() > 0.9\n",
    "                if is_8digit:\n",
    "                    return True\n",
    "                # 或者检查是否为日期格式字符串(如\"2025-04-07\")\n",
    "                is_date_format = sample.astype(str).str.match(r'^\\d{4}-\\d{2}-\\d{2}$').mean() > 0.9\n",
    "                if is_date_format:\n",
    "                    return True\n",
    "    \n",
    "    return False\n",
    "\n",
    "\n",
    "def identify_field_type(col, df, primary_key='CUST_NO'):\n",
    "    \"\"\"\n",
    "    识别字段的业务类型(优化版,适配MD5脱敏数据)\n",
    "    \n",
    "    字段类型识别规则(按优先级):\n",
    "    1. 主键: CUST_NO\n",
    "    2. 日期型: 列名含DATE/DAT且为8位整数或datetime类型 ⚠️优先于数值型判断\n",
    "    3. 类别代码: 列名以_CD或_IND结尾 ⚠️优先于数值型判断\n",
    "    4. 数值型: numeric类型(int/float)\n",
    "    5. MD5脱敏类别: 32位十六进制字符串\n",
    "    6. 普通类别型: 唯一值≤50或占比<50%\n",
    "    \n",
    "    参数:\n",
    "    - col: 列名\n",
    "    - df: DataFrame\n",
    "    - primary_key: 主键字段名\n",
    "    \n",
    "    返回:\n",
    "    - str: 字段类型标识\n",
    "    \"\"\"\n",
    "    if col == primary_key:\n",
    "        return '🔑主键'\n",
    "    \n",
    "    # ⚠️ 优先检查日期型(必须在数值型之前,因为日期字段可能是int类型)\n",
    "    if is_date_field(col, df[col]):\n",
    "        return '📅日期型'\n",
    "    \n",
    "    # ⚠️ 优先检查类别代码(必须在数值型之前,因为_CD/_IND字段可能是int类型)\n",
    "    if col.endswith('_CD') or col.endswith('_IND'):\n",
    "        return '🏷️类别型'\n",
    "    \n",
    "    # 检查是否为数值型\n",
    "    if pd.api.types.is_numeric_dtype(df[col]):\n",
    "        return '📊数值型'\n",
    "    \n",
    "    # 以下为object类型的判断\n",
    "    if pd.api.types.is_object_dtype(df[col]) or pd.api.types.is_categorical_dtype(df[col]):\n",
    "        non_null_data = df[col].dropna()\n",
    "        \n",
    "        if len(non_null_data) == 0:\n",
    "            return '🏷️类别型'\n",
    "        \n",
    "        # 检查是否为MD5脱敏字段\n",
    "        if is_md5_hash(df[col]):\n",
    "            return '🔐MD5类别型'\n",
    "        \n",
    "        # 根据唯一值数量和占比判断\n",
    "        unique_count = df[col].nunique()\n",
    "        unique_ratio = unique_count / len(df)\n",
    "        \n",
    "        # 唯一值数量≤50 或 唯一值占比<50%,判定为类别型\n",
    "        if unique_count <= 50 or unique_ratio < 0.5:\n",
    "            return '🏷️类别型'\n",
    "        else:\n",
    "            # 其他情况判定为类别型(因为实际上脱敏后都是类别型)\n",
    "            return '🏷️类别型'\n",
    "    \n",
    "    return '❓未知'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "id": "dc5b7601",
   "metadata": {},
   "outputs": [],
   "source": [
    "def explore_basic_info(df, dataset_name=\"数据集\", primary_key='CUST_NO'):\n",
    "    \"\"\"\n",
    "    探查数据集的基础信息(优化版,适配MD5脱敏数据)\n",
    "    \n",
    "    参数:\n",
    "    - df: pandas DataFrame，待探查的数据集\n",
    "    - dataset_name: str，数据集名称\n",
    "    - primary_key: str，主键字段名（默认为'CUST_NO'，将从分析中排除）\n",
    "    \n",
    "    返回:\n",
    "    - dict: 包含基础信息的字典\n",
    "    \"\"\"\n",
    "    print(\"=\"*80)\n",
    "    print(f\"📊 【{dataset_name}】基础信息探查\")\n",
    "    print(\"=\"*80)\n",
    "    \n",
    "    # 1. 数据形状\n",
    "    n_rows, n_cols = df.shape\n",
    "    print(f\"\\n1️⃣  数据形状:\")\n",
    "    print(f\"   ├─ 行数（样本数）: {n_rows:,}\")\n",
    "    print(f\"   └─ 列数（特征数）: {n_cols}\")\n",
    "    \n",
    "    # 2. 数据类型分布\n",
    "    dtype_counts = df.dtypes.value_counts()\n",
    "    print(f\"\\n2️⃣  数据类型分布:\")\n",
    "    for dtype, count in dtype_counts.items():\n",
    "        print(f\"   ├─ {dtype}: {count} 列 ({count/n_cols*100:.1f}%)\")\n",
    "    \n",
    "    # 3. 内存占用\n",
    "    memory_usage = df.memory_usage(deep=True).sum()\n",
    "    memory_mb = memory_usage / 1024 / 1024\n",
    "    print(f\"\\n3️⃣  内存占用:\")\n",
    "    print(f\"   └─ 总内存: {memory_mb:.2f} MB\")\n",
    "    \n",
    "    # 4. 列信息详情（使用优化后的字段类型识别）\n",
    "    print(f\"\\n4️⃣  列信息详情:\")\n",
    "    print(f\"   {'列名':<30} {'字段类型':<15} {'数据类型':<15} {'非空数':<10} {'唯一值数':<10} {'唯一率':<8} {'内存(KB)':<10}\")\n",
    "    print(f\"   {'-'*115}\")\n",
    "    \n",
    "    for col in df.columns:\n",
    "        field_type = identify_field_type(col, df, primary_key)\n",
    "        col_dtype = str(df[col].dtype)\n",
    "        non_null = df[col].count()\n",
    "        unique_count = df[col].nunique()\n",
    "        unique_ratio = unique_count / len(df)\n",
    "        col_memory = df[col].memory_usage(deep=True) / 1024\n",
    "        \n",
    "        print(f\"   {col:<30} {field_type:<15} {col_dtype:<15} {non_null:<10} {unique_count:<10} {unique_ratio:<8.2%} {col_memory:<10.2f}\")\n",
    "    \n",
    "    \n",
    "    # 5. 字段类型统计（排除主键）\n",
    "    analysis_cols = [col for col in df.columns if col != primary_key]\n",
    "    \n",
    "    # 分类统计\n",
    "    numeric_cols = []\n",
    "    date_cols = []\n",
    "    categorical_cols = []\n",
    "    md5_cols = []\n",
    "    \n",
    "    for col in analysis_cols:\n",
    "        field_type = identify_field_type(col, df, primary_key)\n",
    "        \n",
    "        if '数值型' in field_type:\n",
    "            numeric_cols.append(col)\n",
    "        elif '日期型' in field_type:\n",
    "            date_cols.append(col)\n",
    "        elif 'MD5' in field_type:\n",
    "            md5_cols.append(col)\n",
    "        elif '类别型' in field_type:\n",
    "            categorical_cols.append(col)\n",
    "    \n",
    "    print(f\"\\n5️⃣  字段类型统计（排除主键 {primary_key}）:\")\n",
    "    print(f\"   ├─ 数值型字段: {len(numeric_cols)} 个\")\n",
    "    if len(numeric_cols) > 0:\n",
    "        print(f\"   │  └─ {', '.join(numeric_cols[:5])}{'...' if len(numeric_cols) > 5 else ''}\")\n",
    "    \n",
    "    print(f\"   ├─ 日期型字段: {len(date_cols)} 个\")\n",
    "    if len(date_cols) > 0:\n",
    "        print(f\"   │  └─ {', '.join(date_cols[:5])}{'...' if len(date_cols) > 5 else ''}\")\n",
    "    \n",
    "    print(f\"   ├─ 类别型字段(未脱敏): {len(categorical_cols)} 个\")\n",
    "    if len(categorical_cols) > 0:\n",
    "        print(f\"   │  └─ {', '.join(categorical_cols[:5])}{'...' if len(categorical_cols) > 5 else ''}\")\n",
    "    \n",
    "    print(f\"   └─ MD5脱敏类别字段: {len(md5_cols)} 个\")\n",
    "    if len(md5_cols) > 0:\n",
    "        print(f\"      └─ {', '.join(md5_cols[:5])}{'...' if len(md5_cols) > 5 else ''}\")\n",
    "    \n",
    "    print(\"\\n\" + \"=\"*80)\n",
    "    \n",
    "    return {\n",
    "        'shape': (n_rows, n_cols),\n",
    "        'dtypes': dtype_counts.to_dict(),\n",
    "        'memory_mb': memory_mb,\n",
    "        'primary_key': primary_key,\n",
    "        'numeric_cols': numeric_cols,\n",
    "        'date_cols': date_cols,\n",
    "        'categorical_cols': categorical_cols,\n",
    "        'md5_cols': md5_cols,\n",
    "        'analysis_cols': analysis_cols\n",
    "    }"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "8697e3d4",
   "metadata": {},
   "source": [
    "## 2. 数据质量分析"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "5aabd317",
   "metadata": {},
   "source": [
    "### ✅ 测试优化后的字段识别功能"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "id": "278d3634",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "================================================================================\n",
      "📊 【自然属性信息表(NATURE)】基础信息探查\n",
      "================================================================================\n",
      "\n",
      "1️⃣  数据形状:\n",
      "   ├─ 行数（样本数）: 5,975\n",
      "   └─ 列数（特征数）: 6\n",
      "\n",
      "2️⃣  数据类型分布:\n",
      "   ├─ int64: 4 列 (66.7%)\n",
      "   ├─ object: 1 列 (16.7%)\n",
      "   ├─ float64: 1 列 (16.7%)\n",
      "\n",
      "3️⃣  内存占用:\n",
      "   └─ 总内存: 0.74 MB\n",
      "\n",
      "4️⃣  列信息详情:\n",
      "   列名                             字段类型            数据类型            非空数        唯一值数       唯一率      内存(KB)    \n",
      "   -------------------------------------------------------------------------------------------------------------------\n",
      "   DATA_DAT                       📅日期型            int64           5975       1          0.02%    46.80     \n",
      "   CUST_NO                        🔑主键             object          5975       5975       100.00%  519.44    \n",
      "   NTRL_CUST_SEX_CD               🏷️类别型           int64           5975       2          0.03%    46.80     \n",
      "   NTRL_CUST_AGE                  📊数值型            float64         5975       40         0.67%    46.80     \n",
      "   NTRL_RANK_CD                   🏷️类别型           int64           5975       8          0.13%    46.80     \n",
      "   NTRL_SEAN_ACTV_IND             🏷️类别型           int64           5975       2          0.03%    46.80     \n",
      "\n",
      "5️⃣  字段类型统计（排除主键 CUST_NO）:\n",
      "   ├─ 数值型字段: 1 个\n",
      "   │  └─ NTRL_CUST_AGE\n",
      "   ├─ 日期型字段: 1 个\n",
      "   │  └─ DATA_DAT\n",
      "   ├─ 类别型字段(未脱敏): 3 个\n",
      "   │  └─ NTRL_CUST_SEX_CD, NTRL_RANK_CD, NTRL_SEAN_ACTV_IND\n",
      "   └─ MD5脱敏类别字段: 0 个\n",
      "\n",
      "================================================================================\n"
     ]
    }
   ],
   "source": [
    "# 测试1: NATURE_data (自然属性信息表)\n",
    "nature_info = explore_basic_info(NATURE_data, \"自然属性信息表(NATURE)\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "id": "11902913",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "测试 is_date_field 函数:\n",
      "DATA_DAT是否为日期字段: True\n",
      "NTRL_CUST_SEX_CD是否为日期字段: False\n",
      "\n",
      "测试 identify_field_type 函数:\n",
      "DATA_DAT字段类型: 📅日期型\n",
      "NTRL_CUST_SEX_CD字段类型: 🏷️类别型\n",
      "NTRL_SEAN_ACTV_IND字段类型: 🏷️类别型\n"
     ]
    }
   ],
   "source": [
    "# 测试日期字段识别函数\n",
    "print(\"测试 is_date_field 函数:\")\n",
    "print(\"DATA_DAT是否为日期字段:\", is_date_field('DATA_DAT', NATURE_data['DATA_DAT']))\n",
    "print(\"NTRL_CUST_SEX_CD是否为日期字段:\", is_date_field('NTRL_CUST_SEX_CD', NATURE_data['NTRL_CUST_SEX_CD']))\n",
    "\n",
    "print(\"\\n测试 identify_field_type 函数:\")\n",
    "print(\"DATA_DAT字段类型:\", identify_field_type('DATA_DAT', NATURE_data))\n",
    "print(\"NTRL_CUST_SEX_CD字段类型:\", identify_field_type('NTRL_CUST_SEX_CD', NATURE_data))\n",
    "print(\"NTRL_SEAN_ACTV_IND字段类型:\", identify_field_type('NTRL_SEAN_ACTV_IND', NATURE_data))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "id": "243b1a3d",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "🔻🔻🔻🔻🔻🔻🔻🔻🔻🔻🔻🔻🔻🔻🔻🔻🔻🔻🔻🔻🔻🔻🔻🔻🔻🔻🔻🔻🔻🔻🔻🔻🔻🔻🔻🔻🔻🔻🔻🔻\n",
      "================================================================================\n",
      "📊 【活期交易表(TR_APS_DTL)】基础信息探查\n",
      "================================================================================\n",
      "\n",
      "1️⃣  数据形状:\n",
      "   ├─ 行数（样本数）: 345,313\n",
      "   └─ 列数（特征数）: 6\n",
      "\n",
      "2️⃣  数据类型分布:\n",
      "   ├─ object: 4 列 (66.7%)\n",
      "   ├─ int64: 1 列 (16.7%)\n",
      "   ├─ float64: 1 列 (16.7%)\n",
      "\n",
      "3️⃣  内存占用:\n",
      "   └─ 总内存: 122.51 MB\n",
      "\n",
      "4️⃣  列信息详情:\n",
      "   列名                             字段类型            数据类型            非空数        唯一值数       唯一率      内存(KB)    \n",
      "   -------------------------------------------------------------------------------------------------------------------\n",
      "   APSDTRDAT                      📅日期型            int64           345313     91         0.03%    2697.88   \n",
      "   CUST_NO                        🔑主键             object          345313     5616       1.63%    30012.68  \n",
      "   APSDTRCOD                      🔐MD5类别型         object          345313     154        0.04%    30012.68  \n",
      "   APSDTRAMT                      📊数值型            float64         345313     48947      14.17%   2697.88   \n",
      "   APSDABS                        🔐MD5类别型         object          345313     1527       0.44%    30012.68  \n",
      "   APSDTRCHL                      🔐MD5类别型         object          345313     29         0.01%    30012.68  \n",
      "\n",
      "5️⃣  字段类型统计（排除主键 CUST_NO）:\n",
      "   ├─ 数值型字段: 1 个\n",
      "   │  └─ APSDTRAMT\n",
      "   ├─ 日期型字段: 1 个\n",
      "   │  └─ APSDTRDAT\n",
      "   ├─ 类别型字段(未脱敏): 0 个\n",
      "   └─ MD5脱敏类别字段: 3 个\n",
      "      └─ APSDTRCOD, APSDABS, APSDTRCHL\n",
      "\n",
      "================================================================================\n"
     ]
    }
   ],
   "source": [
    "# 测试2: TR_APS_DTL_data (活期交易表 - 含MD5脱敏字段)\n",
    "print(\"\\n\" + \"🔻\"*40)\n",
    "tr_aps_info = explore_basic_info(TR_APS_DTL_data, \"活期交易表(TR_APS_DTL)\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "id": "b40eab3e",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "🔻🔻🔻🔻🔻🔻🔻🔻🔻🔻🔻🔻🔻🔻🔻🔻🔻🔻🔻🔻🔻🔻🔻🔻🔻🔻🔻🔻🔻🔻🔻🔻🔻🔻🔻🔻🔻🔻🔻🔻\n",
      "================================================================================\n",
      "📊 【掌银页面访问表(MB_PAGEVIEW_DTL)】基础信息探查\n",
      "================================================================================\n",
      "\n",
      "1️⃣  数据形状:\n",
      "   ├─ 行数（样本数）: 372,196\n",
      "   └─ 列数（特征数）: 5\n",
      "\n",
      "2️⃣  数据类型分布:\n",
      "   ├─ object: 5 列 (100.0%)\n",
      "\n",
      "3️⃣  内存占用:\n",
      "   └─ 总内存: 150.15 MB\n",
      "\n",
      "4️⃣  列信息详情:\n",
      "   列名                             字段类型            数据类型            非空数        唯一值数       唯一率      内存(KB)    \n",
      "   -------------------------------------------------------------------------------------------------------------------\n",
      "   OPERATION_DATE                 📅日期型            object          372196     88         0.02%    24352.79  \n",
      "   CUST_NO                        🔑主键             object          372196     2753       0.74%    32349.19  \n",
      "   PAGE_TITLE                     🔐MD5类别型         object          372196     988        0.27%    32349.19  \n",
      "   REFERRER_TITLE                 🔐MD5类别型         object          372196     992        0.27%    32349.19  \n",
      "   MODEL_NAME                     🔐MD5类别型         object          372196     105        0.03%    32349.19  \n",
      "\n",
      "5️⃣  字段类型统计（排除主键 CUST_NO）:\n",
      "   ├─ 数值型字段: 0 个\n",
      "   ├─ 日期型字段: 1 个\n",
      "   │  └─ OPERATION_DATE\n",
      "   ├─ 类别型字段(未脱敏): 0 个\n",
      "   └─ MD5脱敏类别字段: 3 个\n",
      "      └─ PAGE_TITLE, REFERRER_TITLE, MODEL_NAME\n",
      "\n",
      "================================================================================\n"
     ]
    }
   ],
   "source": [
    "# 测试3: MB_PAGEVIEW_DTL_data (掌银页面访问表 - 含不同日期格式和MD5字段)\n",
    "print(\"\\n\" + \"🔻\"*40)\n",
    "mb_info = explore_basic_info(MB_PAGEVIEW_DTL_data, \"掌银页面访问表(MB_PAGEVIEW_DTL)\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "cc8b0b6f",
   "metadata": {},
   "source": [
    "### ✅ 字段识别优化总结\n",
    "\n",
    "#### 🎯 优化成果\n",
    "\n",
    "经过优化,字段类型识别系统现在能够准确识别以下情况:\n",
    "\n",
    "1. **✅ 日期字段识别**\n",
    "   - `DATA_DAT` (int64类型, 值为20250731) → 📅日期型\n",
    "   - `APSDTRDAT` (int64类型, 值为20250402) → 📅日期型  \n",
    "   - `OPERATION_DATE` (object类型, 值为\"2025-04-07\") → 📅日期型\n",
    "\n",
    "2. **✅ 类别代码字段识别**\n",
    "   - `NTRL_CUST_SEX_CD` (以_CD结尾, int64类型) → 🏷️类别型\n",
    "   - `NTRL_RANK_CD` (以_CD结尾, int64类型) → 🏷️类别型\n",
    "   - `NTRL_SEAN_ACTV_IND` (以_IND结尾, int64类型) → 🏷️类别型\n",
    "\n",
    "3. **✅ MD5脱敏字段识别**\n",
    "   - `APSDTRCOD` (32位十六进制, 如\"566a1fdf...\") → 🔐MD5类别型\n",
    "   - `APSDABS` (32位十六进制, 如\"acaf665f...\") → 🔐MD5类别型\n",
    "   - `APSDTRCHL` (32位十六进制, 如\"f1811258...\") → 🔐MD5类别型\n",
    "   - `PAGE_TITLE`/`REFERRER_TITLE`/`MODEL_NAME` → 🔐MD5类别型\n",
    "\n",
    "4. **✅ 数值字段识别**\n",
    "   - `NTRL_CUST_AGE` (float64类型) → 📊数值型\n",
    "   - `APSDTRAMT` (float64类型) → 📊数值型\n",
    "\n",
    "#### 🔧 关键优化点\n",
    "\n",
    "1. **判断优先级调整**: \n",
    "   - 日期型和类别代码(_CD/_IND)判断优先于数值型\n",
    "   - 避免int类型的日期/类别字段被误判为数值型\n",
    "\n",
    "2. **日期识别增强**:\n",
    "   - 支持int64类型的8位日期数字(如20250731)\n",
    "   - 支持object类型的日期字符串(如\"2025-04-07\")\n",
    "   - 通过数值范围和位数验证日期合法性\n",
    "\n",
    "3. **MD5检测算法**:\n",
    "   - 检查32位长度\n",
    "   - 验证是否为小写十六进制字符\n",
    "   - 采样验证(提高性能)\n",
    "   - 90%以上样本匹配则判定为MD5\n",
    "\n",
    "#### 📊 测试结果汇总\n",
    "\n",
    "| 数据表 | 日期字段 | 类别字段 | MD5字段 | 数值字段 |\n",
    "|--------|---------|---------|---------|---------|\n",
    "| NATURE_data | 1 | 3 | 0 | 1 |\n",
    "| TR_APS_DTL_data | 1 | 0 | 3 | 1 |\n",
    "| MB_PAGEVIEW_DTL_data | 1 | 0 | 3 | 0 |\n",
    "\n",
    "**✅ 所有字段类型识别准确率: 100%**"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a85a8555",
   "metadata": {},
   "source": [
    "---\n",
    "\n",
    "## 💡 使用建议\n",
    "\n",
    "### 适用于所有数据表的探查流程\n",
    "\n",
    "```python\n",
    "# 1. 对单个数据表进行探查\n",
    "info = explore_basic_info(your_dataframe, \"数据表名称\")\n",
    "\n",
    "# 2. 访问返回的字段分类信息\n",
    "numeric_cols = info['numeric_cols']      # 数值型字段列表\n",
    "date_cols = info['date_cols']            # 日期型字段列表  \n",
    "categorical_cols = info['categorical_cols']  # 类别型字段列表(未脱敏)\n",
    "md5_cols = info['md5_cols']              # MD5脱敏类别字段列表\n",
    "\n",
    "# 3. 后续可针对不同类型字段进行特征工程\n",
    "# - 数值型: 统计特征、分箱、标准化\n",
    "# - 日期型: 提取年月日、时间差、周期特征\n",
    "# - 类别型: One-Hot编码、Label编码、Target编码\n",
    "# - MD5类别: 频次编码、Target编码、词向量(对于文本类)\n",
    "```\n",
    "\n",
    "### 批量探查所有数据表\n",
    "\n",
    "```python\n",
    "# 批量对所有导入的数据表进行探查\n",
    "for data_name in train_data_name:\n",
    "    df = globals()[data_name]\n",
    "    dataset_name = data_name.replace('_data', '').upper()\n",
    "    info = explore_basic_info(df, dataset_name)\n",
    "    print(\"\\n\" + \"=\"*80 + \"\\n\")\n",
    "```"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "4c355d7d",
   "metadata": {},
   "source": [
    "### 🔄 优化后的统计特征分析\n",
    "\n",
    "以下方法已优化，支持文本型字段识别并自动排除主键字段"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "9d66ac72",
   "metadata": {},
   "outputs": [],
   "source": [
    "def analyze_text_features(df, dataset_name=\"数据集\", primary_key='CUST_NO'):\n",
    "    \"\"\"\n",
    "    Analyze the characteristics of text-like features.\n",
    "    \n",
    "    Parameters:\n",
    "    - df: pandas DataFrame to analyze\n",
    "    - dataset_name: str, display name of the dataset\n",
    "    - primary_key: str, primary-key column name (default 'CUST_NO'; excluded from analysis)\n",
    "    \n",
    "    Returns:\n",
    "    - dict: per-column text-feature statistics, or None when no text column is found\n",
    "    \"\"\"\n",
    "    # Identify text-like columns (object/categorical dtype, high cardinality, long values)\n",
    "    text_cols = []\n",
    "    analysis_cols = [col for col in df.columns if col != primary_key]\n",
    "    \n",
    "    for col in analysis_cols:\n",
    "        # isinstance(..., pd.CategoricalDtype) replaces the deprecated pd.api.types.is_categorical_dtype\n",
    "        if pd.api.types.is_object_dtype(df[col]) or isinstance(df[col].dtype, pd.CategoricalDtype):\n",
    "            non_null_data = df[col].dropna()\n",
    "            if len(non_null_data) == 0:\n",
    "                continue\n",
    "            \n",
    "            unique_ratio = df[col].nunique() / len(df)\n",
    "            avg_length = non_null_data.astype(str).str.len().mean()\n",
    "            \n",
    "            # Text-like criteria: unique ratio > 50% AND average length > 10\n",
    "            if unique_ratio > 0.5 and avg_length > 10:\n",
    "                text_cols.append(col)\n",
    "    \n",
    "    if len(text_cols) == 0:\n",
    "        print(f\"⚠️  【{dataset_name}】无文本型字段（已排除主键 {primary_key}）\")\n",
    "        return None\n",
    "    \n",
    "    print(\"=\"*100)\n",
    "    print(f\"📝 【{dataset_name}】文本型特征分析 ({len(text_cols)} 个字段，已排除主键 {primary_key})\")\n",
    "    print(\"=\"*100)\n",
    "    \n",
    "    text_info = {}\n",
    "    \n",
    "    for col in text_cols:\n",
    "        print(f\"\\n🔹 字段: {col}\")\n",
    "        print(f\"   {'-'*90}\")\n",
    "        \n",
    "        # Basic statistics\n",
    "        n_unique = df[col].nunique()\n",
    "        n_missing = df[col].isnull().sum()\n",
    "        missing_rate = n_missing / len(df) * 100\n",
    "        \n",
    "        # Text-length statistics (computed on non-null values only)\n",
    "        text_lengths = df[col].dropna().astype(str).str.len()\n",
    "        min_len = text_lengths.min()\n",
    "        max_len = text_lengths.max()\n",
    "        avg_len = text_lengths.mean()\n",
    "        median_len = text_lengths.median()\n",
    "        \n",
    "        print(f\"   唯一值数: {n_unique:,} ({n_unique/len(df)*100:.2f}%) | 缺失数: {n_missing:,} ({missing_rate:.2f}%)\")\n",
    "        print(f\"   文本长度: 最小={min_len:.0f}, 最大={max_len:.0f}, 平均={avg_len:.1f}, 中位数={median_len:.0f}\")\n",
    "        \n",
    "        # Frequency statistics (top 10)\n",
    "        word_counts = df[col].value_counts()\n",
    "        print(f\"\\n   Top 10 高频文本:\")\n",
    "        print(f\"   {'文本内容':<60} {'频次':<15} {'占比(%)':<15}\")\n",
    "        print(f\"   {'-'*95}\")\n",
    "        \n",
    "        for i, (val, count) in enumerate(word_counts.head(10).items()):\n",
    "            ratio = count / len(df) * 100\n",
    "            # Truncate long values for display; the ternary spans the whole expression\n",
    "            val_str = str(val)[:57] + \"...\" if len(str(val)) > 60 else str(val)\n",
    "            print(f\"   {val_str:<60} {count:<15,} {ratio:<15.2f}\")\n",
    "        \n",
    "        # Empty-string and single-character counts\n",
    "        empty_count = (df[col].astype(str).str.strip() == '').sum()\n",
    "        single_char_count = (df[col].dropna().astype(str).str.len() == 1).sum()\n",
    "        \n",
    "        if empty_count > 0:\n",
    "            print(f\"\\n   ⚠️  空文本数: {empty_count} ({empty_count/len(df)*100:.2f}%)\")\n",
    "        if single_char_count > 0:\n",
    "            print(f\"   ⚠️  单字符文本数: {single_char_count} ({single_char_count/len(df)*100:.2f}%)\")\n",
    "        \n",
    "        text_info[col] = {\n",
    "            'n_unique': n_unique,\n",
    "            'n_missing': n_missing,\n",
    "            'missing_rate': missing_rate,\n",
    "            'min_length': min_len,\n",
    "            'max_length': max_len,\n",
    "            'avg_length': avg_len,\n",
    "            'median_length': median_len,\n",
    "            'empty_count': empty_count,\n",
    "            'single_char_count': single_char_count,\n",
    "            'top_values': word_counts.head(10).to_dict()\n",
    "        }\n",
    "    \n",
    "    print(\"\\n\" + \"=\"*100)\n",
    "    \n",
    "    return text_info"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "f88ca0ca",
   "metadata": {},
   "outputs": [],
   "source": [
    "def analyze_data_quality(df, dataset_name=\"数据集\", primary_key='CUST_NO'):\n",
    "    \"\"\"\n",
    "    Analyze dataset quality: missing values, duplicate rows, outliers, etc.\n",
    "    \n",
    "    Parameters:\n",
    "    - df: pandas DataFrame to analyze\n",
    "    - dataset_name: str, display name of the dataset\n",
    "    - primary_key: str, primary-key column name (default 'CUST_NO'; excluded from column-level checks)\n",
    "    \n",
    "    Returns:\n",
    "    - dict: quality-analysis results (missing stats, duplicates, outlier info)\n",
    "    \"\"\"\n",
    "    print(\"=\"*80)\n",
    "    print(f\"🔍 【{dataset_name}】数据质量分析\")\n",
    "    print(\"=\"*80)\n",
    "    \n",
    "    n_rows = len(df)\n",
    "    # Fix: primary_key was accepted but never used; exclude it as documented\n",
    "    analysis_cols = [col for col in df.columns if col != primary_key]\n",
    "    \n",
    "    # 1. Missing-value analysis\n",
    "    print(f\"\\n1️⃣  缺失值分析:\")\n",
    "    missing_stats = pd.DataFrame({\n",
    "        '缺失数': df[analysis_cols].isnull().sum(),\n",
    "        '缺失率(%)': (df[analysis_cols].isnull().sum() / n_rows * 100).round(2)\n",
    "    })\n",
    "    missing_stats = missing_stats[missing_stats['缺失数'] > 0].sort_values('缺失率(%)', ascending=False)\n",
    "    \n",
    "    if len(missing_stats) > 0:\n",
    "        print(f\"   ⚠️  发现 {len(missing_stats)} 个字段存在缺失值:\")\n",
    "        print(f\"\\n   {'字段名':<30} {'缺失数':<12} {'缺失率(%)':<12}\")\n",
    "        print(f\"   {'-'*60}\")\n",
    "        for col, row in missing_stats.iterrows():\n",
    "            print(f\"   {col:<30} {int(row['缺失数']):<12} {row['缺失率(%)']:<12.2f}\")\n",
    "    else:\n",
    "        print(f\"   ✅ 无缺失值\")\n",
    "    \n",
    "    # 2. Duplicate-row analysis (full rows, primary key included on purpose)\n",
    "    print(f\"\\n2️⃣  重复值分析:\")\n",
    "    n_duplicates = df.duplicated().sum()\n",
    "    duplicate_rate = n_duplicates / n_rows * 100\n",
    "    \n",
    "    if n_duplicates > 0:\n",
    "        print(f\"   ⚠️  重复行数: {n_duplicates:,} ({duplicate_rate:.2f}%)\")\n",
    "    else:\n",
    "        print(f\"   ✅ 无重复行\")\n",
    "    \n",
    "    # 3. Unique-value analysis\n",
    "    print(f\"\\n3️⃣  唯一值分析:\")\n",
    "    print(f\"   {'字段名':<30} {'唯一值数':<15} {'唯一率(%)':<15} {'数据类型':<15}\")\n",
    "    print(f\"   {'-'*80}\")\n",
    "    \n",
    "    for col in analysis_cols:\n",
    "        unique_count = df[col].nunique()\n",
    "        unique_rate = unique_count / n_rows * 100\n",
    "        dtype = str(df[col].dtype)\n",
    "        \n",
    "        # Flag constants, unique identifiers and low-cardinality columns\n",
    "        marker = \"\"\n",
    "        if unique_count == 1:\n",
    "            marker = \"⚠️ 常量\"\n",
    "        elif unique_count == n_rows:\n",
    "            marker = \"🔑 唯一标识\"\n",
    "        elif unique_rate < 1:\n",
    "            marker = \"📊 低基数\"\n",
    "        \n",
    "        print(f\"   {col:<30} {unique_count:<15} {unique_rate:<15.2f} {dtype:<15} {marker}\")\n",
    "    \n",
    "    # 4. Outlier detection for numeric columns (IQR rule, fixed 1.5 factor)\n",
    "    numeric_cols = df[analysis_cols].select_dtypes(include=[np.number]).columns\n",
    "    \n",
    "    if len(numeric_cols) > 0:\n",
    "        print(f\"\\n4️⃣  数值型字段异常值检测（IQR法）:\")\n",
    "        print(f\"   {'字段名':<30} {'异常值数':<15} {'异常率(%)':<15}\")\n",
    "        print(f\"   {'-'*65}\")\n",
    "        \n",
    "        outlier_info = {}\n",
    "        for col in numeric_cols:\n",
    "            Q1 = df[col].quantile(0.25)\n",
    "            Q3 = df[col].quantile(0.75)\n",
    "            IQR = Q3 - Q1\n",
    "            lower_bound = Q1 - 1.5 * IQR\n",
    "            upper_bound = Q3 + 1.5 * IQR\n",
    "            \n",
    "            outliers = ((df[col] < lower_bound) | (df[col] > upper_bound)).sum()\n",
    "            outlier_rate = outliers / n_rows * 100\n",
    "            \n",
    "            outlier_info[col] = {\n",
    "                'count': outliers,\n",
    "                'rate': outlier_rate,\n",
    "                'bounds': (lower_bound, upper_bound)\n",
    "            }\n",
    "            \n",
    "            if outliers > 0:\n",
    "                print(f\"   {col:<30} {outliers:<15} {outlier_rate:<15.2f}\")\n",
    "    else:\n",
    "        outlier_info = {}\n",
    "    \n",
    "    # 5. Zero and negative values for numeric columns\n",
    "    if len(numeric_cols) > 0:\n",
    "        print(f\"\\n5️⃣  零值和负值分析:\")\n",
    "        print(f\"   {'字段名':<30} {'零值数':<12} {'零值率(%)':<12} {'负值数':<12} {'负值率(%)':<12}\")\n",
    "        print(f\"   {'-'*90}\")\n",
    "        \n",
    "        for col in numeric_cols:\n",
    "            zero_count = (df[col] == 0).sum()\n",
    "            zero_rate = zero_count / n_rows * 100\n",
    "            negative_count = (df[col] < 0).sum()\n",
    "            negative_rate = negative_count / n_rows * 100\n",
    "            \n",
    "            if zero_count > 0 or negative_count > 0:\n",
    "                print(f\"   {col:<30} {zero_count:<12} {zero_rate:<12.2f} {negative_count:<12} {negative_rate:<12.2f}\")\n",
    "    \n",
    "    print(\"\\n\" + \"=\"*80)\n",
    "    \n",
    "    return {\n",
    "        'missing_stats': missing_stats.to_dict() if len(missing_stats) > 0 else {},\n",
    "        'n_duplicates': n_duplicates,\n",
    "        'duplicate_rate': duplicate_rate,\n",
    "        'outlier_info': outlier_info\n",
    "    }"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "24fe52fe",
   "metadata": {},
   "source": [
    "## 3. 统计特征分析"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "2c9b16a8",
   "metadata": {},
   "outputs": [],
   "source": [
    "def analyze_numeric_features(df, dataset_name=\"数据集\"):\n",
    "    \"\"\"\n",
    "    Summary statistics for numeric features.\n",
    "    \n",
    "    Parameters:\n",
    "    - df: pandas DataFrame to analyze\n",
    "    - dataset_name: str, display name of the dataset\n",
    "    \n",
    "    Returns:\n",
    "    - DataFrame: statistics per numeric column (None when there are none)\n",
    "    \"\"\"\n",
    "    numeric_cols = df.select_dtypes(include=[np.number]).columns\n",
    "    \n",
    "    if len(numeric_cols) == 0:\n",
    "        print(f\"⚠️  【{dataset_name}】无数值型字段\")\n",
    "        return None\n",
    "    \n",
    "    print(\"=\"*100)\n",
    "    print(f\"📈 【{dataset_name}】数值型特征统计分析 ({len(numeric_cols)} 个字段)\")\n",
    "    print(\"=\"*100)\n",
    "    \n",
    "    # Base summary statistics from describe()\n",
    "    stats_df = df[numeric_cols].describe().T\n",
    "    \n",
    "    # Extra indicators: missing/zero counts and rates, skewness, kurtosis\n",
    "    stats_df['缺失数'] = df[numeric_cols].isnull().sum()\n",
    "    stats_df['缺失率(%)'] = (stats_df['缺失数'] / len(df) * 100).round(2)\n",
    "    stats_df['零值数'] = (df[numeric_cols] == 0).sum()\n",
    "    stats_df['零值率(%)'] = (stats_df['零值数'] / len(df) * 100).round(2)\n",
    "    stats_df['偏度'] = df[numeric_cols].skew().round(2)\n",
    "    stats_df['峰度'] = df[numeric_cols].kurtosis().round(2)\n",
    "    \n",
    "    # Reorder columns for readability\n",
    "    cols_order = ['count', 'mean', 'std', 'min', '25%', '50%', '75%', 'max', \n",
    "                  '缺失数', '缺失率(%)', '零值数', '零值率(%)', '偏度', '峰度']\n",
    "    stats_df = stats_df[cols_order]\n",
    "    \n",
    "    # Formatted output\n",
    "    print(\"\\n📊 详细统计信息:\")\n",
    "    print(stats_df.to_string())\n",
    "    \n",
    "    # Distribution-shape summary per column\n",
    "    print(f\"\\n📌 分布特征总结:\")\n",
    "    print(f\"   {'字段名':<30} {'分布特征':<40} {'建议':<30}\")\n",
    "    print(f\"   {'-'*105}\")\n",
    "    \n",
    "    for col in numeric_cols:\n",
    "        skewness = df[col].skew()\n",
    "        kurtosis_val = df[col].kurtosis()  # NOTE(review): unused in the classification below\n",
    "        \n",
    "        # Classify the distribution shape by skewness only\n",
    "        if abs(skewness) < 0.5:\n",
    "            dist_type = \"✅ 近似正态分布\"\n",
    "            suggestion = \"无需转换\"\n",
    "        elif skewness > 0.5:\n",
    "            dist_type = f\"⚠️  右偏分布 (偏度={skewness:.2f})\"\n",
    "            suggestion = \"考虑对数/Box-Cox转换\"\n",
    "        else:\n",
    "            dist_type = f\"⚠️  左偏分布 (偏度={skewness:.2f})\"\n",
    "            suggestion = \"考虑平方/指数转换\"\n",
    "        \n",
    "        print(f\"   {col:<30} {dist_type:<40} {suggestion:<30}\")\n",
    "    \n",
    "    print(\"\\n\" + \"=\"*100)\n",
    "    \n",
    "    return stats_df\n",
    "\n",
    "\n",
    "def analyze_categorical_features(df, dataset_name=\"数据集\", top_n=10):\n",
    "    \"\"\"\n",
    "    Distribution analysis for categorical features.\n",
    "    \n",
    "    Parameters:\n",
    "    - df: pandas DataFrame to analyze\n",
    "    - dataset_name: str, display name of the dataset\n",
    "    - top_n: int, number of most frequent categories to show\n",
    "    \n",
    "    Returns:\n",
    "    - dict: per-column analysis results (None when there are none)\n",
    "    \"\"\"\n",
    "    categorical_cols = df.select_dtypes(include=['object', 'category']).columns\n",
    "    \n",
    "    if len(categorical_cols) == 0:\n",
    "        print(f\"⚠️  【{dataset_name}】无类别型字段\")\n",
    "        return None\n",
    "    \n",
    "    print(\"=\"*100)\n",
    "    print(f\"📊 【{dataset_name}】类别型特征分析 ({len(categorical_cols)} 个字段)\")\n",
    "    print(\"=\"*100)\n",
    "    \n",
    "    categorical_info = {}\n",
    "    \n",
    "    for col in categorical_cols:\n",
    "        print(f\"\\n🔹 字段: {col}\")\n",
    "        print(f\"   {'-'*90}\")\n",
    "        \n",
    "        # Basic statistics\n",
    "        n_unique = df[col].nunique()\n",
    "        n_missing = df[col].isnull().sum()\n",
    "        missing_rate = n_missing / len(df) * 100\n",
    "        \n",
    "        print(f\"   唯一值数: {n_unique:,} | 缺失数: {n_missing:,} ({missing_rate:.2f}%)\")\n",
    "        \n",
    "        # Frequency counts\n",
    "        value_counts = df[col].value_counts()\n",
    "        \n",
    "        print(f\"\\n   Top {min(top_n, len(value_counts))} 高频类别:\")\n",
    "        print(f\"   {'类别':<40} {'频次':<15} {'占比(%)':<15}\")\n",
    "        print(f\"   {'-'*75}\")\n",
    "        \n",
    "        for i, (val, count) in enumerate(value_counts.head(top_n).items()):\n",
    "            ratio = count / len(df) * 100\n",
    "            val_str = str(val)[:37] + \"...\" if len(str(val)) > 40 else str(val)\n",
    "            print(f\"   {val_str:<40} {count:<15,} {ratio:<15.2f}\")\n",
    "        \n",
    "        # Rare-category count\n",
    "        low_freq_threshold = len(df) * 0.001  # 0.1% threshold\n",
    "        low_freq_count = (value_counts < low_freq_threshold).sum()\n",
    "        \n",
    "        if low_freq_count > 0:\n",
    "            print(f\"\\n   ⚠️  低频类别数（<0.1%）: {low_freq_count}\")\n",
    "        \n",
    "        categorical_info[col] = {\n",
    "            'n_unique': n_unique,\n",
    "            'n_missing': n_missing,\n",
    "            'missing_rate': missing_rate,\n",
    "            'top_values': value_counts.head(top_n).to_dict(),\n",
    "            'low_freq_count': low_freq_count\n",
    "        }\n",
    "    \n",
    "    print(\"\\n\" + \"=\"*100)\n",
    "    \n",
    "    return categorical_info"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "5680a651",
   "metadata": {},
   "source": [
    "## 4. 数据清洗操作"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "ecb16ca0",
   "metadata": {},
   "outputs": [],
   "source": [
    "def handle_missing_values(df, numeric_strategy='median', categorical_strategy='mode', \n",
    "                         fill_value=None, dataset_name=\"数据集\", inplace=False):\n",
    "    \"\"\"\n",
    "    Generic missing-value handler.\n",
    "    \n",
    "    Parameters:\n",
    "    - df: pandas DataFrame to process\n",
    "    - numeric_strategy: str, strategy for numeric columns ['mean', 'median', 'mode', 'zero', 'forward', 'backward', 'custom']\n",
    "    - categorical_strategy: str, strategy for categorical columns ['mode', 'unknown', 'forward', 'backward', 'custom']\n",
    "    - fill_value: custom fill value (used when strategy='custom')\n",
    "    - dataset_name: str, display name of the dataset\n",
    "    - inplace: bool, whether to modify df in place\n",
    "    \n",
    "    Returns:\n",
    "    - DataFrame: processed dataset (None when inplace=True)\n",
    "    \"\"\"\n",
    "    if not inplace:\n",
    "        df = df.copy()\n",
    "    \n",
    "    print(\"=\"*80)\n",
    "    print(f\"🔧 【{dataset_name}】缺失值处理\")\n",
    "    print(\"=\"*80)\n",
    "    \n",
    "    # Missing-value count before processing\n",
    "    missing_before = df.isnull().sum().sum()\n",
    "    \n",
    "    if missing_before == 0:\n",
    "        print(\"✅ 数据集无缺失值，无需处理\")\n",
    "        return df if not inplace else None\n",
    "    \n",
    "    print(f\"\\n处理前缺失值总数: {missing_before:,}\")\n",
    "    \n",
    "    # Numeric columns\n",
    "    numeric_cols = df.select_dtypes(include=[np.number]).columns\n",
    "    numeric_missing = df[numeric_cols].isnull().sum()\n",
    "    numeric_missing_cols = numeric_missing[numeric_missing > 0].index\n",
    "    \n",
    "    if len(numeric_missing_cols) > 0:\n",
    "        print(f\"\\n📊 数值型字段处理 (策略: {numeric_strategy}):\")\n",
    "        for col in numeric_missing_cols:\n",
    "            missing_count = df[col].isnull().sum()\n",
    "            \n",
    "            # Column-level assignment replaces the unreliable chained\n",
    "            # `df[col].fillna(..., inplace=True)` and the deprecated\n",
    "            # `fillna(method='ffill'/'bfill')` keyword (removed in pandas 2.x+).\n",
    "            if numeric_strategy == 'mean':\n",
    "                fill_val = df[col].mean()\n",
    "                df[col] = df[col].fillna(fill_val)\n",
    "            elif numeric_strategy == 'median':\n",
    "                fill_val = df[col].median()\n",
    "                df[col] = df[col].fillna(fill_val)\n",
    "            elif numeric_strategy == 'mode':\n",
    "                fill_val = df[col].mode()[0] if not df[col].mode().empty else 0\n",
    "                df[col] = df[col].fillna(fill_val)\n",
    "            elif numeric_strategy == 'zero':\n",
    "                df[col] = df[col].fillna(0)\n",
    "                fill_val = 0\n",
    "            elif numeric_strategy == 'forward':\n",
    "                df[col] = df[col].ffill()\n",
    "                fill_val = \"前向填充\"\n",
    "            elif numeric_strategy == 'backward':\n",
    "                df[col] = df[col].bfill()\n",
    "                fill_val = \"后向填充\"\n",
    "            elif numeric_strategy == 'custom' and fill_value is not None:\n",
    "                df[col] = df[col].fillna(fill_value)\n",
    "                fill_val = fill_value\n",
    "            else:\n",
    "                # Unknown strategy (or 'custom' without fill_value): previously fell\n",
    "                # through with fill_val undefined and raised NameError at print time.\n",
    "                print(f\"   ├─ {col}: ⚠️ 未知策略，跳过\")\n",
    "                continue\n",
    "            \n",
    "            if numeric_strategy not in ['forward', 'backward']:\n",
    "                # Fix: the original embedded a conditional inside the f-string format\n",
    "                # spec ({fill_val:.2f if ...}), which raises 'Invalid format specifier'.\n",
    "                fill_disp = f\"{fill_val:.2f}\" if isinstance(fill_val, float) else fill_val\n",
    "                print(f\"   ├─ {col}: {missing_count} 个缺失值 → 填充值: {fill_disp}\")\n",
    "            else:\n",
    "                print(f\"   ├─ {col}: {missing_count} 个缺失值 → {fill_val}\")\n",
    "    \n",
    "    # Categorical columns\n",
    "    categorical_cols = df.select_dtypes(include=['object', 'category']).columns\n",
    "    categorical_missing = df[categorical_cols].isnull().sum()\n",
    "    categorical_missing_cols = categorical_missing[categorical_missing > 0].index\n",
    "    \n",
    "    if len(categorical_missing_cols) > 0:\n",
    "        print(f\"\\n📝 类别型字段处理 (策略: {categorical_strategy}):\")\n",
    "        for col in categorical_missing_cols:\n",
    "            missing_count = df[col].isnull().sum()\n",
    "            \n",
    "            # NOTE(review): filling a category-dtype column with a value that is not\n",
    "            # among its categories raises in pandas — confirm callers pass object dtype.\n",
    "            if categorical_strategy == 'mode':\n",
    "                fill_val = df[col].mode()[0] if not df[col].mode().empty else 'unknown'\n",
    "                df[col] = df[col].fillna(fill_val)\n",
    "            elif categorical_strategy == 'unknown':\n",
    "                df[col] = df[col].fillna('unknown')\n",
    "                fill_val = 'unknown'\n",
    "            elif categorical_strategy == 'forward':\n",
    "                df[col] = df[col].ffill()\n",
    "                fill_val = \"前向填充\"\n",
    "            elif categorical_strategy == 'backward':\n",
    "                df[col] = df[col].bfill()\n",
    "                fill_val = \"后向填充\"\n",
    "            elif categorical_strategy == 'custom' and fill_value is not None:\n",
    "                df[col] = df[col].fillna(fill_value)\n",
    "                fill_val = fill_value\n",
    "            else:\n",
    "                print(f\"   ├─ {col}: ⚠️ 未知策略，跳过\")\n",
    "                continue\n",
    "            \n",
    "            print(f\"   ├─ {col}: {missing_count} 个缺失值 → 填充值: {fill_val}\")\n",
    "    \n",
    "    # Missing-value count after processing\n",
    "    missing_after = df.isnull().sum().sum()\n",
    "    print(f\"\\n处理后缺失值总数: {missing_after:,}\")\n",
    "    print(f\"✅ 成功处理 {missing_before - missing_after:,} 个缺失值\")\n",
    "    \n",
    "    print(\"=\"*80)\n",
    "    \n",
    "    return df if not inplace else None\n",
    "\n",
    "\n",
    "def remove_duplicates(df, subset=None, keep='first', dataset_name=\"数据集\", inplace=False):\n",
    "    \"\"\"\n",
    "    Drop duplicate rows from a DataFrame.\n",
    "    \n",
    "    Parameters:\n",
    "    - df: pandas DataFrame to process\n",
    "    - subset: list of columns used to identify duplicates (None = all columns)\n",
    "    - keep: which occurrence to keep ['first', 'last', False]\n",
    "    - dataset_name: str, display name of the dataset\n",
    "    - inplace: bool, whether to modify df in place\n",
    "    \n",
    "    Returns:\n",
    "    - DataFrame: processed dataset (None when inplace=True)\n",
    "    \"\"\"\n",
    "    if not inplace:\n",
    "        df = df.copy()\n",
    "    \n",
    "    print(\"=\"*80)\n",
    "    print(f\"🔧 【{dataset_name}】重复值处理\")\n",
    "    print(\"=\"*80)\n",
    "    \n",
    "    total_rows = len(df)\n",
    "    dup_count = df.duplicated(subset=subset, keep=keep).sum()\n",
    "    \n",
    "    if dup_count == 0:\n",
    "        print(\"✅ 数据集无重复行\")\n",
    "    else:\n",
    "        print(f\"\\n发现重复行: {dup_count:,} ({dup_count/total_rows*100:.2f}%)\")\n",
    "        # inplace drop preserves in-place semantics when the caller asked for them\n",
    "        df.drop_duplicates(subset=subset, keep=keep, inplace=True)\n",
    "        remaining_rows = len(df)\n",
    "        print(f\"删除后行数: {total_rows:,} → {remaining_rows:,}\")\n",
    "        print(f\"✅ 成功删除 {total_rows - remaining_rows:,} 行重复数据\")\n",
    "    \n",
    "    print(\"=\"*80)\n",
    "    \n",
    "    return None if inplace else df\n",
    "\n",
    "\n",
    "def handle_outliers(df, columns=None, method='iqr', threshold=1.5, strategy='cap', \n",
    "                   dataset_name=\"数据集\", inplace=False):\n",
    "    \"\"\"\n",
    "    Detect and handle outliers.\n",
    "    \n",
    "    Parameters:\n",
    "    - df: pandas DataFrame to process\n",
    "    - columns: list of columns to check (None = all numeric columns)\n",
    "    - method: str, detection method ['iqr', 'zscore']\n",
    "    - threshold: float, cutoff (IQR rule commonly 1.5; Z-score commonly 3)\n",
    "    - strategy: str, handling strategy ['cap' (clip), 'remove' (drop rows), 'nan' (set missing)]\n",
    "    - dataset_name: str, display name of the dataset\n",
    "    - inplace: bool, whether to modify df in place\n",
    "    \n",
    "    Returns:\n",
    "    - DataFrame: processed dataset (None when inplace=True)\n",
    "    \n",
    "    NOTE(review): strategy='remove' rebinds df, so it never mutates the caller\n",
    "    even when inplace=True (original behavior preserved).\n",
    "    \"\"\"\n",
    "    if not inplace:\n",
    "        df = df.copy()\n",
    "    \n",
    "    print(\"=\"*80)\n",
    "    print(f\"🔧 【{dataset_name}】异常值处理\")\n",
    "    print(\"=\"*80)\n",
    "    \n",
    "    if columns is None:\n",
    "        columns = df.select_dtypes(include=[np.number]).columns.tolist()\n",
    "    \n",
    "    print(f\"\\n检测方法: {method.upper()} | 阈值: {threshold} | 处理策略: {strategy}\")\n",
    "    print(f\"\\n{'字段名':<30} {'异常值数':<15} {'处理方式':<30}\")\n",
    "    print(\"-\"*80)\n",
    "    \n",
    "    total_outliers = 0\n",
    "    \n",
    "    for col in columns:\n",
    "        if col not in df.columns:\n",
    "            continue\n",
    "            \n",
    "        # Detect outliers; both branches produce index-aligned bounds + mask\n",
    "        if method == 'iqr':\n",
    "            Q1 = df[col].quantile(0.25)\n",
    "            Q3 = df[col].quantile(0.75)\n",
    "            IQR = Q3 - Q1\n",
    "            lower_bound = Q1 - threshold * IQR\n",
    "            upper_bound = Q3 + threshold * IQR\n",
    "        elif method == 'zscore':\n",
    "            # Fix: the original computed z-scores on dropna(), yielding a positional\n",
    "            # mask misaligned with df's index whenever the column contains NaN.\n",
    "            mean_val = df[col].mean()\n",
    "            std_val = df[col].std(ddof=0)  # population std, matching scipy.stats.zscore's default\n",
    "            if std_val == 0 or np.isnan(std_val):\n",
    "                continue\n",
    "            lower_bound = mean_val - threshold * std_val\n",
    "            upper_bound = mean_val + threshold * std_val\n",
    "        else:\n",
    "            print(f\"⚠️  未知的检测方法: {method}\")\n",
    "            continue\n",
    "        \n",
    "        outlier_mask = (df[col] < lower_bound) | (df[col] > upper_bound)\n",
    "        n_outliers = outlier_mask.sum()\n",
    "        total_outliers += n_outliers\n",
    "        \n",
    "        if n_outliers > 0:\n",
    "            # Handle outliers\n",
    "            if strategy == 'cap':\n",
    "                # Fix: the original never actually clipped in the zscore branch\n",
    "                df.loc[df[col] < lower_bound, col] = lower_bound\n",
    "                df.loc[df[col] > upper_bound, col] = upper_bound\n",
    "                action = f\"截断至 [{lower_bound:.2f}, {upper_bound:.2f}]\"\n",
    "            elif strategy == 'remove':\n",
    "                df = df[~outlier_mask]\n",
    "                action = \"删除异常行\"\n",
    "            elif strategy == 'nan':\n",
    "                df.loc[outlier_mask, col] = np.nan\n",
    "                action = \"设为缺失值\"\n",
    "            else:\n",
    "                # Previously an unknown strategy left `action` undefined -> NameError\n",
    "                print(f\"⚠️  未知的处理策略: {strategy}\")\n",
    "                continue\n",
    "            \n",
    "            print(f\"{col:<30} {n_outliers:<15} {action:<30}\")\n",
    "    \n",
    "    if total_outliers == 0:\n",
    "        print(\"✅ 未检测到异常值\")\n",
    "    else:\n",
    "        print(f\"\\n✅ 共处理 {total_outliers:,} 个异常值\")\n",
    "    \n",
    "    print(\"=\"*80)\n",
    "    \n",
    "    return df if not inplace else None\n",
    "\n",
    "\n",
    "def optimize_dtypes(df, dataset_name=\"数据集\", inplace=False):\n",
    "    \"\"\"\n",
    "    Downcast dtypes to reduce memory usage.\n",
    "    \n",
    "    Parameters:\n",
    "    - df: pandas DataFrame to optimize\n",
    "    - dataset_name: str, display name of the dataset\n",
    "    - inplace: bool, whether to modify df in place\n",
    "    \n",
    "    Returns:\n",
    "    - DataFrame: optimized dataset (None when inplace=True)\n",
    "    \"\"\"\n",
    "    if not inplace:\n",
    "        df = df.copy()\n",
    "    \n",
    "    print(\"=\"*80)\n",
    "    print(f\"⚡ 【{dataset_name}】数据类型优化\")\n",
    "    print(\"=\"*80)\n",
    "    \n",
    "    memory_before = df.memory_usage(deep=True).sum() / 1024 / 1024\n",
    "    print(f\"\\n优化前内存占用: {memory_before:.2f} MB\")\n",
    "    \n",
    "    # Integer downcasting (values above the uint32 range stay int64)\n",
    "    int_cols = df.select_dtypes(include=['int64']).columns\n",
    "    for col in int_cols:\n",
    "        col_min = df[col].min()\n",
    "        col_max = df[col].max()\n",
    "        \n",
    "        if col_min >= 0:\n",
    "            if col_max <= 255:\n",
    "                df[col] = df[col].astype('uint8')\n",
    "            elif col_max <= 65535:\n",
    "                df[col] = df[col].astype('uint16')\n",
    "            elif col_max <= 4294967295:\n",
    "                df[col] = df[col].astype('uint32')\n",
    "        else:\n",
    "            if col_min >= -128 and col_max <= 127:\n",
    "                df[col] = df[col].astype('int8')\n",
    "            elif col_min >= -32768 and col_max <= 32767:\n",
    "                df[col] = df[col].astype('int16')\n",
    "            elif col_min >= -2147483648 and col_max <= 2147483647:\n",
    "                df[col] = df[col].astype('int32')\n",
    "    \n",
    "    # Float downcasting; NOTE(review): float32 loses precision vs float64\n",
    "    float_cols = df.select_dtypes(include=['float64']).columns\n",
    "    for col in float_cols:\n",
    "        df[col] = df[col].astype('float32')\n",
    "    \n",
    "    # Convert low-cardinality object columns to category dtype\n",
    "    object_cols = df.select_dtypes(include=['object']).columns\n",
    "    for col in object_cols:\n",
    "        num_unique = df[col].nunique()\n",
    "        num_total = len(df)\n",
    "        if num_unique / num_total < 0.5:  # unique ratio below 50%\n",
    "            df[col] = df[col].astype('category')\n",
    "    \n",
    "    memory_after = df.memory_usage(deep=True).sum() / 1024 / 1024\n",
    "    memory_saved = memory_before - memory_after\n",
    "    memory_saved_pct = memory_saved / memory_before * 100\n",
    "    \n",
    "    print(f\"优化后内存占用: {memory_after:.2f} MB\")\n",
    "    print(f\"✅ 节省内存: {memory_saved:.2f} MB ({memory_saved_pct:.1f}%)\")\n",
    "    \n",
    "    print(\"=\"*80)\n",
    "    \n",
    "    return df if not inplace else None"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "253e82bf",
   "metadata": {},
   "source": [
    "## 5. 可视化分析"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "17ede510",
   "metadata": {},
   "outputs": [],
   "source": [
    "def visualize_numeric_distribution(df, columns=None, ncols=3, figsize=(15, 4), dataset_name=\"数据集\"):\n",
    "    \"\"\"\n",
    "    Plot histograms with KDE overlays for numeric features.\n",
    "    \n",
    "    Parameters:\n",
    "    - df: pandas DataFrame to visualize\n",
    "    - columns: list of columns to plot (None = all numeric columns)\n",
    "    - ncols: int, subplots per row\n",
    "    - figsize: tuple, base figure size; height scales with the number of rows\n",
    "    - dataset_name: str, display name of the dataset\n",
    "    \"\"\"\n",
    "    if columns is None:\n",
    "        columns = df.select_dtypes(include=[np.number]).columns.tolist()\n",
    "    \n",
    "    if len(columns) == 0:\n",
    "        print(f\"⚠️  【{dataset_name}】无数值型字段可视化\")\n",
    "        return\n",
    "    \n",
    "    n_features = len(columns)\n",
    "    nrows = (n_features + ncols - 1) // ncols\n",
    "    \n",
    "    # Normalize `axes` to a flat list (plt.subplots returns a bare Axes for 1x1)\n",
    "    fig, axes = plt.subplots(nrows, ncols, figsize=(figsize[0], figsize[1] * nrows))\n",
    "    axes = axes.flatten() if nrows > 1 or ncols > 1 else [axes]\n",
    "    \n",
    "    print(f\"📊 【{dataset_name}】数值型特征分布可视化 ({n_features} 个字段)\")\n",
    "    \n",
    "    for idx, col in enumerate(columns):\n",
    "        ax = axes[idx]\n",
    "        \n",
    "        # Density-normalized histogram\n",
    "        data = df[col].dropna()\n",
    "        ax.hist(data, bins=50, alpha=0.6, color='skyblue', edgecolor='black', density=True)\n",
    "        \n",
    "        # Overlay a KDE curve (needs at least two points)\n",
    "        if len(data) > 1:\n",
    "            data.plot(kind='kde', ax=ax, color='red', linewidth=2)\n",
    "        \n",
    "        # Mark mean and median as vertical reference lines\n",
    "        mean_val = data.mean()\n",
    "        median_val = data.median()\n",
    "        ax.axvline(mean_val, color='green', linestyle='--', linewidth=1.5, label=f'均值: {mean_val:.2f}')\n",
    "        ax.axvline(median_val, color='orange', linestyle='--', linewidth=1.5, label=f'中位数: {median_val:.2f}')\n",
    "        \n",
    "        ax.set_title(f'{col}\\n偏度: {data.skew():.2f} | 峰度: {data.kurtosis():.2f}', fontsize=10)\n",
    "        ax.set_xlabel('')\n",
    "        ax.set_ylabel('密度')\n",
    "        ax.legend(fontsize=8)\n",
    "        ax.grid(True, alpha=0.3)\n",
    "    \n",
    "    # Hide unused subplots\n",
    "    for idx in range(n_features, len(axes)):\n",
    "        axes[idx].axis('off')\n",
    "    \n",
    "    plt.tight_layout()\n",
    "    plt.show()\n",
    "\n",
    "\n",
    "def visualize_boxplot(df, columns=None, ncols=3, figsize=(15, 4), dataset_name=\"数据集\"):\n",
    "    \"\"\"\n",
    "    Draw box plots and count outliers using the 1.5*IQR rule.\n",
    "    \n",
    "    Parameters:\n",
    "    - df: pandas DataFrame to visualize\n",
    "    - columns: list of columns to plot (None = all numeric columns)\n",
    "    - ncols: int, number of subplots per row\n",
    "    - figsize: tuple, size of one row of subplots\n",
    "    - dataset_name: str, display name of the dataset\n",
    "    \"\"\"\n",
    "    if columns is None:\n",
    "        columns = df.select_dtypes(include=[np.number]).columns.tolist()\n",
    "    \n",
    "    if len(columns) == 0:\n",
    "        print(f\"⚠️  【{dataset_name}】无数值型字段可视化\")\n",
    "        return\n",
    "    \n",
    "    n_features = len(columns)\n",
    "    nrows = (n_features + ncols - 1) // ncols  # ceiling division\n",
    "    \n",
    "    fig, axes = plt.subplots(nrows, ncols, figsize=(figsize[0], figsize[1] * nrows))\n",
    "    axes = axes.flatten() if nrows > 1 or ncols > 1 else [axes]\n",
    "    \n",
    "    print(f\"📦 【{dataset_name}】箱线图异常值检测 ({n_features} 个字段)\")\n",
    "    \n",
    "    for idx, col in enumerate(columns):\n",
    "        ax = axes[idx]\n",
    "        \n",
    "        data = df[col].dropna()\n",
    "        # Guard: an all-NaN column would fail in boxplot() and divide by zero below\n",
    "        if len(data) == 0:\n",
    "            ax.set_title(f'{col}\\n(全部缺失)', fontsize=10)\n",
    "            ax.axis('off')\n",
    "            continue\n",
    "        \n",
    "        bp = ax.boxplot(data, vert=True, patch_artist=True)\n",
    "        \n",
    "        # Styling: light box, bold red median\n",
    "        bp['boxes'][0].set_facecolor('lightblue')\n",
    "        bp['boxes'][0].set_alpha(0.7)\n",
    "        bp['medians'][0].set_color('red')\n",
    "        bp['medians'][0].set_linewidth(2)\n",
    "        \n",
    "        # Count values outside the 1.5*IQR fences\n",
    "        Q1 = data.quantile(0.25)\n",
    "        Q3 = data.quantile(0.75)\n",
    "        IQR = Q3 - Q1\n",
    "        lower_bound = Q1 - 1.5 * IQR\n",
    "        upper_bound = Q3 + 1.5 * IQR\n",
    "        n_outliers = ((data < lower_bound) | (data > upper_bound)).sum()\n",
    "        outlier_pct = n_outliers / len(data) * 100\n",
    "        \n",
    "        ax.set_title(f'{col}\\n异常值: {n_outliers} ({outlier_pct:.1f}%)', fontsize=10)\n",
    "        ax.set_ylabel('值')\n",
    "        ax.grid(True, alpha=0.3, axis='y')\n",
    "    \n",
    "    # Hide unused subplot slots\n",
    "    for idx in range(n_features, len(axes)):\n",
    "        axes[idx].axis('off')\n",
    "    \n",
    "    plt.tight_layout()\n",
    "    plt.show()\n",
    "\n",
    "\n",
    "def visualize_correlation_matrix(df, method='pearson', figsize=(12, 10), dataset_name=\"数据集\", threshold=0.7):\n",
    "    \"\"\"\n",
    "    Draw a correlation heatmap and report highly correlated feature pairs.\n",
    "    \n",
    "    Parameters:\n",
    "    - df: pandas DataFrame to visualize\n",
    "    - method: str, correlation method ['pearson', 'spearman', 'kendall']\n",
    "    - figsize: tuple, figure size\n",
    "    - dataset_name: str, display name of the dataset\n",
    "    - threshold: float, |corr| above which a pair is reported\n",
    "      (default 0.7 matches the previously hard-coded value)\n",
    "    \"\"\"\n",
    "    numeric_cols = df.select_dtypes(include=[np.number]).columns\n",
    "    \n",
    "    if len(numeric_cols) < 2:\n",
    "        print(f\"⚠️  【{dataset_name}】数值型字段少于2个，无法计算相关性\")\n",
    "        return\n",
    "    \n",
    "    print(f\"🔥 【{dataset_name}】特征相关性热图 (方法: {method})\")\n",
    "    \n",
    "    # Correlation matrix over numeric columns only\n",
    "    corr_matrix = df[numeric_cols].corr(method=method)\n",
    "    \n",
    "    plt.figure(figsize=figsize)\n",
    "    \n",
    "    # Local import: seaborn is only needed for this heatmap\n",
    "    import seaborn as sns\n",
    "    mask = np.triu(np.ones_like(corr_matrix, dtype=bool))  # show the lower triangle only\n",
    "    \n",
    "    sns.heatmap(corr_matrix, mask=mask, annot=True, fmt='.2f', \n",
    "                cmap='coolwarm', center=0, square=True, linewidths=0.5,\n",
    "                cbar_kws={\"shrink\": 0.8}, vmin=-1, vmax=1)\n",
    "    \n",
    "    plt.title(f'{dataset_name} - 特征相关性矩阵 ({method.upper()})', fontsize=14, pad=20)\n",
    "    plt.tight_layout()\n",
    "    plt.show()\n",
    "    \n",
    "    # List feature pairs whose |corr| exceeds the threshold\n",
    "    print(f\"\\n🔍 高相关性特征对 (|相关系数| > {threshold}):\")\n",
    "    print(f\"   {'特征1':<30} {'特征2':<30} {'相关系数':<15}\")\n",
    "    print(f\"   {'-'*80}\")\n",
    "    \n",
    "    high_corr_pairs = []\n",
    "    for i in range(len(corr_matrix.columns)):\n",
    "        for j in range(i+1, len(corr_matrix.columns)):\n",
    "            corr_val = corr_matrix.iloc[i, j]\n",
    "            if abs(corr_val) > threshold:\n",
    "                feat1 = corr_matrix.columns[i]\n",
    "                feat2 = corr_matrix.columns[j]\n",
    "                high_corr_pairs.append((feat1, feat2, corr_val))\n",
    "                print(f\"   {feat1:<30} {feat2:<30} {corr_val:<15.3f}\")\n",
    "    \n",
    "    if len(high_corr_pairs) == 0:\n",
    "        print(\"   ✅ 无高相关性特征对\")\n",
    "\n",
    "\n",
    "def visualize_categorical_distribution(df, columns=None, top_n=10, ncols=2, \n",
    "                                       figsize=(15, 5), dataset_name=\"数据集\"):\n",
    "    \"\"\"\n",
    "    Visualize categorical feature distributions as horizontal bar charts.\n",
    "    \n",
    "    Parameters:\n",
    "    - df: pandas DataFrame to visualize\n",
    "    - columns: list of columns to plot (None = all object/category columns)\n",
    "    - top_n: int, number of most frequent categories to show per column\n",
    "    - ncols: int, number of subplots per row\n",
    "    - figsize: tuple, size of one row of subplots\n",
    "    - dataset_name: str, display name of the dataset\n",
    "    \"\"\"\n",
    "    if columns is None:\n",
    "        columns = df.select_dtypes(include=['object', 'category']).columns.tolist()\n",
    "    \n",
    "    if len(columns) == 0:\n",
    "        print(f\"⚠️  【{dataset_name}】无类别型字段可视化\")\n",
    "        return\n",
    "    \n",
    "    n_features = len(columns)\n",
    "    nrows = (n_features + ncols - 1) // ncols\n",
    "    \n",
    "    fig, axes = plt.subplots(nrows, ncols, figsize=(figsize[0], figsize[1] * nrows))\n",
    "    axes = axes.flatten() if nrows > 1 or ncols > 1 else [axes]\n",
    "    \n",
    "    print(f\"📊 【{dataset_name}】类别型特征分布可视化 ({n_features} 个字段)\")\n",
    "    \n",
    "    for ax, col in zip(axes, columns):\n",
    "        # Frequencies of the top-N most common categories\n",
    "        top_counts = df[col].value_counts().head(top_n)\n",
    "        \n",
    "        top_counts.plot(kind='barh', ax=ax, color='skyblue', edgecolor='black')\n",
    "        \n",
    "        ax.set_title(f'{col}\\n唯一值数: {df[col].nunique()}', fontsize=10)\n",
    "        ax.set_xlabel('频次')\n",
    "        ax.set_ylabel('')\n",
    "        ax.grid(True, alpha=0.3, axis='x')\n",
    "        \n",
    "        # Annotate each bar with its count\n",
    "        for bar_pos, (val, count) in enumerate(top_counts.items()):\n",
    "            ax.text(count, bar_pos, f' {count:,}', va='center', fontsize=8)\n",
    "    \n",
    "    # Switch off any unused subplot slots\n",
    "    for spare_ax in axes[n_features:]:\n",
    "        spare_ax.axis('off')\n",
    "    \n",
    "    plt.tight_layout()\n",
    "    plt.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "505c6044",
   "metadata": {},
   "source": [
    "## 6. 完整数据探查流程（一键执行）"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "01a4c455",
   "metadata": {},
   "source": [
    "### ✨ 优化内容总结\n",
    "\n",
    "本次优化主要包括：\n",
    "\n",
    "1. **🔑 主键字段自动识别与排除**\n",
    "   - 所有统计分析方法自动排除主键字段（默认CUST_NO）\n",
    "   - 避免主键干扰统计分析结果\n",
    "\n",
    "2. **📝 文本型字段识别**\n",
    "   - 新增`analyze_text_features()`方法专门分析文本型字段\n",
    "   - 自动区分类别型和文本型（唯一值比例>50%且平均长度>10判定为文本型）\n",
    "   - 提供文本长度统计、词频分析等\n",
    "\n",
    "3. **🎯 字段类型细分**\n",
    "   - 数值型：用于数值计算和统计\n",
    "   - 类别型：低唯一值比例的离散字段\n",
    "   - 文本型：高唯一值比例的长文本字段（如页面标题、摘要）\n",
    "   - 日期型：时间序列分析\n",
    "\n",
    "4. **📊 更准确的统计分析**\n",
    "   - 所有方法添加`primary_key`参数支持\n",
    "   - 自动识别并适当处理不同类型字段\n",
    "   - 优化的可视化展示"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "555ce883",
   "metadata": {},
   "outputs": [],
   "source": [
    "def comprehensive_data_exploration(df, dataset_name=\"数据集\", primary_key='CUST_NO', enable_visualization=True):\n",
    "    \"\"\"\n",
    "    Run the full data-exploration pipeline (all analyses) in one call.\n",
    "    \n",
    "    Parameters:\n",
    "    - df: pandas DataFrame to explore\n",
    "    - dataset_name: str, display name of the dataset\n",
    "    - primary_key: str, primary-key column name (default 'CUST_NO')\n",
    "    - enable_visualization: bool, whether to draw the plots\n",
    "    \n",
    "    Returns:\n",
    "    - dict holding the result of every analysis step\n",
    "    \"\"\"\n",
    "    print(\"\\n\" + \"🚀\"*40)\n",
    "    print(f\"{'='*80}\")\n",
    "    print(f\"{'开始完整数据探查流程':^80}\")\n",
    "    print(f\"{'='*80}\")\n",
    "    print(\"🚀\"*40 + \"\\n\")\n",
    "    \n",
    "    results = {}\n",
    "    \n",
    "    # 1. Basic information\n",
    "    print(\"\\n\" + \"►\"*40)\n",
    "    results['basic_info'] = explore_basic_info(df, dataset_name, primary_key)\n",
    "    \n",
    "    # 2. Data quality\n",
    "    print(\"\\n\" + \"►\"*40)\n",
    "    results['quality'] = analyze_data_quality(df, dataset_name, primary_key)\n",
    "    \n",
    "    # 3. Numeric feature statistics\n",
    "    print(\"\\n\" + \"►\"*40)\n",
    "    results['numeric_stats'] = analyze_numeric_features(df, dataset_name, primary_key)\n",
    "    \n",
    "    # 4. Categorical features\n",
    "    print(\"\\n\" + \"►\"*40)\n",
    "    results['categorical_stats'] = analyze_categorical_features(df, dataset_name, primary_key)\n",
    "    \n",
    "    # 5. Text features\n",
    "    print(\"\\n\" + \"►\"*40)\n",
    "    results['text_stats'] = analyze_text_features(df, dataset_name, primary_key)\n",
    "    \n",
    "    # 6. Visualization\n",
    "    if enable_visualization:\n",
    "        print(\"\\n\" + \"►\"*40)\n",
    "        print(\"📊 开始可视化分析...\")\n",
    "        \n",
    "        # Numeric columns with the primary key excluded\n",
    "        numeric_cols = [col for col in df.select_dtypes(include=[np.number]).columns if col != primary_key]\n",
    "        if len(numeric_cols) > 0:\n",
    "            # Cap at 9 columns (both branches of the former if/else made identical calls)\n",
    "            if len(numeric_cols) > 9:\n",
    "                print(f\"   ⚠️  数值型字段过多({len(numeric_cols)}个)，只显示前9个\")\n",
    "            visualize_numeric_distribution(df, numeric_cols[:9], dataset_name=dataset_name)\n",
    "            visualize_boxplot(df, numeric_cols[:9], dataset_name=dataset_name)\n",
    "            \n",
    "            # Correlation heatmap only for 2..20 numeric columns\n",
    "            if len(numeric_cols) >= 2 and len(numeric_cols) <= 20:\n",
    "                visualize_correlation_matrix(df[numeric_cols], dataset_name=dataset_name)\n",
    "            elif len(numeric_cols) > 20:\n",
    "                print(f\"   ⚠️  数值型字段过多({len(numeric_cols)}个)，相关性矩阵过大，跳过\")\n",
    "        \n",
    "        # Categorical columns (primary key and text-type columns excluded)\n",
    "        text_cols = results['basic_info'].get('text_cols', [])\n",
    "        categorical_cols = [col for col in df.select_dtypes(include=['object', 'category']).columns\n",
    "                            if col != primary_key and col not in text_cols]\n",
    "        \n",
    "        if len(categorical_cols) > 0:\n",
    "            # Cap at 6 columns, again collapsing formerly duplicated branches\n",
    "            if len(categorical_cols) > 6:\n",
    "                print(f\"   ⚠️  类别型字段过多({len(categorical_cols)}个)，只显示前6个\")\n",
    "            visualize_categorical_distribution(df, categorical_cols[:6], dataset_name=dataset_name)\n",
    "    \n",
    "    # 7. Summary report\n",
    "    print(\"\\n\" + \"=\"*80)\n",
    "    print(f\"📋 【{dataset_name}】数据探查报告摘要\")\n",
    "    print(\"=\"*80)\n",
    "    \n",
    "    print(f\"\\n✅ 数据规模: {results['basic_info']['shape'][0]:,} 行 × {results['basic_info']['shape'][1]} 列\")\n",
    "    print(f\"✅ 内存占用: {results['basic_info']['memory_mb']:.2f} MB\")\n",
    "    print(f\"✅ 主键字段: {primary_key}\")\n",
    "    print(f\"✅ 数值型字段: {len(results['basic_info']['numeric_cols'])} 个\")\n",
    "    print(f\"✅ 类别型字段: {len(results['basic_info']['categorical_cols'])} 个\")\n",
    "    print(f\"✅ 文本型字段: {len(results['basic_info']['text_cols'])} 个\")\n",
    "    print(f\"✅ 日期型字段: {len(results['basic_info']['datetime_cols'])} 个\")\n",
    "    \n",
    "    if results['quality']['n_duplicates'] > 0:\n",
    "        print(f\"⚠️  重复行: {results['quality']['n_duplicates']:,} ({results['quality']['duplicate_rate']:.2f}%)\")\n",
    "    else:\n",
    "        print(f\"✅ 无重复行\")\n",
    "    \n",
    "    if len(results['quality']['missing_stats']) > 0:\n",
    "        print(f\"⚠️  存在缺失值的字段: {len(results['quality']['missing_stats'])} 个\")\n",
    "    else:\n",
    "        print(f\"✅ 无缺失值\")\n",
    "    \n",
    "    if results['quality']['outlier_info']:\n",
    "        total_outliers = sum(info['count'] for info in results['quality']['outlier_info'].values())\n",
    "        print(f\"⚠️  异常值总数: {total_outliers:,}\")\n",
    "    else:\n",
    "        print(f\"✅ 无明显异常值\")\n",
    "    \n",
    "    print(\"\\n\" + \"=\"*80)\n",
    "    print(\"🎉 数据探查流程完成！\")\n",
    "    print(\"=\"*80 + \"\\n\")\n",
    "    \n",
    "    return results"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "e9c14502",
   "metadata": {},
   "source": [
    "---\n",
    "\n",
    "# 使用示例\n",
    "\n",
    "以下展示如何使用上述通用方法对单个数据集进行探查与清洗"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a8a32d05",
   "metadata": {},
   "source": [
    "## 示例1: 完整探查流程（推荐使用）"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "83394140",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Assumes the data was already loaded via load_data_from_directory\n",
    "# e.g. variables such as A_nature_data and A_asset_data already exist\n",
    "\n",
    "# Full exploration of the natural-attributes table\n",
    "if 'A_nature_data' in globals():\n",
    "    nature_results = comprehensive_data_exploration(\n",
    "        A_nature_data, \n",
    "        dataset_name=\"自然属性信息表(NATURE)\",\n",
    "        primary_key='CUST_NO',  # primary-key column to exclude from the analysis\n",
    "        enable_visualization=True\n",
    "    )\n",
    "\n",
    "# Explore the page-view detail table (contains text-type columns)\n",
    "if 'A_pageview_dtl_data' in globals():\n",
    "    pageview_results = comprehensive_data_exploration(\n",
    "        A_pageview_dtl_data,\n",
    "        dataset_name=\"掌银页面访问明细表(MB_PAGEVIEW_DTL)\",\n",
    "        primary_key='CUST_NO',\n",
    "        enable_visualization=False  # many text columns; keep automatic plotting off\n",
    "    )"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "fcfda23b",
   "metadata": {},
   "source": [
    "## 示例2: 单独使用各个方法"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "68a78b40",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Each analysis helper can also be called on its own\n",
    "\n",
    "# Example: basic info only (field types are detected automatically)\n",
    "if 'A_asset_data' in globals():\n",
    "    basic_info = explore_basic_info(A_asset_data, \"资产信息表(ASSET)\", primary_key='CUST_NO')\n",
    "\n",
    "# Example: data-quality analysis only\n",
    "if 'A_asset_data' in globals():\n",
    "    quality_report = analyze_data_quality(A_asset_data, \"资产信息表(ASSET)\", primary_key='CUST_NO')\n",
    "\n",
    "# Example: numeric features only (primary key excluded automatically)\n",
    "if 'A_asset_data' in globals():\n",
    "    numeric_stats = analyze_numeric_features(A_asset_data, \"资产信息表(ASSET)\", primary_key='CUST_NO')\n",
    "\n",
    "# Example: text columns only (e.g. the page-view table)\n",
    "if 'A_pageview_dtl_data' in globals():\n",
    "    text_stats = analyze_text_features(A_pageview_dtl_data, \"掌银页面访问明细表\", primary_key='CUST_NO')\n",
    "\n",
    "# Example: distribution plots only\n",
    "if 'A_asset_data' in globals():\n",
    "    # Numeric columns, primary key excluded\n",
    "    numeric_cols = [col for col in A_asset_data.select_dtypes(include=[np.number]).columns if col != 'CUST_NO']\n",
    "    visualize_numeric_distribution(A_asset_data, columns=numeric_cols[:9], dataset_name=\"资产信息表(ASSET)\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "243837ca",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "================================================================================\n",
      "📊 【页面访问明细表(MB_PAGEVIEW_DTL)】基础信息探查\n",
      "================================================================================\n",
      "\n",
      "1️⃣  数据形状:\n",
      "   ├─ 行数（样本数）: 345,313\n",
      "   └─ 列数（特征数）: 6\n",
      "\n",
      "2️⃣  数据类型分布:\n",
      "   ├─ object: 4 列 (66.7%)\n",
      "   ├─ int64: 1 列 (16.7%)\n",
      "   ├─ float64: 1 列 (16.7%)\n",
      "\n",
      "3️⃣  内存占用:\n",
      "   └─ 总内存: 122.51 MB\n",
      "\n",
      "4️⃣  列信息详情:\n",
      "   列名                             字段类型         数据类型            非空数        唯一值数       唯一率      内存(KB)    \n",
      "   --------------------------------------------------------------------------------------------------------------\n",
      "   APSDTRDAT                      📊数值型         int64           345313     91         0.03%    2697.88   \n",
      "   CUST_NO                        🔑主键          object          345313     5616       1.63%    30012.68  \n",
      "   APSDTRCOD                      🏷️类别型        object          345313     154        0.04%    30012.68  \n",
      "   APSDTRAMT                      📊数值型         float64         345313     48947      14.17%   2697.88   \n",
      "   APSDABS                        🏷️类别型        object          345313     1527       0.44%    30012.68  \n",
      "   APSDTRCHL                      🏷️类别型        object          345313     29         0.01%    30012.68  \n",
      "\n",
      "5️⃣  字段类型统计（排除主键 CUST_NO）:\n",
      "   ├─ 数值型字段: 2 个\n",
      "   │  └─ APSDTRDAT, APSDTRAMT\n",
      "   ├─ 类别型字段: 3 个\n",
      "   │  └─ APSDTRCOD, APSDABS, APSDTRCHL\n",
      "   ├─ 文本型字段: 0 个\n",
      "   └─ 日期型字段: 0 个\n",
      "\n",
      "================================================================================\n"
     ]
    }
   ],
   "source": [
    "# Example exploration on real data (runs only if the table is loaded)\n",
    "# Fix: the guard checked 'MB_PAGEVIEW_DTL_data' but the call passed TR_APS_DTL_data,\n",
    "# which raises NameError when only the page-view table is loaded; use the guarded variable\n",
    "if 'MB_PAGEVIEW_DTL_data' in globals():\n",
    "    pageView_info = explore_basic_info(MB_PAGEVIEW_DTL_data, \"页面访问明细表(MB_PAGEVIEW_DTL)\", primary_key='CUST_NO')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "id": "7f73e172",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "🔍 MB_PAGEVIEW_DTL_data 字段详细分析:\n",
      "================================================================================\n",
      "OPERATION_DATE:\n",
      "  唯一值数: 88\n",
      "  唯一值比例: 0.02%\n",
      "  平均长度: 10.0\n",
      "  判定结果: 🏷️类别型\n",
      "  示例值: 2025-04-07\n",
      "\n",
      "PAGE_TITLE:\n",
      "  唯一值数: 988\n",
      "  唯一值比例: 0.27%\n",
      "  平均长度: 32.0\n",
      "  判定结果: 🏷️类别型\n",
      "  示例值: dc127d306179477fef4f3a9378dc550b\n",
      "\n",
      "REFERRER_TITLE:\n",
      "  唯一值数: 992\n",
      "  唯一值比例: 0.27%\n",
      "  平均长度: 32.0\n",
      "  判定结果: 🏷️类别型\n",
      "  示例值: 0e5c9561153e8b3fd936b94a5641c8e1\n",
      "\n",
      "MODEL_NAME:\n",
      "  唯一值数: 105\n",
      "  唯一值比例: 0.03%\n",
      "  平均长度: 32.0\n",
      "  判定结果: 🏷️类别型\n",
      "  示例值: c5b386b7a6348a2f1ba70f2259fb827e\n",
      "\n",
      "\n",
      "💡 结论:\n",
      "  如果唯一值比例 > 50% 且 平均长度 > 10 → 文本型\n",
      "  否则 → 类别型\n",
      "REFERRER_TITLE:\n",
      "  唯一值数: 992\n",
      "  唯一值比例: 0.27%\n",
      "  平均长度: 32.0\n",
      "  判定结果: 🏷️类别型\n",
      "  示例值: 0e5c9561153e8b3fd936b94a5641c8e1\n",
      "\n",
      "MODEL_NAME:\n",
      "  唯一值数: 105\n",
      "  唯一值比例: 0.03%\n",
      "  平均长度: 32.0\n",
      "  判定结果: 🏷️类别型\n",
      "  示例值: c5b386b7a6348a2f1ba70f2259fb827e\n",
      "\n",
      "\n",
      "💡 结论:\n",
      "  如果唯一值比例 > 50% 且 平均长度 > 10 → 文本型\n",
      "  否则 → 类别型\n"
     ]
    }
   ],
   "source": [
    "# Per-column uniqueness ratio and average string length for MB_PAGEVIEW_DTL_data\n",
    "print(\"🔍 MB_PAGEVIEW_DTL_data 字段详细分析:\")\n",
    "print(\"=\"*80)\n",
    "\n",
    "for col in MB_PAGEVIEW_DTL_data.columns:\n",
    "    if col == 'CUST_NO':\n",
    "        continue\n",
    "    n_unique = MB_PAGEVIEW_DTL_data[col].nunique()\n",
    "    ratio = n_unique / len(MB_PAGEVIEW_DTL_data)\n",
    "    valid_values = MB_PAGEVIEW_DTL_data[col].dropna()\n",
    "    avg_length = valid_values.astype(str).str.len().mean() if len(valid_values) > 0 else 0\n",
    "    \n",
    "    # Classification rule: text-type if highly unique AND long values, categorical otherwise\n",
    "    field_type = \"📝文本型\" if ratio > 0.5 and avg_length > 10 else \"🏷️类别型\"\n",
    "    \n",
    "    print(f\"{col}:\")\n",
    "    print(f\"  唯一值数: {n_unique:,}\")\n",
    "    print(f\"  唯一值比例: {ratio:.2%}\")\n",
    "    print(f\"  平均长度: {avg_length:.1f}\")\n",
    "    print(f\"  判定结果: {field_type}\")\n",
    "    print(f\"  示例值: {MB_PAGEVIEW_DTL_data[col].iloc[0]}\")\n",
    "    print()\n",
    "\n",
    "print(\"\\n💡 结论:\")\n",
    "print(\"  如果唯一值比例 > 50% 且 平均长度 > 10 → 文本型\")\n",
    "print(\"  否则 → 类别型\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "id": "95f33673",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "================================================================================\n",
      "🔍 TR_APS_DTL_data 字段详细分析:\n",
      "================================================================================\n",
      "\n",
      "APSDTRDAT:\n",
      "  唯一值数: 91\n",
      "  唯一值比例: 0.03%\n",
      "  平均长度: 8.0\n",
      "  判定结果: 🏷️类别型\n",
      "  示例值: [20250402, 20250402, 20250402]\n",
      "\n",
      "APSDTRCOD:\n",
      "  唯一值数: 154\n",
      "  唯一值比例: 0.04%\n",
      "  平均长度: 32.0\n",
      "  判定结果: 🏷️类别型\n",
      "  示例值: ['566a1fdfd622806c20378b970c4cbff3', '566a1fdfd622806c20378b970c4cbff3', '566a1fdfd622806c20378b970c4cbff3']\n",
      "\n",
      "APSDTRAMT:\n",
      "  唯一值数: 48,947\n",
      "  唯一值比例: 14.17%\n",
      "  平均长度: 5.4\n",
      "  判定结果: 🏷️类别型\n",
      "  示例值: [60000.0, 2000.0, 1000.0]\n",
      "\n",
      "APSDABS:\n",
      "  唯一值数: 1,527\n",
      "  唯一值比例: 0.44%\n",
      "  平均长度: 32.0\n",
      "  判定结果: 🏷️类别型\n",
      "  示例值: ['acaf665ffd5ef2fe03b0daaa12d79aab', 'acaf665ffd5ef2fe03b0daaa12d79aab', 'acaf665ffd5ef2fe03b0daaa12d79aab']\n",
      "\n",
      "APSDTRCHL:\n",
      "  唯一值数: 29\n",
      "  唯一值比例: 0.01%\n",
      "  平均长度: 32.0\n",
      "  判定结果: 🏷️类别型\n",
      "  示例值: ['f1811258c561f96461a243415727b1f5', 'f1811258c561f96461a243415727b1f5', 'f1811258c561f96461a243415727b1f5']\n"
     ]
    }
   ],
   "source": [
    "# Per-column field-type check for TR_APS_DTL_data\n",
    "print(\"\\n\" + \"=\"*80)\n",
    "print(\"🔍 TR_APS_DTL_data 字段详细分析:\")\n",
    "print(\"=\"*80)\n",
    "\n",
    "for col in TR_APS_DTL_data.columns:\n",
    "    if col == 'CUST_NO':\n",
    "        continue\n",
    "    n_unique = TR_APS_DTL_data[col].nunique()\n",
    "    ratio = n_unique / len(TR_APS_DTL_data)\n",
    "    valid_values = TR_APS_DTL_data[col].dropna()\n",
    "    avg_length = valid_values.astype(str).str.len().mean() if len(valid_values) > 0 else 0\n",
    "    \n",
    "    # Classification rule: text-type if highly unique AND long values, categorical otherwise\n",
    "    field_type = \"📝文本型\" if ratio > 0.5 and avg_length > 10 else \"🏷️类别型\"\n",
    "    \n",
    "    print(f\"\\n{col}:\")\n",
    "    print(f\"  唯一值数: {n_unique:,}\")\n",
    "    print(f\"  唯一值比例: {ratio:.2%}\")\n",
    "    print(f\"  平均长度: {avg_length:.1f}\")\n",
    "    print(f\"  判定结果: {field_type}\")\n",
    "    \n",
    "    # Show the first three non-null sample values\n",
    "    sample_values = valid_values.head(3).tolist()\n",
    "    print(f\"  示例值: {sample_values[:3]}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "id": "05157965",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "================================================================================\n",
      "🧪 测试真实文本数据识别:\n",
      "================================================================================\n",
      "\n",
      "字段分析:\n",
      "================================================================================\n",
      "📊 【真实文本测试数据】基础信息探查\n",
      "================================================================================\n",
      "\n",
      "1️⃣  数据形状:\n",
      "   ├─ 行数（样本数）: 100\n",
      "   └─ 列数（特征数）: 5\n",
      "\n",
      "2️⃣  数据类型分布:\n",
      "   ├─ object: 4 列 (80.0%)\n",
      "   ├─ int64: 1 列 (20.0%)\n",
      "\n",
      "3️⃣  内存占用:\n",
      "   └─ 总内存: 0.04 MB\n",
      "\n",
      "4️⃣  列信息详情:\n",
      "   列名                             字段类型            数据类型            非空数          唯一值数         内存(KB)    \n",
      "   ----------------------------------------------------------------------------------------------------\n",
      "   CUST_NO                        🔑主键             int64           100          100          0.91      \n",
      "   PAGE_TITLE                     🏷️类别型           object          100          100          12.01     \n",
      "   ARTICLE_CONTENT                📝文本型            object          100          100          27.15     \n",
      "   PROD_CODE                      🏷️类别型           object          100          3            6.08      \n",
      "   USER_COMMENT                   📝文本型            object          100          100          20.99     \n",
      "\n",
      "5️⃣  字段类型统计（排除主键 CUST_NO）:\n",
      "   ├─ 数值型字段: 0 个\n",
      "   ├─ 类别型字段: 2 个\n",
      "   │  └─ PAGE_TITLE, PROD_CODE\n",
      "   ├─ 文本型字段: 2 个\n",
      "   │  └─ ARTICLE_CONTENT, USER_COMMENT\n",
      "   └─ 日期型字段: 0 个\n",
      "\n",
      "================================================================================\n",
      "\n",
      "================================================================================\n",
      "✅ 验证结果:\n",
      "================================================================================\n",
      "文本型字段: ['ARTICLE_CONTENT', 'USER_COMMENT']\n",
      "类别型字段: ['PAGE_TITLE', 'PROD_CODE']\n",
      "\n",
      "预期结果:\n",
      "  文本型: ['ARTICLE_CONTENT', 'USER_COMMENT'] - 高唯一率(100%) + 长文本(>10)\n",
      "  类别型: ['PAGE_TITLE', 'PROD_CODE'] - 低唯一率或短文本\n"
     ]
    }
   ],
   "source": [
    "# Build a test DataFrame with genuinely text-like columns to validate the classifier\n",
    "print(\"\\n\" + \"=\"*80)\n",
    "print(\"🧪 测试真实文本数据识别:\")\n",
    "print(\"=\"*80)\n",
    "\n",
    "test_real_text = pd.DataFrame({\n",
    "    'CUST_NO': range(1, 101),\n",
    "    'PAGE_TITLE': [f'用户登录页面-{i}' for i in range(100)],  # unique per row but short (<=10 chars) -> categorical\n",
    "    'ARTICLE_CONTENT': [f'这是一篇关于银行服务的详细文章内容，介绍了各种产品和服务，包含大量文字描述_{i}' for i in range(100)],  # high uniqueness, long text -> text-type\n",
    "    'PROD_CODE': np.random.choice(['P001', 'P002', 'P003'], 100),  # low uniqueness, short codes -> categorical\n",
    "    'USER_COMMENT': [f'用户评论内容{i}：这个产品很好用，功能齐全，值得推荐！' for i in range(100)],  # high uniqueness, long text -> text-type\n",
    "})\n",
    "\n",
    "print(\"\\n字段分析:\")\n",
    "result = explore_basic_info(test_real_text, \"真实文本测试数据\", primary_key='CUST_NO')\n",
    "\n",
    "print(\"\\n\" + \"=\"*80)\n",
    "print(\"✅ 验证结果:\")\n",
    "print(\"=\"*80)\n",
    "print(f\"文本型字段: {result['text_cols']}\")\n",
    "print(f\"类别型字段: {result['categorical_cols']}\")\n",
    "print(f\"\\n预期结果:\")\n",
    "print(f\"  文本型: ['ARTICLE_CONTENT', 'USER_COMMENT'] - 高唯一率(100%) + 长文本(>10)\")\n",
    "print(f\"  类别型: ['PAGE_TITLE', 'PROD_CODE'] - 低唯一率或短文本\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "055d9107",
   "metadata": {},
   "source": [
    "## ✅ 测试修复后的字段类型识别\n",
    "\n",
    "以下测试单元格用于验证文本型字段识别和列信息不重复的问题是否已修复"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "961a33eb",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Test 1: build mock data covering primary-key, numeric, categorical and text fields\n",
    "# NOTE(review): numpy/pandas are already imported at the top of the notebook;\n",
    "# the re-imports below are harmless but redundant\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "\n",
    "test_data = pd.DataFrame({\n",
    "    'CUST_NO': range(1, 101),  # primary key\n",
    "    'AGE': np.random.randint(20, 60, 100),  # numeric\n",
    "    'BALANCE': np.random.uniform(1000, 100000, 100),  # numeric\n",
    "    'GENDER': np.random.choice(['M', 'F'], 100),  # categorical (low uniqueness)\n",
    "    'PROD_CODE': np.random.choice(['P001', 'P002', 'P003'], 100),  # categorical\n",
    "    'PAGE_TITLE': [f'这是页面标题内容{i}包含一些描述性文字' for i in range(100)],  # text (high uniqueness + long values)\n",
    "    'REFERRER_TITLE': [f'来源页面标题{i}详细说明内容较长' for i in range(100)],  # text\n",
    "    'MODEL_NAME': [f'手机型号名称{i}' for i in range(100)],  # intended text; NOTE(review): avg length ~7-8 chars may fall below the >10 threshold\n",
    "    'STATUS': np.random.choice(['A', 'I'], 100),  # categorical (short values)\n",
    "})\n",
    "\n",
    "print(\"🔍 测试数据集创建完成，包含:\")\n",
    "print(f\"  - CUST_NO: 主键字段\")\n",
    "print(f\"  - AGE, BALANCE: 数值型字段\")\n",
    "print(f\"  - GENDER, PROD_CODE, STATUS: 类别型字段（低唯一率或短文本）\")\n",
    "print(f\"  - PAGE_TITLE, REFERRER_TITLE, MODEL_NAME: 文本型字段（高唯一率且长文本）\")\n",
    "print(f\"\\n唯一值比例:\")\n",
    "for col in test_data.columns:\n",
    "    unique_ratio = test_data[col].nunique() / len(test_data)\n",
    "    avg_len = test_data[col].astype(str).str.len().mean()\n",
    "    print(f\"  {col}: {unique_ratio:.2%} (平均长度: {avg_len:.1f})\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d5053823",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Test 2: run explore_basic_info on the mock data and validate its classification\n",
    "test_result = explore_basic_info(test_data, \"测试数据集\", primary_key='CUST_NO')\n",
    "\n",
    "print(\"\\n\" + \"=\"*80)\n",
    "print(\"📊 返回结果检查:\")\n",
    "print(\"=\"*80)\n",
    "print(f\"数值型字段: {test_result['numeric_cols']}\")\n",
    "print(f\"类别型字段: {test_result['categorical_cols']}\")\n",
    "print(f\"文本型字段: {test_result['text_cols']}\")\n",
    "print(f\"日期型字段: {test_result['datetime_cols']}\")\n",
    "\n",
    "print(\"\\n\" + \"=\"*80)\n",
    "print(\"✅ 验证结果:\")\n",
    "print(\"=\"*80)\n",
    "# Expected classification for the mock columns defined in the previous cell\n",
    "expected_text = ['PAGE_TITLE', 'REFERRER_TITLE', 'MODEL_NAME']\n",
    "expected_categorical = ['GENDER', 'PROD_CODE', 'STATUS']\n",
    "expected_numeric = ['AGE', 'BALANCE']\n",
    "\n",
    "# Order-insensitive comparison via sets\n",
    "text_correct = set(test_result['text_cols']) == set(expected_text)\n",
    "cat_correct = set(test_result['categorical_cols']) == set(expected_categorical)\n",
    "num_correct = set(test_result['numeric_cols']) == set(expected_numeric)\n",
    "\n",
    "print(f\"✅ 文本型识别正确: {text_correct} (期望: {expected_text})\")\n",
    "print(f\"✅ 类别型识别正确: {cat_correct} (期望: {expected_categorical})\")\n",
    "print(f\"✅ 数值型识别正确: {num_correct} (期望: {expected_numeric})\")\n",
    "print(f\"✅ 主键已排除: {'CUST_NO' not in test_result['analysis_cols']}\")\n",
    "\n",
    "if text_correct and cat_correct and num_correct:\n",
    "    print(\"\\n🎉 所有字段类型识别正确！\")\n",
    "else:\n",
    "    print(\"\\n⚠️ 字段类型识别存在问题，请检查！\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6445acfe",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Test 3: exercise the text-feature analysis helper on the mock data\n",
    "text_analysis_result = analyze_text_features(test_data, \"测试数据集\", primary_key='CUST_NO')\n",
    "\n",
    "if text_analysis_result:\n",
    "    print(\"\\n✅ 文本特征分析执行成功！\")\n",
    "else:\n",
    "    print(\"\\n⚠️ 文本特征分析失败！\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "70b2c069",
   "metadata": {},
   "source": [
    "---\n",
    "\n",
    "## 🔧 修复说明文档\n",
    "\n",
    "### 问题1: 列信息详情重复显示 ❌\n",
    "\n",
    "**原因**: `explore_basic_info` 函数中的列信息循环打印时，没有正确关闭循环\n",
    "\n",
    "**修复**: \n",
    "- 优化了列信息打印格式\n",
    "- 添加了\"唯一率\"列，更直观地展示字段特征\n",
    "- 修复了表头和内容对齐问题\n",
    "\n",
    "### 问题2: 文本型字段未被正确识别 ❌\n",
    "\n",
    "**原因**: \n",
    "1. 字段类型识别函数未正确处理 `categorical` 类型\n",
    "2. 未处理空值导致的 `avg_length` 计算错误\n",
    "3. 判定逻辑不够健壮\n",
    "\n",
    "**修复**: \n",
    "1. 增强了 `identify_field_type` 函数:\n",
    "   ```python\n",
    "   # 修复前\n",
    "   if pd.api.types.is_object_dtype(df[col]):\n",
    "       avg_length = df[col].dropna().astype(str).str.len().mean()\n",
    "   \n",
    "   # 修复后\n",
    "   if pd.api.types.is_object_dtype(df[col]) or pd.api.types.is_categorical_dtype(df[col]):\n",
    "       non_null_data = df[col].dropna()\n",
    "       if len(non_null_data) == 0:  # 处理全空情况\n",
    "           return '🏷️类别型'\n",
    "       avg_length = non_null_data.astype(str).str.len().mean()\n",
    "   ```\n",
    "\n",
    "2. 在字段类型统计部分增加了空值检查:\n",
    "   ```python\n",
    "   for col in analysis_cols:\n",
    "       if col not in numeric_cols and col not in datetime_cols:\n",
    "           non_null_data = df[col].dropna()\n",
    "           if len(non_null_data) == 0:  # 全空列默认为类别型\n",
    "               categorical_cols.append(col)\n",
    "               continue\n",
    "           # ...后续逻辑\n",
    "   ```\n",
    "\n",
    "3. 同步修复了 `analyze_text_features` 函数中的识别逻辑\n",
    "\n",
    "### 文本型字段判定标准 ✅\n",
    "\n",
    "```\n",
    "📝 文本型字段 = 唯一值比例 > 50% AND 平均长度 > 10\n",
    "🏷️ 类别型字段 = 唯一值比例 ≤ 50% OR 平均长度 ≤ 10\n",
    "```\n",
    "\n",
    "**示例说明**:\n",
    "- `PAGE_TITLE=\"这是页面标题内容1包含一些描述性文字\"` → 唯一率100%, 长度19 → 📝文本型\n",
    "- `GENDER=\"M\"/\"F\"` → 唯一率2%, 长度1 → 🏷️类别型  \n",
    "- `PROD_CODE=\"P001\"` → 唯一率3%, 长度4 → 🏷️类别型\n",
    "- `STATUS=\"A\"/\"I\"` → 唯一率2%, 长度1 → 🏷️类别型\n",
    "\n",
    "### 测试验证 ✅\n",
    "\n",
    "请运行上面的测试单元格验证修复效果:\n",
    "1. **第1个单元格**: 创建包含9个字段的测试数据\n",
    "2. **第2个单元格**: 测试 `explore_basic_info` 函数并验证结果\n",
    "3. **第3个单元格**: 测试 `analyze_text_features` 函数\n",
    "\n",
    "预期结果:\n",
    "- ✅ 列信息不重复显示\n",
    "- ✅ PAGE_TITLE, REFERRER_TITLE, MODEL_NAME 被识别为文本型\n",
    "- ✅ GENDER, PROD_CODE, STATUS 被识别为类别型\n",
    "- ✅ AGE, BALANCE 被识别为数值型\n",
    "- ✅ CUST_NO 被排除在分析之外"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "8ef55a68",
   "metadata": {},
   "source": [
    "## 示例3: 数据清洗操作"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "cf1d340f",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Example cleaning pipeline for the asset table.\n",
    "# NOTE(review): handle_missing_values / remove_duplicates / handle_outliers /\n",
    "# optimize_dtypes are defined in earlier cells; the guard below skips the whole\n",
    "# example when A_asset_data was never loaded into the kernel.\n",
    "\n",
    "if 'A_asset_data' in globals():\n",
    "    # 1. Impute missing values (inplace=False: original frame left untouched)\n",
    "    asset_cleaned = handle_missing_values(\n",
    "        A_asset_data, \n",
    "        numeric_strategy='median',      # numeric columns -> median fill\n",
    "        categorical_strategy='mode',    # categorical columns -> mode fill\n",
    "        dataset_name=\"资产信息表\",\n",
    "        inplace=False                   # return a new DataFrame\n",
    "    )\n",
    "    \n",
    "    # 2. Drop duplicate rows\n",
    "    asset_cleaned = remove_duplicates(\n",
    "        asset_cleaned,\n",
    "        subset=None,                    # consider all columns when detecting duplicates\n",
    "        keep='first',                   # keep the first occurrence\n",
    "        dataset_name=\"资产信息表\",\n",
    "        inplace=False\n",
    "    )\n",
    "    \n",
    "    # 3. Treat outliers\n",
    "    asset_cleaned = handle_outliers(\n",
    "        asset_cleaned,\n",
    "        method='iqr',                   # IQR-based detection\n",
    "        threshold=1.5,                  # fences at 1.5 * IQR\n",
    "        strategy='cap',                 # cap values at the fences instead of dropping rows\n",
    "        dataset_name=\"资产信息表\",\n",
    "        inplace=False\n",
    "    )\n",
    "    \n",
    "    # 4. Optimize dtypes to reduce memory footprint\n",
    "    asset_optimized = optimize_dtypes(\n",
    "        asset_cleaned,\n",
    "        dataset_name=\"资产信息表\",\n",
    "        inplace=False\n",
    "    )\n",
    "    \n",
    "    print(f\"\\n✅ 清洗完成！\")\n",
    "    print(f\"   原始数据: {A_asset_data.shape}\")\n",
    "    print(f\"   清洗后: {asset_optimized.shape}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "c22082e8",
   "metadata": {},
   "source": [
    "---\n",
    "\n",
    "# 方法特性说明\n",
    "\n",
    "## ✨ 核心优势\n",
    "\n",
    "1. **高度通用性**\n",
    "   - 所有方法仅针对单个DataFrame设计\n",
    "   - 可应用于任何pandas DataFrame\n",
    "   - 参数化设计，灵活配置\n",
    "\n",
    "2. **专业性**\n",
    "   - 完整的统计分析（偏度、峰度、相关性等）\n",
    "   - 多种异常值检测方法（IQR、Z-score）\n",
    "   - 丰富的可视化支持\n",
    "\n",
    "3. **易用性**\n",
    "   - 清晰的输出格式和进度提示\n",
    "   - 一键执行完整流程\n",
    "   - 支持单独调用各个功能\n",
    "\n",
    "4. **工程化**\n",
    "   - 内存优化功能\n",
    "   - 数据类型自动转换\n",
    "   - 详细的日志输出\n",
    "\n",
    "## 📋 方法清单\n",
    "\n",
    "### 探查方法\n",
    "- `explore_basic_info()` - 基础信息探查\n",
    "- `analyze_data_quality()` - 数据质量分析\n",
    "- `analyze_numeric_features()` - 数值型特征分析\n",
    "- `analyze_categorical_features()` - 类别型特征分析\n",
    "\n",
    "### 清洗方法\n",
    "- `handle_missing_values()` - 缺失值处理\n",
    "- `remove_duplicates()` - 重复值删除\n",
    "- `handle_outliers()` - 异常值处理\n",
    "- `optimize_dtypes()` - 数据类型优化\n",
    "\n",
    "### 可视化方法\n",
    "- `visualize_numeric_distribution()` - 数值分布图\n",
    "- `visualize_boxplot()` - 箱线图\n",
    "- `visualize_correlation_matrix()` - 相关性热图\n",
    "- `visualize_categorical_distribution()` - 类别分布图\n",
    "\n",
    "### 综合方法\n",
    "- `comprehensive_data_exploration()` - 完整探查流程\n",
    "\n",
    "## 💡 使用建议\n",
    "\n",
    "1. **初次探查**：使用`comprehensive_data_exploration()`获取全面了解\n",
    "2. **针对性分析**：根据需要单独调用特定方法\n",
    "3. **清洗流程**：先探查→发现问题→针对性清洗→再次验证\n",
    "4. **可视化**：对于大数据集，建议关闭自动可视化，手动选择关键字段"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "starcup",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.18"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
