import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import os
from datetime import datetime
from scipy import stats
import warnings
from typing import Dict, List, Union, Optional, Tuple

# 忽略警告
warnings.filterwarnings('ignore')


class DataProfiler:
    """Data profiling tool.

    Analyses a DataFrame's overview, missing values, unique values, dtypes,
    numeric/categorical distributions, correlations and outliers, and renders
    the collected statistics as an HTML report.

    Usage:
        profiler = DataProfiler(df)
        profiler.profile()
        profiler.to_html("report.html")
    """

    def __init__(self, data: pd.DataFrame, sample_size: int = 5000):
        """
        Initialise the profiler.

        Args:
            data: DataFrame to analyse (a defensive copy is stored).
            sample_size: maximum number of rows kept in ``self.sample``
                (meant to avoid slow analysis of very large tables).
        """
        self.data = data.copy()
        self.n_rows, self.n_cols = self.data.shape
        # NOTE(review): ``self.sample`` is currently not consumed by any of the
        # profiling steps below (they all operate on the full data); it is kept
        # for backward compatibility and future sampled analysis.
        self.sample = self._get_sample(sample_size)
        self.columns = self.data.columns.tolist()
        self.stats: Dict = {}  # analysis results, filled in by profile()

    def _get_sample(self, sample_size: int) -> pd.DataFrame:
        """Return the data itself if small enough, otherwise a reproducible
        random sample of ``sample_size`` rows (fixed seed for determinism)."""
        if self.n_rows <= sample_size:
            return self.data
        return self.data.sample(sample_size, random_state=42)

    def profile(self) -> Dict:
        """Run every profiling step and return the accumulated stats dict.

        The order matters: ``_profile_unique`` reads the missing-value stats,
        ``_profile_categorical`` reads the dtype stats, and the correlation /
        outlier steps read the numeric stats, so those sections must come first.
        """
        print(f"[INFO] 开始数据探查：{self.n_rows}行 × {self.n_cols}列")

        self.stats["overview"] = self._profile_overview()        # 1. overview
        self.stats["missing"] = self._profile_missing()          # 2. missing values
        self.stats["unique"] = self._profile_unique()            # 3. unique values
        self.stats["dtypes"] = self._profile_dtypes()            # 4. data types
        self.stats["numeric"] = self._profile_numeric()          # 5. numeric distributions
        self.stats["categorical"] = self._profile_categorical()  # 6. categorical distributions
        self.stats["correlation"] = self._profile_correlation()  # 7. correlations
        self.stats["outliers"] = self._profile_outliers()        # 8. outlier detection

        print("[INFO] 数据探查完成！")
        return self.stats

    def _profile_overview(self) -> Dict:
        """Shape, memory footprint and duplicate-row statistics."""
        # Compute the duplicate mask once (the original computed it twice).
        dup_mask = self.data.duplicated()
        return {
            "rows": self.n_rows,
            "columns": self.n_cols,
            "memory_mb": round(self.data.memory_usage(deep=True).sum() / 1024 / 1024, 2),
            "duplicated_rows": dup_mask.sum(),
            "duplicated_rows_pct": round(dup_mask.mean() * 100, 2)
        }

    def _profile_missing(self) -> Dict:
        """Per-column missing-value counts and percentages."""
        missing = self.data.isna().sum()
        missing_pct = round(missing / self.n_rows * 100, 2)

        # Sort descending so the worst columns come first in the report.
        missing_df = pd.DataFrame({
            "missing_count": missing,
            "missing_pct": missing_pct
        }).sort_values("missing_pct", ascending=False)

        # Number of columns with no missing values at all.
        complete_cols = (missing == 0).sum()

        return {
            "missing_df": missing_df,
            "complete_columns": complete_cols,
            "complete_columns_pct": round(complete_cols / self.n_cols * 100, 2)
        }

    def _profile_unique(self) -> Dict:
        """Per-column unique-value counts and potential primary keys.

        Requires ``self.stats["missing"]`` to be populated first; ``profile()``
        guarantees the ordering.
        """
        unique_counts = self.data.apply(lambda x: x.nunique())
        unique_pct = round(unique_counts / self.n_rows * 100, 2)

        unique_df = pd.DataFrame({
            "unique_count": unique_counts,
            "unique_pct": unique_pct
        }).sort_values("unique_count", ascending=False)

        # A potential primary key is (almost) fully unique and has no NaNs.
        # The two boolean Series have different index orders; pandas aligns
        # them by label before combining, so this is safe.
        potential_primary_keys = unique_df[
            (unique_df["unique_pct"] > 99.9) &
            (self.stats["missing"]["missing_df"]["missing_count"] == 0)
            ].index.tolist()

        return {
            "unique_df": unique_df,
            "potential_primary_keys": potential_primary_keys
        }

    def _profile_dtypes(self) -> Dict:
        """Declared dtypes vs inferred "actual" types.

        For object columns, tries to parse the values as datetimes, then as
        numbers; anything that fails both is classified as plain "string".
        """
        declared_dtypes = self.data.dtypes.astype(str).to_dict()

        actual_dtypes = {}
        for col in self.columns:
            if declared_dtypes[col] == "object":
                # Datetime-like? A parse failure means "no".
                # Narrowed from a bare ``except:`` so KeyboardInterrupt /
                # SystemExit are no longer swallowed.
                try:
                    pd.to_datetime(self.data[col], errors="raise")
                    actual_dtypes[col] = "datetime"
                    continue
                except Exception:
                    pass

                # Numbers stored as text (e.g. "123")?
                try:
                    pd.to_numeric(self.data[col], errors="raise")
                    actual_dtypes[col] = "numeric_str"
                    continue
                except Exception:
                    pass

                # Any remaining object column is treated as a plain string.
                actual_dtypes[col] = "string"
            else:
                # Non-object columns keep their declared dtype.
                actual_dtypes[col] = declared_dtypes[col]

        return {
            "declared_dtypes": declared_dtypes,
            "actual_dtypes": actual_dtypes
        }

    def _profile_numeric(self) -> Dict:
        """Descriptive statistics for every numeric column."""
        numeric_cols = self.data.select_dtypes(include=[np.number]).columns.tolist()
        if not numeric_cols:
            return {"numeric_cols": [], "stats": {}}

        # Renamed from ``stats`` so the scipy.stats module import is not
        # shadowed inside this class.
        numeric_stats = {}
        for col in numeric_cols:
            col_data = self.data[col].dropna()
            if len(col_data) == 0:
                continue  # all-NaN column: nothing to describe

            # Quartiles in a single quantile() call.
            q1, q2, q3 = col_data.quantile([0.25, 0.5, 0.75])

            numeric_stats[col] = {
                "mean": col_data.mean(),
                "median": q2,
                "std": col_data.std(),
                "min": col_data.min(),
                "max": col_data.max(),
                "range": col_data.max() - col_data.min(),
                "q1": q1,  # 25th percentile
                "q3": q3,  # 75th percentile
                "iqr": q3 - q1,  # interquartile range
                "skewness": col_data.skew(),
                "kurtosis": col_data.kurt(),
                "zeros_pct": round((col_data == 0).mean() * 100, 2)  # share of exact zeros
            }

        return {
            "numeric_cols": numeric_cols,
            "stats": numeric_stats
        }

    def _profile_categorical(self) -> Dict:
        """Category counts and top-5 shares for string-like columns.

        Requires ``self.stats["dtypes"]`` to be populated first.
        """
        # Only columns whose inferred type is a plain string (this excludes
        # dates and numbers that merely happen to be stored as text).
        categorical_cols = [
            col for col in self.columns
            if self.stats["dtypes"]["actual_dtypes"][col] in ["string", "object"]
        ]

        if not categorical_cols:
            return {"categorical_cols": [], "top_categories": {}}

        top_categories = {}
        for col in categorical_cols:
            value_counts = self.data[col].value_counts(normalize=True) * 100
            # BUG FIX: the previous reset_index().rename(columns={col:
            # "category", "count": "pct"}) chain never produced a "pct" key
            # (the value column is named "proportion" on pandas >= 2 and is
            # mislabeled differently on 1.x), so the HTML renderer crashed
            # with KeyError. Build the records explicitly instead.
            top_categories[col] = {
                "count": value_counts.size,  # total number of distinct categories
                "top5": [
                    {"category": category, "pct": float(pct)}
                    for category, pct in value_counts.head(5).items()
                ]
            }

        return {
            "categorical_cols": categorical_cols,
            "top_categories": top_categories
        }

    def _profile_correlation(self) -> Dict:
        """Pearson correlation matrix plus strongly correlated pairs (|r| > 0.7).

        Requires ``self.stats["numeric"]`` to be populated first.
        """
        numeric_cols = self.stats["numeric"]["numeric_cols"]
        if len(numeric_cols) < 2:
            return {"correlation_matrix": None, "high_correlations": []}

        corr_matrix = self.data[numeric_cols].corr().round(2)

        # Scan the upper triangle only so each pair is reported once.
        high_correlations = []
        for i in range(len(numeric_cols)):
            for j in range(i + 1, len(numeric_cols)):
                col1, col2 = numeric_cols[i], numeric_cols[j]
                corr = corr_matrix.loc[col1, col2]
                if abs(corr) > 0.7:
                    high_correlations.append({
                        "column1": col1,
                        "column2": col2,
                        "correlation": corr
                    })

        # Strongest relationships first.
        high_correlations.sort(key=lambda x: abs(x["correlation"]), reverse=True)

        return {
            "correlation_matrix": corr_matrix,
            "high_correlations": high_correlations
        }

    def _profile_outliers(self) -> Dict:
        """Outlier detection per numeric column using Z-score and IQR methods.

        Requires ``self.stats["numeric"]`` to be populated first.
        """
        numeric_cols = self.stats["numeric"]["numeric_cols"]
        if not numeric_cols:
            return {"outliers": {}}

        outliers = {}
        for col in numeric_cols:
            col_data = self.data[col].dropna()
            if len(col_data) == 0:
                continue

            # 1. Z-score method: |Z| > 3 counts as an outlier. (For a constant
            #    column zscore yields NaN, so no value matches.)
            z_scores = stats.zscore(col_data)
            z_outliers = col_data[(abs(z_scores) > 3)]

            # 2. IQR method: outside [Q1 - 1.5*IQR, Q3 + 1.5*IQR].
            q1, q3 = col_data.quantile([0.25, 0.75])
            iqr = q3 - q1
            iqr_lower = q1 - 1.5 * iqr
            iqr_upper = q3 + 1.5 * iqr
            iqr_outliers = col_data[(col_data < iqr_lower) | (col_data > iqr_upper)]

            outliers[col] = {
                "z_outliers_count": len(z_outliers),
                "z_outliers_pct": round(len(z_outliers) / len(col_data) * 100, 2),
                "iqr_outliers_count": len(iqr_outliers),
                "iqr_outliers_pct": round(len(iqr_outliers) / len(col_data) * 100, 2),
                "z_outliers_min": z_outliers.min() if len(z_outliers) > 0 else None,
                "z_outliers_max": z_outliers.max() if len(z_outliers) > 0 else None,
                "iqr_outliers_min": iqr_outliers.min() if len(iqr_outliers) > 0 else None,
                "iqr_outliers_max": iqr_outliers.max() if len(iqr_outliers) > 0 else None
            }

        return {"outliers": outliers}

    def to_html(self, output_path: str = "data_profile_report.html") -> None:
        """Render the collected stats as a standalone HTML report file.

        Args:
            output_path: destination path of the HTML file.

        Raises:
            ValueError: if ``profile()`` has not been called yet.
        """
        if not self.stats:
            raise ValueError("请先调用profile()方法执行探查！")

        # Assemble the full HTML document from the section generators.
        html = f"""
        <!DOCTYPE html>
        <html>
        <head>
            <meta charset="UTF-8">
            <title>数据探查报告</title>
            <style>
                body {{ font-family: Arial, sans-serif; margin: 20px; }}
                h1, h2, h3 {{ color: #333; }}
                table {{ border-collapse: collapse; width: 100%; margin-bottom: 20px; }}
                th, td {{ border: 1px solid #ddd; padding: 8px; text-align: left; }}
                th {{ background-color: #f2f2f2; }}
                tr:nth-child(even) {{ background-color: #f9f9f9; }}
                .section {{ margin-bottom: 40px; }}
                .highlight {{ background-color: #ffffcc; }}
                .alert {{ color: red; font-weight: bold; }}
                .success {{ color: green; font-weight: bold; }}
            </style>
        </head>
        <body>
            <h1>数据探查报告</h1>
            <p>生成时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}</p>
            <p>数据规模: {self.n_rows:,}行 × {self.n_cols:,}列</p>
            <p>内存占用: {self.stats["overview"]["memory_mb"]} MB</p>

            <!-- 1. 数据概况 -->
            <div class="section">
                <h2>1. 数据概况</h2>
                <table>
                    <tr><th>指标</th><th>值</th></tr>
                    <tr><td>行数</td><td>{self.n_rows:,}</td></tr>
                    <tr><td>列数</td><td>{self.n_cols:,}</td></tr>
                    <tr><td>内存占用</td><td>{self.stats["overview"]["memory_mb"]} MB</td></tr>
                    <tr><td>重复行数</td><td>{self.stats["overview"]["duplicated_rows"]:,} ({self.stats["overview"]["duplicated_rows_pct"]}%)</td></tr>
                </table>
            </div>

            <!-- 2. 缺失值分析 -->
            <div class="section">
                <h2>2. 缺失值分析</h2>
                <p>完全无缺失的字段: {self.stats["missing"]["complete_columns"]} ({self.stats["missing"]["complete_columns_pct"]}%)</p>

                <h3>缺失值统计 (Top 10)</h3>
                <table>
                    <tr><th>字段</th><th>缺失值数量</th><th>缺失值比例</th><th>状态</th></tr>
                    {self._generate_missing_html()}
                </table>
            </div>

            <!-- 3. 唯一值分析 -->
            <div class="section">
                <h2>3. 唯一值分析</h2>

                <h3>唯一值统计 (Top 10)</h3>
                <table>
                    <tr><th>字段</th><th>唯一值数量</th><th>唯一值比例</th><th>状态</th></tr>
                    {self._generate_unique_html()}
                </table>

                <h3>潜在主键</h3>
                <p>{', '.join(self.stats["unique"]["potential_primary_keys"]) if self.stats["unique"]["potential_primary_keys"] else '未发现潜在主键'}</p>
            </div>

            <!-- 4. 数据类型分析 -->
            <div class="section">
                <h2>4. 数据类型分析</h2>
                <table>
                    <tr><th>字段</th><th>声明类型</th><th>实际类型</th></tr>
                    {self._generate_dtypes_html()}
                </table>
            </div>

            <!-- 5. 数值型字段分布 -->
            <div class="section">
                <h2>5. 数值型字段分布</h2>
                {self._generate_numeric_html()}
            </div>

            <!-- 6. 分类型字段分布 -->
            <div class="section">
                <h2>6. 分类型字段分布</h2>
                {self._generate_categorical_html()}
            </div>

            <!-- 7. 相关性分析 -->
            <div class="section">
                <h2>7. 相关性分析</h2>

                <h3>强相关字段对 (|r| > 0.7)</h3>
                <table>
                    <tr><th>字段1</th><th>字段2</th><th>相关系数</th></tr>
                    {self._generate_correlation_html()}
                </table>
            </div>

            <!-- 8. 异常值检测 -->
            <div class="section">
                <h2>8. 异常值检测</h2>
                <p>注：Z-score方法（|Z| > 3）；IQR方法（< Q1-1.5*IQR 或 > Q3+1.5*IQR）</p>

                <table>
                    <tr><th>字段</th><th>Z-score异常值数量</th><th>Z-score异常值比例</th><th>IQR异常值数量</th><th>IQR异常值比例</th></tr>
                    {self._generate_outliers_html()}
                </table>
            </div>
        </body>
        </html>
        """

        # Persist the report.
        with open(output_path, "w", encoding="utf-8") as f:
            f.write(html)

        print(f"[INFO] HTML报告已保存至: {os.path.abspath(output_path)}")

    def _generate_missing_html(self) -> str:
        """HTML table rows for the missing-value section (worst 10 columns)."""
        missing_df = self.stats["missing"]["missing_df"].head(10)
        rows = []
        for col, row in missing_df.iterrows():
            # More than 30% missing is flagged for attention.
            status = "alert" if row["missing_pct"] > 30 else "success"
            rows.append(f"""
                <tr>
                    <td>{col}</td>
                    <td>{int(row["missing_count"])}</td>
                    <td>{row["missing_pct"]}%</td>
                    <td class="{status}">{'>30%需关注' if row["missing_pct"] > 30 else '正常'}</td>
                </tr>
            """)
        return "".join(rows)

    def _generate_unique_html(self) -> str:
        """HTML table rows for the unique-value section (top 10 columns)."""
        unique_df = self.stats["unique"]["unique_df"].head(10)
        rows = []
        for col, row in unique_df.iterrows():
            status = "success" if col in self.stats["unique"]["potential_primary_keys"] else ""
            rows.append(f"""
                <tr>
                    <td>{col}</td>
                    <td>{int(row["unique_count"])}</td>
                    <td>{row["unique_pct"]}%</td>
                    <td class="{status}">{'潜在主键' if col in self.stats["unique"]["potential_primary_keys"] else ''}</td>
                </tr>
            """)
        return "".join(rows)

    def _generate_dtypes_html(self) -> str:
        """HTML table rows for the dtype section; mismatches are highlighted."""
        rows = []
        for col in self.columns:
            declared = self.stats["dtypes"]["declared_dtypes"][col]
            actual = self.stats["dtypes"]["actual_dtypes"][col]
            highlight = "highlight" if declared != actual else ""
            rows.append(f"""
                <tr class="{highlight}">
                    <td>{col}</td>
                    <td>{declared}</td>
                    <td>{actual}</td>
                </tr>
            """)
        return "".join(rows)

    def _generate_numeric_html(self) -> str:
        """HTML content for the numeric-distribution section."""
        numeric_cols = self.stats["numeric"]["numeric_cols"]
        if not numeric_cols:
            return "<p>未发现数值型字段</p>"

        sections = []
        for col in numeric_cols:
            # Renamed from ``stats`` to avoid shadowing the scipy.stats module.
            col_stats = self.stats["numeric"]["stats"][col]
            sections.append(f"""
                <h3>{col} (分布)</h3>
                <table>
                    <tr><th>统计量</th><th>值</th><th>统计量</th><th>值</th></tr>
                    <tr><td>均值</td><td>{col_stats["mean"]:.2f}</td><td>中位数</td><td>{col_stats["median"]:.2f}</td></tr>
                    <tr><td>标准差</td><td>{col_stats["std"]:.2f}</td><td>偏度</td><td>{col_stats["skewness"]:.2f}</td></tr>
                    <tr><td>最小值</td><td>{col_stats["min"]:.2f}</td><td>最大值</td><td>{col_stats["max"]:.2f}</td></tr>
                    <tr><td>Q1 (25%)</td><td>{col_stats["q1"]:.2f}</td><td>Q3 (75%)</td><td>{col_stats["q3"]:.2f}</td></tr>
                    <tr><td>IQR</td><td>{col_stats["iqr"]:.2f}</td><td>零值比例</td><td>{col_stats["zeros_pct"]}%</td></tr>
                </table>
            """)
        return "".join(sections)

    def _generate_categorical_html(self) -> str:
        """HTML content for the categorical-distribution section."""
        categorical_cols = self.stats["categorical"]["categorical_cols"]
        if not categorical_cols:
            return "<p>未发现分类型字段</p>"

        sections = []
        for col in categorical_cols:
            top = self.stats["categorical"]["top_categories"][col]
            top_cats = top["top5"]
            top_cats_html = "".join([
                f"<tr><td>{i + 1}</td><td>{cat['category']}</td><td>{cat['pct']:.2f}%</td></tr>"
                for i, cat in enumerate(top_cats)
            ])

            sections.append(f"""
                <h3>{col} (类别分布)</h3>
                <p>总类别数: {top['count']}</p>
                <table>
                    <tr><th>排名</th><th>类别</th><th>占比</th></tr>
                    {top_cats_html}
                </table>
            """)
        return "".join(sections)

    def _generate_correlation_html(self) -> str:
        """HTML table rows for the correlation section."""
        high_corr = self.stats["correlation"]["high_correlations"]
        if not high_corr:
            return "<tr><td colspan='3'>未发现强相关字段对</td></tr>"

        rows = []
        for item in high_corr:
            # Negative correlations in red, positive in blue.
            color = "color: red;" if item["correlation"] < 0 else "color: blue;"
            rows.append(f"""
                <tr>
                    <td>{item["column1"]}</td>
                    <td>{item["column2"]}</td>
                    <td style="{color}">{item["correlation"]:.2f}</td>
                </tr>
            """)
        return "".join(rows)

    def _generate_outliers_html(self) -> str:
        """HTML table rows for the outlier-detection section."""
        outliers = self.stats["outliers"]["outliers"]
        if not outliers:
            return "<tr><td colspan='5'>未发现数值型字段</td></tr>"

        rows = []
        # Loop variable renamed from ``stats`` (shadowed scipy.stats).
        for col, info in outliers.items():
            # More than 5% outliers by either method is flagged.
            z_alert = "alert" if info["z_outliers_pct"] > 5 else ""
            iqr_alert = "alert" if info["iqr_outliers_pct"] > 5 else ""

            rows.append(f"""
                <tr>
                    <td>{col}</td>
                    <td class="{z_alert}">{info["z_outliers_count"]}</td>
                    <td class="{z_alert}">{info["z_outliers_pct"]}%</td>
                    <td class="{iqr_alert}">{info["iqr_outliers_count"]}</td>
                    <td class="{iqr_alert}">{info["iqr_outliers_pct"]}%</td>
                </tr>
            """)
        return "".join(rows)


def profile_data(input_path: str, output_path: str = "data_profile_report.html", **kwargs) -> None:
    """
    Profile a data file and generate an HTML report.

    Args:
        input_path: path to the input file (CSV or Excel).
        output_path: path of the generated HTML report.
        **kwargs: extra arguments forwarded to the pandas reader
            (e.g. ``sep`` for CSV, ``sheet_name`` for Excel).

    Raises:
        ValueError: if the file extension is neither CSV nor Excel.
    """
    # Case-insensitive extension check, so "DATA.CSV" / "data.XLSX" are
    # accepted too (the original endswith() test was case-sensitive).
    suffix = os.path.splitext(input_path)[1].lower()
    if suffix == ".csv":
        df = pd.read_csv(input_path, **kwargs)
    elif suffix in (".xlsx", ".xls"):
        df = pd.read_excel(input_path, **kwargs)
    else:
        raise ValueError("仅支持CSV或Excel格式！")

    # Run the profiling pipeline.
    profiler = DataProfiler(df)
    profiler.profile()

    # Render the report.
    profiler.to_html(output_path)


if __name__ == "__main__":
    # Example: profile a CSV file and generate a report
    # profile_data("your_data.csv", "your_report.html", sep=",")

    # Example: profile an Excel file and generate a report
    # profile_data("your_data.xlsx", "your_report.html", sheet_name="Sheet1")

    # Build a small in-memory demo dataset.
    import io
    import textwrap

    # BUG FIX: the original triple-quoted literal carried the source-code
    # indentation into every CSV line, so the parsed header contained leading
    # spaces (e.g. a "    id" column). dedent() strips the common indent.
    sample_data = textwrap.dedent("""\
        id,name,age,gender,salary,department,join_date,score
        1,张三,30,男,10000,技术部,2020-01-15,95.5
        2,李四,25,女,8000,市场部,2021-03-20,88.0
        3,王五,35,男,12000,技术部,2019-05-10,92.0
        4,赵六,28,女,9500,财务部,2020-07-05,76.5
        5,钱七,40,男,15000,技术部,2018-11-25,99.0
        6,孙八,32,女,11000,市场部,2019-08-18,85.0
        7,周九,26,男,8500,财务部,2021-01-30,79.0
        8,吴十,38,女,13000,技术部,2018-04-12,94.0
        9,郑十一,29,男,9000,市场部,2020-09-01,81.0
        10,王十二,33,女,11500,财务部,2019-12-05,83.5
    """)

    # Parse the demo data from the in-memory string.
    df = pd.read_csv(io.StringIO(sample_data))

    # Profile it and write the demo report.
    profiler = DataProfiler(df)
    profiler.profile()
    profiler.to_html("sample_profile_report.html")
