import json
import pandas as pd
import numpy as np
import re
from sklearn.preprocessing import StandardScaler, LabelEncoder


def _parse_json_input(json_data):
    """Parse *json_data* into a plain Python object.

    Accepts an inline JSON string, a path to a JSON file, or an
    already-parsed dict/list. Raises ValueError on anything else or on a
    parse/read failure.
    """
    if isinstance(json_data, str):
        # A string starting with '{' or '[' is treated as inline JSON;
        # any other string is assumed to be a file path.
        if json_data.strip().startswith(('{', '[')):
            try:
                return json.loads(json_data)
            except json.JSONDecodeError as e:
                raise ValueError(f"无效的JSON字符串: {e}") from e
        try:
            with open(json_data, 'r', encoding='utf-8') as file:
                return json.load(file)
        except Exception as e:
            raise ValueError(f"读取文件失败: {e}") from e
    if isinstance(json_data, (dict, list)):
        # Already-parsed JSON data: use as-is.
        return json_data
    raise ValueError("不支持的数据类型，请提供JSON字符串、文件路径或已解析的字典/列表")


def _fill_missing_values(df, report):
    """Fill missing values in *df* in place: median for numeric columns,
    mode (or "未知" when no mode exists) for everything else. Records what
    was filled in *report*.
    """
    missing_sum = df.isnull().sum()
    report["missing_values_before"] = missing_sum[missing_sum > 0].to_dict()

    for col in df.columns:
        if df[col].isnull().sum() == 0:
            continue
        if pd.api.types.is_numeric_dtype(df[col]):
            median_val = df[col].median()
            # Assign back instead of fillna(inplace=True) on a column
            # selection, which is deprecated chained assignment and a
            # silent no-op under pandas copy-on-write.
            df[col] = df[col].fillna(median_val)
            report[f"filled_{col}"] = f"用中位数 {median_val:.2f} 填充"
        else:
            mode_series = df[col].mode()
            mode_val = mode_series[0] if not mode_series.empty else "未知"
            df[col] = df[col].fillna(mode_val)
            report[f"filled_{col}"] = f"用众数 '{mode_val}' 填充"

    report["missing_values_after"] = df.isnull().sum().sum()


def _replace_outliers(df, report):
    """Replace IQR outliers (outside Q1-1.5*IQR .. Q3+1.5*IQR) in each
    numeric column of *df* with that column's median, in place.

    Returns the numeric column index for downstream use.
    """
    numeric_cols = df.select_dtypes(include=[np.number]).columns
    outliers_count = {}

    for col in numeric_cols:
        q1 = df[col].quantile(0.25)
        q3 = df[col].quantile(0.75)
        iqr = q3 - q1
        lower_bound = q1 - 1.5 * iqr
        upper_bound = q3 + 1.5 * iqr

        # Build the outlier mask once (the original evaluated it twice:
        # once to count, once to assign).
        mask = (df[col] < lower_bound) | (df[col] > upper_bound)
        n_outliers = int(mask.sum())
        outliers_count[col] = n_outliers

        # Replace outliers with the median (deletion would also be an option).
        if n_outliers > 0:
            df.loc[mask, col] = df[col].median()
            report[f"outliers_{col}"] = f"发现 {n_outliers} 个异常值，已用中位数替换"

    report["outliers_details"] = outliers_count
    return numeric_cols


def _encode_categoricals(df, report):
    """Label-encode every object column of *df* in place.

    Columns that are entirely null are skipped. Returns a dict mapping
    column name -> fitted LabelEncoder so callers can invert the encoding.
    """
    label_encoders = {}
    for col in df.select_dtypes(include=['object']).columns:
        # Nothing to fit on if the column has no non-null values
        # (missing values were already filled, so this is defensive).
        if df[col].notnull().sum() == 0:
            continue
        le = LabelEncoder()
        # Cast to str first so any residual NaN becomes a regular category.
        df[col] = le.fit_transform(df[col].astype(str))
        label_encoders[col] = le
        report[f"encoded_{col}"] = "已完成标签编码"
    return label_encoders


def load_and_clean_json_data(json_data):
    """Load and clean structured JSON data.

    Pipeline: parse the input, build a DataFrame, fill missing values
    (median for numeric columns, mode for categorical), replace IQR
    outliers with the column median, drop duplicate rows, and
    label-encode object columns.

    Parameters
    ----------
    json_data : str | dict | list
        A JSON string, a path to a JSON file, or an already-parsed
        dict/list.

    Returns
    -------
    (pandas.DataFrame, dict)
        The cleaned DataFrame and a cleaning report. The report includes
        the fitted LabelEncoder objects under the "label_encoders" key.

    Raises
    ------
    ValueError
        On unparseable input, unreadable files, or unsupported types.
    """
    data = _parse_json_input(json_data)

    # A list of records becomes one row per record; a single dict becomes
    # a one-row frame.
    if isinstance(data, list):
        df = pd.DataFrame(data)
    elif isinstance(data, dict):
        df = pd.DataFrame([data])
    else:
        raise ValueError("JSON数据必须是字典或列表形式")

    df_clean = df.copy()
    report = {"original_shape": df.shape}

    _fill_missing_values(df_clean, report)
    numeric_cols = _replace_outliers(df_clean, report)

    # Drop exact duplicate rows and reindex.
    initial_count = len(df_clean)
    df_clean.drop_duplicates(inplace=True)
    df_clean.reset_index(drop=True, inplace=True)
    report["duplicates_removed"] = initial_count - len(df_clean)

    # Standardization/normalization is deliberately deferred: it should
    # happen after a train/test split. Only record the numeric columns
    # here for the downstream pipeline.
    report["numeric_columns"] = list(numeric_cols)

    label_encoders = _encode_categoricals(df_clean, report)

    report["final_shape"] = df_clean.shape
    report["label_encoders"] = label_encoders

    print(f"数据清洗完成！原始数据: {report['original_shape']}, 清洗后: {report['final_shape']}")
    return df_clean, report


# Usage example
if __name__ == "__main__":
    # Sample JSON payload: contains an outlier (age 200), missing values,
    # and a duplicate record.
    sample_payload = '''
    [
        {"name": "Alice", "age": 25, "city": "New York", "income": 50000},
        {"name": "Bob", "age": 35, "city": "Boston", "income": 75000},
        {"name": "Charlie", "age": 200, "city": "Chicago", "income": 100000},
        {"name": "Diana", "age": 28, "city": null, "income": null},
        {"name": "Alice", "age": 25, "city": "New York", "income": 50000}
    ]
    '''

    # Run the cleaning pipeline
    result_df, report = load_and_clean_json_data(sample_payload)

    # Show the cleaning report (the encoder objects are not printable-friendly)
    print("\n===== 数据清洗报告 =====")
    for field in report:
        if field == 'label_encoders':
            continue
        print(f"{field}: {report[field]}")

    # Show the cleaned DataFrame
    print("\n===== 清洗后的数据 =====")
    print(result_df)

    # Optionally serialize the cleaned DataFrame back to JSON
    serialized = result_df.to_json(orient='records', indent=2)
    print("\n===== 清洗后的JSON数据 =====")
    print(serialized)