import pandas as pd
import numpy as np
import json
import sys
import warnings
# Suppress all library warnings (pandas/sklearn deprecation noise etc.) so
# stdout stays clean for the caller, which parses the printed JSON result.
warnings.filterwarnings('ignore')

def convert_to_serializable(obj):
    """Recursively convert numpy types to native Python types for JSON output.

    Handles numpy scalars (bool/int/float), ndarrays, and walks dicts,
    lists and tuples; any other object is returned unchanged.
    """
    # np.bool_ is checked first: it is a numpy scalar json.dumps cannot handle.
    if isinstance(obj, np.bool_):
        return bool(obj)
    if isinstance(obj, np.integer):
        return int(obj)
    if isinstance(obj, np.floating):
        return float(obj)
    if isinstance(obj, np.ndarray):
        return obj.tolist()
    if isinstance(obj, dict):
        return {key: convert_to_serializable(value) for key, value in obj.items()}
    # Tuples become lists, which is exactly how json.dumps would encode them.
    if isinstance(obj, (list, tuple)):
        return [convert_to_serializable(item) for item in obj]
    return obj

def _discretize_target(y):
    """Bin the continuous target ``y`` into integer classes for oversampling.

    Returns ``(y_binned, use_random_sampling)`` where ``y_binned`` is a plain
    ndarray of class labels (safe for positional indexing downstream) and the
    flag signals SMOTE is not viable (a class has < 2 samples, or binning failed).
    """
    try:
        # At most 5 bins, at least 2; fewer unique values shrink the bin count.
        n_bins = max(2, min(5, len(y.unique())))
        y_binned = pd.cut(y, bins=n_bins, labels=False, duplicates='drop')

        print("离散化后的目标变量分布:")
        binned_counts = pd.Series(y_binned).value_counts().sort_index()
        print(binned_counts)

        # SMOTE interpolates between neighbors, so every class needs >= 2 rows.
        if binned_counts.min() < 2:
            print("某些类别样本数过少，使用随机过采样")
            use_random_sampling = True
        else:
            use_random_sampling = False
    except Exception as e:
        print(f"目标变量离散化失败: {e}")
        # Fallback: simple two-class split at the median.
        median_val = y.median()
        y_binned = (y >= median_val).astype(int)
        use_random_sampling = True

    # np.asarray: pd.cut on a Series returns a Series, whose label-based
    # indexing would be wrong for the positional index arrays used later.
    return np.asarray(y_binned), use_random_sampling


def smote_augmentation(csv_file_path, target_samples):
    """Augment the dataset in ``csv_file_path`` to ``target_samples`` rows.

    The continuous target column ``qyb`` is discretized so SMOTE (or random
    oversampling as a fallback) can synthesize feature rows; new target values
    are then drawn from a normal approximation of the original target and
    clipped to its observed range.

    Parameters
    ----------
    csv_file_path : str
        Path to a CSV with numeric feature columns plus a ``qyb`` target column.
    target_samples : int
        Desired number of rows in the augmented output.

    Returns
    -------
    dict
        ``{'status': 'success', ...}`` with stats and the output CSV path, or
        ``{'status': 'error', 'message': ...}`` on any failure (never raises).
    """
    try:
        # Import lazily so a missing dependency yields a JSON-able error dict
        # instead of an import-time crash.
        try:
            from imblearn.over_sampling import SMOTE, RandomOverSampler
            from sklearn.preprocessing import StandardScaler
        except ImportError as e:
            return {
                'status': 'error',
                'message': f'缺少必要的库: {str(e)}. 请安装 imbalanced-learn: pip install imbalanced-learn'
            }

        df = pd.read_csv(csv_file_path)
        print(f"原始数据形状: {df.shape}")

        # Snapshot of the input before any cleaning, reported in the result.
        original_stats = {
            'total_samples': int(len(df)),
            'total_features': int(df.shape[1]),
            'missing_values_count': int(df.isnull().sum().sum())
        }

        # Mean-impute remaining missing values; numeric_only=True keeps
        # df.mean() from raising when non-numeric columns are present.
        if df.isnull().sum().sum() > 0:
            print("警告: 数据中仍有缺失值，将用均值填充")
            df = df.fillna(df.mean(numeric_only=True))

        if 'qyb' not in df.columns:
            raise ValueError("目标变量 'qyb' 不存在于数据中")

        # Split features / target.
        X = df.drop('qyb', axis=1)
        y = df['qyb']

        print(f"特征数量: {X.shape[1]}")
        print("目标变量统计:")
        print(y.describe())

        # SMOTE needs class labels, so discretize the continuous target.
        y_binned, use_random_sampling = _discretize_target(y)

        # Standardize features so SMOTE's nearest-neighbor distances are not
        # dominated by large-scale columns; scaler=None marks "no scaling".
        try:
            scaler = StandardScaler()
            X_scaled = scaler.fit_transform(X)
        except Exception as e:
            print(f"数据标准化失败: {e}")
            X_scaled = X.values
            scaler = None

        # Pick the oversampler: random duplication for tiny/degenerate data,
        # SMOTE interpolation otherwise.
        if use_random_sampling or len(df) < 50:
            print("使用随机过采样")
            sampler = RandomOverSampler(random_state=42)
        else:
            print("使用SMOTE过采样")
            # k_neighbors must stay below the smallest class size.
            min_class_size = pd.Series(y_binned).value_counts().min()
            k_neighbors = max(1, min(5, min_class_size - 1, len(df) - 1))

            sampler = SMOTE(
                sampling_strategy='auto',
                random_state=42,
                k_neighbors=k_neighbors
            )

        try:
            X_resampled, y_binned_resampled = sampler.fit_resample(X_scaled, y_binned)
            print(f"过采样后数据形状: {X_resampled.shape}")
        except Exception as e:
            print(f"过采样失败: {e}")
            # Last resort: bootstrap-resample the original rows. y_binned is an
            # ndarray here, so positional indexing is safe.
            indices = np.random.choice(len(X_scaled), target_samples, replace=True)
            X_resampled = X_scaled[indices]
            y_binned_resampled = y_binned[indices]

        y_binned_resampled = np.asarray(y_binned_resampled)

        # Bring the row count to exactly target_samples: downsample without
        # replacement if over, duplicate random rows if under (class balancing
        # alone may not reach the requested size).
        if len(X_resampled) > target_samples:
            indices = np.random.choice(len(X_resampled), target_samples, replace=False)
            X_resampled = X_resampled[indices]
            y_binned_resampled = y_binned_resampled[indices]
        elif len(X_resampled) < target_samples:
            extra = np.random.choice(len(X_resampled), target_samples - len(X_resampled), replace=True)
            X_resampled = np.vstack([X_resampled, X_resampled[extra]])
            y_binned_resampled = np.concatenate([y_binned_resampled, y_binned_resampled[extra]])

        # Undo scaling so synthetic rows live in the original feature space.
        if scaler is not None:
            X_final = scaler.inverse_transform(X_resampled)
        else:
            X_final = X_resampled

        # The binned classes were only a device for oversampling; real target
        # values are drawn from the original target's normal approximation and
        # clipped to its observed range.
        try:
            y_mean = float(y.mean())
            y_std = float(y.std())
            y_final = np.random.normal(y_mean, y_std, len(X_resampled))
            y_min, y_max = float(y.min()), float(y.max())
            y_final = np.clip(y_final, y_min, y_max)
        except Exception as e:
            print(f"目标变量生成失败: {e}")
            # Fallback: bootstrap from the observed target values.
            y_final = np.random.choice(y.values, len(X_resampled))

        # Assemble and persist the augmented dataset.
        feature_columns = X.columns.tolist()
        df_augmented = pd.DataFrame(X_final, columns=feature_columns)
        df_augmented['qyb'] = y_final

        import os
        output_dir = "temp/output"
        os.makedirs(output_dir, exist_ok=True)
        output_csv_path = os.path.join(output_dir, "smote_augmented_data.csv")
        df_augmented.to_csv(output_csv_path, index=False)

        final_stats = {
            'total_samples': int(len(df_augmented)),
            'total_features': int(df_augmented.shape[1]),
            'missing_values_count': int(df_augmented.isnull().sum().sum()),
            'target_variable_stats': {
                'mean': float(df_augmented['qyb'].mean()),
                'std': float(df_augmented['qyb'].std()),
                'min': float(df_augmented['qyb'].min()),
                'max': float(df_augmented['qyb'].max())
            }
        }

        augmentation_ratio = float(len(df_augmented) / len(df))

        result = {
            'status': 'success',
            'message': 'SMOTE数据扩充完成',
            'augmented_data_path': output_csv_path,
            'original_stats': original_stats,
            'final_stats': final_stats,
            'augmentation_ratio': augmentation_ratio,
            'samples_added': int(len(df_augmented) - len(df)),
            'target_samples_requested': int(target_samples),
            'actual_samples_generated': int(len(df_augmented)),
            'augmentation_method': 'SMOTE' if not use_random_sampling else 'Random Over Sampling'
        }

        # Strip any remaining numpy scalar types before JSON serialization.
        result = convert_to_serializable(result)
        return result

    except Exception as e:
        # Contract: this function reports failures as a dict, never raises.
        return {
            'status': 'error',
            'message': f'SMOTE扩充失败: {str(e)}'
        }

if __name__ == "__main__":
    if len(sys.argv) < 3:
        result = {
            'status': 'error',
            'message': '缺少必要参数：CSV文件路径和目标样本数'
        }
        print(json.dumps(result, ensure_ascii=False))
        sys.exit(1)

    csv_file_path = sys.argv[1]
    target_samples = int(sys.argv[2])

    result = smote_augmentation(csv_file_path, target_samples)

    try:
        print(json.dumps(result, ensure_ascii=False))
    except Exception as e:
        error_result = {
            'status': 'error',
            'message': f'结果序列化失败: {str(e)}'
        }
        print(json.dumps(error_result))
