# AGN数据增强工具（基于hueffel-et-al-2025研究）
# 启动命令: streamlit run agn_augmentation_app.py
# 依赖安装: pip install streamlit pandas numpy matplotlib seaborn openpyxl scikit-learn

import streamlit as st
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from io import BytesIO, StringIO
from datetime import datetime
from sklearn.preprocessing import MinMaxScaler, StandardScaler
import joblib
import base64

# Configure CJK-capable fonts so matplotlib can render the Chinese labels
# used throughout the app; fall back through several common CJK fonts.
plt.rcParams["font.family"] = ["SimHei", "WenQuanYi Micro Hei", "Heiti TC"]
# Seaborn defaults: normal font scale, white grid background for all plots.
sns.set(font_scale=1.0)
sns.set_style("whitegrid")

def load_data(uploaded_file):
    """Load an uploaded CSV or Excel file into a DataFrame.

    Parameters:
    - uploaded_file: Streamlit UploadedFile (file-like object with a .name)

    Returns the parsed DataFrame, or None after reporting an error via
    st.error for unsupported formats or read failures.
    """
    try:
        # Lowercase the name so uppercase extensions (.CSV, .XLSX) are
        # accepted too — the original check was case-sensitive.
        name = uploaded_file.name.lower()
        if name.endswith('.csv'):
            return pd.read_csv(uploaded_file)
        elif name.endswith(('.xlsx', '.xls')):
            return pd.read_excel(uploaded_file)
        else:
            st.error("不支持的文件格式，请上传CSV或Excel文件")
            return None
    except Exception as e:
        st.error(f"文件读取错误: {str(e)}")
        return None

def auto_classify_columns(df):
    """Heuristically partition columns into (info, feature, target) lists.

    Classification mirrors the data layout used in hueffel-et-al-2025:
    non-numeric columns are identifiers ("info"), numeric columns are
    candidate features. When there are only a few numeric columns (1-3),
    they are more likely prediction targets than descriptors, so they are
    reassigned wholesale to the target list.

    Returns a tuple (info_cols, feature_cols, target_cols).
    """
    info_cols, feature_cols = [], []
    for column in df.columns:
        bucket = feature_cols if pd.api.types.is_numeric_dtype(df[column]) else info_cols
        bucket.append(column)

    target_cols = []
    # Few numeric columns -> treat them all as targets instead of features.
    if 0 < len(feature_cols) <= 3:
        target_cols, feature_cols = feature_cols, []

    return info_cols, feature_cols, target_cols

def scale_features(df, feature_cols, scaling_method):
    """Scale the feature columns of *df* with the chosen method.

    Parameters:
    - df: DataFrame containing the features
    - feature_cols: list of columns to scale
    - scaling_method: "不缩放" (none), "Min-Max归一化" (min-max to [0,1]),
      or "Z-score标准化" (standardize to mean 0 / std 1)

    Returns:
    - scaled_df: copy of df with the feature columns scaled
    - scaler: fitted transformer (None when not scaling), kept so results
      can later be inverse-transformed
    - scaling_params: per-column parameters for logging/display

    Raises ValueError for an unrecognized scaling_method. (The original
    implementation fell through and crashed with a NameError on
    `scaled_features` in that case.)
    """
    scaled_df = df.copy()
    scaler = None
    scaling_params = {}

    if scaling_method == "不缩放":
        return scaled_df, scaler, scaling_params

    # Extract the raw feature matrix once; fit on all rows.
    features = scaled_df[feature_cols].values

    if scaling_method == "Min-Max归一化":
        scaler = MinMaxScaler(feature_range=(0, 1))
        scaled_features = scaler.fit_transform(features)

        # Record per-column min/max/range for the parameter log.
        for i, col in enumerate(feature_cols):
            scaling_params[col] = {
                "min": scaler.data_min_[i],
                "max": scaler.data_max_[i],
                "range": scaler.data_max_[i] - scaler.data_min_[i]
            }

    elif scaling_method == "Z-score标准化":
        scaler = StandardScaler()
        scaled_features = scaler.fit_transform(features)

        # Record per-column mean/std for the parameter log.
        for i, col in enumerate(feature_cols):
            scaling_params[col] = {
                "mean": scaler.mean_[i],
                "std": np.sqrt(scaler.var_[i])
            }
    else:
        # Fail fast with a clear message instead of a NameError below.
        raise ValueError(f"Unknown scaling method: {scaling_method}")

    # Write the scaled values back into the DataFrame copy.
    scaled_df[feature_cols] = scaled_features

    return scaled_df, scaler, scaling_params

def inverse_scale_features(scaled_df, original_df, feature_cols, scaler, scaling_method):
    """Map scaled feature columns back to their original scale.

    Returns a copy of *scaled_df*; a plain copy when no scaling was applied
    (method "不缩放" or no fitted scaler), otherwise with *feature_cols*
    run through scaler.inverse_transform.

    Note: *original_df* is accepted for interface compatibility but is not
    consulted here.
    """
    # Nothing to undo -> hand back an independent copy.
    if scaler is None or scaling_method == "不缩放":
        return scaled_df.copy()

    restored = scaled_df.copy()
    restored[feature_cols] = scaler.inverse_transform(restored[feature_cols].values)
    return restored

def apply_agn_augmentation(original_df, scaled_df, info_cols, feature_cols, target_cols, 
                          sigma, n_samples, modify_target, scaling_method, scaler,
                          train_indices=None):
    """
    Apply Additive Gaussian Noise (AGN) augmentation, strictly following the
    algorithm logic described in the hueffel-et-al-2025 paper.

    Parameters:
    - original_df: original DataFrame (features on their native scale)
    - scaled_df: scaled dataset (noise is added on this scale)
    - info_cols: identifier/info columns (carried through unchanged)
    - feature_cols: feature columns that receive noise
    - target_cols: target columns
    - sigma: standard deviation of the Gaussian noise
    - n_samples: number of augmented samples generated per original row
    - modify_target: whether to also add noise to the target columns
    - scaling_method: scaling method name ("不缩放" / "Min-Max归一化" / "Z-score标准化")
    - scaler: fitted scaler object (used for the inverse transform)
    - train_indices: training-set indices (if given, only those rows are augmented)

    Returns:
    - augmented_df: full augmented dataset converted back to the original scale
    - augmented_scaled_df: augmented dataset on the scaled axis (for visualization)
    """
    # If training indices were supplied, augment only those rows.
    if train_indices is not None:
        # NOTE(review): train_indices is consumed positionally here (.iloc)
        # but by *label* in .drop() below; the two only agree on a default
        # RangeIndex — confirm callers never pass a re-indexed DataFrame.
        df_to_augment = original_df.iloc[train_indices].copy()
        scaled_to_augment = scaled_df.iloc[train_indices].copy()
        remaining_df = original_df.drop(train_indices).copy()
        remaining_df['增强标识'] = '原始数据'
    else:
        df_to_augment = original_df.copy()
        scaled_to_augment = scaled_df.copy()
        remaining_df = pd.DataFrame(columns=original_df.columns)
    
    # Accumulators for the augmented rows (original-scale and scaled).
    augmented_data = []
    augmented_scaled_data = []
    
    # Tag the original rows so they can be distinguished from generated ones.
    original_data = df_to_augment.copy()
    original_data['增强标识'] = '原始数据'
    augmented_data.append(original_data)
    
    original_scaled_data = scaled_to_augment.copy()
    original_scaled_data['增强标识'] = '原始数据'
    augmented_scaled_data.append(original_scaled_data)
    
    # Generate n_samples augmented rows per original sample, with a
    # Streamlit progress bar for user feedback.
    total_samples = len(df_to_augment)
    progress_bar = st.progress(0)
    status_text = st.empty()
    
    for i, (idx, row) in enumerate(df_to_augment.iterrows()):
        scaled_row = scaled_to_augment.iloc[i]
        
        # Update the progress UI.
        progress = (i + 1) / total_samples
        progress_bar.progress(progress)
        status_text.text(f"正在生成增强数据: {i+1}/{total_samples} 个原始样本")
        
        # Produce n_samples noisy versions of the current sample.
        for sample_num in range(1, n_samples + 1):
            new_row = row.copy()
            new_scaled_row = scaled_row.copy()
            
            # 1. Feature columns — apply additive Gaussian noise (the paper's
            #    core algorithm), working on the scaled representation.
            for col in feature_cols:
                scaled_value = scaled_row[col]
                # Draw Gaussian noise ε ~ N(0, σ²).
                noise = np.random.normal(loc=0, scale=sigma)
                # New value = original + noise (paper formula x' = x + ε).
                new_scaled_value = scaled_value + noise
                
                # Respect features with a bounded physical meaning.
                if scaling_method == "Min-Max归一化":
                    # Min-Max-scaled features must stay within [0, 1].
                    new_scaled_value = np.clip(new_scaled_value, 0, 1)
                
                new_scaled_row[col] = new_scaled_value
            
            # Convert the noisy scaled features back to the original scale
            # via a temporary single-row DataFrame.
            temp_scaled = pd.DataFrame([new_scaled_row], columns=scaled_to_augment.columns)
            temp_original = inverse_scale_features(
                temp_scaled, original_df, feature_cols, scaler, scaling_method
            )
            
            # Copy the back-transformed values into the original-scale row.
            for col in feature_cols:
                new_row[col] = temp_original.iloc[0][col]
                
                # Apply physical constraints on the original scale.
                if '百分比' in col.lower() or '率' in col.lower() or '%' in col:
                    new_row[col] = np.clip(new_row[col], 0, 100)  # percentages stay within 0-100
            
            # 2. Target columns — optionally perturbed, per user choice.
            if modify_target and len(target_cols) > 0:
                for col in target_cols:
                    original_value = row[col]
                    # Target noise is a smaller fraction of the feature noise
                    # (per the paper's recommendation).
                    target_noise = np.random.normal(loc=0, scale=sigma * 0.3)
                    
                    # If the target column itself was scaled with the features.
                    # NOTE(review): the UI prevents a column from being both a
                    # feature and a target, so this branch looks unreachable in
                    # practice — confirm before relying on it.
                    if scaling_method != "不缩放" and col in feature_cols:
                        # Locate the column within the fitted scaler.
                        col_idx = feature_cols.index(col)
                        # Add noise on the original scale.
                        new_value = original_value + target_noise * (
                            scaler.data_max_[col_idx] - scaler.data_min_[col_idx] 
                            if scaling_method == "Min-Max归一化" 
                            else scaler.scale_[col_idx]
                        )
                    else:
                        new_value = original_value + target_noise
                    
                    new_row[col] = new_value
                    new_scaled_row[col] = new_value  # targets are not scaled; store the raw value
            
            # 3. Info columns are kept as-is; tag the generated row.
            new_row['增强标识'] = f"AGN增强数据（{idx}_{sample_num}）"
            new_scaled_row['增强标识'] = f"AGN增强数据（{idx}_{sample_num}）"
            
            augmented_data.append(new_row.to_frame().T)
            augmented_scaled_data.append(new_scaled_row.to_frame().T)
    
    progress_bar.empty()
    status_text.empty()
    
    # Merge augmented rows with any rows excluded from augmentation.
    augmented_df = pd.concat(augmented_data, ignore_index=True)
    augmented_scaled_df = pd.concat(augmented_scaled_data, ignore_index=True)
    
    if not remaining_df.empty:
        augmented_df = pd.concat([augmented_df, remaining_df], ignore_index=True)
        
        # Also carry the non-augmented rows into the scaled dataset.
        remaining_scaled = scaled_df.drop(train_indices).copy()
        remaining_scaled['增强标识'] = '原始数据'
        augmented_scaled_df = pd.concat([augmented_scaled_df, remaining_scaled], ignore_index=True)
    
    return augmented_df, augmented_scaled_df

def plot_distribution_comparison(original_df, scaled_df, augmented_scaled_df, 
                                feature_cols, target_cols, modify_target, scaling_method):
    """Plot KDE comparisons of the data distribution before/after augmentation.

    This visual check verifies the statistical-consistency requirement from
    the hueffel-et-al-2025 paper: augmented data should preserve the shape
    of the original distributions.

    Parameters:
    - original_df: original data on its native scale
    - scaled_df: scaled (pre-augmentation) data
    - augmented_scaled_df: augmented dataset on the scaled axis; rows are
      distinguished via the '增强标识' column
    - feature_cols / target_cols: columns to plot
    - modify_target: include target columns when True
    - scaling_method: "不缩放" / "Min-Max归一化" / "Z-score标准化"
    """
    # Compare only the generated rows against the originals.
    augmented_only = augmented_scaled_df[augmented_scaled_df['增强标识'] != '原始数据']

    # Decide which columns to plot.
    cols_to_plot = feature_cols.copy()
    if modify_target and len(target_cols) > 0:
        cols_to_plot.extend(target_cols)

    # Cap each figure at 6 subplots to keep charts readable.
    max_cols_per_plot = 6
    for i in range(0, len(cols_to_plot), max_cols_per_plot):
        subset = cols_to_plot[i:i+max_cols_per_plot]
        n = len(subset)

        fig, axes = plt.subplots(n, 1, figsize=(10, 4 * n))
        if n == 1:
            axes = [axes]

        for j, col in enumerate(subset):
            ax = axes[j]
            # Original data distribution (scaled axis, for a fair comparison).
            sns.kdeplot(scaled_df[col], ax=ax, label='Original Data (Scaled)', fill=True, alpha=0.5)
            # Augmented data distribution.
            sns.kdeplot(augmented_only[col], ax=ax, label='AGN Augmented Data (Scaled)', fill=True, alpha=0.5)

            ax.set_title(f'Distribution Comparison Before/After Augmentation: {col}')
            # BUG FIX: the original compared scaling_method against the English
            # literal "No Scaling", which never matches the Chinese option
            # values, so "(Scaled)" was appended even when no scaling was used.
            ax.set_xlabel(f'{col} Value{" (Scaled)" if scaling_method != "不缩放" else ""}')
            ax.set_ylabel('Density')
            ax.legend()

        plt.tight_layout()
        st.pyplot(fig)

        # If scaling is used, also show one example on the original scale.
        if scaling_method != "不缩放" and i == 0 and len(subset) > 0:
            st.subheader("原始尺度下的分布对比（示例）")
            col = subset[0]
            fig, ax = plt.subplots(figsize=(10, 5))
            sns.kdeplot(original_df[col], ax=ax, label='Original Data (Original Scale)', fill=True, alpha=0.5)
            # NOTE(review): augmented_scaled_df holds *scaled* values, so this
            # curve is plotted on a different scale than original_df[col]
            # despite the "Original Scale" label — confirm whether the caller
            # should pass the original-scale augmented frame here instead.
            sns.kdeplot(
                augmented_scaled_df[augmented_scaled_df['增强标识'] != '原始数据'][col], 
                ax=ax, 
                label='AGN Augmented Data (Original Scale)', 
                fill=True, 
                alpha=0.5
            )
            ax.set_title(f'Distribution Comparison Before/After Augmentation (Original Scale): {col}')
            ax.set_xlabel(f'{col} Value (Original Scale)')
            ax.set_ylabel('Density')
            ax.legend()
            plt.tight_layout()
            st.pyplot(fig)

def export_to_csv(df):
    """Serialize *df* to a CSV string prefixed with a UTF-8 BOM.

    BUG FIX: pandas silently ignores the `encoding` argument when writing to
    a text buffer, so the original StringIO + encoding='utf-8-sig' approach
    never emitted the BOM. The BOM is prepended explicitly here so that
    Excel opens the Chinese headers correctly; Streamlit's download_button
    encodes the returned str as UTF-8, turning '\\ufeff' into the BOM bytes.
    """
    return "\ufeff" + df.to_csv(index=False)

def export_to_excel(df):
    """Serialize *df* to the bytes of an .xlsx workbook.

    The data lands on a single sheet named 'AGN增强数据'; the openpyxl
    engine is used for writing.
    """
    buffer = BytesIO()
    # Context manager guarantees the workbook is finalized before reading.
    with pd.ExcelWriter(buffer, engine='openpyxl') as writer:
        df.to_excel(writer, index=False, sheet_name='AGN增强数据')
    return buffer.getvalue()

def export_scaler(scaler, scaling_method):
    """Serialize the fitted scaler with joblib for later inverse transforms.

    Returns the pickled bytes, or None when there is nothing to export
    (no fitted scaler, or scaling was disabled).
    """
    # Nothing to export without a fitted scaler or with scaling disabled.
    if scaling_method == "不缩放" or scaler is None:
        return None

    payload = BytesIO()
    joblib.dump(scaler, payload)
    payload.seek(0)
    return payload.getvalue()

def generate_parameter_log(sigma, n_samples, modify_target, original_rows, augmented_rows, 
                         info_cols, feature_cols, target_cols, scaling_method, scaling_params):
    """Build a plain-text record of the augmentation run for reproducibility
    (matching the paper's repeatability requirement).

    Returns the log as a single newline-joined string.
    """
    timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    lines = [
        "AGN数据增强参数记录",
        f"增强时间: {timestamp}",
        f"原始数据行数: {original_rows}",
        f"增强后数据行数: {augmented_rows}",
        f"噪声水平 (sigma): {sigma}",
        f"每个样本生成的增强样本数: {n_samples}",
        f"是否修改目标列: {'是' if modify_target else '否'}",
        f"特征缩放方法: {scaling_method}",
    ]

    # Column assignments (empty lists are reported as '无').
    lines.append(f"\n信息列 ({len(info_cols)}): {', '.join(info_cols) if info_cols else '无'}")
    lines.append(f"特征列 ({len(feature_cols)}): {', '.join(feature_cols) if feature_cols else '无'}")
    lines.append(f"目标列 ({len(target_cols)}): {', '.join(target_cols) if target_cols else '无'}")

    # Per-column scaling parameters, when scaling was applied.
    if scaling_params and scaling_method != "不缩放":
        lines.append("\n缩放参数:")
        for col, params in scaling_params.items():
            if scaling_method == "Min-Max归一化":
                lines.append(f"  {col}: min={params['min']:.4f}, max={params['max']:.4f}, range={params['range']:.4f}")
            elif scaling_method == "Z-score标准化":
                lines.append(f"  {col}: mean={params['mean']:.4f}, std={params['std']:.4f}")

    return "\n".join(lines)

def main():
    """Streamlit entry point: builds the four-tab AGN augmentation workflow
    (upload/classify -> configure -> results -> export)."""
    # Page configuration.
    st.set_page_config(
        page_title="AGN数据增强工具",
        page_icon="🧪",
        layout="wide"
    )
    
    # Page title and description.
    st.title("🧪 AGN 数据增强工具")
    st.markdown("基于论文《Empowering Reactivity Predictions through Noise-Based Data Augmentation》"
                "(hueffel-et-al-2025) 开发的加性高斯噪声数据增强工具，专为分子反应性预测数据设计。")
    
    # Initialize session state so values survive Streamlit reruns.
    if 'df' not in st.session_state:
        st.session_state.df = None
    if 'scaled_df' not in st.session_state:
        st.session_state.scaled_df = None
    if 'info_cols' not in st.session_state:
        st.session_state.info_cols = []
    if 'feature_cols' not in st.session_state:
        st.session_state.feature_cols = []
    if 'target_cols' not in st.session_state:
        st.session_state.target_cols = []
    if 'augmented_df' not in st.session_state:
        st.session_state.augmented_df = None
    if 'augmented_scaled_df' not in st.session_state:
        st.session_state.augmented_scaled_df = None
    if 'scaler' not in st.session_state:
        st.session_state.scaler = None
    if 'scaling_method' not in st.session_state:
        st.session_state.scaling_method = "不缩放"
    if 'scaling_params' not in st.session_state:
        st.session_state.scaling_params = {}
    
    # Create the workflow tabs.
    tab1, tab2, tab3, tab4 = st.tabs([
        "1. 数据上传与分类", 
        "2. 参数配置", 
        "3. 增强结果", 
        "4. 数据导出"
    ])
    
    with tab1:
        st.header("数据上传与列分类")
        st.markdown("上传包含分子反应性数据的CSV或Excel文件，并分类数据列。")
        
        # File upload.
        uploaded_file = st.file_uploader("选择数据文件", type=['csv', 'xlsx', 'xls'])
        
        if uploaded_file is not None:
            # Load the data (re-parsed on every rerun while a file is attached).
            st.session_state.df = load_data(uploaded_file)
            
            if st.session_state.df is not None:
                # Show basic dataset information.
                st.subheader("数据预览")
                col1, col2 = st.columns(2)
                with col1:
                    st.write(f"数据形状: {st.session_state.df.shape[0]} 行, {st.session_state.df.shape[1]} 列")
                    st.dataframe(st.session_state.df.head())
                
                with col2:
                    st.write("列信息:")
                    col_info = []
                    for col in st.session_state.df.columns:
                        dtype = st.session_state.df[col].dtype
                        has_missing = st.session_state.df[col].isnull().any()
                        # Estimate the order of magnitude of each numeric column.
                        if pd.api.types.is_numeric_dtype(st.session_state.df[col]) and not st.session_state.df[col].isnull().all():
                            col_min = st.session_state.df[col].min(skipna=True)
                            col_max = st.session_state.df[col].max(skipna=True)
                            magnitude = f"{10**np.floor(np.log10(np.abs(col_max-col_min))):.0e}" if col_max != col_min else "常数"
                        else:
                            magnitude = "非数值"
                            
                        col_info.append({
                            "列名": col,
                            "数据类型": str(dtype),
                            "是否有缺失值": "是" if has_missing else "否",
                            "数值量级": magnitude
                        })
                    st.dataframe(pd.DataFrame(col_info), hide_index=True)
                
                # Feature scaling options.
                st.subheader("特征缩放设置")
                st.markdown("""
                选择是否对特征进行缩放处理（建议在特征量级差异较大时使用）：
                - **不缩放**：直接对原始特征添加噪声
                - **Min-Max归一化**：将特征缩放到[0,1]范围
                - **Z-score标准化**：将特征转换为均值为0，标准差为1的分布
                """)
                
                st.session_state.scaling_method = st.radio(
                    "选择缩放方法",
                    options=["不缩放", "Min-Max归一化", "Z-score标准化"],
                    index=0,
                    help="缩放可以确保噪声对不同量级特征的影响程度一致"
                )
                
                # Auto-classify columns, then let the user adjust.
                st.subheader("列分类")
                st.markdown("""
                请指定各列的类型（参考hueffel-et-al-2025论文）：
                - **信息列**：非数值型标识信息（如反应ID，不参与模型训练）
                - **特征列**：数值型描述符（如物理化学参数，模型输入）
                - **目标列**：数值型预测目标（如活化能、产率等，模型输出）
                """)
                
                # Heuristic auto-classification.
                auto_info, auto_feature, auto_target = auto_classify_columns(st.session_state.df)
                
                # User confirms/modifies the classification.
                col1, col2, col3 = st.columns(3)
                
                with col1:
                    st.session_state.info_cols = st.multiselect(
                        "信息列",
                        options=st.session_state.df.columns,
                        default=auto_info,
                        help="非数值型、仅用于标识样本的列（如反应ID、分子名称等）"
                    )
                
                with col2:
                    st.session_state.feature_cols = st.multiselect(
                        "特征列",
                        options=[col for col in st.session_state.df.columns if col not in st.session_state.info_cols],
                        default=auto_feature,
                        help="数值型、用于描述反应/分子属性的列（如电子描述符、空间位阻参数等）"
                    )
                
                with col3:
                    st.session_state.target_cols = st.multiselect(
                        "目标列",
                        options=[col for col in st.session_state.df.columns if col not in st.session_state.info_cols and col not in st.session_state.feature_cols],
                        default=auto_target,
                        help="数值型、需预测的反应指标列（如活化能ΔG‡、对映体过量%ee、反应产率等）"
                    )
                
                # Detect columns assigned to more than one role.
                overlapping = set(st.session_state.info_cols) & set(st.session_state.feature_cols) | \
                             set(st.session_state.info_cols) & set(st.session_state.target_cols) | \
                             set(st.session_state.feature_cols) & set(st.session_state.target_cols)
                
                if overlapping:
                    st.warning(f"检测到列被重复分类: {', '.join(overlapping)}，请修正")
                else:
                    # Run the feature scaling with the chosen method.
                    if st.session_state.feature_cols:
                        st.session_state.scaled_df, st.session_state.scaler, st.session_state.scaling_params = scale_features(
                            st.session_state.df, 
                            st.session_state.feature_cols, 
                            st.session_state.scaling_method
                        )
                        
                        # Report the scaling that was applied.
                        if st.session_state.scaling_method != "不缩放":
                            st.info(f"已使用{st.session_state.scaling_method}对{len(st.session_state.feature_cols)}个特征列进行处理")
                            
                            # Show a before/after example of the scaled features.
                            with st.expander("查看缩放前后的特征示例"):
                                sample_cols = st.session_state.feature_cols[:3]  # show only the first 3 features
                                if sample_cols:
                                    original_sample = st.session_state.df[sample_cols].head(3)
                                    scaled_sample = st.session_state.scaled_df[sample_cols].head(3)
                                    
                                    col1, col2 = st.columns(2)
                                    with col1:
                                        st.write("原始特征值：")
                                        st.dataframe(original_sample)
                                    with col2:
                                        st.write(f"{st.session_state.scaling_method}后的值：")
                                        st.dataframe(scaled_sample)
                
                    st.success("列分类已完成，可以前往参数配置页面")
    
    with tab2:
        st.header("AGN增强参数配置")
        
        if st.session_state.df is None:
            st.info("请先在第一个标签页上传并处理数据")
        else:
            # At least one feature column is required for augmentation.
            if not st.session_state.feature_cols:
                st.warning("请至少选择一个特征列才能进行数据增强")
            else:
                # Noise level (sigma) configuration.
                st.subheader("核心参数设置")
                col1, col2 = st.columns(2)
                
                with col1:
                    sigma = st.slider(
                        "噪声水平 (sigma, σ)",
                        min_value=0.01,
                        max_value=0.5,
                        step=0.01,
                        value=0.1,
                        help="高斯噪声的标准差，参考论文建议范围0.01-0.5。sigma过大会导致数据偏离真实化学规律，过小则增强效果不明显。"
                    )
                    # Allow typing sigma directly.
                    sigma_input = st.number_input(
                        "或直接输入sigma值",
                        min_value=0.01,
                        max_value=0.5,
                        value=sigma,
                        step=0.01
                    )
                    sigma = sigma_input  # the number input wins over the slider
                
                with col2:
                    n_samples = st.slider(
                        "每个原始样本生成的增强样本数 (N)",
                        min_value=1,
                        max_value=20,
                        value=4,
                        help="论文中测试过1-20的范围，SₙAr反应数据集优化参数为4。"
                    )
                    # Allow typing the count directly.
                    n_input = st.number_input(
                        "或直接输入数量",
                        min_value=1,
                        max_value=20,
                        value=n_samples,
                        step=1
                    )
                    n_samples = n_input  # the number input wins over the slider
                
                # Preview of the noise magnitude on the original feature scale.
                if st.session_state.scaling_method != "不缩放" and st.session_state.feature_cols:
                    st.subheader("噪声幅度参考")
                    st.markdown("基于当前sigma值和缩放方法，噪声在原始特征尺度上的大致幅度：")
                    
                    noise_info = []
                    for col in st.session_state.feature_cols[:5]:  # show only the first 5 features
                        if st.session_state.scaling_method == "Min-Max归一化":
                            # Noise magnitude back on the original scale.
                            col_range = st.session_state.scaling_params[col]['range']
                            noise_magnitude = sigma * col_range
                            noise_info.append({
                                "特征列": col,
                                "原始范围": f"{st.session_state.scaling_params[col]['min']:.2f} - {st.session_state.scaling_params[col]['max']:.2f}",
                                "噪声幅度(±3σ)": f"±{3*noise_magnitude:.4f}",
                                "占范围比例": f"{3*sigma*100:.1f}%"
                            })
                        elif st.session_state.scaling_method == "Z-score标准化":
                            # Noise magnitude back on the original scale.
                            col_std = st.session_state.scaling_params[col]['std']
                            noise_magnitude = sigma * col_std
                            noise_info.append({
                                "特征列": col,
                                "原始标准差": f"{col_std:.4f}",
                                "噪声幅度(±3σ)": f"±{3*noise_magnitude:.4f}",
                                "相对幅度": f"{3*sigma*100:.1f}% of σ"
                            })
                    
                    st.dataframe(pd.DataFrame(noise_info), hide_index=True)
                
                # Target column handling option.
                st.subheader("目标列处理")
                modify_target = st.radio(
                    "是否对目标列添加噪声",
                    options=["不修改目标列", "对目标列添加微小噪声"],
                    index=0,
                    help="论文中AGN增强仅针对特征列，目标列保留原始值。"
                ) == "对目标列添加微小噪声"
                
                if modify_target:
                    st.warning("此选项需谨慎使用！仅当目标列存在合理波动范围时使用（如实验测量误差），需确保噪声水平远低于目标列本身的变化幅度。")
                
                # Augmentation scope selection.
                st.subheader("增强范围")
                augment_scope = st.radio(
                    "选择增强范围",
                    options=["全量数据增强", "仅训练集增强"],
                    index=0,
                    help="论文中仅对训练集进行增强，测试集保持不变以确保评估客观性。"
                )
                
                train_indices = None
                if augment_scope == "仅训练集增强":
                    st.markdown("请选择标识训练集的方式：")
                    train_method = st.radio(
                        "训练集标识方式",
                        options=["通过列值标识", "手动选择行范围"],
                        index=0
                    )
                    
                    if train_method == "通过列值标识":
                        train_col = st.selectbox(
                            "选择标识训练集的列",
                            options=st.session_state.df.columns
                        )
                        train_value = st.text_input(
                            "输入表示训练集的列值",
                            value="train"
                        )
                        # NOTE(review): these are index *labels*; apply_agn_augmentation
                        # also uses them positionally via .iloc — confirm the
                        # DataFrame always carries a default RangeIndex here.
                        train_indices = st.session_state.df[st.session_state.df[train_col] == train_value].index
                        st.info(f"检测到训练集样本数: {len(train_indices)}")
                    
                    else:  # manual row-range selection
                        start_row = st.number_input(
                            "训练集起始行（从0开始）",
                            min_value=0,
                            max_value=len(st.session_state.df)-1,
                            value=0
                        )
                        end_row = st.number_input(
                            "训练集结束行（包含）",
                            min_value=start_row,
                            max_value=len(st.session_state.df)-1,
                            value=len(st.session_state.df)//2
                        )
                        train_indices = range(start_row, end_row + 1)
                        st.info(f"训练集范围: {start_row} - {end_row}，共 {end_row - start_row + 1} 个样本")
                
                # Persist parameters in session state for the export tab.
                st.session_state.sigma = sigma
                st.session_state.n_samples = n_samples
                st.session_state.modify_target = modify_target
                
                # Run-augmentation button.
                if st.button("开始增强"):
                    with st.spinner("正在执行AGN数据增强..."):
                        # Execute the AGN augmentation.
                        st.session_state.augmented_df, st.session_state.augmented_scaled_df = apply_agn_augmentation(
                            st.session_state.df,
                            st.session_state.scaled_df,
                            st.session_state.info_cols,
                            st.session_state.feature_cols,
                            st.session_state.target_cols,
                            sigma,
                            n_samples,
                            modify_target,
                            st.session_state.scaling_method,
                            st.session_state.scaler,
                            train_indices
                        )
                    
                    st.success("数据增强完成！")
                    # Attempt to switch to the results tab.
                    # NOTE(review): experimental_set_query_params is deprecated in
                    # recent Streamlit (st.query_params), and setting a query
                    # param does not actually switch st.tabs — confirm intent.
                    st.experimental_set_query_params(tab="3")
    
    with tab3:
        st.header("增强结果展示")
        
        if st.session_state.augmented_df is None:
            st.info("请先在参数配置页面完成数据增强")
        else:
            # Row counts before/after augmentation.
            original_count = len(st.session_state.df)
            augmented_count = len(st.session_state.augmented_df)
            generated_count = augmented_count - original_count
            
            st.subheader("数据量统计")
            col1, col2, col3 = st.columns(3)
            with col1:
                st.metric("原始数据量", original_count)
            with col2:
                st.metric("新增增强数据量", generated_count)
            with col3:
                st.metric("增强后总数据量", augmented_count)
            
            # Data preview.
            st.subheader("数据对比预览")
            st.markdown("原始数据前3行 vs 增强后数据示例：")
            
            col1, col2 = st.columns(2)
            with col1:
                st.write("原始数据：")
                st.dataframe(st.session_state.df.head(3))
            
            with col2:
                st.write("增强后数据（含原始数据）：")
                # Find the augmented samples generated from the first original row.
                first_original_id = st.session_state.df.index[0]
                augmented_samples = st.session_state.augmented_df[
                    st.session_state.augmented_df['增强标识'].str.startswith(f"AGN增强数据（{first_original_id}_")
                ].head(3)
                # Show one original row together with its augmented copies.
                preview_df = pd.concat([
                    st.session_state.augmented_df[st.session_state.augmented_df['增强标识'] == '原始数据'].head(1),
                    augmented_samples
                ])
                st.dataframe(preview_df)
            
            # Distribution visualization.
            st.subheader("增强前后数据分布对比")
            st.markdown("验证增强后的数据是否保持原始数据的统计特征（参考论文要求）：")
            plot_distribution_comparison(
                st.session_state.df,
                st.session_state.scaled_df,
                st.session_state.augmented_scaled_df,
                st.session_state.feature_cols,
                st.session_state.target_cols,
                st.session_state.get('modify_target', False),
                st.session_state.scaling_method
            )
            
            # Button to move on to the export tab.
            if st.button("前往数据导出"):
                # NOTE(review): deprecated API; does not switch st.tabs — see above.
                st.experimental_set_query_params(tab="4")
    
    with tab4:
        st.header("增强后数据导出")
        
        if st.session_state.augmented_df is None:
            st.info("请先完成数据增强")
        else:
            # Export the augmented dataset.
            st.subheader("导出增强数据集")
            export_format = st.radio("选择导出格式", options=["CSV", "Excel"], index=0)
            
            if export_format == "CSV":
                csv_data = export_to_csv(st.session_state.augmented_df)
                st.download_button(
                    label="下载CSV文件",
                    data=csv_data,
                    file_name=f"agn_augmented_data_{datetime.now().strftime('%Y%m%d_%H%M%S')}.csv",
                    mime="text/csv",
                    )
            else:
                excel_data = export_to_excel(st.session_state.augmented_df)
                st.download_button(
                    label="下载Excel文件",
                    data=excel_data,
                    file_name=f"agn_augmented_data_{datetime.now().strftime('%Y%m%d_%H%M%S')}.xlsx",
                    mime="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
                )
            
            # Export the scaler (only when scaling was used).
            if st.session_state.scaling_method != "不缩放" and st.session_state.scaler is not None:
                st.subheader("导出缩放器")
                st.markdown("下载用于特征缩放的转换器，可用于后续将模型预测结果转换回原始尺度。")
                
                scaler_data = export_scaler(st.session_state.scaler, st.session_state.scaling_method)
                if scaler_data:
                    st.download_button(
                        label="下载缩放器（pkl格式）",
                        data=scaler_data,
                        file_name=f"scaler_{st.session_state.scaling_method.replace(' ', '_').lower()}_{datetime.now().strftime('%Y%m%d_%H%M%S')}.pkl",
                        mime="application/octet-stream"
                    )
            
            # Export the parameter log.
            st.subheader("导出增强参数记录")
            st.markdown("记录本次增强使用的所有参数，便于实验复现（符合论文可重复性要求）。")
            
            # Build the parameter log text.
            param_log = generate_parameter_log(
                st.session_state.get('sigma', 0.1),
                st.session_state.get('n_samples', 4),
                st.session_state.get('modify_target', False),
                len(st.session_state.df),
                len(st.session_state.augmented_df),
                st.session_state.info_cols,
                st.session_state.feature_cols,
                st.session_state.target_cols,
                st.session_state.scaling_method,
                st.session_state.scaling_params
            )
            
            st.download_button(
                label="下载参数记录（TXT）",
                data=param_log,
                file_name=f"agn_augmentation_params_{datetime.now().strftime('%Y%m%d_%H%M%S')}.txt",
                mime="text/plain"
            )
            
            # Show the parameter log inline as well.
            with st.expander("查看参数记录内容"):
                st.text(param_log)

if __name__ == "__main__":
    # Entry point when launched via `streamlit run agn_augmentation_app.py`.
    main()
    