import streamlit as st
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader, Subset
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import KFold
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
import io
import random

# Fix every RNG seed so results are reproducible across runs.
SEED = 42

def _seed_everything(seed):
    """Seed the python, numpy and torch RNGs (CUDA too when present)."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)

_seed_everything(SEED)

# Self-attention module (no dropout)
class SelfAttention(nn.Module):
    """Single-head scaled dot-product self-attention.

    One fused linear layer produces queries, keys and values; the
    attention weights are returned alongside the output so callers
    can inspect them later.
    """

    def __init__(self, dim):
        super().__init__()
        self.qkv = nn.Linear(dim, dim * 3)  # fused Q/K/V projection
        self.scale = dim ** -0.5  # 1/sqrt(dim) keeps the logits well-scaled

    def forward(self, x):
        queries, keys, values = self.qkv(x).chunk(3, dim=-1)
        logits = (queries @ keys.transpose(-2, -1)) * self.scale
        weights = torch.softmax(logits, dim=-1)
        attended = weights @ values
        return attended, weights  # output and attention weights

# One-directional cross-attention module
class CrossAttention(nn.Module):
    """Single-head cross-attention from a query stream to a key/value stream.

    Keys and values are projected from the key/value input into the query
    dimension, so the output lives in the query feature space.
    """

    def __init__(self, dim_q, dim_kv):
        super().__init__()
        self.q = nn.Linear(dim_q, dim_q)  # query projection
        self.kv = nn.Linear(dim_kv, dim_q * 2)  # fused key/value projection
        self.scale = dim_q ** -0.5  # 1/sqrt(dim_q) scaling

    def forward(self, x_q, x_kv):
        # One direction only: queries attend to the key/value stream.
        queries = self.q(x_q)
        keys, values = self.kv(x_kv).chunk(2, dim=-1)
        logits = (queries @ keys.transpose(-2, -1)) * self.scale
        weights = torch.softmax(logits, dim=-1)
        return weights @ values, weights  # output and attention weights

# Regression network with heteroscedastic (mean + log-variance) output
class HeteroscedasticRegression(nn.Module):
    """Attention-based regressor that predicts a mean and a log-variance.

    Each component (ester, base, solvent) passes through its own
    self-attention block, then three one-directional cross-attention
    blocks mix the components. The six resulting representations are
    concatenated and fed to an MLP whose two outputs are interpreted as
    the predictive mean and log-variance. After every forward pass the
    attention maps are kept on ``self.attention_weights`` for analysis.
    """

    def __init__(self, dim_ester, dim_base, dim_solvent, hidden_dim=128, dropout=0.3):
        super().__init__()
        # Per-component self-attention
        self.attn_ester = SelfAttention(dim_ester)
        self.attn_base = SelfAttention(dim_base)
        self.attn_solvent = SelfAttention(dim_solvent)

        # One-directional cross-attention between components
        self.cross_ester_base = CrossAttention(dim_ester, dim_base)  # ester -> base
        self.cross_ester_solvent = CrossAttention(dim_ester, dim_solvent)  # ester -> solvent
        self.cross_base_solvent = CrossAttention(dim_base, dim_solvent)  # base -> solvent

        # Width of the concatenation: the three self-attended streams plus
        # the three cross-attended outputs (each has its query's width).
        combined_dim = 3 * dim_ester + 2 * dim_base + dim_solvent

        self.regressor = nn.Sequential(
            nn.Linear(combined_dim, hidden_dim),
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(hidden_dim, hidden_dim // 2),
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(hidden_dim // 2, 2)  # two heads: mean and log-variance
        )

    def forward(self, ester, base, solvent):
        # Self-attention within each component
        h_ester, w_ester = self.attn_ester(ester)
        h_base, w_base = self.attn_base(base)
        h_solvent, w_solvent = self.attn_solvent(solvent)

        # Cross-attention between components (query -> key/value)
        h_eb, w_eb = self.cross_ester_base(h_ester, h_base)
        h_es, w_es = self.cross_ester_solvent(h_ester, h_solvent)
        h_bs, w_bs = self.cross_base_solvent(h_base, h_solvent)

        # Fuse everything and regress to (mean, log_var)
        fused = torch.cat([h_ester, h_base, h_solvent, h_eb, h_es, h_bs], dim=-1)
        out = self.regressor(fused)
        mean, log_var = out[:, 0], out[:, 1]

        # Keep the attention maps around for downstream visualization.
        self.attention_weights = {
            'ester': w_ester, 'base': w_base, 'solvent': w_solvent,
            'ester_base': w_eb, 'ester_solvent': w_es, 'base_solvent': w_bs
        }
        return mean, log_var

# Heteroscedastic loss: Gaussian NLL with a per-sample predicted variance
def heteroscedastic_loss(mean, log_var, target):
    """Mean Gaussian negative log-likelihood of ``target`` under N(mean, exp(log_var)).

    All three tensors must contain one value per sample (any shape that
    flattens to (B,)).
    """
    # BUGFIX: the model emits 1-D (B,) tensors for mean/log_var, while the
    # old code unsqueezed a 1-D target to (B, 1); `target - mean` then
    # broadcast to a (B, B) matrix and the loss averaged over all sample
    # pairs. Flatten everything so each target matches its own prediction.
    mean = mean.reshape(-1)
    log_var = log_var.reshape(-1)
    target = target.reshape(-1)
    precision = torch.exp(-log_var)  # 1/sigma^2
    return torch.mean(0.5 * precision * (target - mean) ** 2 + 0.5 * log_var)

# Dataset wrapping the racemization dataframe
class RacemizationDataset(Dataset):
    """Exposes ester/base/solvent feature groups and the target as tensors.

    The feature matrices are materialized once as float32 numpy arrays so
    that per-fold standardization can rewrite them in place.
    """

    def __init__(self, df, ester_cols, base_cols, solvent_cols, target_col):
        self.ester = df[ester_cols].values.astype(np.float32)
        self.base = df[base_cols].values.astype(np.float32)
        self.solvent = df[solvent_cols].values.astype(np.float32)
        self.target = df[target_col].values.astype(np.float32)

    def __len__(self):
        return self.target.shape[0]

    def __getitem__(self, idx):
        ester_row = torch.from_numpy(self.ester[idx])
        base_row = torch.from_numpy(self.base[idx])
        solvent_row = torch.from_numpy(self.solvent[idx])
        # Scalar target -> 0-d tensor, matching the original behavior.
        target_val = torch.from_numpy(np.asarray(self.target[idx]))
        return ester_row, base_row, solvent_row, target_val

# Training loop with early stopping
def train_model(model, train_loader, val_loader, criterion, optimizer,
                device, epochs, patience=10):
    """Train ``model`` with early stopping on validation loss.

    Args:
        model: the network to train (moved to ``device`` by the caller).
        train_loader / val_loader: loaders yielding (ester, base, solvent, target).
        criterion: callable (mean, log_var, target) -> scalar loss.
        optimizer: torch optimizer over ``model.parameters()``.
        device: torch device for the batches.
        epochs: maximum number of epochs.
        patience: stop after this many epochs without validation improvement.

    Returns:
        (model, history) — the model reloaded with its best weights and a
        dict of per-epoch 'train_loss' / 'val_loss' lists.
    """
    import copy  # local: only needed to snapshot the best weights

    best_val_loss = float('inf')
    # BUGFIX: snapshot now so load_state_dict below is always defined,
    # even when epochs == 0.
    best_state = copy.deepcopy(model.state_dict())
    counter = 0
    history = {'train_loss': [], 'val_loss': []}

    for epoch in range(epochs):
        model.train()
        train_loss = 0.0

        for ester, base, solvent, target in train_loader:
            ester, base, solvent, target = (
                ester.to(device), base.to(device), solvent.to(device), target.to(device)
            )

            optimizer.zero_grad()
            mean, log_var = model(ester, base, solvent)
            loss = criterion(mean, log_var, target)

            loss.backward()
            optimizer.step()
            # Weight by batch size so the epoch average is per-sample.
            train_loss += loss.item() * ester.size(0)

        train_loss /= len(train_loader.dataset)
        history['train_loss'].append(train_loss)

        # Validation pass
        val_loss, _, _ = evaluate_model(model, val_loader, criterion, device)
        history['val_loss'].append(val_loss)

        # Early stopping
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            # BUGFIX: state_dict() returns references to the live parameter
            # tensors, so storing it directly would be silently overwritten
            # by further training. Deep-copy to freeze the best weights.
            best_state = copy.deepcopy(model.state_dict())
            counter = 0
        else:
            counter += 1
            if counter >= patience:
                st.write(f"早停在第 {epoch+1} 轮")
                break

        if (epoch + 1) % 10 == 0:
            st.write(f"轮次 {epoch+1}/{epochs}, 训练损失: {train_loss:.4f}, 验证损失: {val_loss:.4f}")

    model.load_state_dict(best_state)
    return model, history

# Evaluation over a full loader
def evaluate_model(model, loader, criterion, device):
    """Compute the average loss plus MAE / RMSE / R² over ``loader``.

    Returns (avg_loss, (mae, rmse, r2), (predictions, targets)) where the
    last pair are plain python lists of per-sample values.
    """
    model.eval()
    total_loss = 0.0
    all_preds, all_targets = [], []

    with torch.no_grad():
        for ester, base, solvent, target in loader:
            ester = ester.to(device)
            base = base.to(device)
            solvent = solvent.to(device)
            target = target.to(device)

            mean, log_var = model(ester, base, solvent)
            batch_loss = criterion(mean, log_var, target)

            # Accumulate a per-sample weighted loss and the raw values.
            total_loss += batch_loss.item() * ester.size(0)
            all_preds.extend(mean.cpu().numpy())
            all_targets.extend(target.cpu().numpy())

    avg_loss = total_loss / len(loader.dataset)
    metrics = (
        mean_absolute_error(all_targets, all_preds),
        np.sqrt(mean_squared_error(all_targets, all_preds)),
        r2_score(all_targets, all_preds),
    )
    return avg_loss, metrics, (all_preds, all_targets)

# Prediction with MC Dropout
def mc_dropout_predict(model, loader, device, n_samples=20):
    """Estimate predictive mean and uncertainty via Monte-Carlo dropout.

    Runs the model in train mode (so dropout stays stochastic) n_samples
    times over ``loader`` and returns the per-sample mean and standard
    deviation of the predicted means.
    """
    model.train()  # keep dropout layers active
    sampled = []

    with torch.no_grad():
        for _ in range(n_samples):
            pass_preds = []
            for ester, base, solvent, _ in loader:
                mean, _ = model(
                    ester.to(device), base.to(device), solvent.to(device)
                )
                pass_preds.extend(mean.cpu().numpy())
            sampled.append(pass_preds)

    # Spread across the stochastic passes is the uncertainty estimate.
    sampled = np.array(sampled)
    return np.mean(sampled, axis=0), np.std(sampled, axis=0)

# K-fold cross-validation driver
def cross_validate(df, ester_cols, base_cols, solvent_cols, target_col,
                   n_splits=5, epochs=100, batch_size=32, lr=1e-4, patience=10):
    """Run KFold cross-validation: per-fold standardization, training, evaluation.

    Returns:
        fold_results: list of dicts with per-fold metrics, the trained model,
            its training history, predictions/targets and the fitted scalers.
        all_attention_weights: the attention maps of the first validation
            batch of each fold.
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    st.write(f"使用设备: {device}")

    kf = KFold(n_splits=n_splits, shuffle=True, random_state=SEED)
    fold_results = []
    all_attention_weights = []

    for fold, (train_idx, val_idx) in enumerate(kf.split(df)):
        st.write(f"\n===== 折 {fold+1}/{n_splits} =====")

        # BUGFIX: the dataset used to be built once outside the loop and
        # standardized in place, so every fold after the first fitted its
        # scalers on (and re-transformed) data already scaled by previous
        # folds. Rebuilding from the raw dataframe keeps folds independent.
        full_dataset = RacemizationDataset(df, ester_cols, base_cols, solvent_cols, target_col)

        train_dataset = Subset(full_dataset, train_idx)
        val_dataset = Subset(full_dataset, val_idx)

        # Fit the scalers on the training rows only (no validation leakage).
        scaler_ester = StandardScaler().fit(full_dataset.ester[train_idx])
        scaler_base = StandardScaler().fit(full_dataset.base[train_idx])
        scaler_solvent = StandardScaler().fit(full_dataset.solvent[train_idx])

        # Apply the train-fitted scalers to every row in one vectorized call
        # (train_idx and val_idx together cover all rows). Cast back to
        # float32 because StandardScaler.transform returns float64.
        full_dataset.ester = scaler_ester.transform(full_dataset.ester).astype(np.float32)
        full_dataset.base = scaler_base.transform(full_dataset.base).astype(np.float32)
        full_dataset.solvent = scaler_solvent.transform(full_dataset.solvent).astype(np.float32)

        # Data loaders
        train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
        val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False)

        # Model, loss function and optimizer
        model = HeteroscedasticRegression(
            len(ester_cols), len(base_cols), len(solvent_cols)
        ).to(device)
        criterion = heteroscedastic_loss
        optimizer = optim.Adam(model.parameters(), lr=lr)

        # Train
        st.write("开始训练...")
        model, history = train_model(
            model, train_loader, val_loader, criterion, optimizer,
            device, epochs, patience
        )

        # Evaluate on the held-out fold
        val_loss, metrics, (preds, targets) = evaluate_model(model, val_loader, criterion, device)
        mae, rmse, r2 = metrics

        st.write(f"折 {fold+1} 结果:")
        st.write(f"MAE: {mae:.4f}, RMSE: {rmse:.4f}, R²: {r2:.4f}")

        fold_results.append({
            'fold': fold+1, 'mae': mae, 'rmse': rmse, 'r2': r2,
            'model': model, 'history': history, 'preds': preds, 'targets': targets,
            'scalers': {'ester': scaler_ester, 'base': scaler_base, 'solvent': scaler_solvent}
        })

        # Collect attention weights from the first validation batch.
        model.eval()
        ester, base, solvent, _ = next(iter(val_loader))
        ester, base, solvent = ester.to(device), base.to(device), solvent.to(device)

        with torch.no_grad():
            model(ester, base, solvent)

        all_attention_weights.append(model.attention_weights)

    # Average performance across folds
    avg_mae = np.mean([r['mae'] for r in fold_results])
    avg_rmse = np.mean([r['rmse'] for r in fold_results])
    avg_r2 = np.mean([r['r2'] for r in fold_results])

    st.write("\n===== 交叉验证平均结果 =====")
    st.write(f"平均 MAE: {avg_mae:.4f} ± {np.std([r['mae'] for r in fold_results]):.4f}")
    st.write(f"平均 RMSE: {avg_rmse:.4f} ± {np.std([r['rmse'] for r in fold_results]):.4f}")
    st.write(f"平均 R²: {avg_r2:.4f} ± {np.std([r['r2'] for r in fold_results]):.4f}")

    return fold_results, all_attention_weights

# Self-attention heatmap (enlarged, one figure per page row)
def visualize_attention(attention_weights, feature_names, title, figsize=(14, 12)):
    """Render a square self-attention matrix as a heatmap PNG.

    Args:
        attention_weights: attention tensor; a 3-D tensor is averaged over
            its first (batch) dimension.
        feature_names: axis labels (used for both rows and columns).
        title: figure title.
        figsize: matplotlib figure size.

    Returns:
        io.BytesIO positioned at 0, containing the rendered PNG.
    """
    attn_np = attention_weights.detach().cpu().numpy()
    if attn_np.ndim == 3:  # average over the batch dimension
        attn_np = attn_np.mean(axis=0)

    # Reconcile label count with matrix size (crop whichever is longer).
    n_features = len(feature_names)
    if attn_np.shape[0] > n_features:
        attn_np = attn_np[:n_features, :n_features]
    elif attn_np.shape[0] < n_features:
        feature_names = feature_names[:attn_np.shape[0]]

    # Draw the heatmap (enlarged figure, adjusted label fonts).
    fig = plt.figure(figsize=figsize)
    ax = sns.heatmap(
        attn_np, xticklabels=feature_names, yticklabels=feature_names,
        cmap='viridis', annot=False, fmt='.2f', cbar_kws={'shrink': 0.8}
    )
    # Larger, rotated tick labels to avoid overlap.
    ax.tick_params(axis='x', labelsize=12, rotation=45, labelrotation=45)
    ax.tick_params(axis='y', labelsize=12, rotation=0)
    plt.title(title, fontsize=16, pad=20)
    plt.tight_layout()

    # Save into an in-memory buffer.
    buf = io.BytesIO()
    plt.savefig(buf, format='png', bbox_inches='tight', dpi=300)
    # BUGFIX: close the figure; without this every call leaked a live
    # figure and repeated Streamlit reruns accumulated memory.
    plt.close(fig)
    buf.seek(0)
    return buf

# Cross-attention heatmap (enlarged, one figure per page row)
def visualize_cross_attention(attention_weights, query_names, key_names, title, figsize=(14, 10)):
    """Render a rectangular cross-attention matrix as a heatmap PNG.

    Args:
        attention_weights: attention tensor; a 3-D tensor is averaged over
            its first (batch) dimension.
        query_names: labels for the rows (query features).
        key_names: labels for the columns (key features).
        title: figure title.
        figsize: matplotlib figure size.

    Returns:
        io.BytesIO positioned at 0, containing the rendered PNG.
    """
    attn_np = attention_weights.detach().cpu().numpy()
    if attn_np.ndim == 3:  # average over the batch dimension
        attn_np = attn_np.mean(axis=0)

    # Crop the matrix to the provided label counts.
    n_query = len(query_names)
    n_key = len(key_names)
    attn_np = attn_np[:n_query, :n_key]

    # Draw the heatmap (enlarged figure, adjusted label fonts).
    fig = plt.figure(figsize=figsize)
    ax = sns.heatmap(
        attn_np, xticklabels=key_names, yticklabels=query_names,
        cmap='viridis', annot=False, fmt='.2f', cbar_kws={'shrink': 0.8}
    )
    # Larger, rotated tick labels to avoid overlap.
    ax.tick_params(axis='x', labelsize=12, rotation=45, labelrotation=45)
    ax.tick_params(axis='y', labelsize=12, rotation=0)
    plt.title(title, fontsize=16, pad=20)
    plt.tight_layout()

    # Save into an in-memory buffer.
    buf = io.BytesIO()
    plt.savefig(buf, format='png', bbox_inches='tight', dpi=300)
    # BUGFIX: close the figure; without this every call leaked a live
    # figure and repeated Streamlit reruns accumulated memory.
    plt.close(fig)
    buf.seek(0)
    return buf

# Main application (tab layout)
def main():
    """Streamlit entry point: four-tab workflow (upload -> features -> params -> results)."""
    st.set_page_config(page_title="手性活性酯消旋预测", layout="wide")
    st.title("手性活性酯消旋预测与注意力分析")
    st.write("基于注意力机制的消旋影响因素分析（支持CSV/XLSX数据）")
    
    # Initialize session state (holds data, feature choices, params, results)
    if 'data' not in st.session_state:
        st.session_state['data'] = None
    if 'features' not in st.session_state:
        st.session_state['features'] = None  # stores target_col, ester_cols, etc.
    if 'params' not in st.session_state:
        st.session_state['params'] = None    # stores training hyper-parameters
    if 'results' not in st.session_state:
        st.session_state['results'] = None   # stores cross-validation results
    if 'attention_weights' not in st.session_state:
        st.session_state['attention_weights'] = None
    
    # Create the tab pages (tabs instead of a sidebar)
    tab1, tab2, tab3, tab4 = st.tabs(["1. 数据上传", "2. 特征选择", "3. 模型参数", "4. 结果分析"])
    
    # ------------------------------ Tab 1: data upload ------------------------------
    with tab1:
        st.header("数据上传")
        st.write("支持CSV或Excel格式，需包含目标列（消旋程度）和特征列（酯/碱/溶剂）")
        
        # File upload widget
        uploaded_file = st.file_uploader(
            "选择数据文件", type=["csv", "xlsx"],
            help="示例格式：每行1个样本，列包含消旋程度、酯特征、碱特征、溶剂特征"
        )
        
        if uploaded_file is not None:
            try:
                # Dispatch on the file extension
                file_ext = uploaded_file.name.split(".")[-1].lower()
                if file_ext == "csv":
                    df = pd.read_csv(uploaded_file)
                elif file_ext == "xlsx":
                    df = pd.read_excel(uploaded_file)
                else:
                    st.error("不支持的格式！仅支持CSV/XLSX")
                    df = None
                
                if df is not None:
                    st.session_state['data'] = df
                    st.success(f"数据加载成功！共 {len(df)} 样本，{len(df.columns)} 列")
                    
                    # Preview the first five rows
                    st.subheader("数据预览")
                    st.dataframe(df.head(), use_container_width=True)
                    
                    # Nudge the user towards the next tab
                    st.info("请切换到「2. 特征选择」Tab继续操作")
            
            except Exception as e:
                st.error(f"文件读取失败：{str(e)}")
                st.session_state['data'] = None
    
    # ------------------------------ Tab 2: feature selection ------------------------------
    with tab2:
        st.header("特征选择")
        df = st.session_state['data']
        
        if df is None:
            st.warning("请先在「1. 数据上传」Tab加载数据")
        else:
            # Pick the target column (degree of racemization)
            target_col = st.selectbox(
                "选择目标列（需预测的消旋程度）",
                options=df.columns,
                help="例如：消旋率、消旋时间等数值型目标"
            )
            
            # Pick the feature columns for each component
            remaining_cols = df.columns.drop(target_col)
            
            st.subheader("手性活性酯特征列")
            ester_cols = st.multiselect(
                "选择酯的DFT特征（如键长、扭转能垒等）",
                options=remaining_cols,
                help="至少选择1个特征"
            )
            
            st.subheader("碱特征列")
            base_cols = st.multiselect(
                "选择碱的特征（如质子亲和能、碱性强度等）",
                options=remaining_cols.drop(ester_cols),
                help="至少选择1个特征"
            )
            
            st.subheader("溶剂特征列")
            solvent_cols = st.multiselect(
                "选择溶剂的特征（如介电常数、极性等）",
                options=remaining_cols.drop(ester_cols).drop(base_cols),
                help="至少选择1个特征"
            )
            
            # Validate that every component has at least one feature
            if ester_cols and base_cols and solvent_cols:
                # Persist the selection in session state
                st.session_state['features'] = {
                    'target_col': target_col,
                    'ester_cols': ester_cols,
                    'base_cols': base_cols,
                    'solvent_cols': solvent_cols
                }
                st.success("特征选择完成！请切换到「3. 模型参数」Tab设置训练参数")
                
                # Show a summary of the selected features
                st.subheader("已选特征汇总")
                st.write(f"目标列：{target_col}")
                st.write(f"酯特征：{', '.join(ester_cols)}")
                st.write(f"碱特征：{', '.join(base_cols)}")
                st.write(f"溶剂特征：{', '.join(solvent_cols)}")
            else:
                st.warning("请为酯、碱、溶剂分别至少选择1个特征")
    
    # ------------------------------ Tab 3: model parameters ------------------------------
    with tab3:
        st.header("模型参数设置")
        features = st.session_state['features']
        
        if features is None:
            st.warning("请先在「2. 特征选择」Tab完成特征配置")
        else:
            # Cross-validation settings
            st.subheader("交叉验证参数")
            n_splits = st.slider(
                "交叉验证折数（KFold）", min_value=2, max_value=10, value=5,
                help="折数越多，结果越稳定，但训练时间越长"
            )
            
            st.subheader("训练参数")
            epochs = st.slider("训练轮次（Epochs）", min_value=50, max_value=500, value=100)
            batch_size = st.slider("批次大小（Batch Size）", min_value=8, max_value=128, value=32)
            lr = st.text_input("学习率（Learning Rate）", value="0.0001", help="建议范围：1e-4 ~ 1e-3")
            patience = st.slider("早停耐心值", min_value=5, max_value=30, value=10,
                                 help="验证损失连续N轮不下降则停止训练")
            
            # Validate the learning-rate text field
            try:
                lr = float(lr)
                # Persist the hyper-parameters in session state
                st.session_state['params'] = {
                    'n_splits': n_splits, 'epochs': epochs,
                    'batch_size': batch_size, 'lr': lr, 'patience': patience
                }
                
                # Training trigger
                if st.button("开始训练模型", type="primary", use_container_width=True):
                    with st.spinner("模型训练中...（请耐心等待）"):
                        # Pull data and feature choices from session state
                        df = st.session_state['data']
                        target_col = features['target_col']
                        ester_cols = features['ester_cols']
                        base_cols = features['base_cols']
                        solvent_cols = features['solvent_cols']
                        
                        # Run cross-validation
                        results, attention_weights = cross_validate(
                            df, ester_cols, base_cols, solvent_cols, target_col,
                            n_splits=n_splits, epochs=epochs,
                            batch_size=batch_size, lr=lr, patience=patience
                        )
                        
                        # Persist the results in session state
                        st.session_state['results'] = results
                        st.session_state['attention_weights'] = attention_weights
                        st.success("模型训练完成！请切换到「4. 结果分析」Tab查看详情")
            
            except ValueError:
                st.error("学习率格式错误！请输入数字（如0.0001）")
    
    # ------------------------------ Tab 4: result analysis ------------------------------
    with tab4:
        st.header("模型结果分析")
        results = st.session_state['results']
        attention_weights = st.session_state['attention_weights']
        features = st.session_state['features']
        
        if results is None:
            st.warning("请先在「3. 模型参数」Tab完成模型训练")
        else:
            # 1. Performance-metric summary
            st.subheader("1. 模型性能指标")
            # Collect the per-fold results into a DataFrame
            results_df = pd.DataFrame([
                {'折数': r['fold'], 'MAE': f"{r['mae']:.4f}", 
                 'RMSE': f"{r['rmse']:.4f}", 'R²': f"{r['r2']:.4f}"} 
                for r in results
            ])
            st.dataframe(results_df, use_container_width=True)
            
            # Line plots of the per-fold metrics
            fig, axes = plt.subplots(1, 3, figsize=(18, 6))
            folds = [r['fold'] for r in results]
            
            # MAE
            axes[0].plot(folds, [r['mae'] for r in results], 'o-', color='#1f77b4', linewidth=2, markersize=8)
            axes[0].set_title('各折MAE', fontsize=14)
            axes[0].set_xlabel('折数', fontsize=12)
            axes[0].set_ylabel('MAE', fontsize=12)
            axes[0].grid(alpha=0.3)
            
            # RMSE
            axes[1].plot(folds, [r['rmse'] for r in results], 'o-', color='#2ca02c', linewidth=2, markersize=8)
            axes[1].set_title('各折RMSE', fontsize=14)
            axes[1].set_xlabel('折数', fontsize=12)
            axes[1].set_ylabel('RMSE', fontsize=12)
            axes[1].grid(alpha=0.3)
            
            # R²
            axes[2].plot(folds, [r['r2'] for r in results], 'o-', color='#ff7f0e', linewidth=2, markersize=8)
            axes[2].set_title('各折R²', fontsize=14)
            axes[2].set_xlabel('折数', fontsize=12)
            axes[2].set_ylabel('R²', fontsize=12)
            axes[2].grid(alpha=0.3)
            
            plt.tight_layout()
            st.pyplot(fig)
        
            # 2. Predicted-vs-actual scatter plot (pooled across folds)
            st.subheader("2. 预测值 vs 实际值")
            all_preds = []
            all_targets = []
            for r in results:
                all_preds.extend(r['preds'])
                all_targets.extend(r['targets'])
            
            fig, ax = plt.subplots(figsize=(10, 8))
            ax.scatter(all_targets, all_preds, alpha=0.6, color='#1f77b4', s=60)
            # Reference line (y = x)
            min_val = min(min(all_targets), min(all_preds))
            max_val = max(max(all_targets), max(all_preds))
            ax.plot([min_val, max_val], [min_val, max_val], 'r--', linewidth=2, label='理想预测线(y=x)')
            
            ax.set_xlabel('实际值', fontsize=14)
            ax.set_ylabel('预测值', fontsize=14)
            ax.set_title('预测值 vs 实际值', fontsize=16)
            ax.legend(fontsize=12)
            ax.grid(alpha=0.3)
            plt.tight_layout()
            st.pyplot(fig)
        
            # NOTE: the "3. attention-weight analysis" section below was reworked as follows.

            # 3. Attention-weight analysis (with an explicit `features` guard)
            st.subheader("3. 注意力权重分析")
            # Let the user choose which fold to inspect
            if attention_weights is not None and len(attention_weights) > 0:
                fold_idx = st.slider(
                    "选择查看的交叉验证折数",
                    min_value=0, max_value=len(attention_weights)-1,
                    value=0, format="折 %d"
                )
                selected_attn = attention_weights[fold_idx]
                
                # Guard: make sure feature selection was completed first
                if features is None:
                    st.warning("特征数据未找到，请先完成「2. 特征选择」步骤")
                else:
                    try:
                        ester_cols = features['ester_cols']
                        base_cols = features['base_cols']
                        solvent_cols = features['solvent_cols']
                        
                        # 3.1 Self-attention heatmaps (one figure per row)
                        st.subheader("3.1 自注意力权重（组内特征关联）")
                        
                        st.write("手性活性酯自注意力（行：关注特征 → 列：被关注特征）")
                        ester_attn_buf = visualize_attention(
                            selected_attn['ester'], ester_cols, "手性活性酯自注意力权重"
                        )
                        st.image(ester_attn_buf, use_container_width=True)
                        st.divider()  # separator to keep the layout tidy
                        
                        st.write("碱自注意力（行：关注特征 → 列：被关注特征）")
                        base_attn_buf = visualize_attention(
                            selected_attn['base'], base_cols, "碱自注意力权重"
                        )
                        st.image(base_attn_buf, use_container_width=True)
                        st.divider()
                        
                        st.write("溶剂自注意力（行：关注特征 → 列：被关注特征）")
                        solvent_attn_buf = visualize_attention(
                            selected_attn['solvent'], solvent_cols, "溶剂自注意力权重"
                        )
                        st.image(solvent_attn_buf, use_container_width=True)
                        st.divider()
                        
                        # 3.2 Cross-attention heatmaps (one figure per row)
                        st.subheader("3.2 交叉注意力权重（跨组分特征关联）")
                        
                        st.write("酯→碱交叉注意力（行：酯特征 → 列：碱特征，即酯关注碱的哪些特征）")
                        eb_attn_buf = visualize_cross_attention(
                            selected_attn['ester_base'], ester_cols, base_cols, "酯→碱 交叉注意力权重"
                        )
                        st.image(eb_attn_buf, use_container_width=True)
                        st.divider()
                        
                        st.write("酯→溶剂交叉注意力（行：酯特征 → 列：溶剂特征，即酯关注溶剂的哪些特征）")
                        es_attn_buf = visualize_cross_attention(
                            selected_attn['ester_solvent'], ester_cols, solvent_cols, "酯→溶剂 交叉注意力权重"
                        )
                        st.image(es_attn_buf, use_container_width=True)
                        st.divider()
                        
                        st.write("碱→溶剂交叉注意力（行：碱特征 → 列：溶剂特征，即碱关注溶剂的哪些特征）")
                        bs_attn_buf = visualize_cross_attention(
                            selected_attn['base_solvent'], base_cols, solvent_cols, "碱→溶剂 交叉注意力权重"
                        )
                        st.image(bs_attn_buf, use_container_width=True)
                        st.divider()
                    except KeyError as e:
                        st.error(f"特征数据不完整：缺少 {str(e)}，请重新完成「2. 特征选择」步骤")
            else:
                st.warning("请先完成模型训练，生成注意力权重数据")

            
            # 4. Uncertainty analysis (MC Dropout)
            st.subheader("4. 预测不确定性分析（MC Dropout）")
            if st.button("计算并显示不确定性", use_container_width=True):
                with st.spinner("计算不确定性中..."):
                    # Use the model trained on the first fold
                    model = results[0]['model']
                    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
                    df = st.session_state['data']
                    features = st.session_state['features']
                    
                    # Build the full dataset and standardize it with the fold-0 scalers
                    full_dataset = RacemizationDataset(
                        df, features['ester_cols'], features['base_cols'],
                        features['solvent_cols'], features['target_col']
                    )
                    scalers = results[0]['scalers']
                    for i in range(len(full_dataset)):
                        full_dataset.ester[i] = scalers['ester'].transform(full_dataset.ester[i].reshape(1, -1))[0]
                        full_dataset.base[i] = scalers['base'].transform(full_dataset.base[i].reshape(1, -1))[0]
                        full_dataset.solvent[i] = scalers['solvent'].transform(full_dataset.solvent[i].reshape(1, -1))[0]
                    
                    # MC-Dropout prediction
                    loader = DataLoader(full_dataset, batch_size=32, shuffle=False)
                    mean_preds, std_preds = mc_dropout_predict(model, loader, device)
                    
                    # Plot predictions with uncertainty bands
                    fig, ax = plt.subplots(figsize=(14, 8))
                    sorted_idx = np.argsort(full_dataset.target)
                    x = np.arange(len(mean_preds))
                    
                    # Predicted values with error bars (the uncertainty)
                    ax.errorbar(
                        x, mean_preds[sorted_idx], yerr=std_preds[sorted_idx],
                        fmt='o', alpha=0.7, color='#1f77b4', ecolor='#ff7f0e',
                        capsize=4, markersize=6, label='预测值±不确定性'
                    )
                    # Actual values
                    ax.plot(
                        x, full_dataset.target[sorted_idx], 'r-', alpha=0.8,
                        linewidth=2, label='实际值'
                    )
                    
                    ax.set_xlabel('样本索引（按实际值排序）', fontsize=14)
                    ax.set_ylabel('消旋程度', fontsize=14)
                    ax.set_title('预测值与不确定性分析（橙色误差线为不确定性范围）', fontsize=16)
                    ax.legend(fontsize=12)
                    ax.grid(alpha=0.3)
                    plt.tight_layout()
                    st.pyplot(fig)

# Script entry point: launch the Streamlit app when executed directly.
if __name__ == "__main__":
    main()