import os
# Workaround for an mlflow model-saving error: disable wrapt's C extensions
os.environ['WRAPT_DISABLE_EXTENSIONS'] = 'true'    
import mlflow ,json  
import mlflow.keras,mlflow.tensorflow  
import streamlit as st
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler, LabelEncoder
from sklearn.model_selection import train_test_split
from tensorflow.keras.models import Sequential, load_model
from tensorflow.keras.layers import LSTM, Dense, Dropout
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score, accuracy_score, classification_report
import joblib,math 
import tensorflow as tf 
from datetime import datetime, timedelta
import tempfile
import warnings
import pickle 
import matplotlib as mpl
import matplotlib.pyplot as plt
from tensorflow.keras.models import load_model
import shutil
import time  
from hyperopt import fmin, tpe, hp, Trials, STATUS_OK

warnings.filterwarnings("ignore")
# '0' keeps TensorFlow op determinism disabled (the default behaviour)
os.environ['TF_DETERMINISTIC_OPS'] = '0'  


# Temporary directory where models are staged before being logged to MLflow
MODEL_DIR = "saved_models_tmp" 
os.makedirs(MODEL_DIR, exist_ok=True)  

def preprocess_data(train_data, features_columns, target_column, val_data=None, ts_column=None, 
                   n_steps=12, test_size=0.2, task_type='regression'):
    """Preprocess data into LSTM-ready sequences for regression, binary or
    multiclass classification.

    Parameters:
        train_data: training DataFrame; split into train/val when val_data is None
        features_columns: list of feature column names
        target_column: name of the target column
        val_data: optional validation DataFrame
        ts_column: optional timestamp column; when given, data is sorted by it
            and it becomes the index (the split is then not shuffled)
        n_steps: lookback length; each sample spans n_steps + 1 consecutive rows
        test_size: validation fraction used when splitting train_data
        task_type: 'regression' | 'binary' | 'multiclass'

    Returns:
        dict with X_train/X_val/y_train/y_val, the fitted feature scaler,
        a target scaler (regression) or label encoder (classification) plus
        bookkeeping metadata; None if preprocessing fails.
    """
    try:
        # Validate the task type early
        if task_type not in ['regression', 'binary', 'multiclass']:
            raise ValueError(f"task_type必须为'regression'/'binary'/'multiclass'，当前为{task_type}")
        
        # Handle the time-series index and the train/validation split.
        # NOTE: must be an explicit `is None` check — `not val_data` raises
        # "truth value of a DataFrame is ambiguous" whenever a validation
        # DataFrame is actually supplied by the caller.
        if val_data is None:
            if ts_column:
                train_data[ts_column] = pd.to_datetime(train_data[ts_column])
                train_data = train_data.sort_values(by=ts_column)
                train_data.set_index(ts_column, inplace=True)
                # Time-ordered data: split without shuffling
                train_data, val_data = train_test_split(train_data, test_size=test_size, shuffle=False)
            else:
                train_data, val_data = train_test_split(train_data, test_size=test_size, shuffle=True)
        else:
            if ts_column:
                train_data[ts_column] = pd.to_datetime(train_data[ts_column])
                train_data = train_data.sort_values(by=ts_column)
                train_data.set_index(ts_column, inplace=True)
                val_data[ts_column] = pd.to_datetime(val_data[ts_column])
                val_data = val_data.sort_values(by=ts_column)
                val_data.set_index(ts_column, inplace=True)
        
        # Split features and target
        features_train = train_data[features_columns].copy()
        target_train = train_data[target_column].copy()
        features_val = val_data[features_columns].copy()
        target_val = val_data[target_column].copy()
        
        # Scale features with a scaler fitted on the training split only
        feature_scaler = MinMaxScaler(feature_range=(0, 1))
        features_scaled_train = feature_scaler.fit_transform(features_train)
        features_scaled_val = feature_scaler.transform(features_val)
        
        # Target handling differs by task type
        target_scaler = None   # used for regression only
        label_encoder = None   # used for classification only
        
        if task_type == 'regression':
            # Regression: scale the target into [0, 1]
            target_scaler = MinMaxScaler(feature_range=(0, 1))
            target_scaled_train = target_scaler.fit_transform(target_train.values.reshape(-1, 1))
            target_scaled_val = target_scaler.transform(target_val.values.reshape(-1, 1))
        else:
            # Classification: encode labels as integers; fit on the union of
            # train and validation labels so both splits are covered
            label_encoder = LabelEncoder()
            all_labels = pd.concat([target_train, target_val], axis=0).unique()
            label_encoder.fit(all_labels)
            target_encoded_train = label_encoder.transform(target_train)
            target_encoded_val = label_encoder.transform(target_val)
            # Reshape to match the regression path so windowing below is uniform
            target_scaled_train = target_encoded_train.reshape(-1, 1)
            target_scaled_val = target_encoded_val.reshape(-1, 1)
        
        # Build sliding-window sequences; each sample spans rows [i - n_steps, i]
        # and the label is the target at row i (scaled or encoded)
        X_train, y_train = [], []
        for i in range(n_steps, len(features_scaled_train)):
            X_train.append(features_scaled_train[i-n_steps:i+1, :])
            y_train.append(target_scaled_train[i, 0])
        
        X_val, y_val = [], []
        for i in range(n_steps, len(features_scaled_val)):
            X_val.append(features_scaled_val[i-n_steps:i+1, :])
            y_val.append(target_scaled_val[i, 0])
        
        X_train, y_train = np.array(X_train), np.array(y_train)
        X_val, y_val = np.array(X_val), np.array(y_val)
        
        # Result payload shared by all task types
        result_dict = {
            'X_train': X_train, 'X_val': X_val,
            'y_train': y_train, 'y_val': y_val,
            'feature_scaler': feature_scaler,
            'feature_names': features_columns,
            'target_column': target_column,
            'ts_column': ts_column,
            'n_steps': n_steps,
            'task_type': task_type
        }
        
        # Attach the task-specific transformer
        if task_type == 'regression':
            result_dict['target_scaler'] = target_scaler
        else:
            result_dict['label_encoder'] = label_encoder
        
        return result_dict
        
    except Exception as e:
        st.error(f"数据预处理失败: {str(e)}")
        return None
    

# Universal model builder: regression, binary and multiclass classification
def build_universal_model(input_shape, 
                         task_type='regression',  # 'regression'/'binary'/'multiclass'
                         num_classes=None,        # class count, required for multiclass
                         lstm_units=[64, 64], 
                         dropout_rate=0.2,  
                         optimizer_name="Adam", 
                         learning_rate=0.0005):
    """Build and compile a stacked-LSTM Keras model for the requested task.

    Parameters:
        input_shape: (timesteps, n_features) of each input sequence
        task_type: 'regression' | 'binary' | 'multiclass'
        num_classes: number of classes (multiclass only, must be >= 2)
        lstm_units: units per LSTM layer; 1-7 layers supported
        dropout_rate: dropout applied after every LSTM layer
        optimizer_name: 'Adam' | 'RMSprop' | 'SGD'
        learning_rate: optimizer learning rate

    Returns:
        a compiled tf.keras Sequential model.

    Raises:
        ValueError: unknown task type or optimizer, invalid layer count,
            or missing/invalid num_classes for multiclass.
    """
    if task_type not in ['regression', 'binary', 'multiclass']:
        raise ValueError(f"任务类型必须是'regression'/'binary'/'multiclass'，当前为{task_type}")
    
    if task_type == 'multiclass' and (num_classes is None or num_classes < 2):
        raise ValueError("多分类任务必须指定有效的类别数（num_classes >= 2）")
    
    if not (1 <= len(lstm_units) <= 7):
        raise ValueError(f"LSTM层数必须在1-7之间，当前为{len(lstm_units)}层")
    
    model = Sequential()
    
    # Stack the LSTM layers; timestamped names avoid clashes across rebuilds
    for i, units in enumerate(lstm_units):
        # Every LSTM layer except the last must emit the full sequence
        return_sequences = (i != len(lstm_units) - 1)
        lstm_kwargs = {
            'activation': 'tanh',
            'recurrent_activation': 'sigmoid',
            'return_sequences': return_sequences,
            'name': f"lstm_{i+1}_{int(time.time())}",
        }
        # Only the first layer defines the input shape. The previous code
        # passed input_shape=None to the deeper layers, which some Keras
        # versions reject when deriving batch_input_shape.
        if i == 0:
            lstm_kwargs['input_shape'] = input_shape
        model.add(LSTM(units, **lstm_kwargs))
        model.add(Dropout(dropout_rate, name=f"dropout_{i+1}_{int(time.time())}"))
    
    # Output layer, loss and metrics depend on the task type
    if task_type == 'regression':
        # Regression: a single linear output unit
        model.add(Dense(1, name=f"dense_output_{int(time.time())}"))
        loss = 'mean_squared_error'
        metrics = ['mae']
    
    elif task_type == 'binary':
        # Binary classification: one sigmoid unit
        model.add(Dense(1, activation='sigmoid', name=f"dense_output_{int(time.time())}"))
        loss = 'binary_crossentropy'
        metrics = ['accuracy']
    
    else:  # multiclass
        # Multiclass: softmax over num_classes units; the sparse loss expects
        # integer labels (use 'categorical_crossentropy' for one-hot labels)
        model.add(Dense(num_classes, activation='softmax', name=f"dense_output_{int(time.time())}"))
        loss = 'sparse_categorical_crossentropy'
        metrics = ['sparse_categorical_accuracy']
    
    # Optimizer selection
    if optimizer_name == "Adam":
        optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
    elif optimizer_name == "RMSprop":
        optimizer = tf.keras.optimizers.RMSprop(learning_rate=learning_rate)
    elif optimizer_name == "SGD":
        optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate)
    else:
        raise ValueError(f"未知的优化器: {optimizer_name}")
    
    model.compile(
        optimizer=optimizer,
        loss=loss,
        metrics=metrics)
    
    return model

# Universal training pipeline
def train_universal_model(data_dict, 
                         experiment_name,
                         task_type='regression',
                         num_classes=None,
                         epochs=100, 
                         batch_size=32, 
                         lstm_units=[64, 64], 
                         patience=30,
                         model_name="universal_model", 
                         optimizer_name="Adam", 
                         dropout_rate=0.2,
                         learning_rate=0.0005, 
                         use_hyperopt=False, 
                         hyperopt_params=None):     
    """Train the LSTM model and log everything to MLflow.

    Runs either a single training with the supplied hyperparameters, or —
    when use_hyperopt is True — a hyperopt TPE search over the space
    described by hyperopt_params, returning the best trial's model.
    Parameters, per-epoch metrics, the model itself and its preprocessing
    artifacts (scalers / label encoder) are logged as MLflow artifacts.

    Parameters:
        data_dict: output of preprocess_data (arrays, scalers, metadata)
        experiment_name: MLflow experiment name (created if missing)
        task_type: 'regression' | 'binary' | 'multiclass'
        num_classes: class count, required for multiclass
        epochs, batch_size, lstm_units, patience, optimizer_name,
            dropout_rate, learning_rate: hyperparameters for the
            non-search training path
        model_name: base name for MLflow runs and temporary model files
        use_hyperopt: enable the hyperopt search path
        hyperopt_params: search-space description, required when
            use_hyperopt is True (keys: lstm_units_layers, dropout_min/max,
            optimizers, learning_rate_min/max, batch_sizes, epochs_min/max,
            patience_min/max, max_evals)

    Returns:
        (model, history) of the trained (or best-trial) model.
    """
    
    # Validate task parameters
    if task_type not in ['regression', 'binary', 'multiclass']:
        raise ValueError(f"任务类型必须是'regression'/'binary'/'multiclass'，当前为{task_type}")
    if task_type == 'multiclass' and (num_classes is None or num_classes < 2):
        raise ValueError("多分类任务必须指定有效的类别数（num_classes >= 2）")
    
    # Validate the LSTM layer count
    if not (1 <= len(lstm_units) <= 7):
        raise ValueError(f"LSTM层数必须在1-7之间，当前为{len(lstm_units)}层")
    
    try: 
        # Create the experiment; a failure here normally just means it exists
        mlflow.create_experiment(experiment_name) 
    except:
        pass  

    mlflow.set_experiment(experiment_name) 
    mlflow.keras.autolog(disable=True)
    
    # Temporary file paths used while persisting the model
    model_path = os.path.join(MODEL_DIR, f"{model_name}.h5")
    checkpoint_path = os.path.join(MODEL_DIR, f"{model_name}_checkpoint.keras")
    metadata_path = os.path.join(MODEL_DIR, f"{model_name}_metadata.pkl")  
    
    # Remove stale model files from earlier runs
    for file_path in [model_path, checkpoint_path, metadata_path]:
        if os.path.exists(file_path):
            try:
                os.remove(file_path)
            except Exception as e:
                st.warning(f"无法删除旧模型文件 {file_path}: {str(e)}")  
    
    if use_hyperopt and hyperopt_params:  
        # Validate the layer count configured in the search space
        if not (1 <= len(hyperopt_params['lstm_units_layers']) <=7):
            raise ValueError(f"超参数配置的LSTM层数必须在1-7之间，当前为{len(hyperopt_params['lstm_units_layers'])}层")
        
        # Build the search space: one quantized-uniform dimension per layer
        space = {}  
        for i, (min_val, max_val, step) in enumerate(hyperopt_params['lstm_units_layers']):
            space[f'lstm_units{i+1}'] = hp.quniform(
                f'lstm_units{i+1}', 
                min_val, 
                max_val, 
                step
            )
        
        # Remaining hyperparameter dimensions
        space.update({
            'dropout_rate': hp.uniform('dropout_rate', hyperopt_params['dropout_min'], hyperopt_params['dropout_max']),
            'optimizer': hp.choice('optimizer', list(range(len(hyperopt_params['optimizers'])))),
            'learning_rate': hp.loguniform('learning_rate', 
                                        math.log(hyperopt_params['learning_rate_min']), 
                                        math.log(hyperopt_params['learning_rate_max'])),
            'batch_size': hp.choice('batch_size', list(range(len(hyperopt_params['batch_sizes'])))),
            'epochs': hp.quniform('epochs', hyperopt_params['epochs_min'], hyperopt_params['epochs_max'], 10),
            'patience': hp.quniform('patience', hyperopt_params['patience_min'], hyperopt_params['patience_max'], 5)
        })    
        
        # Objective evaluated by hyperopt for every trial
        def objective(params):     
            try:
                # Decode hyperopt's raw sample into concrete hyperparameters
                lstm_units = [int(params[f'lstm_units{i+1}']) for i in range(len(hyperopt_params['lstm_units_layers']))]
                dropout_rate = float(params['dropout_rate'])
                optimizer_name = hyperopt_params['optimizers'][params['optimizer']]
                learning_rate = float(params['learning_rate'])
                batch_size = int(hyperopt_params['batch_sizes'][params['batch_size']])
                epochs = int(params['epochs'])
                patience = int(params['patience'])
                
                # Per-trial run name; `trials` is bound below before fmin runs
                run_name = f"{model_name}_trial_{len(trials.trials)}"
                model_path = os.path.join(MODEL_DIR, f"{run_name}.h5")
                
                with mlflow.start_run(run_name=run_name, nested=True):
                    # Log this trial's parameters
                    log_params = {
                        'task_type': task_type,
                        'dropout_rate': dropout_rate,
                        'optimizer': optimizer_name,
                        'learning_rate': learning_rate,
                        'batch_size': batch_size,
                        'epochs': epochs,
                        'patience': patience,
                        'n_steps': data_dict['n_steps'],
                        'num_layers': len(lstm_units), 

                        'feature_names': json.dumps(data_dict['feature_names']),
                        'target_column': data_dict['target_column'],
                    }
                    if task_type == 'multiclass':
                        log_params['num_classes'] = num_classes
                    
                    # Log the unit count of every layer
                    for i, units in enumerate(lstm_units):
                        log_params[f'lstm_units{i+1}'] = units
                    
                    mlflow.log_params(log_params)
                    
                    # Build the model for this trial
                    model = build_universal_model(
                        input_shape=(data_dict['X_train'].shape[1], data_dict['X_train'].shape[2]),
                        task_type=task_type,
                        num_classes=num_classes,
                        lstm_units=lstm_units,
                        dropout_rate=dropout_rate,
                        optimizer_name=optimizer_name,
                        learning_rate=learning_rate
                    )
                    
                    # Train with early stopping on validation loss
                    history = model.fit(
                        data_dict['X_train'], data_dict['y_train'],
                        validation_data=(data_dict['X_val'], data_dict['y_val']),
                        epochs=epochs,
                        batch_size=batch_size,
                        verbose=0,
                        callbacks=[EarlyStopping(monitor='val_loss', 
                                            patience=patience, 
                                            restore_best_weights=True),] 
                    )
                    
                    # Evaluate on the validation split
                    val_loss, val_metric = model.evaluate(data_dict['X_val'], data_dict['y_val'], verbose=0)
                    
                    # Log per-epoch training metrics and track the best val_loss
                    best_loss = float('inf')
                    
                    if history is not None:
                        for epoch in range(len(history.history['loss'])):
                            mlflow.log_metric("train_loss", history.history['loss'][epoch], step=epoch)
                            mlflow.log_metric("val_loss", history.history['val_loss'][epoch], step=epoch)
                            
                            # Task-specific metric keys
                            if task_type == 'regression':
                                mlflow.log_metric("train_mae", history.history['mae'][epoch], step=epoch)
                                mlflow.log_metric("val_mae", history.history['val_mae'][epoch], step=epoch)
                            else:
                                acc_key = 'accuracy' if task_type == 'binary' else 'sparse_categorical_accuracy'
                                val_acc_key = f'val_{acc_key}'
                                mlflow.log_metric(f"train_{acc_key}", history.history[acc_key][epoch], step=epoch)
                                mlflow.log_metric(val_acc_key, history.history[val_acc_key][epoch], step=epoch) 
                            
                            if best_loss > history.history['val_loss'][epoch]:
                                best_loss = history.history['val_loss'][epoch]

                    # Summary metrics for this trial
                    metrics = {'val_loss': val_loss, 'best_loss': best_loss}
                    if task_type == 'regression':
                        metrics['val_mae'] = val_metric
                    else:
                        metrics['val_accuracy'] = val_metric
                    
                    mlflow.log_metrics(metrics)  
                    
                    # Persist architecture and weights as MLflow artifacts
                    model_json_path = os.path.join(MODEL_DIR, "model_architecture.json") 
                    model_json = model.to_json()
                    with open(model_json_path, "w") as json_file:
                        json_file.write(model_json)
                    mlflow.log_artifact(model_json_path, "model_architecture") 
                        
                    model_weights_path = os.path.join(MODEL_DIR, "model_weights.weights.h5")   
                    model.save_weights(model_weights_path)  
                    mlflow.log_artifact(model_weights_path, "model_weights")
                    
                    # Persist the feature scaler (kept for all task types)
                    if 'feature_scaler' in data_dict:
                        feature_scaler_path = os.path.join(MODEL_DIR, "feature_scaler.pkl") 
                        joblib.dump(data_dict['feature_scaler'], feature_scaler_path) 
                        mlflow.log_artifact(feature_scaler_path, "feature_scaler")

                    # Persist the label encoder (classification tasks)
                    if task_type != 'regression' :
                        label_encoder_path = os.path.join(MODEL_DIR, "label_encoder.pkl")
                        joblib.dump(data_dict['label_encoder'], label_encoder_path)
                        mlflow.log_artifact(label_encoder_path, "label_encoder")
                    
                    # Persist the target scaler (regression only)
                    if  task_type =='regression' and 'target_scaler' in data_dict:
                        target_scaler_path = os.path.join(MODEL_DIR, "target_scaler.pkl")
                        joblib.dump(data_dict['target_scaler'], target_scaler_path)
                        mlflow.log_artifact(target_scaler_path, "target_scaler")
                        
                    
                    # Save the model, reload it, and register it with MLflow
                    model.save(model_path,save_format='h5')   
                    mlflow.log_artifact(model_path, "model")  
                    model = load_model(model_path)
                    mlflow.keras.log_model(model, "model")    

                    # Clean up temporary files
                    for file_path in [model_path, model_weights_path, model_json_path]:
                        if os.path.exists(file_path):
                            try:
                                os.remove(file_path)
                            except Exception as e:
                                st.warning(f"无法删除模型文件 {file_path}: {str(e)}")    
                    
                    return {'loss': best_loss, 
                        'status': STATUS_OK,  
                        'model': model, 
                        'history': history,
                        'params': params} 
            
            except Exception as e:
                st.error(f"参数优化失败: {str(e)}")
                return {'loss': float('inf'), 'status': STATUS_OK}  

        mlflow.set_experiment(experiment_name)  
        trials = Trials() 
        best = fmin(
            fn=objective,
            space=space,
            algo=tpe.suggest,
            max_evals=hyperopt_params['max_evals'],
            trials=trials
        ) 
        
        # Pick the best trial's model and history by lowest loss
        best_trial = min(trials.results, key=lambda x: x['loss'])
        history = best_trial['history']   
        model = best_trial['model'] 
        
    else:
        # Plain single-run training path (no hyperparameter search)
        with mlflow.start_run(run_name=model_name,nested=True):
        
            model = build_universal_model( 
                input_shape=(data_dict['X_train'].shape[1], data_dict['X_train'].shape[2]),
                task_type=task_type,
                num_classes=num_classes,
                lstm_units=lstm_units,
                dropout_rate=dropout_rate,
                optimizer_name=optimizer_name,
                learning_rate=learning_rate
            ) 
            
            early_stopping = EarlyStopping( 
                monitor='val_loss',
                patience=patience,
                restore_best_weights=True,
                verbose=1)  
            
            model_checkpoint = ModelCheckpoint(  
                checkpoint_path,
                monitor='val_loss',
                save_best_only=True,
                save_weights_only=False,
                verbose=1)
            
            history = model.fit(
                data_dict['X_train'], data_dict['y_train'],
                validation_data=(data_dict['X_val'], data_dict['y_val']),
                epochs=epochs,
                batch_size=batch_size,
                verbose=1,
                callbacks=[early_stopping, model_checkpoint])   
            
            # Evaluate on the validation split
            val_loss, val_metric = model.evaluate(data_dict['X_val'], data_dict['y_val'], verbose=0)
            
            # Log per-epoch training metrics and track the best val_loss
            best_loss = float('inf')
            if history is not None:
                for epoch in range(len(history.history['loss'])):
                    mlflow.log_metric("train_loss", history.history['loss'][epoch], step=epoch)
                    mlflow.log_metric("val_loss", history.history['val_loss'][epoch], step=epoch)
                    
                    # Task-specific metric keys
                    if task_type == 'regression':
                        mlflow.log_metric("train_mae", history.history['mae'][epoch], step=epoch)
                        mlflow.log_metric("val_mae", history.history['val_mae'][epoch], step=epoch)
                    else:
                        acc_key = 'accuracy' if task_type == 'binary' else 'sparse_categorical_accuracy'
                        val_acc_key = f'val_{acc_key}'
                        mlflow.log_metric(f"train_{acc_key}", history.history[acc_key][epoch], step=epoch)
                        mlflow.log_metric(val_acc_key, history.history[val_acc_key][epoch], step=epoch)
                    
                    if best_loss > history.history['val_loss'][epoch]:
                        best_loss = history.history['val_loss'][epoch]

            # Summary metrics
            metrics = {'val_loss': val_loss, 'best_loss': best_loss}
            if task_type == 'regression':
                metrics['val_mae'] = val_metric
            else:
                metrics['val_accuracy'] = val_metric
            
            # Parameters to log for this run
            params = {
                'task_type': task_type,
                'num_layers': len(lstm_units),
                'optimizer': optimizer_name,
                'dropout_rate': float(dropout_rate),
                'epochs': int(epochs),
                'learning_rate': float(learning_rate), 
                'patience': int(patience),
                'batch_size': int(batch_size),
                'n_steps': int(data_dict['n_steps']),
                'feature_names': json.dumps(data_dict['feature_names']), 
                'target_column': data_dict['target_column'],
            }
            if task_type == 'multiclass':
                params['num_classes'] = num_classes
            
            # Log the unit count of every layer
            for i, units in enumerate(lstm_units):
                params[f'units_{i+1}'] = units
            
            # Record parameters and metrics
            mlflow.log_params(params)
            mlflow.log_metrics(metrics)  
            
            # Persist architecture and weights as MLflow artifacts (plus the
            # label encoder required by classification tasks further below)
            model_json_path = os.path.join(MODEL_DIR, "model_architecture.json")   
            model_json = model.to_json()
            with open(model_json_path, "w") as json_file:
                json_file.write(model_json)
            mlflow.log_artifact(model_json_path, "model_architecture") 
                
            model_weights_path = os.path.join(MODEL_DIR, "model_weights.weights.h5")    
            model.save_weights(model_weights_path) 
            mlflow.log_artifact(model_weights_path, "model_weights") 
            
            # Persist the feature scaler
            if 'feature_scaler' in data_dict:
                feature_scaler_path = os.path.join(MODEL_DIR, "feature_scaler.pkl") 
                joblib.dump(data_dict['feature_scaler'], feature_scaler_path) 
                mlflow.log_artifact(feature_scaler_path, "feature_scaler") 

            # Persist the label encoder (classification tasks)
            if task_type != 'regression' : 
                label_encoder_path = os.path.join(MODEL_DIR, "label_encoder.pkl")
                joblib.dump(data_dict['label_encoder'], label_encoder_path)
                mlflow.log_artifact(label_encoder_path, "label_encoder")
            
            # Persist the target scaler (regression only)
            if  task_type =='regression' and 'target_scaler' in data_dict:
                target_scaler_path = os.path.join(MODEL_DIR, "target_scaler.pkl")
                joblib.dump(data_dict['target_scaler'], target_scaler_path)
                mlflow.log_artifact(target_scaler_path, "target_scaler")
            
            # Save the model, reload it, and register it with MLflow
            model.save(model_path,save_format='h5')  
            mlflow.log_artifact(model_path, "model") 
            model = load_model(model_path)
            mlflow.keras.log_model(model, "model")  

            # Clean up temporary files
            for file_path in [model_path, model_weights_path, model_json_path]:
                if os.path.exists(file_path):
                    try:
                        os.remove(file_path)
                    except Exception as e:
                        st.warning(f"无法删除模型文件 {file_path}: {str(e)}")     
    
    return model, history     




def predict_values(model, test_data, data_dict):
    """Run the trained model over test data using the stored preprocessing.

    Parameters:
        model: trained Keras model
        test_data: test DataFrame (mutated in place when a timestamp column
            is configured: it becomes the index)
        data_dict: dict produced during preprocessing (scalers, n_steps,
            task_type, feature names, ...)

    Returns:
        dict with the predictions, their indices, the model input windows
        and the raw (scaled) predictions; None on failure.
    """
    # Only re-index by the timestamp column when one was actually configured.
    # data_dict always carries the 'ts_column' key, possibly with value None,
    # so a plain `in` membership test would try test_data[None] and crash.
    ts_column = data_dict.get('ts_column')
    if ts_column:
        test_data[ts_column] = pd.to_datetime(test_data[ts_column])
        test_data.set_index(ts_column, inplace=True)

    try:
        # 1. Extract and scale the feature columns with the training scaler
        feature_names = data_dict['feature_names']
        test_features = test_data[feature_names].copy()
        
        feature_scaler = data_dict['feature_scaler']
        test_features_scaled = feature_scaler.transform(test_features)
        
        # 2. Build the sliding-window input format the LSTM expects;
        #    windows are n_steps + 1 rows, matching training preprocessing
        n_steps = data_dict['n_steps']
        X_test = []
        for i in range(n_steps, len(test_features_scaled)):
            X_test.append(test_features_scaled[i - n_steps : i + 1, :])
        
        # Bail out if there is not enough data for a single window
        if len(X_test) == 0:
            raise ValueError(f"测试数据长度不足，无法构建{data_dict['n_steps'] + 1}长度的时间序列窗口")
        
        X_test = np.array(X_test)
        
        # 3. Model inference
        y_pred_scaled = model.predict(X_test)
        
        # 4. Post-process predictions according to the task type
        task_type = data_dict['task_type']
        
        if task_type == "regression":
            # Regression: invert the target scaling
            target_scaler = data_dict['target_scaler']
            y_pred = target_scaler.inverse_transform(y_pred_scaled.reshape(-1, 1)).flatten()
        elif task_type == "binary":
            # Binary: return probabilities (apply a threshold for classes)
            y_pred = y_pred_scaled.flatten()
        else:  # multiclass
            # Multiclass: argmax class index, mapped back to original labels
            # when a label encoder is available
            y_pred = np.argmax(y_pred_scaled, axis=1)
            if 'label_encoder' in data_dict:
                y_pred = data_dict['label_encoder'].inverse_transform(y_pred)
        
        # 5. Indices of the rows the predictions correspond to (the first
        #    n_steps rows only feed windows and get no prediction)
        if not test_features.index.empty:
            pred_indices = test_features.index[n_steps:]
        else:
            pred_indices = np.arange(n_steps, len(test_features))
        
        return {
            'y_pred': y_pred,               # final predictions (original scale or labels)
            'pred_indices': pred_indices,   # index (time or position) per prediction
            'X_test': X_test,               # model input windows
            'test_data': test_data,
            'y_pred_scaled': y_pred_scaled  # raw model output (for debugging)
        }
    
    except Exception as e:
        st.error(f"预测过程出错: {str(e)}")
        return None
    



def evaluate_test_set(predict_result, test_data, data_dict):
    """Compute test-set metrics from a prediction result, mirroring the
    metric format used during training.

    Parameters:
        predict_result: dict returned by predict_values
        test_data: test DataFrame holding the ground-truth target column
        data_dict: preprocessing dict carrying task_type and column names

    Returns:
        dict of metrics (plus sample_count and task_type), or None on failure.
    """
    try:
        task_type = data_dict['task_type']
        target_column = data_dict['target_column']

        # Align the ground truth with the rows that actually got predictions
        y_true = test_data.loc[predict_result['pred_indices'], target_column].values
        y_pred = predict_result['y_pred']

        metrics = {}

        if task_type == 'regression':
            mse = mean_squared_error(y_true, y_pred)
            metrics['mae'] = mean_absolute_error(y_true, y_pred)
            metrics['mse'] = mse
            metrics['rmse'] = math.sqrt(mse)
            metrics['r2'] = r2_score(y_true, y_pred)

        elif task_type == 'binary':
            # Probabilities -> hard labels at the default 0.5 threshold
            hard_pred = (y_pred >= 0.5).astype(int)
            metrics['accuracy'] = accuracy_score(y_true, hard_pred)

            # Positive-class metrics pulled from the classification report
            report = classification_report(y_true, hard_pred, output_dict=True)
            for key in ('precision', 'recall', 'f1-score'):
                metrics[key] = report['1'][key]

            # AUC is only meaningful when both classes appear in y_true
            if len(np.unique(y_true)) == 2:
                from sklearn.metrics import roc_auc_score
                metrics['auc'] = roc_auc_score(y_true, y_pred)

        elif task_type == 'multiclass':
            metrics['accuracy'] = accuracy_score(y_true, y_pred)

            # Macro- and weighted-averaged precision/recall/F1
            report = classification_report(y_true, y_pred, output_dict=True)
            for avg_key, prefix in (('macro avg', 'macro'), ('weighted avg', 'weighted')):
                metrics[f'{prefix}_precision'] = report[avg_key]['precision']
                metrics[f'{prefix}_recall'] = report[avg_key]['recall']
                metrics[f'{prefix}_f1'] = report[avg_key]['f1-score']

        # Basic bookkeeping shared by all task types
        metrics['sample_count'] = len(y_true)
        metrics['task_type'] = task_type

        return metrics

    except Exception as e:
        st.error(f"测试集指标计算失败: {str(e)}")
        return None
    

#### Fetch the list of registered MLflow models
def get_rg_models():
    """Return a DataFrame describing every registered model version.

    Columns hold the model name, run name, version id, run id and the
    run's recorded val_loss (or a placeholder when it is unavailable).
    """
    rows = []

    # One entry per registered model version
    for version_info in mlflow.search_model_versions():
        name = version_info.name
        rid = version_info.run_id
        ver = version_info.version

        # Look up the originating run to recover its name and val_loss
        try:
            run = mlflow.get_run(rid)
            run_name = run.info.run_name
            # The training code logs 'val_loss'; older runs may lack it
            loss = run.data.metrics.get('val_loss')
            loss = round(loss, 4) if loss is not None else "未记录"
        except Exception as e:
            run_name = "未知"
            loss = f"获取失败: {str(e)}"

        rows.append([name, run_name, ver, rid, loss])

    return pd.DataFrame(
        data=rows,
        columns=['模型名称', 'run_name', 'version_id', 'run_id', 'val_loss']
    )
     