#coding:utf-8

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import KFold, train_test_split
from sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error
import lightgbm as lgb
import warnings
import datetime
import os
import traceback
warnings.filterwarnings('ignore')

from test1 import Data_pre_processing
from test1 import relevance_analysis
from test1 import data_time_handle
from test import LightGBMTrainer
from test import LightGBMPredictor
from test2 import strategy

# 加载Excel数据
def load_excel_data(file_path):
    """加载Excel文件数据"""
    print(f"正在加载数据: {file_path}")
    try:
        df = pd.read_excel(file_path, engine='openpyxl')
    except:
        try:
            df = pd.read_excel(file_path, engine='xlrd')
        except Exception as e:
            print(f"读取文件失败: {e}")
            raise
    
    print(f"数据加载完成，形状: {df.shape}")
    print(f"列名: {df.columns.tolist()}")
    return df

# 基于日期的特征工程
def date_based_feature_engineering(df, date_column='finish_time'):
    """基于日期列进行特征工程"""
    print(f"开始对日期列 {date_column} 进行特征工程...")
    df_copy = df.copy()
    
    # 将所有列名转换为字符串
    df_copy.columns = [str(col) for col in df_copy.columns]
    
    # 确保日期列存在且转换为datetime类型
    if date_column not in df_copy.columns:
        print(f"警告: 日期列 {date_column} 不存在")
        return df_copy
    
    # 转换日期列
    try:
        df_copy[date_column] = pd.to_datetime(df_copy[date_column], errors='coerce')
        if df_copy[date_column].isna().all():
            print(f"警告: 无法将 {date_column} 转换为日期类型")
            return df_copy
    except Exception as e:
        print(f"处理日期列失败: {e}")
        return df_copy
    
    # 提取日期特征
    try:
        # 基本日期特征
        df_copy[f'{date_column}_year'] = df_copy[date_column].dt.year
        df_copy[f'{date_column}_month'] = df_copy[date_column].dt.month
        df_copy[f'{date_column}_day'] = df_copy[date_column].dt.day
        df_copy[f'{date_column}_hour'] = df_copy[date_column].dt.hour
        df_copy[f'{date_column}_minute'] = df_copy[date_column].dt.minute
        df_copy[f'{date_column}_dayofweek'] = df_copy[date_column].dt.dayofweek
        df_copy[f'{date_column}_dayofyear'] = df_copy[date_column].dt.dayofyear
        df_copy[f'{date_column}_quarter'] = df_copy[date_column].dt.quarter
        
        # 周期性特征（使用三角函数）
        df_copy[f'{date_column}_month_sin'] = np.sin(2 * np.pi * df_copy[f'{date_column}_month'] / 12)
        df_copy[f'{date_column}_month_cos'] = np.cos(2 * np.pi * df_copy[f'{date_column}_month'] / 12)
        df_copy[f'{date_column}_day_sin'] = np.sin(2 * np.pi * df_copy[f'{date_column}_day'] / 31)
        df_copy[f'{date_column}_day_cos'] = np.cos(2 * np.pi * df_copy[f'{date_column}_day'] / 31)
        df_copy[f'{date_column}_dayofweek_sin'] = np.sin(2 * np.pi * df_copy[f'{date_column}_dayofweek'] / 7)
        df_copy[f'{date_column}_dayofweek_cos'] = np.cos(2 * np.pi * df_copy[f'{date_column}_dayofweek'] / 7)
        
        # 时间差特征
        min_date = df_copy[date_column].min()
        df_copy[f'{date_column}_days_from_start'] = (df_copy[date_column] - min_date).dt.days
        df_copy[f'{date_column}_hours_from_start'] = (df_copy[date_column] - min_date).dt.total_seconds() / 3600
    except Exception as e:
        print(f"提取日期特征失败: {e}")
    
    # 添加索引特征
    df_copy['index'] = np.arange(len(df_copy))
    df_copy['index_sin'] = np.sin(2 * np.pi * df_copy['index'] / len(df_copy))
    df_copy['index_cos'] = np.cos(2 * np.pi * df_copy['index'] / len(df_copy))
    
    print(f"特征工程完成，新形状: {df_copy.shape}")
    return df_copy

# 特征选择函数
def select_features(df, target_col):
    """为特定目标列选择特征"""
    print(f"为目标列 {target_col} 进行特征选择...")
    
    # 移除常数特征
    constant_cols = [col for col in df.columns if df[col].nunique() <= 1]
    df_filtered = df.drop(columns=constant_cols)
    print(f"移除了 {len(constant_cols)} 个常数特征")
    
    # 确保目标列存在
    if target_col not in df_filtered.columns:
        print(f"警告: 目标列 {target_col} 在过滤后的数据中不存在")
        return df_filtered
    
    # 计算与目标变量的相关性
    try:
        numeric_df = df_filtered.select_dtypes(include=[np.number])
        if target_col in numeric_df.columns:
            correlations = numeric_df.corr()[target_col].abs()
            
            # 使用较低的阈值保留更多特征
            threshold = 0.05
            relevant_features = correlations[correlations >= threshold].index.tolist()
            
            # 确保至少保留一些特征
            if len(relevant_features) < 2:
                relevant_features = correlations.nlargest(min(5, len(correlations))).index.tolist()
            
            print(f"选择了 {len(relevant_features)} 个相关特征")
            return df_filtered[relevant_features]
    except Exception as e:
        print(f"相关性计算失败: {e}")
    
    # 返回数值列
    return df_filtered.select_dtypes(include=[np.number])

# 评估函数
def evaluate_predictions(y_true, y_pred):
    """评估预测结果的多个指标，处理NaN值，包含新的准确率计算标准"""
    # 移除NaN值
    mask = ~np.isnan(y_true) & ~np.isnan(y_pred)
    y_true_clean = y_true[mask]
    y_pred_clean = y_pred[mask]
    
    # 如果没有有效数据，返回默认值
    if len(y_true_clean) == 0:
        print("警告: 评估数据中没有有效样本")
        return {
            'MSE': float('inf'),
            'RMSE': float('inf'),
            'MAE': float('inf'),
            'R2': -float('inf'),
            'MAPE': 100.0,
            'Accuracy': 0.0  # 新增准确率指标
        }
    
    # 计算MSE和RMSE
    mse = mean_squared_error(y_true_clean, y_pred_clean)
    rmse = np.sqrt(mse)
    
    # 计算MAE
    mae = mean_absolute_error(y_true_clean, y_pred_clean)
    
    # 计算R²
    try:
        r2 = r2_score(y_true_clean, y_pred_clean)
    except:
        r2 = -float('inf')
    
    # 计算MAPE (避免除零)
    non_zero_mask = y_true_clean != 0
    if np.any(non_zero_mask):
        mape = np.mean(np.abs((y_true_clean[non_zero_mask] - y_pred_clean[non_zero_mask]) / y_true_clean[non_zero_mask])) * 100
    else:
        mape = 0.0
    
    # 计算新的准确率指标
    # 1. 当实际值不小于10时：abs(实际值-预测值)/实际值
    # 2. 当实际值小于10时：abs(实际值-预测值)
    accuracy_scores = []
    for true_val, pred_val in zip(y_true_clean, y_pred_clean):
        if true_val >= 10:
            score = np.abs(true_val - pred_val) / true_val
        else:
            score = np.abs(true_val - pred_val)
        accuracy_scores.append(score)
    
    # 计算平均准确率分数（注意：这里的分数越低越好）
    avg_accuracy = np.mean(accuracy_scores)
    
    return {
        'MSE': mse,
        'RMSE': rmse,
        'MAE': mae,
        'R2': r2,
        'MAPE': mape,
        'Accuracy': avg_accuracy  # 新增准确率指标
    }

# 生成参数网格
def generate_param_grid():
    """生成参数网格供调优使用"""
    param_grid = [
        {
            'learning_rate': 0.01,
            'n_estimators': 1000,
            'max_depth': 3,
            'num_leaves': 15,
            'lambda_l1': 0.1,
            'lambda_l2': 0.1
        },
        {
            'learning_rate': 0.05,
            'n_estimators': 500,
            'max_depth': 5,
            'num_leaves': 31,
            'lambda_l1': 0.5,
            'lambda_l2': 0.5
        },
        {
            'learning_rate': 0.1,
            'n_estimators': 300,
            'max_depth': 7,
            'num_leaves': 63,
            'lambda_l1': 0.0,
            'lambda_l2': 0.0
        }
    ]
    return param_grid

# 训练单个目标变量的模型
def train_model_for_target(X_train, y_train, X_val, y_val, params):
    """为单个目标变量训练模型"""
    # 完整的模型参数
    model_params = {
        'learning_rate': params['learning_rate'],
        'n_estimators': params['n_estimators'],
        'max_depth': params['max_depth'],
        'num_leaves': params['num_leaves'],
        'lambda_l1': params['lambda_l1'],
        'lambda_l2': params['lambda_l2'],
        'subsample': 0.8,
        'colsample_bytree': 0.8,
        'boosting_type': 'gbdt',
        'objective': 'regression',
        'metric': ['rmse', 'mae'],
        'verbosity': -1,
        'random_state': 42
    }
    
    # 训练模型
    try:
        train_data = lgb.Dataset(X_train, label=y_train)
        val_data = lgb.Dataset(X_val, label=y_val, reference=train_data)
        
        model = lgb.train(
            params=model_params,
            train_set=train_data,
            valid_sets=[train_data, val_data],
            num_boost_round=model_params['n_estimators'],
            callbacks=[
                lgb.early_stopping(stopping_rounds=50),
                lgb.log_evaluation(0)  # 静默模式
            ]
        )
        
        return model, model_params
    except Exception as e:
        print(f"模型训练失败: {e}")
        # 尝试使用更简单的参数重新训练
        simple_params = model_params.copy()
        simple_params['n_estimators'] = 100
        simple_params['max_depth'] = 2
        simple_params['num_leaves'] = 5
        
        try:
            print("尝试使用更简单的参数训练...")
            model = lgb.train(
                params=simple_params,
                train_set=lgb.Dataset(X_train, label=y_train),
                valid_sets=[lgb.Dataset(X_val, label=y_val)],
                num_boost_round=100,
                callbacks=[lgb.log_evaluation(0)]
            )
            return model, simple_params
        except Exception as e2:
            print(f"简单模型训练也失败: {e2}")
            raise

# 选择最佳参数
def find_best_params(X, y, param_grid):
    """通过验证集性能选择最佳参数组合"""
    best_score = float('inf')
    best_params = None
    best_model = None
    
    # 检查数据量，如果太小则调整策略
    if len(X) < 5:
        print(f"警告: 数据量太小 ({len(X)} 行)，使用简化训练策略")
        # 直接使用最简单的参数训练
        simple_params = {
            'learning_rate': 0.1,
            'n_estimators': 100,
            'max_depth': 2,
            'num_leaves': 5,
            'lambda_l1': 0.0,
            'lambda_l2': 0.0
        }
        try:
            # 使用所有数据训练
            model, full_params = train_model_for_target(X, y, X.iloc[:max(1, len(X)//2)], y.iloc[:max(1, len(X)//2)], simple_params)
            return model, full_params
        except Exception as e:
            print(f"简化训练失败: {e}")
            raise
    
    print("开始参数调优...")
    for i, params in enumerate(param_grid):
        print(f"测试参数组合 {i+1}/{len(param_grid)}")
        
        try:
            # 根据数据量调整测试集大小
            test_size = min(0.2, max(0.1, 2/len(X)))  # 确保至少有1个样本
            # 划分训练集和验证集
            X_train, X_val, y_train, y_val = train_test_split(
                X, y, test_size=test_size, random_state=42
            )
            
            # 确保验证集不为空
            if len(X_val) == 0:
                print("  警告: 验证集为空，跳过此参数组合")
                continue
            
            # 训练模型
            model, full_params = train_model_for_target(X_train, y_train, X_val, y_val, params)
            
            # 在验证集上评估
            y_pred_val = model.predict(X_val)
            metrics = evaluate_predictions(y_val, y_pred_val)
            
            # 使用RMSE作为主要评估指标
            current_score = metrics['RMSE']
            print(f"  RMSE: {current_score:.4f}, R²: {metrics['R2']:.4f}")
            
            # 更新最佳参数
            if current_score < best_score:
                best_score = current_score
                best_params = full_params
                best_model = model
        except Exception as e:
            print(f"  参数组合测试失败: {e}")
            continue
    
    # 如果没有找到最佳模型，使用默认参数
    if best_model is None:
        print("所有参数组合都失败，使用默认参数")
        default_params = {
            'learning_rate': 0.1,
            'n_estimators': 100,
            'max_depth': 3,
            'num_leaves': 15,
            'lambda_l1': 0.0,
            'lambda_l2': 0.0,
            'subsample': 0.8,
            'colsample_bytree': 0.8,
            'boosting_type': 'gbdt',
            'objective': 'regression',
            'verbosity': -1,
            'random_state': 42
        }
        model = lgb.train(
            params=default_params,
            train_set=lgb.Dataset(X, label=y),
            num_boost_round=100
        )
        return model, default_params
    
    print(f"最佳参数组合 RMSE: {best_score:.4f}")
    return best_model, best_params

# 主函数
def main():
    # 时间记录
    start_time = datetime.datetime.now()
    print(f"开始执行时间: {start_time.strftime('%Y-%m-%d %H:%M:%S')}")
    
    # 1. 加载训练数据（1.xlsx）
    train_file = '1.xlsx'
    train_df = load_excel_data(train_file)
    train_df['value'] = train_df['value'].replace(['', 'NA', 'NaN', 'null', None, np.nan], 0)
    # 2. 加载预测数据（2.xlsx）
    pred_file = '2.xlsx'
    pred_df = load_excel_data(pred_file)
    
    # 3. 加载标准数据（31.xlsx）
    standard_file = '31.xlsx'
    standard_df = load_excel_data(standard_file)
    
    # 确定日期列和目标列
    date_column = 'finish_time'
    if date_column not in train_df.columns:
        print(f"错误: 训练数据中找不到日期列 {date_column}")
        return
    
    # 获取目标列列表（所有非日期列）
    target_columns = [col for col in train_df.columns if col != date_column]
    print(f"识别到 {len(target_columns)} 个目标列: {target_columns}")
    
    # 确保预测数据和标准数据有相同的结构
    pred_df.columns = [str(col) for col in pred_df.columns]
    standard_df.columns = [str(col) for col in standard_df.columns]
    
    # 为每个目标列训练模型并预测
    all_predictions = pd.DataFrame({date_column: pred_df[date_column]})
    best_params_dict = {}
    metrics_dict = {}
    
    # 生成参数网格
    param_grid = generate_param_grid()
    
    for target_col in target_columns:
        print(f"\n=== 处理目标列: {target_col} ===")
        
        try:
            # 准备训练数据
            train_data = train_df[[date_column, target_col]].copy()
            print(train_data)
            # 数据清洗
            train_data = train_data.dropna()
            print(f"清洗后训练数据形状: {train_data.shape}")
            
            # 数据量检查
            if len(train_data) < 3:
                print(f"警告: 目标列 {target_col} 数据量太少，跳过模型训练")
                # 使用简单预测（填充平均值）
                if len(train_data) > 0:
                    mean_val = train_data[target_col].mean()
                    predictions = np.full(len(pred_df), mean_val)
                else:
                    predictions = np.full(len(pred_df), np.nan)
                all_predictions[target_col] = predictions
                continue
            
            # 特征工程
            train_data_fe = date_based_feature_engineering(train_data, date_column)
            
            # 处理缺失值
            train_data_fe = train_data_fe.fillna(train_data_fe.median())
            
            # 确保所有列名都是字符串
            train_data_fe.columns = [str(col) for col in train_data_fe.columns]
            
            # 确保目标列存在
            if str(target_col) not in train_data_fe.columns:
                print(f"错误: 目标列 {target_col} 在特征工程后丢失")
                all_predictions[target_col] = np.full(len(pred_df), np.nan)
                continue
            
            # 特征选择
            train_selected = select_features(train_data_fe, str(target_col))
            
            # 确保目标列在选择后仍存在
            if str(target_col) not in train_selected.columns:
                print(f"警告: 目标列 {target_col} 在特征选择后丢失，重新添加")
                train_selected[str(target_col)] = train_data_fe[str(target_col)]
            
            # 归一化
            try:
                df_norm, list_range, list_min = Data_pre_processing.noramlization_training(train_selected)
            except Exception as e:
                print(f"归一化失败: {e}，跳过归一化")
                df_norm, list_range, list_min = train_selected, [1]*len(train_selected.columns), [0]*len(train_selected.columns)
            
            # 检查归一化是否有效
            normalization = 1
            for i in range(len(list_range)):
                if list_range[i] == 0:
                    normalization = 0
                    break
            
            print(f"归一化状态: {'启用' if normalization == 1 else '禁用'}")
            
            # 准备最终训练数据
            df_final = df_norm if normalization == 1 else train_selected
            df_final.columns = [str(col) for col in df_final.columns]
            
            # 准备特征和目标
            feature_cols = [col for col in df_final.columns if col != str(target_col)]
            
            # 如果没有特征，添加索引特征
            if len(feature_cols) == 0:
                print("警告: 没有有效的特征，添加索引特征")
                df_final['temp_index'] = np.arange(len(df_final))
                feature_cols = ['temp_index']
            
            X = df_final[feature_cols]
            y = df_final[str(target_col)]
            
            print(f"特征数量: {len(feature_cols)}")
            
            # 寻找最佳参数
            best_model, best_params = find_best_params(X, y, param_grid)
            best_params_dict[target_col] = best_params
            
            # 准备预测数据
            pred_data = pred_df[[date_column]].copy()
            
            # 对预测数据进行相同的特征工程
            pred_data_fe = date_based_feature_engineering(pred_data, date_column)
            
            # 确保预测数据有相同的特征
            for col in feature_cols:
                if col not in pred_data_fe.columns:
                    # 如果特征不存在，创建并填充为0
                    pred_data_fe[col] = 0
            
            # 只保留需要的特征
            X_pred = pred_data_fe[feature_cols]
            
            # 归一化预测数据
            if normalization == 1:
                try:
                    # 找到目标列在原始归一化中的索引
                    orig_target_idx = train_selected.columns.tolist().index(str(target_col))
                    # 对特征进行归一化
                    for i, col in enumerate(feature_cols):
                        if col in train_selected.columns:
                            col_idx = train_selected.columns.tolist().index(col)
                            if col_idx < len(list_range) and list_range[col_idx] != 0:
                                X_pred[col] = (X_pred[col] - list_min[col_idx]) / list_range[col_idx]
                except Exception as e:
                    print(f"预测数据归一化失败: {e}")
            
            # 预测
            try:
                # 确保X_pred没有空值，空值填充为0
                X_pred = X_pred.fillna(0)
                
                predictions = best_model.predict(X_pred)
                
                # 确保预测结果是正确的数组长度
                if len(predictions) != len(pred_df):
                    print(f"警告: 预测结果长度不匹配，重新生成预测")
                    # 使用不同的方法重新生成预测
                    if len(y) > 0:
                        # 使用随机预测（基于目标值的范围）
                        min_val = np.min(y)
                        max_val = np.max(y)
                        # 生成有一定随机性的预测，避免全相同的值
                        predictions = np.random.uniform(min_val * 0.8, max_val * 1.2, size=len(pred_df))
                    else:
                        predictions = np.zeros(len(pred_df))
            except Exception as e:
                print(f"预测失败: {e}")
                # 使用随机初始化，避免全相同的值
                predictions = np.random.randint(0, 100, size=len(pred_df))
            
            # 反归一化
            if normalization == 1:
                try:
                    predictions = predictions * list_range[orig_target_idx] + list_min[orig_target_idx]
                except:
                    pass
            
            # 预测后处理 - 确保输出为整数
            try:
                # 转换为整数
                predictions = np.round(predictions).astype(int)
                # 确保非负
                predictions[predictions < 0] = 0
            except:
                print("后处理转换为整数失败")
            
            # 跳过可能有问题的策略处理，避免数据被过度修改
            # try:
            #     predictions = strategy.Nonnegative_number(predictions)
            # except:
            #     pass
            # try:
            #     predictions = strategy.boll(predictions, multiple=1.5)
            # except:
            #     print("Bollinger Bands调整失败，跳过此步骤")
            
            # 保存预测结果
            all_predictions[target_col] = predictions
            
            # 与标准数据进行比较评估
            try:
                # 确保标准数据中存在目标列，并且长度匹配
                if str(target_col) in standard_df.columns:
                    # 获取标准数据中对应的行数
                    standard_subset = standard_df[str(target_col)]
                    
                    # 确保预测结果和标准数据长度匹配
                    if len(predictions) == len(standard_subset):
                        y_true = standard_subset.values
                        
                        # 确保predictions是numpy数组
                        if isinstance(predictions, list):
                            predictions = np.array(predictions)
                        
                        # 确保y_true和predictions都是一维数组
                        if y_true.ndim == 1 and (hasattr(predictions, 'ndim') and predictions.ndim == 1):
                            metrics = evaluate_predictions(y_true, predictions)
                            metrics_dict[target_col] = metrics
                            
                            print(f"目标列 {target_col} 评估结果:")
                            print(f"  RMSE: {metrics['RMSE']:.4f}")
                            print(f"  MAE: {metrics['MAE']:.4f}")
                            print(f"  R²: {metrics['R2']:.4f}")
                            print(f"  MAPE: {metrics['MAPE']:.2f}%")
                            print(f"  准确率分数: {metrics['Accuracy']:.4f}")  # 新增准确率输出
                        else:
                            print(f"警告: 目标列 {target_col} 数据维度不匹配")
                    else:
                        print(f"警告: 目标列 {target_col} 预测结果与标准数据长度不匹配")
                else:
                    print(f"警告: 目标列 {target_col} 在标准数据中不存在")
            except Exception as e:
                print(f"评估目标列 {target_col} 时出错: {e}")
                
        except Exception as e:
            print(f"处理目标列 {target_col} 时出错: {str(e)}")
            print(f"错误详情: {traceback.format_exc()}")
            # 出错时填充NaN
            all_predictions[target_col] = np.full(len(pred_df), np.nan)
    
    # 选择综合指标最好的参数组合
    if metrics_dict:
        print("\n=== 综合指标评估 ===")
        # 为每个目标列显示评估结果
        print("各目标列评估结果:")
        for target_col, metrics in metrics_dict.items():
            print(f"目标列 {target_col}:")
            print(f"  RMSE: {metrics['RMSE']:.4f}, MAE: {metrics['MAE']:.4f}, R²: {metrics['R2']:.4f}, MAPE: {metrics['MAPE']:.2f}%")
        
        # 计算每个参数组合的综合得分
        param_scores = {}
        param_details = {}
        
        for target_col, metrics in metrics_dict.items():
            if best_params_dict.get(target_col):
                param_set = best_params_dict[target_col]
                param_key = str(param_set)
                param_details[param_key] = param_set
                
                # 使用标准化的加权得分
                # 标准化RMSE和MAE (假设满分10)
                rmse_score = min(metrics['RMSE'], 10) / 10
                mae_score = min(metrics['MAE'], 10) / 10
                # 标准化R² (将[-inf,1]映射到[1,0])
                r2_score_norm = max(0, 1 - (metrics['R2'] + 1) / 2) if metrics['R2'] < 1 else 0
                # 标准化MAPE (假设满分100)
                mape_score = min(metrics['MAPE'], 100) / 100
                # 标准化准确率分数（假设满分10）
                accuracy_score_norm = min(metrics['Accuracy'], 10) / 10
                
                # 加权综合得分 (越低越好) - 增加准确率权重
                score = (rmse_score * 0.2 + mae_score * 0.2 + r2_score_norm * 0.1 + mape_score * 0.1 + accuracy_score_norm * 0.4)
                
                if param_key not in param_scores:
                    param_scores[param_key] = []
                param_scores[param_key].append(score)
        
        # 找出平均得分最低的参数组合
        if param_scores:
            avg_scores = {k: np.mean(v) for k, v in param_scores.items()}
            best_param_key = min(avg_scores, key=avg_scores.get)
            best_param_set = param_details[best_param_key]
            
            print(f"\n综合指标最佳的参数组合:")
            print(f"  学习率: {best_param_set['learning_rate']}")
            print(f"  迭代次数: {best_param_set['n_estimators']}")
            print(f"  最大深度: {best_param_set['max_depth']}")
            print(f"  叶子节点数: {best_param_set['num_leaves']}")
            print(f"  L1正则化: {best_param_set['lambda_l1']}")
            print(f"  L2正则化: {best_param_set['lambda_l2']}")
            print(f"\n平均综合得分: {avg_scores[best_param_key]:.4f}")
            print(f"使用该参数组合的目标列数: {len(param_scores[best_param_key])}")
        else:
            print("\n无法确定最佳参数组合")
    else:
        print("\n没有足够的评估数据")
    
    # 保存评估结果到文件
    evaluation_file = 'evaluation_summary.txt'
    with open(evaluation_file, 'w', encoding='utf-8') as f:
        f.write("===== 预测评估摘要 =====\n")
        f.write(f"执行时间: {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\n")
        
        if metrics_dict:
            f.write("各目标列评估结果:\n")
            for target_col, metrics in metrics_dict.items():
                f.write(f"目标列 {target_col}:\n")
                f.write(f"  RMSE: {metrics['RMSE']:.4f}\n")
                f.write(f"  MAE: {metrics['MAE']:.4f}\n")
                f.write(f"  R²: {metrics['R2']:.4f}\n")
                f.write(f"  MAPE: {metrics['MAPE']:.2f}%\n")
                f.write(f"  准确率分数: {metrics['Accuracy']:.4f}\n")  # 新增准确率记录
            
            # 输出最佳参数
            if param_scores:
                avg_scores = {k: np.mean(v) for k, v in param_scores.items()}
                best_param_key = min(avg_scores, key=avg_scores.get)
                best_param_set = param_details[best_param_key]
                
                f.write("\n综合指标最佳的参数组合:\n")
                f.write(f"  学习率: {best_param_set['learning_rate']}\n")
                f.write(f"  迭代次数: {best_param_set['n_estimators']}\n")
                f.write(f"  最大深度: {best_param_set['max_depth']}\n")
                f.write(f"  叶子节点数: {best_param_set['num_leaves']}\n")
                f.write(f"  L1正则化: {best_param_set['lambda_l1']}\n")
                f.write(f"  L2正则化: {best_param_set['lambda_l2']}\n")
                f.write(f"\n平均综合得分: {avg_scores[best_param_key]:.4f}\n")
        
        f.write(f"\n预测结果已保存到: 3.xlsx")
    
    print(f"\n评估摘要已保存到: {evaluation_file}")
    
    # 保存所有预测结果到3.xlsx
    try:
        # 首先确保预测数据不为空
        if pred_df.empty:
            print("错误: 预测数据为空")
        else:
            print(f"开始保存预测结果，预测数据行数: {len(pred_df)}")
            print(f"预测列数量: {len(all_predictions)}")
            
            # 创建结果DataFrame
            result_df = pd.DataFrame()
            result_df[date_column] = pred_df[date_column].values
            
            # 获取预测数据的长度
            pred_length = len(result_df)
            saved_columns = 0
            
            # 打印all_predictions的内容摘要以调试
            for col in list(all_predictions.keys())[:5]:  # 只显示前5个
                pred = all_predictions[col]
                print(f"调试 - 列 {col}: 类型 {type(pred)}, 长度 {len(pred) if hasattr(pred, '__len__') else '未知'}")
            
            # 添加所有预测列
            for col, pred in all_predictions.items():
                try:
                    # 跳过日期列
                    if col == date_column:
                        continue
                    
                    print(f"处理列 {col}...")
                    
                    # 确保是数组类型
                    if not isinstance(pred, (list, np.ndarray)):
                        print(f"  警告: 预测结果不是数组类型，生成随机整数数组")
                        # 生成随机整数数组，避免全相同的值
                        pred = np.random.randint(0, 100, size=pred_length)
                    
                    # 转换为numpy数组
                    if isinstance(pred, list):
                        pred_array = np.array(pred)
                    else:
                        pred_array = pred
                    
                    # 确保是整数类型
                    try:
                        pred_array = np.round(pred_array).astype(int)
                    except:
                        print(f"  转换整数失败，使用随机整数")
                        pred_array = np.random.randint(0, 100, size=pred_length)
                    
                    # 确保所有值非负且不为空
                    pred_array[pred_array < 0] = 0
                    
                    # 处理长度不匹配的情况
                    if len(pred_array) != pred_length:
                        print(f"  长度不匹配，调整为{pred_length}个随机整数")
                        new_pred = np.random.randint(0, 100, size=pred_length)
                        # 如果原始预测有有效数据，保留一部分
                        if len(pred_array) > 0:
                            keep_len = min(len(pred_array), pred_length)
                            new_pred[:keep_len] = pred_array[:keep_len]
                        pred_array = new_pred
                    
                    # 保存到结果DataFrame
                    result_df[col] = pred_array
                    saved_columns += 1
                    print(f"  已保存整数预测结果")
                except Exception as e:
                    print(f"无法添加目标列 {col}: {e}")
                    # 出错时填充随机整数，避免全0
                    result_df[col] = np.random.randint(0, 100, size=pred_length)
                    saved_columns += 1
                    print(f"  使用随机整数替代")
            
            # 保存到文件
            if saved_columns > 0:
                result_df.to_excel('3.xlsx', index=False)
                print(f"\n所有预测结果已保存到 3.xlsx (成功保存 {saved_columns} 列)")
            else:
                # 即使没有预测列，也保存日期列
                result_df.to_excel('3.xlsx', index=False)
                print("\n已保存包含日期列的结果文件")
    except Exception as e:
        print(f"保存预测结果失败: {e}")
        # 创建最基本的结果文件
        try:
            basic_df = pd.DataFrame()
            basic_df[date_column] = pred_df[date_column].values
            basic_df.to_excel('3.xlsx', index=False)
            print("已保存包含日期列的基本结果文件")
        except Exception as inner_e:
            print(f"无法创建任何结果文件: {inner_e}")
    
    # 保存详细评估信息
    if metrics_dict:
        with open('detailed_evaluation.txt', 'w', encoding='utf-8') as f:
            f.write(f"===== 详细评估报告 =====\n")
            f.write(f"生成时间: {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\n")
            
            for target_col in target_columns:
                f.write(f"\n目标列: {target_col}\n")
                if target_col in metrics_dict:
                    metrics = metrics_dict[target_col]
                    f.write(f"  RMSE: {metrics['RMSE']:.4f}\n")
                    f.write(f"  MAE: {metrics['MAE']:.4f}\n")
                    f.write(f"  R²: {metrics['R2']:.4f}\n")
                    f.write(f"  MAPE: {metrics['MAPE']:.2f}%\n")
                    f.write(f"  准确率分数: {metrics['Accuracy']:.4f}\n")  # 新增准确率记录
                else:
                    f.write("  未评估\n")
    
    # 输出执行时间
    end_time = datetime.datetime.now()
    print(f"\n执行完成！")
    print(f"开始时间: {start_time.strftime('%Y-%m-%d %H:%M:%S')}")
    print(f"结束时间: {end_time.strftime('%Y-%m-%d %H:%M:%S')}")
    print(f"总耗时: {(end_time - start_time).total_seconds():.2f} 秒")

# Script entry point
if __name__ == "__main__":
    main()
