import joblib
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import svm
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error
import os

# Plot styling: whitegrid theme plus a CJK-capable font so the Chinese
# axis labels and titles below render correctly.
plt.style.use('seaborn-v0_8-whitegrid')
plt.rcParams['font.sans-serif'] = ['SimHei']  # SimHei supplies the CJK glyphs
plt.rcParams['axes.unicode_minus'] = False  # render minus signs correctly with CJK fonts

# Column names used for the Excel dataset: two feature columns
# (ambient temperature, external temperature) and one target column
# (internal temperature). These strings are also persisted with the model.
FEATURE_NAMES = ["环境温度", "外部温度"]
TARGET_NAME = "内部温度"


def main(data_path='C:\\Users\\Administrator\\Desktop\\2.xlsx'):
    """Train and evaluate an RBF-kernel SVR temperature model.

    Reads an Excel dataset at *data_path* (all columns but the last are
    features, the last column is the target), shuffles it, trains an SVR
    on a 70/30 split, prints metrics, saves diagnostic plots, persists the
    model + scalers via save_model_and_scalers(), and exports predictions
    to '预测结果.xlsx'.

    Args:
        data_path: path to the input Excel file. Defaults to the original
            hard-coded location for backward compatibility.
    """
    # 1. Load the dataset
    try:
        data = pd.read_excel(data_path)
        print("数据加载成功! 数据维度:", data.shape)
    except Exception as e:
        print("数据加载失败:", e)
        return

    # 2. Preprocessing: shuffle the rows reproducibly
    data = data.sample(frac=1, random_state=42).reset_index(drop=True)

    # Separate features and target. .copy() avoids renaming columns on a
    # view of `data`; the explicit count check replaces a silent assumption
    # that the sheet has exactly len(FEATURE_NAMES) feature columns.
    features = data.iloc[:, :-1].copy()
    if features.shape[1] != len(FEATURE_NAMES):
        print(f"特征列数不匹配: 期望 {len(FEATURE_NAMES)}, 实际 {features.shape[1]}")
        return
    features.columns = FEATURE_NAMES

    targets = data.iloc[:, -1]
    targets.name = TARGET_NAME

    # 3. 70/30 train/test split (rows were already shuffled above)
    train_fraction = 0.7
    num_train = int(len(data) * train_fraction)

    X_train, X_test = features.iloc[:num_train], features.iloc[num_train:]
    y_train, y_test = targets.iloc[:num_train], targets.iloc[num_train:]

    print(f"训练集样本数: {len(X_train)}, 测试集样本数: {len(X_test)}")
    print(f"特征数: {X_train.shape[1]}")

    # 4. Min-max scale features and target to [0, 1]; fit on the training
    # split only so the test split is transformed with training statistics.
    scaler_X = MinMaxScaler(feature_range=(0, 1))
    scaler_y = MinMaxScaler(feature_range=(0, 1))

    X_train_scaled = scaler_X.fit_transform(X_train)
    X_test_scaled = scaler_X.transform(X_test)

    y_train_scaled = scaler_y.fit_transform(y_train.values.reshape(-1, 1)).ravel()
    y_test_scaled = scaler_y.transform(y_test.values.reshape(-1, 1)).ravel()

    # 5. Create and train the SVR model
    c = 4.0  # regularization (penalty) factor
    gamma = 0.8  # RBF kernel width
    epsilon = 0.01  # epsilon-insensitive tube half-width

    model = svm.SVR(kernel='rbf', C=c, gamma=gamma, epsilon=epsilon)
    model.fit(X_train_scaled, y_train_scaled)

    print("\n模型训练完成!")
    print(f"模型参数: C={c}, gamma={gamma}, epsilon={epsilon}")

    # 6. Predict both splits and map predictions back to original units
    train_pred_scaled = model.predict(X_train_scaled)
    test_pred_scaled = model.predict(X_test_scaled)

    train_pred = scaler_y.inverse_transform(train_pred_scaled.reshape(-1, 1)).ravel()
    test_pred = scaler_y.inverse_transform(test_pred_scaled.reshape(-1, 1)).ravel()

    # Use the true targets directly instead of a scale/inverse-scale round
    # trip, which would only add floating-point error to the references.
    y_train_orig = y_train.to_numpy(dtype=float)
    y_test_orig = y_test.to_numpy(dtype=float)

    # 7. Evaluation metrics for both splits
    train_rmse = np.sqrt(mean_squared_error(y_train_orig, train_pred))
    train_r2 = r2_score(y_train_orig, train_pred)
    train_mae = mean_absolute_error(y_train_orig, train_pred)
    train_mbe = np.mean(train_pred - y_train_orig)  # mean bias error (signed)

    test_rmse = np.sqrt(mean_squared_error(y_test_orig, test_pred))
    test_r2 = r2_score(y_test_orig, test_pred)
    test_mae = mean_absolute_error(y_test_orig, test_pred)
    test_mbe = np.mean(test_pred - y_test_orig)

    print("\n========= 模型性能指标 =========")
    print("             训练集      测试集")
    print(f"RMSE:      {train_rmse:.4f}     {test_rmse:.4f}")
    print(f"R²:        {train_r2:.4f}     {test_r2:.4f}")
    print(f"MAE:       {train_mae:.4f}     {test_mae:.4f}")
    print(f"MBE:       {train_mbe:.4f}     {test_mbe:.4f}")

    # 8. Visualization: time-series comparisons (top row) and
    # predicted-vs-true scatter plots (bottom row)
    plt.figure(figsize=(18, 12))

    # Training-set predicted vs. true curve
    plt.subplot(2, 2, 1)
    plt.plot(y_train_orig, 'r-', label='真实值', linewidth=1.5)
    plt.plot(train_pred, 'b--', label='预测值', linewidth=1)
    plt.title(f'训练集预测对比 (RMSE={train_rmse:.4f})', fontsize=12)
    plt.xlabel('样本序号', fontsize=10)
    plt.ylabel('目标值', fontsize=10)
    plt.legend()
    plt.grid(True, alpha=0.3)

    # Test-set predicted vs. true curve
    plt.subplot(2, 2, 2)
    plt.plot(y_test_orig, 'r-', label='真实值', linewidth=1.5)
    plt.plot(test_pred, 'b--', label='预测值', linewidth=1)
    plt.title(f'测试集预测对比 (RMSE={test_rmse:.4f})', fontsize=12)
    plt.xlabel('样本序号', fontsize=10)
    plt.ylabel('目标值', fontsize=10)
    plt.legend()
    plt.grid(True, alpha=0.3)

    # Training-set scatter with the ideal y=x line
    plt.subplot(2, 2, 3)
    plt.scatter(y_train_orig, train_pred, c='blue', alpha=0.6)
    min_val = min(np.min(y_train_orig), np.min(train_pred))
    max_val = max(np.max(y_train_orig), np.max(train_pred))
    plt.plot([min_val, max_val], [min_val, max_val], 'k--', linewidth=1.5)
    plt.title('训练集预测精度', fontsize=12)
    plt.xlabel('真实值', fontsize=10)
    plt.ylabel('预测值', fontsize=10)
    plt.grid(True, alpha=0.3)

    # Test-set scatter with the ideal y=x line
    plt.subplot(2, 2, 4)
    plt.scatter(y_test_orig, test_pred, c='red', alpha=0.6)
    min_val = min(np.min(y_test_orig), np.min(test_pred))
    max_val = max(np.max(y_test_orig), np.max(test_pred))
    plt.plot([min_val, max_val], [min_val, max_val], 'k--', linewidth=1.5)
    plt.title('测试集预测精度', fontsize=12)
    plt.xlabel('真实值', fontsize=10)
    plt.ylabel('预测值', fontsize=10)
    plt.grid(True, alpha=0.3)

    plt.tight_layout()
    plt.savefig('SVR预测结果.png', dpi=300, bbox_inches='tight')
    plt.show()
    save_model_and_scalers(model, scaler_X, scaler_y, FEATURE_NAMES, TARGET_NAME)

    # 9. Export per-sample predictions for both splits
    result_df = pd.DataFrame({
        '样本类型': ['训练集'] * len(y_train_orig) + ['测试集'] * len(y_test_orig),
        '真实值': np.concatenate([y_train_orig, y_test_orig]),
        '预测值': np.concatenate([train_pred, test_pred]),
        '误差': np.concatenate([train_pred - y_train_orig, test_pred - y_test_orig])
    })

    result_df.to_excel('预测结果.xlsx', index=False)
    print("预测结果已保存到 '预测结果.xlsx'")


def save_model_and_scalers(model, scaler_X, scaler_y, feature_names, target_name):
    """Persist the trained model, its scalers, and column metadata.

    Writes a single joblib bundle to 'model_without_current.pkl' that
    load_and_predict() and evaluate_entire_dataset() read back.
    """
    bundle = {
        'model': model,
        'scaler_X': scaler_X,
        'scaler_y': scaler_y,
        'feature_names': feature_names,
        'target_name': target_name,
    }
    joblib.dump(bundle, 'model_without_current.pkl')


def load_and_predict(new_sample, model_path='model_without_current.pkl'):
    """Load the persisted model bundle and predict one sample.

    Args:
        new_sample: sequence of feature values, ordered like the feature
            names saved with the model.
        model_path: path of the joblib bundle written by
            save_model_and_scalers(). Defaults to the training-script
            output so existing callers are unaffected.

    Returns:
        The predicted target value as a float, or None on any failure
        (missing file, wrong feature count, transform/predict error).
    """
    try:
        # Load the model and the fitted scalers
        saved_data = joblib.load(model_path)
        model = saved_data['model']
        scaler_X = saved_data['scaler_X']
        scaler_y = saved_data['scaler_y']
        feature_names = saved_data['feature_names']

        # Validate the feature count before building the frame
        if len(new_sample) != len(feature_names):
            raise ValueError(f"输入特征数量应为 {len(feature_names)}, 实际输入 {len(new_sample)}")

        # A DataFrame with the training column names keeps the scaler's
        # feature-name check happy and fixes the column order.
        sample_df = pd.DataFrame([new_sample], columns=feature_names)

        # Scale, predict, and map back to original units
        normalized_sample = scaler_X.transform(sample_df)
        pred_scaled = model.predict(normalized_sample)
        predicted_value = scaler_y.inverse_transform(pred_scaled.reshape(-1, 1))

        return predicted_value.flatten()[0]

    except Exception as e:
        print(f"预测失败: {e}")
        return None


def evaluate_entire_dataset(data_path='C:\\Users\\Administrator\\Desktop\\2.xlsx'):
    """Predict the whole dataset with the saved model and analyse the bias.

    Loads the joblib bundle written by save_model_and_scalers() exactly
    once (the original loaded it twice and then discarded the saved
    feature names), predicts every row of the Excel file at *data_path*,
    prints error statistics, saves an error-distribution figure, and
    exports per-sample results to '完整数据集预测结果.xlsx'.
    """
    try:
        # 1. Load the full dataset
        data = pd.read_excel(data_path)

        # 2. Load the model bundle: estimator, fitted scalers, and the
        # feature/target column names captured at training time.
        try:
            saved_data = joblib.load('model_without_current.pkl')
            model = saved_data['model']
            scaler_X = saved_data['scaler_X']
            scaler_y = saved_data['scaler_y']
            feature_names = saved_data['feature_names']
            target_name = saved_data['target_name']
        except Exception as e:
            print(f"加载模型失败: {e}")
            return

        # 3. Select exactly the trained-on columns, in training order
        # (indexing by name also guards against reordered spreadsheets).
        features = data[list(feature_names)]
        targets = data[target_name]

        # 4. Transform with the *saved* scaler — never refit at inference
        X_scaled = scaler_X.transform(features)

        # 5. Batch prediction, then map back to original units
        print("正在进行批量预测...")
        pred_scaled = model.predict(X_scaled)
        predictions = scaler_y.inverse_transform(pred_scaled.reshape(-1, 1)).ravel()

        # 6. Signed errors (prediction - truth) and their magnitudes
        errors = predictions - targets
        abs_errors = np.abs(errors)

        # 7. Summary statistics of the error distribution
        max_error = np.max(abs_errors)
        min_error = np.min(abs_errors)
        mean_error = np.mean(errors)  # non-zero mean indicates systematic bias
        median_error = np.median(errors)
        std_dev = np.std(errors)

        print("\n===== 偏差分析报告 (整个数据集) =====")
        print(f"最大偏差(误差): {max_error:.4f}")
        print(f"最小偏差(误差): {min_error:.4f}")
        print(f"平均偏差(误差): {mean_error:.4f} (系统偏差指示)")
        print(f"中位数偏差(误差): {median_error:.4f}")
        print(f"偏差标准差: {std_dev:.4f}")
        print(f"偏差范围: {mean_error:.4f} ± {std_dev:.4f}")

        # 8. Error-distribution plots: histogram and box plot
        plt.figure(figsize=(14, 7))

        # Histogram with mean and ±1 std-dev markers
        plt.subplot(1, 2, 1)
        plt.hist(errors, bins=30, color='skyblue', alpha=0.7, edgecolor='black')
        plt.axvline(mean_error, color='red', linestyle='dashed', linewidth=1.5, label=f'平均值: {mean_error:.4f}')
        plt.axvline(mean_error - std_dev, color='green', linestyle='dashed', linewidth=1)
        plt.axvline(mean_error + std_dev, color='green', linestyle='dashed', linewidth=1, label=f'±标准差: {std_dev:.4f}')
        plt.title('偏差分布直方图', fontsize=14)
        plt.xlabel('预测偏差(预测值-真实值)', fontsize=12)
        plt.ylabel('样本数量', fontsize=12)
        plt.legend()
        plt.grid(alpha=0.2)

        # Horizontal box plot with the raw errors overlaid as a strip
        plt.subplot(1, 2, 2)
        plt.boxplot(errors, vert=False, patch_artist=True,
                    boxprops=dict(facecolor='lightblue'))
        plt.scatter(errors, [1] * len(errors), color='blue', alpha=0.2, s=20)
        plt.axvline(mean_error, color='red', linestyle='dashed', linewidth=1.5)
        plt.title('偏差分布箱线图', fontsize=14)
        plt.xlabel('预测偏差(预测值-真实值)', fontsize=12)
        plt.yticks([])
        plt.grid(alpha=0.2)

        plt.tight_layout()
        plt.savefig('整个数据集预测偏差分析.png', dpi=300, bbox_inches='tight')
        plt.show()

        # 9. Export complete per-sample results
        result_df = pd.DataFrame({
            '真实值': targets,
            '预测值': predictions,
            '偏差': errors,
            '绝对误差': abs_errors
        })
        result_df.to_excel('完整数据集预测结果.xlsx', index=False)
        print("\n完整预测结果已保存到 '完整数据集预测结果.xlsx'")

        # 10. Report the 10 samples with the largest absolute error
        worst_samples = result_df.loc[result_df['绝对误差'].nlargest(10).index].copy()
        worst_samples['偏差排名'] = range(1, 11)
        print("\n偏差最大的10个样本:")
        print(worst_samples[['偏差排名', '真实值', '预测值', '偏差']].reset_index(drop=True))

    except Exception as e:
        print(f"整个数据集评估失败: {e}")
        import traceback
        traceback.print_exc()


def validate_prediction(new_features, known_truth=None):
    """Validate whether a new sample's prediction deviation is acceptable.

    Args:
        new_features: list of feature values for the new sample.
        known_truth: the known true target value (optional).

    Returns:
        dict: prediction plus validation/diagnostic information, or an
        error dict if prediction fails.
    """
    # Reference deviation statistics (from the earlier full-dataset analysis)
    ref_mean_error = -0.0510
    ref_std_dev = 1.0463
    max_acceptable_deviation = 2.5 * ref_std_dev  # 2.5-sigma acceptance threshold

    try:
        # Step 1: obtain the prediction
        prediction = load_and_predict(new_features)

        # load_and_predict returns None on failure; without this guard the
        # subtraction below would raise TypeError and surface as a vague
        # "预测验证失败" message.
        if prediction is None:
            return {
                "error": "预测验证失败: 模型预测返回空值",
                "suggestion": "请检查特征值格式或模型状态"
            }

        # Step 2: compute the deviation (only if the truth is known)
        if known_truth is not None:
            deviation = prediction - known_truth
            absolute_deviation = abs(deviation)

            # Step 3: is the deviation within the acceptance threshold?
            is_acceptable = absolute_deviation <= max_acceptable_deviation

            # Step 4: grade the deviation against the reference std-dev
            deviation_level = "正常"
            if absolute_deviation > 3 * ref_std_dev:
                deviation_level = "严重偏高"
            elif absolute_deviation > 2 * ref_std_dev:
                deviation_level = "偏高"
            elif absolute_deviation < 0.5 * ref_std_dev:
                deviation_level = "极低"

            # Step 5: build the diagnostic report
            confidence = "高置信度"
            if not is_acceptable:
                if deviation > 0:
                    deviation_type = "高估"
                else:
                    deviation_type = "低估"

                confidence = "需进一步验证"
                diagnostic = (f"预测值 {deviation_type} {abs(deviation):.4f}，超过阈值({max_acceptable_deviation:.4f})"
                              f"\n建议检查数据质量或特征异常")
            else:
                diagnostic = "预测结果在预期偏差范围内"

                # Refine the message for especially accurate predictions
                if absolute_deviation < 0.1:
                    diagnostic += "，预测精度优异"
                elif absolute_deviation < ref_std_dev:
                    diagnostic += "，预测精度良好"

            # Full validation result
            return {
                "prediction": prediction,
                "deviation": deviation,
                "is_acceptable": is_acceptable,
                "deviation_level": deviation_level,
                "confidence": confidence,
                "diagnostic": diagnostic,
                "ref_range": f"{ref_mean_error:.4f} ± {ref_std_dev:.4f}",
                "threshold": max_acceptable_deviation
            }

        # Truth unknown: return only the prediction and the reference range
        return {
            "prediction": prediction,
            "message": "预测已完成(未知真实值，无法计算偏差)",
            "ref_range": f"{ref_mean_error:.4f} ± {ref_std_dev:.4f}"
        }

    except Exception as e:
        return {
            "error": f"预测验证失败: {str(e)}",
            "suggestion": "请检查特征值格式或模型状态"
        }


if __name__ == "__main__":
    main()

    # Validation run with a known ground-truth value
    sample_features = [27.5, 70.8]
    sample_truth = 98.2  # assumed true value for the sample above

    result = validate_prediction(sample_features, sample_truth)
    print("\n===== 预测验证报告 =====")
    for key in result:
        print(f"{key}: {result[key]}")

    # Prediction-only run (no ground truth available)
    print("\n===== 无验证预测 =====")
    unknown_result = validate_prediction(sample_features)
    for key in unknown_result:
        print(f"{key}: {unknown_result[key]}")

    evaluate_entire_dataset()
