import numpy as np
import h5py
import os
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error, r2_score
import xgboost as xgb
import matplotlib.pyplot as plt
import time

def load_merged_data(file_path):
    """Load the merged HDF5 dataset and return it as a nested dict.

    The returned dict has one entry per waveform ('Vc', 'Vf', 'Vr'), each a
    dict with 'mag' and 'phase' arrays, plus a complex array under 'a'.

    NOTE(review): despite the 'mag'/'phase' key names, the values come from
    the ``*_real`` / ``*_imag`` datasets in the file — the naming looks
    historical; confirm against whatever wrote ``merged_data.h5``.
    """
    with h5py.File(file_path, 'r') as f:
        loaded = {}
        # The three waveform channels share the same dataset layout.
        for wave in ('Vc', 'Vf', 'Vr'):
            loaded[wave] = {
                'mag': f[f'{wave}_real'][()],
                'phase': f[f'{wave}_imag'][()],
            }
        # 'a' is stored as separate real/imag datasets; recombine to complex.
        loaded['a'] = f['a_real'][()] + 1j * f['a_imag'][()]

        # Shapes are printed purely as a debugging aid.
        print("\n数据形状:")
        for wave in ('Vc', 'Vf', 'Vr'):
            print(f"{wave}_mag: {loaded[wave]['mag'].shape}")
            print(f"{wave}_phase: {loaded[wave]['phase'].shape}")
        print(f"a: {loaded['a'].shape}")

    return loaded

def prepare_features(data):
    """Build the (features, labels) matrices for regression.

    Parameters
    ----------
    data : dict
        Output of ``load_merged_data``: ``data[wave]['mag'|'phase']`` are
        flat 1-D arrays holding all waveforms concatenated back-to-back, and
        ``data['a']`` is a complex array with one entry per waveform.

    Returns
    -------
    features : ndarray of shape (n_samples, 6 * n_points)
        All six channels (mag/phase of Vc, Vf, Vr) flattened per sample.
    labels : ndarray of shape (n_samples, 2)
        Real and imaginary parts of ``data['a']``.

    Raises
    ------
    ValueError
        If the total waveform length is not an exact multiple of the number
        of samples (previously this surfaced as a confusing reshape error).
    """
    # The number of waveforms is determined by the number of labels.
    n_samples = len(data['a'])
    total_points = data['Vc']['mag'].shape[0]

    # Guard against mis-shaped input: the reshapes below require an exact
    # division (the old code floor-divided silently).
    if n_samples == 0 or total_points % n_samples != 0:
        raise ValueError(
            f"waveform length {total_points} is not divisible by "
            f"sample count {n_samples}"
        )
    n_points = total_points // n_samples
    print(f"\n每个波形的点数: {n_points}")

    # Reshape each flat channel to [n_samples, n_points] and concatenate all
    # six channels into one feature row per sample.
    channels = []
    for wave in ('Vc', 'Vf', 'Vr'):
        for part in ('mag', 'phase'):
            channels.append(data[wave][part].reshape(n_samples, n_points))
    features = np.hstack(channels)

    # Labels: real and imaginary part of the complex coefficient a.
    labels = np.column_stack([np.real(data['a']), np.imag(data['a'])])

    print(f"\n处理后的数据形状:")
    print(f"特征矩阵: {features.shape} (样本数, 特征数)")
    print(f"标签矩阵: {labels.shape} (样本数, 2)")

    return features, labels

def train_xgboost_model(X_train, y_train, X_test, y_test):
    """Train a multi-output XGBoost regressor and predict on the test set.

    Parameters
    ----------
    X_train, X_test : array-like of shape (n_samples, n_features)
    y_train : array-like of shape (n_samples, 2)
        Real/imaginary regression targets.
    y_test : array-like
        Accepted for interface compatibility; not used for fitting here.

    Returns
    -------
    (model, y_pred)
        The fitted ``MultiOutputRegressor`` and its predictions on
        ``X_test``, shape (n_test, 2).
    """
    # torch is used only to count CUDA devices; degrade gracefully to CPU
    # when it is not installed instead of crashing on import.
    try:
        import torch
        n_gpus = torch.cuda.device_count()
    except ImportError:
        n_gpus = 0
    print(f"\n检测到 {n_gpus} 个GPU设备")

    params = {
        'n_estimators': 1000,
        'learning_rate': 0.01,
        'max_depth': 6,
        'min_child_weight': 1,
        'subsample': 0.8,
        'colsample_bytree': 0.8,
        'random_state': 42,
        'objective': 'reg:squarederror',
        'eval_metric': 'rmse',
        'tree_method': 'hist',
        # A single-process XGBoost run trains on one GPU. The previous
        # 'n_jobs = n_gpus' setting only changed the CPU thread count and
        # did NOT enable multi-GPU training, so it has been dropped.
        'device': 'cuda' if n_gpus > 0 else 'cpu',
    }
    if n_gpus == 0:
        print("未检测到GPU，将使用CPU训练")

    # One independent regressor per output column (real part, imaginary part).
    from sklearn.multioutput import MultiOutputRegressor
    model = MultiOutputRegressor(xgb.XGBRegressor(**params))

    print("\n开始训练模型...")
    print(f"训练数据: {X_train.shape[0]:,} 个样本, 每个样本 {X_train.shape[1]:,} 个特征")
    print(f"测试数据: {X_test.shape[0]:,} 个样本, 每个样本 {X_test.shape[1]:,} 个特征")

    start_time = time.time()
    try:
        model.fit(X_train, y_train)
        device_type = "GPU" if n_gpus > 0 else "CPU"
        print(f"成功使用{device_type}训练完成")
    except Exception as e:
        # Only a GPU failure justifies a CPU retry. The old code printed a
        # misleading "GPU training failed" message and retried with identical
        # parameters even when the failure happened on CPU; re-raise instead.
        if n_gpus == 0:
            raise
        print(f"GPU训练失败: {str(e)}")
        print("切换到CPU训练...")
        params['device'] = 'cpu'
        model = MultiOutputRegressor(xgb.XGBRegressor(**params))
        model.fit(X_train, y_train)

    training_time = time.time() - start_time
    print(f"训练用时: {training_time:.2f} 秒")

    print("\n开始预测...")
    predict_start_time = time.time()
    y_pred = model.predict(X_test)
    predict_time = time.time() - predict_start_time
    print(f"预测用时: {predict_time:.2f} 秒")
    print(f"每个样本平均预测时间: {predict_time/len(X_test)*1000:.2f} 毫秒")

    return model, y_pred

def plot_results(y_test, y_pred, save_dir):
    """Render a two-panel summary figure of the predictions.

    Left panel: true vs. predicted values scattered on the complex plane
    (column 0 = real part, column 1 = imaginary part).
    Right panel: histogram of per-component absolute errors.
    The figure is written to ``<save_dir>/prediction_results.png``.
    """
    # Per-component absolute errors, computed up front for the histogram.
    abs_errors = np.abs(y_test - y_pred)

    plt.figure(figsize=(15, 5))

    # Complex-plane scatter of true vs. predicted coefficients.
    plt.subplot(121)
    plt.scatter(y_test[:, 0], y_test[:, 1], alpha=0.5, label='True')
    plt.scatter(y_pred[:, 0], y_pred[:, 1], alpha=0.5, label='Predicted')
    plt.xlabel('Real Part')
    plt.ylabel('Imaginary Part')
    plt.title('Predictions on Complex Plane')
    plt.legend()
    plt.grid(True)

    # Distribution of all flattened component errors.
    plt.subplot(122)
    plt.hist(abs_errors.flatten(), bins=50, alpha=0.75)
    plt.xlabel('Error')
    plt.ylabel('Count')
    plt.title('Error Distribution')
    plt.grid(True)

    plt.tight_layout()
    out_path = os.path.join(save_dir, 'prediction_results.png')
    plt.savefig(out_path, dpi=300, bbox_inches='tight')
    plt.close()

def save_predictions(y_test, y_pred, features_test, save_dir):
    """Persist per-sample prediction errors and summary statistics as CSV.

    Writes ``prediction_results.csv`` (one row per sample) and
    ``error_statistics.csv`` (aggregate metrics) into ``save_dir``, and
    prints the same statistics to stdout.

    ``features_test`` is accepted for interface compatibility but unused.
    """
    # Recombine the two regression outputs into complex coefficients.
    a_true = y_test[:, 0] + 1j * y_test[:, 1]
    a_pred = y_pred[:, 0] + 1j * y_pred[:, 1]

    # Complex-valued error measures; the absolute error is computed once
    # and reused everywhere below.
    abs_error = np.abs(a_pred - a_true)
    relative_error = abs_error / np.abs(a_true)
    percent_error = relative_error * 100

    # Component-wise relative errors, already expressed in percent.
    real_relative_error = np.abs(y_pred[:, 0] - y_test[:, 0]) / np.abs(y_test[:, 0]) * 100
    imag_relative_error = np.abs(y_pred[:, 1] - y_test[:, 1]) / np.abs(y_test[:, 1]) * 100

    # Per-sample results table.
    csv_path = os.path.join(save_dir, 'prediction_results.csv')
    pd.DataFrame({
        'true_real': y_test[:, 0],
        'true_imag': y_test[:, 1],
        'pred_real': y_pred[:, 0],
        'pred_imag': y_pred[:, 1],
        'relative_error': relative_error,
        'percent_error': percent_error,
        'real_part_error': real_relative_error,
        'imag_part_error': imag_relative_error,
        'absolute_error': abs_error,
    }).to_csv(csv_path, index=False)
    print(f"预测结果已保存到: {csv_path}")

    print("\n预测误差统计:")
    print("复数形式误差:")
    print(f"相对误差: 平均值={relative_error.mean():.6f} ({percent_error.mean():.2f}%), "
          f"最大值={relative_error.max():.6f} ({percent_error.max():.2f}%)")
    print(f"绝对误差: 平均值={abs_error.mean():.6f}, "
          f"最大值={abs_error.max():.6f}")

    print("\n分量误差:")
    print(f"实部相对误差: 平均值={real_relative_error.mean():.2f}%, "
          f"最大值={real_relative_error.max():.2f}%")
    print(f"虚部相对误差: 平均值={imag_relative_error.mean():.2f}%, "
          f"最大值={imag_relative_error.max():.2f}%")

    # Aggregate statistics in long format, one metric per row.
    metric_rows = [
        ('Complex Relative Error', percent_error.mean()),
        ('Complex Relative Error (Max)', percent_error.max()),
        ('Real Part Relative Error', real_relative_error.mean()),
        ('Real Part Relative Error (Max)', real_relative_error.max()),
        ('Imaginary Part Relative Error', imag_relative_error.mean()),
        ('Imaginary Part Relative Error (Max)', imag_relative_error.max()),
    ]
    stats_df = pd.DataFrame({
        'metric': [name for name, _ in metric_rows],
        'value': [value for _, value in metric_rows],
        'unit': ['%'] * len(metric_rows),
    })

    stats_path = os.path.join(save_dir, 'error_statistics.csv')
    stats_df.to_csv(stats_path, index=False)
    print(f"\n误差统计已保存到: {stats_path}")

def main():
    """Script entry point: load data, train the model, and save all artifacts."""
    base_dir = r'E:\MLdata\AFFon\cali_data'
    h5_path = os.path.join(base_dir, 'merged_data.h5')

    # Bail out early when the expected dataset is missing.
    if not os.path.exists(h5_path):
        print(f"错误：未找到数据文件 {h5_path}")
        return

    print("加载数据...")
    raw = load_merged_data(h5_path)

    print("准备特征...")
    feats, targets = prepare_features(raw)
    n_total = feats.shape[0]

    # Hold out 20% of the samples for testing; fixed seed for reproducibility.
    X_train, X_test, y_train, y_test = train_test_split(
        feats, targets, test_size=0.2, random_state=42
    )

    print("\n数据集划分:")
    print(f"总数据量: {n_total:,} 个样本")
    print(f"训练集: {X_train.shape[0]:,} 个样本 ({X_train.shape[0]/n_total*100:.1f}%)")
    print(f"测试集: {X_test.shape[0]:,} 个样本 ({X_test.shape[0]/n_total*100:.1f}%)")
    print(f"每个样本的特征数: {X_train.shape[1]:,}")
    print("-" * 50)

    print("\n开始训练XGBoost模型...")
    model, y_pred = train_xgboost_model(X_train, y_train, X_test, y_test)

    # All artifacts (CSV tables and the figure) go into one results directory.
    results_dir = os.path.join(base_dir, 'xgboost_results')
    os.makedirs(results_dir, exist_ok=True)

    print("\n保存预测结果...")
    save_predictions(y_test, y_pred, X_test, results_dir)

    print("\n绘制预测结果...")
    plot_results(y_test, y_pred, results_dir)

    print(f"\n所有结果已保存到: {results_dir}")

# Standard entry guard: run the full pipeline only when executed as a script.
if __name__ == '__main__':
    main() 