import numpy as np
import h5py
import os
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error, r2_score
import xgboost as xgb
import matplotlib.pyplot as plt
import time
from sklearn.multioutput import MultiOutputRegressor
import tqdm
from tqdm.auto import tqdm

class DataLoader:
    """Loads coupling-simulation datasets from an HDF5 file.

    Each group named ``set_<i>`` is expected to contain real/imaginary parts
    of three complex voltage waveforms (``Vc``, ``Vf_star``, ``Vr_star``) and
    a complex scalar parameter ``a`` (the regression target).
    """

    def __init__(self, file_path):
        """Initialize the data loader.

        Args:
            file_path (str): Path to the HDF5 file.
        """
        self.file_path = file_path

    def get_set_count(self):
        """Return the number of ``set_*`` groups in the file."""
        with h5py.File(self.file_path, 'r') as f:
            return sum(1 for key in f.keys() if key.startswith('set_'))

    def load_dataset(self, set_index):
        """Load one dataset group from the HDF5 file.

        Args:
            set_index (int): Index of the dataset (group ``set_<set_index>``).

        Returns:
            tuple: ``(features, labels)`` where ``features`` is a
            ``(1, n_features)`` row of waveform magnitudes and phases, and
            ``labels`` is a ``(1, 2)`` row holding ``a.real`` and ``a.imag``.
        """
        with h5py.File(self.file_path, 'r') as f:
            dataset = f[f'set_{set_index}']

            # Reassemble the complex waveforms from their stored parts.
            Vc = dataset['Vc_real'][()] + 1j * dataset['Vc_imag'][()]
            Vf_star = dataset['Vf_star_real'][()] + 1j * dataset['Vf_star_imag'][()]
            Vr_star = dataset['Vr_star_real'][()] + 1j * dataset['Vr_star_imag'][()]

            # The target parameter ``a`` is a complex scalar.
            a = dataset['a_real'][()] + 1j * dataset['a_imag'][()]

            # Feature vector: magnitude and phase of each waveform,
            # concatenated along axis 0 (then flattened to one sample row).
            features = np.concatenate([
                np.abs(Vc), np.angle(Vc),
                np.abs(Vf_star), np.angle(Vf_star),
                np.abs(Vr_star), np.angle(Vr_star)
            ], axis=0)

            # Labels: real and imaginary parts of ``a``.
            labels = np.array([a.real, a.imag])

            return features.reshape(1, -1), labels.reshape(1, -1)

    def load_all_datasets(self, sample_ratio=0.2):
        """Load a reproducible random subset of the datasets.

        Args:
            sample_ratio (float): Fraction of datasets to load (default 0.2).
                At least one dataset is always loaded.

        Returns:
            tuple: ``(X, y)`` — stacked feature matrix of shape
            ``(n_samples, n_features)`` and label matrix of shape
            ``(n_samples, 2)``.

        Raises:
            ValueError: If the file contains no ``set_*`` datasets.
        """
        n_sets = self.get_set_count()
        if n_sets == 0:
            # Fail early with a clear message instead of an opaque
            # np.vstack error on an empty list.
            raise ValueError(f"No 'set_*' datasets found in {self.file_path}")

        # Guarantee at least one sample even for very small ratios.
        n_samples = max(1, int(n_sets * sample_ratio))
        print(f"找到 {n_sets} 个数据集，将加载 {n_samples} 个数据集进行验证")

        # Use a dedicated seeded RNG for reproducibility without mutating
        # NumPy's global random state (RandomState(42) yields the same
        # sequence as the previous np.random.seed(42) + np.random.choice).
        rng = np.random.RandomState(42)
        selected_indices = rng.choice(n_sets, n_samples, replace=False)
        selected_indices.sort()  # read groups in on-disk order

        all_features = []
        all_labels = []
        for i in selected_indices:
            features, labels = self.load_dataset(i)
            all_features.append(features)
            all_labels.append(labels)

        X = np.vstack(all_features)
        y = np.vstack(all_labels)

        print(f"\n处理后的数据形状:")
        print(f"特征矩阵: {X.shape} (样本数, 特征数)")
        print(f"标签矩阵: {y.shape} (样本数, 2)")

        return X, y

def train_xgboost_model(X_train, y_train, X_test, y_test):
    """Train a multi-output XGBoost regressor for the complex parameter ``a``.

    One XGBRegressor is fitted per target column (real part, imaginary part)
    via MultiOutputRegressor. GPU training is used when CUDA devices are
    visible through torch; otherwise the model trains on CPU.

    Args:
        X_train: Training feature matrix, shape (n_train, n_features).
        y_train: Training targets, shape (n_train, 2).
        X_test: Test feature matrix, shape (n_test, n_features).
        y_test: Test targets, shape (n_test, 2).

    Returns:
        tuple: ``(model, y_pred)`` — the fitted MultiOutputRegressor and its
        predictions on ``X_test`` (shape (n_test, 2)).
    """
    # torch is only needed to count GPUs; fall back to CPU instead of
    # crashing when it is not installed.
    try:
        import torch
        n_gpus = torch.cuda.device_count()
    except ImportError:
        n_gpus = 0
    print(f"\n检测到 {n_gpus} 个GPU设备")

    # Base hyperparameters shared by both per-output models.
    params = {
        'n_estimators': 1000,
        'learning_rate': 0.01,
        'max_depth': 6,
        'min_child_weight': 1,
        'subsample': 0.8,
        'colsample_bytree': 0.8,
        'random_state': 42,
        'objective': 'reg:squarederror',
        'eval_metric': ['rmse', 'mae'],
        'tree_method': 'hist'  # no early_stopping_rounds (no eval_set is passed)
    }

    # Select the compute device based on GPU availability.
    if n_gpus > 0:
        params['device'] = 'cuda'
        print("将使用GPU训练")
    else:
        params['device'] = 'cpu'
        print("将使用CPU训练")

    base_model = xgb.XGBRegressor(**params)

    # Wrap so one regressor is trained per output column.
    model = MultiOutputRegressor(base_model)

    print("\n开始训练模型...")
    print(f"训练数据: {X_train.shape[0]:,} 个样本, 每个样本 {X_train.shape[1]:,} 个特征")
    print(f"测试数据: {X_test.shape[0]:,} 个样本, 每个样本 {X_test.shape[1]:,} 个特征")

    start_time = time.time()

    print("\n训练进度:")
    model.fit(X_train, y_train)

    # Report per-output train/test errors (index 0 = real part, 1 = imag part).
    for i, estimator in enumerate(model.estimators_):
        component = "实部" if i == 0 else "虚部"
        print(f"\n{component}模型得分:")
        train_pred = estimator.predict(X_train)
        test_pred = estimator.predict(X_test)

        train_rmse = np.sqrt(mean_squared_error(y_train[:, i], train_pred))
        test_rmse = np.sqrt(mean_squared_error(y_test[:, i], test_pred))
        train_mae = np.mean(np.abs(y_train[:, i] - train_pred))
        test_mae = np.mean(np.abs(y_test[:, i] - test_pred))

        print(f"训练集 RMSE: {train_rmse:.6f}")
        print(f"测试集 RMSE: {test_rmse:.6f}")
        print(f"训练集 MAE: {train_mae:.6f}")
        print(f"测试集 MAE: {test_mae:.6f}")

    training_time = time.time() - start_time
    print(f"\n总训练用时: {training_time:.2f} 秒")

    # Timed prediction pass over the full test set.
    print("\n开始预测...")
    predict_start_time = time.time()
    y_pred = model.predict(X_test)
    predict_time = time.time() - predict_start_time
    print(f"预测用时: {predict_time:.2f} 秒")
    print(f"每个样本平均预测时间: {predict_time/len(X_test)*1000:.2f} 毫秒")

    return model, y_pred

def plot_results(y_test, y_pred, save_dir):
    """Plot prediction results and save them as a PNG.

    Draws two panels: true vs. predicted points on the complex plane, and a
    histogram of element-wise absolute errors. The figure is written to
    ``<save_dir>/prediction_results.png``.
    """
    fig, (ax_plane, ax_hist) = plt.subplots(1, 2, figsize=(15, 5))

    # Left panel: scatter of true and predicted values in the complex plane.
    ax_plane.scatter(y_test[:, 0], y_test[:, 1], alpha=0.5, label='True')
    ax_plane.scatter(y_pred[:, 0], y_pred[:, 1], alpha=0.5, label='Predicted')
    ax_plane.set_xlabel('Real Part')
    ax_plane.set_ylabel('Imaginary Part')
    ax_plane.set_title('Predictions on Complex Plane')
    ax_plane.legend()
    ax_plane.grid(True)

    # Right panel: distribution of absolute errors over both components.
    abs_errors = np.abs(y_test - y_pred).flatten()
    ax_hist.hist(abs_errors, bins=50, alpha=0.75)
    ax_hist.set_xlabel('Error')
    ax_hist.set_ylabel('Count')
    ax_hist.set_title('Error Distribution')
    ax_hist.grid(True)

    fig.tight_layout()
    fig.savefig(os.path.join(save_dir, 'prediction_results.png'), dpi=300, bbox_inches='tight')
    plt.close(fig)

def save_predictions(y_test, y_pred, features_test, save_dir):
    """Save prediction results and error statistics as CSV files.

    Writes ``prediction_results.csv`` (per-sample values and errors) and
    ``error_statistics.csv`` (aggregate percentage errors) into ``save_dir``,
    and prints a summary of the errors.

    Args:
        y_test (np.ndarray): True values, shape (n, 2) as [real, imag].
        y_pred (np.ndarray): Predicted values, shape (n, 2) as [real, imag].
        features_test: Unused; kept for interface compatibility with callers.
        save_dir (str): Existing directory to write the CSV files into.
    """
    # Recombine the real/imag columns into complex values.
    a_true = y_test[:, 0] + 1j * y_test[:, 1]
    a_pred = y_pred[:, 0] + 1j * y_pred[:, 1]

    # Complex absolute error, computed once and reused below.
    absolute_error = np.abs(a_pred - a_true)

    # Samples with a zero true magnitude (or zero component) legitimately
    # produce inf relative errors; suppress the divide-by-zero warnings
    # without changing the resulting values.
    with np.errstate(divide='ignore', invalid='ignore'):
        relative_error = absolute_error / np.abs(a_true)
        percent_error = relative_error * 100
        # Per-component percentage errors.
        real_relative_error = np.abs(y_pred[:, 0] - y_test[:, 0]) / np.abs(y_test[:, 0]) * 100
        imag_relative_error = np.abs(y_pred[:, 1] - y_test[:, 1]) / np.abs(y_test[:, 1]) * 100

    # Assemble the per-sample results table.
    results_df = pd.DataFrame({
        'a_true_real': y_test[:, 0],
        'a_true_imag': y_test[:, 1],
        'a_pred_real': y_pred[:, 0],
        'a_pred_imag': y_pred[:, 1],
        'relative_error': relative_error,
        'percent_error': percent_error,
        'real_percent_error': real_relative_error,
        'imag_percent_error': imag_relative_error,
        'absolute_error': absolute_error
    })

    csv_path = os.path.join(save_dir, 'prediction_results.csv')
    results_df.to_csv(csv_path, index=False)
    print(f"预测结果已保存到: {csv_path}")

    # Print aggregate error statistics.
    print("\n预测误差统计:")
    print("复数形式误差:")
    print(f"相对误差: 平均值={relative_error.mean():.6f} ({percent_error.mean():.2f}%), "
          f"最大值={relative_error.max():.6f} ({percent_error.max():.2f}%)")
    print(f"绝对误差: 平均值={absolute_error.mean():.6f}, "
          f"最大值={absolute_error.max():.6f}")

    print("\n分量误差:")
    print(f"实部相对误差: 平均值={real_relative_error.mean():.2f}%, "
          f"最大值={real_relative_error.max():.2f}%")
    print(f"虚部相对误差: 平均值={imag_relative_error.mean():.2f}%, "
          f"最大值={imag_relative_error.max():.2f}%")

    # Persist the aggregate statistics in a separate file.
    stats_df = pd.DataFrame({
        'metric': ['复数相对误差', '复数相对误差(最大)', 
                  '实部相对误差', '实部相对误差(最大)',
                  '虚部相对误差', '虚部相对误差(最大)'],
        'value': [percent_error.mean(), percent_error.max(),
                 real_relative_error.mean(), real_relative_error.max(),
                 imag_relative_error.mean(), imag_relative_error.max()],
        'unit': ['%', '%', '%', '%', '%', '%']
    })

    stats_path = os.path.join(save_dir, 'error_statistics.csv')
    stats_df.to_csv(stats_path, index=False)
    print(f"\n误差统计已保存到: {stats_path}")

def main():
    """Entry point: load the data, train the model, and persist all results."""
    # Input locations.
    data_dir = r'E:\MLdata\SimData'
    input_file = os.path.join(data_dir, 'coupling_simulation_data.h5')

    # Bail out early when the data file is missing.
    if not os.path.exists(input_file):
        print(f"错误：未找到数据文件 {input_file}")
        return

    print("加载数据...")
    loader = DataLoader(input_file)
    X, y = loader.load_all_datasets(sample_ratio=0.2)
    total_samples = X.shape[0]

    # Hold out 10% of the samples as the test set.
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.1, random_state=42
    )

    n_train = X_train.shape[0]
    n_test = X_test.shape[0]
    print("\n数据集划分:")
    print(f"总数据量: {total_samples:,} 个样本")
    print(f"训练集: {n_train:,} 个样本 ({n_train/total_samples*100:.1f}%)")
    print(f"测试集: {n_test:,} 个样本 ({n_test/total_samples*100:.1f}%)")
    print(f"每个样本的特征数: {X_train.shape[1]:,}")
    print("-" * 50)

    print("\n开始训练XGBoost模型...")
    model, y_pred = train_xgboost_model(X_train, y_train, X_test, y_test)

    # Write every output artifact under a dedicated results directory.
    results_dir = os.path.join(data_dir, 'xgboost_results')
    os.makedirs(results_dir, exist_ok=True)

    print("\n保存预测结果...")
    save_predictions(y_test, y_pred, X_test, results_dir)

    print("\n绘制预测结果...")
    plot_results(y_test, y_pred, results_dir)

    print(f"\n所有结果已保存到: {results_dir}")

# Run the full pipeline only when executed as a script (not on import).
if __name__ == '__main__':
    main()