import numpy as np
import h5py
import glob
import os
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error, r2_score
import xgboost as xgb
import matplotlib.pyplot as plt

def load_data(file_path):
    """Read one HDF5 calibration file into lists of complex samples.

    Every top-level group is assumed to hold paired real/imag datasets
    ('Vc_real'/'Vc_imag', 'Vf_real'/'Vf_imag', 'Vr_real'/'Vr_imag',
    'a_real'/'a_imag') — assumption based on the keys read here; confirm
    against the file writer.

    Returns a dict {'Vc': [...], 'Vf': [...], 'Vr': [...], 'a': [...]}
    of per-sample values; array-valued groups are flattened into one
    sample per element.
    """
    collected = {name: [] for name in ('Vc', 'Vf', 'Vr', 'a')}

    with h5py.File(file_path, 'r') as h5:
        for group_name in h5.keys():
            grp = h5[group_name]

            # Rebuild complex values from their stored real/imag halves.
            Vc = grp['Vc_real'][()] + 1j * grp['Vc_imag'][()]
            Vf = grp['Vf_real'][()] + 1j * grp['Vf_imag'][()]
            Vr = grp['Vr_real'][()] + 1j * grp['Vr_imag'][()]
            a = grp['a_real'][()] + 1j * grp['a_imag'][()]

            if isinstance(Vc, np.ndarray):
                # Array-valued group: emit one sample per element, with
                # the group's whole `a` value repeated for each one.
                for i in range(len(Vc)):
                    for name, values in (('Vc', Vc), ('Vf', Vf), ('Vr', Vr)):
                        collected[name].append(values[i])
                    collected['a'].append(a)
            else:
                # Scalar-valued group: a single sample.
                for name, value in (('Vc', Vc), ('Vf', Vf),
                                    ('Vr', Vr), ('a', a)):
                    collected[name].append(value)

    return collected

def prepare_features(data):
    """Build (features, labels) arrays for the regression task.

    Each scalar sample yields a 6-element feature row
    (|Vc|, arg(Vc), |Vf|, arg(Vf), |Vr|, arg(Vr)) and a 2-element label
    (Re(a), Im(a)). Samples whose voltages are not scalars are skipped
    with a console warning.
    """
    features = []
    labels = []

    # All four lists are assumed to have equal length; shorter companion
    # lists would raise IndexError, matching the index-based access here.
    for i, Vc in enumerate(data['Vc']):
        Vf = data['Vf'][i]
        Vr = data['Vr'][i]
        a = data['a'][i]

        # Only scalar voltages can be flattened into a feature row.
        if not (np.isscalar(Vc) and np.isscalar(Vf) and np.isscalar(Vr)):
            print(f"警告：第{i}个样本的数据不是标量，已跳过")
            continue

        # Magnitude/phase decomposition of each complex voltage.
        row = []
        for v in (Vc, Vf, Vr):
            row.extend((np.abs(v), np.angle(v)))
        features.append(row)
        labels.append([a.real, a.imag])

    return np.array(features), np.array(labels)

def train_xgboost_model(X_train, y_train, X_test, y_test):
    """Fit two XGBoost regressors — one per complex component of `a`.

    Parameters
    ----------
    X_train : training feature matrix.
    y_train : (N, 2) label array; column 0 = Re(a), column 1 = Im(a).
    X_test : feature matrix to predict on.
    y_test : unused; kept for backward compatibility with existing callers.

    Returns
    -------
    (model_real, model_imag, y_pred) where y_pred is an (M, 2) array of
    [real, imag] predictions for X_test.
    """
    # Single shared hyperparameter set keeps both component models
    # consistent (previously duplicated literal-for-literal).
    params = dict(
        n_estimators=100,
        learning_rate=0.1,
        max_depth=5,
        random_state=42,
    )

    model_real = xgb.XGBRegressor(**params)
    model_imag = xgb.XGBRegressor(**params)

    # Train one regressor per label column.
    model_real.fit(X_train, y_train[:, 0])
    model_imag.fit(X_train, y_train[:, 1])

    # Predict each component and stack into an (M, 2) array.
    y_pred = np.column_stack([
        model_real.predict(X_test),
        model_imag.predict(X_test),
    ])

    return model_real, model_imag, y_pred

def plot_results(y_test, y_pred):
    """Scatter-plot predicted vs. actual values for Re(a) and Im(a).

    Saves the figure to 'prediction_results.png' in the CWD and closes
    it; returns nothing. `y_test`/`y_pred` are (N, 2) arrays with
    column 0 = real part, column 1 = imaginary part.
    """
    plt.figure(figsize=(12, 5))

    # One subplot per complex component; the loop replaces the previous
    # copy-pasted pair of subplot sections and emits identical labels.
    for col, part in ((0, '实部'), (1, '虚部')):
        plt.subplot(1, 2, col + 1)
        actual = y_test[:, col]
        predicted = y_pred[:, col]
        plt.scatter(actual, predicted, alpha=0.5)
        # y = x reference line spanning the range of actual values.
        lo, hi = actual.min(), actual.max()
        plt.plot([lo, hi], [lo, hi], 'r--', lw=2)
        plt.xlabel(f'实际值 ({part})')
        plt.ylabel(f'预测值 ({part})')
        plt.title(f'a的{part}预测结果')

    plt.tight_layout()
    plt.savefig('prediction_results.png')
    plt.close()

def main():
    """Entry point: train XGBoost models predicting the complex parameter
    `a` from complex voltages (Vc, Vf, Vr) read from HDF5 calibration files.

    Side effects: console progress output, and writes
    'prediction_results.png', 'xgboost_model_real.json' and
    'xgboost_model_imag.json' to the current working directory.
    """
    # Locate all calibration files in the (hard-coded Windows) data dir.
    data_dir = r'E:\Experiment\cali_data'
    h5_files = glob.glob(os.path.join(data_dir, '*_cali.h5'))
    
    if not h5_files:
        print("错误：未找到校准数据文件")
        return
    
    print(f"找到 {len(h5_files)} 个校准数据文件")
    
    # Accumulate samples from every file into one flat structure.
    all_data = {
        'Vc': [],
        'Vf': [],
        'Vr': [],
        'a': []
    }
    
    for file_path in h5_files:
        print(f"加载文件: {file_path}")
        data = load_data(file_path)
        print(f"文件中数据样本数: Vc={len(data['Vc'])}, Vf={len(data['Vf'])}, "
              f"Vr={len(data['Vr'])}, a={len(data['a'])}")
        
        # Debug output: type (and shape, if array) of the first sample.
        print(f"数据类型: Vc={type(data['Vc'][0])}, a={type(data['a'][0])}")
        if isinstance(data['Vc'][0], np.ndarray):
            print(f"Vc形状: {data['Vc'][0].shape}")
        
        for key in all_data:
            all_data[key].extend(data[key])
    
    # Convert accumulated lists to numpy arrays.
    # NOTE(review): np.array over a list mixing scalars and arrays of
    # differing shapes raises on NumPy >= 1.24 — assumes each key's
    # samples are homogeneous; confirm against the data files.
    for key in all_data:
        all_data[key] = np.array(all_data[key])
        print(f"{key} 最终形状: {all_data[key].shape}")
    
    print("\n准备训练数据...")
    X, y = prepare_features(all_data)
    print(f"特征形状: {X.shape}")
    print(f"标签形状: {y.shape}")
    
    # 80/20 train/test split with a fixed seed for reproducibility.
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.2, random_state=42
    )
    
    print("开始训练XGBoost模型...")
    model_real, model_imag, y_pred = train_xgboost_model(
        X_train, y_train, X_test, y_test
    )
    
    # Evaluate the real and imaginary components separately.
    mse_real = mean_squared_error(y_test[:, 0], y_pred[:, 0])
    mse_imag = mean_squared_error(y_test[:, 1], y_pred[:, 1])
    r2_real = r2_score(y_test[:, 0], y_pred[:, 0])
    r2_imag = r2_score(y_test[:, 1], y_pred[:, 1])
    
    print("\n模型评估结果:")
    print(f"实部 MSE: {mse_real:.6f}")
    print(f"实部 R2: {r2_real:.6f}")
    print(f"虚部 MSE: {mse_imag:.6f}")
    print(f"虚部 R2: {r2_imag:.6f}")
    
    # Scatter plots of predicted vs. actual, saved to disk.
    print("\n绘制预测结果...")
    plot_results(y_test, y_pred)
    
    # Persist both trained models in XGBoost's JSON format.
    print("\n保存模型...")
    model_real.save_model('xgboost_model_real.json')
    model_imag.save_model('xgboost_model_imag.json')
    print("模型已保存为 'xgboost_model_real.json' 和 'xgboost_model_imag.json'")

# Script entry point: run the full training pipeline when executed directly.
if __name__ == '__main__':
    main() 