import os, shutil
import sys
import datetime
import joblib
import math
import numpy as np
import pandas as pd
import torch
from sklearn.covariance import MinCovDet, LedoitWolf
from sklearn.preprocessing import RobustScaler, StandardScaler, MinMaxScaler

from PyEMD import EMD
from orbitP.script import config
import yaml
import pywt
import argparse
from joblib import load
from orbitP.script.plot import plot_pml, plot_residue, plot_error, plot_rmse_3D, plot_rmse_all, plot_mutilEval
from tqdm import tqdm
from astropy.time import Time

# Axis labels used in printed reports: radial (R), along-track (S),
# cross-track (W), and the combined 3-D norm.
idx = ["R","S","W","3D"]

def load_yaml_config(config_path):
    """Load parameters from a YAML file and return the parsed contents."""
    with open(config_path, "r") as fh:
        parsed = yaml.safe_load(fh)
    return parsed

def init_args(args):
    """Build the final argparse namespace from the model's YAML config.

    The incoming ``args`` supplies the model name used to locate the YAML
    file; its attributes are then replaced by a freshly parsed namespace
    whose defaults come from the YAML and can be overridden on the CLI.

    :param args: namespace carrying at least ``model``
    :return: fully populated argparse namespace
    """
    # Pick the compute device once, globally.
    config.device = "cuda" if torch.cuda.is_available() else "cpu"
    print(config.device)

    cPath = config.configModelDir + args.model + ".yaml"
    configMsg = load_yaml_config(cPath)

    parser = argparse.ArgumentParser()
    for key, value in configMsg.items():
        if isinstance(value, list):  # lists: infer element type from the first entry
            parser.add_argument(f"--{key}", nargs='+', type=type(value[0]), default=value)
        else:
            # type(value) is the class object itself — no need for
            # eval(type(value).__name__), which also failed for NoneType.
            parser.add_argument(f"--{key}", type=type(value), default=value)

    # Re-parse so command-line flags can override the YAML defaults.
    args = parser.parse_args()
    args.lambda_l2 = float(args.lambda_l2)
    if args.model == "PathFormer":
        args.patch_size_list = np.array(args.patch_size_list).reshape(args.num_layers, -1).tolist()

    print(args)
    return args


def getL2(model):
    """Sum of squared values over all trainable parameters (L2 penalty term)."""
    terms = (torch.sum(p * p) for p in model.parameters() if p.requires_grad)
    return sum(terms, 0.0)

def RMSE(pred,src):
    """Root-mean-square error between ``pred`` and ``src`` (same shape required)."""
    pred, src = np.asarray(pred), np.asarray(src)
    if src.shape != pred.shape:
        raise ValueError("src and pred must have the same shape")
    return np.sqrt(np.mean((src - pred) ** 2))

def MAE(pred,src):
    """Mean absolute error between ``pred`` and ``src`` (same shape required)."""
    pred, src = np.asarray(pred), np.asarray(src)
    if src.shape != pred.shape:
        raise ValueError("src and pred must have the same shape")
    return np.abs(src - pred).mean()


def save_config(args,savePath):
    """Write every attribute of ``args`` as ``key: value`` lines to ``savePath``."""
    lines = [f"{key}: {value}\n" for key, value in vars(args).items()]
    with open(savePath, 'w') as fh:
        fh.writelines(lines)

def get_file_paths(directory):
    """Recursively collect every file path under ``directory``, sorted and normalized."""
    collected = [
        os.path.normpath(os.path.join(root, name))
        for root, _dirs, names in os.walk(directory)
        for name in names
    ]
    return sorted(collected)

def log_loss(epoch:int, loss_val : float, train : bool = True):
    """Append one ``Epoch<n>: <loss>`` line to the train/test loss log.

    :param epoch: zero-based epoch index (logged as ``epoch + 1``)
    :param loss_val: loss value to record
    :param train: selects ``train_loss.txt`` vs ``test_loss.txt``
    """
    file_name = "train_loss.txt" if train else "test_loss.txt"
    path_to_file = config.lossPath + file_name
    os.makedirs(os.path.dirname(path_to_file), exist_ok=True)
    # Append so successive epochs accumulate; the with-block closes the
    # file — the old explicit f.close() inside it was redundant.
    with open(path_to_file, "a") as f:
        f.write(f"Epoch{epoch + 1}: {loss_val}\n")

def save_pml(epoch, pml, title):
    """Append a per-epoch PML value to ``<config.lossPath><title>.txt``.

    :param epoch: zero-based epoch index (logged as ``epoch + 1``)
    :param pml: PML metric value to record
    :param title: base name of the log file
    """
    path_to_file = config.lossPath + title + ".txt"
    os.makedirs(os.path.dirname(path_to_file), exist_ok=True)
    # with-block closes the file; the old explicit f.close() was redundant.
    with open(path_to_file, "a") as f:
        f.write(f"Epoch{epoch + 1}: {pml}\n")

def save_loss(epoch,train_loss=0,val_loss=0,trainFlag=False,valFlag=False):
    """Conditionally log training and/or validation loss for one epoch."""
    if trainFlag:
        log_loss(epoch, train_loss, train=True)
    if valFlag:
        log_loss(epoch, val_loss, train=False)

def get_SULT():
    """Load the four dataset CSVs from ``config.dataSetDir`` as numpy arrays.

    :return: (ObserData, MOEORBData, stampObser, stampMOEORB)
    """
    names = ['df_ObserData.csv', 'df_MOEORBData.csv',
             'df_stampObser.csv', 'df_stampMOEORB.csv']
    arrays = [pd.read_csv(config.dataSetDir + name).to_numpy() for name in names]
    ObserData, MOEORBData, stampObser, stampMOEORB = arrays
    return ObserData, MOEORBData, stampObser, stampMOEORB



def get_singleError(predError,tgtError):
    """Print RMSE and min/avg/max |error| stats for single-axis predictions."""
    if predError.shape[-1] != 1:
        print("single Error")
        sys.exit(0)
    residual = predError - tgtError
    print(f"RMSE_{idx[config.outputIdx]}: {RMSE(predError, tgtError)}")
    MIN, AVE, MAX = cal_single_MAM(np.abs(residual))
    print(f"{idx[config.outputIdx]}: MIN {MIN} ; AVE {AVE} ; MAX {MAX}")

def get_3DError(predError,tgtError):
    """Print per-axis and 3-D RMSE plus min/avg/max stats for the raw
    ("origin") errors and for the model residuals."""
    if predError.shape[-1] != 3:
        print("3D Error")
        sys.exit(0)

    def _report(label, err):
        # Per-axis RMSE over samples and timesteps, plus the Euclidean 3-D RMSE.
        print(label)
        rmse_3d = np.sqrt(np.mean(np.sum(err ** 2, axis=2)))
        rmse_r, rmse_s, rmse_w = np.sqrt(np.mean(err ** 2, axis=(0, 1)))
        print(f"RMSE_R: {rmse_r} RMSE_S: {rmse_s} RMSE_W: {rmse_w}")
        print(f"RMSE_3D: {rmse_3d}")
        stats = cal_3D_MAM(np.abs(err))
        for i, name in enumerate(idx):
            print(f"{name}: MIN {stats[i][0]} ; AVE {stats[i][1]} ; MAX {stats[i][2]}")

    _report("origin", tgtError)
    _report("model", predError - tgtError)


def save_pred_allRmse(predList,targetList):
    """Plot mean |error| curves for every model in ``predList``.

    :param predList: dict mapping model name -> prediction array,
        shape presumably (samples, predicting_length, axis) — confirm
        against callers.
    :param targetList: dict with matching target arrays per model name.

    Produces one plot per axis (k = 0..config.axis-1) and a final plot
    (index 3) for the Euclidean 3-D norm; the raw ("origin") error curve
    is added once per plot.
    """
    # Start from a clean plot directory.
    if os.path.exists(config.predPath):
        shutil.rmtree(config.predPath)
    if not os.path.exists(config.predPath):
        os.mkdir(config.predPath)
    # --- per-axis plots ---
    for k in range(config.axis):
        res = []
        title = []
        flag = True  # append the "origin" curve only for the first model
        for modelName, predNow in predList.items():
            pred = predNow[:,:,k:k+1].reshape(-1, config.outputSize)
            target = targetList[modelName][:,:,k:k+1].reshape(-1, config.outputSize)
            predError, targetError = scalerInver(pred,target)
            test_size = predError.shape[0] // config.predicting_length
            predError = predError[:, :config.outputSize]
            targetError = targetError[:, :config.outputSize]
            resError = predError - targetError
            resErr = resError.reshape([test_size, config.predicting_length, config.outputSize])
            oriErr = targetError.reshape([test_size, config.predicting_length, config.outputSize])
            modErr = predError.reshape([test_size, config.predicting_length, config.outputSize])
            # Mean absolute error per prediction step, averaged over samples.
            resErr = np.abs(resErr).mean(axis=0)
            oriErr = np.abs(oriErr).mean(axis=0)
            modErr = np.abs(modErr).mean(axis=0)
            if flag:
                flag = False
                res.append(oriErr)
                title.append("origin")
            res.append(resErr)
            title.append(modelName)
        plot_rmse_all(config.predPath,res,title, k)

    # --- 3-D (Euclidean norm across all axes) plot ---
    res = []
    title = []
    flag = True
    for modelName, predNow in predList.items():
        pred = predNow.reshape(-1, config.axis)
        target = targetList[modelName].reshape(-1, config.axis)
        predError, targetError = scalerInver(pred, target,scalerType=config.axis)
        test_size = predError.shape[0] // config.predicting_length
        predError = predError[:, :config.axis]
        targetError = targetError[:, :config.axis]
        resError = predError - targetError
        resErr = resError.reshape([test_size, config.predicting_length, config.axis])
        oriErr = targetError.reshape([test_size, config.predicting_length, config.axis])
        modErr = predError.reshape([test_size, config.predicting_length, config.axis])
        resErr_3d = np.linalg.norm(resErr, axis=2, keepdims=True)  # (N,96,1)
        oriErr_3d = np.linalg.norm(oriErr, axis=2, keepdims=True)  # (N,96,1)
        modErr_3d = np.linalg.norm(modErr, axis=2, keepdims=True)  # (N,96,1)
        resErr_3d = np.abs(resErr_3d).mean(axis=0)
        oriErr_3d = np.abs(oriErr_3d).mean(axis=0)
        modErr_3d = np.abs(modErr_3d).mean(axis=0)
        if flag:
            flag = False
            res.append(oriErr_3d)
            title.append("origin")
        res.append(resErr_3d)
        title.append(modelName)
    plot_rmse_all(config.predPath, res, title, 3)

def save_multiEval(predList,targetList,args):
    """Per-GPS-category evaluation: compute RMSE/MAE/PML for each category
    and plot them.

    Recreates ``config.predPath``, inverse-scales the flattened predictions
    and targets, reshapes them into per-sample windows, reads the GPS
    category from the trailing features of each sample's final timestep,
    and aggregates metrics per category.
    """
    if os.path.exists(config.predPath):
        shutil.rmtree(config.predPath)
    if not os.path.exists(config.predPath):
        os.mkdir(config.predPath)
    meList = []
    pmlList = []
    predList = predList.reshape(-1, config.outputSize)
    targetList = targetList.reshape(-1, config.outputSize)
    predError, srcError = scalerInver(predList, targetList)

    test_size = predError.shape[0] // config.training_length
    predError = np.array(predError).reshape([test_size, config.training_length, args.feature_size])
    srcError = np.array(srcError).reshape([test_size, config.training_length, args.feature_size])

    if config.GPSType == 0:
        # One-hot GPS encoding: category is the argmax of the last GPS_size features.
        dataMatrix = predError[:, -1, -config.GPS_size:]
        category_ids = np.argmax(dataMatrix, axis=-1)
    else:
        dataMatrix = predError[:, -1, -config.GPS_stampSize:]
        category_ids = dataMatrix

    for c in range(1,config.GPS_size+1):  # every possible category
        idxs = np.where(category_ids == c)[0]  # samples belonging to this category
        if len(idxs) == 0:
            continue  # skip categories without samples
        predError_now = predError[idxs].reshape((-1, args.feature_size))[:, :config.outputSize]
        srcError_now = srcError[idxs].reshape((-1, args.feature_size))[:, :config.outputSize]
        # BUGFIX: RMSE() returns a single scalar — the previous
        # ``mse, rmse = RMSE(...)`` unpacking raised a TypeError.
        rmse = RMSE(predError_now, srcError_now)
        mae = MAE(predError_now, srcError_now)
        pml = Pml(predError_now, srcError_now)
        meList.append([rmse, mae])
        pmlList.append(pml)
    plot_mutilEval(config.predPath,meList,pmlList)

def save_pred(predList,tgtList,title):
    """Inverse-scale predictions, print summary metrics, and write all
    diagnostic plots for a test run into a fresh ``config.predPath``."""
    if os.path.exists(config.predPath):
        shutil.rmtree(config.predPath)
    if not os.path.exists(config.predPath):
        os.mkdir(config.predPath)
    steps = config.predicting_length
    flatPred = predList.reshape(-1, config.outputSize)
    flatTgt = tgtList.reshape(-1, config.outputSize)
    predError, tgtError = scalerInver(flatPred, flatTgt, scalerType=0)
    n_samples = predError.shape[0] // steps
    # Overall metrics over the flattened arrays.
    print(f"RMSE: {RMSE(predError, tgtError)}")
    print(f"MAE: {MAE(predError, tgtError)}")
    print(f"total pml: {Pml(predError, tgtError)}")
    # Reshape into (samples, steps, features) for the per-sample plots.
    resErr = (predError - tgtError).reshape([n_samples, steps, config.outputSize])
    modelErr = predError.reshape([n_samples, steps, config.outputSize])
    oriErr = tgtError.reshape([n_samples, steps, config.outputSize])
    draw_Err(modelErr, oriErr, title)
    if config.outputSize == 1:
        get_singleError(modelErr, oriErr)
    else:
        get_3DError(modelErr, oriErr)
    plot_rmse_3D(config.predPath, resErr, title + "_3D RMSE")
    plot_error(config.predPath, resErr, oriErr, title + "_RMSE")

def save_predPml(pmlList,title):
    """Plot the PML series into ``config.predPath`` under ``title``."""
    plot_pml(config.predPath, pmlList, title)

def deleteFile(path):
    """Delete the file at ``path``; a missing path is a silent no-op."""
    if not os.path.exists(path):
        return
    os.remove(path)

def save_model(epoch,model,optimizer,scheduler,ema):
    """Checkpoint model/optimizer/scheduler/EMA state dicts for this epoch.

    Files land in ``<config.modelPath>train_<epoch+1>/``.
    """
    # Hoist the repeated directory path; exist_ok avoids the
    # check-then-create race of the old exists()/makedirs pair.
    ckpt_dir = config.modelPath + f"train_{epoch + 1}/"
    os.makedirs(ckpt_dir, exist_ok=True)
    torch.save(model.state_dict(), ckpt_dir + f"train_{epoch + 1}.pth")
    torch.save(optimizer.state_dict(), ckpt_dir + f"optimizer_{epoch + 1}.pth")
    torch.save(scheduler.state_dict(), ckpt_dir + f"scheduler_{epoch + 1}.pth")
    torch.save(ema.state_dict(), ckpt_dir + f"ema_{epoch + 1}.pth")

def save_last_model(epoch,model,optimizer,scheduler,ema):
    """Persist the latest training state into ``config.loadDir``."""
    if not os.path.exists(config.loadDir):
        os.makedirs(config.loadDir)
    tag = epoch + 1
    for prefix, obj in (("train", model), ("optimizer", optimizer),
                        ("scheduler", scheduler), ("ema", ema)):
        torch.save(obj.state_dict(), config.loadDir + f"{prefix}_{tag}.pth")

def save_best_model(epoch, model):
    """Save the best model weights and record which epoch produced them."""
    # exist_ok avoids the check-then-create race.
    os.makedirs(config.loadDir, exist_ok=True)
    torch.save(model.state_dict(), config.loadDir + "train_best.pth")
    path_to_file = config.loadDir + "train_best.txt"
    # Overwrite mode: only the single best epoch is tracked. The with-block
    # closes the file — the old explicit f.close() inside it was redundant.
    with open(path_to_file, "w") as f:
        f.write("Epoch: " + str(epoch) + "\n")


def get_alpha(epoch, max_epoch, start=0.6, end=0.25):
    """Linearly decay alpha from ``start`` (epoch 0) to ``end`` (epoch == max_epoch)."""
    return start + (end - start) * (epoch / max_epoch)

def pmlBucket(predList,targetList,title):
    """Histogram the per-sample PML values into fixed buckets and print counts."""
    steps = config.predicting_length
    pred2d = predList.reshape(-1, config.outputSize)
    tgt2d = targetList.reshape(-1, config.outputSize)
    predError, srcError = scalerInver(pred2d, tgt2d)
    n_samples = predError.shape[0] // steps
    predError = predError.reshape([n_samples, steps, config.outputSize])
    srcError = srcError.reshape([n_samples, steps, config.outputSize])
    pmlList = Pml(predError, srcError)
    edges = [0., 0.2, 0.4, 0.6, 0.8, 1.0]
    counts, bin_edges = np.histogram(pmlList, bins=edges)
    print("区间划分: ", bin_edges)
    print("每个区间的样本数: ", counts)
    # Samples with PML above 1.0 mean the model made the error worse.
    above = np.sum(pmlList > edges[-1])
    print(f"负优化样本数量: {above}")

def Pml(pred, src):
    """Relative L1 error: sum|src - pred| / sum|src|.

    2-D input [len, N] -> scalar; 3-D input [samples, len, N] -> one value
    per sample. Raises ValueError on shape mismatch, zero denominator, or
    unsupported rank.
    """
    pred, src = np.asarray(pred), np.asarray(src)
    if pred.shape != src.shape:
        raise ValueError("src and pred must have the same shape")
    if pred.ndim == 3:
        # Accumulate per sample over the time and feature axes.
        num = np.abs(src - pred).sum(axis=(1, 2))
        den = np.abs(src).sum(axis=(1, 2))
        if np.any(den == 0):
            raise ValueError("One or more samples in src has zero denominator")
        return num / den
    if pred.ndim == 2:
        den = np.abs(src).sum()
        if den == 0:
            raise ValueError("The sum of absolute values in src is zero, cannot divide by zero")
        return np.abs(src - pred).sum() / den
    raise ValueError("Input must be 2D or 3D (got shape {})".format(pred.shape))


def draw_Err(pred,tagt,title):
    """Print PML and plot error/residue curves for the first few samples.

    :param pred: 3-D array of model errors, indexed by sample on axis 0
    :param tagt: matching 3-D array of reference errors
    :param title: base title used for the plot file names
    """
    # NOTE(review): by the time save_pred calls this, pred.shape[0] is
    # already the sample count, so dividing by predicting_length again
    # looks suspicious — confirm against callers. In practice only the
    # first 3 samples are plotted because of the break below.
    test_size = pred.shape[0] // config.predicting_length
    for k in range(test_size):
        pml = Pml(pred[k], tagt[k])
        if k < 3:
            print(f"{k} Pml: {pml}")
            plot_error(config.predPath, pred[k], tagt[k], title+f" error {k}")
            plot_residue(config.predPath, pred[k], tagt[k], title+f" residue {k}")
        else:
            break
def cal_single_MAM(src):
    """[min, avg, max] of the per-timestep mean (sample axis averaged first)."""
    per_step = src.mean(axis=0)
    return [np.min(per_step, axis=0),
            np.mean(per_step, axis=0),
            np.max(per_step, axis=0)]

def cal_3D_MAM(src):
    """Per-axis and 3-D-norm [min, avg, max] of the per-timestep mean error.

    :param src: (samples, timesteps, axes) array of absolute errors
    :return: ``config.axis + 1`` triples; the last one covers the Euclidean norm
    """
    per_step = np.mean(src, axis=0)
    res = [[np.min(per_step[:, i]), np.mean(per_step[:, i]), np.max(per_step[:, i])]
           for i in range(config.axis)]
    # 3-D norm per (sample, timestep), then averaged over samples.
    norm_step = np.mean(np.linalg.norm(src, axis=2), axis=0)
    res.append([np.min(norm_step), np.mean(norm_step), np.max(norm_step)])
    return res

def euclidDistance(errors):
    """Euclidean norm of a 3-component error vector.

    :param errors: list or tuple of exactly three direction errors [dx, dy, dz]
    :return: distance (float)
    :raises ValueError: if the input does not have exactly three components
    """
    if len(errors) != 3:
        raise ValueError("输入必须是包含三个方向误差的 list 或 tuple")
    dx, dy, dz = errors
    return math.sqrt(dx * dx + dy * dy + dz * dz)

def outlierDetection(orbitData, outType="sigma" , abs_threshold=None, iqr_rate=1.5):
    """Return per-column (lower, upper) outlier bounds.

    Rows with any |value| >= ``abs_threshold`` (if given) are dropped first.
    outType "sigma": mean +/- 3*std per column; "IQR": quartiles widened by
    ``iqr_rate`` * IQR. Any other type prints an error and exits.
    """
    if abs_threshold is not None:
        keep = np.all(np.abs(orbitData) < abs_threshold, axis=1)
        orbitData = orbitData[keep]

    if outType == 'sigma':
        center = np.mean(orbitData, axis=0)
        spread = 3 * np.std(orbitData, axis=0)
        lower, upper = center - spread, center + spread
    elif outType == "IQR":
        q1, q3 = np.percentile(orbitData, [25, 75], axis=0)
        margin = iqr_rate * (q3 - q1)
        lower, upper = q1 - margin, q3 + margin
    else:
        print("outlier detection error")
        sys.exit(0)
    return lower, upper


def compute_mean_cov(X):
    """Return (sample mean, covariance matrix) of X with shape (N, D)."""
    mean = np.mean(X, axis=0)
    # np.cov centers internally too, but centering first mirrors the
    # mean we return.
    cov = np.cov(X - mean, rowvar=False)
    return mean, cov

def get_sunPosition(uct_times, relative_to="earth"):
    """Compute Sun positions (metres) at the given UTC times from the DE440 kernel.

    :param uct_times: UTC time(s) in any form accepted by astropy.time.Time
    :param relative_to: "earth" for Sun w.r.t. the Earth's centre,
        "ssb" for Sun w.r.t. the solar-system barycenter
    :return: ndarray (N, 3) of positions in metres
    :raises ValueError: if relative_to is neither "earth" nor "ssb"
    """
    from jplephem.spk import SPK
    kernel = SPK.open(config.de440Path)

    # Convert UTC to TDB Julian dates (the timescale the ephemeris expects).
    uct_times = Time(uct_times, scale='utc')
    tdb_times = uct_times.tdb.jd

    if relative_to.lower() == "ssb":
        # Sun w.r.t. the solar-system barycenter (SSB).
        positions_km = kernel[0, 10].compute(tdb_times)  # shape (3,N)
        return positions_km.T * 1000.0  # km -> m, transposed to (N,3)

    elif relative_to.lower() == "earth":
        # ---- Sun/SSB ----
        sun_pos_km = kernel[0, 10].compute(tdb_times)  # (3,N) Sun w.r.t. SSB

        # ---- Earth/SSB = EMB/SSB + Earth/EMB ----
        emb_pos_km = kernel[0, 3].compute(tdb_times)  # (3,N) Earth-Moon barycenter w.r.t. SSB
        earth_rel_emb_km = kernel[3, 399].compute(tdb_times)  # (3,N) [3,399] = Earth centre w.r.t. EMB
        earth_pos_km = emb_pos_km + earth_rel_emb_km

        # ---- Sun/Earth ----
        positions_m = (sun_pos_km - earth_pos_km).T * 1000.0  # km -> m
        return positions_m

    else:
        raise ValueError("relative_to 必须是 'earth' 或 'ssb'")

def get_betaANDshadow(satP, satV, sunP):
    """Return [shadow margin, sin(beta), cos(beta)] for one satellite state.

    beta is the angle between the Sun direction and the orbital plane.
    The margin is a continuous signed distance of the Sun-Earth separation
    angle from the umbra boundary, in units of the apparent solar radius.
    (A discrete umbra/penumbra flag is computed for parity with the
    original code but is not returned.)
    """
    R_EARTH = 6378137.0
    R_SUN = 696000000.0

    # Orbit-normal and Sun unit vectors -> beta angle.
    orbit_normal = np.cross(satP, satV)
    orbit_normal = orbit_normal / np.linalg.norm(orbit_normal)
    sun_dir = sunP / np.linalg.norm(sunP)
    beta_rad = np.arcsin(np.clip(np.dot(orbit_normal, sun_dir), -1.0, 1.0))

    # Angle gamma between the satellite->Sun and satellite->Earth directions.
    sat_to_sun = sunP - satP
    sat_to_earth = -satP
    d_sun = np.linalg.norm(sat_to_sun)
    d_earth = np.linalg.norm(sat_to_earth)
    cos_gamma = np.dot(sat_to_sun, sat_to_earth) / (d_sun * d_earth)
    gamma = np.arccos(np.clip(cos_gamma, -1.0, 1.0))

    # Apparent angular radii of Earth and Sun seen from the satellite.
    theta_E = np.arcsin(np.clip(R_EARTH / np.linalg.norm(satP), -1.0, 1.0))
    theta_S = np.arcsin(np.clip(R_SUN / d_sun, -1.0, 1.0))

    # Continuous distance to the umbra boundary (epsilon avoids /0).
    margin = (gamma - (theta_E - theta_S)) / (theta_S + 1e-9)

    # Discrete shadow flag (unused by callers of the return value).
    if gamma < (theta_E - theta_S):
        shadow = 2  # umbra
    elif gamma < (theta_E + theta_S):
        shadow = 1  # penumbra
    else:
        shadow = 0  # sunlit

    return np.array([margin, np.sin(beta_rad), np.cos(beta_rad)])

def mahalanobis_distance(X, mean, cov_inv):
    """Mahalanobis distance of each row of X from ``mean`` (NumPy version).

    :param X: np.ndarray, shape (N, D)
    :param mean: np.ndarray, shape (D,)
    :param cov_inv: np.ndarray, shape (D, D), inverse covariance matrix
    :return: np.ndarray, shape (N,), one distance per sample
    """
    centered = X - mean
    # Quadratic form x^T S^{-1} x per row, without materializing N matrices.
    quad = np.einsum('nd,de,ne->n', centered, cov_inv, centered)
    return np.sqrt(quad)

def ewm_smooth(data, alpha=0.25):
    """Exponentially-weighted smoothing along the middle (time) axis.

    :param data: array of shape [S, L, N]
    :param alpha: smoothing factor (weight of the newest value)
    :return: smoothed array, same shape as ``data``
    """
    _, L, _ = data.shape
    out = np.empty_like(data)
    out[:, 0, :] = data[:, 0, :]
    for step in range(1, L):
        out[:, step, :] = alpha * data[:, step, :] + (1 - alpha) * out[:, step - 1, :]
    return out

def outlierDetection_mahalanobis(obsData,prdData,stampObs,stampPrd,timeData,frac_threshold=0.975):
    """Drop whole days whose prediction segment is a Mahalanobis outlier,
    then EWM-smooth the position channels of the surviving observation days.

    :param obsData: (days*training_length, obsN) observation features
    :param prdData: (days*predicting_length, prdN) prediction features
    :param stampObs: timestamps matching obsData rows
    :param stampPrd: timestamps matching prdData rows
    :param timeData: per-day entries, filtered with the same day mask
    :param frac_threshold: quantile of squared distances kept (default 97.5%)
    :return: filtered (obsData, prdData, stampObs, stampPrd, timeData)
    """
    obsS, obsN = obsData.shape
    prdS, prdN = prdData.shape
    obsSs, obsNs = stampObs.shape
    prdSs, prdNs = stampPrd.shape
    dayNum = obsS // config.training_length # total number of days
    obs_data = obsData[:, :config.axis]
    prd_data = prdData[:, :config.axis]

    # One row per day: flatten each day's window into a single feature vector.
    obs_data = obs_data.reshape([dayNum,-1])
    prd_data = prd_data.reshape([dayNum,-1])
    obsData = obsData.reshape([dayNum,-1])
    prdData = prdData.reshape([dayNum,-1])
    stampObs = stampObs.reshape([dayNum,-1])
    stampPrd = stampPrd.reshape([dayNum,-1])
    origin_size = obs_data.shape[0]

    # Fit a shrinkage covariance (Ledoit-Wolf) on the prediction segment itself.
    lw = LedoitWolf().fit(prd_data)
    mean = lw.location_
    cov = lw.covariance_ + np.eye(lw.covariance_.shape[0]) * 1e-6
    cov_inv = np.linalg.inv(cov)

    # Squared Mahalanobis distance of each day's prediction vector.
    Xc = prd_data - mean
    sol = Xc @ cov_inv
    dist2 = np.sum(sol * Xc, axis=1)

    # Threshold at the requested quantile of the prediction distances.
    perc = 100.0 * frac_threshold
    threshold = np.percentile(dist2, perc)
    print(f"[Prediction segment only] Threshold at {perc}% = {threshold:.3f}")

    data_mask = dist2 <= threshold

    obsData = obsData[data_mask]
    prdData = prdData[data_mask]
    stampObs = stampObs[data_mask]
    stampPrd = stampPrd[data_mask]
    dayNum = obsData.shape[0]
    obsData = obsData.reshape([dayNum,config.training_length,obsN])
    prdData = prdData.reshape([dayNum,config.predicting_length,prdN])

    # oldObsData = obsData.copy().reshape([-1,obsN])
    # oldPrdData = prdData.copy().reshape([-1,prdN])

    # Smooth only the first config.axis (position) channels of the
    # observation windows; leave the remaining features untouched.
    splitSet = config.axis
    rollDataObs = np.array(obsData)
    obsData = np.concatenate((ewm_smooth(rollDataObs[:,:,:splitSet],alpha=0.25), rollDataObs[:,:,splitSet:]), axis=-1)  # try alpha in the 0.1 ~ 0.5 range
    # rollDataPrd = np.array(prdData)
    # prdData = np.concatenate((ewm_smooth(rollDataPrd[:,:,:splitSet],alpha=0.25), rollDataPrd[:,:,splitSet:]), axis=-1)  # try alpha in the 0.1 ~ 0.5 range

    obsData = obsData.reshape([-1,obsN])
    prdData = prdData.reshape([-1,prdN])
    stampObs = stampObs.reshape([-1,obsNs])
    stampPrd = stampPrd.reshape([-1,prdNs])

    # cnt = 0
    # for idx in range(0,obsData.shape[0],config.training_length):
    #     cnt+=1
    #     plot_error(config.showErrorDir, obsData[idx:idx+config.training_length,:config.axis], oldObsData[idx:idx+config.training_length,:config.axis], "obsRenoise"+str(idx//config.training_length))
    #     if cnt>5:
    #         break
    # sys.exit(0)

    last_size = obsData.shape[0]
    lastData = data_mask.sum().item()
    removeData = (~data_mask).sum().item()
    print(f"lastData: {lastData}")
    print(f"removeData: {removeData}")
    print(f"splitSize: {last_size/origin_size}")
    timeData = timeData[data_mask]
    return obsData, prdData, stampObs, stampPrd, timeData

def get_Velocity(preP,nxtP,preTime,nxtTime,nowTime):
    """Central-difference velocity estimate at ``nowTime``.

    Uses the positions one step before (``preP`` at ``preTime``) and one
    step after (``nxtP`` at ``nxtTime``); times are datetime objects.
    """
    dt_total = (nowTime - preTime).total_seconds() + (nxtTime - nowTime).total_seconds()
    return np.array([(nxtP[i] - preP[i]) / dt_total for i in range(len(nxtP))])

def differencing(batch):
    """First-order difference along the time axis of a (samples, L, F) batch.

    Returns shape (samples, L-1, F).
    """
    return np.diff(batch, axis=1)

def label_differencing(obser_last_step, moeorb_batch):
    """Difference the label window against the last observed step.

    :param obser_last_step: (samples, 1, F) final value of the observation window
    :param moeorb_batch: (samples, L, F) label window
    :return: (samples, L, F) first differences of the concatenated sequence
    """
    joined = np.concatenate([obser_last_step, moeorb_batch], axis=1)
    return np.diff(joined, axis=1)

def convert_eciErr_to_rsw(r_eci, v_eci, dr_eci):
    """Rotate error vectors from the ECI frame into the RSW frame, batched.

    :param r_eci: (n, 3) satellite ECI positions [x, y, z] (m)
    :param v_eci: (n, 3) satellite ECI velocities [vx, vy, vz] (m/s)
    :param dr_eci: (n, 3) error vectors delta-r (m)
    :return: (n, 3) errors expressed as [dR, dS, dW]
    """
    # Radial unit vector.
    e_R = r_eci / np.linalg.norm(r_eci, axis=1, keepdims=True)
    # Orbit-normal unit vector (direction of the angular momentum).
    h_vec = np.cross(r_eci, v_eci)
    e_W = h_vec / np.linalg.norm(h_vec, axis=1, keepdims=True)
    # Along-track direction completes the right-handed triad.
    e_S = np.cross(e_W, e_R)
    # Stack the basis so row i of basis[k] is the i-th unit vector; dotting
    # the rows with dr yields the RSW components (equivalent to T^T @ dr).
    basis = np.stack([e_R, e_S, e_W], axis=1)  # (n, 3, 3)
    return np.einsum('nij,nj->ni', basis, dr_eci)

def scaLer(obsData_train,obsData_val,obsData_test,prdData_train,prdData_val,prdData_test,runType):
    """Standard-scale the obs/prd feature blocks, leaving trailing columns raw.

    In "train" mode the scalers are fit on the training splits and persisted
    with joblib; otherwise the previously saved scalers are loaded. The last
    ``config.unstd_size`` obs columns and the last ``config.GPS_stampSize``
    prd columns are passed through unscaled.

    :return: the six arrays in the same order, scaled.
    """
    if runType == "train":
        obsScaler = StandardScaler()
        obsScaler.fit(obsData_train[:, :-config.unstd_size])  # fit only the scalable part
        joblib.dump(obsScaler, config.obsScalerPath)
        prdScaler = StandardScaler()
        prdScaler.fit(prdData_train[:, :-config.GPS_stampSize])
        joblib.dump(prdScaler, config.prdScalerPath)
    else:
        obsScaler = joblib.load(config.obsScalerPath)
        prdScaler = joblib.load(config.prdScalerPath)

    def _apply(scaler, data, tail):
        # Scale the leading columns, re-attach the trailing ``tail`` columns raw.
        scaled = scaler.transform(data[:, :-tail])
        return np.concatenate([scaled, data[:, -tail:]], axis=-1)

    # The same transform applied to all six splits (was six duplicated blocks).
    obsData_train = _apply(obsScaler, obsData_train, config.unstd_size)
    obsData_val = _apply(obsScaler, obsData_val, config.unstd_size)
    obsData_test = _apply(obsScaler, obsData_test, config.unstd_size)
    prdData_train = _apply(prdScaler, prdData_train, config.GPS_stampSize)
    prdData_val = _apply(prdScaler, prdData_val, config.GPS_stampSize)
    prdData_test = _apply(prdScaler, prdData_test, config.GPS_stampSize)

    return obsData_train,obsData_val,obsData_test,prdData_train,prdData_val,prdData_test

def scalerInver(predList, targetList, scalerType=1):
    """Undo prediction-scaler standardization on predictions and targets.

    scalerType == 1 selects the single column ``config.outputIdx``; any
    other value selects the first ``config.outputSize`` columns.
    """
    prdScaler = joblib.load(config.prdScalerPath)
    if scalerType == 1:
        sel = slice(config.outputIdx, config.outputIdx + 1)
    else:
        sel = slice(None, config.outputSize)
    mu = prdScaler.mean_[sel]
    std = prdScaler.scale_[sel]
    return predList * std + mu, targetList * std + mu

def splitDataset(obsData, prdData, stampObs, stampPrd, runType="train", sca=True):
    """Chronologically split obs/prd data (and timestamps) into train/val/test.

    The flat 2-D inputs are reshaped into per-day windows, split by
    ``config.splitSize`` (the remainder halved between val and test),
    flattened back to 2-D, and optionally standardized via :func:`scaLer`.

    :return: four lists, each ``[train, val, test]``, for obsData, prdData,
        stampObs and stampPrd respectively.
    """
    data_size = obsData.shape[0] // config.training_length
    _, obsN = obsData.shape
    _, prdN = prdData.shape
    obsData = np.array(obsData).reshape([data_size, config.training_length, obsN])
    prdData = np.array(prdData).reshape([data_size, config.predicting_length, prdN])
    stampObs = np.array(stampObs).reshape([data_size, config.training_length, config.stampSize])
    stampPrd = np.array(stampPrd).reshape([data_size, config.predicting_length, config.stampSize])

    train_size = int(data_size * config.splitSize)
    val_size = (data_size - train_size) // 2
    val_end = train_size + val_size

    def _split3(arr):
        # Contiguous chronological split along the day axis.
        return arr[:train_size], arr[train_size:val_end], arr[val_end:]

    obsData_train, obsData_val, obsData_test = _split3(obsData)
    prdData_train, prdData_val, prdData_test = _split3(prdData)
    stampObs_train, stampObs_val, stampObs_test = _split3(stampObs)
    # BUGFIX: stampPrd was split inconsistently with every other array —
    # val took the test range and test took the train range.
    stampPrd_train, stampPrd_val, stampPrd_test = _split3(stampPrd)

    obsData_train = obsData_train.reshape(-1, obsN)
    obsData_val = obsData_val.reshape(-1, obsN)
    obsData_test = obsData_test.reshape(-1, obsN)
    prdData_train = prdData_train.reshape(-1, prdN)
    prdData_val = prdData_val.reshape(-1, prdN)
    prdData_test = prdData_test.reshape(-1, prdN)
    stampObs_train = stampObs_train.reshape(-1, config.stampSize)
    stampObs_val = stampObs_val.reshape(-1, config.stampSize)
    stampObs_test = stampObs_test.reshape(-1, config.stampSize)
    stampPrd_train = stampPrd_train.reshape(-1, config.stampSize)
    stampPrd_val = stampPrd_val.reshape(-1, config.stampSize)
    stampPrd_test = stampPrd_test.reshape(-1, config.stampSize)

    print(f"obsData_train: {obsData_train.shape}")
    print(f"obsData_val: {obsData_val.shape}")
    print(f"obsData_test: {obsData_test.shape}")
    print(f"prdData_train: {prdData_train.shape}")
    print(f"prdData_val: {prdData_val.shape}")
    print(f"prdData_test: {prdData_test.shape}")

    if sca:
        obsData_train, obsData_val, obsData_test, prdData_train, prdData_val, prdData_test = scaLer(
            obsData_train, obsData_val, obsData_test,
            prdData_train, prdData_val, prdData_test, runType)

    obsData = [obsData_train, obsData_val, obsData_test]
    prdData = [prdData_train, prdData_val, prdData_test]
    stampObs = [stampObs_train, stampObs_val, stampObs_test]
    stampPrd = [stampPrd_train, stampPrd_val, stampPrd_test]
    return obsData, prdData, stampObs, stampPrd


def clean_directory():
    """Reset ``config.saveDir`` and (re)create all output subdirectories."""
    if os.path.exists(config.saveDir):
        shutil.rmtree(config.saveDir)
    # makedirs(exist_ok=True) replaces the eight exists()/mkdir pairs:
    # it is race-free and also creates missing parent directories.
    for path in (config.saveDir, config.lossPath, config.modelPath,
                 config.predPath, config.loadPTDir, config.loadDir,
                 config.loadDir + "YAML/", config.showErrorDir):
        os.makedirs(path, exist_ok=True)

