
import numpy as np
from netCDF4 import Dataset
import tqdm
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C
import joblib
import os
import matplotlib.pyplot as plt
from scipy.spatial.distance import pdist

# Improved GPR interpolation function
def gpr_interpolation(x, y, z, xi, yi, i, output_filepath, gpr_model=None, visualize_errors=False):
    """
    Robust Gaussian Process Regression interpolation with NaN handling.

    Falls back to inverse-distance weighting when fewer than 5 unique valid
    points are available, and to mean-value filling if model training fails.

    Parameters:
    x (numpy.ndarray): X coordinates of observation points
    y (numpy.ndarray): Y coordinates of observation points
    z (numpy.ndarray): Values at observation points
    xi (numpy.ndarray): X coordinates of interpolation grid
    yi (numpy.ndarray): Y coordinates of interpolation grid
    i (int): Component index used in the visualization filename
    output_filepath (str): Directory to save visualization files
    gpr_model (GaussianProcessRegressor): Pre-trained GPR model (None = train a new one)
    visualize_errors (bool): Whether to generate visualization plots

    Returns:
    tuple: (interpolated values shaped like xi, trained GPR model) — the
           model is None when a fallback path (IDW or mean fill) was used.

    Raises:
    ValueError: If every observation point is NaN.
    """
    # 1. Detect and remove NaN values
    valid_mask = ~np.isnan(z) & ~np.isnan(x) & ~np.isnan(y)

    if not np.any(valid_mask):
        raise ValueError("All data points are NaN, cannot perform interpolation")

    x_valid = x[valid_mask]
    y_valid = y[valid_mask]
    z_valid = z[valid_mask]

    nan_percentage = 100 * (1 - len(z_valid)/len(z))

    xi_flat = xi.flatten()
    yi_flat = yi.flatten()
    grid_points = np.vstack([xi_flat, yi_flat]).T

    # 2. Check if all valid points are at the same location (would cause singular matrix)
    if len(np.unique(np.column_stack([x_valid, y_valid]), axis=0)) < 5:
        # Use IDW as fallback method
        print("Too few valid points, using IDW interpolation as fallback")
        dist = np.sqrt((xi_flat[:, None] - x_valid[None, :])**2 + 
                      (yi_flat[:, None] - y_valid[None, :])**2)
        # A grid point coinciding with an observation gets (almost) all weight
        dist[dist == 0] = 1e-12
        weights = 1.0 / dist
        weights_sum = np.sum(weights, axis=1)
        weights_sum[weights_sum == 0] = 1e-12
        weights /= weights_sum[:, None]
        z_interpolated = np.sum(weights * z_valid[None, :], axis=1)
        return z_interpolated.reshape(xi.shape), None

    # 3. Create or use existing GPR model
    if gpr_model is None:
        # Dynamically adjust kernel parameters: use the mean pairwise station
        # distance as the initial RBF length scale
        points = np.column_stack((x_valid, y_valid))
        if len(points) >= 2:
            distances = pdist(points, metric='euclidean')
            avg_distance = np.mean(distances)
        else:
            avg_distance = 1.0  # Default value to prevent errors
        length_scale = max(avg_distance, 0.1)  # Ensure length scale is at least 0.1

        kernel = C(1.0, (1e-3, 1e3)) * RBF([length_scale, length_scale], (1e-2, 1e2))

        gpr_model = GaussianProcessRegressor(
            kernel=kernel,
            n_restarts_optimizer=5,
            alpha=0.1,
            normalize_y=True,
            optimizer='fmin_l_bfgs_b',
            random_state=42
        )
        # Model training with error handling
        try:
            gpr_model.fit(np.column_stack([x_valid, y_valid]), z_valid)
        except Exception as e:
            print(f"Model training failed: {str(e)}, using mean value filling")
            return np.full(xi.shape, np.nanmean(z_valid)), None

    # 4. Perform prediction
    z_interpolated = gpr_model.predict(grid_points)

    # 5. Visualization check (optional)
    if visualize_errors:
        plt.figure(figsize=(10, 6))
        plt.scatter(x_valid, y_valid, c=z_valid, s=50, cmap='viridis', label='Original points')
        # BUG FIX: pass `extent` so the image shares the lon/lat coordinate
        # system of the scatter plot (previously imshow was drawn in pixel
        # coordinates and did not line up with the observation points).
        plt.imshow(z_interpolated.reshape(xi.shape),
                  origin='lower', alpha=1, cmap='coolwarm',
                  extent=[xi.min(), xi.max(), yi.min(), yi.max()],
                  aspect='auto')
        plt.colorbar(label='Values')
        plt.title(f'GPR Interpolation Result (NaN removal rate: {nan_percentage:.1f}%)')
        plt.xlabel('Longitude')
        plt.ylabel('Latitude')
        plt.savefig(os.path.join(output_filepath, f'gpr_visualization_component_{i}.png'), dpi=150)
        plt.close()

    return z_interpolated.reshape(xi.shape), gpr_model

#------------------------#
#    Process nc file data #
#------------------------#

# Divide according to longitude and latitude
def process_observation_data(obs_array, lon_array, lat_array):
    """
    Process observation data: group stations by a 5° x 2.5° longitude-latitude
    grid, average the observation values per grid cell at every time step, and
    record each occupied cell's center coordinates.

    Parameters:
    - obs_array: Observation value array, shape (n_times, n_stations)
    - lon_array: Longitude array, shape (n_times, n_stations)
    - lat_array: Latitude array, shape (n_times, n_stations)

    Returns:
    - output_obs: Average observation value array, shape (n_times, n_grids)
    - grid_centers: Center longitude-latitude of each grid, shape (n_grids, 2)
    """
    # Time dimension taken from the data instead of a hard-coded 1440,
    # so inputs of any length work.
    n_times = obs_array.shape[0]

    # 1. Extract longitude and latitude of each station (assuming they don't change over time)
    station_lons = lon_array[0, :]
    station_lats = lat_array[0, :]

    # 2. Define boundaries of longitude-latitude grid
    lon_bins = np.arange(-180, 180 + 5, 5)  # Longitude grid boundaries
    lat_bins = np.arange(-90, 90 + 2.5, 2.5)  # Latitude grid boundaries

    # 3. Assign grid ID to each station
    num_lon_bins = len(lon_bins) - 1
    num_lat_bins = len(lat_bins) - 1

    # Use digitize to locate the longitude-latitude bin index for each station
    lon_indices = np.digitize(station_lons, lon_bins, right=True) - 1
    lat_indices = np.digitize(station_lats, lat_bins, right=True) - 1

    # Ensure indices don't go out of bounds
    lon_indices = np.clip(lon_indices, 0, num_lon_bins - 1)
    lat_indices = np.clip(lat_indices, 0, num_lat_bins - 1)

    # Generate grid ID for each station (unique identifier)
    grid_ids = lat_indices * num_lon_bins + lon_indices

    # 4. Find all existing grid IDs (at least one station)
    unique_grid_ids = np.unique(grid_ids)
    n_grids = len(unique_grid_ids)

    # 5. Calculate center longitude-latitude for each grid
    grid_centers = np.zeros((n_grids, 2))
    for i, gid in enumerate(unique_grid_ids):
        lat_idx = gid // num_lon_bins
        lon_idx = gid % num_lon_bins
        center_lon = (lon_bins[lon_idx] + lon_bins[lon_idx + 1]) / 2
        center_lat = (lat_bins[lat_idx] + lat_bins[lat_idx + 1]) / 2
        grid_centers[i] = [center_lon, center_lat]

    # 6. Station-to-grid assignment is time-invariant, so the per-grid counts
    # are computed once (hoisted out of the time loop). Restricting to the
    # occupied grids also guarantees counts >= 1, avoiding the divide-by-zero
    # warnings the previous whole-array division produced.
    n_cells = num_lon_bins * num_lat_bins
    counts = np.bincount(grid_ids, minlength=n_cells)
    occupied_counts = counts[unique_grid_ids]

    output_obs = np.zeros((n_times, n_grids))

    # 7. Group and calculate average for each time point
    for t in range(n_times):
        sum_values = np.bincount(grid_ids, weights=obs_array[t, :], minlength=n_cells)
        output_obs[t, :] = sum_values[unique_grid_ids] / occupied_counts

    return output_obs, grid_centers

# Check data dimensions
def check_dimensions(data_dict):
    """
    Verify that every array in data_dict has the same size along axis 1.

    Parameters:
    data_dict (dict): Mapping of array name to numpy array.

    Returns:
    None

    Raises:
    ValueError: If the arrays disagree on shape[1]; the message lists
        each array's axis-1 size.
    """
    axis1_sizes = {key: arr.shape[1] for key, arr in data_dict.items()}
    if len(set(axis1_sizes.values())) <= 1:
        print("All data dimensions are consistent")
        return
    # Build one message line per offending array
    msg = "Inconsistent data dimensions:\n"
    msg += "".join(f"{key} shape[1]: {size}\n" for key, size in axis1_sizes.items())
    raise ValueError(msg)
# Detect NaN in raw data
def analyze_nan(data, name):
    """
    Print the NaN count and percentage for a data array.

    Parameters:
    data (numpy.ndarray): Data array to analyze
    name (str): Label to print alongside the statistics

    Returns:
    None
    """
    total = data.size
    nan_count = np.isnan(data).sum()
    print(f"{name} - NaN count: {nan_count} ({nan_count/total*100:.2f}%)")

# Replace longitude-latitude data
def expand_array(arr, target_shape=(1440, 117)):
    """
    Broadcast a (n_stations, 2) lon/lat array along the time axis.

    Each column of `arr` (column 0 = longitude, column 1 = latitude) is
    repeated target_shape[0] times, producing two arrays of shape
    (target_shape[0], n_stations).

    Parameters:
        arr: np.ndarray, shape=(n_stations, 2)
        target_shape: tuple whose first element is the number of time rows

    Returns:
        (expanded_lon, expanded_lat): two arrays of shape
        (target_shape[0], n_stations)
    """
    n_rows = target_shape[0]
    lon_col = arr[:, 0]
    lat_col = arr[:, 1]

    # Repeat each 1-D column as identical rows along the time axis
    expanded_lon = np.repeat(lon_col[np.newaxis, :], n_rows, axis=0)
    expanded_lat = np.repeat(lat_col[np.newaxis, :], n_rows, axis=0)

    return expanded_lon, expanded_lat


def safe_avg(data):
    """
    Column-wise mean of a 2D array, ignoring NaN values.

    Averages over axis 0 while excluding NaN entries from both the sum and
    the count. Columns containing only NaN values yield 0.0 (the divisor is
    clamped to 1 to avoid division by zero).

    Parameters:
    data (numpy.ndarray): Input 2D numpy array that may contain NaN values.

    Returns:
    numpy.ndarray: 1D numpy array containing the NaN-aware mean of each column.
    """
    # BUG FIX: the valid-entry count must be taken BEFORE NaNs are replaced.
    # The original zeroed NaNs in place first, so every entry looked valid and
    # the divisor was the full row count, biasing means toward zero.
    valid_mask = ~np.isnan(data)
    valid_count = valid_mask.sum(axis=0)
    # Clamp so all-NaN columns divide by 1 and return 0.0 instead of raising
    valid_count = np.maximum(valid_count, 1)
    # NaNs contribute 0 to the sum
    sums = np.where(valid_mask, data, 0.0).sum(axis=0)
    return sums / valid_count

# Read file and process data here
def GeoMagGPR(filename, output_filepath=None):
    """
    Process geomagnetic data using GPR interpolation.

    Reads SuperMAG station observations from a NetCDF file, bins the stations
    onto a 5° x 2.5° lon/lat grid, averages every `interval` consecutive time
    samples (NaN-aware), then interpolates the three field components onto a
    regular 71 x 73 lat/lon grid with Gaussian Process Regression models
    trained once on the first time step.

    Parameters:
    filename (str): Path to input NetCDF file
    output_filepath (str): Directory to save output files (None = don't save)

    Returns:
    numpy.ndarray: Interpolated geomagnetic data with shape (time_steps, 3, 71, 73)
    """
    with Dataset(filename, "r") as nc:
        print(nc.variables['dbe_geo'])
        dbe_geo1 = nc.variables['dbe_geo'][:]
        dbn_geo1 = nc.variables['dbn_geo'][:]
        dbz_geo1 = nc.variables['dbz_geo'][:]
        glat1 = nc.variables['glat'][:]
        glon1 = nc.variables['glon'][:]

    # Bin the station observations onto the lon/lat grid. The grid layout
    # (latlon) is identical across the three calls since it depends only on
    # station positions.
    dbe_geo, latlon = process_observation_data(dbe_geo1, glon1, glat1)
    dbn_geo, latlon = process_observation_data(dbn_geo1, glon1, glat1)
    dbz_geo, latlon = process_observation_data(dbz_geo1, glon1, glat1)
    glon, glat = expand_array(latlon, target_shape=(1440, 117))

    # Take average of every 60 data points (originally 3600 too large, adjusted to 60)
    interval = 60
    new_time_dim = dbe_geo.shape[0] // interval
    station_num = dbe_geo.shape[1]

    # Report NaN statistics on the binned data. (The original built this dict
    # under the printed header but never used it.)
    print("\n=== Raw Data NaN Analysis ===")
    data_vars = {
        'dbe_geo': dbe_geo,
        'dbn_geo': dbn_geo,
        'dbz_geo': dbz_geo,
        'glat': glat,
        'glon': glon
    }
    for var_name, var_data in data_vars.items():
        analyze_nan(var_data, var_name)

    # Reshape to (new_time_dim, interval, station_num) for NaN-aware averaging
    dbe_reshaped = dbe_geo[:new_time_dim * interval, :].reshape(new_time_dim, interval, station_num)
    dbn_reshaped = dbn_geo[:new_time_dim * interval, :].reshape(new_time_dim, interval, station_num)
    dbz_reshaped = dbz_geo[:new_time_dim * interval, :].reshape(new_time_dim, interval, station_num)
    glat_reshaped = glat[:new_time_dim * interval, :].reshape(new_time_dim, interval, station_num)
    glon_reshaped = glon[:new_time_dim * interval, :].reshape(new_time_dim, interval, station_num)

    # Calculate safe (NaN-aware) averages per coarse time step
    dbe_avg = np.zeros((new_time_dim, station_num))
    dbn_avg = np.zeros((new_time_dim, station_num))
    dbz_avg = np.zeros((new_time_dim, station_num))
    glat_avg = np.zeros((new_time_dim, station_num))
    glon_avg = np.zeros((new_time_dim, station_num))

    for t in tqdm.tqdm(range(new_time_dim)):
        dbe_avg[t] = safe_avg(dbe_reshaped[t])
        dbn_avg[t] = safe_avg(dbn_reshaped[t])
        dbz_avg[t] = safe_avg(dbz_reshaped[t])
        glat_avg[t] = safe_avg(glat_reshaped[t])
        glon_avg[t] = safe_avg(glon_reshaped[t])

    # Define interpolated grid (71x73)
    new_lat = np.linspace(-90, 90, 71)
    new_lon = np.linspace(0, 360, 73)
    new_lon_grid, new_lat_grid = np.meshgrid(new_lon, new_lat)

    # Result array: (new_time_dim, 3, 71, 73)
    result = np.zeros((new_time_dim, 3, 71, 73), dtype=np.float64)

    # Train one GPR model per component on the first time step; the models
    # are then reused for every subsequent time step.
    base_time = 0
    component_series = [dbe_avg, dbn_avg, dbz_avg]

    gpr_models = []
    for i, comp_series in enumerate(component_series):
        _, gpr_model = gpr_interpolation(
            glon_avg[base_time],
            glat_avg[base_time],
            comp_series[base_time],
            new_lon_grid,
            new_lat_grid,
            i, output_filepath,
            visualize_errors=False
        )
        gpr_models.append(gpr_model)

    # Interpolate each time step with the pre-trained models
    for t in tqdm.tqdm(range(new_time_dim)):
        for comp_idx in range(3):
            comp_data = component_series[comp_idx][t]
            # BUG FIX: the original call omitted the `i` argument, so
            # output_filepath shifted into the `i` slot and the pre-trained
            # model shifted into `output_filepath`; gpr_model then defaulted
            # to None and a new model was retrained on every call.
            comp_interp, _ = gpr_interpolation(
                glon_avg[t],
                glat_avg[t],
                comp_data,
                new_lon_grid,
                new_lat_grid,
                comp_idx,
                output_filepath,
                gpr_model=gpr_models[comp_idx]
            )

            result[t, comp_idx, :, :] = comp_interp

    # Save results (optional)
    if output_filepath is not None:
        os.makedirs(output_filepath, exist_ok=True)
        np.save(os.path.join(output_filepath, "gpr_interpolated_data.npy"), result)
        print("GPR interpolation completed, results saved")
        print(f"Final data shape: {result.shape} (time steps, components, latitude, longitude)")

        # Save grid coordinates
        grid_info = {
            'latitude': new_lat,
            'longitude': new_lon
        }
        np.savez(os.path.join(output_filepath, "grid_coordinates.npz"), **grid_info)
        print("Grid coordinates saved")
    return result

def ReadDst(file_path):
    """
    Read Dst index data from file.

    The Dst value is taken from the last whitespace-separated token of
    each line.

    Parameters:
    file_path (str): Path to the Dst data file containing only Dst values

    Returns:
    numpy.ndarray: Dst values reshaped to (-1, 1). Values are kept as
        strings, matching the raw file tokens.
    """
    # Use a context manager so the handle is closed even on error
    # (the original opened the file and never closed it).
    with open(file_path, "r") as dst_file:
        lines = dst_file.readlines()
    # NOTE(review): values stay as strings for backward compatibility;
    # callers presumably want floats — confirm before changing the dtype.
    values = [line.split()[-1] for line in lines]
    return np.array(values).reshape((-1, 1))

if __name__ == "__main__":
    output_filepath = './Magnet_data/'
    filename = r'Data/SuperMAG_60s_20250421_rev-0006.1749112872.netcdf'
    # BUG FIX: pass output_filepath so the results are actually saved
    # (it was previously defined but never used, so nothing was written).
    result = GeoMagGPR(filename, output_filepath)