import pandas as pd
import numpy as np
import matplotlib.pyplot as plt


def import_data(file_path):
    """
    Reads a CSV or Excel file and splits the data into input (x) and output (y).

    The extension check is case-insensitive, so 'DATA.CSV' and 'data.XLSX'
    are accepted as well.

    Parameters:
        file_path (str): Path to the file.

    Returns:
        x (Series): First column of the file (force data).
        y (DataFrame): Remaining columns (node data), coerced to numeric;
            non-numeric cells become NaN.

    Raises:
        ValueError: If the file extension is neither .csv nor .xlsx.
    """
    # Normalize once so the extension comparison is case-insensitive.
    lower_path = file_path.lower()
    if lower_path.endswith('.csv'):
        data = pd.read_csv(file_path)
    elif lower_path.endswith('.xlsx'):
        data = pd.read_excel(file_path)
    else:
        raise ValueError("Unsupported file format. Please provide a .csv or .xlsx file.")

    # Split data into input (force) and output (node strains)
    x = data.iloc[:, 0]   # First column: force values (a Series, not a DataFrame)
    y = data.iloc[:, 1:]  # Remaining columns: node data

    # Coerce node data to numeric; unparseable cells become NaN and are
    # handled downstream (pod_reduce_dimension zero-fills them).
    y = y.apply(pd.to_numeric, errors='coerce')

    return x, y


def pod_reduce_dimension(x, y, target_dimension):
    """
    Reduces the dimensionality of output (y) using Proper Orthogonal Decomposition (POD).

    Parameters:
        x (Series): Input data (unused here; kept for interface symmetry).
        y (DataFrame): Output data, one row per sample, one column per node.
        target_dimension (int): Number of dimensions to reduce to.

    Returns:
        reduced_y (DataFrame): Reduced-dimensional output with columns
            'Component_1' .. 'Component_<target_dimension>'.

    Raises:
        ValueError: If target_dimension is not in [1, number of output columns].
    """
    n_features = y.shape[1]
    # Validate up front: slicing more columns than exist would otherwise
    # silently truncate the basis and surface later as a confusing shape
    # mismatch when the result DataFrame is built.
    if not 1 <= target_dimension <= n_features:
        raise ValueError(
            f"target_dimension must be between 1 and {n_features}, "
            f"got {target_dimension}."
        )

    # Convert output data to NumPy array
    y_array = y.values

    # Replace NaN values (e.g. from failed numeric coercion) with zeros.
    if np.isnan(y_array).any():
        y_array = np.nan_to_num(y_array)

    # Covariance across features (columns); np.cov mean-centers internally.
    cov_matrix = np.cov(y_array, rowvar=False)

    # eigh is appropriate here: the covariance matrix is symmetric.
    eigenvalues, eigenvectors = np.linalg.eigh(cov_matrix)

    # Sort eigenpairs by decreasing eigenvalue so dominant modes come first
    # (eigh returns them in ascending order).
    sorted_indices = np.argsort(eigenvalues)[::-1]
    eigenvalues = eigenvalues[sorted_indices]
    eigenvectors = eigenvectors[:, sorted_indices]

    # Keep only the `target_dimension` dominant modes.
    reduced_basis = eigenvectors[:, :target_dimension]

    # NOTE(review): the raw (uncentered) data is projected onto a basis
    # derived from the mean-centered covariance; classical PCA would project
    # y_array minus its column means — confirm this is intentional.
    reduced_y_array = y_array @ reduced_basis

    # Convert the reduced data back to a DataFrame with descriptive columns.
    reduced_y = pd.DataFrame(
        reduced_y_array,
        columns=[f'Component_{i + 1}' for i in range(target_dimension)],
    )

    return reduced_y


def plot_clusters(x, reduced_y):
    """
    Draws 2D and 3D clustering plots based on reduced dimensions and input forces.

    Saves the figures to './output/2d_cluster.png' and — when at least three
    components are available — './output/3d_cluster.png', creating the output
    directory first if it does not exist.

    Parameters:
        x (Series): Input force values, used to color the points.
        reduced_y (DataFrame): Reduced-dimensional output data; must have at
            least two columns for the 2D plot.
    """
    import os

    # savefig raises FileNotFoundError if the target directory is missing,
    # so make sure it exists before plotting.
    os.makedirs('./output', exist_ok=True)

    # Convert x values to a numpy array for coloring
    x_values = x.values

    # Plot 2D clustering (first two components)
    plt.figure(figsize=(8, 6))
    scatter_2d = plt.scatter(reduced_y.iloc[:, 0], reduced_y.iloc[:, 1], c=x_values, cmap='viridis')
    plt.colorbar(scatter_2d, label='Force (x)')
    plt.xlabel('Component 1')
    plt.ylabel('Component 2')
    plt.title('2D Clustering Plot Based on POD')
    plt.savefig('./output/2d_cluster.png')
    plt.show()

    # Plot 3D clustering (first three components, if available)
    if reduced_y.shape[1] >= 3:
        fig = plt.figure(figsize=(10, 8))
        ax = fig.add_subplot(111, projection='3d')
        scatter_3d = ax.scatter(
            reduced_y.iloc[:, 0], reduced_y.iloc[:, 1], reduced_y.iloc[:, 2],
            c=x_values, cmap='viridis'
        )
        fig.colorbar(scatter_3d, ax=ax, label='Force (x)')
        ax.set_xlabel('Component 1')
        ax.set_ylabel('Component 2')
        ax.set_zlabel('Component 3')
        ax.set_title('3D Clustering Plot Based on POD')
        plt.savefig('./output/3d_cluster.png')
        plt.show()


if __name__ == "__main__":
    import argparse
    import os
    import sys

    parser = argparse.ArgumentParser(description="Reduce dimensionality of strain data using POD.")
    # Help strings now match the actual defaults below (they previously
    # claimed './sample.xlsx' and './pod_output.csv').
    parser.add_argument("file_path", type=str, nargs='?', default='./data/sample1.xlsx',
                        help="Path to the input file (CSV or Excel). Default is './data/sample1.xlsx'.")
    parser.add_argument("output_path", type=str, nargs='?', default='./output/pod_output.csv',
                        help="Path to save the output CSV file. Default is './output/pod_output.csv'.")
    parser.add_argument("--target_dimension", type=int, default=3,
                        help="Number of dimensions to reduce to (default: 3).")
    args = parser.parse_args()

    # Import data
    x, y = import_data(args.file_path)

    # Reduce dimension using POD
    try:
        reduced_y = pod_reduce_dimension(x, y, args.target_dimension)
    except ValueError as e:
        print(f"Error during dimensionality reduction: {e}")
        # sys.exit is the reliable way to exit a script; the bare `exit`
        # builtin is a site-module convenience not guaranteed everywhere.
        sys.exit(1)

    # Combine input (x) and reduced output (reduced_y) into one DataFrame
    combined_data = pd.concat([x, reduced_y], axis=1)

    # Ensure the destination directory exists before writing the CSV.
    out_dir = os.path.dirname(args.output_path)
    if out_dir:
        os.makedirs(out_dir, exist_ok=True)
    combined_data.to_csv(args.output_path, index=False)

    print(f"Reduced-dimension data saved to {args.output_path}")

    plot_clusters(x, reduced_y)
