import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.neural_network import MLPRegressor
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import r2_score, mean_squared_error
import math
import os

def load_data(file_path):
    """Read the combined dataset from an Excel workbook.

    Parameters
    ----------
    file_path : str
        Path to the input .xlsx file.

    Returns
    -------
    pd.DataFrame
        The workbook's first sheet as a dataframe.
    """
    return pd.read_excel(file_path)

def prepare_data(data, n_predictors=800):
    """Split a combined spreadsheet into sample names, predictors, and responses.

    Expected layout: the first column ('Sample') holds sample identifiers,
    the next `n_predictors` columns hold the predictor matrix, and the
    remaining columns — except the very last one, which is skipped — are
    the response variables.

    Parameters
    ----------
    data : pd.DataFrame
        Combined data as loaded from Excel.
    n_predictors : int, optional
        Number of predictor columns following 'Sample' (default 800,
        matching the original hard-coded layout).

    Returns
    -------
    tuple
        (sample_names, X, Y, y_cols) where sample_names is an ndarray of
        identifiers, X is the 2-D predictor array, Y maps each response
        column name to its value array, and y_cols is the Index of
        response column names.
    """
    # Sample identifiers come from the dedicated 'Sample' column.
    sample_names = data['Sample'].values

    # Predictor block starts immediately after the 'Sample' column.
    X = data.iloc[:, 1:1 + n_predictors].values

    # Response columns run from the end of the predictor block up to (but
    # not including) the last column.
    # NOTE(review): the final column is deliberately excluded here, matching
    # the original code — confirm this is the intended spreadsheet layout.
    y_cols = data.columns[1 + n_predictors:-1]
    Y = {col: data[col].values for col in y_cols}

    return sample_names, X, Y, y_cols

def split_data(X, y, sample_names, test_size=0.2, random_state=42):
    """Partition predictors, targets, and sample names into train/test sets.

    Parameters
    ----------
    X, y, sample_names : array-likes split in lockstep.
    test_size : float, optional
        Fraction of rows held out for testing (default 0.2).
    random_state : int, optional
        Seed for a reproducible shuffle (default 42).

    Returns
    -------
    tuple
        (X_train, X_test, y_train, y_test, names_train, names_test).
    """
    return tuple(
        train_test_split(
            X,
            y,
            sample_names,
            test_size=test_size,
            random_state=random_state,
        )
    )

def normalize_data(X_train, X_test):
    """Standardize features: fit statistics on train, apply to both splits.

    Fitting only on the training split avoids leaking test-set statistics
    into the model.

    Returns
    -------
    tuple
        (X_train_scaled, X_test_scaled, scaler) — the fitted
        StandardScaler is returned so callers can transform new data.
    """
    feature_scaler = StandardScaler()
    train_scaled = feature_scaler.fit_transform(X_train)
    test_scaled = feature_scaler.transform(X_test)
    return train_scaled, test_scaled, feature_scaler

def train_nn_with_cv(X_train, y_train):
    """Tune an MLP regressor via exhaustive grid search with 5-fold CV.

    Parameters
    ----------
    X_train : ndarray
        Scaled training predictors.
    y_train : ndarray
        Training targets.

    Returns
    -------
    tuple
        (best_model, best_params) — the refit best estimator and the
        winning hyper-parameter combination.
    """
    # Hyper-parameter search space: architecture, non-linearity,
    # L2 penalty, and initial learning rate.
    search_space = {
        'hidden_layer_sizes': [(50,), (100,), (50, 50), (100, 50)],
        'activation': ['relu', 'tanh'],
        'alpha': [0.0001, 0.001, 0.01],
        'learning_rate_init': [0.001, 0.01],
    }

    # Minimize MSE across 5 folds, using every available core.
    search = GridSearchCV(
        estimator=MLPRegressor(max_iter=1000, random_state=42),
        param_grid=search_space,
        cv=5,
        scoring='neg_mean_squared_error',
        n_jobs=-1,
    )
    search.fit(X_train, y_train)

    return search.best_estimator_, search.best_params_

def evaluate_model(model, X_test, y_test):
    """Score a fitted regressor on held-out data.

    Parameters
    ----------
    model : fitted estimator with a `predict` method.
    X_test : ndarray
        Held-out predictors (already scaled like the training data).
    y_test : ndarray
        Held-out targets.

    Returns
    -------
    tuple
        (r2, rmse, y_pred) — coefficient of determination, root mean
        squared error, and the raw predictions.
    """
    predictions = model.predict(X_test)
    mse = mean_squared_error(y_test, predictions)
    return r2_score(y_test, predictions), math.sqrt(mse), predictions

def main(file_path, output_path):
    """Run the full modelling pipeline and export per-sample predictions.

    For each response variable in the input workbook: split the data,
    standardize predictors on the training split, tune an MLP regressor
    with grid-searched 5-fold CV, evaluate on the held-out split, and
    collect one row per (test sample, response variable). The combined
    table is written to `output_path` as Excel.

    Parameters
    ----------
    file_path : str
        Path to the combined input .xlsx workbook.
    output_path : str
        Destination .xlsx path for the results table.
    """
    # Load data
    data = load_data(file_path)

    # Prepare data
    sample_names, X, Y, y_cols = prepare_data(data)

    # Accumulates one dict per (test sample, response variable).
    results = []

    # Process each response variable independently.
    for col in y_cols:
        print(f"Processing response variable: {col}")
        y = Y[col]

        # Split data
        X_train, X_test, y_train, y_test, names_train, names_test = split_data(X, y, sample_names)

        # Scale using training-split statistics only, to avoid leaking
        # test-set information into the model.
        X_train_scaled, X_test_scaled, scaler = normalize_data(X_train, X_test)

        # Train model with CV and parameter tuning
        best_model, best_params = train_nn_with_cv(X_train_scaled, y_train)
        print(f"Best parameters for {col}: {best_params}")

        # Evaluate model
        r2, rmse, y_pred = evaluate_model(best_model, X_test_scaled, y_test)
        print(f"R² for {col}: {r2:.4f}")
        print(f"RMSE for {col}: {rmse:.4f}")

        # One output row per held-out sample; R2/RMSE repeat per row because
        # they are properties of the whole test split, not of one sample.
        for i in range(len(names_test)):
            results.append({
                'Sample': names_test[i],
                'Response_Variable': col,
                'R2': r2,
                'RMSE': rmse,
                'Actual': y_test[i],
                'Predicted': y_pred[i]
            })

    # Create results dataframe
    results_df = pd.DataFrame(results)

    # Ensure the output directory exists before writing — previously the
    # script crashed here if e.g. "ML/" had not been created yet (and the
    # `os` import went unused).
    out_dir = os.path.dirname(output_path)
    if out_dir:
        os.makedirs(out_dir, exist_ok=True)

    # Export results to Excel
    results_df.to_excel(output_path, index=False)
    print(f"Results saved to {output_path}")

# Script entry point
if __name__ == "__main__":
    # Adjust these paths to point at your own data before running.
    source_workbook = "ML/CT_1_Combined_Data.xlsx"  # Replace with your input file path
    results_workbook = "ML/nn_results.xlsx"  # Replace with your desired output file path
    main(source_workbook, results_workbook)