import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader, TensorDataset
from sklearn.model_selection import train_test_split, KFold
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import r2_score, mean_squared_error
import matplotlib.pyplot as plt
from itertools import product
import os
from joblib import Parallel, delayed

# Set random seeds for reproducibility across numpy and torch.
# Note: full determinism on GPU would additionally require cuDNN flags;
# this only seeds the RNGs.
np.random.seed(42)
torch.manual_seed(42)
if torch.cuda.is_available():
    torch.cuda.manual_seed_all(42)

# Select the computation device: first CUDA GPU when available, else CPU.
# NOTE(review): run_cnn_regression() re-creates an identical module-level
# `device`; this global is what grid_search_cv() actually reads.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")

# Function to load the data
def load_data(file_path):
    df = pd.read_excel(file_path)
    # Assuming first column is 'Sample', followed by predictors, then response variables
    sample_names = df.iloc[:, 0]
    X = df.iloc[:, 1:801].values  # Predictors (columns 2 to 801)
    y = df.iloc[:, 801:-1].values  # Response variables (column 802 to second last)
    response_names = df.columns[801:-1]
    
    print(f"Loaded data: {X.shape[0]} samples, {X.shape[1]} features, {y.shape[1]} response variables")
    return sample_names, X, y, response_names

# Custom CNN model with flexible architecture
# Custom CNN model with flexible architecture
class CNNRegression(nn.Module):
    """1-D convolutional network for single-output regression.

    Architecture: Conv1d -> ReLU -> MaxPool1d -> Dropout -> Flatten ->
    Linear -> ReLU -> Dropout -> Linear(1).

    Parameters
    ----------
    input_length : int
        Number of features per sample (length of the 1-D signal).
    filters : int
        Number of convolutional output channels.
    kernel_size : int
        Convolution kernel width (stride 1, no padding).
    pool_size : int
        Max-pool window width (MaxPool1d stride defaults to the window size).
    dense_units : int
        Width of the hidden fully connected layer.
    dropout_rate : float
        Dropout probability used after pooling and after the hidden layer.
    """

    def __init__(self, input_length, filters, kernel_size, pool_size, dense_units, dropout_rate):
        super(CNNRegression, self).__init__()

        # CNN layers: a single input channel (the signal is a 1-D sequence).
        self.conv = nn.Conv1d(in_channels=1, out_channels=filters, kernel_size=kernel_size)
        self.relu = nn.ReLU()
        self.pool = nn.MaxPool1d(kernel_size=pool_size)
        self.dropout1 = nn.Dropout(dropout_rate)

        # Output length of a stride-1, unpadded convolution.
        conv_output_length = input_length - kernel_size + 1
        # MaxPool1d uses stride == kernel_size by default:
        # L_out = floor((L_in - pool_size) / pool_size) + 1
        pool_output_length = ((conv_output_length - pool_size) // pool_size) + 1

        # Size of the flattened feature map feeding the dense head.
        self.flattened_size = filters * pool_output_length

        print(f"CNN architecture: Input={input_length}, Conv output={conv_output_length}, "
              f"Pool output={pool_output_length}, Flattened size={self.flattened_size}")

        # Fully connected head ending in a single regression output.
        self.fc1 = nn.Linear(self.flattened_size, dense_units)
        self.relu2 = nn.ReLU()
        self.dropout2 = nn.Dropout(dropout_rate)
        self.fc2 = nn.Linear(dense_units, 1)

    def forward(self, x):
        """Run a forward pass.

        Expects ``x`` shaped ``(batch, input_length, 1)``; returns
        ``(batch, 1)``.
        """
        # Reshape input: (batch_size, input_length, 1) -> (batch_size, 1, input_length)
        x = x.permute(0, 2, 1)

        # One-time shape prints to aid debugging of the first forward pass.
        if not hasattr(self, 'debug_printed'):
            print(f"Input shape: {x.shape}")
            self.debug_printed = True

        # Apply CNN layers
        x = self.conv(x)
        if not hasattr(self, 'conv_shape_printed'):
            print(f"After conv shape: {x.shape}")
            self.conv_shape_printed = True

        x = self.relu(x)
        x = self.pool(x)

        if not hasattr(self, 'pool_shape_printed'):
            print(f"After pool shape: {x.shape}")
            self.pool_shape_printed = True

        x = self.dropout1(x)

        # Flatten all dims except batch for the dense head.
        x = x.flatten(start_dim=1)

        if not hasattr(self, 'flatten_shape_printed'):
            print(f"After flatten shape: {x.shape}")
            self.flatten_shape_printed = True

        # Apply fully connected layers
        x = self.fc1(x)
        x = self.relu2(x)
        x = self.dropout2(x)
        x = self.fc2(x)

        return x

# Function to train model
def train_model(model, train_loader, val_loader, criterion, optimizer, device, patience=15, max_epochs=200):
    # Initialize early stopping parameters
    best_val_loss = float('inf')
    best_model_state = None
    patience_counter = 0
    
    # Lists to store training and validation losses
    train_losses = []
    val_losses = []
    
    # Training loop
    for epoch in range(max_epochs):
        # Training phase
        model.train()
        train_loss = 0.0
        for inputs, targets in train_loader:
            inputs, targets = inputs.to(device), targets.to(device)
            
            # Zero the parameter gradients
            optimizer.zero_grad()
            
            # Forward pass
            outputs = model(inputs)
            loss = criterion(outputs, targets)
            
            # Backward pass and optimize
            loss.backward()
            optimizer.step()
            
            train_loss += loss.item() * inputs.size(0)
        
        train_loss = train_loss / len(train_loader.dataset)
        train_losses.append(train_loss)
        
        # Validation phase
        model.eval()
        val_loss = 0.0
        with torch.no_grad():
            for inputs, targets in val_loader:
                inputs, targets = inputs.to(device), targets.to(device)
                outputs = model(inputs)
                loss = criterion(outputs, targets)
                val_loss += loss.item() * inputs.size(0)
        
        val_loss = val_loss / len(val_loader.dataset)
        val_losses.append(val_loss)
        
        # Print progress every 10 epochs
        if (epoch + 1) % 10 == 0:
            print(f'Epoch {epoch+1}/{max_epochs}, Train Loss: {train_loss:.6f}, Val Loss: {val_loss:.6f}')
        
        # Check for early stopping
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            best_model_state = model.state_dict().copy()
            patience_counter = 0
        else:
            patience_counter += 1
            if patience_counter >= patience:
                print(f'Early stopping at epoch {epoch+1}')
                break
    
    # Load best model state
    model.load_state_dict(best_model_state)
    
    return model, train_losses, val_losses

# Simplified parameter grid for faster testing
def create_test_model(X, input_length, device):
    """Create a test model to verify dimensions"""
    # Use smaller network for testing
    model = CNNRegression(
        input_length=input_length,
        filters=32,
        kernel_size=3,
        pool_size=2,
        dense_units=64,
        dropout_rate=0.2
    ).to(device)
    
    # Create a small batch of test data
    X_tensor = torch.tensor(X[:5].reshape(5, X.shape[1], 1), dtype=torch.float32).to(device)
    
    # Run a forward pass to check dimensions
    with torch.no_grad():
        try:
            output = model(X_tensor)
            print(f"Test forward pass successful. Output shape: {output.shape}")
            return True
        except Exception as e:
            print(f"Test forward pass failed: {str(e)}")
            return False

# Function to perform grid search for hyperparameter tuning with reduced complexity
def grid_search_cv(X, y, param_grid, n_splits=5, batch_size=32):
    # Initialize variables to store best parameters and metrics
    best_params = None
    best_cv_rmse = float('inf')
    
    # Create KFold cross-validation
    kf = KFold(n_splits=n_splits, shuffle=True, random_state=42)
    
    # Generate all parameter combinations
    param_combinations = list(product(*param_grid.values()))
    param_keys = list(param_grid.keys())
    
    # Convert data to PyTorch tensors
    X_tensor = torch.tensor(X.reshape(X.shape[0], X.shape[1], 1), dtype=torch.float32)
    y_tensor = torch.tensor(y.reshape(-1, 1), dtype=torch.float32)
    
    for combo_idx, combo in enumerate(param_combinations):
        # Create dictionary of parameters
        params_dict = {param_keys[i]: combo[i] for i in range(len(param_keys))}
        
        print(f"Evaluating parameter set {combo_idx+1}/{len(param_combinations)}: {params_dict}")
        
        # Initialize list to store RMSE values for each fold
        cv_rmse_list = []
        
        for fold_idx, (train_idx, val_idx) in enumerate(kf.split(X)):
            print(f"  Fold {fold_idx+1}/{n_splits}")
            
            # Split data into train and validation sets
            X_train_cv = X_tensor[train_idx]
            y_train_cv = y_tensor[train_idx]
            X_val_cv = X_tensor[val_idx]
            y_val_cv = y_tensor[val_idx]
            
            # Create DataLoaders
            train_dataset = TensorDataset(X_train_cv, y_train_cv)
            val_dataset = TensorDataset(X_val_cv, y_val_cv)
            
            train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
            val_loader = DataLoader(val_dataset, batch_size=batch_size)
            
            # Create and initialize model with current parameters
            model = CNNRegression(
                input_length=X.shape[1],
                filters=params_dict['filters'],
                kernel_size=params_dict['kernel_size'],
                pool_size=params_dict['pool_size'],
                dense_units=params_dict['dense_units'],
                dropout_rate=params_dict['dropout_rate']
            ).to(device)
            
            # Define loss function and optimizer
            criterion = nn.MSELoss()
            optimizer = optim.Adam(model.parameters(), lr=params_dict['learning_rate'])
            
            # Train model
            model, _, _ = train_model(
                model=model,
                train_loader=train_loader,
                val_loader=val_loader,
                criterion=criterion,
                optimizer=optimizer,
                device=device,
                patience=10,
                max_epochs=100
            )
            
            # Evaluate model on validation set
            model.eval()
            y_pred_cv = []
            with torch.no_grad():
                for inputs, _ in val_loader:
                    inputs = inputs.to(device)
                    outputs = model(inputs)
                    y_pred_cv.append(outputs.cpu().numpy())
            
            y_pred_cv = np.vstack(y_pred_cv).flatten()
            y_val_np = y_val_cv.cpu().numpy().flatten()
            
            # Calculate RMSE
            rmse_cv = np.sqrt(mean_squared_error(y_val_np, y_pred_cv))
            cv_rmse_list.append(rmse_cv)
            
            print(f"    Validation RMSE: {rmse_cv:.6f}")
        
        # Calculate mean RMSE across folds
        mean_cv_rmse = np.mean(cv_rmse_list)
        print(f"  Mean CV RMSE: {mean_cv_rmse:.6f}")
        
        # Update best parameters if current combination is better
        if mean_cv_rmse < best_cv_rmse:
            best_cv_rmse = mean_cv_rmse
            best_params = params_dict
            print(f"  New best parameter set found!")
    
    print(f"Best parameters: {best_params}")
    print(f"Best cross-validation RMSE: {best_cv_rmse:.6f}")
    
    return best_params

# Main function with simpler parameter grid
def process_single_response(i, X, y, response_names, sample_names, param_grid, test_size, batch_size, device):
    """
    Process a single response variable and return results
    
    Parameters:
    -----------
    i : int
        Index of the response variable to process
    X : numpy array
        Predictor variables
    y : numpy array
        Response variables
    response_names : list or pandas Index
        Names of the response variables
    sample_names : pandas Index
        Names of the samples
    param_grid : dict
        Parameter grid for hyperparameter tuning
    test_size : float
        Proportion of the data to use for testing
    batch_size : int
        Batch size for training
    device : torch.device
        Device to use for computation (CPU or GPU)
    
    Returns:
    --------
    dict
        Dictionary with response variable name, model parameters, and performance metrics
    """
    print(f"\nProcessing response variable: {response_names[i]} ({i+1}/{len(response_names)})")
    
    # Extract current response variable
    y_current = y[:, i]
    
    # Split data into train and test sets
    X_train, X_test, y_train, y_test, train_idx, test_idx = train_test_split(
        X, y_current, range(len(sample_names)), test_size=test_size, random_state=42
    )
    
    # Scale features
    scaler_X = StandardScaler()
    X_train_scaled = scaler_X.fit_transform(X_train)
    X_test_scaled = scaler_X.transform(X_test)
    
    # Find best hyperparameters using cross-validation
    best_params = grid_search_cv(X_train_scaled, y_train, param_grid, batch_size=batch_size)
    
    # Convert data to PyTorch tensors and reshape for CNN
    X_train_tensor = torch.tensor(X_train_scaled.reshape(X_train_scaled.shape[0], X_train_scaled.shape[1], 1), dtype=torch.float32)
    y_train_tensor = torch.tensor(y_train.reshape(-1, 1), dtype=torch.float32)
    X_test_tensor = torch.tensor(X_test_scaled.reshape(X_test_scaled.shape[0], X_test_scaled.shape[1], 1), dtype=torch.float32)
    
    # Create DataLoaders for final training
    train_dataset = TensorDataset(X_train_tensor, y_train_tensor)
    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
    
    # Create validation set from training data
    X_train_final, X_val_final, y_train_final, y_val_final = train_test_split(
        X_train_tensor, y_train_tensor, test_size=0.2, random_state=42
    )
    val_dataset = TensorDataset(X_val_final, y_val_final)
    val_loader = DataLoader(val_dataset, batch_size=batch_size)
    
    # Create and initialize final model with best parameters
    final_model = CNNRegression(
        input_length=X_train_scaled.shape[1],
        filters=best_params['filters'],
        kernel_size=best_params['kernel_size'],
        pool_size=best_params['pool_size'],
        dense_units=best_params['dense_units'],
        dropout_rate=best_params['dropout_rate']
    ).to(device)
    
    # Define loss function and optimizer
    criterion = nn.MSELoss()
    optimizer = optim.Adam(final_model.parameters(), lr=best_params['learning_rate'])
    
    # Train final model
    final_model, train_losses, val_losses = train_model(
        model=final_model,
        train_loader=train_loader,
        val_loader=val_loader,
        criterion=criterion,
        optimizer=optimizer,
        device=device,
        patience=15,
        max_epochs=200
    )
    
    # Plot training history
    plt.figure(figsize=(10, 5))
    plt.plot(train_losses, label='Training Loss')
    plt.plot(val_losses, label='Validation Loss')
    plt.title(f'Training and Validation Loss - {response_names[i]}')
    plt.xlabel('Epochs')
    plt.ylabel('Loss')
    plt.legend()
    plt.savefig(f'training_history_{response_names[i]}.png')
    plt.close()
    
    # Make predictions on test set
    final_model.eval()
    with torch.no_grad():
        y_pred = final_model(X_test_tensor.to(device)).cpu().numpy().flatten()
    
    # Calculate metrics
    r2 = r2_score(y_test, y_pred)
    rmse = np.sqrt(mean_squared_error(y_test, y_pred))
    
    print(f"Test R² for {response_names[i]}: {r2:.4f}")
    print(f"Test RMSE for {response_names[i]}: {rmse:.4f}")
    
    # Return summary result for this response variable
    return {
        'Response_Variable': response_names[i],
        'Filters': best_params['filters'],
        'Kernel_Size': best_params['kernel_size'],
        'Pool_Size': best_params['pool_size'],
        'Dense_Units': best_params['dense_units'],
        'Dropout_Rate': best_params['dropout_rate'],
        'Learning_Rate': best_params['learning_rate'],
        'R2': r2,
        'RMSE': rmse
    }

def run_cnn_regression(file_path, output_path, test_size=0.2, batch_size=32, n_jobs=None):
    """
    Run CNN regression for all response variables in parallel

    Parameters:
    -----------
    file_path : str
        Path to the input Excel file
    output_path : str
        Path to save the output Excel file
    test_size : float, optional
        Proportion of the data to use for testing (default: 0.2)
    batch_size : int, optional
        Batch size for training (default: 32)
    n_jobs : int, optional
        Number of jobs to run in parallel. None means 1, -1 means all processors (default: None)
    """
    # Load data
    sample_names, X, y, response_names = load_data(file_path)

    # Check for GPU availability
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print(f"Using device: {device}")

    # Sanity-check the CNN dimensions before committing to the full run.
    if not create_test_model(X, X.shape[1], device):
        print("Dimension test failed. Please check model architecture.")
        return

    # Single-point grid: one architecture/learning rate, for fast iteration.
    param_grid = {
        'filters': [32],
        'kernel_size': [3],
        'pool_size': [2],
        'dense_units': [64],
        'dropout_rate': [0.2],
        'learning_rate': [0.001]
    }

    n_responses = y.shape[1]
    if n_jobs is None:
        # Sequential path: one response variable at a time.
        summary_results = [
            process_single_response(
                idx, X, y, response_names, sample_names, param_grid, test_size, batch_size, device
            )
            for idx in range(n_responses)
        ]
    else:
        # Parallel path: fan the response variables out over joblib workers.
        print(f"Processing {n_responses} response variables with {n_jobs} jobs")
        summary_results = Parallel(n_jobs=n_jobs)(
            delayed(process_single_response)(
                idx, X, y, response_names, sample_names, param_grid, test_size, batch_size, device
            ) for idx in range(n_responses)
        )

    summary_df = pd.DataFrame(summary_results)

    # Ensure the destination directory exists before writing.
    output_dir = os.path.dirname(output_path)
    if output_dir:
        os.makedirs(output_dir, exist_ok=True)

    summary_df.to_excel(output_path, index=False)
    print(f"Summary results saved to {output_path}")

    return summary_df

# Example usage
# Example usage
if __name__ == "__main__":
    # Paths to the spectral dataset and the Excel results workbook.
    input_file = "ML/CT_1_Combined_Data.xlsx"  # Your file
    output_file = "ML/cnn_regression_results.xlsx"

    # Run CNN regression with parallelization.
    # n_jobs: None = sequential, -1 = all cores, or an explicit worker count.
    # NOTE(review): with GPU-backed models, multiple joblib workers share one
    # device — confirm this does not cause contention before raising n_jobs.
    results_df = run_cnn_regression(
        file_path=input_file,
        output_path=output_file,
        test_size=0.2,
        batch_size=32,
        n_jobs=1  # Set to None for sequential processing, -1 for all cores, or a specific number
    )