import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader, TensorDataset
from sklearn.model_selection import train_test_split, KFold
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.metrics import r2_score, mean_squared_error
import matplotlib.pyplot as plt
from itertools import product
import os
from joblib import Parallel, delayed

# Set random seeds for reproducibility (NumPy and PyTorch CPU/CUDA RNGs).
# Note: full determinism on CUDA may additionally require cudnn flags —
# NOTE(review): confirm whether bitwise-reproducible runs are required.
np.random.seed(2025)
torch.manual_seed(2025)
if torch.cuda.is_available():
    torch.cuda.manual_seed_all(2025)

# Module-level device used by the helpers below (random_search_cv reads
# this global directly; run_mixed_cnn_regression recomputes its own copy).
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")

# Load the spreadsheet and separate numerical / categorical features and targets
def load_data(file_path):
    """Load an Excel sheet and split it into names, features, and targets.

    Expected column layout (0-based positional indices):
      - index 0:       sample name
      - indices 1-801: numerical features (801 columns as sliced below)
      - index 802:     single categorical feature
      - index 803+:    response variables

    Returns:
        (sample_names, X_num, X_cat, y, response_names)
    """
    df = pd.read_excel(file_path)
    # First column is 'Sample'
    sample_names = df.iloc[:, 0]
    
    # Numerical features: positional indices 1 through 801.
    # NOTE(review): 1:802 selects 801 columns, but the original comment
    # claimed 800 features (indices 1-800) — confirm the intended boundary
    # against the spreadsheet schema.
    X_num = df.iloc[:, 1:802].values
    
    # Categorical feature at positional index 802 (the 803rd column)
    X_cat = df.iloc[:, 802].values
    
    # Response variables: positional index 803 onward (the 804th column on)
    y = df.iloc[:, 803:].values
    response_names = df.columns[803:]
    
    print(f"Loaded data: {X_num.shape[0]} samples, {X_num.shape[1]} numerical features, 1 categorical feature, {y.shape[1]} response variables")
    return sample_names, X_num, X_cat, y, response_names

# Dataset wrapper pairing numerical and categorical inputs (targets optional)
class MixedDataset(Dataset):
    """Yields ((x_num, x_cat), y) tuples, or (x_num, x_cat) when no targets."""

    def __init__(self, X_num, X_cat, y=None):
        # Numerical features as float32, categorical codes as int64 (for Embedding)
        self.X_num = torch.tensor(X_num, dtype=torch.float32)
        self.X_cat = torch.tensor(X_cat, dtype=torch.long)
        if y is None:
            self.y = None
        else:
            self.y = torch.tensor(y, dtype=torch.float32)

    def __len__(self):
        return self.X_num.shape[0]

    def __getitem__(self, idx):
        features = (self.X_num[idx], self.X_cat[idx])
        if self.y is None:
            return features
        return features, self.y[idx]

# Collate ((x_num, x_cat), y) samples into batched tensors for the DataLoader
def mixed_collate(batch):
    """Stack a list of ((x_num, x_cat), y) samples into one batch.

    Returns ((x_num_batch, x_cat_batch), y_batch).
    """
    feature_pairs, targets = zip(*batch)
    nums, cats = zip(*feature_pairs)
    return (torch.stack(nums), torch.stack(cats)), torch.stack(targets)

# CNN regressor over numerical features plus an embedding of one categorical feature
class MixedCNNRegression(nn.Module):
    """1-D CNN over the numerical vector, concatenated with a learned
    embedding of the categorical code, followed by a small dense head
    producing a single regression output."""

    def __init__(self, numerical_input_length, num_categories, embedding_dim,
                 filters, kernel_size, pool_size, dense_units, dropout_rate):
        super(MixedCNNRegression, self).__init__()

        # Learned embedding for the categorical input
        self.embedding = nn.Embedding(num_categories, embedding_dim)

        # Convolutional stack applied to the numerical input (one channel)
        self.conv = nn.Conv1d(in_channels=1, out_channels=filters, kernel_size=kernel_size)
        self.relu = nn.ReLU()
        self.pool = nn.MaxPool1d(kernel_size=pool_size)
        self.dropout1 = nn.Dropout(dropout_rate)

        # Length after a stride-1, no-padding convolution ...
        conv_output_length = numerical_input_length - kernel_size + 1
        # ... then after max pooling (stride defaults to the kernel size)
        pool_output_length = ((conv_output_length - pool_size) // pool_size) + 1

        # Flattened CNN feature size fed into the dense head
        self.flattened_size = filters * pool_output_length

        print(f"CNN architecture: Input={numerical_input_length}, Conv output={conv_output_length}, "
              f"Pool output={pool_output_length}, Flattened size={self.flattened_size}")

        # Dense head over [CNN features | categorical embedding]
        self.fc1 = nn.Linear(self.flattened_size + embedding_dim, dense_units)
        self.relu2 = nn.ReLU()
        self.dropout2 = nn.Dropout(dropout_rate)
        self.fc2 = nn.Linear(dense_units, 1)

    def forward(self, x):
        numeric, categorical = x

        # (batch, length) -> (batch, 1, length) so Conv1d sees a single channel,
        # then conv -> relu -> pool -> dropout and flatten to (batch, features)
        conv_out = self.dropout1(self.pool(self.relu(self.conv(numeric.unsqueeze(1)))))
        conv_out = conv_out.flatten(start_dim=1)

        # (batch,) -> (batch, embedding_dim)
        embedded = self.embedding(categorical)

        # Concatenate along the feature axis and regress to one scalar per sample
        combined = torch.cat((conv_out, embedded), dim=1)
        hidden = self.dropout2(self.relu2(self.fc1(combined)))
        return self.fc2(hidden)

# Train a model with early stopping and best-weight restoration
def train_model(model, train_loader, val_loader, criterion, optimizer, device, patience=15, max_epochs=200):
    """Train `model`, early-stopping on validation loss.

    Args:
        model: module whose forward accepts an (x_num, x_cat) tuple.
        train_loader, val_loader: loaders yielding ((x_num, x_cat), targets).
        criterion: loss function (e.g. nn.MSELoss()).
        optimizer: optimizer over model.parameters().
        device: torch device to run on.
        patience: epochs without validation improvement before stopping.
        max_epochs: hard cap on the number of epochs.

    Returns:
        (model, train_losses, val_losses); the model is restored to the
        weights that achieved the lowest validation loss.
    """
    best_val_loss = float('inf')
    best_model_state = None
    patience_counter = 0

    # Per-epoch dataset-mean losses
    train_losses = []
    val_losses = []

    for epoch in range(max_epochs):
        # ---- Training phase ----
        model.train()
        train_loss = 0.0
        for inputs, targets in train_loader:
            x_num, x_cat = inputs
            x_num, x_cat = x_num.to(device), x_cat.to(device)
            targets = targets.to(device)

            optimizer.zero_grad()
            outputs = model((x_num, x_cat))
            loss = criterion(outputs, targets)
            loss.backward()
            optimizer.step()

            # Weight each batch loss by batch size so the epoch value is a
            # true dataset mean even with a ragged final batch
            train_loss += loss.item() * x_num.size(0)

        train_loss = train_loss / len(train_loader.dataset)
        train_losses.append(train_loss)

        # ---- Validation phase ----
        model.eval()
        val_loss = 0.0
        with torch.no_grad():
            for inputs, targets in val_loader:
                x_num, x_cat = inputs
                x_num, x_cat = x_num.to(device), x_cat.to(device)
                targets = targets.to(device)

                outputs = model((x_num, x_cat))
                loss = criterion(outputs, targets)
                val_loss += loss.item() * x_num.size(0)

        val_loss = val_loss / len(val_loader.dataset)
        val_losses.append(val_loss)

        # Print progress every 10 epochs
        if (epoch + 1) % 10 == 0:
            print(f'Epoch {epoch+1}/{max_epochs}, Train Loss: {train_loss:.6f}, Val Loss: {val_loss:.6f}')

        # ---- Early-stopping bookkeeping ----
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            # BUGFIX: the previous `model.state_dict().copy()` was a shallow
            # copy — the dict was copied but its tensors still aliased the
            # live parameters, so later optimizer steps mutated the saved
            # "best" state and the FINAL weights were restored, not the best.
            # Clone every tensor to get an independent snapshot.
            best_model_state = {k: v.detach().clone() for k, v in model.state_dict().items()}
            patience_counter = 0
        else:
            patience_counter += 1
            if patience_counter >= patience:
                print(f'Early stopping at epoch {epoch+1}')
                break

    # Restore the best snapshot. Guard against the pathological case where no
    # epoch ever beat +inf (e.g. all-NaN losses), which would leave None here.
    if best_model_state is not None:
        model.load_state_dict(best_model_state)

    return model, train_losses, val_losses

# Smoke-test helper: run one forward pass with a throwaway model
def create_test_model(X_num, X_cat, num_categories, embedding_dim, device):
    """Build a small fixed-architecture model and run a tiny forward pass
    to validate tensor dimensions; returns True on success, False otherwise."""
    # Architecture values are arbitrary — only shape compatibility matters here
    model = MixedCNNRegression(
        numerical_input_length=X_num.shape[1],
        num_categories=num_categories,
        embedding_dim=embedding_dim,
        filters=32,
        kernel_size=3,
        pool_size=2,
        dense_units=64,
        dropout_rate=0.2
    ).to(device)

    # The first five rows are enough to exercise the whole graph
    X_num_tensor = torch.tensor(X_num[:5], dtype=torch.float32).to(device)
    X_cat_tensor = torch.tensor(X_cat[:5], dtype=torch.long).to(device)

    try:
        with torch.no_grad():
            output = model((X_num_tensor, X_cat_tensor))
    except Exception as e:
        print(f"Test forward pass failed: {str(e)}")
        return False
    print(f"Test forward pass successful. Output shape: {output.shape}")
    return True

# Random-search hyperparameter tuning with K-fold cross-validation
def random_search_cv(X_num, X_cat, y, num_categories, param_grid, n_iter=10, n_splits=5, batch_size=32):
    """
    Perform random search for hyperparameter tuning with mixed data types.

    Samples `n_iter` random combinations from `param_grid` (duplicates are
    possible), scores each by mean validation RMSE over `n_splits` K-fold
    splits, and returns the best-scoring parameter dict.

    NOTE(review): this function reads the module-level `device` global
    rather than taking a device argument — confirm this is intentional,
    especially for the joblib-parallel code path.
    """
    # Track the best parameter set seen so far and its mean CV RMSE
    best_params = None
    best_cv_rmse = float('inf')
    
    # Fixed-seed KFold so every parameter set is scored on identical folds
    kf = KFold(n_splits=n_splits, shuffle=True, random_state=42)
    
    # Seeded stdlib RNG so the sampled combinations are reproducible
    import random
    random.seed(42)
    
    # Draw n_iter random combinations (one independent choice per parameter)
    param_combinations = []
    for _ in range(n_iter):
        params = {}
        for param_name, param_values in param_grid.items():
            params[param_name] = random.choice(param_values)
        param_combinations.append(params)
    
    for combo_idx, params_dict in enumerate(param_combinations):
        print(f"Evaluating parameter set {combo_idx+1}/{n_iter}: {params_dict}")
        
        # Per-fold validation RMSEs for this combination
        cv_rmse_list = []
        
        for fold_idx, (train_idx, val_idx) in enumerate(kf.split(X_num)):
            print(f"  Fold {fold_idx+1}/{n_splits}")
            
            # Split data into train and validation sets for this fold
            X_num_train_cv = X_num[train_idx]
            X_cat_train_cv = X_cat[train_idx]
            y_train_cv = y[train_idx]
            
            X_num_val_cv = X_num[val_idx]
            X_cat_val_cv = X_cat[val_idx]
            y_val_cv = y[val_idx]
            
            # Targets reshaped to (n, 1) to match the model's scalar output
            train_dataset = MixedDataset(X_num_train_cv, X_cat_train_cv, y_train_cv.reshape(-1, 1))
            val_dataset = MixedDataset(X_num_val_cv, X_cat_val_cv, y_val_cv.reshape(-1, 1))
            
            train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, collate_fn=mixed_collate)
            val_loader = DataLoader(val_dataset, batch_size=batch_size, collate_fn=mixed_collate)
            
            # Fresh model per fold so no weights leak between folds
            model = MixedCNNRegression(
                numerical_input_length=X_num.shape[1],
                num_categories=num_categories,
                embedding_dim=params_dict['embedding_dim'],
                filters=params_dict['filters'],
                kernel_size=params_dict['kernel_size'],
                pool_size=params_dict['pool_size'],
                dense_units=params_dict['dense_units'],
                dropout_rate=params_dict['dropout_rate']
            ).to(device)
            
            # Define loss function and optimizer
            criterion = nn.MSELoss()
            optimizer = optim.Adam(model.parameters(), lr=params_dict['learning_rate'])
            
            # Shorter patience/epochs than the final fit to keep the search fast
            model, _, _ = train_model(
                model=model,
                train_loader=train_loader,
                val_loader=val_loader,
                criterion=criterion,
                optimizer=optimizer,
                device=device,
                patience=5,
                max_epochs=50
            )
            
            # Evaluate model on this fold's validation set
            model.eval()
            y_pred_cv = []
            with torch.no_grad():
                for inputs, _ in val_loader:
                    x_num, x_cat = inputs
                    x_num, x_cat = x_num.to(device), x_cat.to(device)
                    outputs = model((x_num, x_cat))
                    y_pred_cv.append(outputs.cpu().numpy())
            
            # val_loader is not shuffled, so predictions align with y_val_cv order
            y_pred_cv = np.vstack(y_pred_cv).flatten()
            y_val_np = y_val_cv
            
            # Calculate RMSE for this fold
            rmse_cv = np.sqrt(mean_squared_error(y_val_np, y_pred_cv))
            cv_rmse_list.append(rmse_cv)
            
            print(f"    Validation RMSE: {rmse_cv:.6f}")
        
        # Mean RMSE across folds is the score for this combination
        mean_cv_rmse = np.mean(cv_rmse_list)
        print(f"  Mean CV RMSE: {mean_cv_rmse:.6f}")
        
        # Keep the combination with the lowest mean CV RMSE
        if mean_cv_rmse < best_cv_rmse:
            best_cv_rmse = mean_cv_rmse
            best_params = params_dict
            print(f"  New best parameter set found!")
    
    print(f"Best parameters: {best_params}")
    print(f"Best cross-validation RMSE: {best_cv_rmse:.6f}")
    
    return best_params

# Full pipeline for one response variable: split, tune, train, evaluate
def process_single_response(i, X_num, X_cat, y, num_categories, response_names, sample_names, param_grid, test_size, batch_size, device):
    """
    Process the i-th response variable end to end.

    Steps: train/test split, feature scaling, random-search CV for
    hyperparameters, final training with early stopping, and test-set
    evaluation.

    Returns:
        (summary_result, prediction_df) — a dict of best params + metrics,
        and a DataFrame of per-sample test predictions.
    """
    print(f"\nProcessing response variable: {response_names[i]} ({i+1}/{len(response_names)})")
    
    # Extract the i-th target column
    y_current = y[:, i]
    
    # Split data; the range() of positions lets us recover test sample names
    X_num_train, X_num_test, X_cat_train, X_cat_test, y_train, y_test, train_idx, test_idx = train_test_split(
        X_num, X_cat, y_current, range(len(sample_names)), test_size=test_size, random_state=42
    )
    
    # Get test sample names
    test_sample_names = [sample_names[idx] for idx in test_idx]
    
    # Fit the scaler on train only to avoid test-set leakage
    scaler_X = StandardScaler()
    X_num_train_scaled = scaler_X.fit_transform(X_num_train)
    X_num_test_scaled = scaler_X.transform(X_num_test)
    
    # Hyperparameter search on the training portion only.
    # NOTE(review): n_iter=15 here overrides random_search_cv's default of 10.
    best_params = random_search_cv(
        X_num_train_scaled, X_cat_train, y_train, 
        num_categories,
        param_grid, 
        n_iter=15,
        n_splits=5,
        batch_size=batch_size
    )

    # Final-training loader over ALL scaled training data
    train_dataset = MixedDataset(X_num_train_scaled, X_cat_train, y_train.reshape(-1, 1))
    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, collate_fn=mixed_collate)
    
    # Hold out 20% of the training data as an early-stopping validation set.
    # NOTE(review): this validation subset is also contained in train_loader
    # above, so early stopping is not on fully held-out data — confirm intent.
    X_num_train_final, X_num_val_final, X_cat_train_final, X_cat_val_final, y_train_final, y_val_final = train_test_split(
        X_num_train_scaled, X_cat_train, y_train, test_size=0.2, random_state=42
    )
    
    val_dataset = MixedDataset(X_num_val_final, X_cat_val_final, y_val_final.reshape(-1, 1))
    val_loader = DataLoader(val_dataset, batch_size=batch_size, collate_fn=mixed_collate)
    
    # Build the final model with the best-found hyperparameters
    final_model = MixedCNNRegression(
        numerical_input_length=X_num_train_scaled.shape[1],
        num_categories=num_categories,
        embedding_dim=best_params['embedding_dim'],
        filters=best_params['filters'],
        kernel_size=best_params['kernel_size'],
        pool_size=best_params['pool_size'],
        dense_units=best_params['dense_units'],
        dropout_rate=best_params['dropout_rate']
    ).to(device)
    
    # Define loss function and optimizer
    criterion = nn.MSELoss()
    optimizer = optim.Adam(final_model.parameters(), lr=best_params['learning_rate'])
    
    # Train final model with the full patience/epoch budget
    final_model, train_losses, val_losses = train_model(
        model=final_model,
        train_loader=train_loader,
        val_loader=val_loader,
        criterion=criterion,
        optimizer=optimizer,
        device=device,
        patience=15,
        max_epochs=200
    )
    
    # Predict on the test set. The zeros are dummy targets: mixed_collate
    # always expects a target to stack, but the values are never used here.
    final_model.eval()
    test_dataset = MixedDataset(X_num_test_scaled, X_cat_test, np.zeros((X_num_test_scaled.shape[0], 1)))
    test_loader = DataLoader(test_dataset, batch_size=batch_size, collate_fn=mixed_collate)
    
    with torch.no_grad():
        y_pred = []
        for inputs, _ in test_loader:
            x_num, x_cat = inputs
            x_num, x_cat = x_num.to(device), x_cat.to(device)
            outputs = final_model((x_num, x_cat))
            y_pred.append(outputs.cpu().numpy())
    
    # test_loader is unshuffled, so predictions align with y_test order
    y_pred = np.vstack(y_pred).flatten()
    
    # Test-set metrics
    r2 = r2_score(y_test, y_pred)
    rmse = np.sqrt(mean_squared_error(y_test, y_pred))
    
    print(f"Test R² for {response_names[i]}: {r2:.4f}")
    print(f"Test RMSE for {response_names[i]}: {rmse:.4f}")
    
    # Per-sample prediction table for this response variable
    prediction_df = pd.DataFrame({
        'Sample': test_sample_names,
        'Actual': y_test,
        'Predicted': y_pred,
        'Residual': y_test - y_pred
    })
    
    # One summary row: chosen hyperparameters plus test metrics
    summary_result = {
        'Response_Variable': response_names[i],
        'Embedding_Dim': best_params['embedding_dim'],
        'Filters': best_params['filters'],
        'Kernel_Size': best_params['kernel_size'],
        'Pool_Size': best_params['pool_size'],
        'Dense_Units': best_params['dense_units'],
        'Dropout_Rate': best_params['dropout_rate'],
        'Learning_Rate': best_params['learning_rate'],
        'R2': r2,
        'RMSE': rmse
    }
    
    return summary_result, prediction_df

# Orchestrator: run the mixed-input CNN pipeline over every response variable
def run_mixed_cnn_regression(file_path, output_path, predictions_dir, test_size=0.2, batch_size=32, n_jobs=None):
    """
    Run CNN regression with mixed data types for all response variables.

    Args:
        file_path: input Excel workbook (see load_data for the layout).
        output_path: Excel path for the per-response summary table.
        predictions_dir: directory for per-response prediction workbooks.
        test_size: test fraction for each train/test split.
        batch_size: DataLoader batch size.
        n_jobs: None for sequential processing, otherwise passed to joblib.

    Returns:
        summary_df, or None if the dimension smoke test fails.
    """
    # Load data
    sample_names, X_num, X_cat, y, response_names = load_data(file_path)
    
    # Local device (shadows the module-level global of the same name)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print(f"Using device: {device}")
    
    # Count distinct categorical levels (sizes the embedding table)
    unique_categories = np.unique(X_cat)
    num_categories = len(unique_categories)
    print(f"Number of unique categories in categorical feature: {num_categories}")
    
    # Map raw category values to contiguous integer codes 0..num_categories-1
    category_map = {cat: i for i, cat in enumerate(unique_categories)}
    X_cat_mapped = np.array([category_map[cat] for cat in X_cat])
    
    # Smoke-test a throwaway model before committing to the full run
    embedding_dim = 8  # Example embedding dimension
    test_successful = create_test_model(X_num, X_cat_mapped, num_categories, embedding_dim, device)
    if not test_successful:
        print("Dimension test failed. Please check model architecture.")
        return
    
    # Search space for random_search_cv (one value drawn per parameter)
    param_grid = {
        'embedding_dim': [4, 8, 16],
        'filters': [16, 32, 64],
        'kernel_size': [3, 5, 7],
        'pool_size': [2, 3],
        'dense_units': [32, 64, 128],
        'dropout_rate': [0.1, 0.2, 0.3],
        'learning_rate': [0.0001, 0.001, 0.01]
    }
    
    # Create the predictions directory if it doesn't exist
    if not os.path.exists(predictions_dir):
        os.makedirs(predictions_dir)
    
    if n_jobs is None:
        # Sequential path: one response variable at a time
        summary_results = []
        for i in range(y.shape[1]):
            summary_result, prediction_df = process_single_response(
                i, X_num, X_cat_mapped, y, num_categories, response_names, sample_names, 
                param_grid, test_size, batch_size, device
            )
            summary_results.append(summary_result)
            
            # Save prediction data for this response variable
            pred_output_path = os.path.join(predictions_dir, f"predictions_{response_names[i]}.xlsx")
            prediction_df.to_excel(pred_output_path, index=False)
            print(f"Predictions for {response_names[i]} saved to {pred_output_path}")
    else:
        # Parallel path: one joblib task per response variable.
        # NOTE(review): all workers share the same `device`; with CUDA this
        # means multiple processes contending for one GPU — confirm that
        # n_jobs parallelism is safe/intended on GPU machines.
        print(f"Processing {y.shape[1]} response variables with {n_jobs} jobs")
        results = Parallel(n_jobs=n_jobs)(
            delayed(process_single_response)(
                i, X_num, X_cat_mapped, y, num_categories, response_names, sample_names, 
                param_grid, test_size, batch_size, device
            ) for i in range(y.shape[1])
        )
        
        # Unpack results (joblib preserves input order)
        summary_results = []
        for i, (summary_result, prediction_df) in enumerate(results):
            summary_results.append(summary_result)
            
            # Save prediction data for this response variable
            pred_output_path = os.path.join(predictions_dir, f"predictions_{response_names[i]}.xlsx")
            prediction_df.to_excel(pred_output_path, index=False)
            print(f"Predictions for {response_names[i]} saved to {pred_output_path}")
    
    # One summary row per response variable
    summary_df = pd.DataFrame(summary_results)
    
    # Ensure the summary's parent directory exists before writing
    output_dir = os.path.dirname(output_path)
    if output_dir and not os.path.exists(output_dir):
        os.makedirs(output_dir)
    
    summary_df.to_excel(output_path, index=False)
    print(f"Summary results saved to {output_path}")
    
    # Re-read the per-response files and concatenate into one workbook
    all_predictions = []
    for i, response_name in enumerate(response_names):
        pred_file_path = os.path.join(predictions_dir, f"predictions_{response_name}.xlsx")
        pred_df = pd.read_excel(pred_file_path)
        pred_df['Response_Variable'] = response_name
        all_predictions.append(pred_df)
    
    combined_predictions = pd.concat(all_predictions, axis=0)
    combined_pred_path = os.path.join(predictions_dir, "all_predictions.xlsx")
    combined_predictions.to_excel(combined_pred_path, index=False)
    print(f"Combined predictions saved to {combined_pred_path}")
    
    return summary_df

# Example usage: run the full pipeline against one workbook
if __name__ == "__main__":
    # Input workbook plus output locations for the summary and predictions
    src_xlsx = "ML/醇提浓缩-Combined-批次+时间.xlsx"  # Your file
    summary_xlsx = "ML/醇提浓缩-Combined-批次+时间_cnn_regression_results.xlsx"
    pred_dir = "ML/醇提浓缩-Combined-批次+时间_mixed_prediction_results"

    # Run mixed CNN regression over every response variable
    results_df = run_mixed_cnn_regression(
        file_path=src_xlsx,
        output_path=summary_xlsx,
        predictions_dir=pred_dir,
        test_size=0.2,
        batch_size=32,
        n_jobs=4  # Set to None for sequential processing, -1 for all cores, or a specific number
    )