# ====================== PART 7: ALTERNATIVE APPROACH - WEIGHTED LSSVM MODEL ======================

# Load necessary additional libraries
library(kernlab)  # For LSSVM implementation
library(caret)    # For cross-validation and tuning

# Function to create and train a weighted LSSVM model with tuning
# Train a weighted SVM (RBF kernel) ensemble model with hyperparameter tuning.
#
# Each base model's predictions (columns named "Pred_<model>") are multiplied
# by that model's weight, and the weighted predictions become the features of
# an SVM regression fitted via caret. If SVM training fails, the function
# falls back to ordinary linear regression.
#
# Args:
#   train_data, test_data: data frames holding the response column named by
#     `response_var` plus one "Pred_<model>" column per base model.
#   weights: named numeric vector; names match the "<model>" part of the
#     Pred_ columns. Models with missing/NA/non-positive weights are dropped.
#   response_var: name of the response column (default "Actual" preserves the
#     previous hard-coded behavior).
#   tuning_method: "cv", "repeatedcv", or anything else for bootstrap.
#   cross_val_folds: number of folds for (repeated) cross-validation.
#
# Returns: a list with `model`, `predictions` (on test_data), `train_data`,
#   `test_data`, and either `best_parameters` (tuned SVM) or `fallback = TRUE`
#   (linear-regression fallback).
create_weighted_lssvm_model <- function(train_data, test_data, weights,
                                        response_var = "Actual",
                                        tuning_method = "cv", cross_val_folds = 5) {
  # Step 1: Identify prediction columns (one per base model)
  pred_cols <- grep("^Pred_", colnames(train_data), value = TRUE)

  if (length(pred_cols) == 0) {
    stop("No prediction columns found in the data")
  }

  # Validate the response column before indexing with it (the original
  # silently ignored `response_var` and hard-coded "Actual")
  if (!response_var %in% colnames(train_data) ||
      !response_var %in% colnames(test_data)) {
    stop("Response column '", response_var, "' not found in train/test data")
  }

  # Step 2: Create weighted features (prediction * model weight)
  train_weighted <- data.frame(response = train_data[[response_var]])
  test_weighted <- data.frame(response = test_data[[response_var]])

  for (pred_col in pred_cols) {
    # Extract model name from column name (remove "Pred_" prefix)
    model_name <- sub("^Pred_", "", pred_col)

    # Only use models with a known, positive weight
    if (model_name %in% names(weights) &&
        !is.na(weights[model_name]) && weights[model_name] > 0) {
      weighted_col_name <- paste0("weighted_", model_name)
      train_weighted[[weighted_col_name]] <- train_data[[pred_col]] * weights[model_name]
      test_weighted[[weighted_col_name]] <- test_data[[pred_col]] * weights[model_name]
    }
  }

  # Bail out if no base model survived the weight filter
  if (ncol(train_weighted) <= 1) {
    stop("No weighted columns were created, possibly due to missing weights")
  }

  # Step 3: Resampling scheme for caret::train
  if (tuning_method == "cv") {
    trControl <- trainControl(
      method = "cv",
      number = cross_val_folds,
      verboseIter = TRUE
    )
  } else if (tuning_method == "repeatedcv") {
    trControl <- trainControl(
      method = "repeatedcv",
      number = cross_val_folds,
      repeats = 3,
      verboseIter = TRUE
    )
  } else {
    # Any other value falls back to bootstrap resampling
    trControl <- trainControl(
      method = "boot",
      number = 25,
      verboseIter = TRUE
    )
  }

  # Tuning grid for the RBF kernel: 5 x 5 log-spaced grid over sigma and C
  sigma_range <- 10^seq(-3, 1, length.out = 5)
  C_range <- 10^seq(-2, 3, length.out = 5)
  tuneGrid <- expand.grid(sigma = sigma_range, C = C_range)

  # Step 4: Train the model with tuning
  cat("Training LSSVM model with", nrow(tuneGrid), "parameter combinations using",
      tuning_method, "with", cross_val_folds, "folds\n")

  set.seed(123)  # For reproducible resampling

  # Train the SVM; on any error, fall back to linear regression so the
  # surrounding pipeline keeps running. NOTE(review): "svmRadial" is caret's
  # standard RBF SVM, not a true least-squares SVM — confirm this is intended.
  tryCatch({
    lssvm_model <- train(
      response ~ .,
      data = train_weighted,
      method = "svmRadial",
      trControl = trControl,
      tuneGrid = tuneGrid,
      preProcess = c("center", "scale")  # Standardize predictors
    )

    list(
      model = lssvm_model,
      predictions = predict(lssvm_model, newdata = test_weighted),
      train_data = train_weighted,
      test_data = test_weighted,
      best_parameters = lssvm_model$bestTune
    )
  }, error = function(e) {
    cat("LSSVM model failed, falling back to Linear Regression\n")
    cat("Error:", conditionMessage(e), "\n")

    # Train linear model instead
    lm_model <- lm(response ~ ., data = train_weighted)

    list(
      model = lm_model,
      predictions = predict(lm_model, newdata = test_weighted),
      train_data = train_weighted,
      test_data = test_weighted,
      fallback = TRUE
    )
  })
}

# Run the alternative weighted-LSSVM approach for every response variable.
#
# For each variable in `all_variables`: collects the base models' predictions,
# aligns them on common samples, weights them by the OLS weights saved in
# "bma_ols_weights.xlsx", trains a weighted SVM (with LM fallback), evaluates
# it, compares against BMA, and writes plots and per-variable model details.
#
# Args:
#   all_predictions_data: named list of per-model prediction data frames, each
#     with columns Variable, Sample, Actual, Predicted.
#   all_variables: character vector of response variables to process.
#   output_dir: directory where "bma_ols_weights.xlsx" lives and where the
#     "Alternative_Weighted_LSSVM" subdirectory is created.
#
# Returns: a data frame of per-variable metrics plus a final "MEAN" row; also
#   written to "alternative_lssvm_performance.xlsx".
#
# NOTE(review): this function still reads the global `bma_performance` for the
# BMA comparison — confirm it is defined by an earlier part of the pipeline.
perform_alternative_weighted_lssvm <- function(all_predictions_data, all_variables, output_dir) {
  # Create directory for alternative approach
  alt_dir <- file.path(output_dir, "Alternative_Weighted_LSSVM")
  dir.create(alt_dir, showWarnings = FALSE, recursive = TRUE)

  # Initialize results dataframe
  alt_model_results <- data.frame(
    Response_Variable = character(),
    Alt_LSSVM_R2 = numeric(),
    Alt_LSSVM_RMSE = numeric(),
    Alt_LSSVM_sMAPE = numeric(),
    Alt_LSSVM_RRMSE = numeric(),
    BMA_R2 = numeric(),
    Improvement_Over_BMA = numeric(),
    Best_Sigma = numeric(),
    Best_C = numeric(),
    Fallback_To_LM = logical(),
    stringsAsFactors = FALSE
  )

  # Load the weight information - use OLS weights by default
  ols_weights_df <- read_excel(file.path(output_dir, "bma_ols_weights.xlsx"))

  # Process each response variable
  for (var in all_variables) {
    cat("\nProcessing Alternative LSSVM Model for variable:", var, "\n")

    # Step 1: Collect all model predictions for this variable.
    # (Fixed: the original read the globals `models`/`predictions_data`
    # instead of the `all_predictions_data` argument.)
    var_preds_list <- list()

    for (model_name in names(all_predictions_data)) {
      if (var %in% unique(all_predictions_data[[model_name]]$Variable)) {
        var_preds <- all_predictions_data[[model_name]] %>%
          filter(Variable == var)
        var_preds_list[[model_name]] <- var_preds
      }
    }

    # Filter to models that have predictions for this variable
    non_empty_preds <- var_preds_list[sapply(var_preds_list, nrow) > 0]

    # Step 2: Combine predictions with actual values into a single dataset
    if (length(non_empty_preds) == 0) {
      cat("No predictions found for", var, "- skipping\n")
      next
    }

    # Find samples present in every model's predictions
    sample_sets <- lapply(non_empty_preds, function(df) df$Sample)
    common_samples <- Reduce(intersect, sample_sets)

    if (length(common_samples) == 0) {
      cat("No common samples found for", var, "- skipping\n")
      next
    }

    # Filter each model's predictions to the common samples, in sample order
    for (model_name in names(non_empty_preds)) {
      non_empty_preds[[model_name]] <- non_empty_preds[[model_name]] %>%
        filter(Sample %in% common_samples) %>%
        arrange(Sample)  # Ensure same order across models
    }

    # Create a comprehensive dataset using aligned data
    combined_data <- non_empty_preds[[1]][, c("Sample", "Actual")]

    for (model_name in names(non_empty_preds)) {
      # Verify row count alignment; drop misaligned models instead of failing
      if (nrow(non_empty_preds[[model_name]]) != nrow(combined_data)) {
        cat("Warning: Row count mismatch for model", model_name,
            "- expected", nrow(combined_data), "but got",
            nrow(non_empty_preds[[model_name]]), "\n")
        next
      }
      combined_data[[paste0("Pred_", model_name)]] <- non_empty_preds[[model_name]]$Predicted
    }

    # Step 3: Get OLS weights for this variable
    var_row <- which(ols_weights_df$Response_Variable == var)
    if (length(var_row) == 0) {
      cat("No weights found for", var, "- skipping\n")
      next
    }

    # First column is Response_Variable; the rest are per-model weights
    var_weights <- as.numeric(ols_weights_df[var_row, -1])
    names(var_weights) <- colnames(ols_weights_df)[-1]

    # Step 4: Split data into train and test sets
    data_splits <- split_data(combined_data, train_ratio = 0.7)
    train_data <- data_splits$train
    test_data <- data_splits$test

    # Step 5: Create and train the new weighted LSSVM model
    cat("Training weighted LSSVM model for", var, "\n")
    lssvm_result <- create_weighted_lssvm_model(
      train_data = train_data,
      test_data = test_data,
      weights = var_weights,
      response_var = "Actual",
      tuning_method = "cv",
      cross_val_folds = 5
    )

    # Step 6: Evaluate the new model on the held-out test set
    metrics <- calculate_metrics(
      test_data$Actual,
      lssvm_result$predictions
    )

    # Get BMA performance for comparison (from global `bma_performance`)
    bma_var_performance <- bma_performance %>%
      filter(Response_Variable == var)

    if (nrow(bma_var_performance) > 0) {
      bma_r2 <- bma_var_performance$Test_R2[1]
      improvement <- metrics$r2 - bma_r2
    } else {
      bma_r2 <- NA
      improvement <- NA
    }

    # Extract best tuned parameters if the SVM succeeded
    if (!is.null(lssvm_result$best_parameters)) {
      best_sigma <- lssvm_result$best_parameters$sigma
      best_C <- lssvm_result$best_parameters$C
    } else {
      best_sigma <- NA
      best_C <- NA
    }

    # Check if fallback to LM was used (fallback element absent when SVM ok)
    fallback <- isTRUE(lssvm_result$fallback)

    # Add to results
    alt_model_results <- rbind(
      alt_model_results,
      data.frame(
        Response_Variable = var,
        Alt_LSSVM_R2 = metrics$r2,
        Alt_LSSVM_RMSE = metrics$rmse,
        Alt_LSSVM_sMAPE = metrics$smape,
        Alt_LSSVM_RRMSE = metrics$rrmse,
        BMA_R2 = bma_r2,
        Improvement_Over_BMA = improvement,
        Best_Sigma = best_sigma,
        Best_C = best_C,
        Fallback_To_LM = fallback
      )
    )

    # Create prediction plot
    alt_test_plot <- create_prediction_plot(
      test_data$Actual,
      lssvm_result$predictions,
      paste(var, "(Weighted LSSVM Model)"),
      metrics$r2,
      metrics$smape
    )

    # Save the plot
    ggsave(
      filename = file.path(alt_dir, paste0(var, "_alt_weighted_lssvm.png")),
      plot = alt_test_plot,
      width = 8,
      height = 6,
      dpi = 300
    )

    # Create residual plot
    residuals <- test_data$Actual - lssvm_result$predictions
    residual_df <- data.frame(
      Predicted = lssvm_result$predictions,
      Residual = residuals
    )

    residual_plot <- ggplot(residual_df, aes(x = Predicted, y = Residual)) +
      geom_point(alpha = 0.7, color = "blue", size = 3) +
      geom_hline(yintercept = 0, color = "red", linetype = "dashed") +
      labs(title = paste(var, "- Residual Plot (Weighted LSSVM)"),
           x = "Predicted Value",
           y = "Residual") +
      theme_bw() +
      theme(
        panel.grid = element_blank(),
        axis.line = element_line(color = "black"),
        panel.border = element_rect(color = "black", fill = NA, size = 1),
        axis.title = element_text(size = 20, color = "black"),
        axis.text = element_text(size = 14, color = "black"),
        title = element_text(size = 20)
      )

    # Save the residual plot
    ggsave(
      filename = file.path(alt_dir, paste0(var, "_alt_weighted_lssvm_residuals.png")),
      plot = residual_plot,
      width = 8,
      height = 6,
      dpi = 300
    )

    # Save model details (parameters differ for SVM vs LM fallback)
    if (!fallback) {
      # For LSSVM model
      model_info <- data.frame(
        Parameter = c("Best_Sigma", "Best_C", "R2", "RMSE", "sMAPE", "RRMSE"),
        Value = c(best_sigma, best_C, metrics$r2, metrics$rmse, metrics$smape, metrics$rrmse)
      )
    } else {
      # For Linear model fallback
      model_info <- data.frame(
        Parameter = c("Model_Type", "R2", "RMSE", "sMAPE", "RRMSE"),
        Value = c("Linear_Regression_Fallback", metrics$r2, metrics$rmse, metrics$smape, metrics$rrmse)
      )
    }

    write_xlsx(model_info, file.path(alt_dir, paste0(var, "_model_details.xlsx")))

    # Print summary
    cat("Weighted LSSVM model performance:\n")
    cat("R²:", round(metrics$r2, 4), "\n")
    cat("RMSE:", round(metrics$rmse, 4), "\n")
    cat("sMAPE:", round(metrics$smape, 4), "%\n")
    cat("RRMSE:", round(metrics$rrmse, 4), "%\n")

    if (!is.na(improvement)) {
      cat("Improvement over BMA:", round(improvement, 4), "\n")
    }

    if (!fallback) {
      cat("Best parameters - Sigma:", best_sigma, "C:", best_C, "\n")
    } else {
      cat("Note: Fell back to linear regression due to LSSVM training issues\n")
    }
  }

  # Append a MEAN summary row if we have any results
  if (nrow(alt_model_results) > 0) {
    mean_row <- data.frame(
      Response_Variable = "MEAN",
      Alt_LSSVM_R2 = mean(alt_model_results$Alt_LSSVM_R2, na.rm = TRUE),
      Alt_LSSVM_RMSE = mean(alt_model_results$Alt_LSSVM_RMSE, na.rm = TRUE),
      Alt_LSSVM_sMAPE = mean(alt_model_results$Alt_LSSVM_sMAPE, na.rm = TRUE),
      Alt_LSSVM_RRMSE = mean(alt_model_results$Alt_LSSVM_RRMSE, na.rm = TRUE),
      BMA_R2 = mean(alt_model_results$BMA_R2, na.rm = TRUE),
      Improvement_Over_BMA = mean(alt_model_results$Improvement_Over_BMA, na.rm = TRUE),
      Best_Sigma = NA,
      Best_C = NA,
      Fallback_To_LM = NA
    )

    alt_model_results <- rbind(alt_model_results, mean_row)
  }

  # Save overall results
  write_xlsx(alt_model_results, file.path(alt_dir, "alternative_lssvm_performance.xlsx"))

  return(alt_model_results)
}

# Call the function to implement the alternative LSSVM approach.
# NOTE(review): the global `predictions_data` is passed here, but the function
# as written above reads the globals `predictions_data`/`models` directly and
# ignores this argument — confirm which data source is intended.
cat("\n===== Implementing Alternative Weighted LSSVM Model Approach =====\n")
alt_lssvm_results <- perform_alternative_weighted_lssvm(predictions_data, all_variables, output_dir)

# Compare BMA and Alternative LSSVM Model approaches (per-variable R² table)
cat("\n===== Comparison of BMA vs Alternative Weighted LSSVM Model =====\n")
print(alt_lssvm_results[, c("Response_Variable", "BMA_R2", "Alt_LSSVM_R2", "Improvement_Over_BMA")])

# Analyze which variables had the most improvement, excluding the summary row
if (nrow(alt_lssvm_results) > 1) {  # More than just the MEAN row
  improvement_analysis <- alt_lssvm_results %>%
    filter(Response_Variable != "MEAN") %>%
    arrange(desc(Improvement_Over_BMA))

  # Show the top 5 variables by improvement over BMA
  cat("\n===== Variables with Greatest Improvement from LSSVM Approach =====\n")
  print(head(improvement_analysis[, c("Response_Variable", "BMA_R2", "Alt_LSSVM_R2", "Improvement_Over_BMA")], 5))

  # Check how many variables improved with LSSVM (positive improvement only)
  improved_vars <- sum(improvement_analysis$Improvement_Over_BMA > 0, na.rm = TRUE)
  total_vars <- nrow(improvement_analysis)

  cat("\nWeighted LSSVM improved performance for", improved_vars, "out of", total_vars,
      "variables (", round(improved_vars/total_vars*100, 1), "%)\n")
}
