import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import accuracy_score, classification_report
from xgboost import XGBClassifier
import glob
import os
import logging
from collections import Counter

# Configure logging: basicConfig runs at import time and attaches a stream
# handler to the root logger, so INFO-level messages from this module (and
# anything else using the root logger) are emitted with timestamps.
logging.basicConfig(level=logging.INFO, 
                   format='%(asctime)s - %(levelname)s - %(message)s')

class DataLoadingError(Exception):
    """Raised when the results CSV or instance feature files cannot be loaded."""

class SolverPredictor:
    """Predict the fastest solver for problem instances (algorithm selection).

    Labels come from a CSV of per-solver runtimes (column 0: instance hash,
    remaining columns: one runtime per solver); features are 512-dimensional
    vectors stored in per-instance ``.npz`` files.  Two classifiers (XGBoost
    and an MLP) are trained to map a feature vector to the index of the
    fastest solver.
    """

    # Expected dimensionality of each instance's feature vector.
    FEATURE_DIM = 512
    # Minimum number of usable instances before training is attempted.
    MIN_INSTANCES = 10

    def __init__(self, result_file, feature_dir, timeout_value=10000.0):
        """
        Args:
            result_file: Path to the CSV of per-solver runtimes.
            feature_dir: Directory containing per-instance ``.npz`` feature
                files (presumably named "<hash>-*.npz" -- see load_data).
            timeout_value: Runtime at or above which a solver is considered
                to have timed out.

        Raises:
            FileNotFoundError: If result_file or feature_dir does not exist.
            NotADirectoryError: If feature_dir is not a directory.
        """
        self.result_file = result_file
        self.feature_dir = feature_dir
        self.timeout_value = timeout_value
        self.xgb_model = None
        self.mlp_model = None
        self.scaler = StandardScaler()
        self.solver_names = None

        # Fail fast on bad paths instead of erroring deep inside load_data().
        if not os.path.exists(result_file):
            raise FileNotFoundError(f"Results file not found: {result_file}")
        if not os.path.exists(feature_dir):
            raise FileNotFoundError(f"Feature directory not found: {feature_dir}")
        if not os.path.isdir(feature_dir):
            raise NotADirectoryError(f"Feature path is not a directory: {feature_dir}")

    def _find_best_solver(self, solver_times):
        """Return the index of the fastest solver for each instance.

        Args:
            solver_times: ``(n_instances, n_solvers)`` array of runtimes.

        Returns:
            ``(n_instances,)`` array of argmin solver indices.  Instances
            where every solver timed out still receive a label (the argmin
            of equal timeout values, i.e. solver 0); they are warned about
            but not excluded.
        """
        # Times at or above the timeout threshold count as "did not solve".
        timeout_mask = solver_times >= self.timeout_value

        # Count timeouts per instance.
        timeout_counts = np.sum(timeout_mask, axis=1)
        total_solvers = solver_times.shape[1]

        # Get minimum times and corresponding solver indices.
        best_times = np.min(solver_times, axis=1)
        best_solvers = np.argmin(solver_times, axis=1)

        # Report how often each solver wins (this is the class balance the
        # classifiers will be trained on).
        solver_counts = Counter(best_solvers)
        logging.info("\nBest solver distribution:")
        for solver_idx, count in sorted(solver_counts.items()):
            percentage = (count / len(best_solvers)) * 100
            logging.info(f"Solver {solver_idx}: {count} instances ({percentage:.2f}%)")

        # Timeout statistics.
        instances_all_timeout = np.sum(timeout_counts == total_solvers)
        if instances_all_timeout > 0:
            logging.warning(f"{instances_all_timeout} instances had all solvers timeout")

        instances_some_timeout = np.sum((timeout_counts > 0) & (timeout_counts < total_solvers))
        if instances_some_timeout > 0:
            logging.info(f"{instances_some_timeout} instances had some solvers timeout")

        # Runtime statistics over instances solved within the timeout.
        non_timeout_times = best_times[best_times < self.timeout_value]
        if len(non_timeout_times) > 0:
            logging.info(f"\nSolving time statistics for solved instances:")
            logging.info(f"Mean: {np.mean(non_timeout_times):.2f}s")
            logging.info(f"Median: {np.median(non_timeout_times):.2f}s")
            logging.info(f"Min: {np.min(non_timeout_times):.2f}s")
            logging.info(f"Max: {np.max(non_timeout_times):.2f}s")

        return best_solvers

    def load_data(self):
        """Load runtimes and features; build the (X, y) training arrays.

        Returns:
            Tuple ``(features, best_solvers)``: an ``(n, FEATURE_DIM)`` array
            and the per-row index of the fastest solver.

        Raises:
            DataLoadingError: If the results file is empty or unparseable,
                no valid feature files are found, or too few instances remain.
        """
        logging.info(f"Loading results from: {self.result_file}")
        try:
            # Read the results file; the header row names the solvers.
            results_df = pd.read_csv(self.result_file, header=0)

            if results_df.empty:
                raise DataLoadingError("Results file is empty")

            logging.info(f"Loaded results file with shape: {results_df.shape}")

            # Column headers after the hash column are taken as solver names.
            self.solver_names = results_df.columns[1:].tolist()

            # Rest of the columns are solver times (excluding the hash column).
            solver_times = results_df.iloc[:, 1:].values
            # First column contains instance hashes.
            instance_hashes = results_df.iloc[:, 0].values

            logging.info(f"Found {len(instance_hashes)} instances and {solver_times.shape[1]} solvers")

            # Find best solver for each instance.
            best_solvers = self._find_best_solver(solver_times)

            features = []
            valid_indices = []
            missing_features = []

            # Map instance hash -> feature file path.  NOTE(review): assumes
            # files are named "<hash>-*.npz"; a file without '-' keeps its
            # full basename (including ".npz") as key and can never match --
            # confirm the naming convention.
            feature_files = {os.path.basename(f).split('-')[0]: f
                           for f in glob.glob(os.path.join(self.feature_dir, "*.npz"))}

            for idx, hash_val in enumerate(instance_hashes):
                if hash_val not in feature_files:
                    missing_features.append(hash_val)
                    continue
                feature_path = feature_files[hash_val]
                try:
                    # Use the NpzFile as a context manager so its underlying
                    # zip handle is closed even on error (np.load without
                    # close() leaked one open file per instance).
                    with np.load(feature_path) as feature_data:
                        feature_array = feature_data['sorted']

                    # Skip instances with malformed feature vectors.
                    if feature_array.shape != (self.FEATURE_DIM,):
                        logging.warning(f"Unexpected feature dimensions for {hash_val}: {feature_array.shape}")
                        continue

                    features.append(feature_array)
                    valid_indices.append(idx)
                except Exception as e:
                    logging.error(f"Error loading feature file {feature_path}: {str(e)}")
                    missing_features.append(hash_val)

            if not features:
                raise DataLoadingError(
                    f"No valid feature files found in {self.feature_dir}. "
                    f"First few missing features: {missing_features[:5]}"
                )

            features = np.array(features)
            # Keep only the labels whose feature vectors actually loaded.
            best_solvers = best_solvers[valid_indices]

            logging.info(f"Successfully loaded {len(features)} instances with valid features")
            logging.info(f"Missing features for {len(missing_features)} instances")
            if missing_features:
                logging.info(f"First few missing hashes: {missing_features[:5]}")

            if len(features) < self.MIN_INSTANCES:
                raise DataLoadingError(
                    f"Too few valid instances ({len(features)}) to train models effectively. "
                    "Please ensure more feature files are available."
                )

            return features, best_solvers
        except pd.errors.EmptyDataError:
            raise DataLoadingError(f"Could not read results file: {self.result_file} (empty or invalid CSV)")
        except pd.errors.ParserError as e:
            # Chain the cause so the original pandas traceback is preserved.
            raise DataLoadingError(f"Error parsing results file {self.result_file}: {str(e)}") from e

    def train_models(self, test_size=0.9, random_state=42):
        """Train the XGBoost and MLP classifiers on a train/test split.

        Args:
            test_size: Fraction of instances held out for evaluation.
                NOTE(review): the 0.9 default trains on only 10% of the
                data, which is unusual -- confirm this is intentional.
            random_state: Seed for the split and both models.

        Returns:
            Tuple ``(X_test_scaled, y_test)`` for further evaluation.

        Raises:
            DataLoadingError: Propagated from load_data().
        """
        try:
            # Load and split data.
            X, y = self.load_data()

            logging.info(f"Splitting dataset with test_size={test_size}")
            X_train, X_test, y_train, y_test = train_test_split(
                X, y, test_size=test_size, random_state=random_state
            )

            # Log the class balance of the training labels.
            unique_solvers, solver_counts = np.unique(y_train, return_counts=True)
            logging.info("\nTraining set solver distribution:")
            for solver_idx, count in zip(unique_solvers, solver_counts):
                solver_name = f"({self.solver_names[solver_idx]})" if self.solver_names else ""
                percentage = (count / len(y_train)) * 100
                logging.info(f"Solver {solver_idx} {solver_name}: {count} instances ({percentage:.2f}%)")

            # Dumping the full label array is debug-level noise, not routine
            # operational logging.
            logging.debug("\nFull training set labels (solver indices):")
            logging.debug(f"y_train shape: {y_train.shape}")
            logging.debug("y_train values:")
            logging.debug(y_train)

            logging.info(f"Training set size: {len(X_train)}, Test set size: {len(X_test)}")

            # Fit the scaler on training data only to avoid test-set leakage.
            X_train_scaled = self.scaler.fit_transform(X_train)
            X_test_scaled = self.scaler.transform(X_test)

            # Train XGBoost.  NOTE(review): XGBClassifier requires class
            # labels to be contiguous 0..K-1; if some solver is never the
            # best in y_train, fit() will raise -- confirm with real data.
            logging.info("Training XGBoost model...")
            self.xgb_model = XGBClassifier(
                n_estimators=100,
                learning_rate=0.1,
                max_depth=5,
                random_state=random_state
            )
            self.xgb_model.fit(X_train_scaled, y_train)

            # Train MLP.
            logging.info("Training MLP model...")
            self.mlp_model = MLPClassifier(
                hidden_layer_sizes=(256, 128, 64),
                max_iter=1000,
                random_state=random_state
            )
            self.mlp_model.fit(X_train_scaled, y_train)

            # Evaluate both models on the held-out split.
            logging.info("\nModel Evaluation:")
            logging.info("XGBoost Results:")
            self._evaluate_model(self.xgb_model, X_test_scaled, y_test, "XGBoost")

            logging.info("\nMLP Results:")
            self._evaluate_model(self.mlp_model, X_test_scaled, y_test, "MLP")

            return X_test_scaled, y_test

        except Exception as e:
            logging.error(f"Error during model training: {str(e)}")
            raise

    def _evaluate_model(self, model, X_test, y_test, model_name):
        """Log accuracy and a classification report for a fitted model."""
        y_pred = model.predict(X_test)
        accuracy = accuracy_score(y_test, y_pred)
        logging.info(f"{model_name} Accuracy: {accuracy:.4f}")
        logging.info("\nClassification Report:")
        logging.info("\n" + classification_report(y_test, y_pred))

    def predict(self, features):
        """Predict the best solver for new instances using both models.

        Args:
            features: Array-like of shape ``(n_instances, FEATURE_DIM)``.

        Returns:
            Dict with per-model predicted solver indices, plus solver-name
            lists when solver names are known.

        Raises:
            RuntimeError: If the models have not been trained yet.
            ValueError: If ``features`` has the wrong shape.
        """
        if self.xgb_model is None or self.mlp_model is None:
            raise RuntimeError("Models have not been trained. Call train_models() first.")

        features = np.asarray(features)
        # Guard 1-D input explicitly: indexing shape[1] on it would raise a
        # confusing IndexError instead of a clear validation error.
        if features.ndim != 2:
            raise ValueError(f"Expected a 2-D feature array, got shape {features.shape}")
        if features.shape[1] != self.FEATURE_DIM:
            raise ValueError(f"Expected features with 512 dimensions, got {features.shape[1]}")

        features_scaled = self.scaler.transform(features)
        xgb_pred = self.xgb_model.predict(features_scaled)
        mlp_pred = self.mlp_model.predict(features_scaled)

        predictions = {
            'xgboost_prediction': xgb_pred,
            'mlp_prediction': mlp_pred
        }

        # Add solver names if available.
        if self.solver_names:
            predictions['xgboost_solver_names'] = [self.solver_names[i] for i in xgb_pred]
            predictions['mlp_solver_names'] = [self.solver_names[i] for i in mlp_pred]

        return predictions

# Example usage
if __name__ == "__main__":
    try:
        # Paths below assume the 2022 competition results and the shared
        # feature directory; adjust to match the local layout.
        solver_predictor = SolverPredictor(
            result_file="comp-result/res2022",
            feature_dir="../all-instance-scope-features",
            timeout_value=10000.0  # Adjust if using a different timeout value
        )

        # Fit both classifiers and keep the held-out split around.
        X_test, y_test = solver_predictor.train_models()

        # To score fresh instances, build a numpy array of shape
        # (n_instances, 512) and call:
        #   predictions = solver_predictor.predict(new_features)

    except DataLoadingError as e:
        logging.error(f"Data loading error: {str(e)}")
    except FileNotFoundError as e:
        logging.error(f"File not found error: {str(e)}")
    except Exception as e:
        logging.error(f"Unexpected error: {str(e)}")