import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectKBest, f_classif
from PyQt5.QtCore import QObject, pyqtSignal

class DataProcessor(QObject):
    """Load, clean, and extract features from tabular data.

    Emits Qt signals so a GUI thread can track long-running work:
      - ``progress_updated(int)``: completion percentage (0-100) of the
        current operation.
      - ``status_updated(str)``: human-readable status or error message.

    All public methods report failure by emitting a status message and
    returning ``False``/``None`` rather than raising, so callers in a UI
    event loop never see an exception.
    """

    # Signals for progress/status updates consumed by the UI layer.
    progress_updated = pyqtSignal(int)
    status_updated = pyqtSignal(str)

    def __init__(self):
        super().__init__()
        self.data = None      # pd.DataFrame once load_data() succeeds
        self.features = None  # dict populated by extract_features()
        self.labels = None    # reserved; never written by this class

    def load_data(self, file_path):
        """Load a CSV or Excel file into ``self.data``.

        Args:
            file_path: Path to a ``.csv``, ``.xlsx`` or ``.xls`` file.

        Returns:
            ``True`` on success, ``False`` otherwise (an error status is
            emitted with the failure reason).
        """
        try:
            if file_path.endswith('.csv'):
                self.data = pd.read_csv(file_path)
            # pd.read_excel handles both modern and legacy Excel formats.
            elif file_path.endswith(('.xlsx', '.xls')):
                self.data = pd.read_excel(file_path)
            else:
                raise ValueError("Unsupported file format")

            self.status_updated.emit("Data loaded successfully")
            return True
        except Exception as e:
            self.status_updated.emit(f"Error loading data: {str(e)}")
            return False

    def clean_data(self):
        """Deduplicate, impute missing values, and drop IQR outliers in place.

        Numeric NaNs are filled with the column mean, categorical NaNs with
        the column mode, then rows outside the 1.5*IQR fence of any numeric
        column are removed.

        Returns:
            ``True`` on success, ``False`` if no data is loaded or cleaning
            fails (an error status is emitted).
        """
        if self.data is None:
            self.status_updated.emit("No data loaded")
            return False

        try:
            # Remove duplicate rows.
            self.progress_updated.emit(20)
            self.data = self.data.drop_duplicates()

            # Impute numeric missing values with the column mean.
            self.progress_updated.emit(40)
            numeric_columns = self.data.select_dtypes(include=[np.number]).columns
            if len(numeric_columns) > 0:
                self.data[numeric_columns] = self.data[numeric_columns].fillna(
                    self.data[numeric_columns].mean())

            # Impute categorical missing values with the column mode.
            # Guard: .mode() on an empty selection has no row 0, so the
            # original unconditional .iloc[0] raised IndexError whenever
            # there were no object columns (or no rows).
            self.progress_updated.emit(60)
            categorical_columns = self.data.select_dtypes(include=['object']).columns
            if len(categorical_columns) > 0 and len(self.data) > 0:
                self.data[categorical_columns] = self.data[categorical_columns].fillna(
                    self.data[categorical_columns].mode().iloc[0])

            # Remove outliers with the 1.5*IQR rule, one column at a time.
            # NOTE: quantiles are recomputed on the progressively filtered
            # frame, so column order can affect which rows survive — kept
            # as-is to preserve the established behavior.
            self.progress_updated.emit(80)
            for column in numeric_columns:
                Q1 = self.data[column].quantile(0.25)
                Q3 = self.data[column].quantile(0.75)
                IQR = Q3 - Q1
                self.data = self.data[~((self.data[column] < (Q1 - 1.5 * IQR)) |
                                        (self.data[column] > (Q3 + 1.5 * IQR)))]

            self.progress_updated.emit(100)
            self.status_updated.emit("Data cleaned successfully")
            return True
        except Exception as e:
            self.status_updated.emit(f"Error cleaning data: {str(e)}")
            return False

    def _numeric_feature_frame(self):
        """Return the numeric columns of ``self.data`` minus the label.

        The ``target`` column (when present and numeric) must never be part
        of the feature matrix — including it would leak the label into PCA
        and univariate selection.
        """
        numeric_data = self.data.select_dtypes(include=[np.number])
        if 'target' in numeric_data.columns:
            numeric_data = numeric_data.drop(columns=['target'])
        return numeric_data

    def extract_features(self, method='all'):
        """Extract features from the cleaned data into ``self.features``.

        Args:
            method: One of ``'all'``, ``'pca'``, ``'statistical'`` or
                ``'select_k_best'`` selecting which feature sets to compute.

        Returns:
            ``True`` on success, ``False`` if no data is loaded, no numeric
            columns exist, or extraction fails.
        """
        if self.data is None:
            self.status_updated.emit("No data loaded")
            return False

        try:
            # Feature matrix only — the target label is excluded (leakage).
            numeric_data = self._numeric_feature_frame()

            if len(numeric_data.columns) == 0:
                self.status_updated.emit("No numeric columns found")
                return False

            # Standardize so PCA / F-tests are not dominated by scale.
            self.progress_updated.emit(20)
            scaler = StandardScaler()
            scaled_data = scaler.fit_transform(numeric_data)

            features = {}

            if method in ['all', 'pca']:
                # PCA: n_components must not exceed either dimension of the
                # input, otherwise sklearn raises for small row counts.
                self.progress_updated.emit(40)
                n_components = min(5, len(numeric_data.columns), len(numeric_data))
                pca = PCA(n_components=n_components)
                features['pca'] = pca.fit_transform(scaled_data)

            if method in ['all', 'statistical']:
                # Per-column summary statistics on the raw (unscaled) values.
                self.progress_updated.emit(60)
                features['statistical'] = {
                    'mean': numeric_data.mean(),
                    'std': numeric_data.std(),
                    'skew': numeric_data.skew(),
                    'kurtosis': numeric_data.kurtosis()
                }

            if method in ['all', 'select_k_best']:
                # Univariate ANOVA F-test selection; needs a label column.
                self.progress_updated.emit(80)
                if 'target' in self.data.columns:
                    k = min(5, len(numeric_data.columns))
                    selector = SelectKBest(score_func=f_classif, k=k)
                    features['selected'] = selector.fit_transform(
                        scaled_data, self.data['target'])

            self.features = features
            self.progress_updated.emit(100)
            self.status_updated.emit("Features extracted successfully")
            return True
        except Exception as e:
            self.status_updated.emit(f"Error extracting features: {str(e)}")
            return False

    def get_feature_importance(self):
        """Calculate feature-importance diagnostics.

        Returns:
            A dict with up to two entries, or ``None`` on failure /
            missing prerequisites:
              - ``'correlation'``: |corr| of each feature with ``target``,
                sorted descending (target itself excluded — its trivial
                self-correlation of 1.0 would always top the ranking).
              - ``'pca_explained_variance'``: explained-variance ratio per
                principal component, computed on *standardized* data so it
                is consistent with the PCA from :meth:`extract_features`.
        """
        if self.data is None or self.features is None:
            return None

        try:
            feature_data = self._numeric_feature_frame()
            importance = {}

            # Correlation of each feature with the target, if available.
            if 'target' in self.data.columns:
                correlations = feature_data.corrwith(self.data['target'])
                importance['correlation'] = correlations.abs().sort_values(ascending=False)

            # Explained variance per component, matching extract_features'
            # preprocessing (standardize, then fit PCA).
            if 'pca' in self.features:
                scaled = StandardScaler().fit_transform(feature_data)
                pca = PCA().fit(scaled)
                importance['pca_explained_variance'] = pd.Series(
                    pca.explained_variance_ratio_,
                    index=[f'PC{i+1}' for i in range(len(pca.explained_variance_ratio_))]
                )

            return importance
        except Exception as e:
            self.status_updated.emit(f"Error calculating feature importance: {str(e)}")
            return None

    def get_summary_statistics(self):
        """Return a summary of the loaded data.

        Returns:
            A dict with ``'basic_stats'`` (``describe()``), ``'missing_values'``
            (NaN count per column), ``'data_types'`` and ``'shape'``, or
            ``None`` if no data is loaded or summarization fails.
        """
        if self.data is None:
            return None

        try:
            summary = {
                'basic_stats': self.data.describe(),
                'missing_values': self.data.isnull().sum(),
                'data_types': self.data.dtypes,
                'shape': self.data.shape
            }
            return summary
        except Exception as e:
            self.status_updated.emit(f"Error calculating summary statistics: {str(e)}")
            return None