import pandas as pd
import numpy as np
import logging 
from itertools import combinations

import matplotlib.pyplot as plt
import seaborn as sns
import random
from sklearn.model_selection import StratifiedKFold
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from sklearn.feature_selection import f_classif
from sklearn.preprocessing import LabelEncoder

from utils import print_warning, print_function, cal_info_gain_rate

class FeatureSelector():
    '''
    Select features from a feature set using the most popular approaches:
    filter methods (missing rate, variance, info gain rate, ANOVA F value),
    a wrapper method (recursive feature elimination with cross-validation)
    and an embedded method (model importances).

    Selected features accumulate in ``left_features_``; each ``filter_*`` /
    ``*_selector`` call removes features from it and returns ``self`` so the
    calls can be chained.

    Parameters
    ----------
    features : DataFrame, shape(n_samples, n_features)
        Features should be DataFrame type in pandas module.

    labels : array, shape(n_samples, ), optional
        Each value is the label of one sample corresponding to each row in
        features. Required by the supervised selectors
        (filter_cat_features, filter_continuous_features, wrapper_selector,
        embedded_selector).

    Attributes
    ----------
    left_features_ : list
        Features that survived every selection step applied so far.
    missing_rates_ : Series
        Indexes are features, values are corresponding missing rate.
    variances_ : Series
        Indexes are features, values are corresponding variances.
    corr_ : DataFrame
        Indexes are features, columns are also features, values are
        corresponding correlation coefficient.
    Fs_ : Series
        Indexes are features, values are corresponding ANOVA F value.
    info_gain_rates_ : Series
        Indexes are features, values are corresponding information gain rate.
    '''

    def __init__(self, features, labels=None):
        self.features = features
        self.labels = labels
        self.left_features_ = list(self.features.columns)

        # Statistics are computed lazily by the _compute_* helpers.
        self.missing_rates_ = None
        self.variances_ = None
        self.corr_ = None
        self.Fs_ = None
        self.info_gain_rates_ = None

    def plot_missing_rates(self):
        '''
        Plot a histogram of the missing rates. The x axis denotes the missing
        rate range, the y axis the corresponding number of features.
        '''
        if self.missing_rates_ is None:
            self._compute_missing_rates()

        plt.figure()
        plt.hist(self.missing_rates_)
        plt.xticks(np.linspace(0, 1, 11))
        plt.xlabel('missing_rate')
        plt.ylabel('num of samples')
        plt.title("Proportion diagram of missing rates")

    def plot_variances(self):
        '''
        Plot a histogram of the variances. The x axis denotes the variance
        range, the y axis the corresponding number of features.
        '''
        if self.variances_ is None:
            self._compute_variances()

        plt.figure()
        plt.hist(self.variances_)
        plt.xlabel('variance')
        plt.ylabel('num of samples')
        plt.title("Proportion diagram of variances")

    def plot_corr(self):
        '''
        Plot the feature-to-feature correlation heatmap.
        '''
        if self.corr_ is None:
            self._compute_corr()

        plt.figure()
        sns.heatmap(self.corr_, annot=True)

    def _compute_missing_rates(self):
        '''Compute the missing rate (fraction of nulls) of every feature.'''
        self.missing_rates_ = self.features.isnull().sum() / self.features.shape[0]

    def _compute_variances(self):
        '''Compute the variance of every feature.'''
        self.variances_ = self.features.var(axis=0)

    def _compute_corr(self):
        '''Compute the correlation coefficients of each pair of features.'''
        self.corr_ = self.features.corr()

    def _remove_features(self, component, removed_features):
        '''
        Remove features from left_features_ and print the removed features.

        Parameters
        ----------
        component : str
            The component where features are removed
            (e.g. 'filter_high_missing', 'embedded_selector').
        removed_features : list
            Features to be removed.
        '''
        print_function("%s: remove %s" % (component, str(removed_features)))
        # Keep the original column order (a set difference would scramble it).
        removed = set(removed_features)
        self.left_features_ = [f for f in self.left_features_ if f not in removed]

    def _check_labels(self):
        '''Raise if no labels were provided; supervised selectors need them.'''
        if self.labels is None:
            raise RuntimeError("labels is None: pass the labels parameter to "
                               "the constructor to use supervised selectors!")

    def filter_high_missing(self, threshold=1.):
        '''
        Remove the features whose missing rate is greater than the threshold.

        Parameters
        ----------
        threshold : float, optional
            Features whose missing rate is greater than the threshold will be
            removed.

        Returns
        -------
            self
        '''
        if self.missing_rates_ is None:
            self._compute_missing_rates()

        removed_features = list(self.missing_rates_[self.missing_rates_ > threshold].index)
        self._remove_features("filter_high_missing", removed_features)
        return self

    def filter_low_variance(self, threshold=0.):
        '''
        Remove the features whose variance is lower than the threshold.

        Parameters
        ----------
        threshold : float, optional
            Features whose variance is lower than the threshold will be removed.

        Returns
        -------
            self
        '''
        if self.variances_ is None:
            self._compute_variances()

        removed_features = list(self.variances_[self.variances_ < threshold].index)
        # BUG FIX: the component label previously said "filter_high_missing".
        self._remove_features("filter_low_variance", removed_features)
        return self

    def filter_cat_features(self, cat_features, top_k=1):
        '''
        Remove category features with lower info gain rate, keep the top k.

        Parameters
        ----------
        cat_features : list
            Feature name list with category values.
        top_k : int, default is 1
            Keep the top k features with greater info gain rates.

        Returns
        -------
            self

        Raises
        ------
        RuntimeError
            If labels were not provided to the constructor.
        '''
        self._check_labels()

        if self.info_gain_rates_ is None:
            igrs = [cal_info_gain_rate(self.features[feature].values, self.labels)
                    for feature in cat_features]
            self.info_gain_rates_ = pd.Series(data=igrs, index=cat_features)

        removed_features = list(self.info_gain_rates_.sort_values(ascending=False).index[top_k:])
        self._remove_features("filter_cat_features", removed_features)
        return self

    def filter_continuous_features(self, continuous_features, top_k=1):
        '''
        Remove continuous features with lower ANOVA F value, keep the top k.

        Parameters
        ----------
        continuous_features : list
            Feature name list with continuous values.
        top_k : int, default is 1
            Keep the top k features with greater F value.

        Returns
        -------
            self

        Raises
        ------
        RuntimeError
            If labels were not provided to the constructor.
        '''
        self._check_labels()

        # BUG FIX: the attribute is Fs_, not fs_ (was an AttributeError).
        if self.Fs_ is None:
            Fs, _ = f_classif(self.features[continuous_features], self.labels)
            self.Fs_ = pd.Series(data=Fs, index=continuous_features)

        # BUG FIX: previously the F *values* (x[1]) were passed as the feature
        # names to remove; sort by F value and drop everything past top_k.
        removed_features = list(self.Fs_.sort_values(ascending=False).index[top_k:])
        self._remove_features("filter_continuous_features", removed_features)
        return self

    def _cv_score(self, func, skf, feature_subset):
        '''
        Mean cross-validation score of func restricted to feature_subset.

        Parameters
        ----------
        func : function, def func(train_x, train_y, test_x, test_y) -> score
        skf : StratifiedKFold
            The splitter (stratified on self.labels).
        feature_subset : list
            Feature names to keep when building the fold matrices.
        '''
        scores = []
        for train_index, test_index in skf.split(self.features.values, self.labels):
            x_train = self.features.iloc[train_index][feature_subset]
            x_test = self.features.iloc[test_index][feature_subset]
            # NOTE(review): assumes labels supports positional indexing with an
            # integer array (e.g. a numpy array) -- confirm with callers.
            y_train, y_test = self.labels[train_index], self.labels[test_index]
            scores.append(func(x_train, y_train, x_test, y_test))
        return float(np.mean(scores))

    def wrapper_selector(self, func, n_folds=3, remain_num=1, random_state=0, threshold=1e-4):
        '''
        Recursive feature elimination with one criterion.

        In each step, remove the one feature whose removal gives the left
        feature set the highest score, until the number of features reaches
        remain_num or the score improvement is lower than threshold.

        e.g. The current feature set is ["a", "b", "c"]. We get the mean CV
        score of the subsets ["a", "b"], ["a", "c"] and ["b", "c"]. If the
        mean score of ["a", "b"] is the highest and is greater than the mean
        score of ["a", "b", "c"], then feature "c" is removed.

        Parameters
        ----------
        func : function, def func(train_x, train_y, test_x, test_y) return score
            The function is used to get the score with train and test set,
            e.g. auc, -logloss.
        n_folds : int
            The number of cv folds.
        remain_num : int, default is 1
            The least number of left features.
        random_state : int, default is 0
            The random number is used to split the train set.
        threshold : float, default is 1e-4
            The loop stops when the score improvement is lower than threshold.

        Returns
        -------
            self

        Raises
        ------
        RuntimeError
            If labels were not provided to the constructor.
        ValueError
            If remain_num is not less than the number of current features.
        '''
        self._check_labels()

        left_features = list(self.left_features_)
        removed_features = []

        if len(left_features) <= remain_num:
            raise ValueError("The value of remain_num should be less than the number of total features, default is 1!")

        skf = StratifiedKFold(n_splits=n_folds, shuffle=True, random_state=random_state)
        # BUG FIX: the baseline used to be the constant -1.0, which is wrong
        # for negative metrics (e.g. -logloss) and contradicts the docstring;
        # use the CV score of the full current feature set instead.
        max_score = self._cv_score(func, skf, left_features)
        while len(left_features) > remain_num:
            # BUG FIX: the original reassigned left_features inside this loop,
            # permanently shrinking the set while iterating it; evaluate each
            # candidate subset without touching left_features.
            pairs = []   # [(removed_feature, mean_score), ...]
            for feature in left_features:
                candidate = [f for f in left_features if f != feature]
                pairs.append((feature, self._cv_score(func, skf, candidate)))

            best_feature, best_score = max(pairs, key=lambda p: p[1])
            if best_score - max_score > threshold:
                print_function("remove feature: %s, score: %f" % (best_feature, best_score))
                max_score = best_score
                left_features = [f for f in left_features if f != best_feature]
                removed_features.append(best_feature)
            else:
                print_function("The num of left features reaches remain_num or the decreased score reaches threshold.")
                break
        self._remove_features("wrapper_selector", removed_features)
        return self

    def embedded_selector(self, func, n_folds=3, remain_num=1, random_state=0):
        '''
        Embedded feature selection method.
        Remove the features with lower importance ranking, keep the top
        remain_num.

        Parameters
        ----------
        func : function
            def func(train_x, train_y, test_x, test_y) return importances
            list like [(feature, importance), ...]
        n_folds : int
            The number of cv folds.
        remain_num : int, default is 1
            The least number of left features.
        random_state : int, default is 0
            The random number is used to split the train set.

        Returns
        -------
            self

        Raises
        ------
        RuntimeError
            If labels were not provided to the constructor.
        ValueError
            If remain_num is not less than the number of current features.
        '''
        self._check_labels()

        if len(self.left_features_) <= remain_num:
            raise ValueError("The value of remain_num should be less than the number of total features, default is 1!")

        skf = StratifiedKFold(n_splits=n_folds, shuffle=True, random_state=random_state)
        sorted_features = sorted(self.left_features_)
        rankings = []
        for train_index, test_index in skf.split(self.features.values, self.labels):
            x_train = self.features.iloc[train_index][self.left_features_]
            x_test = self.features.iloc[test_index][self.left_features_]
            y_train, y_test = self.labels[train_index], self.labels[test_index]
            importances = func(x_train, y_train, x_test, y_test)  # [(feature, importance), ...]
            # Align importances with the alphabetically sorted feature list so
            # ranks are comparable across folds.
            stand_importances = sorted(importances, key=lambda x: x[0])
            values = [x[1] for x in stand_importances]
            # BUG FIX: a single argsort yields sorting *indices*, not ranks;
            # argsort of argsort gives each feature's rank (0 = least important).
            ranking = np.argsort(np.argsort(values))
            rankings.append(ranking)

        mean_ranking = np.mean(rankings, axis=0)
        pairs = list(zip(sorted_features, mean_ranking))
        # BUG FIX: keep the remain_num *most* important features (highest mean
        # rank); the original kept the least important ones.
        sorted_pairs = sorted(pairs, key=lambda x: x[1], reverse=True)
        removed_features = [x[0] for x in sorted_pairs][remain_num:]
        self._remove_features("embedded_selector", removed_features)
        return self
        
