from __future__ import division
import numpy as np
import logging
import sys

from .tree import InternalNode
from .tree import LeafNode

class TreeBuilder(object):
    """Recursively grows a decision tree over numpy structured (record) arrays.

    Splitting criterion is supplied by subclasses via ``_information_gain``.
    Numeric columns split by ``value < threshold``; string/bool columns split
    by equality with the threshold value.
    """

    def _split_mask(self, X, split_dim, split_threshold):
        """Return the boolean mask selecting the *left* child of a split.

        X: structured array of features, one data point per row.
        split_dim: field name of the column to split on.
        split_threshold: numeric threshold or categorical value.

        Numeric columns (float / signed or unsigned int) use
        ``X[split_dim] < split_threshold``; string and bool columns use
        equality.  Centralizing this here keeps the search
        (``_find_split_parameters``) and the application (``fit``) of a
        split consistent.

        Raises Exception for unsupported column dtypes.
        """
        kind = X[split_dim].dtype.kind
        if kind in 'fiu':          # float, signed int, unsigned int
            return X[split_dim] < split_threshold
        if kind in 'SUb':          # bytes string, unicode string, bool
            return X[split_dim] == split_threshold
        logging.error('dimension type not recognized: %s', X[split_dim].dtype)
        raise Exception('ERROR: one of the columns of the data is not string nor float')

    def _find_split_parameters(self, X, Y, n_min_leaf, n_trials):
        """
        Compute parameters of the best split for the data X, Y.

        X: features, one data point per row (structured array)
        Y: labels, one data point per row
        n_trials: the number of split dimensions to try.
        n_min_leaf: the minimum leaf size -- don't create a split with
            children smaller than this.

        Returns the pair (split_dim, split_threshold) or None if no appropriate
        split is found.  split_dim is a field name (string) and
        split_threshold is a scalar from that column's domain.

        Calls self._information_gain(Y, Y_left, Y_right) to score a split;
        subclasses must provide that method.
        """
        # X.shape is (n_rows,) for a structured array; number of feature
        # dimensions comes from the dtype's field names.
        n_data = X.shape[0]
        n_dim = len(X.dtype.names)

        # short circuit if this leaf is too small to split
        if n_data < 2 * n_min_leaf:
            return None

        # prepare for search
        best_info_gain = -np.inf
        best_split_dim = None
        best_split_threshold = None
        for _trial in range(n_trials):
            # pick a random dimension to split on
            split_dim = X.dtype.names[np.random.randint(0, n_dim)]
            # candidate thresholds: the distinct values in this column
            possible_thresholds = np.unique(X[split_dim])
            if X[split_dim].dtype.kind == 'f':
                # for float columns, move thresholds to the midpoints of
                # the gaps between consecutive distinct values
                possible_thresholds = 0.5 * (
                    possible_thresholds[:-1] +
                    possible_thresholds[1:])

            # search for the best split
            for split_threshold in possible_thresholds:
                mask_l = self._split_mask(X, split_dim, split_threshold)
                mask_r = np.logical_not(mask_l)

                # refuse to make leafs that are too small
                if np.sum(mask_l) < n_min_leaf or \
                        np.sum(mask_r) < n_min_leaf:
                    continue

                info_gain = self._information_gain(
                        Y, Y[mask_l], Y[mask_r])

                if info_gain > best_info_gain:
                    best_split_dim = split_dim
                    best_info_gain = info_gain
                    best_split_threshold = split_threshold

        if best_split_dim is None:
            return None
        else:
            return best_split_dim, best_split_threshold

    def fit(self, X, Y, max_depth, n_min_leaf, n_trials, first=False):
        """Recursively build a tree for (X, Y); returns the root node.

        X: features, one data point per row (structured array)
        Y: labels, one data point per row
        max_depth: remaining depth budget; a leaf is made when it reaches 0.
        n_min_leaf: minimum number of points allowed in a leaf.
        n_trials: number of random split dimensions tried per node.
        first: pass True on the top-level call to (re)initialize the
            per-tree leaf bookkeeping on this builder.

        Raises Exception if a chosen split would produce a child smaller
        than n_min_leaf (should not happen: the search already filters
        such splits).
        """
        if first:
            self.leafnode = []
            self.leaf_id = -1

        # leaf prediction: mean label vector, kept as a (1, n_labels) row
        yhat = Y.mean(axis=0).reshape((1, -1))

        # short circuit for pure leafs
        if np.all(Y == Y[0]):
            self.leaf_id += 1
            return LeafNode(yhat, self.leaf_id)

        # avoid growing trees that are too deep
        if max_depth <= 0:
            self.leaf_id += 1
            return LeafNode(yhat, self.leaf_id)

        split_params = self._find_split_parameters(
                X, Y, n_min_leaf=n_min_leaf, n_trials=n_trials)

        # if we didn't find a good split point then become a leaf
        if split_params is None:
            self.leaf_id += 1
            return LeafNode(yhat, self.leaf_id)

        split_dim, split_threshold = split_params

        # apply the split with the exact same predicate the search used
        mask_l = self._split_mask(X, split_dim, split_threshold)
        mask_r = np.logical_not(mask_l)

        # refuse to make leafs that are too small
        if np.sum(mask_l) < n_min_leaf or \
                np.sum(mask_r) < n_min_leaf:
            raise Exception("Leaf too small")

        # otherwise split this node recursively
        left_child = self.fit(
                X[mask_l],
                Y[mask_l],
                max_depth=max_depth - 1,
                n_min_leaf=n_min_leaf,
                n_trials=n_trials)

        right_child = self.fit(
                X[mask_r],
                Y[mask_r],
                max_depth=max_depth - 1,
                n_min_leaf=n_min_leaf,
                n_trials=n_trials)
        return InternalNode(
                dim=split_dim,
                threshold=split_threshold,
                left_child=left_child,
                right_child=right_child)


class ClusteringTreeBuilder(TreeBuilder):
    """Tree builder that scores splits by entropy-based information gain."""

    def _entropy(self, x):
        """Shannon entropy of the probability vector *x*; zero entries are
        dropped so the logarithm is always well defined."""
        positive = x[x > 0]
        return -np.sum(positive * np.log(positive))

    def _information_gain(self, y, y_l, y_r):
        """Entropy reduction from splitting labels *y* into *y_l* and *y_r*.

        Each child's entropy (of its mean label vector) is weighted by the
        fraction of points it receives.
        """
        n = y.shape[0]
        n_l = y_l.shape[0]
        n_r = y_r.shape[0]

        h_parent = self._entropy(y.mean(axis=0))
        h_left = self._entropy(y_l.mean(axis=0))
        h_right = self._entropy(y_r.mean(axis=0))

        return h_parent - n_l / n * h_left - n_r / n * h_right
