from java.lang import Float
from qmlt.learning.nb import NaiveBayesianClassifier;
from qmlt.dataset import Attribute;
from qmlt.dataset.utils import DataSetUtils;

class BasicNaiveBayesianClassifier(NaiveBayesianClassifier):
    """Naive Bayes classifier: m-estimate smoothing for nominal (STRING)
    attributes, per-class Gaussian likelihoods for numeric attributes.

    NOTE(review): runs under Jython -- ``Float`` comes from java.lang and
    the ``qmlt.*`` types are project-declared Java classes. The bare
    ``"@sig ..."`` docstrings are jythonc signature directives and must
    not be altered.
    """

    def __init__(self):
        # Equivalent sample size m for the m-estimate; set in train().
        self.equi_sample_size = 0

        self.pc = {} # {'class':prob}
        self.pac = {} # {('class', attr_index, 'attr_value'):prob}
        self.mac = {} # {('class', attr_index):mean}
        self.sac = {} # {('class', attr_index):std}
        return

    def train(self, trainset, controller):
        "@sig public void train(Dataset trainset, NaiveBayesianTrainingController controller)"

        # init -- the target attribute must be nominal
        assert(trainset.getTargetDef().type == Attribute.STRING)
        self.equi_sample_size = controller.equivalentSampleSize

        instances = trainset.getInstances()

        # calc p(c): class priors from relative frequencies
        class_col = DataSetUtils.getTargetColumn(instances)
        class_counts = self._sort_into_dict(class_col)  # {'class': count}
        n = len(class_col)
        for c in class_counts.keys():
            self.pc[c] = class_counts[c] / float(n)

        # calc p(a|c)
        for i in range(len(trainset.getFeatureDefs())):
            attr_def = trainset.getFeatureDefs()[i]
            data_col = DataSetUtils.getFeatureColumn(instances, i)
            value_set = set(data_col)
            m = self.equi_sample_size
            p = 1.0 / len(value_set)  # uniform prior estimate for each value
            for c in self.pc.keys():
                n_c = class_counts[c]  # number of training examples of class c
                if attr_def.type == Attribute.STRING:
                    # m-estimate: (n_cv + m*p) / (n_c + m).
                    # BUGFIX: the denominator must be the per-class count
                    # n_c, not the total sample size n -- conditioning on
                    # the class is the whole point of P(a|c).
                    for v in value_set:
                        n_cv = len([x for x in instances
                                    if x.getFeatures()[i] == v and x.getTarget() == c])
                        self.pac[(c, i, v)] = (n_cv + m * p) / float(n_c + m)
                else:
                    # numeric attribute: fit a per-class Gaussian
                    values = [x.getFeatures()[i] for x in instances
                              if x.getTarget() == c]
                    mean = self._mean(values)
                    self.mac[(c, i)] = mean
                    self.sac[(c, i)] = self._std(values, mean)
        return

    def predict(self, instance):
        "@sig public List<Object> predict(Instance instance)"
        # Untrained model: no prediction possible.
        if len(self.pc.keys()) <= 0:
            return None
        import math

        # Work in log space to avoid underflow of the likelihood product.
        ln_post_probs = {}
        for c in self.pc.keys():
            ln_post_probs[c] = math.log(self.pc[c])

        for i in range(len(instance.dataSet.getFeatureDefs())):
            attr_def = instance.dataSet.getFeatureDefs()[i]
            value = instance.getFeatures()[i]
            for c in self.pc.keys():
                if attr_def.type == Attribute.STRING:
                    # Unseen attribute values default to 0 and are skipped
                    # below -- i.e. ignored for every class, not penalized.
                    post_prob = self.pac.get((c, i, value), 0)
                else:
                    post_prob = self._gaussian(value, self.mac[(c, i)], self.sac[(c, i)])
                # Skip zero/denormal likelihoods instead of adding -inf.
                if post_prob > Float.MIN_VALUE:
                    ln_post_probs[c] += math.log(post_prob)

        # Highest log-posterior wins; ties broken by class label ordering.
        pred = max(zip(ln_post_probs.values(), ln_post_probs.keys()))[1]
        return [pred]

    def saveModel(self, outputFilepath):
        import sys
        # BUGFIX: terminate the message with a newline.
        sys.stderr.write("method saveModel() not yet implemented.\n")
        return

    def loadModel(self, inputFilepath):
        import sys
        # BUGFIX: terminate the message with a newline.
        sys.stderr.write("method loadModel() not yet implemented.\n")
        return

    def _sort_into_dict(self, values):
        """Return a frequency table {value: count} for the given sequence."""
        counts = {}
        for value in values:
            counts[value] = counts.get(value, 0) + 1
        return counts

    def _mean(self, values):
        """Arithmetic mean of a non-empty sequence of numbers."""
        return sum(values) / float(len(values))

    def _std(self, values, mean):
        """Population standard deviation (divides by n, not n-1)."""
        import math
        nvar = 0
        for value in values:
            nvar += (value - mean) ** 2
        return math.sqrt(nvar / float(len(values)))

    def _gaussian(self, value, mean, std):
        """Gaussian pdf N(mean, std) evaluated at value.

        BUGFIX: the density is exp(-x^2/2) / (std * sqrt(2*pi)); the
        original omitted the 1/std factor, which skews comparisons
        between classes whose numeric attributes have different spreads.
        """
        import math
        if std < Float.MIN_VALUE:
            # Degenerate (zero-variance) attribute: keep the original
            # fallback -- the unscaled standard-normal peak.
            return 1.0 / 2.5066282746310002
        x = (value - mean) / std
        return math.exp(-0.5 * (x ** 2)) / (std * 2.5066282746310002) # the const = sqrt(2pi)
