#!/usr/bin/env python3

from math import log

"""
Bayes Classifier

Bayes Decision Theory: P(A|B) = P(B|A) * P(A) / P(B)
"""

class NaiveBayes(object):
    """
    Base class for naive Bayes classifiers.

    The "naive" assumption: features are treated as conditionally
    independent given the class, which makes P(features|class) a
    simple product of per-feature probabilities.
    """
    pass

class DocumentClassifier(NaiveBayes):
    """
    Document classifier using naive Bayes.

    Given training data of doc:category pairs, determine a target doc's
    category.  Each token is a Bernoulli (present/absent) feature; the
    per-category token probabilities use add-one (Laplace) smoothing so
    unseen tokens never yield a zero probability.
    """
    def __init__(self, data=None, categories=None):
        """
        data: list of token lists, one per training document
        categories: list of category labels, parallel to data

        Raises ValueError when data and categories differ in length.
        Trains immediately when data is supplied.
        """
        self.data = data if data else []
        self.input_categories = categories if categories else []

        if len(self.data) != len(self.input_categories):
            raise ValueError("data and categories must have the same number of elements")

        # sorted unique category labels derived from the training labels
        self.categories = self._unit_categories()

        # sorted unique tokens from the training data (built lazily)
        self.vocabulary = []
        # prior P(category) for each entry of self.categories
        self.prob_categories = []
        # for each token in self.vocabulary, fraction of training docs containing it
        self.prob_tokens_total = []
        # list of lists: prob_tokens_categories[i][j] is the smoothed probability
        # of vocabulary[j] appearing in a document of categories[i]
        self.prob_tokens_categories = []

        if self.data:
            self.train(self.data, self.input_categories)

    def _unit_categories(self):
        "return the sorted unique category labels from the training labels"
        return sorted(set(self.input_categories))

    def _create_vocab_list(self):
        "build the sorted vocabulary from every token in the training data"
        vocab_set = set()
        for str_list in self.data:
            vocab_set.update(str_list)
        self.vocabulary = sorted(vocab_set)

    def getVocabulary(self):
        "return vocabulary used by data (built on first use)"
        if not self.vocabulary:
            self._create_vocab_list()
        return self.vocabulary

    def _tokens_to_vec(self, tokens):
        """
        Convert a token list into a 0/1 presence vector over the vocabulary.

        Raises ValueError for any token absent from the training vocabulary.
        """
        vocabs = self.getVocabulary()
        # dict lookup is O(1); the original list.index() made this O(V) per token
        index = {tok: i for i, tok in enumerate(vocabs)}
        ret_vec = [0] * len(vocabs)
        for t in tokens:
            if t not in index:
                raise ValueError("the token %s is not in vocabulary derived from data!" % t)
            ret_vec[index[t]] = 1
        return ret_vec

    def train(self, data, categories):
        """
        Fit the classifier on data (token lists) and categories (labels).

        All previously learned state is discarded first, so calling
        train() again performs a clean retrain.  Raises ValueError when
        data and categories differ in length.
        """
        # validate before mutating state so a bad call leaves the
        # classifier untouched
        if len(data) != len(categories):
            raise ValueError("data and categories must have the same number of elements")
        self.data = data
        self.input_categories = categories
        self.categories = self._unit_categories()

        # reset learned state -- the original version appended to these
        # lists without clearing them, corrupting the model on retraining
        self.vocabulary = []
        self.prob_categories = []
        self.prob_tokens_total = []
        self.prob_tokens_categories = []

        data_vecs = [self._tokens_to_vec(tokens) for tokens in self.data]

        # self.prob_tokens_total: fraction of documents containing each token
        total_n = len(self.data)
        self.prob_tokens_total = [
            sum(dv[idx] for dv in data_vecs) / total_n
            for idx in range(len(self.vocabulary))
        ]

        # self.prob_categories: prior probability of each category
        self.prob_categories = [
            self.input_categories.count(cate) / len(self.input_categories)
            for cate in self.categories
        ]

        # self.prob_tokens_categories: add-one smoothed P(token|category);
        # +2 in the denominator because each token has two outcomes
        # (present / absent)
        for cate in self.categories:
            total_category = self.input_categories.count(cate)
            probs = [0] * len(self.vocabulary)
            for idx in range(len(self.vocabulary)):
                count = sum(
                    dv[idx]
                    for i, dv in enumerate(data_vecs)
                    if self.input_categories[i] == cate
                )
                probs[idx] = (count + 1) / (total_category + 2)
            self.prob_tokens_categories.append(probs)

    def apply(self, target_data):
        """
        Classify target_data (a token list); return the most likely category.

        Works in log space to avoid floating-point underflow.  Raises
        ValueError when the classifier has not been trained, or when
        target_data contains a token missing from the vocabulary.
        """
        if not self.prob_tokens_categories or not self.prob_tokens_total:
            raise ValueError("DocumentClassifier not trained with data yet!")

        vec_target = self._tokens_to_vec(target_data)

        # log P(target): sum of log probs of the tokens that are present
        log_p_target = 0.0
        for idx, bit in enumerate(vec_target):
            if bit:
                log_p_target += log(self.prob_tokens_total[idx])

        # argmax over log P(cat) + log P(target|cat) - log P(target).
        # start at -inf: the original started at -1, which silently beat
        # any category whose (negative) log score fell below -1 and then
        # returned categories[-1]
        prob_max_idx = -1
        log_prob_max = float('-inf')
        for idx in range(len(self.categories)):
            log_prob = log(self.prob_categories[idx])
            for i, bit in enumerate(vec_target):
                if bit:
                    log_prob += log(self.prob_tokens_categories[idx][i])
            log_prob -= log_p_target
            if log_prob > log_prob_max:
                log_prob_max = log_prob
                prob_max_idx = idx

        return self.categories[prob_max_idx]
    
if __name__ == '__main__':
    # Demo: train on a tiny toy corpus, then classify a few short docs.
    def loadDataSet():
        "Return six tokenized posts and their parallel category labels."
        posts = [
            ['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'],
            ['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'],
            ['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'],
            ['stop', 'posting', 'stupid', 'useless', 'garbage'],
            ['my', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'],
            ['quit', 'buying', 'worthless', 'dog', 'food', 'stupid'],
        ]
        labels = ['normal', 'abuse', 'normal', 'abuse', 'normal', 'abuse']
        return posts, labels

    posts, labels = loadDataSet()
    classifier = DocumentClassifier(data=posts, categories=labels)
    for query in (["stupid"],
                  ["dog", "stupid"],
                  ["please", "help"],
                  ["worthless", "food"]):
        print(classifier.apply(query))