# coding=utf-8

import re
import math

def sampletrain(cl):
    """Feed the classifier *cl* a small hand-labelled training corpus."""
    corpus = [
        ("Nobody wins the water", "good"),
        ("the quick rabbit jumps fences", "good"),
        ("buy pharmaceuticals now", "bad"),
        ("make quick money at the online casino", "bad"),
        ("the quick brown fox jumps", "good"),
    ]
    for text, label in corpus:
        cl.train(text, label)

def getwords(doc):
    """Extract features from *doc*: lowercase words of 3-19 characters,
    returned as a {word: 1} dict (used as a set; the value is never read)."""
    # BUGFIX: was "\\W*" — a pattern that can match the empty string, which
    # on Python 3.7+ makes re.split() split between every character and
    # destroys all words.  \W+ (one or more non-word chars) is the intent.
    splitter = re.compile(r"\W+")
    words = [s.lower() for s in splitter.split(doc) if 2 < len(s) < 20]
    return dict((w, 1) for w in words)

class classifier:
    """Base trainable classifier: counts feature/category co-occurrences from
    labelled documents and derives probabilities from those counts.
    Subclasses must supply prob(item, cat) for classify() to work."""

    def __init__(self, getfeatures, filename=None):
        # fc maps feature -> {category: count}, e.g. {'quick': {'bad': 1, 'good': 2}}.
        # A "feature" here is a single word, not a phrase or document.
        self.fc = {}
        # cc maps category -> number of training documents, e.g. {'bad': 2, 'good': 2}.
        self.cc = {}
        # Callable that turns a document into a {feature: 1} dict.
        self.getfeatures = getfeatures
        # Per-category decision thresholds used by classify().
        self.thresholds = {}
        # NOTE(review): `filename` is accepted but unused in this version —
        # presumably a persistence hook; kept for call compatibility.

    def incf(self, f, cat):
        """Increment the count of feature *f* under category *cat* (create at 1)."""
        self.fc.setdefault(f, {})
        self.fc[f].setdefault(cat, 0)
        self.fc[f][cat] += 1

    def incc(self, cat):
        """Increment the number of training documents seen for *cat*."""
        self.cc.setdefault(cat, 0)
        self.cc[cat] += 1

    def fcount(self, f, cat):
        """Number of *cat* documents that contained feature *f*, as a float.
        Never exceeds the total document count of that category."""
        if f in self.fc and cat in self.fc[f]:
            return float(self.fc[f][cat])
        return 0.0

    def catcount(self, cat):
        """Number of training documents labelled *cat*, as a float."""
        if cat in self.cc:
            return float(self.cc[cat])
        return 0.0  # was int 0; float keeps downstream arithmetic consistent

    def totalcount(self):
        """Total number of training documents across all categories."""
        return sum(self.cc.values())

    def categories(self):
        """All category labels seen so far."""
        return self.cc.keys()

    def train(self, item, cat):
        """Train on one document *item* (sentence/article) labelled *cat*."""
        features = self.getfeatures(item)
        for f in features:
            self.incf(f, cat)
        self.incc(cat)

    def fprob(self, f, cat):
        """Pr(feature|category): fraction of *cat* documents containing *f*."""
        if self.catcount(cat) == 0:
            return 0
        return self.fcount(f, cat) / self.catcount(cat)

    def weightedprob(self, f, cat, prf, weight=1.0, ap=0.5):
        """prf(f, cat) smoothed toward the assumed prior *ap*, as if *ap* had
        been observed *weight* times.  Keeps rare features from yielding 0/1."""
        basicprob = prf(f, cat)
        # How many times this feature appears across all categories.
        totals = sum([self.fcount(f, c) for c in self.categories()])
        return ((weight * ap) + (totals * basicprob)) / (weight + totals)

    def setthreshold(self, cat, t):
        """Require the winner to beat runners-up by factor *t* when it is *cat*."""
        self.thresholds[cat] = t

    def getthreshold(self, cat):
        """Threshold for *cat*; defaults to 1.0 when unset."""
        if cat not in self.thresholds:
            return 1.0
        return self.thresholds[cat]

    def classify(self, item, default=None):
        """Return the best category for *item*, or *default* when no category
        wins convincingly.  Relies on self.prob() from a subclass."""
        probs = {}
        best = None          # was unbound when every probability is 0 -> NameError
        bestprob = 0.0       # renamed from `max`, which shadowed the builtin
        for cat in self.categories():
            probs[cat] = self.prob(item, cat)
            if probs[cat] > bestprob:
                bestprob = probs[cat]
                best = cat
        if best is None:
            return default
        # BUGFIX: the original compared probs[cat]*getthreshold(cat) > probs[cat],
        # which never involves the winner.  The intended rule: any runner-up
        # scaled by the winner's threshold must not exceed the winner's score.
        for cat in probs:
            if cat == best:
                continue
            if probs[cat] * self.getthreshold(best) > probs[best]:
                return default
        return best

#Pr(Catetory|Document) = Pr(Document|Category) * Pr(Category) / Pr(Document)
class nativebayes(classifier):
    """Naive Bayes classifier: scores a document by the product of its
    per-word smoothed probabilities times the category prior.
    Pr(Category|Document) = Pr(Document|Category) * Pr(Category) / Pr(Document)."""

    def docprob(self, item, cat):
        """Pr(Document|Category): product of Pr(word|cat) over the item's
        features (the naive independence assumption)."""
        prob = 1
        for feature in self.getfeatures(item):
            prob *= self.weightedprob(feature, cat, self.fprob)
        return prob

    def prob(self, item, cat):
        """Unnormalized Pr(Category|Document); Pr(Document) is omitted since
        it is constant across categories when ranking them."""
        catprob = self.catcount(cat) / self.totalcount()
        return catprob * self.docprob(item, cat)

    
        
class fishierclassifier(classifier):
    """Fisher-method classifier (Robinson's method): combines per-feature
    Pr(category|feature) estimates via an inverse chi-square test."""

    def __init__(self, getfeatures):
        classifier.__init__(self, getfeatures)
        # Minimum Fisher score a category must exceed to be returned.
        self.minimums = {}

    def setminimum(self, cat, min):
        """Set the minimum acceptable Fisher score for *cat*."""
        # parameter name kept as `min` for keyword-call compatibility,
        # even though it shadows the builtin
        self.minimums[cat] = min

    def getminimum(self, cat):
        """Minimum score for *cat*; 0 when none was set."""
        if cat not in self.minimums:
            return 0
        return self.minimums[cat]

    def cprob(self, f, cat):
        """Pr(category|feature): fprob for this category normalized by the
        feature's frequency across all categories."""
        clf = self.fprob(f, cat)
        if clf == 0:
            return 0
        freqsum = sum([self.fprob(f, c) for c in self.categories()])
        return clf / freqsum

    def fisherprob(self, item, cat):
        """Fisher score for *item* under *cat*: fold the per-feature weighted
        probabilities into a chi-square statistic and invert it."""
        p = 1
        features = self.getfeatures(item)
        for f in features:
            p *= self.weightedprob(f, cat, self.cprob)
        # NOTE(review): math.log raises if p underflows to 0.0 on very long
        # documents; weightedprob keeps every factor > 0, so typical inputs
        # are safe — confirm before feeding huge documents.
        fscore = -2 * math.log(p)
        return self.invchi2(fscore, len(features) * 2)

    def invchi2(self, chi, df):
        """Inverse chi-square function with *df* degrees of freedom, capped
        at 1.0 (partial-sum series evaluation)."""
        m = chi / 2.0
        total = term = math.exp(-m)  # was `sum`, which shadowed the builtin
        for i in range(1, df // 2):
            term *= m / i
            total += term
        return min(total, 1.0)

    def classify(self, item, default=None):
        """Category with the highest Fisher score above its per-category
        minimum, or *default* when none qualifies."""
        best = default
        bestscore = 0.0  # was `max`, which shadowed the builtin
        for c in self.categories():
            p = self.fisherprob(item, c)
            if p > self.getminimum(c) and p > bestscore:
                best = c
                bestscore = p
        return best
        
if __name__ == '__main__':
    # Demo: train the Fisher classifier on the toy corpus, classify two
    # phrases, then show how a minimum score flips a borderline case.
    # BUGFIX: Python 2 print statements converted to print() calls so the
    # file parses under Python 3 (the call form is valid in both versions).
    cl = fishierclassifier(getwords)
    sampletrain(cl)
    print(cl.classify("quick rabbit"))
    print(cl.classify("quick money"))
    cl.setminimum("bad", 0.8)
    print(cl.classify("quick money"))
