import re
import math


# Cache lifetime for trained counts, in seconds.
cache_time = 60 * 60 * 12 # 12 hours cache

# We assume to have only 2 categories:
categories = ['favorite', 'spam']

# Tokens excluded from feature extraction: markup noise ('http', 'href', ...)
# plus common English and Italian stop words.
ignorewords = ['http', 'href', 'target', '_blank', 'the', 'and', 'for', 'with', 'del', 'per', 'della', 'com']



class TrainingCache():
    """Decorator factory that memoizes a zero-argument function through a cache.

    NOTE(review): ``cache`` looks like a web2py-style callable with signature
    ``cache(key, fn, time_expire)`` -- confirm against callers.  When ``cache``
    is falsy the decorated function is simply called directly.
    """

    def __init__(self, cache):
        # BUG FIX: the parameter was misspelled 'cahce' while the body read
        # the (undefined) global 'cache', raising NameError on instantiation.
        self.cache = cache

    def __call__(self, key, time_expire):
        """Return a decorator caching the wrapped call under ``key``."""

        def tmp(func):
            def action():
                if self.cache:
                    # Get-or-set the cached value for this key.
                    # BUG FIX: was 'self.cahce' (typo) -> AttributeError.
                    return self.cache(key, lambda: func(), time_expire)
                else:
                    return func()

            # BUG FIX: '__name___' (three trailing underscores) set a bogus
            # attribute and left the wrapper's real __name__ untouched.
            action.__name__ = func.__name__
            action.__doc__ = func.__doc__

            return action

        return tmp



def getwords(doc, ignore=None):
    """Extract lowercase word features from ``doc``.

    Splits on runs of non-word characters, keeps tokens of length 3..19 that
    are not in ``ignore`` (defaults to the module-level ``ignorewords`` list),
    and returns a dict mapping each unique lowercased word to 1.
    """
    if ignore is None:
        ignore = ignorewords
    # BUG FIX: the original pattern '\W*' can match the empty string; since
    # Python 3.7 re.split() splits on empty matches, shattering the document
    # into single characters. '\W+' restores splitting on separator runs.
    splitter = re.compile(r'\W+')
    # Split the words by non-alpha characters (ignore check uses the token's
    # original case, as before).
    words = [s.lower() for s in splitter.split(doc)
             if 2 < len(s) < 20 and s not in ignore]

    # Return the unique set of words only
    return {w: 1 for w in words}

class classifier:
    """Base trainable classifier keeping feature/category counts in a database.

    NOTE(review): ``db`` looks like a web2py/pydal DAL instance with tables
    ``fc`` (feature, category, count) and ``cc`` (category, count) -- confirm.
    ``cache`` is an optional callable ``cache(key, fn, time_expire)``.
    """

    def __init__(self, getfeatures, db=None, cache=None):
        # Counts of feature/category combinations (in-memory mirror; the DB
        # is the actual store).
        self.fc = {}
        # Counts of documents in each category (in-memory mirror).
        self.cc = {}
        # Callable mapping a document to a {feature: 1} dict.
        self.getfeatures = getfeatures
        self.db = db
        self.cache = cache

    def incache(self, key, value, expire_time):
        """Return the cached value for ``key``, computing it with ``value()``.

        Falls through to a direct ``value()`` call when no cache is configured.
        """
        if self.cache:
            return self.cache(key, value, expire_time)
        return value()

    def incf(self, f, cat):
        """Increment the stored count of feature ``f`` in category ``cat``."""
        c = self.incache('%s:%s' % (f, cat), lambda: self.fcount(f, cat), 60 * 60 * 1)

        if c == 0:
            self.db.fc.insert(feature=f, category=cat, count=1)
        else:
            self.db((self.db.fc.feature == f) & (self.db.fc.category == cat)).update(count=c + 1)

    def fcount(self, f, cat):
        """Return how many times feature ``f`` was seen in category ``cat``."""
        res = self.db((self.db.fc.feature == f) & (self.db.fc.category == cat)).select(self.db.fc.count)

        if res is None or len(res) == 0:
            return 0
        return float(res[0]['count'])

    def incc(self, cat):
        """Increment the stored document count for category ``cat``."""
        c = self.incache(cat, lambda: self.catcount(cat), 60 * 60 * 24)

        if c == 0:
            self.db.cc.insert(category=cat, count=1)
        else:
            self.db(self.db.cc.category == cat).update(count=c + 1)

    def catcount(self, cat):
        """Return the number of documents trained for category ``cat``."""
        res = self.db(self.db.cc.category == cat).select(self.db.cc.count)

        if res is None or len(res) == 0:
            return 0
        return float(res[0]['count'])

    def categories(self):
        """Return the known categories (module-level constant, not the DB)."""
        return categories

    def totalcount(self):
        """Return the total number of trained documents across categories."""
        total = self.db.cc.count.sum()
        res = self.db().select(total).first()[total]

        if res is None:
            return 0

        # BUG FIX: 'res' is already the scalar aggregate extracted above;
        # the original 'res[0]' indexed a number and raised TypeError.
        return float(res)

    def train(self, item, cat):
        """Train on one document: bump every feature count, then the category."""
        features = self.getfeatures(item)
        # Increment the count for every feature with this category
        for f in features:
            self.incf(f, cat)

        # Increment the count for this category
        self.incc(cat)

    def fprob(self, f, cat):
        """Return P(feature|category): feature count over category size."""
        catcount = self.incache(cat, lambda: self.catcount(cat), 60 * 60 * 24)

        if catcount == 0:
            return 0

        # The total number of times this feature appeared in this
        # category divided by the total number of items in this category
        c = self.incache('%s:%s' % (f, cat), lambda: self.fcount(f, cat), 60 * 60 * 1)

        return c / catcount

    def weightedprob(self, f, cat, prf, weight=1.0, ap=0.5):
        """Return ``prf(f, cat)`` blended with an assumed prior ``ap``.

        The prior's influence fades as the feature is observed more often
        (``weight`` counts how strongly the prior is held).
        """
        # Calculate current probability
        basicprob = prf(f, cat)

        # Count the number of times this feature has appeared in
        # all categories.
        # BUG FIX: the original summed fcount(f, cat) once per category
        # instead of iterating the loop variable over every category.
        # (lambda c=c binds the loop variable eagerly.)
        totals = sum(
            self.incache('%s:%s' % (f, c), lambda c=c: self.fcount(f, c), 60 * 60 * 1)
            for c in self.categories()
        )

        # Calculate the weighted average
        return ((weight * ap) + (totals * basicprob)) / (weight + totals)


class naivebayes(classifier):
    """Naive Bayes classifier: score(cat|doc) = P(doc|cat) * P(cat)."""

    def __init__(self, getfeatures, db=None, cache=None):
        classifier.__init__(self, getfeatures, db, cache)
        # Per-category threshold multipliers used by classify().
        self.thresholds = {}

    def docprob(self, item, cat):
        """Return P(doc|cat): product of the weighted feature probabilities."""
        features = self.getfeatures(item)

        # Multiply the probabilities of all the features together
        p = 1
        for f in features:
            p *= self.weightedprob(f, cat, self.fprob)

        return p

    def prob(self, item, cat):
        """Return the unnormalized posterior P(doc|cat) * P(cat)."""
        totalcount = self.incache('totalcount', lambda: self.totalcount(), 60 * 60 * 24)

        # Robustness: with no trained documents there is no evidence for any
        # category (the original divided by zero here).
        if not totalcount:
            return 0

        catcount = self.incache(cat, lambda: self.catcount(cat), 60 * 60 * 24)

        catprob = catcount / totalcount
        docprob = self.docprob(item, cat)

        return docprob * catprob

    def setthreshold(self, cat, t):
        """Set how much the winner must beat every runner-up for ``cat``."""
        self.thresholds[cat] = t

    def getthreshold(self, cat):
        """Return the threshold for ``cat`` (1.0 when unset)."""
        if cat not in self.thresholds:
            return 1.0
        return self.thresholds[cat]

    def classify(self, item, default=None):
        """Return ``(best_category, probability)``, or ``default`` when no
        category wins clearly (threshold check) or none has positive score."""
        probs = {}
        # Find the category with the highest probability
        best = None
        maxprob = 0.0

        for cat in self.categories():
            probs[cat] = self.prob(item, cat)
            if probs[cat] > maxprob:
                maxprob = probs[cat]
                best = cat

        # BUG FIX: 'best' was unbound (NameError) when every probability was
        # zero; treat that as an unclassifiable document.
        if best is None:
            return default

        # Make sure the probability exceeds threshold*next best
        for cat in probs:
            if cat == best:
                continue
            if probs[cat] * self.getthreshold(best) > probs[best]:
                return default

        return best, maxprob

class fisherclassifier(classifier):
    """Classifier that combines feature evidence with Fisher's method."""

    def __init__(self, getfeatures, db=None, cache=None):
        classifier.__init__(self, getfeatures, db, cache)
        # Lower bound each category's Fisher score must clear in classify().
        self.minimums = {}

    def cprob(self, f, cat):
        """Return P(category|feature): the feature's frequency in ``cat``
        divided by its frequency across all categories."""
        clf = self.fprob(f, cat)
        if clf == 0:
            return 0

        freqsum = sum(self.fprob(f, c) for c in self.categories())
        return clf / freqsum

    def fisherprob(self, item, cat):
        """Fold every feature's cprob into a single chi-square probability."""
        features = self.getfeatures(item)

        # Product of the weighted per-feature probabilities.
        p = 1
        for f in features:
            p *= self.weightedprob(f, cat, self.cprob)

        # Fisher's method: -2 * ln(product), then the inverse chi-square
        # with twice as many degrees of freedom as features.
        fscore = -2 * math.log(p)
        return self.invchi2(fscore, len(features) * 2)

    def invchi2(self, chi, df):
        """Inverse chi-square function (series expansion, capped at 1.0)."""
        m = chi / 2.0
        term = math.exp(-m)
        total = term

        for i in range(1, df // 2):
            term *= m / i
            total += term

        return min(total, 1.0)

    def setminimum(self, cat, min):
        """Set the minimum Fisher score required to pick ``cat``."""
        self.minimums[cat] = min

    def getminimum(self, cat):
        """Return the minimum score for ``cat`` (0 when unset)."""
        return self.minimums.get(cat, 0)

    def classify(self, item, default=None):
        """Return ``(best_category, score)`` for the highest Fisher score
        that also clears its category minimum; ``(default, 0.0)`` otherwise."""
        best, top = default, 0.0

        for c in self.categories():
            score = self.fisherprob(item, c)
            # Keep it only if it exceeds the category minimum and the leader.
            if score > self.getminimum(c) and score > top:
                best, top = c, score

        return best, top
