#!/usr/bin/env python
# -*- coding: utf-8 -*-

#
# This program is free software; you can redistribute it and/or modify  
# it under the terms of the GNU General Public License as published by  
# the Free Software Foundation; either version 2 of the License, or     
# (at your option) any later version.                                   
#                                                                         
# A copy of the license can be found in the license.txt file supplied   
# with this software or at: http://www.gnu.org/copyleft/gpl.html       
#

from bitvector import BitVector

import murmur

class BloomFilter(object):
    '''
    Space-efficient probabilistic set-membership structure.

    Backed by a BitVector and murmur hashing.  A filter constructed with the
    default arguments is unconfigured (empty bit vector, zero hash functions)
    and must be populated via reset() or deserialize() before use.

    NOTE(review): assumes BitVector supports len(), integer indexing, str(),
    and the size=/bitstring= constructor keywords -- verify against the
    bitvector module actually shipped with this project.
    '''

    def __init__(self, num_elements=0, buckets_per_element=0):
        '''
        @param num_elements expected number of keys to be added
        @param buckets_per_element bits allocated per expected key; the
               number of hash functions is derived from this via
               BloomHelper.computeBestK()
        '''
        self.hash_times = 0      # number of hash functions (k)
        self.bit_vector = None   # BitVector backing store, None until reset()
        self.count_buckets = 0   # cached len(bit_vector)

        if num_elements > 0 and buckets_per_element > 0:
            hash_times = BloomHelper.computeBestK(buckets_per_element)
            # +20 slack bits, preserving the original sizing heuristic
            vector_size = num_elements * buckets_per_element + 20
            self.reset(hash_times, BitVector(size=vector_size))
        ## __init__()

    def reset(self, hash_times, bit_vector):
        '''Replace the filter state with the given hash count and bit vector.

        @param hash_times number of hash functions to apply per key
        @param bit_vector BitVector instance to use as the bucket array
        '''
        self.hash_times = hash_times
        self.bit_vector = bit_vector
        self.count_buckets = len(self.bit_vector)
        ## reset()

    def add(self, key):
        '''Insert key by setting the bit of every bucket it hashes to.'''
        for bucket in self.getHashBuckets(key):
            self.bit_vector[bucket] = 1
        ## add()

    def __contains__(self, key):
        '''@return True if key may have been added (false positives possible,
        false negatives are not).'''
        return all(self.bit_vector[bucket]
                   for bucket in self.getHashBuckets(key))
        ## __contains__()

    def __str__(self):
        return '<BloomFilter hash_times:%d bit_vector:%s>' % (self.hash_times, str(self.bit_vector))
        ## __str__()

    def getHashBuckets(self, key):
        '''
        Murmur is faster than an SHA-based approach and provides as-good collision
        resistance.  The combinatorial generation approach described in
        http://www.eecs.harvard.edu/~kirsch/pubs/bbbf/esa06.pdf
        does prove to work in actual tests, and is obviously faster
        than performing further iterations of murmur.

        @return list of self.hash_times bucket indices for key
        '''
        maximum = self.count_buckets
        hash1 = murmur.hash(key, 0)
        hash2 = murmur.hash(key, hash1)
        # abs() guards against a negative modulo result when murmur.hash
        # returns signed values.
        return [abs((hash1 + i * hash2) % maximum)
                for i in range(self.hash_times)]
        ## getHashBuckets()

    def serialize(self):
        '''serialize bloom-filter to a string

        @return serialized bloom-filter in string: the hash count and the
                bit-vector string joined by a single tab
        '''
        return "\t".join((str(self.hash_times), str(self.bit_vector)))
        ## serialize()

    @classmethod
    def deserialize(cls, bloom_serialized):
        '''deserialize bloom-filter from a serialized string

        Now a classmethod: callable as BloomFilter.deserialize(s) without
        building a throwaway instance first; instance calls still work.

        @param bloom_serialized string produced by serialize()
        @return new BloomFilter object, or None for empty/falsy input
        '''
        if not bloom_serialized:
            return None

        # maxsplit=1: the bit-vector string itself must not be split further
        hash_times, bit_vector = bloom_serialized.split("\t", 1)
        bf = cls()
        bf.reset(int(hash_times), BitVector(bitstring=bit_vector))
        return bf
        ## deserialize()

    ## class BloomFilter



class BloomHelper(object):
    '''
    The following calculations are taken from:
    http://www.cs.wisc.edu/~cao/papers/summary-cache/node8.html
    "Bloom Filters - the math"

    This class's static methods are meant to facilitate the use of the Bloom
    Filter class by helping to choose correct values of 'bits per element' and
    'number of hash functions, k'.
    '''

    MAX_BUCKETS = 15
    MIN_BUCKETS = 2
    MIN_K = 1
    MAX_K = 8

    # OPT_K_PER_BUCKETS[b] is the number of hash functions minimizing the
    # false positive rate when b buckets per element are used.
    OPT_K_PER_BUCKETS = ( 1, # dummy K for 0 buckets per element
                          1, # dummy K for 1 buckets per element
                          1, 2, 3, 3, 4, 5, 5, 6, 7, 8, 8, 8, 8, 8)


    # In the following table, the row 'i' shows false positive rates if i buckets
    # per element are used.  Column 'j' shows false positive rates if j hash
    # functions are used.  The first row is 'i=0', the first column is 'j=0'.
    # Each cell (i,j) the false positive rate determined by using i buckets per
    # element and j hash functions.

    PROBS = (
                (1.0, ), # dummy row representing 0 buckets per element
                (1.0, 1.0), # dummy row representing 1 buckets per element
                (1.0, 0.393,  0.400),
                (1.0, 0.283,  0.237,  0.253),
                (1.0, 0.221,  0.155,  0.147,   0.160),
                (1.0, 0.181,  0.109,  0.092,   0.092,   0.101), # 5
                (1.0, 0.154,  0.0804, 0.0609,  0.0561,  0.0578,  0.0638),
                (1.0, 0.133,  0.0618, 0.0423,  0.0359,  0.0347,  0.0364),
                (1.0, 0.118,  0.0489, 0.0306,  0.024,   0.0217,  0.0216,  0.0229),
                (1.0, 0.105,  0.0397, 0.0228,  0.0166,  0.0141,  0.0133,  0.0135,  0.0145), # 9
                (1.0, 0.0952, 0.0329, 0.0174,  0.0118,  0.00943, 0.00844, 0.00819, 0.00846),
                (1.0, 0.0869, 0.0276, 0.0136,  0.00864, 0.0065,  0.00552, 0.00513, 0.00509),
                (1.0, 0.08,   0.0236, 0.0108,  0.00646, 0.00459, 0.00371, 0.00329, 0.00314),
                (1.0, 0.074,  0.0203, 0.00875, 0.00492, 0.00332, 0.00255, 0.00217, 0.00199),
                (1.0, 0.0689, 0.0177, 0.00718, 0.00381, 0.00244, 0.00179, 0.00146, 0.00129),
                (1.0, 0.0645, 0.0156, 0.00596, 0.003,   0.00183, 0.00128, 0.001,   0.000852) # 15
            )  # the first column is a dummy column representing K=0.


    @classmethod
    def computeBestK(cls, buckets_per_element):
        '''
        Given the number of buckets that can be used per element, return the optimal
        number of hash functions in order to minimize the false positive rate.

        Out-of-range inputs are clamped to the table: negative values map to
        index 0 (dummy K of 1), values past the table end map to the last entry.
        '''
        index = max(0, min(buckets_per_element, len(cls.OPT_K_PER_BUCKETS) - 1))
        return cls.OPT_K_PER_BUCKETS[index]
        ## computeBestK()


    @classmethod
    def computeBucketsAndK(cls, max_false_positive_probability):
        '''
        Given a maximum tolerable false positive probability, compute a Bloom
        specification which will give less than the specified false positive rate,
        but minimize the number of buckets per element and the number of hash
        functions used.  Because bandwidth (and therefore total bitvector size)
        is considered more expensive than computing power, preference is given
        to minimizing buckets per element rather than number of hash functions.

        @param max_false_positive_probability The maximum tolerable false positive rate.
        @return (K, buckets_per_element) tuple resulting in a false positive
                rate less than specified by the function call.
        '''
        # Trivial case: even the minimum configuration is good enough.
        if max_false_positive_probability >= cls.PROBS[cls.MIN_BUCKETS][cls.MIN_K]:
            # BUG FIX: previously returned (2, OPT_K_PER_BUCKETS[2]) -- a
            # (buckets, K) tuple, inconsistent with the (K, buckets) order
            # of every other return from this method.
            return (cls.OPT_K_PER_BUCKETS[cls.MIN_BUCKETS], cls.MIN_BUCKETS)

        # Trivial case: requested rate is tighter than the table can satisfy;
        # return the strongest configuration available.
        if max_false_positive_probability < cls.PROBS[cls.MAX_BUCKETS][cls.MAX_K]:
            return (cls.MAX_K, cls.MAX_BUCKETS)

        # First find the minimal required number of buckets:
        buckets_per_element = cls.MIN_BUCKETS
        K = cls.OPT_K_PER_BUCKETS[buckets_per_element]
        while cls.PROBS[buckets_per_element][K] > max_false_positive_probability:
            buckets_per_element += 1
            K = cls.OPT_K_PER_BUCKETS[buckets_per_element]

        # Now that the number of buckets is sufficient, see if we can relax K
        # without losing too much precision.
        while cls.PROBS[buckets_per_element][K - 1] <= max_false_positive_probability:
            K -= 1

        return (K, buckets_per_element)
        ## computeBucketsAndK()

    ## class BloomHelper

