import random
import math
import numpy as np
from scipy import stats
from scipy.spatial import distance 
#TODO:
#  query procedure
#  multiprobe query

 
def LSHEuclideanFun(D, w):
    """Build one 2-stable (Euclidean) LSH function.

    Parameters:
        D: data dimensionality
        w: bin (bucket) width

    Returns:
        A callable h(x) = floor((a . x + b) / w), where each component of
        the projection vector `a` is drawn from N(0, 1) and the offset `b`
        is drawn uniformly from [0, w].
    """
    a = np.array([random.normalvariate(0, 1) for _ in range(D)])
    b = random.uniform(0, w)

    def h(point):
        return math.floor((np.inner(np.array(point), a) + b) / w)

    return h
class LSHParameterTuning:
    """Derive the E2LSH quality parameters (p1, p2, rho) for a query
    radius r and approximation factor c."""

    def __init__(self, r, c):
        # p2: collision probability for "far" points (distance c*r).
        self.p2 = self.p(r, c)
        # p1: collision probability for "near" points (distance r).
        self.p1 = self.p(r, 1)
        # rho drives index size/query cost: tables scale as n**rho.
        self.rho = math.log(1.0 / self.p1) / math.log(1.0 / self.p2)
        print("p1=", self.p1," p2=", self.p2," rho=", self.rho)

    def p(self, r, c):
        """Collision probability of a 2-stable hash at normalized distance r/c."""
        ratio = float(r) / float(c)
        tail = 1 - 2 * stats.norm.cdf(-ratio)
        correction = (2.0 / (ratio * math.sqrt(2 * math.pi))) * (1 - math.exp(-math.pow(ratio, 2) / 2.0))
        return tail - correction

class LSH:
    """Locality-Sensitive Hashing index for Euclidean nearest-neighbour
    search (E2LSH scheme).

    Builds L hash tables; each table keys points by the concatenation of
    M 2-stable hash functions. Parameters L and M are derived from the
    data size in BuildHashIndex.
    """

    def __init__(self, L=0, M=0, D=2, R=5, c=4):
        self.R1 = R        # near radius
        self.R2 = R * c    # far radius (approximation factor c)
        self.c = c
        tuning = LSHParameterTuning(R, c)
        self.rho = tuning.rho
        self.p1 = tuning.p1
        self.p2 = tuning.p2
        self.L = L         # number of tables
        self.M = M         # key length: number of LSH functions concatenated
        self.D = D         # dimensionality of the data
        self.hashfamily = []       # L key functions, one per table
        self.hashtable = [dict()]  # placeholder; rebuilt by BuildHashIndex

    def GFun(self, W):
        """Return g(x) = (h1(x), ..., hM(x)): a tuple key concatenating M
        independent 2-stable LSH functions of bucket width W."""
        funcs = [LSHEuclideanFun(self.D, W) for _ in range(self.M)]
        return lambda point: tuple(h(point) for h in funcs)

    def GFamily(self, W):
        """Create the family [g1, g2, ..., gL], one key function per table."""
        self.hashfamily = [self.GFun(W) for _ in range(self.L)]

    def AddPoint(self, index):
        """Insert data point `index` into every table, hashing it with the
        table's own key function."""
        # Traverse function/table pairs in lockstep so each point index is
        # filed under the right key in the right table.
        for gfun, table in zip(self.hashfamily, self.hashtable):
            key = gfun(self.data[index])
            # setdefault replaces the Python-2-only dict.has_key idiom.
            table.setdefault(key, []).append(index)

    def BuildHashIndex(self, data, W):
        """Build the index over `data` (a list of equal-length lists).

        W is the bucket width of the E2LSH functions. L and M are sized
        from n = len(data) using the tuned (rho, p2) parameters.
        """
        if len(data) == 0:
            return
        n = len(data)
        # Both L and M are clamped to >= 1: M == 0 would make every key the
        # empty tuple, collapsing all points into a single bucket.
        self.L = max(1, int(math.ceil(math.pow(n, self.rho))))
        self.M = max(1, int(math.floor(math.log(n) / math.log(1 / self.p2))))
        self.data = data
        self.hashtable = [dict() for _ in range(self.L)]
        self.D = len(data[0])
        self.GFamily(W)
        for i in range(n):
            self.AddPoint(i)

    def QueryIndex(self, query):
        """Return (index, point) of the nearest candidate to `query`.

        `query` must have the same dimensionality as the indexed data.
        Returns None when the dimensionality mismatches or no candidate
        falls into any probed bucket.
        """
        if len(query) != self.D:
            return None
        candidates = []
        ccount = 0
        for gfun, table in zip(self.hashfamily, self.hashtable):
            bucket = table.get(gfun(query))  # Python-3-safe lookup (no has_key)
            if bucket is not None:
                candidates.extend(bucket)
                ccount += len(bucket)
                # Standard E2LSH early stop after collecting ~2L candidates.
                if ccount >= 2 * self.L:
                    break
        candidates = list(set(candidates))  # remove duplicates
        if not candidates:
            # Previously this fell through to np.argmin([]) and raised.
            return None
        points = [np.array(self.data[i]) for i in candidates]
        q = np.array(query)
        dists = [distance.euclidean(q, p) for p in points]
        nni = int(np.argmin(dists))
        return (candidates[nni], points[nni])
            

        
        
