import scipy as spy
import math
import numpy as np
import scipy.optimize as opt

class Fit:
    """Base class for distribution fitting.

    Stores the raw observations and their sorted unique values, which
    subclasses use as the support grid for fitted pdf/ccdf curves.
    """

    def __init__(self, data):
        # Raw observations (any iterable of numbers).
        self.data = data
        # Sorted, de-duplicated values.
        self.unique = sorted(set(self.data))

    def fit(self):
        """Placeholder; subclasses override with an actual fit."""
        # Python 3 print function (was a Py2 print statement).
        print('Fit.fit()')

class PowerLawFit(Fit):
    """Discrete power-law fit: MLE for alpha with a KS scan over xmin.

    For each candidate xmin the tail z = {x >= xmin} is kept, alpha is
    estimated by the continuous MLE  alpha = 1 + n / sum(log(z/xmin)),
    and the Kolmogorov-Smirnov distance between the empirical and the
    theoretical CCDF is recorded.  The xmin with the smallest KS
    statistic wins (Clauset/Shalizi/Newman style selection).
    """

    def fit(self, t1=1, t2=1000):
        """Scan xmin over [t1, t2); return (alpha, xmin) of the best fit.

        Also stores self.alpha, self.xmin and self.alphaerr, the MLE
        standard error (alpha - 1) / sqrt(n).

        Fixes vs original: Python 3 print(); np.log instead of the
        removed scipy.log alias; z / xmin is true division (under
        Python 2 integer arrays floor-divided, corrupting the MLE).
        """
        print('PowerLawFit.fit()')
        y = np.sort(np.array(self.data))
        res = []
        for xmin in range(t1, t2):
            # Keep only the tail at or above the candidate xmin.
            z = y[y >= xmin]
            n = len(z)
            if n < 50:
                # Too few tail points for a meaningful estimate; larger
                # xmin values would only have fewer, so stop scanning.
                break

            # MLE estimate of the exponent and its standard error.
            alpha = 1 + n / np.sum(np.log(z / xmin))
            sigma = (alpha - 1) / np.sqrt(n)

            # KS statistic: empirical CDF vs theoretical CDF, keeping
            # the first occurrence of each distinct value.
            cx = np.arange(n, dtype='float') / float(n)
            last = -1
            vv = []
            ccx = []
            for a, b in zip(z, cx):
                if a != last:
                    last = a
                    ccx.append(b)
                    vv.append(a)
            cx = np.array(ccx)
            vv = np.array(vv)
            cf = 1 - (vv / xmin) ** (-alpha + 1)
            ks = max(abs(cf - cx))

            print(xmin, alpha, ks)
            res.append((ks, alpha, xmin, sigma))

        # Smallest KS distance first; its (alpha, xmin, sigma) wins.
        res.sort()
        alpha, xmin, sigma = res[0][1:]
        print('alpha %f +/- %f, xmin = %f' % (alpha, sigma, xmin))
        self.alpha = alpha
        self.xmin = xmin
        self.alphaerr = sigma
        return alpha, xmin

    def fitted_pdf(self):
        """Return (x, pdf) of the fitted power law on the unique data
        values >= xmin (continuous-pdf normalization).

        Py3 fix: filter() returns an iterator, so np.array(filter(...))
        no longer works; use a list comprehension instead.
        """
        q1 = np.array([v for v in self.unique if v >= self.xmin],
                      dtype='float')
        fitted = (self.alpha - 1) / self.xmin * (q1 / self.xmin) ** (-self.alpha)
        return q1, np.array(fitted)

    def fitted_ccdf(self):
        """Return (x, ccdf) of the fitted power law on the unique data
        values >= xmin."""
        q1 = np.array([v for v in self.unique if v >= self.xmin],
                      dtype='float')
        fitted = np.array((q1 / self.xmin) ** (-self.alpha + 1))
        return q1, fitted

class LogNormalFit(Fit):
    """Log-normal fit: mu and sigma are the mean and (biased) standard
    deviation of log(data)."""

    def fit(self):
        """Estimate and store (mu, sigma) of log(data); return (sigma, mu).

        Fixes vs original: Python 3 print(); np.log instead of the
        removed scipy.log alias; unused locals x/logx dropped.
        """
        print('LogNormalFit.fit()')
        y = np.array(self.data)
        logy = np.log(y)

        # Maximum-likelihood estimates: sample mean and biased sample
        # standard deviation of the log-values.
        mu = sum(logy) / len(logy)
        sigma = (sum((i - mu) ** 2 for i in logy) / len(logy)) ** 0.5

        print('mu %f, sigma %f' % (mu, sigma))
        self.mu = mu
        self.sigma = sigma
        return sigma, mu

    def fitted_pdf(self):
        """Return (x, pdf) of the fitted log-normal on the integer grid
        [min(data), max(data)), renormalized to sum to 1."""
        x = np.array(range(int(min(self.data)), int(max(self.data))),
                     dtype='float')
        mu = self.mu
        sigma = self.sigma
        fitted = np.exp(-(np.log(x) - mu) ** 2 / (2 * sigma ** 2)) / (
            x * sigma * np.sqrt(2 * np.pi))
        # Normalize over the discrete grid so the values sum to 1.
        fitted = fitted / sum(fitted)
        return x, fitted

    def fitted_ccdf(self):
        """Return (x, ccdf) obtained by cumulating the fitted pdf."""
        x, p = self.fitted_pdf()
        c = [0.0]
        for v in p:
            c.append(c[-1] + v)
        print(len(c), len(x))
        # Drop the trailing cumulative point so lengths match x.
        c = c[:len(x)]
        tot = c[-1]
        print('lognormal tot ', tot)
        c = [1.0 - i / tot for i in c]
        return x, c

LOGFIT = True  # when True, least-squares fitters minimize residuals in log-space (evens out tail weighting)
class Fitter:
    """Base class for parametric model fitters.

    Builds a normalized empirical histogram (xdata, ydata) from the raw
    values unless explicit x/y sequences are supplied.
    """

    def __init__(self, values, x=None, y=None):
        self.values = values
        if not x:
            # Empirical pmf: frequency of each distinct value,
            # normalized to sum to 1.
            hist = {}
            for v in values:
                hist[v] = hist.get(v, 0) + 1
            x = sorted(hist)
            y = [hist[k] for k in x]
            t = sum(y)
            y = [float(i) / t for i in y]
        self.xdata = x
        self.ydata = y

    def name(self):
        """Human-readable model name; overridden by subclasses."""
        return 'Fitter'

    def fit(self):
        """Estimate model parameters; overridden by subclasses."""
        pass

    def fit_func(self, x):
        """Fitted density at x; overridden by subclasses."""
        pass

    def get_params(self):
        """Fitted parameter tuple; overridden by subclasses."""
        pass

    def get_fit(self):
        """Return (xdata, fitted values at each xdata point).

        Py3 fix: map() returns a one-shot iterator, so materialize it
        with list() for callers that index or iterate repeatedly.
        """
        return self.xdata, list(map(self.fit_func, self.xdata))

    def get_fit_lbl(self, var):
        """Plot label for the fitted curve; overridden by subclasses."""
        return str(var)

    def get_loglikelihood(self):
        """Sum of log(fit_func) over the raw values (log-likelihood)."""
        if min(self.values) <= 0:
            # log of the density blows up for non-positive data.
            print('ERROR!')
        return sum(math.log(self.fit_func(x)) for x in self.values)

class LogNormalFitter(Fitter):
    """Log-normal model: parameters are the mean and std of log(values)."""

    def fit(self):
        """MLE: mu (avg) and sigma are the moments of the log-values."""
        self.n = len(self.values)
        log_values = tuple(math.log(x) for x in self.values)
        avg = float(sum(log_values)) / self.n
        avg2 = float(sum(x ** 2 for x in log_values)) / self.n

        self.avg = avg
        # var = E[X^2] - E[X]^2 on the log-values (biased estimator).
        self.var = avg2 - avg ** 2
        self.sigma = self.var ** 0.5

    def name(self):
        return 'LogNormal'

    def get_params(self):
        return (self.avg, self.sigma)

    def fit_func(self, x):
        """Log-normal pdf at x.

        BUG FIX: the Gaussian exponent must divide by (2*sigma^2); the
        original `/ 2.0*self.sigma**2` divided by 2 and then
        *multiplied* by sigma^2 (operator precedence), producing a
        wrong density for any sigma != 1.
        """
        num = math.exp(-(math.log(x) - self.avg) ** 2 / (2.0 * self.sigma ** 2))
        den = x * self.sigma * math.sqrt(2.0 * math.pi)
        return num / den

    def get_fit_lbl(self, var):
        return r"$\mu = %f - \sigma = %f$" % (self.avg, self.sigma)

class ExponentialFitter(Fitter):
    """Exponential model p(x) = alpha * exp(-alpha * x)."""

    def fit(self):
        """MLE: the rate alpha is the reciprocal of the sample mean."""
        self.n = len(self.values)
        mean = float(sum(self.values)) / self.n
        self.alpha = 1.0 / mean

    def get_params(self):
        return self.alpha

    def name(self):
        return 'Exponential'

    def fit_func(self, x):
        """Exponential density at x."""
        return self.alpha * math.exp(-x * self.alpha)

    def get_fit_lbl(self, var):
        return r"$%.2f e^{-%s/%.2f}$" % (self.alpha, var, 1.0 / self.alpha)

class PowerLawFitter(Fitter):
    """Power-law model p(x) = C * x^-alpha, fitted by least squares."""

    def fit(self):
        """Least-squares fit of (C, alpha) on the empirical pmf.

        With LOGFIT the residuals are taken in log-space, which weights
        the tail more evenly.  After the fit, C is re-anchored so the
        curve passes exactly through the first empirical point.
        """
        self.n = len(self.values)
        self.m = min(self.values)
        x = np.array(self.xdata)
        y = np.array(self.ydata)
        fitfunc = lambda p, x: p[0] * (x) ** (-p[1])
        if LOGFIT:
            errfunc = lambda p, x, y: np.log(y) - np.log(fitfunc(p, x))
        else:
            errfunc = lambda p, x, y: y - fitfunc(p, x)
        pinit = [1.0, 2.0]
        out = opt.leastsq(errfunc, pinit, args=(x, y), xtol=1e-12)
        (self.C, self.alpha) = out[0]
        # Anchor the prefactor on the first empirical point.
        self.C = self.ydata[0] * (self.xdata[0] ** self.alpha)

    def get_params(self):
        return self.C, self.alpha

    def name(self):
        return 'Power-law'

    def fit_func(self, x):
        """Power-law density at x.

        Dead code removed: the original computed an analytic
        normalization `norm * x**-alpha` and immediately discarded it.
        """
        return self.C * x ** (-self.alpha)

    def get_fit_lbl(self, var):
        return r"$%f %s^{-%.2f}$" % (self.C, var, self.alpha)

class ShiftedPowerLawFitter(Fitter):
    """Shifted power-law model p(x) = C * (x + xmin)^-alpha."""

    def fit(self):
        """Least-squares fit of (C, xmin, alpha) on the empirical pmf."""
        self.n = len(self.values)
        self.m = min(self.values)
        xs = np.array(self.xdata)
        ys = np.array(self.ydata)

        def model(p, x):
            return p[0] * (x + p[1]) ** (-p[2])

        if LOGFIT:
            residuals = lambda p, x, y: np.log(y) - np.log(model(p, x))
        else:
            residuals = lambda p, x, y: y - model(p, x)
        start = [1.0, 1.0, 1.0]
        result = opt.leastsq(residuals, start, args=(xs, ys), xtol=1e-12)
        self.C, self.xmin, self.alpha = result[0]

    def get_params(self):
        return self.C, self.xmin, self.alpha

    def name(self):
        return 'Shifted power-law'

    def fit_func(self, x):
        """Shifted power-law density, floored at 1e-12 for log-safety."""
        value = self.C * (x + self.xmin) ** (-self.alpha)
        return max(1e-12, value)

    def get_fit_lbl(self, var):
        return r"$(%s+%f)^{-%.2f}$" % (var, self.xmin, self.alpha)

class TruncatedPowerLawFitter(Fitter):
    """Truncated power-law model p(x) = C * x^-alpha * exp(-x/beta)."""

    def fit(self):
        """Least-squares fit of (C, alpha, rate) on the empirical pmf."""
        xs = np.array(self.xdata)
        ys = np.array(self.ydata)

        def model(p, x):
            return p[0] * x ** (-p[1]) * np.exp(-x * p[2])

        if LOGFIT:
            residuals = lambda p, x, y: np.log(y) - np.log(model(p, x))
        else:
            residuals = lambda p, x, y: y - model(p, x)
        start = [1.0, 1.0, 0.001]
        result = opt.leastsq(residuals, start, args=(xs, ys), xtol=1e-12)
        self.C, self.alpha, rate = result[0]
        # Stored as a cutoff scale (beta) rather than a rate.
        self.beta = 1.0 / rate

    def get_params(self):
        return self.C, self.alpha, self.beta

    def name(self):
        return 'Truncated power-law'

    def fit_func(self, x):
        """Density at x; degenerates to a pure power law when beta is falsy."""
        if self.beta:
            value = self.C * (x ** (-self.alpha)) * math.exp(-x / self.beta)
        else:
            value = self.C * (x ** (-self.alpha))
        return max(value, 1e-12)

    def get_fit_lbl(self, var):
        return r"$%s^{-%.2f} e^{-%s/%.2f}$" % (var, self.alpha, var, self.beta)

class ShiftedExponential(Fitter):
    """Stretched-exponential model C * x^(beta-1) * exp(-lambd * x^beta)."""

    def fit(self):
        """Least-squares fit of (C, beta, lambd) on the empirical pmf."""
        xs = np.array(self.xdata)
        ys = np.array(self.ydata)

        def model(p, x):
            return p[0] * x ** (p[1] - 1) * np.exp(-p[2] * (x ** p[1]))

        if LOGFIT:
            residuals = lambda p, x, y: np.log(y) - np.log(model(p, x))
        else:
            residuals = lambda p, x, y: y - model(p, x)
        start = [1.0, 1.0, 0.001]
        result = opt.leastsq(residuals, start, args=(xs, ys), xtol=1e-12)
        self.C, self.beta, self.lambd = result[0]

    def get_params(self):
        return self.C, self.beta, self.lambd

    def name(self):
        return 'Shifted exponential'

    def fit_func(self, x):
        """Density at x, floored at 1e-12 for log-safety."""
        value = self.C * (x ** (self.beta - 1)) * math.exp(-self.lambd * (x ** self.beta))
        return max(value, 1e-12)

    def get_fit_lbl(self, var):
        return r"$%s^{%.2f-1} e^{-%f%s^{%.2f}}$" % (var, self.beta, self.lambd, var, self.beta)

class ModelSelector:
    """Fits every candidate model and ranks them by log-likelihood."""

    def __init__(self, values, x=None, y=None):
        # One instance of each candidate model, sharing the same data.
        self.fitters = (
            ExponentialFitter(values, x=x, y=y),
            ShiftedExponential(values, x=x, y=y),
            PowerLawFitter(values, x=x, y=y),
            TruncatedPowerLawFitter(values, x=x, y=y),
            ShiftedPowerLawFitter(values, x=x, y=y),
            LogNormalFitter(values, x=x, y=y)
        )

    def data(self):
        """Empirical (x, y) shared by all fitters."""
        first = self.fitters[0]
        return first.xdata, first.ydata

    def get_models(self):
        """Fit all models; return dicts sorted by descending log-likelihood."""
        results = []
        for fitter in self.fitters:
            fitter.fit()
            results.append((
                fitter.get_loglikelihood(),
                fitter.get_fit_lbl('x'),
                fitter.name(),
                fitter.get_fit(),
                fitter.get_params(),
            ))
        # Best (largest log-likelihood) first.
        results.sort()
        results.reverse()
        keys = ('Log-likelihood', 'Label', 'Name', 'Fit', 'Params')
        return [dict(zip(keys, result)) for result in results]



if __name__ == '__main__':
    # Smoke test: fit integer-truncated Pareto samples and compare
    # against the reference plfit implementation.
    a = 2.0
    import pylab as plt
    import random

    # Pareto variates with exponent `a`, truncated to integers.
    values = [int(random.paretovariate(a)) for i in range(10000)]
    print(min(values), max(values))

    p1 = PowerLawFit(values)
    alpha, xmin = p1.fit()
    print(alpha, xmin)

    # Cross-check with the external plfit package.
    from plfit import *
    p = plfit(values)
    p.plotcdf()
    plt.show()


