#!/usr/bin/python

import os,time
import numpy
from collections import Counter
from pylab import *
import tables,scipy
from scipy import fftpack
from scipy.cluster import vq
from scipy.ndimage import morphology,filters,interpolation
from ocrolib import docproc,improc
import pyflann
import random
import tables
import multiprocessing
import fcntl
from pdb import pm
from collections import Counter
from llpy.sutils import *
from llpy import variants

import argparse
parser = argparse.ArgumentParser(description = "Compute local linear models.")
# --- input/output files ---
parser.add_argument('-d','--data',default="data/training.h5",help="data file")
parser.add_argument('-p','--protos',default="data/protos32a.h5",help="prototypes file")
parser.add_argument('-o','--output',default="kernel.cmodel",help='output file')

# --- training hyperparameters (eta anneals linearly from --eta to --eta1) ---
parser.add_argument("-N","--nsamples",default=50000,type=int,help="number of samples")
parser.add_argument("-r","--epochs",default=10,type=int,help="number of epochs")
parser.add_argument("-s","--sigma",default=10.0,type=float,help="kernel width")
parser.add_argument("-e","--eta",default=0.03,type=float,help="learning rate (start)")
parser.add_argument("-E","--eta1",default=0.001,type=float,help="learning rate (end)")

# --- held-out evaluation data ---
parser.add_argument('-t','--test',default="data/testing.h5",help="data file")
parser.add_argument("-T",'--ntest',default=10000,type=int)

# --- storage / model-size options ---
# ftype selects how the kernel feature matrix is stored: "float",
# or the memory-compact quantized FixedPoint8 / FixedPoint16 classes below.
parser.add_argument('-f','--ftype',default="fixed8",help="floating point type for storing kernel values")
parser.add_argument('-v','--verbosity',default=1,type=int,help="verbosity")
parser.add_argument("-n","--neighbors",default=5000,type=int,help="number of neighbors")
parser.add_argument('--logreg_l2',default=1.0,type=float,help="l2 regularization parameter")
parser.add_argument("--testprotos",default=1000000000,type=int,help="max # protos")
parser.add_argument("--nologistic",action="store_true",help="use linear least squares instead of logistic regression")
# cmdline = "-N 5000 -r 3 -s 5.0 -e 0.1 -E 0.001".split()

# --- parallelism ---
parser.add_argument("--csize",default=1000,type=int,help="chunk size for neighborhoods")
parser.add_argument("-Q","--par",default=multiprocessing.cpu_count(),type=int,help="parallelism")

args = parser.parse_args()

class FixedPoint8:
    """Memory-compact array of small floats stored as int8 fixed point.

    Each value in the open interval (-1.27, 1.27) is stored as an int8
    scaled by 100, so an entry costs one byte instead of four; reads
    de-quantize back to floats (resolution 0.01, fractions truncated
    toward zero on write, matching numpy's float->int assignment).
    """
    _SCALE = 100.0  # int8 range -128..127 comfortably holds +/-127

    def __init__(self, *shape):
        # backing store of quantized values
        self.data = zeros(shape, 'int8')
        self.shape = self.data.shape

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        # de-quantize on read
        return self.data.__getitem__(index) / self._SCALE

    def __setitem__(self, index, value):
        # Explicit check instead of `assert`: asserts vanish under -O,
        # and out-of-range values would silently wrap in int8.
        if not (amin(value) > -1.27 and amax(value) < 1.27):
            raise ValueError("FixedPoint8 values must lie in (-1.27, 1.27)")
        self.data.__setitem__(index, value * self._SCALE)

class FixedPoint16:
    """Memory-compact array of small floats stored as int16 fixed point.

    Each value in the open interval (-1.09, 1.09) is stored as an int16
    scaled by 30000, halving the footprint of float32 storage; reads
    de-quantize back to floats (fractions truncated toward zero on
    write, matching numpy's float->int assignment).
    """
    _SCALE = 30000.0  # int16 range -32768..32767 comfortably holds +/-32700

    def __init__(self, *shape):
        # backing store of quantized values
        self.data = zeros(shape, 'int16')
        self.shape = self.data.shape

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        # de-quantize on read
        return self.data.__getitem__(index) / self._SCALE

    def __setitem__(self, index, value):
        # Explicit check instead of `assert`: asserts vanish under -O,
        # and out-of-range values would silently wrap in int16.
        if not (amin(value) > -1.09 and amax(value) < 1.09):
            raise ValueError("FixedPoint16 values must lie in (-1.09, 1.09)")
        self.data.__setitem__(index, value * self._SCALE)


# Load prototype patches and their class labels from the HDF5 file;
# --testprotos caps how many prototypes are used (numpy `minimum`).
with tables.openFile(args.protos) as pdb:
    nprotos = minimum(args.testprotos,len(pdb.root.classes))
    protos = array(pdb.root.patches[:nprotos],'f')
    # re-read the actual count in case the patches table is shorter
    nprotos = len(protos)
    pclasses = array(pdb.root.classes[:nprotos],'i')
    print "nprotos",nprotos

from scipy.spatial import distance

def sigmoid(x):
    """Numerically stable logistic function.

    The argument is clamped to [-20, 20] before exponentiation so that
    exp() never overflows; works elementwise on arrays as well as scalars.
    """
    z = clip(x, -20, 20)
    return 1.0 / (1.0 + exp(-z))

def sigkernel(v, protos, sigma):
    """Gaussian kernel responses of a single sample against all prototypes.

    Returns a vector of length len(protos)+1: a constant 1 bias feature
    followed by exp(-d^2 / (2*sigma)) for the Euclidean distance d from
    the flattened sample to each prototype.

    NOTE(review): sigma divides the squared distance directly (it acts
    as a variance-like width, not a standard deviation) -- confirm.
    """
    dists = distance.cdist(make2d([v.ravel()]), make2d(protos)).ravel()
    feats = exp(-dists**2 / (2.0 * sigma))
    return concatenate([[1], feats])

# Load training labels and build a dense class index: raw labels are
# remapped to contiguous 0..nclasses-1 codes via the ctrans dict.
with tables.openFile(args.data) as db:
    nsamples = min(args.nsamples,len(db.root.classes))
    print "nsamples",nsamples
    raw_classes = array(db.root.classes[:nsamples])
    clist = sorted(list(set(raw_classes)))
    nclasses = len(clist)
    # raw class label -> dense index
    ctrans = dict([(v,k) for k,v in enumerate(clist)])
    classes = [ctrans[c] for c in raw_classes]
    print "transforming data"

def process(job):
    """Pool worker: kernel-transform one chunk of training patches.

    `job` is a (start, end) row range; the worker re-opens the data file
    (each process needs its own HDF5 handle), reads those patch rows, and
    returns (start, end, features) where features has one sigkernel row
    per patch.
    """
    start, end = job
    with tables.openFile(args.data) as db:
        patches = make2d(db.root.patches[start:end])
    feats = array([sigkernel(p, protos, args.sigma) for p in patches])
    return (start, end, feats)

# need to create the pool before allocating the array, otherwise we
# run out of memory
print "par",args.par
pool = multiprocessing.Pool(args.par)

# Feature matrix: one row per sample holding the bias feature plus one
# kernel value per prototype.  The fixed-point variants trade precision
# for memory.  NOTE(review): an unrecognized --ftype leaves `data`
# unbound and the script fails in the fill loop below.
if args.ftype=="float":
    data = zeros((nsamples,nprotos+1),'f')
elif args.ftype=="fixed8":
    data = FixedPoint8(nsamples,nprotos+1)
elif args.ftype=="fixed16":
    data = FixedPoint16(nsamples,nprotos+1)

# Fill `data` chunk by chunk from the worker pool.
for start,end,block in poolmap(process,chunks(nsamples,args.csize),pool=pool):
    pprint(start,end)
    data[start:end,:] = block[:,:]

del pool

# Train one linear unit per class on top of the kernel features using
# per-sample gradient descent; the learning rate anneals linearly from
# --eta to --eta1 over the epochs.
err = MovingAverage(initial=1.0,l=1e-4)
# weight matrix and bias (the feature vector also carries a constant 1,
# so `a` is an additional explicit bias term)
A = 0.01*randn(nclasses,nprotos+1)
a = 0.01*randn(nclasses)

print "training"
etas = linspace(args.eta,args.eta1,args.epochs)
for epoch in range(args.epochs):
    if args.verbosity>=1: print "# epoch",epoch,"of",args.epochs
    eta = etas[epoch]
    stats = Stats()
    for i in range(nsamples):
        x = data[i]
        cls = classes[i]
        y = sigmoid(dot(A,x)+a)
        # track 0/1 classification error, both smoothed (err) and per-epoch (stats)
        wrong = (argmax(y)!=cls)
        err += wrong
        stats += wrong
        # one-hot target for the true class
        target = zeros(nclasses)
        target[cls] = 1
        # Squared-error gradient; with --nologistic the sigmoid-derivative
        # factor y*(1-y) is dropped (plain linear least squares update).
        if args.nologistic:
            delta_y = 2*(y-target)
        else:
            delta_y = 2*(y-target)*y*(1-y)
        A -= eta * outer(delta_y,x)
        a -= eta * delta_y
        if args.verbosity>=2 and i%10000==0: print "#",epoch,i,eta,":",err.value()
    if args.verbosity>=1: print "# epoch",epoch,"sigma",args.sigma,"error rate",stats.mean()

# Evaluate on held-out data: kernel-transform each test patch, classify
# with the trained linear model, and count misclassifications.
total = 0
errors = 0
with tables.openFile(args.test) as tdb:
    ntest = min(len(tdb.root.patches),args.ntest)
    for i in range(ntest):
        v = tdb.root.patches[i].ravel()
        x = sigkernel(v,protos,args.sigma)
        y = sigmoid(dot(A,x)+a)
        pred = argmax(y)
        total += 1
        # test labels unseen during training map to -1 and always count as errors
        if pred!=ctrans.get(tdb.root.classes[i],-1):
            errors += 1

print "llkernel result",args.protos,args.sigma,total,errors,errors*1.0/total
