#!/usr/bin/python

import os,time
import numpy
from collections import Counter
from pylab import *
import tables,scipy
from scipy import fftpack
from scipy.cluster import vq
from scipy.ndimage import morphology,filters,interpolation
from ocrolib import docproc,improc
import pyflann
import random
import tables
import multiprocessing
import fcntl
from pdb import pm
import variants
import sutils
reload(sutils)
from collections import Counter
from sutils import *
from scipy.spatial import distance
import mlinear

import argparse
# Command-line interface.  Defaults assume the DATA/ directory layout
# shared by the other training scripts in this project.
parser = argparse.ArgumentParser(description = "Compute local linear models.")
# input/output files
parser.add_argument('-d','--data',default="DATA/training.h5",help="data file")
parser.add_argument('-p','--protos',default="DATA/protos032a.h5",help="prototypes file")
parser.add_argument('-o','--output',default="kernel.cmodel",help='output file')

# training-set size, kernel width, and learning-rate schedule
parser.add_argument("-N","--nsamples",default=99999999,type=int,help="number of samples")
# NOTE(review): help string duplicates -N's; presumably "initial number of samples" — confirm
parser.add_argument("-i","--initial",default=20000,type=int,help="number of samples")
parser.add_argument("-r","--epochs",default=10,type=int,help="number of epochs")
parser.add_argument("-s","--sigma",default=10.0,type=float,help="kernel width")
parser.add_argument("-e","--eta",default=0.03,type=float,help="learning rate (start)")
parser.add_argument("-E","--eta1",default=0.001,type=float,help="learning rate (end)")

# held-out evaluation set
parser.add_argument('-t','--test',default="DATA/testing.h5",help="data file")
parser.add_argument("-T",'--ntest',default=1000,type=int)

parser.add_argument('-v','--verbosity',default=1,type=int,help="verbosity")
parser.add_argument("-n","--neighbors",default=5000,type=int,help="number of neighbors")
parser.add_argument('--logreg_l2',default=1.0,type=float,help="l2 regularization parameter")
parser.add_argument("--testprotos",default=1000000000,type=int,help="max # protos")
parser.add_argument("--nologistic",action="store_true",help="use linear least squares instead of logistic regression")
# cmdline = "-N 5000 -r 3 -s 5.0 -e 0.1 -E 0.001".split()

parser.add_argument("--csize",default=1000,type=int,help="chunk size for neighborhoods")
parser.add_argument("-Q","--par",default=multiprocessing.cpu_count(),type=int,help="parallelism")

# parse sys.argv at import time (this file is a script, not a library)
args = parser.parse_args()

# Load the prototype patches and their class labels from the HDF5 protos
# file, capped at --testprotos entries.
with tables.openFile(args.protos) as pdb:
    nprotos = minimum(args.testprotos,len(pdb.root.classes))
    protos = array(pdb.root.patches[:nprotos],'f')    # float32 prototype patch vectors
    nprotos = len(protos)                             # actual count after slicing
    pclasses = array(pdb.root.classes[:nprotos],'i')  # int class label per prototype
    print "nprotos",nprotos

def sigmoid(x):
    """Numerically stable logistic function.

    The argument is clamped to [-20, 20] before exponentiation so that
    exp() cannot overflow; beyond that range the output saturates.
    """
    z = clip(x,-20,20)
    return 1/(1+exp(-z))

def sigkernel(v,protos,sigma):
    """Gaussian kernel feature vector of sample v relative to the prototypes.

    Computes the Euclidean distance from v to every prototype row, maps each
    distance d through exp(-d**2/2/sigma), and prepends a constant 1 as a
    bias component.  Returns a 1D vector of length len(protos)+1.
    """
    ds = distance.cdist(make2d([v.ravel()]),make2d(protos)).ravel()
    ks = exp(-ds**2/2.0/sigma)
    return concatenate([[1],ks])

print "loading data"

# Scan the training labels and build a dense relabeling: ctrans maps each
# raw class label to an index in 0..nclasses-1 (ordered by sorted raw label).
with tables.openFile(args.data) as db:
    nsamples = min(args.nsamples,len(db.root.classes))
    print "nsamples",nsamples
    raw_classes = array(db.root.classes[:nsamples])
    clist = sorted(list(set(raw_classes)))              # distinct raw labels, sorted
    nclasses = len(clist)
    ctrans = dict([(v,k) for k,v in enumerate(clist)])  # raw label -> dense index
    classes = array([ctrans[c] for c in raw_classes])   # relabeled training classes

print "loading test cases"

# Precompute kernel feature vectors and dense class indices for the first
# --ntest samples of the held-out test file.
# NOTE(review): a test label absent from the training set would raise
# KeyError in ctrans — confirm test labels are a subset of training labels.
with tables.openFile(args.test) as tdb:
    tdata = array([sigkernel(tdb.root.patches[i].ravel(),protos,args.sigma) for i in range(args.ntest)])
    tclss = array([ctrans[tdb.root.classes[i]] for i in range(args.ntest)])

print "computing initial model"

def process(job):
    """Train one local linear classifier on a random subsample.

    Draws 10000 random training indexes, computes their kernel feature
    vectors against the global prototypes, fits an mlinear.LinPcaClassifier
    on them, prints the training error, and returns the fitted classifier.
    `job` is used only for progress printing.
    """
    print job
    indexes = sorted(random.sample(range(nsamples),10000))
    with tables.openFile(args.data) as db:
        # overlap loading and distance computations to make this more efficient
        data = array([sigkernel(db.root.patches[i].ravel(),protos,args.sigma) for i in indexes])
        clss = array([classes[i] for i in indexes])
    lpc = mlinear.LinPcaClassifier()
    lpc.train(data,clss,k=50,classlist=clist,linear=1)
    pred = lpc.classify(data)
    # training-set error rate of this component model (fraction misclassified)
    print job,"error",sum(pred!=clss)*1.0/len(pred)
    return lpc

idata = []     # NOTE(review): unused in this section — possibly leftover
iclasses = []  # NOTE(review): unused in this section — possibly leftover
result = None  # running average of the per-job linear models
count = 0.0
# Train 200 subsample models (poolmap presumably fans process() out over
# worker processes) and average their parameter arrays R and r
# incrementally, so after k models result holds their running mean.
# NOTE(review): par is hard-coded to 4 even though -Q/--par exists — confirm.
for lpc in poolmap(process,range(200),par=4):
    count += 1.0
    if result is None:
        result = lpc
    else:
        # diagnostic: how far the new component is from the current average
        print "maxdelta",amax(abs(result.R-lpc.R)),"avgdelta",mean(abs(result.R-lpc.R))
        # incremental-mean weight: mean_k = mean_{k-1}*(1-1/k) + x_k/k
        l = 1.0/count
        result.R = result.R * (1.0-l) + lpc.R * l
        result.r = result.r * (1.0-l) + lpc.r * l
    # NOTE(review): this reports the test-set error count of the individual
    # model lpc, not of the running average `result` — confirm intended.
    print "===",count,sum(lpc.classify(tdata)!=tclss)



    
