#!/usr/bin/python

import os,time
import numpy
from collections import Counter
from pylab import *
import tables,scipy
from scipy import fftpack
from scipy.cluster import vq
from scipy.ndimage import morphology,filters,interpolation
from ocrolib import docproc,improc
import pyflann
import random
import tables
import multiprocessing
import fcntl
from pdb import pm
import variants
from collections import Counter
from llpy.sutils import *

import argparse

# Command-line interface.  All tuning knobs for the kernel / local-linear
# model computation are options; `data` is the only positional argument.
parser = argparse.ArgumentParser(description = "Compute local linear models.")
parser.add_argument('data',help="data file")
parser.add_argument('-p','--protos',default="protos.h5",help="prototypes file")
parser.add_argument('-o','--output',default="kernel.cmodel",help='output file')
parser.add_argument("-N","--nsamples",default=2000000000,type=int,help="number of samples")
parser.add_argument("-s","--sigma",default=1.0,type=float,help="kernel width")
parser.add_argument("-Q","--par",default=multiprocessing.cpu_count(),type=int,help="parallelism")
parser.add_argument("-n","--neighbors",default=5000,type=int,help="number of neighbors")
parser.add_argument('--logreg_l2',default=1.0,type=float,help="l2 regularization parameter")
parser.add_argument("--csize",default=100000,type=int,help="chunk size for neighborhoods")
parser.add_argument("--testprotos",default=1000000000,type=int,help="max # protos")
parser.add_argument("--nologistic",action="store_true",help="use linear least squares instead of logistic regression")
# Parse the real command line.  (A hard-coded debugging command line was
# previously substituted here, which silently ignored all actual arguments.)
args = parser.parse_args()

# Load the prototype patches and their class labels from the protos HDF5
# file.  --testprotos caps how many prototypes are used (handy for quick
# test runs; the default cap is effectively "all of them").
with tables.openFile(args.protos) as pdb:
    nprotos = minimum(args.testprotos,len(pdb.root.classes))
    protos = array(pdb.root.patches[:nprotos],'f')  # float32 prototype patches
    # protos = protos[::10]
    nprotos = len(protos)  # actual count after slicing
    pclasses = array(pdb.root.classes[:nprotos],'i')  # int32 label per prototype
    print "nprotos",nprotos

db = tables.openFile(args.data)
nsamples = min(args.nsamples,len(db.root.classes))
print "nsamples",nsamples
classes = array(db.root.classes[:nsamples])
clist = sorted(list(set(classes)))
nclasses = len(clist)
ctrans = dict([(v,k) for k,v in enumerate(clist)])
print "nclasses",nclasses

nprotos += 1 # add 1 for constant
covariance = zeros((nprotos,nprotos),'f')
hebbian = zeros((nprotos,nclasses),'f')

from scipy.spatial import distance

def covheb(samples,classes):
    for i in range(nsamples):
        if i%100==0: print "===",i,hi.mean(),lo.mean(),nz.mean()
        v = db.root.patches[i].ravel()
        d = distance.cdist(array([v]),make2d(protos)).ravel()
        w = concatenate([[1.0],exp(-d**2/s)])
        threshold = amax(w)*1e-2
        hi += amax(w[1:])
        lo += amin(w[1:])
        nz += sum(w>threshold)
        for j in range(len(w)):
            # if w[j]<threshold: continue
            covariance[j] += w[j]*w
        hebbian[:,ctrans[classes[i]]] += w

def einv(m,eps=1e-4):
    """Regularized inverse of a symmetric matrix via eigendecomposition.

    Eigenvalues with magnitude below `eps` are treated as exactly zero
    (their reciprocal is dropped), so this behaves like a pseudo-inverse
    for near-singular matrices.
    """
    vals,vecs = eigh(m)
    inv_vals = where(abs(vals)<eps,0.0,1.0/vals)
    # V diag(inv_vals) V^T, using broadcasting over columns of V
    return dot(vecs*inv_vals,vecs.T)

def sigkernel(data,protos,sigma):
    """Map `data` to Gaussian kernel features relative to `protos`.

    Returns one row per sample: a leading constant-1 column (bias term)
    followed by exp(-dist^2/(2*sigma)) for the distance from the sample
    to each prototype.  NOTE(review): `sigma` is used as a variance-like
    scale here (it is not squared) -- matches the original code.
    """
    dists = distance.cdist(make2d(data),make2d(protos))
    kernels = exp(-dists**2/2.0/sigma)
    # prepend the constant feature column
    return c_[ones(len(kernels)),kernels]

import mlinear
reload(mlinear)

tdb = tables.openFile("data/testing.h5")

for sigma in [50.0,60.0,70.0,80.0,90.0,100.0]:
    for linear in [1,0]:
        print "computing sigmas",; sys.stdout.flush()
        trans = sigkernel(db.root.patches[:10000],protos,sigma)
        print "training",; sys.stdout.flush()
        lc = mlinear.LinClassifier()
        lc.train(trans,db.root.classes[:10000],linear=linear)
        print "testing",; sys.stdout.flush()
        trans = sigkernel(tdb.root.patches[:10000],protos,sigma)
        pred = lc.classify(trans)
        print
        print "result",sigma,linear,sum(pred!=tdb.root.classes[:10000]),len(pred)

