#!/usr/bin/python

# TODO:
# -- add rel-based selection for training?

import os,time
import numpy
from collections import Counter
from pylab import *
import tables,scipy
from scipy import fftpack
from scipy.cluster import vq
from scipy.ndimage import morphology,filters,interpolation
from ocrolib import docproc,improc
import pyflann
import random
import tables
import multiprocessing
import fcntl
from pdb import pm
from collections import Counter
from llpy.sutils import *

import argparse

# Command-line interface.  The resulting `args` namespace is read by the
# rest of the script (data/protos/output paths and tuning parameters).
arg_parser = argparse.ArgumentParser(description="Compute local linear models.")
arg_parser.add_argument("data", help="data file")
arg_parser.add_argument("-p", "--protos", help="prototypes file")
arg_parser.add_argument("-o", "--output", help="output file")
arg_parser.add_argument("-k", "--neighbors", type=int, default=3000,
                        help="number of candidate neighbors")
arg_parser.add_argument("-N", "--nsamples", type=int, default=2000000000,
                        help="number of samples")
arg_parser.add_argument("--min_train", type=int, default=500,
                        help="min # training samples regardless of impurity")
arg_parser.add_argument("--logreg_l2", type=float, default=1.0,
                        help="l2 regularization parameter")
arg_parser.add_argument("--csize", type=int, default=100000,
                        help="chunk size for neighborhoods")
arg_parser.add_argument("--testprotos", type=int, default=1000000000,
                        help="max # protos")
arg_parser.add_argument("-Q", "--par", type=int, default=multiprocessing.cpu_count(),
                        help="parallelism")
args = arg_parser.parse_args()

# Load the class labels for the training samples (capped at --nsamples).
# `classes` is kept in memory; the patches themselves are re-read from the
# HDF5 file later, chunk by chunk, inside the worker processes.
with tables.openFile(args.data) as db:
    nsamples = min(args.nsamples,len(db.root.classes))
    classes = array(db.root.classes[:nsamples])
    print "nsamples",nsamples

# Load the prototype patches and their classes (capped at --testprotos).
# NOTE(review): `minimum` presumably comes from the pylab star import
# (numpy.minimum); for two scalars it behaves like min().
with tables.openFile(args.protos) as pdb:
    nprotos = minimum(args.testprotos,len(pdb.root.classes))
    protos = array(pdb.root.patches[:nprotos],'f')
    pclasses = array(pdb.root.classes[:nprotos],'i')
    print "nprotos",nprotos

def batch_neighbors(job):
    print job
    start,end = job
    with tables.openFile(args.data) as db:
        batch = array(db.root.patches[start:end],'f')
        with NNIndex() as nn:
            ns,ds = nn.nn(make2d(batch),make2d(protos),num_neighbors=args.neighbors)
    ns += start
    return (ns,ds)

# Compute, for every prototype, its candidate nearest neighbors among the
# training samples.  The work is chunked over the sample range and fanned
# out to a process pool; partial (indices, distances) results are merged
# incrementally with nn_merge_dists.
neighbors,dists = (None,None)
# BUGFIX: create the pool *before* entering the try block.  Previously a
# failure in multiprocessing.Pool() would reach the finally clause with
# `pool` unbound, raising a NameError that masked the original error.
pool = multiprocessing.Pool(args.par)
try:
    for ns,ds in pool.imap(batch_neighbors,chunks(nsamples,args.csize)):
        neighbors,dists = nn_merge_dists(neighbors,dists,ns,ds)
finally:
    pool.terminate()
    del pool

import mlinear

def train_proto(proto):
    # Train a local linear classifier on the neighborhood of prototype
    # `proto`.  Reads the globals `neighbors`, `dists`, `classes`, `args`.
    # Returns (proto, classifier_or_None, training_error_or_None, r, r2),
    # where r is the distance to the farthest candidate neighbor and r2
    # the distance at the 75th percentile of the neighbor list.
    with tables.openFile(args.data) as db:
        # gather the patches and labels of this prototype's neighbors
        data = array([db.root.patches[i] for i in neighbors[proto]],'f')
        clss = array(classes[neighbors[proto]],'i')
    r = dists[proto,-1]  # radius of the full candidate neighborhood
    r2 = dists[proto,int(dists.shape[1]*0.75)]  # 75th-percentile radius
    hist = array(Counter(clss).most_common(100),'i')
    # impurity of the neighborhood: fraction not in the majority class
    err = 1.0-hist[0,1]*1.0/sum(hist[:,1])
    # keep only samples whose class occurs >20 times in the neighborhood
    # (drops rare classes; per the original comment, removes "_")
    good = in1d(clss,hist[:,0][hist[:,1]>20]) # remove "_"
    data = data[good]
    clss = clss[good]
    # recompute the histogram on the filtered samples (err above is
    # deliberately computed on the unfiltered histogram)
    hist = array(Counter(clss).most_common(100),'i')
    cnames = " ".join([udecode(c) for c in hist[:,0]])
    if err<0.05:
        # nearly pure neighborhood: no local classifier needed
        print "proto %6d err %.4f nerr ------ r %8.3f hist %s"%(proto,err,r,list(hist[:,1])),cnames
        return (proto,None,None,r,r2)
    lpc = mlinear.LinPcaClassifier()
    lpc.train(data,clss)
    pred = lpc.classify(data)
    # resubstitution (training-set) error of the local classifier
    nerr = sum(pred!=clss)*1.0/len(clss)
    print "proto %6d err %.4f nerr %.4f r %8.3f hist %s"%(proto,err,nerr,r,list(hist[:,1])),cnames
    return (proto,lpc,nerr,r,r2)

# Train a local classifier for every prototype in parallel and collect the
# results keyed by prototype index.
lpcs = {}  # proto -> trained classifier (None for nearly-pure neighborhoods)
rs = {}    # proto -> full candidate-neighborhood radius
r2s = {}   # proto -> 75th-percentile neighborhood radius

# BUGFIX: create the pool *before* entering the try block.  Previously a
# failure in multiprocessing.Pool() would reach the finally clause with
# `pool` unbound, raising a NameError that masked the original error.
pool = multiprocessing.Pool(args.par)
try:
    for proto,lpc,nerr,r,r2 in pool.imap(train_proto,range(nprotos)):
        lpcs[proto] = lpc
        rs[proto] = r
        r2s[proto] = r2
finally:
    pool.terminate()
    del pool

print "saving"
with tables.openFile(args.output,"w") as odb:
    with tables.openFile(args.protos) as pdb:
        table_copy(pdb,odb)
        table_lcopy(pdb,odb)
    table_log(odb,"%s %s"%(sys.argv,time.asctime()))
    odb.createEArray(odb.root,"rs",tables.Float32Atom(),shape=(0,),filters=tables.Filters(9))
    odb.createEArray(odb.root,"r2s",tables.Float32Atom(),shape=(0,),filters=tables.Filters(9))
    odb.createVLArray(odb.root,'lpcs',tables.ObjectAtom(),filters=tables.Filters(9))
    for proto in range(nprotos):
        odb.root.lpcs.append([lpcs.get(proto)])
        odb.root.rs.append([rs[proto]])
        odb.root.r2s.append([r2s[proto]])
print "done"        
            
