#!/usr/bin/python

# TODO:
# -- add rel-based selection for training?

import os,time,traceback
import numpy
from collections import Counter
from pylab import *
import tables,scipy
from scipy import fftpack
from scipy.cluster import vq
from scipy.ndimage import morphology,filters,interpolation
from ocrolib import docproc,improc
import pyflann
import random
import tables
import multiprocessing
import fcntl
from pdb import pm
from collections import Counter
from llpy import variants
from llpy.sutils import *
from ocrolib import Record

import argparse
# Command-line interface.  The resulting `args` object is read as a global
# throughout the rest of this script.
parser = argparse.ArgumentParser(description = "Compute local linear models.")
parser.add_argument('data',help="data file")
parser.add_argument('-p','--protos',help="prototypes file")
parser.add_argument('-o','--output',help='output file')
parser.add_argument("-N","--nsamples",default=2000000000,type=int,help="number of samples")
parser.add_argument("-d","--pca",default=10,type=int,help="pca dimensions for local linear training")
parser.add_argument("-Q","--par",default=multiprocessing.cpu_count(),type=int,help="parallelism")
parser.add_argument("-r","--noreject",action="store_true")
parser.add_argument("-b","--nobackground",action="store_true")
parser.add_argument("-v","--verbosity",default=1,type=int,help="verbosity")
parser.add_argument("-g","--geometry",action="store_true",help="incorporate baseline information")
parser.add_argument("-L","--linlimit",default=500000,type=int,help="max # samples for linear training")
parser.add_argument("--nn",default=1,type=int,help="number of nearest neighbors to train on")
parser.add_argument('--min_train',default=500,type=int,help="min # training samples regardless of impurity")
parser.add_argument('--logreg_l2',default=1.0,type=float,help="l2 regularization parameter")
parser.add_argument("--csize",default=100000,type=int,help="chunk size for neighborhoods")
parser.add_argument("--testprotos",default=1000000000,type=int,help="max # protos")
parser.add_argument("--nologistic",action="store_true",help="use linear least squares instead of logistic regression")
# Example arguments for interactive debugging (previously a dead assignment
# that was always executed even though only the commented-out parse used it):
# cmdline = "--nn 7 -N 30000 --csize 8000 -p DATA/protos032a.h5 -o output.h5 DATA/train.h5".split()
# args = parser.parse_args(cmdline)
args = parser.parse_args()

with tables.openFile(args.data) as db:
    nsamples = min(args.nsamples,len(db.root.classes))
    classes = array(db.root.classes[:nsamples])
    classes[classes==-1] = ord("~")
    print "nsamples",nsamples
    if args.geometry:
        assert "rel" in dir(db.root),"data set must contain rel data if -g is given"

def background(n):
    """Return indexes of up to n randomly chosen reject ('~') samples.

    If fewer than n reject samples exist, all of them are returned."""
    reject_indexes = find(classes==ord("~"))
    if len(reject_indexes)<=n:
        return reject_indexes
    return random.sample(reject_indexes,n)

with tables.openFile(args.protos) as pdb:
    nprotos = len(pdb.root.classes)
    protos = array(pdb.root.patches[:nprotos],'f')
    protoshape = list(protos[0].shape)
    pclasses = array(pdb.root.classes[:nprotos],'i')
    pclasses[pclasses==-1] = ord("~")
if sum(pclasses==ord("_"))!=0:
    print "WARNING: '_' contained in prototype classes"

if args.testprotos<nprotos:
    nprotos = args.testprotos
    protos = protos[:nprotos]
    pclasses = pclasses[:nprotos]

print "nprotos",nprotos

# Build a nearest-neighbor index over the flattened prototype patches;
# used by batch_neighbors to assign samples to prototypes.
nn = NNIndex()
nn.build_index(make2d(protos))

def batch_neighbors(job):
    if args.verbosity>0: print job
    start,end = job
    with tables.openFile(args.data) as db:
        batch = array(db.root.patches[start:end],'f')
        ns,ds = nn.nn_index(make2d(batch),args.nn)
    if len(ns.shape)!=2:
        ns.shape = (len(ns),1)
        ds.shape = (len(ds),1)
    assert len(ns)==(end-start)
    return (start,end,ns,ds)

# Compute, in parallel chunks, the args.nn nearest prototypes (and their
# distances) for every training sample; -1 marks uncomputed entries.
closest = -ones((nsamples,args.nn),'i')
dists = -ones((nsamples,args.nn),'f')

for start,end,neighbors,distances in poolmap(batch_neighbors,chunks(nsamples,args.csize),par=args.par):
    closest[start:end] = neighbors[:,:]
    dists[start:end] = distances[:,:]

import mlinear

def train_proto(proto):
    """Wrapper around train_proto0 that prints the traceback before re-raising.

    Needed because exceptions raised inside poolmap worker processes would
    otherwise lose their traceback information."""
    try:
        return train_proto0(proto)
    except:
        traceback.print_exc()
        raise

def train_proto0(proto):
    if pclasses[proto]==ord("_"): return "pclass is '_'"
    result = Record(proto=proto)
    indexes = find(closest[:,0]==proto)
    result.nn1 = len(indexes)
    result.ds = dists[indexes] if len(indexes)>0 else zeros(1)
    result.r = amax(result.ds)
    result.rm = mean(result.ds)
    result.rs = sqrt(var(result.ds))
    indexes = list(indexes)
    extras = []
    for i in range(1,len(closest[0])):
        extras += list(find(closest[:,i]==proto))
    extras = list(set(indexes))
    random.shuffle(extras)
    indexes += extras
    if len(indexes)>args.linlimit:
        indexes = indexes[:args.linlimit]
    indexes = sorted(indexes)
    result.nn = len(indexes)
    if args.verbosity>=2:
        print "%d-nn"%args.nn,len(indexes),Counter(classes[indexes]).most_common(5) if len(indexes)>0 else None
        
    indexes = [i for i in indexes if classes[i]>=0]
    indexes = [i for i in indexes if classes[i]!=ord("_")]
    if args.noreject:
        indexes = [i for i in indexes if classes[i]!=ord("~")]
    hist = Counter(classes[indexes])
    result.hist = hist
    hist = array(hist.most_common(100),'i')
    if len(hist)<1 or hist[0,1]<2: return "too few samples in top class"
    err = 1.0 - hist[0,1]*1.0/(1e-6+sum(hist[:,1]))
    result.err = err
    result.nerr = err
    result.sigmas = None
    result.lpc = None
    result.reldens = None
    if len(hist)<2 and hist[0,0]==ord("~"):
        return "only reject classes"
    if len(hist)<2 or hist[1,1]<2:
        if args.noreject or args.nobackground:
            print "\t[warning] too few confusable classes:",proto,[list(x) for x in hist]
            return result
        else:
            before = [list(x) for x in hist]
            indexes = concatenate([indexes,background(max(10,int(0.1*hist[0,1])))])
            hist = array(Counter(classes[indexes]).most_common(),'i')
            after = [list(x) for x in hist]
            print "\t[warning] too few confusable classes:",proto,before,"->",after


    with tables.openFile(args.data) as db:
        data = array([db.root.patches[i] for i in indexes],'f')
        if args.geometry:
            rel = array([docproc.rel_geo_normalize(db.root.rel[i]) for i in indexes],'f')
    clss = classes[indexes]

    result.sigmas = sqrt(var(make2d(data),axis=0))

    if args.geometry:
        # for the densities, we use the non
        reldens = {}
        for cls in sorted(list(set(clss))):
            reldens[cls] = mlinear.DiagGaussian(rel[clss==cls])
        result.reldens = reldens

    lpc = mlinear.LinPcaClassifier()
    if args.geometry:
        data = rel_combine(data,rel)
    pca = min(args.pca,max(3,len(data)/2)) # for small training sets, decrease the # pca components
    try:
        lpc.train(data,clss,linear=args.nologistic,k=pca)
    except linalg.LinAlgError,e:
        return "linear algebra error"
    result.lpc = lpc
    pred = lpc.classify(data)
    nerr = sum(pred!=clss)*1.0/len(clss)

    result.nerr = nerr
    return result

count = 0
protolist = range(nprotos)
# protolist = sorted(protolist,key=lambda x:sin(x+sin(x)))
# per-prototype accumulators, filled in as training results arrive
counts = zeros(len(protos))
sigmas = zeros(protos.shape)
rs = zeros(nprotos)
rms = zeros(nprotos)
rss = zeros(nprotos)
hists = {}
lpcs = {}
reldenses = {}
# train all prototypes in parallel; a string result marks a skipped prototype
for result in poolmap(train_proto,protolist,par=args.par):
    if type(result)==str: 
        print "\t[skipped]",result
        continue
    print count,nprotos,":",
    count += 1
    if args.verbosity>0: 
        print "%5d   %5d %5d "%(result.proto,result.nn1,result.nn),
        # compact percentage formatting: " 0.xx" -> "  .xx"
        def ffmt(x):
            s = "%5.2f"%x
            return re.sub(r' 0\.','  .',s)
        print ffmt(100*result.err),ffmt(100*result.nerr),
        # print "%5.2f %5.2f  "%(100*result.err,100*result.nerr),
        print "%6.2f "%result.r,
        cnames = " ".join([udecode(c) for c,n in result.hist.most_common()])
        print "%s %s"%(cnames,[n for c,n in result.hist.most_common()]),
        print
    # store the per-prototype statistics and models into the accumulators
    proto = result.proto
    rs[proto] = result.r
    rms[proto] = result.rm
    rss[proto] = result.rs
    hists[proto] = result.hist
    counts[proto] = result.nn1
    if result.sigmas is not None: 
        sigmas[proto,:,:] = result.sigmas.reshape(sigmas[proto].shape)
    lpcs[proto] = result.lpc
    reldenses[proto] = result.reldens

print "saving"
# Write all per-prototype results next to a copy of the prototype tables.
with tables.openFile(args.output,"w") as odb:
    with tables.openFile(args.protos) as pdb:
        table_copy(pdb,odb)
        table_lcopy(pdb,odb)
    table_log(odb,"%s %s"%(sys.argv,time.asctime()))
    # radius statistics: max / mean / stddev of nn distances per prototype
    odb.createEArray(odb.root,"rs",tables.Float32Atom(),shape=(0,),filters=tables.Filters(9))
    odb.createEArray(odb.root,"rms",tables.Float32Atom(),shape=(0,),filters=tables.Filters(9))
    odb.createEArray(odb.root,"rss",tables.Float32Atom(),shape=(0,),filters=tables.Filters(9))
    odb.createVLArray(odb.root,'hists',tables.ObjectAtom(),filters=tables.Filters(9))
    odb.createCArray(odb.root,"sigmas",tables.Float32Atom(),shape=protos.shape,filters=tables.Filters(9))
    odb.createCArray(odb.root,"lpccounts",tables.Float32Atom(),shape=(nprotos,),filters=tables.Filters(9))
    # with -g the classifiers are stored as 'rellpcs' (plus per-class
    # geometry densities); without it, plain 'lpcs'
    if args.geometry:
        odblpcs = odb.createVLArray(odb.root,'rellpcs',tables.ObjectAtom(),filters=tables.Filters(9))
        odb.createVLArray(odb.root,'reldens',tables.ObjectAtom(),filters=tables.Filters(9))
    else:
        odblpcs = odb.createVLArray(odb.root,'lpcs',tables.ObjectAtom(),filters=tables.Filters(9))
    # skipped prototypes get None entries via dict .get()
    for proto in range(nprotos):
        odb.root.rs.append([rs[proto]])
        odb.root.rms.append([rms[proto]])
        odb.root.rss.append([rss[proto]])
        odb.root.hists.append(hists.get(proto))
        odb.root.sigmas[proto] = sigmas[proto]
        odb.root.lpccounts[proto] = counts[proto]
        odblpcs.append([lpcs.get(proto)])
        if args.geometry:
            odb.root.reldens.append([reldenses.get(proto)])

print "done"        
