#!/usr/bin/python

import os,time
import sys
import numpy
from collections import Counter
from pylab import *
import tables,scipy
from scipy import fftpack
from scipy.cluster import vq
from scipy.ndimage import morphology,filters,interpolation
from ocrolib import docproc,improc
import pyflann
import random
import tables
import multiprocessing
import fcntl
from pdb import pm
from collections import Counter
from llpy.sutils import *
from llpy import variants
from llpy import mlinear

import argparse
parser = argparse.ArgumentParser(description = "Compute rel models for confusable classes.")
parser.add_argument('-p','--protos',default='c2stats.h5',help="prototypes file")
parser.add_argument('-e','--errors',default='c2errs.h5',help="errors file from lleval")
parser.add_argument('-d','--data',default='data/training.h5',help="data samples")
parser.add_argument('-r','--rejects',default='data/rejects.h5',help="rejects samples")
parser.add_argument('-o','--output',default='c2rej.h5',help='output file')
parser.add_argument('-T','--threshold',default=2.0,type=float,help='threshold for choosing rejects')
parser.add_argument('-D','--ddisplay',action="store_true",help="debug display")
parser.add_argument('-R','--rrandom',action="store_true",help="random reject samples")
parser.add_argument('--pca_k',default=5,type=int,help="pca_k for linear classifier")
parser.add_argument('--nnskip',default=100,type=int,help="nearest neighbors to skip for training")
parser.add_argument('--testinc',default=1,type=int,help='proto increment for testing')
parser.add_argument('--testhists',action="store_true",help='show distance histograms per prototype instead of training')
# BUGFIX: without type=int a command-line -Q value arrived as a string,
# which made `args.par<2` below always false (Py2 str/int comparison) and
# crashed multiprocessing.Pool; the default cpu_count() was already an int.
parser.add_argument('-Q','--par',default=multiprocessing.cpu_count(),type=int,help='parallelism')
parser.add_argument('--rejectsample',default=100000,type=int,help="how many rejects to sample for training")
args = parser.parse_args()
# Debug display needs everything in one process (GUI + print interleaving).
if args.ddisplay: args.par = 0

def counter_err(counter):
    """Return the error rate implied by a Counter of class labels.

    The error rate is the fraction of counts that do NOT belong to the
    most frequent class.  An empty counter yields 0.0.
    """
    if not counter:
        return 0.0
    (_, top_count), = counter.most_common(1)
    total = sum(counter.values())
    return 1.0 - float(top_count) / total

# --- Load global data shared by all process() workers ----------------------

# Number of prototype patches available in the prototypes file.
with tables.openFile(args.protos) as pdb:
    nprotos = len(pdb.root.patches)
# Per-sample class labels, plus the nearest-prototype assignment produced
# by lleval (column 1 of /pred in the errors file), truncated to the same
# number of samples.
with tables.openFile(args.data) as db:
    with tables.openFile(args.errors) as edb:
        nsamples = len(db.root.classes)
        classes = array(db.root.classes[:nsamples],'i')
        n1s = array(edb.root.pred[:nsamples,1],'i')


# Draw a sorted random subset of reject patches; rindexes maps positions in
# `rejects` back to row indexes in the rejects file.
print "getting rejects"
with tables.openFile(args.rejects) as rdb:
    rindexes = array(sorted(random.sample(range(len(rdb.root.patches)),args.rejectsample)),'i')
    rejects = array([rdb.root.patches[i] for i in rindexes])

# Build a nearest-neighbor index over the flattened reject patches so that
# process() can find hard negatives near each prototype.
print "building rejects index"
nn = NNIndex()
nn.build_index(make2d(rejects))
print "done"
                    


def process(proto):
    indexes = find(n1s==proto)
    if len(indexes)<10: return (proto,None)
    with tables.openFile(args.data) as db:
        good = array([db.root.patches[i] for i in indexes])
    with tables.openFile(args.protos) as pdb:
        v = pdb.root.patches[proto]
    if args.rrandom:
        rejects = random.sample(rindexes,maximum(100,len(indexes)))
    else:
        dists = [dist(v.ravel(),w.ravel()) for w in good]
        # distribution is generally unimodal and nice looking
        # not quite Gaussian, but good enough (uncomment this to see)
        if args.testhists:
            print mean(dists),median(dists),sqrt(var(dists))
            clf(); hist(dists); ginput(1,0.1)
            return
        threshold = mean(dists)+args.threshold*sqrt(var(dists))
        [rejects],[rdists] = nn.nn_index(array([v.ravel()]),2*len(indexes))
        rdists = sqrt(rdists)
        if args.ddisplay:
            print mean(dists),sqrt(var(dists))
            print rdists[:5]
            print rdists[rdists>threshold][:5]
        rejects = rindexes[rejects[rdists>threshold]]
        rejects = concatenate([rejects,random.sample(rindexes,maximum(100,len(indexes)))])
    with tables.openFile(args.rejects) as rdb:
        bad = array([rdb.root.patches[i] for i in rejects])
    data = r_[good,bad]
    outs = concatenate([ones(len(good)),zeros(len(bad))])
    if args.ddisplay:
        ion(); clf(); gray()
        subplot(121); showgrid(good,clear=0)
        subplot(122); showgrid(bad,clear=0)
        ginput(1,0.1)
    lpc = mlinear.LinPcaClassifier()
    lpc.train(make2d(data),outs,k=args.pca_k)
    pred = lpc.classify(data)
    ne = sum(pred!=outs)*1.0/len(data)
    print proto,len(indexes),len(data),ne
    return (proto,lpc)

jobs = range(0,nprotos,args.testinc)
if args.par<2:
    result = [process(p) for p in jobs]
else:
    pool = multiprocessing.Pool(args.par)
    result = pool.map(process,jobs)
del pool

print result
rej = {}
for i,l in result: rej[i] = l

# Write the output model file: copy the prototype tables over, log the
# command line, then append one pickled classifier object (or None) per
# prototype into a compressed variable-length array.
with tables.openFile(args.output,"w") as odb:
    with tables.openFile(args.protos) as pdb:
        table_copy(pdb,odb)
        table_lcopy(pdb,odb)
    table_log(odb,"%s"%sys.argv)
    odb.createVLArray(odb.root,'rejclass',tables.ObjectAtom(),filters=tables.Filters(9))
    # rej.get(i,None) keeps rejclass aligned with prototype indexes even
    # for prototypes that were skipped (or subsampled away by --testinc).
    for i in range(nprotos):
        odb.root.rejclass.append([rej.get(i,None)])
