#!/usr/bin/python

### Classify patches from a patch database with a pickled classifier and
### report running error rates, confusions, and (optionally) relabeled output.

import numpy,os,os.path,sys,traceback
import re
from collections import Counter
import matplotlib
if "DISPLAY" not in os.environ: matplotlib.use("AGG")
else: matplotlib.use("GTK")
from pylab import *
import tables,scipy
from scipy import fftpack
from scipy.cluster import vq
from scipy.ndimage import morphology,filters,interpolation
from ocrolib import docproc,improc
import pyflann
import random
from tables import openFile,Filters,Int32Atom,Float32Atom,Int64Atom
import multiprocessing
import fcntl
import cPickle
from llpy.sutils import *

import argparse
parser = argparse.ArgumentParser(description = "Evaluate prototypes using a Python-based classifier.")
parser.add_argument('data',help="input files")
parser.add_argument('-o','--output',default=None,help="output file for error information")
parser.add_argument('-p','--parameter',default=[],nargs='*',help="additional parameters for the classifier")
parser.add_argument('-c','--classifier',default=None,help="classifier (Python pickle with coutputs method)")
parser.add_argument('-C','--confusion',default=10,type=int,help="number of confusions to print")
parser.add_argument('--noreject',action="store_true",help="don't use reject classes")
parser.add_argument('-r','--pattern',default='.*',help="only reclassify classes matching this pattern")
parser.add_argument('-R','--outpattern',default='.*',help="only reclassify classes matching this pattern")
parser.add_argument('-N','--nsamples',default=2000000000,type=int,help="number of samples")
parser.add_argument('-Q','--par',default=multiprocessing.cpu_count(),type=int,help="parallelism")
parser.add_argument('--chunksize',default=1000,type=int)
parser.add_argument('-v','--verbosity',default=1,type=int,help="verbosity level (default=1)")
args = parser.parse_args()

if args.output is None and args.verbosity>0:
    print "CLASSIFICATION ONLY, NOTHING STORED"

# Load all ground-truth class labels up front; the patch images themselves
# are re-read lazily, per chunk, inside process().
with openFile(args.data) as db:
    classes = array(db.root.classes[:len(db.root.classes)],'int64')

# Anchored regexes gating which samples are scored: `select` is matched
# against the true class label, `outselect` against the predicted class.
select = re.compile("^"+args.pattern+"$")
outselect = re.compile("^"+args.outpattern+"$")

# Load the classifier from a Python pickle; it must expose a coutputs()
# method (checked below).
# NOTE(review): cPickle.load executes arbitrary code from the file -- only
# use with trusted classifier files.
with open(args.classifier) as stream:
    if args.verbosity>0: print "loading classifier"
    classifier = cPickle.load(stream)
    if args.verbosity>0: print "got",classifier
    assert "coutputs" in dir(classifier)

# Apply user-supplied "name=value" overrides to attributes of the loaded
# classifier.  Values that parse as floats are converted; everything else
# is kept as a string.
for s in args.parameter:
    # maxsplit=1 so that values may themselves contain '=' characters
    # (the original split("=",2) raised a ValueError on e.g. "a=b=c")
    k,v = s.split("=",1)
    assert k in classifier.__dict__.keys(),"classifier %s does not have a parameter %s"%(classifier,k)
    try: v = float(v)
    except ValueError: pass  # non-numeric value: keep as string
    setattr(classifier,k,v)

def process(job):
    """Classify one chunk of patches from the patch database.

    job is a (start,end) index pair.  Returns a dict mapping each sample
    index to the classifier's output list, sorted by descending score.
    """
    try:
        lo,hi = job
        chunk = {}
        with openFile(args.data) as db:
            # geometry information is optional in the database
            has_rel = "rel" in dir(db.root)
            for idx in range(lo,hi):
                patch = db.root.patches[idx]
                geometry = db.root.rel[idx] if has_rel else None
                ranked = sorted(classifier.coutputs(patch,geometry=geometry),
                                key=lambda x:-x[1])
                chunk[idx] = ranked
        return chunk
    except:
        # make worker failures visible in the parent's log before re-raising
        traceback.print_exc()
        raise

# Worker pool for parallel classification (only created when requested).
if args.par>1:
    pool = multiprocessing.Pool(args.par)

# Number of samples to classify, bounded by the size of the database.
with openFile(args.data) as db:
    nsamples = minimum(args.nsamples,len(db.root.classes))

# Running statistics, updated by the main scoring loop and read by report().
# (The original also initialized an unused `result = {}`; removed as dead --
# `results` below is the dict actually used.)
total = 0    # samples scored after the pattern / reject filters
errors = 0   # top-1 misclassifications
lerrors = 0  # misclassifications whose (pred,true) pair is NOT confusable()
xerrors = 0  # true class absent from the classifier's output list entirely
sizes = []   # number of classifier outputs per scored sample
# one (start,end) job per chunk of samples
jobs = list(chunks(nsamples,args.chunksize))

def imap():
    """Yield one result dict per job, via the pool when parallelism >= 2."""
    if args.par>=2:
        for chunk_result in pool.imap(process,jobs):
            yield chunk_result
    else:
        # serial fallback: run the jobs in-process
        for job in jobs:
            yield process(job)

def report():
    """Write one progress line: total, error counts, error rates, mean #outputs.

    Reads the module-level counters (total, errors, lerrors, xerrors, sizes)
    maintained by the main scoring loop.
    """
    sys.stdout.write("%8d\t"%total)
    sys.stdout.write("%6d\t%6d\t%6d\t\t"%(errors,lerrors,xerrors))
    # guard against ZeroDivisionError / mean of an empty list, which the
    # original hit whenever an entire chunk was filtered out by the patterns
    denom = max(total,1)
    sys.stdout.write("%.4f\t%.4f\t%.4f\t\t"%(errors*1.0/denom,lerrors*1.0/denom,xerrors*1.0/denom))
    sys.stdout.write("%.3f\n"%(mean(sizes) if sizes else 0.0))

results = {}
confusion = Counter()
lconfusion = Counter()

for outputs in imap():
    for index,outputs in outputs.items():
        pred,prob = outputs[0] # they should be sorted and non-empty
        results[index] = (pred,prob)
        assert type(pred) in [str,unicode],\
            "bad result from prediction: %s (%s)"%(pred,type(pred))
        cls = udecode(classes[index])
        if not select.match(cls): continue
        if args.noreject and cls in [""," ","_","~"]: continue
        if not outselect.match(pred): continue
        if args.noreject and pred in [""," ","_","~"]: continue
        total += 1
        sizes.append(len(outputs))
        if pred!=cls:
            errors += 1
            if not confusable(pred,cls):
                lerrors += 1
                lconfusion[(pred,cls)] += 1
            else:
                confusion[(pred,cls)] += 1
            clss = [c for c,p in outputs]
            # print pred,cls,clss
            if cls not in clss:
                xerrors += 1
    if args.verbosity>0: print "#",; report()
        
# Final summary line, tagged "llclassify-result" so it can be grepped out of
# combined logs.
print "llclassify-result",args.classifier,
print args.data,
report()

# Most frequent confusions.  `confusion` holds errors between confusable()
# pairs (i.e. errors only under strict matching); `lconfusion` holds the
# remaining errors (errors even under lenient matching).
if args.confusion>0:
    print "strict confusions"
    for (pred,cls),v in confusion.most_common(args.confusion):
        print "%6d %s %s"%(v,pred,cls)
    print "lenient confusions"
    for (pred,cls),v in lconfusion.most_common(args.confusion):
        print "%6d %s %s"%(v,pred,cls)

# Optionally write a relabeled copy of the database: patches are copied
# verbatim, class labels are replaced with the predictions (only where both
# the old and the new label match the -r / -R patterns), and the prediction
# probabilities are stored alongside.
if args.output is not None:
    if args.verbosity>0: print "writing"
    with openFile(args.data) as db, openFile(args.output,"w") as odb:
        shape = list(db.root.patches.shape[1:])
        table_lcopy(db,odb)
        odb.createEArray(odb.root,"patches",tables.Float32Atom(),shape=[0]+shape,filters=tables.Filters(9))
        odb.createEArray(odb.root,"classes",tables.Int64Atom(),shape=[0],filters=tables.Filters(9))
        odb.createEArray(odb.root,"probs",tables.Float32Atom(),shape=[0],filters=tables.Filters(9))
        # copy the patch images over in bounded-size chunks
        for start,end in chunks(nsamples,100):
            odb.root.patches.append(db.root.patches[start:end])
        for i in range(nsamples):
            # NOTE(review): results[i] raises KeyError for any sample that was
            # never classified; the commented-out fallback suggests a default
            # of ("_",0.0) was once intended here -- confirm.
            cls,prob = results[i] # result.get(i,("_",0.0))
            prev = udecode(db.root.classes[i])
            # keep the previous label unless both labels pass the patterns
            if not select.match(prev) or not outselect.match(cls):
                cls = prev
            odb.root.classes.append([uencode(cls)])
            odb.root.probs.append([prob])
    if args.verbosity>0: print "done"
