#!/usr/bin/python
from pylab import *
from scipy import linalg
from scipy.ndimage.morphology import binary_erosion
from scipy.ndimage import interpolation,filters
from ocrolib import improc
from ocrolib import docproc
import numpy,pylab
import random,sqlite3
import random,signal
import codecs,os,re,glob,sys
import traceback
from optparse import OptionParser
import collections
from collections import defaultdict
import inspect
from contextlib import contextmanager
import pyflann
import tables
import mlinear
import struct

# Turn numpy floating point problems (overflow, division by zero, invalid
# operations) into raised exceptions instead of warnings so bugs surface early.
numpy.seterr(over='raise')
numpy.seterr(divide='raise')
numpy.seterr(invalid='raise')

# workaround for loading old pickles

import sys

class MyLoader:
    """Minimal PEP-302-style loader that hands back a pre-existing module."""
    def __init__(self, name, module):
        self.name, self.module = name, module
    def load_module(self, name):
        # only the module this loader was constructed for may be requested
        assert self.name == name
        return self.module

class MyImporter:
    """Meta-path finder (legacy find_module protocol) that redirects imports
    of modules referenced by old pickles to their current locations."""
    def find_module(self,fullname,path=None):
        # old pickles reference "mlinear" as a top-level module; serve the
        # already-imported copy
        if fullname=="mlinear": return MyLoader("mlinear",mlinear)
        # add more workarounds here
        return None

# register the finder so it is consulted on every import
sys.meta_path.append(MyImporter())

def unlist(x):
    """Unwrap a single-element list; non-lists are returned unchanged.

    Raises AssertionError if x is a list whose length is not exactly 1."""
    if isinstance(x, list):
        assert len(x) == 1
        return x[0]
    return x

# print functions

def dprint(*args):
    """Debug print: write the arguments to stderr, space separated,
    terminated by a newline."""
    sys.stderr.write(" ".join(map(str, args)) + "\n")

def pprint(*args):
    """Progress print: write the arguments on one stderr line, padded and
    ending in a carriage return so the next call overwrites it."""
    text = " ".join(str(a) for a in args)
    sys.stderr.write(text)
    sys.stderr.write("         \r")
    sys.stderr.flush()

class record:
    """A simple record type: a bag of attributes set from keyword args."""
    def __init__(self, **kw):
        for key, value in kw.items():
            setattr(self, key, value)
    def __str__(self):
        return str(self.__dict__)

class ValueStats:
    """Collect running statistics (via Stats) for values, keyed by label.

    When no label is given, the caller's "file:line/function" is derived
    from the call stack and used as the key."""
    def __init__(self):
        # one Stats accumulator per label, created on demand
        self.stats = defaultdict(Stats)
    def __call__(self,value,label=None):
        if label is None:
            # build a label of the form "file:line/function" from the caller
            current = inspect.currentframe()
            callers = inspect.getouterframes(current,2)
            _,fname,lineno,mod,_,_ = callers[1]
            label = "%s:%s/%s"%(fname,lineno,mod)
            # drop frame references promptly to avoid reference cycles
            del current
            del callers
            del _
        self.stats[label] += value
        return value
    def report(self,prefix=""):
        # one line per label, sorted for stable output
        for k in sorted(self.stats.keys()):
            print prefix,"%-30s"%k,self.stats[k].report()

class Stats:
    """Running statistics accumulator: count, sum, sum of squares, min, max.

    Add samples with `stats += x`; query with mean(), sigma(), report()."""
    def __init__(self):
        self.s1 = 0      # number of samples
        self.sx = 0      # sum of samples
        self.sx2 = 0     # sum of squared samples
        self.lo = inf    # smallest sample seen
        self.hi = -inf   # largest sample seen
    def __iadd__(self,x):
        self.s1 += 1
        self.sx += x
        self.sx2 += x**2
        if x<self.lo: self.lo = x
        if x>self.hi: self.hi = x
        return self
    def clear(self,n=1):
        """Rescale the accumulated sums as if only n samples had been seen."""
        self.sx = self.sx * n / self.s1
        self.sx2 = self.sx2 * n / self.s1
        self.s1 = n
    def mean(self):
        # guard against division by zero when no samples were added
        return self.sx/maximum(1e-6,self.s1)
    def sigma(self):
        # FIX: apply the same zero-count guard as mean() (previously a
        # ZeroDivisionError on an empty accumulator), and clamp tiny negative
        # variances caused by floating point roundoff before the sqrt
        n = maximum(1e-6,self.s1)
        return sqrt(maximum(0.0,self.sx2*1.0/n-self.mean()**2.0))
    def report(self):
        return "%g(%g)[%g,%g]"%(self.mean(),self.sigma(),self.lo,self.hi)

class MovingAverage:
    """Exponentially weighted moving average with blending factor l."""
    def __init__(self,initial=0.0,l=1e-3):
        self.l = l           # weight given to each new sample
        self.s = initial     # current average
    def __iadd__(self,x):
        # blend the new sample into the running average
        weight = self.l
        self.s = (1.0-weight)*self.s + weight*x
        return self
    def value(self):
        """Return the current average."""
        return self.s
    
def uencode(s):
    """Pack a short (at most 4 character) string into a 64-bit integer,
    16 bits per character, with the first character in the lowest bits."""
    assert type(s) in [unicode, str]
    assert len(s)<=4
    result = 0
    # process characters last-to-first so the first ends up least significant
    for c in reversed(s):
        result = (result<<16)|ord(c)
    return result

def udecode(i):
    """Inverse of uencode: unpack a 64-bit integer into a short string,
    16 bits per character.  Strings are passed through unchanged."""
    if type(i)==str or type(i)==unicode: return i
    chars = []
    for _ in range(4):
        if i==0: break
        code = i&0xffff
        # characters above ASCII need unichr; plain chr otherwise
        chars.append(unichr(code) if code>=128 else chr(code))
        i >>= 16
    return "".join(chars)

def make2d(data):
    """Return data as a 2D array: 1D inputs become a single row; inputs with
    more than two dimensions have axes 1 and up flattened together."""
    if type(data)==list: data = array(data)
    if data.ndim==1:
        return array([data])
    if data.ndim==2:
        return data
    rows = data.shape[0]
    return data.reshape(rows,prod(data.shape[1:]))

def arraywrap(data,m=None,dtype='f'):
    """Pack a list of variable-length vectors into a zero-padded 2D array
    of shape (len(data), m); m defaults to the longest vector's length."""
    if m is None:
        m = max(len(v) for v in data)
    result = zeros((len(data),m),dtype)
    for row,v in zip(result,data):
        row[:len(v)] = v
    return result

def maketargets(classes):
    """One-hot encode a list of class labels; columns are ordered by the
    sorted unique labels."""
    labels = sorted(set(classes))
    index = dict((c,i) for i,c in enumerate(labels))
    targets = zeros((len(classes),len(labels)))
    for row,c in enumerate(classes):
        targets[row,index[c]] = 1
    return targets

# groups of characters that OCR commonly mistakes for one another
confusable_sets = [["O","0","o"],["I","l","|","1","\\","/"],["-","_"],["`","'",".",","],
    ["c","C"],["k","K"],["o","O"],["p","P"],["s","S"],["u","U"],["v","V"],["w","W"],["x","X"],
    ["y","Y"],["z","Z"]]

# map every ordered pair of distinct characters within a set to 1
confusable_pairs = {}
for s in confusable_sets:
    for u in s:
        for v in s:
            if u!=v:
                confusable_pairs[(u,v)] = 1

def confusable(s,t):
    """Return 1 if characters s and t are identical or commonly confused
    with each other, else 0.  Integer codes are decoded via udecode."""
    if s==t:
        return 1
    decoded = []
    for c in (s,t):
        if type(c) not in [str,unicode]:
            c = udecode(c)
        decoded.append(c)
    return confusable_pairs.get(tuple(decoded),0)

def tess_readchars(fname,boxfile=None,d=1,pad=2):
    """Read characters in Tesseract training format.

    Converts the page image to PNG via ImageMagick's convert, then yields
    (character, image) pairs for every entry of the matching .box file.
    d widens each box and pad adds a border around the cut-out."""
    # NOTE(review): shells out with the filename interpolated into the
    # command string -- unsafe for untrusted filenames
    os.system("convert '%s' /tmp/image.png"%fname)
    image = imread("/tmp/image.png")
    h,w = image.shape
    if boxfile is None:
        boxfile = re.sub(r'(\.g4)*\.tiff?','.box',fname)
    with open(boxfile) as stream:
        for line in stream.readlines():
            f = line.split()
            c = f[0]
            # box coordinates use Tesseract's bottom-left origin; the y
            # coordinates are flipped into image (top-left) coordinates below
            x0,y0,x1,y1 = [int(x) for x in f[1:5]]
            # NOTE(review): the x-slice uses x1+1 without the +d margin that
            # the y-slice gets -- possibly intentional, confirm
            cimage = image[h-y1-1-d:h-y0-1+d,x0-d:x1+1]
            cimage /= amax(cimage)
            # invert (characters become foreground) and add a pad border
            cimage = improc.pad_by(1-cimage,pad)
            yield c,cimage

def tess_allchars(dir="tessdata/eng"):
    """Read a directory of Tesseract training images."""
    for fname in glob.glob(dir+"/*.g4.tif"):
        fid = re.sub(r'(\.g4)*\.tif$','',re.sub(r'^.*/','',fname))
        print "opening",fname,fid
        for c,img in readchars(fname):
            yield record(cls=c,image=img,font=fid)
    
def char_stream(d=32,skip=1000):
    """Endless generator of (image, class) pairs from the PyTables file exp.h5.

    Images are isotropically rescaled to size d and normalized to [0,1];
    reject classes "~" and "_" are skipped.  A (None, None) pair is yielded
    each time the dataset wraps around.  The skip parameter is unused."""
    db = tables.openFile("exp.h5")
    i = -1
    out = 0
    while 1:
        i += 1
        if i>=len(db.root.classes):
            # end of one pass over the data: wrap around and signal the epoch
            i = 0
            yield None,None
        if udecode(db.root.classes[i]) in ["~","_"]: continue
        # periodic progress output
        if out%1000==0: print "[char",out,"]"
        image = array(db.root.images[i],'f')
        image = docproc.isotropic_rescale(image,d)
        # pixel values are stored as 0..255; scale to 0..1
        image /= 255
        yield image,db.root.classes[i]
        out += 1

def chunks(n,c):
    """Yield (start,end) index pairs covering 0..n in steps of size c;
    the final chunk may be shorter."""
    for start in range(0,n,c):
        yield (start,min(n,start+c))

def blocks(l,n):
    """Split the sequence l into consecutive blocks of length n; the last
    block may be shorter."""
    result = []
    for i in range(0,len(l),n):
        result.append(l[i:i+n])
    return result

def lshuffle(l,n):
    """Local shuffle: shuffle the contents of consecutive length-n blocks
    of l, then shuffle the order of the blocks, and concatenate."""
    pieces = blocks(l,n)
    for piece in pieces:
        random.shuffle(piece)
    random.shuffle(pieces)
    return concatenate(pieces)

def randomized_stream(fname="training.h5",d=32,skip=32):
    """Generator of (image, class) pairs from a PyTables file, in a locally
    shuffled order that changes every round.

    Images are isotropically rescaled to size d and scaled to [0,1];
    reject classes "~" and "_" are skipped.  A (None, None) pair marks the
    end of each round; skip is the block size for the local shuffle."""
    db = tables.openFile(fname)
    out = 0
    for r in range(10000):
        print "round",r
        # reshuffle the sample order for every round
        indexes = range(len(db.root.classes))
        indexes = lshuffle(indexes,skip)
        for i in indexes:
            if udecode(db.root.classes[i]) in ["~","_"]: continue
            # periodic progress output
            if out%1000==0: print "char",out
            image = array(db.root.images[i],'f')
            image = docproc.isotropic_rescale(image,d)
            # pixel values are stored as 0..255; scale to 0..1
            image /= 255
            yield image,db.root.classes[i]
            out += 1
        yield None,None
        
@contextmanager
def flock(fname,exclusive=0):
    import fcntl
    with open(fname,"w") as fd:
        try:
            fcntl.lockf(fd,fcntl.LOCK_SH if not exclusive else fcntl.LOCK_EX)
            yield
        finally:
            fcntl.lockf(fd,fcntl.LOCK_UN)

def nn_merge_dists(r1,d1,r2,d2):
    """Merge two nearest-neighbor result sets (index arrays r, distance
    arrays d), keeping the k closest per row.  Overwrites and returns r1,d1;
    if either set is empty, the other is returned unchanged."""
    if r1 is None or r1.size==0: return r2,d2
    if r2 is None or r2.size==0: return r1,d1
    n,k = r1.shape
    allr = c_[r1,r2]
    alld = c_[d1,d2]
    for row in range(n):
        # keep the k smallest distances of the combined candidate set
        order = argsort(alld[row])[:k]
        r1[row] = allr[row][order]
        d1[row] = alld[row][order]
    return r1,d1


class NNIndex0(pyflann.FLANN):
    """A wrapper for FLANN that handles context management correctly.

    Supplies default index construction parameters (kmeans tree, branching
    20, high target precision) and deletes the native index on exit when
    used as a context manager."""
    def __init__(self,*args,**kw):
        # defaults only; any caller-supplied value wins
        if "algorithm" not in kw: kw["algorithm"] = "kmeans"
        if "branching" not in kw: kw["branching"] = 20
        if "target_precision" not in kw: kw["target_precision"] = 0.995
        kw["loglevel"] = 2
        pyflann.FLANN.__init__(self,*args,**kw)
    def __enter__(self):
        return self
    def __exit__(self,*args):
        # release the native index when leaving the with-block
        self.delete_index()

# directory where cached FLANN index files are stored (see NNIndex.build_index_cached)
nnindex_dir = "/tmp"

class NNIndex:
    """A wrapper for FLANN that handles context management correctly."""
    def __init__(self,*args,**kw):
        if "algorithm" not in kw: kw["algorithm"] = "kmeans"
        if "branching" not in kw: kw["branching"] = 20
        if "target_precision" not in kw: kw["target_precision"] = 0.995
        kw["loglevel"] = 2
        self.nn = pyflann.FLANN(*args,**kw)
    def setId(self,dataset):
        x = sum(dataset,dtype='d')+sum(dataset**2,dtype='d')
        x = hex(struct.unpack('Q',struct.pack('d',x))[0])[2:-1]
        self.identifier = x
        print "[NNIndex identifier",x,"]"
    def build_index(self,dataset,**kw):
        return self.nn.build_index(dataset,**kw)
    def build_index_cached(self,dataset,**kw):
        self.setId(dataset)
        cfile = nnindex_dir+"/"+self.identifier+".cache"
        newindex = 0
        if os.path.exists(cfile):
            print "loading",cfile
            self.nn.load_index(cfile,dataset)
            print "finished loading",cfile
        else:
            print "constructing index from scratch"
            self.nn.build_index(dataset,**kw)
            newindex = 1
        if newindex:
            print "writing",cfile
            self.nn.save_index(cfile)
            print "finished writing",cfile
    def nn_index(self,testset,num_neighbors=1,**kw):
        return self.nn.nn_index(testset,num_neighbors,**kw)
    def nn(self,dataset,testset,num_neighbors=1,**kw):
        self.setId(dataset)
        return self.nn.nn(dataset,testset,num_neighbors,**kw)
    def __enter__(self):
        return self
    def __exit__(self,*args):
        self.nn.delete_index()

def nn_batched(protos,data,k=1000,csize=50000,nsamples=2000000000):
    """Find the k nearest neighbors of each proto among (up to nsamples
    rows of) data, processing data in batches of csize rows and merging
    the per-batch results.

    Returns (neighbors, dists); neighbor values are row indices into data."""
    nsamples = min(len(data),nsamples)
    neighbors,dists = (None,None)
    for i,j in chunks(nsamples,csize):
        print i,j
        batch = data[i:j]
        with NNIndex() as nn:
            neighbors2,dists2 = nn.nn(make2d(batch),make2d(protos),num_neighbors=k)
        # per-batch indices are relative to the batch start; shift to global
        neighbors2 += i
        neighbors,dists = nn_merge_dists(neighbors,dists,neighbors2,dists2)
    return neighbors,dists

def table_copy(source,dest,names=None,omit=[],verbose=1):
    """Copy arrays from one PyTables file to another, row by row, with
    compression (Filters(9)).

    names may be a list of node names, a whitespace-separated string, or
    None (copy all well-formed top-level names except those in omit)."""
    if names is None:
        names = [name for name in dir(source.root) if re.match(r'^[a-zA-Z][a-zA-Z0-9_][a-zA-Z0-9]*$',name)]
        names = [name for name in names if name not in omit]
    elif type(names) is str:
        names = names.split()
    for name in names:
        a = source.getNode("/"+name)
        if verbose: print "[copying",name,a.shape,a.atom,"]"
        # variable-length arrays need a VLArray destination; everything
        # else is copied into an extendable EArray
        if "VLArray" in str(a):
            b = dest.createVLArray(dest.root,name,a.atom,filters=tables.Filters(9))
        else:
            b = dest.createEArray(dest.root,name,a.atom,shape=[0]+list(a.shape[1:]),filters=tables.Filters(9))
        for i in range(len(a)):
            b.append([a[i]])
        dest.flush()
        
def table_assign(db,name,a,verbose=1):
    """Write array a into the PyTables file db as a compressed, extendable
    array called name.

    Raises Exception for dtypes other than int32/int64/float32/float64.
    NOTE(review): float64 input is stored as float32, dropping precision --
    confirm this is intentional."""
    if a.dtype==dtype('int32'):
        atom = tables.Int32Atom()
    elif a.dtype==dtype('int64'):
        atom = tables.Int64Atom()
    elif a.dtype==dtype('f') or a.dtype==dtype('d'):
        atom = tables.Float32Atom()
    else:
        raise Exception('unknown array type: %s'%a.dtype)
    if verbose: print "[writing",name,a.shape,atom,"]"
    node = db.createEArray(db.root,name,atom,shape=[0]+list(a.shape[1:]),filters=tables.Filters(9))
    node.append(a)

def table_lcopy(db,odb):
    """Copy all LOG_* root attributes (written by table_log) from db to odb."""
    for name in dir(db.root._v_attrs):
        if name.startswith("LOG_"):
            odb.setNodeAttr("/",name,db.getNodeAttr("/",name))

def table_log(db,*args):
    """Append a timestamped log entry (root attribute LOG_<unixtime>) to
    the PyTables file db; the entry text is the space-joined args."""
    import time
    key = "LOG_%d"%int(time.time())
    db.setNodeAttr("/",key," ".join(args))

def showgrid(data,d=None,r=32,clear=1):
    """Show a data array as an r x r grid of d x d images (matplotlib).

    d defaults to the edge length inferred from the first item (assumed
    square); items beyond the grid capacity are ignored."""
    if clear: clf()
    if d is None: d = int(sqrt(prod(data[0].shape)))
    image = zeros((r*d+d,r*d+d))
    # FIX: cap the loop at the grid capacity r*r; the old bound d*d overran
    # the canvas (shape-mismatch error) whenever d*d exceeded the grid size
    for i in range(min(len(data),r*r)):
        im = data[i]
        if im.ndim==1:
            # flat vectors are assumed to be d rows
            im = im.reshape(d,len(im)//d)
        x = i//r
        y = i%r
        image[x*d:x*d+d,y*d:y*d+d] = im
    imshow(image)

# character codes used as reject markers rather than real characters
reject_chars = [ord("~"),ord("_")]

def ischar(c):
    """True iff character code c is not a reject marker."""
    return c not in reject_chars

def counter2outputs(counter):
    """Convert a Counter of (encoded) class labels into a list of
    (class, probability) pairs sorted by decreasing probability.

    An empty counter yields the reject output [("~", 1.0)]."""
    if len(counter)<1: return [("~",1.0)]
    pairs = array(counter.most_common(),'i')
    order = argsort(-pairs[:,1])
    classes = pairs[order,0]
    probs = pairs[order,1]*1.0/sum(pairs[:,1])
    return [(udecode(c),p) for c,p in zip(classes,probs)]

def counter2array(counter,k=5):
    """Pack a Counter into a (k,2) int array of (class,count) rows, most
    common first; the final row is (-1, total count of everything left over)."""
    result = zeros((k,2),'i')
    for i,(cls,count) in enumerate(counter.most_common()):
        # the last row is reserved for the remainder
        if i>=k-1: break
        result[i] = (cls,count)
    result[-1] = (-1, sum(counter.values())-sum(result[:,1]))
    return result

def showgrid3(data,nimages=1024):
    """Show a data array of color images as a square grid (matplotlib).

    NOTE(review): unpacks data[0].shape as (w,h,d) and then hard-codes 3
    channels in the reshape; also the w/h roles in the placement arithmetic
    look swapped relative to their names -- verified only for square images."""
    clf()
    # grid edge: square grid holding up to nimages cells
    r = int(sqrt(nimages))
    w,h,d = data[0].shape
    image = zeros((r*h,r*w,3))
    for i in range(min(len(data),nimages)):
        im = data[i]
        im = im.reshape(w,h,3)
        image[(i/r)*h:(i/r+1)*h,(i%r)*w:(i%r+1)*w,:] = im
    imshow(image.reshape(r*h,r*w,3))

def scosts(sigmas):
    """Per-row Gaussian normalization cost: the sum of log sigmas plus the
    (dimension/2)*log(2*pi) constant."""
    dim = sigmas[0].size
    return sum(log(make2d(sigmas)),axis=1) + (dim/2.0)*log(2*pi)

def ddist(v,proto,sigma,cost):
    """Size-normalized Gaussian distance between v and proto with
    per-component sigmas, plus the precomputed normalization cost."""
    diff = v.ravel()-proto.ravel()
    total = 0.5*sum(diff**2/sigma.ravel()**2) + cost
    return total/v.size

class Protos:
    """Load a prototype file and perform nearest neighbor lookups.  If
    sigmas are given, performs rescoring."""
    def __init__(self,fname=None,verbosity=1):
        self.verbosity = verbosity
        self.sfloor = 0.1
        self.k_rescore = None
        self.pdb = None
        if fname is not None: self.load(fname)
    def __getstate__(self):
        return dict([(k,v) for k,v in self.__dict__.items() if k not in ["nn","pdb"]])
    def __setstate__(self,state):
        self.__dict__.update(state)
        self.nn = NNIndex()
        self.nn.build_index(make2d(self.protos))
    def close(self):
        if self.pdb is not None:
            self.pdb.close()
            self.pdb = None
    def load(self,fname):
        with tables.openFile(fname) as pdb:
            n = len(pdb.root.classes)
            self.n = n
            self.protos = array(pdb.root.patches[:n])
            self.classes = array(pdb.root.classes[:n])
            if "sigmas" in dir(pdb.root):
                assert self.rescore>1
                self.sigmas = maximum(array(pdb.root.sigmas[:n]),self.sfloor)
                self.scosts = scosts(self.sigmas)
                self.k_rescore = 200
            else:
                self.sigmas = None
            if "relmeans" in dir(pdb.root):
                self.relmeans = array(pdb.root.relmeans[:n])
                self.relsigmas = array(pdb.root.relsigmas[:n])
            else:
                self.relmeans = None
            self.nn = NNIndex()
            self.nn.build_index(make2d(self.protos))
            if "d_classes" in dir(pdb.root):
                result = []
                for i,cs in enumerate(pdb.root.d_classes[:n]):
                    ns = pdb.root.d_counts[i]
                    ps = ns*1.0/maximum(1e-4,sum(ns))
                    assert not isnan(ps).any()
                    l = [(udecode(c),ps[j]) for j,c in enumerate(cs) if c>0 and ps[j]>0]
                    l = sorted(l,key=lambda x:-x[1])
                    result.append(l)
                self.posteriors = result
            else:
                self.posteriors = None
            if "d_means" in dir(pdb.root):
                self.d_means = array(pdb.root.d_means[:n],'f')
                if isnan(self.d_means).any(): print "warning: nan in d_means"
                self.d_sigmas = array(pdb.root.d_sigmas[:n],'f')
                if isnan(self.d_sigmas).any(): print "warning: nan in d_sigmas"
            else:
                self.d_means = None
            self.rejclasses = {}
            if "rejclass" in dir(pdb.root):
                for i in range(len(pdb.root.rejclass)):
                    rc = pdb.root.rejclass[i]
                    if type(rc)==list: rc = rc[0]
                    if rc is None: continue
                    self.rejclasses[i] = rc
        # FIXME add lin classifier and other stuff here as well
        if self.verbosity>0: 
            print "loaded",self.protos.shape,"protos",
            if self.sigmas is not None: print "with sigmas",self.sigmas.shape,
            if self.relmeans is not None: print "with rel",self.sigmas.relmeans
            print
        self.pdb = tables.openFile(fname)
    def rescore(self,v,k):
        return 0.5*sum((v.ravel()-self.protos[k].ravel())**2/self.sigmas[k].ravel()**2) + self.scosts[k]
    def nn_index(self,query,k=1):
        if self.k_rescore>1:
            assert self.sigmas is not None
            neighbors,dists = self.nn.nn_index(make2d(query),self.k_rescore)
            for i,v in enumerate(query):
                for j,l in enumerate(neighbors[i]):
                    dists[i,j] = ddist(v,self.protos[l],self.sigmas[l],self.scosts[l])
                order = argsort(dists[i])
                neighbors[i] = neighbors[i][order]
                dists[i] = dists[i][order]
        else:
            neighbors,dists = self.nn.nn_index(make2d(query),k+1)
        neighbors = [n[:k] for n in neighbors]
        dists = [d[:k] for d in dists]
        return neighbors,dists
    def rejclass(self,proto):
        return self.rejclasses.get(proto,None)

def csnormalize(image,f=0.75):
    """Center-of-mass/size normalization of a character image: translate the
    binary mass center to the image center and rescale so the principal axis
    of the mass distribution fills a fraction f of the image."""
    # binarize at the midpoint between the darkest and brightest pixel
    bimage = 1*(image>mean([amax(image),amin(image)]))
    w,h = bimage.shape
    [xs,ys] = mgrid[0:w,0:h]
    total = sum(bimage)
    if total<1e-4: return image      # blank image: nothing to normalize
    inv = 1.0/total
    # center of mass
    cx = sum(xs*bimage)*inv
    cy = sum(ys*bimage)*inv
    # second central moments
    sxx = sum((xs-cx)**2*bimage)*inv
    sxy = sum((xs-cx)*(ys-cy)*bimage)*inv
    syy = sum((ys-cy)**2*bimage)*inv
    evals,evecs = eigh(array([[sxx,sxy],[sxy,syy]]))
    spread = sqrt(amax(evals))
    # degenerate (near point-like) distributions are left unscaled
    scale = f*max(image.shape)/(4.0*spread) if spread>0.01 else 1.0
    m = array([[1.0/scale,0],[0.0,1.0/scale]])
    w,h = image.shape
    offset = array([cx,cy])-dot(m,array([w/2,h/2]))
    return interpolation.affine_transform(image,m,offset=offset,order=1)

def protect(f):
    """Decorator: call f, printing a traceback before re-raising any
    exception (useful e.g. in worker processes where tracebacks are
    otherwise lost).

    FIXES: imports traceback (previously a NameError inside the handler)
    and returns f's result (previously discarded)."""
    import traceback
    import functools
    @functools.wraps(f)
    def wrapped(*args,**kw):
        try:
            return f(*args,**kw)
        except:
            # deliberately broad: log absolutely everything, then re-raise
            traceback.print_exc()
            raise
    return wrapped

import multiprocessing

def poolmap(f,l,par=None,pool=None):
    """Map f over l, yielding the results.

    Runs serially when no pool is given and the parallelism is below 2;
    otherwise uses the supplied pool, or a temporary one with par workers
    (default: the CPU count).  Parallel results arrive in arbitrary order.
    A temporary pool is closed when iteration finishes."""
    if par is None and pool is None:
        par = multiprocessing.cpu_count()
    if pool is None and par<2:
        # serial fallback
        for item in l:
            yield f(item)
        return
    owned = None
    if pool is None:
        pool = multiprocessing.Pool(par)
        owned = pool
    try:
        for result in pool.imap_unordered(f,l):
            yield result
    finally:
        # only close pools we created ourselves
        if owned is not None:
            owned.close()

def kmeans(data,k):
    """K-means computation.  This happens to call the FLANN implementation,
    which is fairly fast, but somewhat buggy, so the returned codebook is
    sanity-checked and the computation retried (dropping one sample) up to
    5 times.  Alternatively, one could use the scipy.stats implementation."""
    if len(data)<=k: return data
    flann = pyflann.FLANN()
    for i in range(5):
        codebook = flann.kmeans(make2d(data),k)
        # sanity check: cluster centers must lie within (roughly) the range
        # of the data; FLANN occasionally returns garbage
        md = max([abs(amax(data)),abs(amin(data))])
        mc = max([abs(amax(codebook)),abs(amin(codebook))])
        # FIX: was "mc<1.001*mc" (comparing mc to itself, which accepted
        # every codebook); compare against the data range md instead
        if mc<1.001*md: return codebook
        data = data[1:]
    raise Exception("flann kmeans failed")

def extchange(fname,extension):
    """Replace fname's extension with the given one; a leading dot on
    extension is optional."""
    base,_ = os.path.splitext(fname)
    sep = "" if extension[0]=="." else "."
    return base+sep+extension

def unary(x,lo,hi,n=64):
    """Thermometer-code the scalar x over [lo,hi] with n steps: entry i is
    1.0 where the i-th grid point lies below x, else 0.0."""
    grid = linspace(lo,hi,n)
    return (grid<x)*1.0

def rel_unary(rel,n=64):
    """Thermometer-encode a list of (y,w,h) geometry triples: y over [-1,1],
    w and h over [0,2], n steps each.  Returns a float32 array."""
    rows = []
    for y,w,h in rel:
        rows.append([unary(y,-1,1,n),unary(w,0,2,n),unary(h,0,2,n)])
    return array(rows,'f')

def rel_combine(data,rel,n=64):
    """Append thermometer-encoded geometry features (see rel_unary) to the
    corresponding data rows; data and rel must have the same length."""
    assert len(rel)==len(data)
    geometry = rel_unary(rel,n=n)
    return c_[make2d(data),make2d(geometry)]
