#!/usr/bin/python

import os
import numpy
from collections import Counter
from pylab import *
import tables,scipy
from scipy import fftpack
from scipy.cluster import vq
from scipy.ndimage import morphology,filters,interpolation
from ocrolib import docproc,improc
import pyflann
import random
import tables
import multiprocessing
import fcntl
from pdb import pm
from llpy import variants
from llpy import fast_kmeans
from llpy.sutils import *
import traceback

import argparse
# Command-line interface.  The positional argument is the HDF5 input file;
# all other options tune sampling, clustering, and optional weighting.
parser = argparse.ArgumentParser(description = "Compute a k-means clustering of the input rows and store in the output.")
parser.add_argument('input',help="data file")
parser.add_argument('-o','--output',help="output file")
# -P forces a fixed number of prototypes per class; otherwise the count is
# derived from the class size (clipped sqrt, see process()) up to --maxprotos.
parser.add_argument('-P','--protos',type=int,default=-1,help="per class protos (fixed if given)")
parser.add_argument('-m','--maxprotos',type=int,default=200,help="max # of protos per class")
parser.add_argument('-M','--minsize',type=int,default=10000,help="min size for full k-means")
parser.add_argument('-C','--mincount',type=int,default=200,help="min per class count")
parser.add_argument('-N','--nsamples',type=int,default=2000000000,help="max # of samples")
parser.add_argument('-V','--variants',type=int,default=0,help='generate additional variants')
parser.add_argument('-W','--rweight',type=int,default=10,help='weight of input vectors')
parser.add_argument('-Q','--par',type=int,default=multiprocessing.cpu_count(),help='amount of parallelism')
# FIXME add regex pattern here
parser.add_argument('--maxperclass',type=int,default=400000,help="maximum # samples to load for a class")
parser.add_argument('--maxsample',type=int,default=100000,help="subsample for split")
parser.add_argument('--maxfinal',type=int,default=100000,help="subsample for split")
parser.add_argument('--norejects',action="store_true")
parser.add_argument('--ksplit',type=int,default=2,help='default split size')
# Horizontal weighting: if --wsigma is positive, every sample is multiplied
# by a Gaussian column-weight mask centered at --wcenter, floored at --wfloor.
parser.add_argument('--wsigma',type=float,default=-1,help='sigma for horizontal weighting')
parser.add_argument('--wfloor',type=float,default=0.3,help='cost floor for horizontal weighting')
parser.add_argument('--wcenter',type=float,default=-1,help='center for horizontal weighting')
args = parser.parse_args()

# A fixed prototype count means a class needs at least that many samples,
# so raise the per-class minimum count to match.
if args.protos>0: args.mincount = args.protos

# Load only the class labels from the input HDF5 file up front; the patch
# data itself is re-opened and read per class inside process().
with tables.openFile(args.input) as db:
    nsamples = min(len(db.root.classes),args.nsamples)
    classes = array(db.root.classes[:nsamples],'int64')
    # per-class sample counts; drives job selection below
    kinds = Counter(classes)
    # d2 is the per-sample patch shape (trailing dims), d its flattened size
    d2 = list(db.root.patches.shape[1:])
    d = prod(d2)
    print "[done loading",len(classes),"samples]"

# Build the horizontal weighting mask: a patch-shaped image that is 1.0 at
# the center column (--wcenter; defaults to the middle column), smoothed by
# a Gaussian of width --wsigma, normalized to peak 1, and floored at
# --wfloor.  Without --wsigma the mask is all ones (no weighting).
if args.wsigma>0.0:
    print d2
    weights = zeros(d2)
    if args.wcenter<0: args.wcenter = d2[1]//2
    weights[:,args.wcenter] = 1
    weights = filters.gaussian_filter(weights,args.wsigma)
    weights /= amax(weights)
    weights = maximum(weights,args.wfloor)
    # show the mask for visual inspection (dismissed by a click or 10s timeout)
    ion(); gray(); imshow(weights,vmin=0,vmax=1); ginput(1,10)
else:
    weights = ones(d2)

def kmeans(data,k,use_flann=0):
    """Compute a k-means codebook for the rows of `data`.

    Delegates to the llpy fast_kmeans implementation and returns the k
    cluster centers as an array with the same dtype as `data`.

    The `use_flann` parameter is unused and retained only for backward
    compatibility: the former FLANN and scikits.learn code paths sat
    below an unconditional return and were unreachable dead code (the
    FLANN retry loop also contained a self-comparison bug,
    `mc<1.001*mc`); they have been removed.
    """
    return array(fast_kmeans.kmeans_iter(data,k),data.dtype)

def quantize(data,codebook):
    """Vector-quantize `data` against `codebook`.

    Each row of `data` is matched to its nearest codebook row using a
    FLANN nearest-neighbor search; returns the pair (indexes, distances),
    where indexes is a 1D array of codebook row numbers."""
    matcher = pyflann.FLANN()
    indexes,distances = matcher.nn(make2d(codebook),make2d(data),1)
    assert indexes.ndim==1
    return indexes,distances

def subsample(data,n):
    """Return at most n rows of `data`, chosen uniformly at random
    without replacement.

    If data already has n rows or fewer it is returned unchanged (no
    copy); otherwise a new array of n rows is returned.  Uses the
    global `random` module state, so results depend on its seed.
    """
    if len(data)<=n: return data
    # draw n distinct row indices in one call, then gather those rows
    sel = random.sample(range(len(data)),n)
    return data[array(sel,'i')]

def rkmeans(data,k,ksplit=2,minsize=10000,level=0,note=None):
    """Recursive k-means.  This performs multiple levels of kmeans
    (using k=ksplit, 2 by default) until it gets datasets of size at
    most minsize; those are then clustered using regular k-means.  The
    k prototypes are approximately apportioned between the subtrees."""
    if len(data)<=k: return data
    if note is not None: print "    "*level,'[%s]'%note,
    print "rkmeans",level,":",k,ksplit,data.shape
    data = make2d(data)
    # don't do anything if we have fewer samples than clusters
    if len(data)<=k: return data
    # print "  "*level,"kmeans",data.shape,k,ksplit
    if data.size<1 or k<1: return None
    # a single prototype is just the mean of the data
    if k==1:
        m = mean(data,axis=0)
        return array([m.ravel()])
    # base case: small enough (or few enough clusters) for direct k-means,
    # subsampled to at most args.maxfinal rows for speed
    if len(data)<minsize or k<4:
        codebook = kmeans(subsample(data,args.maxfinal),k)
        return array(codebook,'f')
    # sanity check: patch values are expected to lie roughly in (-2,2)
    assert amin(data)>-2 and amax(data)<2
    # recursive case: split the data into ksplit coarse clusters, then
    # recurse into each cluster with a proportional share of the k protos
    codebook = kmeans(subsample(data,args.maxsample),ksplit)
    quant,dists = quantize(data,codebook)
    result = []
    remaining = k
    for i in range(ksplit):
        rdata = data[quant==i,:]
        # fraction of the not-yet-apportioned samples that fall in this
        # subtree: sum(quant>=i) counts samples in this and later subtrees
        frac = sum(quant==i)*1.0/sum(quant>=i)
        kn = int(frac*remaining)
        remaining -= kn
        assert amin(rdata)>-2 and amax(rdata)<2
        if kn>=2:
            rcodebook = rkmeans(rdata,kn,ksplit=ksplit,level=level+1,note=note)
        elif len(data)>5:
            # NOTE(review): this tests len(data), not len(rdata); if this
            # subtree got no samples, mean(rdata) yields NaNs -- confirm
            # whether len(rdata) was intended
            rcodebook = array([mean(rdata,axis=0)])
        else:
            rcodebook = None
        if rcodebook is None: continue
        result.append(rcodebook)
    return concatenate(result,axis=0)

def process(arg):
    """Cluster one character class.

    `arg` is a (classcode,count) pair.  Returns (classcode,count,codebook)
    on success, or None if the class is skipped (reject/don't-care class,
    too few samples) or any exception occurs.  Exceptions are printed and
    swallowed deliberately so a multiprocessing pool worker never dies."""
    try:
        c,count = arg
        print "starting",c,udecode(c)
        # skip rejects ("~") when --norejects; always skip "_" classes
        if c==ord("~") and args.norejects: return None
        if c==ord("_"): return None
        if count<args.mincount: return None
        # indexes of all samples belonging to this class
        indexes = find(array(classes[:nsamples],'int64')==c)
        if len(indexes)>args.maxperclass:
            indexes = random.sample(indexes,args.maxperclass)
        indexes = sorted(indexes)
        data = []
        # load this class's patches; rows with values outside [-2,2] are
        # silently dropped here (a warning is printed below if any were)
        with tables.openFile(args.input) as db:
            print "loading",len(indexes),"from",db.root.patches.shape,"for",c
            for i in indexes:
                if amin(db.root.patches[i])>=-2 and amax(db.root.patches[i])<=2:
                    data.append(db.root.patches[i])
        # apply the global horizontal weight mask if enabled
        if args.wsigma>0:
            data = [v*weights for v in data]
        if len(data)!=len(indexes):
            print "warning","wanted",len(indexes),"got",len(data),"samples within range for",c
        assert amin(data)>=-2 and amax(data)<=2
        # optional augmentation: each original row is repeated args.rweight
        # times so it keeps its weight relative to the generated variants
        if args.variants>0:
            print "computing variants",len(data)
            assert len(data)*args.variants<20000000
            rows = []
            for i,row in enumerate(data):
                if i%10==0: print "%d:%d"%(i,len(rows)),; sys.stdout.flush()
                rows += [data[i]]*args.rweight
                rows += variants.rvariants(data[i],args.variants)
            print
            data = array(rows)
            del rows
            print "got",len(data),"variants"
        data = make2d(array(data))
        print "got",len(data),"for",c,"shape",data.shape
        # last-resort normalization if values still fall outside [-2,2]
        if amin(data)<-2.0 or amax(data)>2.0:
            print "out of range data values",
            print data.shape,
            print (amin(data),amax(data)),mean(data),"normalizing"
            data /= amax(data)
        # prototype count: fixed (-P), or ~sqrt(n) clipped to [2,maxprotos]
        if args.protos>0: k = args.protos
        else: k = int(clip(sqrt(len(data)),2,args.maxprotos))
        codebook = rkmeans(data,k,ksplit=args.ksplit,minsize=args.minsize,note="%4d"%c)
        print "[completed",udecode(c),c,count,len(codebook),"]"
        return (c,count,codebook)
    except:
        traceback.print_exc()
        return None

# Build the work list: one (classcode,count) job per class that has enough
# samples and is not a reject ("~") or don't-care ("_") class.
jobs = []
unfinished = set()
for c,count in kinds.most_common():
    if count<args.mincount: continue
    # FIXME add regex here
    if udecode(c) in ["~","_"]: continue
    jobs.append((c,count))
    unfinished.add(c)

# Run the jobs, either serially (-Q 1) or in a multiprocessing pool;
# pool results arrive in completion order, not submission order.
# NOTE(review): the serial path keeps None results in `output`, which the
# unpacking loop in the output section below would fail on; the parallel
# path filters them out -- confirm whether the serial path should too.
if args.par<2:
    output = [process(job) for job in jobs]
else:
    # FIXME shuffle here
    pool = multiprocessing.Pool(args.par)
    output = []
    for out in pool.imap_unordered(process,jobs):
        if out is None: continue
        c = out[0]
        unfinished -= set([c])
        output.append(out)
        print "finished",c,"done",len(output),"remaining",len(unfinished),sorted(list(unfinished))[:10]

# Write the results: extensible arrays for prototype patches, their class
# labels, and zero-initialized usage counts, plus the weight mask and the
# command line (via table_log) for provenance.
with tables.openFile(args.output,"w") as odb:
    table_log(odb,str(sys.argv))
    odb.createEArray(odb.root,'patches',tables.Float32Atom(),shape=[0]+d2)
    odb.createEArray(odb.root,'classes',tables.Int64Atom(),shape=(0,))
    odb.createEArray(odb.root,'counts',tables.Float32Atom(),shape=(0,))
    odb.setNodeAttr("/","weights",weights)
    for c,count,codebook in output:
        if codebook is None: continue
        print "[got",c,count,len(codebook),"]"
        # codebook rows are flat; restore the original patch shape d2
        odb.root.patches.append(codebook.reshape([len(codebook)]+d2))
        odb.root.classes.append([c]*len(codebook))
        odb.root.counts.append([0.0]*len(codebook))

