#!/usr/bin/python

import fcntl
import multiprocessing
import os
import random
import re
from collections import Counter
from pdb import pm

import numpy
import pyflann
import scipy
import tables
from pylab import *
from scipy import fftpack
from scipy.cluster import vq
from scipy.ndimage import morphology,filters,interpolation

import variants
from ocrolib import docproc,improc
from llpy.sutils import *

import argparse
# Command-line interface.  The defaults are tuned for very large patch
# databases: sampling (-N/-s) bounds memory, -Q controls multiprocessing
# fan-out, and -M/-R/--ksplit control the hierarchical k-means below.
parser = argparse.ArgumentParser(description = "Cluster images regardless of class. Uses multithreading and sampling.")
parser.add_argument('input',help="data file")
parser.add_argument('-o','--output',help="output file")
parser.add_argument('-N','--nsamples',type=int,default=2000000000,help="max # of samples")
parser.add_argument('-Q','--par',type=int,default=multiprocessing.cpu_count(),help='amount of parallelism')
parser.add_argument('-M','--minsize',type=int,default=10000,help="min size for full k-means")
parser.add_argument('-r','--pattern',default='.*',help='class pattern')
parser.add_argument('-s','--maxsample',type=int,default=100000,help="subsample for split")
parser.add_argument('-R','--kratio',type=int,default=50,help="ratio of size to k")
parser.add_argument('-k','--nclusters',type=int,default=None,help="number of clusters (instead of kratio)")
parser.add_argument('--ksplit',type=int,default=2,help="hierarchical splits")
args = parser.parse_args()

def kmeans(data,k):
    """K-means computation.  This happens to call the FLANN implementation,
    which is fairly fast, but somewhat buggy.  Alternatively, one could use
    the scipy.cluster.vq implementation.

    data: array of samples, ndim>=2 (flattened to 2D via make2d)
    k: number of clusters, must be >0
    Returns the computed codebook.  Raises an Exception if FLANN keeps
    producing a codebook with entries far outside the data range (a
    symptom of a failed FLANN run)."""
    assert data.ndim>=2
    assert data.size>0
    assert k>0
    flann = pyflann.FLANN()
    for i in range(5):
        codebook = flann.kmeans(make2d(data),k)
        # sanity check: codebook entries should stay within (roughly) the
        # magnitude range of the data; a blown-up codebook means FLANN failed
        md = max(abs(amax(data)),abs(amin(data)))
        mc = max(abs(amax(codebook)),abs(amin(codebook)))
        # BUG FIX: the original compared mc against itself (mc<1.001*mc),
        # which is always true for mc>0, so the retry loop never ran and
        # md was unused; compare against the data range instead
        if mc<1.001*md: return codebook
        # retry on slightly perturbed data (drop the first sample)
        data = data[1:]
    raise Exception("flann kmeans failed")

with tables.openFile(args.input) as db:
    nsamples = min(len(db.root.classes),args.nsamples)
    print "nsamples",nsamples
    assert nsamples>100
    classes = array(db.root.classes[:nsamples],'i')
    sel = [i for i,c in enumerate(classes) if re.match("^"+args.pattern+"$",udecode(c))]
    d2 = list(db.root.patches[0].shape)
    print "selected",len(sel)
    if args.nclusters>0:
        args.kratio = int(len(sel)/args.nclusters)

def process(job):
    """Process one clustering job.

    job: a list of row indices into db.root.patches.
    Returns a pair (subjobs, codebook):
      - small jobs (< args.minsize): run a full k-means and return
        ([], codebook);
      - large jobs: split the indices into args.ksplit sub-lists by a
        coarse k-means and return (subjobs, None) for further rounds.
    Reopens the input file itself so it is safe to run in a worker
    process (reads args and the input path from module globals)."""
    sel = job
    print len(sel)
    # subsample very large jobs so the k-means stays tractable; sort so
    # the PyTables reads below are in increasing row order
    sample = sel
    if len(sample)>args.maxsample: 
        sample = random.sample(sample,args.maxsample)
        sample.sort()
    data = []
    with tables.openFile(args.input) as db:
        for i in sample:
            data.append(array(db.root.patches[i],'f'))
    data = make2d(array(data))
    if len(sel)<args.minsize:
        # leaf case: one codebook entry per ~kratio samples, at least 2
        k = max(2,int(len(sel)/args.kratio))
        assert len(data)>0
        assert k>0
        codebook = kmeans(data,k)
        return ([],codebook)
    else:
        # split case: coarse k-means on the sample, then assign every
        # index in sel to its nearest split center
        codebook = kmeans(data,args.ksplit)
        flann = pyflann.FLANN()
        flann.build_index(make2d(codebook))
        subs = [[] for i in range(args.ksplit)]
        with tables.openFile(args.input) as db:
            for s in sel:
                # NOTE(review): pyflann nn_index returns (indices, dists);
                # presumably c is usable as a list index here (size-1
                # result for a single query) — confirm against the pyflann
                # version in use
                c,d = flann.nn_index(db.root.patches[s].ravel(),1)
                subs[c].append(s)
        return (subs,None)

if args.par>1:
    pool = multiprocessing.Pool(args.par)

jobs = [sel]
codebooks = []

r = 0
while len(jobs)>0:
    print "round",r,[len(x) for x in jobs]
    if args.par>1:
        result = pool.map(process,jobs)
    else:
        result = [process(job) for job in jobs]
    jobs = []
    for s,c in result:
        jobs += s
        if c is not None: codebooks += [c]
    r += 1

# Write all codebook entries out as patches with the dummy class "_",
# using extendable arrays so codebooks of any size can be appended.
with tables.openFile(args.output,"w") as odb:
    odb.createEArray(odb.root,'patches',tables.Float32Atom(),shape=[0]+d2)
    odb.createEArray(odb.root,'classes',tables.Int64Atom(),shape=(0,))
    for cb in codebooks:
        n = len(cb)
        # restore the original per-patch shape before appending
        odb.root.patches.append(cb.reshape(n,*d2))
        odb.root.classes.append([uencode("_")]*n)
