import h5py
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cluster import DBSCAN
import math

#INPUT_DATA_DIR = '/home/behollis/DATA/pierre/ocean/'
#OUTPUT_DATA_DIR = '/home/behollis/Dropbox/visweek2015/oslines/'

# Machine-local data paths: streamline/entropy hdf5 inputs are read from and
# clustering results written under these directories (edit per machine).
OUTPUT_DATA_DIR = '/home/brad/visweek2015-revision-code/oslines/'
INPUT_DATA_DIR = '/home/brad/DATA/ts00050/'

'''
Implementation of entropy-based streamline clustering from:

Chen, Cheng-Kai, et al. "An illustrative visualization framework for
3D vector fields." Computer Graphics Forum.
Vol. 30. No. 7. Blackwell Publishing Ltd, 2011.
'''

import numpy.linalg as la

def writeElapsedTime(start, end):
    """Write the wall-clock duration (end - start, seconds) to a timing file.

    The file name embeds os.times()[4] (system-wide elapsed time) so repeated
    runs do not overwrite each other's timing record.
    """
    import os
    systime = os.times()
    fname = OUTPUT_DATA_DIR + 'timingSlineClusters.varSampling.lock.{0}.txt'.format(systime[4]) #sys time
    # context manager guarantees the handle is closed even if write() raises
    with open(fname, 'w') as tout:
        tout.write('Elapsed time: {0}'.format(end - start))

def calcSlineLength(sl):
    ''' find total length of streamline and length of each seg

    sl is a sequence [xs, ys, ...] of coordinate lists; only the first two
    rows are used (z is treated as 0.0 everywhere in this script).

    Returns:
        total_len (float): sum of all segment lengths (0.0 if < 2 points).
        sl_seg_lengths (list): Euclidean length of each consecutive point
            pair, in streamline order.
    '''
    xs = np.asarray(sl[0], dtype=float)
    ys = np.asarray(sl[1], dtype=float)
    # vectorized pairwise distances replace the original per-segment
    # np.linalg.norm loop; same values, one C-level pass
    seg_lengths = np.hypot(np.diff(xs), np.diff(ys))
    return float(seg_lengths.sum()), list(seg_lengths)
       
def calcStreamlineFeatures(slines, pts_to_sample=0.0):
    '''Build one flat feature vector per streamline for clustering.

    Each feature vector holds [x, y] pairs: the first point, the last point,
    the arc-length midpoint, and up to ``pts_to_sample`` additional points
    spaced (approximately) evenly along the arc length.

    Args:
        slines: iterable of streamlines, each a [xs, ys, ...] coordinate list.
        pts_to_sample: number of extra points beyond begin/mid/end to sample.

    Returns:
        list of flat feature vectors; degenerate streamlines are skipped.
    '''
    slines_features = list()

    for sl in slines:

        # skip streamlines too short to have distinct begin/mid/end points
        # NOTE(review): `and` keeps a line if EITHER list has >= 3 entries;
        # both lists should be equal length, so this matches `or` in practice
        if len(sl[0]) < 3 and len(sl[1]) < 3:
            continue

        # feature vector starts with the begin and end points
        begin_pt = [ sl[0][0], sl[1][0] ]
        end_pt = [ sl[0][-1], sl[1][-1] ]
        slf = begin_pt + end_pt

        num_pts = len( sl[0] )

        # total arc length and per-segment lengths, computed once and reused
        # below (the original recomputed every norm in two more loops)
        total_len, sl_seg_lengths = calcSlineLength(sl)

        # find the first point index at or past half the total arc length
        mid_pt_idx = 0
        arc_length = 0.0
        for j in range( 0, num_pts - 1 ):
            arc_length += sl_seg_lengths[j]
            if arc_length >= ( total_len / 2.0 ):
                mid_pt_idx = j
                break

        # add mid point
        slf += [ sl[ 0 ][ mid_pt_idx ], sl[ 1 ][ mid_pt_idx ] ]

        # sample additional points at (approximately) even arc-length steps
        arc_length = 0.0
        pt_count = 0.0
        if pts_to_sample > 0:
            for j in range( 0, num_pts - 1 ):
                arc_length += sl_seg_lengths[j]
                if arc_length >= pt_count * ( total_len / pts_to_sample ):
                    if pt_count >= pts_to_sample:
                        break
                    pt_count += 1
                    slf += [ sl[0][j], sl[1][j] ]

        slines_features.append( slf )

    # return feature vectors of streamlines
    return slines_features
        
def readStreamlines(x, y, f):
    '''Read every ensemble member's streamline seeded at cell (x, y) from f.

    Datasets are expected at '<member>/x<xxx>/y<yyy>' with row 0 holding the
    x coordinates and row 1 the y coordinates.

    Returns:
        list of [xlst, ylst, zlst] triples (zlst is all zeros — 2D data);
        members without a dataset at this seed are silently skipped.
    '''
    slines = list()

    for mem in f.keys():
        path = str(mem).zfill(4) + '/x' + str(x).zfill(3) + '/y' + str(y).zfill(3)

        try:
            xlst = list(f[path][0])
            ylst = list(f[path][1])
        except (KeyError, IndexError, TypeError):
            # no streamline stored for this member at this seed point
            continue

        # BUG FIX: original `0.0 * len(...)` evaluates to the float 0.0,
        # not a list of zeros matching the point count
        zlst = [0.0] * len(xlst)
        slines.append([xlst, ylst, zlst])

    return slines
        
if __name__ == '__main__':
    
    LON = 53
    LAT = 90
    X = 125
    Y = 125
    DIM = 2
    
    SLINESF = 'lockSlines.1100steps.hdf5'
    SENTROPIESF = 'lockEntropyONLY.fixed.hdf5'
    OFILE = 'lock.SlineClusters.varSampling.3to13pts.hdf5'

    import time
    start = time.time()
    
    f = h5py.File(OUTPUT_DATA_DIR + SLINESF, 'r')
    entrF = h5py.File(OUTPUT_DATA_DIR + SENTROPIESF)
    output = h5py.File( OUTPUT_DATA_DIR + OFILE, 'w')
    
    dslclus = output.create_dataset(name='slineclusters', shape=(X,Y), dtype='f')
    dsampled = output.create_dataset(name='ptsSampledForClustering', shape=(X,Y), dtype='f')
    
    minLE = np.amin(entrF['avgLinearEntropy'])
    minAE = np.amin(entrF['avgAngularEntropy'])
    
    maxLE = np.amax(entrF['avgLinearEntropy'])
    maxAE = np.amax(entrF['avgAngularEntropy'])
    
    TOTAL_ENTROPY_RANGE = math.fabs( (maxLE + maxAE) - (minLE + minLE) ) 
    
    MIN_ADDITIOAL_SAMP_PTS = 0 #beyond three sample points
    MAX_ADDITIONAL_SAMP_PTS = 10 #beyond three sample points
    PTS_TO_SAMPLE_RANGE = MAX_ADDITIONAL_SAMP_PTS - MIN_ADDITIOAL_SAMP_PTS
    
    for x in range( 0, X ):
        for y in range( 0, Y ):
            
            print 'calculating {0}, {1}'.format(x, y)
            
            entrL = entrF['avgLinearEntropy'][x,y]
            entrA = entrF['avgAngularEntropy'][x,y] 
            avg_total_entropy_xy = entrL + entrA 
            
            #determine from enropy measurements
            PTS_SAMPLED_BEYOND_MIN =  math.floor( PTS_TO_SAMPLE_RANGE * ( avg_total_entropy_xy  / TOTAL_ENTROPY_RANGE ) )
            FEATURES = 2 * 3 + 2 * PTS_SAMPLED_BEYOND_MIN #default of first, middle and end point in 2 dimensions

            sltotal = readStreamlines(x,y,f)
        
            #calculate feature vectors for streamlines
            try:
                feat = calcStreamlineFeatures(sltotal, pts_to_sample=PTS_SAMPLED_BEYOND_MIN)
            
                feat_total = np.array( feat )
                feat_total.reshape(len(feat_total), FEATURES)
                
                
                # Lock-Exchange PARAMS
                #dcov[x,y] = np.cov(np.array(tpts).T)#pca.get_covariance()
                # let's use eps to be some signficant fraction of
                # the simulation physical domain...we have 152 x 152 cells
                # five percent of the diagonal distance across the domain
                # thus, diag dist = 176.67
                # 0.05 * 214.96 = 8.38
                # NOTE: include this metric and value in the paper
                # min samples is 0.1 of total members, or 2 
                
                db = DBSCAN(eps=8.38, min_samples = 2).fit( np.asarray(feat_total) )
            
                
                # OCEAN
                #dcov[x,y] = np.cov(np.array(tpts).T)#pca.get_covariance()
                # let's use eps to be some signficant fraction of
                # the simulation physical domain...we have 90 x 53 cells
                # five percent of the diagonal distance across the domain
                # thus, diag dist = 104.44
                # 0.05 * 104.44 = 5.2
                # NOTE: include this metric and value in the paper
                # min samples is 0.1 of total members, or 3 
                #db = DBSCAN(eps=5.2, min_samples = 3).fit( np.asarray(feat_total) )
                
                # STIRRING PARAMS
                #dcov[x,y] = np.cov(np.array(tpts).T)#pca.get_covariance()
                # let's use eps to be some signficant fraction of
                # the simulation physical domain...we have 152 x 152 cells
                # five percent of the diagonal distance across the domain
                # thus, diag dist = 214.96
                # 0.05 * 214.96 = 10.7
                # NOTE: include this metric and value in the paper
                # min samples is 0.1 of total members, or 2 
                
                #db = DBSCAN(eps=10.7, min_samples = 2).fit( np.asarray(feat_total) )
                
                core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
                core_samples_mask[ db.core_sample_indices_] = True
                labels = db.labels_
        
                # Number of clusters in labels, ignoring noise if present.
                n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
                dslclus[x,y] = n_clusters_
                #print n_clusters_
            except:
                print 'exception occured when trying to find clusters!'
                dslclus[x,y] = 0
                
            pts_sampled = int( FEATURES / float(DIM) )
            dsampled[x,y] = pts_sampled
            
            print 'sampling extra points = {0}'.format(int(pts_sampled - 3)) 
            print 'clusters: ' + str(dslclus[x,y])
            
    end = time.time()
    writeElapsedTime(start, end)

    f.close()
    output.close()
    
    print 'finished!'