"""

Default clustering workflow


Copyright 2009 Michael Seiler
Rutgers University
miseiler@gmail.com

This file is part of ConsensusCluster.

ConsensusCluster is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

ConsensusCluster is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with ConsensusCluster.  If not, see <http://www.gnu.org/licenses/>.


"""

import numpy, sys, time, os, getopt
from itertools import combinations as comb

try:
    import psyco
    psyco.full()
except:
    pass

import pycuda.autoinit

import clustio, scripts, analysis
from clustio import ConsoleDisplay, Plot, Clustmap
from kernels import kmeans, som, treetype, hierarchy, nw

class ConsensusCluster(object):
    """

    ConsensusCluster

        Creates a consensus of any number of clustering methods, clusters the result as a distance matrix, and reorders it using simulated annealing

        Usage:
            
            sdata               - parsers.Parse object
            num_clusters        - K value, or the number of clusters for the clustering functions to find for each subsample.
            subsamples          - Number of subsampling/clustering repeats performed by each algorithm.  Default: 500
            clustering_algs     - List of algorithm names to combine.  Recognised: 'kmeans', 'som'.  Default: ['kmeans']

        Raises:

            ValueError if an algorithm name is not recognised, or if some sample
            pair was never co-selected for clustering (tc has a zero entry).

        Properties

            tree
                
                This is a treetype.Tree object of the final clustering, if Hierarchical clustering was used.  Its leaves have
                value equal to the sample indices (sdata.samples)

            reorder_indices
                
                This is an array of the indices corresponding to the new order the consensus matrix has taken after reordering, using the original matrix as
                a reference.  This can be useful if you plan to use this information, as frequently the consensus matrix ordering will give you more insight
                into the 'true' K value than actual clustering will.

            consensus_matrix

                The reordered consensus matrix: entry (i, j) is the co-clustering
                count for samples i and j divided by their co-selection count.

    """

    def __init__(self, sdata, num_clusters=2, subsamples=500, clustering_algs=None, **kwds):

        # Avoid the shared-mutable-default-argument pitfall; None means ['kmeans'].
        if clustering_algs is None:
            clustering_algs = ['kmeans']

        self.datapoints      = sdata.samples
        self.num_clusters    = num_clusters
        self.tree            = None
        self.clustering_algs = clustering_algs
        self.M               = sdata.M

        dim = sdata.M.shape[0]

        # cm accumulates co-clustering counts; tc counts how often each sample
        # pair was co-selected, so cm / tc below is a co-clustering rate.
        cm = numpy.zeros((dim, dim), dtype=numpy.float64)
        tc = numpy.zeros((dim, dim), dtype=numpy.int32)

        for alg in clustering_algs:
            if alg == 'kmeans':
                a, b = kmeans(sdata.M, num_clusters=num_clusters, repeats=subsamples)
            elif alg == 'som':
                a, b = som(sdata.M, num_clusters, repeats=subsamples)
            else:
                # BUG FIX: an unrecognised name previously reused the previous
                # algorithm's results silently (or raised NameError on the first
                # iteration).  Fail loudly instead.
                raise ValueError("Unknown clustering algorithm: '%s'" % alg)

            cm += a
            tc += b

            # Free the per-algorithm matrices before the next (large) allocation
            del a, b

        if not tc.all():
            raise ValueError('Some samples were not detected as having been selected for clustering.\nPlease either increase the number of subsamples or enable full-dataset clustering.')

        self.consensus_matrix = cm / tc
        
        del cm, tc

        print('\nClustering the consensus matrix...\n')

        # Hierarchically cluster (1 - consensus) treated as a distance matrix
        self.tree = hierarchy(sdata, self.num_clusters, data_matrix = (1 - self.consensus_matrix), distance_matrix = True, linkage = 'average')

        print('\nReordering the consensus matrix...\n')

        # Reorder the tree leaves to expose block structure, then permute the
        # matrix rows and columns to match that leaf order
        treetype.reorder(self.tree, self.consensus_matrix)
        best_order = tuple(self.tree.sequence)
        self.reorder_indices = self.tree.sequence

        self.consensus_matrix = self.consensus_matrix.take(best_order,1).T.take(best_order,1)


class CommonCluster(object):
    """

    CommonCluster

        Default workflow driver for Consensus Clustering.

        This class presents a default workflow for Consensus Clustering.  It is designed to be subclassed to suit your needs.

        See individual methods for advice on appropriate subclassing methodology.

        Usage:
            
            class MyCluster(CommonCluster):

                def __init__(self, parser, filename, log2, sub_medians, center, scale, pca_fraction, eigenvector_weight,
                             kvalues, subsamples, **kwds):
                    
                    CommonCluster.__init__(self, parser, filename, ....

            Or simply CommonCluster(parser, filename, ....
            
            In either case, CommonCluster will be run with the following options:

                parser              - parsers.ParseX class, see parsers.py.  No default.
                filename            - File to be parsed by parser, see parsers.py.  No default.
                log2                - Take the log2 of all data.  Default: False
                sub_medians         - Subtract the median of sample medians from each entry in M.  Default: False
                center              - Normalise genes over all samples to have mean 0.  Default: False 
                scale               - Normalise genes over all samples by dividing by the Root-Mean-Square.  Default: False
                pca_fraction        - Choose genes from those principal components that explain pca_fraction of the variance.  Default: 0.85
                eigenvector_weight  - Take the top eigenvector_weight fraction of genes that occur with high weights in selected components.  Default: 0.25
                kvalues             - List of K-Values to cluster.  Default: [2, 3, 4, 5, 6]
                subsamples          - Number of subsampling iterations to form consensus.  Default: 300

    """
    
    def __init__(self, parser, filename, **kwds):
        """Drive the default workflow: seed settings, parse argv, then run the consensus procedure."""

        self.console = ConsoleDisplay(log=False)

        # Seed default values for the settings attributes before command-line
        # parsing gets a chance to override them.
        # FIXME: This is horribly inconsistent, borne from the days we had a GUI
        # FIXME: and some settings were set there, while others were set here.
        self.set_kwds(**kwds)

        parser, filename, overrides = self.handle_cmdline_args(parser, filename, **kwds)

        # Command-line options take precedence over keyword arguments
        kwds.update(overrides)

        self.consensus_procedure(parser, filename, **kwds)

    def set_kwds(self, keep_list = None, pca_only = False, pca_legend = True, no_pca = False, coring = False, mds = False,
                 transpose = False, no_snr = False, flist = None, tom = False, dmatrix = False, write_pca_filename = None,
                 corr = False, corr_signum_thresh = None, corr_abs = False, **kwds):
        """Store the GUI-era option flags as attributes on self.  Unknown kwds are ignored."""

        flags = {
            'keep_list':  keep_list,    # List of filenames of samples to keep, usually set by UI
            'pca_only':   pca_only,     # Do PCA and then stop
            'pca_legend': pca_legend,   # Draw the PCA legend?
            'no_pca':     no_pca,       # Don't do PCA
            'coring':     coring,       # Try to core the cluster matrix
            'mds':        mds,          # Run MDS instead of PCA
            'transpose':  transpose,    # Transpose the input file
            'no_snr':     no_snr,       # No SNR reports
            'flist':      flist,        # List of features to keep
            'tom':        tom,          # Create a topological overlap matrix from the input
            'dmatrix':    dmatrix,      # Input is a distance matrix
            'corr':       corr,         # Input will be converted to a correlation matrix
            'corr_abs':   corr_abs,     # Correlation matrix will be abs'd
            'corr_signum_thresh': corr_signum_thresh,   # Correlation matrix signum threshold
            'write_pca_filename': write_pca_filename,   # Filename to write pca-normalised data to
        }

        for name in flags:
            setattr(self, name, flags[name])

    def handle_cmdline_args(self, parser, filename, **kwds):
        """

        Parse sys.argv with getopt and return a (parser, filename, settings) tuple,
        where settings is a dict of overrides the caller merges into its kwds.

        Should be self-explanatory.  An unrecognised option or -h will pull up the usage.

        Side effects: many option flags are stored directly as attributes on self
        (see set_kwds); --snr and --comparelogs do their work here and then call
        sys.exit, so they never return to the normal workflow.

        """

        def usage(unrec=None):
            # Print the full option reference.  unrec, if given, is the
            # unrecognised option (or stray positional arguments) that got us here.

            if unrec is not None:
                print("Unrecognised option: %s\n" % unrec)

            print("USAGE: python cluster.py [OPTIONS]\n")
            print("\t-f <filename>\t\t\tLoad <filename> for clustering.  Default Parser: Normal")
            print("\t-p <parser>\t\t\tParse <filename> with <parser>.  Only valid with the -f option.  E.g. 'Normal'")
            print("\t--help, -h\t\t\tThis help.")
            
            print("\n\t******DATA NORMALISATION******")
            print("\t--log2, --nolog2\t\tPerform log2 reexpression, or turn it off. Default is off.")
            print("\t--submedians, --nosubmedians\tPerform median centring, or turn it off. Default is off.\n\t\t\t\t\tNOTE: Turning this on will turn off mean centring.")
            print("\t--center, --nocenter\t\tPerform mean centring, or turn it off. Default is off.\n\t\t\t\t\tNOTE: Turning this on will turn off median centring.")
            print("\t--scale, --noscale\t\tPerform RMS scaling, or turn it off. Default is off.")
            
            print("\n\t******PCA AND FEATURE SELECTION******")
            print("\t--nopca\t\t\t\tDo not perform PCA at all. This precludes feature selection.\n\t\t\t\t\tUseful if your data is known to be singular.")
            print("\t--pcaonly\t\t\tPerform PCA without clustering.")
            print("\t--mds\t\t\t\tPerform MDS *instead of* PCA. Note that --dmatrix also enables\n\t\t\t\t\tthis option. Normalisation will NOT take place. Default: PCA is used.")
            print("\t--pcafraction <fraction>\tSelect features from the top <fraction> principle components. Default is 0.85")
            print("\t--eigweight <fraction>\t\tSelect the top <fraction> features by weight in each principle component.\n\t\t\t\t\tDefault is 0.25")
            print("\t--noselection\t\t\tDo not perform feature selection. Simply sets pcafraction and eigweight to 1.")
            
            print("\n\t******SAMPLE SELECTION******")
            print("\t-c <file1,file2,file3,..>\tDefine samples (one on each line) in file1, etc as clusters.  Sample set will be\n\t\t\t\t\treduced to these samples, and their labels will be shown in logs and PCA plot.")
            print("\t--krange <fst>,<snd>\t\tRepeat for each kvalue between <fst> and <snd> inclusive. Default is 2 to 6.")
            print("\t--subsamples  <number>\t\tNumber of clustering iterations to perform. Default is 300.")

            print("\n\t******CLUSTERING OPTIONS******")
            print("\t--corr\t\t\t\tUse the correlation matrix to perform clustering. Implies --dmatrix after conversion.")
            print("\t--corrabs\t\t\tTake the absolute value of the correlation matrix.")
            print("\t--corrthresh <fraction>\t\tApply a signum function to the correlation matrix, where values are 1 if >= <fraction>\n\t\t\t\t\tand 0 otherwise. Default is unset.")
            print("\t--kmeans\t\t\tRun the K-Means algorithm")
            print("\t--som\t\t\t\tRun the Self-Organising Map algorithm")
            print("\t--coring\t\t\tTurns on EXPERIMENTAL coring support. Additional logfiles and images are generated which\n\t\t\t\t\tdetail suggested 'core' clusters. Take its advice at your own risk!")

            print("\n\t******INPUT/OUTPUT OPTIONS******")
            print("\t-o <dir>\t\t\tClustering results be stored in new directory <dir>. Default is a timestamped directory of format username-YYYY-MM-DD-HH-MM-SS.")
            print("\t--dmatrix\t\t\tInput is an NxN distance matrix, rather than a sequence of vectors.\n\t\t\t\t\tNote that this will enable MDS scaling and SNR logs will not be produced.")
            print("\t--flist <file1,file2...>\tUse only the features found in <files> for clustering.\n\t\t\t\t\tPCA selection will still be performed unless --noselection is enabled.")
            print("\t--transpose\t\t\tTranspose the input matrix")
            print("\t--tom\t\t\t\tCreate a topological overlap matrix from sample vector input. Implies --corr and --transpose.")
            print("\t--bsize <num>\t\t\tBlock size, in pixels, for the cluster heatmap. Default is 10.")
            print("\t--tree_space <num>\t\tVertical height, in pixels, for tree and font display. Default is 200.") 
            print("\t--nosnr\t\t\t\tSNR logs are not produced. Default is on.")
            print("\t--nottest\t\t\tKruskal ttest is not done in SNR report. Useful for discreet data. Default is on.")

            print("\n\n\tExample: python cluster.py -f mydata.txt --kmeans --log2 --submedians --noselection -c clusterdefs/cluster_1,clusterdefs_cluster2")
            print("\n\tOpens mydata.txt, log2 reexpresses and median centres the data, performs no feature selection, and begin k-means clustering using the cluster_1 and cluster_2 definitions in the clusterdefs folder.\n")

        
        # Krange needs to be --krange=2,6 or -k 2,6
        # add o, k
        # snr needs to be --snr=thing,thing,thing or I guess force them to quote it?
        # ditto for comparelogs

        try:
            opts, args = getopt.gnu_getopt(sys.argv[1:], 'hf:p:c:o:k:', ['nopca', 'log2', 'nolog2', 'center', 'nocenter', 'scale', 'noscale', 'submedians', 'nosubmedians',
                                                                         'pcafraction=', 'eigweight=', 'krange=', 'subsamples=', 'snr=', 'comparelogs=', 'flist=', 'noselection',
                                                                         'help', 'kmeans', 'som', 'bsize=', 'coring', 'pcaonly', 'nottest', 'mds', 'dmatrix', 'transpose', 'nosnr',
                                                                         'tom', 'writepca=', 'corr', 'corrthresh=', 'corrabs', 'tree_space=', 'dpi=', 'max_cluster_size='])
        except getopt.GetoptError, err:
            usage(err)
            sys.exit(2)

        # We have no standalone arguments
        if args:
            usage(args)
            sys.exit(2)

        RUN_SNR = 0
        # NOTE: settings aliases kwds (it is NOT a copy), so option handling
        # below also mutates the caller's kwds dict.
        settings = kwds # Handle settings normally set via the GUI
        algs = []       # Clustering algorithms

        # Names of the options actually present on the command line
        setopts = set([x[0] for x in opts ])

        # Defaults for options the user did not supply
        if '-p' not in setopts:
            parser = clustio.ParseNormal
        if '-o' not in setopts:
            self.outdir = None

        for opt, arg in opts:
            if opt == '-f': filename = os.path.realpath(arg)
            # NOTE(review): eval() on a user-supplied parser name is only safe
            # for a trusted command line; getattr(clustio, 'Parse' + arg) would
            # be the safer equivalent.
            elif opt == '-p': parser = eval('clustio.Parse' + arg)
            elif opt == '-c': self.keep_list = [ os.path.realpath(x) for x in arg.split(',') if x ]
            elif opt == '-o': self.outdir = arg
            elif opt == '--nopca': self.no_pca = True
            elif opt == '--writepca':
                fn = os.path.realpath(arg)
                self.write_pca_filename = fn
            elif opt == '--nosnr': self.no_snr = True
            # --dmatrix, --tom and --corr each imply a bundle of other flags
            elif opt == '--dmatrix':
                self.mds     = True
                self.no_snr  = True
                self.dmatrix = True
            elif opt == '--tom':
                self.tom       = True
                self.dmatrix   = True
                self.transpose = True
                self.mds       = True
                self.no_snr    = True
                self.corr      = True
            elif opt == '--corr':
                self.corr      = True
                self.dmatrix   = True
                self.mds       = True
                self.no_snr    = True
            elif opt == '--corrthresh': self.corr_signum_thresh = float(arg)
            elif opt == '--corrabs': self.corr_abs = True
            elif opt == '--mds': self.mds = True
            elif opt == '--transpose': self.transpose = True
            elif opt == '--pcaonly': self.pca_only = True
            elif opt == '--log2': settings['log2'] = True
            elif opt == '--kmeans': algs.append('kmeans')
            elif opt == '--som': algs.append('som')
            elif opt == '--noselection':
                settings['eigenvector_weight'] = 1.0
                settings['pca_fraction'] = 1.0
            # Median centring and mean centring are mutually exclusive
            elif opt == '--submedians':
                settings['sub_medians'] = True
                settings['center'] = False
            elif opt == '--center':
                settings['center'] = True
                settings['sub_medians'] = False
            elif opt == '--scale': settings['scale'] = True
            elif opt == '--nolog2': settings['log2'] = False
            elif opt == '--nosubmedians': settings['sub_medians'] = False
            elif opt == '--nocenter': settings['center'] = False
            elif opt == '--noscale': settings['scale'] = False
            elif opt == '--pcafraction': settings['pca_fraction'] = float(arg)
            elif opt == '--eigweight': settings['eigenvector_weight'] = float(arg)
            # --krange 2,6 expands to kvalues [2, 3, 4, 5, 6] (inclusive range)
            elif opt == '--krange' or opt == '-k': settings['kvalues'] = list(xrange(int(arg.split(',')[0]), int(arg.split(',')[1])+1))
            elif opt == '--subsamples': settings['subsamples'] = int(arg)
            elif opt == '--bsize': settings['bsize'] = int(arg)
            elif opt == '--tree_space': settings['tree_space'] = int(arg)
            elif opt == '--dpi': settings['dpi'] = int(arg)
            elif opt == '--nottest': settings['ttest'] = False
            elif opt == '--coring': self.coring = True
            # --comparelogs compares two cluster logfiles both ways and exits
            elif opt == '--comparelogs':
                from clustio.parsers  import read_cluster_log
                from clustio.logstats import compare

                log1,log2 = arg.split(',')
    
                log1_dict = read_cluster_log(log1)
                log2_dict = read_cluster_log(log2)
                
                compare(log1_dict, log2_dict, log1, log2)
                compare(log2_dict, log1_dict, log2, log1)
                sys.exit(0)

            elif opt == '--snr': 
                RUN_SNR = 1
                snropts = arg.split(',')
            elif opt == '--flist': self.flist = [ os.path.realpath(x) for x in arg.split(',') if x ]
            elif opt == '-h' or opt == '--help':
                usage()
                sys.exit(0)

        # --snr short-circuits: normalise the data, write the SNR ratio report
        # for the two named clusters, then exit without clustering
        if RUN_SNR:
            if filename is None:
                print("To use SNR list generation, please use the -f flag (and optionally -p) to select a dataset\n")
                print("USAGE: python common.py -f mydata.txt --snr <outfile>,<cluster1>,<cluster2>\n")
                sys.exit(0)
               
            s = parser(filename)

            for k in ('log2', 'sub_medians', 'center', 'scale'):
                if k not in settings:
                    settings.setdefault(k, False)

            analysis.normalise(s, log2=settings['log2'], sub_medians=settings['sub_medians'], center=settings['center'], scale=settings['scale'])

            # XXX Sept 24th we need to use the , split because we switched to getopt to avoid the bug where filenames with spaces could not be processed
            outfile, clust1, clust2 = snropts

            ttest = True
            if 'ttest' in settings:
                ttest = settings['ttest']

            scripts.write_ratio(s, clust1, clust2, outfile, pval_threshold=0.05, snr_threshold=0.5, ttest=ttest)
            sys.exit(0)

        if algs:
            settings['clustering_algs'] = algs
            
        if filename is None:
            print('No filename selected!\n\n')
            usage()
            sys.exit(1)

        if self.dmatrix and self.no_pca:
            print('Unable to perform dmatrix clustering without MDS projection! Please remove --nopca or --dmatrix to continue.\n\n')
            usage()
            sys.exit(1)

        if self.dmatrix and (self.keep_list or self.flist) and not (self.tom or self.corr):
            # User has given us a distance matrix and wants to reduce samples/features (which *should* be the same list)

            if self.keep_list and self.flist:
                print('\nWarning! Both -c and --flist are set for a distance matrix! Using -c only!\n\n')
            
                self.flist = self.keep_list
            else:
                # Whichever of the two was given serves as both the sample and feature list
                if self.keep_list:
                    self.flist     = self.keep_list
                else:
                    self.keep_list = self.flist


        self.args = sys.argv # Preserve for logging purposes

        return parser, filename, settings

    def consensus_procedure(self, parser, filename, log2=False, sub_medians=False, center=False, scale=False, pca_fraction=0.85, eigenvector_weight=0.25,
                 kvalues=range(2,7), subsamples=300, keep_list=None, **kwds):
        """
    
        Initialise clustering procedure and tell the user what's going on.

        Parses the data with parser(filename), runs the pre/post-processing
        hooks, then clusters once per entry in kvalues (unless pca_only is set).

        All output is written inside self.outdir (created if necessary), or a
        timestamped directory of the form username-YYYY-MM-DD-HH.MM.SS when no
        -o option was given.  The original working directory is always restored,
        even on error.
    
        """
        console = self.console

        if keep_list is not None:
            self.keep_list = keep_list
        
        cdir = os.path.realpath(os.curdir)

        def newdir():
            # Create and enter a timestamped output directory
            usrname = os.environ.get('USER', 'unknown')     # BUG FIX: was a bare except around os.environ['USER']
            tstamp  = time.strftime("%Y-%m-%d-%H.%M.%S")
            ndir    = '-'.join([usrname, tstamp])
            os.mkdir(ndir)
            os.chdir(os.path.join(cdir, ndir))

        try:

            # If the user hasn't specified an output directory via -o, make a timestamped one
            if self.outdir is None:
                newdir()
            else:
                try:
                    os.mkdir(self.outdir)
                except OSError:
                    # Directory most likely exists already; if something else is
                    # wrong, the chdir below will fail loudly.
                    pass

                os.chdir(os.path.join(cdir, self.outdir))

            self.sdata = console.announce_wrap('Parsing data...', parser, filename)
    
            self._preprocess()
    
            self._postprocess(log2, sub_medians, center, scale, pca_fraction, eigenvector_weight)
            
            if not self.pca_only:
                for i in kvalues:
                    self.run_cluster(i, subsamples, kwds)

        finally:
            # Always restore the original working directory (replaces the old
            # duplicated except/re-raise + fallthrough chdir pair)
            os.chdir(cdir)

    def makeplot(self, M, V=None, label='PCA', pca_legend=True, defined_clusters=None):
        """
    
        Draw the samples along the first two Principle Components using display.py's Plot.
    
        Usage: makeplot(M, V)
            
            M       - data matrix (samples x features)
            V       - The eigenvectors of the covariance matrix as determined by SVD
            label   - The filename
            pca_legend       - Draw a legend
            defined_clusters - A dict of cluster ids and their sample_ids.  This overrides those defined in the GUI, if any.
    
        If matplotlib isn't installed, this function will simply do nothing.
    
        WARNING:    Depending on how the matrix is decomposed you may find different, but also correct, values of V
                    This will manifest itself as the same plot, but reflected in one or both directions
        
        """

        # Coordinates for plotting: project onto the first two components if
        # eigenvectors were supplied, otherwise use the first two rows of M.T
        if V is not None:
            N = numpy.dot(V[:2], numpy.transpose(M))
        else:
            N = M.T[:2]

        # Fall back to GUI-defined clusters when no explicit dict was given
        if defined_clusters is None and hasattr(self, 'defined_clusters'):
            defined_clusters = self.defined_clusters

        if defined_clusters is None:
            # No cluster definitions: one unlabelled point cloud
            plots, legend = [N], None
        else:
            plots, legend = [], []

            sample_ids = [ s.sample_id for s in self.sdata.samples ]

            # Map each cluster id to the column indices of its samples
            indices = {}
            labelled = 0
            for cid in defined_clusters:
                indices[cid] = clustio.argintersect(sample_ids, defined_clusters[cid])
                labelled += len(indices[cid])

            for cid in indices:
                subset = N.take(tuple(indices[cid]), 1)
                if subset.any():
                    plots.append(subset)
                    legend.append(cid)

            if labelled < len(self.sdata.samples): #Unlabeled samples?
                used = sum([ indices[cid] for cid in indices ], [])
                leftover = numpy.setdiff1d(xrange(len(self.sdata.samples)), used)

                plots.append(N.take(tuple(leftover), 1))
                legend.append('Unlabeled')

        if not pca_legend:
            legend = None

        Plot(plots, legend = legend, fig_label = label)
    
    def run_pca(self, log2, sub_medians, center, scale, pca_fraction, eigenvector_weight, pca_legend=True):
        """
    
        Run PCA on self.sdata.M to reduce dimensionality to a reliable feature subset.
    
        Usage: self.run_pca(log2, sub_medians, center, scale, pca_fraction, eigenvector_weight)
    
            log2                - Take the log2 of all data.
            sub_medians         - Subtract the median of sample medians from each entry in M.
            center              - Normalise genes over all samples to have mean 0.
            scale               - Normalise genes over all samples by dividing by the Root-Mean-Square.
            pca_fraction        - Choose genes from those principle components that explain pca_fraction of the variance.
            eigenvector_weight  - Take the top eigenvector_weight fraction of genes that occur with high weights in selected components.

        This function runs makeplot once the data has been normalised.
        A logfile called "PCA results - timestamp.log" will be created with PCA result information.

        """

        console = self.console

        console.new_logfile('PCA results')

        mat = self.sdata.M

        console.log("Normalising %sx%s matrix" % (mat.shape[0], mat.shape[1]))

        # Eigenvectors plus the indices of the high-weight ("reliable") features
        eigvecs, kept = analysis.get_pca_genes(mat, pca_fraction, eigenvector_weight)

        console.log("Found %s principle components in the top %s fraction" % (len(eigvecs), pca_fraction))
        console.log("Found %s reliable features occurring with high weight (top %s by absolute value)" % (len(kept), eigenvector_weight))

        self.makeplot(mat, eigvecs, 'PCA results - PC2v1 - All samples', pca_legend)
        #self.makeplot(mat, eigvecs[1:], 'PCA results - PC3v2 - All samples', pca_legend)

        # Reduce dimensions: keep only the reliable feature columns
        self.sdata.M = mat.take(kept, 1)

        if hasattr(self.sdata, 'gene_names') and len(self.sdata.gene_names):
            self.sdata.gene_names = self.sdata.gene_names.take(kept)

            # Record which features survived selection
            console.new_logfile('PCA Results - Feature list')
            console.log("\nReliable features:\n", display=False)

            for name in self.sdata.gene_names:
                console.log("%s" % name, display=False)

    
    def run_cluster(self, num_clusters, subsamples, kwds):
        """
    
        Run the clustering routines, generate a heatmap of the consensus matrix, and fill the logs with cluster information.
    
        Each time this is run it will create a logfile with the number of clusters and subsamples in its name.  This contains
        information on which samples were clustered together for that particular K value.
    
        Usage: self.run_cluster(num_clusters, subsamples, kwds)
    
            num_clusters        - K value, or the number of clusters for the clustering functions to find for each subsample.
            subsamples          - The number of subsampling iterations to run.  In each subsample, the genes, samples, or both may
                                  be randomly selected for clustering.  This helps to ensure robust clustering.  More subsamples, more
                                  robust clusters.
            kwds                - Additional options to be sent to cluster.ConsensusCluster
    
        It's probably a very bad idea to subclass run_cluster.  The _report and _save_hmap functions are almost certainly what you want.
    
        """

        console = self.console
   
        console.new_logfile(logname = '%s clusters - %s subsamples' % (num_clusters, subsamples))
        
        console.log("\nSamples: %s" % len(self.sdata.samples))
        console.write("\nClustering data...")
    
        # locals() here snapshots num_clusters, subsamples, console and kwds;
        # 'self' is removed and kwds merged on top, and everything is passed on
        # as keyword arguments.  The extra keys (console, and kwds itself) are
        # swallowed by the **kwds catch-alls of ConsensusCluster and _report
        # (and presumably _save_hmap — confirm its signature).
        args = locals()
        del args['self']
        args.update(kwds)

        #Actual work
        clust_data = ConsensusCluster(self.sdata, **args)
        
        colour_map = console.announce_wrap("Building heatmap...", self._save_hmap, clust_data, **args)
        
        console.announce_wrap("Generating logfiles...", self._report, clust_data, colour_map=colour_map, **args)

        def core():
            # EXPERIMENTAL coring: run PCA over the consensus matrix (rows put
            # back in original sample order) and treat the high-weight samples
            # as suggested cluster 'cores'.  Results go to a separate logfile
            # and a PCA plot.
            #TESTING
            dc = {}
            
            M = clust_data.consensus_matrix.take(tuple(numpy.argsort(clust_data.reorder_indices)), 0)
            #for sam in clust_data.datapoints:
            #    dc.setdefault(str(sam.cluster_id), []).append(sam.sample_id)
            V, core_samples = analysis.get_pca_genes(M, 0.85, 0.15)
            
            # Group the core samples by their assigned cluster id
            for i in core_samples:
                sam = clust_data.datapoints[clust_data.reorder_indices[i]]
                dc.setdefault(str(sam.cluster_id), []).append(sam.sample_id)

            #dc['Core'] = [ clust_data.datapoints[clust_data.reorder_indices[x]].sample_id for x in core_samples ]
            
            console.new_logfile('%s cluster suggested cores' % num_clusters)

            for i in dc:
                #console.log('\nCluster %s core:\n' % i, display=False)
                console.log("\nCluster %s (%s):\n" % (i, colour_map[int(i)]), display=False)
            
                for sample_id in dc[i]:
                    console.log("\t%s" % sample_id, display=False)

            self.makeplot(M, V, '%s Cluster PCA Plot' % num_clusters, pca_legend = True, defined_clusters = dc)

        if self.coring:
            console.write("\nCreating Consensus PCA Plot...")
            core()

    def _report(self, clust_data, **kwds):
        """

        _report is called by run_cluster after each clustering set at a particular k-value is complete.

        Its job is to inform the user which clusters went where.  This can be done to the screen and to the logfile using console.log()

        Keyword arguments used here:

            colour_map      - dict mapping cluster_id -> display colour (produced by _save_hmap)
            num_clusters    - the K value for this clustering run
            subsamples      - number of subsamples clustered
            ttest           - optional; if present, passed as the significance flag to analysis.snr (defaults to True)

        Subclassing:

            def _report(self, clust_data, console, **kwds):

                etc...

            clust_data.datapoints is a list of SampleData objects, each of which has a cluster_id attribute.  This attribute indicates
            cluster identity, and any SampleData objects that share it are considered to be in the same cluster.  This doesn't have to be
            1, 2, 3...etc.  In fact, it doesn't have to be a number.

            See display.ConsoleDisplay for logging/display usage.

            You may want to subclass _report if you want to report on additional information, or if you simply want to turn this logging feature off.

        """
        console = self.console

        # Only features whose SNR exceeds this are reported
        threshold = 0.5

        # Initialise the various dictionaries
        colour_map = kwds['colour_map']

        # If the user supplied predefined cluster labels, build a reverse
        # lookup so each sample can be reported next to its definition
        if hasattr(self, 'defined_clusters'):
            sample_id_to_cluster_def = {}

            for cluster_def in self.defined_clusters:
                for sample_id in self.defined_clusters[cluster_def]:
                    sample_id_to_cluster_def[sample_id] = cluster_def

        # Group sample ids and sample indices by the cluster they landed in,
        # preserving the reordered (consensus) order within each cluster
        cluster_sample_ids = dict()
        cluster_sample_indices = dict()

        for sample_idx in clust_data.reorder_indices:
            datapoint = clust_data.datapoints[sample_idx]

            cluster_sample_ids.setdefault(datapoint.cluster_id, []).append(datapoint.sample_id)
            cluster_sample_indices.setdefault(datapoint.cluster_id, []).append(sample_idx)

        #Start writing the log
        console.log("\nClustering results")
        console.log("---------------------\n")
        console.log(" ".join(['Command: '] + self.args))

        console.log("\nNumber of clusters: %s\nNumber of subsamples clustered: %s\n" % (kwds['num_clusters'], kwds['subsamples']))
        console.log("\n---------------------")
        console.log("\nClusters")

        # sorted() rather than keys()/sort(): same result, and forward-compatible
        cluster_list = sorted(cluster_sample_ids)

        for cluster_id in cluster_list:

            console.log("\nCluster %s (%s):\n" % (cluster_id, colour_map[cluster_id]))

            for sample_id in cluster_sample_ids[cluster_id]:
                if hasattr(self, 'defined_clusters'):
                    console.log("\t%s\t\t%s" % (sample_id, sample_id_to_cluster_def[sample_id]))
                else:
                    console.log("\t%s" % sample_id)

        if not self.no_snr:

            M = self.sdata.M

            # Renamed from 'buffer' to avoid shadowing the builtin
            log_lines = []

            if hasattr(self.sdata, 'gene_names'):

                # v0.5: significance testing on by default; loop-invariant,
                # so look it up once rather than per cluster pair
                ttest = kwds.get('ttest', True)

                # Pairwise SNR comparison between every pair of clusters
                for i, j in comb(xrange(len(cluster_list)), 2):
                    clust1, clust2 = cluster_list[i], cluster_list[j]

                    ratios = analysis.snr(M, cluster_sample_indices[clust1], cluster_sample_indices[clust2], threshold=threshold, significance=ttest)

                    if ratios:
                        log_lines.append("\nCluster %s vs %s:" % (clust1, clust2))
                        log_lines.append("--------------------\n")
                        log_lines.append("Gene ID\t\tCluster %s Avg\tCluster %s Avg\tSNR Ratio\tp-value" % (clust1, clust2))

                        for ratio, gene_idx, mean1, mean2, pval in ratios:
                            log_lines.append("%10s\t\t%f\t\t%f\t\t%f\t\t%s" % (self.sdata.gene_names[gene_idx], mean1, mean2, ratio, pval))

            # This just prevents empty logs from being written
            def write_buffer(name, desc, buf):
                console.new_logfile(name)
                console.log(desc, display=False)

                for line in buf:
                    console.log(line, display=False)

            if log_lines:
                write_buffer('SNR Results - %s clusters - %s subsamples' % (kwds['num_clusters'], kwds['subsamples']), "SNR-ranked features with ratio greater than %s" % threshold, log_lines)
    
    def _save_hmap(self, clust_data, bsize = 10, tree_space = 200, dpi = None, **kwds):
        """

        _save_hmap uses display.Clustmap to produce a heatmap/dendrogram of the consensus matrix produced by cluster.ConsensusCluster

        Arguments:

            clust_data      - object carrying datapoints, reorder_indices and the consensus matrix
            bsize           - heatmap block size, passed through to Clustmap
            tree_space      - pixel space reserved for the dendrogram, passed through to Clustmap
            dpi             - image resolution passed to Clustmap.save (None = Clustmap default)

        Returns the Clustmap colour_map (cluster_id -> colour), which _report uses for its log.

        Subclassing:

            def _save_hmap(self, clust_data, **kwds):

                etc

            Really, the best reason to subclass _save_hmap is to change the heatmap labels.  See display.Clustmap for additional syntax.

            example: display.Clustmap(clust_data, [ clust_data.datapoints[x].sample_class for x in clust_data.reorder_indices ]).save('Consensus Matrix')

                ...will create a file called Consensus Matrix.png, which contains the consensus matrix heatmap labeled by sdata.samples[x].sample_class.

            clust_data.reorder_indices is (predictably) a list of indices which constitute the best order.  Since cluster.ConsensusCluster
            reorders the consensus matrix (clust_data.consensus_matrix) for you (but doesn't touch sdata.samples/clust_data.datapoints), you'll
            need to reorder the label list accordingly.  This can be just a list of labels as well, though once again you'll have to reorder your list
            to match reorder_indices.  A list comprehension of the general form [ labels[x] for x in clust_data.reorder_indices ] will do this for you,
            assuming labels is in the same order as sdata.samples/clust_data.datapoints.

            display.Clustmap.save() creates an image file and saves it to disk.
            display.Clustmap.show() opens a GTK window with the image.  Requires GTK.  See display.py.

        """

        filename = lambda s: "%s - %s clusters - %s subsamples" % (s, kwds['num_clusters'], kwds['subsamples'])

        # Labels appear in consensus (reordered) order: (sample_id, cluster_id) pairs
        labels = [ (clust_data.datapoints[x].sample_id, clust_data.datapoints[x].cluster_id) for x in clust_data.reorder_indices ]

        # 'hmap' rather than 'map' to avoid shadowing the builtin map()
        hmap = Clustmap(clust_data, labels, bsize=bsize, tree_space=tree_space)
        hmap.save(filename('Consensus Matrix'), dpi=dpi)

        return hmap.colour_map

    def _preprocess(self):
        """Any data preprocessing that needs to be done BEFORE PCA is accomplished through this method"""

        console = self.console

        # Transposition must run before any sample/feature filtering so that
        # the keep/feature lists refer to the orientation the user expects.
        # (It's vital that transpose happens first for intuitivity.)
        if self.transpose:
            self.sdata = console.announce_wrap('Transposing input matrix...', scripts.transpose_sdata, self.sdata)

        # Restrict to the samples named in keep_list, recording the resulting
        # user-defined cluster assignments on the instance.
        if self.keep_list is not None:
            keep_msg = 'Removing samples not found in %s...' % ", ".join(self.keep_list)
            self.sdata, self.defined_clusters = console.announce_wrap(keep_msg, scripts.scale_to_set, self.sdata, *self.keep_list)

        # Restrict to the features named in flist.
        if self.flist:
            feature_msg = 'Removing features not found in %s...' % self.flist
            self.sdata = console.announce_wrap(feature_msg, scripts.scale_probes, self.sdata, *self.flist)
            
    def _postprocess(self, log2, sub_medians, center, scale, pca_fraction, eigenvector_weight):
        """
        Data transformations applied AFTER _preprocess and BEFORE clustering.

        Pipeline, in order: sample-ID uniqueness check, normalisation,
        optional correlation/TOM graph construction, then dimensionality
        reduction (MDS for distance-matrix input, otherwise PCA) unless
        disabled, and finally an optional write of the reduced data to disk.

        Parameters (log2, sub_medians, center, scale are forwarded to
        analysis.normalise; pca_fraction and eigenvector_weight to
        run_pca/analysis.mds):

            log2                - presumably log2-transform during normalisation; confirm in analysis.normalise
            sub_medians         - presumably subtract medians during normalisation; confirm in analysis.normalise
            center              - presumably mean-center the data; confirm in analysis.normalise
            scale               - presumably scale to unit variance; confirm in analysis.normalise
            pca_fraction        - fraction retained by PCA/MDS (see run_pca / analysis.mds)
            eigenvector_weight  - passed only to run_pca
        """

        console = self.console

        # Duplicate sample IDs would make cluster reporting ambiguous; abort.
        idlist = [ x.sample_id for x in self.sdata.samples ]
        if len(dict.fromkeys(idlist)) != len(idlist):
            console.except_to_console('One or more Sample IDs are not unique!')
        
        # TOM sets dmatrix, so we have to make an exception here.
        # Not that it makes any sense, but who am I to judge?

        # Normalise unless the input is already a distance matrix (except when
        # building a TOM, which needs normalised data despite dmatrix being set).
        if not self.dmatrix or self.tom:
            analysis.normalise(self.sdata, log2=log2, sub_medians=sub_medians, center=center, scale=scale)

        # TOM takes a correlation graph as input
        # If we aren't doing TOM, correlation needs to convert to a dissimilarity matrix
        # TOM requires correlation to be abs'd

        if self.corr:
            self.sdata.M = console.announce_wrap('Creating correlation graph...', nw.corr, self.sdata.M, corr_abs=(self.corr_abs or self.tom), threshold=self.corr_signum_thresh, dissim=(not self.tom))

            if self.tom:
                # Soft-thresholding power; disabled (None) when a hard signum
                # threshold was already applied to the correlation graph.
                beta = 6.0 # FIXME: Should determine this from the data
                if self.corr_signum_thresh: beta = None

                self.sdata.M = console.announce_wrap('Creating TOM map...', nw.tom, self.sdata.M, beta=beta)

        if not self.no_pca:
            if self.mds or self.dmatrix:
                # Distance-matrix input (or an explicit MDS request) gets MDS
                # instead of PCA; sdata.M is replaced by the MDS projection.
                self.sdata.M = console.announce_wrap('Running MDS on %sx%s matrix...' % (self.sdata.M.shape[0], self.sdata.M.shape[1]), analysis.mds, self.sdata.M, pca_fraction)
                self.makeplot(self.sdata.M, label='MDS results - PC2v1 - All samples', pca_legend = self.pca_legend)
                # Feature names no longer apply after projection; substitute
                # synthetic labels for the MDS components.
                self.sdata.gene_names = numpy.array([ 'MDS Projection ' + str(x) for x in xrange(len(self.sdata.M[1])) ])
                console.write("Found %s components in the top %s fraction" % (self.sdata.M.shape[1], pca_fraction))
            else:
                console.announce_wrap('Running PCA...', self.run_pca, log2, sub_medians, center, scale, pca_fraction, eigenvector_weight, self.pca_legend)

        # Optionally persist the post-reduction data matrix.
        if self.write_pca_filename is not None:
            scripts.write_normal(self.sdata, self.write_pca_filename)


if __name__ == '__main__':

    # Script entry point: launch the default clustering workflow with no
    # pre-selected parser or input file and default options.
    # NOTE(review): CommonCluster is not defined in this chunk -- it is
    # presumably defined earlier in this file; confirm.
    parser = None
    filename = None
    args = dict()

    CommonCluster(parser, filename, **args)
