"""

Copyright 2009 Michael Seiler
Rutgers University
miseiler@gmail.com

This file is part of ConsensusCluster.

ConsensusCluster is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

ConsensusCluster is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with ConsensusCluster.  If not, see <http://www.gnu.org/licenses/>.


"""

import os
import numpy as N

from itertools import combinations as comb
from clustio.parsers import SampleData
from copy import deepcopy

from clustio import *


def transpose_sdata(s):
    """Return a new sdata-like object whose matrix is the transpose of s.M.

    The original sample ids become the gene names of the result, and the
    original gene names become the new sample ids.
    """

    flipped = parsers.NullParser()

    flipped.M = s.M.T.copy()
    flipped.gene_names = N.array(s.sample_ids)
    flipped.samples = [ parsers.SampleData(sample_id=name) for name in s.gene_names ]

    return flipped

def write_ratio(s, clust1, clust2, filename, snr_threshold=0.5, pval_threshold=0.001, ttest=False):
    """Write SNR ratios given sdata obj, clust1 list or filename, clust2 list or
    filename, file to write to, threshold for SNR, threshold for pval, do ttest or not

    Writes a header identifying the two clusters, then one line per gene
    passing the SNR (and, if ttest, the p-value) threshold.
    """

    from analysis import snr

    c1name, c1ind = get_indices(s, clust1)
    c2name, c2ind = get_indices(s, clust2)

    ratios = snr(s.M, c1ind, c2ind, snr_threshold, ttest)

    # 'with' guarantees the file handle is closed even if a write fails;
    # the original left the handle open on any exception
    with open(filename, 'w') as f:
        f.write("%s vs %s:\n" % (c1name, c2name))
        f.write("--------------------\n")
        f.write("Gene ID\t\t%s Avg\t%s Avg\tSNR Ratio\tp-value\n" % (c1name, c2name))

        for ratio, gene_idx, mean1, mean2, pval in ratios:
            # Without a t-test there is no p-value to filter on
            if not ttest or pval <= pval_threshold:
                f.write("\n%10s\t\t%f\t\t%f\t\t%f\t\t%s" % (s.gene_names[gene_idx], mean1, mean2, ratio, pval))

def scale_to_set(sdata, *filenames):
    """

    scale_to_set(filename)

        Removes all but those sample_ids you specifiy.

        filenames    - filenames or dicts
                       each file containing list of sample ids to use
                       or each dict containing name->list of sample ids

    Returns: modified sdata object, dict of cluster->indices

    """

    trimmed = parsers.NullParser()

    clusters = list_or_files(*filenames)

    all_ids = [ sam.sample_id for sam in sdata.samples ]

    wanted = []
    for key in clusters:
        wanted.extend(clusters[key])

    keep = argintersect(all_ids, wanted)

    # Map each sample id back to the cluster it was defined in
    class_of = {}
    for key in clusters:
        for sid in clusters[key]:
            class_of[sid] = key

    #Adjustment
    trimmed.samples = []
    for i in keep:
        old = sdata.samples[i]
        trimmed.samples.append(SampleData(cluster_id=old.cluster_id, sample_id=old.sample_id,
                                          sample_num=old.sample_num, index=old.index,
                                          sample_class=class_of[old.sample_id]))

    trimmed.M = sdata.M.take(tuple(keep), 0)
    trimmed.gene_names = sdata.gene_names.copy()

    kept_ids = trimmed.sample_ids

    for key in clusters: #If samples aren't in the main, ignore them
        members = clusters[key]
        present = argintersect(members, kept_ids)
        clusters[key] = [ members[idx] for idx in present ]

    return trimmed, clusters

def scale_probes(sdata, *filenames):
    """

    scale_probes(sdata, filename)

        Removes all gene probes except those you specify

        filename    - File(s) containing a list of probes, one on each line
                      Also accepted: Lists, dicts.  Only the values will be used in the dicts.

    Returns: modified sdata object

    """

    trimmed = parsers.NullParser()

    # Flatten every probe list into one pool
    wanted = []
    for probes in list_or_files(*filenames).values():
        wanted += probes

    keep = tuple(argintersect(sdata.gene_names, wanted))

    trimmed.M = sdata.M.take(keep, 1)
    trimmed.gene_names = sdata.gene_names.take(keep)
    trimmed.samples = [ deepcopy(sample) for sample in sdata.samples ]

    return trimmed

def new_defined_clusters(sdata, conv):
    """

    Define different clusters than the ones specified by your Defined Clusters, whether
    through the GUI, modification of keep_list, or through the command line

    sdata: sample data obj
    conv: conversion dict, keys sample ids values new cluster assignments
    Stick this in your preprocess function (see common.py for subclassing help)

    Samples absent from conv are grouped under 'Unknown'.

    """

    clusters = {}

    for sample in sdata.samples:
        label = conv.get(sample.sample_id, 'Unknown')
        clusters.setdefault(label, []).append(sample.sample_id)

    return clusters

def make_def_clusters_from_log(logfile):
    """Takes a logfile and writes cluster definition files.

    Each cluster in the log becomes a file 'cluster_<num>' containing one
    sample id per line.
    """

    logdict = read_cluster_log(logfile)

    for clustname in logdict:
        name = clustname.split() #Most currently: # (colour)
        filen = 'cluster_' + str(name[0]) #cluster_0, etc

        # 'with' closes the file even if a write fails mid-cluster
        with open(filen, 'w') as f:
            for sample in logdict[clustname]:
                f.write(sample)
                f.write("\n")

def remove_pc(sdata, num=1):
    """Remove the first num principal components from the data.

    The matrix is mean-centered, decomposed with a thin SVD, the num
    largest singular values are zeroed, and the matrix is recomposed and
    un-centered.  NOTE: the centering subtraction mutates sdata.M in
    place; sdata is also returned for convenience.

    Bug fix: the original body referenced 'numpy', but this module
    imports it as 'N' (see top of file), so every call raised NameError.
    """

    M = sdata.M

    avg = N.average(M, 0)

    M -= avg

    u, s, V = N.linalg.svd(M, 0)        #Decompose (thin SVD)
    S = N.identity(s.shape[0]) * s

    # svd returns singular values sorted in descending order, so this
    # zeroes the num largest components
    for i in range(num):
        S[i][i] = 0.        #Sets the offending eigenvalue to 0

    sdata.M = N.dot(N.dot(u, S), V) + avg       #Recompose

    return sdata

def km(timeconv, eventconv, *clusts):
    """
    Draw the kaplan-meier curves for a number of clusters

    timeconv - file of tab-delim table of sample id -> survival time conversions
    eventconv - file of tab-delim table of sample id -> event conversions (ie, 1 for yes, 0 for no, anything else for NA)
    clusts - cluster filenames

    Writes 'gnumeric_input' with one tab-delimited (time, censor, group)
    row per usable sample.
    """

    tc = read_table(timeconv)
    ec = read_table(eventconv)

    labels = []
    times = []
    events = []

    for clust in clusts:

        cl = read_list(clust)

        ts = []
        ev = []

        for sam in cl:
            try:
                time = float(tc[sam])
                event = int(ec[sam]) ^ 1 #XXX: Gnumeric requires censors, not events

                ts.append(time)
                ev.append(event)
            # Narrowed from a bare except: only skip samples that are
            # missing from a table (KeyError) or have an NA/unparseable
            # time or event (ValueError)
            except (KeyError, ValueError):
                pass

        labels.append(clust)
        times.append(ts)
        events.append(ev)

    # 'with' guarantees the output file is closed even on error
    with open('gnumeric_input', 'w') as f:
        for i, label in enumerate(labels):
            for t, e in zip(times[i], events[i]):
                f.write('\t'.join([ str(t), str(e), str(i) ]) + '\n')
            # Bug fix: the original printed str([i]) -- the repr of a
            # one-element list ('[0]') -- instead of the group index
            print('Wrote label %s as group %s' % (label, i))

def compare_clust(*clusts):
    """

    Print some statistics about the similarities/differences between 'clusts',
    which are files with one sample_id on each line, as per usual.

    """

    cdict = list_or_files(*clusts)

    for fst, snd in comb(cdict.keys(), 2):
        # Indices (relative to cdict[fst]) of samples present in both.
        # The original also computed the reverse intersection, but never
        # used it -- only the common count matters here.
        common = argintersect(cdict[fst], cdict[snd])

        num_common = len(common)

        if num_common:

            print('\n%s vs %s:' % (fst, snd))
            print('Common: %s (%f, %f) (%s/%s, %s/%s)' % (num_common, float(num_common) / len(cdict[fst]), float(num_common) / len(cdict[snd]), num_common, len(cdict[fst]), num_common, len(cdict[snd])))

def create_mvpa_dataset(s, *names):
    """Build a PyMVPA Dataset from sdata s.

    names - cluster filenames or dicts (as accepted by list_or_files);
            each cluster's samples are labeled with the cluster name.

    Returns: the concatenated Dataset, or None if no clusters were given.
    """

    from mvpa.suite import Dataset

    dset = None

    def_clusters = list_or_files(*names)

    for name in def_clusters:
        n, ind = get_indices(s, {name: def_clusters[name]})

        part = Dataset(samples=s.M.take(tuple(ind), 0), labels=n)

        # Explicit None test: Dataset truthiness may reflect its length,
        # so 'if dset:' could wrongly restart the accumulation after an
        # empty partition
        if dset is None:
            dset = part
        else:
            dset += part

    return dset

def create_network_from_sdata(s, weighted=False):
    """Build a networkx Graph from a symmetric similarity sdata object.

    Every gene name becomes a node; every nonzero entry of s.M becomes an
    edge (carrying the similarity as 'weight' when weighted=True).
    """

    import networkx as nx

    # Sanity checks: s.M must be square and its labels (gene_names) must
    # line up with the sample ids, i.e. a similarity matrix over samples
    assert s.M.shape[0] == s.M.shape[1]
    assert (N.array([ x.sample_id for x in s.samples ]) == s.gene_names).all()

    graph = nx.Graph()
    graph.add_nodes_from(s.gene_names)

    for a, b in comb(xrange(len(s)), 2):
        strength = s.M[a][b]

        if not strength:
            continue

        if weighted:
            graph.add_edge(s.gene_names[a], s.gene_names[b], weight=strength)
        else:
            graph.add_edge(s.gene_names[a], s.gene_names[b])

    return graph

def create_visml_from_sdata(s, distance='euclidean', dmatrix=False, allow_zero_weight=False):
    """Build a VisML tree from sdata s.

    distance          - name of a scipy.spatial.distance function used to
                        build a pairwise distance matrix (ignored if dmatrix)
    dmatrix           - if true, s.M is already a distance matrix
    allow_zero_weight - also add edges whose distance is 0
    """

    from pyvisml import VisML

    if not dmatrix:
        # Fixes two problems with the original:
        # 1) 'import scipy' alone does not load the scipy.spatial.distance
        #    subpackage, so the lookup could fail;
        # 2) eval() on the distance name would execute arbitrary code if
        #    'distance' ever came from user input -- getattr does the same
        #    attribute lookup safely.
        import scipy.spatial.distance
        dm = getattr(scipy.spatial.distance, distance)

        # Distance matrix M
        M = N.zeros((s.M.shape[0], s.M.shape[0]), dtype=N.float32)
        for i, j in comb(xrange(len(s)), 2):
            M[i][j] = dm(s.M[i], s.M[j])

        # Normed to [0,1]
        M /= M.max()
    else:
        M = s.M

    tree = VisML.create_empty_visml_tree(layout='elegant:100', fineArt='False')

    nodes = []
    for name in s.sample_ids:
        nodes.append(tree.add_node(name, '0', '0'))

    for i, j in comb(xrange(len(s)), 2):
        if M[i][j] or allow_zero_weight:
            tree.add_edge(nodes[i], nodes[j], weight=str(M[i][j]))

    return tree

def create_sdata_from_visml(tree, methods=[]):
    """
    Creates a symmetric SampleData object from tree, incorporating only those links in methods if given

    """

    from pyvisml import VisML

    vtree = VisML.VisMLTree(tree)

    node_by_name = {}
    for node in vtree.nodes:
        node_by_name[node.name] = node

    sample_ids = sorted(node_by_name)

    size = len(sample_ids)
    M = N.zeros((size, size), dtype=N.float32)

    # Position of each sample id along the matrix axes
    index_of = dict( (name, pos) for pos, name in enumerate(sample_ids) )

    for name in sample_ids:
        for link in node_by_name[name].links:

            if methods and link.method not in methods:
                continue

            row = index_of[name]
            col = index_of[link.to]

            # Unweighted links count as 1.0
            if link.weight is None:
                M[row][col] = 1.0
            else:
                M[row][col] = float(link.weight)

    # The tree must encode every link in both directions
    assert (M == M.T).all()

    result = parsers.NullParser()
    result.M = M
    result.gene_names = N.array(sample_ids)
    result.samples = [ parsers.SampleData(sample_id=x) for x in result.gene_names ]

    return result

def cv_workflow(s, fdict, clf):
    """Recursive-feature-elimination style workflow: repeatedly drop the
    last probe and record the n-fold CV error at each feature count.

    s     - sdata object
    fdict - defined clusters (cluster name -> sample id list) for labeling
    clf   - PyMVPA classifier

    Returns: (errors, confusions) -- lists of (num_features, value) tuples
    """

    from mvpa.suite import CrossValidatedTransferError, TransferError, NFoldSplitter

    errors = []
    confusions = []

    for i in xrange(len(s.M[0]) - 1):
        pset = list(s.gene_names)[:-1]

        # Bug fix: scale_probes returns a new, trimmed sdata object; the
        # original discarded that return value, so every iteration ran on
        # the full feature set.  Rebind s so each pass drops one probe.
        s = scale_probes(s, pset)

        dset = create_mvpa_dataset(s, fdict)
        cv = CrossValidatedTransferError(TransferError(clf), NFoldSplitter(),
                                         enable_states=['results', 'confusion'])
        er = cv(dset)

        errors.append((len(s.M[0]), er))
        confusions.append((len(s.M[0]), cv.confusion.asstring(description=1)))

    return errors, confusions
