import sys
import fastcluster

def read_pair_similarities(filename):
    """Read whitespace-separated (node1, node2, similarity) triples from a file.

    Each non-empty line holds two integer node indices followed by a float
    similarity.  Pairs are normalized so the smaller index comes first; if a
    pair appears more than once the last value wins.

    Returns a dict mapping (min_index, max_index) -> similarity.
    """
    pair_dx = {}
    # 'with' guarantees the file handle is closed; the original leaked it.
    with open(filename) as handle:
        for line in handle:
            line = line.strip()
            if not line:
                continue
            fields = line.split()
            node1 = int(fields[0])
            node2 = int(fields[1])
            similarity = float(fields[2])
            # normalize key order so (a, b) and (b, a) collapse to one entry
            if node2 < node1:
                node1, node2 = node2, node1
            pair_dx[(node1, node2)] = similarity
    return pair_dx

def determine_node_max_and_min(pair_dx):
    """Return (smallest, largest) node index appearing in any pair key.

    Raises ValueError if pair_dx is empty, matching min()/max() on an
    empty collection.
    """
    nodes = {node for pair in pair_dx for node in pair}
    return min(nodes), max(nodes)

def transform_pairs_to_condensed_distance_matrix(pair_dx, min_node, max_node):
    """Build a condensed (upper-triangle) distance matrix from pair similarities.

    Distance is 1 - similarity; pairs absent from pair_dx get the maximum
    distance of 1.0.  Entries are emitted row-major over the upper triangle
    of the implicit (max_node - min_node + 1)-square matrix — the layout
    scipy/fastcluster condensed-matrix routines expect.
    """
    # dict.has_key() was removed in Python 3; .get() with a similarity
    # default of 0.0 yields the same 1.0 distance for missing pairs.
    condensed = [1.0 - pair_dx.get((i, j), 0.0)
                 for i in range(min_node, max_node + 1)
                 for j in range(i + 1, max_node + 1)]
    return condensed

def extract_quality_clusters(hierarchy, min_node, max_node, threshold):
    """Cut an agglomerative linkage hierarchy at a maximum-distance threshold.

    hierarchy is an (n - 1) x 4 linkage matrix in scipy/fastcluster format:
    row i merges clusters Z[i, 0] and Z[i, 1] (indices < n denote original
    observations, >= n denote clusters formed by earlier rows) at distance
    Z[i, 2]; Z[i, 3] is the size of the new cluster.  Rows come in order of
    non-decreasing distance, so iteration stops at the first merge whose
    distance exceeds the threshold.

    Returns a dict mapping cluster label -> list of original node indices.
    """
    # Every node starts in its own singleton cluster, keyed by node index.
    clusters = {i: [i] for i in range(min_node, max_node + 1)}
    # fastcluster numbers observations 0..n-1 regardless of min_node, so each
    # linkage index must be shifted by min_node to address our keys.  The
    # original code skipped this offset and broke whenever min_node != 0;
    # for min_node == 0 the offset is a no-op, so behavior there is unchanged.
    # Merged cluster n + i then lands on label max_node + 1 + i, matching the
    # labels we assign below.
    next_label = max_node + 1
    for cluster0, cluster1, distance, _n_observations in hierarchy:
        if distance > threshold:
            break
        # Linkage entries are floats; cast to int before using them as keys.
        key0 = int(cluster0) + min_node
        key1 = int(cluster1) + min_node
        clusters[next_label] = clusters[key0] + clusters[key1]
        del clusters[key0]
        del clusters[key1]
        next_label += 1
    return clusters
    
def output_clusters(clusters, filename):
    """Write clusters to a file, one per line as "[label] member member ...".

    Lines are ordered by ascending cluster label.
    """
    # 'with' guarantees the handle is closed even if a write fails; the
    # original also shadowed the builtin name 'file'.
    with open(filename, 'wt') as out:
        for clusterid in sorted(clusters):
            members = clusters[clusterid]
            out.write("[%d] %s\n" % (clusterid, ' '.join(str(m) for m in members)))
    
if __name__ == '__main__':
    import argparse
    import time

    # start the clock...
    start_time = time.time()

    # parse command line...
    arg_parser = argparse.ArgumentParser(description="Command line for average_link.py.")
    arg_parser.add_argument('-input', type=str, help="filename for node pair similarities in index0, index1, similarity in tsv format.")
    arg_parser.add_argument('-output', type=str, help="filename to write cluster assignments one set of cluster members per line.")
    arg_parser.add_argument('-threshold', type=float, default=0.50, help="cutoff distance for accepting cluster merging.")
    args = arg_parser.parse_args()

    log = sys.stderr.write

    # read input data as list of pairs...
    log("Loading data...")
    pair_dx = read_pair_similarities(args.input)
    min_node, max_node = determine_node_max_and_min(pair_dx)
    log("OK.\n")

    # transform into condensed distance matrix...
    log("Creating condensed distance matrix...")
    condensed = transform_pairs_to_condensed_distance_matrix(pair_dx, min_node, max_node)
    log("OK.\n")

    # run clustering using average link...
    log("Performing hierarchical average/UPGMA linkage on a condensed distance matrix...")
    hierarchy = fastcluster.average(condensed)
    log("OK.\n")

    # output results...
    log("Extracting clusters at maximum distance threshold of %f..." % args.threshold)
    clusters = extract_quality_clusters(hierarchy, min_node, max_node, args.threshold)
    log("OK.\n")

    # output clusters to file one line of sample indexes per cluster...
    if args.output:
        log("Writing clusters to output file...")
        output_clusters(clusters, args.output)
        log("OK.\n")

    # show elapsed wallclock time...
    elapsed = time.time() - start_time
    log("Elapsed time = %0.2f seconds.\n" % elapsed)
