import redi_utils as utils

import os
import math

association_data_types = ["rface", "pairwise" ]
association_usage = "\tredi %s import associations -f /local/path/to/file.tsv -t %s"

def load(dataset_id, filename, config, options):
    """Import an associations file into a dataset.

    dataset_id -- target dataset directory (output paths are built under it)
    filename   -- path of the source TSV to import
    config     -- importer configuration (unused here; kept for interface parity)
    options    -- parsed CLI options; reads options.id and whatever
                  utils.get_dataType consults

    Returns a metadata dict describing the imported item for the "pairwise"
    type, or an empty dict for types that are not (yet) handled.
    """
    # FIX: Py2-only print statement -> print() function, matching the
    # function form already used below for the "rface" branch.
    print("Importing associations from " + filename + " to " + dataset_id)

    itemid = options.id
    if itemid is None:
        itemid = utils.get_filename(filename)

    dataType = utils.get_dataType(dataset_id, options, association_usage, association_data_types)

    if dataType == "rface":
        print("TODO : Process RFACE results")

    elif dataType == "pairwise":
        table_path = dataset_id + "/associations/PAIRWISE_TABLE_" + itemid
        graph_path = dataset_id + "/associations/PAIRWISE_GRAPH_" + itemid
        # FIX: context managers guarantee both files are closed even if
        # process_pairwise raises (the explicit close() calls did not).
        with open(table_path, "w") as table_out, open(graph_path, "w") as graph_out:
            item_schema, max_logpv = process_pairwise(filename, table_out, graph_out)

        return { "id": itemid, "type": "associations", "subtype": "pairwise", "max_logpv": max_logpv,
                 "table": os.path.basename(table_path), "schema": item_schema,
                 "graph": os.path.basename(graph_path) }
    return {}

"""
Include edges where nodes are in original set, direction does not matter so do not populate edge if A->B if B->A are in hash
Expected tab delimited columns are nodeA nodeB pvalue correlation numNonNA
"""
def process_pairwise(associations_file, table_out, graph_out):
    """Parse a pairwise-associations TSV; write table rows and graph rows.

    associations_file -- path to the tab-delimited input (11 or 12 columns)
    table_out         -- open writable file for the flattened table rows
    graph_out         -- open writable file for the graph edges

    Returns (item_schema, max_pv) where item_schema is the column schema for
    the table file and max_pv is the largest logged p-value seen.
    """
    # FIX: set instead of list -- membership tests were O(n) per line.
    edges_seen = set()

    max_pv = -1000.0
    max_pv_corr = -1000.0

    # FIX: valid-edge counter started at 1, so the report over-counted by one.
    numberOfValidEdges = 0
    numberOfDuplicateEdges = 0
    totalEdges = 0
    invalidCorrelations = 0
    unMapped = 0

    # TODO: Represent nulls as sql bulk 'slash-N' value
    # FIX: the input file was opened but never closed; use a context manager.
    with open(associations_file) as edges_file:
        for line in edges_file:
            totalEdges += 1
            tokens = line.rstrip().split('\t')

            # NOTE(review): the module docstring mentions 5 columns, but the
            # code requires 11 or 12 -- keeping the code's contract.
            if len(tokens) < 11 or len(tokens) > 12:
                print(" wrong # of tokens: <%s>, got %d, expecting 11 or 12" % (line, len(tokens)))
                continue

            nodeA = tokens[0]
            nodeB = tokens[1]

            if isUnmappedAssociation(nodeA, nodeB):
                unMapped += 1
                continue

            nodeA = nodeA.replace("|", "_")
            nodeB = nodeB.replace("|", "_")
            feature1id = tokens[0]
            feature2id = tokens[1]

            edge_key = nodeA + "_" + nodeB
            reverse_key = nodeB + "_" + nodeA
            # FIX: the documented contract says direction does not matter,
            # but only the A_B key was ever checked; also test B_A.
            if edge_key in edges_seen or reverse_key in edges_seen:
                print("duplicated edge:" + edge_key)
                numberOfDuplicateEdges += 1
                continue
            edges_seen.add(edge_key)

            dataA = process_feature_alias(nodeA)
            dataB = process_feature_alias(nodeB)

            # Pad 7-field aliases to 8 so every table row has the same width.
            if len(dataA) == 7:
                dataA.append("")
                nodeA = nodeA + ":"
            if len(dataB) == 7:
                dataB.append("")
                nodeB = nodeB + ":"

            correlation = tokens[2]
            if correlation == 'nan':
                invalidCorrelations += 1
                continue

            # FIX: count an edge as valid only once it passes all filters;
            # previously nan-correlation rows were counted as valid too.
            numberOfValidEdges += 1

            numna = tokens[3]
            pv = tokens[4]
            if float(pv) > max_pv:
                max_pv = float(pv)

            bonf = tokens[5]
            pv_bonf = tokens[6]
            if float(pv_bonf) > max_pv_corr:
                max_pv_corr = float(pv_bonf)

            numnaf1 = tokens[7]
            pvf1 = tokens[8]
            numnaf2 = tokens[9]
            pvf2 = tokens[10]
            # Signed magnitude of the logged p-value, sign taken from the
            # correlation.
            rho = str(sign(float(correlation)) * abs(float(pv)))
            link_distance = '500000000'
            if len(tokens) >= 12:
                link_distance = tokens[11]

            row = [feature1id, feature2id, nodeA] + dataA + [nodeB] + dataB + \
                  [correlation, numna, pv, bonf, pv_bonf,
                   numnaf1, pvf1, numnaf2, pvf2, rho, link_distance]
            table_out.write("\t".join(row) + "\n")
            append_graph_out(line, graph_out)

    print("Report: Valid Edges %i Duped %i cNAN %i \nunMapped %i \nTotal %i max_pvalue %f max_pvalue_corr %f" % (numberOfValidEdges, numberOfDuplicateEdges, invalidCorrelations, unMapped, totalEdges, max_pv, max_pv_corr))

    # Schema of the table file written above; "order" is the column index.
    schema_fields = [
        ("F1_ID", "Feature 1", "string"),
        ("F2_ID", "Feature 2", "string"),

        ("CORRELATION", "Correlation", "double"),
        ("NUM_NON_NA", "Number of Non-NA Samples", "int"),
        ("LOGGED_PVALUE", "Logged p-value", "double"),
        ("BONF_FAC", "BONF_FAC", "double"),
        ("LOGGED_PVALUE_BONF", "Logged p-value (BONF)", "double"),
        ("RHO_SCORE", "Rho Score", "double"),
        ("LINK_DISTANCE", "Link Distance", "double"),

        ("F1_ALIAS", "Feature 1 :: Alias", "string"),
        ("F1_TYPE", "Feature 1 :: Type", "string"),
        ("F1_SOURCE", "Feature 1 :: Source", "string"),
        ("F1_LABEL", "Feature 1 :: Label", "string"),
        ("F1_CHR", "Feature 1 :: Chromosome", "string"),
        ("F1_START", "Feature 1 :: Start", "int"),
        ("F1_END", "Feature 1 :: End", "int"),
        # TODO : Determine best way to represent strand
        ("F1_STRAND", "Feature 1 :: Strand", "string"),
        ("F1_DESCRIPTION", "Feature 1 :: Description", "string"),
        ("F1_NUM_NON_NA", "Feature 1 :: Number of Non-NA Samples", "double"),
        ("F1_LOGGED_PVALUE", "Feature 1 :: Logged p-value", "double"),

        ("F2_ALIAS", "Feature 2 :: Alias", "string"),
        ("F2_TYPE", "Feature 2 :: Type", "string"),
        ("F2_SOURCE", "Feature 2 :: Source", "string"),
        ("F2_LABEL", "Feature 2 :: Label", "string"),
        ("F2_CHR", "Feature 2 :: Chromosome", "string"),
        ("F2_START", "Feature 2 :: Start", "int"),
        ("F2_END", "Feature 2 :: End", "int"),
        ("F2_STRAND", "Feature 2 :: Strand", "string"),
        ("F2_DESCRIPTION", "Feature 2 :: Description", "string"),
        ("F2_NUM_NON_NA", "Feature 2 :: Number of Non-NA Samples", "double"),
        ("F2_LOGGED_PVALUE", "Feature 2 :: Logged p-value", "double"),
    ]
    item_schema = [{ "name": name, "label": label, "type": coltype, "order": order }
                   for order, (name, label, coltype) in enumerate(schema_fields)]

    return item_schema, max_pv

"""
Classify edges as unmapped if both nodes do not have chr positions
"""
def isUnmappedAssociation(f1alias, f2alias):
    """Return True when neither node alias carries a chromosome value.

    Aliases are colon-delimited; field 3 is the chromosome. Aliases with
    fewer than 5 fields are treated as mapped (False), matching the
    original guard on the first alias.
    """
    f1data = f1alias.split(":")
    if len(f1data) < 5:
        return False

    f2data = f2alias.split(":")
    # FIX: the original indexed f2data[3] with no length guard, raising
    # IndexError on a short second alias; guard it like the first alias.
    if len(f2data) < 5:
        return False

    # Unmapped only when BOTH chromosome fields are empty.
    return f1data[3] == "" and f2data[3] == ""

def process_feature_alias(alias):
    """Split a colon-delimited feature alias into its fields.

    When the alias has more than 4 fields, the first 3 characters of field 3
    (presumably a "chr" prefix -- TODO confirm) are stripped off, provided the
    field is longer than 3 characters.
    """
    fields = alias.split(":")
    has_chrom = len(fields) > 4
    if has_chrom:
        chrom = fields[3]
        if len(chrom) > 3:
            fields[3] = chrom[3:]
    return fields

def append_graph_out(line, outFile):
    """Write one graph edge derived from a raw association input line.

    line    -- raw tab-delimited association row (see process_pairwise)
    outFile -- open writable file receiving the graph rows

    Rows whose endpoints cannot be mapped to node names are skipped.
    """
    tokens = line.rstrip().split("\t")
    sourceNode = getNode(tokens[0])
    targetNode = getNode(tokens[1])

    # FIX: `is`-style truthiness check instead of ==None / =="" comparisons.
    if not sourceNode or not targetNode:
        return

    # Clamp "-inf" logged p-values to the -1000 sentinel.
    pvalue = "-1000" if tokens[4] == "-inf" else tokens[4]

    # FIX: writelines() was being passed a single string; write() is the
    # correct call (same output, clearer intent).
    outFile.write(sourceNode + "\t" + targetNode + "\t" + tokens[0] + "\t" +
                  tokens[1] + "\t" + pvalue + "\t" + tokens[2] + "\t" + tokens[3] + "\n")

def getNode(featureid):
    """Map a feature id ("N:TYPE:NAME:...") to a lowercase graph node name.

    gexp/meth features map to their name; gnab features map to the name with
    its trailing "_suffix" removed. Any other type yields None (callers check
    for a falsy result).
    """
    parts = featureid.split(":")
    # FIX: ids with fewer than 3 fields raised IndexError; return None so the
    # caller's existing unmapped-node check handles them.
    if len(parts) < 3:
        return None

    ftype = parts[1].lower()  # hoisted: original lowered this repeatedly
    name = parts[2]

    if ftype == "gexp" or ftype == "meth":
        return name.lower()
    if ftype == "gnab":
        prefix, _, suffix = name.rpartition("_")
        # No underscore present: rpartition puts the whole name in `suffix`.
        return (suffix if prefix == "" else prefix).lower()
    return None

def sign(x):
    """Return the sign of a number as a float: 1.0 or -1.0 (zero maps to 1.0)."""
    return math.copysign(1, x)
