'''
Created on Jun 18, 2010
@author: oabalbin

Modified on Nov 8, 2011
@author: oabalbin
'''

import sys
import numpy as np
import networkx as nx
from optparse import OptionParser
from collections import deque, defaultdict

def read_edges_list(inputfile, outputfile):
    """
    Convert a STRING-style edge file into a tab-separated edge-cost file.

    Reads ``node_a<TAB>node_b<TAB>confidence`` rows from *inputfile* (the
    first line is a header and is copied through) and writes
    ``node_a<TAB>node_b<TAB>cost`` rows, where
    cost = -log(confidence / 1000), so maximum-confidence edges cost ~0.

    Parameters
    ----------
    inputfile : iterator of str
        Open handle on the tab-separated edge file (header + data rows).
    outputfile : writable file object
        Destination for the converted rows.
    """
    # Builtin next() instead of the Python-2-only .next() method.
    header = next(inputfile)
    # Join with tabs directly: the old ",".join(...).replace(',', '\t')
    # round-trip corrupted any field that itself contained a comma, and
    # keeping the header's own newline produced a doubled newline.
    outputfile.write('\t'.join(header.rstrip('\n').split('\t')) + '\n')
    STRING_max_score = 1000  # STRING confidence scores are scaled 0-1000.
    for line in inputfile:
        fields = line.strip('\n').split('\t')
        # High confidence -> low edge cost; confidence == max -> cost 0.
        weight = -np.log(float(fields[2]) / STRING_max_score)
        outputfile.write('\t'.join([fields[0], fields[1], str(weight)]) + '\n')
    

def read_nodes_list(inputfile, outputfile):
    """
    Normalize node prizes and write them as tab-separated lines.

    Reads ``name<TAB>weight`` rows (first line is a header, copied through),
    maps each weight to |log(weight)| (zero weights kept as 0, since log(0)
    is undefined), divides by the sum of all mapped weights and writes
    ``name<TAB>normalized_weight`` rows.

    Parameters
    ----------
    inputfile : iterator of str
        Open handle on the tab-separated node file (header + data rows).
    outputfile : writable file object
        Destination for the normalized rows.
    """
    all_nodes = {}
    # Builtin next() instead of the Python-2-only .next() method.
    header = next(inputfile)
    # Tab-join directly; the old join/replace dance corrupted fields that
    # contained commas and wrote the header with a doubled newline.
    outputfile.write('\t'.join(header.rstrip('\n').split('\t')) + '\n')

    for line in inputfile:
        fields = line.strip('\n').split('\t')
        weight = float(fields[1])
        # log(0) is undefined, so zero-weight nodes keep a raw 0 prize.
        all_nodes[fields[0]] = weight if weight == 0 else np.abs(np.log(weight))

    # Builtin sum() works with both Python 2 lists and Python 3 dict views;
    # np.sum(np.array(d.values())) fails under Python 3.
    node_weight_denominator = sum(all_nodes.values())
    for node, weight in all_nodes.items():
        outputfile.write('\t'.join([node, str(weight / node_weight_denominator)]) + '\n')
        

def read_network(edgelistFile, nodelistFile, logval, addnodes=None):
    """
    Build a weighted interaction graph from an edge file and a node file.

    Edges (with their costs) come from read_edges_dict3 and node prizes
    from read_nodes_dict3; prizes are attached as the 'weight' attribute
    of the matching graph nodes.

    Parameters
    ----------
    edgelistFile : str
        Path to the tab-separated edge file (node_a, node_b, cost).
    nodelistFile : str
        Path to the tab-separated node-prize file.
    logval : bool
        Forwarded to read_nodes_dict3 (whether prizes are already log-scale).
    addnodes : dict or None
        Optional extra edges forwarded to read_edges_dict3. None (the
        default) replaces the old mutable-default [] and behaves the same.

    Returns
    -------
    networkx.Graph with edge 'weight' costs and node 'weight' prizes.
    """
    edgelist = read_edges_dict3(edgelistFile, addnodes)
    nodelist = read_nodes_dict3(nodelistFile, logval)
    interNet = nx.Graph()
    interNet.add_weighted_edges_from(edgelist)

    for nd, value in nodelist.items():
        # has_node() is O(1); the old `nd in interNet.nodes()` rebuilt the
        # node list on every iteration, making this loop quadratic. The
        # per-node debug print is gone as well.
        if interNet.has_node(nd):
            # NOTE(review): .node is the networkx 1.x attribute dict (the
            # rest of this file uses 1.x APIs); newer releases call it .nodes.
            interNet.node[nd]['weight'] = value

    return interNet


def read_nodes_dict(inputfile, logval=False):
    """
    Read mRNA and protein node prizes and normalize each data type separately.

    Rows look like ``name<TAB>weight`` after one header row. Names containing
    an underscore are treated as mRNA nodes (the underscore is stripped);
    all others are protein nodes. Unmapped ("gene_notfound") rows are skipped.
    Each group is divided by the sum of its own mapped weights and scaled by
    ``node_scale``.

    Parameters
    ----------
    inputfile : iterator of str
        Open handle on the node file (header + data rows).
    logval : bool
        If True the weights are taken as already log-scaled (|weight|);
        otherwise |log(weight)| is used.

    Returns
    -------
    dict mapping node name -> normalized prize.
    """
    all_prot_nodes, all_mrna_nodes = {}, {}
    all_nodes = {}
    # Builtin next() instead of the Python-2-only .next() method.
    header = next(inputfile)  # skip the header row
    node_scale = 10  # 1000

    for line in inputfile:
        fields = line.strip('\n').split('\t')
        weight = float(fields[1])
        if fields[0].find("gene_notfound") != -1:
            # Unmapped genes carry no usable prize.
            continue

        if weight == 0:
            # log(0) is undefined; keep the raw zero prize.
            all_prot_nodes[fields[0]] = weight
        elif len(fields[0].split('_')) > 1:
            # Underscore marks an mRNA measurement; strip it from the name.
            node = fields[0].replace('_', '')
            all_mrna_nodes[node] = np.abs(weight) if logval else np.abs(np.log(weight))
        else:
            all_prot_nodes[fields[0]] = np.abs(weight) if logval else np.abs(np.log(weight))

    # Builtin sum() works with both Python 2 lists and Python 3 dict views;
    # np.sum(np.array(d.values())) fails under Python 3.
    prot_node_weight_denominator = sum(all_prot_nodes.values())
    mrna_node_weight_denominator = sum(all_mrna_nodes.values())

    for node, weight in all_prot_nodes.items():
        all_nodes[node] = (weight / prot_node_weight_denominator) * node_scale
    for node, weight in all_mrna_nodes.items():
        all_nodes[node] = (weight / mrna_node_weight_denominator) * node_scale

    return all_nodes


def read_edges_dict(inputfile):
    """
    Read STRING-style edges into a list of (node_a, node_b, cost) tuples.

    cost = -log(confidence / 1000), so maximum-confidence edges cost ~0.
    Edges touching unmapped ("gene_notfound") genes are dropped, and
    underscores are stripped from node names.

    Parameters
    ----------
    inputfile : iterator of str
        Open handle on the edge file (header + tab-separated data rows).

    Returns
    -------
    list of (str, str, float) tuples.
    """
    # Builtin next() instead of the Python-2-only .next() method.
    next(inputfile)  # skip header
    edgelist = []
    STRING_max_score = 1000  # STRING confidence scores are scaled 0-1000.
    for line in inputfile:
        fields = line.strip('\n').split('\t')
        item_a, item_b = fields[0], fields[1]
        # Skip before computing the cost: the old code parsed/logged the
        # confidence even for rows it then discarded.
        if "gene_notfound" in item_a or "gene_notfound" in item_b:
            continue
        weight = -np.log(float(fields[2]) / STRING_max_score)
        edgelist.append((item_a.replace('_', ''), item_b.replace('_', ''), weight))

    return edgelist


def read_nodes_dict2(nodesfile, logval=False):
    """
    Read node prizes from *nodesfile* as weight * beta_node_scale.

    Rows look like ``name<TAB>weight`` after one header row. Unmapped
    ("gene_notfound") rows are skipped and underscores are stripped from
    node names.

    Parameters
    ----------
    nodesfile : str
        Path to the tab-separated node file.
    logval : bool
        Unused; kept for interface compatibility with the other
        read_nodes_dict* variants.

    Returns
    -------
    dict mapping node name -> prize.
    """
    all_nodes = {}
    beta_node_scale = 1  # 1000
    # with-statement guarantees the handle is closed even if parsing raises
    # (the old code leaked it on any exception).
    with open(nodesfile) as ifile:
        next(ifile)  # skip header
        for line in ifile:
            fields = line.strip('\n').split('\t')
            if "gene_notfound" in fields[0]:
                continue
            node = fields[0].replace('_', '')
            # 0 * scale == 0, so the old explicit zero special-case was
            # redundant and is folded into the single assignment.
            all_nodes[node] = float(fields[1]) * beta_node_scale
    return all_nodes

def read_nodes_dict3(nodesfile, logval=False):
    """
    Read node prizes of the form FOLD_CHANGE / MAX(FOLD_CHANGE_DATATYPE).

    DATA TYPE = Transcript, Protein, Phospho-Protein. Rows look like
    ``name<TAB>weight`` after one header row; the weight is used verbatim
    (times ``beta_node_scale``). Unmapped ("gene_notfound") rows are skipped
    and underscores are stripped from node names.

    Parameters
    ----------
    nodesfile : str
        Path to the tab-separated node file.
    logval : bool
        Unused; kept for interface compatibility with the other
        read_nodes_dict* variants.

    Returns
    -------
    dict mapping node name -> prize.
    """
    all_nodes = {}
    beta_node_scale = 1  # 10 #1000
    # with-statement guarantees the handle is closed even if parsing raises
    # (the old code leaked it on any exception).
    with open(nodesfile) as ifile:
        next(ifile)  # skip header
        for line in ifile:
            fields = line.strip('\n').split('\t')
            if "gene_notfound" in fields[0]:
                continue
            node = fields[0].replace('_', '')
            all_nodes[node] = float(fields[1]) * beta_node_scale
    return all_nodes


def read_edges_dict2(edgesfile, addnodes=None):
    """
    Read an edge file (node_a, node_b, confidence) into weighted edges.

    edge cost = -log(confidence / 1000) when confidence > 0, else 0, so
    maximum-confidence edges are nearly free. Edges touching unmapped
    ("gene_notfound") genes are dropped (the mapped endpoint is still
    recorded as a known node) and underscores are stripped from names.

    Parameters
    ----------
    edgesfile : str
        Path to the tab-separated edge file (header + data rows).
    addnodes : dict or None
        Optional mapping of node name -> confidence for extra edges to
        attach to the KRAS node. The old default of [] was a mutable
        default argument, and a truthy value had to be a dict anyway
        (it was iterated with .iteritems()).

    Returns
    -------
    list of (node_a, node_b, cost) tuples.
    """
    edgelist = []
    STRING_max_score = 1000  # STRING confidence scores are scaled 0-1000.
    nodelist = {}  # bare gene symbol -> full node name, e.g. "KRAS" -> "KRAS(...)"
    # with-statement guarantees the handle is closed even if parsing raises.
    with open(edgesfile) as ifile:
        next(ifile)  # skip header
        for line in ifile:
            fields = line.strip('\n').split('\t')
            item_a, item_b = fields[0], fields[1]
            # Remember the mapped endpoint even when the edge is dropped.
            if "gene_notfound" in item_a:
                nodelist[item_b.split('(')[0]] = item_b
                continue
            if "gene_notfound" in item_b:
                nodelist[item_a.split('(')[0]] = item_a
                continue
            confidence = float(fields[2])
            # Zero confidence would make log() blow up; give those edges cost 0.
            weight = -np.log(confidence / STRING_max_score) if confidence > 0.0 else 0
            item_a, item_b = item_a.replace('_', ''), item_b.replace('_', '')
            nodelist[item_a.split('(')[0]] = item_a
            nodelist[item_b.split('(')[0]] = item_b
            edgelist.append((item_a, item_b, weight))

    # Extra in-house edges, all anchored on the KRAS node. This really
    # belongs in a separate script.
    protein = "KRAS"
    if addnodes:
        item_a = nodelist[protein]
        for item_c, confidence in addnodes.items():
            try:
                item_b = nodelist[item_c]
            except KeyError:
                # Node present in the in-house list but not in the network.
                continue
            weight = 0.0 if float(confidence) == 1.0 else -np.log(float(confidence))
            edgelist.append((item_a, item_b, weight))
    return edgelist


def read_edges_dict3(edgesfile, addnodes=None):
    """
    Read an edge file (node_a, node_b, cost) into weighted edges.

    Unlike read_edges_dict2, the third column is passed through verbatim
    (as a string): the file is assumed to already contain final edge costs.
    Edges touching unmapped ("gene_notfound") genes are dropped (the mapped
    endpoint is still recorded as a known node) and underscores are
    stripped from node names.

    Parameters
    ----------
    edgesfile : str
        Path to the tab-separated edge file (header + data rows).
    addnodes : dict or None
        Optional mapping of node name -> confidence for extra edges to
        attach to the KRAS node (cost -log(confidence), 0 for confidence
        1.0). The old default of [] was a mutable default argument, and a
        truthy value had to be a dict anyway (it was iterated with
        .iteritems()).

    Returns
    -------
    list of (node_a, node_b, cost) tuples.
    """
    edgelist = []
    nodelist = {}  # bare gene symbol -> full node name, e.g. "KRAS" -> "KRAS(...)"
    # with-statement guarantees the handle is closed even if parsing raises.
    with open(edgesfile) as ifile:
        next(ifile)  # skip header
        for line in ifile:
            fields = line.strip('\n').split('\t')
            # The cost column is kept as-is (already a final edge cost).
            item_a, item_b, weight = fields[0], fields[1], fields[2]
            # Remember the mapped endpoint even when the edge is dropped.
            if "gene_notfound" in item_a:
                nodelist[item_b.split('(')[0]] = item_b
                continue
            if "gene_notfound" in item_b:
                nodelist[item_a.split('(')[0]] = item_a
                continue
            item_a, item_b = item_a.replace('_', ''), item_b.replace('_', '')
            nodelist[item_a.split('(')[0]] = item_a
            nodelist[item_b.split('(')[0]] = item_b
            edgelist.append((item_a, item_b, weight))

    # Extra in-house edges, all anchored on the KRAS node. This really
    # belongs in a separate script.
    protein = "KRAS"
    if addnodes:
        item_a = nodelist[protein]
        for item_c, confidence in addnodes.items():
            try:
                item_b = nodelist[item_c]
            except KeyError:
                # Node present in the in-house list but not in the network.
                continue
            weight = 0.0 if float(confidence) == 1.0 else -np.log(float(confidence))
            edgelist.append((item_a, item_b, weight))
    return edgelist



def read_item_dict(inputfile):
    """
    Map the first tab-separated column of each line to the second.

    Parameters
    ----------
    inputfile : iterable of str
        Lines of a two-column (or wider) tab-separated file; extra
        columns are ignored.

    Returns
    -------
    defaultdict mapping column 0 -> column 1.
    """
    items_dict = defaultdict()
    for raw_line in inputfile:
        columns = raw_line.strip('\n').split('\t')
        items_dict[columns[0]] = columns[1]
    return items_dict
        

def get_giant_component(interactomeNet):
    """
    Return the largest connected component of *interactomeNet* as a graph.

    nx.connected_component_subgraphs() returned a generator (not an
    indexable list) in networkx 2.x and was removed in 2.4, so the largest
    component is selected explicitly; connected_components(), subgraph()
    and copy() exist across networkx versions.

    Parameters
    ----------
    interactomeNet : networkx.Graph

    Returns
    -------
    networkx.Graph - an independent copy of the giant component.
    """
    largest_nodes = max(nx.connected_components(interactomeNet), key=len)
    giant_component = interactomeNet.subgraph(largest_nodes).copy()

    return giant_component


def write_network_file(thisnetwork, outedgeslife, outedgeslife_names, outnodesfile):
    """
    Write the edges and nodes of *thisnetwork* to tab-separated files.

    Usually *thisnetwork* is the giant component of another network. Every
    edge and node must carry a 'weight' attribute.

    Parameters
    ----------
    thisnetwork : graph exposing edges(data=True) and nodes(data=True).
    outedgeslife : writable file - receives "a<TAB>b<TAB>weight" rows.
    outedgeslife_names : writable file - receives "a<TAB>b" rows.
    outnodesfile : writable file - receives "node<TAB>weight" rows.
    """
    # edges(data=True) works in both networkx 1.x and 2.x, whereas the
    # edges_iter() used before was removed in networkx 2.0. Joining with
    # tabs directly avoids the old ",".join(...).replace(',', '\t') trick,
    # which corrupted any field containing a comma.
    for node_a, node_b, edge_data in thisnetwork.edges(data=True):
        outedgeslife.write('\t'.join([str(node_a), str(node_b), str(edge_data['weight'])]) + '\n')
        outedgeslife_names.write('\t'.join([str(node_a), str(node_b)]) + '\n')

    for node, node_data in thisnetwork.nodes(data=True):
        outnodesfile.write('\t'.join([str(node), str(node_data['weight'])]) + '\n')

'''    
if __name__ == '__main__':
    
optionparser = OptionParser("usage: %prog [options] ")
optionparser.add_option("-e", "--edgesFile", dest="edgesFile",
                        help="annotation file for all files to use")
optionparser.add_option("-n", "--nodesFile", dest="nodesFile",
                        help="nodesFile file for all files to use")
optionparser.add_option("-l", "--logval", dest="logval",
                        help="annotation file for all files to use")

(options, args) = optionparser.parse_args()

#inputfile1, inputfile2 = options.edgesFile, options.nodesFile
#outputfile1, outputfile2 = options.edgesFile+'_norm', options.nodesFile+'_norm'

'''

# ---------------------------------------------------------------------------
# Script body. NOTE(review): the optparse command-line handling above is
# disabled (wrapped in a triple-quoted string), so the statements below run
# at import time with the hard-coded paths; consider restoring an
# `if __name__ == '__main__':` guard.
# ---------------------------------------------------------------------------

# Input files: edge list with precomputed costs, and node fold-change prizes.
edgelistFile = '/data/projects/pcst/test_2011/hsaMerged_edges_hr.tab_heinz2'
nodelistFile = '/data/projects/pcst/test_2011/foldChange.tab_heinz2'

# Output paths for the normalized network (edges, nodes, edge-name pairs).
outEdgesFile = edgelistFile+'_norm'
outNodesFile = nodelistFile+'_norm'
outNamesFile = edgelistFile+'_norm_names'

# Forwarded to the node reader; read_nodes_dict3 currently ignores it.
logval=True
#myInteractomeNet = read_edge_list_network(inputfile1, inputfile2,logval, house_AR)
# Without the AR nodes
interactNet = read_network(edgelistFile, nodelistFile,logval, [])
# get the giant component in order to make sure that the input network is fully connected.
# Snet = giant component
Snet = get_giant_component(interactNet)
#all_nodes = read_nodes_dict(open(inputfile2), logval)
#all_nodes = myNet_giant_component.nodes()
# NOTE(review): the three output handles opened inline here are never
# explicitly closed; they are only released when the process exits.
write_network_file(Snet, open(outEdgesFile,'w'), open(outNamesFile,'w'), open(outNodesFile,'w'))
print 'Done'

# time python ~/workspace/pcst/trunk/heinz.py -e 2010_06_25_18_43dhea_edges_norm.txt -n 2010_06_25_18_43dhea_nodes2_norm.txt -g False -N True -E True

# time python ~/workspace/pcst/trunk/heinz.py -e 2010_07_03_22_45dhea_edges.txt_norm1000_wnf -n 2010_07_03_22_45dhea_nodes2.txt_norm1000_wnf -g False -N True -E True - c True -v True
# 11-9-11
# time python ~/workspace/pcst/trunk/heinz.py -e hsaMerged_hr_edges.tab_heinz_norm -n foldChange.tab_heinz_norm -g False -N True -E True - c True -v True
#11-15-11
# time python ~/workspace/pcst/trunk/heinz.py -e hsaMerged_edges_hr.tab_heinz2_norm -n hsaMerged_nodes_hr.tab.heinz2_norm -g False -N True -E True - c True -v True -s 5 -d 0.5











    
    