#!/usr/bin/env python
# (c) Yongjin Park, 2013
import sys
import os
import time
import random
import argparse
import subprocess as sp
import networkx as nx
from datetime import datetime
from math import log, exp, sqrt, ceil

def log_msg(msg):
    """Write a timestamped message to stderr and flush immediately.

    Uses sys.stderr.write instead of the Python-2-only 'print >>'
    statement, so the helper works under both Python 2 and 3.
    """
    tstr = datetime.now().strftime("%c")
    sys.stderr.write("[%s] %s\n" % (tstr, msg))
    sys.stderr.flush()
    return

################################################################
# input data class
class InputData:
    """Holds a weighted data graph plus optional paired expression data.

    Node labels from the input files are mapped to consecutive integer
    indices (starting at 1); the mapping is persistent across calls so
    repeated read_data() invocations keep indices consistent.
    """

    def __init__(self):
        self.idx2label = {}     # int index -> original node label
        self.label2idx = {}     # original node label -> int index
        self.data_graph = None  # nx.Graph over integer indices
        self.expr_x = {}        # int index -> list of floats (condition X)
        self.expr_y = {}        # int index -> list of floats (condition Y)
        self.cum_idx = 0        # running counter for index assignment
        pass

    def read_data(self, net_file, expr_x_file = None, expr_y_file = None):
        """Read the network (and optional expression files), assign
        consistent integer indices, and store/return the indexed graph.

        net_file rows: "u v" (weight defaults to 1) or "u v w"; rows with
        negative weights or other token counts are silently skipped.
        Exits the process on missing files or unparsable numbers.
        """
        if not os.path.exists( net_file ) :
            log_msg('Network file does not exist: %s' % net_file)
            sys.exit(1)

        # tokenize, keeping only rows with two non-empty node names
        rows = [ln.strip().split() for ln in open(net_file)]
        rows = [r for r in rows
                if len(r) > 1 and len(r[0]) > 0 and len(r[1]) > 0]

        # build a graph on the raw labels
        G0 = nx.Graph()
        # unweighted pairs get a default weight of 1
        G0.add_edges_from([r for r in rows if len(r) == 2], weight=1.)
        # weighted triples: parse weight, drop negative weights
        for r in rows:
            if len(r) != 3:
                continue
            u, v, w_str = r
            try:
                w = float(w_str)
            except ValueError:
                log_msg('invalid weight (%s,%s) [%s]' % (u, v, w_str))
                sys.exit(1)
            if w >= 0.:
                G0.add_edge(u, v, weight=w)

        # build/append idx2label and label2idx (indices start at 1)
        idx2lbl = self.idx2label
        lbl2idx = self.label2idx
        for v in sorted(G0.nodes()):
            if v not in lbl2idx:
                self.cum_idx += 1
                new_idx = self.cum_idx
                lbl2idx[v] = new_idx
                idx2lbl[new_idx] = v

        # re-key the graph by integer index (networkx 1.x edges_iter API)
        G = nx.Graph()
        for u, v, dat in G0.edges_iter(data=True):
            G.add_edge(lbl2idx[u], lbl2idx[v], weight=dat['weight'])

        log_msg( 'V = %d E = %d' % (G.number_of_nodes(), G.number_of_edges()) )
        self.data_graph = G

        # read expression data only when both files were supplied
        if (expr_x_file is not None) and (expr_y_file is not None) :

            if not os.path.exists(expr_x_file) :
                log_msg('expression x file does not exist: %s' % expr_x_file)
                sys.exit(1)
            if not os.path.exists(expr_y_file) :
                log_msg('expression y file does not exist: %s' % expr_y_file)
                sys.exit(1)

            x_lines = [l.strip().split() for l in open(expr_x_file)]
            x_lines = [la for la in x_lines if len(la) > 1]
            y_lines = [l.strip().split() for l in open(expr_y_file)]
            y_lines = [la for la in y_lines if len(la) > 1]

            # both conditions must cover the same number of rows
            if len(x_lines) != len(y_lines) :
                log_msg('expression files do not match')
                sys.exit(1)

            try:
                # only rows whose label appears in the network are kept
                self.expr_x = dict([(lbl2idx[la[0]],
                                     [float(s) for s in la[1:]])
                                    for la in x_lines if la[0] in lbl2idx])

                self.expr_y = dict([(lbl2idx[la[0]],
                                     [float(s) for s in la[1:]])
                                    for la in y_lines if la[0] in lbl2idx])
            except ValueError as e:
                # fix: the original referenced an unbound name 'e' here,
                # raising NameError instead of reporting the parse error
                log_msg('cannot parse expr files: ' + str(e))
                sys.exit(1)

            log_msg('read expressions: ' + expr_x_file + ', ' + expr_y_file )

        return G

    # ================================================================
    # private helper functions
    @staticmethod
    def __get_edge_str(Gsub):
        """Render each edge of Gsub in both directions as 'u\tv\tw' lines."""
        edge_str = lambda e : '%d\t%d\t%f\n%d\t%d\t%f' % (e[0], e[1], e[2]['weight'], e[1], e[0], e[2]['weight'])
        return '\n'.join(map(edge_str, Gsub.edges_iter(data=True))) + '\n'

    # ================================================================
    def gen_sub_net(self, out_dir, nodes=None):
        """Write the subgraph induced by 'nodes' (or the whole graph when
        None) as weighted pairs under out_dir.

        Returns (file_name, ok); ok is False when the subgraph is too
        sparse (|E| < |V|) or the file cannot be written.
        """
        edge_str = InputData.__get_edge_str

        fhdr = out_dir.rstrip('/') + '/'
        fname = fhdr + 'G.wpairs'

        if nodes is None :
            H = self.data_graph
        else:
            H = self.data_graph.subgraph( nodes )

        # too sparse to be worth partitioning
        if H.number_of_edges() < H.number_of_nodes() :
            return (fname, False)

        try:
            # fix: was a bare 'except'; catch only file-system errors
            with open(fname, 'w') as fh:
                fh.write(edge_str(H))
        except (IOError, OSError):
            log_msg('cannot creat subnetwork file: ' + fname)
            return (fname, False)

        return (fname, True)

    def gen_sub_expr(self, out_dir, nodes=None):
        """Write expression rows for 'nodes' (or all graph nodes) that
        appear in both expr_x and expr_y.

        Returns (x_file, y_file, ok); ok is False when no expression
        data is loaded or the files cannot be written.
        """
        fhdr = out_dir.rstrip('/') + '/'
        x_file = fhdr + 'expr_x.txt'
        y_file = fhdr + 'expr_y.txt'

        if len(self.expr_x) == 0 or len(self.expr_y) == 0 :
            return (x_file, y_file, False)

        expr_x = self.expr_x
        expr_y = self.expr_y

        if nodes is None:
            nodes = self.data_graph.nodes_iter()

        vec2str = lambda vec: '\t'.join(map(str, vec))

        try:
            # 'with' guarantees the handles close even on error
            with open(x_file, 'w') as fh_x, open(y_file, 'w') as fh_y:
                for v in nodes:
                    # keep only vertices present in both conditions
                    if (v in expr_x) and (v in expr_y) :
                        fh_x.write(str(v) + '\t' + vec2str(expr_x[v]) + '\n')
                        fh_y.write(str(v) + '\t' + vec2str(expr_y[v]) + '\n')
        except IOError:
            return (x_file, y_file, False)

        return (x_file, y_file, True)

    pass # End of InputData


################################################################
#
# tree class to represent the search history
#
class Tree:
    """Binary tree recording the recursive bisection search history.

    Node ids follow the implicit heap layout (children of i are 2i+1 and
    2i+2); leaf nodes carry a 'subset' attribute listing their vertices.
    Uses the networkx 1.x node-attribute API (graph.node[id]).
    """

    def __init__(self, depth):
        # depth: number of levels grown below a node per add_subtree() call
        self.treeobj = nx.DiGraph()
        self.pa = {}                    # child id -> parent id
        self.root_idx = 0
        self.treeobj.add_node(self.root_idx)
        self.node = self.treeobj.node   # alias to the node-attribute dict
        self.treeobj.node[0]['isleaf'] = True
        self.depthBase = depth
        pass

    def add_subtree(self, curr, group):
        """Grow a full depthBase-deep subtree under 'curr' and attach the
        clusters in 'group' (0-based keys) to its new leaves, left to right.

        Returns {leaf_id: cluster_members} for leaves actually assigned.
        """
        treeobj = self.treeobj
        bottom = self.__add_subtree(curr, 0)

        # 'curr' is no longer a leaf; its subset moves down to the new leaves
        if 'subset' in treeobj.node[curr] :
            del treeobj.node[curr]['subset']

        group_ret = {}
        for k, idx in enumerate(bottom):
            if k in group:
                treeobj.node[idx]['subset'] = group[k]
                group_ret[idx] = group[k]

        return group_ret

    def __add_subtree(self, curr, d):
        """Recursive helper: grow levels until depth depthBase is reached.

        d is the current depth below the subtree root; returns the list of
        bottom-level node ids, left to right.
        """
        treeobj = self.treeobj
        if d == self.depthBase:
            treeobj.node[curr]['isleaf'] = True
            return [curr]
        (left, right) = (2 * curr + 1, 2 * curr + 2)
        treeobj.add_edge(curr, left)
        treeobj.add_edge(curr, right)
        self.pa[left] = curr
        self.pa[right] = curr
        treeobj.node[curr]['isleaf'] = False

        lset = self.__add_subtree(left, d + 1)
        rset = self.__add_subtree(right, d + 1)
        return lset + rset

    def prune_subtree(self, curr=None):
        """Prune unnecessary subtrees bottom-up; returns 1 if 'curr'
        survives, 0 if it was removed.

        Leaf: removed when its subset is empty.
        Internal: removed when no child survives; when exactly one child
        survives (and curr is not the root), the child is spliced out by
        moving its edges and data up into curr.
        """
        if curr is None:
            curr = self.root_idx
        treeobj = self.treeobj
        pa = self.pa

        nodeobj = treeobj.node[curr]

        # at leaf level
        if nodeobj.get('isleaf',False) :
            if len(nodeobj.get('subset', {})) == 0:
                treeobj.remove_node(curr)
                del pa[curr]
                return 0
            return 1

        # at internal level: prune children first
        # (nx 1.x neighbors() returns a list, so removal during the
        # recursion cannot invalidate this iteration)
        valid = []
        nret = 0
        for x in treeobj.neighbors(curr):
            ret = self.prune_subtree(x)
            if ret > 0 : valid.append(x)
            nret += ret

        if nret == 0:
            # no surviving child: remove this node too (but keep the root)
            if curr != self.root_idx:
                treeobj.remove_node(curr)
                del pa[curr]
            return 0

        elif nret == 1 and curr != self.root_idx :
            # one surviving child: copy its edges and data up, then drop it
            # (fix: removed dead 'pa_node' computation — it was assigned
            # but never used, and its root check was already guaranteed
            # by the enclosing condition)
            ch_node = valid[0]

            for x in treeobj.neighbors(ch_node):
                treeobj.add_edge(curr, x)
                pa[x] = curr

            treeobj.node[curr] = treeobj.node[ch_node]

            treeobj.remove_node(ch_node)
            del pa[ch_node]

            return 1

        return 1

    def edges_str(self):
        """Return all tree edges as 'u\tv' lines."""
        ret = '\n'.join( [ '%s\t%s'%(u,v) for (u,v) in self.treeobj.edges_iter()] )
        return ret

    pass # End of class Tree

################################################################
## helper routine

def get_bottom_clusters( btree ):
    """Map each leaf node id of a node-attribute dict to its 'subset'
    member list (missing subsets default to []).

    'btree' is expected to expose a .node mapping of node id ->
    attribute dict, as Tree does via its treeobj alias.
    """
    ret = {}
    # .items() replaces the Python-2-only iteritems(); identical here
    for gg, dat in btree.node.items():
        if dat.get('isleaf', False):
            ret[gg] = dat.get('subset', [])
    return ret


def get_bottom_dict( btree ):
    """Map each vertex to the 0-based index of its bottom (leaf) cluster,
    numbering leaves in left-to-right tree order.

    Replaces the Python-2-only reduce/xrange idiom; the old
    reduce(lambda a, x: a + x, ...) concatenation was also quadratic,
    while extend()/plain loops are linear.
    """
    T = btree.treeobj

    def _leaf_subsets(r):
        # collect the 'subset' lists at the leaves, left to right
        neigh = T.neighbors(r)
        if len(neigh) == 0:
            return [ T.node[r].get('subset',[]) ]
        out = []
        for x in neigh:
            out.extend(_leaf_subsets(x))
        return out

    botList = _leaf_subsets( btree.root_idx )
    ret = {}
    for k, uList in enumerate(botList):
        for u in uList:
            ret[u] = k
    return ret

def dict2clusters( dict_obj ):
    """Invert a vertex->cluster mapping into cluster->list-of-vertices.

    Uses .items()/setdefault in place of the Python-2-only
    iteritems()/has_key() pair; behavior is identical.
    """
    out = {}
    for v, k in dict_obj.items():
        out.setdefault(k, []).append(v)
    return out

def get_tree_depth( btree ):
    """Return the depth of the search tree: the maximum number of edges
    on any root-to-leaf path (0 for a single-node tree)."""
    T = btree.treeobj

    def depth_below(node):
        children = T.neighbors(node)
        if not children:
            return 0
        best = 0
        for ch in children:
            d = 1 + depth_below(ch)
            if d > best:
                best = d
        return best

    return depth_below( btree.root_idx )


################################################################
# class Engine
class Engine:
    """Driver for the external 'deblock' binary.

    Creates a private temporary working directory, writes sub-network /
    expression inputs into it, invokes the binary, and parses the
    resulting color (cluster) and expression-statistics files.
    """

    def __init__(self, inData, args):
        # the deblock binary is expected to live next to this script
        self.EXEDIR = os.path.dirname(os.path.realpath(__file__))

        try:
            self.wd = sp.check_output(['mktemp', '-d', './tmp.deblock.XXXXXX']).strip()
        except sp.CalledProcessError:
            log_msg('cannot create working directory')
            sys.exit(1)

        self.elapsed_time = 0.
        self.inData = inData

        binFile = self.EXEDIR + '/deblock'

        # base command; per-run options are appended in run()
        self.cmd_exe = [ binFile ]
        self.cmd_exe += [ '-i', str(args.iter) ]

        self.args = args
        self.color = None       # {cluster: [vertex, ...]} after run()
        self.expr_mean = None   # {cluster: [float, ...]} after run()
        self.expr_sd = None     # {cluster: [float, ...]} after run()

        pass # end of initialization

    def __del__(self):
        # best-effort removal of the temporary working directory
        try:
            sp.check_call(['rm', '-rf', self.wd])
        except sp.CalledProcessError:
            log_msg('cannot clear out working direcotry: '+self.wd)
            sys.exit(1)

    def clear_wd(self):
        """Delete everything inside the working directory (keep the dir).

        Fix: the original ran rm -rf on the literal path wd + '/*'
        without a shell, so the glob never expanded and nothing was ever
        deleted; expand the entries in Python instead.
        """
        try:
            entries = [self.wd + '/' + f for f in os.listdir(self.wd)]
            if entries:
                sp.check_call(['rm', '-rf'] + entries)
        except (sp.CalledProcessError, OSError):
            log_msg('cannot clear out working direcotry: '+self.wd)
            sys.exit(1)

    def initial_color( self, membership ):
        """Write 'vertex<TAB>cluster' seed assignments into the working
        directory and return the file path."""
        ret = self.wd + '/init_color.txt'
        fh = open( ret, 'w' )
        fh.writelines( ['%d\t%d\n'%(u,k) for u,k in membership.items()] )
        fh.close()
        return ret

    def run(self, depth, nodes=None, init_color=None, repeats=True, a0=None, b0=None):
        """Run deblock on the subgraph induced by 'nodes' (whole graph
        when None) and populate self.color / expr_mean / expr_sd.

        Sets self.color to {} (or {0: nodes}) without running the binary
        when the subgraph is too sparse to partition.
        """
        # clear stale files from previous runs *before* generating new
        # inputs (the original tried to clear afterwards, which — had its
        # no-op glob worked — would have deleted the fresh input files)
        self.clear_wd()

        dataFile, validData = self.inData.gen_sub_net(self.wd, nodes)

        if not validData :
            self.color = {}
            self.TreeNodes = []
            if nodes is not None: self.color = { 0: nodes }
            return

        exprX, exprY, hasExpr = self.inData.gen_sub_expr(self.wd, nodes)

        result_hdr = self.wd + '/out'
        curr_cmd = list(self.cmd_exe)

        if init_color is not None: curr_cmd += ['-c', self.initial_color(init_color)]

        curr_cmd += ['-d', str(depth), '-o', result_hdr, '-n', dataFile]

        if repeats:
            curr_cmd += ['-r', str(self.args.rep)]

        if hasExpr:
            curr_cmd += ['-x', exprX, '-y', exprY]

        if a0 is not None:
            curr_cmd += ['-a0', str(a0)]

        if b0 is not None:
            curr_cmd += ['-b0', str(b0)]

        if self.args.verbose > 0 :
            log_msg('running variational inference with depth = ' + str(depth))

        # silence the binary unless verbose was requested
        quiet = self.args.verbose <= 0
        errFH = open(os.devnull, 'w') if quiet else sys.stderr
        outFH = open(os.devnull, 'w') if quiet else sys.stdout

        try:
            p = sp.Popen(curr_cmd, stdout=outFH, stderr=errFH)
            p.wait()
        except (sp.CalledProcessError, OSError, IOError):
            log_msg('cannot execute command:\n'+' '.join(curr_cmd))
            sys.exit(1)
        finally:
            # close the devnull handles we opened (leaked in the original)
            if quiet:
                errFH.close()
                outFH.close()

        # parse vertex colors: one 'vertex<TAB>cluster' line per vertex
        colorfile = result_hdr + '.color'
        color = {}
        for x, g in (ln.strip().split('\t') for ln in open(colorfile)):
            color.setdefault(int(g), []).append(int(x))

        self.color = color

        # parse per-cluster expression statistics when expression was used
        if hasExpr:
            expr_mean_file = result_hdr + '.aux_mean'
            expr_sd_file = result_hdr + '.aux_sd'

            mean_rows = [l.strip().split('\t') for l in open(expr_mean_file)]
            sd_rows = [l.strip().split('\t') for l in open(expr_sd_file)]

            self.expr_mean = dict( [(int(la[0]), [float(s) for s in la[1:]])
                                    for la in mean_rows if len(la) > 1] )
            self.expr_sd = dict( [(int(la[0]), [float(s) for s in la[1:]])
                                  for la in sd_rows if len(la) > 1] )

        return # end of method


    pass # end of inference engine


################################################################
# 1. initialize by iterative bisection
# 2. determine depth and determine initial Z
# 3. fit a full model

## resolve initial latent assignment and depth
## by iterative k-section
def resolve_init_model( args, engine, nodes=None, a0=0.1, b0=0.1 ):
    """Resolve an initial latent assignment and an effective tree depth
    by iterative k-section: repeatedly re-partition every cluster that is
    still large enough, recording each split in a search-history tree.

    Returns (bottom_assignment, depth, history_tree).
    """
    history = Tree( depth = args.d )
    engine.run( depth = args.d, nodes=nodes, repeats=True, a0=a0, b0=b0 )
    frontier = history.add_subtree( history.root_idx, engine.color )

    while len(frontier) > 0:
        next_frontier = {}

        for leaf in frontier :
            members = frontier[leaf]
            # stop splitting once a cluster is small enough
            if len(members) < args.ssz : continue

            engine.run( depth = args.d, nodes = members, repeats=True, a0=a0, b0=b0 )

            # a single resulting color means no further split was found
            if len(engine.color) <= 1 : continue

            subclusters = history.add_subtree(leaf, engine.color)

            # only non-singleton subclusters are worth splitting again
            for s in subclusters:
                if len(subclusters[s]) > 1:
                    next_frontier[s] = subclusters[s]

        frontier = next_frontier

    history.prune_subtree()

    depth = get_tree_depth( history )
    assignment = get_bottom_dict( history )

    return (assignment, depth, history)

################################################################
class InfoFile:
    """Collects key/value run metadata and flushes it to
    '<out_dir>/info.txt' when the object is destroyed."""

    def __init__(self,out_dir):
        self.out_dir = out_dir
        self.out_file = out_dir.rstrip('/') + '/' + 'info.txt'
        self.content = {}   # sanitized key -> sanitized value

    def __del__(self):
        # one tab-separated pair per line, sorted by key
        lines = []
        for key in sorted(self.content.keys()):
            lines.append('%s\t%s\n' % (str(key), str(self.content[key])))
        fh = open(self.out_file, 'w')
        fh.writelines(lines)
        fh.close()

    def add_info(self, key, value):
        # tabs would corrupt the two-column output format; flatten them
        clean_key = key.replace('\t', ' ')
        clean_value = value.replace('\t', ' ')
        self.content[clean_key] = clean_value

    pass

################################################################
def group_data_output( fname, group_data):
    """Write one tab-separated line per group: key, then its values.

    No-op when group_data is None (e.g. no expression data was fitted).
    Uses write() + 'with' instead of the Python-2-only 'print >>' and an
    unclosed-on-error file handle.
    """
    if group_data is None:
        return
    with open(fname, 'w') as fh:
        for k in group_data :
            fh.write(str(k) + '\t' + '\t'.join(map(str, group_data[k])) + '\n')

def vertex_color_output( fname, color_data, idx2label ):
    """Write one line per cluster: cluster key, then the labels of its
    member vertices (vertices missing from idx2label are skipped).

    Uses write() + 'with' instead of the Python-2-only 'print >>'.
    """
    with open(fname, 'w') as fh:
        for k in color_data :
            tmp = [ idx2label[idx] for idx in color_data[k] if idx in idx2label ]
            fh.write(str(k) + '\t' + '\t'.join(tmp) + '\n')


################################################################
if __name__ == '__main__' :

    # ---- command-line interface ----
    cli = argparse.ArgumentParser(description='Differential Expression analysis on network Block model',
                                  epilog='(c) 2013, Yongjin Park and Joel S. Bader, {ypark28, joel.bader}@jhu.edu')
    cli.add_argument( 'dataFile', help='A file that contains a list of pairs' )
    cli.add_argument( '-d', type=int, default=1, help='The depth of base learner (default: %(default)s)' )
    cli.add_argument( '-o', '--out', default='./out', help='Output directory (default: %(default)s)' )
    cli.add_argument( '-x', '--expx', default=None, help='Expression file X' )
    cli.add_argument( '-y', '--expy', default=None, help='Expression file Y' )
    cli.add_argument( '-i', '--iter', type=int, default=100, help='Number of update iterations (default: %(default)s)' )
    cli.add_argument( '-z', '--ssz', type=int, default=10, help='stopping cluster size: %(default)s)' )
    cli.add_argument( '-r', '--rep', type=int, default=1, help='Number of repetition (default: %(default)s)' )
    cli.add_argument( '-v', '--verbose', action='count', default=0, help='(default: %(default)s)' )

    opts = cli.parse_args()

    # ---- load data and set up the inference engine ----
    dat = InputData()
    log_msg('reading data: ' + opts.dataFile)
    dat.read_data( opts.dataFile, opts.expx, opts.expy )

    eng = Engine( dat, opts )

    # ---- make sure the output directory exists ----
    out_hdr = opts.out.rstrip('/') + '/'
    try:
        sp.check_call(['mkdir', '-p', out_hdr])
    except sp.CalledProcessError:
        log_msg('cannot create output directory ' + out_hdr)
        sys.exit(1)

    # ---- record run metadata (flushed to info.txt on exit) ----
    run_info = InfoFile( out_hdr )
    run_info.add_info( 'Data Network', opts.dataFile )
    run_info.add_info( 'Inference Started', datetime.now().strftime("%c") )

    # ---- stage 1: iterative bisection picks depth + seed assignment ----
    log_msg('initial fitting by iterative bisection')
    (init_group, fit_depth, init_tree) = resolve_init_model( opts, eng )

    vertex_color_output( out_hdr + 'initial.color',
                         dict2clusters( init_group ),
                         dat.idx2label )

    # ---- stage 2: one full-depth fit seeded with the stage-1 colors ----
    log_msg('fitting with full depth = %d' % fit_depth)
    eng.run( depth=fit_depth, nodes=None, init_color=init_group, repeats=False )

    vertex_color_output( out_hdr + 'vertex.color',
                         eng.color, dat.idx2label )

    group_data_output( out_hdr + 'expr.mean',
                       eng.expr_mean )

    group_data_output( out_hdr + 'expr.sd',
                       eng.expr_sd )

    run_info.add_info( 'Inference ended', datetime.now().strftime("%c") )
    log_msg('Done')
    sys.exit(0)
    # end of main
