"""
MEMESA-TOOLS
============
Model preparation, vertex enumeration and vertex analysis tools developed as
part of the MEMESA project.

    Copyright (C) 2009-2012 Brett G. Olivier

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <http://www.gnu.org/licenses/>

Author: Brett G. Olivier
Contact email: bgoli@users.sourceforge.net

"""

# standard library imports
import os, time, copy
# directory containing this script; all model/work paths are resolved relative to it
cDir = os.path.dirname(os.path.abspath(os.sys.argv[0]))
import matplotlib
import matplotlib.pyplot as P
# HDF5 vertex file path; if left as None it is derived from model_name below
HD5datF = None

# coremodel
##  work_dir = 'core_memesa_model'
##  model_name = 'core_memesa_model' # base name not xml file

# iJR904
##  work_dir = 'iJR904M'
##  model_name = 'Ecoli_iJR904.thr' # base name not xml file

# big Ecoli
#work_dir = 'iAF1260'
#model_name = 'Ecoli_iAF1260_noox.glc' # base name not xml file
##  model_name = 'Ecoli_iAF1260_ox.glc' # base name not xml file

# Timo
work_dir = 'Test'
#model_name = 'toy_model_s1.l2' # base name not xml file
model_name = 'toy_model_s2.l2' # base name not xml file

# analysis stages to run (see the driver code at the bottom of the file)
ENABLE_MODULE_SEARCH = True
ENABLE_MODULE_ENUMERATION = True
ENABLE_MODULE_VALUES = True

# end userdata #
ENABLE_MODULE_FVA = False
# set ENABLE_MODULE_VALUES to True to output CSV's of module flux

# model_name is used as a base name; strip an .xml extension if one was supplied
if model_name.endswith('.xml'):
    model_name = model_name[:-4]

# derive the HDF5 vertex file path unless one was set explicitly above
if HD5datF == None:
    HD5datF = os.path.join(cDir, work_dir, 'vertex', '%s.None.hdf5' % model_name)

# set up file names
model_file = '%s.xml' % model_name
model_dir = os.path.join(cDir, work_dir, 'sbml')
# NOTE: work_dir is rebound here from the short name to the full output directory
work_dir = os.path.join(cDir, work_dir, 'vertex')
correlation_matrix_name = '%s.corrcoeff.csv' % model_name
vertex_variable_fluxes = "%s.vertex_variable_fluxes.csv" % model_name
vertex_variable_fluxes_all = "%s.vertex_variable_fluxes_all.csv" % model_name

import numpy
import h5py

from pyscescbm.CBVersion import __DEBUG__, __version__
from pyscescbm import CBRead, CBWrite, CBTools, CBPlot
from pyscescbm import CBSolver as slv

# create the output directory if needed
if not os.path.exists(work_dir):
    os.mkdir(work_dir)

# load the SBML model and annotate it with stoichiometry and BiGG/SBML notes
mod = CBRead.readSBML2FBA(model_file, work_dir=model_dir)
mod.id = model_name

print(model_name)

CBTools.addStoichToFBAModel(mod)
CBTools.processBiGGchemFormula(mod)
CBTools.processSBMLAnnotationNotes(mod, annotation_key='note')

# open HDF5 vertex file (read-only); the enumeration functions below iterate it
HD5dat = h5py.File(HD5datF,'r')
print(len(HD5dat['data/vertices']), HD5dat['data/vertices'].shape)

def getVariableFluxInfo(vertex_variable_fluxes, vertex_variable_fluxes_all, work_dir=work_dir):
    """
    Read in the variable flux information and process it in terms of indexes, enums etc.

     - *vertex_variable_fluxes* CSV file holding the variable flux names (one comma-separated line)
     - *vertex_variable_fluxes_all* CSV file holding all flux names (row 0) and a
       'True'/'False' variability flag per flux (row 1)
     - *work_dir* [default=module-level work_dir] optional directory prepended to both file names

    Returns: VARIABLE_FLUX_NAMES, VARIABLE_FLUX_NAMES_idx, VARIABLE_FLUX_NAMES_enum
    """
    if work_dir != None:
        vertex_variable_fluxes = os.path.join(work_dir, vertex_variable_fluxes)
        vertex_variable_fluxes_all = os.path.join(work_dir, vertex_variable_fluxes_all)

    # get variable vertex flux names
    # BUGFIX: the original used the Python-2-only file() builtin and never closed
    # the handles ("VF.close" without parentheses is a no-op) -- use context managers
    with open(vertex_variable_fluxes, 'r') as VF:
        VARIABLE_FLUX_NAMES = tuple(VF.read().strip().split(','))
    print(VARIABLE_FLUX_NAMES)
    print(len(VARIABLE_FLUX_NAMES))

    # get variable vertex enumerated flux names
    with open(vertex_variable_fluxes_all, 'r') as VF:
        VFA = [l.strip().split(',') for l in VF]

    # row 0 holds the flux names, row 1 the per-flux 'True'/'False' variability flag
    VARIABLE_FLUX_NAMES_enum = []
    VARIABLE_FLUX_NAMES_idx = []
    for j in range(len(VFA[0])):
        if VFA[1][j] == 'True':
            VARIABLE_FLUX_NAMES_enum.append((j, VFA[0][j]))
            VARIABLE_FLUX_NAMES_idx.append(j)
    del VFA
    VARIABLE_FLUX_NAMES_enum = tuple(VARIABLE_FLUX_NAMES_enum)
    VARIABLE_FLUX_NAMES_idx = numpy.array(VARIABLE_FLUX_NAMES_idx)
    print(VARIABLE_FLUX_NAMES_enum)
    print(len(VARIABLE_FLUX_NAMES_enum))
    print('Variable vertex idx:', len(VARIABLE_FLUX_NAMES_idx))
    return VARIABLE_FLUX_NAMES, VARIABLE_FLUX_NAMES_idx, VARIABLE_FLUX_NAMES_enum

def getCorrCoefMatrix(fname, work_dir=None):
    """
    Loads a correlation matrix exported by outputCorrCoef, returns correlation matrix and row/col labels

     - *fname* the file name
     - *work_dir* [default=None] optional working directory

    Returns a tuple: (numpy array of correlation coefficients, list of row/col labels).
    """
    if work_dir != None:
        fname = os.path.join(work_dir, fname)
    fnames = []
    ccmat = []
    # first column of each row is the flux label, remaining columns are the
    # coefficients; rows without data columns (e.g. blank lines) are skipped.
    # BUGFIX: the original used the Python-2-only file() builtin and never
    # closed the handle -- use a context manager instead.
    with open(fname, 'r') as F:
        for l in F:
            L = [i.strip() for i in l.split(',')]
            if len(L) > 1:
                fnames.append(L.pop(0))
                ccmat.append([float(i) for i in L])
    print(fnames)
    print(ccmat)
    return numpy.array(ccmat), fnames

def getAdjacencyMatrix(arr, noNaN=False, fpFix=False, diagFix=False, getU=False):
    """
    Return a new adjacency matrix, clean up input array if required.

     - *arr* the original matrix e.g. correlation matrix (modified in place and returned)
     - *noNaN* [default=False] replace NaN's with 0.0
     - *fpFix* [default=False] all abs(values) smaller than machine.eps*10.0 are considered zero
     - *diagFix* [default=False] make all diagonal values 1.0
     - *getU* [default=False] only return the Upper matrix, L is set to 0.0

    After the optional clean-ups every non-zero entry is set to 1.0, so the
    result is a 0/1 adjacency matrix.
    """
    # BUGFIX: numpy.MachAr() is deprecated and removed in NumPy 2.0;
    # numpy.finfo provides the same machine epsilon
    eps = numpy.finfo(float).eps
    for r in range(arr.shape[0]):
        for c in range(arr.shape[1]):
            if diagFix and r == c:
                arr[r,c] = 1.0
            if getU and r > c:
                arr[r,c] = 0.0
            if noNaN and numpy.isnan(arr[r,c]):
                arr[r,c] = 0.0
            if fpFix and abs(arr[r,c]) <= eps*10.0:
                arr[r,c] = 0.0
            # binarize; note NaN compares False, so an untreated NaN becomes 0.0
            if abs(arr[r,c]) > 0.0:
                arr[r,c] = 1.0
            else:
                arr[r,c] = 0.0
    return arr

def WriteModules2CSV(modules, fname, work_dir=None):
    """
    Write modules as a csv file with one module per row, note this is a set of lists and not a matrix

     - *modules* a linked list of modules `[[mod1], [mod2]]`
     - *fname* the file name base (``.modules.csv`` is appended)
     - *work_dir* optional output directory

    Short rows are padded with a single space so every row has the same number
    of columns (the length of the longest module).
    """
    fname = fname+'.modules.csv'
    if work_dir != None:
        fname = os.path.join(work_dir, fname)
    # column count is the length of the longest module (0 when *modules* is empty)
    MaxModLen = 0
    for module in modules:
        if len(module) > MaxModLen:
            MaxModLen = len(module)
    # BUGFIX: use open() inside a context manager; the original used the
    # Python-2-only file() builtin and relied on F.close()/flush()
    with open(fname, 'w') as F:
        for module in modules:
            row = []
            for i in range(MaxModLen):
                try:
                    row.append('%s' % module[i])
                except IndexError:
                    row.append(' ')  # pad missing cells, matching the original output
            if row:
                F.write(','.join(row) + '\n')
    print('Modules written to: %s' % fname)

def FindModules_pygraph(CCarr, VFnames):
    """
    Uses PyGraph to find the connected components
    """
    from pygraph.classes.graph import graph
    from pygraph.algorithms.accessibility import connected_components, mutual_accessibility, cut_edges, cut_nodes

    # threshold the rounded correlation matrix into a 0/1 adjacency matrix
    AdjMat = getAdjacencyMatrix(numpy.round(CCarr.copy(), 6), noNaN=False, fpFix=False, diagFix=False, getU=False)

    # build an undirected graph with one edge per correlated flux pair
    GR = graph()
    GR.add_nodes(VFnames)
    nrows, ncols = AdjMat.shape
    for r in range(nrows):
        for c in range(r + 1, ncols):
            if AdjMat[r, c] == 1.0:
                GR.add_edge((VFnames[r], VFnames[c]))

    # maps each node to its component number (1..maxmodnum)
    pg_CComp = connected_components(GR)

    maxmodnum = max(pg_CComp.values()) if pg_CComp else 0
    print('maxmodnum', maxmodnum)

    # group the nodes by component number
    modules = dict((m, []) for m in range(1, maxmodnum + 1))
    for node in pg_CComp:
        modules[pg_CComp[node]].append(node)

    print('\npygraph.connected_components')
    print(pg_CComp)
    print(modules)
    print('\npygraph.modules')
    for m in modules:
        print('\nm%s (%s)' % (m, len(modules[m])))
        print(modules[m])
    print('\n')
    return [modules[m] for m in modules]

def nx_SaveGraph(graph, fname, work_dir=None):
    """Draw *graph* with networkx and save it as <fname>.png at 300 dpi."""
    nx.draw(graph)
    target = fname if work_dir == None else os.path.join(work_dir, fname)
    P.savefig("%s.png" % target, dpi=300)
    P.close('all')

nx = None
def FindModules_networkx(CCarr, VFnames, fname=None, work_dir=None, roundec=8):
    """
    Find the modules using networkx's connected_components:

     - *CCarr* the input array (e.g. correlation matrix)
     - *VFnames* flux/reaction names
     - *fname* filename base for graph plots
     - *work_dir* output directory
     - *roundec* [default=8] decimals *CCarr* is rounded to before thresholding

    Note this is designed to work for correlation matrices
    """
    global nx
    import networkx as nx

    # threshold the rounded correlation matrix into a 0/1 adjacency matrix
    AdjMat = numpy.round(CCarr.copy(), roundec)
    AdjMat = getAdjacencyMatrix(AdjMat, noNaN=False, fpFix=False, diagFix=False, getU=False)

    G = nx.Graph()
    G.add_nodes_from(VFnames)
    for r in range(AdjMat.shape[0]):
        for c in range(AdjMat.shape[1]):
            if AdjMat[r,c] == 1.0 and r != c:
                G.add_edge(VFnames[r], VFnames[c])

    print('Graph is connected: %s' % nx.is_connected(G))
    time.sleep(2)  # debug pause so the message can be read

    # BUGFIX: networkx >= 2.0 returns a generator from connected_components(),
    # which broke the len()/indexing below, and networkx >= 2.4 removed
    # connected_component_subgraphs(); materialise the components as lists and
    # build the subgraphs explicitly so old and new networkx versions both work.
    modules = [list(c) for c in nx.connected_components(G)]
    nx_modules = [G.subgraph(m) for m in modules]
    for m in nx_modules:
        print('\nboundary')
        print(nx.node_boundary(m, G))
        print(nx.edge_boundary(m, G))

    time.sleep(2)

    # this is a bit of fluff that tries to plot all the module sub graphs
    if fname != None:
        nx_SaveGraph(G, fname, work_dir)
        for m in range(len(nx_modules)):
            fname2 = fname+'.m%s' % (m+1)
            nx_SaveGraph(nx_modules[m], fname2, work_dir)

    print('\nnx.connected_components (%s)' % len(modules))
    for m in range(len(modules)):
        print('\nm%s (%s)' % (m+1, len(modules[m])))
        print(modules[m])
    print('\n')
    return modules

def ModuleSearch(corr_arr, node_names, fname, work_dir, method='networkx'):
    """
    Searches for modules and other interesting stuff, returns a list of modules
     - *corr_arr* a correlation matrix
     - *node_names* say fluxes
     - *method* can be one of ['pygraph','networkx']

    """
    # dispatch on the requested graph library; unknown methods yield no modules
    if method == 'networkx':
        print('\n****************\nUSING NETWORKX TO FIND MODULES\n****************\n')
        return FindModules_networkx(corr_arr, node_names, fname+'.nx', work_dir, roundec=8)
    if method == 'pygraph':
        print('\n****************\nUSING PYGRAPH TO FIND MODULES\n****************\n')
        return FindModules_pygraph(corr_arr, node_names)
    print('Invalid method: %s' % method)
    return []

def Enumerate_modules(vertices, modules, ennu_var_fluxes, fname, work_dir=None):
    """
    Enumerate vertices per module, writes (work_dir)fname.modules.txt

     - *vertices* an HDF5 file (or mapping) holding the vertex array under 'data/vertices'
     - *modules* a list of modules (each module is a list of fluxes)
     - *ennu_var_fluxes* enumerated variable fluxes (VARIABLE_FLUX_NAMES_enum) generated by getVariableFluxInfo()
     - *fname* output file name base
     - *work_dir* [default=None] optional output directory

    """
    # map each module's flux names to their column indices in the vertex array
    modules_idx = []
    uvertin = []
    for m in range(len(modules)):
        module_idx = [j[0] for j in ennu_var_fluxes if j[1] in modules[m]]
        modules_idx.append(module_idx)
        # seed each module's list with a header row of indices (popped before output)
        uvertin.append([numpy.array(module_idx)])

    # BUGFIX: this loop previously read the module-level HD5dat and silently
    # ignored the *vertices* parameter
    vtx_data = vertices['data/vertices']
    num_vtx = vtx_data.shape[0]
    TIME_START = time.time()
    cntr = 0
    for idx in range(num_vtx):
        VTX = vtx_data[idx]
        for M in range(len(modules_idx)):
            modval = VTX.take(modules_idx[M])
            new_vert = True
            for known in uvertin[M]:
                if (modval == known).all():
                    new_vert = False
                    break
            if new_vert:
                print('Adding new module solution (%s) to module: %s' % (idx, M))
                uvertin[M].append(modval)
        del VTX
        # periodic progress report: percent done, minutes elapsed, minutes estimated
        if cntr == 10000:
            print(idx, float(idx)/float(num_vtx)*100.0, ((time.time()-TIME_START)/60.0), ((time.time()-TIME_START)/idx*float(num_vtx))/60.0)
            cntr = 1
        else:
            cntr += 1

    print('\n\n')
    print('Modules')
    if work_dir != None:
        fname = os.path.join(work_dir, fname)
    verTotal = 1
    with open(fname+'.modules.txt', 'w') as MRF:
        for m in range(len(uvertin)):
            uvertin[m].pop(0)  # drop the index header row
            if not uvertin[m]:
                # BUGFIX: a bare except here used to report modules with a single
                # unique vertex as "empty" (IndexError from the debug prints below)
                # and could multiply verTotal by zero for genuinely empty modules
                print('\nModule %s empty' % m)
                continue
            print('\nModule:', m)
            print(len(uvertin[m]))
            MRF.write('Module: %s\n' % (m+1))
            MRF.write('Unique vertices: %s\n' % len(uvertin[m]))
            MRF.write('Module fluxes: %s\n' % ','.join('%s' % j for j in modules[m]))
            # total vertex count is the product of per-module unique vertex counts
            verTotal *= len(uvertin[m])
            # debug: show the first and last unique vertices found
            print(uvertin[m][0])
            print(uvertin[m][-1])
        MRF.write('Total vertices: %s\n' % verTotal)

def Enumerate_modules_csv(vertices, modules, ennu_var_fluxes, fname, work_dir=None):
    """
    Enumerate vertices per module, writes (work_dir)fname.modules.txt plus one
    (work_dir)fname.module_<n>.csv per module holding the unique flux value
    combinations (first row is the flux names).

     - *vertices* an HDF5 file (or mapping) holding the vertex array under 'data/vertices'
     - *modules* a list of modules (each module is a list of fluxes)
     - *ennu_var_fluxes* enumerated variable fluxes (VARIABLE_FLUX_NAMES_enum) generated by getVariableFluxInfo()
     - *fname* output file name base
     - *work_dir* [default=None] optional output directory

    """
    # map each module's flux names to their column indices and keep the names
    # for the CSV header rows
    modules_idx = []
    modules_names = []
    uvertin = []
    for m in range(len(modules)):
        module_idx = []
        module_names = []
        for j in ennu_var_fluxes:
            if j[1] in modules[m]:
                module_idx.append(j[0])
                module_names.append(j[1])
        modules_idx.append(module_idx)
        modules_names.append(module_names)
        # seed each module's list with a header row of indices (popped before output)
        uvertin.append([numpy.array(module_idx)])

    # BUGFIX: this loop previously read the module-level HD5dat and silently
    # ignored the *vertices* parameter
    vtx_data = vertices['data/vertices']
    num_vtx = vtx_data.shape[0]
    TIME_START = time.time()
    cntr = 0
    for idx in range(num_vtx):
        VTX = vtx_data[idx]
        for M in range(len(modules_idx)):
            modval = VTX.take(modules_idx[M])
            new_vert = True
            for known in uvertin[M]:
                if (modval == known).all():
                    new_vert = False
                    break
            if new_vert:
                print('Adding new module solution (%s) to module: %s' % (idx, M))
                uvertin[M].append(modval)
                print(modval)
        del VTX
        # periodic progress report: percent done, minutes elapsed, minutes estimated
        if cntr == 10000:
            print('STATUS', idx, float(idx)/float(num_vtx)*100.0, ((time.time()-TIME_START)/60.0), ((time.time()-TIME_START)/idx*float(num_vtx))/60.0, '\n')
            cntr = 1
        else:
            cntr += 1

    print('\n\n')
    print('Modules')
    if work_dir != None:
        fname = os.path.join(work_dir, fname)
    verTotal = 1
    headers = []
    with open(fname+'.modules.txt', 'w') as MRF:
        for m in range(len(uvertin)):
            headers.append(uvertin[m].pop(0))  # keep the index header row aside
            if not uvertin[m]:
                # BUGFIX: a bare except here used to hide IndexErrors and could
                # multiply verTotal by zero for genuinely empty modules
                print('\nModule %s empty' % m)
                continue
            MRF.write('Module: %s\n' % (m+1))
            MRF.write('Unique vertices: %s\n' % len(uvertin[m]))
            MRF.write('Module fluxes: %s\n' % ','.join('%s' % j for j in modules[m]))
            verTotal *= len(uvertin[m])
        MRF.write('Total vertices: %s\n' % verTotal)

    print('\nDIAGNOSTIX')
    print(len(uvertin))
    print([len(m) for m in uvertin])
    # export one CSV per module: first row is the flux names, following rows
    # are the unique module flux value combinations
    for m in range(len(uvertin)):
        J = modules_names[m]
        print(J)
        mL = copy.copy(uvertin[m])
        mL.insert(0, J)
        CBTools.exportLabelledLinkedList(mL, names=None, fname=fname+'.module_%s.csv' % (m+1) , sep=',', format='%s', appendlist=False)

# load the correlation matrix and variable-flux metadata produced by the
# vertex analysis step
VertCorrCoeff, VARIABLE_FLUX_NAMES = getCorrCoefMatrix(correlation_matrix_name, work_dir=work_dir)
VARIABLE_FLUX_NAMES, VARIABLE_FLUX_NAMES_idx, VARIABLE_FLUX_NAMES_enum = getVariableFluxInfo(vertex_variable_fluxes, vertex_variable_fluxes_all, work_dir=work_dir)

modules = None
netx_modules = None
pygr_modules = None
if ENABLE_MODULE_SEARCH:
    # derive the modules from the correlation matrix using networkx
    print('\nPerforming module search ... ')
    netx_modules = ModuleSearch(VertCorrCoeff, VARIABLE_FLUX_NAMES, model_name, work_dir, method='networkx')
    WriteModules2CSV(netx_modules, model_name+'.nx', work_dir=work_dir)
    print(netx_modules)
    modules = netx_modules
    print('done.')
else:
    # load precomputed modules from ModuleLibrary instead; the getter name is
    # derived from the model name with dots replaced by underscores
    print('\nLoading modules ... ')
    import ModuleLibrary
    modImpName = 'get_%s_modules' %  model_name.replace('.','_')
    modules = getattr(ModuleLibrary, modImpName)()
    WriteModules2CSV(modules, model_name+'.lib', work_dir=work_dir)
    print('done.')

print('\nModules')
for m in modules:
    print(m)

if ENABLE_MODULE_ENUMERATION:
    print('\nEnumerating modules ... ')
    if ENABLE_MODULE_VALUES:
        # the CSV variant additionally writes per-module flux value tables
        Enumerate_modules_csv(HD5dat, modules, VARIABLE_FLUX_NAMES_enum, model_name, work_dir=work_dir)
    else:
        Enumerate_modules(HD5dat, modules, VARIABLE_FLUX_NAMES_enum, model_name, work_dir=work_dir)
    print('done.\n')

def DoFVAonModules(fba, modules, fname, work_dir=None):
    """
    Perform FVA on modules and plot/export results

     - *fba* the FBA model to analyse
     - *modules* a list of modules (lists of reaction ids)
     - *fname* output file name base
     - *work_dir* [default=None] output directory, passed through to the
       CBWrite/CBPlot helpers

    """
    # NOTE: the original called os.path.join(work_dir, fname) here and discarded
    # the result; since work_dir is passed straight to CBWrite/CBPlot below,
    # joining it into fname would have doubled the prefix -- the no-op is removed.
    fname += '.moduleFVA'
    for M in range(len(modules)):
        print(modules[M])
        # BUGFIX: the FVA call previously used the module-level *mod* and
        # silently ignored the *fba* parameter
        fva_dat, fva_names = slv.FluxVariabilityAnalysis(fba, selected_reactions=modules[M], pre_opt=True, tol=None, objF2constr=True, rhs_sense='lower', optPercentage=100.0, work_dir=None, debug=False)
        # the first module creates the FVA data file, subsequent modules append
        CBWrite.WriteFVAdata(fva_dat, fva_names, fname, work_dir=work_dir, roundec=6, scale_min=False, appendfile=(M > 0), info='m%s' % (M+1))
        CBPlot.plotFluxVariability(fva_dat, fva_names, fname+'_m%s' % (M+1), work_dir=work_dir, title='m%s' % (M+1), ySlice=None, minHeight=None, maxHeight=50, roundec=6)

if ENABLE_MODULE_FVA:
    print('\nPerforming FVA on modules ... ')
    DoFVAonModules(mod, modules, model_name, work_dir=work_dir)
    print('done.\n')

# close the HDF5 vertex file and any open matplotlib figures.
# NOTE: converted the Python-2-only print statement to the print() call form
# used everywhere else in this file (identical output for a single string).
print('\nshutting down ... ')
HD5dat.close()
P.close('all')
