from distance_metrics import *
#from cdf_plot import *
from hashlib import md5
from matplotlib.pyplot import hist, show, savefig, clf
from scipy.sparse import dok_matrix
from pylab import *
import cPickle as pickle
import igraph
import logging
import os
import sys

# Configure the root logger once, at import time:
# INFO level, tab-separated "timestamp<TAB>level<TAB>message" lines.
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s\t%(levelname)s\t%(message)s')

def calc_file_md5(fn):
    """Returns the MD5 hash of a given input file, as hex-string.

    Parameters:
    - `fn`    Path to the file to hash (string)

    The function reads the file in blocks of 8,192 bytes in order to avoid
    excessive memory usage, and utilize the internal MD5 block size.
    """
    m = md5()
    with open(fn, 'rb') as f:
        # Bug fix: the file is opened in binary mode, so the end-of-file
        # sentinel must be b'' (the old '' sentinel only worked because
        # Python 2 treats str and bytes as the same type; on Python 3 it
        # would loop forever).
        for block in iter(lambda: f.read(8192), b''):
            m.update(block)
    return m.hexdigest()

def transform_relationships_file_to_ncol(fn_rels, fn_ncol):
    """Convert an AS relationships file into an ncol edge-list file.

    Parameters:
    - `fn_rels`    Path to relationships file, lines "as1|as2|type" (string)
    - `fn_ncol`    Output path for the ncol edge list (string)

    Edges are written from customer to provider (one line per edge);
    peer/sibling relationships produce an edge in each direction.
    Comment lines (starting with '#') are skipped.
    """
    with open(fn_rels, 'r') as f_rels:
        with open(fn_ncol, 'w') as f_ncol:
            # Iterate the file object directly: xreadlines() is Python-2-only
            # and redundant even there.
            for line in f_rels:
                # Skip comment lines
                if line.startswith('#'):
                    continue
                as_left, as_right, reltype = line.strip().split('|')
                if '-1' == reltype:
                    # Bug fix: the debug messages for types -1/1 were inverted
                    # relative to get_relationships() and
                    # read_relationships_graph(); the written edge direction
                    # (customer -> provider) is unchanged.
                    logging.debug('%s is customer of %s', as_right, as_left)
                    f_ncol.write('%s %s\n' % (as_right, as_left))
                elif '1' == reltype:
                    logging.debug('%s is provider of %s', as_right, as_left)
                    f_ncol.write('%s %s\n' % (as_left, as_right))
                else:
                    logging.debug('%s and %s are peers or siblings', as_left, as_right)
                    f_ncol.write('%s %s\n' % (as_left, as_right))
                    f_ncol.write('%s %s\n' % (as_right, as_left))

def get_or_add_asn_by_name(g, as_name):
    """Returns a vertex by its name, or creates one with the given name.

    Parameters:
    - `g`          A graph object to look in (igraph.Graph)
    - `as_name`    The name of the AS (string)

    Returns: A vertex from `g` with the given `as_name` (igraph.Vertex)
    If `g` does not contain a vertex with the given name,
    a new vertex is created in `g` with that name.
    If more than one vertex with the given name exists, the first one is returned.
    """
    if not 'name' in g.vs.attribute_names():
        # No vertex has been named yet: this is the first name in the graph.
        # Bug fix: an empty graph has no vertex 0 to reuse, so create one in
        # that case instead of raising IndexError (older igraph versions
        # created graphs with a default root vertex; newer ones start empty).
        if 0 == g.vcount():
            g.add_vertices(1)
        logging.debug('New graph. Reusing vertex 0.')
        v = g.vs[0]
        v['name'] = as_name
        return v
    # Search for an existing node
    vs = g.vs(name=as_name)
    if 0 < len(vs):
        logging.debug('Vertex %d has name "%s"', vs[0].index, vs[0]['name'])
        return vs[0]
    # No such vertex. Create it, name it, and return it.
    logging.debug('Creating new vertex for name "%s"', as_name)
    g.add_vertices(1)
    v = g.vs[g.vcount()-1]
    v['name'] = as_name
    return v

def get_node_by_asn(g, asn):
    """Return the first vertex in `g` whose 'name' attribute equals `asn`.

    Parameters:
    - `g`      Graph to search (igraph.Graph)
    - `asn`    AS name/number to look up

    Returns: The first matching vertex (igraph.Vertex), or None if no
    vertex carries that name.
    """
    matches = g.vs(name=asn)
    # More than one match is possible but unexpected; the first one wins.
    return matches[0] if len(matches) > 0 else None

def read_as_level_graph(fn):
    """Read a ncol-AS-level file and return an undirected AS graph.

    Parameters:
    - `fn`    Path to ncol file with AS-level data (string)

    Returns: AS graph (igraph.Graph)

    Details:
        Every line in the ncol-AS-level file defines a relation between
        two ASes in the form of "as#1 as#2".
        The resulting graph contains a vertex for every AS# encountered,
        and an undirected edge for every relationship.
    """
    graph = igraph.read(fn)
    graph.to_undirected()
    # Vertex names arrive as strings from the ncol parser; store them as ints.
    graph.vs['name'] = [int(name) for name in graph.vs['name']]
    # Save MD5 hash on processed content as a graph attribute,
    # in order to detect changes when loading
    graph['md5'] = calc_file_md5(fn)
    # Remember the filename used for building the graph
    graph['src-file'] = os.path.split(fn)[-1]
    return graph

def get_relationships(fn, nodes):
    """Read a relationships file and return a dictionary of relationships.

    Parameters:
    - `fn`       Path to relationships file; data lines are "as1|as2|type"
                 with integer fields, '#' lines are comments (string)
    - `nodes`    Iterable of node objects supporting node['name'] (e.g.
                 igraph vertices); every listed name gets an entry even if
                 it never appears in the file

    Returns: dict mapping AS name -> {'c': set of customers,
             'p': set of providers, 's': set of siblings/peers}.

    Relationship types: -1 means as2 is a customer of as1, 1 means as2 is
    a provider of as1, anything else is treated as peer/sibling.
    """
    rels = dict()
    def add_node(name):
        # Ensure `name` has an (initially empty) relationships record.
        if name not in rels:
            rels[name] = { 'c': set(),
                           'p': set(),
                           's': set() }
    def add_rel(provider, customer):
        # Record a provider/customer pair symmetrically on both records.
        add_node(provider)
        rels[provider]['c'].add(customer)
        add_node(customer)
        rels[customer]['p'].add(provider)
    # Pre-populate a record for every known node, so callers can look up
    # nodes that have no relationships in the file.
    for node in nodes:
        add_node(node['name'])
    cnt = 0
    with open(fn, 'r') as f:
        # Iterate the file object directly: xreadlines() is Python-2-only.
        for line in f:
            # Skip comment lines
            if line.startswith('#'):
                continue
            as1, as2, reltype = map(int, line.strip().split('|'))
            if -1 == reltype:
                logging.debug('%s is customer of %s', as2, as1)
                add_rel(as1, as2)
            elif 1 == reltype:
                logging.debug('%s is provider of %s', as2, as1)
                add_rel(as2, as1)
            else:
                logging.debug('%s and %s are peers or siblings', as1, as2)
                add_node(as1)
                rels[as1]['s'].add(as2)
                add_node(as2)
                rels[as2]['s'].add(as1)
            cnt += 1
            if 0 == cnt % 15000:
                logging.info('At line %d ...', cnt)
    return rels

def read_relationships_graph(fn):
    """Read a relationships file and return a directed AS graph.

    Parameters:
    - `fn`    Path to relationships file (string)

    Returns: AS relationships graph (igraph.Graph)

    Details:
        Every line in the relationships file is in the form of "as1|as2|type"
        where as2 is a rel(type) of as1, and rel(type) is one of
        "customer" (-1), "peer" (0), "provider" (1), or "sibling" (3).
        The resulting graph contains a vertex for every AS# encountered,
        and a directed edge from as_i to as_j for every (i,j) such that
        as_i is a customer / peer / sibling of as_j, or such that
        as_j is a provider / peer / sibling of as_i.

        The result is cached (pickled) under cache/, keyed by filename;
        the cache is invalidated when the source file's MD5 or the
        algorithm version changes.
    """
    ###########################################################################################
    # relationships_ver should be incremented every time the algorithm used to build the graph
    # changes in such a way that even if the source-file content is unchanged,
    # the graph should be rebuilt, and not loaded from cache.
    #
    # Version history:
    # 1.0    Edges point from provider to customer
    # 1.1    Edges point from customer to provider (Udi's email, 26/7/2011)
    ###########################################################################################
    relationships_ver = '1.1'
    # Check for a cached version
    fn_no_path = os.path.split(fn)[-1]
    pkl_fn = 'cache/%s.pkl' % (fn_no_path)
    if os.path.isfile(pkl_fn):
        # There's a pickle file. Let's read it and see whether it's up to date.
        # NOTE: unpickling executes arbitrary code -- only load caches that
        # this program itself wrote.
        logging.info('Loading cached version of relationships graph for "%s"', fn)
        with open(pkl_fn, 'rb') as fpkl:
            g = pickle.load(fpkl)
        # Calculate MD5 of current file
        h = calc_file_md5(fn)
        # Robustness fix: caches written before versioning was introduced lack
        # the 'ver' attribute; treat them as stale instead of raising KeyError.
        cached_ver = g['ver'] if 'ver' in g.attributes() else None
        if h == g['md5'] and relationships_ver == cached_ver:
            logging.info('Cached version is up to date (MD5 %s, version %s)',
                         h, relationships_ver)
            return g
        else:
            logging.info('Cached version not up to date (MD5 (%s, %s), Version (%s, %s))'
                         ' -- Rebuilding graph', h, g['md5'], relationships_ver, cached_ver)
    # No cache. Build from scratch, and cache result.
    g = igraph.Graph(directed=True)
    cnt = 0
    with open(fn, 'r') as f:
        # Iterate the file object directly: xreadlines() is Python-2-only.
        for line in f:
            # Skip comment lines
            if line.startswith('#'):
                continue
            as1, as2, reltype = line.strip().split('|')
            v1 = get_or_add_asn_by_name(g, as1).index
            v2 = get_or_add_asn_by_name(g, as2).index
            if '-1' == reltype:
                logging.debug('%s is customer of %s', as2, as1)
                g.add_edges((v2,v1))
            elif '1' == reltype:
                logging.debug('%s is provider of %s', as2, as1)
                g.add_edges((v1,v2))
            else:
                # Peers / siblings get an edge in each direction.
                logging.debug('%s and %s are peers or siblings', as1, as2)
                g.add_edges([(v1,v2), (v2,v1)])
            cnt += 1
            if 0 == cnt % 5000:
                logging.info('At line %d ...', cnt)
    # Save MD5 hash on processed content as a graph attribute,
    # in order to detect changes when loading
    g['md5'] = calc_file_md5(fn)
    # Remember the filename used for building the graph
    g['src-file'] = fn_no_path
    # Remember algorithm version used for building the graph
    g['ver'] = relationships_ver
    logging.info('Caching graph for "%s" (MD5 is %s, Version is %s)',
                 fn, g['md5'], g['ver'])
    with open(pkl_fn, 'wb') as fpkl:
        pickle.dump(g, fpkl)
    return g

def add_directions(undirected_graph, rels):
    """Return a directed copy of `undirected_graph`, oriented using `rels`.

    Parameters:
    - `undirected_graph`    Undirected AS-level graph (igraph.Graph)
    - `rels`                Relationships dict as returned by
                            get_relationships(): AS name -> {'c','p','s'} sets

    Returns: Directed graph (igraph.Graph). Each undirected edge becomes a
    customer->provider edge when the relationship is known, a heuristically
    oriented edge when only the vertex degrees hint at a direction, or a
    pair of opposite edges for peers/siblings. The number of heuristically
    decided edges is stored in the 'heuristics' graph attribute.
    """
    directed_graph = undirected_graph.copy()
    # Start with every edge in both directions, then delete the wrong one.
    directed_graph.to_directed(mutual=True)
    cnt = 0
    heuristics_cnt = 0
    # Iterate over edges
    for edge in undirected_graph.es:
        as1, as2 = directed_graph.vs[edge.tuple]
        # Add directed edge according to direction in relationships
        if as1['name'] in rels[as2['name']]['c']:
            # AS1 is a customer of AS2: keep only AS1 -> AS2
            logging.debug('Real edge direction: ASN-%s --> ASN-%s', as1['name'], as2['name'])
            directed_graph.delete_edges((as2.index, as1.index))
        elif as1['name'] in rels[as2['name']]['p']:
            # AS1 is a provider of AS2: keep only AS2 -> AS1
            logging.debug('Real edge direction: ASN-%s --> ASN-%s', as2['name'], as1['name'])
            directed_graph.delete_edges((as1.index, as2.index))
        elif as1['name'] in rels[as2['name']]['s']:
            # AS1 and AS2 are peers or siblings: keep both directions
            logging.debug('Real edge direction: ASN-%s <--> ASN-%s', as1['name'], as2['name'])
        else:
            # No known relationship: use degree heuristics to decide direction.
            # A vertex with a much higher degree (ratio > 1.5 AND absolute
            # gap > 100) is assumed to be the provider.
            heuristics_cnt += 1
            deg1, deg2 = undirected_graph.degree(edge.tuple)
            deg_ratio = float(deg1) / float(deg2)
            deg_dist = deg1 - deg2
            if deg_ratio > 1.5 and deg_dist > 100:
                logging.debug('Heuristic edge: ASN-%s (degree %d) --> ASN-%s (degree %d)',
                              as2['name'], deg2, as1['name'], deg1)
                directed_graph.delete_edges((as1.index, as2.index))
            elif deg_ratio < 1/1.5 and deg_dist < -100:
                logging.debug('Heuristic edge: ASN-%s (degree %d) --> ASN-%s (degree %d)',
                              as1['name'], deg1, as2['name'], deg2)
                directed_graph.delete_edges((as2.index, as1.index))
            else:
                # Degrees too similar: treat as Peer / Sibling (keep both)
                logging.debug('Heuristic edges: ASN-%s (degree %d) <--> ASN-%s (degree %d)',
                              as1['name'], deg1, as2['name'], deg2)
        cnt += 1
        if 0 == cnt % 5000:
            logging.info('At edge %d ...', cnt)
    # Bug fix: record the heuristics count on the graph -- main() reads
    # directed_graph['heuristics'], which previously raised KeyError because
    # the counter was computed but never stored.
    directed_graph['heuristics'] = heuristics_cnt
    return directed_graph

def add_directions_to_asn_graph(undirected_graph, relationships_graph):
    """Returns a copy of the AS-level graph with directions for the links.

    Parameters:
    - `undirected_graph`       Undirected AS-level graph; must carry 'md5'
                               and 'src-file' graph attributes (igraph.Graph)
    - `relationships_graph`    Directed relationships graph as returned by
                               read_relationships_graph() (igraph.Graph)

    Returns: Directed graph (igraph.Graph) with graph attributes
    'heuristics' (count of heuristically-directed edges), MD5s and source
    filenames of both inputs, and 'ver' (algorithm version).

    The directions are determined using the `relationships_graph` when such links are available
    in that graph, and determined heuristically if there isn't such a link.
    The result is cached (pickled) under cache/, keyed by both source
    filenames, and invalidated when either input's MD5 or the algorithm
    version changes.
    """
    ###########################################################################################
    # dirs_ver should be incremented every time to algorithm used to add directions changes
    # in such a way that even if the base-graphs were not modified from cached versions,
    # the graph should be rebuilt, and not loaded from cache.
    #
    # Version history:
    # 1.0    Edges point from provider to customer
    # 1.1    Edges point from customer to provider (Udi's email, 26/7/2011)
    # 1.2    Bug fixes: relationship lookup using wrong index,
    #        erroneous inversion of heuristic condition (with Lipner, 21/08/2011)
    ###########################################################################################
    dirs_ver = '1.2'
    # Check for a cached version
    pkl_fn = 'cache/%s__%s.pkl' % (undirected_graph['src-file'], relationships_graph['src-file'])
    if os.path.isfile(pkl_fn):
        # There's a pickle file. Let's read it and see whether it's up to date.
        # NOTE: unpickling executes arbitrary code -- only load caches this
        # program itself wrote.
        logging.info('Loading cached version of directed graph from "%s"', pkl_fn)
        with open(pkl_fn, 'rb') as fpkl:
            directed_graph = pickle.load(fpkl)
        # Cache is valid only if both inputs are unchanged AND the algorithm
        # version matches.
        if undirected_graph['md5'] == directed_graph['md5-undirected'] and   \
                relationships_graph['md5'] == directed_graph['md5-relationships'] and   \
                dirs_ver == directed_graph['ver']:
            logging.info('Cached version of directed graph is up to date')
            return directed_graph
        else:
            logging.info('Cached version not up to date -- Rebuilding directed graph')
    # No cache. Build from scratch, and cache result.
    directed_graph = igraph.Graph(directed=True)
    cnt = 0
    heuristics_cnt = 0
    # Iterate over undirected edges
    for undirected_edge in undirected_graph.es:
        as1, as2 = undirected_graph.vs[undirected_edge.tuple]
        # Get (or create) the equivalent nodes in the output directed graph
        dir_as1 = get_or_add_asn_by_name(directed_graph, as1['name'])
        dir_as2 = get_or_add_asn_by_name(directed_graph, as2['name'])
        # Add directed edge according to direction in relationships graph, if it exists
        # Are there nodes in the relationships graph with name `as1` & `as2`?
        heuristic_flag = True
        rel_as1 = get_node_by_asn(relationships_graph, as1['name'])
        rel_as2 = get_node_by_asn(relationships_graph, as2['name'])
        if rel_as1 and rel_as2:
            # They exist! Is there an edge between them in the relationships graph?
            # NOTE(review): are_connected() on a directed graph is assumed
            # here to test the specific arc direction (as1 -> as2) --
            # confirm against the igraph version in use.
            if relationships_graph.are_connected(rel_as1.index, rel_as2.index):
                heuristic_flag = False
                logging.debug('Real edge direction: ASN-%s --> ASN-%s', as1['name'], as2['name'])
                directed_graph.add_edges((dir_as1.index, dir_as2.index))
            elif relationships_graph.are_connected(rel_as2.index, rel_as1.index):
                heuristic_flag = False
                logging.debug('Real edge direction: ASN-%s --> ASN-%s', as2['name'], as1['name'])
                directed_graph.add_edges((dir_as2.index, dir_as1.index))
        if heuristic_flag:
            # Use degree heuristics to decide edge direction: a vertex with
            # a much higher degree (ratio > 1.5 AND absolute gap > 100) is
            # assumed to be the provider, so the edge points toward it.
            heuristics_cnt += 1
            deg1, deg2 = undirected_graph.degree(undirected_edge.tuple)
            deg_ratio = float(deg1) / float(deg2)
            deg_dist = deg1 - deg2
            if deg_ratio > 1.5 and deg_dist > 100:
                logging.debug('Heuristic edge: ASN-%s (degree %d) --> ASN-%s (degree %d)',
                              as2['name'], deg2, as1['name'], deg1)
                directed_graph.add_edges((dir_as2.index, dir_as1.index))
            elif deg_ratio < 1/1.5 and deg_dist < -100:
                logging.debug('Heuristic edge: ASN-%s (degree %d) --> ASN-%s (degree %d)',
                              as1['name'], deg1, as2['name'], deg2)
                directed_graph.add_edges((dir_as1.index, dir_as2.index))
            else:
                # Degrees too similar: Peer / Sibling -- add both directions
                logging.debug('Heuristic edges: ASN-%s (degree %d) <--> ASN-%s (degree %d)',
                              as1['name'], deg1, as2['name'], deg2)
                directed_graph.add_edges([(dir_as1.index, dir_as2.index),
                                          (dir_as2.index, dir_as1.index)])
        
        cnt += 1
        if 0 == cnt % 5000:
            logging.info('At edge %d ...', cnt)
    directed_graph['heuristics'] = heuristics_cnt
    # Save MD5 hash on processed content as a graph attribute,
    # in order to detect changes when loading
    directed_graph['md5-undirected'] = undirected_graph['md5']
    directed_graph['md5-relationships'] = relationships_graph['md5']
    directed_graph['undirected-src-file'] = undirected_graph['src-file']
    directed_graph['relationships-src-file'] = relationships_graph['src-file']
    # Remember algorithm version used for building the graph
    directed_graph['ver'] = dirs_ver
    logging.info('Caching enhanced graph to file "%s" (Unirected-MD5 "%s", Relationships-MD5 "%s")',
                 pkl_fn, undirected_graph['md5'], relationships_graph['md5'])
    with open(pkl_fn, 'wb') as fpkl:
        pickle.dump(directed_graph, fpkl)
    return directed_graph

def compare_directed_and_undirected_graphs (undirected,directed):
    """Compares a directed and undirected graph using several metrics.

    Parameters:
    - `undirected`    List of per-node scores from the undirected graph
    - `directed`      List of per-node scores from the directed graph

    NOTE: this function sorts both input lists IN PLACE (see the .sort()
    calls below) -- callers must not rely on the original ordering
    afterwards.

    Metrics:
        - Mean Square Distance - compared to 0.
        - Weighted PageRank Metric - compared to sum of harmonic series.
        - TopNValues: take the top N values of each graph and check how many are identical.
                      Plot for rising numbers of N. - compared to sum(1,2...max(N))
        - 

    In Progress: most of the plotting below is commented out; the helper
    functions (meanSquareDistance, listAbsDistance, logList, ...) come from
    the star-import of distance_metrics.

    """
        
    # Some general statistics for reference. Can get rid of these later.
    print "Mean Square Distance:", meanSquareDistance(undirected, directed)
    lsd= listAbsDistance(undirected, directed)
    print "Min values:", min(lsd), "Max values:", max(lsd)

    # Save Graph Values.
    ll_undir = logList(undirected)
    ll_dir = logList(directed)    
#    hist(ll_undir,100, alpha=0.5, log=True)
#    hist(ll_dir,100, alpha=0.5, log=True)
#    savefig('similarity.png', dpi=400)
#    clf()
#    hist(logList(lsd),100, log=True)
#    savefig('difference.png', dpi=400)
#    wprm = weightedPageRankMetric (undirected, directed)
#    hist(wprm ,100, log=True)
#    savefig('wprm.png', dpi=400)
#    
    # Print Weighted PageRank Metric
#    print "Weighted PageRank Metric comparing directed and undirected graphs:", sum(wprm)
#    print "Harmonic Sum (for reference)", calcHarmonics (len(undirected))
#    

    # Keep unsorted and sorted array views of the (log-scaled) score lists,
    # for the scatter plots below.
    a_ll_undir = array(ll_undir)
    a_ll_dir = array(ll_dir)
    ll_undir.sort()
    ll_dir.sort()
    as_ll_undir = array(ll_undir)
    as_ll_dir = array(ll_dir)
    
    # Same for the raw score lists -- this mutates the caller's lists.
    a_undir = array(undirected)
    a_dir = array(directed)
    undirected.sort()
    directed.sort()
    as_undir = array(undirected)
    as_dir = array(directed)
   
    
#    Checking Values of Directed and Undirected Graphs
#    f = open('undirected.txt', 'w')
#    undirected.sort()
#    for item in undirected:
#        print>>f, item
#        
#    f = open('directed.txt', 'w')
#    directed.sort()
#    for item in directed:
#        print>>f, item    
#    f.close()
  
#    Plotting CDF
#    cdf_plot(array(ll_undir),'-')
#    cdf_plot(array(ll_dir),'-')
##    show()
#    savefig('cdfplot.png', dpi=300)
#    clf()
   
#    Plotting Scatter Plots
#    scatter (a_ll_undir, a_ll_dir, alpha=0.3)
#    savefig('scatter_log.png', dpi=300) 
#    xlabel ('Undirected')
#    ylabel ('Directed')
#    title ('Scatter Plot, log-scale, unsorted')
#    savefig('scatter_log.png', dpi=300) 
#    clf()
#    
#    
#    scatter (as_ll_undir,as_ll_dir, alpha=0.3)
#    xlabel ('Undirected')
#    ylabel ('Directed')
#    title ('Scatter Plot, log-scale, sorted')
#    savefig('scatter_log_sorted.png', dpi=300) 
#    clf()
#    
#    scatter (a_undir, a_dir, alpha=0.3, color="darkgreen")
#    xlabel ('Undirected')
#    ylabel ('Directed')
#    title ('Scatter Plot, unsorted')
#    savefig('scatter.png', dpi=300)
#    clf()   
#    
#    scatter (as_undir, as_dir, alpha=0.3, color="darkgreen")
#    xlabel ('Undirected')
#    ylabel ('Directed')
#    title ('Scatter Plot, sorted')
#    savefig('scatter_sorted.png', dpi=300)
#    clf()  

def compare_centrality_measures (x,y,txt):
    """Compares two different centrality metrics.
    
    Metrics:
        - Mean Square Distance - compared to 0.
        - Weighted PageRank Metric - compared to sum of harmonic series.
        - TopNValues: take the top N values of each graph and check how many are identical.
                      Plot for rising numbers of N. - compared to sum(1,2...max(N))
        - 
    
    In Progress. 
    
    """

    if len(x)!=len(y):
        raise "List length not equal."

#     Some general statistics for reference. Can get rid of these later.
#    print "Mean Square Distance:", meanSquareDistance(x, y)
#    lsd= listAbsDistance(x, y)
#    print "Min values:", min(lsd), "Max values:", max(lsd)

    # Save Graph Values.
#    ll_x = logList(x)
#    ll_y = logList(y)    
#    lln_x = normList(ll_x)
#    lln_y = normList(ll_y)

    tx = normList (x)
    ty = normList (y)
    ltx = logList (tx)
    lty = logList (ty)

#    SIMILARITY HISTOGRAMS

    

    hist(ltx,200, alpha=0.5, log=True, label="Undirected")
    hist(lty,200, alpha=0.5, log=True, label = txt)
    xlabel ('Normalized labeling range')
    ylabel ('#nodes labeled')
    legend ()
    title ('Histogram: Normalized labeling of undirected PageRank vs. ' + txt + ' - log scale')
    show()
#    savefig('similarity_' + txt + '.png', dpi=400)
    clf()
    
#    SCATTER PLOTS
    scatter (ltx, lty, alpha=0.3)
    xlabel ('Undirected')
    ylabel (txt)
    title ('Scatter Plot, undirected vs. ' + txt)
    show()
#    savefig('scatter_' + txt + '.png', dpi=400) 
    clf()


#    lsd= listAbsDistance(x, y)
#    hist(lsd,200, log=True)
#    show()
#    clf()


#    WPRM 
    wprmX = weightedPageRankMetric (x, y)
    wprmY = weightedPageRankMetric (y, x)    
    hist(wprmX ,200, alpha = 0.5, log=True)
    hist(wprmY ,200, alpha = 0.5, log=True)
    show()
    clf()
    print "Weighted PageRank Metric comparing x and y:", sum(wprmX)
    print "Weighted PageRank Metric comparing x and y:", sum(wprmY)
    
    
#    LIPNER
#    binVars = bin_variance (x,y)
#    plot (binVars['x'],binVars['y'])
#    show()
#    clf()
#    
#    print "Harmonic Sum (for reference)", calcHarmonics (len(x))

    
def cluster_ases(as_scores, k):
    """Returns a k-classification list for the given `as_scores` scores-list.

    Parameters:
    - `as_scores`    A list of "scores" (centrality measures) for ASes
    - `k`            The number of clusters (int)

    Returns: A list of assignments, where the AS with score as_scores[i]
                belongs to cluster assignments[i] (0..(k-1)).
    """
    if 1 >= k:
        # Single cluster is trivial.
        # Bug fix: the original referenced the undefined name `as_score`
        # here, raising NameError whenever k <= 1.
        return [0] * len(as_scores)
    import numpy
    from scipy.cluster.vq import kmeans, whiten
    # Normalize features to unit variance before clustering.
    features = whiten(numpy.array(as_scores))
    codebook, distortion = kmeans(features, k)
    # For each AS, find the closest centroid, and assign the AS to that cluster
    def get_closest_cluster_index(val, book):
        # Index of the centroid in `book` with minimal squared distance to `val`.
        idx = 0
        min_sqr_dist = (val - book[0])**2
        for cidx, centroid in enumerate(book):
            sqr_dist = (val - centroid)**2
            if sqr_dist < min_sqr_dist:
                idx = cidx
                min_sqr_dist = sqr_dist
        return idx
    return [get_closest_cluster_index(element, codebook) for element in features]

def read_as_class(fn):
    """Read an AS classification file and return an ASN -> type-name dict.

    Parameters:
    - `fn`    Path to classification file, lines "asn|description|type"
              where type is one of '1'..'4' (string)

    Returns: dict mapping ASN (string) to one of 'EC' (edge customer),
    'STP' (small transit provider), 'LTP' (large transit provider),
    'CAHP' (content/access/hosting provider).
    """
    as_types_enum = {
        '1': 'EC',
        '2': 'STP',
        '3': 'LTP',
        '4': 'CAHP',
    }
    as_class = dict()
    with open(fn, 'r') as f:
        # Iterate the file object directly: xreadlines() is Python-2-only.
        for line in f:
            asn, desc, astype = line.strip().split('|')
            as_class[asn] = as_types_enum[astype]
    return as_class

def write_arff(trainset, arff_path, methods, g):
    """Write Weka ARFF files for the centrality measures of a graph.

    Parameters:
    - `trainset`     dict mapping ASN -> class label (as from read_as_class())
    - `arff_path`    Output path prefix; '.train.arff' and '.all.arff' are appended
    - `methods`      List of centrality-measure names; each must be a vertex
                     attribute on `g` (list of strings)
    - `g`            Graph whose vertices carry 'name' and the method attributes
                     (igraph.Graph)

    Writes two files: <arff_path>.all.arff with every AS (unknown class '?'),
    and <arff_path>.train.arff with only the ASes present in `trainset`,
    labeled with their class.
    """
    f = dict()
    f['train'] = open('%s.train.arff' % (arff_path), 'w')
    f['all'] = open('%s.all.arff' % (arff_path), 'w')
    # Bug fix: the file handles were never closed, leaking them and leaving
    # flushing to the garbage collector; close them deterministically.
    try:
        # ARFF header: relation, one numeric attribute per method, class enum.
        for ft in f:
            f[ft].write('@relation AS_%s\n\n' % (ft))
            f[ft].write('@attribute asn numeric\n')
            for method in methods:
                f[ft].write('@attribute %s numeric\n' % (method))
            f[ft].write('@attribute astype {EC,STP,LTP,CAHP}\n\n')
            f[ft].write('@data\n\n')
        for node in g.vs:
            asn = node['name']
            valstring = ','.join(['%f' % (node[method]) for method in methods])
            f['all'].write('%s,%s,?\n' % (asn, valstring))
            if asn in trainset:
                f['train'].write('%s,%s,%s\n' % (asn, valstring, trainset[asn]))
    finally:
        for handle in f.values():
            handle.close()

def main():
    """Build (or load cached) AS-level graphs, compute centrality measures,
    compare them, and export ARFF files for classification.

    `run_mode` selects the pipeline stage:
    - 'skip parsing':   load fully-processed graphs (with centrality
                        attributes) from cache/*.centrality.<code>.pkl
    - 'rels only':      build graphs from the relationships-derived ncol file
    - 'dimes and rels': build the undirected graph from DIMES data and
                        orient it using the relationships file
    """
    as_ncol_file_path = 'data/dimes_ases_2011_1_improved.ncol'
    rels_file_path = 'data/as-rel.2011.01.16.txt'
    ncol_rels_file_path = 'cache/as-rel.2011.01.16.ncol'
    as_class_train_path = 'data/AS.train'
    undirected_graph = directed_graph = None
    # Each method name is both an igraph.Graph method and the vertex
    # attribute it is stored under.
    methods = ['degree','betweenness','closeness','shell_index','evcent','constraint','pagerank'] # ['eccentricity']
    run_mode = 'skip parsing' # 'skip parsing' 'dimes and rels' 'rels only'
    # cache_code distinguishes cached centrality pickles between runs/configs.
    cache_code = '2'
    if run_mode == 'skip parsing':
        # NOTE(review): unpickling executes arbitrary code -- only load
        # caches this program itself wrote.
        with open('cache/directed.centrality.%s.pkl' % (cache_code), 'rb') as fpkl:
            logging.info('Loading cached version of directed graph with centrality measures')
            directed_graph = pickle.load(fpkl)
        with open('cache/undirected.centrality.%s.pkl' % (cache_code), 'rb') as fpkl:
            logging.info('Loading cached version of undirected graph with centrality measures')
            undirected_graph = pickle.load(fpkl)
        graphs = {'undirected': undirected_graph,
                  'directed': directed_graph }
    elif run_mode == 'rels only':
        logging.info('Transforming relationships file to Ncol file')
        #transform_relationships_file_to_ncol(rels_file_path, ncol_rels_file_path)
        logging.info('Read directed graph from Ncol file')
        directed_graph = igraph.read(ncol_rels_file_path, names=True, weights=False, directed=True)
        logging.info('Create undirected graph from directed Ncol file')
        undirected_graph = directed_graph.copy()
        undirected_graph.to_undirected()
#        directed_graph.vs['pagerank'] = directed_graph.pagerank(directed=True)
#        undirected_graph.vs['pagerank'] = undirected_graph.pagerank(directed=False)
    elif run_mode == 'dimes and rels':
        logging.info('Reading AS-level graph from file "%s"', as_ncol_file_path)
        undirected_graph = read_as_level_graph(as_ncol_file_path)
        logging.info('Finished building undirected graph (%d nodes, %d edges)',
                     undirected_graph.vcount(), undirected_graph.ecount())
#        logging.info('Reading relationships graph from file "%s"', rels_file_path)
#        relationships_graph = read_relationships_graph(rels_file_path)
#        logging.info('Finished building relationships graph (%d nodes, %d edges)',
#                     relationships_graph.vcount(), relationships_graph.ecount())
        logging.info('Building relationships dictionary')
        rels = get_relationships(rels_file_path, undirected_graph.vs)
        logging.info('Finished building relationships dictionary')
        logging.info('Adding directions to AS-level links using relationships graph')
        directed_graph = add_directions(undirected_graph, rels)
#        directed_graph = add_directions_to_asn_graph(undirected_graph, relationships_graph)
        logging.info('Finished adding directions to AS-level graph (%d nodes, %d edges, %d heuristics)',
                     directed_graph.vcount(), directed_graph.ecount(), directed_graph['heuristics'])
    if run_mode != 'skip parsing':
        # Compute every centrality measure on both graphs and cache the
        # results, so later runs can use 'skip parsing'.
#        from multiprocessing import Pool
#        pool = Pool(processes=4)
#        results = dict()
        graphs = {'undirected': undirected_graph,
                  'directed': directed_graph }
        for graph in graphs:
            for method in methods:
                logging.info('Running %s on %s AS-level graph' % (method, graph))
                graphs[graph].vs[method] = getattr(graphs[graph], method)()
            logging.info('Caching %s graph with centrality measures' % (graph))
            with open('cache/%s.centrality.%s.pkl' % (graph, cache_code), 'wb') as fpkl:
                pickle.dump(graphs[graph], fpkl)
#        results['pagerank'] = pool.apply_async(undirected_graph.pagerank) #, (), directed=False)
#        results['degree'] = pool.apply_async(undirected_graph.degree)
#        results['betweenness'] = pool.apply_async(undirected_graph.betweenness) #, (), directed=False)
#        results['shell_index'] = pool.apply_async(undirected_graph.shell_index)
#        results['closeness'] = pool.apply_async(undirected_graph.closeness)
#        results['evcent'] = pool.apply_async(undirected_graph.evcent)
#    #    results['eccentricity'] = pool.apply_async(undirected_graph.eccentricity)
#        results['constraint'] = pool.apply_async(undirected_graph.constraint)
#        for result in results:
#            undirected_graph.vs[result] = results[result].get()
#        logging.info('Running PageRank on directed graph')
#        directed_graph.vs['pagerank'] = directed_graph.pagerank(directed=True)

    ## TODO!
    
#    f = open('tmpdegree.txt', 'w')
#    for item in undirected_graph.vs['degree']:
#        print>>f, item
#    f = open('tmpbetween.txt', 'w')
#    for item in undirected_graph.vs['betweenness']:
#        print>>f, item
#    f = open('tmpshell.txt', 'w')
#    for item in undirected_graph.vs['shell_index']:
#        print>>f, item
       
    # Align the two PageRank vectors by node name, so index i refers to the
    # same AS in both lists.
    undir_pr = dict()
    dir_pr = dict()
    for v in undirected_graph.vs:
        undir_pr[v['name']] = v['pagerank']
    for v in directed_graph.vs:
        dir_pr[v['name']] = v['pagerank']
    undir_pr_list = []
    dir_pr_list = []
    for node_name in undir_pr.keys():
        undir_pr_list.append(undir_pr[node_name])
        dir_pr_list.append(dir_pr[node_name])
    logging.info('Comparing PageRank for undirected vs. directed graphs')
#    compare_centrality_measures (undirected_graph.vs['pagerank'],undirected_graph.vs['pagerank'],'undirected')
#    logging.info('Comparing PageRank for undirected vs. degree')
    compare_centrality_measures (undir_pr_list,dir_pr_list,'directed')
    logging.info('Comparing PageRank for undirected vs. degree')
    compare_centrality_measures (undirected_graph.vs['pagerank'],undirected_graph.vs['degree'],'degree')
    logging.info('Comparing PageRank for undirected vs. betweenness')
    compare_centrality_measures (undirected_graph.vs['pagerank'],undirected_graph.vs['betweenness'],'betweenness')
    logging.info('Comparing PageRank for undirected vs. shell index')
    compare_centrality_measures (undirected_graph.vs['pagerank'],undirected_graph.vs['shell_index'],'shell')
    logging.info('Comparing PageRank for undirected vs. closeness')
    compare_centrality_measures (undirected_graph.vs['pagerank'],undirected_graph.vs['closeness'],'closeness')
    logging.info('Comparing PageRank for undirected vs. evcent')
    compare_centrality_measures (undirected_graph.vs['pagerank'],undirected_graph.vs['evcent'],'evcent')
    logging.info('Comparing PageRank for undirected vs. constraint')
    compare_centrality_measures (undirected_graph.vs['pagerank'],undirected_graph.vs['constraint'],'constraint')

                                 
    logging.info('Reading AS classification train-set')
    train_set = read_as_class(as_class_train_path)
    for graph in graphs:
        write_arff(train_set, 'data/%s.AS' % (graph), methods, graphs[graph])
        
    logging.info('Check for correlation between AS classification and PageRank')
    ## TODO...
    
    logging.info('Check whether undirected PageRank can be used for inferring link direction')
    ## TODO...

# Script entry point: propagate main()'s return value as the exit status.
if __name__ == '__main__':
    sys.exit(main())
