# Keep shared variables, functions in here
import sys, os, string, math, datetime, calendar

# Path to the bzip2 binary used for (de)compressing archive files.
bzip2 = '/usr/bin/bzip2'
# 8 hours, in seconds — presumably a timezone offset applied to archive
# timestamps; TODO confirm against the code that uses it (not in this file).
time_shift = 8*60*60

# Stroke width for drawn polygons — consumer not visible in this file; verify.
POLY_STROKE_WIDTH = 3.0

# report file directory layout, rooted at `root`
root = '/bigdrive/routing_forensics'
archive_dir = '%s/archives' % root              # raw BGP update archives (see get_archive_list)
data_dir = '%s/prefixanalyzer/prefix' % root    # per-prefix input data
output_dir = '%s/prefixanalyzer/output' % root  # analysis output
match_dir = '%s/prefixanalyzer/matches' % root  # monthly match files (see get_match_list)
log_dir = '%s/prefixanalyzer/logs' % root       # log files

# External plotting/graphing tools.
gnuplot_dir = '/usr/bin'   # directory containing the gnuplot binary
dot_path = '/usr/bin/dot'  # Graphviz `dot` executable


# List of the current monitors that
# RouteViews is using, the main one being route-views2.
# A list of the peers they peer with is available from:
# http://routeviews.org/peers/
# The RIPE list is available at
# http://www.ris.ripe.net/peerlist/rrcNN.html
# where NN is the peer number.
ripe_monitors = [
    'rrc00',
    'rrc01',
    'rrc02',
    'rrc03',
    'rrc04',
    'rrc05',
    'rrc06',
    'rrc07',
    'rrc08',
    'rrc09',
    'rrc10']

rv_monitors = [
    'route-views.paix', 
    'route-views.wide', 
    'route-views.linx',
    'route-views.eqix',
    'route-views2',
    'oix-route-views'  # the earliest one, from 1997.11
]

# All known monitors, RouteViews first, then RIPE RIS.
monitors = rv_monitors + ripe_monitors

#########################################
# INPUT: peer list files, per monitor   #
#########################################
# Each map below is keyed by monitor name (see `monitors`) and gives a
# relative path to the corresponding input file.
# NOTE: indentation normalized to 4 spaces — the originals mixed tabs
# and spaces; values are unchanged.

peer_list = {
    'route-views2': 'peers/route-views2.txt',
    'route-views.paix': 'peers/route-views.paix.txt',
    'route-views.wide': 'peers/route-views.wide.txt',
    'route-views.linx': 'peers/route-views.linx.txt',
    'rrc00': 'peers/rrc00.txt',
}

instability_peer_list = {
    'route-views2': 'peers/route-views2.instability.txt',
    'rrc00': 'peers/rrc00.instability.txt',
}

instability_prefix_peer_list = {
    'route-views2': 'peers/route-views2.normal_prefix_selectedones.txt',
    'rrc00': 'peers/rrc00.normal_prefix.txt',
}

prefix_list = {
    'route-views2': 'prefix_lists/route-views2_prefix.txt',
    'rrc00': 'prefix_lists/rrc00_prefix.txt',
}

interval_peer_list = {
    'route-views2': 'peers/route-views2.interval.txt',
    'rrc00': 'peers/rrc00.interval.txt',
}

# Event-type codes for per-prefix routing instabilities.  The classification
# is borrowed from Labovitz's paper; our only refinement is splitting AADup
# and WADup into two subtypes each.
instability_event_num = 8  # number of all event types

ErrorEventType = -1  # event could not be classified
WWDup = 0            # duplicate withdrawal
AADupType1 = 1       # duplicate announcement, all attributes identical
AADupType2 = 2       # same ASPATH and next-hop, but different in attributes
AADiff = 3           # implicit withdrawal
WADupType1 = 4       # announcement after withdrawal, all identical
WADupType2 = 5       # same ASPATH and next-hop, but different in attributes
WADiff = 6           # different route announced after a withdrawal
AW = 7               # announcement followed by withdrawal

# Name -> code, built from the constants above so the table and the
# constants can never drift apart.
instabilities = {
    'ErrorEventType': ErrorEventType,
    'WWDup': WWDup,
    'AADupType1': AADupType1,
    'AADupType2': AADupType2,
    'AADiff': AADiff,
    'WADupType1': WADupType1,
    'WADupType2': WADupType2,
    'WADiff': WADiff,
    'AW': AW,
}

# Code -> name: the exact inverse of `instabilities`.
instabilities_reverse = dict((code, name)
                             for (name, code) in instabilities.items())

# Named groups of event codes; 'AADup' and 'WADup' aggregate their subtypes.
inst_groups = {
    'WWDup': (WWDup,),
    'AADupType1': (AADupType1,),
    'AADupType2': (AADupType2,),
    'AADup': (AADupType1, AADupType2),
    'AADiff': (AADiff,),
    'WADupType1': (WADupType1,),
    'WADupType2': (WADupType2,),
    'WADup': (WADupType1, WADupType2),
    'WADiff': (WADiff,),
    'AW': (AW,),
}

# Power-of-two histogram bin edges for inter-arrival times, in seconds:
# 1, 2, 4, ... up to the largest power of two <= 16 hours (2**15 = 32768 s,
# 16 bins in total).  The original while loop leaked its counter `i` into
# the module namespace; the temporary is now deleted after use.
histogram_bins = []
_bin_edge = 1
while _bin_edge <= 16*60*60:
    histogram_bins.append(_bin_edge)
    _bin_edge = _bin_edge*2
del _bin_edge  # keep the shared module namespace clean

def gnuplot_data(output, script, data, filename=None, dst_dir='', write_data=1):
    """Write a data file and a gnuplot script, then pipe the script to gnuplot.

    output     -- graph file path, relative to graph_dir (used for 'set output')
    script     -- gnuplot script text; occurrences of 'TMPFILE' are replaced
                  with the data file path
    data       -- iterable of rows; each row's values are space-joined per line
    filename   -- base name for the .dat/.plt files (a temp name if omitted)
    dst_dir    -- subdirectory under report_dir/graph_dir for the files
    write_data -- when true, (re)write the data file from `data`

    NOTE(review): relies on module globals `graph_dir` and `report_dir`,
    which are NOT defined in this file — presumably injected by the
    importing script; confirm before standalone use.
    """
    dirname = os.path.dirname('%s/%s' % (graph_dir, output))
    if not os.path.exists(dirname):
        os.makedirs(dirname, mode=0o775)
    # NOTE(review): os.tempnam() is race-prone (and gone in Python 3);
    # kept only for compatibility with this Python 2 code base.
    tmp_file = os.tempnam()
    if not filename:
        filename = tmp_file
    data_file = '%s/%s/%s.dat' % (report_dir, dst_dir, filename)
    gnuplot_file = '%s/%s/%s.plt' % (graph_dir, dst_dir, filename)
    if write_data:
        # BUG FIX: the original passed 0664 as open()'s *buffering* argument
        # (open(path, mode, buffering)); the intended permissions are now
        # applied explicitly with chmod.
        fh = open(data_file, 'w')
        for line in data:
            fh.write(' '.join([str(v) for v in line]) + '\n')
        fh.close()
        os.chmod(data_file, 0o664)
    script = script.replace('TMPFILE', data_file)
    # Prepend a 'set output' directive unless the script already has one.
    if 'set output' not in script:
        script = 'set output "%s/%s/%s"\n' % (graph_dir, dst_dir, output) + script
    # BUG FIX: the .plt file handle was never closed (and had the same
    # buffering-argument bug), so the script on disk could be incomplete.
    fh = open(gnuplot_file, 'w')
    fh.write(script)
    fh.close()
    os.chmod(gnuplot_file, 0o664)
    # Feed the same script to a gnuplot subprocess and echo its output.
    stdin, stdout = os.popen4('gnuplot')
    stdin.write(script)
    stdin.write('\nquit\n')
    stdin.close()
    sys.stdout.write(stdout.read())
    stdout.close()
    os.wait()


def histogram_bin(inter_arrival):
    """Map an inter-arrival time (seconds) onto its power-of-two bin edge
    from the module-level `histogram_bins` table.

    Values at or beyond the largest edge clamp to the last bin; values <= 0
    clamp to the first bin (1 second).
    """
    if inter_arrival >= histogram_bins[-1]:
        return histogram_bins[-1]
    if inter_arrival <= 0:
        return histogram_bins[0]
    try:
        # floor(log2(x)) selects the edge 2**k with 2**k <= x < 2**(k+1)
        return histogram_bins[int(math.log(inter_arrival, 2))]
    except (ValueError, OverflowError, IndexError, TypeError):
        # BUG FIX: was a bare `except:` that swallowed every exception,
        # including KeyboardInterrupt/SystemExit; now only arithmetic and
        # lookup failures fall back to the smallest bin (value 1, as before).
        print('Error finding bin for %s' % inter_arrival)
        return histogram_bins[0]

def readData(filename):
    """Read a whitespace-delimited text file into a list of token lists.

    Returns None (not []) when `filename` is not a regular file — callers
    are expected to check the result before iterating (original contract).
    """
    if not os.path.isfile(filename):
        return None
    data = []
    # str methods replace the string-module functions removed in Python 3.
    fh = open(filename, 'r')
    try:
        for line in fh:
            data.append(line.rstrip().split())
    finally:
        # BUG FIX: the file handle was never closed.
        fh.close()
    return data

def combineData(dirs, filename):
    """Concatenate the rows of '<dir>/<filename>.dat' across `dirs`.

    Each whitespace-delimited line with more than one token becomes a row
    (list of string tokens); blank and single-token lines are skipped.
    Missing directories or files are silently ignored (best-effort merge).
    """
    data = []
    for d in dirs:
        # Single existence check on the full path replaces the original's
        # redundant dir-then-file-then-file checks.
        path = '%s/%s.dat' % (d, filename)
        if not os.path.exists(path):
            continue
        fh = open(path, 'r')
        try:
            for line in fh:
                point = line.split()
                if len(point) > 1:
                    data.append(point)
        finally:
            fh.close()
    return data

def get_archive_list(start_date, end_date, monitor):
    """Return the full paths of all hourly *filtered* text archives needed
    to cover [start_date, end_date] (inclusive), stepping hour by hour.

    `monitor` is currently unused: the filtered archives live directly
    under archive_dir/<YYYY.MM>/filter.  It is kept for signature
    compatibility with get_plain_list and existing callers.

    Raises IOError when a month's filter/ directory is missing.
    """
    files = []
    current = start_date
    while current <= end_date:
        working_dir = '%s/%04d.%02d/filter' % (
            archive_dir, current.year, current.month)
        # BUG FIX: `assert(cond, msg)` asserts a non-empty 2-tuple, which is
        # always true; a missing directory now fails loudly instead.
        if not os.path.isdir(working_dir):
            raise IOError('missing archive directory: %s' % working_dir)
        hour_prefix = 'updates.%04d%02d%02d.%02d' % (
            current.year, current.month, current.day, current.hour)
        for f in os.listdir(working_dir):
            if f.startswith(hour_prefix):
                files.append('%s/%s' % (working_dir, f))
        current += datetime.timedelta(hours=1)
    return files

def get_plain_list(start_date, end_date, monitor):
    """Return the full paths of all hourly *plain* text archives for
    `monitor` covering [start_date, end_date] (inclusive), hour by hour.

    Raises IOError when a month's plain/ directory is missing.
    """
    files = []
    current = start_date
    while current <= end_date:
        working_dir = '%s/%s/%04d.%02d/plain' % (
            archive_dir, monitor, current.year, current.month)
        # BUG FIX: `assert(cond, msg)` asserts a non-empty 2-tuple, which is
        # always true; a missing directory now fails loudly instead.
        if not os.path.isdir(working_dir):
            raise IOError('missing archive directory: %s' % working_dir)
        hour_prefix = 'updates.%04d%02d%02d.%02d' % (
            current.year, current.month, current.day, current.hour)
        for f in os.listdir(working_dir):
            if f.startswith(hour_prefix):
                files.append('%s/%s' % (working_dir, f))
        current += datetime.timedelta(hours=1)
    return files

def get_match_list(start_date, end_date, file_prefix):
    """Return the existing per-month match files between start_date and
    end_date, inclusive of both end months.

    Files live under match_dir/<prefix-head>/<file_prefix>/YYYY-M.txt
    (month not zero-padded, matching the original format).
    """
    files = []
    root_folder = '%s/%s/%s' % (match_dir, file_prefix.split('.', 2)[0], file_prefix)
    # BUG FIX: the original advanced by `daysInMonth` days from an arbitrary
    # start day, which can overshoot a short month (e.g. Jan 31 + 31 days
    # lands on Mar 3 and skips February's file) and could drop the final
    # month at the boundary.  Step by calendar month instead.
    year, month = start_date.year, start_date.month
    while (year, month) <= (end_date.year, end_date.month):
        path = '%s/%s-%s.txt' % (root_folder, year, month)
        if os.path.isfile(path):
            files.append(path)
        if month == 12:
            year, month = year + 1, 1
        else:
            month += 1
    return files

def get_plain_match_list(start_date, end_date, file_prefix):
    """Return the existing per-month *plain* match files between start_date
    and end_date, inclusive of both end months.

    Files live under match_dir/<prefix-head>/<file_prefix>/plain-YYYY-M.txt
    (month not zero-padded, matching the original format).
    """
    files = []
    root_folder = '%s/%s/%s' % (match_dir, file_prefix.split('.', 2)[0], file_prefix)
    # BUG FIX: same month-skipping stepping bug as get_match_list — advancing
    # by the current month's day count can jump over a short month.  Step by
    # calendar month instead.
    year, month = start_date.year, start_date.month
    while (year, month) <= (end_date.year, end_date.month):
        path = '%s/plain-%s-%s.txt' % (root_folder, year, month)
        if os.path.isfile(path):
            files.append(path)
        if month == 12:
            year, month = year + 1, 1
        else:
            month += 1
    return files
        
def decimalIp(dotDecimalIp):
    """Convert a dotted-quad IPv4 string (e.g. '1.2.3.4') to a 32-bit int.

    Raises ValueError when the string is not four dot-separated fields or a
    field is not an integer.  (The parameter name shadows the sibling
    function dotDecimalIp inside this body; kept for caller compatibility.)
    """
    ipBlocks = dotDecimalIp.split(".")
    # BUG FIX: input validation was an `assert`, which is silently stripped
    # when Python runs with -O; raise explicitly instead.
    if len(ipBlocks) != 4:
        raise ValueError('not a dotted-quad IPv4 address: %r' % (dotDecimalIp,))
    decIp = 0
    for i in range(0, 4):
        decIp += int(ipBlocks[i]) << (24 - 8 * i)
    return decIp

def dotDecimalIp(decimalIp):
    """Convert a 32-bit integer IPv4 address to its dotted-quad string.

    BUG FIX: the original passed integer octets straight to '.'.join(),
    which raises TypeError on every call; octets are now stringified.
    Masking with 0xff per 8-bit shift is equivalent to the original
    subtract-the-high-bits loop for 32-bit values.
    """
    octets = []
    for shift in (24, 16, 8, 0):
        octets.append(str((decimalIp >> shift) & 0xff))
    return '.'.join(octets)
    
def mean(values):
    """Return the arithmetic mean of `values` (items coercible to float).

    Returns 0 for an empty or None sequence, matching the original contract.
    The original accumulated into a local named `sum`, shadowing the
    builtin; math.fsum also gives a numerically stable summation.
    """
    if not values:
        return 0
    return math.fsum(float(v) for v in values) / len(values)
