#!/usr/bin/env python
import os
import re
import csv
import itertools
from optparse import OptionParser
from fabric.context_managers import lcd
from fabric.operations import local

# Command-line interface:
#   -c  path to the capreader source directory (required; asserted in main)
#   -l  optional file listing additional pcap paths, one per line
#   -o  output file for the per-pcap summary
#   -v  verbose/debug output
parser = OptionParser()
parser.add_option('-c', dest='capreader', help="path to capreader directory")
# Fix: typo "containting" -> "containing" in the user-visible help text.
parser.add_option('-l', dest='list', help="path to a file containing a list of pcap files")
parser.add_option('-o', dest='output', help="output file for this run")
parser.add_option('-v', '--verbose', action="store_true", help="verbose, debug information")
# Last non-zero flow count seen by parse_line(); substituted when a log
# line reports zero flows so downstream division never hits zero.
last_non_zero_flow = 1
# Working-copy location of the flowtool SVN checkout.
flowtool_path = '/tmp/flowtool'


def make_clean_make(directory):
    """Rebuild the project in `directory` by running 'make clean' then 'make'."""
    with lcd(directory):
        for make_cmd in ('make clean', 'make'):
            local(make_cmd)


def parse_line(memory_line, init_memory):
    """Parse a single flowtool memory log line.

    Expected line shape: "Memory: <bytes> ... Conns <count> ...".

    Args:
        memory_line: one line of the .pkt.log file.
        init_memory: baseline memory value subtracted from the reported one.

    Returns:
        dict with 'current_memory' (delta from init_memory) and 'flows',
        or None when the line does not match the expected format.
    """
    global last_non_zero_flow
    # Fix: raw string for the regex so '\d' is not an (invalid) string escape.
    m = re.match(r'^Memory: (\d+).*?Conns (\d+).*?$', memory_line)
    if not m:
        return None
    result = {
        'current_memory': int(m.group(1)) - init_memory,
        'flows': int(m.group(2)),
    }
    if result['flows']:
        last_non_zero_flow = result['flows']
    else:
        # A zero flow count would cause division by zero downstream; reuse
        # the most recent non-zero count instead.
        result['flows'] = last_non_zero_flow
    return result


def parse_capfile_pkt_log(pcap_file):
    """Parse the '<pcap_file>.pkt.log' output written by flowtool.

    The first line holds the initial memory reading; every subsequent
    parsable line becomes a per-packet result relative to that baseline.

    Args:
        pcap_file: path of the pcap; '.pkt.log' is appended to find the log.

    Returns:
        (results, init_memory): list of dicts from parse_line(), and the
        baseline memory value taken from the first line (0 if unparsable).
    """
    with open(pcap_file + '.pkt.log', 'r') as f:
        # First line contains the initial memory reading.
        init_line = parse_line(f.readline(), 0)
        # Fix: parse_line() returns None on a malformed line; the old code
        # would raise AttributeError on None.get(). Fall back to 0 instead.
        init_memory = init_line.get('current_memory', 0) if init_line else 0
        # Collect every parsable line as a per-packet result.
        results = []
        for line in f:
            result = parse_line(line, init_memory)
            if result is not None:
                results.append(result)
    return results, init_memory


def average_results(results):
    """Compute per-flow memory statistics over parsed packet results.

    Args:
        results: list of dicts with 'current_memory' and 'flows' keys
                 (flows is guaranteed non-zero by parse_line).

    Returns:
        (max_memory, average_memory) of the per-packet memory/flow ratio.
        Fix: returns (0, 0) for an empty list instead of raising
        ZeroDivisionError (e.g. when a log file had no parsable lines).
    """
    if not results:
        return 0, 0
    per_packet = [result['current_memory'] / result['flows'] for result in results]
    # The original seeded the maximum with 0, so 0 remains the floor even
    # if every per-packet value were negative.
    max_memory = max([0] + per_packet)
    average_memory = sum(per_packet) / len(per_packet)
    return max_memory, average_memory


def checkout_flowtool():
    """Check out (or update) the flowtool working copy at flowtool_path.

    Runs 'svn up' when the working copy already exists, otherwise creates
    the directory and performs a fresh checkout.
    """
    if os.path.exists(flowtool_path):
        svn_command = 'svn up'
    else:
        # Fix: use the flowtool_path constant instead of a second,
        # hard-coded copy of '/tmp/flowtool'.
        local('mkdir %s' % flowtool_path)
        svn_command = 'svn co svn://dev-svn.klw.ca.int.prnw.net/netcore/trunk/flowtool/ .'
    # NOTE(review): credentials are hard-coded in source; consider moving
    # them to environment variables or a config file.
    svn_command += ' --no-auth-cache --non-interactive --username %s --password %s' % ('guest', 'vineyard')

    with lcd(flowtool_path):
        local(svn_command)
        
def create_log_dir(log_file_dir):
    if not os.path.exists(log_file_dir):
        try:
            os.mkdir(log_file_dir)
        except Exception, error_msg:
            print "Directory already exists: %s" % log_file_dir
            print error_msg
            
def log_file_cleanup(log_file_dir):
    if os.path.exists(log_file_dir):
        try:
            for files in os.listdir(log_file_dir):
                log_file = os.path.join(log_file_dir, files)
                if os.path.exists(log_file):
                    try:
                        os.remove(log_file)
                    except Exception, error_msg:
                        print error_msg
        except Exception, error_msg:
            print error_msg

if __name__ == "__main__":
    # Positional args (after options) are pcap file paths to measure.
    (options, args) = parser.parse_args()
    # NOTE(review): assert is stripped under `python -O`; an explicit check
    # with a clear error message would be more robust for input validation.
    assert os.path.exists(options.capreader)
    
    # Scratch directory on tmpfs for flowtool's per-pcap log output.
    log_file_dir = "/dev/shm/flowtool_log_dir/"
    create_log_dir(log_file_dir)

    # Fetch/update flowtool from SVN, then build flowtool and capreader.
    checkout_flowtool()
    make_clean_make(flowtool_path)
    make_clean_make(options.capreader)


    # Command template: the doubled braces leave a literal {pcap_file}
    # placeholder after this .format(), filled in per pcap in the loop below.
    command = '{flowtool} -D {capreader_directory} -M -V {{pcap_file}} -O {log_file_dir}'.format(
        flowtool=os.path.join(flowtool_path, 'flowtool'),
        capreader_directory=options.capreader,
        log_file_dir=log_file_dir
    )

    # Optionally extend the pcap list from the -l file, one path per line,
    # skipping blank lines and collapsing internal whitespace.
    if options.list:
        with open(options.list, 'r') as f:
            for line in f:
                cleaned_line = " ".join(line.split())
                if cleaned_line:
                    args.append(cleaned_line)

    all_results = []
    for pcap_file in args:
        full_pcap_path = os.path.abspath(pcap_file)
        # flowtool names its log after the pcap's basename inside log_file_dir.
        pcap = pcap_file.split("/")
        pcap = pcap[-1]
        log_file_path = os.path.join(log_file_dir, pcap)
        if options.verbose:
            print('Running pcap: %s' % full_pcap_path)
        print pcap_file
        assert os.path.exists(pcap_file)

        local(command.format(pcap_file=pcap_file))    # runs capfile through flowtool
        # Parse the .pkt.log that flowtool just wrote for this pcap.
        pcap_results, init_memory = parse_capfile_pkt_log(log_file_path)

        max_memory, avg_memory = average_results(pcap_results)
        all_results.append(dict(pcap_file=pcap_file, avg_memory=avg_memory, max_memory=max_memory))

        if options.verbose:
            print('Initial Memory: %s' % init_memory)
            for i, result in enumerate(pcap_results):
                current_mem = result['current_memory']
                flows = result['flows']
                print('Packet: %s, Current Memory: %s, Flows: %s, Memory/Flow: %s' %
                      (i + 1, current_mem, flows, current_mem/flows))
            print('Average Memory: %s, Max Memory: %s' % (avg_memory, max_memory))
            print('End of pcap file.\n\n')

    # Write a CSV-like summary: "pcap_file,avg_memory,max_memory" per line.
    with open(options.output, 'wb') as output:
        for result in all_results:
            line = result['pcap_file'] + ',' + str(result['avg_memory']) + ',' \
                   + str(result['max_memory']) + '\n'
            output.write(line)
            
    log_file_cleanup(log_file_dir)
