import numpy as np
import csv, math
import sys, os
import matplotlib.pyplot as plt

##################################
# Setup
#
infinity = 10E10  # sentinel FCT for unfinished flows; NOTE: 10E10 == 1e11, a large finite value, not float('inf')
# Usage print
def print_usage():
    """Print the command-line usage string for this script."""
    usage_message = "Usage: python analyze.py /path/to/run/folder"
    print(usage_message)

def export_cdf_to_pgfplotter_file(filename, xbins, yvals):
    """Write a CDF as a two-column "x y" text file readable by pgfplots.

    filename -- output path (overwritten if it already exists)
    xbins    -- bin edges (x column)
    yvals    -- cumulative values (y column); must match len(xbins)
    """
    assert len(xbins) == len(yvals)
    # Build all lines up front and write once: the old "+=" on a str was
    # quadratic in the number of entries.
    lines = ["x y \n"]
    lines.extend("{} {} \n".format(x, y) for x, y in zip(xbins, yvals))
    with open(filename, "w+") as f:
        f.writelines(lines)

def export_tcp_resends(filename, hopcounts, tcp_resend_per_flow):
    """Write per-hopcount TCP resend counts as a two-column "x y" file.

    filename            -- output path (overwritten if it already exists)
    hopcounts           -- x column values
    tcp_resend_per_flow -- y column values; must match len(hopcounts)
    """
    assert len(hopcounts) == len(tcp_resend_per_flow)
    # Join-once line building instead of quadratic string concatenation.
    lines = ["x y \n"]
    lines.extend("{} {} \n".format(h, r) for h, r in zip(hopcounts, tcp_resend_per_flow))
    with open(filename, "w+") as f:
        f.writelines(lines)


def read_tcp_resends(run_folder_path):
    """Return the number of 'real' TCP resends recorded in statistics.log.

    Parses <run_folder_path>/statistics.log for the TCP_RESEND_OCCURRED and
    PACKETS_DROPPED_AT_SOURCE counters (lines of the form "KEY: value") and
    returns resends minus source drops, floored at zero.
    """
    # os.path.join works whether or not run_folder_path ends with a
    # separator; the old "+" concatenation silently built a broken path
    # ("dirstatistics.log") when the trailing "/" was missing.
    filename = os.path.join(run_folder_path, "statistics.log")
    num_resends = 0
    num_drops_at_src = 0
    with open(filename, "r") as f:
        for line in f:
            if "TCP_RESEND_OCCURRED" in line:
                num_resends = int(line.split(':')[1])
            if "PACKETS_DROPPED_AT_SOURCE" in line:
                num_drops_at_src = int(line.split(':')[1])
    # Resends caused by source-side drops are subtracted out; never negative.
    return max(num_resends - num_drops_at_src, 0)

##################################
# Analyze flow completion
#
def analyze_flow_completion(run_folder_path, non_zero_flows=()):
    """Read <run_folder_path>/flow_completion.csv.log into per-flow tuples.

    Returns (flow_statistic_tuple, num_unfinished_flows): each entry is
    (source_id, target_id, sent_bytes, start_time, end_time, fct) where
    unfinished flows (completed == 'FALSE') get the module-level `infinity`
    sentinel as their FCT. Only (source, target) pairs contained in
    non_zero_flows are kept; everything else is skipped.
    Also prints 99/99.9/99.99th-percentile FCTs for the run.
    """
    # Hoist the filter into a set: with the list previously passed in,
    # membership was O(len(non_zero_flows)) per CSV row.
    flow_filter = set(non_zero_flows)
    flow_statistic_tuple = []
    num_unfinished_flows = 0
    with open(run_folder_path + '/flow_completion.csv.log') as file:
        reader = csv.reader(file)
        print("Reading in flow completion log file...")
        # CSV columns: 0 flow id, 1 source, 2 target, 3 sent bytes,
        # 4 total size, 5 start ns, 6 end ns, 7 duration ns, 8 completed flag.
        for row in reader:
            source_id = int(row[1])
            target_id = int(row[2])
            if (source_id, target_id) not in flow_filter:
                continue
            sent_bytes = int(row[3])
            start_time = int(row[5])
            end_time = int(row[6])
            if row[8] == 'FALSE':
                num_unfinished_flows += 1
                flow_statistic_tuple.append((source_id, target_id, sent_bytes, start_time, end_time, float(infinity)))
            elif row[8] == 'TRUE':
                duration = int(row[7])
                flow_statistic_tuple.append((source_id, target_id, sent_bytes, start_time, end_time, duration))
    fcts = sorted([x[5] for x in flow_statistic_tuple])
    # Guard: with no matching flows the percentile indexing would IndexError.
    if fcts:
        print("99.99 FCT for MLU: {} is {}".format(run_folder_path, fcts[int(0.9996 * len(fcts))]))
        print("99.9 FCT for MLU: {} is {}".format(run_folder_path, fcts[int(0.999 * len(fcts))]))
        print("99 FCT for MLU: {} is {}".format(run_folder_path, fcts[int(0.99 * len(fcts))]))
    return flow_statistic_tuple, num_unfinished_flows

##################################
# Analyze throughput
#
def analyze_throughput(run_folder_path, non_zero_flows=()):
    """Read <run_folder_path>/flow_completion.csv.log into throughput tuples.

    Returns (flow_statistic_tuple, num_unfinished_flows): each entry is
    (source_id, target_id, sent_bytes, start_time, end_time, throughput)
    with throughput = sent_bytes / duration (bytes per ns) for completed
    flows and 0. for unfinished ones. Only (source, target) pairs contained
    in non_zero_flows are kept.
    """
    # Hoist the filter into a set for O(1) membership per CSV row.
    flow_filter = set(non_zero_flows)
    flow_statistic_tuple = []
    num_unfinished_flows = 0
    with open(run_folder_path + '/flow_completion.csv.log') as file:
        reader = csv.reader(file)
        print("Reading in flow completion log file...")
        # CSV columns: 0 flow id, 1 source, 2 target, 3 sent bytes,
        # 4 total size, 5 start ns, 6 end ns, 7 duration ns, 8 completed flag.
        for row in reader:
            source_id = int(row[1])
            target_id = int(row[2])
            if (source_id, target_id) not in flow_filter:
                continue
            sent_bytes = int(row[3])
            start_time = int(row[5])
            end_time = int(row[6])
            if row[8] == 'FALSE':
                num_unfinished_flows += 1
                flow_statistic_tuple.append((source_id, target_id, sent_bytes, start_time, end_time, 0.))
            elif row[8] == 'TRUE':
                duration = int(row[7])
                flow_statistic_tuple.append((source_id, target_id, sent_bytes, start_time, end_time, float(sent_bytes) / duration))
    return flow_statistic_tuple, num_unfinished_flows


def analyze_throughput_binned_by_log_flow_size(flow_size_array, fct_array, nbins):
    """Bin flows by log10(flow size) and return per-bin 1st-percentile slowdown ratios.

    Returns (logged_bin, yvals): logged_bin is np.linspace(0, log10(max size),
    nbins); yvals[i] is the 1st percentile of the "theoretical-limit FCT /
    actual FCT" ratios of the flows landing in bin i (0 for empty bins).
    """
    assert len(flow_size_array) == len(fct_array)
    max_flow_size = max(flow_size_array)
    logged_bin = np.linspace(0, np.log10(max_flow_size), nbins)
    total_bins = len(logged_bin)
    # One list per bin, built directly (old code filled a [0]*n list with []s).
    binned_throughputs = [[] for _ in range(total_bins)]
    for flow_size, fct in zip(flow_size_array, fct_array):
        log_flow_size = np.log10(flow_size)
        # Ideal FCT model: 600 ns base latency plus serialization time at
        # 256 * 100 / 19 bits per ns -- TODO confirm these link-rate constants.
        theoretical_limit = 600. + (flow_size * 8 / ( 256 * 100. / 19 ))
        throughput = theoretical_limit / fct
        # Linear scan over half-open bins [edge[i], edge[i+1]); anything that
        # matches no earlier bin (including values below edge[0]) falls into
        # the last bin, mirroring the original binning behaviour.
        bin_index = total_bins - 1
        for index in range(total_bins - 1):
            if logged_bin[index] <= log_flow_size < logged_bin[index + 1]:
                bin_index = index
                break
        binned_throughputs[bin_index].append(throughput)

    yvals = [0] * total_bins
    for bin_num in range(total_bins):
        bin_values = binned_throughputs[bin_num]
        if not bin_values:
            continue
        # 1st percentile of the slowdown ratios in this bin.
        # (A dead no-op statement computing the 10th percentile was removed.)
        yvals[bin_num] = sorted(bin_values)[int(0.01 * len(bin_values))]
    return logged_bin, yvals




##################################
# Analyze port utilization
#
def analyze_port_utilization(run_folder_path='.', analysis_folder_path='.'):
    """Summarize port_utilization.csv.log and write port_utilization.statistics.

    run_folder_path      -- folder containing port_utilization.csv.log
    analysis_folder_path -- folder the .statistics key=value file is written to

    The original version referenced these two paths as undefined globals and
    raised NameError when called; they are now parameters defaulting to the
    current directory.
    """
    with open(run_folder_path + '/port_utilization.csv.log') as file:
        reader = csv.reader(file)

        # Column lists
        source_ids = []
        target_ids = []
        attached_to_server = []
        utilized_ns = []
        utilization = []
        utilization_server_ports = []
        utilization_non_server_ports = []
        num_server_port_zero = 0
        num_non_server_port_zero = 0

        print("Reading in port utilization log file...")

        # Read in column lists.
        # CSV columns: 0 source, 1 target, 2 attached-to-server flag (Y/N),
        # 3 utilized ns, 4 utilization fraction.
        for row in reader:
            # Validate BEFORE indexing: the old code checked len(row) only
            # after reading row[4], so malformed rows died with IndexError
            # instead of producing the intended diagnostic.
            if len(row) != 5:
                print("Invalid row: ", row)
                exit()
            source_ids.append(float(row[0]))
            target_ids.append(float(row[1]))
            attached_to_server.append(row[2] == 'Y')
            utilized_ns.append(float(row[3]))
            util = float(row[4])  # parse once instead of four times
            utilization.append(util)
            if row[2] == 'Y':
                utilization_server_ports.append(util)
                if util == 0:
                    num_server_port_zero += 1
            else:
                utilization_non_server_ports.append(util)
                if util == 0:
                    num_non_server_port_zero += 1

        print("Calculating statistics...")

        # General statistics (there is always a server port)
        statistics = {

            'all_port_num': len(source_ids),
            'all_port_unique_sources': len(set(source_ids)),
            'all_port_unique_targets': len(set(target_ids)),
            'all_port_mean_utilization': np.mean(utilization),
            'all_port_median_utilization': np.median(utilization),
            'all_port_std_utilization': np.std(utilization),
            'all_port_99th_utilization': np.percentile(utilization, 99),
            'all_port_99.9th_utilization': np.percentile(utilization, 99.9),

            'server_port_num': len(utilization_server_ports),
            'server_port_zero_num': num_server_port_zero,
            'server_port_mean_utilization': np.mean(utilization_server_ports),
            'server_port_median_utilization': np.median(utilization_server_ports),
            'server_port_std_utilization': np.std(utilization_server_ports),
            'server_port_99th_utilization': np.percentile(utilization_server_ports, 99),
            'server_port_99.9th_utilization': np.percentile(utilization_server_ports, 99.9)

        }

        # Only print non-server port statistics if they exist
        statistics['non_server_port_num'] = len(utilization_non_server_ports)
        if len(utilization_non_server_ports) > 0:
            statistics['non_server_ports_zero_num'] = num_non_server_port_zero
            statistics['non_server_port_mean_utilization'] = np.mean(utilization_non_server_ports)
            statistics['non_server_port_median_utilization'] = np.median(utilization_non_server_ports)
            statistics['non_server_port_std_utilization'] = np.std(utilization_non_server_ports)
            statistics['non_server_port_99th_utilization'] = np.percentile(utilization_non_server_ports, 99)
            statistics['non_server_port_99.9th_utilization'] = np.percentile(utilization_non_server_ports, 99.9)

        # Write key=value pairs, sorted by key.
        print('Writing to result file port_utilization.statistics...')
        with open(analysis_folder_path + '/port_utilization.statistics', 'w+') as outfile:
            for key, value in sorted(statistics.items()):
                outfile.write(str(key) + "=" + str(value) + "\n")

# Call analysis functions
#def main(run_folder_paths):

def get_name_from_mlu(mlu):
    """Format an MLU value to 3 decimals with '.' replaced by 'p' (e.g. 0.095 -> '0p095')."""
    return "{:.3f}".format(mlu).replace('.', 'p')

def get_name_from_ahc(ahc):
    """Format an AHC value to 1 decimal with '.' replaced by 'p' (e.g. 1.1 -> '1p1')."""
    return "{:.1f}".format(ahc).replace('.', 'p')

def normalized_cdf(array_of_values, nbins):
    """Build an empirical CDF over nbins histogram bins.

    Returns (bin_edges, cdf): bin_edges has nbins + 1 entries (from
    np.histogram) and cdf is the cumulative fraction of samples per bin,
    clamped to 1 and padded with a final 1. so both lists align.
    Also prints the min and max of the input for quick sanity checking.
    """
    counts, edges = np.histogram(array_of_values, bins=nbins, density=False)
    total = len(array_of_values)
    cumulative = list(np.cumsum([float(c) / total for c in counts]))
    # Clamp any floating-point overshoot above 1.
    cumulative = [min(v, 1.) for v in cumulative]
    cumulative.append(1.)
    print("min val : {}".format(min(array_of_values)))
    print("max val : {}".format(max(array_of_values)))
    return (list(edges), cumulative)

def analyze_diff_mlu(mlus, load_level, stats_file_output_dir):
    """Plot and export FCT and throughput CDFs for several MLU run folders.

    mlus                  -- MLU values; each maps to ./temp/diff_mlu/mlu<name>
    load_level            -- only referenced by commented-out alternate paths here
    stats_file_output_dir -- directory the pgfplots .txt exports are written to

    Draws one matplotlib figure with per-MLU FCT CDFs and one with per-MLU
    throughput CDFs, then blocks on plt.show().
    """
    nbins = 50
    markers = ['x', '+', 'd']
    # Collect (src, dst) pairs with non-zero demand; flows outside this set
    # are skipped by the analyzers. NOTE(review): assumes every MLU run shares
    # the traffic matrix of mlus[0] -- confirm.
    nonzero_flows = []
    with open("./temp/diff_mlu/mlu{}/traffic_nblock20.txt".format(get_name_from_mlu(mlus[0])), 'r') as f:
        for line in f:
            # Skip comment and blank lines of the traffic file.
            if line[0] == "#" or line[0] == "\n":
                continue
            linearr = line.split(',')
            src = int(linearr[1])
            dst = int(linearr[2])
            nonzero_flows.append((src, dst, ))


    run_path_names = ["./temp/diff_mlu/mlu{}".format(get_name_from_mlu(x))  for x in mlus]
    #run_path_names = ["./temp/diff_mlu/load_{}_uniform/mlu{}/".format(load_level, get_name_from_mlu(x))  for x in mlus]
    # ---- Figure 1: CDF of log10(FCT) for each MLU ----
    flow_statistics = []
    fig = plt.figure()
    legends = []
    index = 0
    nunfinishedflows_vector = []
    for run_path in run_path_names:
        flow_completion_tuple, num_unfinished_flows = analyze_flow_completion(run_path, non_zero_flows=nonzero_flows)
        nunfinishedflows_vector.append(num_unfinished_flows)
        flow_statistics.append(flow_completion_tuple)
        # x[-1] is the FCT field of the per-flow tuple; plot in log10 space.
        fct = [np.log10(x[-1]) for x in flow_completion_tuple]
        bins , hist = normalized_cdf(sorted(fct), nbins)
        fct_output_filename = stats_file_output_dir + "fct_diffmlu_mlu{}.txt".format(get_name_from_mlu(mlus[index]))
        export_cdf_to_pgfplotter_file(fct_output_filename, bins, hist)
        plt.plot(bins, hist, marker=markers[index % len(markers)], linewidth=0.7)

        legends.append("MLU : {}".format(mlus[index]))
        index += 1
    print(nunfinishedflows_vector)
    plt.xlabel("Flow Completion Time in nanoseconds (FCT)")
    plt.ylabel("CDF")
    plt.ylim(ymax=1.1, ymin=0.)
    plt.legend(legends)

    # ---- Figure 2: CDF of per-flow throughput for each MLU ----
    flow_statistics = []
    fig = plt.figure()
    legends = []
    index = 0
    nunfinishedflows_vector = []
    tcp_resends = []
    for run_path in run_path_names:
        # Trailing "/" needed because read_tcp_resends concatenates the path.
        tcp_resends.append(read_tcp_resends(run_path + "/"))
        flow_completion_tuple, num_unfinished_flows = analyze_throughput(run_path, non_zero_flows=nonzero_flows)
        nunfinishedflows_vector.append(num_unfinished_flows)
        flow_statistics.append(flow_completion_tuple)
        # Here x[-1] is the throughput field returned by analyze_throughput.
        fct = [x[-1] for x in flow_completion_tuple]
        bins , hist = normalized_cdf(sorted(fct), nbins)
        throughput_output_filename = stats_file_output_dir + "tput_diffmlu_mlu{}.txt".format(get_name_from_mlu(mlus[index]))
        export_cdf_to_pgfplotter_file(throughput_output_filename, bins, hist)
        #bins = [0,] + bins
        #hist = [0,] + hist
        plt.plot(bins, hist, marker=markers[index % len(markers)], linewidth=0.7)

        legends.append("MLU : {}".format(mlus[index]))
        index += 1
    #export_tcp_resends("./temp/diff_mlu/diff_mlu_num_tcp_resends.txt".format(load_level), mlus, [float(x)/99679 for x in tcp_resends])
    print(nunfinishedflows_vector)
    plt.xlabel("Throughput")
    plt.ylabel("CDF")
    plt.ylim(ymax=1.1, ymin=0.)
    plt.legend(legends)
    plt.show()
    return

def analyze_diff_ahc(ahcs, load_level, stats_file_output_dir):
    """Plot and export FCT and throughput CDFs for several AHC run folders.

    ahcs                  -- AHC values; each maps to ./temp/diff_ahc/load_<load_level>_toe/ahc<name>/
    load_level            -- load level used to build the run-folder paths
    stats_file_output_dir -- directory the pgfplots .txt exports are written to

    Draws one matplotlib figure with per-AHC FCT CDFs and one with per-AHC
    throughput CDFs, exports normalized TCP resend counts, then blocks on
    plt.show().
    """
    nbins = 50
    markers = ['x', '+', 'd']
    # Collect (src, dst) pairs with non-zero demand; flows outside this set
    # are skipped by the analyzers.
    nonzero_flows = []
    with open("./temp/diff_ahc/load_{}_toe/traffic_nblock20.txt".format(load_level), 'r') as f:
        for line in f:
            # Skip comment and blank lines of the traffic file.
            if line[0] == "#" or line[0] == "\n":
                continue
            linearr = line.split(',')
            src = int(linearr[1])
            dst = int(linearr[2])
            nonzero_flows.append((src, dst, ))


    run_path_names = ["./temp/diff_ahc/load_{}_toe/ahc{}/".format(load_level, get_name_from_ahc(x))  for x in ahcs]
    #run_path_names = ["./temp/diff_ahc/load_{}_uniform/ahc{}/".format(load_level, get_name_from_ahc(x))  for x in ahcs]
    # ---- Figure 1: CDF of log10(FCT) for each AHC ----
    flow_statistics = []
    fig = plt.figure()
    legends = []
    index = 0
    nunfinishedflows_vector = []
    for run_path in run_path_names:
        flow_completion_tuple, num_unfinished_flows = analyze_flow_completion(run_path, non_zero_flows=nonzero_flows)
        nunfinishedflows_vector.append(num_unfinished_flows)
        flow_statistics.append(flow_completion_tuple)
        # x[-1] is the FCT field of the per-flow tuple; plot in log10 space.
        fct = [np.log10(x[-1]) for x in flow_completion_tuple]
        bins , hist = normalized_cdf(sorted(fct), nbins)
        fct_output_filename = stats_file_output_dir + "fct_diffahc_ahc{}.txt".format(get_name_from_ahc(ahcs[index]))
        export_cdf_to_pgfplotter_file(fct_output_filename, bins, hist)
        plt.plot(bins, hist, marker=markers[index % len(markers)], linewidth=0.7)

        legends.append("AHC : {}".format(ahcs[index]))
        index += 1
    print(nunfinishedflows_vector)
    plt.xlabel("Flow Completion Time in nanoseconds (FCT)")
    plt.ylabel("CDF")
    plt.ylim(ymax=1.1, ymin=0.)
    plt.legend(legends)

    # ---- Figure 2: CDF of per-flow throughput for each AHC ----
    flow_statistics = []
    fig = plt.figure()
    legends = []
    index = 0
    nunfinishedflows_vector = []
    tcp_resends = []
    for run_path in run_path_names:
        # run_path already ends with "/", matching read_tcp_resends' "+" join.
        tcp_resends.append(read_tcp_resends(run_path))
        flow_completion_tuple, num_unfinished_flows = analyze_throughput(run_path, non_zero_flows=nonzero_flows)
        nunfinishedflows_vector.append(num_unfinished_flows)
        flow_statistics.append(flow_completion_tuple)
        # Here x[-1] is the throughput field returned by analyze_throughput.
        fct = [x[-1] for x in flow_completion_tuple]
        bins , hist = normalized_cdf(sorted(fct), nbins)
        throughput_output_filename = stats_file_output_dir + "tput_diffahc_ahc{}.txt".format(get_name_from_ahc(ahcs[index]))
        export_cdf_to_pgfplotter_file(throughput_output_filename, bins, hist)
        #bins = [0,] + bins
        #hist = [0,] + hist
        plt.plot(bins, hist, marker=markers[index % len(markers)], linewidth=0.7)

        legends.append("AHC : {}".format(ahcs[index]))
        index += 1
    # NOTE(review): resend counts are normalized by the magic constant 99679,
    # presumably the total flow count of these runs -- confirm.
    export_tcp_resends("./temp/diff_ahc/load_{}_toe/diff_ahc_num_tcp_resends.txt".format(load_level), ahcs, [float(x)/99679 for x in tcp_resends])
    print(nunfinishedflows_vector)
    plt.xlabel("Throughput")
    plt.ylabel("CDF")
    plt.ylim(ymax=1.1, ymin=0.)
    plt.legend(legends)
    # Disabled experiment: 99th-percentile slowdown binned by log flow size.
    '''
    fig = plt.figure()
    legends = []
    index = 0
    nunfinishedflows_vector = []
    for run_path in run_path_names:
        flow_completion_tuple, num_unfinished_flows = analyze_flow_completion(run_path, non_zero_flows=nonzero_flows)
        nunfinishedflows_vector.append(num_unfinished_flows)
        flow_sizes = [x[2] for x in flow_completion_tuple]
        fct = [x[-1] for x in flow_completion_tuple]
        bins , hist = analyze_throughput_binned_by_log_flow_size(flow_sizes, fct, 30)
        plt.plot(bins, hist, marker=markers[index % len(markers)], linewidth=0.7)

        legends.append("AHC : {}".format(ahcs[index]))
        index += 1
    print(nunfinishedflows_vector)
    plt.xlabel("Flow Size (log10)")
    plt.ylabel("99th percentile")
    plt.ylim(ymin=0.)
    plt.legend(legends)
    '''
    plt.show()
    return

if __name__=="__main__":
    # Script entry point: currently runs only the diff-MLU analysis; the
    # diff-AHC call and everything after exit() are disabled.
    nbins = 50
    ahcs = [1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9]
    mlus = [0.095, 0.112, 0.140, 0.379, 0.904, 1.446]
    # Overrides the list above; only these three MLUs are analyzed.
    mlus = [0.095, 0.379, 0.776]
    #mlus = [0.095, 0.269, 1.556]
    #mlus = [0.095, 0.140, 0.379, 1.446]
    #ahcs = [1.1, 1.3, 1.5, 1.7, 1.8, 1.9]
    # NOTE(review): hard-coded absolute user path -- not portable across machines.
    stats_file_output_dir = "/Users/minyee/static_topology_engineer/plot_data/"
    analyze_diff_mlu(mlus, 100000, stats_file_output_dir)
    #analyze_diff_ahc(ahcs, 100000, stats_file_output_dir)
    exit()
    # ---- Everything below is unreachable (dead code kept for reference) ----
    mlus = [0.095, 0.204, 0.377]
    markers = ['x', '+', '|']

    run_path_names = ["temp/mlu" + get_name_from_mlu(x) for x in mlus]
    flow_statistics = []
    fig = plt.figure()
    legends = []
    index = 0
    for run_path in run_path_names:
        flow_completion_tuple, _ = analyze_flow_completion(run_path)
        flow_statistics.append(flow_completion_tuple)
        fct = [x[-1] for x in flow_completion_tuple]
        bins , hist = normalized_cdf(sorted(fct), nbins)
        # Prepend an extrapolated left edge so the plotted CDF starts at 0.
        bins = [2 * bins[0] - bins[1]] + bins
        hist = [0,] + hist
        plt.plot(bins, hist, marker=markers[index])

        legends.append("MLU : {}".format(mlus[index]))
        index += 1
    plt.xlabel("Flow Completion Time in nanoseconds (FCT)")
    plt.ylabel("CDF")
    plt.ylim(ymax=1., ymin=0.)
    plt.legend(legends)
    plt.show()
    #analyze_port_utilization()
