from SimPy.Simulation import *
from Packet import Packet
from Link import Link
from Host import Host
from NIC import NIC
from Router import Router
import math

class MetricMessage():
    """A single logged measurement event.

    Holds the reporting process, the simulation time of the event, the
    packet involved, the event name (string), and optional extra data.
    """
    def __init__(self, process, time, packet, event, data = None):
        # Store who reported, when, about which packet, what happened,
        # and any event-specific payload
        self.process = process
        self.timestamp = time
        self.packet = packet
        self.event = event
        self.data = data
    
class Metrics(Process):
    
    # Create a list of the processes to store messages in time order for
    # each process
    __processes = dict()
    
    # List of links and flows that are being measured in metrics
    __links = []
    __flows = []
    
    def __init__(self, links, flows):
        Process.__init__(self)
        
        self.__links = links
        self.__flows = flows
        
        self.throughputFile = {}
        self.tStart = {}
        
        self.throughputFileR = {}
        self.tStartR = {}
        self.tEnd = {}
        
        self.link = {}
        self.nicLeft = {}
        self.nicRight = {}
        self.nicSending = {}
        self.bufferOccupancyFile = {}
        self.packetLossFile = {}
        self.linkFlowFile = {}
        self.lossCount = {}      
        
        # Create the list of links to measure
        for link in self.__links:
            self.addProcess(link[0])
            self.addProcess(link[1])
            self.addProcess(link[2])
            
            # Be able to get link from nicLeft and nicRight and vice versa
            self.link[link[1]] = link[0]
            self.link[link[2]] = link[0]
            self.nicLeft[link[0]] = link[1]
            self.nicRight[link[0]] = link[2]
            
            # Setup packet counts in link going each way
            self.nicSending[link[1]] = 0
            self.nicSending[link[2]] = 0
            
            # Loss count starts at 0
            self.lossCount[link[0]] = 0
            
            # Setup log files
            # Take advantage of integer division to compress line length
            self.bufferOccupancyFile[link[1]] = open('bufOccupL%d.csv' % (len(self.bufferOccupancyFile) / 2), 'w')
            self.bufferOccupancyFile[link[2]] = open('bufOccupR%d.csv' % (len(self.bufferOccupancyFile) / 2), 'w')
            self.packetLossFile[link[0]] = open('packetLoss%d.csv' % len(self.packetLossFile), 'w')
            self.linkFlowFile[link[1]] = open('linkFlowLtoR%d.csv' % (len(self.linkFlowFile) / 2), 'w')
            self.linkFlowFile[link[2]] = open('linkFlowRtoL%d.csv' % (len(self.linkFlowFile) / 2), 'w')
            
        # Create a list of flows to measure
        for flow in self.__flows:
            self.addProcess(flow[0])
            self.addProcess(flow[1])
                      
            # Setup log files and the time bounds for flows
            self.throughputFile[flow[0]] = open('throughput%d.csv' % len(self.tStart), 'w')
            self.tStart[flow[0]] = 0
                   
            self.throughputFileR[flow[1]] = open('throughputReceive%d.csv' % len(self.tStartR), 'w')
            self.tStartR[flow[1]] = 0
            self.tEnd[flow[1]] = None
        
    def addProcess(self, process):
        # Add process to process list and enable logging in the process
        self.__processes[process] = []
    
    def log(self, process, time, packet = None, event = None, data = None):
        # If the process is not being measured, return immediately
        if process not in self.__processes:
            return
                
        # For NIC/Link
        if event == 'NICReceiving':
            if self.nicLeft[self.link[process]] == process: # receiving nic on left, so update right
                right = self.nicRight[self.link[process]]
                self.nicSending[right] -= 1
                # num packets going this way in link
                self.linkFlowFile[right].write("%f, %f\n" % (time, self.nicSending[right]))
            else:
                left = self.nicLeft[self.link[process]]
                self.nicSending[left] -= 1
                self.linkFlowFile[left].write("%f, %f\n" % (time, self.nicSending[left]))
        
        elif event == 'NICSending':         # Just asking to add to the buffer at send()
            # We get a buffer size to write here.
            self.bufferOccupancyFile[process].write("%f, %f\n" % (time, data))
                
        elif event == 'NICSendingIntoLink': # actually entering link at go()
            self.nicSending[process] += 1
            self.linkFlowFile[process].write("%f, %f\n" % (time, self.nicSending[process]))
            
        elif event == 'PacketLost': # Link lost a packet. Data holds the receiver nic
            link = process
            if self.nicLeft[link] == data: # then the right side should decrement
                self.nicSending[self.nicRight[link]] -= 1
                self.linkFlowFile[self.nicRight[link]].write("%f, %f\n" % (time, self.nicSending[self.nicRight[link]]))
            else: # then the left side should decrement
                self.nicSending[self.nicLeft[link]] -= 1
                self.linkFlowFile[self.nicLeft[link]].write("%f, %f\n" % (time, self.nicSending[self.nicLeft[link]]))
            # This was also packet loss
            self.lossCount[process] += 1
            self.packetLossFile[process].write("%f, %f\n" % (time, self.lossCount[process] / time))

        elif event == 'Packet dropped': # by a nic who was sending
            # self.nicSending[process] -= 1 Note that we only increment in NIC's go(), so this is unnecessary.
            self.linkFlowFile[process].write("%f, %f\n" % (time, self.nicSending[process]))
            
            # This was also packet loss
            self.lossCount[self.link[process]] += 1
            self.packetLossFile[self.link[process]].write("%f, %f\n" % (time, self.lossCount[self.link[process]] / time))
            
        # For Hosts (flows)
        elif event == 'RTT':
            if self.tStart[process] != 0:
                # Compute average sender throughput at this time and write
                self.throughputFile[process].write("%f, %f, %f, %f\n" % \
                    (time, float(data[2])/(time - self.tStart[process]), data[0], data[1]))
            else:
                # Set start time if it has not been set before
                self.tStart[process] = time
                
        elif event == 'NewPacketReceived':
            if self.tStartR[process] != 0:
                # Compute average received throughput
                self.throughputFileR[process].write("%f, %f\n" % \
                    (time, float(data)/(time - self.tStartR[process])))
            else:
                # Set start time if it has not been set before
                self.tStartR[process] = time
                
        elif event == 'FlowEnd':
            self.tEnd[process] = time
                
        return
            
    def go(self):
        while True:
            # No more processes to send, so idle
            yield passivate, self
            
    def display(self):
        self.compute_statistics()
        return
            
    def compute_statistics(self):
        print 'Displaying all relevant information'
        # Compute aggregate statistics
        finalEndTime = 0
        
        # Close every log file that was previously written to now that no more
        # logging occurs
        for process in self.throughputFile:
            self.throughputFile[process].close()
        for process in self.throughputFileR:
            self.throughputFileR[process].close()
        for process in self.bufferOccupancyFile:
            self.bufferOccupancyFile[process].close()
        for process in self.packetLossFile:
            self.packetLossFile[process].close()
        for process in self.linkFlowFile:
            self.linkFlowFile[process].close()
        
        # Compute aggregate statistics for the flows
        for i in range(len(self.__flows)):
            # Reopen the throughput file in read mode
            throughputSend = open('throughput%d.csv' % i, 'r')
            
            # Get the start and end time of the current flow so we do not do
            # flow analysis on routing table updates after the flow has ended
            startTimeSend = self.tStart[self.__flows[i][0]]
            endTime = self.tEnd[self.__flows[i][1]]
            
            # Determine when the final flow ends so we do not do buffer analysis
            # when there are only routing table updates after all flows ended
            finalEndTime = max(finalEndTime, endTime)
            
            # Read the file into memory for ease of computation
            throughputSendList = parseFile(throughputSend, 4)
            
            # Define initial values so print statement can always print
            avgThroughput = 0
            stdPacketDelay = 0
            avgPacketDelay = 0
            
            if throughputSendList:
                # Throughput is computed at the average throughput, so return
                # the last element logged
                avgThroughput = throughputSendList[-1][1]
                
                # Return the packet delay statistics
                stdPacketDelay, avgPacketDelay = timeWeightedStd(throughputSendList, endTime, 2)
            	
            # Write flow number, throughput, and packet delay statistics to stdout
            print '''Flow %d:\n
            \tAverage Throughput: %f\n
            \tAverage Packet Delay: %f\n
            \tPacket Delay Standard Deviation: %f\n''' % \
            (i, avgThroughput, avgPacketDelay, stdPacketDelay)
            
        # Compute aggregate statistics for the links
        for i in range(len(self.__links)):
            # Reopen buffer occupancy and packet loss files
            bufferLOccupancy = open('bufOccupL%d.csv' % i, 'r')
            bufferROccupancy = open('bufOccupR%d.csv' % i, 'r')
            packetLoss = open('packetLoss%d.csv' % i, 'r')
            
            # Read files into memory
            bufferLOccupancyList = parseFile(bufferLOccupancy, 2)
            bufferROccupancyList = parseFile(bufferROccupancy, 2)
            packetLossList = parseFile(packetLoss, 2)
            
            # Define initial values so print statement can always print
            stdBufferLOccupancy = 0
            avgBufferLOccupancy = 0
            stdBufferROccupancy = 0
            avgBufferROccupancy = 0
            avgPacketLoss = 0
            
            # Compute buffer and packet loss statistics
            if bufferLOccupancyList:
                stdBufferLOccupancy, avgBufferLOccupancy = timeWeightedStd(bufferLOccupancyList, finalEndTime, 1)
            if bufferROccupancyList:
                stdBufferROccupancy, avgBufferROccupancy = timeWeightedStd(bufferROccupancyList, finalEndTime, 1)
            if packetLossList:
                avgPacketLoss = packetLossList[-1][1]
            
            # Print link number, buffer statistics, and packet loss statistics to stdout
            print '''Link %d:\n
            \tLeft Buffer Occupancy: %f\n
            \tLeft Buffer Standard Deviation: %f\n
            \tRight Buffer Occupancy: %f\n
            \tRight Buffer Standard Deviation: %f\n
            \tAverage Packet Loss per Second: %f\n''' % \
            (i, avgBufferLOccupancy, stdBufferLOccupancy, \
             avgBufferROccupancy, stdBufferROccupancy, \
             avgPacketLoss)

# Compute the standard deviation weighted by percent of time 
def timeWeightedStd(inputList, endTime, index):
    totalValue = 0
    # First compute the mean, total time, and the time in each state
    mean, timeElapsed, timeWeights = timeWeightedAvg(inputList, endTime, index)
    for i in range(len(inputList[:len(timeWeights)])):
        # Time weighted standard deviation is sum(% time * (value - mean)^2)
        value = (timeWeights[i] / timeElapsed) * ((inputList[i][index] - mean) ** 2)
        totalValue += value
    # Output the standard deviation and mean since all statistics want both
    return math.sqrt(totalValue), mean

# Compute the average time, time elapsed, and how much time in each state
def timeWeightedAvg(inputList, endTime, index):
    # Time elapsed is either time between first and last log, or time between
    # first log and when the flows have stopped
    timeElapsed = inputList[-1][0] - inputList[0][0]
    if endTime:
        timeElapsed = min(timeElapsed, endTime - inputList[0][0])
    totalWeight = 0
    timeWeightList = []
    
    for i in range(len(inputList[:-1])):
        # If we are past the end time when it exists then stop
        if endTime != None and inputList[i + 1][0] > endTime:
            break
        # Compute the time spent in the state to ease computing standard deviation
        timeWeight = inputList[i + 1][0] - inputList[i][0]
        timeWeightList.append(timeWeight)
        
        # Compute the total weighted sum, sum up the elements in column # index
        weightedValue = inputList[i][index] * timeWeight
        totalWeight += weightedValue
        
    # Return average, time elapsed, and how much time spent in each state to
    # ease computing standard devation
    return totalWeight / timeElapsed, timeElapsed, timeWeightList

# Output a list that is the log file in memory
def parseFile(inputFile, elementsPerEntry = 2):
    outputList = []
    # Make sure we are at the start of the input file
    inputFile.seek(0)
    # For each line, make sure each element read in is a number and that we
    # are getting a line with the correct number of entries per row
    for line in inputFile:
        newline = []
        splitline = line.split(',')
        for element in splitline:
            if not isNumber(element) or len(splitline) < elementsPerEntry:
                newline = []
                break
            else:
                newline.append(float(element))
        if newline:
            outputList.append(newline)
    # Return the list that is the log file in memory
    return outputList
            
# Determine if a string is a number
def isNumber(s):
    try:
        float(s)
        return True
    except ValueError:
        return False