#!/usr/bin/env python
#
# Collection of simulation result containers
#
# Author:   Mike Murphy <mamurph@cs.clemson.edu>
# Revision: 23 September 2009
#
#   Copyright 2009 Clemson University
#
#   Licensed under the Apache License, Version 2.0 (the "License");
#   you may not use this file except in compliance with the License.
#   You may obtain a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
#   Unless required by applicable law or agreed to in writing, software
#   distributed under the License is distributed on an "AS IS" BASIS,
#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#   See the License for the specific language governing permissions and
#   limitations under the License.
#
# This material is based upon work supported under a National Science
# Foundation Graduate Research Fellowship. Any opinions, findings,
# conclusions or recommendations expressed in this publication are those of
# the author(s) and do not necessarily reflect the views of the National
# Science Foundation.

'''
Containers for various simulation results. Each result type is a child class
of the Result class, which provides common capabilities for all result types.
The simulator sends all results, regardless of type, to an output handler,
which may in turn send the results through a pipeline for post-processing
and disk storage.

This module also provides Result-producing replacement functions for the
kernel trace and metadata routines.
'''

from units import fmt_size, fmt_speed
from core_common import SM_WAITING, SM_RUNNING, SM_COMPLETED, SM_ERRORS


# Map of job status names to their corresponding single-entry count maps.
JOB_STATUS_MAP = {
   'submitted': {'submitted': 1},
   'started': {'started': 1},
   'finished': {'finished': 1},
}


class Result(object):
   '''
   Base class for simulation results. Each result is labeled with a result
   type, so that postprocessing routines can be developed without having
   to import simulator core modules. All results are also labeled with the
   entity producing the result, a timestamp (where applicable), and an
   optional message.

   Results also contain fields, which are implemented as a dictionary of
   name: value pairs. Field names are strings, while values must be one of
   several primitive types: None, int, str, float, or bool. Fields may also
   be specified as using delta, or difference, calculations, for use with
   the aggregator post-processing module.
   '''
   def __init__(self, result_type, entity, timestamp, message=''):
      '''
      @param result_type: Name of the result type
      @type result_type: str
      @param entity: Name of the entity producing the result
      @type entity: str
      @param timestamp: Timestamp of the result, which may be a number, a
                        tuple in the form (clock, relative_time), or None
                        (to indicate that this type of result is not
                        timestamped)
      @param message: Message to accompany the result
      @type message: str
      '''
      self.result_type = result_type
      self.entity = entity
      # A plain (non-tuple) timestamp is used for both the absolute and
      # relative time values.
      self.time = timestamp
      self.relative_time = timestamp
      if type(timestamp) is tuple:
         self.time = timestamp[0]
         self.relative_time = timestamp[1]
      #
      self.message = message
      self.fields = {}
      self.delta_fields = {}
   #
   def add_field(self, field, value, use_delta=False):
      '''
      Adds a field to the result class. Field values must be primitive
      types (None, int, str, float, bool). If use_delta is set to True,
      then the fields will be flagged for use with the aggregation
      post-processor.

      @param field: Field name to add
      @type field: str
      @param value: Value of the field (must be a primitive type)
      @param use_delta: enable or disable field aggregation
      @type use_delta: bool

      @raise TypeError: if the value is not a primitive type
      '''
      # Exact type check (not isinstance) is deliberate: only the listed
      # primitive types may pass through the output pipeline.
      if (value is not None) and (type(value) not in (int, str, float, bool)):
         raise TypeError('Unsupported non-primitive value type')
      self.fields[field] = value
      if use_delta:
         self.delta_fields[field] = value
   #
   def same_as(self, other):
      '''
      Compares two results to determine if they are duplicates. In this
      comparison, time values are ignored, but messages and other strings use
      case-sensitive comparisons. Fields present in the other result but
      absent from this one do not affect the comparison.

      @param other: Other result against which to compare
      @type other: Result

      @rtype: bool
      @return: True iff this result and the other result vary only by time (or
      not at all)
      '''
      # Fixed: previously used Python 2-only dict.has_key() and mixed
      # tab/space indentation.
      if self.result_type != other.result_type:
         return False
      if self.message != other.message:
         return False
      for field in self.fields:
         if field not in other.fields:
            return False
         if self.fields[field] != other.fields[field]:
            return False
      return True
   #
   def __str__(self):
      # Untimestamped result types (e.g. Metadata, Discard) carry
      # time=None; formatting None with %d would raise TypeError, so omit
      # the time prefix in that case.
      if self.time is None:
         return '<%s> %s' % (self.result_type, self.message)
      return '%d: <%s> %s' % (self.time, self.result_type, self.message)
#


class AggregationResult(Result):
   '''
   Result emitted by the Aggregator component. The "aggregation_type"
   field carries a string naming the quantity that was aggregated.
   '''
   def __init__(self, timestamp, norm_time, agg_type, agg_map):
      '''
      @param timestamp: Timestamp as supported by L{Result}
      @param norm_time: Time normalized to the first aggregation result of
                        the same type
      @type norm_time: number
      @param agg_type: Aggregation type name
      @type agg_type: str
      @param agg_map: Aggregation map (dictionary of name: value pairs,
                      where all values are primitive types)
      @type agg_map: dict
      '''
      Result.__init__(self, 'Aggregation', '_aggregator_', timestamp)
      # Install a copy of the aggregation map wholesale (bypassing
      # add_field), then attach the bookkeeping entries on top.
      fields = agg_map.copy()
      fields['aggregation_type'] = agg_type
      fields['norm_time'] = norm_time
      self.fields = fields
      self.message = 'aggregated %d values' % len(agg_map)
#


class DiscardResult(Result):
   '''
   Result created whenever a job is discarded without simulating its
   execution. The "reason" field indicates why the job was discarded.
   '''
   def __init__(self, submit_time, reason, job):
      '''
      @param submit_time: Time at which job was (to be) submitted
      @type submit_time: number
      @param reason: Reason for discarding job
      @type reason: str
      @param job: Job that has been discarded
      '''
      Result.__init__(self, 'Discard', '_driver_', None)
      for label, value in (('submit_time', submit_time),
                           ('reason', reason),
                           ('job_id', job.name),
                           ('job_length', job.length),
                           ('job_vo', job.vo)):
         self.add_field(label, value)
      self.message = 'Discarding job %s for submission at %d (%s)' % \
         (job.name, submit_time, reason)
#


class DiscrepancyResult(Result):
   '''
   Result type produced whenever a job, or the simulated grid, must be changed
   due to invalid information contained in the job trace. A reason and
   resolution are provided in the result fields.
   '''
   def __init__(self, timestamp, job_id, reason, resln, extra_data=None):
      '''
      @param timestamp: Timestamp as supported by L{Result}
      @param job_id: Job ID or name
      @type job_id: str
      @param reason: Reason for the discrepancy
      @type reason: str
      @param resln: Resolution action taken
      @type resln: str
      @param extra_data: Job extra_data field (defaults to empty)
      @type extra_data: dict
      '''
      Result.__init__(self, 'Discrepancy', '_driver_', timestamp)
      # Default to None instead of a mutable {} literal: a literal default
      # would be a single dict object shared across every call.
      if extra_data is None:
         extra_data = {}
      self.add_field('job_id', job_id)
      self.add_field('reason', reason)
      self.add_field('resolution', resln)
      for field in extra_data:
         self.add_field(field, extra_data[field])
      self.message = '%s: %s: %s' % (job_id, reason, resln)
#


class FileRequestResult(Result):
   '''
   Result that occurs whenever a file is requested from a FileCache
   '''
   def __init__(self, timestamp, filename, cache_name, cached):
      '''
      @param timestamp: Timestamp as supported by L{Result}
      @param filename: Name of the file requested
      @type filename: str
      @param cache_name: Name of the FileCache
      @type cache_name: str
      @param cached: Flag indicating whether or not the file was in
                     the cache
      @type cached: bool
      '''
      Result.__init__(self, 'FileRequest', cache_name, timestamp)
      for label, value in (('filename', filename),
                           ('cache_name', cache_name),
                           ('cached', cached)):
         self.add_field(label, value)
      suffix = ' [cached]' if cached else ''
      self.message = '%s from %s%s' % (filename, cache_name, suffix)
#


class FileTransferResult(Result):
   '''
   Records the movement of a file from a FileStore to a FileCache (in the
   event of a cache miss), or any other file transfer.
   '''
   def __init__(self, timestamp, filename, size, server_name, cache_name, spd):
      '''
      @param timestamp: Timestamp as supported by L{Result}
      @param filename: Name of file that has been transferred
      @type filename: str
      @param size: Size in bytes of simulated file
      @type size: int
      @param server_name: Name of server from which file was obtained
      @type server_name: str
      @param cache_name: Name of cache to which file was transferred
      @type cache_name: str
      @param spd: Transfer speed in bytes/s (0 if instantaneous)
      @type spd: number
      '''
      Result.__init__(self, 'FileTransfer', server_name, timestamp)
      for label, value in (('filename', filename),
                           ('size', size),
                           ('source', server_name),
                           ('destination', cache_name),
                           ('transfer_speed', spd)):
         self.add_field(label, value)
      self.message = 'from %s to %s - %s at %s' % \
         (server_name, cache_name, fmt_size(size), fmt_speed(spd))
#


class JobResult(Result):
   '''
   Record of a Job, entered into the output pipeline whenever the job completes
   or is discarded
   '''
   def __init__(self, timestamp, job):
      '''
      @param timestamp: Timestamp as supported by L{Result}
      @param job: Job that has completed or been discarded
      '''
      # NOTE: the worker-node and scheduler branches below previously used
      # mixed tab/space indentation; normalized to spaces throughout.
      Result.__init__(self, 'Job', job.name, timestamp)
      self.add_field('length', job.length)
      self.add_field('actual_length', job.actual_length)
      self.add_field('cpu_bound', job.cpu)
      self.add_field('network_bound', job.net)
      self.add_field('job_id', job.name)
      self.add_field('vo', job.vo)
      self.add_field('completed', job.complete)
      self.add_field('start_time', job.start_time)
      self.add_field('submit_time', job.submit_time)
      self.add_field('finish_time', job.finish_time)
      self.add_field('error', job.error)
      # Default worker_node/scheduler fields to None so every JobResult
      # carries the same field set even when the job never ran.
      self.add_field('worker_node', None)
      #
      s = job.name + ': finished'
      #
      if job.worker_node is not None:
         self.add_field('worker_node', job.worker_node.name)
         s += ' on %s' % job.worker_node.name
         extra = job.worker_node.extra_data
         for field in extra:
            self.add_field('wn_' + field, str(extra[field]))
      self.add_field('scheduler', None)
      if job.sched is not None:
         self.add_field('scheduler', job.sched.name)
         self.add_field('sched_tag', job.sched.tag)
         s += ' (%s@%s)' % (job.sched.name, job.sched.tag)
      # Job extra_data entries are stringified to guarantee primitiveness.
      for field in job.extra_data:
         self.add_field('j_' + field, str(job.extra_data[field]))
      self.message = s
#


class JobStartResult(Result):
   '''
   Record dispatched at the moment a job begins execution.
   '''
   def __init__(self, timestamp, job):
      '''
      @param timestamp: Timestamp as supported by L{Result}
      @param job: Job that has started
      '''
      Result.__init__(self, 'JobStart', job.name, timestamp)
      self.message = 'Job %s started at %d' % (job.name, timestamp)
      self.add_field('job_id', job.name)
#


class JobSubmitResult(Result):
   '''
   Result sent upon the successful submission of a job.
   '''
   def __init__(self, timestamp, job):
      '''
      @param timestamp: Timestamp as supported by L{Result}
      @param job: Job that has been submitted
      '''
      Result.__init__(self, 'JobSubmit', job.name, timestamp)
      for label, value in (('job_id', job.name),
                           ('length', job.length),
                           ('vo', job.vo)):
         self.add_field(label, value)
      self.message = 'Job %s submitted at %d' % (job.name, timestamp)
#


class MetadataResult(Result):
   '''
   Result class for injecting metadata into the simulator output. Metadata
   consists of simple measurement/value pairs and carries no timestamp.
   '''
   def __init__(self, qty_name, qty_value):
      '''
      @param qty_name: Name of metadata item to inject
      @type qty_name: str
      @param qty_value: Value of metadata item (primitive)
      '''
      Result.__init__(self, 'Metadata', '_simulator_', None)
      self.add_field('measurement', qty_name)
      self.add_field('value', qty_value)
      self.message = '%s: %s' % (qty_name, qty_value)
#


class SchedResult(Result):
   '''
   Scheduling result record, encoding status information both for the
   scheduler as a whole and for each Virtual Organization it has seen.
   Scheduler and per-VO metrics are flagged for delta calculations so the
   Aggregator component can consume them.
   '''
   def __init__(self, time, sched_id, name, tag, stat, vo_s):
      '''
      @param time: Timestamp as supported by L{Result}
      @param sched_id: Unique ID of the scheduler
      @type sched_id: str
      @param name: Name of the scheduler
      @type name: str
      @param tag: Scheduler tag
      @type tag: str
      @param stat: Statistics tuple produced by the get_update() method of the
                   Scheduler class
      @param vo_s: Dictionary containing a tuple of statistics for each VO
                   seen by the scheduler, in the form sched_name: tuple
      @type vo_s: dict
      '''
      Result.__init__(self, 'Sched', sched_id, time)
      wait, run, comp, err, slots, free = tuple(stat)
      # Scheduler-wide counters, all aggregated.
      for label, value in (('total_slots', slots), ('free_slots', free),
                           ('waiting', wait), ('running', run),
                           ('completed', comp), ('errors', err)):
         self.add_field(label, value, use_delta=True)
      self.add_field('name', name)
      self.add_field('tag', tag)
      self.add_field('sched_id', sched_id)
      total = wait + run
      self.add_field('queue_size', total, use_delta=True)
      #
      # Per-VO counters, also aggregated.
      for vo, stats in vo_s.items():
         self.add_field('waiting_' + vo, stats[SM_WAITING], True)
         self.add_field('running_' + vo, stats[SM_RUNNING], True)
         self.add_field('completed_' + vo, stats[SM_COMPLETED], True)
         self.add_field('errors_' + vo, stats[SM_ERRORS], True)
      #
      self.message = '%s (%s) tot: %d  wait: %d  run: %d  fin: %d  err: %d' % \
         (tag, name, total, wait, run, comp, err)
#


class SiteRejectionResult(Result):
   '''
   Records the rejection of a site at creation time, giving the site name
   and the reason for rejection. This result type carries no timestamp.
   '''
   def __init__(self, name, reason):
      '''
      @param name: Name of the rejected site
      @type name: str
      @param reason: Reason for rejection
      @type reason: str
      '''
      Result.__init__(self, 'SiteRejection', name, None)
      self.add_field('site_name', name)
      self.add_field('reason', reason)
      self.message = ': '.join((name, reason))
#


class SiteResult(Result):
   '''
   Records the presence or absence of a site on the grid.
   '''
   def __init__(self, timestamp, name, sched_name, cluster_name, cpu_count):
      '''
      @param timestamp: Timestamp at which result is observed
      @param name: Name of the site
      @type name: str
      @param sched_name: Name of the scheduler in use at the site
      @type sched_name: str
      @param cluster_name: Name of the cluster used by the site
      @type cluster_name: str
      @param cpu_count: Number of CPU cores known to the site
      @type cpu_count: int
      '''
      Result.__init__(self, 'Site', name, timestamp)
      for label, value in (('site_name', name),
                           ('sched_name', sched_name),
                           ('cluster_name', cluster_name),
                           ('ce_cpu_count', cpu_count)):
         self.add_field(label, value)
      self.message = '%s: %d cpus' % (name, cpu_count)
#


class TraceResult(Result):
   '''
   Result produced by the kernel_trace function in this module, recording
   the simulation trace as part of the output data.
   '''
   def __init__(self, timestamp, message, queue_size, events, rate,
                real_time, delta_e=0, delta_q=0):
      '''
      @param timestamp: Timestamp as supported by L{Result}
      @param message: Simulator message
      @type message: str
      @param queue_size: Event queue size
      @type queue_size: int
      @param events: Total number of events processed
      @type events: int
      @param rate: Event processing rate in evts/sec
      @type rate: float
      @param real_time: Current wallclock time in UTC seconds
      @type real_time: float
      @param delta_e: Number of events processed since last trace update
      @type delta_e: int
      @param delta_q: Change in the event queue size since last trace update
      @type delta_q: int
      '''
      Result.__init__(self, 'Trace', '_kernel_', timestamp, message)
      for label, value in (('queue_size', queue_size),
                           ('event_count', events),
                           ('rate', rate),
                           ('actual_time', real_time),
                           ('delta_e', delta_e),
                           ('delta_q', delta_q)):
         self.add_field(label, value)
#


class VMPilotResult(Result):
   '''
   Result produced by Virtual Machine pilot jobs whenever the pilot state
   changes.
   '''
   def __init__(self, timestamp, img_name, pilot_name, message):
      '''
      @param timestamp: Timestamp as supported by L{Result}
      @param img_name: Name of the VM image file
      @type img_name: str
      @param pilot_name: Name of the pilot job
      @type pilot_name: str
      @param message: Current pilot state
      @type message: str
      '''
      Result.__init__(self, 'VMPilot', pilot_name, timestamp)
      for label, value in (('image_name', img_name),
                           ('pilot_name', pilot_name),
                           ('state', message)):
         self.add_field(label, value)
      self.message = 'pilot: %s  image: %s  state: %s' % \
         (pilot_name, img_name, message)
#


class VMResult(Result):
   '''
   Result produced whenever a Virtual Machine changes state.
   '''
   def __init__(self, timestamp, name, state):
      '''
      @param timestamp: Timestamp as supported by L{Result}
      @param name: Name of the VM
      @type name: str
      @param state: State to which VM has changed
      @type state: str
      '''
      msg = '%s: %s' % (name, state)
      Result.__init__(self, 'VM', name, timestamp, msg)
      self.add_field('name', name)
      self.add_field('state', state)
#


class VOResult(Result):
   '''
   Result that records the observation of a single Virtual Organization in
   the trace data. For VOC simulations, this result also encodes VOC
   management policy information.
   '''
   def __init__(self, timestamp, name, vo_record):
      '''
      @param timestamp: Timestamp at which VO is observed
      @param name: Name of the VO
      @type name: str
      @param vo_record: L{vo_format_reader.VO} object (or similar) that
                        provides policy information about a VO
      '''
      # BUG FIX: arguments were previously passed as
      # Result.__init__(self, 'VO', timestamp, None, ...), which recorded
      # the timestamp in the entity slot and left the result untimestamped.
      # Pass the VO name as the entity and the timestamp in the timestamp
      # slot, consistent with every other Result subclass in this module.
      Result.__init__(self, 'VO', name, timestamp, 'VO: ' + name)
      self.add_field('name', name)
      # Only copy primitive-valued policy entries; add_field() rejects
      # anything else.
      for key in vo_record:
         value = vo_record[key]
         if (value is None) or (type(value) in (str, int, float, bool)):
            self.add_field(key, value)
#


class VOCResult(Result):
   '''
   Periodic update on the state of a Virtual Organization Cluster.
   '''
   def __init__(self, timestamp, vo, count, target, limit):
      '''
      @param timestamp: Timestamp as supported by L{Result}
      @param vo: Name of the VO
      @type vo: str
      @param count: Number of pilot jobs currently running for this VOC
      @type count: int
      @param target: VOC target level
      @type target: int
      @param limit: VOC limit level
      @type limit: int
      '''
      Result.__init__(self, 'VOC', vo, timestamp)
      self.message = '%s: %d (%d -- %d)' % (vo, count, target, limit)
      self.add_field('vo', vo)
      # The pilot count is the only aggregated quantity.
      self.add_field('num_pilots', count, use_delta=True)
      self.add_field('target', target)
      self.add_field('limit', limit)
#


class WatchdogResult(Result):
   '''
   Periodic result used to monitor the state of the watchdog component.
   '''
   def __init__(self, timestamp, wd_name, w_nametg, q_nametg, w_size, q_size):
      '''
      @param timestamp: Timestamp as supported by L{Result}
      @param wd_name: Watchdog name
      @type wd_name: str
      @param w_nametg: Name and tag of the scheduler the watchdog is
                       monitoring
      @type w_nametg: str
      @param q_nametg: Name and tag of the scheduler to which the
                       watchdog sends pilot jobs
      @type q_nametg: str
      @param w_size: Monitored scheduler queue size
      @type w_size: int
      @param q_size: Submission scheduler queue size
      @type q_size: int
      '''
      Result.__init__(self, 'Watchdog', wd_name, timestamp)
      for label, value in (('name', wd_name),
                           ('watch_queue_id', w_nametg),
                           ('watch_queue_size', w_size),
                           ('submit_queue_id', q_nametg),
                           ('submit_queue_size', q_size)):
         self.add_field(label, value)
      self.message = '%s  watch_q: %d  submit_q: %d' % \
                     (wd_name, w_size, q_size)
#


# Factory trace and metadata functions to replace the kernel defaults
def kernel_trace(simtime, realtime, msg, qsize, events, rate, de, dq):
   '''
   Result-producing replacement for the kernel trace function. Simply
   repackages the kernel's trace arguments into a L{TraceResult}.

   @param simtime: Current simulated time
   @type simtime: number
   @param realtime: Current wall clock time in UTC seconds since the epoch
   @type realtime: float
   @param msg: Current simulator message
   @type msg: str
   @param qsize: Current event queue size
   @type qsize: int
   @param events: Total number of events processed
   @type events: int
   @param rate: Event processing rate (events/sec)
   @type rate: float
   @param de: Number of events processed since last trace update
   @type de: int
   @param dq: Change in event queue size since last trace update
   @type dq: int

   @rtype: L{results.TraceResult}
   '''
   trace = TraceResult(simtime, msg, qsize, events, rate, realtime, de, dq)
   return trace
#

def kernel_metadata(start_time, end_time, exec_time, event_count, event_rate):
   '''
   Result-producing replacement for the kernel metadata function. Produces
   one L{MetadataResult} per run-level quantity, wrapped in a 1-tuple.

   @param start_time: Simulation start time in UTC seconds since epoch
   @type start_time: float
   @param end_time: Simulation end time in UTC seconds since epoch
   @type end_time: float
   @param exec_time:  Execution time (seconds) for the simulation
   @type exec_time: float
   @param event_count: Total number of events processed during the run
   @type event_count: int
   @param event_rate: Average event processing rate over the run
   @type event_rate: float

   @rtype: L{results.MetadataResult}
   '''
   items = (('run_start_time', start_time),
            ('run_end_time', end_time),
            ('sim_exec_time', exec_time),
            ('sim_total_events', event_count),
            ('sim_event_rate', event_rate),
            # NOTE(review): 'version' has always been recorded with the
            # end_time value here; this looks suspicious -- confirm the
            # intended value before changing it.
            ('version', end_time))
   results = [MetadataResult(label, value) for label, value in items]
   # Preserved return shape: a 1-tuple containing the list of results.
   return (results,)
#
