#!/usr/bin/env python
#
# Input handler for EGEE RTM traces (obtained from http://grid-observatory.org)
#
# Author:   Mike Murphy <mamurph@cs.clemson.edu>
# Revision: 23 September 2009
#
#   Copyright 2009 Clemson University
#
#   Licensed under the Apache License, Version 2.0 (the "License");
#   you may not use this file except in compliance with the License.
#   You may obtain a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
#   Unless required by applicable law or agreed to in writing, software
#   distributed under the License is distributed on an "AS IS" BASIS,
#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#   See the License for the specific language governing permissions and
#   limitations under the License.
#
# This material is based upon work supported under a National Science
# Foundation Graduate Research Fellowship. Any opinions, findings,
# conclusions or recommendations expressed in this publication are those of
# the author(s) and do not necessarily reflect the views of the National
# Science Foundation.

'''
Input handler for EGEE RTM traces (available from http://grid-observatory.org).
RTM traces are downloaded in a .tar.gz archive format, which should NOT be
extracted. Instead, this module will process the archives directly, retrieving
the job data from within the archive and producing one output file per
archive. The resulting job file will contain data for all Virtual Organizations
observed on the grid for the entire week contained within the archive.

This module may be invoked as a script as follows:

python egee_rtm_input.py [-d <output_directory>] [-c <comment>] \
   [-p <cpu_bound>] <input_1> [... <input_N>]

Each archive will be converted into a job file, using an automated naming
scheme. The downloaded archive files have filenames with the form
EGEE-YYYY-WW-RTM.tar.gz, which will be translated into an output file name
with the form egee-YYYY-WW.jobs (where YYYY is the 4-digit year and WW is the
week number). A second file, with a name in the form egee-YYYY-WW.vo, will be
created with dynamic Virtual Organization information.
'''

from datafilter import data_filter
from job_format_writer import JobWriter
from tabbed_data_input import TabbedData
from vo_format_writer import VOWriter
import tarfile

# Field definitions for EGEE RTM traces, from:
# http://query.grid-observatory.org/GOTechnicalDocV1.2.pdf

JOB_ID                      = 0    # The glite Job identifier
RTM_TYPE                    = 1    # A type computed from the RTM data
FINAL_REASON                = 2    # Job termination status
FINAL_EXIT_CODE             = 3    # Job exit code
RB                          = 4    # Name of the Resource Broker
UI                          = 5    # Name of the User Interface
CE                          = 6    # Name of the Computing Element
WN                          = 7    # Name of the Worker Node
VO                          = 8    # Name of the Virtual Organization
DN                          = 9    # Name of the Identity
REQUIREMENTS                = 10   # The job requirements
RANK                        = 11   # The ranking formula
REGISTRATION_TIME_STRING    = 12   # Registration date
TIME_WRITTEN_STRING         = 13   # Presumably date record was written -- TODO confirm
UI_REGISTER_EPOCH           = 14   # Epoch at which the job was registered
NS_ACCEPTED_EPOCH           = 15   # Epoch when job accepted by network server
WLM_MATCH_EPOCH             = 16   # Epoch when job matched to resource
JC_TRANSFER_EPOCH           = 17   # Job controller xferred job to resource
LM_ACCEPTED_EPOCH           = 18   # Job accepted by resource (logmonitor)
LM_RUNNING_EPOCH            = 19   # Job running on resource (logmonitor)
LM_DONE_EPOCH               = 20   # Job completed on resource (logmonitor)
LRMS_RUNNING_EPOCH          = 21   # Job running on resource (lrms)
LRMS_DONE_EPOCH             = 22   # Job completed on resource (lrms)
REGISTRATION_TIME           = 23   # submission to acceptance by RB (seconds)
MATCH_TIME                  = 24   # acceptance to finding a CE (waiting state)
UPTO_SCHED_XFER_TIME        = 25   # acceptance to xfer to CE
UPTO_SCHED_ACCEPT_TIME      = 26   # same as above observed by logmonitor
LM_CE_TOTAL_TIME            = 27   # scheduled + running time
LM_CE_SCHEDULED_TIME        = 28   # scheduled time only
LM_WN_TIME                  = 29   # time on worker node (logmonitor)
LRMS_WN_TIME                = 30   # time on worker node (lrms)
TOTAL_TIME                  = 31   # acceptance to completion
EFFICIENCY                  = 32   # ratio b/t running time and time in system
REQUIREMENTS_COUNT          = 33   # number of conjoined expr in requirements
RANK_COUNT                  = 34   # presumably conjoined expr in rank -- TODO confirm
RESUBMIT_COUNT              = 35   # Number of resubmissions
LM_RS_WASTED_WN_TIME        = 36   # Running time before failure on WN


# Column type map for tabbed_data_input. Fields 0-13 are strings, fields
# 14-31 are ints (epochs and durations), field 32 (EFFICIENCY) is a float,
# and fields 33-36 are ints (counts and wasted time).
EGEE_TYPE_MAP = (str,) * 14 + (int,) * 18 + (float,) + (int,) * 4
#

def select_completed(row):
   '''
   Row selector that keeps only rows marked in the RTM data set as jobs
   that ran to completion. Note that a large fraction of RTM entries are
   not marked completed and will be dropped when this selector is applied.

   @param row: Row to be tested in the selector
   @type row: sequence

   @rtype: bool
   @return: True iff the RTM data set indicates that the job represented by
   the input row has completed
   '''
   status = row[RTM_TYPE]
   return status == 'REGISTERED-TRANSFER-RAN-CLEARED'
#

def select_times(row, time_start_col, time_end_col, start, end):
   '''
   Selects a row if the job starts and ends within a specified time range
   (seconds since the epoch, UTC). A bound of zero (or negative) disables
   the corresponding check. Each column argument may be either an int index
   into the row or a callable that computes the value from the whole row.

   @param row: Input row
   @type row: sequence
   @param time_start_col: Column number containing the start time
   @type time_start_col: int
   @param time_end_col: Column number containing the end time
   @type time_end_col: int
   @param start: Start time in seconds UTC
   @type start: float
   @param end: End time in seconds UTC
   @type end: float

   @rtype: bool
   @return: True iff the job starts and ends within the specified time frame
   '''
   def lookup(col):
      # Resolve a column spec: plain index vs. derived-value callable
      if type(col) is int:
         return row[col]
      return col(row)
   #
   if start > 0 and lookup(time_start_col) < start:
      return False
   if end > 0 and lookup(time_end_col) > end:
      return False
   return True
#

def select_nonzero(row, col_nums):
   '''
   Selects a row only when every one of the requested columns holds a
   non-zero value.

   @param row: Input row
   @type row: sequence
   @param col_nums: Tuple of column numbers (ints) to check

   @rtype: bool
   @return: True iff all requested columns have non-zero values
   '''
   return all(row[num] != 0 for num in col_nums)
#

def first_nonzero(row, fields):
   '''
   Returns the value of the first listed field in a row whose value is not
   equal to zero.

   @param row: Input row
   @type row: sequence
   @param fields: Field numbers to check (tuple of ints)

   @rtype: number
   @return: a value (int/float) or zero if all fields are zero
   '''
   # BUGFIX: the original loop's break statement was indented with a tab
   # while the rest of the file uses spaces; that mix is rejected outright
   # by Python 3 (TabError). Rewritten with consistent spacing and an
   # early return instead of the value/break dance.
   for field in fields:
      if row[field] != 0:
         return row[field]
   return 0
#

def filter_best_running_epoch(row):
   '''
   Picks the "best" job starting time from an input row. The LRMS start
   epoch is preferred whenever it is not earlier than the job submission
   epoch (a sanity check that the start follows the submission); otherwise
   the logmonitor (LM) start epoch is used.

   @param row: Input row
   @type row: sequence

   @rtype: float
   @return: job start time in seconds UTC
   '''
   lrms_start = row[LRMS_RUNNING_EPOCH]
   if lrms_start >= row[UI_REGISTER_EPOCH]:
      return lrms_start
   return row[LM_RUNNING_EPOCH]
#

def filter_best_done_epoch(row):
   '''
   Picks the "best" job ending time from an input row. The LRMS completion
   epoch is preferred whenever it is not earlier than the best-known start
   time; otherwise the logmonitor (LM) completion epoch is used.

   @param row: Input row
   @type row: sequence

   @rtype: float
   @return: the job end time in seconds UTC
   '''
   lrms_done = row[LRMS_DONE_EPOCH]
   if lrms_done >= filter_best_running_epoch(row):
      return lrms_done
   return row[LM_DONE_EPOCH]
#

def filter_best_wn_time(row):
   '''
   Picks the best job running time (worker node time) for a row, preferring
   the LRMS measurement and falling back to the LM measurement when the
   LRMS value is zero.

   @param row: Input row
   @type row: sequence

   @rtype: float
   @return: the job length in seconds
   '''
   preference_order = (LRMS_WN_TIME, LM_WN_TIME)
   return first_nonzero(row, preference_order)
#

# Readable aliases for the column-filter functions used in COLUMNS below
BEST_RUNNING_EPOCH = filter_best_running_epoch
BEST_DONE_EPOCH = filter_best_done_epoch
BEST_WN_TIME = filter_best_wn_time

ONLY_COMPLETED_JOBS = select_completed

# Keep only records with a non-zero registration epoch that completed
SELECTOR = [(select_nonzero, (UI_REGISTER_EPOCH,)), ONLY_COMPLETED_JOBS]
# Output column order: submit epoch, job id, VO, run length, CE, start epoch
# (consumed positionally by load_archive's unpacking loop)
COLUMNS = [ UI_REGISTER_EPOCH, JOB_ID, VO, BEST_WN_TIME, CE, \
            BEST_RUNNING_EPOCH ]
#

class JobTrace(TabbedData):
   '''
   Decoder for a single job trace file (one member of the outer tar
   archive). Rows that survive the module selectors are buffered in memory
   as a list; since individual archives are only a few hundred mebibytes,
   an entire archive should fit in memory on most systems without swapping.
   '''
   def __init__(self, filename, fh):
      '''
      @param filename: Name of the archive member
      @type filename: str
      @param fh: Open file handle to archive member
      '''
      TabbedData.__init__(self, filename, fh, EGEE_TYPE_MAP)
      self.jobs = []
   def record_handler(self, record):
      '''
      Handles one raw job record from the trace: applies the module-level
      SELECTOR and COLUMNS filters and buffers the surviving trace record
      for later output to the job file.

      @param record: Raw record to process
      @type record: sequence
      '''
      filtered = data_filter(record, SELECTOR, COLUMNS)
      if filtered is not None:
         self.jobs.append(filtered)
   #
   def decode(self):
      '''
      Reads and processes every line from the input member.

      @return: a list of all processed jobs
      '''
      while self.parse_line():
         pass
      return self.jobs
   #
#


def load_archive(tarfilename, jw, vw, cpu_bound=100):
   '''
   Loads a tar archive of RTM data, converting each record in the data set
   into a job record that is written to the output file using a JobWriter
   instance. The list of Virtual Organizations found in the trace file will
   be written to the specified VOWriter instance.

   @param tarfilename: Name of the tar file to open
   @type tarfilename: str
   @param jw: JobWriter instance to which jobs should be written
   @param vw: VOWriter instance to which the VO list will be written
   @param cpu_bound: CPU bound (0-100) recorded for every job
   @type cpu_bound: number
   '''
   tfh = tarfile.open(tarfilename, 'r')
   all_jobs = []
   try:
      ti = tfh.next()
      while ti is not None:
         fh = tfh.extractfile(ti)
         jt = JobTrace(ti.name, fh)
         all_jobs.extend(jt.decode())
         ti = tfh.next()
   finally:
      # BUGFIX: the archive handle was previously never closed
      tfh.close()
   #
   # Sort by submission epoch. A key function replaces the original
   # cmp= argument, which does not exist in Python 3.
   all_jobs.sort(key=(lambda job: job[0]))
   seen_vos = []
   last_job_time = -1
   last_vo_time = -1
   for submit, name, vo, length, ce, start in all_jobs:
      if submit > last_job_time:
         jw.write_time(submit)
         last_job_time = submit
      elif submit < last_job_time:
         raise Exception('Sort did not sort correctly')
      jw.write_job(name, ce, length, cpu_bound, vo, (start - submit))
      if vo not in seen_vos:
         time = submit - 1  # Ensure VO appears before job by 1 sec adjustment
         if time > last_vo_time:
            # BUGFIX: the unadjusted submit time was written here before,
            # defeating the 1-second adjustment computed above (and
            # diverging from the last_vo_time bookkeeping)
            vw.write_time(time)
            last_vo_time = time
         vw.write_vo(vo)
         seen_vos.append(vo)
      #
   #
#

if __name__ == '__main__':
   import optparse, os.path, sys

   # Command-line interface: convert each EGEE-YYYY-WW-RTM.tar.gz archive
   # into egee-YYYY-WW.jobs and egee-YYYY-WW.vo files.
   # NOTE(review): the -c/--comment option is parsed but never used below;
   # presumably intended for writer metadata -- TODO confirm.
   op = optparse.OptionParser()
   op.add_option('-d', '--output-directory', dest='directory', \
                 help='write output files to DIRECTORY', metavar='DIRECTORY')
   op.add_option('-c', '--comment', help='set metadata comment to COMMENT', \
                 metavar='COMMENT', dest='comment')
   op.add_option('-p', '--cpu-bound', dest='cpu', \
                 help='set cpu bound to VALUE (0-100)', metavar='VALUE')
   opts, filenames = op.parse_args()

   if len(filenames) < 1:
      print >> sys.stderr, 'Usage:', sys.argv[0], '[-d <directory>]', \
         '[-p <cpu_bound>] [-c <comment>] <tar_file_1> [... <tar_file_n>]'
      sys.exit(2)
   #

   out_dir = '.'
   if opts.directory is not None:
      out_dir = opts.directory
   #
   cpu = 100
   if opts.cpu is not None:
      cpu = float(opts.cpu)
      if (cpu < 0) or (cpu > 100):
         print >> sys.stderr, 'CPU not in range 0 to 100'
         sys.exit(2)
   #

   for filename in filenames:
      # BUGFIX: split only the base name; splitting the full path meant a
      # directory component containing '-' corrupted the year/week fields
      parts = os.path.basename(filename).split('-')
      year = parts[1]
      week = parts[2]
      out_base = os.path.join(out_dir, 'egee-' + year + '-' + week)
      jfh = open(out_base + '.jobs', 'w')
      vfh = open(out_base + '.vo', 'w')
      try:
         jw = JobWriter(jfh, normalize_time=True)
         vw = VOWriter(vfh, normalize_time=True)
         load_archive(filename, jw, vw, cpu)
      finally:
         # Close the output files even if archive processing fails
         jfh.close()
         vfh.close()
   #
#
