#!/usr/bin/env python
#
# Mechanisms for producing simulator job trace files, to be read by the
# job_format_reader core module.
#
# Author:   Mike Murphy <mamurph@cs.clemson.edu>
# Revision: 23 September 2009
#
#   Copyright 2009 Clemson University
#
#   Licensed under the Apache License, Version 2.0 (the "License");
#   you may not use this file except in compliance with the License.
#   You may obtain a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
#   Unless required by applicable law or agreed to in writing, software
#   distributed under the License is distributed on an "AS IS" BASIS,
#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#   See the License for the specific language governing permissions and
#   limitations under the License.
#
# This material is based upon work supported under a National Science
# Foundation Graduate Research Fellowship. Any opinions, findings,
# conclusions or recommendations expressed in this publication are those of
# the author(s) and do not necessarily reflect the views of the National
# Science Foundation.

'''
Implements a uniform format writer for grid job traces, which will be readable
by the corresponding simvoc.core.job_format_reader module. This format
consists of a plain text file, the format of which is documented externally.
'''

from common_writer import CommonWriter

# Format version stamped into each trace file; the corresponding
# job_format_reader checks this value, so be sure to increment it
# whenever the output format changes!
JOB_FORMAT = 0.90
HEADER = 'job trace generated by job_format_writer'

class JobWriter(CommonWriter):
   '''
   Mechanism for writing job trace files in the format required by the core
   job_format_reader module. Writing jobs is a two-step process: first, the
   timestamp of a job or batch of jobs is written using write_time(), then
   the job or batch of jobs is written using calls to write_job(). It is
   important to note that jobs/batches must be written in time order, as the
   resulting file must be partially ordered with respect to job submission
   time.

   This module does not buffer all jobs in memory before writing, so that
   memory limits are not exceeded when handling extremely large job traces.
   File system limitations may be an issue on some platforms, however.
   '''
   def __init__(self, fh, normalize_time=False):
      '''
      @param fh: Open file handle (w mode) to which trace will be written
      @param normalize_time: if True, times will be normalized relative to
                             the first time value written
      @type normalize_time: bool
      '''
      # Delegate to CommonWriter, stamping this writer's format version and
      # header line so the reader can validate the file it receives.
      CommonWriter.__init__(self, fh, JOB_FORMAT, HEADER, normalize_time)
   def write_job(self, job_id, site_name, length, cpu, vo, queue=-1):
      '''
      Writes a single job record to the output file handle. A call to
      write_time() MUST precede the first call to this method.

      @param job_id: name to associate with this job
      @type job_id: str
      @param site_name: name of site to which job is to be submitted
      @type site_name: str
      @param length: running length of the job
      @type length: number
      @param cpu: percentage of the job that is CPU bound in range 0-100. The
                  remaining percentage (100 - cpu_bound) will be considered
                  network-bound for simulation purposes.
      @type cpu: number
      @param vo: name of the Virtual Organization with which this job is
                 associated
      @type vo: str
      @param queue: Elapsed time between job submission and start (optional)
      @type queue: number
      '''
      # Enforce the write_time()-before-write_job() contract (inherited check).
      self.check_time()
      # The '%s' conversion already applies str() to each value, so explicit
      # str() wrappers are unnecessary; output bytes are unchanged.
      self.fh.write(': %s %s %s %s %s' % (job_id, site_name,
                                          length, cpu, vo))
      # A negative queue value means "not provided"; only emit the optional
      # queue-delay field when the caller supplied a real (>= 0) value.
      if queue >= 0:
         self.fh.write(' %s' % queue)
      self.fh.write('\n')
#
