#!/usr/bin/env python
#
# Extracts BASIC information from *base-level* (*.lis) files from GRIF-LAL
# traces from EGEE. The resulting data structure will be an instance of
# EGEEMap, which contains a dictionary mapping cluster names to the scheduler
# and number of CPU cores reported by the CE. Since some CEs may share the same
# hardware, it is possible that some CPUs may be counted more than once. The
# GRIF-LAL information does not contain data for resolving this duplication,
# but two heuristics have been implemented to reduce duplication: whenever two
# or more CEs have the same domain name and the same size, it is assumed that
# all such CEs are interfaces to a single cluster. Moreover, whenever two or
# more CEs have the same domain name, but report different sizes, the largest
# size is selected as the cluster size. It is possible that these heuristics
# may result in undercounts in certain situations, such as when two clusters
# of exactly the same size are present in the same domain, or when one domain
# has multiple, disparate clusters.
#
# Author:   Mike Murphy <mamurph@cs.clemson.edu>
# Revision: 23 September 2009
#
#   Copyright 2009 Clemson University
#
#   Licensed under the Apache License, Version 2.0 (the "License");
#   you may not use this file except in compliance with the License.
#   You may obtain a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
#   Unless required by applicable law or agreed to in writing, software
#   distributed under the License is distributed on an "AS IS" BASIS,
#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#   See the License for the specific language governing permissions and
#   limitations under the License.
#
# This material is based upon work supported under a National Science
# Foundation Graduate Research Fellowship. Any opinions, findings,
# conclusions or recommendations expressed in this publication are those of
# the author(s) and do not necessarily reflect the views of the National
# Science Foundation.

'''
Grid map input component for EGEE GRIF-LAL Information System data,
available from the Grid Observatory (grid-observatory.org). This input
component has the same limitations as the input data set: the exact size
of each CE on the grid should be regarded as an estimate at best. Multiple
CEs may share the same hardware, leading to overcounting of site sizes. Two
heuristics have been added to the code to reduce the overcount: a duplicate
detection heuristic, and a merge heuristic. Whenever two CEs in the same
domain have the same size, the duplicate detection heuristic generates a
single cluster for both CEs. Whenever several clusters (of differing sizes)
are present on the same domain, they are merged into the largest sized
cluster among the several. Although these heuristics should reduce overcount,
it is possible that they introduce undercount.

When invoked as a script, this component has the following signature:

egee_map_input.py -o <output_file> [-c <comment>] <lis_file> [... <lis_file>]

where <lis_file> is a base GRIF-LAL file obtained from the Grid Observatory,
<output_file> specifies where the output is to be written, with an optional
<comment> added to the metadata of the output file. Multiple GRIF-LAL files
may be used as input, but input files MUST be in time order. All files MUST
have names of the format GRIF-LAL:YYYYMMDD-HHhMM:bdii.lis

It should be noted that this component CANNOT handle the .diff files included
with the Grid Observatory traces. These files must be applied externally, if
desired, using the patch(1) command.

The output produced by this module will be a text file produced by the
map_format_writer module. If multiple input files are specified, then the
grid file will represent a dynamic grid that changes over time (provided
the grid maps actually change in the input files).
'''

from datafilter import timestamp_to_gmtime
from map_format_writer import MapWriter
from mod_ldif import LDIFParser
import time

class EGEEMap(object):
   '''
   Temporary data structure for processing EGEE map information. This
   structure is NOT included in any output files.
   '''
   def __init__(self):
      # Timestamp (epoch seconds) of the snapshot this map represents
      self.epoch = 0
      # GRIF-LAL file from which this map was built
      self.filename = ''
      # ce_name: {'scheduler': name, 'cluster': name}
      self.ce_map = {}
      # cluster_name: cpu_count
      self.cluster_map = {}
      # Sum of CPU counts reported by the CEs (may over-count shared hardware)
      self.total_ce_cpu_count = 0
      # Sum of cluster CPU counts after the duplicate/merge heuristics
      self.total_cluster_cpu_count = 0
      # Known duplications (automatically handled)
      self.collisions = 0
      # Detected CPU count duplication
      self.duplicate_cpus = 0
      # Scheduler names found in set
      self.sched_names = []
      # Creation time, used as a version marker
      self.version = time.time()
#

class SiteParser(LDIFParser):
   '''
   Extension to the modified LDIF parser for extracting GRIF-LAL Information
   System information (.lis files). After calling the parse() method
   (inherited from the parent LDIFParser class), the following fields will
   be populated with data::

     ce_raw               Raw map of CEs in the form of a dictionary mapping:
                          ce_name: {'scheduler': str, 'cpu_count': int}
     sched_names          List of scheduler names (strings)
     total_ce_cpu_count   Sum of all CPU counts reported by all CEs. This WILL
                          BE an over-count, since larger clusters frequently
                          use several CEs to connect to the grid. (int)
     collisions           Total count of all colliding entries in the data set,
                          where a colliding entry is defined as an entry with
                          the same CE name as another entry, but with a
                          different CPU count. (int)
   '''
   def __init__(self, fh):
      '''
      @param fh: handle to open .lis file (rb mode)
      '''
      LDIFParser.__init__(self, fh)
      self.ce_raw = {}    # ce_name: {'scheduler': name, 'cpu_count': int}
      self.sched_names = []
      self.total_ce_cpu_count = 0
      self.collisions = 0
   def handle(self, dn, entry):
      '''
      Callback invoked by the modified LDIF parser for each entry; this
      method is called by the parser and should not be directly invoked.
      Use the parse() method (inherited from the parent class) instead.

      @param dn: distinguished name of the LDIF entry (unused here)
      @param entry: dictionary mapping LDIF attribute names to value lists
      '''
      # Only entries that carry all three Glue attributes of interest
      # describe a CE usable for the map. (The deprecated dict.has_key
      # calls were replaced with the "in" operator for Python 3
      # compatibility; the mixed tab/space indentation, a TabError under
      # Python 3, was normalized to spaces.)
      if 'GlueCEHostingCluster' in entry and \
            'GlueCEInfoTotalCPUs' in entry and \
            'GlueCEInfoJobManager' in entry:
         name = entry['GlueCEHostingCluster'][0]
         sched_name = entry['GlueCEInfoJobManager'][0]
         cpus = int(entry['GlueCEInfoTotalCPUs'][0])
         #
         # Sanity check: be sure site and sched names do not contain whitespace
         # (if they do, reject the site: bad data)
         if (len(name.split()) == 1) and (len(sched_name.split()) == 1):
            if sched_name not in self.sched_names:
               self.sched_names.append(sched_name)
            if name not in self.ce_raw:
               self.ce_raw[name] = {'scheduler': sched_name, 'cpu_count': cpus}
               self.total_ce_cpu_count += cpus
            else:
               # ASSUME: each CE provides an interface to only one scheduler,
               # so any conflicts are the result of CPU count differences.
               # Resolve these conflicts CONSERVATIVELY, by taking the lower
               # CPU count.
               if cpus != self.ce_raw[name]['cpu_count']:
                  self.collisions += 1
                  diff = cpus - self.ce_raw[name]['cpu_count']
                  if (diff < 0) and (cpus > 0):
                     self.ce_raw[name]['cpu_count'] = cpus
                     # BUG FIX: diff is negative here, so ADDING it lowers
                     # the running total by the amount the stored count was
                     # just reduced. The original "-= diff" INCREASED the
                     # total instead of decreasing it.
                     self.total_ce_cpu_count += diff
                  #
               #
            #
         #
      #
   #
#


def domain(site_name):
   '''
   Returns the domain name of a site_name. Since all sites in the data set
   are FQDNs, the domain name is simply the component of the site name
   starting with (and including) the first dot.

   @param site_name: Site name to parse
   @type site_name: str

   @rtype: str
   @return: the domain name, with the leading dot
   '''
   # Everything after the first dot (empty string when there is no dot),
   # with the dot re-attached.
   return '.' + site_name.partition('.')[2]
#


def filter_sites(ce_raw, egee_map):
   '''
   Filters a raw map of grid CEs into an EGEEMap object with sites and
   clusters. Every site is associated with exactly one cluster, named after
   the site's domain. Whenever two or more CEs on the same domain report
   identical CPU counts, those CEs share a single cluster (duplicate
   detection heuristic). Whenever differing CPU counts are reported on one
   domain, the largest count is taken as the cluster size (merge heuristic).

   @param ce_raw: Raw CE map, as may be found in a SiteParser object's
                  ce_raw field after calling the parse() method
   @param egee_map: Output EGEEMap object to be populated
   '''
   per_domain = {}    # domain: largest cpu_count seen so far
   for ce_name, info in ce_raw.items():
      dom = domain(ce_name)
      count = info['cpu_count']
      if dom in per_domain:
         best = per_domain[dom]
         if count == best:
            # Identical size on the same domain: assume shared hardware
            egee_map.duplicate_cpus += count
         elif count > best:
            # Bigger report wins: merge up to the larger size
            egee_map.total_cluster_cpu_count += count - best
            per_domain[dom] = count
      else:
         # First cluster seen on this domain
         per_domain[dom] = count
         egee_map.total_cluster_cpu_count += count
      #
      egee_map.ce_map[ce_name] = { 'scheduler': info['scheduler'], \
                                   'cluster': 'cluster' + dom }
   #
   # Emit one cluster per domain, sized by the largest report
   for dom, count in per_domain.items():
      egee_map.cluster_map['cluster' + dom] = count
   #
#


def write_map_delta(mw, egee_map_to, egee_map_from=None):
   '''
   Writes a timestamp followed by changes to the grid observed in egee_map_to
   relative to egee_map_from. An initial grid map may be written by invoking
   this method without egee_map_from.

   CAUTION: egee_map_from will be DESTROYED in the process of performing the
   comparison

   @param mw: MapWriter instance used for output
   @param egee_map_to: Later grid map: will not be changed
   @param egee_map_from: Earlier grid map: WILL BE DESTROYED. When omitted
                         (or None), a fresh empty EGEEMap is used, so every
                         cluster and site in egee_map_to is written as new.

   @raise Exception: if egee_map_to is older than egee_map_from
   '''
   # BUG FIX: the original signature used a mutable default argument
   # (egee_map_from=EGEEMap()), which is created ONCE at definition time and
   # then shared -- and mutated -- by every call that omits the argument.
   # A None sentinel gives each call its own fresh empty map.
   if egee_map_from is None:
      egee_map_from = EGEEMap()
   #
   if egee_map_to.epoch < egee_map_from.epoch:
      raise Exception('Attempting to write maps out of time order')
   mw.write_time(egee_map_to.epoch)
   # Summary metadata for the snapshot being written
   mw.write_line_comment('GRIF-LAL file: %s' % egee_map_to.filename)
   mw.write_line_comment('total ce cpu count: %d' % \
                         egee_map_to.total_ce_cpu_count)
   mw.write_line_comment('total cluster cpu count: %d' % \
                         egee_map_to.total_cluster_cpu_count)
   mw.write_line_comment('collisions: %d' % egee_map_to.collisions)
   mw.write_line_comment('duplicate CPUs: %d' % egee_map_to.duplicate_cpus)
   mw.write_blank()
   # Handle clusters first
   for cluster in egee_map_to.cluster_map:
      to_cpu = egee_map_to.cluster_map[cluster]
      if cluster not in egee_map_from.cluster_map:
         mw.write_line_comment('new cluster: %s' % cluster)
         mw.grow_cluster(cluster, to_cpu)
         mw.write_blank()
      else:
         cpu_diff = to_cpu - egee_map_from.cluster_map[cluster]
         if cpu_diff > 0:
            mw.grow_cluster(cluster, cpu_diff)
         elif cpu_diff < 0:
            mw.shrink_cluster(cluster, -1*cpu_diff)
         # Mark this cluster as seen; leftovers in the "from" map below
         # correspond to clusters that disappeared from the grid
         del egee_map_from.cluster_map[cluster]
   #
   for cluster in egee_map_from.cluster_map:
      # Clusters not already deleted from map were not in the "to" map
      mw.write_line_comment('cluster removed: %s' % cluster)
      mw.shrink_cluster(cluster, egee_map_from.cluster_map[cluster])
      mw.write_blank()
   #
   #
   # Sites
   for site in egee_map_to.ce_map:
      to_sched = egee_map_to.ce_map[site]['scheduler']
      to_cluster = egee_map_to.ce_map[site]['cluster']
      if site not in egee_map_from.ce_map:
         mw.write_line_comment('new site: %s' % site)
         mw.add_ce(site, to_cluster, to_sched)
         mw.write_blank()
      else:
         from_sched = egee_map_from.ce_map[site]['scheduler']
         from_cluster = egee_map_from.ce_map[site]['cluster']
         if (to_sched != from_sched) or (to_cluster != from_cluster):
            # Remove, then add, or simulation-time exceptions will occur
            mw.write_line_comment('site %s changed:' % site)
            mw.write_line_comment('* from scheduler %s to scheduler %s' % \
                                  (from_sched, to_sched))
            mw.write_line_comment('* from cluster %s to cluster %s' % \
                                  (from_cluster, to_cluster))
            mw.remove_ce(site)
            mw.add_ce(site, to_cluster, to_sched)
            mw.write_blank()
         del egee_map_from.ce_map[site]
   #
   for site in egee_map_from.ce_map:
      # Sites that have disappeared
      mw.write_line_comment('site removed: %s' % site)
      mw.remove_ce(site)
      mw.write_blank()
   #
#


if __name__ == '__main__':
   import optparse, sys

   op = optparse.OptionParser()
   op.add_option('-o', '--output', dest='output', \
                 help='write output to FILE', metavar='FILE')
   op.add_option('-c', '--comment', dest='comment', \
                 help='set metadata comment to COMMENT', metavar='COMMENT')
   opts, filenames = op.parse_args()

   if (len(filenames) < 1) or (opts.output is None):
      # sys.stderr.write works under both Python 2 and Python 3; the
      # original "print >> sys.stderr" statement is a syntax error under
      # Python 3 and made the whole module unparseable there.
      sys.stderr.write('Usage: %s -o <output_file> [-c <comment>] '
                       '<in_file_1> [... <in_file_N>]\n' % sys.argv[0])
      sys.stderr.write('Input files must be specified in time order!\n')
      sys.exit(2)
   #

   fh = open(opts.output, 'w')
   try:
      mw = MapWriter(fh, normalize_time=True)

      if opts.comment is not None:
         mw.write_meta_comment(opts.comment)
      #
      mw.write_meta('input_count', len(filenames))

      last_egee_map = EGEEMap()   # empty map for starting
      for fn in filenames:
         # Close each input file promptly (the original leaked the handle)
         in_fh = open(fn, 'rb')
         try:
            p = SiteParser(in_fh)
            p.parse()
         finally:
            in_fh.close()

         egee_map = EGEEMap()
         # epochs: filenames have format GRIF-LAL:YYYYMMDD-HHhMM:bdii.lis
         parts = fn.split(':')
         egee_map.epoch = timestamp_to_gmtime(parts[1], '%Y%m%d-%Hh%M')
         egee_map.filename = fn
         egee_map.sched_names = p.sched_names[:]
         egee_map.total_ce_cpu_count = p.total_ce_cpu_count
         egee_map.collisions = p.collisions

         filter_sites(p.ce_raw, egee_map)
         write_map_delta(mw, egee_map, last_egee_map)
         last_egee_map = egee_map
      #
   finally:
      # Ensure the output file is flushed/closed even on error
      fh.close()
#
