#!/usr/bin/env python
#
# Output pipeline component for suppressing duplicate results
#
# Author:   Mike Murphy <mamurph@cs.clemson.edu>
# Revision: 23 September 2009
#
#   Copyright 2009 Clemson University
#
#   Licensed under the Apache License, Version 2.0 (the "License");
#   you may not use this file except in compliance with the License.
#   You may obtain a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
#   Unless required by applicable law or agreed to in writing, software
#   distributed under the License is distributed on an "AS IS" BASIS,
#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#   See the License for the specific language governing permissions and
#   limitations under the License.
#
# This material is based upon work supported under a National Science
# Foundation Graduate Research Fellowship. Any opinions, findings,
# conclusions or recommendations expressed in this publication are those of
# the author(s) and do not necessarily reflect the views of the National
# Science Foundation.

'''
Output pipeline component used for suppressing duplicate results. Two results
are considered duplicates if all result data values are the same (timestamps
may differ).
'''

from kernel import Output


class DuplicateFilter(Output):
   '''
   Output handler that prevents duplicate results from being transmitted
   downstream. Two results are considered duplicates if result.same_as()
   reports them equal (timestamps may differ). This handler is designed to
   be used with a downstream output handler in the pipeline and is not
   intended to be a final output destination.
   '''
   def __init__(self, output_handler):
      '''
      @param output_handler: Downstream output handler instance (required)
      '''
      Output.__init__(self, output_handler)
      # Maps result_type -> (most recent result of that type, length of the
      # current run of consecutive duplicate results of that type)
      self.last_written = {}
   #
   # Override docstrings on the handlers
   def handle_append(self, result):
      '''
      Does nothing in this implementation and is not called, since append()
      is overridden directly by this class.
      '''
      pass
   #
   def handle_close(self):
      '''
      Does nothing in this implementation and is not called, since close()
      is overridden directly by this class.
      '''
      pass
   #
   # Override the core append/close methods, since pipelining needs to be
   # suppressed on duplicates
   def append(self, result):
      '''
      Appends a result to the DuplicateFilter. If the result is determined to
      be different than the previous result, then it is passed downstream when
      the next non-duplicate result arrives. When a sequence of identical
      results is observed, both the first and the last results are passed
      downstream, so that timestamps will be correct when drawing constant
      graph segments. Intermediate duplicates will be discarded.

      @param result: Result to submit to the filter
      '''
      rtype = result.result_type
      write = True
      if rtype in self.last_written:
         last, count = self.last_written[rtype]
         if result.same_as(last):
            # Duplicate: remember the newest instance (so its timestamp is
            # available later) and suppress downstream transmission
            count += 1
            self.last_written[rtype] = (result, count)
            write = False
         else:
            if count > 1:
               # Write the last of the set of identical results, so that
               # constant graph segments will be correct
               self.output_handler.append(last)
      if write:
         # New (non-duplicate) value: start a fresh run and pass it through
         self.last_written[rtype] = (result, 1)
         self.output_handler.append(result)
   #
   def close(self):
      '''
      Writes the final duplicate endpoints where sequences of duplicates have
      been observed (if any have been observed), then closes all output
      handlers in the downstream portion of the pipeline.
      '''
      for key in self.last_written:
         last, count = self.last_written[key]
         if count > 1:
            # Flush the trailing endpoint of an unterminated duplicate run
            self.output_handler.append(last)
      self.output_handler.close()
   #
#
