#   This program is free software; you can redistribute it and/or modify
#   it under the terms of the version 3 of the GNU Lesser General Public License
#   as published by the Free Software Foundation.
#
#   This program is distributed in the hope that it will be useful,
#   but WITHOUT ANY WARRANTY; without even the implied warranty of
#   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#   GNU General Public License for more details.
#
#   You should have received a copy of the GNU Lesser General Public License
#   along with this program; if not, write to the Free Software
#   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
#
# Copyright (c) NEC Deutschland GmbH, NEC HPC Europe
#
import logging
import time
from aggmon.consumer import Consumer
from aggmon.aggmon_component import Component
from basic_types.metric import Metric
from aggmon.importers.torque_accounting_log.torque_accounting_log_importer import TORQUE_LOG_MARKER
from aggmon.aggmon_component import Components
from threading import Lock


class JobNodeCache():
    """Cache mapping currently running job IDs to the nodes they occupy.

    The cache is seeded by replaying recent job events from the metric
    database and then kept up to date via add_event(). All cache access
    is guarded by cache_lock, so lookups are safe from other threads.
    """

    # Most recently constructed instance (singleton-style access point).
    instance = None

    class JobRecord():
        """One running job: its start time and the compute nodes it uses."""

        def __init__( self, start=None, cnodes=None ):
            # 'cnodes=None' avoids the shared-mutable-default pitfall of the
            # original 'cnodes=[]'; omitting the argument still yields a
            # fresh, independent empty list per record.
            self.start = start
            self.cnodes = cnodes if cnodes is not None else []

        def __repr__( self ):
            # '%s' instead of '%d' so a record with the default start=None
            # is still printable instead of raising TypeError.
            return "%s(%s, %s)" % (self.__class__.__name__, self.start, str( self.cnodes ))

    def __init__( self ):
        self.cache = {}    # dict of JobRecords belonging to currently running jobs
        self.cache_lock = Lock()
        JobNodeCache.instance = self
        # Locate the metric database component. Without one we cannot replay
        # past job events, so fail loudly with a clear message instead of the
        # obscure NameError the original raised when no match was found.
        db = None
        for component_name in Components.alive.keys():
            if component_name.startswith( "MetricDatabase" ):
                db = Components.alive[component_name].instance
                break
        if db is None:
            raise RuntimeError( "JobNodeCache: no MetricDatabase component found" )
        # replay job events from database starting at start_s
        # Note: (now - start_s) must be bigger than the max time a job is allowed to run to not miss jobs
        start_s = int( time.time() ) - 3600 * 7
        metrics = db.local_getJobMetrics( None, None, start_s, 0 )
        for metric in metrics:
            self.add_event( metric )

    def add_event( self, metric ):
        """Update the cache from one job event metric.

        Start-like events (S=start, R=rerun, T=restart) insert or replace
        the job's record; end-like events (E=exit, C=checkpoint/stop)
        remove it. All other event types are ignored.
        """
        # add started, rerun or restarted jobs
        if metric.value in (TORQUE_LOG_MARKER["S"], TORQUE_LOG_MARKER["R"], TORQUE_LOG_MARKER["T"]):
            new_job_record = self.JobRecord( metric.start, metric.cnodes )
            with self.cache_lock:
                self.cache[metric.name] = new_job_record
        # remove ended and checkpointed (stopped) jobs
        elif metric.value in (TORQUE_LOG_MARKER["E"], TORQUE_LOG_MARKER["C"]):
            with self.cache_lock:
                # The job may legitimately be unknown (e.g. it started before
                # the replay window); pop() with a default handles exactly
                # that, unlike the original bare 'except' which could also
                # silently hide real bugs.
                self.cache.pop( metric.name, None )

    def get_jobs_by_node( self, node_name ):
        """Return a list of job IDs currently running on node_name."""
        with self.cache_lock:
            jobs = [jobid for jobid, r in self.cache.items() if node_name in r.cnodes]
        return jobs

    def get_nodes_by_job( self, jobid ):
        """Return the list of node names used by jobid ([] if unknown)."""
        with self.cache_lock:
            if jobid in self.cache:
                nodes = self.cache[jobid].cnodes
            else:
                nodes = []
        return nodes


class JobNodeCacheConsumer(Consumer):
    """Consumer that feeds incoming job event metrics into a JobNodeCache."""

    def __init__( self, input_channel=None, topic="#", hierarchy=None ):
        self._topic = topic
        # Consumer.__init__ returns None like any __init__; the original
        # stored that None in self.consumer. Call it for its side effects
        # and keep the attribute (still None) for backward compatibility.
        Consumer.__init__( self, hierarchy=hierarchy, input_channel=input_channel, topic=topic )
        self.consumer = None
        self.jobnodecache = JobNodeCache()
        logging.info( "JobNodeCache %s will cache node names for job IDs" % type(self).__name__ )

    def handleEvent( self, metric, topic=None ):
        """Forward Metric instances to the job/node cache; ignore other payloads."""
        if isinstance( metric, Metric ):
            self.jobnodecache.add_event( metric )


class JobNodeCacheComponent(Component):
    """Component wrapper managing the lifecycle of a JobNodeCacheConsumer."""

    def __init__( self, *args, **kwds ):
        Component.__init__( self, *args, **kwds )
        # Keyword arguments that may be forwarded to the consumer on instantiate().
        self.allowed_kwds = ["input_channel", "topic", "hierarchy"]

    def instantiate( self, *args, **kwds ):
        """Create the consumer instance; log failures before propagating them."""
        logging.debug( "instantiating: %s" % self.name )
        try:
            self.instance = JobNodeCacheConsumer( *args, **kwds )
        except Exception:
            # Bare 'raise' preserves the original traceback; the original
            # Python-2-only 'except Exception, e: raise e' truncated it
            # and added no handling.
            logging.exception( "failed to instantiate: %s" % self.name )
            raise

    def stop( self ):
        """Mark the component stopped and unsubscribe the running consumer."""
        if self.instance.is_running():
            self.state = Component.STOPPED
            self.instance.unsubscribe()
