#   This program is free software; you can redistribute it and/or modify
#   it under the terms of the version 3 of the GNU Lesser General Public License
#   as published by the Free Software Foundation.
#
#   This program is distributed in the hope that it will be useful,
#   but WITHOUT ANY WARRANTY; without even the implied warranty of
#   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#   GNU General Public License for more details.
#
#   You should have received a copy of the GNU Lesser General Public License
#   along with this program; if not, write to the Free Software
#   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
#
# Copyright (c) NEC Deutschland GmbH, NEC HPC Europe
#
# $Id$
import logging
import time
from threading import Thread
from Queue import Queue

from aggmon.aggmon_component import Component
from aggmon.consumer import Consumer
from aggmon.direct_rpc_server import DirectRPCServer
from aggmon.metric_cache import MetricCache
from basic_types.factory import Factory
from basic_types.key_value_object import KeyValueObject
from basic_types.metric import Metric
from databases.metric.metric_stores import *
from mon_hierarchy import Hierarchy
#from multiprocessing import Process, JoinableQueue

# Constants
# Lifecycle states of a MetricDatabase instance (see dbWorker for handling):
DB_STATE_OFFLINE = 0     # incoming metrics are accumulated in the replay log, not stored
DB_STATE_ONLINE = 1      # incoming metrics are written to the store directly
DB_STATE_SYNC_SEND = 2   # sending sync data; incoming metrics are logged meanwhile
DB_STATE_SYNC_RECV = 3   # receiving sync data (currently not branched on in dbWorker)


__all__ = [ "MetricDatabase", "MetricDatabaseComponent", "DBControlMsg", "DB_STATE_OFFLINE", "DB_STATE_ONLINE", "DB_STATE_SYNC_SEND", "DB_STATE_SYNC_RECV" ]


class DBControlMsg(KeyValueObject):
    """
    Control message for the DB worker thread: carries a command string
    (e.g. "set_online", "set_offline") plus optional positional arguments.
    """
    def __init__( self, command="", *args ):
        self.command, self.args = command, args

Factory.set_factory( "DBControlMsg", DBControlMsg, __name__ )


class LocalRemoteServer():
    """
    Helper class for the direct rpc server, providing remote access to the database.

    One instance exists per "local_*" method of MetricDatabase. It registers
    itself with the DirectRPCServer under the stripped method name and
    dispatches incoming calls either to a local MetricDatabase instance or,
    via direct RPC, to the master of the addressed group.
    """
    def __init__( self, name ):
        # name: the RPC/method name without the "local_" prefix
        self.name = name
        DirectRPCServer.register_rpc( name, self.server )

    def server( self, group_path=None, *args, **kwds ):
        # entry point invoked by the RPC server; forward to the registered helper
        return MetricDatabase.localremotes[self.name].local_or_remote( group_path, *args, **kwds )

    def local_or_remote( self, group_path=None, *args, **kwds ):
        """
        Dispatch the call named self.name for group_path: run it on the local
        MetricDatabase instance if one exists for that path, otherwise forward
        it via direct RPC to the master of the addressed group.
        Returns the call's result, or None if the group cannot be located.
        """
        if group_path is None:
            # TODO: this is wrong! self.__group_path is never set on this class
            # (the name-mangled lookup raises AttributeError); presumably the
            # calling DB instance's group path was intended here.
            group_path = self.__group_path

        if group_path in MetricDatabase.instances:
            # query a local database instance; resolve the "local_<name>"
            # method via getattr instead of eval() for safety and clarity
            method = getattr( MetricDatabase.instances[group_path], "local_%s" % self.name )
            return method( *args, **kwds )
        else:
            # LIMITATION: this only works with unique host names within the cluster, this is
            # a limitation of the direct RPC
            # find master(s) of addressed host / group_path from hierarchy
            db = MetricDatabase.instances.values()[0]
            target_group = None
            for group in db.getHierarchy().get_groups():
                if group.get_name() == group_path:
                    target_group = group
                    break
            if target_group is not None:
                # use target_group, not the loop variable, so this stays correct
                # even if the loop above is ever restructured
                master_of_group = target_group.get_master_names()[0].split( "/" )[-1]
                call_args = [ self.name, group_path ]
                call_args.extend( args )
                return DirectRPCServer.client( " ".join( call_args ), master_of_group )
            else:
                logging.warning( "Could not find group '%s' in hierarchy. Bogus DB query!" % group_path )
                return None


class MetricDatabase(Consumer):
    """
    Database for metrics. There are two basic types of metrics that can be
    handled by this database: numerical metrics containing float, int and
    long values and log metrics containing string and unicode values.

    Each instance is bound to one hierarchy group path. All database work is
    serialized through a single worker thread that consumes events from a
    Queue (see dbWorker), so the store never sees concurrent writes.
    """
    # class-level registries:
    # instances    : MetricDatabase instances indexed by their group_path
    # localremotes : LocalRemoteServer helpers indexed by RPC name
    instances = {}
    localremotes = {}

    def __init__( self, group_path="/", state=DB_STATE_OFFLINE, ts_options=None, **kwds ):
        """
        group_path : hierarchy path this DB is responsible for; must start with "/"
        state      : initial DB state, one of the DB_STATE_* constants
        ts_options : dict with time-series options (rotation_period, keep_exact, ...)
        kwds       : "store" selects the metric store class by name; store-specific
                     keywords are stripped, the remainder goes to Consumer.__init__
        """
        self.__state = state
        self.__log = []   # accumulates requests when db state is offline
        # avoid the shared-mutable-default pitfall: each instance gets its own dict
        self.ts_options = ts_options if ts_options is not None else {}

        # build the subscription topic covering everything below this group
        topic = Hierarchy.path_to_topic( group_path )
        if len( topic ) > 0:
            topic = topic + "."
        topic = topic + "*.*.*"
        if "topic" in kwds:
            del kwds["topic"]
        if "store" in kwds:
            args = [group_path]
            # resolve the store class by name from this module's namespace
            # (brought in by "from databases.metric.metric_stores import *");
            # avoids eval() on a configuration-provided string
            store_cls = globals()[kwds["store"]]
            self.store = store_cls( self, args, **kwds )
            logging.debug( "MetricDatabase using store: %s" % self.store.__class__.__name__ )
            del kwds["store"]
            # strip store-specific keywords before handing kwds to Consumer
            for kwd in self.store.kwds_allowed:
                if kwd in kwds:
                    del kwds[kwd]

        assert group_path.startswith( "/" ), "In DB: group path %s doesn't start with '/'" % group_path
        self._group_path = group_path
        self.db_work = Queue()
        # start exactly one worker thread to not break the serialization of the database
        # a worker thread decouples things here so that the foreign call-backs are not
        # done in the path of the connection's IO-loop
        self.db_thread = Thread( target=self.dbWorker, args=(self.db_work,), name="DBworker_"+self._group_path )
        self.db_thread.daemon = True
        self.db_stopping = False
        self.db_thread.start()

        # NOTE(review): Consumer.__init__ returns None, so self.consumer is
        # always None; kept for backward compatibility with external readers
        self.consumer = Consumer.__init__( self, topic=topic, **kwds )
        MetricDatabase.instances[group_path] = self
        self.metric_cache = MetricCache( self )
        self.metric_cache.load_state()
        #self.metric_cache.dump()
        # give the consumer a moment to settle before going online
        # TODO(review): replace this fixed sleep with a proper readiness signal
        time.sleep( 3 )
        self.local_dbSetOnline()

    @staticmethod
    def cleanup( ):
        """
        Stop all MetricDatabase instances on this node in a controlled way.
        """
        for db in MetricDatabase.instances.values():
            db._stop()

    # various helper functions
    @staticmethod
    def isJobType( val ):
        """
        True if val (a dict or Metric) carries a string value and no "output"
        attribute, i.e. a job-typed metric. Parentheses make the intended
        and/or grouping explicit (and binds tighter than or).
        """
        return ( isinstance( val, dict )
                 and "value" in val
                 and isinstance( val["value"], (str, unicode) )
                 and "output" not in val ) \
            or ( isinstance( val, Metric )
                 and isinstance( val.value, (str, unicode) )
                 and "output" not in val.__dict__ )

    @staticmethod
    def isLogType( metric ):
        """
        True if the metric carries a string value and has an "output" attribute.
        """
        return isinstance( metric.value, (str, unicode) ) and \
            "output" in metric.__dict__

    @staticmethod
    def isNumType( metric ):
        """
        True if the metric's value is numerical (float, int or long).
        """
        return isinstance( metric.value, (float, int, long) )

    def _initTimeSeriesInfo( self, host_name, metric_name, **options ):
        # thin delegation to the configured store
        return self.store._initTimeSeriesInfo( host_name, metric_name, **options )

    def __registerServers( self ):
        # NOTE(review): this shadows the class-level localremotes dict with an
        # empty instance attribute and is never called from within this file;
        # looks like dead/stale code — verify before removing
        self.localremotes = {}

    def _stop(self):
        """
        Stop the database instance in a controlled way: set the stop flag and
        push a "STOP" sentinel (the sentinel wakes up the blocking queue.get(),
        the flag covers the case of spurious wake-ups), then join the worker.
        """
        self.db_stopping = True
        self.db_work.put( "STOP" )
        self.db_thread.join()
        # fixed: was self.__group_path, which is never set (the attribute is
        # self._group_path) and raised AttributeError here
        logging.info( "DB instance '%s' stopped." % self._group_path )

    def addMetric( self, metric ):
        """
        Add a metric to the metric database.
        """
        #TODO: what if there is no "host" attribute? For derived metrics and such...
        # Suggestion: change attribute "host" to "owner". Owner describes the attribute
        # the metric belongs to, e.g. cluster, rack, host.
        # Another advantage of this could be that owner.source.name will be unique
        # throughout the whole cluster.
        try:
            self.store.addMetric( metric )
        except Exception as e:
            # do not bail out here, this could be a temporary failure only e.g. DB not available during failover
            logging.warning( str( e ) )

    def dbWorker( self, queue ):
        """
        Function executed by worker thread that serializes the database instance's work.
        Pulls events from a queue and processes them. Blocks on queue.get() if no events available.
        Handles Metric events (store or log, depending on state) and DBControlMsg
        events (state transitions). Terminates on the "STOP" sentinel.
        """
        logging.info( "started db worker thread for group %s" % self._group_path )
        q = queue
        while not self.db_stopping:
            event = q.get()
            if event == 'STOP':
                break

            if isinstance( event, Metric ):

                if self.__state == DB_STATE_ONLINE:
                    self.addMetric( event )
                elif self.__state == DB_STATE_SYNC_SEND or self.__state == DB_STATE_OFFLINE:
                    self.__log.append( event )      # serial: only one worker thread

            elif isinstance( event, DBControlMsg ):

                logging.debug( "DB instance '%s': received control message, cmd=%s" % (self._group_path, event.command) )
                if event.command == "set_offline":
                    logging.debug( "DB instance '%s': setting offline" % self._group_path )
                    self.__state = DB_STATE_OFFLINE
                elif event.command == "set_online":
                    # TODO: what if we come from sync_send...

                    # replay logged events
                    logging.debug( "DB instance '%s': replaying %d events" % (self._group_path, len( self.__log)) )
                    while len( self.__log ) > 0:
                        metric = self.__log.pop( 0 )
                        self.addMetric( metric )
                    logging.debug( "DB instance '%s': setting online" % self._group_path )
                    self.__state = DB_STATE_ONLINE

            # acknowledge work done for this request
            q.task_done()
        logging.info( "stopping db worker thread for group %s" % self._group_path )

    def clearMetric( self, host_name, metric_name ):
        """
        Remove all metrics with specified name and host name.
        """
        self.store.clearMetric( host_name, metric_name )

    def handleEvent( self, event, topic=None ):
        """
        This method overrides handleEvent in the base class Consumer and
        simply queues events into the database instance's work queue.
        Topic information is ignored currently.
        """
        self.db_work.put( event )

    # stuff below is the local and remote API
    @staticmethod
    def getDBInstances():
        """
        Lists database instances present on this node. Normally this should return a
        list of group paths for which the current node is a master.
        """
        return MetricDatabase.instances.keys()

    def local_dbSetOffline( self ):
        """
        Set current instance of database offline.
        """
        self.db_work.put( DBControlMsg( "set_offline" ) )

    def local_dbSetOnline( self ):
        """
        Set current instance of database online.
        """
        self.db_work.put( DBControlMsg( "set_online" ) )

    def local_findWhereMetric( self, metric_name, metric_attr, condition, value, recurse=False ):
        """
        Find hosts on which the given metric's attribute fulfills a particular condition.
        Should be fast because it should only look into the cache.
        recurse : search recursively, i.e. ask also all child groups
        Returns a dict of host or group paths : metric
        """
        found = self.metric_cache.locate_metric_condition( metric_name, metric_attr, condition, value )
        if recurse is not False:
            own_group = self.getHierarchy().get_groups( self._group_path )[0]
            sub_group_names = own_group.get_group_names()
            # unfortunately this is synchronous and serial
            for sub_group_name in sub_group_names:
                # query sub_groups for condition
                remote_found = MetricDatabase.localremotes["findWhereMetric"].local_or_remote( sub_group_name, metric_name, metric_attr, condition, value, recurse=True )
                # fixed: the loop variables must not shadow the "value" parameter,
                # otherwise subsequent iterations query with a corrupted value
                for k, v in remote_found.items():
                    found[k] = v
        return found

    def local_getHostNames( self ):
        """
        Look all hosts up for which metrics are stored.
        Returns a list containing host names as strings.
        """
        return self.store.getHostNames()

    def local_getLastMetricsByHostName( self, host_name ):
        """
        Get a MetricSet object that is a list of many Metric objects that
        are attributed to "host_name".
        Note: the Metric objects do just contain one time, value pair, the most recent one!
        Other time, value records could be retrieved with getRecordsByMetricName().
        Returns a list containing Metric objects.
        """
        return self.store.getLastMetricsByHostName( host_name )

    def local_getLastMetricByMetricName( self, host_name, metric_name ):
        """
        Retrieve the metric specified by host_name and metric_name.
        The metric contains the last time and value recorded.
        Returns a single Metric object.
        """
        return self.store.getLastMetricByMetricName( host_name, metric_name )

    def local_getLastSeen( self, host_name=None ):
        """
        Return the last_seen timestamp and age for a host.
        """
        return self.metric_cache.last_seen( host_name )

    def local_getMetricNames( self, host_name ):
        """
        Look all metric names up for a particular host name.
        Returns a list containing available metric names as strings.
        """
        return self.store.getMetricNames( host_name )

    def local_getJobMetrics( self, host_name, metric_name, start_s=0, end_s=0 ):
        """
        Return a list of named metrics of host between timestamp start and end.
        This is intended to be used for job typed metrics that are not stored as time series objects.
        """
        # fixed: forward the caller's start_s/end_s; the old code always passed 0
        return self.store.getJobMetrics( host_name, metric_name, start_s=start_s, end_s=end_s )

    def local_getRecordsByMetricName( self, host_name=None, metric_name=None, start_s=0, end_s=0, nsteps=0, step_s=0 ):
        """
        Retrieve records.
        Each record has two attributes time_ns (time in 10E-9 seconds) and value.
        start_s: timestamp in seconds since epoch of earliest record to be returned
        end_s: timestamp in seconds since epoch of latest record to be returned
        nsteps: number of steps (data points) to attempt to return. Like step_s this will lead to averaging for numeric data.
        step_s: time in seconds giving the minimum time between two consecutive records
        Returns a list containing records, record are instances of a subclass of time_series_database.
        """
        return self.store.getRecordsByMetricName( host_name, metric_name, start_s, end_s, nsteps, step_s )

    def local_getSummary( self, path ):
        """
        A "directory" listing of a path inside the fs serialized metric database.
        TODO: shouldn't this actually be in the serializer functions? The separation between
        database and serializer is not really clean, anyway...
        """
        return self.store.getSummary( path )

    def local_getTimeSeriesType( self, host_name, metric_name ):
        """
        Get time-series type for a metric on a host, i.e. it's class name.
        Returns a string containing the class name of the time series for the metric.
        """
        return self.store.getTimeSeriesType( host_name, metric_name )

# magic for registering direct rpc servers for each function that starts with local_
for m in dir( MetricDatabase ):
    if m.startswith( "local_" ) and callable( getattr( MetricDatabase, m ) ):
        # strip the "local_" prefix with slicing; note that lstrip("local_")
        # removes a *character set* and would also eat leading 'l','o','c','a'
        # characters of the remote name itself
        name = m[len( "local_" ):]
        MetricDatabase.localremotes[name] = LocalRemoteServer( name )
        # expose the dispatcher as a static method on the class (setattr
        # replaces the former exec-based assignment)
        setattr( MetricDatabase, name, staticmethod( MetricDatabase.localremotes[name].local_or_remote ) )

DirectRPCServer.register_rpc( "getDBInstances", MetricDatabase.getDBInstances )


def get_group_names( group_path=None ):
    """
    Get list of group paths.
    If group_path is specified, return list of child groups of this particular group.
    Returns a list of group paths in the system, or list of child groups of a
    particular group, or None if the group could not be found.
    """
    hierarchy = MetricDatabase.instances[MetricDatabase.getDBInstances()[0]].getHierarchy()
    if group_path is None:
        return hierarchy.get_group_names()
    groups = hierarchy.get_groups( group_path )
    # fixed: get_groups() returns a list (it is indexed with [0] elsewhere in
    # this file), so pick the first match before asking for its child names
    if groups:
        return groups[0].get_group_names()
    return None

DirectRPCServer.register_rpc( "hierarchyGroupNames", get_group_names )


class MetricDatabaseComponent(Component):
    """
    Component wrapper around MetricDatabase: manages its lifecycle
    (instantiation, status reporting and controlled shutdown).
    """
    def __init__( self, *args, **kwds ):
        Component.__init__( self, *args, **kwds )
        # keyword arguments accepted when instantiating the wrapped database
        self.allowed_kwds = ["group_path", "hierarchy", "input_channel", "state", "store", "topic", "ts_options"]

    def instantiate( self, *args, **kwds ):
        """
        Create the wrapped MetricDatabase instance. Re-raises on failure
        after logging the reason.
        """
        # lose the reference to any previous instance
        self.instance = None
        try:
            logging.debug( "instantiating: %s" % self.name )
            self.instance = MetricDatabase( *args, **kwds )
        except Exception as e:
            # fixed: include the failure reason (the old message was identical
            # to the debug line) and use a bare raise to keep the traceback
            logging.error( "instantiating %s failed: %s" % (self.name, str( e )) )
            raise

    def status( self ):
        """
        Report the component state: Component.RUNNING or Component.STOPPED.
        """
        # anything smarter here?
        if self.state == Component.RUNNING:
            return Component.RUNNING
        else:
            return Component.STOPPED

    def stop( self ):
        """
        Stop the wrapped database instance if it is still running.
        """
        if self.instance.is_running():
            self.state = Component.STOPPED
            # do this rather through sending a control message?
            self.instance._stop()