#   This program is free software; you can redistribute it and/or modify
#   it under the terms of the version 3 of the GNU Lesser General Public License
#   as published by the Free Software Foundation.
#
#   This program is distributed in the hope that it will be useful,
#   but WITHOUT ANY WARRANTY; without even the implied warranty of
#   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#   GNU General Public License for more details.
#
#   You should have received a copy of the GNU Lesser General Public License
#   along with this program; if not, write to the Free Software
#   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
#
# Copyright (c) NEC Deutschland GmbH, NEC HPC Europe
#
# $Id$
import logging
import os
import sys
import time

from aggmon.databases.metric.time_series_database import TimeSeriesDatabase, NS_IN_S
from aggmon.databases.metric.metric_store import MetricStore
from aggmon.databases.metric.log import LOG
from aggmon.databases.metric.rrd import RRD
from aggmon.databases.metric.job import JOB
from mon_hierarchy import Hierarchy
from basic_types.metric import Metric, MetricSet
from basic_types.marshaller.fs_marshaller import FSMarshaller
from basic_types.marshaller.fs_unmarshaller import FSUnmarshaller
from basic_types.helpers import md5sum_file

# BUGFIX: __all__ must be a sequence of names; a bare string makes
# "from ... import *" iterate over it character by character.
__all__ = ["FSMetricStore"]


class FSMetricStore(MetricStore):

    kwds_allowed = ["database_path"]

    def __init__( self, metric_database, args, database_path="", **kwds ):
        self.group_path = args[0]
        self.metric_database = metric_database
        self.__database_path = os.path.join( database_path, self._flattenGroupPath( self.group_path ) )

        logging.info( "Instantiating metrics db (%s) in %s" % (self.__class__.__name__, self.__database_path) )
        if not os.path.exists( self.__database_path ):
            try:
                os.makedirs( self.__database_path )
            except Exception, e:
                logging.error( "Failed to create database directory %s" % self.__database_path )
                logging.error( e )
                sys.exit(1)

    def __getMetricsDirPathByHostName( self, host_name ):
        """
        Generates the string of that path, where the metrics
        of host "host_name" will be stored.
        Returns a path where the name of the host is added at the end.
        """
        if host_name == Hierarchy.ROOT_PATH:
            return os.path.join( self.__database_path, Hierarchy.ROOT_NAME )
        host_name = host_name.split( "/" )[-1]
        return os.path.join( self.__database_path, host_name )

    def _getTimeSeriesInfo( self, host_name, metric_name ):
        hs = self.metric_database.metric_cache.get_host_state( host_name )
        if hs is not None:
            ts_handle = hs.get_ts_handle( metric_name )
        else:
            hs = self.metric_database.metric_cache.add_host_state( host_name )
        if ts_handle is None:
            ts_handle = self._initTimeSeriesInfo( host_name, metric_name, **self.ts_options )
            hs.set_ts_handle( metric_name, ts_handle )
        return ts_handle

    def _initTimeSeriesInfo( self, host_name, metric_name, **options ):
        metric_dir_path = os.path.join( self.__getMetricsDirPathByHostName( host_name ), metric_name )
        if not os.path.exists( metric_dir_path ):
            return None
        # ask database what type (RRD, LOG, JOB) it is at metric_dir_path
        try:
            ts_info = TimeSeriesDatabase.read_ts_info( metric_dir_path )
        except IOError:
            # there is no ts_info file, also no ts_handle, maybe this has no time-series?
            if os.path.exists( os.path.join( metric_dir_path, "no_log" ) ):
                return "NOTS"
            else:
                return None
        except TypeError, e:
            # problem with evaluating the ts_info, i.e. the dict is broken
            logging.error( "Could not retrieve ts_info for %s, file could be corrupt. %s" % (metric_dir_path, str(e)) )
            return None
        else:
            ts_class = ts_info["class"]
            ts_path = ts_info["path"]
            del ts_info["class"]
            if len( options ) > 0:
                options["path"] = ts_path
            else:
                options = ts_info
            if ts_class == "RRD":
                ts_handle = RRD( **options )
            elif ts_class == "LOG":
                ts_handle = LOG( **options )
            elif ts_class == "JOB":
                ts_handle = JOB( **options )
            else:
                logging.error( "Unknown time-series class: %s" % ts_class )
                return None
        return ts_handle

    def addMetric( self, metric ):
        metrics_dir_path = self.__getMetricsDirPathByHostName( metric.host )
        if not os.path.exists( metrics_dir_path ):
            os.makedirs( metrics_dir_path )

        # we can't deal with metrics which have "/" in the name therefore replace this string
        metric.name = metric.name.replace( "/", "slash" )

        # check for cached marshallers
        host_name = metric.host
        hs = self.metric_database.metric_cache.get_host_state( host_name )
        (marshaller, __unmarshaller) = hs.get_marshaller_unmarshaller()
        if marshaller is None:
            marshaller = FSMarshaller( metrics_dir_path )
            hs.set_marshaller_unmarshaller( marshaller=marshaller )

        # in case metrics arrive with a zero time stamp add the current time
        if metric.time == 0:
            metric.time = time()

        logging.debug( "%s handle metric, %s" % (self.__class__.__name__, str( metric.__dict__ )) )

        # append to or insert into database, on append also remember the whole metric in cache
        op, stripped_metric, old_metric = self.metric_database.metric_cache.test_append_and_strip( metric )
        if op == "append":
            # marshal metric into metrics_dir_path
            marshaller.writeStruct( metric.name, stripped_metric )

        # add/update metric value in time-series database at metrics_dir_path
        metric_dir_path = os.path.join( metrics_dir_path, metric.name )
        if not os.path.exists( metric_dir_path ):
            logging.error( "metric dir path %s does not exist!" % metric_dir_path)
            logging.error( "op=%s metric=%s stripped_metric=%s old_metric=%s" % (op, repr(metric), repr(stripped_metric), repr(old_metric)))
            raise ValueError( metric_dir_path + " does not exist" )

        # don't put into time-series if "no_log" attribute is set
        if hasattr( metric, "no_log" ):
            return

        # is ts_handle already in cache?
        ts = hs.get_ts_handle( metric.name )
        if ts is None:
            # numerical type metrics are put into RRD, string and job type metrics are logged
            if self.metric_database.isNumType( metric ):
                metric_type = "RRD"
            elif self.metric_database.isLogType( metric ):
                metric_type = "LOG"
            elif self.metric_database.isJobType( metric ):
                metric_type = "JOB"
            else:
                logging.error( "MetricDatabase: skipping: unsupported value data type '%s' for %s" % (type( metric.value ).__name__, repr( metric ))  )
                # TODO: remove this exception later, when we understood why this happens
                raise
                return
            # create the ts handle
            ts = eval( "%s( metric_dir_path )" % metric_type )
            hs.set_ts_handle( metric.name, ts )
            # and write the file!
            ts._write_ts_info()

            # debug check
            if not isinstance( ts, TimeSeriesDatabase ):
                logging.error( "1. ts is instance of %s instead of TimeSeriesDatabase: %s, %s" % (str( type( ts ) ), ts, metric.name) )
                raise TypeError( "ts has wrong type: %s" % str( type( ts ) ) )

        #print( "MetricDatabase: %s %s %s" % (op, metric_type, repr( metric )) )   # leave this in for tests with multiprocessing
        if not isinstance( ts, TimeSeriesDatabase ):
            logging.error( "2. ts is instance of %s instead of TimeSeriesDatabase: %s, %s" % (str( type( ts ) ), ts, metric.name) )
            raise TypeError( "ts has wrong type: %s" % str( type( ts ) ) )
        args = [ long( metric.time * NS_IN_S ), metric.value ]
        kwds = {}
        if self.metric_database.isLogType( metric ) and hasattr( metric, "output" ):
            kwds["output"] = metric.output
        if op == "append":
            ts.append( *args, **kwds )
        elif op == "insert":
            ts.insert( *args, **kwds )

    def clearMetric( self, host_name, metric_name ):
        metric_dir_path = os.path.join( self.__getMetricsDirPathByHostName( host_name ), metric_name )
        if os.path.exists( metric_dir_path ):
            os.unlink( metric_dir_path )

    def getSummary( self, path ):
        query_path = os.path.join( self.__database_path, path.lstrip( "/" ) )
        summary = []
        if os.path.exists( query_path ):
            entries = os.listdir( query_path )
            entries.sort()
            for entry in entries:
                entry_path = os.path.join( query_path, entry )
                if os.path.isdir( entry_path ):
                    summary.append( { "name": entry, "type": "dir" } )
                elif os.path.isfile( entry_path ):
                    try:
                        fobj = file( entry_path, "rb" )
                    except:
                        logging.error( "Failed to open file %s" % entry_path )
                        continue
                    md5sum = md5sum_file( fobj)
                    fobj.close()
                    summary.append( { "name": entry, "type": "file", "md5": md5sum } )
        return summary

    def getHostNames( self ):
        host_names = []
        if os.path.exists( self.__database_path ):
            host_names = os.listdir( self.__database_path )
        return host_names

    def getLastMetricsByHostName( self, host_name ):
        metrics = MetricSet()
        metrics_dir_path = self.__getMetricsDirPathByHostName( host_name )
        if os.path.exists( metrics_dir_path ):
            for metric_name in os.listdir( metrics_dir_path ):
                try:
                    metric = self.getLastMetricByMetricName( host_name, metric_name )
                except Exception, e:
                    logging.error( "ignoring failed current metric '%s' for '%s' (%s)" % (metric_name, host_name, str(e)) )
                    continue
                metrics.append( metric )
        return metrics

    def getLastMetricByMetricName( self, host_name, metric_name ):
        metric = None
        metrics_dir_path = self.__getMetricsDirPathByHostName( host_name )
        if os.path.exists( metrics_dir_path ):
            hs = self.metric_database.metric_cache.get_host_state( host_name )
            (__marshaller, unmarshaller) = hs.get_marshaller_unmarshaller()
            if unmarshaller is None:
                unmarshaller = FSUnmarshaller( metrics_dir_path, depth=1 )
                hs.set_marshaller_unmarshaller( unmarshaller=unmarshaller )
            if not metric_name.startswith( "." ):
                metric = Metric(host=host_name, name=metric_name, source="none", time=0, value=0)
                # TODO: handle possible exceptions (ValueError) or pass up
                unmarshaller.readStruct( metric_name, metric )
                metric.name = metric_name
        return metric

    def getMetricNames( self, host_name ):
        metric_names = []
        metrics_path = self.__getMetricsDirPathByHostName( host_name )
        if os.path.exists( metrics_path ):
            metric_names = os.listdir( metrics_path )
        return metric_names

    def getJobMetrics( self, host_name=None, metric_name=None, start_s=0, end_s=0 ):
        return []

    def getRecordsByMetricName( self, host_name, metric_name, start_s=0, end_s=0, nsteps=0, step_s=0 ):
        ts_handle = self._getTimeSeriesInfo( host_name, metric_name )
        if not isinstance( ts_handle, TimeSeriesDatabase ):
            return []
        if end_s == 0:
            if start_s == 0 and step_s == 0:
                return ts_handle.fetch_until( time.time() * NS_IN_S, nsteps=int(nsteps) )
            else:
                return ts_handle.fetch_from( long( float(start_s) ) * NS_IN_S, nsteps=int(nsteps), step_ns=long( float(step_s) ) * NS_IN_S )
        else:
            return ts_handle.fetch_range( long( float(start_s) ) * NS_IN_S, long( float(end_s) ) * NS_IN_S, nsteps=int(nsteps), step_ns=long( float(step_s) ) * NS_IN_S )

    def getTimeSeriesType( self, host_name, metric_name ):
        ts_handle = self._getTimeSeriesInfo( host_name, metric_name )
        if isinstance( ts_handle, TimeSeriesDatabase ):
            return ts_handle.__class__.__name__
        return ts_handle
