#   This program is free software; you can redistribute it and/or modify
#   it under the terms of the version 3 of the GNU Lesser General Public License
#   as published by the Free Software Foundation.
#
#   This program is distributed in the hope that it will be useful,
#   but WITHOUT ANY WARRANTY; without even the implied warranty of
#   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#   GNU General Public License for more details.
#
#   You should have received a copy of the GNU Lesser General Public License
#   along with this program; if not, write to the Free Software
#   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
#
# Copyright (c) NEC Deutschland GmbH, NEC HPC Europe
#
# $Id$

import os
import logging

from string import lower
from basic_types.marshaller import XDRMarshaller
from basic_types.marshaller import XDRUnmarshaller
from basic_types import NumRecord
from glob import glob
from time import time, strftime, gmtime

__all__ = ["TimeSeriesDatabase", "S_IN_DAY", "NS_IN_S"]


# Constants
NS_IN_S = 1000.0 * 1000.0 * 1000.0      # nanoseconds per second (float: timestamps come from time(), a float)
S_IN_DAY = 24 * 3600                    # seconds per day: the default file rotation period
DATABASE_TYPE_FILENAME = "__time_series_db_info__"  # per-directory database metadata file


class TimeSeriesDatabase(object):
    """
    Base class for string (LOG) typed, numerical (RRD) typed and other classes containing time series data.
    There are three levels to condense data before it is finally removed.
    Note, insert and append is only possible to files that are within the keep_exact range!

                        |<---- keep_exact = 1
    20110603000000.rrd     20110604000000.rrd     20110605000000.rrd
    |<-------------->|     <---------------->     |<-------------...          RRD---is-a--->TimeSeriesDatabase
            |
            `---- RRD.fetch_all(step_ns), RRA.insert(), rm rrd----,
                                                                  |
                                                                  V
    20110518000000.rra     20110525000000.rra     20110602000000.rra
    |<-------------->|     <---------------->     |<-------------...          RRA---is-a--->TimeSeriesDatabase
            |
            `---- RRA.fetch_all(step_ns), RRY.insert(), rm rra----,
                                                                  |
                                                                  V
                                                  20110000000000.rry
                                                  |<-------------...          RRY---is-a--->TimeSeriesDatabase

    @param path: path to directory where time series files should be placed
    @param keep_exact: for how many rotation periods to keep exact data. Needed for averaging out RRD data or scrapping old LOG data.
    @param rotation_period: time in seconds after which a new time series file should be created
    @param *args: list of record objects that are being added to the time series. Normally empty.
    """
    def __init__( self, path=None, keep_exact=-1, rotation_period=S_IN_DAY, *args ):
        self._type_name = type( self ).__name__
        assert self._type_name != "TimeSeriesDatabase", \
                "Can't make instance of base class %s! Create instance of subclass!" % self._type_name
        self._last_error = None         # set by fetch_all() when a ts file is truncated/corrupt
        self._dir_path = path
        # file extension is derived from the subclass name, e.g. RRD -> ".rrd"
        # (str.lower() instead of the Python-2-only string.lower())
        self._file_extension = "." + self._type_name.lower()
        self._keep_exact = keep_exact
        self._rotation_period = rotation_period
        self._record_log = []   # buffer for uncommitted records

        # TODO: check consistence of data (to be done outside the init method!)
        self.__verify_db_files()

        # is there a ts database info file?
        ts_info = None
        try:
            ts_info = TimeSeriesDatabase.read_ts_info( self._dir_path )
        except TypeError:
            # something's wrong with the file or its content: delete the file
            logging.error( "found bogus ts info file in %s, removing it." % self._dir_path )
            os.unlink( os.path.join( self._dir_path, DATABASE_TYPE_FILENAME ) )
        except IOError:
            logging.info( "ts info file not found in %s. Will be created." % self._dir_path )

        if self._type_name == "RRD" and ts_info is not None:
            # file is there, compare info with that passed on command line
            if ts_info["rotation_period"] != rotation_period:
                logging.info( "rotation periods for %s differ: old=%d new=%d" % (self._dir_path, ts_info["rotation_period"], rotation_period) )
                # TODO: rebuild time-series files with new rotation period.

        # write the new ts_info
        self._write_ts_info()

        # optionally pre-populate the database with the records passed in *args
        for arg in args:
            # TODO: check if proper record type!
            self.insert_record( arg )


    def __avg_or_step_records( self, records, nsteps=0, step_ns=0 ):
        """
        Condense records either by averaging (numerical NumRecords) or by
        thinning out (all other record types).
        @param records: list of records, assumed sorted ascending by time_ns
        @param nsteps: if > 0, divide the covered time range into nsteps bins
        @param step_ns: bin/step width in 10E-9 seconds (only used if nsteps == 0)
        @return: condensed list of records
        """
        assert isinstance( nsteps, int ), "nsteps must be an integer! It's %s" % type( nsteps ).__name__

        # do nothing if not enough data
        if len( records ) <= 2:
            return records

        # it's not nice to need to know the record type here, but NumRecords are known for
        # several types of TS databases (rrd, rra, rry) and I'm hesitating to implement a method
        # into each of them.
        if isinstance( records[0], NumRecord ):
            return self._avg_records( records, nsteps=nsteps, step_ns=step_ns )
        return self._step_records( records, nsteps=nsteps, step_ns=step_ns )

    @staticmethod
    def _avg_records( records, nsteps=0, step_ns=0 ):
        """
        Average numerical records into bins of step_ns width aligned on
        multiples of step_ns. One NumRecord per non-empty bin is returned,
        time-stamped at the bin center.
        @param records: list of NumRecord, assumed sorted ascending by time_ns
        @param nsteps: if > 0, derive step_ns from the covered time range
        @param step_ns: bin width in 10E-9 seconds
        @return: list of averaged NumRecords (records unchanged if step is 0)
        """
        records_start = records[0].time_ns
        records_end = records[-1].time_ns
        if nsteps > 0:
            # int() truncates exactly like the former long(); Python 2
            # promotes to long automatically when needed
            step_ns = int( ( records_end - records_start ) / nsteps )

        if step_ns == 0:
            return records

        avg_records = []
        # align the first bin down onto a multiple of step_ns
        start_time_ns = int( step_ns * int( records[0].time_ns / step_ns ) )
        end_time_ns = start_time_ns + step_ns
        bin_time_ns = start_time_ns + int( step_ns / 2 )
        s = 0.0
        nitems = 0
        for record in records:
            if start_time_ns <= record.time_ns < end_time_ns:
                # inside current bin: accumulate
                s = s + record.value
                nitems += 1
            elif end_time_ns <= record.time_ns:
                # record is past the current bin: flush the averaged bin, if any data
                if nitems > 0:
                    avg_records.append( NumRecord( bin_time_ns, s / nitems ) )
                # open a new bin aligned at this record's time
                start_time_ns = int( step_ns * int( record.time_ns / step_ns ) )
                end_time_ns = start_time_ns + step_ns
                bin_time_ns = start_time_ns + int( step_ns / 2 )
                s = record.value
                nitems = 1
        # BUGFIX: flush the trailing bin -- it used to be dropped silently,
        # losing the newest data points of every averaged fetch
        if nitems > 0:
            avg_records.append( NumRecord( bin_time_ns, s / nitems ) )
        return avg_records

    def _dump_records( self, records ):
        """
        Just a simple helper function to dump records in a human readable form.
        @param records: list containing elements of NumRecord type
        """
        if not records:
            # nothing to print; also avoids IndexError on records[0]
            return
        t = records[0].time_ns
        for i, record in enumerate( records ):
            delta_t_s = ( record.time_ns - t ) / NS_IN_S
            print( "%5d: %s, delta t = %.3f s" % (i, record, delta_t_s) )
            t = record.time_ns

    def _get_file_list( self, from_ns=0, to_ns=0 ):
        """
        Return all path and filenames for the period between from and to parameters.
        The returned list with pathnames contains already existing files and also
        files that do not yet exist but should be created within append and insert.
        Note that append and insert must not provide both from_ns and to_ns.
        The returned filenames contain a time stamp in the form year (YYYY),
        month (MM), date (DD), hour (HH), minute (MM) and second (SS).
        @param from_ns: Time in 10E-9 seconds. 0 together with to_ns == 0 selects all files.
        @param to_ns: Time in 10E-9 seconds. If from_ns >= to_ns, the single file
                      covering from_ns is selected (its name is synthesized if missing).
        @return: list containing path and filenames in form /path/<YYYYMMDDHHMMSS>.<database-type>
        """
        files = glob( os.path.join( self._dir_path, "*" + self._file_extension ) )
        files.sort()
        if from_ns == 0 and to_ns == 0:
            return files
        if from_ns >= to_ns:
            # single-file lookup: align from_ns down onto the rotation raster
            # and synthesize the file name (the file may not exist yet)
            time_str = strftime( "%Y%m%d%H%M%S", gmtime( int( from_ns / NS_IN_S / self._rotation_period ) * self._rotation_period ) )
            file_path = os.path.join( self._dir_path, time_str + self._file_extension )
            if len( files ) == 0:
                return [file_path]
            files.append( file_path )
            files.sort()
            to_ns = from_ns
        # compare on the YYYYMMDDHHMMSS numbers encoded in the file names
        from_s = int( strftime( "%Y%m%d%H%M%S", gmtime( from_ns / NS_IN_S ) ) )
        to_s = int( strftime( "%Y%m%d%H%M%S", gmtime( to_ns / NS_IN_S ) ) )
        selected_files = []
        for n in range( 0, len( files ) ):
            if n + 1 == len( files ):
                # the last file covers the open-ended tail of the range
                selected_files.append( files[n] )
                break
            # a file covers everything up to its successor's start time
            next_file_time = self._get_time_from_filename( files[n + 1] )
            if next_file_time > from_s:
                selected_files.append( files[n] )
            if next_file_time > to_s:
                break
        return selected_files

    def _get_time_from_filename( self, filename ):
        """
        Extract time-stamp that is encoded in the filename and return an integer value.
        The returned value is just a number that increases with time but NOT seconds
        since epoch.
        @param filename: filename in form /path/<YYYYMMDDHHMMSS>.<database-type>, path is optional
        @return: time as integer value (YYYYMMDDHHMMSS)
        """
        return int( os.path.basename( filename ).split( "." )[0] )

    @staticmethod
    def _step_records( records, nsteps=0, step_ns=0 ):
        """
        Thin out records so that two consecutive returned records are at least
        step_ns apart. The first record is always kept.
        @param records: list of records, assumed sorted ascending by time_ns
        @param nsteps: if > 0, derive step_ns from the covered time range
        @param step_ns: minimal distance between two returned records in 10E-9 s
        @return: thinned-out list of records (records unchanged if step is 0)
        """
        records_start = records[0].time_ns
        records_end = records[-1].time_ns
        if nsteps > 0:
            step_ns = int( ( records_end - records_start ) / nsteps )

        if step_ns == 0:
            return records

        stepped_records = [records[0]]
        target_time_ns = records[0].time_ns + step_ns
        next_time_ns = target_time_ns + step_ns
        for record in records[1:]:
            if target_time_ns <= record.time_ns:
                stepped_records.append( record )
                if record.time_ns < next_time_ns:
                    # record close to the raster: advance by one step
                    target_time_ns += step_ns
                    next_time_ns += step_ns
                else:
                    # large gap in the data: restart the raster at this record
                    target_time_ns = record.time_ns + step_ns
                    next_time_ns = target_time_ns + step_ns
        return stepped_records

    ## Print a string representation of the current database.
    # @return: a string representation of the database will be returned
    def __repr__( self ):
        rep = ["%s(%s, %d, %d, " % ( self._type_name,
               repr( self._dir_path ), self._keep_exact, self._rotation_period )]
        for record in self.fetch_all( 0, time() * NS_IN_S, 0 ):
            rep.append( repr( record ) + ", " )
        rep.append( ")" )
        return "".join( rep )

    ## Verify on-disk file, read in data, and if broken write out as much as possible to a new file.
    # Relies on fetch_all() setting self._last_error when a file is truncated/corrupt.
    # @param check_all: verify all database files instead of only the current one
    def __verify_db_files( self, check_all=False ):
        if check_all:
            files = self._get_file_list()
        else:
            # only the file covering "now"
            files = self._get_file_list( time() * NS_IN_S )
        if len( files ) == 0:
            return
        logging.info( "Verifying ts files at %s : begin" % self._dir_path )
        for file_name in files:
            logging.debug( "Verifying ts file '%s'" % file_name )
            self._last_error = None
            records = self.fetch_all( selected_files=[file_name] )
            if self._last_error is not None:
                logging.error( "Error in TS file: %s -- rewriting file." % self._last_error )
                self._last_error = None
                # rewrite ts file with the records that could be salvaged
                self.clear( selected_files=[file_name] )
                for record in records:
                    self.append_record( record )
        logging.info( "Verifying ts files at %s : end" % self._dir_path )

    ## Writes information on time series database type to file named DATABASE_TYPE_FILENAME.
    # @return: nothing
    def _write_ts_info( self ):
        file_path = os.path.join( self._dir_path, DATABASE_TYPE_FILENAME )
        ts_info = { "class" : self._type_name,
                    "path"  : self._dir_path,
                    "keep_exact" : self._keep_exact,
                    "rotation_period" : self._rotation_period }
        # close explicitly instead of relying on refcounting
        with open( file_path, "w+" ) as info_file:
            info_file.write( str( ts_info ) )

    ## Append a record to the current time-series.
    # @param record: works with any type of records for which a marshal function is defined
    # @return: nothing
    def append_record( self, record ):
        time_ns = record.time_ns
        selected_files = self._get_file_list( time_ns )
        if len( selected_files ) != 1:
            logging.error( "Can't append! No suitable file found!" )
            return
        current_file = selected_files[0]
        xdr_marshaller = XDRMarshaller()
        record.marshal( xdr_marshaller )
        with open( current_file, "a+b" ) as ts_file:
            ts_file.write( xdr_marshaller.get_buffer() )

    ## Remove database files.
    # Remove all database files that contain data within the period:
    # start_time_ns - rotation_time to end_time_ns
    # Note: If start_time_ns == end_time_ns == 0, which is the default,
    # ALL database files will be removed!
    # @param start_time_ns: start time in 10E-9 seconds since epoch
    # @param end_time_ns: end time in 10E-9 seconds since epoch
    # @param selected_files: explicit list of files to remove, overrides the time range
    # @return: nothing
    def clear( self, start_time_ns=0, end_time_ns=0, selected_files=None ):
        if selected_files is None or len( selected_files ) == 0:
            if start_time_ns == 0 and end_time_ns == 0:
                end_time_ns = time() * NS_IN_S
            selected_files = self._get_file_list( start_time_ns, end_time_ns )
        for file_name in selected_files:
            if os.path.exists( file_name ):
                os.unlink( file_name )

    ## Reads all records stored in the current database where the
    # time difference between two consecutive records is greater than step_ns.
    # Note: The returned records do NOT exactly cover the from_ns to to_ns time period.
    # These times are only used to identify the database files. Actually more records are returned.
    # @param from_ns: time in 10E-9 seconds since epoch
    # @param to_ns: time in 10E-9 seconds since epoch
    # @param nsteps: if > 0, condense the result into nsteps bins/steps
    # @param step_ns: time in 10E-9 seconds giving the minimum time between two consecutive records
    # @param selected_files: explicit list of files to read, overrides the time range
    # @return: list of records
    def fetch_all( self, from_ns=0, to_ns=0, nsteps=0, step_ns=0, selected_files=None ):
        records = []
        if selected_files is None:
            selected_files = self._get_file_list( from_ns, to_ns )
        for file_name in selected_files:
            if not os.path.exists( file_name ):
                # TODO: handle the case of missing ts file
                continue
            # reading should actually succeed if the file exists
            with open( file_name, "rb" ) as ts_file:
                file_contents = ts_file.read()
            xdr_unmarshaller = XDRUnmarshaller( file_contents )
            while xdr_unmarshaller.get_position() != -1:
                try:
                    record = self.record()
                    record.unmarshal( xdr_unmarshaller )
                    records.append( record )
                except EOFError:
                    # truncated file: remember the error for __verify_db_files()
                    self._last_error = "EOFError %s" % file_name
                    break
        if ( step_ns > 0 or nsteps > 0 ) and len( records ) > 0:
            # BUGFIX: this was an unbound-method call via the class with the
            # records list in the 'self' slot, which raised TypeError
            return self.__avg_or_step_records( records, nsteps=nsteps, step_ns=int( step_ns ) )
        if hasattr( self, "filter_records" ):
            # give subclasses a chance to post-process the result
            return self.filter_records( records )
        return records

    ## Reads all records stored in this database where the
    # time-stamp is newer than start_time_ns.
    # @param start_time_ns: timestamp of earliest record to be returned;
    #                       negative values are relative to "now"
    # @param nsteps: if > 0, condense the result into nsteps bins/steps
    # @param step_ns: time in 10E-9 seconds giving the minimal time between two consecutive records
    # @return: list of records matching the criteria
    def fetch_from( self, start_time_ns, nsteps=0, step_ns=0 ):
        if start_time_ns < 0:
            start_time_ns = int( time() * NS_IN_S ) + start_time_ns

        matched_records = [record for record in self.fetch_all( start_time_ns, time() * NS_IN_S )
                           if record.time_ns >= start_time_ns]

        return self.__avg_or_step_records( matched_records, nsteps=nsteps, step_ns=step_ns )

    ## Reads all records stored in this database where the
    # time-stamp is newer than start_time_ns and older than end_time_ns.
    # @param start_time_ns: timestamp of earliest record to be returned;
    #                       negative values are relative to "now"
    # @param end_time_ns: timestamp of latest record to be returned;
    #                     negative values are relative to "now"
    # @param nsteps: if > 0, condense the result into nsteps bins/steps
    # @param step_ns: time in 10E-9 seconds giving the minimum time between two consecutive records
    # @return: list of records matching the criteria
    def fetch_range( self, start_time_ns, end_time_ns, nsteps=0, step_ns=0 ):
        if start_time_ns < 0:
            start_time_ns = int( time() * NS_IN_S ) + start_time_ns

        if end_time_ns < 0:
            end_time_ns = int( time() * NS_IN_S ) + end_time_ns

        matched_records = [record for record in self.fetch_all( start_time_ns, end_time_ns )
                           if start_time_ns <= record.time_ns <= end_time_ns]

        return self.__avg_or_step_records( matched_records, nsteps=nsteps, step_ns=step_ns )

    ## Reads all records stored in this database where the
    # time-stamp is older than end_time_ns.
    # @param end_time_ns: timestamp of newest record to be returned
    # @param nsteps: if > 0, condense the result into nsteps bins/steps
    # @param step_ns: time in 10E-9 seconds giving the minimum time between two consecutive records
    # @return: list of records matching the criteria
    def fetch_until( self, end_time_ns, nsteps=0, step_ns=0 ):
        matched_records = [record for record in self.fetch_all( 0, time() * NS_IN_S )
                           if record.time_ns <= end_time_ns]

        return self.__avg_or_step_records( matched_records, nsteps=nsteps, step_ns=step_ns )

    def get_files_to_condense( self ):
        """
        Return a list containing path names with files to condense.
        The list will be created based on keep_exact: the newest keep_exact + 1
        files are kept, all older ones are renamed to *.int and returned.
        @return: list object containing path elements (absolute directory plus file names)
        """
        int_path_list = []
        if self._keep_exact >= 0:
            path_list = self._get_file_list()
            if len( path_list ) > self._keep_exact + 1:
                # drop the newest keep_exact + 1 entries; the remaining (older)
                # files are the condensation candidates
                del path_list[len( path_list ) - self._keep_exact - 1 : len( path_list )]
                for path_name in path_list:
                    # NOTE(review): replace() substitutes every occurrence of the
                    # extension in the whole path -- assumes it only appears at
                    # the end; confirm directory names never contain it
                    int_path_name = path_name.replace( self._file_extension, ".int" )
                    os.rename( path_name, int_path_name )
                    int_path_list.append( int_path_name )
        return int_path_list

    def get_keep_exact( self ):
        """Return the configured keep_exact value (number of rotation periods with exact data)."""
        return self._keep_exact

    def get_rotation_period( self ):
        """Return the configured rotation period in seconds."""
        return self._rotation_period

    ## Inserts a record into the database at the position given by its time stamp.
    # @param record: record of subclass type Record
    # @return: nothing
    # TODO: Since we have a fixed record length for each time_ns, value pair,
    # this should be eventually be optimized by opening the database file "r+b",
    # seek to the right position and insert the new time_ns, value pair.
    def insert_record( self, record ):
        time_ns = record.time_ns
        all_records = self.fetch_all( time_ns, time_ns, 0 )
        selected_files = self._get_file_list( time_ns )
        if len( selected_files ) != 1:
            logging.error( "Can't insert! No suitable file found!" )
            return
        current_file = selected_files[0]
        xdr_marshaller = XDRMarshaller()
        if len( all_records ) == 0 or all_records[-1].time_ns < time_ns:
            # Record is newer than the last record:
            #  open file for append, append record
            record.marshal( xdr_marshaller )
            with open( current_file, "ab" ) as ts_file:
                ts_file.write( xdr_marshaller.get_buffer() )
        elif time_ns < all_records[0].time_ns:
            # Record is older than the first record:
            #  read old content, open file as empty (w+ means truncate file),
            #  write record and old content
            with open( current_file, "rb" ) as ts_file:
                old_contents = ts_file.read()
            record.marshal( xdr_marshaller )
            with open( current_file, "w+b" ) as ts_file:
                ts_file.write( xdr_marshaller.get_buffer() + old_contents )
        else:
            # Record should be sorted into the existing values:
            #  insert new record into the XDR buffer at the suitable place,
            #  silently overwrite older values for equal time_ns values
            for current_record in all_records:
                if record is not None and time_ns <= current_record.time_ns:
                    record.marshal( xdr_marshaller )
                    record = None
                    if current_record.time_ns == time_ns:
                        # equal time stamp: skip (i.e. overwrite) the old record
                        continue
                current_record.marshal( xdr_marshaller )
            if record is not None:
                # defensive: cannot be reached because time_ns <= last record here
                assert time_ns > current_record.time_ns
                all_records.append( record )
            with open( current_file, "w+b" ) as ts_file:
                ts_file.write( xdr_marshaller.get_buffer() )

    def number_of_files_to_condense( self ):
        """
        Return the number of files to condense.
        @return: non-negative integer reflecting the number of files
        """
        if self._keep_exact < 0:
            return 0
        # clamp to 0: fewer files than keep_exact + 1 means nothing to condense
        return max( 0, len( self._get_file_list() ) - self._keep_exact - 1 )

    ## Find out which type the database at the given location is.
    # Note, this is a static method and can be called without owning an instance!
    # @param base_dir_path: path to where database files should be placed
    # @raise IOError: if the info file does not exist (passed upwards on purpose)
    # @raise TypeError: if the file content cannot be parsed
    # @return: dict with database info ("class", "path", "keep_exact", "rotation_period")
    @staticmethod
    def read_ts_info( base_dir_path ):
        file_path = os.path.join( base_dir_path, DATABASE_TYPE_FILENAME )
        # an IOError here should be passed upwards
        with open( file_path, "r" ) as info_file:
            ts_string = info_file.read()
        try:
            # SECURITY: literal_eval instead of eval -- the info file is plain
            # on-disk data (written via str(dict)) and must not be able to
            # execute arbitrary code
            import ast
            ts_info = ast.literal_eval( ts_string )
        except Exception as e:
            raise TypeError( e )
        return ts_info
