package  Schedule::Chronic::Distributed::DBI;

=head1 NAME

Schedule::Chronic::Distributed::DBI - Database Abstraction

=head1 SYNOPSIS

use base qw(Schedule::Chronic::Distributed::DBI);

=head1 DESCRIPTION

Base class for persistent object classes, using Class::DBI for persistence
and providing useful inflation/deflation methods as well as hackery to
handle forking daemons, database connectivity loss and timeouts.

=head1 RESILIENCE

Resilience is provided by coping with short losses of connectivity; on prolonged failure the
daemon exits, attempting to serialise to disk all objects that have changed since they were
last loaded or saved.

The database connection code (the over-ridden Class::DBI C<db_Main>) allows 20 seconds per DBI
connection attempt and 3 minutes of repeated attempts before giving up, quitting, and serialising objects.

This means that a disconnected node will have its jobs re-assigned after 3 minutes; the
disconnected node must then be restarted manually, resynchronising on restart rather than
reconnecting automatically and corrupting the data.

=head1 METHODS

Inflation and deflation are used in the instantiation and updating of objects inheriting from this class.

=head2 inflate_timepiece

Instantiates Time::Piece object from date

=head2 deflate_timepiece

Serialises Time::Piece object to date

=head2 db_Main

Class/Object method, returns the database handle for this class/object.

=cut


use strict;
use warnings;

use Config;
use Data::Dumper;
use Time::Piece;

use base qw(Class::DBI::Pg);

# Connection settings.  The DSN can be overridden from the environment so
# deployments do not need to edit this file.
my $dsn      = $ENV{CHRONIC_DSN} || 'dbname=chronic;host=localhost';
my $username = 'chronic';
my $password = 'chr0n05';

# 20 seconds per DBI connection attempt
my $connect_attempt_timeout = 20;
# 3 minutes of repeated attempts before giving up
my $connect_timeout = 180;

# Set once connection retries have finally timed out; the db_Main closure
# refuses to hand out handles after that point.
my $timedout = 0;

# Class::DBI's weak-reference live-object index interferes with forking
# daemons, so disable it.
$Class::DBI::Weaken_Is_Available = 0;

# Use the credential variables declared above instead of repeating the
# literals inline (they were previously duplicated, inviting drift).
# InactiveDestroy stops a forked child's DESTROY from tearing down the
# parent's connection.
__PACKAGE__->connection("dbi:Pg:$dsn", $username, $password,
                        {AutoCommit => 1, InactiveDestroy => 1});

# Perrin Harkins' fork-safe connection closure
# Perrin Harkins' fork-safe connection closure.
#
# Over-rides the closure Class::DBI uses for db_Main.  Returns a code ref
# that yields a live $dbh, reconnecting (with per-attempt and overall
# timeouts) when the process has forked or the handle has gone stale.
# Once $timedout is set the closure permanently returns undef.
#
# NOTE(review): DateTime and DateTime::Duration are used below but this
# file never does `use DateTime;` — presumably loaded by another module in
# the application; verify, or add the import.  DBI itself is loaded via
# the Class::DBI::Pg base class.
sub _mk_db_closure {
	my ($class, $dsn, $user, $pass, $attr) = @_;
        $attr ||= {};

	my $dbh;
	my $process_id = $$;
	return sub {
		# set the PID in a private cache key to prevent us
		# from sharing one with the parent after fork.  This
		# is better than disconnecting the existing $dbh since
		# the parent may still need the connection open.  Note
		# that forking code also needs to set InactiveDestroy
		# on all open handles in the child or the connection
		# will be broken during DESTROY.
	        return undef if ($timedout);
		$attr->{private_cache_key_pid} = $$;
                # reopen if this is a new process or if the connection
                # is bad
		if ($process_id != $$ or not ( $dbh && $dbh->FETCH('Active') && $dbh->ping) ) {
		  $dbh = undef;
		  return undef if ($timedout);
		  # Absolute deadline for the whole retry loop (overall timeout).
		  my $connection_wait = DateTime->now + DateTime::Duration->new( seconds => $connect_timeout );
		  # alarm() returns the seconds remaining on any *previous*
		  # timer, which is preserved here so an outer alarm (e.g. the
		  # schedule loop's) can be restored/adjusted below.
		  my $alarm_timer = alarm $connect_attempt_timeout;
		  warn "alarm_timer (old) : $alarm_timer";
		  while ( not ($dbh) ) {
                    eval {
		      local $SIG{ALRM} = sub { die "connect attempt timed out after $connect_attempt_timeout seconds\n" };
		      # NOTE(review): re-arming with the *old* timer value when
		      # it is shorter than a connect attempt looks suspect — it
		      # replaces the per-attempt timeout just set above; confirm
		      # the intended interaction with the outer alarm.
		      alarm($alarm_timer) if ($alarm_timer < $connect_attempt_timeout);
		      $dbh = DBI->connect($dsn, $user, $pass, $attr);
		     };

		      # time out after 180 seconds (within schedule loop)
		    if ($alarm_timer > 0) {
		      warn "alarm_timer (old) : $alarm_timer";
		      # Elapsed time since the retry loop started (deadline
		      # minus the original timeout gives the start instant).
		      my $diff = DateTime->now - ($connection_wait - DateTime::Duration->new( seconds => $connect_timeout ));
		      # NOTE(review): ignores hours/days components of the
		      # duration — fine while the loop is bounded at 3 minutes.
		      my $diff_secs = $diff->seconds + ($diff->minutes * 60);
		      warn "diff : $diff_secs \n"; 
		      # Shrink the outer alarm by the time we have consumed;
		      # if fewer than 5 seconds would remain, give up entirely.
		      my $new_alarm_time = $alarm_timer - $diff_secs;
		      warn "alarm_timer (new) : $new_alarm_time";
		      if ($new_alarm_time > 5) {
			alarm($new_alarm_time);
		      } else {
			$timedout = 1;
			die "schedule loop / connection timed out after 180 seconds\n";
		      }
		    }

		    # Successful connect: eval left $@ empty, so stop retrying.
		    last unless ($@);
		    warn DateTime->now, ' ', __PACKAGE__, "could not connect to database : '$@'\n";

		    # time out after 180 seconds (outside schedule loop)
		    if ($connection_wait <= DateTime->now ) {
		      $timedout = 1;
                      die "could not connect to database, retried for $connect_timeout seconds : $@";
                    }

		    warn DateTime->now, ' ', __PACKAGE__, "retrying in 5 seconds\n";
		    sleep 5;
		  }
		  # Remember which process owns this handle for the fork check.
		  $process_id = $$;
		}
		return $dbh;
	};

}

# Inflate a "YYYY-MM-DD HH:MM:SS" database value into a Time::Piece object.
#
# An absent/empty value, or a MySQL-style zero date ("0000-00-00 ..."),
# inflates to undef, and the object's field is explicitly set to undef.
#
# Parameters: $class, $arg (the raw column value), $object (hashref being
# inflated into), $field (the column/field name).
# Returns: a Time::Piece, or undef for empty/zero dates.
sub inflate_timepiece {
    my ($class, $arg, $object, $field) = @_;

    # Check definedness *before* calling substr: the original order called
    # substr() on a possibly-undef value, triggering an uninitialized-value
    # warning under `use warnings`.
    if ( !$arg or substr($arg, 0, 4) eq "0000" ) {
        return $object->{$field} = undef;
    }
    return Time::Piece->strptime($arg, "%Y-%m-%d %H:%M:%S");
}

sub deflate_timepiece { my $class = shift; my $time = shift; return $time->strftime("%Y-%m-%d %H:%M:%S") if $time; }

# Destructor: if this object still holds modifications that never reached
# the database (e.g. connectivity was lost and retries timed out), dump its
# state to a per-PID, per-day file under /var so the data can be recovered
# and replayed manually.  Then hand off to Class::DBI's own DESTROY.
sub DESTROY {
  my $self = shift;

  # check for any unsaved changes
  if ($self->is_changed) {
    # write out to file /var/scheduler_unsaved_objects_pid.ddmmyy
    my $t        = localtime;   # Time::Piece object (Time::Piece overrides localtime)
    my $filename = "/var/scheduler_unsaved_objects_${$}." . $t->dmy('');
    # Three-arg open with a lexical handle, replacing the old unchecked
    # 2-arg open on a bareword handle.  Warn rather than die: destructors
    # must not throw, and we still want SUPER::DESTROY to run.
    if (open my $unsaved_fh, '>>', $filename) {
      print {$unsaved_fh} Dumper(%$self);
      close $unsaved_fh or warn "close $filename failed: $!";
    }
    else {
      warn "could not open $filename to record unsaved changes: $!";
    }
  }

  # call C::DBI destroy
  $self->SUPER::DESTROY();
}

1;

=head1 SEE ALSO

Schedule::Chronic::Distributed

Class::DBI

Time::Piece

=head1 AUTHOR

aaron trevena, E<lt>aaron.trevena@gmail.comE<gt>

=head1 COPYRIGHT AND LICENSE

Copyright (C) 2006 by Aaron Trevena

This library is free software; you can redistribute it and/or modify
it under the same terms as Perl itself, either Perl version 5.8.5 or,
at your option, any later version of Perl 5 you may have available.

=cut
