package Arcos::JobQueue::Daemon;

use strict;
use warnings;

use Arcos::Log qw(debug info critical);
use Arcos::JobQueue;
use Arcos::DB::Job;
use Arcos::Conf qw(ArcosRoot JobQueueMaxProcesses JobQueuePollInterval);
use Carp qw(croak);
use Proc::Daemon;
use Config::ApacheFormat;
use Arcos::DateTime;
use Parallel::ForkManager;
use Time::HiRes qw(sleep);
use Params::Validate qw(:all);
use Arcos::Warehouse::DB;

# Location of the daemon's pid file, written by run() after daemonizing.
# NOTE(review): File::Spec is used here but this file never use's it; it
# is presumably loaded transitively by another module — an explicit
# "use File::Spec;" would be safer.
our $PIDFILE = File::Spec->catfile(ArcosRoot(), 'tmp', 'queue_daemon.pid');

# NOTE(review): declared but never assigned in this file; run() reads
# its poll interval from JobQueuePollInterval into a lexical instead.
our $POLL_INTERVAL;

# Flags set asynchronously by the USR1/USR2 signal handlers and polled
# at the bottom of run()'s main loop.
our $GRACEFUL_RESTART;
our $GRACEFUL_STOP;

# Map of worker pid => { job_id => ..., instance => ... }, maintained by
# the Parallel::ForkManager run_on_start/run_on_finish callbacks in run().
our %ACTIVE_JOBS;

# Handle errors, since STDERR will be closed
# Handle errors, since STDERR will be closed
# Log fatal errors through the project logger before re-dying, because
# the daemonized process has no usable STDERR.  Exceptions thrown inside
# an eval are skipped: the stack walk below looks for "(eval)" frames
# reported by caller().
$SIG{__DIE__} = sub {
    my $in_eval = 0;
    for (my $stack = 1 ; my $sub = (CORE::caller($stack))[3] ; $stack++) {
        $in_eval = 1 if $sub =~ /^\(eval\)/;
    }
    return if $in_eval;
    my $err = shift;
    critical($err);
    die $err;
};

# Mirror every warning into the log, and echo it to STDERR while that is
# still open (i.e. before Proc::Daemon::Init closes it).
$SIG{__WARN__} = sub {
    my $warn = shift;
    critical($warn);
    print STDERR $warn;    # to see on console before daemonizing
};

# SIGTERM: stop immediately (removes the pid file and exits) without
# waiting for in-flight jobs.
$SIG{'TERM'} = sub {
    info(__PACKAGE__ . " caught SIGTERM.");
    _stop();
};

# SIGUSR1: let running jobs finish, then re-exec the daemon.  The flag
# is polled at the bottom of run()'s main loop.
$SIG{'USR1'} = sub {
    info(__PACKAGE__ . " caught SIGUSR1. Doing graceful restart.");
    $GRACEFUL_RESTART = 1;
};

# SIGUSR2: let running jobs finish, then exit (polled in run()'s loop).
$SIG{'USR2'} = sub {
    info(__PACKAGE__ . " caught SIGUSR2. Doing graceful stop.");
    $GRACEFUL_STOP = 1;
};

# Main entry point.  Validates the type list, daemonizes, then loops
# forever accepting jobs across all configured instances, forking a
# worker per job via Parallel::ForkManager.  Returns only via exit()
# (or exec() on graceful restart).
#
# Named args (both optional, 'types' wins if both are given):
#   types         - arrayref: only handle these job types
#   exclude_types - arrayref: handle every configured type except these
sub run {
    my $class = shift;
    my %args =
      validate(@_,
               {types => {type     => ARRAYREF,
                          optional => 1
                         },
                exclude_types => {type     => ARRAYREF,
                                  optional => 1
                                 }
               });

    my %handlers = Arcos::JobQueue->available_type_handlers;

    # figure out the list of types we'll handle
    my @types;
    if ($args{types} and @{$args{types}}) {
        @types = @{$args{types}};
        $handlers{$_}
          or croak("Unknown type '$_': types must appear in conf/queue.conf.")
          for @types;
    } elsif ($args{exclude_types} and @{$args{exclude_types}}) {
        my %exclude = map { ($_, 1) } @{$args{exclude_types}};
        @types = grep { !$exclude{$_} } keys %handlers;
        croak("Processing exclude_types left 0 types to handle!")
          unless @types;
    } else {
        @types = keys %handlers;
    }

    # pre-load handler classes before the fork to save memory
    foreach my $handler (map { $handlers{$_} } @types) {
        eval "require $handler";
        croak $@ if $@;
    }

    # Collect all open database handles.  These are needed so the
    # children can set InactiveDestroy after forking.  Failing to do
    # so results in "Lost connection during query" errors in the
    # parent.  This is needed before the Proc::Daemon fork so we don't
    # lose the pre-fork connections.
    my @grand_parent_dbh;
    foreach my $instance (Arcos::Conf->instances()) {
        Arcos::Conf->instance($instance);
        push @grand_parent_dbh, Arcos::DB->db_Main(),
          Arcos::Warehouse::DB->get_dbh();
    }

    # Daemonize!  Proc::Daemon::Init forks (twice) and closes the std
    # filehandles, so the log must be reopened afterwards.
    Proc::Daemon::Init();
    Arcos::Log::reopen_log();

    # Write pidfile
    # NOTE(review): bareword filehandle PIDFILE shadows the package var
    # $PIDFILE by name; a lexical handle (open my $fh, ...) would be the
    # modern idiom, and close() is unchecked here.
    open(PIDFILE, '>', $PIDFILE) or do {
        critical("Unable to write '$PIDFILE': $!");
        exit(1);
    };
    print PIDFILE $$;
    close PIDFILE;
    info(__PACKAGE__ . ' started.');
    if ($ENV{'HARNESS_ACTIVE'}) {
        info(__PACKAGE__ . ' running in test mode.');
    }

    # set InactiveDestroy so closing these handles doesn't cause problems
    foreach my $gparent_dbh (@grand_parent_dbh) {
        $gparent_dbh->{InactiveDestroy} = 1;
    }

    my $poll_interval = JobQueuePollInterval;

    # setup parallel fork-manager with the requested number of max
    # processes (0 means no forking)
    my $fork_manager = Parallel::ForkManager->new(JobQueueMaxProcesses - 1);

    # setup some debugging statements to keep track of jobs running
    # The second arg to ->start() (the job id) is handed back to these
    # callbacks as the "ident" parameter.
    $fork_manager->run_on_start(
        sub {
            my ($pid, $job_id) = @_;
            debug("jobqueue worker process $pid forked");
            $ACTIVE_JOBS{$pid} =
              {job_id => $job_id, instance => Arcos::Conf->instance()};
        });
    $fork_manager->run_on_finish(
        sub {
            my ($pid, $exit_code, $job_id) = @_;
            debug(
                "jobqueue worker process $pid finished with code $exit_code");
            delete $ACTIVE_JOBS{$pid};
        });
    $fork_manager->run_on_wait(
        sub {
            debug(  "jobqueue parent is waiting for a process to start"
                  . " or to exit.");
        },
        1);

    # Collect all open database handles.  These are needed so the
    # children can set InactiveDestroy after forking.  Failing to do
    # so results in "Lost connection during query" errors in the
    # parent.
    my @parent_dbh;
    foreach my $instance (Arcos::Conf->instances()) {
        Arcos::Conf->instance($instance);
        push @parent_dbh, Arcos::DB->db_Main(),
          Arcos::Warehouse::DB->get_dbh();
    }

    # save this so we know if an error is in the parent
    my $PARENT_PID = $$;

    # keep this loop going as long as we're still finding pending jobs
    while (1) {
        my $found_job = 0;

        # setup an eval here to catch errors from database failures
        eval {

            # round-robin through instances looking for jobs
            foreach my $instance (Arcos::Conf->instances()) {
                Arcos::Conf->instance($instance);
                if (my $job_id =
                    Arcos::JobQueue->accept_next_job(types => \@types))
                {
                    debug(__PACKAGE__ . " accepted job $job_id");
                    $found_job = 1;

                    # do the fork and keep working in the child
                    # (->start returns the child pid in the parent and 0
                    # in the child, so the parent takes the 'next')
                    $fork_manager->start($job_id) and next;

                    # set InactiveDestroy on all the parent's handles to
                    # avoid errors
                    foreach my $parent_dbh (@parent_dbh) {
                        $parent_dbh->{InactiveDestroy} = 1;
                    }

                    eval { $class->_handle_job($job_id); };
                    if ($@) {
                        critical(__PACKAGE__
                            . ": (worker $$) Error while handling job $job_id: "
                            . $@);
                        my $job = Arcos::DB::Job->retrieve($job_id);
                        $job->mark_failed();
                        $job->update();
                    }
                    debug(__PACKAGE__ . " (worker $$) completed job $job_id");

                    # done, exit the child
                    $fork_manager->finish(0);
                }

            }

            # no more jobs. sleep a while and then look again.
            sleep $poll_interval unless $found_job;
        };

        # loop encountered an error, pause to keep from overwhelming
        # the logs during brief outages.  Never do this in the child
        # procs!
        if ($@) {
            if ($$ == $PARENT_PID) {
                critical("Job queue encountered an error: $@");
                critical("Pausing job queue for 10 seconds...");
                sleep(10);
            } else {
                die $@;
            }
        }

        if ($GRACEFUL_RESTART or $GRACEFUL_STOP) {

            # copy the active jobs list, since they will remove
            # themselves as they exit
            my %children = %ACTIVE_JOBS;

            # eval these in case the database can't be reached
            eval {
                $class->_notify_children(children => \%children,
                                         event    => 'graceful_shutdown');
            };
            if ($@) {
                critical(  "Job queue encountered an error"
                         . " during graceful shutdown: $@");
            }

            info('Waiting for child processes to finish.');
            $fork_manager->wait_all_children;

            eval {
                $class->_notify_children(children => \%children,
                                         event    => 'post_shutdown');
            };
            if ($@) {
                critical(  "Job queue encountered an error"
                         . " during graceful shutdown: $@");
            }

            if ($GRACEFUL_RESTART) {
                info('Children done. Restarting.');
                debug("exec'ing $0 " . join(' ', @ARGV));
                exec($0, @ARGV)
                  or die "Couldn't exec $0 " . join(' ', @ARGV) . ": $!";
            } else {
                _stop();
            }
        }
    }
}

# Notify each active worker's handler class of a shutdown event.
#
# Args:
#   children - hashref of worker pid => { job_id, instance }, a snapshot
#              of %ACTIVE_JOBS taken by run()
#   event    - 'graceful_shutdown' or 'post_shutdown'; used as the
#              class-method name invoked on each job's handler class
#
# Croaks if a job row can no longer be loaded from the database.
sub _notify_children {
    my $class = shift;
    my %args =
      validate(@_,
               {children => {type => HASHREF},
                event    => {
                          type  => SCALAR,
                          regex => qr/graceful_shutdown|post_shutdown/
                         },
               });

    my %handlers = Arcos::JobQueue->available_type_handlers;
    foreach my $pid (keys %{$args{'children'}}) {

        # first switch to the instance this job is from
        Arcos::Conf->instance($args{'children'}->{$pid}->{'instance'});

        my $job_id = $args{'children'}->{$pid}->{'job_id'};
        my $job    = Arcos::DB::Job->retrieve($job_id);
        croak("Unable to load job $job_id") if not $job;

        my $job_class = $handlers{$job->type()};
        my $method    = $args{'event'};

        # BUGFIX: pass the worker's pid itself.  The old code looked up
        # $args{'children'}->{$job_id}, but that hash is keyed by pid,
        # so the handler's pid argument was (almost always) undef.
        $job_class->$method(job_id => $job_id,
                            pid    => $pid);
    }
}

# Shut the daemon down: remove the pid file (Parallel::ForkManager has
# already reaped any child processes) and exit with status 0.
sub _stop {

    # The only cleanup required here is clearing the pid file; child
    # process reaping is handled by Parallel::ForkManager.
    if (-e $PIDFILE) {
        unlink($PIDFILE);
    }

    info(__PACKAGE__ . " exiting.");
    exit(0);
}

# Dispatch a single job to its configured handler class and record the
# outcome on the job row.
#
# Expects a job ID.  Jobs that no longer exist are silently skipped; a
# job whose type has no configured handler is marked failed.  Handlers
# must return a hash reference or undef — any other return value is
# logged, discarded, and the job is marked failed.
sub _handle_job {
    my ($class, $job_id) = @_;

    # A job that has vanished from the database is simply skipped.
    my $job = Arcos::DB::Job->retrieve($job_id);
    return unless $job;

    # Map the job's type to its handler class from conf/queue.conf.
    my %type_to_class = Arcos::JobQueue->available_type_handlers;
    my $handler_class = $type_to_class{$job->type()};
    unless ($handler_class) {
        critical(sprintf("%s: No handler found for job %s of type '%s'",
                         __PACKAGE__, $job->id(), $job->type()));
        $job->mark_failed();
        $job->update();
        return;
    }

    debug(__PACKAGE__ . " dispatching job to $handler_class");

    # Flip the job into its processing state and stamp the start time
    # before invoking the handler.
    my $params = $job->parameters();
    $job->time_begun(Arcos::DateTime->now());
    $job->mark_processing();
    $job->update();

    # Run the handler and validate its return value: only a hashref or
    # undef is acceptable as a result.
    my $outcome = $handler_class->handler(%{$params});
    if (defined $outcome and ref($outcome) ne 'HASH') {
        critical(sprintf(
            "%s: Job %s of type '%s' returned a result that was not a hash reference or undef",
            __PACKAGE__, $job->id(), $job->type()));
        $outcome = undef;
        $job->mark_failed();
    } else {
        $job->mark_complete();
    }

    # Persist the result (possibly undef) and the completion time.
    $job->result($outcome);
    $job->time_completed(Arcos::DateTime->now());
    $job->update();
}

1;

__END__

=head1 NAME

Arcos::JobQueue::Daemon - Daemon for processing job queue

=head1 SYNOPSIS

  use Arcos::JobQueue::Daemon;
  Arcos::JobQueue::Daemon->run();

=head1 DESCRIPTION

This is the daemon that does the processing of pending jobs in the queue.

=head1 INTERFACE

=over 4

=item C<run>

This method starts the daemon, and is the only method intended to be called 
from an external script.  It will read its config file, load all handler 
classes, and turn itself into a daemon.

=back

=head1 PRIVATE METHODS

These methods are not meant to be called from another class, but may be 
important to know if you are subclassing.

=over 4

=item C<_handle_job($job_id)>

This method dispatches jobs to handlers.  It expects to be passed a job ID.

=back

=head1 SEE ALSO

L<Arcos::DB::Job>, L<Arcos::JobQueue>
