#!/usr/bin/perl
#
# Copyright 2008-2010 Vadim Zhukov <persgray@gmail.com>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
#    list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
#    this list of conditions and the following disclaimer in the documentation
#    and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY AGAVA COMPANY ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL THE AGAVA COMPANY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of the AGAVA Company.

use bytes;
use strict;

use Errno qw/:POSIX/;
use Fcntl qw/:DEFAULT :flock :seek/;
use File::Spec;
use Getopt::Std;
use IO::Handle qw/autoflush/;
use MIME::Base64 qw/encode_base64/;
use POSIX qw/strerror strftime :sys_wait_h/;
use RB::Dir;
use RB::Log;
use RB::Profile;
use RB::Remote;
use RB::RotateScheme::Weekly;
use RB::RotateScheme::Monthly;
use RB::Transfer;

# For Getopt::Std: $VERSION is what --version prints;
# STANDARD_HELP_VERSION makes --help/--version exit after printing.
our $VERSION = "3.0";
$Getopt::Std::STANDARD_HELP_VERSION = 1;

# For RB::Log: program name prepended to log records.
our $LOGPROGNAME = 'rb_dump';


#####################################################################
# Show usage info. "Strange" name is for Getopt::Std
#
# Show usage info on STDERR. "Strange" name is required by Getopt::Std,
# which calls HELP_MESSAGE() when --help is given.
# Fix: corrected the typo "syncronized" -> "synchronized" in the usage text.
sub HELP_MESSAGE {
	print STDERR <<EOU;
Usage:
rb_dump [-IilNnv] [-d dir] [-m modules] [-r item] [-T mintime] profile host ...
    -d Base directory to place dumps in (overrides "archive_dir" profile option)
    -I Ignore existing locks and do not set own.
    -i Overwrite existing files (ignore "abort_if_exists" profile option)
    -l List items being dumped
    -m Comma-separated list of modules to backup only
    -N Forces autorotate to be disabled
    -n Test mode (more info will be shown)
    -r Set "resume from" item mark (for debugging purposes)
    -T Override "min_remote_time", specified in seconds; 0 disables pauses
    -v Verbose mode (detailed backup progress printing instead of errors only)
Profile can be set either as relative (to ~/.rb/) or absolute path.
In test mode, autorotate process is started synchronized, not in background.
EOU
}


#####################################################################
# Read options from command line, but do not merge 'em -
#  they should override configuration file options
#

unless (@ARGV) {
	HELP_MESSAGE;
	exit 0;
}

# Raw single-letter switches land in %SLCmdLineOptions; they are translated
# into profile option names (%CmdLineOptions) below.
my (%CmdLineOptions, %SLCmdLineOptions);
getopts('d:Iilm:Nnr:T:v', \%SLCmdLineOptions);

my $ProfilePath = shift @ARGV;
errx(1, "No profile given") unless $ProfilePath ne '';
errx(1, "No hosts to backup given") unless @ARGV;

# Value-carrying options are copied verbatim; undef means "not given on the
# command line" (and is skipped during the merge below).
$CmdLineOptions{'archive_dir'}     = $SLCmdLineOptions{'d'};
$CmdLineOptions{'min_remote_time'} = $SLCmdLineOptions{'T'};
$CmdLineOptions{'resume_from'}     = $SLCmdLineOptions{'r'};

# Boolean switches: each maps to a profile option name and the value it
# forces.  Only the switches actually present on the command line apply.
my %SwitchMap = (
	'I' => ['no_locks',        1],
	'i' => ['abort_if_exists', 0],
	'l' => ['list_dumped',     1],
	'N' => ['autorotate',      0],
	'n' => ['test_mode',       1],
	'v' => ['verbosity',       2],
);
foreach my $switch (keys %SwitchMap) {
	next unless exists $SLCmdLineOptions{$switch};
	my ($option, $value) = @{$SwitchMap{$switch}};
	$CmdLineOptions{$option} = $value;
}

# Modules explicitly requested with -m (comma-separated list).
my %OnlyModules;
if ($SLCmdLineOptions{'m'}) {
	%OnlyModules = map { ($_, 1) }
	    (split ('\s*,\s*', $SLCmdLineOptions{'m'}));
}

# Read profile
if (substr($ProfilePath, 0, 1) ne '/') {
	# Relative path, assume "~/.rb/" prefix
	$ProfilePath = $ENV{'HOME'}.'/.rb/'.$ProfilePath;
}
our $Profile = RB::Profile->new($ProfilePath);
our $Config = $Profile->{'global'};

# Merge command-line options on top of the profile options; undef entries
# mean "keep whatever the profile says".
foreach my $option (keys %CmdLineOptions) {
	$Config->{$option} = $CmdLineOptions{$option}
	    if defined $CmdLineOptions{$option};
}


#####################################################################
# Do make-up for some options and define some other variables
#

# Expand shell metacharacters (e.g. "~") in archive_dir, but only when it
# came from the profile: do not glob() data given on the command line.
unless (defined $CmdLineOptions{'archive_dir'}) {
	($Config->{'archive_dir'}) = (glob $Config->{'archive_dir'});
}
# Fall back to the current directory when no archive_dir was configured.
if ($Config->{'archive_dir'} eq '') {
	$Config->{'archive_dir'} = '.';
}

# Make sure verbosity is set, to avoid warnings
$Config->{'verbosity'} = 0 unless defined $Config->{'verbosity'};

# Date of backup to be made, formatted as YYYYMMDD.
# Only $mday/$mon/$year are actually used; the rest are unpacked for clarity.
my ($sec,$min,$hour,$mday,$mon,$year,$wday,$yday,$isdst) = localtime;
$Config->{'archive_date'} = strftime ('%Y%m%d', 0, 0, 0, $mday, $mon, $year);

# Number of errors occured that canceled module dumps:
#  $DumpErrors accumulates across all hosts (drives the exit code),
#  $HostDumpErrors counts errors for the host currently being dumped.
my $DumpErrors = 0;
my $HostDumpErrors = 0;

# All dumped data size
my $DumpedBytes = 0;
# All dumped data size except headers, padding etc.
my $RawBytes = 0;

# These counters need to be accessible from continue block.
#  Initialize here, reinitialize in continue block.
my $dumpedBytesPerHost = 0;
my $rawBytesPerHost = 0;

# Transfer buffer size; bufferSize is imported from an RB module
# (presumably RB::Transfer -- TODO confirm).
my $BufferSize = bufferSize;

# Open the global log file (RB::Log).
openGlobalLog;


#####################################################################
# Creates directory hierarchy for separated items
# Return 0 if OK, non-zero otherwise
sub createItemHier($$$) {
	my ($host, $module, $path) = (shift, shift, shift);
	if (substr($path, 0, 1) eq '/') {
		$path = substr($path, 1);
	}
	return 0 if $path eq '';

	my @dirs = File::Spec->splitdir($path);
	for (my $createPath = $Config->{'archive_dir'}.'/'.$host.'/'.$module;
	    @dirs;) {
		$createPath = $createPath.'/'.shift(@dirs);
		if (!mkdir $createPath) {
			my $ec = $!+0;
			next if $!{EEXIST};    # Do not fail for existing dirs
			logIt 0, "        Error creating item hierarchy (".
			    $createPath."): $!";
			return $ec;
		}
	}

	return 0;
}

# Saves much information about archive dumped.
# This info will be used by other routines. They can't use profile values
# because profile could be changed.
sub beginArchiveInfo($$) {
	my ($module, $item) = (shift, shift);
	my $t = genTimestamp;
	my $s = "begin = $t\n".
	        "type = ".$module->{'what'}."\n".
	        "format = ".$module->{'archive_format'}."\n".
	        "compressed = ".($module->{'compress'} ? 'yes' : 'no')."\n".
	        "extension = ".$module->{'archive_ext'}."\n";
	$s .= "increment_from = ".$item->{'increment_from'}."\n"
	    if $item->{'increment_from'};
	print ARCHIVEINFO $s;
}

# Complementary function to the one above.
sub endArchiveInfo($) {
	my $exitCode = shift;
	my $t = genTimestamp;
	my $s = "end = $t\n".
	        "exit_code = ".$exitCode."\n";
	my $ok = print ARCHIVEINFO $s;
	close ARCHIVEINFO;
	return $ok;
}

# Opens two filehandles: unnamed archive filehandle and ARCHIVEINFO.
# ARCHIVEINFO will be filled with already known data.
# Returns archive filehandle if OK, undef otherwise.
# Callback function, see readSeparateArchives() in RB::Transfer
sub archiveTransferBegin($$$) {
	my ($item, $host, $module) = splice(@_, 0, 3);
	my $itemDir = $Config->{'archive_dir'}.'/'.$host.'/'.$module->{'name'};
	my $itemDumpName = $item->{'name'}; 
	if ($module->{'separate_archives'}) {
		$itemDir .= '/'.$itemDumpName;
		unless (-d $itemDir or createItemHier(
		    $host, $module->{'name'}, $itemDumpName) == 0) {
			logIt(0, "    Error creating subdirectory for item ".
			    $itemDumpName.", skipping: $!");
			return 1;
		}
	} else {
		# Module directory should already exists
		$itemDumpName = $module->{'name'};
	}

	# Make simple options simply accessible
	foreach (qw/increment_from/) {
		my $h = $item->{'headers'}->{$_};
		if (defined $h) {
			$item->{$_} = $h->[$#{$h}]->{'value'};
		}
	}

	my $archivePath =
	    $itemDir.'/'.$Config->{'archive_date'}.$module->{'archive_ext'};
	my $infoPath = $itemDir.'/'.$Config->{'archive_date'}.'.info';
	my $openFlags = O_WRONLY|O_CREAT|O_EXLOCK;
	my $fh;    # Archive file handle
	if ($Config->{'abort_if_exists'}) {
		$openFlags |= O_EXCL;
	} else {
		$openFlags |= O_TRUNC;
	}
	if (!sysopen(ARCHIVEINFO, $infoPath,
	    O_CREAT|O_WRONLY|O_APPEND|O_EXLOCK)) {
		my $ec = $!+0;
		logIt(0, "    Error creating archive info for ".$itemDumpName.
		    " (".$infoPath."), skipping: ".$!);
		return $ec;
	} elsif (!sysopen($fh, $archivePath, $openFlags)) {
		my $ec = $!+0;
		logIt(0, "    Error creating archive for ".$itemDumpName.
		   " (".$archivePath."), skipping: ".$!);
		beginArchiveInfo($module, $item);
		endArchiveInfo($ec);
		close ARCHIVEINFO;
		return $ec;
	}
	ARCHIVEINFO->autoflush(1);
	logIt(1, "        Dumping ".$itemDumpName." to ".$archivePath." begun");
	beginArchiveInfo($module, $item);
	return $fh;
}

# Callback function, see readSeparateArchives() in RB::Transfer
sub archiveTransferEnd($$$$) {
	my ($item, $to, $host, $module) = splice(@_, 0, 4);
	undef $!;
	close $to;
	return endArchiveInfo($! ? $!+0 : $item->{'errcode'});
}

# Arguments:
#   $host       - host to update status of
#   $moduleName - name of module which status was changed
#   $status     - text string that should begin by word "OK", "WARNING" or
#                 "ERROR" (like in Nagios). "OK" may occupy the whole line;
#                 "WARNING" and "ERROR" should be followed by space and one-line
#                 description. $status should not end with newline character.
#
# Returns:
#   - boolean (true if no error, false if error occured)
#
# Side effects:
#   Sets $! on error.
# Arguments:
#   $host       - host to update status of
#   $moduleName - name of module which status was changed
#   $status     - text string that should begin by word "OK", "WARNING" or
#                 "ERROR" (like in Nagios). "OK" may occupy the whole line;
#                 "WARNING" and "ERROR" should be followed by space and one-line
#                 description. $status should not end with newline character.
#
# Returns:
#   - boolean (true if no error, false if error occured)
#
# Side effects:
#   Sets $! on error.
#
# Fixes: previously returned 0 even on success, contradicting the contract
# above; $len was used uninitialized; $1 was read even when the module-name
# regex did not match (stale capture from a previous match).
sub updateDumpStatus($$$) {
	my ($host, $moduleName, $status) = splice(@_, 0, 3);

	# Prepare the full status line: "<module> <timestamp> <status>\n"
	$status = $moduleName.' '.
	    strftime('%Y-%m-%d %H:%M:%S', (localtime)).' '.
	    $status."\n";

	# O_EXLOCK serializes concurrent rb_dump runs on the status file
	# (BSD extension -- NOTE(review): not portable to all platforms).
	sysopen(DUMPSTATUS, $Config->{'archive_dir'}.'/'.$host.'/dumpstatus',
	    O_CREAT|O_RDWR|O_EXLOCK) or return 0;

	# Rewrite the file, replacing the line for this module (if any) and
	# appending a new line otherwise.  $len tracks the new file size so
	# leftovers from a longer old file can be truncated away.
	my (@newStatus, $found);
	my $len = 0;
	while (<DUMPSTATUS>) {
		# First whitespace-delimited word is the module name; only
		# trust $1 when the match actually succeeded.
		if ($_ =~ /^[\s]*([\S]+)[\s]/ and $1 eq $moduleName) {
			$found = 1;
			$_ = $status;
		}
		$len += length $_;
		push(@newStatus, $_);
	}
	unless ($found) {
		push(@newStatus, $status);
		$len += length $status;
	}
	seek(DUMPSTATUS, 0, SEEK_SET);
	truncate(DUMPSTATUS, $len);
	my $ok = print DUMPSTATUS @newStatus;
	close DUMPSTATUS or return 0;    # Flushed and unlocked automatically
	return $ok ? 1 : 0;
}


#####################################################################
# Proceed backups
#

my $HostsDumped = 0;
my @RotatePIDs;        # PIDs of background rotate processes.
my ($hostDumpStart, $hostTransferDuration);    # Statistics

ITERATE_HOSTS:
foreach my $host (@ARGV) {
	$host = lc($host);
	my $hostSavePath = $Config->{'archive_dir'}.'/'.$host;
	if (!-d $hostSavePath) {
		if (!mkdir $hostSavePath) {
			logIt(0, "Cannot create host subdirectory for ".$host.
			    ", skipping: $!");
			next ITERATE_HOSTS;
		}
	}
	$Config->{'host'} = $host;

	# Check do we have enough space (see "abort_if_free" profile directive)
	if ($Config->{'abort_if_free_abs'} or
	    $Config->{'abort_if_free_percent'}) {
		pipe(DFREAD, DFWRITE);
		my $dfpid = fork();
		unless (defined $dfpid) {
			logIt(0, "CRITICAL ERROR: Cannot fork: $!");
			# This is severe error
			exit 2;
		} elsif ($dfpid == 0) {
			# Child, exec() df(1)
			close DFREAD or die "$!";
			# BUGFIX: STDOUT must be duplicated for WRITING
			# ('>&'), not reading ('<&'), otherwise df(1) output
			# never reaches the pipe.
			open(STDOUT, '>&DFWRITE') or die "$!";
			close DFWRITE or die "$!";
			exec('df', '-k', $hostSavePath);
			die "ERROR: Cannot execute df(1): $!";
		}
		# Parent, read df(1) output
		close DFWRITE;
		# First line is header, second (and last) is what we need
		my @lines = <DFREAD>;
		close DFREAD;    # BUGFIX: do not leak the pipe's read end
		# BUGFIX: waitpid() takes ($pid, $flags), arguments were
		# reversed before.
		waitpid($dfpid, WNOHANG);    # Avoid zombies
		my @fsstat = split(/[\s]+/, $lines[1], 6);
		unless (@fsstat) {
			# Error message should be printed by df(1) or our child,
			# depending where problem arised
			logIt(0, "ERROR: df(1) call failed, canceling dump");
			next ITERATE_HOSTS;
		} elsif (($Config->{'abort_if_free_percent'} and
		    $fsstat[4] > $Config->{'abort_if_free_percent'}) or
		    ($Config->{'abort_if_free_abs'} and
		    $fsstat[3] * 1024 < $Config->{'abort_if_free_abs'})) {
			logIt(0, "ERROR: Disk free space gone beyond limits, ".
			    "canceling dump");
			next ITERATE_HOSTS;
		}
	}

	$hostDumpStart = time();

	# Error message will be shown by checkAgent(), if needed
	checkAgent($host) or next ITERATE_HOSTS;

	logIt(1, "Starting backup process for host ".$host);
	my ($modulesDumpedCount, $moduleErrors, $lastItemName) =
	    (0, 0, $Config->{'resume_from'});
	# These must live OUTSIDE the loop body: the continue block below
	# cannot see lexicals declared inside the body.
	my ($moduleStatus, $moduleLockPath, %hostBackups, %saveConfig);
	ITERATE_MODULES:
	foreach my $moduleName (@{$Config->{'backup_modules'}}) {
		# Save current configuration before, restored in continue
		# block (shallow copy: nested refs stay shared).
		%saveConfig = %{$Config};

		# Do we ever need to do something?
		if (%OnlyModules and !exists $OnlyModules{$moduleName}) {
			logIt(1, "Module $moduleName skipped due to ".
			    "command-line option -m");
			$moduleStatus = 'OK';
			next ITERATE_MODULES;
		}

		# Cache
		my $module = $Profile->{$moduleName};
		my $moduleSavePath = $hostSavePath.'/'.$moduleName;
		# BUGFIX: assign the outer $moduleLockPath instead of
		# shadowing it with "my".  The continue block cannot see
		# body-scoped lexicals, so the shadowed copy left the outer
		# variable forever undef and lock files were never unlinked.
		$moduleLockPath = $moduleSavePath.'/.lock.'.$Config->{'archive_date'};

		# Lock module directory
		unless ($Config->{'no_locks'} or sysopen(MODULE_LOCK,
		    $moduleLockPath, O_WRONLY|O_EXLOCK|O_NONBLOCK|O_CREAT)) {
			if ($!+0 == EWOULDBLOCK) {
				logIt(0,
				    "    Module directory locked, skipping");
			} else {
				logIt(0,
				    "    Cannot lock module directory, ".
				    "skipping: $!");
			}
			next ITERATE_MODULES;
		}

		# Add module configuration to global.
		$Config = { %saveConfig, %{$module} };

		# Check if there are errors pending
		unless ($moduleErrors) {
			if (!-d $moduleSavePath and !mkdir $moduleSavePath) {
				# BUGFIX: log the module NAME; the previous
				# code interpolated the hash reference
				# ($module), printing "HASH(0x...)".
				logIt(0,
				    "    Cannot create host subdirectory for".
				    " module ".$moduleName.", skipping: $!");
				$moduleStatus = 'ERROR cannot create module '.
				    'subdirectory';
				$HostDumpErrors++;
				next ITERATE_MODULES;
			}
		} else {
			# Skip some code and deal with errors
			if ($moduleErrors > $module->{'max_retry_count'}) {
				logIt(0, "    Maximum retry-on-error count".
				    " reached, aborting module backup");
				$moduleStatus = 'ERROR too many errors';
				$HostDumpErrors++;
				next ITERATE_MODULES;
			}

			if ($Config->{'retry_wait'}) {
				logIt(0,
				    "    Retrying module backup on error,".
				    " waiting for ".$Config->{'retry_wait'}.
				    " seconds.");
				sleep int $Config->{'retry_wait'};
			}
		}

		# Calculate dates to take increment backups from, if needed
		my %incrementItemsFrom;
		if ($module->{'incremental_daily'} or
		    $module->{'incremental_weekly'}) {
			# Scan the module directory once per host and cache.
			unless (exists $hostBackups{$moduleName}) {
				$hostBackups{$moduleName} = scanBackupDir(
				    $moduleSavePath, $Profile->{$moduleName});
			}
			unless (defined $hostBackups{$moduleName}) {
				logIt(0, 'WARNING: cannot scan '.
				    "module $moduleName directory, ".
				    'assuming we need full backup');
				$hostBackups{$moduleName} = {};   # Fail-safe
			}
			my $moduleBackups = $hostBackups{$moduleName};   # Cache

			my $schemeMonthly = RB::RotateScheme::Monthly->new(
			    $module->{'rotate_months'});
			my $schemeWeekly = RB::RotateScheme::Weekly->new(
			    $module->{'rotate_weeks'});

			foreach my $itemName (keys %{$moduleBackups}) {
				my $item = $moduleBackups->{$itemName};
				# Dates of full backups made before today
				my @datesFull = grep {
				    $_ < $Config->{'archive_date'} and
				    $item->{$_}->increment_from eq '' }
				    keys %{$item};

				# Check if we should make full backup
				my $incFrom = {};    # Actually, just a shortcut
				$incrementItemsFrom{$itemName} = $incFrom;
				if ($module->{'incremental_daily'} and
				    !$schemeWeekly->shouldUpdate(
				      $Config->{'archive_date'},
				      $host.$moduleName, @datesFull)) {
					$incFrom->{'scheme'} = 'week';
				}
				if ($module->{'incremental_weekly'} and
				    !$schemeMonthly->shouldUpdate(
				      $Config->{'archive_date'},
				      $host.$moduleName, @datesFull)) {
					$incFrom->{'scheme'} = 'month';
				}
				next if $incFrom->{'scheme'} eq '';

				# Calculate and save date to increment from:
				# the newest eligible backup wins.
				my @allncFromDates = @datesFull;
				if ($incFrom->{'scheme'} eq 'week') {
					push(@allncFromDates, grep {
					    $item->{$_}->increment_from eq 'month'
					    } keys %{$item} );
				}
				foreach my $date (@allncFromDates) {
					if ($incFrom->{'date'} < $date) {
						$incFrom->{'date'} = $date;
					}
				}
			}
		}

		# We'll reset this counter after successfull dump, so it'll be
		# used only after using "redo", which we do for retrying.
		$moduleErrors++;

		# Open connection to agent
		logIt(1, "    Backing up module $moduleName");
		$lastItemName = ''
		    unless defined $lastItemName;    # Eliminate warning
		my $separator = generateSeparator;
		my ($remotePID, $sshRead, $sshWrite) =
		    openAgentConn($host, 1, 1, 'dump');
		redo ITERATE_MODULES unless defined $remotePID;

		# Send agent options
		syswrite($sshWrite, join('',
		    map { encodeHeader($_, $module->{$_}) }
		    grep { $_ ne 'which' } keys %{$module}).
		    encodeHeader('module_name', $moduleName).
		    encodeHeader('buffer_size', $BufferSize).
		    encodeHeader('separator', $separator).
		    encodeHeader('resume_from', $lastItemName).
		    encodeHeader('archive_date', $Config->{'archive_date'})
		    );
		foreach my $itemName (keys %incrementItemsFrom) {
			my $item = $incrementItemsFrom{$itemName};
			my $eh = encodeHeader('increment_dump',
			    $itemName,
# Why?			    'scheme', $item->{'scheme'},
			    'date', $item->{'date'}
			    );
			syswrite($sshWrite, $eh);
		}
		syswrite($sshWrite, join('',
		    map { encodeHeader('which', $_) } @{$module->{'which'}}));
		syswrite($sshWrite, endHeaders);

		# Now the backup transfer session has actually begun
		my $transferStart = time();
		my ($itemsDumped, $dumpedBytes, $rawBytes) =
		    readArchives($sshRead, $separator,
		    \&archiveTransferBegin, \&archiveTransferEnd,
		    $host, $module);
		close $sshRead;
		if ($Config->{'test_mode'}) {
			print STDERR map { "ITEM DUMPED: '".$_->{'name'}.
			    "' (error: [".$_->{'errcode'}.
			    "] ".$_->{'error'}.")\n" }
			    @{$itemsDumped};
		}
		# BUGFIX: take the last item with ->[-1]; the previous
		# expression $itemsDumped->[$#{@{$itemsDumped}}] dereferenced
		# the element COUNT as an array reference.
		my $lastItem = $itemsDumped->[-1];
		my $ec = $lastItem->{'errcode'};

		# Update statistics
		$hostTransferDuration += (time() - $transferStart) || 1;
		$dumpedBytesPerHost += $dumpedBytes;
		$rawBytesPerHost += $rawBytes;

		# Check for errors occured during dump
		if (!$module->{'separate_archives'} && $ec == EEXIST) {
			$moduleStatus = 'WARNING module already dumped';
			next ITERATE_MODULES;
		}
		redo ITERATE_MODULES if $ec;

		# We MUST call waitpid() for each child to avoid zombies.
		# XXX Also we want to know exit code of agent,
		waitpid($remotePID, 0);
		my $agentRetCode = $? >> 8;
		$modulesDumpedCount++;
	} continue {
		# Reset some variables before moving on to a new module
		if ($moduleStatus ne '') {
			updateDumpStatus($host, $moduleName, $moduleStatus);
			$moduleStatus = '';
		}
		$moduleErrors = 0;
		$lastItemName = $Config->{'resume_from'};

		# Free module lock (rely on OS to free lock after close),
		# as SUS says.
		unless ($Config->{'no_locks'} or !defined fileno MODULE_LOCK) {
			unlink $moduleLockPath;
			close MODULE_LOCK;
		}

		# Restore global configuration.
		# NOTE: do not take reference!
		$Config = { %saveConfig };
	}

	$HostsDumped++ if $modulesDumpedCount > 0;
} continue {
	# Update statistics
	my $hostDumpEnd = time();
	if ($hostTransferDuration) {
		my $statmsg = sprintf("    Host $host statistics: ".
		    "%llu raw bytes, ".
		    "%llu transferred bytes, ".
		    "raw speed %.2f KBytes/sec, ".
		    "transfer speed %.2f KBytes/sec.",
		    $rawBytesPerHost,
		    $dumpedBytesPerHost,
		    (($rawBytesPerHost / ($hostTransferDuration || 1)) / 1024),
		    # BUGFIX: was ($hostDumpEnd - $hostDumpEnd), which is
		    # always zero; measure against the dump start time.
		    ($dumpedBytesPerHost / (($hostDumpEnd - $hostDumpStart) || 1) / 1024)
		    );
		logIt(1, $statmsg);
		logIt(1, "    $HostDumpErrors errors occured during host dump.")
		    if $HostDumpErrors;
		logIt(1, "Backup process for $host ended.");
		$hostTransferDuration = 0;
	}

	# Roll per-host counters into the global totals and reset them.
	$DumpErrors += $HostDumpErrors;
	$HostDumpErrors = 0;
	$DumpedBytes += $dumpedBytesPerHost;
	$RawBytes += $rawBytesPerHost;
	$dumpedBytesPerHost = 0;
	$rawBytesPerHost = 0;

	if ($Config->{'autorotate'}) {
		logIt(1, "Starting rotate process for $host in background");
		my $rpid = fork;
		unless (defined $rpid) {
			logIt(0, "Error: cannot fork for rotate process: $!");
		} elsif ($rpid == 0) {
			# Derive the rb_rotate path from our own path.
			my $rotatePath = $0;
			unless ($rotatePath =~ s|(.*/)?rb_dump$|$1rb_rotate|) {
				# Hope it'll be in $PATH...
				$rotatePath = 'rb_rotate';
			}
			my @rotateCmd = ($rotatePath);
			push(@rotateCmd, '-n') if $Config->{'test_mode'};
			push(@rotateCmd, '-v') if $Config->{'verbosity'};
			push(@rotateCmd, '-m', $SLCmdLineOptions{'m'})
			    if exists $SLCmdLineOptions{'m'};
			exec(@rotateCmd, $ProfilePath, $host);
			# Again, use die() to avoid any cleanup code run
			die "Cannot exec rotate program: $!";
		} else {
			if ($Config->{'test_mode'}) {
				# Test mode is _test_ mode, so help testing by
				#  avoiding of messing up messages
				waitpid($rpid, 0);
			} else {
				push @RotatePIDs, $rpid;
				# Avoid zombies if not in manual mode
				waitpid($rpid, WNOHANG) if !-t STDIN;
			}
		}
	}
}

# logIt(1, "Critical error: no hosts dumped") if $HostsDumped == 0;
if (-t STDIN) {
	# Wait for rotate processes to terminate, to avoid mess on the console.
	# A blocking waitpid() never returns 0, so one call per PID suffices
	# (the old C-style loop's "== 0" condition could never hold).
	waitpid(pop @RotatePIDs, 0) while @RotatePIDs;
}
exit($DumpErrors ? 3 : 0);
