#!/usr/bin/perl
#
# Copyright 2008-2010 Vadim Zhukov <persgray@gmail.com>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
#    list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
#    this list of conditions and the following disclaimer in the documentation
#    and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY AGAVA COMPANY ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL THE AGAVA COMPANY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of the AGAVA Company.

package main;

use bytes;
use strict;

use Cwd qw/cwd/;
use Errno qw/EPERM/;
use Fcntl qw/:mode/;
use File::Basename;
use File::Path;
use IPC::Open2;
use IPC::Open3;
use MIME::Base64 qw/decode_base64/;
use POSIX qw/mktime/;

BEGIN {
	import RB::Transfer qw/decodeHeaders encodeHeader endHeaders
	                       sendArchive readArchives/;
}

# Process-wide exit code; updated by the command handlers below and
# returned to the caller at the very end of the script.
my $exitCode = 0;

# Usage text, shown when the agent is started with no arguments at all.
my $Usage = <<EOU;
Usage: rb_agent [verbose] command what [param=value ...] -- which [...]
Supported commands: dump put restore version filter
rb_agent shouldn't be called manually except for debugging purposes.
EOU
chomp $Usage;

# Print a fatal diagnostic on STDERR and terminate the process.
#
# Arguments:
#   $code    - exit code for the process.
#   $message - text printed after the "AGENT: FATAL: " tag (optional).
sub errx($;$) {
	my ($code, $message) = @_;
	print STDERR "AGENT: FATAL: ${message}\n";
	exit $code;
}

# No arguments at all: show usage and leave successfully.
if (scalar(@ARGV) == 0) {
	errx(0, $Usage);
}

my $Verbose = 0;
if ($ARGV[0] eq 'verbose') {
	# Enable only if it's really needed, produces a LOT of output
	$Verbose = 1;
	print STDERR "AGENT: Verbose mode enabled\n";
	shift @ARGV;
}

# What should we do?
my $Command = lc(shift @ARGV);
if ($Command eq 'version') {
	print "rb_agent version 3.10\n";
	exit 0;
} elsif ($Command eq 'filter') {
	# Internal mode: a re-invoked copy of this script (see
	# startFileDateFilter()) that prints names of files newer than the
	# given Unix timestamp.
	my $t = shift @ARGV;
	foreach my $f (@ARGV) {
		my $rv = checkFileByUnixTime($t, $f);
		# NOTE(review): checkFileByUnixTime() only ever returns 0 or
		# 1, so this error check can never fire -- confirm the
		# intended return contract.
		exit 1 if $rv < 0;
	}
	exit 0;
} elsif ($Command !~ /^(?:dump|put|restore)$/) {
	# BUGFIX: the previous pattern /^dump|put|restore$/ parsed as
	# (^dump)|(put)|(restore$) due to alternation precedence, so inputs
	# like "dumpster" or "computed" were accepted as valid commands.
	errx(1, "Unsupported command $Command. Did you forget to update agent?");
}

# Defaults
our $Config = {
    'network_timeout' => 25
    };

# Get operation parameters. Some of them require special handling, see below.
# Headers arrive on STDIN in the RB::Transfer wire format.
print STDERR "AGENT: decoding configuration headers\n" if $Verbose;
my $Headers = decodeHeaders(\*STDIN);
unless (defined $Headers and $Headers != 0) {
	errx(1, "AGENT: no configuration headers");
}

#if ($Verbose) {
#	foreach my $_hn (sort keys %{$Headers}) {
#		my $_h = $Headers->{$_hn};
#		foreach my $i (sort { $_h->[$a]->{'value'}
#		    cmp $_h->[$b]->{'value'} } 0..$#{$_h}) {
#			print STDERR $_hn.": ".$_h->[$i]->{'value'};
#			foreach my $_an (sort keys
#			    %{$_h->[$i]->{'attributes'}}) {
#				print STDERR " ".$_an."=".
#				    $_h->[$i]->{'attributes'}->{$_an};
#			}
#			print STDERR "\n";
#		}
#	}
#}

# List of items to operate on.
# Every 'which' header contributes one item, in the order received.
my @Items;
if ($Headers->{'which'}) {
	my $w = delete $Headers->{'which'};
	@Items = map { $w->[$_]->{'value'} } 0..$#{$w};
}

# Used when dumping, makes up list of items to build increment dumps for and
# information needed to make such dumps.
# Maps item name -> header attributes hashref (carries e.g. the 'date' key).
my %IncrementDump;
if ($Headers->{'increment_dump'}) {
	# NOTE: Dates should be in YYYYMMDD format
	%IncrementDump = map { ($_->{'value'}, $_->{'attributes'}) }
	    @{$Headers->{'increment_dump'}};
	delete $Headers->{'increment_dump'};
}

# Used when restoring, makes up list of items with increment dumps.
# Unlike %IncrementDump, an item may carry SEVERAL attribute sets (one per
# increment), so values here are arrayrefs.
my %IncrementRestore;
if ($Headers->{'increment_restore'}) {
	foreach my $ir (@{$Headers->{'increment_restore'}}) {
		my $itemName = $ir->{'value'};
		$IncrementRestore{$itemName} = []
		    unless exists $IncrementRestore{$itemName};
		push(@{$IncrementRestore{$itemName}}, $ir->{'attributes'});
	}
	delete $Headers->{'increment_restore'};
}

# Other parameters are simple: last value set is actual value.
# Fold the remaining headers into $Config on top of the defaults above.
$Config = { %{$Config},
    map { ($_, $Headers->{$_}->[$#{$Headers->{$_}}]->{'value'}) }
    keys %{$Headers} };
if ($Verbose) {
	print STDERR "AGENT: PARAMETERS SET:\n";
	print STDERR map { "\t$_ => ".$Config->{$_}."\n" } sort keys %{$Config};
}

# Only four backup module types are known to this agent version.
if ($Config->{'what'} ne 'files' and $Config->{'what'} ne 'mysql' and
    $Config->{'what'} ne 'postgres' and $Config->{'what'} ne 'fs') {
	errx(1, "Unknown backup module type: ".$Config->{'what'}.
	    ". Did you forget to update agent?");
}

# Precalculate some values here
# Split 'dump_as'/'restore_as' (format "user[:group]") into separate
# *_user/*_group config entries.  NOTE(review): the regex always matches,
# but $3 is undef when no colon is present, so *_group ends up undef rather
# than '' -- later 'ne' comparisons still behave as intended (warnings are
# not enabled in this file).
for my $opt ('dump_as', 'restore_as') {
	if ($Config->{$opt} ne '') {
		$Config->{$opt} =~ /^([^:]*)(:(.*))?$/;
		$Config->{$opt.'_user'} = $1;
		$Config->{$opt.'_group'} = $3;
	}
}

# Change current directory, if specified
# ('put' builds its own target path inside openForPutting() instead).
my $OldWD = cwd;
if ($Command ne 'put' and
    $Config->{'workdir'} ne '' and
    $Config->{'workdir'} ne '.') {
	print STDERR "AGENT: Changing directory to ".$Config->{'workdir'}."\n"
	    if $Verbose;
	chdir $Config->{'workdir'}
	    or errx(2, "Cannot change working directory: $!");
}

# Verbose diagnostics imply test mode for downstream consumers of $Config.
$Config->{'test_mode'} = 1 if $Verbose;

# The transfer buffer size is mandatory; refuse to run without it.
my $BufferSize = $Config->{'buffer_size'}
    or errx(1, "Buffer size was not provided");

# Not waitpid()-ed yet PIDs of created children
my @Pids;

# Reap every child whose PID is still queued in @Pids, draining the queue.
#
# Returns:
#   exit code of the first child that terminated unsuccessfully, or 0 when
#   all reaped children exited cleanly.
sub waitMyPids() {
	my $firstErr = 0;
	while (@Pids) {
		my $pid = shift @Pids;
		next unless defined $pid;
		print STDERR "AGENT: waiting for PID ${pid}\n"
		    if $Verbose;
		next if waitpid($pid, 0) == -1;
		my $status = $? >> 8;
		# Remember only the first failure seen.
		$firstErr = $status if $firstErr == 0 and $status != 0;
	}
	print STDERR "AGENT: waitMyPids() ended\n"
	    if $Verbose;
	return $firstErr;
}

# Forks and execs specified program with specified options. Do not use open()
# instead because it does not allow easily to transfer options safe.
#
# When $compress is true an additional gzip(1) child is inserted into the
# pipeline: program | gzip | parent while dumping, and
# parent | gzip -d | program while restoring.
#
# Arguments:
#   $as       - user to open program as (sudo(8) used)
#   $dumping  - true if program will send data to net, false otherwise.
#   $compress - true if we need to gzip/gunzip stream; while dumping it also
#               carries the gzip level ('1'..'9', or '+' for gzip's default).
#   $readFrom - file handle to read data from; /dev/null is opened if undefined.
#               Used to give pax(1) list of files to archive, for example.
#   @cmd      - command and it's arguments.
#
# Returns:
#   a file handle connected to the pipeline (read end when dumping, write
#   end otherwise), or undef on error.
#
# Sets $! on error.
sub openProgram($$$@) {
	my ($as, $dumping, $compress, $readFrom) = splice(@_, 0, 4);
	print STDERR "AGENT: command to exec: ".join(' ', @_)."\n"
	    if $Verbose;

	my ($sendHandle, $sendHandleChild, $gzipRead, $gzipWrite);
	pipe($sendHandle, $sendHandleChild)
	    or errx(2, "Error making child pipe: $!");

	if ($compress) {
		# We have to create one more child
		pipe($gzipRead, $gzipWrite)
		    or errx(2, "Error making gzip pipe: $!");
		print STDERR "AGENT: forking for gzip...\n"
		    if $Verbose;
		my $gpid = fork;
		if (!defined $gpid) {
			errx(2, "Cannot fork gzip child: $!");
		} elsif ($gpid == 0) {
			# Child: exec gzip
			close $sendHandle;
			if ($dumping) {
				# program -> gzip -> parent
				close $gzipWrite;
				open(STDIN, '<&', $gzipRead)
				    or errx(2,
				    "Error duplicating gzip read pipe: $!");
				close $gzipRead;
				open(STDOUT, '>&', $sendHandleChild)
				    or errx(2,
				    "Error duplicating gzip write pipe: $!");
				close $sendHandleChild;
			} else {
				# parent -> gunzip -> program
				close $gzipRead;
				# BUGFIX: dup modes were swapped here ('>&'
				# used for STDIN and '<&' for STDOUT); STDIN
				# must be duplicated for reading and STDOUT
				# for writing.
				open(STDIN, '<&', $sendHandleChild)
				    or errx(2,
				    "Error duplicating gzip read pipe: $!");
				close $sendHandleChild;
				open(STDOUT, '>&', $gzipWrite)
				    or errx(2,
				    "Error duplicating gzip write pipe: $!");
				close $gzipWrite;
			}
			print STDERR
			    "AGENT: All is prepared for exec()-ing gzip\n"
			    if $Verbose;
			my @gzipCmd = qw/gzip -c/;
			if ($dumping) {
				push(@gzipCmd, '-n');
				# $compress can be [0-9+]; '+' means "let
				# gzip use its default compression level".
				push(@gzipCmd, '-'.$compress)
				    if $compress ne '+';
			} else {
				push(@gzipCmd, '-d');
			}
			exec @gzipCmd;
			errx(2, "Cannot exec gzip: $!");
		} else {
			# Parent: just some cleanup, preparing for next fork.
			print STDERR
			    "AGENT: gzip child forked OK, PID ${gpid}\n"
			    if $Verbose;
			push(@Pids, $gpid);
			# Keep only the gzip pipe end the program child needs.
			$dumping ? close($gzipRead) : close($gzipWrite);
		}
	}

	my $pid = fork;
	if (!defined $pid) {
		errx(2, "Cannot fork program child: $!");
	} elsif ($pid == 0) {
		# Child, exec program
		close $sendHandle;
		if ($dumping) {
			if (defined $readFrom) {
				open(STDIN, '<&', $readFrom);
			} else {
				open(STDIN, '<', '/dev/null');
			}
			if ($compress) {
				open(STDOUT, '>&', $gzipWrite) or errx(2,
				    "Error duplicating program output pipe: $!");
				close $gzipWrite;
			} else {
				open(STDOUT, '>&', $sendHandleChild) or errx(2,
				    "Error duplicating program output pipe: $!");
				close $sendHandleChild;
			}
		} else {
			$readFrom = \*STDIN unless defined $readFrom;
			if ($compress) {
				open($readFrom, '<&', $gzipRead) or errx(2,
				    "Error duplicating program input pipe: $!");
				close $gzipRead;
			} else {
				open($readFrom, '<&', $sendHandleChild) or errx(2,
				    "Error duplicating program input pipe: $!");
				close $sendHandleChild;
			}
		}

		unshift(@_, 'sudo', '-u', $as) if $as ne '';
		exec @_;
		die("Cannot exec program: $!");
	}
	# Parent, will read archive data
	close $sendHandleChild;
	print STDERR "AGENT: program child forked OK, PID ${pid}\n"
	    if $Verbose;
	push (@Pids, $pid);
	if ($compress) {
		$dumping ? close($gzipWrite) : close($gzipRead);
	}
	return $sendHandle;
}

# Open file to put data in.
#
# The data itself is written by a forked tee(1) child (optionally under
# sudo) so the file can be owned by the configured restore user.
#
# Arguments:
#   $item    - hashref representing item being downloaded.
#   $pathref - scalarref that'll consists of path to file created. We have to
#              use reference and not return value because this is actually a
#              callback function called from RB::Remote.
#
# Returns:
#   file handle to write in, or undef on error.
sub openForPutting($\$) {
	my ($item, $pathref) = splice(@_, 0, 2);
	my $itemName = $item->{'name'};    # Cache

	# Prepare output directory and determine archive name
	my ($dir, $itemPlain);
	if ($Config->{'separate_archives'}) {
		# Each item gets its own subdirectory and name prefix.
		$dir = $Config->{'workdir'} ne '' ?
		    $Config->{'workdir'}.'/'.$itemName : $itemName;
		$itemPlain = $itemName.'-';
	} else {
		$dir = $Config->{'workdir'} ne '' ? $Config->{'workdir'} : '.';
		$itemPlain = '';
		$itemName = $Config->{'module_name'};
	}
	if (-e $dir and !-d $dir) {
		$dir = dirname($dir);
	}
	mkpath $dir unless -d $dir;
	# Item names may contain slashes; flatten them for the file name.
	$itemPlain =~ s|/|_|sg;
	my $path = $dir.'/backup-'.$itemPlain.$Config->{'module_name'}.'-'.
	    $Config->{'date'}.$Config->{'archive_ext'};

	# Make sure file name is unique (string ++: '' -> 1 -> 2 -> ...)
	my $suffix = '';
	while (-e $path.$suffix) {
		$suffix++;
	}
	$path .= $suffix;
	print STDERR "AGENT: putting item $itemName to $path\n"
	    if $Verbose;
	${$pathref} = $path;

	# Open program
	my ($childRead, $childWrite);
	unless (pipe($childRead, $childWrite)) {
		print STDERR "AGENT: $!\n";
		return undef;
	}
	my $pid = fork;
	unless (defined $pid) {
		print STDERR "AGENT: $!\n";
		return undef;
	}
	if ($pid == 0) {
		# Child, dumps data to a file
		close $childWrite;
		unless (open(STDIN, '<&', $childRead)) {
			print STDERR "AGENT CHILD: dup failed: $!\n";
			# BUGFIX: the forked child must terminate here;
			# "return undef" would let a second copy of the main
			# program continue running past this callback.
			exit 2;
		}
		close $childRead;
		open(STDOUT, '>', '/dev/null');

		# NOTE: If sudo was compiled with --with-umask option then it'll
		# reset umask to value it compiled with. See bug 307:
		# http://www.sudo.ws/bugs/show_bug.cgi?id=307
		umask($Config->{'restore_as_group'} ne '' ? 0137 : 0177);

		my @cmd = ('sudo');
		push(@cmd, '-u', $Config->{'restore_as_user'})
		    if $Config->{'restore_as_user'} ne '';
		push(@cmd, qw|/usr/bin/tee --|, $path);
		print STDERR "AGENT: PUTTING CMD: ".join(' ', @cmd)."\n"
		    if $Verbose;
		exec(@cmd);
		print STDERR "AGENT CHILD: exec failed: $!\n";
		exit 2;
	}

	return $childWrite;
}

# Close archive file.
#
# Arguments:
#   $item    - hashref representing item being downloaded.
#   $to      - opened file handle.
#   $pathref - scalarref that consists of path to file created, see
#              openForPutting() description above for more comments.
#
# Returns:
#   true if OK, false otherwise.
sub endPutting($$\$) {
	my ($item, $to, $pathref) = splice(@_, 0, 3);
	my $ok = close $to;

	if ($ok and $Config->{'restore_as_group'} ne '') {
		# We have to use chown(1) instead of chown(3p) because
		# we do not work under root, and only root can change file
		# owner.
		my @cmd = ('sudo', 'chgrp', $Config->{'restore_as_group'},
		    ${$pathref});
		print STDERR "AGENT: CHGRP CMD: ".join(' ', @cmd)."\n"
		    if $Verbose;
		system(@cmd);
		if ($? != 0) {
			# BUGFIX: $! is not set when the child merely exits
			# nonzero; report the wait status (or $! only for a
			# failed fork/exec, where system() returns -1).
			print STDERR "AGENT: WARNING: cannot change group ".
			    'to "'.$Config->{'restore_as_group'}.'": '.
			    ($? == -1 ? $! : 'exit status '.($? >> 8))."\n";
		}
	}

	return $ok;
}

# Predeclare so the two mutually recursive routines below can call each other.
sub filterDirectoryByUnixTime;

# Checks if given file is modified after given date. If so and if it is not a
# directory or is an empty directory, then print file name. For directories,
# routine steps in with filterDirectoryByUnixTime().
#
# Arguments:
#   $time - Unix timestamp (seconds since Epoch).
#   $path - path to file to check.
#
# Returns:
#   true if something was printed by _this_ routine, false otherwise.
sub checkFileByUnixTime($$) {
	my ($time, $path) = @_;
	my @st = lstat $path;
	if (S_ISDIR($st[2])) {
		# Recurse; entries inside the directory are printed by the
		# recursive call, not by us.
		if (filterDirectoryByUnixTime($time, $path) < 0) {
			print STDERR "AGENT: Error reading $path: $!\n";
			return 0;
		}
	}
	# Compare mtime (field 9) and ctime (field 10) to the threshold.
	return 0 unless $st[9] > $time or $st[10] > $time;
	print $path."\n";
	return 1;
}

# Read given directory recursively and print items that were modified after
# timestamp given.
#
# Arguments:
#   $dir  - path to directory to read.
#   $time - Unix timestamp (seconds since Epoch).
#
# Returns:
#   number of items printed, or -1 on error.
sub filterDirectoryByUnixTime($$) {
	my ($time, $dir) = (shift, shift);
	my $dh;
	unless (opendir($dh, $dir)) {
		# BUGFIX: opendir(3) reports "search permission denied" as
		# EACCES, not EPERM, so the original EPERM-only check treated
		# an unreadable directory as a fatal error.  Accept both via
		# the %! errno hash (enabled by "use Errno" above).
		if ($!{EACCES} or $!{EPERM}) {
			# Not a big deal, but notification required
			print STDERR "AGENT: WARNING: not enough permissions ".
			    "to access directory $dir\n";
			return 0;
		} else {
			print STDERR "AGENT: cannot open directory $dir: $!\n";
			return -1;
		}
	}
	$dir .= '/' unless ($dir eq '' or $dir eq '/');
	my $printed = 0;
	while (defined(my $f = readdir $dh)) {
		next if $f eq '.' or $f eq '..';
		$printed++ if checkFileByUnixTime($time, $dir.$f);
	}
	closedir $dh;
	return $printed;
}

# Starts a new process that'll print names of files modified after specified
# date. Output is compatible with pax(1) (one name per line).
#
# The child re-executes this very script ($0) with the internal "filter"
# command (under sudo(8)) so timestamps are examined with the dump user's
# privileges.
#
# Arguments:
#   $as    - user to start filter as (sudo(8) used)
#   $date  - Date in YYYYMMDD format to compare to.
#   @files - Path(s) to traverse. It is OK to put here not a directory.
#
# Returns:
#   file handle ref to read output of process created.
#
# Do not return on error.
sub startFileDateFilter($$@) {
	my ($as, $date) = (shift, shift);
	my ($year, $month, $mday) =
	    (substr($date, 0, 4), substr($date, 4, 2), substr($date, 6, 2));
	# Midnight (local time) at the start of the given day.
	my $t = mktime(0, 0, 0, $mday, $month-1, $year-1900);

	my ($rfh, $wfh);
	pipe($rfh, $wfh)
	    or errx(2, "Error making child pipe: $!");

	my $pid = fork;
	unless (defined $pid) {
		errx(2, "Cannot fork: $!");
	} elsif ($pid == 0) {
		# We need to change working directory to home in case of relative path in $0
		my $wd = cwd;
		chdir()
		    or die "AGENT: Cannot change working directory back to home: $!";
		open(STDIN, '<', '/dev/null')
		    or die "AGENT: Cannot reopen stdin for traverse child: $!";
		open(STDOUT, '>&', $wfh)
		    or die "AGENT: Cannot reopen stdout for traverse child: $!";
		my @cmd = ('sudo');
		push(@cmd, '-u', $as) if $as ne '';
		push(@cmd, $0);
		push(@cmd, "verbose") if $Verbose;
		# Make relative paths absolute again: the child's cwd is now
		# the home directory, not the original working directory.
		push(@cmd, "filter", $t,
		    map { substr($_, 0, 1) eq '/' ? $_ : $wd.'/'.$_ } @_);
		print STDERR "AGENT: TRAVERSE COMMAND: ".join(" ", @cmd)."\n"
		    if $Verbose;
		exec(@cmd);
		die "AGENT: Cannot execute traverse child: $!";
		# CLEANUP: an unreachable in-process filtering fallback used
		# to live here; exec() never returns on success and the die
		# above terminates the child on failure, so it was removed.
	}
	push(@Pids, $pid);
	return $rfh;
}

# List all MySQL databases, one "name\n" string per element.
sub listAll_mysql() {
	my @result = `mysql -B -e 'SHOW DATABASES;'`;
	# Get rid of the column-name header line; disabling its printing is
	# not supported by all MySQL client versions. :(
	# BUGFIX: a bare "shift" inside a sub shifts @_, not @result, so the
	# header line was never actually removed.
	shift @result;
	return @result;
}

# List all PostgreSQL databases via psql(1).
sub listAll_postgres() {
	my @result = `psql -l`;
	# Get rid of the header line, same as in listAll_mysql().
	# BUGFIX: a bare "shift" inside a sub shifts @_, not @result.
	shift @result;
	return @result;
}

# List mount points of all local filesystems, parsed from mount(8) output
# of the form "DEVICE on DIR type TYPE (local, ...)".
sub listAll_fs() {
	my @result;

	# Read list of all filesystems and put other mentioned
	#  filesystems in front of it or at the end of it.
	foreach my $l (`mount`) {
		# BUGFIX: the previous pattern used [\b] -- a character class
		# containing a literal backspace -- and \.* requiring literal
		# dots, so it could never match real mount(8) output.  Use
		# proper \b word boundaries around "local" instead.
		next if $l !~ /^(\S+) on (.*) type (\S+) \(.*\blocal\b.*\)$/;
		push(@result, $2);
	}
	return @result;
}

# Take into account user ordering.
#
# A '*' entry in @Items expands to "everything else" (as reported by the
# matching listAll_* helper): items named before '*' come first, the
# expansion in sorted order next, items named after '*' last.  The
# 'resume_from' parameter discards everything before the named item.
#
# Returns:
#   ordered list of items to operate on (consumes @Items).
sub buildItemsList(@) {
	my (@out, @pre, @end);
	my $allMark = 0;

	while (@Items) {
		my $item = shift @Items;
		if ($item eq '*') {
			$allMark = 1;
			@end = grep { $_ ne '*' } @Items;
			last;
		}
		push (@pre, $item);
	}

	# BUGFIX: items listed before '*' (or the whole explicit list when no
	# '*' was given) were never copied into @out, so they were silently
	# dropped from the dump.  They must lead the result.
	@out = @pre;

	if ($allMark) {
		# Combine full list with priorities specified

		my $listFunc = 'listAll_'.$Config->{'what'};

		# HACK: We assume that order of items will not change
		# between runs. Thus we have to force sorting values below.

		COMBINE_DB_LIST:
		foreach my $item (sort(&{$listFunc})) {
			chomp $item;
			# Skip items already placed explicitly.
			foreach (@pre) {
				next COMBINE_DB_LIST if $_ eq $item;
			}
			foreach (@end) {
				next COMBINE_DB_LIST if $_ eq $item;
			}
			push(@out, $item);
		}
		push(@out, @end);
	}

	if ($Config->{'resume_from'} ne '') {
		my @tmp = @out;
		# BUGFIX: this loop and the message below used the key
		# 'resume-from', while every writer and the rest of the
		# script use 'resume_from' -- resuming never matched.
		while (@tmp and $tmp[0] ne $Config->{'resume_from'}) {
			shift @tmp;
		}
		if (@tmp) {
			print STDERR
			    "AGENT: Resuming from item ".
			    $Config->{'resume_from'}."\n"
			    if $Verbose;
			@out = @tmp;
		} else {
			print STDERR "AGENT: Item to resume from not ".
			    "found, restarting from scratch\n";
		}
	}

	print STDERR "AGENT: All items to be dumped: ".
	    join(', ', @out)."\n"
	    if $Verbose;
	return @out;
}

#   $cmd: [ program [arg ...] ]
# $items: [ item ... ]
#
# Run the dump program and stream its output as archive(s) to STDOUT.
# With 'separate_archives' one archive is sent per item; otherwise a single
# archive covers the whole module.  On a transfer failure $exitCode is set
# to the errno value and (in per-item mode) the remaining items are skipped.
sub dumpItems($$) {
	my ($cmd, $items) = @_;

	unless ($Config->{'separate_archives'}) {
		# Single archive for the whole module.
		my $fh = openProgram($Config->{'dump_as'}, 1,
		    $Config->{'compress'}, undef, @{$cmd}, @{$items});
		if (defined $fh) {
			print STDERR 'AGENT: DUMPING '.$Config->{'what'}.
			    ' ITEM MODULE '.$Config->{'module_name'}."\n"
			    if $Verbose;
			my $sentOK = sendArchive($fh, \*STDOUT,
			    $Config->{'module_name'}, $Config->{'separator'})
			    && syswrite(STDOUT, endHeaders);
			$exitCode = $!+0 unless $sentOK;
			close $fh;
		}
		waitMyPids;
		return;
	}

	# One archive per item; stop at the first transfer failure.
	foreach my $item (@{$items}) {
		my $fh = openProgram($Config->{'dump_as'}, 1,
		    $Config->{'compress'}, undef, @{$cmd}, $item);
		if (defined $fh) {
			print STDERR 'AGENT: DUMPING '.$Config->{'what'}.
			    " ITEM $item MODULE ".$Config->{'module_name'}."\n"
			    if $Verbose;
			unless (sendArchive($fh, \*STDOUT, $item,
			    $Config->{'separator'})
			    and syswrite(STDOUT, endHeaders)) {
				$exitCode = $!+0;
				close $fh;
				last;
			}
			close $fh;
		}
		waitMyPids;
	}
}


###################################################
# Main program flow starts here

if ($Command eq 'dump') {
	if ($Config->{'what'} eq 'files') {
		# Expand shell glob patterns in the item list; drop any
		# '.'/'..' entries the expansion may yield.
		my @files = grep { $_ ne '.' and $_ ne '..' }
		     map { glob } @Items;
		print STDERR "AGENT: File names escaped: ".
		    join(', ', @files)."\n"
		    if $Verbose;

		# pax(1) creates the archive; choose the on-disk format.
		my @paxCmd = qw/pax -w/;
		if ($Config->{'archive_format'} ne '') {
			push(@paxCmd, '-x',
			    $Config->{'archive_format'} eq 'tar' ?
			    'ustar' : 'cpio');
		}
		# Do not end command parameters with '--' here, we'll need to do
		# some item-based research later.

		# Now dump
		if ($Config->{'separate_archives'}) {
			# $baseDir is used for diagnostics only.
			my $baseDir;
			if ($Config->{'workdir'}) {
				print STDERR "AGENT: Using specified working ".
				    "directory as base\n"
				    if $Verbose;
				$baseDir = $Config->{'workdir'};
			} else {
				print STDERR "AGENT: Using current working ".
				    "directory as base\n"
				    if $Verbose;
				$baseDir = cwd;
			}
			print STDERR "AGENT: Base directory: $baseDir\n"
			    if $Verbose;

			# Skip files preceding the 'resume_from' item, if set.
			if ($Config->{'resume_from'} ne '') {
				my @files2 = @files;
				while (@files2 and
				    $files2[0] ne $Config->{'resume_from'}) {
					shift @files2;
				}
				if (@files2) {
					print STDERR
					    "AGENT: Resuming from item ".
					    $Config->{'resume_from'}."\n"
					    if $Verbose;
					@files = @files2;
				} else {
					print STDERR "AGENT: Item to resume ".
					    "from not found, restarting from ".
					    "scratch\n";
				}
			}

			# $noOK stays true until at least one archive is sent
			# successfully.
			my $noOK = 1;
			foreach my $file (@files) {
				# Check if we should create increment archive
				my $readNamesFrom;
				if ($IncrementDump{$file} and
				    $IncrementDump{$file}->{'date'} ne '') {
					$readNamesFrom = startFileDateFilter(
					    $Config->{'dump_as'},
					    $IncrementDump{$file}->{'date'},
					    $file);
					syswrite(STDOUT, encodeHeader(
					    'increment_from',
					    $IncrementDump{$file}->{'date'}));
				}

				# Dump always items with their names as prefix
				# in archive.
				#
				# If we'll need to extract item data into
				# directory backed up, we can use:
				# a) -C; b) -s tar(1)/pax(1) option.
				#
				# Also, this way it'll be possible to escape
				# collisions by creating some directory
				# hierarchy on the backup host inside module
				# directory.
				my @execCmd = @paxCmd;
				if (defined $readNamesFrom) {
					# Increment mode: pax reads the file
					# list from the filter child; -d keeps
					# it from recursing into listed dirs.
					push(@execCmd, '-d');
				} else {
					push(@execCmd, '--', $file);
				}
				my $out = openProgram($Config->{'dump_as'},
				    1, $Config->{'compress'}, $readNamesFrom,
				    @execCmd);
				if (defined $out) {
					# Discard statistics
print STDERR "AGENT: DUMPING FILE $file\n"
    if $Verbose;
					my $sentOK = sendArchive(
					    $out, \*STDOUT, $file,
					    $Config->{'separator'});
					# We SHOULD send ending headers always.
					if (syswrite(STDOUT, endHeaders)
					    and $sentOK) {
						$noOK = 0;
					}
					close $out;
				}
				my $progRetCode = waitMyPids;
				if ($progRetCode != 0) {
					$exitCode = $progRetCode;
					last;
				}
			}
			if ($exitCode == 0 and $noOK) {
				$exitCode = 2;
			}
		} else {
			# Simple as it can only be :)
			# A single increment entry with an empty name covers
			# the whole (non-separated) module.
			my $readNamesFrom;
			if (exists $IncrementDump{''}) {
				$readNamesFrom = startFileDateFilter(
				    $Config->{'dump_as'},
				    $IncrementDump{''}->{'date'},
				    @files);
				syswrite(STDOUT, encodeHeader('increment_from',
				    $IncrementDump{''}->{'date'}));
			}
			my @execCmd = @paxCmd;
			if (defined $readNamesFrom) {
				push(@execCmd, '-d');
			} else {
				push(@execCmd, '--', @files);
			}
			my $out = openProgram($Config->{'dump_as'},
			    1, $Config->{'compress'}, $readNamesFrom,
			    @execCmd);
			if (defined $out) {
print STDERR "AGENT: DUMPING FILES MODULE ".$Config->{'module_name'}."\n"
    if $Verbose;
				my $sentOK = sendArchive($out, \*STDOUT,
				    $Config->{'module_name'},
				    $Config->{'separator'})
				    && syswrite(STDOUT, endHeaders);
# NOTE(review): this message prints the PREVIOUS value of $exitCode --
# $sentOK has not been folded into it yet, so the wording is misleading.
print STDERR "AGENT: sendArchive() return $exitCode\n"
    if $Verbose;
				close $out;
				$exitCode = $sentOK ? 0 : $!+0;
			} else {
				$exitCode = 2;
			}
			if ($exitCode == 0) {
				$exitCode = waitMyPids;
			}
		}
	} elsif ($Config->{'what'} eq 'mysql'
	      or $Config->{'what'} eq 'postgres') {
		my @dbs = buildItemsList;
		my @cmd;

		if ($Config->{'what'} eq 'mysql') {
			@cmd = qw/mysqldump --opt --add-drop-table --add-locks
			    --no-create-db --databases --/;
		# We support MySQL and PostgreSQL only now
		} elsif ($Config->{'archive_format'} eq 'plain') {
			@cmd = qw/pg_dump -Fp -C -D -O
			    --disable-dollar-quoting/;
		} elsif ($Config->{'archive_format'} eq 'custom') {
			@cmd = qw/pg_dump -Fc --disable-dollar-quoting/;
		} else {
			# "tar", default
			@cmd = qw/pg_dump -Ft --disable-dollar-quoting/;
		}

		dumpItems(\@cmd, \@dbs);
	} elsif ($Config->{'what'} eq 'fs') {
		my @fss = buildItemsList;
		my @cmd = qw/dump -u -h 0 -f - --/;
		dumpItems(\@cmd, \@fss);
	}
} elsif ($Command eq 'put') {
	# "workdir" specifies directory where to put the files
	# Receive archives from STDIN and store them via the two callbacks.
	my $path;
	my ($itemsPut, $bytesRaw, $bytesData) = readArchives(
	    \*STDIN, $Config->{'separator'},
	    \&openForPutting, \&endPutting, \$path);
	# NOTE(review): only the error code of the LAST received item
	# survives here; earlier per-item failures are discarded.
	$exitCode = $itemsPut->[$#{$itemsPut}]->{'errcode'};
} elsif ($Command eq 'restore') {
	my @restoreCmd;    # Should only be set if no special processing happens

	if ($Config->{'what'} eq 'files') {
# 		TODO: More clean restore from increment dumps - remove files
# 		      which got removed between dumps. This should be done
# 		      through looking in "pax -v" output.
# 		if (%IncrementRestore) {
# 			# Here are some special processing goes :)
# 		} else {
# 			@restoreCmd = qw/pax -r -v --/;
# 		}
		@restoreCmd = qw/pax -r --/;
	} elsif ($Config->{'what'} eq 'mysql') {
		# NOTE: Client should have "~/.my.cnf" with user/pass for
		# mysql(1).
		@restoreCmd = qw/mysql --/;
	} elsif ($Config->{'what'} eq 'postgres') {
		# NOTE: Client should have "~/.pgpass" with user/pass for
		# psql(1).
		@restoreCmd = qw/psql -A -q -X -f - -1 --/;
	} elsif ($Config->{'what'} eq 'fs') {
		@restoreCmd = qw/restore -f - --/;
	}

	if (@restoreCmd) {
		# openProgram() serves as the "open" callback; the arguments
		# after the two callbacks are passed through to it.
		$exitCode = readArchives(*STDIN{IO}, $Config->{'separator'},
		    \&openProgram, sub { return close $_[1] ? 0 : $!+0; },
		    $Config->{'restore_as'}, 0, $Config->{'compress'}, undef,
		    @restoreCmd, @Items);
	}
}

waitMyPids;    # For safety

# Force numeric context: $exitCode may hold a string value taken from a
# header/errcode field.
$exitCode += 0;
print STDERR "AGENT: exiting with code $exitCode\n"
    if $Verbose;
exit $exitCode;
