#!/usr/bin/perl
$|=1;

use Fcntl ':flock';
use Data::Dumper;
use Date::Parse;
use Date::Format;
use Net::Amazon::S3;
use Digest::MD5::File qw(file_md5_hex);
use File::Find;

use Getopt::Long;
# Parse command-line options into %opts.  Note: Getopt::Long allows unique
# abbreviations, so e.g. -b works for --bucket, but -p is ambiguous between
# password/prefix/progress and -s between source/sort.
my %opts;
GetOptions(
	\%opts,
	"userid|aws_access_key_id=s",             # AWS access key id (required)
	"password|aws_secret_access_key=s",       # AWS secret key (required)
	"source=s",                               # local directory to mirror (no trailing slash)
	"bucket=s",                               # destination S3 bucket (required)
	"log=s",                                  # append level-1 messages to this file
	"output=s",                               # accepted but unused below -- TODO confirm intent
	"prefix:s",                               # optional key prefix (':' = value optional)
	"verbose+",                               # repeatable; raises verbosity level
	"delete",                                 # delete remote keys with no local counterpart
	"progress=i",                             # show per-chunk meter for files larger than this
	"sort=s",                                 # any value: upload newest files first
	"max=i"                                   # skip files larger than this many bytes
);

# Abort with usage text unless the four required options were supplied.
# Fixes vs. original text: "eqivalent" typo; "-aws_secret_access_key" had a
# single dash; -p and -s are ambiguous abbreviations (password/prefix/progress,
# source/sort) so the long forms are shown instead.
die qq(Usage:
	ps3sync -u userid --password password --source /source/path -b bucket [--prefix prefix] [-v]

	-u|--userid|--aws_access_key_id = aws_access_key_id
	--password|--aws_secret_access_key = aws_secret_access_key
	--source = path to source directory. leave off trailing slash!
	-b|--bucket = destination bucket on S3
	--prefix = prefix to pre-pend to keys
	-l|--log = logging file (equivalent to -v > filename)
	-v|--verbose = verbosity. use multiple times for:
		-v  : prints when adding/deleting keys, with progress meter
		-v -v : ... and prints progress while scanning bucket
		-v -v -v : ... and prints when skipping keys
	-d|--delete = deletes destination files that are not on source
	--progress x = progress bar for files > x bytes
	--sort date = upload newest files first
	--max x = upload files only of size <= x bytes

) unless(
	defined $opts{userid} &&
	defined $opts{password} &&
	defined $opts{source} &&
	defined $opts{bucket}
);

my $source_dir = $opts{source};			# local tree to mirror (no trailing slash)
my $dest_bucket_name = $opts{bucket};	# destination S3 bucket
my $prefix = $opts{prefix};				# optional key prefix (may be undef)

say(1,'-' x 50);
say(1,"ps3sync STARTED");
say(1,Dumper(\%opts));

# Connect to S3.  retry => 1 makes Net::Amazon::S3 retry transient failures.
my $s3 = Net::Amazon::S3->new( {
	aws_access_key_id     => $opts{userid},
	aws_secret_access_key => $opts{password},
	retry => 1
}) or panic($s3->err . ":" . $s3->errstr);
my $bucket = $s3->bucket($dest_bucket_name) or panic($s3->err .":". $s3->errstr);

# Pass 1: scan the bucket -- deletes/updates stale keys as a side effect and
# returns every key seen (so pass 2 can skip them).
my $s3List = s3List();

# Pass 2: walk the local tree collecting stat() records plus file/byte totals
# for the progress percentages printed by src2s3().
my $srcFiles;
my $fileCount = 0;
my $byteCount = 0;
my $fileProg = 0;
my $byteProg = 0;
# BUG FIX: File::Find chdirs into each directory by default, which made
# -f/stat on $File::Find::name fail for a relative --source path.  no_chdir
# keeps $File::Find::name valid from the starting directory.
find({ no_chdir => 1, wanted => sub {
	-f $File::Find::name or return;
	$srcFiles->{$File::Find::name} = [stat($File::Find::name)];
	$byteCount += $srcFiles->{$File::Find::name}->[7];	# slot 7 = size
	$fileCount++;
} }, $source_dir);

# With --sort, upload newest files first (stat slot 9 = mtime, descending).
my @keys = $opts{sort} ?
	sort {$srcFiles->{$b}->[9] <=> $srcFiles->{$a}->[9]} (keys %$srcFiles) :
	(keys %$srcFiles);

foreach(@keys) { src2s3($_); }

say(1,"ps3sync FINISHED");
say(1,'-' x 50);


############################################################################



sub s3List {
	# Scan the whole destination bucket (following list truncation) and
	# return a hashref of key => { visited => 1 } for every key seen.
	# Side effects per key:
	#   - local file missing + --delete  => delete the remote key
	#   - remote copy older than local mtime, or sizes differ => re-upload
	#
	# Interesting fields on each listed key, for reference:
	#	last_modified => '2008-04-28T17:34:32.000Z',
	#	etag => '61b521a80bb63e67f8ce511e925b20b9',
	#	size => '151656',

	my $conf = {
		bucket => $dest_bucket_name,
		prefix => $prefix
	};
	my $h;

	while(1) {
		my $r = $s3->list_bucket($conf);
		foreach (@{$r->{keys}}) {
			my $k = $_->{key};
			$h->{$k} = { visited => 1 };

			my $r_mtime = str2time($_->{last_modified});
			my $r_size = $_->{size};

			# Map the S3 key back to its local path.  \Q...\E so regex
			# metacharacters in the prefix are treated literally.
			my $path_end = $k;
			$path_end =~ s!^\Q$prefix\E/!! if defined $prefix;
			my $path = "$source_dir/$path_end";
			-f $path or do {
				# No local counterpart: optionally prune it from the bucket.
				if($opts{delete}) {
					$bucket->delete_key($k);
					say(1, "deleted $k");
				}
				next;
			};
			my ($dev,$ino,$mode,$nlink,$uid,$gid,$rdev,$size,$atime,$mtime)
				= stat($path);

			# Re-upload when the remote timestamp does not post-date the
			# local mtime, or when the sizes disagree.
			if($r_mtime <= $mtime or $r_size != $size) {
				# BUG FIX: message used undeclared $key (always empty);
				# the loop variable is $k.
				say(1, "updating $k...");
				upload($path, $k);
			} else {
				say(3, "no change in $k");
			}
		}

		# list_bucket returns at most one page; keep going while truncated.
		last unless $r->{is_truncated};

		my $next_marker = $r->{next_marker} || $r->{keys}->[-1]->{key};
		say(2,scalar(keys %$h), " -- $next_marker");

		$conf = {
			bucket => $dest_bucket_name,
			prefix => $prefix,
			marker => $next_marker
		};
	}
	return $h;
}


# File::Find callback form of the sync: hand the current path to src2s3().
# (Currently unused -- the driver iterates a pre-collected list instead.)
sub wanted {
	my $path = $File::Find::name;
	return src2s3($path);
}

sub src2s3 {
	# Upload one local file to S3 unless the bucket scan already covered it.
	my $file = shift;
	-f $file or return; # files only
	-s $file or return; # nonzero size only

	# Derive the S3 key from the path:
	#   /tmp/src/some/path/foo.ext -> some/path/foo.ext  (plus optional prefix)
	# BUG FIX: \Q...\E so regex metacharacters in the source dir are literal.
	my $key = $file;
	$key =~ s!^\Q$source_dir\E/!!;
	if($prefix) { $key = "$prefix/$key"; }

	my $size = $srcFiles->{$file}->[7];

	# BUG FIX: counters were only bumped under --progress, leaving the
	# always-printed percentages below stuck at 0% without that flag.
	$fileProg++;
	$byteProg += $size;

	return if $s3List->{$key}->{visited};	# already handled in s3List()

	# Guard against division by zero if invoked outside the normal driver.
	my $filePct = $fileCount ? int(10000*$fileProg/$fileCount)/100 : 0;
	my $bytePct = $byteCount ? int(10000*$byteProg/$byteCount)/100 : 0;
	say(1, "(f:$filePct% b:$bytePct%) adding $key ($size)...");

	# Otherwise this is a file not on S3.
	upload($file, $key);
}



sub upload {
	# Push one local file to the bucket under the given key, honoring
	# --max (skip oversized files) and --progress (per-chunk percent meter).
	my ($path, $key) = @_;

	my $fstat = $srcFiles->{$path};
	my $size = $fstat->[7];					# stat slot 7 = size
	my $dateStr = time2str("%c", $fstat->[9]);	# slot 9 = mtime

	if($opts{max} and ($size > $opts{max})) {
		say(1,"skipping $key, size greater than $opts{max}");
		return;
	}

	# Install a per-chunk progress callback for large files, or clear any
	# previously installed one (0 disables it).
	my $cb = 0;
	if($opts{progress} and $size > $opts{progress}) {
		my $sent = 0;
		$cb = sub {
			$sent += length(shift);
			print int(10000*$sent/$size)/100, "%\r";
		};
	}
	$s3->upload_cb($cb);

	my $r = $bucket->add_key_filename( $key, $path )
		or panic( $s3->err .":". $s3->errstr);

	say(1,"DONE: $path -> $key (size=$size, date=$dateStr)");
}




sub panic {
	# Fatal error: print/log the message and a dump of the S3 object, then die.
	my $str = join ' ', @_;
	say(0,"PANIC: $str\n");
	say(0,Dumper($s3));
	# BUG FIX: was `die $_;`, which died with whatever $_ happened to hold
	# at the call site instead of the actual error message.
	die $str;
}



sub say {
	# say($level, @msg): print @msg (newline-terminated) when the configured
	# verbosity is at least $level, and mirror important messages to --log.
	#
	#	say(1,"something"); -- print "something\n" if verbosity >= 1,
	#	                       and append it to the logfile if --log given.
	#
	# Level 0 is reserved for panic output.
	my $v = shift;
	# BUG FIX: `$opts{verbose} and ...` meant level-0 (PANIC) messages were
	# silently dropped unless -v was given; treat missing verbosity as 0 so
	# level 0 always prints.
	if(($opts{verbose} || 0) >= $v) { print @_, "\n"; }
	# Log panics (level 0) as well as normal level-1 events.
	if($v <= 1) { logwrite(@_); }
}


sub logwrite {
	# Append one timestamped "(pid) date message" line to the --log file,
	# serialized with flock so concurrent runs do not interleave.
	$opts{'log'} or return;
	# BUG FIX: use a lexical filehandle and check open() for failure
	# (was an unchecked bareword LOG handle).
	open my $log, '>>', $opts{'log'}
		or die("couldnt open ", $opts{'log'}, ": $!");
	flock($log, LOCK_EX) or die('couldnt flock ', $opts{'log'});
	# Re-seek to EOF: another process may have appended while we waited.
	seek $log, 0, 2;
	print {$log} "($$) ", time2str('%c', time), " ";
	print {$log} @_, "\n";
	close $log;
}
