#!/usr/bin/perl
#@flyinpoptartcat

#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU General Public License as published by
#    the Free Software Foundation, either version 3 of the License, or
#    (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU General Public License for more details.
#
#    You should have received a copy of the GNU General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.

BEGIN{
	use utf8;
	# Banner prints at compile time, before the (comparatively slow)
	# LWP modules are loaded. FIX: "Utitity" -> "Utility".
	print "\nReapper v1.1 - HTTP Download Utility\n";

	use LWP::UserAgent;
	use LWP::ConnCache;
	use HTTP::Request;
	use HTTP::Response;
	use Getopt::Long qw( GetOptions );
	use Tie::File;
	# File-wide package globals (the script runs without "use strict",
	# so these are visible everywhere; kept as-is for compatibility).
	use vars '$checkWrite', '$hostname';
}

	GetOptions(
		"dir=s"        => \my $dir,
		"dl=s"         => \my $downloadURL,
		"file=s"       => \my $file,
		"reap=s"       => \my $reap,
		"vast-mode"    => \my $vastMode,
		#"reap-depth=i" => \my $reapDepth,
		"attrib=s"     => \my $pattern,
		"moz-ua"       => \my $mozUA,
		"A=s"          => \my $AcceptExt,
		"v"            => \my $verbose,
	);

	$|=1; # unbuffered STDOUT so progress output appears immediately

	# Build the link-extraction pattern: a user-supplied attribute,
	# vast mode (anything inside quotes), or the default "src" attribute.
	if(defined $pattern){

		$pattern = $pattern . "=(\"|')";

	}elsif(defined $vastMode){

		$pattern = "(\"|')";

	}else{

		# FIX: the original wrote "my $pattern = ..." here, declaring a
		# new lexical that died at the end of this block — the default
		# pattern was silently discarded and $pattern stayed undef.
		$pattern = "src=('|\")";

	}

	# -dir is mandatory; without it show the help text and stop.
	unless(defined $dir){
		help();
		exit;
	}

	$dir =~ s/\\/\//g; # replace back slashes with forward slashes
	$dir = "./" . $dir unless $dir =~ m/^(\/|C:\/)/i; # relative paths become ./path

	# Create the directory structure one component at a time
	# (mkdir has no recursive mode; failures on existing dirs are harmless).
	my $befor = "";
	foreach my $component (split m/\//, $dir){
		$befor = $befor . $component . "/";
		mkdir($befor);
	}

	# Prepend a scheme only when none is present. FIX: also recognise
	# https so secure URLs are not turned into "http://https://...".
	$downloadURL = "http://" . $downloadURL if defined $downloadURL and not $downloadURL =~ m/^https?:\/\//i;

	# FIX: only prompt about a trailing slash when --reap was actually
	# given; the original matched against an undefined $reap and prompted
	# even when the option was absent.
	if(defined $reap and not $reap =~ m/\.(.|..|...|....)$/){
		print "would you like me to add a \"/\" to the end of the URL (y/n)? ";
		my $addAnswer = <>;

		if($addAnswer =~ m/y/i){
			$reap = $reap . "/";
		}
	}
	
	
#setup http interface
# FIX: the original passed "conn_cache => 1" to new(), but that option
# expects an LWP::ConnCache object; the bogus value was immediately
# overwritten by the conn_cache($cache) call below anyway.
my $ua = LWP::UserAgent->new;
my $cache = LWP::ConnCache->new;
$ua->conn_cache($cache); # use connection caching (faster)
$ua->timeout(2); # don't wait longer than 2 secs
$ua->max_redirect(2);

# Pick the User-Agent string: impersonate Mozilla on request, otherwise
# identify honestly as Reapper.
if(defined $mozUA){

	$ua->agent("Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.5) Gecko/20031027");

} else {

	$ua->agent("Reapper/v1.1 (HTTP download utility)");

}

# -dl: download a single file right away (progress always shown)
if(defined $downloadURL){

	print "downloading $downloadURL\n\n";
	$ua->show_progress(1);
	getAndSave($downloadURL);

}

$ua->show_progress(1) if defined $verbose;

# the host name is needed to absolutise relative links while reaping
$hostname = extractHostname($reap) if defined $reap;

fileList($file)            if defined $file;
reapLinks($reap, $pattern) if defined $reap;





sub reapLinks{
	# Fetch $todoUrl, split its HTML on $pattern, scrub each fragment
	# into an absolute URL and download everything that survives the
	# filters. Relies on file-scoped $ua, $hostname and $vastMode.
	my $todoUrl = shift;
	my $pattern = shift;

	my $get = $ua->get($todoUrl);

	if($get->is_success){
		print "page is reachable! now extracting links and downloading...\n";

		# FIX: was an undeclared package global that would accumulate
		# links across calls; now a lexical local to this sub.
		my @pureImageLinks;

		foreach my $linkWithGarbage ( split( m/$pattern/i, $get->decoded_content ) ){

			$linkWithGarbage =~ s/(\n|\t|\r|\x20)//g; # strip whitespace
			$linkWithGarbage =~ s/("|').*//;          # keep only up to the closing quote

			if(not defined $vastMode){

				$linkWithGarbage =~ s/<script.*?>.*<\/script>//gi; # strip javascript

			}

			next if length $linkWithGarbage < 1; # don't try blanks

			# handle alone file names as links
			unless($linkWithGarbage =~ m/\//){

				$linkWithGarbage = $todoUrl . $linkWithGarbage;

			} elsif($linkWithGarbage =~ m/^\.\//) {

				$linkWithGarbage =~ s/^\.\///;
				$linkWithGarbage = $todoUrl . $linkWithGarbage;

			}


			# a fix for "//domain/uri" type urls
			if($linkWithGarbage =~ m/^\/\//){
				$linkWithGarbage = "http" . $linkWithGarbage;
			}

			# FIX: recognise https too; the original prefixed the
			# hostname onto already-absolute https links.
			unless($linkWithGarbage =~ m/^https?:/i){

				$linkWithGarbage = "http://$hostname" . $linkWithGarbage;

			}


			# reject directory indexes, fragment-only links and
			# one-character paths before queueing the URL
			push(@pureImageLinks, $linkWithGarbage) unless $linkWithGarbage =~ m/(https?:\/\/$hostname\/.*\/$|https?:\/\/$hostname\/#.+|https?:\/\/$hostname\/.$)/;
		}

		shift @pureImageLinks; # the first split chunk precedes any delimiter, never a link

		downloadList(@pureImageLinks);

	} else {

		print "the url you specified cannot be retrieved\n";

	}

}





sub fileList{
	# Download every URL listed (one per line) in the given file.
	my $File = shift;

	# FIX: the original ignored tie() failure, silently downloading
	# nothing when the list file could not be opened.
	tie my @list, "Tie::File", $File, recsep => "\n"
		or die "cannot open link list '$File': $!\n";

	downloadList(@list);

	untie @list; # release the file handle held by Tie::File
}

sub extractHostname{
	# Return the bare host part of a URL: strip the scheme, then drop
	# everything from the first "/" after the host onward.
	# FIX: $toExtract was an undeclared package global; now a lexical.
	my $toExtract = shift;

	$toExtract =~ s/https?:\/\///i; # drop "http://" or "https://" (any case)
	$toExtract =~ s/\/.*//;         # drop path/query after the host

	return $toExtract;

}

sub downloadList{
	# Walk the supplied list of URLs and hand each one to getAndSave().
	for my $link (@_) {
		getAndSave($link);
	}
}

sub getAndSave{
	# Fetch a single URL and save the response body under $dir, named
	# after the last path component of the URL. Existing files are never
	# overwritten. Relies on file-scoped $ua, $dir and $verbose.
	my $url = shift;
	my $writefile;

	chomp $url; # list entries may carry a trailing newline
	my $get = $ua->get($url);

	# derive the local file name: strip scheme, keep last path component
	my $name = $url;
	$name =~ s/^https?:\/\///i;
	$name =~ s/.*\///g;

	if($get->is_success and not -e "$dir/$name" and length $get->decoded_content >= 1){

		# FIX: the original opened "$dir"."$name" (no separator) while
		# the -e test above checked "$dir/$name" — when $dir lacked a
		# trailing slash the file was written to the wrong path and the
		# existence check never matched. Use "$dir/$name" consistently.
		unless(open $writefile, ">", "$dir/$name"){
			print STDERR "cannot open $dir/$name: $!\n";
			return;
		}
		binmode $writefile, ":bytes"; # a fix for wide char errors
		my $checkWrite = syswrite $writefile, $get->decoded_content;
		close $writefile;

		# FIX: was "$checkWrite eq undef" — a broken string comparison;
		# syswrite returns undef on error, so test definedness.
		if(not defined $checkWrite){

			print STDERR "failed to write to the file!\n";

		}else{

			print "DONE: $url\n";

			if(defined $verbose){
				print "SAVED FILE: $dir/$name\nBYTES WRITTEN: " . (-s "$dir/$name") . "\n\n";
			} else {
				print "\n";
			}

		}

	}

	# try to unload from RAM after done
	undef $writefile;
	undef $get;

}

sub help{
	# Print the usage text and option summary to STDOUT.
	# FIX: the original told users the default attribute was "scr"
	# (twice) — the code actually matches "src" — plus assorted
	# spelling fixes ("useage", "speparated", "befor", ...).

print q{
usage: perl reapper.pl [-dir] [OPTIONS]

OPTIONS:

    -dir [dir]        -  Location to save files

    -dl [url]         -  Download this file

    --reap [url]      -  Downloads all files within the
                         src HTML attribute of the page (good for images)

    --vast-mode       -  Try to download anything within quotes. ONLY use this
                         if --reap seems to not be able to find links.

    --attrib [regex]  -  A case insensitive regex used to specify the
                         HTML attribute you expect the links to the files will be in.
                         Default is src. Must be used with -reap

    --file [file]     -  Download from this list of line separated links

    --moz-ua          -  Use a Mozilla HTTP user agent

    -v                -  Verbose. Talk a lot

EXAMPLES:
    reapper.pl -dir "/usr/apache" -dl "http://apache.osuosl.org/httpd/httpd-2.4.4.tar.gz"
    reapper.pl -dir "/images/" --reap "http://sitewithimagegallery.com/gallery/" --moz-ua
    reapper.pl -dir "C:\archive" --reap "http://sitewithindexing.com/achive/" --attrib href
    reapper.pl -dir "C:\pdfs\" --file linklist.txt

Reapper loads the file into memory before writing it to disk, so be careful not to
download huge files. 500MB would probably be the max (unless you have vast amounts of RAM).
};

}
