#!/usr/bin/perl -w
use strict;

use WWW::Curl::Easy;
use URI::Escape;
use CGI qw(:standard);
use XML::Parser;

## Some setup ##
my $max_results;                     # requested number of results (-c argument)
my $stats_results = "";              # accumulated "about N results" strings, one per page
my $url;                             # current search-page URL (built by doSetup)
my $curl = WWW::Curl::Easy->new;     # direct method call; avoid indirect "new Class" syntax
my $item;                            # scratch loop variable (used by readUserInput)
my $total_pages;                     # number of result pages to fetch
my $start = 0;                       # result offset of the current page
my $ndsp = 20;                       # results per page

## Options ##
# xga (1024 x 768 or greater)
# nmp (with n = 1,2,4,6,8,10)
my $islt = "xga";

## Search parameter ##
my $search_param = "";               # space-joined query words (initialized to avoid undef)

## User input parsing ##
# Parses @ARGV of the form: <query words...> -c <number of results>.
# Query words accumulate into $search_param (space separated; the
# trailing space is stripped when -c is seen).  The argument right
# after -c becomes $max_results; anything after it is ignored.
# Finally derives $total_pages from $max_results and the page size.
sub readUserInput
{
	my $found = 0;

	foreach my $arg (@ARGV) {

		if ($found == 1) {
			## Argument right after -c: the result count ##
			$max_results = $arg;
			$found = 2;    # stop: ignore any further arguments
		}
		## Anchored match: plain m/-c/ also matched query words
		## that merely CONTAIN "-c"
		elsif ($found == 0 && $arg =~ m/\A-c\z/) {
			## Found -c flag, removing trailing space from search_param ##
			$search_param =~ s/\s+\z//;
			$found = 1;
		}
		elsif ($found == 0) {
			## Adding words to search_param ##
			$search_param .= $arg . " ";
		}
	}

	## Updating total pages stuff; guard against a missing -c count ##
	$total_pages = defined $max_results ? $max_results / $ndsp : 0;
}

## Query encoding ##
# Turns every whitespace character in $search_param into '+' so the
# query can be embedded in the search URL.  The original substitution
# lacked /g, so only the FIRST space was converted and searches of
# three or more words produced a broken URL.
sub parseSearch
{
	$search_param =~ s/\s/+/g;
}

## Url generation ##
# Builds the Google Images search URL into $url from $search_param,
# $islt (minimum image size), $start (result offset) and $ndsp
# (results per page).
sub doSetup
{
	# Plain call with parens: '&sub;' silently re-passes the
	# caller's @_ to the callee.
	parseSearch();

	$url = "http://www.google.it/images?q="
	     . $search_param
	     . "&um=1&hl=it&biw=1366&bih=664&tbs=isch:1,isz:lt,islt:" . $islt . "&source=lnt"
	     . "&start=" . $start . "&ndsp=" . $ndsp;

	# NOTE(review): $search_param is only space-encoded by
	# parseSearch(); URI::Escape is loaded but never used --
	# consider uri_escape() for queries with special characters.
}

## Page fetch + scrape ##
# Downloads the current $url into files/tmp.html via libcurl, scrapes
# the "about N results" text into $stats_results, and appends every
# image URL found in the page to files/list.txt.  Prints an error and
# returns without scraping when the transfer fails.
sub execute
{
	## Temporary file receiving the page body (3-arg open, checked) ##
	open(my $tmp, ">", "files/tmp.html")
		or die "Cannot write files/tmp.html: $!\n";

	## Curl setup ##
	$curl->setopt(CURLOPT_URL, $url);
	$curl->setopt(CURLOPT_WRITEDATA, $tmp);

	## Curl performing the transfer; 0 means success ##
	my $retcode = $curl->perform;

	if ($retcode == 0) {
		## Flush the body before re-reading it ##
		close($tmp);

		## Slurp the downloaded page in one go ##
		open(my $page, "<", "files/tmp.html")
			or die "Cannot read files/tmp.html: $!\n";
		my $res = do { local $/; <$page> };
		close($page);

		## Extract the result-count snippet ##
		if ($res =~ m/resultStats>(.*?)<nobr>/) {
			$stats_results .= $1 . "\n";
		} else {
			print "Oi oi, something went wrong!!! Maybe Google changed the html?\n";
		}

		## Append every image URL for the later download pass ##
		open(my $output, ">>", "files/list.txt")
			or die "Cannot append to files/list.txt: $!\n";
		while ($res =~ m/imgres\?imgurl\\x3d(.*?)\\x26/g) {
			print $output $1 . "\n";
		}
		close($output);

	} else {
		print("Error: $retcode\n");
	}
}

## Ctrl-C handling ##
# SIGINT handler.  Both installers use $SIG{'INT'} = 'ctrlc', but the
# sub was originally misspelled "ctrcl", so the handler never fired
# and Ctrl-C fell through to the default action without ever saving
# state.  Renamed to match the installers.
sub ctrlc
{
	exit(0);
}

## Downloading part ##
# Reads image URLs from ../files/list.txt (the caller has already
# chdir'd into images/) and downloads each with the curl binary,
# recording the index of the last successful download in
# ../files/save.txt so a later "-r" run can resume.
sub downloadAll
{
	my $curr = 0;

	open(my $input, "<", "../files/list.txt")
		or die "Cannot read ../files/list.txt: $!\n";

	## Install the SIGINT handler once, not on every iteration ##
	$SIG{'INT'} = 'ctrlc';

	## while-loop reads line by line instead of slurping the
	## whole list into memory
	while (my $line = <$input>) {
		chomp($line);

		## List-form system bypasses the shell, so URLs with
		## shell metacharacters cannot inject commands
		my $res = system("curl", "-C", "-", "-O", $line);

		if ($res == 0) {
			print "Download of " . $line . " was successful\n";
			## Remember how far we got, for -r resume ##
			open(my $save, ">", "../files/save.txt")
				or die "Cannot write ../files/save.txt: $!\n";
			print $save $curr;
			close($save);
		} else {
			print "Download of " . $line . " failed\n";
		}
		$curr += 1;
	}

	close($input);
}


## Main program part ##
# No arguments now shows the help text too (previously it warned on
# an undef $ARGV[0] and fell through to an empty search).  Flag
# matches are anchored: plain m/-r/ also matched any query starting
# with a word that merely contained "-r".
if (!@ARGV or $ARGV[0] =~ m/\A--help\z/) {
	## Help file ##
	print "\nUsage: ./sandworm.pl <query search> -c <number of results needed>\n";
	print "To resume last session: ./sandworm.pl -r\n\n";

} elsif ($ARGV[0] =~ m/\A-r\z/) {
	## Resume download: save.txt holds the index (0-based, last
	## line wins) of the last successfully fetched URL ##
	my $val;
	open(my $resume, "<", "files/save.txt")
		or die "Nothing to resume: cannot read files/save.txt: $!\n";
	while (my $saved = <$resume>) {
		chomp($saved);
		# Assign explicitly: the original used $val itself as
		# the foreach loop variable, which Perl localizes, so
		# $val was always undef after the loop.
		$val = $saved;
	}
	close($resume);

	## Handles opening ##
	open(my $last, "<", "files/list.txt")
		or die "Cannot read files/list.txt: $!\n";

	chdir "images/" or die "Cannot chdir to images/: $!\n";
	$SIG{'INT'} = 'ctrlc';

	my $index = 0;    # absolute position of the current line in list.txt
	foreach my $rest (<$last>) {
		my $this_index = $index;
		$index += 1;

		## Skips entries already downloaded in the last session ##
		$val -= 1;
		next if ($val > 0);

		chomp($rest);
		## List form keeps shell metacharacters in URLs harmless ##
		my $res = system("curl", "-C", "-", "-O", $rest);

		if ($res == 0) {
			print "Download of " . $rest . " was successful\n";
			## Record the absolute index so the next -r run
			## resumes correctly (the original wrote an
			## uninitialized counter: my $next = $rest before
			## $rest had a value, i.e. always undef).
			open(my $save, ">", "../files/save.txt")
				or die "Cannot write ../files/save.txt: $!\n";
			print $save $this_index;
			close($save);
		} else {
			print "Download of " . $rest . " failed\n";
		}
	}
	close($last);

} else {
	## The magic happens ##
	readUserInput();
	my $i = 0;
	print "Dobbiamo cercare $total_pages pagine\n";

	## Url creation and pages getting ##
	while ($i < $total_pages) {
		print "Scanning page $i...";
		## Process to download all img urls ##
		doSetup();
		execute();
		$i += 1;
		$start += $ndsp;
		print "[DONE]\n";
	}

	## Directory setup (checked: otherwise downloads land in cwd) ##
	chdir "images/" or die "Cannot chdir to images/: $!\n";
	## Downloads!! ##
	downloadAll();
}
