#!/usr/bin/perl

use strict;
use warnings;

use WWW::Mechanize;
use Data::Dumper;
use HTML::TreeBuilder;

use IO::Handle;

use Encode;
# binmode STDOUT, ":utf8";

#use Log::Log4perl qw(:easy);
#Log::Log4perl->easy_init(
#	{ level => $INFO, file => "STDOUT", utf8 => 1, layout => '%-5p - %m%n' },
#	{ level => $DEBUG, layout => '%-5p - %m%n', file => ':utf8>output.log' },
#);

##########################
# Log4perl INITIALISATION
##########################

use Log::Log4perl qw(get_logger);

# Two appenders share the root (DEBUG) category:
#   LOGFILE - everything at DEBUG and above, written to output.log,
#             truncated ("clobber") on every run, UTF-8 encoded.
#   CONSOLE - INFO and above only, printed to STDOUT (stderr = 0), UTF-8.
# Both use SimpleLayout ("LEVEL - message\n").
my $conf = q(
    log4perl.category       = DEBUG, CONSOLE, LOGFILE

    log4perl.appender.LOGFILE           = Log::Log4perl::Appender::File
    log4perl.appender.LOGFILE.filename  = output.log
    log4perl.appender.LOGFILE.Threshold = DEBUG
    log4perl.appender.LOGFILE.mode      = clobber
    log4perl.appender.LOGFILE.utf8      = 1
    log4perl.appender.LOGFILE.layout    = Log::Log4perl::Layout::SimpleLayout

    log4perl.appender.CONSOLE           = Log::Log4perl::Appender::Screen
    log4perl.appender.CONSOLE.Threshold = INFO
    log4perl.appender.CONSOLE.stderr    = 0
    log4perl.appender.CONSOLE.utf8      = 1
    log4perl.appender.CONSOLE.layout    = Log::Log4perl::Layout::SimpleLayout
  );

# ... passed as a reference to init()
Log::Log4perl::init( \$conf );

# Shortcut mimicking Log::Log4perl's :easy ERROR() — logs one message
# at ERROR level via the root logger. (Prototype kept for parse parity.)
sub ERROR($) {
	get_logger()->error( $_[0] );
}
# Shortcut mimicking Log::Log4perl's :easy DEBUG() — logs one message
# at DEBUG level via the root logger. (Prototype kept for parse parity.)
sub DEBUG($) {
	get_logger()->debug( $_[0] );
}
# Shortcut mimicking Log::Log4perl's :easy INFO() — logs one message
# at INFO level via the root logger. (Prototype kept for parse parity.)
sub INFO($) {
	get_logger()->info( $_[0] );
}
# Shortcut mimicking Log::Log4perl's :easy WARN() — logs one message
# at WARN level via the root logger. (Prototype kept for parse parity.)
sub WARN($) {
	get_logger()->warn( $_[0] );
}

##########################
# FUNCTIONS
##########################

sub process_content {

	# Extract a job posting (title / location) from a fetched page and,
	# once per title, record it to the shared output handle ($main::fh_out)
	# and the console log. Seen titles are tracked in %main::job_title_seen.
	#
	# Args: $content - raw HTML of the page
	#       $url     - absolute URL the content came from
	#       $depth   - crawl depth (informational only)
	#       $path    - human-readable link trail (may be undef for the seed)
	my $content = shift;
	my $url     = shift;
	my $depth   = shift;
	my $path    = shift;

	$path = '' unless defined $path;    # seed page carries no link trail

	# Cheap pre-filter: only job-listing pages contain these phrases.
	if (   $content =~ /This position is based/
		|| $content =~ /View job cart/ )
	{
		my $tree = HTML::TreeBuilder->new;
		$tree->parse_content($content);

		my $element = $tree->look_down( "_tag", "h2" );
		unless ( defined $element ) {
			ERROR "UNPARSABLE CONTENT IN " . $url;
			$tree->destroy();    # FIX: tree was leaked on this early return
			return;
		}
		my $content_div = $element->as_text;

		# Skip the generic "Let's work together." banner heading; the
		# apostrophe may be a non-ASCII curly quote, hence the dot.
		if ( $content_div !~ /Let.*s work together./ ) {
			$content_div = decode_utf8($content_div);

			# Split into name / location. LIMIT 2 keeps locations that
			# themselves contain " - " from being truncated.
			my @field = split( / - /, $content_div, 2 );

			# can't infer it for japanese,etc.
			$field[1] = q{}
			  unless defined $field[1];

			# get rid of the +1 link
			$field[1] =~ s/[^[:ascii:]]+//g;
			$field[1] =~ s/\s+$//;

			unless ( exists $main::job_title_seen{ $field[0] } ) {
				INFO $depth . "\t"
				  . $field[1] . "\t"
				  . $field[0] . "\t"
				  . $path . "\t"
				  . $url;

				print $main::fh_out datetime() . "\t"
				  . $field[1] . "\t"
				  . $field[0] . "\t"
				  . $path . "\t"
				  . $url . "\n";

			}
			else {
				DEBUG "SEEN ALREADY..." . $field[0];
			}
			$main::job_title_seen{ $field[0] }++;
		}
		$tree->destroy();
	}

	return;
}

sub crawl_deeper {

	# Depth-first crawl starting at $seed_url. Every fetched page is handed
	# to process_content(); google-internal links whose URL contains "job"
	# are followed, up to $MAXDEPTH levels deep. Visited relative paths are
	# tracked in %main::excluded_rel_link so each page is fetched once.
	my $seed_url = shift;

	my $MAXDEPTH = 30;
	my $counter  = 0;

	# Seed work item. relative_link is set to '' explicitly — it was
	# previously left undef, which triggered uninitialized-value warnings
	# when process_content concatenated it.
	my $seed = {
		url           => $seed_url,
		depth         => 0,
		path          => '',
		relative_link => '',
	};

	my @left_to_process = ($seed);

	# One user agent for the whole crawl (previously a new object was
	# constructed per page). autocheck => 0: failed GETs do not die.
	my $mech =
	  WWW::Mechanize->new( agent => 'contact@gjobssearch.com', autocheck => 0 );

	# push + pop below makes @left_to_process a LIFO stack, i.e. this is a
	# depth-first traversal (the old comment incorrectly said "queue").
	while (@left_to_process) {
		$counter++;

		INFO "PROCESSED $counter" if ( $counter % 100 == 0 );

		my $current = pop @left_to_process;

		# load content
		$mech->get( $current->{url} );
		unless ( $mech->success ) {
			DEBUG "COULD NOT FETCH $current->{url}";
			next;
		}

		# is job listing?
		process_content(
			$mech->content,    $current->{url},
			$current->{depth}, $current->{relative_link}
		);

		next if $current->{depth} > $MAXDEPTH;

		# Follow google-internal links only. FIX: dots escaped so "." no
		# longer matches any character in the host name.
		my @links =
		  $mech->find_all_links( url_abs_regex => qr{^http://www\.google} );

		for my $link (@links) {

			# skip links seen already or not job-related
			next if exists $main::excluded_link{ $link->url_abs() };
			next if exists $main::excluded_rel_link{ $link->url_abs()->path };
			next if $link->url_abs() !~ /job/;

			# make this link seen
			$main::excluded_rel_link{ $link->url_abs()->path }++;

			# Prefer the anchor text for the breadcrumb; fall back to the URL.
			my $label =
			  defined $link->text() ? $link->text() : $link->url_abs();

			# new work item, one level deeper
			push @left_to_process,
			  {
				path          => $current->{path} . ' >> ' . $label,
				depth         => $current->{depth} + 1,
				url           => $link->url_abs()->as_string,
				relative_link => $link->url_abs()->path,
			  };
		}

	}

	return;
}

# Current local date formatted as dd/mm/yyyy (day and month zero-padded).
sub datetime() {
	my ( $mday, $mon, $year ) = ( localtime() )[ 3, 4, 5 ];
	return sprintf '%02d/%02d/%4d', $mday, $mon + 1, $year + 1900;
}

sub load_seen_links() {

	# Preload the "already seen" state from a previous run of this script:
	# jobs.txt lines are TAB-separated as  date \t location \t title \t
	# relative_link \t url.  Populates %main::excluded_rel_link (field 3)
	# and %main::job_title_seen (field 2).
	my $counter = 0;

	# FIX: open was unchecked and the handle was a package global that was
	# never closed. On the very first run jobs.txt does not exist yet —
	# treat that as "nothing seen" instead of reading an undef handle.
	open my $fh_in, '<:utf8', 'jobs.txt'
	  or do {
		INFO("NO EXISTING jobs.txt - STARTING FRESH");
		return;
	  };

	while ( my $line = <$fh_in> ) {
		$counter++;
		chomp $line;
		my @field = split /\t/, $line;

		# skip malformed/short lines rather than creating undef hash keys
		next unless defined $field[2] && defined $field[3];

		$main::excluded_rel_link{ $field[3] }++;
		$main::job_title_seen{ $field[2] }++;
	}
	close $fh_in;

	INFO("PRELOADED $counter");
	return;
}

##########################
#MAIN
##########################

#my $start_url = "http://www.google.com/intl/en/jobs/locations/";
my $start_url = "http://www.google.pl/intl/en/jobs/poland/index.html";

#my $start_url = "http://www.google.co.uk/intl/en/jobs/index.html";

# Absolute URLs the crawler must never follow.
our %excluded_link = (
	'http://www.google.com/'                    => undef,
	'http://www.google.com/jobs/css/styles.css' => undef,
);

# Relative paths never followed; also serves as the "visited" set,
# incremented by crawl_deeper() and preloaded by load_seen_links().
our %excluded_rel_link = (
	'/intl/en/about.html'          => undef,
	'/intl/en/jobs/privacy/'       => undef,
	'/intl/en/jobs/lifeatgoogle/'  => undef,
	'/intl/en/jobs/joininggoogle/' => undef,
	'/'                            => undef,
);

# Job titles already recorded — prevents duplicate output lines.
our %job_title_seen = ();

load_seen_links();

# Append new findings to jobs.txt. FIX: the open was unchecked — fail
# loudly instead of silently printing to an unopened handle.
open our $fh_out, '>>:utf8', 'jobs.txt'
  or die "Cannot open jobs.txt for append: $!";
$fh_out->autoflush(1);    # flush each record: the crawl may be interrupted

crawl_deeper($start_url);

# FIX: check close on the write handle — buffered write errors surface here.
close $fh_out
  or die "Cannot close jobs.txt: $!";
