package GDoc::Job::Fetch::US::Legi::CRS::NCSE;

use Moose;
use WWW::Mechanize;
use Text::Capitalize;
use Data::Dumper;
use Encode 'decode_utf8';

use GDoc::Job;

extends 'GDoc::Job::Fetch::US::Legi::CRS';

# Configure the scrape targets for the NCSE mirror of CRS reports.
# _baseurl is the listing entry point; _basedir is the site root used
# to absolutize root-relative PDF hrefs.
sub setup {
    my ($self) = @_;

    $self->_baseurl('http://ncseonline.org/NLE/CRS/');
    $self->_basedir('http://ncseonline.org');
}

# Dispatch based on which key the job payload carries:
#   pdf_link   -> download a single report PDF
#   index_link -> walk one category's paginated index
#   (neither)  -> scrape the top-level category list
sub run {
    my ($self) = @_;

    my $data = $self->data;
    return $self->_fetch_pdf   if $data->{pdf_link};
    return $self->_fetch_index if $data->{index_link};
    return $self->_fetch_base;
}


# Scrape the top-level CRS listing page and enqueue one fetch-index job
# per category found. Errors submitting a job are logged and skipped so
# one bad category doesn't abort the whole crawl.
sub _fetch_base {
    my $self = shift;

    $self->logger->info( "Fetching base " . $self->_baseurl );
    my $mech = $self->_mech;
    $mech->get( $self->_baseurl );

    # Category index pages look like "Detail.cfm?Category=...".
    # The dot is escaped so it matches a literal '.' only.
    my @links = $mech->find_all_links( url_regex => qr{Detail\.cfm\?Category=\w+} );

    foreach my $link ( @links ) {
        # Let Mechanize resolve the href against the page it came from
        # instead of blindly prepending _baseurl (which breaks if the
        # site ever emits absolute or root-relative hrefs).
        my $url = $link->url_abs->as_string;
        $self->logger->info( "Found index $url" );

        my $job = GDoc::Job->new( class => 'Fetch::US::Legi::CRS::NCSE',
                                  queue => $self->fetch_queue,
                                  data  => { index_link => $url },
                                );
        eval {
            $job->submit;
        };

        if ( my $err = $@ ) {
            $self->logger->error( "Unable to submit fetch-index job: $err" );
        }
    }
}


# Walk one category's paginated index, enqueueing a fetch-PDF job for
# each report we don't already have in S3. Follows "Next >>" links until
# the last page, sleeping between requests to be polite. The %seen hash
# guards against pagination cycles (the original declared it but never
# used it, leaving the while(1) loop able to spin forever).
sub _fetch_index {
    my $self = shift;
    my %data = %{ $self->data };

    $self->logger->info( "Fetching index page $data{index_link}" );
    my $mech = $self->_mech;
    $mech->get( $data{index_link} );

    # Remember every index URL visited so a cyclic "Next >>" link
    # cannot trap us in an infinite loop.
    my %seen = ( $data{index_link} => 1 );

    # loop over index pages
    while ( 1 ) {
        my @pdf_links = $self->_parse_pdf_links( $mech->response->content );
        @pdf_links = $self->_remove_already_collected( @pdf_links );

        foreach my $link ( @pdf_links ) {
            my $job = GDoc::Job->new( class => 'Fetch::US::Legi::CRS::NCSE',
                                      queue => $self->fetch_queue,
                                      data  => $link,
                                    );

            eval {
                $job->submit;
            };

            if ( my $err = $@ ) {
                $self->logger->error( "Unable to submit fetch-PDF job: $err" );
            }
        }

        my $next_link = $mech->find_link( text_regex => qr{Next >>} )
            or last;

        # Compare absolute URLs so relative "Next" hrefs match the
        # absolute index_link we seeded %seen with.
        my $next_url = $next_link->url_abs->as_string;
        if ( $seen{$next_url}++ ) {
            $self->logger->error( "Pagination cycle detected at $next_url; stopping" );
            last;
        }

        sleep 5;   # be polite
        $self->logger->info( "Fetching next index page " . $next_link->url );
        $mech->get( $next_link );
    }
}


# Extract report metadata from one index page's HTML. Each report sits
# in its own WIDTH="686" table containing an abstract link (report
# number + title) and an href to the PDF itself. Returns a list of
# hashrefs: { num, desc, pdf_link }.
sub _parse_pdf_links {
    my $self = shift;
    my $html = shift;

    # mech response gets us raw bytes; we need the decoded UTF-8 for
    # below regexes
    $html = decode_utf8( $html );

    # each document has its own big table in the list
    my @tables = ( $html =~ m{<table .*?WIDTH="686">(.*?)</table>}gsi );

    my @links;
    foreach my $tab ( @tables ) {
        # BUG FIX: %+ persists across *failed* matches, so the old code
        # ("next unless $+{num} ...") could read stale captures from the
        # previous table and emit duplicated/corrupt entries. Test the
        # match itself before touching %+.
        next unless $tab =~ m{<a href="abstract\.cfm\?NLEid=\d+">(?<num>\w+)\s+-\s+(?<desc>.*?)</a>};
        next unless $+{num} and $+{desc};

        my $data = { num => $+{num}, desc => $+{desc} };

        # fix MS curly-quote bullshit (it breaks SQS)
        $data->{desc} =~ s/\x{2018}|\x{2019}|\x{201B}/\'/g;
        $data->{desc} =~ s/\x{201C}|\x{201D}|\x{201E}|\x{201F}/\"/g;
        $data->{desc} =~ s/\x{2013}|\x{2014}/-/g;

        # Same stale-capture hazard applies to the PDF href match.
        next unless $tab =~ m{<a href="(?<url>.*?\.pdf)">};
        next unless $+{url};

        # some of these URLs are relative to site root, others are absolute
        my $url = $+{url};
        $data->{pdf_link} = ( $url =~ /^http/ ) ? $url : $self->_basedir . $url;

        push @links, $data;
    }

    return @links;
}


# Filter out reports whose PDF is already stored in our S3 bucket.
# A defined head_key result means the object exists; those candidates
# are dropped and only the not-yet-collected links are returned.
sub _remove_already_collected {
    my ($self, @candidates) = @_;

    my $bucket = $self->s3->bucket('docs.govdoc.org');

    my @fresh;
    for my $link (@candidates) {
        my $key = sprintf 'us/legi/crs/CRS-%s.pdf', $link->{num};
        push @fresh, $link unless defined $bucket->head_key($key);
    }

    return @fresh;
}



1;
