use strict;
use warnings;

package Lingua::EN::WSD::WebBased::Scenario;

use List::Util qw/reduce/;
use List::MoreUtils qw/uniq/;

use Lingua::EN::WSD::WebBased::Logger;
use Lingua::EN::WSD::WebBased::Utils;
use Lingua::EN::WSD::WebBased::Seeder::WordNetSense;
use Lingua::EN::WSD::WebBased::Expander::GoogleWithFullDocumentContent;
use Lingua::EN::WSD::WebBased::Vectorizer;
use Lingua::EN::WSD::WebBased::Vector;
use Math::DirichletEstimator;

# Generate read/write accessors plus a 'new_init' constructor
# (Class::AccessorMaker arranges for init() to run after construction).
use Class::AccessorMaker {
    wordnet             => undef,   # WordNet query handle (required by init())
    sense_seeder        => undef,   # defaulted in init() when not supplied
    vectorizer          => undef,   # defaulted in init() when not supplied
    # Arrow-style constructor call: 'new Class' indirect-object syntax is
    # ambiguous to the parser and deprecated.
    # NOTE(review): this default is built once at compile time, so every
    # Scenario instance shares the same expander object — confirm intended.
    expander            =>
        Lingua::EN::WSD::WebBased::Expander::GoogleWithFullDocumentContent->new,
    vocabulary          => undef,
    param_cache         => undef,
    param_cache_file    => 'param_cache',
    enable_param_cache  => 1
}, 'new_init';

# Post-construction hook invoked by Class::AccessorMaker's 'new_init'
# constructor.  Fills in default collaborators that the caller did not
# pass in, and derives the vocabulary from the configured wordnet.
sub init {

    my $self = shift;

    # Default sense seeder: WordNet-backed seed-query generator.
    # (Fixed: was indirect-object 'new Class(...)' syntax.)
    $self->sense_seeder(
        Lingua::EN::WSD::WebBased::Seeder::WordNetSense->new(wordnet => $self->wordnet)
    ) if !defined $self->sense_seeder;

    # Default vectorizer shares the same wordnet handle.
    $self->vectorizer(
        Lingua::EN::WSD::WebBased::Vectorizer->new(wordnet => $self->wordnet)
    ) if !defined $self->vectorizer;

    # select_vocabulary_from_wordnet() is imported from ...::Utils.
    $self->vocabulary(select_vocabulary_from_wordnet($self->wordnet));

}

# Disambiguate $target_word in $context: score every candidate WordNet
# sense and return the highest-scoring one (a "word#n" sense string).
sub disambiguate {

    my ($self, $target_word, $context) = @_;

    # Monosemous shortcut: exactly one sense in WordNet, no scoring needed.
    return "$target_word#1" if $self->wordnet->queryWord($target_word) == 1;

    my $candidate_priors = $self->prepare_candidate_priors($target_word);
    my $prepared         = $self->prepare_vectors_and_parameters($target_word, $context);
    my @candidate_senses = @{ $prepared->{candidate_senses} };

    # Score each candidate sense against the context and target-word data.
    my %candidate_scores;
    for my $sense (sort @candidate_senses) {
        $candidate_scores{$sense} = $self->score(
            $candidate_priors->{$sense},
            $prepared->{xs_c},
            $prepared->{xs_t},
            $prepared->{xs_s}{$sense},
            $prepared->{alpha_c},
            $prepared->{alpha_t},
            $prepared->{alpha_s}{$sense}
        );
    }

    _trace("score($_): ", $candidate_scores{$_}) for sort keys %candidate_scores;

    # Pick the argmax over candidate senses.
    return reduce { $candidate_scores{$a} > $candidate_scores{$b} ? $a : $b }
        @candidate_senses;

}

# Score one candidate sense as a log-likelihood ratio: how much better
# the sense's vectors fit the context-estimated Polya parameters than
# the target-word-estimated ones.  The sense prior and $alpha_s are
# currently only traced, not part of the returned score.
sub score {

    my ($self,
        $candidate_sense_prior,
        $xs_c, $xs_t, $xs_s,
        $alpha_c, $alpha_t, $alpha_s) = @_;

    # Earlier Dirichlet-based variants, kept for reference:
    #   average_log_dirichlet($xs_s, $alpha_t)
    #   average_log_dirichlet($xs_c, $alpha_s)
    #   average_log_dirichlet($xs_t, $alpha_s)

    my $log_p_sense_given_context = average_log_polya($xs_s, $alpha_c);
    my $log_p_sense_given_target  = average_log_polya($xs_s, $alpha_t);

    # The log-prior is computed for the trace line only.
    $candidate_sense_prior = log $candidate_sense_prior;
    _trace("$candidate_sense_prior $log_p_sense_given_context $log_p_sense_given_target");

    return $log_p_sense_given_context - $log_p_sense_given_target;

}

# Build a smoothed, normalized prior distribution over the candidate
# senses of $target_word, keyed by sense string, from WordNet sense
# frequencies.  Returns the (tied) sparse-vector hashref.
sub prepare_candidate_priors {

    my ($self, $target_word) = @_;

    my @candidate_senses = $self->wordnet->queryWord($target_word);

    # Raw priors: WordNet frequency counts, keeping positive counts only.
    my %raw_priors;
    for my $sense (@candidate_senses) {
        my $count = $self->wordnet->frequency($sense);
        $raw_priors{$sense} = $count if $count > 0;
    }

    # Sparse vector over the candidate-sense "vocabulary"; smoothing gives
    # zero-frequency senses some mass, then normalize to a distribution.
    my $priors = build_sparse_vector(\%raw_priors,
        build_vocabulary(\@candidate_senses));
    (tied %$priors)->smooth->normalize;

    return $priors;

}

# Build flattened count-vector sets for the target word, each candidate
# sense, and the context, and estimate Polya (Dirichlet-multinomial)
# parameters for the target-word and context sets.
#
# Returns a hashref with keys: alpha_t, alpha_s, alpha_c, xs_c, xs_s,
# xs_t, candidate_senses.
#
# NOTE(review): %alpha_s is returned but never populated — the per-sense
# estimation is commented out below, and score() does not use alpha_s in
# its active code path either.
sub prepare_vectors_and_parameters {

    my ($self, $target_word, $context) = @_;

    # Strip the "#..." sense/POS suffix to get the bare lemma.  The old
    # 'do { $w =~ /.../; $1 }' form left $1 stale or undef when the word
    # had no '#'; fall back to the whole word in that case.
    my ($target_word_lemma) = $target_word =~ /^(.*?)#/;
    $target_word_lemma = $target_word unless defined $target_word_lemma;

    my @candidate_senses = $self->wordnet->queryWord($target_word);

    # Convert the array vocabulary to a hash vocabulary for sparse vectors.
    my ($vocabulary, $vocabulary_size) = build_vocabulary($self->vocabulary);

    # Vector set for the target word.
    _trace("prepare vector set for target word: $target_word");
    my $target_word_vector_set = $self->word_to_vector_set($target_word_lemma);
    $self->_sparsify_and_flatten($target_word_vector_set, $vocabulary, $vocabulary_size);
    _trace("estimate alpha_t for target word: $target_word");
    my $alpha_t = estimate($target_word_vector_set, POLYA);

    # Vector sets for candidate senses (per-sense alpha estimation disabled).
    my %candidate_sense_vector_sets;
    my %alpha_s;
    for my $sense (@candidate_senses) {
        _trace("prepare vector set for candidate sense: $sense");
        $candidate_sense_vector_sets{$sense} = $self->sense_to_vector_set($sense);
#        push @{$candidate_sense_vector_sets{$sense}},
#            $self->get_extra_vector_for_sense($sense);
        $self->_sparsify_and_flatten($candidate_sense_vector_sets{$sense},
            $vocabulary, $vocabulary_size);
#        _trace("estimate alpha_s for candidate sense: $sense");
    }

    # Vector set for the context.
    _trace("prepare vector set for context: $context");
    my $context_vector_set = $self->context_to_vector_set($context);
#    push @$context_vector_set, $self->get_extra_vector_for_context($context);
    $self->_sparsify_and_flatten($context_vector_set, $vocabulary, $vocabulary_size);
    _trace("estimate alpha_c for context: $context");
    my $alpha_c = estimate($context_vector_set, POLYA);

    return {
        alpha_t             => $alpha_t,
        alpha_s             => \%alpha_s,
        alpha_c             => $alpha_c,
        xs_c                => $context_vector_set,
        xs_s                => \%candidate_sense_vector_sets,
        xs_t                => $target_word_vector_set,
        candidate_senses    => \@candidate_senses
    };

}

# Replace each raw count vector in @$vector_set, in place, with its
# sparse representation flattened over the full vocabulary.  Factors out
# the loop that appeared verbatim three times above.
sub _sparsify_and_flatten {

    my ($self, $vector_set, $vocabulary, $vocabulary_size) = @_;

    for (@$vector_set) {
        $_ = build_sparse_vector($_, $vocabulary, $vocabulary_size);
        $_ = (tied %$_)->flatten($self->vocabulary);
    }

    return;

}

# Stub that returns nothing — presumably meant to be overridden by a
# subclass (or implemented later) to turn a context string into a set of
# count vectors, mirroring word_to_vector_set / sense_to_vector_set.
# TODO(review): confirm this is overridden elsewhere; as written,
# prepare_vectors_and_parameters gets an empty result from it.
sub context_to_vector_set  { }

# Turn one candidate sense into a set of word-count vectors: generate
# seed queries for the sense, expand them into web documents, and
# vectorize each document.  Returns an arrayref of vectors.
sub sense_to_vector_set {

    my ($self, $sense) = @_;

    my $seed_queries = $self->sense_seeder->gen_queries($sense);
    my $documents    = $self->expander->expand($seed_queries);

    # A minimum-size filter (keys %$_ > 50) used to be applied here but is
    # currently disabled.
    my @vector_set;
    for my $document (@$documents) {
        push @vector_set, $self->vectorizer->vectorize($document);
    }

    return \@vector_set;

}

# Turn a bare word into a set of word-count vectors by expanding a single
# exact-phrase web query for it and vectorizing every returned document.
# Returns an arrayref of vectors.
sub word_to_vector_set {

    my ($self, $word) = @_;

    my $documents = $self->expander->expand([qq/"$word"/]);

    # A minimum-size filter (keys %$_ > 50) used to be applied here but is
    # currently disabled.
    my @vector_set;
    for my $document (@$documents) {
        push @vector_set, $self->vectorizer->vectorize($document);
    }

    return \@vector_set;

}

# Vectorize the seeder's extra material for a sense.  NOTE(review):
# gen_extra() presumably returns supplementary text for the sense (e.g.
# gloss data) — confirm against the seeder implementation.  Currently only
# referenced from a commented-out call site above.
sub get_extra_vector_for_sense {

    my ($self, $sense) = @_;

    my $extra = $self->sense_seeder->gen_extra($sense);

    return $self->vectorizer->vectorize($extra);

}

1;

