#!/usr/bin/perl

use utf8;
use strict;
use warnings;
use open qw(:std :utf8);

use lib '/home/ksurent/Lingua--RU--OpenCorpora--Tokenizer/blib/lib';

use DBI;
use File::Slurp;
use Getopt::Long;
use Config::INI::Reader;
use Lingua::RU::OpenCorpora::Tokenizer::List;
use Lingua::RU::OpenCorpora::Tokenizer::Model;

# Parse command-line options:
#   --config | -c <path>  path to the INI configuration file (required)
#   --evaluate | -e       also evaluate the trained model and print stats
GetOptions(
    \my %opts,
    'config|c=s',
    'evaluate|e',
) or die "Usage: $0 --config <file.ini> [--evaluate]\n";

# --config is mandatory: fail early with a usage message instead of letting
# Config::INI::Reader die on an undef filename.
die "Usage: $0 --config <file.ini> [--evaluate]\n"
    unless defined $opts{config};

my $conf     = Config::INI::Reader->read_file($opts{config});
my $mysql    = $conf->{mysql}
    or die "Missing [mysql] section in $opts{config}\n";
my $data_dir = $conf->{generator}{lists_dir}
    or die "Missing lists_dir key in [generator] section of $opts{config}\n";

# Connect to the OpenCorpora MySQL database.
# RaiseError makes every subsequent DBI call die on failure; without it the
# unchecked do()/execute() calls below would merely warn and the script would
# keep running against inconsistent state.
my $dbh = DBI->connect(
    "DBI:mysql:$mysql->{dbname}:$mysql->{host}",
    $mysql->{user},
    $mysql->{passwd},
    {
        RaiseError        => 1,
        mysql_enable_utf8 => 1,
    },
) or die DBI->errstr;

# Tokenizer exception list, read one item per line from the data directory.
# The List constructor takes a hash whose keys are the items (values unused).
my $exceptions = do {
    my @items = read_file(
        "$data_dir/tokenizer_exceptions.txt",
        {binmode => ':utf8', chomp => 1},
    );
    Lingua::RU::OpenCorpora::Tokenizer::List->new({
        list => 'exceptions',
        data => {map { ($_ => undef) } @items},
    });
};
# Tokenizer prefix list, read one item per line from the data directory.
# Same shape as the exceptions list: items become hash keys, values unused.
my $prefixes = do {
    my @items = read_file(
        "$data_dir/tokenizer_prefixes.txt",
        {binmode => ':utf8', chomp => 1},
    );
    Lingua::RU::OpenCorpora::Tokenizer::List->new({
        list => 'prefixes',
        data => {map { ($_ => undef) } @items},
    });
};
# Hyphenated-word list, built from every distinct word form in the database
# that contains a hyphen.
my $hyphenated_forms = $dbh->selectcol_arrayref("
            select
                distinct form_text
            from
                form2lemma
            where
                form_text like '%-%'
        ");
my $hyphens = Lingua::RU::OpenCorpora::Tokenizer::List->new({
    list => 'hyphens',
    data => {map { ($_ => undef) } @$hyphenated_forms},
});

# Sentence ids to exclude from the training corpus. Blank lines and
# #-comment lines in the file are ignored; only key existence is tested later.
my %to_skip;
for my $line (read_file("$data_dir/bad_sentences.txt", {chomp => 1})) {
    next unless length $line;
    next if $line =~ /^#/;
    $to_skip{$line} = undef;
}

# group_concat() output is silently truncated at group_concat_max_len, which
# would corrupt the per-sentence token lists fetched below, so raise the
# session limit to comfortably fit the longest sentence.
my($max_len) = $dbh->selectrow_array('select max(char_length(source)) from sentences');
# max() over an empty table is NULL: previously this warned and set the
# limit to 0, truncating everything. Fail loudly instead.
die "The sentences table appears to be empty\n"
    unless defined $max_len and $max_len > 0;
# The factor of 2 leaves headroom for separators between concatenated tokens.
$dbh->do('set session group_concat_max_len = ' . $max_len * 2);

# Fetch each sentence's gold-standard tokens as one space-joined string,
# keyed by sent_id, then split every string back into an arrayref of tokens.
my $tokens = $dbh->selectall_hashref("
    select
        sent_id,
        group_concat(tf_text order by `pos` separator ' ') as tokens
    from
        text_forms
    group by
        sent_id
", 'sent_id');
for my $row (values %$tokens) {
    $row->{tokens} = [split / /, $row->{tokens}];
}

my $sentences_sth = $dbh->prepare("
    select
        sent_id,
        source
    from
        sentences
");
$sentences_sth->execute;

# Assemble the training corpus: every sentence, except the blacklisted ones,
# paired with its gold-standard token list.
my @corpus;
while(my($sent_id, $text) = $sentences_sth->fetchrow_array) {
    next if exists $to_skip{$sent_id};

    push @corpus, {
        # Trailing two-space padding — presumably lookahead room for the
        # tokenizer; NOTE(review): confirm against the Tokenizer internals.
        text   => $text . '  ',
        tokens => $tokens->{$sent_id}{tokens},
    };
}

# Build the tokenizer model from the corpus plus the auxiliary word lists,
# then train it.
my $model = Lingua::RU::OpenCorpora::Tokenizer::Model->new({
    exceptions => $exceptions,
    prefixes   => $prefixes,
    hyphens    => $hyphens,
    corpus     => \@corpus,
});
$model->train;

# Persist the trained coefficients in one bulk insert.
# NOTE(review): in MySQL, `truncate table` performs an implicit commit, so
# the begin_work/commit pair does NOT make truncate+insert atomic; use
# `delete from tokenizer_coeff` if atomicity is actually required.
$dbh->begin_work or die DBI->errstr;

$dbh->do('truncate table tokenizer_coeff');

# Capture the key order once so placeholders and bind values stay aligned.
my @vectors = keys %{ $model->{data} };
# An empty model would previously generate `values ` with zero tuples — a
# guaranteed SQL syntax error at prepare time. Fail with a clear message.
die "Model produced no coefficients\n" unless @vectors;

# One (?,?) placeholder pair per vector/coefficient row.
my $coeff_sth = $dbh->prepare("
    insert into
        tokenizer_coeff(vector, coeff)
    values
        @{[ join ',', ('(?,?)') x @vectors ]}
");
$coeff_sth->execute(map +($_, $model->{data}{$_}), @vectors)
    or die DBI->errstr;

#$dbh->do('truncate table tokenizer_strange');
#
#my $strange_sth = $dbh->prepare("
#    insert into
#        tokenizer_strange(sent_id, pos, border, coeff)
#    values
#        @{[ join ',', ('(?, ?, ?, ?)') x keys %strange ]}
#");
#$strange_sth->execute(); # TODO arguments

$dbh->commit or die DBI->errstr;

# With --evaluate, score the freshly trained model and print its stats.
if($opts{evaluate}) {
    $model->evaluate;
    $model->print_stats;

# Disabled: persisting per-threshold QA stats (precision/recall/F1) into
# the tokenizer_qa table, keyed by the current run date.
#    $dbh->begin_work or die DBI->errstr;
#
#    my $qa_sth = $dbh->prepare("
#        insert into
#            tokenizer_qa(run, threshold, `precision`, recall, F1)
#        values
#            (?, ?, ?, ?, ?)
#    ");
#    my $date = sub { sprintf '%04i-%02i-%02i', $_[5]+1900,$_[4]+1,$_[3] }->(localtime);
#
#    for my $threshold (keys %{ $model->{stats} }) {
#        my $stats = $model->{stats}{$threshold};
#        $qa_sth->execute(
#            $date,
#            $threshold,
#            $stats->{precision},
#            $stats->{recall},
#            $stats->{F1},
#        );
#    }
#
#    $dbh->commit or die DBI->errstr;
}
