//#pragma warning (disable: 4996)
//#include <tr1/regex.hpp>
//#include <tr1/regex/icu.hpp>
//#pragma warning (default: 4996)
#include <regex>
#include "rs_token.h"

// File-scope tokenizer regexes, one per word-division mode (wd_mode).
// Each pattern has two alternatives: capture group 1 matches a run of "word"
// characters, group 2 matches a run of "non-word" characters, so together the
// two groups cover the whole input with no gaps.
static tr1::wregex word_token_basic_rx = tr1::wregex(L"([a-zA-Z]+)|([^a-zA-Z]+)");
// 'strings' mode: an entity-like sequence "&...;" is kept whole as a single non-word token.
static tr1::wregex word_token_strings_rx = tr1::wregex(L"([a-zA-Z]+)|(&[^;]*;|[^a-zA-Z&]+)");
//static tr1::wregex word_token_ident_rx = tr1::wregex("([[:Letter:]]+)|([^[:Letter:]]+)");
// 'po' mode: apostrophe, hyphen, ampersand and backslash also count as word characters.
static tr1::wregex word_token_po_rx = tr1::wregex(L"([a-zA-Z'\\-\\&\\\\]+)|([^a-zA-Z'\\-\\&\\\\]+)");
// todo: correct this latin1-letter range static tr1::wregex word_token_latin_rx = tr1::wregex(L"([[:Letter:]]+)|([^[:Letter:]]+)");
static tr1::wregex word_token_latin_rx = tr1::wregex(L"([a-zA-Z]+)|([^a-zA-Z]+)");

// wdm_strings


void rs_token::normalize ()
{   // normalizes token - converts it to lowercase, removes the first ampersand if present
    // stores normalization operations into 'tt_ndata'
    int amp_pos = (int) tt_string.find (L"&");
    tt_ndata.nd_amp = amp_pos;
    if (amp_pos >=0)
        tt_string.erase (amp_pos, 1);
    size_t str_len = tt_string.length();
    size_t i;
    bool first_upper=false, first_lower=false, next_upper=false, next_lower=false;
    tt_ndata.nd_case = wc_mixed;
    if (str_len >=1) {
        first_upper = is_upper (tt_string[0]);
        first_lower = is_lower (tt_string[0]);
    }
    tt_ndata.nd_has_nonascii = is_nonascii (tt_string[0]);
    for (i=1; i<str_len; i++) {
        tt_ndata.nd_has_nonascii |= is_nonascii (tt_string[i]);
        next_upper |= is_upper (tt_string[i]); 
        next_lower |= is_lower (tt_string[i]); 
    }
    if (!first_lower && !next_upper && first_upper && next_lower)
        tt_ndata.nd_case = wc_init_upper;
    if (!first_upper && !next_upper)
        tt_ndata.nd_case = wc_lower;
    if (!first_lower && !next_lower)
        tt_ndata.nd_case = wc_upper;
    if (!first_upper && !first_lower && !next_upper && !next_lower)
        tt_ndata.nd_case = wc_none;
    for (i=0; i<str_len; i++)
        to_lower (tt_string[i]);
}


void rs_token::denormalize ()
{
    // Applies the recorded normalization data 'tt_ndata' back onto
    // 'tt_string': restores casing, then re-inserts the removed ampersand.
    // (Removed the no-op self-assignment `tt_string = tt_string;` that was here.)
    size_t str_len = tt_string.length();
    size_t i;
    if (tt_ndata.nd_case == wc_upper) {
      for (i=0; i<str_len; i++)
          to_upper (tt_string[i]);
    }
    if (str_len > 0  &&   tt_ndata.nd_case == wc_init_upper) {
          to_upper (tt_string[0]);
    }
    if (tt_ndata.nd_case == wc_mixed) {
        ; // todo report error "casing unreliable" or convert casing based on complete initial word
    }
    // nd_amp < 0 means normalize() found no ampersand to restore.
    if (tt_ndata.nd_amp < 0)
        return;
    // Clamp to the current length: the string may be shorter than it was at
    // normalization time, and insert() past the end would throw.
    size_t amp_pos = tt_ndata.nd_amp;
    if (amp_pos>str_len)
        amp_pos = str_len;
    tt_string.insert (amp_pos, L"&");
}


void tokenized_text::tokenize (wd_mode wdm)
{   // Tokenizes the plain text 'tdt_text' into the vector of token data
    // 'tdt_vec'. 'wdm' selects the regex that divides characters into
    // word / non-word runs (see the file-scope patterns above).
    tdt_vec.clear();
    size_t text_len = tdt_text.length();
    if (text_len == 0) {
        tdt_size = 0;
        return;
    }
    tr1::wregex* word_token_ptr = &word_token_basic_rx;
    switch (wdm) {
        default:
        case wdm_basic: word_token_ptr = &word_token_basic_rx; break;
        //case wdm_ident:
        //case wdm_qd:
        case wdm_po: word_token_ptr = &word_token_po_rx; break;
        case wdm_strings: word_token_ptr = &word_token_strings_rx; break;
        case wdm_latin: word_token_ptr = &word_token_latin_rx; break;

    }
    tr1::wsregex_iterator
        regex_it(tdt_text.begin(), tdt_text.end(), *word_token_ptr);
    tr1::wsregex_iterator regex_end;

    while(regex_it != regex_end) {
        // Group 1 = word run, group 2 = non-word run; the patterns guarantee
        // exactly one of them matches per hit. (Dropped the per-iteration
        // wstring copies of the submatches the original made and never used.)
        bool is_word = (*regex_it)[1].matched;
        bool is_non_word = (*regex_it)[2].matched;

        // Initialized: the original left these uninitialized, so a match with
        // neither group set would have fed garbage into substr() (UB).
        size_t token_len = 0, token_ofs = 0;
        bool token_is_word(true);
        if (is_word) {
            token_len = (*regex_it)[1].second - (*regex_it)[1].first;
            token_ofs = (*regex_it)[1].first - tdt_text.begin();
            token_is_word = true;
        } else if (is_non_word) {
            token_len = (*regex_it)[2].second - (*regex_it)[2].first;
            token_ofs = (*regex_it)[2].first - tdt_text.begin();
            token_is_word = false;
        }
        if (is_word || is_non_word) {
            rs_token ttr (tdt_text.substr (token_ofs, token_len), token_is_word);
            tdt_vec.push_back (ttr);
        }
        ++regex_it;
    }
    tdt_size = unsigned (tdt_vec.size());
}


rs_string tokenized_text::get_context (unsigned context_size) const
{
    // Builds a context string: up to 'context_size' consecutive tokens,
    // roughly centered on the current token 'tdt_pos', concatenated together.
    size_t before = context_size / 2;   // tokens wanted before tdt_vec[tdt_pos]
    size_t first_ix = (tdt_pos >= before) ? (tdt_pos - before) : 0;
    size_t end_ix = first_ix + context_size;
    size_t token_count = tdt_vec.size();
    if (end_ix > token_count)
        end_ix = token_count;           // window clipped at the text's end
    wstring joined = L"";
    for (size_t ix = first_ix; ix < end_ix; ix++)
        joined += tdt_vec[ix];
    return joined;
}
