#ifndef RS_TOKEN_H
#define RS_TOKEN_H
#include <vector>
#include "rs_string.h"
using namespace std;

enum word_case { 
    wc_none, // no alphabetic characters a-z, A-Z
    wc_lower, // alphabetic characters only lowercase
    wc_upper, // alphabetic characters only uppercase
    wc_init_upper, // presumably: initial letter uppercase, rest lowercase — confirm in normalize()
    // todo add wc_init_lower: initial lower, then upper
    wc_mixed, // none of the above 3
    wc_unknown // not yet computed
};
// number of "real" case classes, wc_lower..wc_mixed inclusive
// (wc_none and wc_unknown are deliberately excluded from the count)
const int max_word_cases = (wc_mixed - wc_lower) + 1;
// normalization metadata attached to a token (filled by rs_token::normalize)
struct norm_data {
    word_case nd_case; // case classification; wc_unknown until computed
    int nd_amp; // position of ampersand "&": -1 not present; >=0: so many characters are in front of it
    bool nd_has_nonascii; // true if the token contains a non-ASCII character
};


// One token of a tokenized text: either a word or a non-word
// (punctuation etc.), together with its normalization metadata.
class rs_token {
public:
    // Build a token from its string.  Normalization data starts out as
    // "not yet computed": case unknown, no ampersand, ASCII assumed.
    rs_token (const rs_string& str, bool _is_word) :
        tt_string(str),
        tt_is_word(_is_word)
    {
        tt_ndata.nd_amp = -1;
        tt_ndata.nd_case = wc_unknown;
        tt_ndata.nd_has_nonascii = false;
    }
    // Implicit conversion to the underlying string.
    // NOTE(review): implicit conversions are easy to trigger by accident;
    // consider `explicit` if no caller relies on the implicit form.
    operator rs_string() const { return tt_string; }
    const rs_string& get_string() const { return tt_string; }
    bool is_word () const { return tt_is_word; }
    void normalize ();   // presumably fills tt_ndata / rewrites tt_string — defined elsewhere
    void denormalize (); // presumably the inverse of normalize() — defined elsewhere
    word_case get_case () const { return tt_ndata.nd_case; }
    bool has_nonascii () const { return tt_ndata.nd_has_nonascii; }
    void set_string (const rs_string& str) { tt_string = str; }
private:
    // prefix "tt_" is from former class name text_token;
    // "rst_" is already used for tables
    // NOTE: members are initialized in declaration order, so keep the
    // constructor's initializer list in this order (avoids -Wreorder).
    bool tt_is_word; // true for words; false for non-words (punctuation etc)
    // which characters are considered part of word is specified by wd_mode variable
    // for tokenized_text::tokenize
    rs_string tt_string; // actual string
    norm_data tt_ndata;  // filled lazily by normalize(); reset in the constructor
};

//void normalize_word (const rs_string &wsrc, rs_string &wnorm, norm_data &ndata);
//void denormalize_word (const rs_string &wnorm, rs_string &wcnv, norm_data &ndata);


// Word mode: selects the character set that counts as "part of a word"
// when tokenizing (see tokenized_text::tokenize).
enum wd_mode {
    // which characters are considered part of word
    wdm_basic, // a..z, A..Z
    wdm_ident, // wdm_basic plus underscore (_) — i.e. identifier characters
    wdm_qd, // wdm_basic plus  "'" and "-"
    wdm_po, // wdm_qd plus "&" and "\" (presumably backslash — confirm in tokenize)
    wdm_strings, // words consist of a-z, separate tokens are specified with &
    wdm_latin // unicode latin characters
};



// todo change representation of tokenized text
// todo (cont) from <vector> to <list> to allow merging of tokens,
// todo (cont) allow incremental tokenizing, not everything at start

// Splits a text into a sequence of rs_token and provides sequential
// access with look-ahead (peek_token) and end-of-input detection (eof).
class tokenized_text {
public:
    // Tokenizes `text` immediately using the given word mode.
    // NOTE(review): tdt_text stores a *reference* to `text`; constructing
    // from a temporary rs_string leaves it dangling — see member todo below.
    // NOTE(review): tdt_size is never assigned here; tokenize() presumably
    // sets it to the real token count (== tdt_vec.size() before the sentinel
    // is appended below) — confirm in tokenize's definition.
    tokenized_text (const rs_string& text, wd_mode wdm=wdm_basic) :  tdt_text(text)
    {
        tokenize (wdm);
        tdt_pos = 0;
        tdt_last_token_was_word = false;
        rs_token tt (L"", false);
        tdt_vec.push_back (tt); // this one-past-end is used as return value when eof()==true
    };
    // Returns the current token and advances; once past the end, returns
    // the sentinel at index tdt_size without advancing further.
    const rs_token& get_token () {
        if (tdt_pos<tdt_size) {
            return tdt_vec[tdt_pos++];
        } else {
            return tdt_vec[tdt_size];
        }
    }
    // Looks `advance` tokens ahead of the current position without
    // consuming anything; returns the sentinel if that runs past the end.
    const rs_token& peek_token (unsigned advance) {
        if (tdt_pos+advance < tdt_size) {
            return tdt_vec[tdt_pos+advance];
        } else {
            return tdt_vec[tdt_size];
        }
    }
    // True once all real tokens have been consumed via get_token().
    bool eof() const { return tdt_pos >= tdt_size; };
    rs_string get_context (unsigned context_size) const; // defined elsewhere
private:
    vector<rs_token> tdt_vec; // real tokens followed by one sentinel token
    const rs_string& tdt_text; // todo check if it is necessary since storing tokens as strings
    unsigned tdt_pos; // index of the next token get_token() will return
    unsigned tdt_size; // number of real tokens (excludes the sentinel); set by tokenize() — confirm
    bool tdt_last_token_was_word; // presumably used by tokenize() — not read in this header
// function
    void tokenize (wd_mode wdm);
};




#endif
