/*
 * Copyright (c) 2005, 2006 Andrew Krizhanovsky /aka at mail.iias.spb.su/
 * Distributed under GNU Public License.
 */

package russian;

import LemmatizerWrapper.*;
import java.util.*;
import gate.*;
import gate.creole.*;
import gate.util.*;
import ru.edu.niimm.mapping.linguistic.Template;
import ru.edu.niimm.mapping.linguistic.ThesTemplates;
//import ru.edu.niimm.mapping.linguistic.NLPmodule;
// import ru.edu.niimm.mapping.linguistic.NLPServers;
public class RuPOSTagger
        extends AbstractLanguageAnalyser {
    //extends AbstractLanguageAnalyser implements ProcessingResource {
    //extends AbstractProcessingResource  implements ProcessingResource {

    /** Names of the annotation sets read from / written to.  A null or empty
     *  name selects the document's default annotation set (see runTagger()). */
    private String inputASName, outputASName;

    /** Character encoding of processed documents.
     *  NOTE(review): not referenced anywhere in this chunk — confirm it is
     *  used elsewhere (e.g. via CREOLE parameter injection) before removing. */
    private String encoding;

    /** XML-RPC client connected to Lemmatizer wrapper.
     * Server could be installed on Linux or Cygwin (Windoze).
     */
//    private LemClient client;

    /** XML-RPC port to connect to Lemmatizer wrapper */
    private int port;

    /** XML-RPC server host with Lemmatizer wrapper */
    private String host;

    //    private static String serverName = "217.66.17.37";

    //private static String serverName = "192.168.5.220";

//    private static String serverName = "localhost";

    /** Select dictionary RUSSIAN|ENGLISH|GERMAN */
    private String dict_lang;

    /** Live XML-RPC client; created by initLemClient() during init(). */
    LemClient client;

    /** Default (no-argument) constructor required by GATE's CREOLE resource
     *  instantiation; real set-up happens in {@link #init()}. */
    public RuPOSTagger(){
    }

    /**
     * Creates the XML-RPC client, connects it to the Lemmatizer wrapper and
     * loads the morphological dictionary.
     *
     * @param port XML-RPC port of the Lemmatizer wrapper; when not positive,
     *             the historical default of 8000 is used.
     * @throws ExecutionException if the client fails to initialise or the
     *                            dictionary cannot be loaded.
     */
    public void initLemClient(int port)
            throws ExecutionException {
        client = new LemClient ();
        // Bug fix: the port argument used to be ignored in favour of a
        // hard-coded 8000, and the host/dict_lang fields were never consulted.
        // Honour them now, keeping the old values as backward-compatible defaults.
        String serverHost = (host == null || host.length() == 0) ? "localhost" : host;
        int serverPort = (port > 0) ? port : 8000;
        client.init(serverHost, serverPort);
        // Dictionary selection: RUSSIAN|ENGLISH|GERMAN, defaulting to RUSSIAN.
        String lang = (dict_lang == null || dict_lang.length() == 0) ? "RUSSIAN" : dict_lang;
        client.LemLoadDict(lang);
    }

    /**
     * Initialises this processing resource: connects the XML-RPC client to the
     * Lemmatizer wrapper and loads the dictionary, then delegates to the
     * superclass initialisation.
     *
     * @return this resource, as returned by {@code super.init()}.
     * @throws ResourceInstantiationException if the Lemmatizer client cannot
     *         be initialised (previously this failure was silently swallowed,
     *         leaving a half-initialised PR that NPE'd later in runTagger()).
     */
    public Resource init() throws ResourceInstantiationException{
        try {
            initLemClient(port);
        } catch (ExecutionException e) {
            // Bug fix: propagate with the original cause instead of
            // printStackTrace() + continue.
            throw new ResourceInstantiationException(e);
        }
        return super.init();
    }

    /** Re-initialisation hook required by GATE.  Currently a no-op: the
     *  XML-RPC client created in init() is kept as-is.
     *  NOTE(review): consider delegating to init() if parameters (host, port,
     *  dict_lang) are expected to change between runs — confirm intent. */
    public void reInit()
            throws ResourceInstantiationException {}


    /**
     * Executes the tagger over the current document.  Validates that a
     * document has been assigned to this PR, then delegates all real work to
     * {@link #runTagger()}.
     *
     * @throws ExecutionException propagated from the tagging run.
     */
    public void execute() throws ExecutionException
    {
        // Guard clause: a document must be set before execution.
        if(document == null)
        {
            throw new GateRuntimeException("No document to process!");
        }

        // TODO: optionally check the document language and whether the
        // dictionary is loaded before running.
        runTagger();
    }

    /**
     * Core processing routine.  Three phases:
     * <ol>
     *   <li>collect all "word"-kind tokens of the document;</li>
     *   <li>lemmatise them in one XML-RPC batch and attach "Wordform" and
     *       "Paradigm" annotations to each token;</li>
     *   <li>for every sentence, run the thesaurus templates (S, P, SP, PS, SS,
     *       INFINITIVE, PRICHASTIE) via extractTemplateTerms(), producing
     *       "ThesTerm" annotations.</li>
     * </ol>
     * Requires Token and Sentence annotations (i.e. a tokeniser and sentence
     * splitter must have run first).
     *
     * @throws ExecutionException declared for GATE's PR contract; input
     *         validation failures are reported via GateRuntimeException.
     */
    private void runTagger() throws ExecutionException
    {
        //check the parameters: an empty input set name means the default set
        if(inputASName != null && inputASName.equals(""))
        {
            inputASName = null;
        }

        // Fall back to the document's default annotation set when no set name
        // was configured.
        AnnotationSet inputAS = (inputASName == null || inputASName.length() == 0) ?
                document.getAnnotations() :
                document.getAnnotations(inputASName);

        AnnotationSet outputAS = outputASName == null || outputASName.length() == 0 ?
                document.getAnnotations() :
                document.getAnnotations(outputASName);

        AnnotationSet sentencesAS = inputAS.get(SENTENCE_ANNOTATION_TYPE);

        AnnotationSet tokensAS = inputAS.get(TOKEN_ANNOTATION_TYPE);
        if(sentencesAS == null || sentencesAS.size() == 0
                || tokensAS == null || tokensAS.size() == 0)
        {
            throw new GateRuntimeException("No sentences or tokens to process!\n" +
                    "Please run a sentence splitter "+
                    "and tokeniser first!");
        }

        // NOTE(review): startTime is never read afterwards — dead variable.
        long startTime = System.currentTimeMillis();

        fireStatusChanged("Russian POS tagging " + document.getName());
        fireProgressChanged(0);
        //prepare the input for LemServer
        Map<String,Wordform> wordforms = LemClient.createEmptyWordformMap();
        Map<Integer,Paradigm> paradigms= LemClient.createEmptyParadigmMap();

        //define a comparator for annotations by start offset
        Comparator offsetComparator = new OffsetComparator();

        //read all the tokens and all the sentences, sorted by start offset
        List sentencesList = new ArrayList(sentencesAS);
        Collections.sort(sentencesList, offsetComparator);

        List tokensList = new ArrayList(tokensAS);
        Collections.sort(tokensList, offsetComparator);

        int word_count = 0;
        List<String> token_words = new ArrayList<String>();
        // List tokensInCurrentSentence = new ArrayList();
        ListIterator token_it = tokensList.listIterator();
        // Phase 1: put all "word" tokens into token_words (and build str, the
        // space-separated source text used for the "SOURCE" annotation below).
        // NOTE(review): nums is populated only by commented-out code — dead.
        List<String> nums = new ArrayList<String>();
        String str = "";
        while(token_it.hasNext())
        {
            Annotation cur_token = (Annotation)token_it.next();
            if(cur_token.getFeatures().get(ANNIEConstants.TOKEN_KIND_FEATURE_NAME).equals("word")) {
                word_count++;
                String srcData = (String)cur_token.getFeatures().get(ANNIEConstants.TOKEN_STRING_FEATURE_NAME);
                str += srcData + " ";
                token_words.add( srcData );
            }
//            System.out.println("Token:  " + cur_token.toString() +  " " + cur_token.getType() + " "+
//                     cur_token.getFeatures().get(ANNIEConstants.TOKEN_KIND_FEATURE_NAME) );
            /*if(cur_token.getFeatures().get(ANNIEConstants.TOKEN_KIND_FEATURE_NAME).equals("number") &&
               cur_token.getFeatures().get(ANNIEConstants.TOKEN_LENGTH_FEATURE_NAME).toString().equals("4") )
            {
                System.out.println("DATE:  " + cur_token.getFeatures().get(ANNIEConstants.TOKEN_STRING_FEATURE_NAME));
                nums.add( (String)cur_token.getFeatures().get(ANNIEConstants.TOKEN_STRING_FEATURE_NAME) );

            }*/
        }
         try
         {
        fireProgressChanged(33);
        System.out.println("SOURCE DATA: " + str);
        FeatureMap params0 = gate.Factory.newFeatureMap();
        params0.put("type", "string");
        params0.put("data", str);
        Annotation cur_token0 = (Annotation) tokensList.get(0);
        if(cur_token0 != null)
        {
            try {
                // NOTE(review): the start offset is used for BOTH ends, i.e.
                // this creates a zero-length "SOURCE" annotation at the first
                // token, presumably as a document-level marker — confirm.
                outputAS.add(cur_token0.getStartNode().getOffset(), cur_token0.getStartNode().getOffset(),
                        "SOURCE", params0);
            } catch (InvalidOffsetException e) {
                e.printStackTrace();  //To change body of catch statement use File | Settings | File Templates.
            }
        }
        else
        {

        }

         }
        catch (NullPointerException ex)
        {
            Out.prln("");
        }
      /*   LemClient client = new LemClient ();
        client.init("localhost", 8000);
//        client.LemLoadDict("ENGLISH");
        client.LemLoadDict("RUSSIAN");
        client.LemSetPrintingAllForms(true);*/
        // Phase 2: one XML-RPC batch call fills the wordforms/paradigms maps
        // for every word of the document.
        client.createWordformAndParadigm((String[])token_words.toArray(new String[0]), wordforms, paradigms);

        fireProgressChanged(66);

        // For every word
        //      1. set category POS
        //      2. add Wordform and Paradigm
        token_it = tokensList.listIterator();

//        token_it = token_words.listIterator();
        while(token_it.hasNext())
        {
            Annotation cur_token = (Annotation)token_it.next();
            if(cur_token.getFeatures().get(ANNIEConstants.TOKEN_KIND_FEATURE_NAME).equals("word"))
            {
                String word = (String)cur_token.getFeatures().get(ANNIEConstants.TOKEN_STRING_FEATURE_NAME);
                if(wordforms.containsKey(word)) {
                    Wordform w = wordforms.get(word);

                    if(w.paradigms != null && w.paradigms.length > 0)
                    {
                        // 1. set category POS
//                        cur_token.getFeatures().put(TOKEN_CATEGORY_FEATURE_NAME , w.paradigms[0].pos);

                        // 2. add Wordform and Paradigms: one "Wordform" and one
                        // "Paradigm" annotation per paradigm of this word.

                        for(int i = 0; i < w.paradigms.length; i++) {
                            // 2.1. add Wordform
                            FeatureMap params = gate.Factory.newFeatureMap();
                            params.put("type", "WRD");
                            params.put("word", w.word);
                            params.put("paradigm_id", w.joinParadigmIdList(","));

                            // NOTE(review): gram codes always come from paradigm
                            // index 0 although this loop iterates over paradigm i
                            // — confirm whether joinGramCodes(i, "|") was intended.
                            String s = w.joinGramCodes(0, "|");
                            if(0 < s.length())
                            {
                                params.put("gram_codes", s);
                            }

                            if(null != w.accent[i])
                            {
                                params.put("accent", w.accent[i]);
                            }

                            // 2.2. add Paradigm
                            FeatureMap params2 = gate.Factory.newFeatureMap();
                            Paradigm p = w.paradigms[i];
                            params2.put("type", "PARA");
                            params2.put("word", w.word);
                            params2.put("id", p.id);
                            params2.put("lemma", p.lemma);
                            params2.put("pos", p.pos);
                            // list stays empty: the form-list expansion below is
                            // disabled, so form_list is always just "#".
                            String list = "";
                           /* Map<String,Wordform> wordforms2 = LemClient.createEmptyWordformMap();
                            Map<Integer,Paradigm> paradigms2 = LemClient.createEmptyParadigmMap();

                                client.createWordformAndParadigm(p.wordforms, wordforms2, paradigms2);
//                            for ( int f = 0; f < wordforms2.size(); f++)
                            for(String form : wordforms2.keySet())
                            {
                                list += form;
//                                p.wordforms[f];

                                String codes = "";
//                                if(wordforms2.containsKey(p.wordforms[f])) {
                                    Wordform ww = wordforms2.get(form);
                                    codes = ww.joinGramCodes(0, "|");
//                                }
                                list += " " + codes + ";";
                            }*/
                            params2.put("form_list", list + "#");
                            s = p.joinCommonAnCodes(",");
                            if(0 < s.length()) {
                                params2.put("common_ancodes", s);
                            }

//                            params.putAll(params2);
//                            params.putAll(inputAS.get(cur_token.getId()).getFeatures());
                            try {
//                                outputAS.add(cur_token.getStartNode().getOffset(), cur_token.getEndNode().getOffset(),
//                                        "Token", params);
                                outputAS.add(cur_token.getStartNode().getOffset(), cur_token.getEndNode().getOffset(),
                                        "Wordform", params);
                                outputAS.add(cur_token.getStartNode().getOffset(), cur_token.getEndNode().getOffset(),
                                        "Paradigm", params2);
                            } catch (InvalidOffsetException e) {
                                e.printStackTrace();  //To change body of catch statement use File | Settings | File Templates.
                            }
//                            System.out.println("my ruPOS Tagger");
                        }

                    }
                }
            }
        };

        // 3. add Thesaurus-like Term
        // @todo produce template

        // Phase 3: for each sentence collect its "word" tokens, then match the
        // enabled thesaurus templates over them.
        ListIterator sentencesIter = sentencesList.listIterator();
        // put all words to token_words list
        while(sentencesIter.hasNext())
        {
            Annotation cur_sentence = (Annotation)sentencesIter.next();

            Long beginSent = cur_sentence.getStartNode().getOffset();
            Long endSent = cur_sentence.getEndNode().getOffset();

            // NOTE(review): O(sentences * tokens) — the full token list is
            // rescanned for every sentence.
            token_it = tokensList.listIterator();
            int wordsInCurSentCount = 0;
            List<String> curSentWords = new ArrayList<String>();
            List<Annotation> curSentTokens = new ArrayList<Annotation>();

            while (token_it.hasNext())
            {
                Annotation cur_token = (Annotation)token_it.next();
                if(cur_token.getStartNode().getOffset() >= beginSent &&
                        cur_token.getEndNode().getOffset() <= endSent &&
                        cur_token.getFeatures().get(ANNIEConstants.TOKEN_KIND_FEATURE_NAME).equals("word"))
                {
                    wordsInCurSentCount++;
                    curSentWords.add( (String)cur_token.getFeatures().get(ANNIEConstants.TOKEN_STRING_FEATURE_NAME) );
                    curSentTokens.add( cur_token );
                }
                /*if(cur_token.getFeatures().get(ANNIEConstants.TOKEN_KIND_FEATURE_NAME).equals("number") &&
                        cur_token.getFeatures().get(ANNIEConstants.TOKEN_LENGTH_FEATURE_NAME).toString().equals("4") )
                {
                    System.out.println("DATE:  " + cur_token.getFeatures().get(ANNIEConstants.TOKEN_STRING_FEATURE_NAME));
                    wordsInCurSentCount++;
                    curSentWords.add( (String)cur_token.getFeatures().get(ANNIEConstants.TOKEN_STRING_FEATURE_NAME) );
                    curSentTokens.add( cur_token );
                }*/

            }

            // NOTE(review): template matches are added into word_count, so the
            // final "Number of words" feature counts words PLUS matched terms
            // — confirm this is intended.
            word_count += extractTemplateTerms(ThesTemplates.S, curSentTokens, curSentWords, outputAS);
            word_count += extractTemplateTerms(ThesTemplates.P, curSentTokens, curSentWords, outputAS);
            word_count += extractTemplateTerms(ThesTemplates.SP, curSentTokens, curSentWords, outputAS);
            word_count += extractTemplateTerms(ThesTemplates.PS, curSentTokens, curSentWords, outputAS);
            word_count += extractTemplateTerms(ThesTemplates.SS, curSentTokens, curSentWords, outputAS);

            /*word_count += extractTemplateTerms(ThesTemplates.SPP, curSentTokens, curSentWords, outputAS);
            word_count += extractTemplateTerms(ThesTemplates.SPS, curSentTokens, curSentWords, outputAS);
            word_count += extractTemplateTerms(ThesTemplates.SSS, curSentTokens, curSentWords, outputAS);
            word_count += extractTemplateTerms(ThesTemplates.PPS, curSentTokens, curSentWords, outputAS);*/

            word_count += extractTemplateTerms(ThesTemplates.INFINITIVE, curSentTokens, curSentWords, outputAS);
//            word_count += extractTemplateTerms(ThesTemplates.INFINITIVES, curSentTokens, curSentWords, outputAS);
            word_count += extractTemplateTerms(ThesTemplates.PRICHASTIE, curSentTokens, curSentWords, outputAS);
//            word_count += extractTemplateTerms(ThesTemplates.SPRICHASTIES, curSentTokens, curSentWords, outputAS);

//            word_count += extractTemplateTerms(ThesTemplates.t6_all, curSentTokens, curSentWords, outputAS);
//            word_count += extractTemplateTerms(ThesTemplates.t5_all, curSentTokens, curSentWords, outputAS);
//            word_count += extractTemplateTerms(ThesTemplates.t4_all, curSentTokens, curSentWords, outputAS);
//            word_count += extractTemplateTerms(ThesTemplates.t3_all, curSentTokens, curSentWords, outputAS);
//            word_count += extractTemplateTerms(ThesTemplates.t2_all, curSentTokens, curSentWords, outputAS);
//            word_count += extractTemplateTerms(ThesTemplates.t1_all, curSentTokens, curSentWords, outputAS);
           // curSentTokens = new ArrayList<Annotation>();

//              ENGLISH templates
//            extractEnglishTemplateTerms(ThesTemplates.t1_en, curSentTokens, curSentWords, outputAS);
//            extractEnglishTemplateTerms(ThesTemplates.t2_en, curSentTokens, curSentWords, outputAS);
//            extractEnglishTemplateTerms(ThesTemplates.t5_en, curSentTokens, curSentWords, outputAS);
//            extractEnglishTemplateTerms(ThesTemplates.t3_en, curSentTokens, curSentWords, outputAS);

//            extractTemplateDates(ThesTemplates.dyyyy, curSentTokens, curSentWords, outputAS);
//            extractTemplateCentury(ThesTemplates.century, curSentTokens, curSentWords, outputAS);
//
//
//            extractTemplateTerms(ThesTemplates.t4, curSentTokens, curSentWords, outputAS);
//
//            extractTemplateTerms(ThesTemplates.t3words, curSentTokens, curSentWords, outputAS);
//

//            extractTemplateTerms(ThesTemplates.t3S_predl_S, curSentTokens, curSentWords, outputAS);

        }


        fireProgressChanged(100);

        document.getFeatures().put("Number of words", Integer.toString(word_count));
    }

    /**
     * Marks every word of the sentence that matches the template's date check
     * as a "ThesTerm" annotation with kind "year".
     *
     * @param template     template providing checkDateTemplateOn()
     * @param curSentTokens Token annotations of the sentence, parallel to curSentWords
     * @param curSentWords  surface strings of the sentence's words
     * @param outputAS     annotation set receiving the "ThesTerm" annotations
     * @return always null (historical contract; callers ignore the result)
     */
    private ArrayList<String> extractTemplateDates(Template template, List<Annotation> curSentTokens,
                                                   List<String> curSentWords, AnnotationSet outputAS)
    {
        int idx = 0;
        for (String candidate : curSentWords)
        {
            if (template.checkDateTemplateOn(candidate))
            {
                Annotation tok = curSentTokens.get(idx);
                FeatureMap features = gate.Factory.newFeatureMap();
                features.put("kind", "year");
                features.put("string", candidate);

                outputAS.add(tok.getStartNode(), tok.getEndNode(), "ThesTerm", features);
            }
            idx++;
        }
        return null;
    }

    /**
     * Marks every word of the sentence that matches the template's century
     * check as a "ThesTerm" annotation with kind "century".
     *
     * @param template     template providing checkCenturyTemplateOn()
     * @param curSentTokens Token annotations of the sentence, parallel to curSentWords
     * @param curSentWords  surface strings of the sentence's words
     * @param outputAS     annotation set receiving the "ThesTerm" annotations
     * @return always null (historical contract; callers ignore the result)
     */
    private ArrayList<String> extractTemplateCentury(Template template, List<Annotation> curSentTokens,
                                                   List<String> curSentWords, AnnotationSet outputAS)
    {
        int pos = 0;
        while (pos < curSentWords.size())
        {
            String candidate = curSentWords.get(pos);
            if (template.checkCenturyTemplateOn(candidate))
            {
                Annotation tok = curSentTokens.get(pos);
                FeatureMap features = gate.Factory.newFeatureMap();
                features.put("kind", "century");
                features.put("string", candidate);
                outputAS.add(tok.getStartNode(), tok.getEndNode(), "ThesTerm", features);
            }
            pos++;
        }
        return null;
    }

    /**
     * Applies one thesaurus template to the current sentence: the sentence's
     * words are (re-)lemmatised via the XML-RPC client, then every window of
     * template.getWordCount() consecutive tokens is tested against the
     * template by processWords(), which creates "ThesTerm" annotations for
     * matches.
     *
     * @param template      the morphological template to match
     * @param curSentTokens "word"-kind Token annotations of the sentence, in order
     * @param curSentWords  surface strings of those tokens, parallel to curSentTokens
     * @param outputAS      annotation set receiving "ThesTerm" annotations
     * @return number of windows that matched the template
     */
    private int extractTemplateTerms(Template template, List<Annotation> curSentTokens,
                                     List<String> curSentWords, AnnotationSet outputAS)
    {
        int wc = 0;
        try
        {


        // NOTE(review): this re-lemmatises the whole sentence over XML-RPC once
        // per template invocation; the result could be computed once per
        // sentence and shared across all templates.
        Map<String,Wordform> wordforms = LemClient.createEmptyWordformMap();
        Map<Integer,Paradigm> paradigms= LemClient.createEmptyParadigmMap();

        client.createWordformAndParadigm(curSentWords.toArray(new String[0]),
                wordforms, paradigms);
            // Slide a window of cnt+1 tokens across the sentence; cnt is the
            // index of the window's last token relative to its first.
            int cnt = template.getWordCount() - 1;
        Out.prln("cnt "+cnt);
            for(int i = cnt; i < curSentTokens.size(); i++)
            {
                wc += processWords(i, cnt, curSentTokens, wordforms,
                        template, outputAS);

            }
        // (removed: ~170 lines of commented-out legacy matching code for 1-,
        //  2- and 3-word templates, superseded by the processWords() helper)

            }
                        catch (NullPointerException ex)
                        {
                            System.out.println("BAD 2");
                        }
        return wc;
    }

    /**
     * Tests the window of {@code cnt + 1} consecutive tokens ending at index
     * {@code i} against the given template and, on a match, annotates the span
     * as a "ThesTerm" carrying the surface string and the lemmatised form.
     *
     * @param i             index (into curSentTokens) of the window's last token
     * @param cnt           window length minus one
     * @param curSentTokens "word"-kind Token annotations of the sentence
     * @param wordforms     surface string -> Wordform map for this sentence
     * @param template      template providing checkTemplateOn()
     * @param outputAS      annotation set receiving the "ThesTerm" annotation
     * @return 1 if the window matched the template, 0 otherwise
     */
    private int processWords(int i, int cnt, List<Annotation> curSentTokens,
                             Map<String,Wordform> wordforms,
                             Template template,
                             AnnotationSet outputAS)
    {
        int wc = 0;
        // Collect the known wordforms of the cnt+1 tokens ending at i; tokens
        // absent from the map are silently skipped (the template check then
        // sees fewer words and is expected to reject the window).
        List<Wordform> words = new ArrayList<Wordform>(cnt + 1);
        for(int j = 0; j <= cnt; j++)
        {
            String word = (String)curSentTokens.get(i - cnt + j).getFeatures().get(ANNIEConstants.TOKEN_STRING_FEATURE_NAME);
            if(wordforms.containsKey(word))
            {
                Wordform w = wordforms.get(word);
                if(w != null)
                {
                    Out.pr(word + " ");
                    words.add(w);
                }
            }
        }
        Out.prln();
        try
        {
            if(template.checkTemplateOn(words))
            {
                FeatureMap fm = gate.Factory.newFeatureMap();
                fm.put("kind", "word");
                StringBuilder str = new StringBuilder();
                StringBuilder lemma = new StringBuilder();
                // Robustness: bound by words.size() as well — fewer than cnt+1
                // wordforms may have been collected above.
                for(int j = 0; j <= cnt && j < words.size(); j++)
                {
                    Wordform w = words.get(j);
                    if(w != null && w.word != null)
                    {
                        str.append(w.word).append(' ');
                        if(w.paradigms != null && w.paradigms.length > 0)
                        {
                            // Bug fix: the separating space was missing here,
                            // so lemmas of multi-word terms ran together
                            // ("lemma1lemma2"); now consistent with the
                            // fallback branch below.
                            lemma.append(w.paradigms[0].lemma).append(' ');
                        }
                        else
                        {
                            // No paradigm known: fall back to the surface form.
                            lemma.append(w.word).append(' ');
                        }
                    }

                }
                fm.put("string", str.toString().trim());
                fm.put("lemma", lemma.toString().trim());
                outputAS.add(curSentTokens.get(i - cnt).getStartNode(),
                        curSentTokens.get(i).getEndNode(),
                        "ThesTerm", fm);
                Out.prln("ThesTerm: " + str);
                wc++;
            }
        }
        catch (NullPointerException ex)
        {
            // Kept from the original: a malformed wordform aborts only this
            // window; the window simply counts as a non-match.
            System.out.println("BAD ");
        }
        return wc;
    }
   /*  private int extractEnglishTemplateTerms(Template template, List<Annotation> curSentTokens,
                                     List<String> curSentWords, AnnotationSet outputAS)
    {
        int wc = 0;
        Map<String,Wordform> wordforms = LemClient.createEmptyWordformMap();
        Map<Integer,Paradigm> paradigms= LemClient.createEmptyParadigmMap();

        LemClient client = new LemClient ();
        client.init("localhost", 8000);
//        client.LemLoadDict("ENGLISH");
        client.LemLoadDict("RUSSIAN");
        client.createWordformAndParadigm(curSentWords.toArray(new String[0]),
                wordforms, paradigms);

        for(int i = 2; i < curSentTokens.size(); i++)
        {
            List<Wordform> words = new ArrayList<Wordform>(3);

            String word = (String)curSentTokens.get(i - 2).getFeatures().get(ANNIEConstants.TOKEN_STRING_FEATURE_NAME);
            if(wordforms.containsKey(word))
            {
                Wordform w = wordforms.get(word);
                if(w != null)
                {
                    words.add(w);
                }
            }

            String word2 = (String)curSentTokens.get(i - 1).getFeatures().get(ANNIEConstants.TOKEN_STRING_FEATURE_NAME);
            if(wordforms.containsKey(word2))
            {
                Wordform w = wordforms.get(word2);
                if(w != null)
                {
                    words.add(w);
                }
            }

            String word3 = (String)curSentTokens.get(i).getFeatures().get(ANNIEConstants.TOKEN_STRING_FEATURE_NAME);
            if(wordforms.containsKey(word3))
            {
                Wordform w = wordforms.get(word3);
                if(w != null)
                {
                    words.add(w);
                }
            }

            if(template.checkTemplateOn(words))
            {

                FeatureMap fm = gate.Factory.newFeatureMap();
                fm.put("kind", "word");
                fm.put("string", word + " " + word2 + " " + word3);
                fm.put("lemma", word + " " + word2 + " " + word3);
                outputAS.add(curSentTokens.get(i - 2).getStartNode(), curSentTokens.get(i).getEndNode(),
                        "ThesTerm", fm);
                Out.prln("ThesTerm: " + word + word2 + word3);
                wc++;
            }
        }

        for(int i = 1; i < curSentTokens.size(); i++)
        {
            List<Wordform> words = new ArrayList<Wordform>(2);
            String lemma = null;
            String word = (String)curSentTokens.get(i - 1).getFeatures().get(ANNIEConstants.TOKEN_STRING_FEATURE_NAME);
            if(wordforms.containsKey(word))
            {
                Wordform w = wordforms.get(word);
                if(w != null)
                {
                    words.add(w);
                    if(w.paradigms != null)
                    {
//                                        lemma = w.paradigms[0].lemma + "@" + w.paradigms[0].pos + "$" + w.paradigms[0].joinCommonAnCodes(",");
                        lemma = w.paradigms[0].lemma;
                        if(w.paradigms.length > 1)
                        {
                            System.out.println("RPOST: lexical ambiguity!" + w.paradigms[1].lemma);
                        }
                    }
                }
            }

            String word2 = (String)curSentTokens.get(i).getFeatures().get(ANNIEConstants.TOKEN_STRING_FEATURE_NAME);
            if(wordforms.containsKey(word2))
            {
                Wordform w = wordforms.get(word2);
                if(w != null)
                {
                    words.add(w);
                    if(w.paradigms != null)
                    {
                        if(lemma != null)
                        {
//                                        lemma = w.paradigms[0].lemma + "@" + w.paradigms[0].pos + "$" + w.paradigms[0].joinCommonAnCodes(",");
                            lemma += " " + w.paradigms[0].lemma;
                            if(w.paradigms.length > 1)
                        {
                            System.out.println("RPOST: lexical ambiguity!" + w.paradigms[1].lemma);
                        }
                        }
                    }
                }
            }
            if(template.checkTemplateOn(words))
            {

                FeatureMap fm = gate.Factory.newFeatureMap();
                fm.put("kind", "word");
                fm.put("string", word + " " + word2);
                if(lemma != null)
                {
                    fm.put("lemma", lemma);
                    Out.prln("ThesTerm: " +lemma);
                }
                else
                {
                    fm.put("lemma", word + " " + word2);
                    Out.prln("ThesTerm: " +word + word2);
                }
                outputAS.add(curSentTokens.get(i - 1).getStartNode(), curSentTokens.get(i).getEndNode(),
                        "ThesTerm", fm);

                wc++;
            }
        }

        for(int i = 0; i < curSentTokens.size(); i++)
        {
            List<Wordform> words = new ArrayList<Wordform>(1);
            String lemma = null;
            String word = (String)curSentTokens.get(i).getFeatures().get(ANNIEConstants.TOKEN_STRING_FEATURE_NAME);
            if(wordforms.containsKey(word))
            {
                Wordform w = wordforms.get(word);
                words.add(w);
                if(w.paradigms != null)
                {
//                                        lemma = w.paradigms[0].lemma + "@" + w.paradigms[0].pos + "$" + w.paradigms[0].joinCommonAnCodes(",");
                    //lemma = w.paradigms[0].lemma;
                    if(w.paradigms.length > 1)
                        {
                            System.out.println("RPOST: lexical ambiguity! ("+w.paradigms.length+")" + w.paradigms[1].lemma);
                        }
                    for (Paradigm para : w.paradigms)
                    {
                        if(para.pos.equalsIgnoreCase("NOUN"))
                        {
                            System.out.println("RPOST: Solved with " + para.lemma);
                            lemma = para.lemma;
                        }
                        else if(para.pos.equalsIgnoreCase("ADJECTIVE"))
                        {
                            System.out.println("RPOST: Solved with " + para.lemma);
                            lemma = para.lemma;
                        }

                    }
                }
            }
            if(template.checkTemplateOn(words))
            {
                FeatureMap fm = gate.Factory.newFeatureMap();
                fm.put("kind", "word");
                fm.put("string", word);
                if(lemma != null)
                {
                    fm.put("lemma", lemma);
                    Out.prln("ThesTerm: " +lemma);
                }
                else
                {
                    fm.put("lemma", word);
                    Out.prln("ThesTerm: " +word);
                }

                outputAS.add(curSentTokens.get(i).getStartNode(), curSentTokens.get(i).getEndNode(),
                        "ThesTerm", fm);

                wc++;
            }
        }
        return wc;
    }


    private ArrayList<String> extractThesTerm(String[] context, Map<String, Wordform> wordForms)
    {
        ArrayList<String> result = new ArrayList<String>();
        Wordform center = wordForms.get(context[1]);
        Wordform left = wordForms.get(context[0]);
        Wordform right = wordForms.get(context[2]);

//        result.addAll(processTemplate(center, null));
        result.addAll(processTemplate(left, center));
        result.addAll(processTemplate(center, right));


        return result;
    }
*/
   /* private ArrayList<String> processTemplate(Wordform first, Wordform second)
    {

        ArrayList<String> result = new ArrayList<String>();

        if(first != null)
        {
            if(second!= null && second.paradigms != null && second.paradigms[0].pos.equalsIgnoreCase("ADJECTIVE"))
            {  // second ADJ

                if(first.gram_codes != null)
                {
                    for(int j = 0; j < first.gram_codes.length; j++)
                    {

                        for(int k = 0; k < (first.gram_codes[j].length);  k++)
                        {
                            if(second.gram_codes != null)
                            {
                                for(int jj = 0; jj < second.gram_codes.length; jj++)
                                {

                                    for(int kk = 0; kk < (second.gram_codes[jj].length);  kk++)
                                    {
                                        if(second.gram_codes[jj][kk].contains(first.gram_codes[j][k]) ||
                                                first.gram_codes[j][k].contains(second.gram_codes[jj][kk]))
                                        {
//                                System.out.print(first.word);
//                                System.out.println(" : " + first.gram_codes[j][k]);
//                                System.out.print(second.word);
//                                System.out.println(" : " + second.gram_codes[jj][kk]);
//                                System.out.println("------------------------------------");
                                            result.add(first.word + " " + second.word);
                                            result.add(second.word + " " + first.word);
                                            break;
//                                + " " +
//                                        second.word + " " + first.word;
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
            else
            {
                if(second!= null && second.paradigms != null && second.paradigms[0].pos.equalsIgnoreCase("NOUN"))
                {  // second NOUN

                    if(first.gram_codes != null)
                    {
                        for(int j = 0; j < first.gram_codes.length; j++)
                        {

                            for(int k = 0; k < (first.gram_codes[j].length);  k++)
                            {
                                if(second.gram_codes != null)
                                {
                                    for(int jj = 0; jj < second.gram_codes.length; jj++)
                                    {

                                        for(int kk = 0; kk < (second.gram_codes[jj].length);  kk++)
                                        {
                                            if(first.paradigms[0].pos.equalsIgnoreCase("NOUN") &&
                                                    second.paradigms[0].pos.equalsIgnoreCase("NOUN") &&
                                                    ((second.gram_codes[jj][kk].contains("рд") &&
                                                            first.gram_codes[j][k].contains("им")) ||
                                                            (second.gram_codes[jj][kk].contains("им") &&
                                                                    first.gram_codes[j][k].contains("рд"))) &&
                                                    ! first.paradigms[0].lemma.equalsIgnoreCase(second.paradigms[0].lemma)   )
                                            {
                                                // HASH  GENETIVE ++
//                                System.out.print(first.word);
//                                System.out.println(" : *** " + first.gram_codes[j][k]);
//                                System.out.print(second.word);
//                                System.out.println(" : " + second.gram_codes[jj][kk]);
//                                System.out.println("------------------------------------");
                                                result.add(first.word + " " + second.word);
                                                result.add(second.word + " " + first.word);
                                            }
                                        }
                                    }
                                }
                            }
                        }
                    }

                }
                else
                {
                    result.add(first.word);
                }
            }
        }

        return result;


    }
    // getter and setter methods
    */
    /** Returns the name of the annotation set read as input, as configured. */
    public String getInputASName() {
        return inputASName;
    }

    /** Stores the name of the annotation set to read input annotations from. */
    public void setInputASName(String inputASName) {
        this.inputASName = inputASName;
    }

    /** Returns the name of the annotation set used for generated annotations. */
    public String getOutputASName() {
        return outputASName;
    }

    /** Stores the name of the annotation set to write generated annotations to. */
    public void setOutputASName(String outputASName) {
        this.outputASName = outputASName;
    }

    /** Stores the configured character encoding parameter. */
    public void setEncoding(String encoding) {
        this.encoding = encoding;
    }

    /** Returns the configured character encoding parameter. */
    public String getEncoding() {
        return encoding;
    }

    /**
     * Sets the XML-RPC port of the Lemmatizer wrapper server.
     *
     * <p>A {@code null} value is ignored with a warning: the original code
     * unboxed unconditionally ({@code port = new_port}) and would throw a
     * NullPointerException when the (nullable) CREOLE parameter was unset.
     *
     * @param new_port the port number, or {@code null} to keep the current value
     */
    public void setPortLemServer(Integer new_port) {
        if (new_port == null) {
            // guard against auto-unboxing NPE; keep the previous port
            System.out.println("Warning! PortLemServer remains the same: null port supplied.");
        } else {
            port = new_port;
        }
        System.out.println("PortLemServer = " + port);
    }

    /** Returns the XML-RPC port of the Lemmatizer wrapper server. */
    public Integer getPortLemServer() {
        return port;
    }

    /**
     * Sets the XML-RPC host of the Lemmatizer wrapper server.
     *
     * <p>Comparison is literal-first ({@code "localhost".equals(new_host)}),
     * which is null-safe; the original {@code new_host.equals("localhost")}
     * threw a NullPointerException for a {@code null} argument. For
     * "localhost" the name is kept as-is — resolution via
     * {@code InetAddress}/{@code LemClient.getHostName()} was disabled
     * previously (see the commented-out line).
     *
     * @param new_host host name or IP address of the server (may be null)
     */
    public void setHostLemServer(String new_host) {
        if ("localhost".equals(new_host)) {
            System.out.println("Search localhost by InetAddress.getLocalHost()...");
//            host = LemClient.getHostName();
            host = "localhost";
        } else {
            host = new_host;
        }
        System.out.println("HostLemServer = " + host);
    }

    /** Returns the XML-RPC host of the Lemmatizer wrapper server. */
    public String getHostLemServer() {
        return host;
    }


    /**
     * Selects the Lemmatizer dictionary language.
     *
     * <p>Accepted values (case-insensitive): RUSSIAN, ENGLISH, GERMAN.
     * An unrecognized or {@code null} value leaves the current setting
     * unchanged and prints a warning. Uses {@code equalsIgnoreCase} instead
     * of the original {@code 0 == compareToIgnoreCase(...)}, which also
     * threw a NullPointerException for a {@code null} argument.
     *
     * @param new_dict requested dictionary language
     */
    public void setDictLemServer(String new_dict) {
        final String[] dicts = {"RUSSIAN", "ENGLISH", "GERMAN"};

        boolean recognized = false;
        for (String d : dicts) {
            if (d.equalsIgnoreCase(new_dict)) {
                dict_lang = new_dict;
                recognized = true;
                break;
            }
        }
        if (!recognized) {
            // fixed typo in the user-facing message: was "Warnging!"
            System.out.println("Warning! DictLemServer remains the same. Select one of RUSSIAN, ENGLISH, or GERMAN.");
        }

        System.out.println("DictLemServer = " + dict_lang);
    }

    /** Returns the currently selected Lemmatizer dictionary language. */
    public String getDictLemServer() {
        return dict_lang;
    }
} // class RuPOSTagger
