package ru.edu.niimm.mapping.linguistic;
import gate.Annotation;
import gate.AnnotationSet;
import gate.Corpus;
import gate.Document;
import gate.Factory;
import gate.FeatureMap;
import gate.Gate;
import gate.GateConstants;
import gate.ProcessingResource;
import gate.corpora.RepositioningInfo;
import gate.creole.ExecutionException;
import gate.creole.ResourceInstantiationException;
import gate.creole.SerialAnalyserController;
import gate.creole.ontology.owlim.OWLIMOntologyLR;
import gate.util.GateException;
import gate.util.Out;

import java.io.File;
import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.Vector;

import LemmatizerWrapper.LemClient;
import LemmatizerWrapper.Paradigm;
import LemmatizerWrapper.Wordform;



/**
 * This class illustrates how to use RussianPOSTagger
 * in another application - put ingredients in one end (URLs pointing
 * to documents) and get normalized words (for Russian, English, or German) out the
 * other end.
 * <P><B>NOTE:</B><BR>
 * For simplicity's sake, we don't do any exception handling.
 */
public class NLPImpl implements NLP
{

    //TODO:RGAREEV:HIGHEST move to environment properties
    // Filesystem path of the local GATE 4.0 installation; passed to
    // Gate.setGateHome() during lazy initialisation in execOnQuery().
    public static String path2GATE = "G:/univer/distr/gate-4.0-build2752-ALL/";
    
    // Working directory for the mapping ontology. Rewritten in place by
    // execOnQuery(): converted to an absolute path with dots removed and
    // backslashes turned into forward slashes.
    public static String workPath = "mapping_ont";

    /**
     * Cache of Corpus Pipeline applications containing RussianPOSTagger,
     * one per configuration. NOTE(review): the key format is produced by
     * getAnnieController(), which is not visible in this chunk — presumably
     * derived from ontology/language/class name; confirm before relying on it.
     */
    private static HashMap<String, SerialAnalyserController> annieControllers = new HashMap<String, SerialAnalyserController>();

    /**
     * Initialises the "corpus pipeline" application used for indexing: a
     * SerialAnalyserController loaded with the common ANNIE PRs, the
     * russian.RuPOSTagger lemmatiser PR and the telin.Apolda ontology
     * annotator.
     * <p>
     * Fixes over the historical version: the "ru" and "en" branches were
     * near byte-for-byte duplicates differing only in Apolda's "language"
     * parameter, so they are collapsed into shared helpers; a failed
     * controller creation now returns {@code null} instead of NPE-ing on the
     * first {@code add()}; PRs that fail to instantiate are skipped rather
     * than added as {@code null}.
     *
     * @param ontPath URL (as string) of the ontology fed to Apolda
     * @param lang    language code, "ru" or "en" (case-insensitive)
     * @return a ready-to-use controller, or {@code null} for unsupported
     *         languages or when the controller itself could not be created
     */
    public SerialAnalyserController initForIndex(String ontPath, String lang)
    {
        boolean russian = lang.equalsIgnoreCase("ru");
        if (!russian && !lang.equalsIgnoreCase("en")) {
            // Unsupported language: preserve the historical null result.
            return null;
        }

        Out.prln("Initialising PRs...");

        SerialAnalyserController annieController = createPipelineController();
        if (annieController == null) {
            return null;
        }

        addCommonPrs(annieController);
        Out.prln("...Common PRs loaded");

        addRuPosTagger(annieController);
        Out.prln("...RPOST loaded");

        addApolda(annieController, ontPath, russian ? "ru" : "en");
        Out.prln("...Apolda loaded");

        Out.prln("...All PRs are loaded");
        return annieController;
    } // initForIndex()

    /** Creates the serial analyser controller, or returns null on failure. */
    private static SerialAnalyserController createPipelineController()
    {
        try {
            return (SerialAnalyserController) Factory.createResource(
                    "gate.creole.SerialAnalyserController", Factory.newFeatureMap(),
                    Factory.newFeatureMap(), "ANNIE");
        } catch (ResourceInstantiationException e) {
            e.printStackTrace();
            return null;
        }
    }

    /**
     * Loads the language-independent ANNIE PRs (annotation delete, tokeniser,
     * gazetteer, sentence splitter) with default parameters and appends them
     * to the controller. A PR that fails to instantiate is logged and skipped.
     */
    private static void addCommonPrs(SerialAnalyserController controller)
    {
        final String[] commonPrNames = {
                "gate.creole.annotdelete.AnnotationDeletePR",
                "gate.creole.tokeniser.DefaultTokeniser",
                "gate.creole.gazetteer.DefaultGazetteer",
                "gate.creole.splitter.SentenceSplitter",
        };
        for (String prName : commonPrNames) {
            try {
                controller.add((ProcessingResource)
                        Factory.createResource(prName, Factory.newFeatureMap()));
            } catch (ResourceInstantiationException e) {
                e.printStackTrace();
            }
        }
    }

    /**
     * Appends the russian.RuPOSTagger PR, configured against the external
     * lemmatiser server. The RUSSIAN dictionary is used for both languages
     * (historical behaviour; an ENGLISH dictionary was once considered but
     * left commented out).
     */
    private static void addRuPosTagger(SerialAnalyserController controller)
    {
        FeatureMap params = Factory.newFeatureMap();
        params.put("portLemServer", 8000);
        params.put("hostLemServer", NLPServers.serverName);
        params.put("dictLemServer", "RUSSIAN");
        try {
            controller.add((ProcessingResource)
                    Factory.createResource("russian.RuPOSTagger", params));
        } catch (ResourceInstantiationException e) {
            e.printStackTrace();
        }
    }

    /**
     * Builds the OWLIM ontology resource from {@code ontPath} and appends the
     * telin.Apolda annotator PR configured for the given language. Failures
     * are logged; on failure the PR is simply not added.
     */
    private static void addApolda(SerialAnalyserController controller,
                                  String ontPath, String language)
    {
        try {
            URL ontologyUrl = new URL(ontPath);
            OWLIMOntologyLR annotOntology = new OWLIMOntologyLR();
            annotOntology.setURL(ontologyUrl);
            System.out.println(ontPath);
            annotOntology.setRdfXmlURL(ontologyUrl);
            System.out.println(ontologyUrl);
            try {
                // Catch broadly: ontology init historically failed with
                // non-ResourceInstantiation exceptions as well (the "ru"
                // branch already guarded against this, "en" did not).
                annotOntology.init();
            } catch (Exception e) {
                e.printStackTrace();
            }

            FeatureMap params = Factory.newFeatureMap();
            params.put("ontology", annotOntology);
            params.put("prefRepresentation", "label");
            params.put("altRepresentation", "comment");
            params.put("language", language);
            params.put("lemmaFeature", "lemma");
            controller.add((ProcessingResource)
                    Factory.createResource("telin.Apolda", params));
        } catch (MalformedURLException e) {
            e.printStackTrace();
        } catch (ResourceInstantiationException e) {
            e.printStackTrace();
        }
    }

    /**
     * Runs the annotation pipeline over the given field texts and returns the
     * ontology identifiers of every "Mention" annotation found.
     * <p>
     * Fix over the historical version: all GATE resources created through
     * {@link Factory} (documents and the corpus) are now explicitly deleted in
     * a {@code finally} block — GATE registers them globally, so without the
     * cleanup every call leaked memory.
     *
     * @param className pipeline-cache discriminator passed to getAnnieController()
     * @param ontPth    URL (as string) of the ontology used by the pipeline
     * @param texts     raw field values; one GATE document is built per entry
     * @param lang      language code selecting the pipeline ("ru"/"en")
     * @return identifiers of matched ontology terms, in reverse annotation order
     * @throws GateException if corpus/document creation or pipeline execution fails
     * @throws IOException   declared for interface compatibility
     */
    public List<String> execOnFieldIndex(String className, String ontPth, String[] texts, String lang)
            throws GateException, IOException
    {
        List<String> result = new LinkedList<String>();
        Out.prln("Start field index...");

        SerialAnalyserController annieController = getAnnieController(ontPth, lang, className);

        Corpus corpus = Factory.newCorpus("corp");
        try {
            for (String text : texts) {
                corpus.add(Factory.newDocument(text));
            }

            annieController.setCorpus(corpus);
            annieController.execute();

            for (Iterator iter = corpus.iterator(); iter.hasNext(); ) {
                collectFieldMentions((Document) iter.next(), result);
            }
        } finally {
            // GATE registers every Factory-created resource globally; delete
            // them explicitly, otherwise repeated calls leak memory. Copy the
            // document list first so deletion cannot disturb corpus iteration.
            List docs = new ArrayList(corpus);
            for (Iterator iter = docs.iterator(); iter.hasNext(); ) {
                Factory.deleteResource((Document) iter.next());
            }
            Factory.deleteResource(corpus);
        }
        Out.prln(" field index finished...");
        return result;
    }

    /**
     * Prints the sorted "Mention" annotations of one document and appends the
     * identifier of each genuine mention (where the "identifier" feature
     * differs from the "class" feature) to {@code result}.
     */
    private static void collectFieldMentions(Document doc, List<String> result)
    {
        Set annotTypesRequired = new HashSet();
        annotTypesRequired.add("Mention");
        // All "Mention" annotations from the default annotation set.
        AnnotationSet pw = doc.getAnnotations().get(annotTypesRequired);

        SortedAnnotationList sortedAnnotations = new SortedAnnotationList();
        for (Iterator it = pw.iterator(); it.hasNext(); ) {
            sortedAnnotations.addSortedAll((Annotation) it.next());
        }

        Out.prln("Unsorted annotations count: " + pw.size());
        Out.prln("***********************************************************");
        // Walk annotations from last to first (matches historical output order).
        for (int i = pw.size() - 1; i >= 0; --i) {
            Annotation currAnnot = (Annotation) sortedAnnotations.get(i);
            FeatureMap f = currAnnot.getFeatures();
            // A genuine mention has distinct "identifier" and "class" features.
            if (f.containsKey("identifier") && f.containsKey("class")
                    && !f.get("identifier").toString().equalsIgnoreCase(f.get("class").toString())) {
                String mention = "Thesaurus term " + f.get("identifier") + " from DB " + f.get("class");
                result.add(f.get("identifier").toString());
                Out.prln("Mention : " + mention + "\n");
            }
        }
        Out.prln("***********************************************************");
    }
    
    /**
     * Runs the GATE pipeline over a single query document (loaded from the
     * {@code queryPath} URL) and returns the ontology identifiers of every
     * annotation whose "identifier" feature differs from its "class" feature.
     * <p>
     * Side effects: lazily initialises GATE on first call, rewrites the static
     * {@link #workPath} field to an absolute path, and prints a progress /
     * debug report to the console.
     *
     * @see ru.edu.niimm.mapping.linguistic.NLP#execOnQuery(java.lang.String, java.lang.String, java.lang.String)
     *
     * @param queryPath URL (as string) of the query document to annotate
     * @param ontPth    URL (as string) of the ontology used by the pipeline
     * @param lang      language code ("ru"/"en") selecting the pipeline
     * @return identifiers of matched ontology terms (may contain duplicates)
     */
    public Vector<String> execOnQuery(String queryPath, String ontPth, String lang)
    {
        Vector<String> result = new Vector<String>();
        // Lazily bootstrap GATE the first time any query comes through this JVM.
        if(Gate.getGateHome() == null)
        {
            Out.prln("Initialising GATE...");
            Gate.setGateHome(new File(path2GATE));
            try {
                Gate.init();
            } catch (GateException e) {
                e.printStackTrace();
            }
        }
        
        // Rewrite the static workPath into an absolute forward-slash path
        // (dots stripped). NOTE(review): the result is not used below in this
        // method — presumably other code reads the static field; confirm.
        File p = new File(workPath);
        workPath = p.getAbsolutePath().replaceAll("[.]", "").replaceAll("[\\\\]", "/") + "/";

        URL[] texts = new URL[ 1 ];
        Corpus corpus = null;
        try {
            texts[0] = new URL(queryPath);

            // create a GATE corpus and add a document for each command-line
            // argument


            try {
                corpus = Factory.newCorpus("corp");
            } catch (ResourceInstantiationException e) {
                e.printStackTrace();
            }
            for(int i = 0; i < texts.length; i++)
            {
//       URL u = new URL(args[i]);

//            FeatureMap params = Factory.newFeatureMap();
//            params.put("sourceUrl", u);
//            params.put("preserveOriginalContent", true);
//            params.put("collectRepositioningInfo", true);
//       params.put("encoding", "Cp1251");    // widows-1251 //Cp1251
//            params.put("encoding", "utf8");    // widows-1251 //Cp1251
                Out.prln("Creating doc for " + texts[i]);
                Document doc = null;
                try {
                    // Query documents are assumed to be UTF-8 encoded.
                    doc = (Document) Factory.newDocument(texts[i], "utf-8");
                } catch (ResourceInstantiationException e) {
                    // NOTE(review): on failure doc stays null and is still
                    // added to the corpus below, which will break execute().
                    e.printStackTrace();
                }
//            doc = (Document)doc.init();
//                    .createResource("gate.corpora.DocumentImpl", params);

                corpus.add(doc);

            } // for each of args
        } catch (MalformedURLException e) {
            e.printStackTrace();
        }
        
        //TODO:RGareev: fix it
        String className = "1";
        SerialAnalyserController annieController = getAnnieController(ontPth, lang, className);

        // tell the pipeline about the corpus and run it
        annieController.setCorpus(corpus);
        try {
            annieController.execute();
        } catch (ExecutionException e) {
            e.printStackTrace();
        }

        // for each document, get an XML document with the
        // person and location names added
        Iterator iter = corpus.iterator();
        int count = 0;
//        String startTagPart_1 = "<span GateID=\"";
//        String startTagPart_3 = "\" title=\"";
//        String startTagPart_2 = "\" lemma=\"";
//        String startTagPart_4 = "\" style=\"background:#FF9999;\">";
//        String startTagPart_4_1 = "\" style=\"background:#FF0000;\">";
//        String startTagPart_4_2 = "\" style=\"background:#FF5555;\">";
//        String endTag = "</span>";

        while(iter.hasNext())
        {
            Document doc = (Document) iter.next();
            AnnotationSet defaultAnnotSet = doc.getAnnotations();
            Set annotTypesRequired = new HashSet();
//       annotTypesRequired.add("Wordform");    // old: Person
//       annotTypesRequired.add("Paradigm");    // old: Person
//       annotTypesRequired.add("lemma");       // old: Location
//       annotTypesRequired.add("Sentence");    // old: Location
//            annotTypesRequired.add("ThesTerm");
            annotTypesRequired.add("Mention");

            // pw annotation set: Paradigm and Wordform
            AnnotationSet pw = defaultAnnotSet.get(annotTypesRequired);

            // NOTE(review): originalContent and info are computed but unused —
            // the guard that read them is commented out below.
            FeatureMap features = doc.getFeatures();
            String originalContent = (String)
                    features.get(GateConstants.ORIGINAL_DOCUMENT_CONTENT_FEATURE_NAME);
            RepositioningInfo info = (RepositioningInfo)
                    features.get(GateConstants.DOCUMENT_REPOSITIONING_INFO_FEATURE_NAME);

            ++count;
//         if(originalContent != null && info != null)
            {
//                File file = new File("POStagged/ThesaurusIndexed_" + count + ".txt");
//                Out.prln("File name: '"+file.getAbsolutePath()+"'");
//                FileWriter writer = new FileWriter(file);

                // First pass: sort the Mention annotations and print a
                // human-readable report of each genuine mention.
                Iterator it = pw.iterator();
                Annotation currAnnot;
                SortedAnnotationList sortedAnnotations = new SortedAnnotationList();

                while(it.hasNext())
                {
                    currAnnot = (Annotation) it.next();
//                sortedAnnotations.addSortedExclusive(currAnnot);
                    sortedAnnotations.addSortedAll(currAnnot);
                }

                // Accumulated report text; currently never written anywhere
                // (the FileWriter lines above are commented out).
                StringBuffer editableContent = new StringBuffer("");
                long insertPositionEnd;
                long insertPositionStart;
                // insert anotation tags backward
                Out.prln("Unsorted annotations count: " + pw.size());
                Out.prln("Sorted annotations count: " + sortedAnnotations.size());
                Out.prln("***********************************************************");
                Out.prln(doc.getSourceUrl());
                for(int i=sortedAnnotations.size()-1; i>=0; --i)
                {
                    currAnnot = (Annotation) sortedAnnotations.get(i);
                    FeatureMap f = currAnnot.getFeatures();
//                String word = "";
//                if(f.containsKey("word")) {
//                    word = (String)f.get("word");
//                   Out.prln("word : "+word);
//                }

//                String lemma = "";
//                if(f.containsKey("lemma")) {
//                    lemma = "lemma: " + f.get("lemma");
//                   Out.prln("lemma : "+lemma);
//                }

//                String pos = "";

//                if(f.containsKey("pos")) {
//                    pos = (String)f.get("pos");
//                 Out.prln("pos : "+ pos);
//                }

                    // A genuine mention has distinct "identifier" and "class" features.
                    String Mention = null;
                    if(f.containsKey("identifier") && f.containsKey("class") &&
                            !f.get("identifier").toString().equalsIgnoreCase(f.get("class").toString()))
                    {
                        Mention = "Term " + f.get("identifier") + " from DB " + f.get("class");
                        Out.prln("Mention : " + Mention);
                    }
                    if(Mention != null)
                    {
                        editableContent = printAnnotationString(editableContent, Mention, currAnnot);
                    }

//                String ThesTerm = null;
//                if(f.containsKey("lemma"))
//                {
//                    ThesTerm = "Term: " + f.get("lemma");
//                }

//                if(ThesTerm != null)
//                {
//                    editableContent = printAnnotationString(editableContent, ThesTerm, currAnnot);
//                }
                }
                Out.prln("***********************************************************");
//                writer.write(editableContent.toString());
//                writer.close();
//                result = doc.toXml();
            }
            /*

            String xmlDocument = doc.toXml(pw, false);
            String fileName = new String("RussianPOSTagger_toXML_" + count + ".xml");
            FileWriter writer = new FileWriter(fileName);
            writer.write(xmlDocument);
            writer.close();

            // do something usefull with the XML here!
            Out.prln("'"+xmlDocument+"'");*/


            // Second pass: scan ALL annotations of the default set (not just
            // "Mention") and collect the identifiers into the result vector.
            AnnotationSet pw2 = doc.getAnnotations();
            System.out.println(doc.getName());
            Iterator it = pw2.iterator();

            while (it.hasNext())
            {
                Annotation currAnnot = (Annotation) it.next();
                FeatureMap f = currAnnot.getFeatures();
                if (    f.containsKey("identifier") && f.containsKey("class") &&
                        !f.get("identifier").toString().equalsIgnoreCase(f.get("class").toString()))
                {
                    result.addElement(f.get("identifier").toString());
                    System.out.println(f.get("identifier").toString() + " in " + f.get("class").toString());
                }
            }
        } // for each doc
        return result;
    }

    /**
     * Prepends a separator line and the given mention text to the accumulated
     * report, so that mentions processed later appear above earlier content.
     *
     * @param editableContent buffer the report is accumulated in (mutated in place)
     * @param Mention         human-readable mention description
     * @param currAnnot       annotation the mention came from (currently unused)
     * @return the same buffer instance, for call-chaining convenience
     */
    public static StringBuffer printAnnotationString(StringBuffer editableContent, String Mention, Annotation currAnnot)
    {
        String separator = " ------\r\n ";
        String mentionLine = " \r\n *" + Mention + "* \r\n ";
        // A single insert is equivalent to the historical pair of insert(0, ...) calls.
        editableContent.insert(0, mentionLine + separator);
        return editableContent;
    }

    /**
     * Generic RPC-style dispatcher: routes a logical method name plus a vector
     * of positional string arguments to the matching implementation.
     *
     * @see ru.edu.niimm.mapping.linguistic.NLP#execute(java.lang.String, java.util.Vector)
     *
     * @param method fully qualified logical method name (case-insensitive)
     * @param vector positional arguments; each element is used via toString()
     * @return the invoked method's result, or an error string for unknown names
     * @throws Exception propagated from the invoked implementation
     */
    public Object execute(String method, Vector vector) throws Exception {
        if ("NLPmodule.execOnQuery".equalsIgnoreCase(method)) {
            String queryPath = vector.get(0).toString();
            String ontologyPath = vector.get(1).toString();
            return execOnQuery(queryPath, ontologyPath, "ru");
        }
        if ("NLPmodule.lemmaConcat".equalsIgnoreCase(method)) {
            return lemmaConcat(vector.get(0).toString());
        }
        if ("NLPmodule.execOnFieldIndex".equalsIgnoreCase(method)
                || "NLPmodule.descriptorConcat".equalsIgnoreCase(method)) {
            // Both entry points used to delegate to execOnFieldIndex with a
            // URL argument; that signature is gone, so they stay disabled.
            throw new UnsupportedOperationException();
        }
        return "invoke error. Unknown method.";
    }

    /**
     * Looks up {@code str} in the external lemmatiser (LemServer, RUSSIAN
     * dictionary) and serialises the resulting wordforms and paradigms into
     * an XML fragment.
     *
     * @param str          the word/phrase to analyse (sent as a single token)
     * @param aotStructure unused; kept for interface compatibility
     * @return the XML fragment, or "" if the lemmatiser call fails in any way
     */
    public String lemmaConcat(String str, String aotStructure)
    {
        LemClient client = new LemClient ();
        client.init(NLPServers.serverName, 8000);
        String msgLoad = client.LemLoadDict("RUSSIAN");
        if(!msgLoad.equals("OK"))
        {
            System.out.println("Error in loading RUSSIAN dictonary (LemServer) : " + msgLoad);
        }
        client.LemSetPrintingAllForms(false);

        // No shared use: a local StringBuilder avoids StringBuffer's locking.
        StringBuilder result = new StringBuilder();
        try
        {
            Map<String, Wordform> wordforms = LemClient.createEmptyWordformMap();
            Map<Integer, Paradigm> paradigms = LemClient.createEmptyParadigmMap();

            client.createWordformAndParadigm(new String[] { str }, wordforms, paradigms);

            for(Map.Entry<String, Wordform> entry : wordforms.entrySet())
            {
                String key = entry.getKey();
                // Hoisted: the original re-did wordforms.get(key) on every access.
                Wordform wf = entry.getValue();

                result.append("<wordform key=\"").append(key).append("\">");

                result.append("<accents>");
                for(int i = 0; i < wf.accent.length; i++)
                {
                    if(wf.accent[i] != null)
                    {
                        result.append("<accent pid=\"").append(wf.paradigms[i].id)
                              .append("\">").append(wf.accent[i] + 1).append("</accent>");
                    }
                }
                result.append("</accents>");

                String[][] gramCodes = wf.gram_codes;
                result.append("<gramCodes>");
                for(int i = 0; i < gramCodes.length; i++)
                {
                    result.append("<gramCode pid=\"").append(wf.paradigms[i].id).append("\">");
                    for(String code : gramCodes[i])
                    {
                        result.append("<gramCodeString>").append(code).append("</gramCodeString>");
                    }
                    result.append("</gramCode>");
                }
                result.append("</gramCodes>");

                result.append("<paradigms>");
                for(Paradigm paradigm : wf.paradigms)
                {
                    result.append("<paradigm pid=\"").append(paradigm.id).append("\"/>");
                }
                result.append("</paradigms>");

                result.append("</wordform>");
            }

            for(Map.Entry<Integer, Paradigm> entry : paradigms.entrySet())
            {
                Integer key = entry.getKey();
                Paradigm paradigm = entry.getValue();
                result.append("<paradigm pid=\"").append(key).append("\">");

                result.append("<lemma>").append(paradigm.lemma).append("</lemma>");
                result.append("<pos>").append(paradigm.pos).append("</pos>");

                result.append("<commonAncodes>");
                for(String ancode : paradigm.common_ancodes)
                {
                    result.append("<commonAncode>").append(ancode).append("</commonAncode>");
                }
                result.append("</commonAncodes>");

                result.append("<wordforms>");
                for(String wid : paradigm.wordforms)
                {
                    result.append("<wordform wid=\"").append(wid).append("\"/>");
                }
                result.append("</wordforms>");

                result.append("</paradigm>");
            }
        } catch (Exception e)
        {
            // Deliberate best-effort: any lemmatiser failure yields an empty result.
            return "";
        }
        return result.toString();
    }

    /**
     * Lemmatizes a free-text string via the remote LemServer and returns the
     * space-separated lemmas, in the original word order, with a trailing space.
     * <p>
     * Only Cyrillic tokens of length &gt;= 2 are considered; everything else
     * (Latin letters, digits, punctuation) is stripped before lemmatization.
     * For each token the lemma of its FIRST paradigm is used.
     *
     * @param str raw input text (may contain any characters)
     * @return space-separated lemmas, or {@code "LEMMATIZER ERROR !"} on failure
     */
    public String lemmaConcat(String str)
    {
        // Connect to the lemmatizer service and load the Russian dictionary.
        LemClient client = new LemClient();
        client.init(NLPServers.serverName, 8000);
        String msg_load = client.LemLoadDict("RUSSIAN");
        if(!msg_load.equals("OK"))
        {
            // Best-effort: log and continue; the calls below may still fail.
            System.out.println("Error in loading RUSSIAN dictionary (LemServer) : " + msg_load);
        }
        client.LemSetPrintingAllForms(false);

        // StringBuilder instead of repeated String concatenation in the loop.
        StringBuilder lemma = new StringBuilder();

        try
        {
            Map<String, Wordform> wordforms = LemClient.createEmptyWordformMap();
            Map<Integer, Paradigm> paradigms = LemClient.createEmptyParadigmMap();

            // Keep only Cyrillic letters, collapse whitespace runs in one pass
            // (replaces the old while-contains loop), trim, and upper-case —
            // the lemmatizer appears to key wordforms by upper-cased token.
            str = str.replaceAll("[^А-Яа-я]", " ")
                     .replaceAll(" +", " ")
                     .trim()
                     .toUpperCase();

            // Drop one-letter tokens: after stripping they are almost always noise.
            List<String> tokens = new ArrayList<String>();
            for(String word : str.split(" "))
            {
                if(word.length() >= 2)
                {
                    tokens.add(word);
                }
            }

            String[] words = tokens.toArray(new String[tokens.size()]);
            // toArray never returns null; only the emptiness checks are needed.
            if(words.length > 0 && !words[0].equalsIgnoreCase(""))
            {
                client.createWordformAndParadigm(words, wordforms, paradigms);
                for(String word : words)
                {
                    Wordform w = wordforms.get(word);
                    // Use the lemma of the first paradigm only (dominant reading).
                    if(w != null && w.paradigms != null
                            && w.paradigms[0] != null && w.paradigms[0].lemma != null)
                    {
                        lemma.append(w.paradigms[0].lemma).append(" ");
                    }
                }
            }
        } catch (Exception e)
        {
            // NOTE(review): the cause is discarded; callers only see this marker.
            return "LEMMATIZER ERROR !";
        }
        return lemma.toString();
    }

    /**
     * Runs the cached GATE annotation pipeline over the document at
     * {@code fileURL} and returns the {@code identifier} feature of every
     * qualifying "Mention" annotation, one per line (CRLF line endings).
     * <p>
     * A mention qualifies when it carries both an {@code identifier} and a
     * {@code class} feature and the two differ (case-insensitively).
     * Identifiers are emitted in reverse start-offset order, matching the
     * original backward traversal of the sorted annotation list.
     *
     * @param ontPth  path to the ontology used when the pipeline is first built
     * @param fileURL URL of the document to process (read as UTF-8)
     * @param lang    document language passed to pipeline initialization
     * @return CRLF-separated identifiers; empty string if none found
     * @throws GateException if pipeline construction or execution fails
     * @throws IOException   if the document cannot be read
     * @see ru.edu.niimm.mapping.linguistic.NLP#descriptorConcat(java.lang.String, java.net.URL, java.lang.String)
     */
    public String descriptorConcat(String ontPth, URL fileURL, String lang)
            throws GateException, IOException
    {
        String className = "1"; // single shared cache key for the pipeline
        Out.prln("Start field index...");
        List<String> result = new ArrayList<String>();

        SerialAnalyserController annieController =
                getAnnieController(ontPth, lang, className);

        // Build a one-document corpus and run the pipeline over it.
        Corpus corpus = Factory.newCorpus("corp");
        Out.prln("Creating doc for " + fileURL);
        Document doc = Factory.newDocument(fileURL, "utf-8");
        corpus.add(doc);
        annieController.setCorpus(corpus);
        annieController.execute();

        // Collect Mention identifiers from each processed document.
        Iterator iter = corpus.iterator();
        while(iter.hasNext())
        {
            Document d = (Document) iter.next();
            AnnotationSet defaultAnnotSet = d.getAnnotations();
            Set annotTypesRequired = new HashSet();
            annotTypesRequired.add("Mention");
            AnnotationSet pw = defaultAnnotSet.get(annotTypesRequired);

            // Order annotations by start offset so output is deterministic.
            SortedAnnotationList sortedAnnotations = new SortedAnnotationList();
            Iterator it = pw.iterator();
            while(it.hasNext())
            {
                sortedAnnotations.addSortedAll((Annotation) it.next());
            }

            Out.prln("Unsorted annotations count: " + pw.size());
            Out.prln("***********************************************************");
            // Walk the sorted list backward, as the original implementation did.
            for(int i = pw.size() - 1; i >= 0; --i)
            {
                Annotation currAnnot = (Annotation) sortedAnnotations.get(i);
                FeatureMap f = currAnnot.getFeatures();
                if(f.containsKey("identifier") && f.containsKey("class") &&
                        !f.get("identifier").toString().equalsIgnoreCase(f.get("class").toString()))
                {
                    String mention = "Thesaurus term " + f.get("identifier")
                            + " from DB " + f.get("class");
                    result.add(f.get("identifier").toString());
                    Out.prln("Mention : " + mention);
                }
            }
            Out.prln("***********************************************************");
        }
        Out.prln(" field index finished...");

        // BUG FIX: the previous iterator loop appended a spurious "null" line
        // first and silently DROPPED the last identifier (the loop-update read
        // the next element but the body ran one iteration behind). Append every
        // collected identifier exactly once.
        StringBuilder res = new StringBuilder();
        for(String id : result)
        {
            res.append(id).append("\r\n");
        }
        return res.toString();
    }

	/**
	 * Returns the pipeline cached under {@code className}, building one via
	 * {@link #initForIndex(String, String)} and caching it on first request.
	 *
	 * @param ontPth    ontology path used only when a new pipeline is built
	 * @param lang      document language used only when a new pipeline is built
	 * @param className cache key identifying the pipeline instance
	 * @return the cached or freshly created controller
	 */
	private SerialAnalyserController getAnnieController(String ontPth, String lang, String className) {
		// Guard clause: reuse any previously cached controller for this key.
		if (annieControllers.containsKey(className)) {
			return annieControllers.get(className);
		}
		SerialAnalyserController controller = this.initForIndex(ontPth, lang);
		annieControllers.put(className, controller);
		return controller;
	}

    /**
     * A {@link Vector} of GATE {@link Annotation}s kept ordered by the
     * annotation start offset (ascending). Elements with equal start offsets
     * keep insertion order (a new element goes after existing equals).
     * <p>
     * Kept as a raw {@code Vector} subclass so the inherited method signatures
     * stay identical for any external caller.
     */
    public static class SortedAnnotationList extends Vector {
        public SortedAnnotationList() {
            super();
        } // SortedAnnotationList

        /**
         * Inserts {@code annot} in start-offset order, unless it overlaps an
         * annotation already in the list.
         *
         * @param annot the annotation to insert
         * @return true if inserted; false if it overlapped an existing entry
         */
        public boolean addSortedExclusive(Annotation annot) {
            // Reject any annotation that overlaps one already stored.
            for (int i = 0; i < size(); ++i) {
                if (annot.overlaps((Annotation) get(i))) {
                    return false;
                } // if
            } // for
            insertByStartOffset(annot);
            return true;
        } // addSortedExclusive

        /**
         * Inserts {@code annot} in start-offset order; overlapping entries
         * are allowed.
         *
         * @param annot the annotation to insert
         * @return always true
         */
        public boolean addSortedAll(Annotation annot) {
            insertByStartOffset(annot);
            return true;
        } // addSortedAll

        /**
         * Inserts before the first element whose start offset is strictly
         * greater than {@code annot}'s; appends at the end otherwise.
         * Shared by both public add methods (previously duplicated inline).
         */
        private void insertByStartOffset(Annotation annot) {
            long annotStart = annot.getStartNode().getOffset().longValue();
            for (int i = 0; i < size(); ++i) {
                long currStart =
                        ((Annotation) get(i)).getStartNode().getOffset().longValue();
                if (annotStart < currStart) {
                    insertElementAt(annot, i);
                    return;
                } // if
            } // for
            insertElementAt(annot, size()); // largest start so far: append
        } // insertByStartOffset
    } // SortedAnnotationList    

} // class StandAloneRussianPOSTagger