/*
 *  NLPFeaturesOfDoc.java
 * 
 *  Yaoyong Li 22/03/2007
 *
 *  $Id: NLPFeaturesOfDoc.java, v 1.0 2007-03-22 12:58:16 +0000 yaoyong $
 */
package gate.learning;

import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Hashtable;
import java.util.Iterator;
import java.util.List;
import gate.Annotation;
import gate.AnnotationSet;
import gate.util.OffsetComparator;
import java.util.ListIterator;
import org.supercsv.cellprocessor.ConvertNullTo;
import org.supercsv.cellprocessor.ift.CellProcessor;
import org.supercsv.io.CsvListWriter;
import org.supercsv.io.ICsvListWriter;
import org.supercsv.prefs.CsvPreference;

/**
 * Obtain the NLP (linguistic) features from the GATE annotations of one
 * document. Instances (e.g. tokens) are collected in document order; for each
 * instance the class builds attribute values, n-gram features and class
 * labels, and can serialise them as CSV rows via SuperCSV.
 */
public class NLPFeaturesOfDoc {

    /** One component stores all the features for one instance. */
    StringBuffer[] featuresInLine;
    /** Feature names. */
    StringBuffer featuresName;
    /** Document id. */
    private String docId = null;
    /** Number of instances in the document. */
    int numInstances = 0;
    /** Total number of GATE types of NLP features. */
    int totalnumTypes = 0;
    /** Number of features counted for each instance. */
    int[] featuresCounted;
    /** Store the class name for each instance. */
    String[] classLabels;
    /** CSV header cells, one per attribute column (filled by gatedoc2NLPFeatures). */
    List<String> headerList;
    /** SuperCSV cell processors, kept parallel to the CSV columns. */
    List<CellProcessor> processorList;
    /** One row (list of attribute values) per instance. */
    List[] instanceArray;
    /** Store predicted class name(s) for each instance. */
    List[] predictedClassLabels;

    /** Constructor with no parameters. */
    public NLPFeaturesOfDoc() {
    }

    /**
     * Constructor, obtain NLP features from GATE annotations for each instance in
     * the document.
     *
     * @param annotations annotation set of the document
     * @param instanceType annotation type used as the instance unit (e.g. Token)
     * @param docName name of the document, stored as the document id
     */
    public NLPFeaturesOfDoc(AnnotationSet annotations, String instanceType,
            String docName) {
        // Number of instances (tokens) in the document
        numInstances = annotations.get(instanceType).size();
        featuresInLine = new StringBuffer[numInstances];
        featuresName = new StringBuffer();
        totalnumTypes = 0;
        featuresCounted = new int[numInstances];
        classLabels = new String[numInstances];
        docId = docName;
        // Header & cell processor
        headerList = new ArrayList<String>();
        processorList = new ArrayList<CellProcessor>();
        // Predicted classes
        predictedClassLabels = new List[numInstances];
    }

    /**
     * Entry method for getting the NLP features according to the specifications
     * in the dataset definition files.
     *
     * @param annotations annotation set of the document
     * @param dsd the dataset definition describing attributes and class
     */
    public void obtainDocNLPFeatures(AnnotationSet annotations,
            DataSetDefinition dsd) {

        // get the NLP features from the attributes
        if (dsd.arrs.numTypes > 0) {
            gatedoc2NLPFeatures(annotations, dsd.getInstanceType(),
                    dsd.arrs.typesInDataSetDef, dsd.arrs.featuresInDataSetDef,
                    dsd.arrs.namesInDataSetDef, dsd.arrs.featurePosition);
        }
        // get the label from the class attribute
        gatedoc2LabelsComplete(annotations, dsd.getInstanceType(),
                dsd.arrs.classType, dsd.arrs.classFeature, dsd.classIsUseStartEndMarker,
                dsd.arrs.numPredictedClass, dsd.arrs.namesInPredictedClasses, dsd.arrs.predictedClassPosition);

    }

    /**
     * Get the N-gram features from the GATE document and append them to
     * {@link #featuresInLine}, one entry per instance.
     *
     * @param annotations annotation set of the document
     * @param instanceType annotation type used as the instance unit
     * @param ngrams list of {@code Ngram} definitions from the dataset definition
     */
    public void gatedoc2NgramFeatures(AnnotationSet annotations,
            String instanceType, java.util.List ngrams) {
        AnnotationSet anns = annotations.get(instanceType);
        ArrayList annotationArray = (anns == null || anns.isEmpty())
                ? new ArrayList()
                : new ArrayList(anns);
        Collections.sort(annotationArray, new OffsetComparator());
        if (numInstances != annotationArray.size()) {
            System.out.println("!!Warning: the number of instances "
                    + numInstances + " in the document " + docId
                    + " is not right!!!");
            return;
        }
        int numNgrams = ngrams.size();
        // For each ngram definition
        for (int i1 = 0; i1 < numNgrams; ++i1) {
            Ngram ngram = (Ngram) ngrams.get(i1);
            String nameOfNgram = ngram.getName();
            int ngramPosition = ngram.position;
            String positionStr = obtainPositionStr(ngramPosition);
            featuresName.append(nameOfNgram + ConstantParameters.ITEMSEPARATOR);
            int consNum = ngram.getConsnum();
            String[] typeGateNgram = new String[consNum];
            String[] featureGateNgram = new String[consNum];
            for (int j = 0; j < consNum; ++j) {
                typeGateNgram[j] = (ngram.getTypessGate())[j];
                featureGateNgram[j] = (ngram.getFeaturesGate())[j];
            }
            // One annotation set per constituent type. NOTE(review): consNum
            // is assumed >= 1 since annsArray[0] is dereferenced below --
            // confirm Ngram guarantees at least one constituent.
            AnnotationSet[] annsArray = new AnnotationSet[consNum];
            for (int j = 0; j < consNum; ++j) {
                annsArray[j] = (AnnotationSet) annotations.get(typeGateNgram[j]);
            }
            for (int i = 0; i < numInstances; ++i) {
                Annotation annToken = (Annotation) annotationArray.get(i);
                Long tokenStartOffset = annToken.getStartNode().getOffset();
                Long tokenEndOffset = annToken.getEndNode().getOffset();
                // Constituent annotations contained in the span of this instance
                AnnotationSet annsNgramType = annsArray[0].get(tokenStartOffset, tokenEndOffset);
                String[] features = obtainNgramFeatures(annsNgramType,
                        featureGateNgram[0]);
                int numFeats = features.length;
                int number = ngram.getNumber();
                if (numFeats >= number) { // the instance has enough constituents for the defined ngram
                    // Join the features of the further constituents onto the
                    // first one (e.g. token string joined with its POS tag).
                    for (int j = 1; j < consNum; j++) {
                        String[] features1;
                        if (typeGateNgram[j].equals(typeGateNgram[0])) {
                            features1 = obtainNgramFeatures(annsNgramType, featureGateNgram[j]);
                        } else {
                            features1 = obtainNgramFeaturesFromDifferentType(annsNgramType,
                                    annsArray[j].get(tokenStartOffset, tokenEndOffset),
                                    featureGateNgram[j]);
                        }
                        for (int j1 = 0; j1 < features.length; ++j1) {
                            features[j1] = features[j1] + "_" + features1[j1];
                        }
                    }
                    // Build the sliding-window n-gram terms.
                    StringBuffer[] featuresNgram = new StringBuffer[numFeats - number + 1];
                    for (int j = 0; j < featuresNgram.length; ++j) {
                        featuresNgram[j] = new StringBuffer();
                    }
                    for (int j = 0; j < number; ++j) {
                        for (int j1 = j; j1 < numFeats - number + 1 + j; ++j1) {
                            featuresNgram[j1 - j].append(features[j1]
                                    + NLPFeaturesList.SYMBOLNGARM);
                        }
                    }
                    // Count how often each distinct term occurs. Counts are
                    // stored consistently as Integer (the original mixed the
                    // String "1" with Integer values; both stringify the same
                    // way, so output is unchanged).
                    Hashtable ngramTerms = new Hashtable();
                    for (int j = 0; j < featuresNgram.length; ++j) {
                        String term = featuresNgram[j].toString();
                        Integer count = (Integer) ngramTerms.get(term);
                        ngramTerms.put(term, count == null
                                ? Integer.valueOf(1)
                                : Integer.valueOf(count.intValue() + 1));
                    }
                    // Emit terms in sorted order for deterministic output.
                    List keys = new ArrayList(ngramTerms.keySet());
                    Collections.sort(keys);
                    Iterator iterator = keys.iterator();
                    if (featuresInLine[i] == null) {
                        featuresInLine[i] = new StringBuffer();
                    }
                    while (iterator.hasNext()) {
                        Object key = iterator.next();
                        // Feature form: <name><term><count>[position]?
                        if (ngramPosition != 0) {
                            this.featuresInLine[i].append(obtainFeatureName(nameOfNgram, key.toString()
                                    + NLPFeaturesList.SYMBOLNGARM + ngramTerms.get(key).toString())
                                    + positionStr + ConstantParameters.ITEMSEPARATOR);
                        } else {
                            this.featuresInLine[i].append(obtainFeatureName(nameOfNgram, key.toString()
                                    + NLPFeaturesList.SYMBOLNGARM + ngramTerms.get(key).toString())
                                    + ConstantParameters.ITEMSEPARATOR);
                        }
                        ++featuresCounted[i];
                    }
                } // if the number of features is not less than the n of the n-gram
            } // end of the loop on instances
        } // end of the loop on number of ngrams
    }

    /**
     * Obtain the string for the position, which is attached at the end of the nlp
     * feature, e.g. "[-1]".
     */
    String obtainPositionStr(int ngramPosition) {
        return "[" + Integer.toString(ngramPosition) + "]";
    }

    /**
     * Obtain the N-gram features from an annotation set.
     *
     * @param annsNgramType constituent annotations within one instance span; may be null
     * @param gateFeature name of the GATE feature to read from each annotation
     * @return one feature value per annotation, in offset order; never null
     */
    private String[] obtainNgramFeatures(AnnotationSet annsNgramType,
            String gateFeature) {
        // Null-safe: the original read annsNgramType.size() before its null
        // check, so a null set caused a NullPointerException. Size is now
        // taken from the (possibly empty) sorted copy.
        ArrayList annotationArray = (annsNgramType == null || annsNgramType.isEmpty())
                ? new ArrayList() : new ArrayList(annsNgramType);
        Collections.sort(annotationArray, new OffsetComparator());
        int num = annotationArray.size();
        String[] feats = new String[num];
        for (int i = 0; i < num; ++i) {
            feats[i] = (String) ((Annotation) annotationArray.get(i)).getFeatures().get(gateFeature);
            if (feats[i] == null) {
                feats[i] = ConstantParameters.NAMENONFEATURE;
            }
            // The item separator must not occur inside a feature value.
            feats[i] = feats[i].trim().replaceAll(ConstantParameters.ITEMSEPARATOR,
                    ConstantParameters.ITEMSEPREPLACEMENT);
        }
        return feats;
    }

    /**
     * Obtain the N-gram features from an annotation set for the Annotation type
     * which is different from the instance's type.
     *
     * @param annsNgramType constituent annotations of the first (reference) type; may be null
     * @param annsCurrent annotations of the other type within the same span
     * @param gateFeature name of the GATE feature to read
     * @return one value per reference annotation; entries may be null when no
     *         covering annotation of the other type is found
     */
    private String[] obtainNgramFeaturesFromDifferentType(
            AnnotationSet annsNgramType, AnnotationSet annsCurrent, String gateFeature) {
        // Null-safe sizing, mirroring obtainNgramFeatures (the original
        // dereferenced annsNgramType before its null check).
        ArrayList annotationArray = (annsNgramType == null || annsNgramType.isEmpty())
                ? new ArrayList() : new ArrayList(annsNgramType);
        Collections.sort(annotationArray, new OffsetComparator());
        int num = annotationArray.size();
        String[] feats = new String[num];
        for (int i = 0; i < num; ++i) {
            feats[i] = obtainAnnotationForTypeAndFeature(annsCurrent, gateFeature,
                    ((Annotation) (annotationArray.get(i))).getStartNode().getOffset(),
                    ((Annotation) (annotationArray.get(i))).getEndNode().getOffset());
            if (feats[i] != null) {
                feats[i] = feats[i].trim().replaceAll(ConstantParameters.ITEMSEPARATOR,
                        ConstantParameters.ITEMSEPREPLACEMENT);
            }
        }
        return feats;
    }

    /**
     * Get the labels of each instance in the document, adding start/end suffixes
     * for entities spanning several instances, and collect the "predicted class"
     * columns (labels taken from shifted instance positions).
     * <p>
     * NOTE(review): classIsUseStartEndMarker is currently unused — the
     * start/end markers are always attached; confirm this is intended.
     */
    public void gatedoc2LabelsComplete(AnnotationSet annotations,
            String instanceType, String classType, String classFeature, boolean classIsUseStartEndMarker,
            int numPredictedClass, String[] namesInPredictedClasses, int[] predictedClassPosition) {

        List instanceTypeList = new ArrayList(annotations.get(instanceType));
        Collections.sort(instanceTypeList, new OffsetComparator());
        AnnotationSet classTypeList = annotations.get(classType);

        // Iterate each instance type annotation
        int numInstances0 = 0;
        ListIterator instanceTypeIter = instanceTypeList.listIterator();
        int annId = -1;                // id of the entity annotation currently spanned
        boolean isInEntityAnn = false; // previous instance was inside an entity
        int maxSpan = 0;               // number of instances covered so far by the current entity
        while(instanceTypeIter.hasNext()){
            Annotation instanceAnn = (Annotation) instanceTypeIter.next();
            // Entity annotations fully covering the current instance
            List<Annotation> classCoveringList = new ArrayList(
                    classTypeList.getCovering(
                        null,
                        instanceAnn.getStartNode().getOffset(),
                        instanceAnn.getEndNode().getOffset()));

            // Class label
            if(classCoveringList.size() > 0){
                this.classLabels[numInstances0] = (String) classCoveringList.get(0).getFeatures().get(classFeature);
                // A new entity starts directly after a multi-token one: close
                // the previous one with the end marker.
                if(isInEntityAnn == true && classCoveringList.get(0).getId().intValue() != annId && maxSpan > 1){
                    this.classLabels[numInstances0 - 1] += ConstantParameters.SUFFIXENDTOKEN;
                }
                if(isInEntityAnn == false || classCoveringList.get(0).getId().intValue() != annId){
                    maxSpan = 1;
                    this.classLabels[numInstances0] += ConstantParameters.SUFFIXSTARTTOKEN;
                }else{
                    maxSpan++;
                }
                annId = classCoveringList.get(0).getId().intValue();
                isInEntityAnn = true;
            }else{
                // Instance outside any entity: close a preceding multi-token
                // entity, then mark this instance as out-of-class.
                if(isInEntityAnn == true && maxSpan > 1){
                    this.classLabels[numInstances0 - 1] += ConstantParameters.SUFFIXENDTOKEN;
                }
                this.classLabels[numInstances0] = ConstantParameters.OUTOFCLASSNAME;
                maxSpan = 0;
                isInEntityAnn = false;
            }

            // Iterate each predicted class: copy the label found at the
            // shifted position, or null when the shift falls outside the
            // document. NOTE(review): positive shifts read labels that have
            // not been assigned yet in this pass (still null) — confirm
            // that forward positions are meant to be empty.
            List tPredicedClassList = new ArrayList<String>();
            for(int i = 0; i < numPredictedClass; i++){
                if(numInstances0 + predictedClassPosition[i] >= 0 &&
                        numInstances0 + predictedClassPosition[i] < this.classLabels.length){
                    tPredicedClassList.add(this.classLabels[numInstances0 + predictedClassPosition[i]]);
                }else{
                    tPredicedClassList.add(null);
                }
            }
            predictedClassLabels[numInstances0] = tPredicedClassList;

            numInstances0++;
        }
        // A multi-token entity running to the end of the document also needs
        // its end marker: the original only closed an entity when a following
        // instance was seen, so the last label stayed unclosed.
        if (isInEntityAnn && maxSpan > 1 && numInstances0 > 0) {
            this.classLabels[numInstances0 - 1] += ConstantParameters.SUFFIXENDTOKEN;
        }
    }

    /**
     * Get the Attribute feature for each instance of the document, filling
     * {@link #instanceArray} with one row of attribute values per instance and
     * appending the corresponding CSV header/processor entries.
     */
    public void gatedoc2NLPFeatures(AnnotationSet annSet,
            String instanceType, String[] typesGate, String[] featuresGate,
            String[] namesGate, int[] featurePosition) {

        int numTypes = typesGate.length;
        // Header and processor list: one column per attribute, named
        // "name(position)"; null cells are written as "" after sanitising.
        for (int i = 0; i < numTypes; i++) {
            headerList.add(String.format("%s(%d)", namesGate[i], featurePosition[i]));
            processorList.add(new ConvertNullTo("", new WekaSanitizer()));
        }

        List instanceTypeList = new ArrayList(annSet.get(instanceType));
        Collections.sort(instanceTypeList, new gate.util.OffsetComparator());

        int numInstances0 = instanceTypeList.size();
        instanceArray = new List[numInstances0 + 1];

        for (int i = 0; i < numInstances0; i++) {
            List<String> lineList = new ArrayList<String>();
            for (int j = 0; j < numTypes; j++) {
                // Resolve the instance at the (possibly shifted) position.
                // Reset per attribute: in the original, an out-of-range shift
                // silently reused the annotation resolved for the previous
                // attribute instead of producing a null cell.
                Annotation annToken = null;
                if (featurePosition[j] == 0) {
                    annToken = (Annotation) instanceTypeList.get(i);
                } else if ((featurePosition[j] < 0 && i + featurePosition[j] >= 0)
                        || (featurePosition[j] > 0 && i + featurePosition[j] < numInstances0)) {
                    annToken = (Annotation) instanceTypeList.get(i + featurePosition[j]);
                }

                if(annToken != null){
                    if (typesGate[j].equals(instanceType)) {
                        // Feature lives on the instance annotation itself
                        lineList.add((String) annToken.getFeatures().get(featuresGate[j]));
                    } else {
                        // Feature lives on an annotation of another type
                        // covering the instance span
                        Long tokenStartOffset = annToken.getStartNode().getOffset();
                        Long tokenEndOffset = annToken.getEndNode().getOffset();
                        lineList.add(obtainAnnotationForTypeAndFeature((AnnotationSet) annSet.get(typesGate[j]), featuresGate[j], tokenStartOffset, tokenEndOffset));
                    }
                }else{
                    lineList.add(null);
                }
            }
            instanceArray[i] = lineList;
        }
    }

    /**
     * Get the annotation with different type from the instance: return the
     * named feature of the first annotation in the set that falls within the
     * given span, or null when the set is null or nothing is found.
     */
    String obtainAnnotationForTypeAndFeature(AnnotationSet singleAnnSet,
            String gateFeature, Long tokenStartOffset, Long tokenEndOffset) {
        // instanceof also serves as the null check
        if (singleAnnSet instanceof AnnotationSet) {
            AnnotationSet coverAnnSet = (AnnotationSet) singleAnnSet.get(
                    tokenStartOffset, tokenEndOffset);
            Iterator overlappingIterator = coverAnnSet.iterator();
            if (overlappingIterator.hasNext()) {
                Annotation superannotation = (Annotation) overlappingIterator.next();
                return (String) superannotation.getFeatures().get(gateFeature);
            }
        }
        return null;
    }

    /**
     * Write the NLP data into a file (legacy variant writing the header only;
     * the per-row output was already disabled and stays disabled).
     * <p>
     * The single CsvListWriter wrapping the shared stream is flushed, never
     * closed: closing a CsvListWriter closes the underlying BufferedWriter
     * owned by the caller. The original created and closed a writer per row,
     * so every write after the first failed on a closed stream.
     */
    public void writeNLPFeaturesToFile(BufferedWriter out, String docId,
            int docIndex, int[] featurePosition) {
        if (LogService.maxVerbosityLevel > 1) {
            System.out.println("number=" + numInstances);
        }
        try {
            ICsvListWriter writer = new CsvListWriter(out, CsvPreference.STANDARD_PREFERENCE);
            // Header goes out only once, before the first document
            if (docIndex == 0) {
                //headerList.add(0, "Class");
                writer.write(headerList);
            }
            for (int i = 0; i < numInstances; ++i) {
                /*if (classNames[i] instanceof String) {
                    instanceArray[i].add(0, classNames[i]);
                } else {
                    instanceArray[i].add(0, "");
                }*/
                //writer.write(instanceArray[i], tProcessors);
            }
            writer.flush();
        } catch (IOException e) {
            System.out.println("Error occured in writing the NLP data to a file!");
        }
    }

    /**
     * Write the NLP data into a file: the header (for the first document only)
     * followed by one row per instance, consisting of the class label, the
     * attribute values and the predicted-class columns.
     */
    public void writeNLPFeaturesToFile(ICsvListWriter out, String docId,
            int docIndex, int[] featurePosition, DataSetDefinition dsd) {
        if (LogService.maxVerbosityLevel > 1) {
            System.out.println("number=" + numInstances);
        }
        try {
            if (docIndex == 0) {
                // The class column comes first, the predicted-class columns last
                headerList.add(0, "Class");
                for(int i = 0; i < dsd.arrs.numPredictedClass; i++){
                    headerList.add(String.format("%s(%d)", dsd.arrs.namesInPredictedClasses[i], dsd.arrs.predictedClassPosition[i]));
                }
                out.write(headerList);
            }

            // Processors for the class column and the predicted-class columns
            // (assumes this method runs once per NLPFeaturesOfDoc instance;
            // the lists are re-created by the constructor per document)
            processorList.add(0, new ConvertNullTo(""));
            for(int i = 0; i < dsd.arrs.numPredictedClass; i++){
                processorList.add(new ConvertNullTo(""));
            }
            CellProcessor[] tProcessors = processorList.toArray(new CellProcessor[processorList.size()]);

            for (int i = 0; i < numInstances; ++i) {
                // instanceof is a null-safe check for a missing label
                if (classLabels[i] instanceof String) {
                    instanceArray[i].add(0, classLabels[i]);
                } else {
                    instanceArray[i].add(0, "");
                }
                instanceArray[i].addAll(predictedClassLabels[i]);
                out.write(instanceArray[i], tProcessors);
            }
        } catch (IOException e) {
            System.out.println("Error occured in writing the NLP data to a file!");
        }
    }


    /**
     * Read the NLP data of one document from the NLP feature file.
     * Expected format (items separated by ITEMSEPARATOR): a first line whose
     * second item is the document id and third item the number of instances,
     * then one line per instance starting with the number of label items,
     * followed by the labels and the features.
     * NOTE(review): a truncated file makes readLine() return null and throws
     * an uncaught NullPointerException — confirm callers guarantee complete
     * files.
     */
    public void readNLPFeaturesFromFile(BufferedReader in) {
        try {
            String[] lineItems = in.readLine().split(ConstantParameters.ITEMSEPARATOR);
            numInstances = Integer.parseInt(lineItems[2]);
            docId = lineItems[1];
            featuresInLine = new StringBuffer[numInstances];
            classLabels = new String[numInstances];
            int num;
            for (int i = 0; i < numInstances; ++i) {
                String[] lineItems1 = in.readLine().split(ConstantParameters.ITEMSEPARATOR);
                num = Integer.parseInt(lineItems1[0]);
                if (num > 0) {
                    // Re-join the label items (items 1..num) with the separator
                    StringBuffer classNs = new StringBuffer();
                    for (int j = 1; j < num; ++j) {
                        classNs.append(lineItems1[j] + ConstantParameters.ITEMSEPARATOR);
                    }
                    classNs.append(lineItems1[num]);
                    classLabels[i] = classNs.toString();
                }
                // Remaining items (num+1 onwards) are the features
                featuresInLine[i] = new StringBuffer();
                if (num + 1 < lineItems1.length) {
                    featuresInLine[i].append(lineItems1[num + 1]);
                }
                for (int j = num + 2; j < lineItems1.length; ++j) {
                    featuresInLine[i].append(ConstantParameters.ITEMSEPARATOR + lineItems1[j]);
                }
            }
        } catch (IOException e) {
            System.out.println("**Error occured in reading the NLP data from file for converting to FVs!");
        }

    }

    /** Set the document id. */
    public void setDocId(String docId) {
        // Strings are immutable; no defensive copy needed
        this.docId = docId;
    }

    /** Get the document id. */
    public String getDocId() {
        return this.docId;
    }

    /** Put the type and feature together. */
    static String obtainFeatureName(String type, String feat) {
        return ConstantParameters.ITEMSEPREPLACEMENT + type
                + ConstantParameters.ITEMSEPREPLACEMENT + feat;
    }
}
