/*
 *  RIBuildIndexLucenePR.java
 *
 */

package at.ofai.gate.riplugin2;


import gate.*;
import gate.creole.*;
import gate.creole.metadata.*;
import gate.util.GateRuntimeException;
import java.io.File;
import java.io.IOException;
import java.io.Reader;
import java.util.Iterator;
import java.util.List;
import org.apache.commons.io.FileUtils;
import org.apache.log4j.Logger;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.FSDirectory;

// TODO: this should create a LR that represents the created semantic model.
// TODO: note that the semanticmodel should also be creatable from just the
// lucene index file to avoid the time-consuming corpus processing.
// Finally, one should be able to open a directory containing a lucene and
// sv model without recreating anything.

// TODO: Also, this PR should take a containing annotation parameter: if set,
// use the sorted sequence of these annotations as "documents".

// document names:
// gatedocid.sectionid where gatedocid is gatedocnr:gatedocname and
// sectionid is annotationnr:annotationname:fromoffset:tooffset


/**
 * This PR is a language analyser that should be run in a corpus controller.
 * It takes a (maybe still empty) Lucene Index LR as parameter and fills that
 * LR with index data from the corpus.
 * The filled Lucene Index LR can then be used with the RIBuildIndexSVPR to
 * create the actual random index.
 * <p>
 * NOTE(review): an earlier version of this comment claimed that running the
 * PR on an index LR that already has data adds to it, but
 * {@code controllerExecutionStarted} deletes any existing index directory
 * before indexing starts, so each run rebuilds the index from scratch --
 * confirm which behavior is intended.
 */
@CreoleResource(name = "RIBuildLuceneIndexPR2",
    icon = "riindex",
    //helpURL = "http://gate.ac.uk/userguide/sec:somechapter:sometopic:somepos",
    comment = "Create a Random Indexing Datastructure LR")
public class RIBuildIndexLucenePR
  extends AbstractLanguageAnalyser
  implements ProcessingResource, ControllerAwarePR
{

  @CreoleParameter(comment = "The annotation set to use", defaultValue="")
  @Optional
  @RunTime
  public void setAnnotationSetName(String asName) {
    this.asName = asName;
  }
  public String getAnnotationSetName() {
    return this.asName;
  }
  private String asName = "";

  @CreoleParameter(
    comment = "The annotation type name (default: Token)",
    defaultValue="Token")
  @Optional
  @RunTime
  public void setAnnotationTypeName(String typeName) {
    this.typeName = typeName;
  }
  public String getAnnotationTypeName() {
    return this.typeName;
  }
  private String typeName = "Token";


  @CreoleParameter(
    comment = "The feature name (default: string)",
    defaultValue="string")
  @Optional
  @RunTime
  public void setFeatureName(String featureName) {
    this.featureName = featureName;
  }
  public String getFeatureName() {
    return this.featureName;
  }
  private String featureName = "string";

  @CreoleParameter(
    comment = "Context annotation type, if empty, the whole document",
    defaultValue="")
  @Optional
  @RunTime
  public void setContextAnnotationTypeName(String name) {
    contextAnnotationTypeName = name;
  }
  public String getContextAnnotationTypeName() {
    return contextAnnotationTypeName;
  }
  private String contextAnnotationTypeName = "";

  @CreoleParameter(comment = "The Lucene Index LR to fill")
  @RunTime
  public void setIndexLuceneLR(RIIndexLuceneLR index) {
    this.indexLuceneLR = index;
  }

  public RIIndexLuceneLR getIndexLuceneLR() {
    return this.indexLuceneLR;
  }
  private RIIndexLuceneLR indexLuceneLR = null;


  // The index of the current document. This is kept up to date by counting
  // instead of using corpus.indexOf(doc) to avoid trouble if the corpus does
  // not allow direct access to any doc to return the index.
  // This counter is reset each time a corpus controller is started and
  // incremented once per processed document in execute().
  private int curIndex = 0;

  public static final Logger logger =
    Logger.getLogger(RIBuildIndexLucenePR.class.getName());

  // Created in controllerExecutionStarted, closed in
  // controllerExecutionFinished.
  private IndexWriter indexWriter = null;
  private File luceneIndexDir = null;
  // Single reusable token stream; re-initialized per indexed (sub-)document.
  private MyTokenStream documentTokenStream = null;

  /**
   * Initialize this resource: create the reusable token stream.
   *
   * @return this resource
   * @throws ResourceInstantiationException
   */
  @Override
  public Resource init() throws ResourceInstantiationException {
    documentTokenStream = new MyTokenStream();
    return this;
  }

  /**
   * Process one document after the other and index the token from
   * inputAS.Type.feature in the context of the document. If document
   * vectors are to be created too, the documents are identified by
   * a name that is created from the document index, a dot and the original
   * document name.
   * 
   * @throws ExecutionException
   */
  @Override
  public void execute() throws ExecutionException {
    // TODO: how to handle abort requests here?
    Document doc = getDocument();
    String   docName = doc.getName();
    Corpus   corp = getCorpus();
    String   corpName = corp.getName();
    // BUGFIX: the old code shadowed the curIndex field with a local variable
    // initialized from corp.indexOf(doc) -- exactly what the field's comment
    // says to avoid (some corpus implementations do not support direct
    // access). Use the counted field instead; it is reset to 0 in
    // controllerExecutionStarted and incremented here per document.
    int docIndex = curIndex;
    curIndex++;
    // If we have a containing annotation defined, return the sorted list of
    // these annotations and create a separate lucene document for each.
    // Otherwise just create one lucene document for the whole gate document.
    if(!getContextAnnotationTypeName().equals("")) {
      //System.out.println("Processing for context annotations");
      AnnotationSet as = doc.getAnnotations(getAnnotationSetName());
      AnnotationSet cas = as.get(getContextAnnotationTypeName());
      AnnotationSet las = as.get(getAnnotationTypeName());
      List<Annotation> caslist = gate.Utils.inDocumentOrder(cas);
      int caindex = 0;
      for(Annotation ca : caslist) {
        String   docName4Indexing = corpName + "." + docIndex + "." + docName + "." + caindex;
        caindex++;
        // Only index the token annotations fully contained in this context
        // annotation.
        AnnotationSet toindex =
          las.getContained(
            ca.getStartNode().getOffset(),
            ca.getEndNode().getOffset());
        runLuceneIndexerOnSet(toindex, docName4Indexing);
      }
    } else {
      //System.out.println("Processing for whole document");
      String   docName4Indexing = corpName + "." + docIndex + "." + docName;
      AnnotationSet as = doc.getAnnotations(getAnnotationSetName());
      //System.out.println("Have document annotations: "+as.size());
      as = as.get(getAnnotationTypeName());
      //System.out.println("Have annotations of desired type: "+as.size());
      runLuceneIndexerOnSet(as, docName4Indexing);
    }
  }

  /**
   * Create one Lucene document from the given annotation set and add it to
   * the index under the given name (stored in the "path" field).
   */
  private void runLuceneIndexerOnSet(AnnotationSet as, String docName4Indexing) {
    org.apache.lucene.document.Document luceneDoc =
      new org.apache.lucene.document.Document();
    //System.out.println("Running document indexer for "+docName4Indexing+", have anns: "+as.size());
    documentTokenStream.init(as);
    // TODO: what does adding Field.TermVector.WITH_POSITIONS actually change?
    // will it harm creating a "normal" SV RI index?
    luceneDoc.add(new Field("contents",documentTokenStream,Field.TermVector.WITH_POSITIONS));
    luceneDoc.add(new Field("path",docName4Indexing,Field.Store.YES,Field.Index.NOT_ANALYZED_NO_NORMS));
    try {
      indexWriter.addDocument(luceneDoc);
    } catch (Exception ex) {
      throw new GateRuntimeException("Exception when adding document",ex);
    }
    try {
      documentTokenStream.close();
    } catch (Exception ex) {
      throw new GateRuntimeException("Exception when closing token stream",ex);
    }
  }

  public void controllerExecutionAborted(gate.Controller controller,
    java.lang.Throwable throwable) {
    // TODO: what to do here? The index writer is currently left open.
    logger.debug("controllerExecutionAborted called");
  }

  public void controllerExecutionFinished(gate.Controller controller) {
    try {
      //System.out.println("Controller ended, writing index");
      //System.out.println("Lucene indexing ended, optimizing index");
      indexWriter.optimize();
      indexWriter.close();
      // TODO: update the lucene LR to "have index"?
    } catch (Exception ex) {
      // BUGFIX: chain the cause instead of appending it to the message so
      // the original stack trace is preserved (consistent with the other
      // GateRuntimeException sites in this class).
      throw new GateRuntimeException("Error during optimizing/closing Lucene index", ex);
    }
  }

  public void controllerExecutionStarted(gate.Controller controller) {
    // check if the controller is a corpus controller
    //System.out.println("Running controllerExecutionStarted");
    if(!(controller instanceof CorpusController)) {
      throw new GateRuntimeException("The RIIndexDocumentsPR must be run in a corpus controller");
    }
    luceneIndexDir = indexLuceneLR.getLuceneIndexDir();

    curIndex = 0;
    // Start from a clean slate: remove any existing index data.
    if(luceneIndexDir.exists()) {
      try {
        FileUtils.deleteDirectory(luceneIndexDir);
        logger.info("Existing Lucene index directory deleted");
      } catch (IOException ex) {
        // BUGFIX: chain the cause instead of dropping it.
        throw new GateRuntimeException("Could not remove existing Lucene directory "+luceneIndexDir.getAbsolutePath(), ex);
      }
    }
    // BUGFIX: the directory was only re-created when it existed before; now
    // it is (re-)created in all cases, including missing parent directories.
    luceneIndexDir.mkdirs();
    try {
      // TODO: can we handle abort requests here?
      indexWriter = new IndexWriter(
        FSDirectory.open(luceneIndexDir),
        new MyAnalyzer(),
        true,
        IndexWriter.MaxFieldLength.LIMITED);
    } catch (Exception ex) {
      logger.error("Exception when creating the Lucene Index Writer",ex);
      throw new GateRuntimeException("Exception when creating the Lucene Index Writer: "+ex);
    }
    //System.out.println("Controller processing initialized");
  }


  /**
   * Dummy analyzer: the "contents" field is fed a pre-built token stream, so
   * this analyzer (required by the IndexWriter constructor) never produces
   * any tokens. Static because it does not use the enclosing instance.
   */
  private static class MyAnalyzer extends Analyzer {

    @Override
    public TokenStream tokenStream(String string, Reader reader) {
      //System.out.println("MyAnalyzer.tokenStream called with "+string+"/"+reader);
      return new TokenStream() {

        @Override
        public boolean incrementToken() throws IOException {
          return false;
        }
      };
    }
    
  }

  /**
   * Token stream backed by a GATE annotation set: each annotation that has
   * the configured feature yields one token whose text is the feature value
   * and whose offsets are the annotation offsets. init() must be called
   * before each use.
   */
  private class MyTokenStream extends TokenStream {

    /** (Re-)initialize the stream with the annotations to tokenize. */
    public void init(AnnotationSet as) {
      List<Annotation> asList = gate.Utils.inDocumentOrder(as);
      //System.out.println("TokenStream initialized, have annotations: "+asList.size());
      annotsIterator = asList.iterator();
    }

    private Iterator<Annotation> annotsIterator = null;
    private TermAttribute termAtt = addAttribute(TermAttribute.class);
    private OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
    private PositionIncrementAttribute posIncAtt = addAttribute(PositionIncrementAttribute.class);

    @Override
    public boolean incrementToken() throws IOException {
      if(annotsIterator == null) {
        throw new GateRuntimeException("MyTokenStream.init must be called before incrementToken");
      }
      //System.out.println("incrementToken called");
      // Skip annotations that do not carry the configured feature.
      String token = null;
      Annotation ann = null;
      while(token == null && annotsIterator.hasNext()) {
        ann = annotsIterator.next();
        token = (String)ann.getFeatures().get(getFeatureName());
        //System.out.println("Found an annotation, feature value is "+token);
      }
      if(token != null) {
        // Set the term text, offsets and position for this token.
        termAtt.setTermBuffer(token);
        offsetAtt.setOffset(
          ann.getStartNode().getOffset().intValue(),
          ann.getEndNode().getOffset().intValue());
        posIncAtt.setPositionIncrement(1);
        //System.out.println("returning token: "+token);
        return true;
      } else {
        // Exhausted: force a fresh init() before the next use.
        annotsIterator = null;
        return false;
      }

    }
    
  }
  
} // class RIBuildIndexLucenePR
