package rerac.components;

import goalie.Component;

import java.io.BufferedInputStream;
import java.io.File;
import java.io.FileFilter;
import java.io.FileInputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

import rerac.feature.DocumentExtractor;
import rerac.protos.Corpus.Document;

import com.google.protobuf.CodedOutputStream;

/**
 * 
 * This builds a Lucene index for documents stored in the document protocol 
 * buffer format. The documents need to have tokens, no other annotation is 
 * expected.
 * The index stores with every document the file it is contained in and the 
 * offset in bytes a reader has to skip to for reading that document.
 * 
 * The following fields are stored:
 * "id": document id
 * "filename": file name
 * "offset": byte position
 * "contents": tokens
 * 
 * Input parameters:
 * "input_files": a white space-separated list of files containing documents 
 *   (protocol buffer format), to be indexed.
 * "input_dir": a directory for which all files are to be indexed.
 * "output_destination": the directory where the resulting index is to be put.
 * 
 * Output parameters:
 * "output": the directory where the resulting index is put (same as output 
 *   destination).
 * 
 * @author Benjamin Roth
 *
 */
public class IndexDocuments implements Component {
  
  public static final String REFERRED_NAME = "index_documents";

  /**
   * Builds the Lucene index for all documents in the configured input files
   * and/or input directory.
   *
   * @param params component parameters; see class Javadoc for the keys
   *     ("input_files", "input_dir", "output_destination").
   * @return a map with key "output" holding the index directory path.
   * @throws IOException if an input file cannot be read, the input directory
   *     cannot be listed, or the index cannot be written.
   * @throws IllegalArgumentException if "output_destination" is missing.
   */
  @Override
  public Map<String, String> run(Map<String, String> params) throws IOException {
    List<File> inputFiles = new ArrayList<File>();
    // Collect list of files from parameters.
    if (params.containsKey("input_files")) {
      for (String fn : params.get("input_files").split("\\s+")) {
        inputFiles.add(new File(fn));
      }
    }
    if (params.containsKey("input_dir")) {
      File dir = new File(params.get("input_dir"));
      File[] files = dir.listFiles(new FileFilter() { 
        public boolean accept(File f)
        { return f.isFile(); }
      });
      // listFiles() returns null when the path does not exist or is not a
      // directory; fail with a clear message instead of an NPE below.
      if (files == null) {
        throw new IOException("Cannot list input directory: " + dir);
      }
      for (File f : files) {
        inputFiles.add(f);
      }
    }

    String outputDir = params.get("output_destination");
    if (outputDir == null) {
      throw new IllegalArgumentException(
          "Missing required parameter: output_destination");
    }

    // Lucene directory for index.
    Directory directory = FSDirectory.open(new File(outputDir));

    // Lucene index writer: white space tokenization, unlimited number of 
    // tokens per document.
    IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(),
        IndexWriter.MaxFieldLength.UNLIMITED);
    try {
      // Clear index.
      writer.deleteAll();
      for (File inputFile : inputFiles) {
        indexFile(writer, inputFile);
      }
      writer.optimize();
    } finally {
      // Always release the index (and its write lock), even on failure.
      writer.close();
    }
    Map<String, String> outMap = new HashMap<String, String>();
    outMap.put("output", outputDir);
    return outMap;
  }

  /**
   * Reads delimited Document protos from one file and adds a Lucene document
   * per proto, recording the file name and the byte offset at which each
   * proto starts so readers can seek directly to it.
   *
   * @param writer the open index writer to add documents to.
   * @param inputFile a file of length-delimited Document protos.
   * @throws IOException if the file cannot be read or indexing fails.
   */
  private void indexFile(IndexWriter writer, File inputFile) throws IOException {
    BufferedInputStream is = new BufferedInputStream(new FileInputStream(
        inputFile));
    try {
      String inputFN = inputFile.getCanonicalPath();
      System.out.println("indexing file : " + inputFN);
      long byteOffset = 0;
      for (Document doc; (doc = Document.parseDelimitedFrom(is)) != null;) {
        org.apache.lucene.document.Document luceneDoc = 
            new org.apache.lucene.document.Document();
        luceneDoc.add(
            new Field("id", doc.getId(), Field.Store.YES, 
                Field.Index.NOT_ANALYZED));
        // add information about file and byte offset for document.
        luceneDoc.add(
            new Field("filename", inputFN, Field.Store.YES, 
                Field.Index.NOT_ANALYZED));
        luceneDoc.add(
            new Field("offset", Long.toString(byteOffset), Field.Store.YES, 
                Field.Index.NOT_ANALYZED));
        // TODO: Use Field("contents",TokenStream) to put tokens directly in the index.
        luceneDoc.add(new Field("contents", 
            DocumentExtractor.textFromTokens(doc, " "),
            Field.Store.NO,
            Field.Index.ANALYZED));
        writer.addDocument(luceneDoc);
        // computeMessageSizeNoTag = varint(length) + serialized length, which
        // is exactly what parseDelimitedFrom consumed for this message.
        byteOffset += CodedOutputStream.computeMessageSizeNoTag(doc);
      }
    } finally {
      // Close the input even if parsing or indexing threw.
      is.close();
    }
  }

  @Override
  public void cleanup(Map<String, String> outputs) throws IOException {
    // TODO: Remove index?
  }

}
