package rerac.feature;

import rerac.protos.Corpus.Document;
import rerac.protos.Corpus.Document.Annotation;
import rerac.protos.Corpus.Document.Compound;
import rerac.protos.Corpus.Document.Compound.CompoundSlot;
import rerac.protos.Corpus.Document.CompoundGroup;
import rerac.protos.Corpus.Document.Token;

/**
 * This extracts information from a document. Currently it is used to extract 
 * sub-documents and to get a single String from the token texts.
 * 
 * @author Benjamin Roth
 *
 */
public class DocumentExtractor {
  /**
   * Target token index used when an annotation's target falls outside the
   * extracted range and {@code useDefaultTarget} is set.
   */
  static final int DEFAULT_TARGET = -1;
  
  /**
   * This extracts a subdocument containing the tokens from start index 
   * (inclusive) to end index (exclusive) with all Annotations.
   * The following values are changed so that they refer to indices in the new 
   * document:
   * Annotation.target_token
   * Annotation.target_compound
   * Compound annotations are copied over partially if one of the arguments is 
   * in the new document.
   * The document id is set to 'ORIGINAL_ID:start:end'.
   * 
   * @param doc the document
   * @param start start index of token, inclusive.
   * @param end end index of token, exclusive.
   * @param useDefaultTarget if true, annotations whose target token falls
   *     outside [start, end) are re-targeted to {@link #DEFAULT_TARGET};
   *     otherwise the (now stale) original target is kept.
   * @return a new document containing only the requested token range.
   * @throws IllegalArgumentException if the range is invalid.
   */
  public static Document extractSubDocument(Document doc, int start, int end,
      boolean useDefaultTarget) {
    if (start < 0 || end > doc.getTokenCount() || start > end) {
      throw new IllegalArgumentException("Start or end index out of range.");
    }
    Document.Builder docBuilder = Document.newBuilder();
    docBuilder.addAllMethod(doc.getMethodList());
    docBuilder.setConsistentMethod(doc.getConsistentMethod());
    docBuilder.setId(doc.getId() + ":" + start + ":" + end);
    if (doc.getTokenCount() == 0) {
      return docBuilder.build();
    }
    
    if (doc.hasText()) {
      // NOTE(review): this copies the text span of the WHOLE document (first
      // token to last token), not just [start, end), and Token.start/end
      // character offsets are never shifted. Confirm this is the intended
      // behavior before changing it.
      Token firstTok = doc.getToken(0);
      Token lastTok = doc.getToken(doc.getTokenCount() - 1);
      docBuilder.setText(doc.getText().substring(firstTok.getStart(), 
          lastTok.getEnd()));
    }
    
    // Copy tokens in [start, end), remapping Annotation.target_token into the
    // new document's index space.
    for (int i = start; i < end; ++i) {
      Token.Builder tb = Token.newBuilder(doc.getToken(i));
      for (int ai = 0; ai < tb.getAnnotationCount(); ++ai) {
        Annotation a = tb.getAnnotation(ai);
        if (a.hasTargetToken()) {
          int newTarget = a.getTargetToken() - start;
          if (newTarget >= 0 && newTarget < (end - start)) {
            // Copy from the existing annotation so all other fields are
            // preserved; a fresh builder here would silently drop them.
            tb.setAnnotation(ai, 
                Annotation.newBuilder(a).setTargetToken(newTarget));
          } else if (useDefaultTarget) {
            tb.setAnnotation(ai, 
                Annotation.newBuilder(a).setTargetToken(DEFAULT_TARGET));
          }
          // Otherwise the out-of-range target is deliberately left as-is.
        }
      }
      docBuilder.addToken(tb);
    }
    
    // Copy compound groups, keeping only compounds with at least one slot in
    // [start, end); remap slot token indices and Annotation.target_compound.
    for (int cgi = 0; cgi < doc.getCompoundCount(); ++cgi) {
      CompoundGroup cg = doc.getCompound(cgi);
      CompoundGroup.Builder cgb = CompoundGroup.newBuilder(cg);
      cgb.clearCompound();
      for (int origCompInd = 0; origCompInd < cg.getCompoundCount(); 
            ++origCompInd) {
        Compound c = cg.getCompound(origCompInd);
        Compound.Builder cb = Compound.newBuilder(c);
        cb.clearSlot();
        for (CompoundSlot cs : c.getSlotList()) {
          if (cs.getTokenIndex() >= start && cs.getTokenIndex() < end) {
            int newTokenIndex = cs.getTokenIndex() - start;
            // Make slot point to position in new document.
            cb.addSlot(
                CompoundSlot.newBuilder(cs).setTokenIndex(newTokenIndex));
            // Update index to CompoundGroup.compound in token.Annotation.
            // The match is decided against the ORIGINAL token's annotations
            // (the copied ones may already have a remapped target_token).
            Token t = doc.getToken(cs.getTokenIndex());
            Token.Builder tb = Token.newBuilder(docBuilder.getToken(newTokenIndex));
            for (int ai = 0; ai < t.getAnnotationCount(); ++ai) {
              Annotation anno = t.getAnnotation(ai);
              if (anno.hasTargetCompound() &&
                  anno.getCompoundGroup() == cgi && 
                  anno.getTargetCompound() == origCompInd) {
                // Rebase on the annotation already copied into the new
                // document so the target_token remapping done in the token
                // loop above is not lost.
                tb.setAnnotation(ai, 
                    Annotation.newBuilder(tb.getAnnotation(ai))
                        .setTargetCompound(cgb.getCompoundCount()));
              }
            }
            docBuilder.setToken(newTokenIndex, tb);
          }
        }
        if (cb.getSlotCount() > 0) {
          cgb.addCompound(cb);
        }
      }
      docBuilder.addCompound(cgb);
    }
    return docBuilder.build();
  }
  
  /**
   * This returns the concatenation of all token texts.
   * 
   * @param doc the document
   * @param separator the separator inserted between succeeding tokens.
   * @return the joined token texts; the empty string for an empty document.
   */
  public static String textFromTokens(Document doc, String separator) {
    // TODO: make separation dependent on BreakLevel.
    // StringBuilder: local, single-threaded use needs no synchronization.
    StringBuilder sb = new StringBuilder();
    for (Token tok : doc.getTokenList()) {
      if (sb.length() > 0) {
        sb.append(separator);
      }
      sb.append(tok.getText());
    }
    return sb.toString();
  }
  
}
