package de.tuberlin.dima.aim3.oc;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

import de.tuberlin.dima.aim3.oc.input.WikiDumpParser;
import de.tuberlin.dima.aim3.oc.input.custom.WikiDumpStaxParser;
import de.tuberlin.dima.aim3.oc.input.custom.WikiPage;
import eu.stratosphere.nephele.configuration.Configuration;
import eu.stratosphere.pact.common.generic.io.InputFormat;
import eu.stratosphere.pact.common.io.DelimitedInputFormat;
import eu.stratosphere.pact.common.type.PactRecord;
import eu.stratosphere.pact.common.type.base.PactInteger;

/**
 * Stratosphere {@link InputFormat} which relies on the
 * {@link DelimitedInputFormat} and uses the <i>&lt;page&gt;</i> tag as delimiter to
 * be sliced by the framework.
 * 
 * TODO Currently runs into {@link OutOfMemoryError}s because stratosphere reads
 * the whole content between two <i>&lt;page&gt;</i> tags in memory as a byte array.
 * 
 * @author Florian Feigenbutz &lt;florian.feigenbutz@campus.tu-berlin.de&gt;
 */
public class DelimiterBasedWikipediaDumpInFormat extends DelimitedInputFormat {

  private static final Log LOG = LogFactory
      .getLog(DelimiterBasedWikipediaDumpInFormat.class);

  private static final String DEFAULT_DELIMITER = "<page>";

  public static final String CONFIG_KEY_DELIMITER = "input.xml.delimiter";

  /**
   * Records of this size (in MB) or larger are skipped to avoid the
   * {@link OutOfMemoryError}s mentioned in the class comment.
   */
  private static final int MAX_RECORD_MEGA_BYTES = 3;

  /** Number of characters shown from each end of a record when logging. */
  private static final int LOG_PREVIEW_LENGTH = 100;

  private final WikiDumpParser parser;

  // Initialized eagerly so getDelimiter() never hands the framework a null
  // delimiter, even if configure() has not been invoked yet.
  private byte[] delimiter = DEFAULT_DELIMITER.getBytes(Constants.CHARSET);

  public DelimiterBasedWikipediaDumpInFormat() {
    parser = new WikiDumpStaxParser();
  }

  /**
   * Reads the record delimiter from the job configuration (key
   * {@link #CONFIG_KEY_DELIMITER}), falling back to the default
   * {@code <page>} tag.
   * 
   * @param parameters job configuration supplied by the framework
   */
  @Override
  public void configure(Configuration parameters) {
    super.configure(parameters);

    delimiter = parameters.getString(CONFIG_KEY_DELIMITER, DEFAULT_DELIMITER)
        .getBytes(Constants.CHARSET);
  }

  @Override
  public byte[] getDelimiter() {
    return delimiter;
  }

  /**
   * Parses one delimited slice of the Wikipedia dump into a {@link WikiPage}
   * and emits it into {@code target} as (revision count, page).
   * 
   * @param target record to fill: field 0 = number of revisions as
   *          {@link PactInteger}, field 1 = the parsed {@code PactWikiPage}
   * @param bytes buffer holding the record; only the first {@code numBytes}
   *          bytes belong to this record — the framework may hand over a
   *          larger, partially stale buffer
   * @param numBytes number of valid bytes in {@code bytes}
   * @return {@code true} if a page was parsed and emitted, {@code false} if
   *         the record was skipped (too large or unparseable)
   */
  @Override
  public boolean readRecord(PactRecord target, byte[] bytes, int numBytes) {
    int numMegaBytes = toMegaBytes(numBytes);
    LOG.info("Received record input of " + numBytes + " bytes or "
        + numMegaBytes + "MB");

    if (numMegaBytes >= MAX_RECORD_MEGA_BYTES) {
      LOG.warn("Skipping large input because of memory usage. Need to fix this!");
      return false;
    }

    // Re-prepend the delimiter consumed by the framework, then decode ONLY
    // the first numBytes bytes — decoding the whole buffer would pick up
    // stale data left over from previous, longer records.
    StringBuilder pageInput = new StringBuilder();
    pageInput.append(new String(delimiter, Constants.CHARSET));
    pageInput.append(new String(bytes, 0, numBytes, Constants.CHARSET));
    LOG.info("Record input is: '" + head(pageInput) + " ... "
        + tail(pageInput) + "'");

    WikiPage parsedWikiPage = parser.readWikiDumpPage(pageInput.toString());
    if (parsedWikiPage == null) {
      LOG.warn("Error while parsing page from Wikipedia dump.");
      return false;
    }

    LOG.info("Successfully parsed page from Wikipedia Dump.");
    PactWikiPage pactPage = new PactWikiPage(parsedWikiPage);
    target.setField(0, new PactInteger(parsedWikiPage.getRevisions().size()));
    target.setField(1, pactPage);
    return true;
  }

  /** First {@value #LOG_PREVIEW_LENGTH} characters (or fewer) for logging. */
  private static String head(CharSequence s) {
    return s.subSequence(0, Math.min(LOG_PREVIEW_LENGTH, s.length())).toString();
  }

  /** Last {@value #LOG_PREVIEW_LENGTH} characters (or fewer) for logging. */
  private static String tail(CharSequence s) {
    int start = Math.max(0, s.length() - LOG_PREVIEW_LENGTH);
    return s.subSequence(start, s.length()).toString();
  }

  /** Converts a byte count to whole megabytes (truncating division). */
  private static int toMegaBytes(int numBytes) {
    return numBytes / (1024 * 1024);
  }
}
