package de.tuberlin.dima.aim3.oc;

import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.Reader;
import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.List;

import de.tuberlin.dima.aim3.oc.input.IterativeWikiDumpParser;
import de.tuberlin.dima.aim3.oc.input.custom.WikiPage;
import de.tuberlin.dima.aim3.oc.input.jaxb.WikiDumpStaxJaxbParser;
import eu.stratosphere.nephele.configuration.Configuration;
import eu.stratosphere.nephele.fs.BlockLocation;
import eu.stratosphere.nephele.fs.FileInputSplit;
import eu.stratosphere.nephele.fs.FileStatus;
import eu.stratosphere.nephele.fs.FileSystem;
import eu.stratosphere.nephele.fs.Path;
import eu.stratosphere.pact.common.generic.io.InputFormat;
import eu.stratosphere.pact.common.io.FileInputFormat;
import eu.stratosphere.pact.common.io.statistics.BaseStatistics;
import eu.stratosphere.pact.common.type.PactRecord;
import eu.stratosphere.pact.common.type.base.PactInteger;

/**
 * Stratosphere {@link InputFormat} which relies on the {@link FileInputFormat}
 * and uses custom code to slice and process the input content.
 * 
 * Currently it only creates a single input slice for every XML file which
 * simplifies parsing but does not scale well when running on a cluster.
 * 
 * Internally it uses JAXB and StAX for XML processing and iterates through the
 * input file, reading one <i>&lt;page&gt;</i> node at a time.
 * 
 * TODO Enable slicing of XML input file
 * 
 * TODO Currently runs into {@link OutOfMemoryError}s when processing large
 * files (~ 600MB).
 * 
 * @author Florian Feigenbutz <florian.feigenbutz@campus.tu-berlin.de>
 */
public class JaxbBasedWikipediaDumpInFormat extends FileInputFormat {

  /**
   * Wikipedia dumps are encoded in UTF-8; reading with the platform default
   * charset would corrupt non-ASCII page content on some systems.
   */
  private static final Charset DUMP_CHARSET = Charset.forName("UTF-8");

  /** Streaming parser yielding one dump page per call; created in configure(). */
  private IterativeWikiDumpParser parser;

  public JaxbBasedWikipediaDumpInFormat() {
    super();
  }

  /**
   * Opens a UTF-8 reader on the configured input file and initializes the
   * StAX/JAXB-based dump parser.
   * 
   * NOTE(review): the reader is opened here rather than in the usual
   * open()/close() lifecycle, so the underlying stream is never explicitly
   * closed and is only reclaimed when the task JVM exits — acceptable for a
   * proof of concept, but worth revisiting.
   * 
   * @param parameters the task configuration, forwarded to the superclass
   * @throws RuntimeException if the input file does not exist or is unreadable
   */
  @Override
  public void configure(Configuration parameters) {
    super.configure(parameters);

    Reader xmlReader;
    try {
      xmlReader = new InputStreamReader(new FileInputStream(new File(
          this.filePath.toUri())), DUMP_CHARSET);
    } catch (FileNotFoundException e) {
      throw new RuntimeException("Cannot read input file: " + this.filePath, e);
    }
    parser = new WikiDumpStaxJaxbParser(xmlReader);
  }

  /**
   * Do NOT split Dump files into multiple splits; every file becomes exactly
   * one split covering its whole length.
   * 
   * Could be enhanced for scalability reasons but for a proof of concept this
   * is sufficient right now.
   * 
   * @param minNumSplits ignored — splitting is intentionally disabled
   * @return one whole-file split per input file
   */
  @Override
  public FileInputSplit[] createInputSplits(int minNumSplits)
      throws IOException {
    final Path path = this.filePath;
    final FileSystem fs = path.getFileSystem();

    final FileStatus pathFile = fs.getFileStatus(path);

    final List<FileInputSplit> splits = new ArrayList<FileInputSplit>();
    if (pathFile.isDir()) {
      // input is a directory: one whole-file split per contained file
      final FileStatus[] dir = fs.listStatus(path);
      for (int i = 0; i < dir.length; i++) {
        splits.add(createWholeFileSplit(fs, i, dir[i]));
      }
    } else {
      // single input file
      splits.add(createWholeFileSplit(fs, 0, pathFile));
    }
    return splits.toArray(new FileInputSplit[splits.size()]);
  }

  /**
   * Builds a single split spanning the entire given file, preferring the
   * host(s) of its first block.
   */
  private static FileInputSplit createWholeFileSplit(FileSystem fs,
      int splitNumber, FileStatus file) throws IOException {
    final BlockLocation[] blocks = fs.getFileBlockLocations(file, 0,
        file.getLen());
    // Empty files may report no block locations; fall back to "no host
    // preference" instead of failing with an ArrayIndexOutOfBoundsException.
    final String[] hosts = (blocks == null || blocks.length == 0)
        ? new String[0]
        : blocks[0].getHosts();
    return new FileInputSplit(splitNumber, file.getPath(), 0, file.getLen(),
        hosts);
  }

  /**
   * No input statistics are computed for this format.
   * 
   * @return always {@code null}, signalling "statistics unavailable"
   */
  @Override
  public BaseStatistics getStatistics(BaseStatistics cachedStatistics) {
    // TODO Auto-generated method stub (getStatistics)
    return null;
  }

  /** @return {@code true} once the underlying parser has consumed the dump */
  @Override
  public boolean reachedEnd() throws IOException {
    return parser.reachedEnd();
  }

  /**
   * Reads the next {@code <page>} element from the dump and emits it as a
   * record of (revision count, page).
   * 
   * @param record target record; field 0 receives the number of revisions,
   *        field 1 the wrapped {@link WikiPage}
   * @return {@code true} if a page was produced, {@code false} at end of input
   */
  @Override
  public boolean nextRecord(PactRecord record) throws IOException {
    WikiPage page = parser.readNextWikiDumpPage();
    if (page != null) {
      record.setField(0, new PactInteger(page.getRevisions().size()));
      record.setField(1, new PactWikiPage(page));
      return true;
    }
    return false;
  }

}
