package mr.stocks.question1;

import java.io.IOException;
import java.util.Iterator;
import java.util.List;

import mr.dividends.model.DividendsInputWritable;
import mr.stocks.model.StocksInputWritable;

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Input format that restricts the job's input to regular CSV files and hands
 * each split to a {@link StocksWritablesRecordReader}.
 *
 * <p>Directories and files whose name does not end with the {@code .csv}
 * extension are dropped from the input list; if nothing remains, the job
 * fails fast with an {@link IOException}.
 */
public class StocksInputFormat extends FileInputFormat<LongWritable, StocksInputWritable> {

	private static final Logger LOG = LoggerFactory.getLogger(StocksInputFormat.class);

	/**
	 * Lists the job's input files, keeping only regular files with a
	 * {@code .csv} extension.
	 *
	 * @param job the job context supplying the configured input paths
	 * @return the filtered, non-empty list of CSV input files
	 * @throws IOException if the underlying listing fails, or if no CSV file
	 *         is found after filtering
	 */
	@Override
	protected List<FileStatus> listStatus(JobContext job) throws IOException {
		List<FileStatus> result = super.listStatus(job);
		Iterator<FileStatus> it = result.iterator();

		while (it.hasNext()) {
			FileStatus fs = it.next();
			LOG.info("Input file {}", fs.getPath().getName());

			// Remove directories and files without a '.csv' extension.
			// The dot is required: a bare endsWith("csv") would wrongly
			// accept names such as "datacsv".
			if (!fs.isFile() || !fs.getPath().getName().endsWith(".csv")) {
				LOG.info("Removing file {} from input file list", fs.getPath().getName());
				it.remove();
			}
		}

		if (result.isEmpty()) {
			throw new IOException("No csv file is found in this directory");
		}

		return result;
	}

	/**
	 * Creates a record reader for the given split.
	 *
	 * <p>The reader is initialized eagerly here; the framework will invoke
	 * {@code initialize} again before reading, which this reader tolerates.
	 *
	 * @param split   the input split to read
	 * @param context the task attempt context
	 * @return an initialized {@link StocksWritablesRecordReader}
	 * @throws IOException          if reader initialization fails
	 * @throws InterruptedException if the task is interrupted
	 */
	@Override
	public RecordReader<LongWritable, StocksInputWritable> createRecordReader(
			InputSplit split, TaskAttemptContext context) throws IOException,
			InterruptedException {

		StocksWritablesRecordReader reader = new StocksWritablesRecordReader();
		reader.initialize(split, context);

		return reader;
	}

}
