package twitterhadoop.hadoop.io;

import java.io.IOException;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

/**
 * Hadoop {@link InputFormat} that turns a Twitter search query into a set of
 * date-window splits. Each {@code TwitterInputSplit} covers a contiguous
 * [since, until) range of days ending at the configured "until" date and
 * walking backwards for the configured number of days.
 */
public class TwitterInputFormat extends InputFormat<LongWritable, TextArrayWritable>  {
	/** Conf key: end date of the search window, formatted yyyy-MM-dd. */
	public static final String TWITTER_UNTIL_CONF   = "mapreduce.input.twitter.until";
	/** Conf key: the Twitter search query string. */
	public static final String TWITTER_QUERY_CONF   = "mapreduce.input.twitter.query";
	/** Conf key: how many days to walk backwards from the "until" date. */
	public static final String TWITTER_NUM_OF_DAYS  = "mapreduce.input.twitter.numOfDays";
	/** Conf key: maximum number of tweets fetched per split. */
	public static final String TWITTER_TWEETS_LIMIT = "mapreduce.input.twitter.limit";
	
	/**
	 * Sets the Twitter query parameters on the job configuration.
	 * <p>
	 * Note: this resets the per-split tweet limit to {@link Integer#MAX_VALUE},
	 * so {@link #setLimit(Job, int)} must be called <em>after</em> this method
	 * to take effect.
	 * 
	 * @param job       The Hadoop job.
	 * @param query     Query string.
	 * @param until     Until date, formatted yyyy-MM-dd.
	 * @param numOfDays Number of days to go backwards.
	 */
	public static void setQuery(Job job, String query, String until, int numOfDays) {
		job.getConfiguration().set(TWITTER_QUERY_CONF,  query);
		job.getConfiguration().set(TWITTER_UNTIL_CONF,  until);
		job.getConfiguration().setInt(TWITTER_NUM_OF_DAYS, numOfDays);
		job.getConfiguration().setInt(TWITTER_TWEETS_LIMIT, Integer.MAX_VALUE);
	}
	
	/**
	 * Sets a limit on the number of tweets per split.
	 * This is mainly used for debugging not to exceed Twitter's rate limit.
	 * Must be called after {@link #setQuery(Job, String, String, int)}, which
	 * resets the limit.
	 * 
	 * @param job   The Hadoop job.
	 * @param limit Maximum number of tweets per split.
	 */
	public static void setLimit(Job job, int limit) {
		// setInt (not setLong) so the write matches the conf.getInt() read in getSplits().
		job.getConfiguration().setInt(TWITTER_TWEETS_LIMIT, limit);
	}
	
	/**
	 * Builds one split per mapper (or per day, whichever yields fewer splits),
	 * walking backwards from the configured "until" date so that consecutive
	 * splits cover adjacent, non-overlapping date windows.
	 * 
	 * @throws IOException if the configured "until" date cannot be parsed.
	 */
	@Override
	public List<InputSplit> getSplits(JobContext context) throws IOException, InterruptedException {
		Configuration conf = context.getConfiguration();
		SimpleDateFormat df = new SimpleDateFormat("yyyy-MM-dd");
		// Strict parsing: reject malformed dates (e.g. 2020-13-45) instead of
		// letting lenient Calendar arithmetic silently roll them over.
		df.setLenient(false);
		
		Calendar cal = Calendar.getInstance();
		String until = conf.get(TWITTER_UNTIL_CONF);
		if (until == null) {
			// No explicit end date configured: default to today.
			until = df.format(cal.getTime());
		} else {
			try {
				cal.setTime(df.parse(until));
			} catch (ParseException e) {
				// Fail job submission rather than silently splitting from "now"
				// (previously the error was swallowed and today's date was used).
				throw new IOException("Invalid " + TWITTER_UNTIL_CONF + " value: " + until, e);
			}
		}
		
		String query = conf.get(TWITTER_QUERY_CONF);
		// Guard against a user explicitly configuring 0 mappers (division by zero below).
		int numOfMappers = Math.max(1, conf.getInt("mapred.map.tasks", 1));
		int numOfDays = conf.getInt(TWITTER_NUM_OF_DAYS, 1);
		int limit = conf.getInt(TWITTER_TWEETS_LIMIT, Integer.MAX_VALUE);
		
		List<InputSplit> splits = new ArrayList<InputSplit>();
		if (numOfMappers < numOfDays) {
			// Fewer mappers than days: spread the days as evenly as possible;
			// the first (numOfDays % numOfMappers) mappers take one extra day.
			int base = numOfDays / numOfMappers;           // loop-invariant, hoisted
			int remainder = numOfDays % numOfMappers;
			for (int i = 0; i < numOfMappers; i++) {
				int daysPerMapper = base + (i < remainder ? 1 : 0);
				cal.add(Calendar.DATE, -daysPerMapper);
				String since = df.format(cal.getTime());
				splits.add(new TwitterInputSplit(query, since, until, limit));
				until = since; // next split's window ends where this one starts
			}
		} else {
			// At least as many mappers as days: one single-day split per day.
			for (int i = 0; i < numOfDays; i++) {
				cal.add(Calendar.DATE, -1);
				String since = df.format(cal.getTime());
				splits.add(new TwitterInputSplit(query, since, until, limit));
				until = since;
			}
		}
		
		return splits;
	}

	/**
	 * Creates the record reader that fetches tweets for a single split.
	 */
	@Override
	public RecordReader<LongWritable, TextArrayWritable> createRecordReader(
			InputSplit split, TaskAttemptContext context) throws IOException,
			InterruptedException {
		
		return new TwitterRecordReader(); 
	}

}
