
import java.io.File;
import java.io.IOException;
import java.util.*;

import org.apache.hadoop.fs.InvalidPathException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.conf.*;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapreduce.*;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.log4j.Logger;
public class WordCount {
	
public static class Map extends Mapper<Text, Text, Text, FloatWritable> {

	// Product names whose price rows should be emitted. A Set gives O(1)
	// membership tests instead of the original linear array scan.
	private static final Set<String> ITEMS =
			new HashSet<String>(Arrays.asList("Pork", "Pineapple", "Bandakka", "Vetakolu"));

	private Text word = new Text();
	private FloatWritable val = new FloatWritable();
	Logger logger = Logger.getLogger(Map.class);

	/**
	 * Parses one whitespace-delimited line extracted from a PDF report.
	 * If the first token is a tracked item and the line has at least five
	 * columns, emits ("&lt;key&gt;-&lt;item&gt;", column-4 parsed as float).
	 * Lines with a non-numeric price column are logged and skipped instead
	 * of failing the task.
	 */
	public void map(Text key, Text value, Context context) throws IOException, InterruptedException {
		logger.debug("Key is :" + key);
		String[] parts = value.toString().split("\\s+");
		// Check the column count BEFORE touching parts[4]; the original
		// evaluated the length check second, after matching parts[0].
		if (parts.length > 4 && ITEMS.contains(parts[0].trim())) {
			float price;
			try {
				price = Float.parseFloat(parts[4]);
			} catch (NumberFormatException e) {
				// Original let this exception propagate and kill the task
				// attempt; a single malformed row should not abort the job.
				logger.warn("Skipping row with non-numeric price column: " + parts[4], e);
				return;
			}
			word.set(key + "-" + parts[0]);
			val.set(price);
			context.write(word, val);
		}
	}
}

public static class Reduce extends Reducer<Text, FloatWritable, Text, FloatWritable> {

	/**
	 * Emits the arithmetic mean of all float values observed for a key.
	 * Hadoop only invokes reduce for keys with at least one value, so the
	 * divisor is always &gt;= 1.
	 */
	public void reduce(Text key, Iterable<FloatWritable> values, Context context)
			throws IOException, InterruptedException {
		float total = 0f;
		int n = 0;
		for (FloatWritable v : values) {
			total += v.get();
			n++;
		}
		context.write(key, new FloatWritable(total / n));
	}

}

/**
 * Input format that hands each PDF file to a {@code PDFLineRecordReader},
 * which turns its contents into (Text, Text) records.
 */
public static class PDFInputFormat extends FileInputFormat<Text, Text> {

	@Override
	public RecordReader<Text, Text> createRecordReader(InputSplit split, TaskAttemptContext context)
			throws IOException, InterruptedException {
		return new PDFLineRecordReader();
	}

	/**
	 * A PDF cannot be parsed from an arbitrary byte offset, so never split
	 * one — even when it exceeds the HDFS block size.
	 */
	@Override
	protected boolean isSplitable(JobContext context, Path filename) {
		return false;
	}
}


/**
 * Configures and submits the weekly-price-averaging job. Every report PDF
 * named DCSB-WRP-&lt;year&gt;-&lt;month&gt;-&lt;week&gt;.pdf that exists on disk is added
 * as an input; output goes to a fixed local directory. The process exit
 * code reflects job success or failure.
 */
public static void main(String[] args) throws Exception {
	Configuration conf = new Configuration();

	Job job = new Job(conf, "wordcount");
	// Ship the jar containing this class to the cluster. Without this,
	// task JVMs fail with ClassNotFoundException outside local mode.
	job.setJarByClass(WordCount.class);
	job.setOutputKeyClass(Text.class);
	job.setOutputValueClass(FloatWritable.class);
	job.setMapperClass(Map.class);
	job.setReducerClass(Reduce.class);
	job.setInputFormatClass(PDFInputFormat.class);
	job.setOutputFormatClass(TextOutputFormat.class);

	String[] years = {"2012", "2013", "2014"};
	String[] weeks = {"W1", "W2", "W3", "W4"};
	String[] months = {/*"01","02",*/"03", "04", "05", "06", "07", "08"};

	for (String year : years) {
		for (String month : months) {
			for (String week : weeks) {
				String fileName = "/home/nuwan/hadoop_data/DCSB-WRP"
						+ "-" + year + "-" + month + "-" + week + ".pdf";
				// NOTE(review): java.io.File only sees the local filesystem,
				// so this existence check is wrong if inputs live on HDFS —
				// confirm the intended filesystem before deploying.
				if (new File(fileName).exists()) {
					FileInputFormat.addInputPath(job, new Path(fileName));
				}
			}
		}
	}

	FileOutputFormat.setOutputPath(job, new Path("/home/nuwan/hadoop_data/output"));
	// Original discarded the completion status; propagate it so callers
	// (scripts, schedulers) can detect a failed job.
	System.exit(job.waitForCompletion(true) ? 0 : 1);
}

}