
import java.net.URI;
import org.apache.hadoop.filecache.DistributedCache;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapred.*;
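// Driver for the string-matching MapReduce job: configures the job and submits it.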
public class hadoopBrute {
	
	public static void main(String[] args) throws Exception {
		JobConf conf = new JobConf(hadoopBrute.class);
		conf.setJobName("String-Matching");

		// The job emits Text keys and Text values.
		conf.setOutputKeyClass(Text.class);
		conf.setOutputValueClass(Text.class);

		// Custom mapper, partitioner and reducer classes implement the matching logic.
		conf.setMapperClass(Map.class);
		conf.setPartitionerClass(ReduceBucketPartitioner.class);
		//conf.setCombinerClass(Reduce.class);
		conf.setReducerClass(Reduce.class);

		// Read input through the project's BytesInputFormat; write plain-text output.
		conf.setInputFormat(BytesInputFormat.class);
		conf.setOutputFormat(TextOutputFormat.class);

		// Separate output key and value with a comma instead of the default tab.
		// (Hadoop 1.x releases read "mapred.textoutputformat.separator" instead.)
		conf.set("mapreduce.output.textoutputformat.separator", ",");

		// Ship the query file to every task node via the distributed cache;
		// query.dat is expected to already be on the shared file system.
		DistributedCache.addCacheFile(new URI("./query.dat"), conf);

		FileInputFormat.setInputPaths(conf, new Path(args[0]));
		FileOutputFormat.setOutputPath(conf, new Path(args[1]));

		JobClient.runJob(conf);
	}
}
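
/*
 * Example invocation (illustrative only; the jar name and paths are placeholders):
 *
 *   hadoop fs -put query.dat query.dat
 *   hadoop jar string-matching.jar hadoopBrute <input dir> <output dir>
 *
 * Mapper-side retrieval of the cached query file (a minimal sketch, not the
 * actual Map class used above, which is defined elsewhere): each map task can
 * read its local copy of query.dat in configure(), for example as follows.
 */
class CachedQueryReaderSketch extends MapReduceBase {
	private byte[] query;

	@Override
	public void configure(JobConf job) {
		try {
			// Local paths of every file registered with DistributedCache.addCacheFile().
			Path[] cached = DistributedCache.getLocalCacheFiles(job);
			if (cached != null && cached.length > 0) {
				// Load the query bytes into memory so map() can scan each input chunk.
				java.io.File queryFile = new java.io.File(cached[0].toString());
				query = new byte[(int) queryFile.length()];
				java.io.DataInputStream in =
						new java.io.DataInputStream(new java.io.FileInputStream(queryFile));
				in.readFully(query);
				in.close();
			}
		} catch (java.io.IOException e) {
			throw new RuntimeException("Could not read cached query file", e);
		}
	}
}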
