import java.io.*;
import java.net.URI;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import java.util.StringTokenizer;

import org.apache.hadoop.filecache.DistributedCache;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.SequenceFileInputFormat;
import org.apache.hadoop.mapred.SequenceFileOutputFormat;
import org.apache.hadoop.mapred.TextOutputFormat;
//import org.apache.hadoop.mapred.lib.IdentityReducer;
import org.apache.hadoop.mapred.lib.InputSampler;
import org.apache.hadoop.mapred.lib.InverseMapper;
import org.apache.hadoop.mapred.lib.TotalOrderPartitioner;


/**
 * Two-stage Hadoop (old {@code mapred} API) job:
 * <ol>
 *   <li>Count word frequencies in the input, dropping stop words and words
 *       whose total count is not greater than a minimum frequency {@code k}.</li>
 *   <li>Invert (word, count) to (count, word) and sort by count descending.</li>
 * </ol>
 * Usage: {@code hadoop jar wordcounter.jar <in> <out> [k]} (k defaults to 0).
 */
public class WordCounter {

    /**
     * Stop-word list, loaded lazily at most once per JVM. The original code
     * re-read resource/stopword.txt on every isStopWord() call (i.e. once per
     * token), which is prohibitively slow inside a mapper.
     */
    private static volatile Set<String> stopWords;

    /**
     * Returns {@code true} if {@code token} appears (as a whole line) in the
     * classpath resource {@code resource/stopword.txt}.
     *
     * <p>If the resource is missing or unreadable, no word is treated as a
     * stop word (the original code threw a NullPointerException in that case).
     *
     * @param token the token to test; expected to be already lower-cased by the caller
     * @return whether the token is a stop word
     */
    public boolean isStopWord(String token) {
        Set<String> words = stopWords;
        if (words == null) {
            // Double-checked lazy init; volatile field keeps it safe.
            synchronized (WordCounter.class) {
                words = stopWords;
                if (words == null) {
                    words = loadStopWords();
                    stopWords = words;
                }
            }
        }
        return words.contains(token);
    }

    /**
     * Reads the stop-word resource, one word per line.
     * Returns an empty set (rather than failing) if the resource is absent.
     */
    private static Set<String> loadStopWords() {
        Set<String> words = new HashSet<String>();
        InputStream is = WordCounter.class.getResourceAsStream("resource/stopword.txt");
        if (is == null) {
            System.err.println("resource/stopword.txt not found; no stop words will be filtered");
            return words;
        }
        BufferedReader reader = null;
        try {
            // Explicit charset: the default platform charset may differ between nodes.
            reader = new BufferedReader(new InputStreamReader(is, "UTF-8"));
            String line;
            while ((line = reader.readLine()) != null) {
                words.add(line);
            }
        } catch (IOException e) {
            System.err.println("IO ERROR reading stopword.txt: " + e);
        } finally {
            // Always release the stream, even on a mid-read failure
            // (the original leaked it on IOException).
            try {
                if (reader != null) {
                    reader.close();
                } else {
                    is.close();
                }
            } catch (IOException ignored) {
                // best-effort close
            }
        }
        return words;
    }

    /**
     * Splits each input line into lower-cased tokens on punctuation and
     * whitespace, and emits (token, 1) for every non-stop-word token.
     */
    public static class TokenizerMapper extends MapReduceBase
            implements Mapper<LongWritable, Text, Text, IntWritable> {
            private static final IntWritable ONE = new IntWritable(1);
            private final Text word = new Text();
            // One filter per mapper instance (the original built a new
            // WordCounter — and re-read the stop-word file — per record).
            private final WordCounter stopWordFilter = new WordCounter();

            public void map(LongWritable key, Text value,
                    OutputCollector<Text, IntWritable> output, Reporter reporter)
                throws IOException {

                String line = value.toString().toLowerCase();

                StringTokenizer itr = new StringTokenizer(line, ",.!?-|<>\"[]{}():;\t ", false);
                while (itr.hasMoreTokens()) {
                    String token = itr.nextToken();
                    if (!stopWordFilter.isStopWord(token)) {
                        word.set(token);
                        output.collect(word, ONE);
                    }
                }
            }
    }

    /**
     * Pure summing combiner. It must NOT apply the minimum-frequency
     * threshold: a combiner only sees partial per-map counts, so filtering
     * here would drop words whose per-map count is small but whose global
     * count exceeds the threshold. (The original job incorrectly reused the
     * thresholding reducer as its combiner.)
     */
    public static class SumCombiner extends MapReduceBase
            implements Reducer<Text, IntWritable, Text, IntWritable> {
            public void reduce(Text key, Iterator<IntWritable> values,
                    OutputCollector<Text, IntWritable> output, Reporter reporter)
                throws IOException {
                int sum = 0;
                while (values.hasNext()) {
                    sum += values.next().get();
                }
                output.collect(key, new IntWritable(sum));
            }
    }

    /**
     * Sums the counts for each word and emits only words whose total count is
     * strictly greater than the configured minimum frequency
     * ({@code wordcounter.minFre}, default 0).
     */
    public static class IntSumReducer extends MapReduceBase
            implements Reducer<Text, IntWritable, Text, IntWritable> {
            // Minimum frequency threshold, read from job configuration.
            private int k = 0;

            public void configure(JobConf job) {
                k = job.getInt("wordcounter.minFre", 0);
            }

            public void reduce(Text key, Iterator<IntWritable> values,
                    OutputCollector<Text, IntWritable> output, Reporter reporter)
                throws IOException {

                int sum = 0;
                while (values.hasNext()) {
                    sum += values.next().get();
                }
                if (sum > k) {
                    output.collect(key, new IntWritable(sum));
                }
            }
    }

    /**
     * Orders IntWritable keys in DESCENDING order by negating the standard
     * comparator, so the sort job emits the most frequent words first.
     */
    public static class IntWritableDecreasingComparator extends
        IntWritable.Comparator {
            public int compare(WritableComparable a, WritableComparable b) {
                return -super.compare(a, b);
            }
            public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {
                return -super.compare(b1, s1, l1, b2, s2, l2);
            }
        }

    /**
     * Runs the count job followed by the descending-frequency sort job.
     *
     * @param args {@code <in> <out> [k]} — input path, output path, and an
     *             optional minimum frequency (default 0; the usage string
     *             already advertised it as optional, but the original code
     *             rejected the two-argument form)
     */
    public static void main(String[] args) throws Exception {

        if (args.length < 2 || args.length > 3) {
            System.err.println("Format error!");
            System.out.println("hadoop jar wordcounter.jar <in> <out> [k]");
            System.exit(2);
        }
        for (int i = 0; i < args.length; i++) {
            System.out.println(args[i]);
        }
        int k = (args.length == 3) ? Integer.parseInt(args[2]) : 0;

        // Intermediate output lives next to the input directory. Using
        // Path.getParent() avoids the StringIndexOutOfBoundsException the
        // original substring/lastIndexOf("/") code threw for slash-less paths.
        Path inputPath = new Path(args[0]);
        Path parent = inputPath.getParent();
        Path wcOutPath = new Path(parent != null ? parent : inputPath, "TEMP_DIR");

        // ---- word-count job ----
        JobConf countJob = new JobConf(WordCounter.class);
        countJob.setJarByClass(WordCounter.class);
        countJob.setMapperClass(TokenizerMapper.class);
        // Combiner must only sum; the threshold is applied in the reduce phase.
        countJob.setCombinerClass(SumCombiner.class);
        countJob.setReducerClass(IntSumReducer.class);
        countJob.setOutputFormat(SequenceFileOutputFormat.class);

        countJob.setOutputKeyClass(Text.class);
        countJob.setOutputValueClass(IntWritable.class);

        FileSystem fs = FileSystem.get(countJob);
        fs.delete(wcOutPath, true);
        FileInputFormat.addInputPath(countJob, inputPath);
        FileOutputFormat.setOutputPath(countJob, wcOutPath);
        countJob.setInt("wordcounter.minFre", k);
        JobClient.runJob(countJob);

        // ---- sort job: invert to (count, word) and sort counts descending ----
        JobConf sortJob = new JobConf(WordCounter.class);
        sortJob.setJobName("wordcounter");

        sortJob.setMapperClass(InverseMapper.class);

        sortJob.setInputFormat(SequenceFileInputFormat.class);
        sortJob.setOutputFormat(TextOutputFormat.class);
        sortJob.setOutputKeyClass(IntWritable.class);
        sortJob.setOutputValueClass(Text.class);

        sortJob.setOutputKeyComparatorClass(IntWritableDecreasingComparator.class);

        fs = FileSystem.get(sortJob);
        fs.delete(new Path(args[1]), true);
        FileInputFormat.setInputPaths(sortJob, wcOutPath);
        FileOutputFormat.setOutputPath(sortJob, new Path(args[1]));

        // NOTE(review): with a single reduce task the total-order partitioner
        // and the sampling below are redundant (one reducer already yields a
        // globally sorted file); kept to preserve the original behavior.
        sortJob.setNumReduceTasks(1);
        sortJob.setPartitionerClass(TotalOrderPartitioner.class);
        InputSampler.Sampler<IntWritable, Text> sampler =
            new InputSampler.RandomSampler<IntWritable, Text>(0.1, 500, 10);

        System.out.println("Sampling input to effect total-order sort...");

        Path inputDir = FileInputFormat.getInputPaths(sortJob)[0];
        inputDir = inputDir.makeQualified(inputDir.getFileSystem(sortJob));
        Path partitionFile = new Path(inputDir, "_sortPartitioning");
        TotalOrderPartitioner.setPartitionFile(sortJob, partitionFile);
        InputSampler.writePartitionFile(sortJob, sampler);

        // Ship the partition file via the distributed cache under the symlink
        // name TotalOrderPartitioner expects.
        URI partitionUri = new URI(partitionFile.toString() + "#" + "_sortPartitioning");
        DistributedCache.addCacheFile(partitionUri, sortJob);
        DistributedCache.createSymlink(sortJob);
        JobClient.runJob(sortJob);
    }
}
