import java.io.File;
import java.io.IOException;
import java.util.Iterator;
import java.util.StringTokenizer;
import java.util.TreeSet;

import org.apache.hadoop.dfs.DistributedFileSystem;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;


public class InvertedIndex {

    public static class IMap extends MapReduceBase implements Mapper {

        // Reusable output key for the current token. Made an instance field
        // (was static): a static mutable Text is shared by every mapper
        // instance in the same JVM and corrupts output under multithreaded
        // map runners or JVM reuse.
        private final Text word = new Text();

        /**
         * Tokenizes one input line and emits a (token, position) pair for
         * each scrubbed, non-empty token.
         *
         * @param key      incoming key of the form "filename:byteOffset"
         *                 (produced by DocIDTextInputFormat -- see job setup);
         *                 mutated in place to "filename:byteOffset:lineText"
         *                 so each posting carries its source line
         * @param val      the text of the current input line
         * @param output   collector receiving (word, position) pairs
         * @param reporter unused
         * @throws IOException if the collector fails
         */
        public void map(WritableComparable key, Writable val,
                OutputCollector output, Reporter reporter) throws IOException {
            // key = "filename:byteOffset"
            StringTokenizer itr = new StringTokenizer (((Text) val).toString(), " \t\n\r\f.");
            // Append the full line text to the key so the reducer's position
            // list includes context. NOTE(review): mutating the framework-owned
            // key object works with the classic single-threaded map runner but
            // is fragile -- confirm no runner reuses keys across records.
            ((Text) key).set(key + ":" + ((Text) val).toString());
            while (itr.hasMoreTokens()) {
                String tmp = ProjectMain.scrub(itr.nextToken());
                if (!tmp.equals("")) {
                    word.set(tmp);
                    output.collect(word, key);
                }
            }
        }
    }

    public static class IReduce extends MapReduceBase implements Reducer {

        // Reusable output value holding the '^'-joined position list. Made an
        // instance field (was static): a static mutable Text is shared by all
        // reducer instances in the same JVM and races under multithreaded or
        // reused-JVM execution.
        private final Text positions = new Text();

        // Stop-word threshold: words occurring more than this many times are
        // dropped entirely. Read once at class-load time from the DFS file
        // written by main(). NOTE(review): this assumes main() has written
        // maxCountPath before any reduce-task JVM loads this class -- confirm
        // the job ordering guarantees that.
        private static final int maxCount = 
            WordCount.getIntFromFile(WordCount.maxCountPath, WordCount.dfs);

        /**
         * Emits (word, pos1^pos2^...^posN) with the positions sorted and
         * de-duplicated, unless the word's total occurrence count exceeds
         * {@code maxCount}, in which case it is treated as a stop word and
         * nothing is emitted.
         *
         * @param key    the word
         * @param values the position strings emitted by the mapper
         * @param output collector receiving (word, joined positions)
         * @param arg3   unused
         * @throws IOException if the collector fails
         */
        @SuppressWarnings("unchecked")
        public void reduce(WritableComparable key, Iterator values,
                OutputCollector output, Reporter arg3) throws IOException {
            TreeSet<String> posSet = new TreeSet<String> ();
            // count tracks total occurrences (including duplicate positions),
            // while posSet de-duplicates and sorts the positions for output.
            int count = 0;
            while (values.hasNext()) {
                posSet.add(values.next().toString());
                count++;
            }
            // Overly common words are stop words: suppress them entirely.
            if (count > maxCount)
                return;
            // Join the sorted positions with '^' separators.
            Iterator<String> itr = posSet.iterator();
            StringBuilder str = new StringBuilder ();
            if (itr.hasNext()) {
                str.append(itr.next());
            }
            while (itr.hasNext()) {
                str.append('^');
                str.append(itr.next());
            }
            positions.set(str.toString());
            output.collect(key, positions);
        }
    }


    /**
     * Builds an inverted index over the given input file.
     *
     * Usage: InvertedIndex &lt;localInput&gt; &lt;localOutput&gt; &lt;stopWordFraction&gt;
     *
     * Copies the local input onto DFS, writes the stop-word threshold
     * (totalWords * stopWordFraction) to WordCount.maxCountPath for the
     * reducer to read, runs the MapReduce job, then copies the single
     * output partition back to the local path and cleans up DFS.
     *
     * @param args [0] local input file, [1] local output file,
     *             [2] stop-word fraction in [0,1]
     * @throws IOException on any DFS or job failure
     */
    public static void main(String[] args) throws IOException {
        // Fail fast with a usage message instead of an
        // ArrayIndexOutOfBoundsException on bad invocation.
        if (args.length < 3) {
            System.err.println(
                    "Usage: InvertedIndex <input> <output> <stopWordFraction>");
            System.exit(2);
        }
        // Was JobConf(WordCount.class) -- copy-paste from the WordCount job;
        // the jar should be located via this class.
        JobConf conf = new JobConf (InvertedIndex.class);
        DistributedFileSystem fs = ProjectMain.getDFS(conf);

        // setup DFS system: stage the local input under "/" and clear any
        // stale copies of the input and output directories.
        Path root = new Path ("/");
        Path out = new Path ("out");
        Path dfsInput = new Path (args[0]);
        Path localOut = new Path (args[1]);
        conf.setWorkingDirectory(root);
        fs.setWorkingDirectory(root);
        fs.delete(dfsInput);
        fs.copyFromLocalFile(dfsInput, root);
        fs.delete(out);
        conf.setInputPath(dfsInput);
        conf.setOutputPath(out);

        // Persist the stop-word threshold to DFS so reduce-task JVMs can
        // read it (see IReduce.maxCount), then read it back for logging.
        int totalWords = TotalWords.returnTotalWords(dfsInput);
        FSDataOutputStream fmc = fs.create(WordCount.maxCountPath);
        fmc.writeInt((int) (totalWords * Double.parseDouble(args[2])));
        fmc.close();
        int maxCount = WordCount.getIntFromFile(WordCount.maxCountPath,
                WordCount.dfs);

        // setup job parameters. Job name was "Task II: Word Count" --
        // copy-paste from the WordCount job; this is the inverted-index job.
        conf.setJobName("Inverted Index");
        conf.setOutputKeyClass(Text.class);
        conf.setOutputValueClass(Text.class);
        conf.setMapperClass(InvertedIndex.IMap.class);
        conf.setReducerClass(InvertedIndex.IReduce.class);
        conf.setInputFormat(DocIDTextInputFormat.class);
        System.out.println("Total Words: " + totalWords);
        System.out.println(" Stop count: " + maxCount);
        JobClient.runJob(conf);

        // move output back to local FS; create an empty part file if the
        // job produced nothing so the copy below always succeeds.
        File outFile = new File (args[1]);
        outFile.delete();
        if (!fs.exists(new Path ("out/part-00000")))
        	fs.createNewFile(new Path ("out/part-00000"));
        fs.copyToLocalFile(new Path ("out/part-00000"), localOut);
        fs.delete(out);
        fs.delete(dfsInput);
        fs.delete(new Path ("/xxx"));
    }

}
