package cn.linkai.hadoop.invertedindex;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.StringTokenizer;

/**
 * Hadoop MapReduce job that builds an inverted index: for every word across the
 * input files, it emits the list of source file names with per-file occurrence
 * counts, e.g. {@code word \t fileA->3,fileB->1}.
 */
public class InvertedIndex {

    /** Default input/output paths, kept for backward compatibility when no args are given. */
    private static final String DEFAULT_INPUT = "E:\\hadoop\\hadoopdata\\invertedIndexIN201804221725";
    private static final String DEFAULT_OUTPUT = "E:\\hadoop\\hadoopdata\\out201804221727";

    /**
     * Configures and runs the inverted-index job.
     *
     * @param args optional: {@code args[0]} = input path, {@code args[1]} = output path;
     *             falls back to the original hard-coded defaults when absent.
     */
    public static void main(String[] args) {
        // Generalized: paths may now come from the command line; defaults preserve old behavior.
        String inputPath = args.length > 0 ? args[0] : DEFAULT_INPUT;
        String outputPath = args.length > 1 ? args[1] : DEFAULT_OUTPUT;
        try {
            Configuration conf = new Configuration();
            Job job = Job.getInstance(conf);
            job.setJobName("invertedIndex");
            job.setJarByClass(InvertedIndex.class);
            job.setMapperClass(InvertedIndexMapper.class);
            job.setReducerClass(InvertedIndexReducer.class);

            job.setOutputKeyClass(Text.class);
            job.setOutputValueClass(Text.class);

            job.setInputFormatClass(TextInputFormat.class);

            FileInputFormat.setInputPaths(job, new Path(inputPath));
            FileOutputFormat.setOutputPath(job, new Path(outputPath));

            boolean state = job.waitForCompletion(true);
            if (state) {
                System.out.println("success");
            } else {
                System.out.println("fail");
            }
        } catch (InterruptedException e) {
            // Restore the interrupt flag so callers/threads above us can observe it.
            Thread.currentThread().interrupt();
            e.printStackTrace();
        } catch (IOException | ClassNotFoundException e) {
            e.printStackTrace();
        }
    }

    /**
     * Mapper: for each whitespace-separated token in a line, emits
     * {@code (word, "fileName->1")}. The reducer aggregates the per-file 1s.
     */
    public static class InvertedIndexMapper extends Mapper<LongWritable, Text, Text, Text> {

        @Override
        protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
            // Derive the bare file name of the split this line came from.
            String fileName = ((FileSplit) context.getInputSplit()).getPath().toString();
            fileName = fileName.substring(fileName.lastIndexOf("/") + 1);

            StringTokenizer tokenizer = new StringTokenizer(value.toString());
            while (tokenizer.hasMoreTokens()) {
                String word = tokenizer.nextToken();
                // BUG FIX: the original mutated fileName (fileName += "->1") every
                // iteration, so the 2nd token emitted "file->1->1", the 3rd
                // "file->1->1->1", etc., corrupting the reducer's count parsing.
                // Build the value fresh each time instead.
                context.write(new Text(word), new Text(fileName + "->" + "1"));
            }
        }
    }

    /**
     * Reducer: sums the per-file counts for a word and emits
     * {@code (word, "fileA->count,fileB->count")}.
     */
    public static class InvertedIndexReducer extends Reducer<Text, Text, Text, Text> {
        @Override
        protected void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
            // fileName -> total occurrences of `key` in that file.
            Map<String, Long> wordIndexMap = new HashMap<>();
            for (Text text : values) {
                // Hadoop reuses the Text instance across iterations; we only need
                // its String form, so no defensive Text copy is required.
                String[] mapValues = text.toString().split("->");
                String fileName = mapValues[0];
                Long count = Long.valueOf(mapValues[1]);
                // merge() replaces the manual get/null-check/put pattern.
                wordIndexMap.merge(fileName, count, Long::sum);
            }
            // StringBuilder (unsynchronized) is preferred over StringBuffer here.
            StringBuilder value = new StringBuilder();
            for (Map.Entry<String, Long> entry : wordIndexMap.entrySet()) {
                if (value.length() > 0) {
                    value.append(","); // delimiter between file->count pairs
                }
                value.append(entry.getKey()).append("->").append(entry.getValue());
            }
            context.write(key, new Text(value.toString()));
        }
    }
}
