package cn.linkai.hadoop.invertedindex;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

/**
 * Driver that chains two MapReduce jobs to build an inverted index:
 * job1 writes an intermediate count per (word, file), job2 folds that
 * intermediate output into the final index (3 reduce partitions).
 *
 * <p>Paths may be supplied on the command line as
 * {@code <input> <intermediate> <output>}; when omitted, the original
 * hard-coded defaults are used, so existing invocations keep working.
 */
public class InvertedIndexTwiceMR {

    // Defaults preserved from the previously hard-coded paths.
    private static final String DEFAULT_INPUT =
            "E:\\hadoop\\hadoopdata\\invertedIndexIN201804221725";
    private static final String DEFAULT_INTERMEDIATE =
            "E:\\hadoop\\hadoopdata\\outfirst201804221727";
    private static final String DEFAULT_OUTPUT =
            "E:\\hadoop\\hadoopdata\\outsecond201804221727";

    /**
     * Configures and runs the two jobs sequentially; job2 only runs if job1
     * succeeds. Prints "success" or "fail" to stdout.
     *
     * @param args optional: [0] input dir, [1] intermediate dir, [2] output dir
     */
    public static void main(String[] args) {
        String input = args.length > 0 ? args[0] : DEFAULT_INPUT;
        String intermediate = args.length > 1 ? args[1] : DEFAULT_INTERMEDIATE;
        String output = args.length > 2 ? args[2] : DEFAULT_OUTPUT;

        Configuration conf = new Configuration();

        try {
            Job firstJob = Job.getInstance(conf, "job1");
            Job secondJob = Job.getInstance(conf, "job2");

            firstJob.setJarByClass(InvertedIndexTwiceMR.class);
            secondJob.setJarByClass(InvertedIndexTwiceMR.class);

            firstJob.setMapperClass(InvertedIndexTwiceMRFirst.InvertedIndexTwiceMRFirstMapper.class);
            firstJob.setReducerClass(InvertedIndexTwiceMRFirst.InvertedIndexTwiceMRFirstReducer.class);
            firstJob.setCombinerClass(InvertedIndexTwiceMRFirst.InvertedIndexTwiceMRFirstReducer.class);
            firstJob.setOutputKeyClass(Text.class);
            firstJob.setOutputValueClass(LongWritable.class);

            secondJob.setMapperClass(InvertedIndexTwiceMRSecond.InvertedIndexTwiceMRSecondMapper.class);
            secondJob.setReducerClass(InvertedIndexTwiceMRSecond.InvertedIndexTwiceMRSecondReducer.class);
            // NOTE(review): reusing the reducer as a combiner is only correct when the
            // reduce operation is associative AND its output types match the map output
            // types — verify this holds for the second-stage reducer.
            secondJob.setCombinerClass(InvertedIndexTwiceMRSecond.InvertedIndexTwiceMRSecondReducer.class);
            secondJob.setOutputKeyClass(Text.class);
            secondJob.setOutputValueClass(Text.class);
            secondJob.setPartitionerClass(InvertedIndexTwiceMRPartition.class);
            secondJob.setNumReduceTasks(3);

            // Job1: raw input -> intermediate; Job2: intermediate -> final output.
            FileInputFormat.addInputPath(firstJob, new Path(input));
            FileOutputFormat.setOutputPath(firstJob, new Path(intermediate));
            FileInputFormat.addInputPath(secondJob, new Path(intermediate));
            FileOutputFormat.setOutputPath(secondJob, new Path(output));

            // Run job2 only after job1 completes successfully.
            boolean state = firstJob.waitForCompletion(false);
            if (state) {
                System.out.println(secondJob.waitForCompletion(false) ? "success" : "fail");
            } else {
                System.out.println("fail");
            }
        } catch (InterruptedException e) {
            // Restore the interrupt status so the surrounding runtime can observe it.
            Thread.currentThread().interrupt();
            e.printStackTrace();
        } catch (IOException | ClassNotFoundException e) {
            e.printStackTrace();
        }
    }
}
