package edu.npu.GraphRedundance;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class OneHopRedundanceDriver {
	// Computes the data made redundant after one hop over a partitioned graph:
	// an edge whose endpoints live in different partitions is emitted once for
	// the remote partition (the redundant copy) and once for the home partition
	// (the original copy).

	/** HDFS user name used to build the working-directory prefix. */
	public static final String USERNAME = "locker";
	/** Base HDFS path under which the input and output directories live. */
	public static final String PATH_PREFIX = "hdfs://locker:9000/user/" + USERNAME + "/";
	/** Field separator for both the input records and the emitted values. */
	static final String SEPARATOR = "\t";

	public static class OneHopRedundanceMapper extends Mapper<LongWritable, Text, Text, Text> {
		/**
		 * Parses one record of the form {@code srcId#srcPart<TAB>dstId#dstPart}.
		 * Emits {@code (dstPart, "srcId<TAB>dstId")} when the endpoints belong to
		 * different partitions (the redundant copy), and always emits
		 * {@code (srcPart, "srcId<TAB>dstId")} (the original copy).
		 * Malformed records are skipped instead of crashing the task.
		 */
		@Override
		protected void map(LongWritable key, Text value, Context context)
				throws IOException, InterruptedException {
			String line = value.toString();
			String[] splits = line.split(SEPARATOR);
			// Skip blank or malformed lines that lack the TAB-separated value;
			// the original code threw ArrayIndexOutOfBoundsException here.
			if (splits.length < 2) {
				return;
			}
			String[] kParts = splits[0].split("#");
			String[] vParts = splits[1].split("#");
			// Both fields must carry an "id#partition" pair.
			if (kParts.length < 2 || vParts.length < 2) {
				return;
			}
			// Same payload for both emissions; context.write serializes
			// immediately, so reusing the object is safe.
			Text edge = new Text(kParts[0] + SEPARATOR + vParts[0]);
			// Endpoints in different blocks -> redundant copy for the remote block.
			if (!kParts[1].equals(vParts[1])) {
				context.write(new Text(vParts[1]), edge);
			}
			// Original copy for the home block.
			context.write(new Text(kParts[1]), edge);
		}
	}

	/**
	 * Configures and runs the one-hop-redundancy job: reads {@code graph_part},
	 * writes {@code oneHop}, map-only logic with 2 (identity) reduce tasks.
	 * Exits with a non-zero status when the job fails so schedulers can detect it.
	 */
	public static void main(String[] args) throws IOException, InterruptedException, ClassNotFoundException {
		Configuration conf = new Configuration();
		Job oneHop = new Job(conf);
		oneHop.setJarByClass(OneHopRedundanceDriver.class);
		oneHop.setMapperClass(OneHopRedundanceDriver.OneHopRedundanceMapper.class);
		oneHop.setMapOutputKeyClass(Text.class);
		oneHop.setMapOutputValueClass(Text.class);
		FileInputFormat.addInputPath(oneHop, new Path(PATH_PREFIX + "graph_part"));
		FileOutputFormat.setOutputPath(oneHop, new Path(PATH_PREFIX + "oneHop"));
		oneHop.setNumReduceTasks(2);
		// Propagate job success/failure; the original ignored the return value
		// and always exited 0 even when the job failed.
		System.exit(oneHop.waitForCompletion(true) ? 0 : 1);
	}
}
