package edu.npu.GraphRedundance;

import java.io.IOException;
import java.util.ArrayList;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import edu.npu.GraphTransform.GraphSlim;
import edu.npu.GraphTransform.GraphSlim.GraphSlimMapper;
import edu.npu.GraphTransform.GraphSlim.GraphSlimReducer;

public class TwoHopRedundanceDriver {
	public static int HOP = 2;
	final static String SEPARATOR = "\t";
	public static final String USERNAME = "locker";
	public static final String PATH_PREFIX = "hdfs://locker:9000/user/" + USERNAME + "/";
	//计算两跳join后所有的数据
	public static class TwoHopRedundanceMapper extends Mapper<LongWritable, Text, Text, Text> {
		@Override
		protected void map(LongWritable key, Text value, Context context)
				throws IOException, InterruptedException {
			String line = value.toString();
			String[] splits = line.split(SEPARATOR);
			String[] kParts = splits[0].split("#");
			String[] vParts = splits[1].split("#");
			//如果两节点不属于同一块,则是需要连接的
			if(!kParts[1].equals(vParts[1])) {
				//由k指向v的边，属于k部分
				context.write(new Text(vParts[0]), //被指向
						new Text(kParts[1] + SEPARATOR + kParts[0]));//部分+源节点
				//由v指向k的边，属于v部分
				context.write(new Text(kParts[0]),// 被指向
						new Text(vParts[1] + SEPARATOR + vParts[0]));//部分+源节点
			} else {//属于同一块
				context.write(new Text(kParts[0]), new Text(vParts[0]));//正向
				context.write(new Text(vParts[0]), new Text(kParts[0]));//反向
			}
		}
	}
	public static class TwoHopRedundanceReducer extends Reducer<Text, Text, Text, Text> {
		@Override
		protected void reduce(Text key, Iterable<Text> values, Context context)
				throws IOException, InterruptedException {
			List<Integer> block = new ArrayList<Integer>();
			List<Long> node = new ArrayList<Long>();
			for(Text value : values) {
				//如果是被连接的边
				if(value.toString().split(SEPARATOR).length == 1)
					node.add(Long.parseLong(value.toString()));
				else {//否则是需要连接的边
					int b = Integer.parseInt(value.toString().split(SEPARATOR)[0]);
					if(!block.contains(b))
						block.add(b);
				}
			}
			for(int i = 0; i < block.size(); i++) {//每个block
				for(int j = 0; j < node.size(); j++) {//每个node
					context.write(new Text(block.get(i) + ""), 
							new Text(key + SEPARATOR + node.get(j)));
				}
			}
		}
	}
	
	public static void main(String[] args) throws IOException, InterruptedException, ClassNotFoundException {
		Configuration conf = new Configuration();
		Job job = new Job(conf, "twoHop");
		job.setJarByClass(TwoHopRedundanceDriver.class);
		job.setMapperClass(TwoHopRedundanceDriver.TwoHopRedundanceMapper.class);
		job.setReducerClass(TwoHopRedundanceDriver.TwoHopRedundanceReducer.class);
		job.setOutputKeyClass(Text.class);
		job.setOutputValueClass(Text.class);
		job.setNumReduceTasks(2);
//		FileInputFormat.addInputPath(job, new Path(PATH_PREFIX + "oneHop"));
		FileInputFormat.addInputPath(job, new Path(PATH_PREFIX + "graph_part"));
		FileOutputFormat.setOutputPath(job, new Path(PATH_PREFIX + "twoHop"));
		job.waitForCompletion(true);
		
		Job slim = new Job(conf, "twoSlim");
		slim.setJarByClass(GraphSlim.class);
		slim.setMapperClass(GraphSlimMapper.class);
		slim.setReducerClass(GraphSlimReducer.class);
		slim.setOutputKeyClass(Text.class);
		slim.setOutputValueClass(Text.class);
		slim.setNumReduceTasks(2);
//		不用加oneHop，因为oneHop里面连接的都是不同块的，而twoHop都是相同块的，肯定不重合
//		FileInputFormat.addInputPath(slim, new Path(PATH_PREFIX + "oneHop"));
		FileInputFormat.addInputPath(slim, new Path(PATH_PREFIX + "twoHop"));
		FileOutputFormat.setOutputPath(slim, new Path(PATH_PREFIX + "twoHop_slim"));
		slim.waitForCompletion(true);
	}
}
