package job06;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Mapper.Context;
import org.apache.hadoop.mapreduce.Partitioner;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

import job01.TProperties;

public class job06 {
	// Description：通过map端join处理，实现按地址库优先级匹配带解析数据
	public static void main(String[] args) {
		try {
			//创建配置信息
			Configuration conf = new Configuration();
			//map内存设置
			conf.set("mapreduce.map.memory.mb", "5120");
			conf.set("mapreduce.reduce.memory.mb", "5120");
			//不检查超时，由于集群环境不稳定有超时现象，所以设置为不检查，但需要保证程序无死循环
			conf.set("mapred.task.timeout", "0");
			//集群机器少的时候可以设置：客户端在写失败的时候，是否使用更换策略
			conf.set("dfs.client.block.write.replace-datanode-on-failure.policy","NEVER"); 
			conf.set("dfs.client.block.write.replace-datanode-on-failure.enable","true"); 
			// 获取命令行的参数
			String[] otherArgs = {"F://sparkData//configData//t_dx_product_msg_addr.txt", "F://sparkData//test//02.DxFileMatch//part-m-00*", "F://sparkData//test//06.DxProMatch"};
			//String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
			// 创建任务
			Job job = new Job(conf, "Dx_ProMatch");
			// 打成jar包运行，这句话是关键
			job.setJarByClass(job06.class);
			// 自定义Mapper类和设置map函数输出数据的key和value的类型
			job.setMapperClass(DxProMapper.class);
			job.setReducerClass(DxProReducer.class);
			 //设置map输出的key类型
			job.setMapOutputKeyClass(Text.class);
			//设置map输出的value类型
			job.setMapOutputValueClass(Text.class);
			//设置输出的key类型
			job.setOutputKeyClass(NullWritable.class);
			//设置输出的value类型
			job.setOutputValueClass(Text.class);
			// 分组函数
			job.setPartitionerClass(KeyPartitioner.class);
			// 分组函数
			job.setGroupingComparatorClass(KeyGroupingComparator.class);
			//输入输出路径
			FileInputFormat.addInputPaths(job, otherArgs[1]);
			FileInputFormat.addInputPaths(job, otherArgs[0]);
			FileOutputFormat.setOutputPath(job, new Path(otherArgs[2]));
			//提交作业 判断退出条件（0正常退出，1非正常退出）
			System.exit(job.waitForCompletion(true) ? 0 : 1);
		} catch (Exception e) {
			e.printStackTrace();
		}
	}
	public static class DxProMapper extends Mapper<LongWritable, Text, Text, Text>{
		private Text okey = new Text();
		private Text ovalue = new Text();
		
		public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
			// hdfs系统
			FileSplit fileSplit = (FileSplit) context.getInputSplit();
			// 全路径
			String path = fileSplit.getPath().toString();
			String[] values = value.toString().split(TProperties.getValue("fileoutsplit"), -1);
			StringBuffer sb = new StringBuffer();
			// 文件名称判断数据类型：配置数据产品库，行为匹配数据
			if (path.indexOf(TProperties.getValue("proaddress")) >= 0){
				okey = new Text(values[0] + ",1");
				// 数据标识 区分数据
				sb.append("1#" + values[1])
					.append(TProperties.getValue("outfilesplit")).append(values[2])
					.append(TProperties.getValue("outfilesplit")).append(values[3])
					.append(TProperties.getValue("outfilesplit")).append(values[4])
					.append(TProperties.getValue("outfilesplit")).append(values[5])
					.append(TProperties.getValue("outfilesplit")).append(values[6])
					.append(TProperties.getValue("outfilesplit")).append(values[7])
					.append(TProperties.getValue("outfilesplit")).append(values[8]);
				ovalue = new Text(sb.toString());
				context.write(okey, ovalue);
			} else {
				// 处理任务2的输出数据：行为匹配数据 拿取产品类型数据
				if("1".equals(values[2])) {
					//行为id+数据类型标识
					okey = new Text(values[0] + ",2");
					
					//用户id
					sb.append(values[1]);
					ovalue = new Text(sb.toString());
//					System.out.println(okey.toString());
//					System.out.println(ovalue.toString());
					context.write(okey, ovalue);
					// 行为id,2,用户id
				}
			}
		}
	}
	
	public static class DxProReducer extends Reducer<Text, Text, NullWritable, Text> {
		private Text result = new Text();
		
		public void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException{
			String promatch = "";
			int flag = 1;
			for (Text val : values) {
				// 判断是否首条数据
				if (flag == 1) {
					promatch = val.toString();
					if (promatch.indexOf("1#") == -1) {
						return;
					}
					flag = 2;
				} else {
					// System.out.println(val.toString());
					result = new Text(val.toString() 
							+ TProperties.getValue("outfilesplit")
							+ promatch.substring(2)
							+ TProperties.getValue("outfilesplit")
							+ key.toString().split(",")[0]);
					// System.out.println(result.toString());
					context.write(NullWritable.get(), result);
				}
			}
		}
	}
	
	//更改分区函数类 用key中的行为id作为分区数据
	// Partitioner: route by behavior id only (the key prefix before ","), so
	// both tagged variants of the same id reach the same reducer.
	public static class KeyPartitioner extends Partitioner<Text, Text> {
		@Override
		public int getPartition(Text key, Text value, int numPartitions) {
			// Mask the sign bit instead of Math.abs(hash * 127): the
			// multiplication can overflow to Integer.MIN_VALUE, whose abs is
			// still negative, producing an invalid negative partition number.
			int hash = key.toString().split(",")[0].hashCode();
			return (hash & Integer.MAX_VALUE) % numPartitions;
		}
	}
	//更改分区函数类 相同分组数据分配到同一个reducer
	// Grouping comparator: two keys belong to the same reduce group when their
	// behavior ids (text before the first ",") are equal, regardless of the
	// source tag the mapper appended.
	public static class KeyGroupingComparator extends WritableComparator {
		protected KeyGroupingComparator() {
			// true: instantiate key objects so compare() receives Text instances
			super(Text.class, true);
		}
		@Override
		public int compare(WritableComparable w1, WritableComparable w2) {
			String l = ((Text) w1).toString().split(",")[0];
			String r = ((Text) w2).toString().split(",")[0];
			// Return a proper total ordering. The original "equals ? 0 : 1" is
			// asymmetric (compare(a,b) and compare(b,a) both 1 for unequal keys),
			// which violates the comparator contract the shuffle's merge relies on.
			return l.compareTo(r);
		}
	}
}
