package com.thp.bigdata.rjon.station;

import java.io.IOException;
import java.util.Arrays;
import java.util.Iterator;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Partitioner;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.MultipleInputs;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.junit.Test;

public class JoinRecordWithStationName extends Configured implements Tool{

	
	/**
	 * Mapper for the station-metadata file. Emits each station id tagged
	 * with "0" so that, after the secondary sort, the station name reaches
	 * the reducer before any weather records ("0" sorts before "1").
	 */
	public static class JoinStationMapper extends Mapper<LongWritable, Text, TextPair, Text> {
		@Override
		protected void map(LongWritable key, Text value, Context context)
				throws IOException, InterruptedException {
			// Split on runs of whitespace (spaces, tabs, line breaks).
			String[] fields = value.toString().split("\\s+");
			if (fields.length == 2) {
				// fields[0] = station id, fields[1] = station name.
				// Note it is the station NAME that becomes the map value.
				context.write(new TextPair(fields[0], "0"), new Text(fields[1]));
			}
		}
	}
	
	/**
	 * Mapper for the weather-record file. Emits each station id tagged
	 * with "1" so records sort after the station name and reach the
	 * reducer second.
	 */
	public static class JoinRecordMapper extends Mapper<LongWritable, Text, TextPair, Text> {
		@Override
		protected void map(LongWritable key, Text value, Context context)
				throws IOException, InterruptedException {
			// Parse one weather record: id, timestamp, reading.
			String[] fields = value.toString().split("\\s+");
			if (fields.length == 3) {
				// Value carries the record payload (timestamp + reading).
				context.write(new TextPair(fields[0], "1"), new Text(fields[1] + "\t" + fields[2]));
			}
		}
	}
	
	/**
	 * Join reducer. The grouping comparator delivers every value for one
	 * station id in a single reduce call, and the secondary sort on the
	 * tag ("0" before "1") puts the station name first in the iterator.
	 * Each subsequent weather record is emitted joined with that name.
	 */
	public static class JoinReducer extends Reducer<TextPair, Text, Text, Text> {
		@Override
		protected void reduce(TextPair key, Iterable<Text> values, Context context)
				throws IOException, InterruptedException {
			Iterator<Text> iterator = values.iterator();
			// First value is the station name (tag "0" sorts before "1").
			// Copy it: Hadoop reuses the Text instance across iterations.
			// NOTE(review): if a station id appears only in the record file,
			// the first value would actually be a weather record and the
			// join output would be wrong — confirm every id has a station
			// entry, or guard on the key's tag here.
			Text stationName = new Text(iterator.next());
			while (iterator.hasNext()) {
				// Each remaining value is one weather record for this station.
				Text record = iterator.next();
				Text outValue = new Text(stationName.toString() + "\t" + record.toString());
				// key.getFirst() is the station id, which keys the output line.
				context.write(key.getFirst(), outValue);
			}
		}
	}
	
	
	/**
	 * Custom partitioner that routes by station id only (the first half of
	 * the composite key) rather than the whole key, so both tagged keys
	 * for one station land in the same reducer. Uses the same
	 * hash-and-mask scheme as Hadoop's default HashPartitioner; the number
	 * of partitions matches the configured reduce-task count.
	 */
	static class KeyPartitioner extends Partitioner<TextPair, Text> {
		@Override
		public int getPartition(TextPair key, Text value, int numPartitions) {
			int hash = key.getFirst().hashCode();
			// Clear the sign bit, then fold into [0, numPartitions).
			return (hash & Integer.MAX_VALUE) % numPartitions;
		}
	}
	
	
	/**
	 * Grouping comparator: two composite keys belong to the same reduce
	 * group whenever their station ids (the first field) are equal; the
	 * sort tag is ignored for grouping.
	 */
	public static class GroupingComparator extends WritableComparator {
		/**
		 * Registers the key class and asks the framework to instantiate
		 * keys reflectively for deserialization.
		 */
		public GroupingComparator() {
			super(TextPair.class, true);
		}

		@Override
		public int compare(WritableComparable wc1, WritableComparable wc2) {
			Text left = ((TextPair) wc1).getFirst();
			Text right = ((TextPair) wc2).getFirst();
			return left.compareTo(right);
		}
	}
	
	
	 
	/**
	 * Configures and submits the join job.
	 *
	 * @param args [0] weather-record input, [1] station input, [2] output dir
	 * @return 0 on success, 1 on failure
	 */
	@Override
	public int run(String[] args) throws Exception {
		// BUGFIX: use the Configuration injected by ToolRunner (via
		// Configured.setConf) instead of discarding it with `new
		// Configuration()`, so -D command-line options take effect.
		Configuration conf = getConf();

		// Demo setup: run the job locally against the local file system.
		conf.set("mapreduce.framework.name", "local");
		conf.set("fs.defaultFS", "file:///");

		Job job = Job.getInstance(conf);
		job.setJar("f:/rjoin.jar");

		Path recordInputPath = new Path(args[0]);   // weather-record input
		Path stationInputPath = new Path(args[1]);  // station-metadata input
		Path outputPath = new Path(args[2]);        // job output directory

		// Delete any pre-existing output path (file OR directory) so the
		// job does not fail with "output directory already exists".
		// BUGFIX: was isDirectory(), which missed a plain-file collision.
		FileSystem fs = outputPath.getFileSystem(conf);
		if (fs.exists(outputPath)) {
			fs.delete(outputPath, true);
		}

		// Two input paths, each parsed by its own mapper.
		MultipleInputs.addInputPath(job, recordInputPath, TextInputFormat.class, JoinRecordMapper.class);
		MultipleInputs.addInputPath(job, stationInputPath, TextInputFormat.class, JoinStationMapper.class);

		FileOutputFormat.setOutputPath(job, outputPath);

		job.setReducerClass(JoinReducer.class);

		// Partition by station id only; two reducers -> two output files.
		job.setPartitionerClass(KeyPartitioner.class);
		job.setNumReduceTasks(2);
		// Group reduce calls by station id, ignoring the sort tag.
		job.setGroupingComparatorClass(GroupingComparator.class);

		job.setMapOutputKeyClass(TextPair.class);
		job.setMapOutputValueClass(Text.class);

		job.setOutputKeyClass(Text.class);
		// BUGFIX: the second call was a duplicated setOutputKeyClass.
		job.setOutputValueClass(Text.class);

		return job.waitForCompletion(true) ? 0 : 1;
	}
	
	
	
	
	
	/**
	 * Entry point: runs the join with hard-coded demo paths
	 * (record file, station file, output directory).
	 */
	public static void main(String[] args) throws Exception {
		String[] jobArgs = {
				"f:/station/input/record.txt",
				"f:/station/input/station.txt",
				"f:/station/output"
		};
		int exitCode = ToolRunner.run(new JoinRecordWithStationName(), jobArgs);
		System.out.println("----------------------------");
		System.out.println(exitCode);
		System.out.println("----------------------------");
		System.exit(exitCode);
	}
	
	
	
	
	
	
	/**
	 * Scratch test: split(regex, 2) cuts only at the FIRST whitespace run,
	 * leaving everything after it in a single trailing field.
	 */
	@Test
	public void test01() {
		String sample = "012650-99999    194903241200    111";
		String[] parts = sample.split("\\s+", 2);
		System.out.println(Arrays.toString(parts));
	}













	
	
	
	
}
