package com.knight.hadoop.day09.joinMR;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.commons.beanutils.BeanUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import com.knight.hadoop.day08.flow.sort.FlowModel;
import com.knight.hadoop.day09.joinMR.MRJoin.JoinMapper.JoinReducer;

/**
 * mapReduce 进行join运算
 *
 *需求：
 *订单数据表t_order：
	id		date		pid	  amount
	1001	20150710	P0001	2
	1002	20150710	P0001	3
	1002	20150710	P0002	3

商品信息表t_product
	id		pname	category_id	price
	P0001	小米5		1000	     2
	P0002	锤子T1	1000	     3

假如数据量巨大，两表的数据是以文件的形式存储在HDFS中，需要用mapreduce程序来实现以下SQL查询运算： 
	select  a.id,a.date,b.pname,b.category_id,b.price 
	from t_order a join t_product b on a.pid = b.id
 */
public class MRJoin {
	/**
	 * Implementation outline:
	 * 1. Export both tables as flat files, order.data and product.data.
	 * 2. In the map phase, decide whether the current line belongs to the
	 *    order file or the product file by inspecting the input file name.
	 * 3. Emit records keyed by pid, so the reducer receives per product id:
	 *    P0001 {1001 20150710 P0001 2, 1002 20150710 P0001 3, P0001 xxx 1000 2}
	 * 4. In the reduce phase, separate the single product record from the
	 *    order records (via the flag field) and emit one joined bean per order.
	 */
	
	static class JoinMapper extends Mapper<LongWritable, Text, Text, InforBean>{
		
		// Reused across map() calls. Hadoop serializes the value when
		// context.write() is invoked, so mutating one instance per record is safe.
		InforBean bean = new InforBean();
		Text outPutText = new Text();
		
		/**
		 * Parses one comma-separated line from either the order file or the
		 * product file and emits it keyed by product id (pid).
		 * Flag "0" marks an order record, flag "1" a product record.
		 */
		@Override
		protected void map(LongWritable key, Text value, Context context)
				throws IOException, InterruptedException {
			
			// Identify the source table by the name of the file this split came from.
			FileSplit fileSplit = (FileSplit) context.getInputSplit();
			String fileName = fileSplit.getPath().getName();
			
			String line = value.toString();
			// Robustness: skip blank lines instead of failing the whole task
			// with an ArrayIndexOutOfBoundsException below.
			if (line.trim().isEmpty()) {
				return;
			}
			// Records are assumed to be comma-separated.
			String[] values = line.split(",");
			
			String pid = "";
			
			if(fileName.startsWith("order")){
				// Order file (assumed to be named with an "order" prefix):
				// columns are id, date, pid, amount.
				int orderId = Integer.parseInt(values[0]);
				String date = values[1];
				pid = values[2];
				float amount = Float.parseFloat(values[3]);
				
				bean.set(orderId, date, pid, amount, "", 0, 0,"0");
			}else{
				// Product file: columns are id (pid), pname, category_id, price.
				pid = values[0];
				String pname = values[1];
				int category_id = Integer.parseInt(values[2]);
				float price = Float.parseFloat(values[3]);
				bean.set(0, "", pid,0, pname, category_id, price,"1");
			}
			outPutText.set(pid);
			context.write(outPutText, bean);
		}
		
		static class JoinReducer extends Reducer<Text, InforBean, InforBean, NullWritable>{
			/**
			 * Joins the (at most one) product record with all order records
			 * sharing the same pid and writes one enriched bean per order.
			 *
			 * Hadoop reuses the value instance while iterating, so every
			 * record that must outlive the loop is deep-copied first.
			 */
			@Override
			protected void reduce(Text key, Iterable<InforBean> values,Context context)
					throws IOException, InterruptedException {
				
				List<InforBean> orders = new ArrayList<InforBean>();
				
				InforBean pro = new InforBean();
				
				for(InforBean bean:values){
					try {
						if("0".equals(bean.getFlag())){
							// Order record: copy it out of the framework's reused instance.
							InforBean b = new InforBean();
							BeanUtils.copyProperties(b, bean);
							orders.add(b);
						}else{
							// Product record: keep its fields for the join below.
							BeanUtils.copyProperties(pro, bean);
						}
					} catch (Exception e) {
						// Fail the task rather than silently emitting a
						// half-copied record (original swallowed this and continued).
						throw new IOException("Failed to copy bean properties for key " + key, e);
					}
				}
				
				// Enrich each order with the product fields and emit it.
				// NOTE(review): if no product record exists for this pid, orders
				// are still emitted with empty product fields — confirm this
				// left-join-like behavior is intended.
				for(InforBean order:orders){
					order.setPname(pro.getPname());
					order.setCategory_id(pro.getCategory_id());
					order.setPrice(pro.getPrice());
					context.write(order, NullWritable.get());
				}
				
			}
		}
	}
	
	
	/**
	 * Configures and submits the join job.
	 *
	 * @param args args[0] = input directory, args[1] = output directory
	 */
	public static void main(String[] args) throws Exception {
		
		// Robustness: fail fast with a usage message instead of an
		// ArrayIndexOutOfBoundsException on the path arguments below.
		if (args.length < 2) {
			System.err.println("Usage: MRJoin <input path> <output path>");
			System.exit(2);
		}
		
		Configuration configuration = new Configuration();
		// Workaround kept for reference:
		/**
		 * Exception in thread "main" java.io.IOException: No FileSystem for scheme: hdfs
		 * configuration.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem");
		 */
		// Cluster configuration (uncomment to run against a remote HDFS/YARN):
		/*configuration.set("fs.defaultFS", "hdfs://hadoop4:9000");
		configuration.set("mapreduce.framework.name", "yarn");
		configuration.set("yarn.resourcemanager.hostname", "hadoop4");*/
		
		Job job = Job.getInstance(configuration);

		// Locate the jar containing this job's classes.
		//job.setJar("C:/Users/YQ/Desktop/sort.jar");
		job.setJarByClass(MRJoin.class);

		// Mapper / reducer wiring.
		job.setMapperClass(JoinMapper.class);
		job.setReducerClass(JoinReducer.class);
		
		// Custom partitioner / reduce-task count (disabled):
		//job.setPartitionerClass(ProvincePartitioner.class);
		//job.setNumReduceTasks(5);
		
		
		// Mapper output key/value types.
		job.setMapOutputKeyClass(Text.class);
		job.setMapOutputValueClass(InforBean.class);

		// Final output key/value types.
		job.setOutputKeyClass(InforBean.class);
		job.setOutputValueClass(NullWritable.class);

		// Input and output directories.
		FileInputFormat.setInputPaths(job, new Path(args[0]));
		// NOTE: the output directory must not already exist or the job fails.
		FileOutputFormat.setOutputPath(job, new Path(args[1]));

		// job.submit() would fire-and-forget; waitForCompletion blocks and
		// reports progress, so the exit code can reflect the job's outcome.
		boolean res = job.waitForCompletion(true);
		System.exit(res ? 0 : 1);
	}
}
