package com.hadoop.mapreduce.tablejoin.reducejoin;

import java.io.IOException;
import java.util.ArrayList;

import org.apache.commons.io.output.NullWriter;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.ReflectionUtils;

/**
 * Reduce-side join of the two tables below on {@code pid}.
 *
 * Order table t_order:
 * id     date      pid    amount
 * 1001   20150710  P0001  2
 * 1002   20150710  P0001  3
 * 1002   20150710  P0002  3
 *
 * Product table t_product:
 * id     name    category_id  price
 * P0001  小米5    C01          2
 * P0002  锤子T1   C01          3
 */
public class ReduceOrderJoin {
    // flag=1表示这个对象是封装订单表记录
    // flag=0表示这个对象是封装产品信息记录
    static class ReduceOrderJoinMapper extends Mapper<IntWritable,Text,Text,InfoBean>{
    Text text = new Text();
    InfoBean infoBean=new InfoBean();
        @Override
        protected void map(IntWritable key, Text value, Context context) throws IOException, InterruptedException {
            FileSplit fileSplit = (FileSplit) context.getInputSplit();
            String name = fileSplit.getPath().getName();
            String pid="";
            // 通过文件名判断是哪种数据
            if(name.startsWith("order")){
                String[] split = value.toString().split(" ");
                text.set(split[2]);
                infoBean.set(Integer.parseInt(split[0]),split[1],split[2],split[3],"","",0.0,1);
            }
            else{
                String[] split = value.toString().split(" ");
                text.set(split[0]);
                infoBean.set(0,"","","",split[1],split[2],Double.parseDouble(split[3]),0);
            }
            context.write(text,infoBean);
        }
    }

    static class ReduceOrderJoinReducer extends Reducer<Text,InfoBean,InfoBean,NullWriter>{
        InfoBean infoBean=new InfoBean();
        @Override
        protected void reduce(Text key, Iterable<InfoBean> values, Context context) throws IOException, InterruptedException {
            ArrayList<InfoBean> infoBeanList = new ArrayList<InfoBean>();
            for (InfoBean info:
                 values) {
                if(info.getFlag()==0)
                    infoBean=info;
                else
                    infoBeanList.add(info);
            }
            // 拼接两类数据形成最终结果
            for (InfoBean info :
                    infoBeanList) {
                info.setName(infoBean.getName());
                info.setCategory_id(infoBean.getCategory_id());
                info.setPrice(infoBean.getPrice());

                context.write(info,NullWriter.NULL_WRITER);
            }
        }
    }

    public static void main(String[]  args) throws IOException, ClassNotFoundException, InterruptedException {
        Configuration conf=new Configuration();
        //集群模式
        conf.set("mapreduce.framework.name", "yarn");
		conf.set("yarn.resoucemanager.hostname", "192.168.186.148");
        conf.set("yarn.resourcemanager.address", "192.168.186:8032");
        conf.set("fs.defaultFS","hdfs://192.168.186.148:9000");
        Job job = Job.getInstance(conf);
        job.setJarByClass(ReduceOrderJoin.class);

        job.setMapperClass(ReduceOrderJoinMapper.class);
        job.setReducerClass(ReduceOrderJoinReducer.class);

        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(InfoBean.class);

        job.setOutputKeyClass(InfoBean.class);
        job.setOutputValueClass(NullWriter.class);

        FileInputFormat.setInputPaths(job,new Path("/order/in/"));
        FileOutputFormat.setOutputPath(job,new Path("/order/out/merge.txt"));

        job.waitForCompletion(true);
    }


}
