package com.hadoop.mapreduce.tablejoin.mapjoin;

import com.hadoop.mapreduce.tablejoin.reducejoin.InfoBean;
import org.apache.commons.io.output.NullWriter;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.HashMap;

/**
 * Map-side join of the order and product tables on pid.
 *
 * Order table t_order:
 *  id      date        pid     amount
 *  1001    20150710    P0001   2
 *  1002    20150710    P0001   3
 *  1002    20150710    P0002   3
 *
 * Product table t_product:
 *  id      name            category_id     price
 *  P0001   Xiaomi Mi 5     C01             2
 *  P0002   Smartisan T1    C01             3
 */
public class MapOrderJoin {
    /**
     * Map-side join mapper: the product table is small enough to hold in memory,
     * so each map task loads it once in {@link #setup} and joins every order
     * record against it in {@link #map} — no reduce phase is needed.
     *
     * Input key is {@code LongWritable} because the default TextInputFormat
     * supplies byte offsets as keys (the original declared IntWritable, which
     * fails with a ClassCastException at runtime).
     */
    static class MapOrderJoinMapper extends Mapper<LongWritable, Text, InfoBean, NullWritable> {
        // Product table keyed by product id (pid -> product info).
        private final HashMap<String, InfoBean> productMap = new HashMap<String, InfoBean>();

        /**
         * Called once per map task before any map() calls; loads the product
         * table distributed via job.addCacheFile() into {@link #productMap}.
         * The cached file is localized in the task working directory under its
         * base name "pdts.txt" (the original opened "product.txt", which the
         * driver never caches, causing a FileNotFoundException).
         */
        @Override
        protected void setup(Context context) throws IOException, InterruptedException {
            // try-with-resources guarantees the reader is closed even if an
            // exception is thrown mid-read (the original leaked it).
            try (BufferedReader reader = new BufferedReader(new FileReader("pdts.txt"))) {
                String line;
                // Read to end of file, skipping blank lines; the original
                // StringUtils.isNotEmpty check stopped at the first blank line
                // and silently truncated the product table.
                while ((line = reader.readLine()) != null) {
                    if (line.trim().isEmpty()) {
                        continue;
                    }
                    // Expected layout: id name category_id price
                    // NOTE(review): splits on a single space — confirm the real
                    // file is not tab-delimited like the sample in the class javadoc.
                    String[] fields = line.split(" ");
                    InfoBean product = new InfoBean();
                    product.setName(fields[1]);
                    product.setCategory_id(fields[2]);
                    product.setPrice(Double.parseDouble(fields[3]));
                    productMap.put(fields[0], product);
                }
            }
        }

        /**
         * Joins one order line (layout: id date pid amount) with the in-memory
         * product table and emits the combined bean. Orders whose pid has no
         * matching product are skipped (inner-join semantics) instead of
         * throwing a NullPointerException as the original did.
         */
        @Override
        protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
            String[] fields = value.toString().split(" ");
            InfoBean joined = productMap.get(fields[2]);
            if (joined == null) {
                // Unknown product id: drop the unmatched order record.
                return;
            }
            joined.setOrder(Integer.parseInt(fields[0]), fields[1], fields[2], fields[3]);
            context.write(joined, NullWritable.get());
        }
    }


    /**
     * Configures and submits the map-side join job.
     *
     * The product table is shipped to every map task via the distributed cache;
     * the order table is the regular job input. No reducer is required, so the
     * map output is written directly as the job output.
     *
     * @param args unused; input/output paths are currently hard-coded
     */
    public static void main(String[] args) throws IOException, URISyntaxException, ClassNotFoundException, InterruptedException {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);
        job.setJarByClass(MapOrderJoin.class);
        job.setMapperClass(MapOrderJoinMapper.class);
        // With zero reduce tasks, these describe the map output, which is the
        // final job output.
        job.setOutputKeyClass(InfoBean.class);
        // BUG FIX: the original passed commons-io NullWriter.class here; the
        // value type the mapper actually emits is Hadoop's NullWritable.
        job.setOutputValueClass(NullWritable.class);
        FileInputFormat.setInputPaths(job, new Path("D:/srcdata/mapjoininput"));
        FileOutputFormat.setOutputPath(job, new Path("D:/temp/output"));
        // Distributed-cache options (kept for reference):
        //   job.addArchiveToClassPath(archive) - cache a jar on the task classpath
        //   job.addFileToClassPath(file)       - cache a plain file on the task classpath
        //   job.addCacheArchive(uri)           - cache an archive in the task working dir
        //   job.addCacheFile(uri)              - cache a plain file in the task working dir
        // Ship the product table to every map task; it is localized in the
        // task working directory under its base name "pdts.txt".
        job.addCacheFile(new URI("file:/D:/srcdata/mapjoincache/pdts.txt"));
        // Map-side join needs no reduce phase.
        job.setNumReduceTasks(0);
        // Propagate job success/failure to the caller via the process exit code.
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
