package com.qingguo.MapReduce;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Vector;

/**
 * Reduce-side join of two datasets on a shared city id.
 *
 * <p>Inputs (two paths, distinguished by substring of the file path):
 * <ul>
 *   <li>{@code dianxin_data}: tab-separated telecom records; field 2 is the city id,
 *       fields 1 and 4 are carried through to the output.</li>
 *   <li>{@code city_id}: comma-separated records; field 0 is the city id,
 *       field 1 is the city name.</li>
 * </ul>
 * The mapper tags each value with its source; the reducer groups by city id and
 * emits the cross product of the two sides (an inner join).
 */
public class join {

    /**
     * Mapper: keys every record by its city id and tags the value with its
     * source dataset ("dianxin" or "city") so the reducer can tell them apart.
     */
    public static class Mymap extends Mapper<LongWritable, Text, Text, Text> {
        // Reused output writables — Hadoop serializes on write(), so reusing
        // them avoids allocating two objects per input record.
        private final Text outKey = new Text();
        private final Text outValue = new Text();

        @Override
        protected void map(LongWritable k1, Text v1, Context context)
                throws IOException, InterruptedException {
            // The job is fed from two input paths; the split's file path tells
            // us which dataset the current record belongs to.
            FileSplit inputSplit = (FileSplit) context.getInputSplit();
            String path = inputSplit.getPath().toString();

            if (path.contains("dianxin_data")) {
                // Telecom record: tab-separated; city id in field 2.
                String[] split = v1.toString().split("\t");
                outKey.set(split[2]);
                // Tag with "dianxin"; the reducer strips this 7-char prefix.
                outValue.set("dianxin" + split[1] + "**" + split[4]);
                context.write(outKey, outValue);
            }
            if (path.contains("city_id")) {
                // City record: comma-separated; city id in field 0, name in field 1.
                String[] split = v1.toString().split(",");
                outKey.set(split[0]);
                // Tag with "city"; the reducer strips this 4-char prefix.
                outValue.set("city" + split[1]);
                context.write(outKey, outValue);
            }
        }
    }

    //*********************************************************************************************

    /**
     * Reducer: for each city id, buffers the values from both sources
     * (identified by their tag) and emits every (dianxin, city) pairing.
     */
    public static class Myreduce extends Reducer<Text, Text, Text, Text> {
        @Override
        protected void reduce(Text k2, Iterable<Text> v2s, Context context)
                throws IOException, InterruptedException {
            // Collect each side of the join separately, with the tag stripped.
            // Plain ArrayLists replace the raw, synchronized Vectors: this runs
            // single-threaded per reduce call, and typed lists avoid unchecked
            // warnings and the Object casts below.
            List<String> dianxin = new ArrayList<>();
            List<String> city = new ArrayList<>();
            for (Text text : v2s) {
                String value = text.toString();
                if (value.startsWith("dianxin")) {
                    dianxin.add(value.substring(7)); // drop "dianxin" tag
                } else if (value.startsWith("city")) {
                    city.add(value.substring(4)); // drop "city" tag
                }
            }
            // Cross product = inner join on the city id. k2 can be written
            // directly (the framework serializes it immediately).
            // NOTE(review): the two sides are concatenated with no separator,
            // matching the original output format — confirm a delimiter (e.g.
            // "\t") isn't expected between the telecom fields and the city name.
            for (String d : dianxin) {
                for (String c : city) {
                    context.write(k2, new Text(d + c));
                }
            }
        }
    }

    //*****************************************************************************

    /**
     * Job driver: wires the mapper and reducer together and submits the job.
     *
     * @param args args[0] = dianxin_data input path, args[1] = city_id input
     *             path, args[2] = output path (must not already exist on HDFS)
     */
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        // Fail fast with a usage message instead of an ArrayIndexOutOfBoundsException.
        if (args.length < 3) {
            System.err.println("Usage: join <dianxin_data input> <city_id input> <output>");
            System.exit(2);
        }
        // Load the Hadoop configuration and create the named job.
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, join.class.getName());
        // Class used to locate the containing jar when the job is distributed.
        job.setJarByClass(join.class);
        // Both input paths feed the same mapper; it distinguishes them by path.
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileInputFormat.addInputPath(job, new Path(args[1]));
        job.setMapperClass(Mymap.class);
        // Map output key/value serialization types.
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);
        job.setReducerClass(Myreduce.class);
        // Final (reduce) output key/value serialization types.
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);
        FileOutputFormat.setOutputPath(job, new Path(args[2]));
        // Submit, wait for completion (verbose logging), and propagate the
        // job's success/failure to the shell — the original always exited 0.
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }

}

