package com.shujia.mr.test2;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

/*
    Case 2: join city ids to city names.
    Inputs:
      city_id.txt   — city dimension table, lines of "cityId,cityName"
      part-r-00000  — output of the previous job, lines of "phoneNum-cityId\tavgStayTime"
 */
/**
 * Map side of a reduce-side join keyed on city id.
 * <p>
 * Records from the city table ({@code city_id.txt}, "cityId,cityName") are
 * emitted as (cityId, "$cityName"); records from the stats table
 * ({@code part-r-00000}, "phoneNum-cityId\tavgStayTime") are emitted as
 * (cityId, "#phoneNum-avgStayTime"). The "$" / "#" prefixes let the reducer
 * tell the two sides of the join apart.
 */
class DianXinMapper2 extends Mapper<LongWritable, Text, Text, Text> {
    // Reused output writables — allocating a fresh Text per record is a
    // well-known MapReduce anti-pattern (needless GC pressure).
    private final Text outKey = new Text();
    private final Text outValue = new Text();

    @Override
    protected void map(LongWritable key, Text value, Mapper<LongWritable, Text, Text, Text>.Context context) throws IOException, InterruptedException {
        // The source file name decides how this line must be parsed.
        FileSplit inputSplit = (FileSplit) context.getInputSplit();
        String fileName = inputSplit.getPath().getName();

        String line = value.toString();

        if (fileName.startsWith("city")) {
            // City dimension table: "cityId,cityName".
            String[] fields = line.split(",");
            if (fields.length < 2) {
                return; // skip malformed lines instead of throwing ArrayIndexOutOfBoundsException
            }
            outKey.set(fields[0]);
            outValue.set("$" + fields[1]);
            context.write(outKey, outValue);
        } else if (fileName.startsWith("part")) {
            // Stats table: "phoneNum-cityId\tavgStayTime".
            String[] fields = line.split("\t");
            if (fields.length < 2) {
                return; // no value column — skip
            }
            String[] keyParts = fields[0].split("-");
            if (keyParts.length < 2) {
                return; // key is not "phoneNum-cityId" — skip
            }
            outKey.set(keyParts[1]); // cityId is the join key
            outValue.set("#" + keyParts[0] + "-" + fields[1]);
            context.write(outKey, outValue);
        }
    }
}

/**
 * Reduce side of the join: for each city id, buffers all "#phoneNum-avgStayTime"
 * records until the single "$cityName" record is seen, then emits
 * (phoneNum\tcityName, avgStayTime) per phone.
 * <p>
 * Buffering is required because value order within a key is not guaranteed,
 * so the city-name record may arrive after the phone records.
 */
class DianXinReducer2 extends Reducer<Text, Text, Text, LongWritable> {
    @Override
    protected void reduce(Text key, Iterable<Text> values, Reducer<Text, Text, Text, LongWritable>.Context context) throws IOException, InterruptedException {
        String cityName = null;
        List<String> persons = new ArrayList<>();
        for (Text value : values) {
            String info = value.toString();
            if (info.startsWith("$")) {
                cityName = info.substring(1);
            } else if (info.startsWith("#")) {
                persons.add(info.substring(1));
            }
        }

        // Inner-join semantics: a city id with no match in city_id.txt has no
        // name to report. The original wrote the literal string "null" here.
        if (cityName == null) {
            return;
        }

        for (String person : persons) {
            String[] parts = person.split("-");
            if (parts.length < 2) {
                continue; // malformed buffered record — skip rather than crash
            }
            long avgStayTime;
            try {
                avgStayTime = Long.parseLong(parts[1]);
            } catch (NumberFormatException ignored) {
                continue; // non-numeric stay time would otherwise fail the whole task
            }
            context.write(new Text(parts[0] + "\t" + cityName), new LongWritable(avgStayTime));
        }
    }
}

/**
 * Driver: configures and submits the city-name join job to the cluster.
 * Usage: DianXinDemo2 &lt;input path&gt; &lt;output path&gt;
 */
public class DianXinDemo2 {
    public static void main(String[] args) throws Exception {
        // Fail fast with a usage message instead of an
        // ArrayIndexOutOfBoundsException when paths are missing.
        if (args.length < 2) {
            System.err.println("Usage: DianXinDemo2 <input path> <output path>");
            System.exit(2);
        }

        // Hadoop configuration for this job.
        Configuration conf = new Configuration();
        // The MapReduce job that will be submitted to YARN for execution.
        Job job = Job.getInstance(conf);
        // Human-readable job name shown in the YARN/JobHistory UI.
        job.setJobName("27期 求电信用户平均停留时长关联城市案例");

        // Number of reducers; defaults to 1 (one output file) when unset.
//        job.setNumReduceTasks(2);

        // Jar containing the job classes.
        job.setJarByClass(DianXinDemo2.class);
        job.setMapperClass(DianXinMapper2.class);
        job.setReducerClass(DianXinReducer2.class);

        // Map output key-value types.
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);

        // Reduce (final) output key-value types.
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(LongWritable.class);

        // HDFS input path, passed as the first program argument.
        FileInputFormat.addInputPath(job, new Path(args[0]));
        // Output directory (created by the framework; must not pre-exist).
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        // Submit and block until completion.
        boolean flag = job.waitForCompletion(true);
        System.out.println(flag ? "===============该案例执行成功 ！！==============" : "==========执行失败！！===========");
        // Propagate the job status as the process exit code so schedulers and
        // shell scripts can detect failure — the original always exited 0.
        System.exit(flag ? 0 : 1);
    }
}
