package com.shujia.airPM25;


import com.shujia.tuijian.TuiJianDemo;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

/*
    part-r-00000:
        date: 20180927-城市编号:1001	#66
        date: 20180927-城市编号:1002	#60
        date: 20180927-城市编号:1003	#63

        1001 #66

    city:
        1001,$万寿西宫,北京
        1002,$定陵,北京
        1003,$东四,北京

    (1001,[#66,$万寿西宫-北京])

    1001-万寿西宫-北京 66


 */
import java.io.IOException;
import java.util.LinkedList;
import java.util.List;

/**
 * Map side of the join: tags each input row with its source table and re-keys it by city id.
 *
 * <p>Two input files share one input directory; the file name decides the parse path:
 * <ul>
 *   <li>{@code city}   rows: {@code id,name,city} — emitted as {@code (id, "#" + name + city)}</li>
 *   <li>{@code part-r} rows: {@code date: yyyyMMdd-城市编号:id\tavg} — emitted as {@code (id, "$" + avg + "\t" + date)}</li>
 * </ul>
 * The {@code #}/{@code $} prefixes let the reducer tell the two tables apart.
 */
class MyMapper3 extends Mapper<LongWritable, Text, Text, Text> {
    @Override
    protected void map(LongWritable key, Text value, Mapper<LongWritable, Text, Text, Text>.Context context) throws IOException, InterruptedException {

        // The current split's file name identifies which "table" this row came from.
        FileSplit fileSplit = (FileSplit) context.getInputSplit();
        String name = fileSplit.getPath().getName();

        String line = value.toString();
        // Discard blank rows.
        if (line == null || line.isEmpty()) return;

        if (name.contains("city")) {
            // city table row: "1001,万寿西宫,北京"
            String[] split = line.split(",");
            // Skip malformed rows rather than killing the task with an
            // ArrayIndexOutOfBoundsException.
            if (split.length < 3) return;
            String id = split[0];
            String city1 = split[1];
            String city2 = split[2];
            // "#" prefix marks the value as coming from the city table.
            context.write(new Text(id), new Text("#" + city1 + city2));
        } else if (name.contains("part-r")) {
            // aggregated PM2.5 row: "date: 20180927-城市编号:1001\t66"
            String[] split = line.split("-");
            if (split.length < 2) return;
            String date = split[0];
            // Hoisted: the original recomputed split[1].split("\\s+") twice.
            String[] rest = split[1].split("\\s+");
            if (rest.length < 2) return;
            String[] idParts = rest[0].split(":");
            if (idParts.length < 2) return;
            String cityId = idParts[1];
            String pm25Avg = rest[1];
            // "$" prefix marks the value as coming from the PM2.5 table.
            context.write(new Text(cityId), new Text("$" + pm25Avg + "\t" + date));
        }
    }
}

/**
 * Reduce side of the join: for one city id, separates the tagged values back into
 * their source tables and emits their Cartesian product.
 *
 * <p>Values prefixed {@code #} came from the city table, values prefixed {@code $}
 * from the PM2.5 table; the prefix is stripped before output. Each output row is
 * {@code key \t cityInfo \t pm25Info}.
 */
class MyReducer3 extends Reducer<Text, Text, Text, Text> {
    @Override
    protected void reduce(Text key, Iterable<Text> values, Reducer<Text, Text, Text, Text>.Context context) throws IOException, InterruptedException {
        // Tagged records, bucketed by origin table.
        List<String> cityRecords = new LinkedList<>();
        List<String> pm25Records = new LinkedList<>();

        // Route each value into its bucket based on the tag, dropping the tag character.
        for (Text text : values) {
            String value = text.toString();
            if (value.startsWith("#")) {
                cityRecords.add(value.substring(1));
            } else if (value.startsWith("$")) {
                pm25Records.add(value.substring(1));
            }
        }

        // Cartesian product: pair every city record with every PM2.5 record for this id.
        for (String cityInfo : cityRecords) {
            for (String pm25Info : pm25Records) {
                context.write(key, new Text(cityInfo + "\t" + pm25Info));
            }
        }
    }
}


/**
 * Driver for the reduce-side join of per-city PM2.5 averages with the city
 * dimension table.
 *
 * <p>Usage: {@code Pm25AndCity2 <input dir> <output dir>} — the input directory
 * must contain both the {@code city} file and the {@code part-r-*} files; the
 * output directory must not already exist.
 */
public class Pm25AndCity2 {
    public static void main(String[] args) throws IOException, InterruptedException, ClassNotFoundException {
        // Fail fast with a usage message instead of an ArrayIndexOutOfBoundsException.
        if (args.length < 2) {
            System.err.println("Usage: Pm25AndCity2 <input path> <output path>");
            System.exit(2);
        }

        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);
        // BUG FIX: was Pm25AndCity.class (a different driver) — setJarByClass must
        // reference THIS class so Hadoop locates and ships the correct jar.
        job.setJarByClass(Pm25AndCity2.class);
        // Single reducer: all keys meet in one place and the join produces one output file.
        job.setNumReduceTasks(1);
        job.setJobName("与city表进行拼接");

        job.setMapperClass(MyMapper3.class);
        job.setReducerClass(MyReducer3.class);

        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);

        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);

        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        // Propagate job success/failure to the shell instead of always exiting 0.
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
