package com.etc;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Partitioner;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.MultipleOutputs;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.LazyOutputFormat;
import java.io.File;
import java.io.IOException;

import java.util.HashMap;
import java.util.Map;

/**
 * @Author: zhezhe
 * @Date: 2018/10/3 19:31
 * @Description: MapReduce job that groups flow records by phone number,
 *               partitions them by phone-number prefix, and writes
 *               per-place output files via MultipleOutputs.
 */
public class FlowBlongJob {
    public static class MapFlowBlong extends Mapper<LongWritable, Text, Text, FlowBan1> {

        // Reused output key — avoids allocating a new Text for every input record.
        private final Text outKey = new Text();

        /**
         * Parses one tab-separated flow-log line and emits (phone, FlowBan1).
         * Expected columns: [0]=num, [1]=phone, [3]=ip, [4]=url; the up/down
         * flow counts are the 3rd- and 2nd-from-last columns (counted from the
         * end because records may carry a variable number of middle columns —
         * assumption from the indexing, confirm against the input data).
         *
         * @throws NumberFormatException if a flow column is not a valid integer
         */
        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            String[] fields = value.toString().split("\t");
            String num = fields[0];
            String phone = fields[1];
            String ip = fields[3];
            String url = fields[4];
            // Use parseInt consistently for both columns (the original mixed
            // Integer.valueOf, which boxes, with parseInt).
            int upFlow = Integer.parseInt(fields[fields.length - 3]);
            int downFlow = Integer.parseInt(fields[fields.length - 2]);
            outKey.set(phone);
            context.write(outKey, new FlowBan1(num, phone, ip, url, upFlow, downFlow));
        }
    }


    public static class ProvidePartitioner extends Partitioner<Text, FlowBan1> {

        /** Phone-number prefix -> partition id; unknown prefixes fall into partition 0. */
        private static final Map<String, Integer> PROVIDER_MAP = new HashMap<>();

        static {
            PROVIDER_MAP.put("130", 1);
            PROVIDER_MAP.put("134", 2);
            PROVIDER_MAP.put("135", 3);
            PROVIDER_MAP.put("136", 4);
            PROVIDER_MAP.put("137", 5);
            PROVIDER_MAP.put("138", 6);
            PROVIDER_MAP.put("139", 7);
        }

        /**
         * Routes each map output record to a reducer based on the first three
         * digits of the phone-number key.
         *
         * @param key           the phone number emitted by the mapper
         * @param flowBan1      the record value (unused for partitioning)
         * @param numPartitions the configured number of reduce tasks
         * @return a partition id in [0, numPartitions)
         */
        @Override
        public int getPartition(Text key, FlowBan1 flowBan1, int numPartitions) {
            String phone = key.toString();
            // Guard against keys shorter than a 3-digit prefix.
            if (phone.length() < 3) {
                return 0;
            }
            Integer num = PROVIDER_MAP.get(phone.substring(0, 3));
            if (num == null) {
                return 0;
            }
            // Clamp into the valid range so the job still works when it is
            // configured with fewer than 8 reducers (the original returned the
            // raw id, which throws "Illegal partition" in that case). With the
            // configured 8 reducers the result is unchanged.
            return num % numPartitions;
        }
    }

    public static class ReduceFlowBlong extends Reducer<Text, FlowBan1, Text, FlowBan1> {

        private MultipleOutputs<Text, FlowBan1> mos;

        @Override
        protected void setup(Context context) throws IOException, InterruptedException {
            mos = new MultipleOutputs<>(context);
        }

        @Override
        protected void cleanup(Context context) throws IOException, InterruptedException {
            // Must close MultipleOutputs, otherwise buffered output is never flushed.
            mos.close();
        }

        /**
         * Writes every record of this key to a per-place directory/file via the
         * "BaseOnKey" named output (path pattern: &lt;place&gt;/&lt;place&gt;-r-NNNNN).
         */
        @Override
        protected void reduce(Text key, Iterable<FlowBan1> values, Context context)
                throws IOException, InterruptedException {
            // 'place' is local now — the original kept it in a package-visible
            // mutable instance field, which was unnecessary shared state.
            String place = placeFor(key.toString());
            for (FlowBan1 value : values) {
                mos.write("BaseOnKey", new Text(place), value, place + "/" + place);
                // To also emit all records into a single undivided output:
                // mos.write("All", new Text(place), value);
            }
        }

        /** Maps a phone-number prefix to its place name (mirrors ProvidePartitioner's prefixes). */
        private static String placeFor(String phone) {
            if (phone.startsWith("130")) {
                return "太原";
            } else if (phone.startsWith("134")) {
                return "大同";
            } else if (phone.startsWith("135")) {
                return "吕梁";
            } else if (phone.startsWith("136")) {
                return "侯马";
            } else if (phone.startsWith("137")) {
                return "朔州";
            } else if (phone.startsWith("138")) {
                return "忻州";
            } else if (phone.startsWith("139")) {
                return "长治";
            } else {
                return "no information";
            }
        }

        /**
         * Job driver: configures mapper, reducer, custom partitioner, the
         * "BaseOnKey" named output, and local input/output paths, then runs
         * the job. (Kept inside this class so the entry-point class name seen
         * by callers/scripts does not change.)
         */
        public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {

            Configuration conf = new Configuration();
            Job job = Job.getInstance(conf);

            job.setJarByClass(FlowBlongJob.class);

            job.setMapperClass(MapFlowBlong.class);
            job.setReducerClass(ReduceFlowBlong.class);

            // Register the custom partitioner so records are routed by phone prefix.
            job.setPartitionerClass(ProvidePartitioner.class);

            job.setMapOutputKeyClass(Text.class);
            job.setMapOutputValueClass(FlowBan1.class);
            // Output channel keyed by place.
            MultipleOutputs.addNamedOutput(job, "BaseOnKey", TextOutputFormat.class, Text.class, FlowBan1.class);
            // Undivided output of all records (disabled):
            // MultipleOutputs.addNamedOutput(job, "All", TextOutputFormat.class, Text.class, FlowBan1.class);

            // Suppress empty part-r-NNNNN files from the default output.
            LazyOutputFormat.setOutputFormatClass(job, TextOutputFormat.class);

            job.setOutputKeyClass(Text.class);
            job.setOutputValueClass(FlowBan1.class);

            // Delete a pre-existing output directory so the job can rerun.
            File file = new File("E:\\Mapreduce案例\\flowsum_of_mapreduce\\练习用的数据\\output");
            if (file.exists()) {
                FileUtil.fullyDelete(file);
            }

            FileInputFormat.addInputPath(job, new Path("E:\\Mapreduce案例\\flowsum_of_mapreduce\\练习用的数据\\input\\flow.log"));
            FileOutputFormat.setOutputPath(job, new Path("E:\\Mapreduce案例\\flowsum_of_mapreduce\\练习用的数据\\output"));

            // One reducer per provider partition (ids 0-7).
            job.setNumReduceTasks(8);

            System.exit(job.waitForCompletion(true) ? 0 : 1);

        }
    }
}

