package cn.etc;


import org.apache.commons.io.FileUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.File;

public class FlowCount {

    /**
     * Mapper: parses one tab-separated line of the flow log and emits
     * (phone, FlowBean(phone, upFlow, dFlow)).
     */
    public static class FlowCountMap extends Mapper<LongWritable, Text, Text, FlowBean> {
        // Reused output key to avoid allocating a new Text per record.
        Text text = new Text();

        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws java.io.IOException, InterruptedException {
            String line = value.toString();
            // FIX: split on the tab character; the original "t/" was a typo and
            // would leave the entire line in split[0], making the parseInt calls
            // below fail on any real tab-separated record.
            String[] split = line.split("\t");
            String phone = split[0];
            // NOTE(review): assumes the last field is up-flow and the one before
            // it is down-flow — confirm against the actual flow.log layout.
            int upflow = Integer.parseInt(split[split.length - 1]);
            int dflow = Integer.parseInt(split[split.length - 2]);
            text.set(phone);
            context.write(text, new FlowBean(phone, upflow, dflow));
        }
    }

    /**
     * Reducer: sums up-flow and down-flow per phone number and emits one
     * aggregated FlowBean per key.
     */
    public static class FlowCountReduce extends Reducer<Text, FlowBean, Text, FlowBean> {
        @Override
        protected void reduce(Text key, Iterable<FlowBean> values, Context context)
                throws java.io.IOException, InterruptedException {
            int upSum = 0;
            int dSum = 0;
            for (FlowBean flowBean : values) {
                upSum = upSum + flowBean.getUpFlow();
                dSum = dSum + flowBean.getdFlow();
            }
            // FIX: the original accumulated dFlow into the constructor's up-flow
            // argument and upFlow into the down-flow argument. The mapper writes
            // new FlowBean(phone, upflow, dflow), so keep that argument order.
            context.write(key, new FlowBean(key.toString(), upSum, dSum));
        }
    }

    /**
     * Job driver. Input and output paths may be supplied as args[0] and
     * args[1]; the original hard-coded Windows paths remain the defaults,
     * so existing invocations keep working.
     */
    public static void main(String[] args) {
        Configuration config = new Configuration();
        // Backward-compatible generalization: optional CLI path overrides.
        String inputPath = args.length > 0 ? args[0] : "F:\\flow.log.txt";
        String outputPath = args.length > 1 ? args[1] : "F:\\out";
        try {
            // Create the job and wire up this driver class.
            Job job = Job.getInstance(config);
            job.setJarByClass(FlowCount.class);

            // Mapper and reducer implementations.
            job.setMapperClass(FlowCountMap.class);
            job.setReducerClass(FlowCountReduce.class);

            // Map-phase output types.
            job.setMapOutputKeyClass(Text.class);
            job.setMapOutputValueClass(FlowBean.class);

            // Final (reduce-phase) output types.
            job.setOutputKeyClass(Text.class);
            job.setOutputValueClass(FlowBean.class);

            // Single reducer so all totals land in one output file.
            job.setNumReduceTasks(1);

            // MapReduce refuses to start if the output directory already
            // exists, so remove a stale one first (local-filesystem runs only).
            File outDir = new File(outputPath);
            if (outDir.exists()) {
                FileUtils.deleteDirectory(outDir);
            }

            // Input data file and job output directory.
            FileInputFormat.addInputPath(job, new Path(inputPath));
            FileOutputFormat.setOutputPath(job, new Path(outputPath));

            // Block until the job finishes; exit status reflects success.
            System.exit(job.waitForCompletion(true) ? 0 : 1);
        } catch (Exception e) {
            // FIX: exit non-zero on failure — the original printed the stack
            // trace and then fell out of main with implicit status 0, which
            // made failed runs look successful to callers/scripts.
            e.printStackTrace();
            System.exit(1);
        }
    }
}
