package average;

import org.apache.commons.io.FileUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;


import java.io.File;
import java.io.IOException;
import java.io.UnsupportedEncodingException;


public class AvgTest {
    public static class AvgTestMap extends Mapper<LongWritable, Text, Text, IntWritable> {
        // Reused output holders — avoids allocating two objects per input record.
        private final Text outKey = new Text();
        private final IntWritable outScore = new IntWritable();

        /**
         * Parses one CSV record of the form {@code name,score} and emits
         * {@code (name, score)}. Records that are missing the score column or
         * carry a non-numeric score are skipped and counted instead of failing
         * the whole task with an uncaught exception.
         *
         * @param key     byte offset of the line in the input split (unused)
         * @param value   raw line; assumed to be GBK-encoded bytes — TODO confirm
         * @param context Hadoop context used to emit output and bump counters
         */
        @Override
        protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
            // Input files are expected in GBK; re-decode the raw bytes accordingly.
            String line = transformTextToUTF8(value, "GBK").toString();
            String[] fields = line.split(",");
            if (fields.length < 2) {
                // Previously this threw ArrayIndexOutOfBoundsException and killed the task.
                context.getCounter("AvgTest", "MALFORMED_LINES").increment(1);
                return;
            }
            try {
                outKey.set(fields[0].trim());
                outScore.set(Integer.parseInt(fields[1].trim()));
                context.write(outKey, outScore);
            } catch (NumberFormatException e) {
                // Non-numeric score: skip the record rather than failing the job.
                context.getCounter("AvgTest", "BAD_SCORES").increment(1);
            }
        }

        /**
         * Re-decodes the raw bytes of {@code text} using the given charset name.
         *
         * @param text     source Text whose backing bytes are re-interpreted
         * @param encoding charset name, e.g. {@code "GBK"}
         * @return a new Text holding the re-decoded string; if the charset is
         *         unsupported, the original default decoding is kept (the old
         *         code returned {@code new Text(null)}, which throws NPE)
         */
        public static Text transformTextToUTF8(Text text, String encoding) {
            try {
                return new Text(new String(text.getBytes(), 0, text.getLength(), encoding));
            } catch (UnsupportedEncodingException e) {
                return new Text(text.toString());
            }
        }
    }

    public static class AvgTestReduce extends Reducer<Text, IntWritable, Text, IntWritable> {
        // Reused output holder — avoids allocating one IntWritable per key.
        private final IntWritable outAvg = new IntWritable();

        /**
         * Emits the integer average of all scores for one key.
         *
         * NOTE: the result is truncated toward zero (integer division); switch
         * the output type to DoubleWritable if fractional averages are needed.
         * NOTE(review): do NOT register this class as a combiner — averaging is
         * not associative, so combining partial averages would give wrong results.
         *
         * @param key     the name being aggregated
         * @param values  all scores emitted for this key (never empty — Hadoop
         *                only invokes reduce for keys that have at least one value)
         * @param context Hadoop context used to emit (name, average)
         */
        @Override
        protected void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
            int sum = 0;
            int count = 0;
            for (IntWritable value : values) {
                sum += value.get();
                count++;
            }
            // The original `(int) sum / count` cast was redundant: it bound to
            // `sum` (already an int), not to the division result.
            outAvg.set(sum / count);
            context.write(key, outAvg);
        }
    }



    /**
     * Configures and runs the average-score MapReduce job.
     *
     * Usage: {@code AvgTest [inputDir] [outputDir]} — both arguments are
     * optional; the original hard-coded Windows paths are kept as defaults
     * for backward compatibility.
     *
     * Exits with status 0 on success, 1 on job failure or setup error.
     */
    public static void main(String[] args) {
        // Optional overrides of the historical hard-coded paths.
        String inputPath = args.length > 0 ? args[0] : "f:\\aver";
        String outputPath = args.length > 1 ? args[1] : "f:\\test2";

        Configuration config = new Configuration();
        try {
            // Create the job.
            Job job = Job.getInstance(config);

            // Jar containing the job classes.
            job.setJarByClass(AvgTest.class);

            // Mapper and reducer implementations.
            job.setMapperClass(AvgTest.AvgTestMap.class);
            job.setReducerClass(AvgTestReduce.class);

            // Map output key/value types.
            job.setMapOutputKeyClass(Text.class);
            job.setMapOutputValueClass(IntWritable.class);

            // Final (reduce) output key/value types.
            job.setOutputKeyClass(Text.class);
            job.setOutputValueClass(IntWritable.class);

            // One reducer: a single global output file. (More reducers add
            // parallelism but also more output files and shuffle overhead —
            // not unconditionally "more efficient" as the old comment claimed.)
            job.setNumReduceTasks(1);

            // Hadoop refuses to start if the output directory already exists;
            // delete any leftover from a previous run.
            File outputDir = new File(outputPath);
            if (outputDir.exists()) {
                FileUtils.deleteDirectory(outputDir);
            }

            // Input data directory.
            FileInputFormat.addInputPath(job, new Path(inputPath));
            // Output directory for the job results.
            FileOutputFormat.setOutputPath(job, new Path(outputPath));

            // Run synchronously and propagate success/failure as exit status.
            System.exit(job.waitForCompletion(true) ? 0 : 1);
        } catch (Exception e) {
            e.printStackTrace();
            // Previously fell through and exited 0 even on setup failure.
            System.exit(1);
        }
    }
}
