

package com.maxian.hdfs;

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
// Word-frequency count (classic MapReduce word count)

    /**
     * Classic Hadoop MapReduce word-count job: reads text input, tokenizes each
     * line on whitespace, and writes each distinct word with its total count.
     *
     * <p>Input/output paths default to hard-coded local paths but can be
     * overridden on the command line: args[0] = input path, args[1] = output
     * directory (local or HDFS URIs).
     */
    public class WordCount {

        /** Mapper: emits (word, 1) for every whitespace-separated token of a line. */
        static class Maps extends Mapper<LongWritable, Text, Text, IntWritable> {
            private final static IntWritable one = new IntWritable(1);
            // Reused output key — avoids allocating a new Text per emitted word
            // (standard Hadoop Writable-reuse idiom).
            private final Text word = new Text();

            @Override
            protected void map(LongWritable key, Text value, Context context)
                    throws IOException, InterruptedException {
                // Split on runs of whitespace, not a single literal space, so tabs
                // and consecutive blanks don't yield empty "words".
                for (String token : value.toString().split("\\s+")) {
                    // Leading whitespace still produces one empty leading token — skip it.
                    if (!token.isEmpty()) {
                        word.set(token);
                        context.write(word, one);
                    }
                }
            }
        }

        /** Reducer: sums the per-word counts. Associative/commutative, so it is also combiner-safe. */
        static class Reduces extends Reducer<Text, IntWritable, Text, IntWritable> {
            // Reused output value — avoids allocating an IntWritable per key.
            private final IntWritable result = new IntWritable();

            @Override
            public void reduce(Text key, Iterable<IntWritable> values, Context context)
                    throws IOException, InterruptedException {
                int sum = 0;
                for (IntWritable value : values) {
                    sum += value.get();
                }
                result.set(sum);
                context.write(key, result);
            }
        }

        /**
         * Configures and submits the word-count job, then exits with the job's status.
         *
         * @param args optional overrides: args[0] = input path, args[1] = output
         *             directory (must not already exist); local or HDFS URIs.
         */
        public static void main(String[] args)
                throws IllegalArgumentException, IOException, ClassNotFoundException, InterruptedException {
            // Instantiate the Hadoop configuration and create the job.
            Configuration conf = new Configuration();
            Job job = Job.getInstance(conf, "word-count");
            // Main class, so the right jar is shipped to the cluster.
            job.setJarByClass(WordCount.class);
            // Input file: local path or HDFS URI, e.g.
            // "hdfs://master:9000/myTask/input/wordcount.txt".
            String inputPath = "D:\\MapReduder\\input\\wordcount.txt";
            // Output directory: must be a NEW directory; local path or HDFS URI, e.g.
            // "hdfs://master:9000/myTask/output/wordcount/".
            String outPath = "D:\\MapReduder\\output\\wordcount";
            // Command-line arguments override the hard-coded defaults.
            if (args != null && args.length > 0) {
                inputPath = args[0];
            }
            if (args != null && args.length > 1) {
                outPath = args[1];
            }
            FileInputFormat.addInputPath(job, new Path(inputPath));
            // Mapper and its output key/value types.
            job.setMapperClass(Maps.class);
            job.setMapOutputKeyClass(Text.class);
            job.setMapOutputValueClass(IntWritable.class);
            // Summation is associative and commutative, so the reducer doubles as a
            // combiner to reduce shuffle traffic.
            job.setCombinerClass(Reduces.class);
            // Reducer and the job's final output key/value types.
            job.setReducerClass(Reduces.class);
            job.setOutputKeyClass(Text.class);
            job.setOutputValueClass(IntWritable.class);
            FileOutputFormat.setOutputPath(job, new Path(outPath));
            // Propagate job success/failure to the process exit code instead of
            // silently discarding waitForCompletion's boolean result.
            System.exit(job.waitForCompletion(true) ? 0 : 1);
        }

    }


