package workCount;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

// WordCount computation (note: "workCount" is a typo for "wordCount", kept for compatibility)
public class workCount {
    // Configuration carrying the HDFS connection settings used for every submitted job.
    Configuration conf;

    /**
     * Creates a client configured against the given HDFS endpoint.
     *
     * @param URL the HDFS NameNode URI, e.g. "hdfs://localhost:9000"
     */
    public workCount(String URL) {
        conf = new Configuration();
        // Point all FileSystem operations at the supplied HDFS endpoint.
        conf.set("fs.defaultFS", URL);
        // Single replica — typical for a single-node/test cluster.
        conf.set("dfs.replication", "1");
    }

    /**
     * Submits a word-count MapReduce job and blocks until it completes.
     *
     * @param fileName input path (file or directory) on HDFS
     * @param jobName  human-readable job name shown in the cluster UI
     * @param destDir  output directory; must not already exist on HDFS
     * @throws IOException            on submission errors, or if the job itself fails
     * @throws InterruptedException   if the wait for completion is interrupted
     * @throws ClassNotFoundException if a job class cannot be resolved at submission
     */
    public void wordCount_main(String fileName, String jobName, String destDir)
            throws IOException, InterruptedException, ClassNotFoundException {
        Job job = Job.getInstance(conf, jobName);
        job.setJarByClass(workCount.class);

        // Where the job reads its input and writes its output.
        FileInputFormat.addInputPath(job, new Path(fileName));
        FileOutputFormat.setOutputPath(job, new Path(destDir));

        // Map phase: emits (word, 1) pairs, e.g. "hadoop" -> 1.
        job.setMapperClass(mapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);

        // Reduce phase: sums counts per word, e.g. "hadoop" -> 3.
        job.setReducerClass(reducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        // FIX: the original ignored waitForCompletion's boolean result,
        // silently hiding job failures. Surface failure explicitly.
        if (!job.waitForCompletion(true)) {
            throw new IOException("Word-count job failed: " + jobName);
        }
    }

    /**
     * Mapper: splits each input line into words and emits (word, 1).
     * The input key is the byte offset of the line within the file; the
     * input value is the line's text.
     */
    public static class mapper extends Mapper<LongWritable, Text, Text, IntWritable> {
        // Reused output objects — the standard Hadoop idiom to avoid
        // allocating two fresh Writables for every emitted token.
        private final Text word = new Text();
        private static final IntWritable ONE = new IntWritable(1);

        @Override
        protected void map(LongWritable key, Text value,
                           Mapper<LongWritable, Text, Text, IntWritable>.Context context)
                throws IOException, InterruptedException {
            // Example: "hadoop world linux" -> ("hadoop",1), ("world",1), ("linux",1)
            // FIX: split on runs of whitespace (not a single space) so tabs and
            // repeated blanks do not produce empty-string "words"; skip any
            // leading empty token that split("\\s+") can still yield.
            for (String token : value.toString().split("\\s+")) {
                if (!token.isEmpty()) {
                    word.set(token);
                    context.write(word, ONE);
                }
            }
        }
    }

    /**
     * Reducer: sums the 1s the shuffle grouped under each word, producing
     * (word, total count) — e.g. "hadoop" [1,1,1,1] -> ("hadoop", 4).
     */
    public static class reducer extends Reducer<Text, IntWritable, Text, IntWritable> {
        // Reused output value to avoid one allocation per distinct key.
        private final IntWritable result = new IntWritable();

        @Override
        protected void reduce(Text key, Iterable<IntWritable> values,
                              Reducer<Text, IntWritable, Text, IntWritable>.Context context)
                throws IOException, InterruptedException {
            int sum = 0;
            for (IntWritable count : values) {
                sum += count.get();
            }
            result.set(sum);
            // Emit in the job's declared output format: (Text, IntWritable).
            context.write(key, result);
        }
    }

}