package com.mapreduce;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

import java.io.IOException;
import java.net.URI;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.Map;

public class IndexData extends Configured implements Tool {
    /**
     * Mapper: tags every word with the name of the file it came from.
     * Input  (k1, v1): line byte offset, line text.
     * Output (k2, v2): word, source file name — e.g. ("Welcome", "file_1").
     */
    static class IndexDataMapper extends
            Mapper<LongWritable, Text, Text, Text>{
        // Reusable Writables: context.write() serializes immediately, so the
        // same objects can be refilled for every record (avoids per-word allocation).
        Text fileName = new Text();
        private final Text word = new Text();

        /**
         * Runs once per map task, before any map() call.
         * Caches the name of the file backing this input split so each
         * emitted word can be tagged with its source file.
         */
        @Override
        protected void setup(Mapper<LongWritable, Text, Text, Text>.Context context) throws IOException, InterruptedException {
            // The split for a TextInputFormat job is a FileSplit, which
            // exposes the path of the underlying file.
            FileSplit fs = (FileSplit) context.getInputSplit();
            fileName.set(fs.getPath().getName());
        }

        /**
         * Called once per input line. Splits the line on single spaces and
         * emits one (word, fileName) pair per word.
         */
        @Override
        protected void map(LongWritable key, Text value, Mapper<LongWritable, Text, Text, Text>.Context context) throws IOException, InterruptedException {
            for (String token : value.toString().split(" ")) {
                // split(" ") produces empty tokens for leading or repeated
                // spaces; skip them so they are not indexed as words.
                if (token.isEmpty()) {
                    continue;
                }
                word.set(token);
                context.write(word, fileName);
            }
        }
    }
    /**
     * Reducer: builds one inverted-index line per word.
     * Input  : k2 = word, values = one file name per occurrence,
     *          e.g. Welcome -> ["file_1", "file_2", "file_1"].
     * Output : k3 = word, v3 = comma-joined per-file counts,
     *          e.g. "file_1:2,file_2:1".
     */
    static class IndexDataReducer extends
            Reducer<Text, Text, Text, Text>{
        @Override
        protected void reduce(Text key, Iterable<Text> values, Reducer<Text, Text, Text, Text>.Context context) throws IOException, InterruptedException {
            // LinkedHashMap keeps first-seen order, so for a given value
            // ordering the output line is deterministic (plain HashMap order
            // varies and makes results hard to compare across runs).
            Map<String,Integer> wordData = new LinkedHashMap<>();
            for (Text value : values) {
                // Count occurrences per file: insert 1 on first sighting,
                // otherwise add 1 to the stored count.
                wordData.merge(value.toString(), 1, Integer::sum);
            }
            // Defensive: Hadoop does not normally invoke reduce() with an
            // empty group, but emitting nothing is safer than trimming a
            // trailing comma from an empty buffer (StringIndexOutOfBounds).
            if (wordData.isEmpty()) {
                return;
            }
            // Join as "file:count" pairs separated by commas, adding the
            // separator before each entry after the first (no trailing comma
            // to trim afterwards).
            StringBuilder result = new StringBuilder();
            for (Map.Entry<String,Integer> data : wordData.entrySet()) {
                if (result.length() > 0) {
                    result.append(',');
                }
                result.append(data.getKey()).append(':').append(data.getValue());
            }
            context.write(key, new Text(result.toString()));
        }
    }

    /**
     * Driver: configures and submits the inverted-index job.
     *
     * @param args optional overrides: args[0] = input path, args[1] = output
     *             path; when absent, the original hard-coded HDFS paths are
     *             used so existing invocations keep working.
     * @return 0 when the job succeeds, -1 otherwise
     */
    @Override
    public int run(String[] args) throws Exception {
        Configuration conf = getConf();
        // Default paths preserved for backward compatibility; CLI args override.
        Path input = new Path(args.length > 0 ? args[0]
                : "hdfs://192.168.10.11:9000/indexdata");
        Path output = new Path(args.length > 1 ? args[1]
                : "hdfs://192.168.10.11:9000/indexoutput");
        // Delete a stale output directory up front, otherwise the job refuses
        // to start. getFileSystem() resolves the right FS for the path.
        FileSystem fs = output.getFileSystem(conf);
        if (fs.exists(output)) {
            fs.delete(output, true);
        }

        Job job = Job.getInstance(conf);
        job.setJobName("index");
        job.setJarByClass(this.getClass());

        job.setMapperClass(IndexDataMapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);

        job.setReducerClass(IndexDataReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);

        job.setInputFormatClass(TextInputFormat.class);
        TextInputFormat.addInputPath(job, input);

        job.setOutputFormatClass(TextOutputFormat.class);
        TextOutputFormat.setOutputPath(job, output);
        // Block until the job finishes, printing progress to the console.
        return job.waitForCompletion(true) ? 0 : -1;
    }

    /** Entry point: delegates to ToolRunner and exits with the job's status code. */
    public static void main(String[] args) throws Exception {
        int status = ToolRunner.run(new IndexData(), args);
        System.exit(status);
    }
}
