package com.etc;

import jdk.internal.cmm.SystemResourcePressureImpl;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;

import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;


import java.io.File;
import java.io.IOException;


/**
 * Builds an inverted index with Hadoop MapReduce: maps each word to the
 * files it appears in, together with its frequency in each file.
 *
 * @author zhezhe
 * @since 2018/10/27 15:00
 */
public class InvertedIndex {
//Map stage: emits key = word + "-->" + source file name, value = frequency "1"
    public static class MapInvertedIndex extends Mapper<LongWritable, Text,Text, Text>{

        // Reused output holders — avoids allocating two new Text objects per token.
        private final Text keyInfo = new Text();
        private final Text valueInfo = new Text("1");

        /**
         * Tokenizes one input line on spaces and emits (word-->fileName, "1")
         * for every non-empty token.
         *
         * @param key     byte offset of the line within the split (unused)
         * @param value   one line of input text
         * @param context Hadoop output collector
         */
        @Override
        protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {

            // Name of the file this split came from; it becomes part of the key.
            FileSplit split = (FileSplit) context.getInputSplit();
            String fileName = split.getPath().getName();
            // Split the line on single spaces. Consecutive spaces produce empty
            // tokens, which are skipped so no bogus ""-->file keys are emitted.
            for (String s : value.toString().split(" ")) {
                if (s.isEmpty()) {
                    continue;
                }
                keyInfo.set(s + "-->" + fileName);
                context.write(keyInfo, valueInfo);
            }
        }
    }


//Combiner stage: sums the per-(word, file) counts emitted by the map.
//Output: key = word, value = fileName + "-->" + frequency
    public static class CombinerInvertedIndex extends Reducer<Text,Text,Text,Text>{

        /**
         * Collapses the "1" values for one (word, file) pair into a count and
         * re-keys the record by word alone.
         *
         * @param key     "word-->fileName" as produced by the mapper
         * @param values  the "1" occurrences for that pair
         * @param context Hadoop output collector
         */
        @Override
        protected void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
            // Split "word-->fileName" back into word[0] = word, word[1] = fileName.
            String[] word = key.toString().split("-->");
            // NOTE(review): rewriting the key inside a combiner violates the usual
            // combiner contract — Hadoop may apply a combiner zero or more times.
            // A second pass would see the already-rewritten key ("word") and
            // non-numeric values; pass those through instead of crashing with
            // NumberFormatException.
            if (word.length < 2) {
                for (Text value : values) {
                    context.write(key, value);
                }
                return;
            }
            // Each incoming value is "1", so summing them counts occurrences.
            int sum = 0;
            for (Text value : values) {
                sum = sum + Integer.parseInt(value.toString());
            }
            // Emit word as key; "fileName-->count   " as value. The trailing
            // spaces separate entries when the reducer concatenates them.
            context.write(new Text(word[0]),new Text( word[1]+"-->" + sum+"   "));
        }
    }


  //Reduce stage: concatenates all "file-->count" fragments for one word onto a single line.
    public static class ReduceInvertedIndex extends Reducer<Text,Text,Text,Text>{

        /**
         * Joins every "fileName-->count   " fragment for the same word into one
         * posting-list string.
         *
         * @param key     the word
         * @param values  the per-file fragments produced by the combiner
         * @param context Hadoop output collector
         */
        @Override
        protected void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
            // StringBuilder avoids O(n^2) String concatenation in the loop.
            StringBuilder str = new StringBuilder();
            for (Text value : values) {
                str.append(value.toString());
            }
            context.write(key, new Text(str.toString()));
        }
    }


    /**
     * Configures and runs the inverted-index job.
     * <p>
     * Usage: {@code InvertedIndex [inputPath] [outputPath]}. When arguments are
     * omitted, the original hard-coded local paths are used, so existing
     * invocations keep working.
     */
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        System.setProperty("HADOOP_USER_NAME","root");

        // Paths are overridable from the command line; defaults preserve the
        // previous hard-coded behavior.
        String inputPath = args.length > 0 ? args[0]
                : "D:\\Documents\\Tencent Files\\1159080439\\FileRecv\\练习数据\\练习数据\\input";
        String outputPath = args.length > 1 ? args[1]
                : "D:\\Documents\\Tencent Files\\1159080439\\FileRecv\\练习数据\\练习数据\\output";

        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);

        job.setJarByClass(InvertedIndex.class);
        job.setMapperClass(MapInvertedIndex.class);
        // NOTE(review): this combiner rewrites the key, which is unsafe if
        // Hadoop applies it more than once — see CombinerInvertedIndex.
        job.setCombinerClass(CombinerInvertedIndex.class);
        job.setReducerClass(ReduceInvertedIndex.class);

        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);

        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);

        // Delete a pre-existing local output directory so the job does not
        // fail on startup (FileOutputFormat requires the path not to exist).
        File file = new File(outputPath);
        if (file.exists()) {
            FileUtil.fullyDelete(file);
        }
        FileInputFormat.addInputPath(job, new Path(inputPath));
        FileOutputFormat.setOutputPath(job, new Path(outputPath));

        // Single reducer so the whole index lands in one output file.
        job.setNumReduceTasks(1);

        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
