package com.etc;


import org.apache.hadoop.conf.Configuration;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;


import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;


import java.io.File;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;

public class WordCount {

    /**
     * Mapper: splits each comma-separated input line into tokens and emits
     * {@code (token, 1)} for every non-empty token.
     *
     * <p>Writable instances are reused across {@code map()} calls — the
     * framework serializes the value immediately on {@code context.write},
     * so per-record allocation is wasted work (standard Hadoop idiom).
     */
    public static class MapWordCount extends Mapper<LongWritable, Text, Text, IntWritable> {

        private static final IntWritable ONE = new IntWritable(1);
        private final Text word = new Text();

        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            // Input lines are comma-separated tokens; key is the byte offset (unused).
            for (String token : value.toString().split(",")) {
                // Skip blanks produced by consecutive commas, e.g. "a,,b" —
                // the original emitted "" as a key, which pollutes the counts.
                if (!token.isEmpty()) {
                    word.set(token);
                    context.write(word, ONE);
                }
            }
        }
    }


    /**
     * Reducer (also used as the combiner): sums the counts for each word.
     * Summation is associative and commutative, so combiner use is safe.
     */
    public static class ReduceWordCount extends Reducer<Text, IntWritable, Text, IntWritable> {

        // Reused across reduce() calls; set() before each write.
        private final IntWritable total = new IntWritable();

        @Override
        protected void reduce(Text key, Iterable<IntWritable> values, Context context)
                throws IOException, InterruptedException {
            int sum = 0;
            for (IntWritable value : values) {
                sum += value.get();
            }
            total.set(sum);
            context.write(key, total);
        }
    }


    /**
     * Job driver: configures the cluster endpoints, wires up the mapper,
     * combiner and reducer, and submits the job to YARN.
     *
     * @param args {@code args[0]} = HDFS input path, {@code args[1]} = HDFS output path
     */
    public static void main(String[] args)
            throws IOException, ClassNotFoundException, InterruptedException, URISyntaxException {

        // Fail fast with a usage message instead of an ArrayIndexOutOfBoundsException.
        if (args.length < 2) {
            System.err.println("Usage: WordCount <input path> <output path>");
            System.exit(2);
        }

        Configuration conf = new Configuration();

        // 1. Default file system the job reads/writes. Raw IP is used because
        //    the Windows client has no hostname mapping configured.
        conf.set("fs.defaultFS", "hdfs://192.168.88.100:9000");
        conf.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem");
        // 2. Submit the job to YARN rather than running it locally.
        conf.set("mapreduce.framework.name", "yarn");
        conf.set("yarn.resourcemanager.hostname", "192.168.88.100");
//        // 3. Required when submitting this job from a Windows client (cross-platform submit).
//        conf.set("mapreduce.app-submission.cross-platform","true");

        Job job = Job.getInstance(conf);

        // 1. Locate the jar containing this driver class. Alternative: build with
        //    `mvn package` and point at it explicitly via
        //    job.setJar("...\\target\\wordcount-1.0-SNAPSHOT.jar");
        job.setJarByClass(WordCount.class);

        // 2. Mapper / combiner / reducer. The reducer doubles as a combiner to
        //    pre-aggregate on the map side and cut shuffle traffic.
        job.setMapperClass(MapWordCount.class);
        job.setCombinerClass(ReduceWordCount.class);
        job.setReducerClass(ReduceWordCount.class);

        // 3. Key/value types produced by the mapper and by the final output.
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        Path input = new Path(args[0]);
        Path output = new Path(args[1]);
        FileInputFormat.addInputPath(job, input);
        FileOutputFormat.setOutputPath(job, output);

        // Hadoop refuses to start if the output directory already exists;
        // delete it up front so reruns don't fail (replaces the manual
        // `hadoop fs -rm -r $OUTPUT` step in the launch script).
        FileSystem fs = FileSystem.get(conf);
        if (fs.exists(output)) {
            fs.delete(output, true);
        }

        job.setNumReduceTasks(1);
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
// Deployment: build the jar, upload it (e.g. with xftp), then run a script like:
//    APP_JAR=/opt/wordcount-1.0-SNAPSHOT.jar    # path to the uploaded jar
//    INPUT=/input
//    OUTPUT=/output
//    $HADOOP_HOME/bin/hadoop fs -rm -r $OUTPUT  # remove stale output (now also done in the driver)
//    PROG=com.etc.WordCount                     # fully-qualified driver class
//    $HADOOP_HOME/bin/hadoop jar $APP_JAR $PROG $INPUT $OUTPUT
}
