package org.apache.hadoop;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.util.GenericOptionsParser;

//import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
//import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
// Computing averages and de-duplication with MapReduce: https://www.cnblogs.com/bigdataZJ/p/hadoopreading2.html
// Hadoop in practice (1): computing an average score: https://blog.csdn.net/weixin_45659364/article/details/109999266
public class Test1 {

    public static class MapperClass extends Mapper<LongWritable, Text, Text, DoubleWritable> {
        public void map(LongWritable key, Text value, Context context) {
            String line = value.toString();
            System.out.println("row data：" + line);
            StringTokenizer token = new StringTokenizer(line, "\t");
            String nameT = token.nextToken();
            nameT = "avg";
            int score = Integer.parseInt(token.nextToken());
            Text name = new Text(nameT);
            try {
                context.write(name, new DoubleWritable(score));
            } catch (IOException e) {
                e.printStackTrace();
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
        }
    }

    public static class ReducerClass extends Reducer<Text, DoubleWritable, Text, DoubleWritable> {
        public void reduce(Text key, Iterable<DoubleWritable> value, Context context) {
            double sum = 0;
            int count = 0;
            for (DoubleWritable score : value) {
                sum += score.get();
                count++;
                //需要文件编码为UTF-8，java的VM选项设置为：-Dfile.encoding=UTF-8
                System.out.println("第" + count + "个数值为：" + score.get());
            }
            DoubleWritable avg = new DoubleWritable(sum / count);
            //IntWritable avg = new IntWritable(sum/count);
            //System.out.println("avg="+avg);
            try {
                context.write(key, avg);
            } catch (IOException e) {
                e.printStackTrace();
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
        }
    }

    /**
     * Driver: configures and submits the averaging job, then reads the reducer
     * output file back from HDFS and prints it to stdout.
     *
     * @param args generic Hadoop options followed by {@code <input path> <output path>}
     * @throws IOException            on HDFS or job-submission I/O failure
     * @throws ClassNotFoundException if job classes cannot be resolved at submit time
     * @throws InterruptedException   if waiting for job completion is interrupted
     */
    public static void main(String[] args) throws IOException, InterruptedException, ClassNotFoundException {

        Configuration conf = new Configuration();
        String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
        if (otherArgs.length != 2) {
            System.err.println("Usage: Test1 <in> <out>");
            System.exit(2);
        }

        // Local run mode: no YARN logs are produced; requires no core-site.xml /
        // hdfs-site.xml / mapred-site.xml / yarn-site.xml; node1 must be the
        // active NameNode; setJar() is not needed.
        //conf.set("fs.defaultFS", "hdfs://node1:9000/");

        // YARN run mode: job logs are viewable at http://node1:8088/ (the
        // ResourceManager web UI). Requires the cluster config files above.
        // Cross-platform submission from Windows; can also be set in mapred-site.xml.
        conf.set("mapreduce.app-submission.cross-platform", "true");
        conf.set("fs.defaultFS", "hdfs://mycluster/"); // may also come from core-site.xml
        conf.set("mapred.jar", "out/artifacts/h_jar/hadoop-mapreduce-examples.jar"); // required for YARN mode; must be this project's built jar

        // Job.getInstance replaces the deprecated new Job(conf, name) constructor.
        Job job = Job.getInstance(conf, "Test1");

        job.setJarByClass(Test1.class);
        job.setMapperClass(MapperClass.class);
        // Deliberately NO combiner: ReducerClass emits an average, and an
        // average of partial averages is not the overall average, so using it
        // as a combiner would corrupt the result.
        job.setReducerClass(ReducerClass.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(DoubleWritable.class);

        org.apache.hadoop.mapreduce.lib.input.FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
        org.apache.hadoop.mapreduce.lib.output.FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));

        // waitForCompletion already reports success/failure; no separate
        // isSuccessful() round-trip needed.
        if (!job.waitForCompletion(true)) {
            System.out.println("Job " + job.getJobID() + " failed!");
            System.exit(1);
        }

        // Read back the single reducer's output file and echo it to stdout.
        String filePath = new Path(otherArgs[1]) + "/part-r-00000";
        System.out.println(filePath);
        // try-with-resources closes the stream, reader, and FileSystem even on
        // failure (the old manual IOUtils.closeStream calls could leak on
        // partial construction). Hadoop Text output is UTF-8, so decode
        // explicitly instead of relying on the platform default charset.
        try (FileSystem fileSystem = FileSystem.get(conf);
             InputStream in = fileSystem.open(new Path(filePath));
             BufferedReader reader = new BufferedReader(new InputStreamReader(in, StandardCharsets.UTF_8))) {
            String line;
            while ((line = reader.readLine()) != null) {
                System.out.println(line);
            }
        }
    }
}
