package com.jscloud.bigdata.flink.counter;

import org.apache.commons.io.FileUtils;
import org.apache.flink.api.common.JobExecutionResult;
import org.apache.flink.api.common.accumulators.IntCounter;
import org.apache.flink.api.common.functions.RichMapFunction;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.operators.DataSource;
import org.apache.flink.api.java.operators.MapOperator;
import org.apache.flink.api.java.tuple.Tuple3;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.metrics.Counter;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;

import java.io.File;
import java.util.List;

/**
 * * 计数器与累加器概念
 * <p>
 * ```
 * Accumulator即累加器，与Mapreduce counter的应用场景差不多，都能很好地观察task在运行期间的数据变化，可以在Flink job任务的算子函数中操作累加器，但是只能在任务执行结束之后才能获得累加器的最终结果。
 * Counter是一个具体的累加器(Accumulator)实现IntCounter, LongCounter 和 DoubleCounter。
 * ```
 * <p>
 * * 计数器与累加器用法
 * <p>
 * ~~~java
 * (1)：创建累加器
 * IntCounter intCounter = new IntCounter();
 * <p>
 * (2)：注册累加器
 * getRuntimeContext.addAccumulator("num-lines",counter)
 * <p>
 * (3)：使用累加器
 * counter.add(1)
 * <p>
 * (4)：获取累加器的结果
 * myJobExecutionResult.getAccumulatorResult("num-lines")
 * <p>
 * ~~~
 * <p>
 * 分布式缓存概念
 * <p>
 * Flink提供了一个类似于hadoop分布式缓存，可以使用户在并行函数中很方便的读取本地文件。
 * 前面讲到的广播变量是将一些共享的数据放在TaskManager内存中，而Distribute cache是从外部加载一个文件/目录(例如hdfs)，然后分别复制到每一个TaskManager的本地磁盘中。
 * <p>
 * 分布式缓存用法：
 * <p>
 * ```
 * (1)：使用Flink运行环境调用registerCachedFile注册一个分布式缓存
 * env.registerCachedFile("hdfs:///path/to/your/file", "hdfsFile")
 * <p>
 * (2): 获取分布式缓存
 * File myFile = getRuntimeContext().getDistributedCache().getFile("hdfsFile");
 * <p>
 * ```
 * <p>
 * * 案例
 * * 通过分布式缓存，加载外部数据源数据，并且统计一共有多少行数据
 */
public class FlinkDistributeCacheByJava {

        /**
         * Runs a small Flink batch job demonstrating three features together:
         * an {@link IntCounter} accumulator (result readable only after the job
         * finishes), a metric-group {@link Counter} (observable while the job
         * runs), and the distributed cache (a local file copied to every
         * TaskManager and read in {@code open()}).
         *
         * @param args unused command-line arguments
         * @throws Exception if job execution fails or the cached file cannot be read
         */
        public static void main(String[] args) throws Exception {
                // Silence framework logging so the demo output stays readable.
                Logger.getLogger("org").setLevel(Level.ERROR);
                ExecutionEnvironment executionEnvironment = ExecutionEnvironment.getExecutionEnvironment();
                // Register a file in the distributed cache under the key "student".
                // Flink copies it to each TaskManager's local disk before tasks start.
                executionEnvironment.registerCachedFile("D:\\JSProjects\\jsCloud-bigdata-app\\datas\\catalina.out", "student");

                Tuple3<Integer, String, Integer> tuple3 = new Tuple3<>();
                tuple3.setFields(1, "语文", 60);

                Tuple3<Integer, String, Integer> tuple4 = new Tuple3<>();
                tuple4.setFields(2, "数学", 80);

                DataSource<Tuple3<Integer, String, Integer>> sourceDataset = executionEnvironment.fromElements(tuple3, tuple4);

                MapOperator<Tuple3<Integer, String, Integer>, String> result = sourceDataset.map(new RichMapFunction<Tuple3<Integer, String, Integer>, String>() {
                        // Lines of the cached file, loaded once per task in open().
                        private List<String> fileLines;
                        // Metric counter: visible in the Flink metrics system while running.
                        private Counter counter;
                        // Accumulator: aggregated across tasks, readable after job completion.
                        private final IntCounter intCounter = new IntCounter();

                        @Override
                        public void open(Configuration parameters) throws Exception {
                                // Register the accumulator so its result is reported to the JobManager.
                                getRuntimeContext().addAccumulator("intCounter", intCounter);
                                // Create the metric counter; incremented once per processed line.
                                this.counter = getRuntimeContext().getMetricGroup().counter("counter");
                                // Resolve the distributed-cache file registered in main() and
                                // read it fully into memory (one entry per line).
                                File student = getRuntimeContext().getDistributedCache().getFile("student");
                                this.fileLines = FileUtils.readLines(student, "UTF-8");
                        }

                        @Override
                        public String map(Tuple3<Integer, String, Integer> value) throws Exception {
                                // For every input element, walk all cached lines so both
                                // counting mechanisms advance (demo purposes only).
                                for (String fileLine : fileLines) {
                                        // Accumulator: add 10 per cached line per element.
                                        intCounter.add(10);
                                        // Metric counter: one tick per cached line per element.
                                        counter.inc();
                                }

                                return value.f0 + "\t" + value.f1 + "\t" + value.f2;
                        }
                });
                // print() triggers job execution (it is an action in the DataSet API).
                result.print();

                JobExecutionResult lastJobExecutionResult = executionEnvironment.getLastJobExecutionResult();
                // Accumulator results are only available after the job completed.
                // getAccumulatorResult is generic, so bind it to Integer directly
                // instead of the raw Object + explicit toString() (NPE-prone).
                Integer totalCount = lastJobExecutionResult.getAccumulatorResult("intCounter");
                System.out.println(totalCount);
        }
}
