package com.shujia.flink.state;

import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.KeyedProcessFunction;
import org.apache.flink.streaming.api.functions.ProcessFunction;
import org.apache.flink.util.Collector;

import java.util.HashMap;
import java.util.Hashtable;
import java.util.Map;

/**
 * Flink streaming word-count demo that keeps per-key running totals in
 * operator-local (in-memory, non-checkpointed) state.
 *
 * <p>Pipeline: socket source ("master":8888) -> ProcessFunction that splits
 * each line on commas and emits (word, weight) pairs -> keyBy(word) ->
 * KeyedProcessFunction that accumulates the weight per word in a plain map
 * and prints "word,total".
 *
 * <p>NOTE: because the state lives in an ordinary Java map, it is lost on
 * job restart and is invisible to Flink's checkpointing — this demo exists
 * to motivate Flink's managed state API.
 */
public class Demo01WordCountState {
    public static void main(String[] args) throws Exception {

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        DataStreamSource<String> socketDS = env.socketTextStream("master", 8888);


//        socketDS.map(w -> Tuple2.of(w, 1), Types.TUPLE(Types.STRING, Types.INT)).print();

        // Custom processing logic: a ProcessFunction can emulate map, flatMap, filter, etc.
        // Split each line, flatten it, and weight the words: "vip" -> 10, "svip" -> 100,
        // everything else -> 1; the word "b" is dropped entirely (never emitted).
        SingleOutputStreamOperator<Tuple2<String, Integer>> kvDS = socketDS.process(new ProcessFunction<String, Tuple2<String, Integer>>() {
            // Invoked once per incoming record
            @Override
            /*
            line    : one incoming record
            context : runtime context, mainly used to access timestamps
            out     : collector used to forward results downstream
             */
            public void processElement(String line, ProcessFunction<String, Tuple2<String, Integer>>.Context context, Collector<Tuple2<String, Integer>> out) throws Exception {
                for (String word : line.split(",")) {
                    // "b" is filtered out — nothing is emitted for it
                    if (!"b".equals(word)) {
                        int value;
                        switch (word) {
                            case "vip":
                                value = 10;
                                break;
                            case "svip":
                                value = 100;
                                break;
                            default:
                                value = 1;
                                break;
                        }
                        // Forward the result downstream as a KV pair
                        out.collect(Tuple2.of(word, value));
                    }
                }

            }
        });

        kvDS.keyBy(t2 -> t2.f0)
                .process(new KeyedProcessFunction<String, Tuple2<String, Integer>, String>() {
                    // A single field here would be shared by every key routed to this subtask:
//                    Integer sumValue = 0;
                    // Instead keep one running total per key, backed by a map.
                    // A plain HashMap (declared via the Map interface) is sufficient:
                    // each Flink operator subtask is single-threaded, so the legacy
                    // synchronized Hashtable adds lock overhead for no benefit.
                    // This state is still lost when the job restarts.
                    Map<String, Integer> kvStateMap;

                    @Override
                    // Called once per parallel subtask when the job starts
                    public void open(Configuration parameters) throws Exception {
                        super.open(parameters);
                        System.out.println("线程：" + Thread.currentThread().getId() + "进行了初始化");
                        kvStateMap = new HashMap<>();
                    }

                    @Override
                    // Invoked once per incoming record
                    public void processElement(Tuple2<String, Integer> kv, KeyedProcessFunction<String, Tuple2<String, Integer>, String>.Context context, Collector<String> out) throws Exception {
                        // Accumulate the running total for this word:
                        // merge() inserts kv.f1 for a new key, or adds it to the
                        // existing total, and returns the updated value.
                        int newValue = kvStateMap.merge(kv.f0, kv.f1, Integer::sum);

                        out.collect(kv.f0 + "," + newValue);
                    }
                }).print();


        env.execute();

    }
}
