package com.yanxu;

import com.google.gson.Gson;
import com.yanxu.domain.Event2;
import com.yanxu.source.CustomSource2;
import org.apache.flink.api.common.eventtime.SerializableTimestampAssigner;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.functions.AggregateFunction;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.windowing.assigners.TumblingEventTimeWindows;
import org.apache.flink.streaming.api.windowing.time.Time;

import java.time.Duration;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.Map;

/**
 * Demonstrates a custom {@link AggregateFunction} applied to a tumbling
 * event-time window: per-URL response times are summed inside each 5-second
 * window and emitted as a JSON string.
 *
 * @author 折戟沉沙铁未销
 * @version V1.0
 * @date 2025/7/27-2025
 */
public class Api_17_WindowAggregateSample {
    public static void main(String[] args) throws Exception {
        // Obtain the execution environment; parallelism 1 keeps the printed
        // output in a single, readable stream for this demo.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);

        // Define the source.
        DataStreamSource<Event2> dataStream = env.addSource(new CustomSource2());

        // Assign timestamps and watermarks, then key the stream.
        //
        // BUGFIX: assignTimestampsAndWatermarks() returns a NEW stream; the
        // original code discarded that return value and applied keyBy() to the
        // raw source, so the event-time window never received watermarks and
        // never fired. We must chain keyBy() off the watermarked stream.
        //
        // NOTE: keyBy(data -> true) routes every record to the same key — it
        // does not actually partition by any field; it just makes the stream
        // keyed so window() can be used.
        KeyedStream<Event2, Boolean> keyedStream = dataStream
                .assignTimestampsAndWatermarks(
                        WatermarkStrategy.<Event2>forBoundedOutOfOrderness(Duration.ZERO)
                                .withTimestampAssigner(new SerializableTimestampAssigner<Event2>() {
                                    @Override
                                    public long extractTimestamp(Event2 event2, long recordTimestamp) {
                                        return event2.getTimestamp();
                                    }
                                }))
                .keyBy(data -> true);

        // 5-second tumbling event-time window with a custom aggregate.
        keyedStream
                .window(TumblingEventTimeWindows.of(Time.seconds(5)))
                // AggregateFunction type parameters:
                //   1: Event2               — input element type
                //   2: Map<String,Integer>  — accumulator (intermediate) type
                //   3: String               — output type
                .aggregate(new AggregateFunction<Event2, Map<String, Integer>, String>() {

                    // Create a fresh accumulator for each new window.
                    // LinkedHashMap preserves first-seen URL order in the output.
                    @Override
                    public Map<String, Integer> createAccumulator() {
                        return new LinkedHashMap<>();
                    }

                    // Fold one element into the accumulator: sum response
                    // times per URL. Map.merge replaces the manual
                    // containsKey/get/put branching.
                    @Override
                    public Map<String, Integer> add(Event2 event2, Map<String, Integer> accumulator) {
                        accumulator.merge(event2.getUrl(), event2.getResponseTime(), Integer::sum);
                        return accumulator;
                    }

                    // Called when the window fires: serialize the per-URL sums
                    // as JSON and emit downstream.
                    @Override
                    public String getResult(Map<String, Integer> accumulator) {
                        return new Gson().toJson(accumulator);
                    }

                    // Combine two partial accumulators. Not invoked for
                    // tumbling windows, but implemented correctly (summing
                    // per-URL values) so switching to session windows later
                    // does not silently drop data — the previous version
                    // returned Collections.emptyMap(), discarding everything.
                    @Override
                    public Map<String, Integer> merge(Map<String, Integer> acc0, Map<String, Integer> acc1) {
                        Map<String, Integer> merged = new LinkedHashMap<>(acc0);
                        acc1.forEach((url, sum) -> merged.merge(url, sum, Integer::sum));
                        return merged;
                    }
                })
                // Print results with a label.
                .print("aggregate 自定义聚合函数处理 >>>");

        // Launch the Flink job.
        env.execute();
    }
}