package com.spx.chapter06;

import com.spx.chapter05.pojo.Event;
import com.spx.util.SampleDataUtil;
import org.apache.flink.api.common.eventtime.SerializableTimestampAssigner;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.functions.AggregateFunction;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.windowing.assigners.TumblingEventTimeWindows;
import org.apache.flink.streaming.api.windowing.time.Time;

import java.time.Duration;
import java.util.HashSet;

/**
 * create by undeRdoG on  2022-05-02  20:49
 * 凡心所向，素履以往，生如逆旅，一苇以航。
 */
public class AggregateFunctionTest_PV_UV {

    public static void main(String[] args) throws Exception {
        // Compute PV (page views) and UV (unique visitors) per window;
        // PV / UV is the average number of views per distinct user.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        SingleOutputStreamOperator<Event> dataSource = env.fromCollection(SampleDataUtil.getSample())
                .assignTimestampsAndWatermarks(
                        WatermarkStrategy.<Event>forBoundedOutOfOrderness(Duration.ZERO)
                                .withTimestampAssigner(new SerializableTimestampAssigner<Event>() {
                                    @Override
                                    public long extractTimestamp(Event element, long recordTimestamp) {
                                        // Event time comes straight from the record's own timestamp field.
                                        return element.timestamp;
                                    }
                                })
                );

        // All users must be counted together, so route every record to the same
        // (constant) key and aggregate over a single 10-second tumbling event-time window.
        SingleOutputStreamOperator<Double> res = dataSource.keyBy(data -> true)
                .window(TumblingEventTimeWindows.of(Time.seconds(10L)))
                .aggregate(new pvAndUVAggregationFunction());

        res.print();

        env.execute();
    }

    /**
     * Incremental PV/UV aggregator.
     *
     * <p>Accumulator: {@code Tuple2<Long, HashSet<String>>} — f0 is the PV count,
     * f1 is the set of distinct user names (UV). Result: PV / UV as a double.
     *
     * <p>NOTE(review): the class name violates UpperCamelCase; kept as-is because
     * it is {@code public} and may be referenced elsewhere.
     */
    public static class pvAndUVAggregationFunction
            implements AggregateFunction<Event, Tuple2<Long, HashSet<String>>, Double> {

        @Override
        public Tuple2<Long, HashSet<String>> createAccumulator() {
            return Tuple2.of(0L, new HashSet<>());
        }

        @Override
        public Tuple2<Long, HashSet<String>> add(Event value, Tuple2<Long, HashSet<String>> accumulator) {
            // Each record bumps PV by one; the HashSet deduplicates users for UV.
            accumulator.f0 = accumulator.f0 + 1;
            accumulator.f1.add(value.user);
            return accumulator;
        }

        @Override
        public Double getResult(Tuple2<Long, HashSet<String>> accumulator) {
            // Guard the empty case: 0.0 / 0 would otherwise yield NaN.
            if (accumulator.f1.isEmpty()) {
                return 0.0;
            }
            return (double) accumulator.f0 / accumulator.f1.size();
        }

        @Override
        public Tuple2<Long, HashSet<String>> merge(Tuple2<Long, HashSet<String>> a, Tuple2<Long, HashSet<String>> b) {
            // BUG FIX: the original returned null, which violates the
            // AggregateFunction.merge contract and would NPE under merging
            // window assigners (e.g. session windows). Sum the PV counts and
            // union the UV sets instead.
            a.f1.addAll(b.f1);
            return Tuple2.of(a.f0 + b.f0, a.f1);
        }
    }

}
