package com.xujian.cdc.sink.starrocks;

import com.alibaba.fastjson2.JSONObject;
import com.xujian.cdc.schema.MyKafkaDeserializationSchema;
import com.xujian.common.beans.ChannelOrder;
import com.xujian.common.beans.DatabaseMapping;
import com.xujian.common.function.MyAggFunction;
import com.xujian.common.util.EnviromentUtil;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.functions.FilterFunction;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.java.tuple.Tuple3;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer;
import org.apache.flink.connector.kafka.source.reader.deserializer.KafkaRecordDeserializationSchema;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.windowing.assigners.TumblingEventTimeWindows;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.util.OutputTag;

import java.time.Duration;

/**
 * Flink streaming job: consumes channel-order CDC messages from Kafka, aggregates
 * order counts and amounts per organization in 30-minute event-time windows, and
 * prints the result (intended sink: StarRocks).
 *
 * @author star xu
 * @date 2023/1/29 23:20
 * @Slogan: 致敬大师, 致敬未来的自己
 */
public class Kafka2StarrockCalcuOrder {
    public static void main(String[] args) throws Exception {
        // 1. Build the streaming environment (state backend selected by storageType).
        String storageType = "";
        final StreamExecutionEnvironment env = EnviromentUtil.build(storageType);
        env.setParallelism(1);

        // 2. Kafka source for the channel-order CDC topic.
        String servers = "192.168.122.1:9092";
        String topic = "whe_member_meinian_channel_order1";
        String groupId = "my-group";
        KafkaSource<DatabaseMapping> source = buildSource(servers, topic, groupId);
        DataStream<DatabaseMapping> stream = env.fromSource(source, WatermarkStrategy.noWatermarks(), "Kafka source");

        // 3. Parse each raw CDC message into a ChannelOrder, then drop
        //    cancelled orders (status == 2) and records without an order time.
        SingleOutputStreamOperator<ChannelOrder> dataMsg = stream.rebalance().map(new MapFunction<DatabaseMapping, ChannelOrder>() {
            @Override
            public ChannelOrder map(DatabaseMapping value) throws Exception {
                JSONObject jsonObject = JSONObject.parseObject(value.getMessage());
                ChannelOrder channelOrder = new ChannelOrder();
                channelOrder.setId(jsonObject.getLong("id"));
                channelOrder.setOrderNo(jsonObject.getString("order_no"));
                channelOrder.setOrganizationName(jsonObject.getString("organization_name"));
                channelOrder.setPackagePrice(jsonObject.getDouble("package_price"));
                channelOrder.setOrderTime(jsonObject.getDate("order_time"));
                // FIX: order_status was never copied from the JSON payload, so the
                // downstream filter on getOrderStatus() only ever saw the bean's
                // default value and never actually excluded cancelled orders.
                channelOrder.setOrderStatus(jsonObject.getIntValue("order_status"));
                return channelOrder;
            }
        }).filter(new FilterFunction<ChannelOrder>() {
            @Override
            public boolean filter(ChannelOrder value) throws Exception {
                // Guard against null order_time: the timestamp assigner below calls
                // getOrderTime().getTime() and would NPE (failing the job) otherwise.
                return value.getOrderTime() != null && value.getOrderStatus() != 2;
            }
        });

        // 4. Event-time windowed aggregation per organization.
        //    Watermarks tolerate 2 ms of out-of-orderness; idle partitions are
        //    marked after 10 s so they do not hold back the watermark.
        WatermarkStrategy<ChannelOrder> watermarkStrategy = WatermarkStrategy.<ChannelOrder>forBoundedOutOfOrderness(Duration.ofMillis(2))
                .withTimestampAssigner(((element, recordTimestamp) -> element.getOrderTime().getTime()))
                .withIdleness(Duration.ofSeconds(10));

        // Side output for records that arrive after the allowed lateness.
        OutputTag<ChannelOrder> outputTag = new OutputTag<ChannelOrder>("late-data") {
        };
        SingleOutputStreamOperator<Tuple3<String, Integer, Double>> result = dataMsg.assignTimestampsAndWatermarks(watermarkStrategy)
                .keyBy(ChannelOrder::getOrganizationName)
                .window(TumblingEventTimeWindows.of(Time.minutes(30)))  // 30-minute tumbling event-time window
                .allowedLateness(Time.seconds(30))  // keep the window for 30 s of extra lateness
                .sideOutputLateData(outputTag)  // route too-late records to the side output
                .apply(new MyAggFunction());

        // 5. Sink: currently printed to stdout; TODO write to StarRocks.
        result.print();
        // FIX: the late-data side output was tagged but never consumed, so
        // too-late records were silently dropped; surface them for inspection.
        result.getSideOutput(outputTag).print("late-data");

        // 6. Execute the job (name was a leftover "Skeleton" template string).
        env.execute("Kafka2StarrockCalcuOrder");
    }

    /**
     * Builds a Kafka source for {@link DatabaseMapping} records.
     *
     * @param servers bootstrap server list, host:port[,host:port...]
     * @param topic   topic to subscribe to
     * @param groupId consumer group id
     * @return a configured {@link KafkaSource} starting from the earliest offsets
     */
    private static KafkaSource<DatabaseMapping> buildSource(String servers, String topic, String groupId) {
        return KafkaSource.<DatabaseMapping>builder()
                .setBootstrapServers(servers)
                .setTopics(topic)
                .setGroupId(groupId)
                .setStartingOffsets(OffsetsInitializer.earliest())
                .setDeserializer(KafkaRecordDeserializationSchema.of(new MyKafkaDeserializationSchema()))
                .build();
    }
}
