package com.intct.func;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.serialization.DeserializationSchema;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.co.ProcessJoinFunction;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.util.Collector;

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.time.Duration;

/**
 * @author gufg
 * @since 2025-09-19 08:48
 */
public class FlinkIntervalJoin {

    /** Kafka broker list shared by both sources. */
    private static final String BOOTSTRAP_SERVERS = "hadoop102:9092";

    /** Risk scores at or above this threshold are emitted downstream. */
    private static final int RISK_SCORE_THRESHOLD = 70;

    /**
     * Entry point: interval-joins a Kafka "transactions" stream with a Kafka
     * "riskScores" stream on {@code customerId} (event time within ±10s) and
     * prints merged records whose risk score reaches the alert threshold.
     *
     * @param args unused
     * @throws Exception if the Flink job fails to start or execute
     */
    public static void main(String[] args) throws Exception {

        // 1. Build the streaming execution environment.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);

        // Watermark strategy: tolerate up to 5s of out-of-order events and read the
        // event time from the "timestamp" field of the JSON payload.
        WatermarkStrategy<JSONObject> watermarkStrategy = WatermarkStrategy
                .<JSONObject>forBoundedOutOfOrderness(Duration.ofSeconds(5))
                .withTimestampAssigner((event, recordTimestamp) -> {
                    // Fall back to the Kafka record timestamp when the field is
                    // absent, instead of throwing an unboxing NPE.
                    Long eventTime = event.getLong("timestamp");
                    return eventTime != null ? eventTime : recordTimestamp;
                });

        // 2/3. Event-time transaction stream consumed from Kafka.
        DataStreamSource<JSONObject> transactionDS = env.fromSource(
                buildJsonSource("transactions", "flink-consumer-group-transactions"),
                watermarkStrategy,
                "Transaction Source"
        );

        // 4/5. Event-time risk-assessment stream consumed from Kafka.
        DataStreamSource<JSONObject> riskScoreDS = env.fromSource(
                buildJsonSource("riskScores", "flink-consumer-group-riskscores"),
                watermarkStrategy,
                "riskScore Source"
        );

        // 6. Interval join on customerId: a transaction matches every risk score
        //    whose event time lies within [-10s, +10s] of the transaction's time.
        transactionDS.keyBy(jsonObj -> jsonObj.getString("customerId"))
                .intervalJoin(riskScoreDS.keyBy(jsonObj -> jsonObj.getString("customerId")))
                .between(Time.seconds(-10), Time.seconds(10))
                .process(new ProcessJoinFunction<JSONObject, JSONObject, JSONObject>() {
                    @Override
                    public void processElement(JSONObject transaction, JSONObject riskScore,
                                               Context ctx, Collector<JSONObject> out) throws Exception {
                        // Holds the merged result of both sides of the join.
                        JSONObject combined = new JSONObject();

                        // Copy all fields from both records, prefixed to avoid key
                        // clashes. ("trans_" fixes the original "trains_" typo.)
                        for (String key : transaction.keySet()) {
                            combined.put("trans_" + key, transaction.get(key));
                        }
                        for (String key : riskScore.keySet()) {
                            combined.put("risk_" + key, riskScore.get(key));
                        }

                        // Event-time gap between the two records, in seconds
                        // (-1 when either side lacks a "timestamp" field).
                        Long txTime = transaction.getLong("timestamp");
                        Long riskTime = riskScore.getLong("timestamp");
                        long timeDifferenceSec = (txTime != null && riskTime != null)
                                ? Math.abs(txTime - riskTime) / 1000
                                : -1L;
                        combined.put("timeDifferenceSec", timeDifferenceSec);

                        // Wall-clock time at which this pair was processed.
                        combined.put("processingTime", System.currentTimeMillis());

                        // Emit only when the risk score reaches the alert threshold.
                        // A missing score counts as "not risky" rather than NPE-ing
                        // on auto-unboxing.
                        Integer score = riskScore.getInteger("score");
                        if (score != null && score >= RISK_SCORE_THRESHOLD) {
                            out.collect(combined);
                        }
                    }
                }).print();

        // Launch the Flink job.
        env.execute("Flink Interval Join");
    }

    /**
     * Builds a Kafka source that reads JSON values from {@code topic}, starting
     * at the latest offsets and deserializing each value into a {@link JSONObject}.
     *
     * @param topic   Kafka topic to consume
     * @param groupId consumer group id for this source
     * @return a configured {@link KafkaSource} of parsed JSON objects
     */
    private static KafkaSource<JSONObject> buildJsonSource(String topic, String groupId) {
        return KafkaSource.<JSONObject>builder()
                .setBootstrapServers(BOOTSTRAP_SERVERS)
                .setTopics(topic)
                .setGroupId(groupId)
                .setStartingOffsets(OffsetsInitializer.latest())  // consume from the newest position
                .setValueOnlyDeserializer(createDeserializer())   // custom JSON deserializer
                .build();
    }

    /**
     * Custom value deserializer for Kafka messages:
     * <ol>
     *   <li>returns {@code null} for null/empty payloads (Flink's default
     *       {@code deserialize(byte[], Collector)} silently drops null results);</li>
     *   <li>parses the UTF-8 JSON string into a {@link JSONObject} so downstream
     *       operators can read fields directly.</li>
     * </ol>
     *
     * @return a stateless {@link DeserializationSchema} producing {@link JSONObject}s
     */
    public static DeserializationSchema<JSONObject> createDeserializer() {

        return new DeserializationSchema<JSONObject>() {

            @Override
            public TypeInformation<JSONObject> getProducedType() {
                return TypeInformation.of(JSONObject.class); // produced element type
            }

            @Override
            public JSONObject deserialize(byte[] message) throws IOException {
                // Skip null and zero-length payloads (e.g. Kafka tombstone records).
                if (message == null || message.length == 0) {
                    return null;
                }
                // Decode explicitly as UTF-8: new String(byte[]) would otherwise use
                // the platform default charset and corrupt non-ASCII JSON.
                String jsonString = new String(message, StandardCharsets.UTF_8);
                try {
                    return JSON.parseObject(jsonString); // parse with FastJSON
                } catch (Exception e) {
                    System.out.println("反序列化失败, 消息内容: " + jsonString);
                    e.printStackTrace();
                    return null; // drop unparseable records instead of failing the job
                }
            }

            @Override
            public boolean isEndOfStream(JSONObject jsonObject) {
                return false; // unbounded stream: no end-of-stream marker
            }
        };
    }


}