package com.zhisheng.sql.blink.stream.example;

import com.alibaba.fastjson.JSON;
import org.apache.flink.api.common.state.ValueState;
import org.apache.flink.api.common.state.ValueStateDescriptor;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.configuration.ConfigOptions;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.MemorySize;
import org.apache.flink.configuration.TaskManagerOptions;
import org.apache.flink.connector.jdbc.JdbcConnectionOptions;
import org.apache.flink.connector.jdbc.JdbcExecutionOptions;
import org.apache.flink.connector.jdbc.JdbcSink;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.KeyedProcessFunction;
import org.apache.flink.streaming.api.functions.sink.SinkFunction;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.TableConfig;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.types.Row;
import org.apache.flink.types.RowKind;
import org.apache.flink.util.Collector;

import java.util.Date;


/**
 * CasinoWinJob
 *
 * @author yongxiang
 * @date 2025/2/28 14:59
 * @desc
 */
public class CasinoWinJob_test {
    /**
     * Entry point: reads Debezium CDC streams for topology (dimension) and bet (fact)
     * tables from Kafka, joins them to resolve the hall/pit/table path, aggregates
     * casino win per path, throttles updates per key, and pushes them to a websocket sink.
     *
     * @param args unused
     * @throws Exception if the Flink job fails to submit or execute
     */
    public static void main(String[] args) throws Exception {

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Streaming-mode table environment; early-fire lets the GROUP BY emit
        // intermediate results every second instead of waiting for window close.
        EnvironmentSettings settings = EnvironmentSettings.newInstance().inStreamingMode().build();
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env, settings);
        TableConfig config = tableEnv.getConfig();
        config.getConfiguration().setBoolean("table.exec.emit.early-fire.enabled", true);
        config.getConfiguration().setString("table.exec.emit.early-fire.delay", "1s");
        config.getConfiguration().set(TaskManagerOptions.NETWORK_MEMORY_FRACTION, 0.4F);
        env.setParallelism(2);
        // 32 KiB network buffers, 1 GiB network memory floor/ceiling.
        config.getConfiguration().set(TaskManagerOptions.MEMORY_SEGMENT_SIZE, new MemorySize(32768));
        config.getConfiguration().set(TaskManagerOptions.NETWORK_MEMORY_MIN, new MemorySize(1024 * 1024 * 1024L));
        config.getConfiguration().set(TaskManagerOptions.NETWORK_MEMORY_MAX, new MemorySize(1024 * 1024 * 1024L));

        // Debezium-backed Kafka source: topology dimension table.
        // NOTE(review): the topic literal ends with a trailing space
        // ('mysql-server.test_sm.t_topology ') — almost certainly a paste error;
        // confirm the real Kafka topic name before deploying.
        tableEnv.executeSql(
                "CREATE TABLE t_topology ("
                        + "  topology_id INT,"
                        + "  parent_topology_id INT,"
                        + "  name STRING, "
                        + "  status STRING, "
                        + "  topology_type_id INT "
                        + ") WITH ("
                        + "  'connector' = 'kafka',"
                        + "  'topic' = 'mysql-server.test_sm.t_topology ',"
                        + "  'properties.bootstrap.servers' = '192.168.50.235:9092,192.168.50.235:9093,192.168.50.235:9094',"
                        + "  'properties.group.id' = 'websocket_consumer',"
                        + "  'scan.startup.mode' = 'earliest-offset',"
                        + "  'format' = 'debezium-json',"
                        + "  'debezium-json.schema-include' = 'true'"
                        + ")");

        // Filtered view: only active topology nodes of the types used for
        // table (300), pit (400) and hall (1200) levels.
        String createFilteredView =
                "CREATE TEMPORARY VIEW t_topology_view AS \n" +
                        "SELECT topology_id, parent_topology_id, name,topology_type_id \n" +
                        "FROM t_topology \n" +
                        "WHERE status = 'ACTIVE' AND topology_type_id IN (300,400,1200)";
        tableEnv.executeSql(createFilteredView);


        // Debezium-backed Kafka source: bet fact table. payout_complete_dtm is a Unix
        // epoch in milliseconds; ts subtracts 8 hours before conversion.
        // NOTE(review): the "- 8 * 3600 * 1000" looks like a UTC+8 timezone shift baked
        // into event time — confirm the source timestamps' timezone.
        tableEnv.executeSql(
                "CREATE TABLE t_bet  ("
                        + "  bet_id INT,"
                        + "  wager DOUBLE,"
                        + "  casino_win DOUBLE, "
                        + "  table_id INT, "
                        + "  payout_complete_dtm BIGINT, "
                        + "  ts AS TO_TIMESTAMP_LTZ(payout_complete_dtm - 8 * 3600 * 1000, 3), "
                        + "  WATERMARK FOR ts AS ts - INTERVAL '5' SECOND"
                        + ") WITH ("
                        + "  'connector' = 'kafka',"
                        + "  'topic' = 'mysql-server.test_sm.t_bet',"
                        + "  'properties.bootstrap.servers' = '192.168.50.235:9092,192.168.50.235:9093,192.168.50.235:9094',"
                        + "  'properties.group.id' = 'websocket_consumer',"
                        + "  'scan.startup.mode' = 'latest-offset',"
                        + "  'format' = 'debezium-json',"
                        + "  'debezium-json.schema-include' = 'true'"
                        + ")");


        // Self-join the topology view three times to walk table -> pit -> hall,
        // then sum casino win per resolved path.
        Table result = tableEnv.sqlQuery(
                "   Select \n" +
                        "    tto.name AS table_name, \n" +
                        "    ttp.name AS pit_name, \n" +
                        "    tth.name AS hall,\n" +
                        "    SUM(tbu.casino_win) AS casino_win \n" +
                        "    FROM t_bet tbu \n" +
                        "    LEFT JOIN t_topology_view tto ON tbu.table_id=tto.topology_id\n" +
                        "    LEFT JOIN t_topology_view ttp ON tto.parent_topology_id = ttp.topology_id\n" +
                        "    LEFT JOIN t_topology_view tth ON ttp.parent_topology_id = tth.topology_id\n" +
                        " GROUP BY " +
                        "  tto.name,ttp.name,tth.name"
        );


        // Debug output of the intermediate tables.
        Table topologyTable = tableEnv.from("t_topology_view");
        tableEnv.toChangelogStream(topologyTable).print("t_topology_view");
        Table betTable = tableEnv.from("t_bet");
        tableEnv.toChangelogStream(betTable).print("t_bet");

        // Convert the aggregate to a changelog stream; only UPDATE_AFTER rows carry
        // the new aggregate value, so everything else is dropped.
        DataStream<Row> resultStream = tableEnv.toChangelogStream(result);
        DataStream<SumWin> jsonStream = resultStream
                .filter(e -> e.getKind() == RowKind.UPDATE_AFTER) // enum constants compare with ==
                .map(row -> {
                    String tableName = row.getFieldAs("table_name");
                    String pitName = row.getFieldAs("pit_name");
                    String hall = row.getFieldAs("hall");
                    Double casinoWin = row.getFieldAs("casino_win");
                    SumWin sumWin = new SumWin();
                    String path = "/" + hall + "/" + pitName + "/" + tableName;
                    sumWin.setPath(path.replace(" ", "")); // strip ordinary spaces from the path
                    sumWin.setSumWin(casinoWin);
                    sumWin.setDate(new Date());
                    return sumWin;
                });

        // Throttle to at most one emission per path per second.
        DataStream<SumWin> delayedStream = jsonStream
                .keyBy(SumWin::getPath)
                .process(new DelayedOutputFunction(1000));

        // Push the throttled results to the websocket sink.
        delayedStream.addSink(new WebSocketSink());
        env.execute("Real-time total....");

    }

    /**
     * Payload pushed to the websocket sink: an aggregated casino win amount keyed by
     * its topology path ("/hall/pit/table") plus the wall-clock time it was produced.
     *
     * <p>Mutable JavaBean so it can be serialized by Flink and fastjson. Not thread-safe.
     */
    public static class SumWin {
        // Topology path of the aggregate, e.g. "/MassXXX/PitXXX/GMCMXXX".
        private String path;
        // Aggregated casino win. Renamed from "SumWin": the old field name shadowed the
        // class name and differed from the setter parameter only by letter case.
        private Double sumWin;
        // Creation timestamp; defaults to "now" so callers may omit setDate().
        private Date date = new Date();

        public String getPath() {
            return path;
        }

        public void setPath(String path) {
            this.path = path;
        }

        public Double getSumWin() {
            return sumWin;
        }

        public void setSumWin(Double sumWin) {
            this.sumWin = sumWin;
        }

        public Date getDate() {
            return date;
        }

        public void setDate(Date date) {
            this.date = date;
        }

        @Override
        public String toString() {
            // Label "SumWin=" kept byte-identical to the original output format.
            return "SumWin{" +
                    "path='" + path + '\'' +
                    ", SumWin=" + sumWin +
                    ", date=" + date +
                    '}';
        }
    }

    /**
     * Per-key throttle: remembers the most recent {@link SumWin} seen for each path and
     * emits it once a processing-time delay has elapsed, collapsing bursts of aggregate
     * updates into at most one output record per key per delay window.
     */
    public static class DelayedOutputFunction
            extends KeyedProcessFunction<String, SumWin, SumWin> {

        /** Processing-time delay in milliseconds between the first buffered update and emission. */
        private final long delayMillis;
        /** Newest value seen for the current key since the last emission. */
        private transient ValueState<SumWin> latestState;
        /** Trigger timestamp of the pending timer for the current key, or null when none is armed. */
        private transient ValueState<Long> timerState;

        public DelayedOutputFunction(long delayMillis) {
            this.delayMillis = delayMillis;
        }

        @Override
        public void open(Configuration parameters) {
            // Keyed state handles: one slot for the latest value, one for the timer timestamp.
            latestState = getRuntimeContext().getState(
                    new ValueStateDescriptor<>("latest-sumwin", SumWin.class));
            timerState = getRuntimeContext().getState(
                    new ValueStateDescriptor<>("timer", Long.class));
        }

        @Override
        public void processElement(SumWin value, Context ctx, Collector<SumWin> out) throws Exception {
            // The newest value always supersedes whatever was buffered before it.
            latestState.update(value);

            // Arm a timer only when none is pending, so each key fires at most once
            // per delay window regardless of how many updates arrive in between.
            if (timerState.value() == null) {
                long fireAt = ctx.timerService().currentProcessingTime() + delayMillis;
                ctx.timerService().registerProcessingTimeTimer(fireAt);
                timerState.update(fireAt);
            }
        }

        @Override
        public void onTimer(long timestamp, OnTimerContext ctx, Collector<SumWin> out) throws Exception {
            // Emit whatever arrived during the delay window, if anything.
            SumWin pending = latestState.value();
            if (pending != null) {
                out.collect(pending);
            }

            // Clear the timer slot so the next element for this key can arm a fresh timer.
            timerState.clear();
        }
    }
}