package com.intct.ods;

import com.intct.common.FlinkSqlWithUtil;
import com.intct.utils.FlinkChenageLogConverUtil;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.types.Row;

/**
 * @author gufg
 * @since 2025-10-25 16:25
 */
public class OdsOrderSQLBAK {

    /**
     * Column list shared by the MySQL CDC source table and the Kafka sink table.
     * Extracted into one constant so the two DDL statements cannot drift apart.
     * Column order intentionally matches the original DDL (note: go_on_lng comes
     * before receive_location).
     */
    private static final String ORDER_INFO_COLUMNS =
            "     id                 INT           " +
            "    ,op                 STRING        " +
            "    ,order_no           STRING        " +
            "    ,vehicle_id         INT           " +
            "    ,driver_id          INT           " +
            "    ,renter_id          INT           " +
            "    ,order_type         INT           " +
            "    ,state              INT           " +
            "    ,cancel             INT           " +
            "    ,work_flow          INT           " +
            "    ,close_time         TIMESTAMP(3)  " +
            "    ,close_user_id      INT           " +
            "    ,end_reason         STRING        " +
            "    ,create_time        TIMESTAMP(3)  " +
            "    ,cancel_time        TIMESTAMP(3)  " +
            "    ,begin_address_code STRING        " +
            "    ,navigation_brand   INT           " +
            "    ,open_lng           DOUBLE        " +
            "    ,open_lat           DOUBLE        " +
            "    ,open_location      STRING        " +
            "    ,receive_lng        DOUBLE        " +
            "    ,receive_lat        DOUBLE        " +
            "    ,go_on_lng          DOUBLE        " +
            "    ,receive_location   STRING        " +
            "    ,go_on_lat          DOUBLE        " +
            "    ,go_on_location     STRING        " +
            "    ,call_mode          INT           " +
            "    ,update_time        TIMESTAMP(3)  " +
            "    ,pay_amount         DOUBLE        " +
            "    ,PRIMARY KEY(id) NOT ENFORCED     ";

    /**
     * ODS job entry point: reads order_info change events from MySQL via CDC,
     * normalizes the changelog (filling the "op" column) through
     * FlinkChenageLogConverUtil, and writes the result to the Kafka topic
     * "dwd_orders".
     *
     * @param args unused
     * @throws Exception if the Flink job fails to submit
     */
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        StreamTableEnvironment tenv = StreamTableEnvironment.create(env);
        env.setParallelism(1);
        env.enableCheckpointing(5000L); // one checkpoint every 5 seconds

        // MySQL CDC source over travel.order_info.
        // scan.startup.mode = latest-offset: skip the initial table snapshot and
        // read only changes produced after the connector starts. Other valid modes:
        //   initial (default) — snapshot first, then continue from the binlog
        //   earliest-offset   — no snapshot, read from the earliest readable binlog position
        //   specific-offset   — no snapshot, start from a given binlog file/position or GTID set
        //   timestamp         — no snapshot, start from binlog events at a given timestamp
        tenv.executeSql("CREATE TABLE mysql_dwd_order_info ( "
                + ORDER_INFO_COLUMNS
                + ")" + FlinkSqlWithUtil.getMysqlCdcWith("travel", "order_info", "latest-offset"));

        // SQL -> DataStream: expose the raw changelog rows so the project util can
        // populate the "op" (operation type) column.
        // NOTE(review): "Chenage"/"Conver" are typos in the util class name itself;
        // they must stay as-is here to keep the reference compiling.
        DataStream<Row> mysqlDwdOrderInfo = tenv.toChangelogStream(tenv.from("mysql_dwd_order_info"));
        SingleOutputStreamOperator<Row> dwdOrderInfo =
                FlinkChenageLogConverUtil.changeLogConver(mysqlDwdOrderInfo, "op");

        // DataStream -> SQL: register the converted stream as a temporary view.
        tenv.createTemporaryView("conver_dwd_order_info", tenv.fromDataStream(dwdOrderInfo));

        // Kafka sink table (topic "dwd_orders"), same schema as the source.
        tenv.executeSql("CREATE TABLE kafka_dwd_order_info ( "
                + ORDER_INFO_COLUMNS
                + ")" + FlinkSqlWithUtil.getKafkaSinkWith("dwd_orders"));

        // executeSql(INSERT ...) submits the streaming job by itself; a trailing
        // env.execute() was removed because no DataStream sink is attached to the
        // environment and it would throw "No operators defined in streaming topology".
        tenv.executeSql("INSERT INTO kafka_dwd_order_info SELECT * FROM conver_dwd_order_info");
    }
}
