package com.apps.sdses.flink141.demo;

import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.Schema;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.TableDescriptor;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.types.Row;

/**
 * Demo: builds a Flink job with two table sources — a MySQL CDC table
 * ("products") converted to a changelog stream, and a Kafka JSON table
 * ("orders") converted to an append-only stream with one extra column.
 *
 * @author codejiwei
 * @since 2023/8/10
 **/
public class AlertRuleDemo {

    /**
     * Entry point. Wires up two sources and submits the job:
     * <ol>
     *   <li>a MySQL CDC source over {@code mydb.products}, exposed as a
     *       changelog {@code DataStream<Row>};</li>
     *   <li>a Kafka JSON source over topic {@code drity_data_sink}, mapped to
     *       append one constant extra field per row.</li>
     * </ol>
     *
     * @param args unused command-line arguments
     * @throws Exception if job graph construction or execution fails
     */
    public static void main(String[] args) throws Exception {

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);

        // ---- MySQL CDC source --------------------------------------------
        // Physical schema of mydb.products; the primary key is required by
        // the mysql-cdc connector to produce upsert changelog records.
        Schema schema = Schema.newBuilder()
                .column("id", DataTypes.INT().notNull())
                .column("name", DataTypes.STRING())
                .column("description", DataTypes.STRING())
                .primaryKey("id")
                .build();

        TableDescriptor descriptor = TableDescriptor.forConnector("mysql-cdc")
                .schema(schema)
                .option("hostname", "localhost")
                .option("port", "3306")
                .option("username", "root")
                .option("password", "123456")
                .option("database-name", "mydb")
                .option("table-name", "products")
                .build();

        tableEnv.createTemporaryTable("node1", descriptor);
        Table cdcTable = tableEnv.from("node1");
        // NOTE(review): this changelog stream is never attached to a sink, so
        // its records are discarded. Add e.g. cdcDS.print() (or a real sink)
        // if the CDC side of the demo should produce output.
        DataStream<Row> cdcDS = tableEnv.toChangelogStream(cdcTable);

        // ---- Kafka source -------------------------------------------------
        // NOTE(review): topic name 'drity_data_sink' looks like a typo of
        // 'dirty_data_sink' — confirm against the actual Kafka topic before
        // renaming, since it is a live configuration value.
        tableEnv.executeSql("CREATE TABLE orders (\n" +
                "  order_id BIGINT,\n" +
                "  order_date TIMESTAMP(3),\n" +
                "  customer_name STRING,\n" +
                "  price DECIMAL(10,5),\n" +
                "  product_id INT,\n" +
                "  order_status TINYINT,\n" +
                "  WATERMARK FOR order_date AS order_date - INTERVAL '10' SECOND\n" +
                ") WITH (\n" +
                "  'connector' = 'kafka',\n" +
                "  'topic' = 'drity_data_sink',\n" +
                "  'properties.group.id' = 'test_group_id',\n" +
                "  'scan.startup.mode' = 'latest-offset',\n" +
                "  'properties.bootstrap.servers' = '192.168.102.154:29092',\n" +
                "  'format' = 'json'\n" +
                ")");
        Table table = tableEnv.from("orders");
        DataStream<Row> rowDataStream = tableEnv.toDataStream(table);

        // Append one constant field ("name") to every order row.
        // Row.join concatenates the fields of both rows positionally.
        // NOTE(review): like cdcDS above, this stream has no sink attached,
        // so the mapped rows are never emitted anywhere.
        SingleOutputStreamOperator<Row> map = rowDataStream.map(new MapFunction<Row, Row>() {
            @Override
            public Row map(Row value) {
                return Row.join(value, Row.of("name"));
            }
        });

        env.execute();
    }
}
