package com.intct.util;

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * @author gufg
 * @since 2025-06-30 17:12
 */
/**
 * Flink SQL job: reads JSON records ({@code id}, {@code name}) from a local file
 * via the filesystem connector and upserts them into a Kafka topic through the
 * upsert-kafka connector (keyed on {@code id}).
 *
 * <p>NOTE(review): the file path, broker address and topic name are hard-coded;
 * consider passing them in via {@code args} for anything beyond local testing.
 */
public class FlinkSQL1 {
    public static void main(String[] args) {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        StreamTableEnvironment tEnv = StreamTableEnvironment.create(env);

        // Source table: JSON lines read from a local file.
        // id is BIGINT so the source column matches the sink's primary-key
        // column exactly (previously INT here vs BIGINT in the sink, which
        // relied on implicit widening during the INSERT).
        tEnv.executeSql("CREATE TABLE file_test (" +
                "  id BIGINT, " +
                "  name STRING " +
                ") WITH (" +
                "  'connector' = 'filesystem'," +
                "  'path' = 'file:///D:/test/1.txt'," +
                "  'format' = 'json'" +
                ")");

        Table table = tEnv.sqlQuery("select * from file_test");

        // Sink table: upsert-kafka requires a PRIMARY KEY (NOT ENFORCED);
        // records sharing the same id are upserted (last write wins per key).
        tEnv.executeSql("CREATE TABLE pageviews (" +
                "  id BIGINT," +
                "  name STRING," +
                "  PRIMARY KEY (id) NOT ENFORCED" +
                ") WITH (" +
                "  'connector' = 'upsert-kafka'," +
                "  'topic' = 'intct80'," +
                "  'properties.bootstrap.servers' = 'cdh-node:9092'," +
                "  'key.format' = 'json'," +
                "  'value.format' = 'json'" +
                ")");

        // Submits the INSERT pipeline; the returned TableResult is not awaited
        // here, so the job runs detached from this main method.
        table.executeInsert("pageviews");
    }
}
