import org.apache.flink.api.common.functions.AggregateFunction;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.table.catalog.Catalog;
import org.apache.flink.table.catalog.CatalogFactory;
import org.apache.flink.table.connector.source.LookupTableSource;
import org.apache.flink.table.types.DataType;
import org.apache.flink.types.Row;

/**
 * Flink SQL job that reads user-behavior events from the Kafka topic {@code UserBehavior},
 * counts events per behavior in 60-second tumbling event-time windows, and writes the
 * aggregated counts back to the Kafka topic {@code UserBehavior2}. The same aggregation
 * is also printed to stdout for debugging.
 *
 * <p>Note on execution: {@code tableEnv.executeSql(INSERT ...)} submits its own Flink job
 * immediately (detached), while {@code env.execute(...)} submits a <em>second</em> job
 * containing only the print pipeline. Two separate jobs run on the cluster.
 */
public class FlinkKafkaSQLJob {

    /** Kafka broker list shared by the source and sink tables. */
    private static final String BOOTSTRAP_SERVERS = "node1:9092,node2:9092,node3:9092";

    /** Kafka consumer group id shared by the source and sink tables. */
    private static final String CONSUMER_GROUP = "FlinkConsumer";

    /**
     * Per-behavior event counts over 60-second tumbling event-time windows.
     * Shared by both the INSERT pipeline and the debug SELECT so the two
     * queries cannot drift apart.
     */
    private static final String AGGREGATION_SELECT = "SELECT "
            + "TUMBLE_START(ts, INTERVAL '60' SECOND) AS window_start, "
            + "behavior, "
            + "COUNT(1) AS behavior_count "
            + "FROM user_log "
            + "GROUP BY "
            + "behavior, "
            + "TUMBLE(ts, INTERVAL '60' SECOND)";

    public static void main(String[] args) throws Exception {
        // Set up the streaming execution environment and its Table API bridge.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);

        // Kafka source table (user_log): JSON events with a 15-second
        // bounded-out-of-orderness watermark on the event timestamp `ts`.
        String userLogTable = "CREATE TABLE user_log ("
                + "user_id STRING, "
                + "item_id STRING, "
                + "category_id STRING, "
                + "behavior STRING, "
                + "ts TIMESTAMP(3), "
                + "WATERMARK FOR ts AS ts - INTERVAL '15' SECOND "
                + ") WITH ("
                + "'connector' = 'kafka', "
                + "'topic' = 'UserBehavior', "
                + "'properties.bootstrap.servers' = '" + BOOTSTRAP_SERVERS + "', "
                + "'properties.group.id' = '" + CONSUMER_GROUP + "', "
                + "'scan.startup.mode' = 'latest-offset', "
                + "'format' = 'json'"
                + ")";
        tableEnv.executeSql(userLogTable);

        // Kafka sink table (output_table) for the windowed counts.
        String outputTable = "CREATE TABLE output_table ("
                + "window_start TIMESTAMP(3), "
                + "behavior STRING, "
                + "behavior_count BIGINT "
                + ") WITH ("
                + "'connector' = 'kafka', "
                + "'topic' = 'UserBehavior2', "
                + "'properties.bootstrap.servers' = '" + BOOTSTRAP_SERVERS + "', "
                + "'properties.group.id' = '" + CONSUMER_GROUP + "', "
                + "'format' = 'json'"
                + ")";
        tableEnv.executeSql(outputTable);

        // Continuous INSERT of the windowed aggregation into the Kafka sink.
        // This submits its own job immediately and does not block.
        tableEnv.executeSql("INSERT INTO output_table " + AGGREGATION_SELECT);

        // Debug pipeline: run the same aggregation and print it to stdout.
        Table result = tableEnv.sqlQuery(AGGREGATION_SELECT);

        // Safe as an append stream: a TUMBLE window aggregation emits each
        // window exactly once, so no retractions are produced.
        // NOTE(review): toAppendStream is deprecated since Flink 1.13 — if this
        // project is on 1.13+, prefer tableEnv.toDataStream(result). Kept as-is
        // because the Flink version is not visible here.
        tableEnv.toAppendStream(result, Row.class).print();

        // Submit the print pipeline as a second job.
        env.execute("Flink Kafka SQL Job");
    }
}
