package com.atguigu.edu.realtime.app.demo;

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.TableResult;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

import java.time.Duration;

//Orders (sample records on topic demo_left_topic):
//{"id":"101","user_id":"u_101","status":"1001","amount":200,"ts":9900}
//{"id":"102","user_id":"u_102","status":"1001","amount":400,"ts":11000}
//{"id":"103","user_id":"u_103","status":"1001","amount":600,"ts":15000}
//Order details (sample records on topic demo_right_topic):
//{"id":"992","order_id":"102","sku_id":"s_77","ts":10500}
//{"id":"990","order_id":"101","sku_id":"s_77","ts":9900}
//{"id":"991","order_id":"101","sku_id":"s_77","ts":1000}
//{"id":"993","order_id":"103","sku_id":"s_77","ts":15000}
/**
 * Demo: a regular (unbounded) SQL join of two Kafka topics, with the joined
 * result written back to Kafka through an upsert-kafka sink.
 *
 * Pipeline: order_info (Kafka) LEFT JOIN order_detail (Kafka) -> order_wide (upsert-kafka).
 */
public class FlinkSQLjoinAPP {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);
        // Table/SQL execution environment. Two kinds of operations:
        //   sqlQuery(...)   -> only translates a SELECT into a Table; nothing runs yet
        //   executeSql(...) -> actually submits/executes; the returned TableResult is printable
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);

        // Keep join state for 20 seconds: the maximum time gap between two rows that
        // can still be joined. State older than this is removed, so late partners
        // beyond the gap no longer match.
        tableEnv.getConfig().setIdleStateRetention(Duration.ofSeconds(20));

        // Requirement: join the two tables and write the result to Kafka.
        // 1. Dynamic source table over the order topic.
        String createOrderInfoSQL="     CREATE TABLE order_info (\n" +
                "        `id` STRING,\n" +
                "       `user_id` STRING,\n" +
                "        `status` STRING,\n" +
                "         `amount` STRING,\n" +
                "          `ts` STRING\n" +
                "          ) WITH (\n" +
                "          'connector' = 'kafka',\n" +
                "         'topic' = 'demo_left_topic',\n" +
                "         'properties.bootstrap.servers' = 'hadoop102:9092',\n" +
                "          'properties.group.id' = 'flink_sql_join',\n" +
                "          'scan.startup.mode' = 'latest-offset',\n" +
                "           'format' = 'json'\n" +
                "         )";
        tableEnv.executeSql(createOrderInfoSQL);

        // 2. Dynamic source table over the order-detail topic.
        // Sample detail records:
        //   {"id":"992","order_id":"102","sku_id":"s_77","ts":10500}
        //   {"id":"990","order_id":"101","sku_id":"s_77","ts":19900}
        String createOrderDetailSQL="     CREATE TABLE order_detail (\n" +
                "        `id` STRING,\n" +
                "       `order_id` STRING,\n" +
                "        `sku_id` STRING,\n" +
                "          `ts` STRING\n" +
                "          ) WITH (\n" +
                "          'connector' = 'kafka',\n" +
                "         'topic' = 'demo_right_topic',\n" +
                "         'properties.bootstrap.servers' = 'hadoop102:9092',\n" +
                "          'properties.group.id' = 'flink_sql_join',\n" +
                "          'scan.startup.mode' = 'latest-offset',\n" +
                "           'format' = 'json'\n" +
                "         )";
        tableEnv.executeSql(createOrderDetailSQL);

        // 3. The SELECT ... JOIN, translated into a Table. Replacing "left join"
        // with "join" would make it an inner join and drop orders without details.
        Table joinedTable = tableEnv.sqlQuery("select od.* ,oi.user_id,oi.status,oi.amount  " +
                "from order_info oi left join order_detail od on oi.id=od.order_id");
        tableEnv.createTemporaryView("joined_table", joinedTable);

        // 4. Sink table. upsert-kafka is required because a regular left join emits
        // retractions, which the connector encodes as upsert/delete records keyed
        // by the primary key.
        // NOTE(review): PRIMARY KEY is user_id, but one user can place several
        // orders, so rows of different orders would overwrite each other in the
        // sink; detail_id (or order_id) looks like the intended key -- confirm.
        String createOrderWideSQL="CREATE TABLE order_wide (\n" +
                "           detail_id STRING,\n" +
                "           order_id STRING,\n" +
                "           sku_id STRING,\n" +
                "           ts STRING,\n" +
                "           user_id STRING,\n" +
                "           `status` STRING,\n" +
                "           PRIMARY KEY (user_id) NOT ENFORCED\n" +
                "         ) WITH (\n" +
                "           'connector' = 'upsert-kafka',\n" +
                "           'topic' = 'demo_order_wide',\n" +
                "          'properties.bootstrap.servers' = 'hadoop102:9092',\n" +
                "           'key.format' = 'json',\n" +
                "           'value.format' = 'json'\n" +
                "         )  ";
        tableEnv.executeSql(createOrderWideSQL);

        // 5. INSERT ... SELECT through the explicitly registered view (instead of
        // concatenating the Table object into the SQL string, which relied on
        // Table.toString() registering an anonymous table as a side effect).
        String insertSQL = "insert into order_wide select id , order_id,sku_id,ts,user_id,status from joined_table";
        TableResult insertResult = tableEnv.executeSql(insertSQL);

        // executeSql already submitted the streaming job; await() keeps this local
        // process alive until it terminates. The former env.execute() would fail
        // here with "No operators defined in streaming topology" because the
        // pipeline contains no DataStream operators, only Table API statements.
        insertResult.await();
    }
}
