package com.atguigu.gmall.realtime.app.demo;

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.TableResult;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

import java.time.Duration;

/**
 * Demo: join two Kafka-backed dynamic tables with Flink SQL and write the
 * widened result to an upsert-kafka sink.
 *
 * <p>Pipeline: {@code order_info LEFT JOIN order_detail ON id = order_id}
 * → temporary view {@code joined_table} → {@code INSERT INTO order_wide}.
 *
 * <p>Sample input records:
 * <pre>
 * order_info (topic demo_left_topic):
 *   {"id":"101","user_id":"u_101","status":"1001","amount":200,"ts":9900}
 *   {"id":"102","user_id":"u_102","status":"1001","amount":400,"ts":11000}
 *   {"id":"103","user_id":"u_103","status":"1001","amount":600,"ts":15000}
 * order_detail (topic demo_right_topic):
 *   {"id":"990","order_id":"101","sku_id":"s_77","ts":9900}
 *   {"id":"992","order_id":"102","sku_id":"s_77","ts":10500}
 *   {"id":"993","order_id":"103","sku_id":"s_77","ts":15000}
 * </pre>
 */
public class FlinkSQLjoinApp {

    public static void main(String[] args) {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);

        // Retain idle join state for 30 seconds so rows can still pair up with
        // late-arriving counterparts before the state is cleaned up.
        // (The original comment said "20 seconds" while the code set 30000 ms;
        // the value is kept and the comment corrected to match.)
        tableEnv.getConfig().setIdleStateRetention(Duration.ofSeconds(30));

        // 1. Source table: orders, read from Kafka as JSON.
        String createOrderInfoSQL =
                "CREATE TABLE order_info (\n" +
                "  `id` STRING,\n" +
                "  `user_id` STRING,\n" +
                "  `status` STRING,\n" +
                "  `amount` STRING,\n" +
                "  `ts` STRING\n" +
                ") WITH (\n" +
                "  'connector' = 'kafka',\n" +
                "  'topic' = 'demo_left_topic',\n" +
                "  'properties.bootstrap.servers' = 'hadoop102:9092',\n" +
                "  'properties.group.id' = 'flink_sql_join',\n" +
                "  'scan.startup.mode' = 'latest-offset',\n" +
                "  'format' = 'json'\n" +
                ")";
        tableEnv.executeSql(createOrderInfoSQL);

        // 2. Source table: order details, read from Kafka as JSON.
        String createOrderDetailSQL =
                "CREATE TABLE order_detail (\n" +
                "  `id` STRING,\n" +
                "  `order_id` STRING,\n" +
                "  `sku_id` STRING,\n" +
                "  `ts` STRING\n" +
                ") WITH (\n" +
                "  'connector' = 'kafka',\n" +
                "  'topic' = 'demo_right_topic',\n" +
                "  'properties.bootstrap.servers' = 'hadoop102:9092',\n" +
                "  'properties.group.id' = 'flink_sql_join',\n" +
                "  'scan.startup.mode' = 'latest-offset',\n" +
                "  'format' = 'json'\n" +
                ")";
        tableEnv.executeSql(createOrderDetailSQL);

        // 3. Left join: every order, enriched with its detail rows. Detail
        //    columns (od.*) are NULL until a matching detail record arrives.
        Table joinedTable = tableEnv.sqlQuery(
                "select od.* ,oi.user_id,oi.status,oi.amount  " +
                "from order_info oi left join order_detail od on oi.id=od.order_id");
        tableEnv.createTemporaryView("joined_table", joinedTable);

        // 4. Sink table: upsert-kafka keyed by the detail id.
        //    BUG FIX: the key was `user_id`, which made every detail row of the
        //    same user overwrite the previous one in the sink topic; the design
        //    comment and the data model call for one record per detail row, so
        //    the primary key is `detail_id`.
        //    NOTE(review): with a LEFT JOIN, `detail_id` is NULL for orders
        //    that have no detail yet, and upsert-kafka requires a non-null key;
        //    confirm whether an inner join (or a COALESCE'd key) is intended.
        String createOrderWideSQL =
                "CREATE TABLE order_wide (\n" +
                "  detail_id STRING,\n" +
                "  order_id STRING,\n" +
                "  sku_id STRING,\n" +
                "  ts STRING,\n" +
                "  user_id STRING,\n" +
                "  `status` STRING,\n" +
                "  PRIMARY KEY (detail_id) NOT ENFORCED\n" +
                ") WITH (\n" +
                "  'connector' = 'upsert-kafka',\n" +
                "  'topic' = 'demo_order_wide',\n" +
                "  'properties.bootstrap.servers' = 'hadoop102:9092',\n" +
                "  'key.format' = 'json',\n" +
                "  'value.format' = 'json'\n" +
                ")";
        tableEnv.executeSql(createOrderWideSQL);

        // 5. Insert the join result into the sink. Columns map positionally
        //    (id -> detail_id). Reference the registered view by name instead
        //    of relying on Table#toString() auto-registration, which left the
        //    createTemporaryView() call above unused.
        String insertSQL =
                "insert into order_wide " +
                "select id , order_id,sku_id,ts,user_id,status from joined_table";
        tableEnv.executeSql(insertSQL);

        // Note: sqlQuery() only defines a Table (lazy, nothing runs yet);
        // executeSql() actually submits the statement and returns a TableResult
        // whose rows can be printed.
    }
}
