package com.atguigu.edu.realtime.app.demo;

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * Demo Flink SQL job: enriches a Kafka order stream with a MySQL dimension
 * table ({@code base_dic}) via a processing-time lookup join, and writes the
 * widened rows to an upsert-kafka sink topic.
 *
 * <p>Input order record (JSON on topic {@code demo_left_topic}), e.g.:
 * <pre>{"id":"101","user_id":"u_101","status":"1001","amount":200,"ts":9900}</pre>
 *
 * <p>NOTE(review): broker/DB endpoints and credentials are hard-coded demo
 * values (hadoop102, root/000000) — externalize before any real deployment.
 */
public class FlinkSQLLookupJoinApp {

    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Table environment; all statements below go through the Table API,
        // so no explicit env.execute() is needed — executeInsert submits the job.
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);

        // 1. Source: order stream from Kafka.
        tableEnv.executeSql(orderInfoDdl());
        // 2. Dimension: dictionary table in MySQL, read on demand via JDBC lookup.
        tableEnv.executeSql(baseDicDdl());
        // 3. Sink: widened orders to an upsert-kafka topic (keyed by order id).
        tableEnv.executeSql(orderWideDdl());

        // Debug aid (blocks forever — keep commented out in the pipeline):
        // tableEnv.executeSql("select * from order_info").print();

        // Lookup join: the stream side must expose a processing-time attribute
        // (proc_time, declared in the source DDL) for FOR SYSTEM_TIME AS OF.
        Table lookupJoinTable = tableEnv.sqlQuery(
                "SELECT id, user_id, status, dic_name AS status_name, amount, ts\n" +
                "FROM order_info AS o\n" +
                "JOIN base_dic FOR SYSTEM_TIME AS OF o.proc_time AS dic\n" +
                "  ON o.status = dic.dic_code");

        // Submit the streaming insert job.
        lookupJoinTable.executeInsert("order_wide");
    }

    /** DDL for the Kafka-backed order stream; proc_time is required by the lookup join. */
    private static String orderInfoDdl() {
        return "CREATE TABLE order_info (\n" +
                "  `id` STRING,\n" +
                "  `user_id` STRING,\n" +
                "  `status` STRING,\n" +
                "  `amount` STRING,\n" +
                "  `ts` STRING,\n" +
                "  `proc_time` AS proctime()\n" +
                ") WITH (\n" +
                "  'connector' = 'kafka',\n" +
                "  'topic' = 'demo_left_topic',\n" +
                "  'properties.bootstrap.servers' = 'hadoop102:9092',\n" +
                "  'properties.group.id' = 'flink_sql_join',\n" +
                "  'scan.startup.mode' = 'latest-offset',\n" +
                "  'format' = 'json'\n" +
                ")";
    }

    /** DDL for the MySQL dictionary (dimension) table with a bounded lookup cache. */
    private static String baseDicDdl() {
        return "CREATE TABLE base_dic (\n" +
                "  dic_code STRING,\n" +
                "  dic_name STRING,\n" +
                "  PRIMARY KEY (dic_code) NOT ENFORCED\n" +
                ") WITH (\n" +
                "  'connector' = 'jdbc',\n" +
                "  'url' = 'jdbc:mysql://hadoop102:3306/edu',\n" +
                "  'table-name' = 'base_dic',\n" +
                "  'username' = 'root',\n" +
                "  'password' = '000000', \n" +
                // Cache at most 1000 rows, each for up to 60s, to limit DB round-trips.
                "  'lookup.cache.max-rows' = '1000',\n" +
                "  'lookup.cache.ttl' = '60s'\n" +
                ")";
    }

    /** DDL for the upsert-kafka sink holding the enriched (widened) orders. */
    private static String orderWideDdl() {
        return "CREATE TABLE order_wide (\n" +
                "  id STRING,\n" +
                "  user_id STRING,\n" +
                "  `status` STRING,\n" +
                "  status_name STRING,\n" +
                "  amount STRING,\n" +
                "  `ts` STRING,\n" +
                "  PRIMARY KEY (id) NOT ENFORCED\n" +
                ") WITH (\n" +
                "  'connector' = 'upsert-kafka',\n" +
                "  'topic' = 'demo_order_wide',\n" +
                "  'properties.bootstrap.servers' = 'hadoop102:9092',\n" +
                "  'key.format' = 'json',\n" +
                "  'value.format' = 'json'\n" +
                ")";
    }
}
























