package com.atguigu.gmall.realtime.test;

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * ClassName: Flink02_Demo
 * Description:
 * Demo of a fact-table enrichment pipeline (modeled after a comment fact table).
 * Requirements:
 *      Read employee data from Kafka.
 *      Read department data from HBase.
 *      Join employees with departments (lookup join).
 *      Write the join result to a Kafka topic.
 * @Create 2024/4/20 10:05
 */
public class Flink02_Demo {
    /**
     * Pipeline entry point: Kafka (emp) --lookup join--> HBase (dept) --> upsert-kafka (joined_table).
     *
     * <p>Note: no {@code env.execute()} is needed — {@code executeInsert} submits its own job.
     *
     * @param args unused command-line arguments
     */
    public static void main(String[] args) {
        //TODO Environment setup
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);
        //Checkpointing (omitted for this demo)

        //TODO Read employee data from Kafka
        //proc_time is a processing-time attribute; it is required by the
        //FOR SYSTEM_TIME AS OF lookup join below.
        tableEnv.executeSql("CREATE TABLE emp (\n" +
                "  empno string,\n" +
                "  ename string,\n" +
                "  deptno string,\n" +
                "  proc_time as proctime()\n" +
                ") WITH (\n" +
                "  'connector' = 'kafka',\n" +
                "  'topic' = 'first',\n" +
                "  'properties.bootstrap.servers' = 'hadoop102:9092',\n" +
                "  'properties.group.id' = 'testGroup',\n" +
                "  'scan.startup.mode' = 'latest-offset',\n" +
                "  'format' = 'json'\n" +
                ")");
        //tableEnv.executeSql("select * from emp").print();

        //TODO Read department data from HBase (the HBase table must exist beforehand)
        //A PARTIAL lookup cache is configured because the lookup join issues a
        //request to HBase for every incoming left-side row; caching bounds that load.
        //Note: statements passed to executeSql must not carry a trailing ';' —
        //the API takes exactly one statement.
        tableEnv.executeSql("CREATE TABLE dept (\n" +
                " deptno string,\n" +
                " info ROW<dname string>,\n" +
                " PRIMARY KEY (deptno) NOT ENFORCED\n" +
                ") WITH (\n" +
                " 'connector' = 'hbase-2.2',\n" +
                " 'table-name' = 't_dept',\n" +
                " 'zookeeper.quorum' = 'hadoop102:2181',\n" +
                " 'lookup.async' = 'true',\n" +
                " 'lookup.cache' = 'PARTIAL',\n" +
                " 'lookup.partial-cache.max-rows' = '200',\n" +
                " 'lookup.partial-cache.expire-after-write' = '1 hour',\n" +
                " 'lookup.partial-cache.expire-after-access' = '1 hour'\n" +
                ")");

        //tableEnv.executeSql("select * from dept").print();


        //TODO Join employees with departments
        //Why a lookup join and not a regular inner/outer join? With a regular join
        //there is no good state-TTL setting: Flink would have to keep state for
        //both tables indefinitely. A lookup join keeps no join state at all —
        //the left (driving) table sends a point request to the right table for
        //each arriving row. Since that means frequent HBase access, the HBase
        //dynamic table above enables a lookup cache.
        Table joinedTable = tableEnv.sqlQuery("SELECT \n" +
                "e.empno,e.ename,d.deptno,d.dname\n" +
                "FROM emp AS e\n" +
                "  JOIN dept FOR SYSTEM_TIME AS OF e.proc_time AS d\n" +
                "    ON e.deptno = d.deptno");
        //joinedTable.execute().print();


        //TODO Write the join result to a Kafka topic
        //1. Create the dynamic table mapped to the Kafka sink topic.
        //upsert-kafka requires a primary key; it is used as the Kafka record key.
        tableEnv.executeSql("CREATE TABLE joined_table (\n" +
                "  empno string,\n" +
                "  ename string,\n" +
                "  deptno string,\n" +
                "  dname string,\n" +
                "  PRIMARY KEY (empno) NOT ENFORCED\n" +
                ") WITH (\n" +
                "  'connector' = 'upsert-kafka',\n" +
                "  'topic' = 'second',\n" +
                "  'properties.bootstrap.servers' = 'hadoop102:9092',\n" +
                "  'key.format' = 'json',\n" +
                "  'value.format' = 'json'\n" +
                ")");

        //2. Insert into the sink; this submits the streaming job.
        joinedTable.executeInsert("joined_table");



    }
}
