package com.wushi.bigdata;

//import com.wushi.bigdata.catalog.ImpalaCatalogLocal;
import com.lcb.mars.flink.base.catalog.ImpalaCatalog;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.TableResult;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * Executes Flink Table SQL against a custom Impala JDBC catalog.
 *
 * <p>By registering a catalog other than the default in-memory
 * {@code GenericInMemoryCatalog}, source/sink table metadata is persisted and
 * reused instead of being re-created on every run. (A {@code HiveCatalog} would
 * also work, but depends on local hive-site.xml / hdfs-site.xml configuration.)
 *
 * <p>The demo joins a Kafka (canal-json) stream against an Impala dimension
 * table and writes the aggregated result to an upsert-kafka sink.
 *
 * @author wushumin
 * @since 2021-08-03
 */
public class ImpalaCatalogTableSqlJoinCanalJson {

    /**
     * Creates a streaming table environment (Blink planner) and registers the
     * Impala-backed catalog via SQL DDL.
     *
     * @return a {@link StreamTableEnvironment} with catalog {@code impalaCatalog} registered
     */
    public StreamTableEnvironment getEnv() {
        StreamExecutionEnvironment environment = StreamExecutionEnvironment.getExecutionEnvironment();
        EnvironmentSettings environmentSettings =
                EnvironmentSettings.newInstance().useBlinkPlanner().inStreamingMode().build();
        StreamTableEnvironment tableEnvironment = StreamTableEnvironment.create(environment, environmentSettings);

        // The catalog could also be registered programmatically:
        //   tableEnvironment.registerCatalog("impalaCatalog",
        //       new ImpalaCatalog("impalaCatalog", "information_schema",
        //           "jdbc:impala://192.168.6.76:21050"));
        // Here it is registered via SQL DDL instead.
        // NOTE(review): the username/password and JDBC URL below are hard-coded
        // placeholders — move them to external configuration before production use.
        tableEnvironment.executeSql("CREATE CATALOG impalaCatalog WITH(\n" +
                "    'type' = 'jdbc-impala',\n" +
                "    'default-database' = 'information_schema',\n" +
                "    'username' = 'username',\n" +
                "    'password' = 'password',\n" +
                "    'base-url' = 'jdbc:impala://192.168.6.76:21050'\n" +
                ")");
        return tableEnvironment;
    }

    /**
     * Runs the demo pipeline: creates the upsert-kafka sink table (if absent),
     * performs a processing-time temporal join between the Kafka source stream
     * and the Impala dimension table, and writes the aggregated result to the sink.
     *
     * <p>Assumes {@code flink_catalog_db.flink_kafka_source4} (kafka / canal-json
     * source with a {@code proctime} column) and {@code lcb_mic_user_db.us_micro_user}
     * (Impala JDBC dimension table) already exist in the catalog — TODO confirm
     * they were created by an earlier job or inserted into the catalog tables.
     *
     * @param env table environment returned by {@link #getEnv()}
     */
    public void process(StreamTableEnvironment env) {

        env.executeSql("show catalogs").print();

        env.useCatalog("impalaCatalog");

        // Sink: upsert-kafka table keyed by user_id, value encoded as canal-json.
        env.executeSql("CREATE TABLE if not exists flink_catalog_db.flink_kafka_sink3 (\n" +
                "  `user_id` BIGINT,\n" +
                "  `real_name` varchar,\n" +
                "  `pv` bigint,\n" +
                " PRIMARY KEY (`user_id`) NOT ENFORCED \n"+
                ") WITH (\n" +
                "  'connector' = 'upsert-kafka',\n" +
                "  'topic' = 'ads_kudu2es',\n" +
                " 'properties.bootstrap.servers' = 'cdh1.lcbint.cn:9092,cdh2.lcbint.cn:9092,cdh3.lcbint.cn:9092',\n" +
                " 'properties.group.id' = 'flink_sql',\n" +
                " 'value.canal-json.database.include'='lcb_mars_micro_ads_db',\n" +
                " 'value.canal-json.table.include'='micro_indicator_add_car_nums',\n" +
                "  'key.format' = 'json',\n" +
                "  'value.format' = 'canal-json'\n" +
                ")");

        // Processing-time temporal join (FOR SYSTEM_TIME AS OF a.proctime) against
        // the Impala dimension table, aggregating a per-user event count (pv).
        Table jointTable = env.sqlQuery("select a.user_id,b.real_name, count(a.user_id) as pv from flink_catalog_db.flink_kafka_source4 a" +
                "  left join lcb_mic_user_db.us_micro_user for SYSTEM_TIME AS OF a.proctime  as b" +
                "  on a.user_id = b.user_id" +
                " group by a.user_id,b.real_name");
        env.createTemporaryView("join_table", jointTable);

        // Detached streaming INSERT into the kafka sink.
        TableResult result = env.executeSql("insert into flink_catalog_db.flink_kafka_sink3 select * from join_table");

        // Continuous query: print() blocks here and streams rows to stdout indefinitely.
        env.executeSql("select * from join_table").print();

        // Use ifPresent instead of an unchecked Optional.get(); note getJobStatus()
        // returns a CompletableFuture, so this prints the future handle (diagnostic only).
        result.getJobClient().ifPresent(jobClient -> System.out.println(jobClient.getJobStatus()));
    }

    /**
     * Entry point: builds the table environment and runs the demo pipeline.
     *
     * @param args unused
     */
    public static void main(String[] args) {
        ImpalaCatalogTableSqlJoinCanalJson tableSql = new ImpalaCatalogTableSqlJoinCanalJson();
        StreamTableEnvironment env = tableSql.getEnv();
        tableSql.process(env);
    }
}

