package com.wushi.bigdata;

//import com.wushi.bigdata.catalog.ImpalaCatalogLocal;
import com.alibaba.fastjson.JSON;
import com.wushi.bigdata.udf.SubStringFunc;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.TableResult;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * Executes SQL via the Table API against an Impala-backed catalog.
 * @Description By defining different Catalogs, source metadata can be created and persisted in the
 * corresponding Catalog, avoiding repeated creation. HiveCatalog depends on local Hive
 * configuration files: hive-site.xml, hdfs-site.xml.
 * The default Catalog is the in-memory GenericMemoryCatalog, so tables must be re-created on every run.
 * @Author wushumin
 * @Date 2021-08-03 10:25
 * @Version 1.0
 **/
public class ImpalaCatalogWithFunction {

    /**
     * Builds a streaming {@link StreamTableEnvironment} using the Blink planner and registers
     * the Impala JDBC catalog via SQL DDL.
     *
     * @return a table environment with the {@code impalaCatalog} catalog registered
     */
    public StreamTableEnvironment getEnv(){
        StreamExecutionEnvironment environment = StreamExecutionEnvironment.getExecutionEnvironment();
        EnvironmentSettings environmentSettings = EnvironmentSettings.newInstance().useBlinkPlanner().inStreamingMode().build();
        StreamTableEnvironment tableEnvironment = StreamTableEnvironment.create(environment,environmentSettings);

        // Register the catalog through SQL DDL.
        // NOTE(review): credentials are hard-coded placeholders; move to configuration before production use.
        tableEnvironment.executeSql("CREATE CATALOG impalaCatalog WITH(\n" +
                "    'type' = 'jdbc-impala',\n" +
                "    'default-database' = 'flink_catalog_db',\n" +
                "    'username' = 'username',\n" +
                "    'password' = 'password',\n" +
                "    'base-url' = 'jdbc:impala://192.168.6.76:21050'\n" +
                ")");
        return tableEnvironment;
    }

    /**
     * Switches to the Impala catalog, registers a UDF, and runs a temporal-join query whose
     * result is inserted into a Kafka sink table.
     *
     * @param env table environment returned by {@link #getEnv()}
     */
    public void process(StreamTableEnvironment env){

        env.executeSql("show catalogs").print();

        env.useCatalog("impalaCatalog");
//        env.useDatabase("flink_catalog_db");
        String[] databases = env.listDatabases();
        String[] funcs = env.listFunctions();
        String[] udfs = env.listUserDefinedFunctions();
        // BUG FIX: the original `"=======:"+x!=null? ...` parsed as `(("=======:"+x) != null) ? ...`,
        // so the null check tested the concatenated String (never null) and the prefix was dropped
        // from the printed output. Parentheses restore the intended null-guard on the array.
        System.out.println("=======:" + (databases != null ? JSON.toJSONString(databases) : null));
        System.out.println("=======:" + (funcs != null ? JSON.toJSONString(funcs) : null));
        System.out.println("=======:" + (udfs != null ? JSON.toJSONString(udfs) : null));
        // Register the UDF. The Table-API alternative is createFunction (commented below);
        // here it is registered via SQL DDL so it is persisted in the catalog.
//        env.createFunction("subStr", SubStringFunc.class);
        env.executeSql("CREATE FUNCTION IF NOT EXISTS subStr4 AS 'com.wushi.bigdata.udf.SubStringFunc'");
//         udfs = env.listUserDefinedFunctions();

//        env.executeSql("CREATE TABLE if not exists  flink_kafka_source4 (\n" +
//                "  `user_id` BIGINT,\n" +
//                "  `item_id` BIGINT,\n" +
//                "  `behavior` STRING,\n" +
//                //"  `ts` TIMESTAMP(3) METADATA FROM 'timestamp'\n" +     // Kafka record timestamp
//                "  proctime as PROCTIME()\n" +     // processing time
//                ") WITH (\n" +
//                "  'connector' = 'kafka',\n" +
//                "  'topic' = 'flink_sql_test',\n" +
//                "  'properties.bootstrap.servers' = 'cdh1.lcbint.cn:9092',\n" +
//                "  'properties.group.id' = 'testGroup',\n" +
//                "  'scan.startup.mode' =  'latest-offset',\n" +
//                "  'format' = 'canal-json'\n" +
//                ")");

        //        case1: plain query
//        TableResult tableResult = env.sqlQuery("select a.user_id, subStr4(a.behavior,2,5)  from flink_catalog_db.flink_kafka_source4 a").execute();
//        TableResult tableResult = env.sqlQuery("select SubStringFunc3('behavior-test34324',2,5)").execute();
//        tableResult.print();

//        case2: dimension-table lookup
//        Table table = env.sqlQuery("select id,user_id,mobile,openid from lcb_mic_user_db.us_micro_user where user_id <100");
//        table.execute().print();

        //case3: insert into Kafka sink table
//        TableResult result = env.executeSql("insert into flink_catalog_db.flink_kafka_sink select user_id,'null' as real_name ,1 as pv from flink_catalog_db.flink_kafka_source");
//        result.print();

//      case3: temporal join against a dimension table (FOR SYSTEM_TIME AS OF processing time)
        Table jointTable = env.sqlQuery("select a.user_id, subStr4(b.real_name,0,1), count(a.user_id) as pv from flink_catalog_db.flink_kafka_source4 a" +
                "  left join lcb_mic_user_db.us_micro_user for SYSTEM_TIME AS OF a.proctime  as b" +
                "  on a.user_id = b.user_id" +
                " group by a.user_id,b.real_name");
        env.createTemporaryView("join_table", jointTable);
        TableResult result = env.executeSql("insert into flink_catalog_db.flink_kafka_sink3 select * from join_table");
        env.executeSql("select * from join_table").print();
        // BUG FIX: original called Optional.get() unchecked — throws NoSuchElementException when
        // no job client is attached (e.g. pre-flight failure). getJobStatus() returns a
        // CompletableFuture; printing the raw future object mirrors the original behavior.
        result.getJobClient().ifPresent(client -> System.out.println(client.getJobStatus()));

    }

    public static void main(String[] args) {
        ImpalaCatalogWithFunction tableSql = new ImpalaCatalogWithFunction();
        StreamTableEnvironment env = tableSql.getEnv();
        tableSql.process(env);
    }
}

