package com.wushi.bigdata;

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.TableResult;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.table.catalog.GenericInMemoryCatalog;
import org.apache.flink.table.catalog.hive.HiveCatalog;

import java.net.URL;

/**
 * Executes SQL statements through the Flink Table API.
 *
 * <p>By registering different catalogs, source metadata can be created once in the
 * appropriate catalog and persisted, avoiding repeated re-creation. The
 * {@code HiveCatalog} relies on the local Hive configuration files
 * (hive-site.xml, hdfs-site.xml). The default catalog is the in-memory
 * {@code GenericInMemoryCatalog}, whose tables must therefore be re-created
 * on every run.
 *
 * @author wushumin
 * @version 1.0
 * @since 2021-08-03
 */
public class TableSql {

    /**
     * Creates a streaming {@link StreamTableEnvironment} (Blink planner) and registers
     * two catalogs on it:
     * <ul>
     *   <li>{@code hiveCat}  — a {@link HiveCatalog} with default database {@code lcb_db},
     *       whose configuration directory is the classpath root (hive-site.xml /
     *       hdfs-site.xml are expected there);</li>
     *   <li>{@code memoryCat} — a {@link GenericInMemoryCatalog}, whose tables exist only
     *       for the lifetime of this session.</li>
     * </ul>
     *
     * @return the table environment with both catalogs registered
     * @throws IllegalStateException if the classpath root cannot be resolved
     */
    public StreamTableEnvironment getEnv(){
        StreamExecutionEnvironment environment = StreamExecutionEnvironment.getExecutionEnvironment();
        EnvironmentSettings environmentSettings =
                EnvironmentSettings.newInstance().useBlinkPlanner().inStreamingMode().build();
        StreamTableEnvironment tableEnvironment = StreamTableEnvironment.create(environment, environmentSettings);

        // getResource(".") may return null (e.g. when running from inside a jar); the
        // previous code dereferenced it unchecked and would fail with a bare NPE.
        URL resources = Thread.currentThread().getContextClassLoader().getResource(".");
        if (resources == null) {
            throw new IllegalStateException(
                    "Cannot resolve classpath root; HiveCatalog needs hive-site.xml/hdfs-site.xml on the classpath");
        }
        String path = resources.getPath();

        HiveCatalog hiveCatalog = new HiveCatalog("hiveCat", "lcb_db", path);
        tableEnvironment.registerCatalog("hiveCat", hiveCatalog);

        GenericInMemoryCatalog memoryCat = new GenericInMemoryCatalog("memoryCat");
        tableEnvironment.registerCatalog("memoryCat", memoryCat);
        return tableEnvironment;
    }

    /**
     * Runs the streaming pipeline:
     * <ol>
     *   <li>creates the {@code flink_kafka_sink} upsert-kafka table in the in-memory
     *       catalog (it must be re-created on every run);</li>
     *   <li>joins the Kafka source table {@code flink_kafka_source} with the dimension
     *       table {@code lcb_mic_user_db.us_micro_user} — both are expected to already
     *       exist in the Hive catalog, created once outside this job;</li>
     *   <li>inserts the aggregated join result into the sink and prints the job status.</li>
     * </ol>
     *
     * @param env table environment prepared by {@link #getEnv()}
     */
    public void process(StreamTableEnvironment env){
        // Sink lives in the in-memory catalog, so its DDL runs every time.
        env.useCatalog("memoryCat");
        env.executeSql("CREATE TABLE flink_kafka_sink (\n" +
                "  `user_id` BIGINT,\n" +
                "  `real_name` varchar,\n" +
                "  `pv` bigint,\n" +
                " PRIMARY KEY (`user_id`) NOT ENFORCED \n"+
                ") WITH (\n" +
                "  'connector' = 'upsert-kafka',\n" +
                "  'topic' = 'flink_sql_sink_test',\n" +
                "  'properties.bootstrap.servers' = 'cdh1.lcbint.cn:9092',\n" +
                "  'key.format' = 'json',\n" +
                "  'value.format' = 'json'\n" +
                ")");

        // Source and dimension tables are persisted in the Hive catalog; their DDL
        // (kafka source 'flink_kafka_source', jdbc dimension 'us_micro_user') was run
        // once and is intentionally not repeated here.
        env.useCatalog("hiveCat");
        Table jointTable = env.sqlQuery("select a.user_id,b.real_name, count(a.user_id) as pv from flink_kafka_source a" +
                "  left join lcb_mic_user_db.us_micro_user  as b" +
                "  on a.user_id = b.user_id" +
                " group by a.user_id,b.real_name");

        // Bridge the Hive-catalog query result into the in-memory catalog as a view,
        // then stream it into the sink.
        env.useCatalog("memoryCat");
        env.createTemporaryView("join_table", jointTable);
        TableResult result = env.executeSql("insert into flink_kafka_sink select * from join_table");

        // getJobClient() is an Optional (empty for non-job statements) — never call
        // get() unchecked. getJobStatus() returns a CompletableFuture; join() it so the
        // actual status is printed instead of the future object's toString().
        result.getJobClient().ifPresent(client ->
                System.out.println("Job status: " + client.getJobStatus().join()));
    }

    /** Entry point: builds the environment and runs the Kafka → join → upsert-kafka pipeline. */
    public static void main(String[] args) {
        TableSql tableSql = new TableSql();
        StreamTableEnvironment env = tableSql.getEnv();
        tableSql.process(env);
    }

    /**
     * Diagnostic helper: prints the databases and tables visible in the currently
     * active catalog of the given environment.
     *
     * @param env table environment to inspect
     */
    private void processHiveCatalog(StreamTableEnvironment env) {
        env.executeSql("show databases").print();
        env.executeSql("show tables").print();
    }
}

