package com.atguigu.flink.chapter11;

import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.table.catalog.hive.HiveCatalog;

/**
 * @Author lizhenchao@atguigu.cn
 * @Date 2022/1/23 10:10
 */
public class Flink12_Hive {
    public static void main(String[] args) {
        // Identity used by the Hadoop client when this job touches HDFS / the Hive warehouse.
        System.setProperty("HADOOP_USER_NAME", "atguigu");

        Configuration flinkConf = new Configuration();
        flinkConf.setInteger("rest.port", 20000); // pin the local web UI port
        StreamExecutionEnvironment streamEnv =
            StreamExecutionEnvironment.getExecutionEnvironment(flinkConf);
        streamEnv.setParallelism(1);
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(streamEnv);

        // Register a filesystem-backed CSV table in the default (in-memory) catalog.
        String createStuDdl = "create table stu(" +
                                  "   id string, " +
                                  "   ts bigint, " +
                                  "   vc int " +
                                  ")with(" +
                                  "   'connector' = 'filesystem', " +
                                  "   'path' = 'input/sensor.txt', " +
                                  "   'format' = 'csv' " +
                                  ")";
        tableEnv.executeSql(createStuDdl);

        // 1. Build the Hive catalog: catalog name, default database, hive-site.xml directory.
        HiveCatalog hiveCatalog = new HiveCatalog("hive", "gmall", "input/");
        // 2. Register it with the table environment.
        tableEnv.registerCatalog("hive", hiveCatalog);

        tableEnv.useCatalog("hive");    // make "hive" the session's current catalog
        tableEnv.useDatabase("gmall");  // and "gmall" its current database

//        tableEnv.sqlQuery("select * from hive.gmall.stu").execute().print();
//        tableEnv.sqlQuery("select * from stu").execute().print();
        // Current catalog is "hive", so the in-memory table must be addressed fully qualified.
        tableEnv.sqlQuery("select * from default_catalog.default_database.stu").execute().print();
        /*
        Notes:
        1. The Hive metastore service must be running, even if the config files do not mention it.
        2. The Hive configuration path passed to HiveCatalog must be a directory, not a file.
         */
    }
}
