package com.hrt.flink.icebergoperate.unauto;

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * Example of operating on an Apache Iceberg table through the Flink SQL API,
 * using a Hadoop-type catalog whose warehouse lives on HDFS.
 *
 * <p>The job creates (if needed) a catalog, database and partitioned table,
 * then inserts a few sample rows. All DDL uses {@code IF NOT EXISTS} so the
 * job is idempotent and can be run both on a fresh cluster and repeatedly.
 */
public class FlinkSQLIceberg1 {
    public static void main(String[] args) {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        StreamTableEnvironment tblEnv = StreamTableEnvironment.create(env);

        // Iceberg commits written data on Flink checkpoints; without
        // checkpointing enabled the INSERT below would never become visible.
        env.enableCheckpointing(1000);

        // 1. Register the Hadoop-type Iceberg catalog backed by the HDFS warehouse.
        tblEnv.executeSql("create catalog hadoop_iceberg with ('type'='iceberg','catalog-type'='hadoop','warehouse'='hdfs://mycluster/flink_iceberg')");

        // 2. Switch to that catalog.
        tblEnv.useCatalog("hadoop_iceberg");

        // 3. Create the database if it does not exist yet (safe on re-runs;
        //    previously commented out, which broke first-time execution).
        tblEnv.executeSql("create database if not exists iceberg_db");

        // 4. Use the database.
        tblEnv.useDatabase("iceberg_db");

        // 5. Create the Iceberg table, partitioned by `loc`, if it does not
        //    exist yet (safe on re-runs; previously commented out).
        tblEnv.executeSql("create table if not exists hadoop_iceberg.iceberg_db.flink_iceberg_tbl2 (id int,name string,age int,loc string) partitioned by (loc)");

        // 6. Insert sample rows into the Iceberg table.
        tblEnv.executeSql("insert into hadoop_iceberg.iceberg_db.flink_iceberg_tbl2 values (1,'zs',18,'beijing'),(2,'ls',19,'shanghai'),(3,'ww',20,'guangzhou')");
    }

}
