package com.hrt.flink.hudi

import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
import org.apache.flink.table.api.bridge.scala.StreamTableEnvironment
import org.apache.flink.table.api.{EnvironmentSettings, Table}

/**
 * Record schema for the CSV events consumed from Kafka and written to Hudi.
 *
 * Sample input lines (id,name,age,ts,loc):
 * 1,zs,18,20210710,beijing
 * 2,ls,19,20210710,shanghai
 * 3,ww,20,20210710,tianjin
 * 1,hrt,99,20210710,beijing
 *
 * @param id   record key (also the Hudi primary key in table t1)
 * @param name user name
 * @param age  user age
 * @param ts   event timestamp string, e.g. "20210710"
 * @param loc  location — used as the Hudi partition field
 */
case class Info(id:String,name:String,age:Int,ts:String,loc:String)

object FlinkWriteDataToHudi {
  /**
   * Streams CSV records from Kafka into a Hudi COPY_ON_WRITE table using Flink SQL.
   *
   * Pipeline: Kafka topic `flink_hudi_test` (CSV) -> table `kafkaInputTable`
   * -> Hudi table `t1` at hdfs://hadoop102:8020/flink_hudi_data, partitioned by `loc`.
   */
  def main(args: Array[String]): Unit = {
    // 1. Streaming environment plus a blink-planner table environment on top of it.
    val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment

    val tableEnv: StreamTableEnvironment = StreamTableEnvironment.create(env, EnvironmentSettings.newInstance()
      .useBlinkPlanner().inStreamingMode().build())

    // 2. Checkpointing MUST be enabled: Hudi commits files on checkpoint completion.
    //    Without it only the .hoodie metadata directory appears under the table path.
    env.enableCheckpointing(2000)
//    env.setStateBackend(new RocksDBStateBackend("hdfs://mycluster/flinkstate"))

    // 3. Single parallelism keeps this demo light on resources.
    env.setParallelism(1)

    // 4. Source table backed by the Kafka topic, decoded as CSV.
    tableEnv.executeSql(
      """
        | create table kafkaInputTable(
        |  id varchar,
        |  name varchar,
        |  age int,
        |  ts varchar,
        |  loc varchar
        | ) with (
        |  'connector' = 'kafka',
        |  'topic' = 'flink_hudi_test',
        |  'properties.bootstrap.servers'='192.168.56.13:9092,192.168.56.14:9092,192.168.56.15:9092',
        |  'scan.startup.mode'='latest-offset',
        |  'properties.group.id' = 'testgroup',
        |  'format' = 'csv'
        | )
      """.stripMargin)

    // 5. Hudi sink table.
    //    NOTE: 'path' must carry the hdfs:// prefix, otherwise it resolves against localhost.
    tableEnv.executeSql(
      """
        |CREATE TABLE t1(
        |  id VARCHAR(20) PRIMARY KEY NOT ENFORCED,-- default record key is uuid; "PRIMARY KEY NOT ENFORCED" designates this column instead
        |  name VARCHAR(10),
        |  age INT,
        |  ts VARCHAR(20),
        |  loc VARCHAR(20)
        |)
        |PARTITIONED BY (loc)
        |WITH (
        |  'connector' = 'hudi',
        |  'path' = 'hdfs://hadoop102:8020/flink_hudi_data',
        |  'write.tasks' = '1', -- default is 4 ,required more resource
        |  'compaction.tasks' = '1', -- default is 10 ,required more resource
        |  'table.type' = 'COPY_ON_WRITE' -- this creates a MERGE_ON_READ table, by default is COPY_ON_WRITE
        |)
      """.stripMargin)

    // 6. Continuous INSERT from the Kafka source into the Hudi sink.
    //    Select from the registered catalog name directly instead of interpolating a
    //    Table object's toString (which relied on implicit anonymous-view registration).
    val insertResult = tableEnv.executeSql(
      """
        | insert into t1 select id,name,age,ts,loc from kafkaInputTable
      """.stripMargin)

    // FIX: the previous env.execute() threw "No operators defined in streaming topology"
    // because the whole pipeline runs through the Table API — executeSql has already
    // submitted the job. Blocking on the TableResult keeps a local run alive instead.
    insertResult.await()
  }
}
