package com.dada.dongpeng;

import org.apache.flink.api.common.RuntimeExecutionMode;
import org.apache.flink.runtime.state.filesystem.FsStateBackend;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.TableConfig;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

public class IcebergCdc {

	/**
	 * Flink streaming job: captures change data (CDC) from the PostgreSQL table
	 * {@code public.employees} via the postgres-cdc connector and continuously
	 * writes it into an Iceberg v2 table (upsert mode) stored in a Hadoop
	 * catalog on HDFS.
	 *
	 * <p>Pipeline: postgres-cdc source table (generic in-memory catalog)
	 * &rarr; {@code INSERT INTO} &rarr; Iceberg table {@code hadoop_catalog.iceberg.sample2}.
	 *
	 * @param args unused
	 */
	public static void main(String[] args) {
		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
		env.setParallelism(1);
		env.setRuntimeMode(RuntimeExecutionMode.STREAMING);
		// Iceberg only commits files on checkpoint; without checkpointing the
		// sink would never make data visible.
		env.enableCheckpointing(60000);
		// NOTE(review): FsStateBackend is deprecated in recent Flink releases;
		// consider HashMapStateBackend + setCheckpointStorage when upgrading.
		env.setStateBackend(new FsStateBackend("file:///opt/soft/checkpoint/iceberg"));
		StreamTableEnvironment tEnv = StreamTableEnvironment.create(env);

		// --- Source side: CDC table registered in a generic in-memory catalog ---
		String catalog1 = "CREATE CATALOG memory_catalog WITH (\n" +
				"  'type' = 'generic_in_memory'\n" +
				");";
		String useCatalog = "USE CATALOG memory_catalog;\n";
		String database = "create database ods;\n";
		String useDatabase = "use ods;\n";

		// SECURITY(review): credentials are hard-coded; move them to external
		// configuration or a secrets store before deploying.
		String createEmployees = "CREATE TABLE if not exists employees (\n" +
				"    id BIGINT,\n" +
				"    name STRING,\n" +
				"    age BIGINT\n" +
				")WITH (\n" +
				"  'connector' = 'postgres-cdc',\n" +
				"  'hostname' = '127.0.0.1',\n" +
				"  'port' = '5432',\n" +
				"  'username' = 'postgres',\n" +
				"  'password' = '123456',\n" +
				"  'database-name' = 'postgres',\n" +
				"  'schema-name' = 'public',\n" +
				"  'table-name' = 'employees',\n" +
				"  'decoding.plugin.name' = 'pgoutput', -- recommended logical decoding plugin\n" +
				"  'slot.name' = 'flink_slot', -- custom replication slot name\n" +
				"  'scan.incremental.snapshot.enabled' = 'true', -- use the incremental snapshot framework\n" +
				"  'scan.startup.mode' = 'latest-offset' -- no initial snapshot: read only changes from now on\n" +
				");";

		// --- Sink side: Iceberg table in a Hadoop (HDFS) catalog ---
		String catalog2 = "CREATE CATALOG hadoop_catalog WITH (\n" +
				"  'type' = 'iceberg',\n" +
				" 'catalog-type'='hadoop',\n" +
				"  'warehouse' = 'hdfs://dongpengdeAir:8020/user/warehouse/iceberg',\n" +
				"  'property-version' = '1' \n" +
				");";
		String useCatalog2 = "USE CATALOG hadoop_catalog;\n";
		// Idempotent: works both on the very first run (database absent) and on re-runs.
		String database2 = "create database if not exists iceberg;\n";
		String useDatabase2 = "use iceberg;\n";
		// "if exists" prevents a failure on the first run, when the table does not exist yet.
		String dropTable2 = "drop table if exists hadoop_catalog.iceberg.sample2";
		// format-version 2 + write.upsert.enabled lets the CDC stream apply
		// row-level updates/deletes keyed on the (non-enforced) primary key.
		String createTable2 = "CREATE TABLE if not exists hadoop_catalog.iceberg.sample2 (\n" +
				"    id BIGINT,\n" +
				"    name STRING,\n" +
				"    age BIGINT,\n" +
				"    PRIMARY KEY(`id`) NOT ENFORCED\n" +
				") with ('format-version'='2', 'write.upsert.enabled'='true');";

		String select = "insert into hadoop_catalog.iceberg.sample2 select * from `memory_catalog`.`ods`.`employees`;";

		// Register and select the source catalog/database, then the CDC table.
		tEnv.executeSql(catalog1);
		tEnv.executeSql(useCatalog);
		tEnv.executeSql(database);
		tEnv.executeSql(useDatabase);
		tEnv.executeSql(createEmployees);

		// Register the Iceberg catalog and (re)create the sink table.
		tEnv.executeSql(catalog2);
		tEnv.executeSql(useCatalog2);
		tEnv.executeSql(database2);
		tEnv.executeSql(useDatabase2);
		tEnv.executeSql(dropTable2);
		tEnv.executeSql(createTable2);

		// executeSql on an INSERT submits the streaming job asynchronously;
		// the job keeps running after main() returns.
		tEnv.executeSql(select);
	}

}
