package streaming.api.sql;

import org.apache.flink.runtime.state.filesystem.FsStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

import java.util.UUID;

/**
 * Flink SQL demo job: reads CSV rows from a Kafka topic and streams them into a
 * partitioned filesystem sink (partitioned by date and hour), committing partitions
 * with a _SUCCESS file after a 15-minute delay.
 *
 * <p>Checkpointing is enabled (exactly-once, every 60 s) because the filesystem sink
 * only commits partitions on checkpoint completion.
 */
public class SQLTest1_file1 {

    public static void main(String[] args) {

        // 1. Set up the streaming environment with checkpointing enabled —
        //    the filesystem sink relies on checkpoints to commit partitions.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);
        env.enableCheckpointing(60 * 1000L, CheckpointingMode.EXACTLY_ONCE);
        env.setStateBackend(new FsStateBackend("file:///tmp/ckp"));
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);

        String topicFrom = "myTest";
        String kafkaServer = "192.168.36.130:9092,192.168.36.129:9092,192.168.36.128:9092";
        String kafkaFormat = "csv";
        String csvSeparator = ",";
        // Random group id so each run consumes independently of previous runs.
        String kafkaConsumerGroupId = UUID.randomUUID().toString().replace("-", "");

        String file_path = "file:///tmp/test";
        String file_format = "csv";

        // 2. Register the Kafka-backed source table.
        String sourceDdl =
                buildKafkaSourceDdl(topicFrom, kafkaServer, kafkaConsumerGroupId, kafkaFormat, csvSeparator);
        System.out.println("inputTable SQL: " + sourceDdl);
        tableEnv.executeSql(sourceDdl);

        // 3. Register the filesystem-backed sink table, partitioned by dt/hour.
        String sinkDdl = buildFilesystemSinkDdl(file_path, file_format);
        System.out.println("outputTable SQL: " + sinkDdl);
        tableEnv.executeSql(sinkDdl);

        // Approach 2: hand-written INSERT SQL. PROCTIME() is formatted into the
        // dt/hour partition columns expected by the sink.
        tableEnv.executeSql("INSERT INTO fs_table SELECT d_a, d_b,DATE_FORMAT(pt, 'yyyy-MM-dd'), DATE_FORMAT(pt, 'HH') FROM kafka_table");

//        tableEnv.executeSql("SELECT * FROM fs_table WHERE dt='2020-05-20' and `hour`='12'");
    }

    /**
     * Builds the CREATE TABLE DDL for the Kafka source table {@code kafka_table}.
     *
     * @param topic            Kafka topic to consume
     * @param bootstrapServers comma-separated Kafka broker addresses
     * @param groupId          consumer group id
     * @param format           value format (e.g. {@code csv})
     * @param csvSeparator     CSV field delimiter; only emitted when format is csv
     * @return a complete CREATE TABLE statement
     */
    private static String buildKafkaSourceDdl(
            String topic, String bootstrapServers, String groupId, String format, String csvSeparator) {
        StringBuilder sb = new StringBuilder();
        sb.append("CREATE TABLE kafka_table (");
        sb.append("d_a STRING, d_b STRING, pt AS PROCTIME()");
        sb.append(") WITH (");
        sb.append("'connector' = 'kafka',");
        sb.append("'topic' = '").append(topic).append("',");
        sb.append("'properties.bootstrap.servers' = '").append(bootstrapServers).append("',");
        sb.append("'properties.group.id' = '").append(groupId).append("',");
        sb.append("'format' = '").append(format).append("'");
        if ("csv".equals(format)) {
            // The delimiter option only applies to the csv format.
            sb.append(",'csv.field-delimiter' = '").append(csvSeparator).append("'");
        }
        sb.append(")");
        return sb.toString();
    }

    /**
     * Builds the CREATE TABLE DDL for the filesystem sink table {@code fs_table},
     * partitioned by {@code dt} and {@code hour}. Partitions are committed with a
     * _SUCCESS file after a 15-minute delay.
     *
     * @param path   output directory URI
     * @param format file format (e.g. {@code csv})
     * @return a complete CREATE TABLE statement
     */
    private static String buildFilesystemSinkDdl(String path, String format) {
        StringBuilder sb = new StringBuilder();
        sb.append("CREATE TABLE fs_table (");
        sb.append("user_id STRING, order_amount STRING, dt STRING, `hour` STRING");
        sb.append(") PARTITIONED BY (dt, `hour`) WITH (");
        sb.append("'connector' = 'filesystem',");
        sb.append("'path' = '").append(path).append("',");
        sb.append("'format' = '").append(format).append("',");
//        sb.append("'auto-compaction' = 'true',");
        sb.append("'sink.partition-commit.delay' = '").append("15 min").append("',");
        sb.append("'sink.partition-commit.policy.kind' = 'success-file'");
        sb.append(")");
        return sb.toString();
    }
}
