package com.app.dws;

import com.app.function.SplitFunction;
import com.bean.KeywordStats;
import com.common.GlobalConfig;
import com.utils.ClickHouseUtil;
import com.utils.MyKafkaUtil;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.runtime.state.filesystem.FsStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * @Description: TODO QQ1667847363
 * @author: xiao kun tai
 * @date:2022/1/12 20:14
 */

//TODO: 数据流 web/app -> Nginx -> Springboot  -> Kafka(ods)  ->  FlinkApp  -> Kafka(dwd) -> FlinkAPP  -> ClickHouse
//TODO: 程序 mockLog -> Nginx -> Logger.sh -> Kafka(ZK) -> BaseLogApp -> Kafka(ZK) -> KeywordStatsApp -> ClickHouse
public class KeywordStatsApp {
    public static void main(String[] args) throws Exception {
        // 1. Set up the streaming execution environment.
        StreamExecutionEnvironment streamEnv = StreamExecutionEnvironment.getExecutionEnvironment();
        // In production this should match the Kafka partition count; 1 is for local runs.
        streamEnv.setParallelism(1);

        EnvironmentSettings tableSettings = EnvironmentSettings
                .newInstance()
                .inStreamingMode()
                .build();
        StreamTableEnvironment tableEnvironment = StreamTableEnvironment.create(streamEnv, tableSettings);

        // Flink-CDC keeps the binlog position in checkpoint state; to resume where the job
        // left off, restart the program from a Checkpoint or Savepoint.
        /*// Enable checkpointing and pick a state backend (memory / fs / rocksdb).
        streamEnv.setStateBackend(new FsStateBackend("hdfs://192.168.88.109:9820/gmall-flink/ck"));
        // Take a checkpoint every 5 seconds.
        streamEnv.enableCheckpointing(5000L);
        // Exactly-once checkpoint semantics.
        streamEnv.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
        streamEnv.getCheckpointConfig().setCheckpointTimeout(10000L);
        streamEnv.getCheckpointConfig().setMinPauseBetweenCheckpoints(3000);
        // Retain the last checkpoint when the job is cancelled.
        streamEnv.getCheckpointConfig().enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
        // Fixed-delay restart strategy used when recovering from a checkpoint.
        streamEnv.setRestartStrategy(RestartStrategies.fixedDelayRestart(3,2000L));
        // User name for HDFS access.
        System.setProperty("HADOOP_USER_NAME", "root");*/

        // 2. Declare the Kafka source table with DDL; `rt` is an event-time attribute
        //    derived from the epoch-millis `ts` column, with a 1-second watermark delay.
        String consumerGroupId = "keyword_stats_app" + GlobalConfig.NUMBER;
        String pageLogTopic = "dwd_page_log";

        tableEnvironment.executeSql("CREATE TABLE page_view (" +
                "`common` MAP<STRING,STRING>, " +
                "`page` MAP<STRING,STRING>," +
                "`ts` BIGINT, " +
                "`rt` AS TO_TIMESTAMP(FROM_UNIXTIME(ts/1000, 'yyyy-MM-dd HH:mm:ss')) ," +
                "WATERMARK FOR rt AS rt - INTERVAL '1' SECOND) " +
                "WITH (" + MyKafkaUtil.getKafkaDDL(pageLogTopic, consumerGroupId) + ")");

        // 3. Keep only rows that arrived from the search page and carry a search term.
        Table searchRecordTable = tableEnvironment.sqlQuery("select " +
                "page['item'] full_word ," +
                "rt " +
                "from page_view " +
                "where page['last_page_id']='search' and page['item'] is not null ");

        // 4. Register the word-splitting UDTF and explode each phrase into single keywords.
        tableEnvironment.createTemporaryFunction("split_words", SplitFunction.class);
        Table keywordTable = tableEnvironment.sqlQuery("select " +
                "word," +
                "rt " +
                "from " + searchRecordTable + " ," +
                " LATERAL TABLE(split_words(full_word)) as T(word)");

        // 5. Count each keyword inside 10-second tumbling event-time windows.
        Table aggregatedTable = tableEnvironment.sqlQuery("select " +
                "'search' source," +
                "word keyword," +
                "count(*) ct," +
                "DATE_FORMAT(TUMBLE_START(rt, INTERVAL '10' SECOND),'yyyy-MM-dd HH:mm:ss') stt," +
                "DATE_FORMAT(TUMBLE_END(rt, INTERVAL '10' SECOND),'yyyy-MM-dd HH:mm:ss') edt," +
                "UNIX_TIMESTAMP()*1000 ts " +
                "from " + keywordTable +
                " GROUP BY TUMBLE(rt, INTERVAL '10' SECOND ),word");

        // 6. The windowed aggregate is append-only, so convert it straight to a DataStream.
        DataStream<KeywordStats> statsStream =
                tableEnvironment.toAppendStream(aggregatedTable, KeywordStats.class);

        // 7. Print for debugging and write the stats into ClickHouse.
        System.out.println("任务开始>>>>>>>>>>>");
        statsStream.print(">>>>>>>>>>>");
        statsStream
                .addSink(ClickHouseUtil
                        .getSink("insert into keyword_stats_2021 (keyword,ct,source,stt,edt,ts) values(?,?,?,?,?,?)"));

        // 8. Submit the job.
        streamEnv.execute("KeywordStatsApp");
    }
}
