package com.practice.gmall.realtime.app.dws;

import com.practice.gmall.realtime.app.BaseSQLApp;
import com.practice.gmall.realtime.bean.KeywordCountBean;
import com.practice.gmall.realtime.functions.SplitZnWords;
import com.practice.gmall.realtime.util.FlinkSinkUtil;
import com.practice.gmall.realtime.util.MyKafkaUtil;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * ClassName: DwsKeywordPageView01
 * Package: com.practice.gmall.realtime.app.dws
 * Description: 将明细层数据按照小的时间周期进行汇总，每3秒统计一次关键词的搜索次数，并将数据写出到OLAP数据库——ClickHouse
 * @Author lzy.ethan@qq.com
 * @Create 2023-02-17 9:48
 */
public class DwsKeywordPageView01 extends BaseSQLApp {
    public static void main(String[] args) {
        // init(webUiPort, jobId/consumerGroup, parallelism) — defined in BaseSQLApp.
        new DwsKeywordPageView01().init(4003, "DwsKeywordPageView01", 2);
    }

    /**
     * Reads page-log records from Kafka topic {@code dwd_traffic_page}, filters
     * keyword-search events, splits each search phrase into words with a UDTF,
     * counts occurrences per word in 3-second tumbling event-time windows, and
     * sinks the aggregated rows to ClickHouse table
     * {@code dws_traffic_keyword_page_view_window}.
     *
     * @param env  the streaming execution environment (used only to trigger the job)
     * @param tEnv the table environment all SQL statements run against
     */
    @Override
    public void handle(StreamExecutionEnvironment env, StreamTableEnvironment tEnv) {
        // 1. Source table over Kafka topic dwd_traffic_page.
        //    et is event time derived from ts (epoch millis), with a 3s
        //    out-of-orderness bound on the watermark.
        tEnv.executeSql("" +
                "CREATE TABLE dwd_traffic_page(" +
                " page MAP<STRING, STRING>," +
                " ts BIGINT, " +
                " et AS TO_TIMESTAMP_LTZ(ts,3) ," +
                " WATERMARK FOR et AS et - INTERVAL '3' second) " +
                MyKafkaUtil.getKafkaSourceDDL("dwd_traffic_page", "DwsKeywordPageView01"));

        // 2. Keep only keyword searches that were issued from the home page or
        //    the search page, and whose search phrase is present.
        Table keywordLog = tEnv.sqlQuery( "" +
                "SELECT " +
                " page['item'] words," +
                " et " +
                "FROM dwd_traffic_page " +
                "WHERE " +
                "    page['item_type'] = 'keyword' " +
                "    AND (page['last_page_id'] = 'home'  " +
                "        OR  " +
                "        page['last_page_id'] = 'search') " +
                "    AND page['item'] is not null");

        tEnv.createTemporaryView("keyword_log", keywordLog);

        // Register the Chinese word-segmentation UDTF.
        // FIX: register under the same (lowercase) name used in the query below.
        // The original registered 'split_zn_Words' and queried 'split_zn_words';
        // that only worked because Flink resolves function names case-insensitively.
        tEnv.createTemporarySystemFunction("split_zn_words", SplitZnWords.class);

        // 3. Explode each search phrase into individual words via the UDTF
        //    (implicit cross join with the lateral table).
        Table keywordsSplit = tEnv.sqlQuery("" +
                "SELECT " +
                "    word, " +
                "    et " +
                "FROM  " +
                "    keyword_log, LATERAL TABLE(split_zn_words(words)) ");
        tEnv.createTemporaryView("split_word",keywordsSplit);

        // 4. Count each word per 3-second tumbling window (windowing TVF).
        //    ts is the processing-time instant at which the row was produced.
        Table result = tEnv.sqlQuery("" +
                "SELECT  " +
                "    DATE_FORMAT(window_start,'yyyy-MM-dd HH:mm:ss') stt, " +
                "    DATE_FORMAT(window_end,'yyyy-MM-dd HH:mm:ss') edt,  " +
                "    word keyword, " +
                "    count(*) keyword_count," +
                "    unix_timestamp() as  ts    " +
                "FROM " +
                "    TABLE (TUMBLE(TABLE split_word, descriptor(et), interval '3' second ))   " +
                "GROUP BY window_start, window_end, word ");

        // 5. Convert the dynamic table to a retract stream, drop retraction
        //    messages (f0 == false), and sink the remaining inserts to ClickHouse.
        SingleOutputStreamOperator<KeywordCountBean> keywordCntStream = tEnv.toRetractStream(result, KeywordCountBean.class)
                .filter(tp2 -> tp2.f0)
                .map(tp2 -> tp2.f1);

        keywordCntStream.addSink(FlinkSinkUtil.getClickhouseSink("dws_traffic_keyword_page_view_window", KeywordCountBean.class));

        // The DataStream sink requires an explicit execute(); SQL-only pipelines
        // would not. Wrap the checked exception, preserving the cause.
        try {
            env.execute();
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }
}
