package com.atguigu.gamll.realtime.app.dws;

import com.atguigu.gamll.realtime.app.dwd.BaseApp;
import com.atguigu.gamll.realtime.app.func.KeywordUDTF;
import com.atguigu.gamll.realtime.beans.KeywordStats;
import com.atguigu.gamll.realtime.common.GmallConstant;
import com.atguigu.gamll.realtime.utils.ClickHouseUtil;
import com.atguigu.gamll.realtime.utils.KeywordUtil;
import com.atguigu.gamll.realtime.utils.MyKafkaUtil;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * Keyword topic statistics application.
 *
 * <p>Reads DWD page-log events from Kafka as a dynamic table, filters search
 * behaviour, explodes each search phrase into individual keywords with the
 * {@code ik_analyze} UDTF, counts keywords per 10-second tumbling event-time
 * window, and sinks the aggregates into ClickHouse.
 */
public class KeywordStatsApp extends BaseApp {
    public static void main(String[] args) throws Exception {
        new KeywordStatsApp().baseEntry();
    }

    @Override
    public void biz(StreamExecutionEnvironment env) {
        // TODO 1. Set up the table execution environment
        StreamTableEnvironment streamTableEnv = StreamTableEnvironment.create(env);
        // Register the custom tokenizer UDTF so SQL can invoke it as ik_analyze
        streamTableEnv.createTemporarySystemFunction("ik_analyze", KeywordUDTF.class);

        // TODO 2. Declare a dynamic table over the Kafka source topic.
        // rowtime is derived from the epoch-millis ts column; watermark lags 3s.
        String groupId = "keyword_stats_group";
        String topic = "dwd_page_log";
        String pageLogDdl = "CREATE TABLE page_log (" +
                "    common MAP<STRING, STRING>," +
                "    page MAP<STRING, STRING>," +
                "    ts BIGINT," +
                "    rowtime AS TO_TIMESTAMP(FROM_UNIXTIME(ts/1000, 'yyyy-MM-dd HH:mm:ss'))," +
                "    WATERMARK FOR rowtime AS rowtime - INTERVAL '3' second" +
                ") WITH (" + MyKafkaUtil.getKafkaDDL(topic, groupId) + ")";
        streamTableEnv.executeSql(pageLogDdl);

        // TODO 3. Keep only search behaviour: good_list page views with a search phrase
        Table searchTable = streamTableEnv.sqlQuery("select " +
                "  page['item'] fullword," +
                "  rowtime" +
                " from page_log " +
                " where page['page_id'] = 'good_list' and page['item'] is not null");

        // TODO 4. Explode each search phrase into keywords (lateral join with the UDTF).
        // NOTE: concatenating the Table relies on Flink auto-registering it under
        // a generated name via toString() — preserved from the original.
        Table splitWordTable = streamTableEnv.sqlQuery("select" +
                " keyword, " +
                " rowtime " +
                " from " + searchTable + "," +
                " LATERAL TABLE(ik_analyze(fullword))AS T(keyword)");

        // TODO 5. Group by keyword over 10-second tumbling windows and count
        Table keywordStatsTable = streamTableEnv.sqlQuery("select " +
                " keyword," +
                " count(*) ct," +
                " '" + GmallConstant.KEYWORD_SEARCH + "' source," +
                " DATE_FORMAT(TUMBLE_START(rowtime, INTERVAL '10' SECOND),'yyyy-MM-dd HH:mm:ss') stt," +
                " DATE_FORMAT(TUMBLE_END(rowtime, INTERVAL '10' SECOND),'yyyy-MM-dd HH:mm:ss') edt," +
                " UNIX_TIMESTAMP()*1000 ts" +
                " from " + splitWordTable + " " +
                " GROUP BY TUMBLE(rowtime, INTERVAL '10' SECOND ), keyword");

        // TODO 6. Convert the aggregated dynamic table into an append-only stream
        DataStream<KeywordStats> keywordStatsStream =
                streamTableEnv.toAppendStream(keywordStatsTable, KeywordStats.class);

        // TODO 7. Write the stream into ClickHouse (print kept for debugging visibility)
        keywordStatsStream.print(">>>");
        keywordStatsStream.addSink(
                ClickHouseUtil.getSinkFunction("insert into keyword_stats_0819(keyword,ct,source,stt,edt,ts) values(?,?,?,?,?,?)")
        );

    }
}
