package com.atguigu.edu.realtime.app.dws;

import com.atguigu.edu.realtime.app.func.KeywordUDTF;
import com.atguigu.edu.realtime.common.EduConfig;
import com.atguigu.edu.realtime.util.MyKafkaUtil;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * ClassName: DwsTrafficSourceKeywordPageViewWindow
 * Package: com.atguigu.edu.realtime.app.dws
 * Description:
 * 流量域搜索关键词粒度页面浏览各窗口汇总表
 * @Author Mr.2
 * @Create 2023/9/8 19:34
 * @Version 1.0
 */
/**
 * Traffic domain: per-keyword page-view counts aggregated over tumbling event-time windows,
 * written to Doris. Pipeline: Kafka page log -> filter search actions -> tokenize keywords
 * with a custom UDTF -> windowed count -> Doris sink. Implemented with Flink SQL.
 */
public class DwsTrafficSourceKeywordPageViewWindow {
    public static void main(String[] args) {
        // TODO 1. Environment setup
        // 1.1 Stream execution environment
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // 1.2 Parallelism
        env.setParallelism(4);
        // 1.3 Table environment bridging the streaming environment
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);
        // 1.4 Register the custom tokenizer UDTF; "ik_analyze" is the name usable in SQL below.
        // createTemporarySystemFunction registers the function globally (callable without a
        // catalog/database prefix), whereas createTemporaryFunction registers it only in the
        // current catalog/database.
        tableEnv.createTemporarySystemFunction("ik_analyze", KeywordUDTF.class);

        // TODO 2. Checkpointing
        // Checkpoint every 20 s; checkpointing must be enabled because the job sinks to Doris.
        env.enableCheckpointing(20000L);

        // TODO 3. Read the page log topic (dwd_traffic_page_log) from Kafka as a dynamic table,
        //  declaring the event-time column and its watermark strategy.
        // 3.1 Topic and consumer group
        String topic = "dwd_traffic_page_log";
        String groupId = "dws_traffic_keyword_group";
        // 3.2 Dynamic table over the Kafka topic; row_time is derived from the epoch-millis ts
        //     column and used as the event-time attribute (zero-lateness watermark).
        tableEnv.executeSql(" CREATE TABLE page_log (\n" +
                "  `common` MAP<STRING, STRING>,\n" +
                "  `page`   MAP<STRING, STRING>,\n" +
                "  `ts` BIGINT,\n" +
                "  `row_time` AS TO_TIMESTAMP(FROM_UNIXTIME(ts/1000)),\n" +
                "  WATERMARK FOR `row_time` AS `row_time`\n" +
                ")" + MyKafkaUtil.getKafkaDDL(topic,groupId));

        // Debug aid: with batch execution .print() renders header + rows + footer.
//        tableEnv.executeSql(" select * from page_log ").print();

        // TODO 4. Filter out search actions.
        // Intended conditions per the design notes:
        //  1) "item" is not null
        //  2) "item_type" = "keyword"
        //  3) "last_page_id" in ("home", "course_list")
        // Fix: the OR disjunction must be parenthesized — SQL AND binds tighter than OR, so the
        // original predicate let every last_page_id='home' row through regardless of item_type/item.
        // NOTE(review): an earlier note here also mentioned page_id='course_list' and
        // last_page_id in ('home','course_detail'); the SQL filters last_page_id in
        // ('home','course_list') instead — confirm the intended condition against the DWD spec.
        Table searchTable = tableEnv.sqlQuery(
                " select\n" +
                        " page['item'] as fullword,\n" +
                        " row_time\n" +
                        " from page_log\n" +
                        " where (page['last_page_id'] = 'home' or page['last_page_id'] = 'course_list')\n" +
                        "   and page['item_type'] = 'keyword'\n" +
                        "   and page['item'] is not null");
        // Register as a temporary view for the next query.
        tableEnv.createTemporaryView("search_table", searchTable);
        // Debug aid -- verified correct
//        tableEnv.executeSql(" select * from search_table ").print();

        // TODO 5. Tokenize: explode each full search phrase into keywords via the UDTF,
        //  keeping row_time so the event-time attribute survives the join.
        Table splitTable = tableEnv.sqlQuery(" SELECT \n" +
                "  keyword,\n" +
                "  row_time\n" +
                " FROM search_table,\n" +
                " LATERAL TABLE(ik_analyze(fullword)) t(keyword)");
        // Register as a temporary view for the aggregation.
        tableEnv.createTemporaryView("split_table", splitTable);
        // Debug aid -- verified correct
//        tableEnv.executeSql("select * from split_table").print();

        // TODO 6. Group, window, and aggregate.
        // Approach 1: Window TVF aggregation -- 10 s tumbling event-time windows.
        Table resTable = tableEnv.sqlQuery(" SELECT \n" +
                "     DATE_FORMAT(window_start, 'yyyy-MM-dd HH:mm:ss') stt, \n" +
                "     DATE_FORMAT(window_end, 'yyyy-MM-dd HH:mm:ss') edt, \n" +
                "     keyword,\n" +
                "     DATE_FORMAT(window_start, 'yyyyMMdd') cur_date,\n" +
                "     count(*) keyword_count\n" +
                " FROM TABLE(\n" +
                "     TUMBLE(TABLE split_table, DESCRIPTOR(row_time), INTERVAL '10' SECONDS))\n" +
                " GROUP BY window_start, window_end, keyword ");
        // Register as a temporary view for the sink insert.
        tableEnv.createTemporaryView("res_table", resTable);
        // Debug aid ->
//        tableEnv.executeSql("select * from res_table").print();

        // TODO 7. Write the aggregated result to Doris.
        // 7.1 Sink table over the Doris connector.
        // NOTE(review): credentials are hard-coded here; consider moving username/password into
        // EduConfig alongside DORIS_FE/DORIS_DB.
        // NOTE(review): 'sink.buffer-size' = '4086' looks like a typo for 4096 -- confirm.
        tableEnv.executeSql("  CREATE TABLE t_doris (\n" +
                "  stt STRING,\n" +
                "  edt STRING,\n" +
                "  keyword STRING,\n" +
                "  cur_date STRING,\n" +
                "  keyword_count BIGINT\n" +
                "  ) \n" +
                "  WITH (\n" +
                "        'connector' = 'doris',\n" +
                "        'fenodes' = '" + EduConfig.DORIS_FE + "',\n" +
                "        'table.identifier' = '" + EduConfig.DORIS_DB + ".dws_traffic_source_keyword_page_view_window',\n" +
                "        'username' = 'root',\n" +
                "        'password' = 'aaaaaa',\n" +
                "        'sink.properties.format' = 'json',\n" +
                "        'sink.properties.read_json_by_line' = 'true',\n" +
                "        'sink.buffer-size' = '4086',\n" +
                "        'sink.buffer-count' = '4',\n" +
                "        'sink.enable-2pc' = 'false'\n" +
                " )" );
        // 7.2 Insert into Doris.
        tableEnv.executeSql("insert into t_doris select * from res_table");

    }
}
