package com.atguigu.gmall.realtime.app.dws;

import com.atguigu.gmall.realtime.app.func.KeywordUDTF;
import com.atguigu.gmall.realtime.bean.KeywordBean;
import com.atguigu.gmall.realtime.common.GmallConstant;
import com.atguigu.gmall.realtime.util.MyClickHouseUtil;
import com.atguigu.gmall.realtime.util.MyKafkaUtil;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * Author: Felix
 * Date: 2022/6/18
 * Desc: 流量域-关键词聚合
 * 需要启动的进程
 *      zk、kafka、flume、clickhouse、DWDTrafficBaseLogSplit、DwsTrafficSourceKeywordPageViewWindow
 * 开发流程
 *      环境准备
 *      检查点相关设置
 *      从kafka的dwd_traffic_page_log主题中读取页面日志创建动态表,指定watermark以及提取事件时间字段
 *          tableEnv.executeSql(
 *              create table 表名(
 *                  common map<string,string>,
 *                  page map<string,string>,
 *                  ts bigint,
 *                  row_time   as     to_timestamp()  stringDate -->timestamp
 *                  watermark for 事件时间字段 as watermark生成策略
 *              )with(kafka连接器相关属性配置)
 *          )
 *     从表中过滤搜索行为
 *          where page['last_page_id'] = 'search' page['item_type']='keyword'
 *     使用自定义函数对搜索文本框中的内容进行分词，并且和原表字段进行关联
 *          注册自定义函数
 *          SELECT 字段 FROM 原表, LATERAL TABLE(表函数) 表函数处理结果临时表(临时表字段)
 *     分组、开窗、聚合计算
 *     将动态表转换为流
 *     将流中的数据写到ClickHouse
 *          流.addSink(
 *              SinkFunction<----JdbcSink.sink(
 *                  执行SQL,
 *                  给?占位符赋值,
 *                  执行相关的操作(批处理大小、等待时间),
 *                  连接相关的选项
 *              )
 *          )
 * 执行流程
 *      运行模拟生成前端埋点产生日志数据的jar包
 *      将产生的日志数据落盘
 *      flume从磁盘文件中采集日志数据到kafka的topic_log主题中
 *      DWDTrafficBaseLogSplit从topic_log主题中读取日志进行分流
 *          错误、启动、曝光、动作、页面
 *      DwsTrafficSourceKeywordPageViewWindow从页面日志中读取数据提取关键词
 *          详细流程参见开发流程
 *
 */
public class DwsTrafficSourceKeywordPageViewWindow {
    public static void main(String[] args) throws Exception {
        //TODO 1. Basic environment preparation
        //1.1 Create the stream execution environment
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        //1.2 Set parallelism
        env.setParallelism(4);
        //1.3 Create the table environment on top of the stream environment
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);
        //1.4 Register the keyword-splitting table function (UDTF) used in step 5
        tableEnv.createTemporarySystemFunction("ik_analyze", KeywordUDTF.class);

        //TODO 2. Checkpoint settings (omitted)

        //TODO 3. Read page logs from the Kafka topic dwd_traffic_page_log into a dynamic table,
        //        deriving an event-time column from the epoch-millis `ts` field and declaring
        //        a watermark with 3 seconds of allowed out-of-orderness
        tableEnv.executeSql("CREATE TABLE page_log (\n" +
            "  `common` map<string,string>,\n" +
            "  `page` map<string,string>,\n" +
            "  `ts` BIGINT,\n" +
            "  row_time as TO_TIMESTAMP(FROM_UNIXTIME(ts/1000,'yyyy-MM-dd HH:mm:ss')),\n" +
            "  WATERMARK FOR row_time AS row_time - INTERVAL '3' SECOND\n" +
            ")" + MyKafkaUtil.getKafkaDDL("dwd_traffic_page_log","dws_traffic_keyword_group"));

        //TODO 4. Filter search behavior out of the page logs.
        //        A record counts as a search when the user came from the search page,
        //        the item type is 'keyword', and the searched text is present.
        Table searchTable = tableEnv.sqlQuery("select\n" +
            " page['item'] fullword,\n" +
            " row_time\n" +
            "from \n" +
            " page_log\n" +
            "where \n" +
            " page['last_page_id'] = 'search' and page['item_type'] = 'keyword' and page['item'] is not null");

        tableEnv.createTemporaryView("search_table", searchTable);

        //TODO 5. Split the full search text into individual keywords with the UDTF and
        //        lateral-join each keyword back to its original row (keeping row_time)
        Table splitTable = tableEnv.sqlQuery("SELECT\n" +
            " keyword,row_time\n" +
            " FROM search_table,LATERAL TABLE(ik_analyze(fullword)) t(keyword)");
        tableEnv.createTemporaryView("split_table", splitTable);

        //TODO 6. Group by keyword over a 10-second tumbling event-time window and count hits;
        //        `ts` is the wall-clock emit time used downstream for versioning
        Table reduceTable = tableEnv.sqlQuery("select\n" +
            " DATE_FORMAT(TUMBLE_START(row_time, INTERVAL '10' SECOND), 'yyyy-MM-dd HH:mm:ss') stt,\n" +
            " DATE_FORMAT(TUMBLE_END(row_time, INTERVAL '10' SECOND), 'yyyy-MM-dd HH:mm:ss') edt,\n" +
            " '"+ GmallConstant.KEYWORD_SEARCH +"' as source,\n" +
            " keyword,\n" +
            " count(*) keyword_count,\n" +
            " UNIX_TIMESTAMP()*1000 ts\n" +
            "from \n" +
            "split_table\n" +
            "group by TUMBLE(row_time, INTERVAL '10' SECOND),keyword");

        //TODO 7. Convert the dynamic table to a DataStream.
        //        The tumbling-window aggregation emits final (insert-only) results, so an
        //        append conversion is safe; toDataStream replaces the deprecated toAppendStream.
        DataStream<KeywordBean> keywordDS = tableEnv.toDataStream(reduceTable, KeywordBean.class);

        //TODO 8. Write the stream to ClickHouse
        keywordDS.print(">>>>"); // debug output; consider removing or switching to a logger in production
        keywordDS.addSink(
            MyClickHouseUtil.<KeywordBean>getSinkFunction("insert into dws_traffic_source_keyword_page_view_window values(?,?,?,?,?,?)")
        );
        // Name the job so it is identifiable in the Flink web UI
        env.execute("DwsTrafficSourceKeywordPageViewWindow");
    }
}
