package com.atguigu.edu.app.dws;

import com.atguigu.edu.app.func.KeywordUDTF;
import com.atguigu.edu.bean.KeywordBean;
import com.atguigu.edu.common.EduConstant;
import com.atguigu.edu.util.ClickHouseUtil;
import com.atguigu.edu.util.KafkaUtil;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * DWS-layer Flink job: per-source search-keyword page-view counts over
 * 10-second tumbling event-time windows.
 *
 * <p>Pipeline:
 * <ol>
 *   <li>read page logs from Kafka topic {@code dwd_traffic_page_log};</li>
 *   <li>filter rows where {@code page['item_type'] = 'keyword'} and the search
 *       phrase ({@code page['item']}) is present;</li>
 *   <li>split each phrase into words with the {@link KeywordUDTF} table function;</li>
 *   <li>count each word per 10-second tumbling window;</li>
 *   <li>write the aggregated rows to ClickHouse table
 *       {@code dws_traffic_source_keyword_page_view_window}.</li>
 * </ol>
 */
public class DwsTrafficSourceKeywordPageViewWindow {
    public static void main(String[] args) throws Exception {
        // todo 1: set up the streaming and table environments.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Parallelism 1 for development; in production this should normally
        // match the Kafka topic's partition count.
        env.setParallelism(1);
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);

        // todo 2: checkpointing / state backend.
        // NOTE(review): checkpointing is disabled below, so the job has no
        // fault tolerance and the Kafka source will not commit offsets on
        // checkpoints — re-enable this block before a production deployment.
        /*env.enableCheckpointing(5 * 1000L, CheckpointingMode.AT_LEAST_ONCE);
        env.getCheckpointConfig().setCheckpointTimeout(3 * 60 *1000L);
        env.getCheckpointConfig().setMaxConcurrentCheckpoints(2);
        env.getCheckpointConfig().setCheckpointStorage("hdfs://hadoop102:8020/gmall/ck");
        System.setProperty("HADOOP_USER_NAME","atguigu");
        env.setStateBackend(new HashMapStateBackend());*/

        // todo 3: declare the Kafka source table over dwd_traffic_page_log,
        // with event-time attribute rt derived from ts (epoch millis) and a
        // 2-second watermark delay for out-of-order events.
        String pageTopic = "dwd_traffic_page_log";
        String groupId = "dws_traffic_source_keyword_page_view_window";
        tableEnv.executeSql("create table page_log(\n" +
                "  `common` map<string,string>,\n" +
                "  `page` map<string,string>,\n" +
                "  `ts` bigint,\n" +
                "   rt as TO_TIMESTAMP_LTZ(ts,3),\n" +
                "   WATERMARK FOR rt AS rt - INTERVAL '2' SECOND \n" +
                ")" + KafkaUtil.getKafkaDDL(pageTopic, groupId));

        // todo 4: keep only keyword-search events that carry a search phrase.
        // NOTE(review): the `last_page_id = 'search'` predicate was deliberately
        // disabled in the original code — confirm whether it should be restored.
        Table filterTable = tableEnv.sqlQuery("select  \n" +
                "  `page`['item'] keyword,\n" +
                "   rt \n" +
                "from page_log\n" +
                "where `page`['item_type'] = 'keyword'\n" +
                //"and `page`['last_page_id'] = 'search'\n" +
                "and `page`['item'] is not null");
        tableEnv.createTemporaryView("filter_table", filterTable);

        // todo 5: register the tokenizer UDTF and explode each search phrase
        // into one row per word via a lateral (cross) join.
        tableEnv.createTemporarySystemFunction("analyze_keyword", KeywordUDTF.class);
        Table wordTable = tableEnv.sqlQuery("select\n" +
                "word,\n" +
                "rt\n" +
                "from filter_table,\n" +
                "LATERAL TABLE(analyze_keyword(keyword))");
        tableEnv.createTemporaryView("word_table", wordTable);

        // todo 6: count each word per 10-second tumbling event-time window;
        // stt/edt are the formatted window bounds, source tags the row as a
        // search keyword, and ts is the processing wall-clock timestamp.
        Table groupTable = tableEnv.sqlQuery("select\n" +
                "DATE_FORMAT(TUMBLE_START(rt, INTERVAL '10' SECOND),'yyyy-MM-dd HH:mm:ss') AS stt,\n" +
                "DATE_FORMAT(TUMBLE_END(rt, INTERVAL '10' SECOND),'yyyy-MM-dd HH:mm:ss')  AS edt,\n" +
                "word keyword,\n" +
                "count(*) keyword_count,\n" +
                " '" + EduConstant.KEYWORD_SEARCH + "' source,"+
                "UNIX_TIMESTAMP() ts\n" +
                "from word_table\n" +
                "group by word, \n" +
                "TUMBLE(rt,INTERVAL '10' SECOND)");

        // todo 7: convert the result table to a POJO stream; the windowed
        // aggregation emits append-only rows, so an append stream is safe here.
        // NOTE(review): toAppendStream is deprecated since Flink 1.14 — prefer
        // tableEnv.toDataStream(groupTable, KeywordBean.class) when upgrading.
        DataStream<KeywordBean> beanDataStream = tableEnv.toAppendStream(groupTable, KeywordBean.class);

        // todo 8: sink the aggregated rows into ClickHouse with a
        // parameterized insert (one ? per KeywordBean field).
        String sql = "insert into dws_traffic_source_keyword_page_view_window values(?,?,?,?,?,?)";
        beanDataStream.addSink(ClickHouseUtil.<KeywordBean>getClickHouseSinkFunc(sql));

        env.execute(groupId);
    }
}
