package com.atguigu.gmall.realtime.app.dws;

import com.atguigu.gmall.realtime.app.func.keywordUDTF;
import com.atguigu.gmall.realtime.bean.KeywordStats;
import com.atguigu.gmall.realtime.common.GmallConstant;
import com.atguigu.gmall.realtime.utils.ClickHouseUtil;
import com.atguigu.gmall.realtime.utils.MyKafkaPro;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.runtime.state.filesystem.FsStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

//Search-keyword statistics, computed with Flink SQL
public class KeywordStatsApp {
    /**
     * Entry point: reads page-view logs from Kafka, extracts search keywords with a
     * custom UDTF (IK analyzer), aggregates them in 10-second tumbling event-time
     * windows, and sinks the results into ClickHouse.
     *
     * @param args unused
     * @throws Exception if the Flink job fails to start or execute
     */
    public static void main(String[] args) throws Exception {
        //TODO 1 Basic stream environment
        //1.1 Obtain the stream execution environment
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(4);
        // Checkpoint configuration:
        // take a checkpoint every 5 s with exactly-once semantics
        env.enableCheckpointing(5000, CheckpointingMode.EXACTLY_ONCE);
        // A checkpoint must complete within one minute, otherwise it is discarded.
        // BUGFIX: the timeout was 6000 ms (6 s), contradicting the intended 1-minute
        // limit and sitting barely above the 5 s checkpoint interval.
        env.getCheckpointConfig().setCheckpointTimeout(60000L);
        // Where checkpoints are stored, and the HDFS user used to write them
        env.setStateBackend(new FsStateBackend("hdfs://hadoop104:8020/gmall/flink/checkpoint1"));
        System.setProperty("HADOOP_USER_NAME","atguigu");
        // Restart strategy: up to 3 attempts, 3 s apart
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(3,3000L));
        //1.2 Obtain the table environment
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);
        //TODO 2 Register the custom UDTF (IK word-segmentation function)
        tableEnv.createTemporarySystemFunction("ik_analyze", keywordUDTF.class);

        //TODO 3 Define the Kafka source as a dynamic table
        // Load the page-log stream from Kafka into a dynamic table
        String pageTopic ="dwd_topic_page";
        String group="KeywordStatsApp";

        // Only the common, page and ts fields are needed downstream.
        // rowtime is derived from ts (epoch millis) and drives a 3 s watermark.
        tableEnv.executeSql("create table page_view( " +
                "common MAP<STRING,STRING>," +
                "page MAP<STRING,STRING>," +
                "ts BIGINT," +
                "rowtime AS TO_TIMESTAMP(FROM_UNIXTIME(ts/1000, 'yyyy-MM-dd HH:mm:ss'))," +
                "WATERMARK FOR rowtime AS rowtime - INTERVAL '3' SECOND" +
                ") WITH("+ MyKafkaPro.getKafkaDDL(pageTopic,group) +")");
        //TODO 4 Filter: keep only search logs (good_list pages with a non-null item)
        Table fullwordView = tableEnv.sqlQuery("select page['item'] fullword,rowtime from page_view " +
                " where page['page_id']='good_list' " +
                " and page['item'] is not null");
        //TODO 5 Split the full search phrase into keywords with the UDTF.
        // NOTE: concatenating the Table object into the SQL string relies on
        // Table.toString() implicitly registering it under a generated name.
        Table keywordView = tableEnv.sqlQuery("select rowtime,keyword " +
                " from " + fullwordView +", LATERAL TABLE(ik_analyze(fullword)) AS t(keyword)" );
        //TODO 6 Aggregate: group by keyword over 10 s tumbling windows
        Table keyedByWithReduce = tableEnv.sqlQuery("select " +
                "keyword," +
                "count(*) ct,'"
                + GmallConstant.KEYWORD_SEARCH + "' source," +
                "DATE_FORMAT(TUMBLE_START(rowtime, INTERVAL '10' SECOND),'yyyy-MM-dd HH:mm:ss') stt," +
                "DATE_FORMAT(TUMBLE_END(rowtime, INTERVAL '10' SECOND),'yyyy-MM-dd HH:mm:ss') edt," +
                "UNIX_TIMESTAMP()*1000 ts " +
                "from  " + keywordView + " " +
                " group by TUMBLE(rowtime,INTERVAL '10' SECOND),keyword");
        //TODO 7 Convert the result table to an append-only stream of KeywordStats
        DataStream<KeywordStats> keywordStatsDataStream = tableEnv.toAppendStream(keyedByWithReduce, KeywordStats.class);
        keywordStatsDataStream.print(">>>>>>>>>>>");

        //TODO 8 Sink the aggregated keyword stats into ClickHouse
        keywordStatsDataStream.addSink(
                ClickHouseUtil.getJdbcSink("insert into keyword_stats_1021(keyword,ct,source,stt,edt,ts)  " +
                        " values(?,?,?,?,?,?)"
                )
        );

        env.execute();
    }
}
