package com.atliuzu.app.dws;


import com.atliuzu.bean.KeywordBean;
import com.atliuzu.utils.ClickHouseUtil;
import com.atliuzu.utils.MyKafkaUtil;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.types.Row;

/**
 * @Author : songyuan
 * @Description : DWS job: counts search keywords from the dwd_traffic_page_log
 *                Kafka topic over 10-second tumbling event-time windows and
 *                writes the results to ClickHouse.
 * Date : 2022/8/20 15:06
 * @Version : 1.0
 */
public class DwsTrafficKeywordPageViewWindow {

    public static void main(String[] args) throws Exception {

        // 1. Create the stream execution environment and its Table API bridge.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);

        // 2. Checkpointing / state backend configuration (disabled for local development;
        //    re-enable for production so the Kafka offsets and window state survive failures).
//        env.enableCheckpointing(3000L, CheckpointingMode.EXACTLY_ONCE);
//        env.getCheckpointConfig().setCheckpointTimeout(60 * 1000L);
//        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(3000L);
//        env.getCheckpointConfig().enableExternalizedCheckpoints(
//                CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION
//        );
//        env.setRestartStrategy(RestartStrategies.failureRateRestart(
//                3, Time.days(1), Time.minutes(1)
//        ));
//        env.setStateBackend(new HashMapStateBackend());
//        env.getCheckpointConfig().setCheckpointStorage(
//                "hdfs://hadoop102:8020/ck"
//        );
//        System.setProperty("HADOOP_USER_NAME", "atguigu");

        // 3. DDL source: read the page log from Kafka and derive the event-time
        //    attribute `rt` from the epoch-millis `ts` field, with a 2-second watermark delay.
        tableEnv.executeSql("" +
                "create table page_view( " +
                "    `page` Map<String,String>, " +
                "    `ts` Bigint, " +
                "    `rt` AS TO_TIMESTAMP_LTZ(ts,3), " +
                "    WATERMARK FOR rt AS rt - INTERVAL '2' SECOND " +
                ") " + MyKafkaUtil.getKafkaDDL("dwd_traffic_page_log", "keyword_page_view_window"));
        //tableEnv.executeSql("select * from page_view").print();

        // 4. Filter keyword-search records: item_type = 'keyword' with a non-null item.
        Table filterTable = tableEnv.sqlQuery("" +
                "select " +
                "    page['item'] word, " +
                "    rt " +
                "from page_view " +
                "where page['item_type']='keyword' " +
                "and page['item'] is not null");
        tableEnv.createTemporaryView("filter_table", filterTable);

        // 5. (Disabled) word-segmentation UDF that would split a search phrase into keywords.
//        //tableEnv.executeSql("select * from filter_table").print();
//        tableEnv.createTemporarySystemFunction("SplitFunction", splitFunction.class);
//        Table splitTable = tableEnv.sqlQuery("" +
//                "SELECT  " +
//                "    rt, " +
//                "    word " +
//                "FROM filter_table, LATERAL TABLE(SplitFunction(words))");
//        tableEnv.createTemporaryView("split_table", splitTable);

        // 6. 10-second tumbling event-time window, grouped by keyword; `ts` is the
        //    wall-clock emit time used downstream for versioning the row.
        Table resultTable = tableEnv.sqlQuery("" +
                "select " +
                "    DATE_FORMAT(TUMBLE_START(rt, INTERVAL '10' SECOND),'yyyy-MM-dd HH:mm:ss') stt, " +
                "    DATE_FORMAT(TUMBLE_END(rt, INTERVAL '10' SECOND),'yyyy-MM-dd HH:mm:ss') edt, " +
                "    word keyword, " +
                "    count(*) keyword_count, " +
                "    UNIX_TIMESTAMP() ts " +
                "from filter_table " +
                "group by word, " +
                "TUMBLE(rt, INTERVAL '10' SECOND)");
        // Window output is append-only, so a plain append stream of KeywordBean is safe here.
        DataStream<KeywordBean> keywordBeanDataStream =
                tableEnv.toAppendStream(resultTable, TypeInformation.of(KeywordBean.class));
        keywordBeanDataStream.print();

        // 7. Sink the aggregated rows into ClickHouse.
        keywordBeanDataStream.addSink(ClickHouseUtil.getJdbcSink(
                "insert into dws_traffic_keyword_page_view_window values(?,?,?,?,?)"));

        // 8. Submit the job with an identifiable name for the Flink UI.
        env.execute("DwsTrafficKeywordPageViewWindow");
    }

}
