package com.atguigu.gmall.realtime.app.dws;

import com.atguigu.gmall.realtime.app.dws.func.KeywordFunction;
import com.atguigu.gmall.realtime.bean.KeywordBean;
import com.atguigu.gmall.realtime.constant.GmallConstant;
import com.atguigu.gmall.realtime.util.ClickhouseUtil;
import com.atguigu.gmall.realtime.util.MyKafkaUtil;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

public class DwsTrafficSourceKeywordPageViewWindow {

    // Job outline:
    //   1. Read page-view logs and keep only the rows produced by a keyword search.
    //   2. Split each full search phrase into individual keywords (UDTF, one-to-many).
    //   3. Lightly aggregate keyword counts in 10-second tumbling event-time windows.
    //   4. Write the windowed rows to ClickHouse.
    public static void main(String[] args) throws Exception {

        // 0. Environments: DataStream env plus its Table API bridge.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);

        Table searchWordTable = readSearchWords(tableEnv);
        Table keywordTable = splitKeywords(tableEnv, searchWordTable);
        Table windowTable = aggregateKeywordCounts(tableEnv, keywordTable);
        writeToClickhouse(tableEnv, windowTable);

        env.execute();
    }

    /**
     * Step 1: registers the Kafka-backed {@code dwd_traffic_page_log} source table and
     * returns the rows that represent a keyword search.
     *
     * <p>The DDL derives an event-time attribute {@code row_time} from {@code ts}
     * (epoch millis stored as string, divided by 1000 for FROM_UNIXTIME) and declares
     * it as a watermark with a 5-second out-of-orderness bound. A search event is a
     * page view that landed on the goods-list page coming from the search page with
     * {@code item_type='keyword'}; the searched phrase is in {@code page['item']}.
     */
    private static Table readSearchWords(StreamTableEnvironment tableEnv) {
        String sourceTopic = "dwd_traffic_page_log";
        String groupId = "dws_traffic_source_keyword";

        String createSourceTableSQL = "create table dwd_traffic_page_log\n" +
                "(  common  map<string,string>,\n" +
                "     page  map<string,string>,\n" +
                "     ts  string ,\n" +
                "     row_time as TO_TIMESTAMP(FROM_UNIXTIME(cast(ts as bigint)/1000)),  \n" +
                "     watermark for row_time as row_time  - INTERVAL '5' SECOND    \n" +
                ") " + MyKafkaUtil.getKafkaDDL(sourceTopic, groupId);
        tableEnv.executeSql(createSourceTableSQL);

        String selectWordSQL = "select page['item']  full_word ,ts,row_time  from  dwd_traffic_page_log\n" +
                "       where page['page_id']='good_list' and  page['last_page_id']='search' and page['item_type']='keyword'";
        return tableEnv.sqlQuery(selectWordSQL);
    }

    /**
     * Step 2: splits each full search phrase into individual keywords.
     *
     * <p>Registers {@link KeywordFunction} (an IK-analyzer based UDTF: one full_word
     * in, many keyword rows out) as a temporary system function and applies it with a
     * {@code LATERAL TABLE} cross join against the search rows.
     */
    private static Table splitKeywords(StreamTableEnvironment tableEnv, Table selectWordTable) {
        tableEnv.createTemporarySystemFunction("keywordAnalyze", KeywordFunction.class);

        String selectKeywordSQL = " select  full_word,ts,row_time,keyword  from  " + selectWordTable + " ,  LATERAL TABLE(keywordAnalyze(full_word))";
        return tableEnv.sqlQuery(selectKeywordSQL);
    }

    /**
     * Step 3: lightly aggregates keyword occurrences in 10-second tumbling
     * event-time windows (TUMBLE windowing TVF over {@code row_time}).
     *
     * <p>Dimensions: window start/end formatted as strings ({@code stt}/{@code edt}),
     * a constant source tag ({@code GmallConstant.KEYWORD_SEARCH}) and the keyword.
     * Measure: row count per keyword. {@code ts} is the emit time in epoch millis
     * taken from the wall clock at aggregation time.
     */
    private static Table aggregateKeywordCounts(StreamTableEnvironment tableEnv, Table selectKeywordTable) {
        String windowSQL = "       SELECT cast( DATE_FORMAT(window_start,'yyyy-MM-dd HH:mm:ss') as string) stt," +
                "    cast( DATE_FORMAT( window_end,'yyyy-MM-dd HH:mm:ss') as string) edt," +
                "  '" + GmallConstant.KEYWORD_SEARCH + "' source ,\n" +
                "  keyword ,\n" +
                "  count(*) keyword_count ,\n" +
                " UNIX_TIMESTAMP()*1000 ts \n" +
                "      FROM TABLE(\n" +
                "           TUMBLE(TABLE " + selectKeywordTable + ", DESCRIPTOR(row_time), INTERVAL '10' SECONDS))\n" +
                "        GROUP BY window_start, window_end ,keyword ";
        return tableEnv.sqlQuery(windowSQL);
    }

    /**
     * Step 4: writes the windowed result to ClickHouse.
     *
     * <p>There is no official ClickHouse JDBC connector for pure Flink SQL here, so
     * the result table is converted to an append-only {@code DataStream} of
     * {@link KeywordBean} and written through the project's custom JDBC sink.
     */
    private static void writeToClickhouse(StreamTableEnvironment tableEnv, Table windowTable) {
        DataStream<KeywordBean> keywordBeanDataStream = tableEnv.toAppendStream(windowTable, KeywordBean.class);
        // NOTE(review): debug output left enabled as in the original; consider removing for production.
        keywordBeanDataStream.print();
        keywordBeanDataStream.addSink(ClickhouseUtil.<KeywordBean>getSink(
                "insert into dws_traffic_source_keyword_page_view_window (stt, edt, source, keyword,keyword_count,ts) values (?,?,?, ?, ?, ?)"
        ));
    }

}
