package com.qingyunge.app.dws;

import com.qingyunge.app.func.SplitFunction;
import com.qingyunge.bean.KeywordBean;
import com.qingyunge.bean.Splitbean;
import com.qingyunge.util.MyClickHouseUtil;
import com.qingyunge.util.MyKafkaUtil;
import org.apache.flink.api.common.functions.ReduceFunction;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * DWS-layer Flink job: extracts keywords from product-evaluation messages on the
 * {@code sku_eval_topic} Kafka topic, counts keyword occurrences per skuId in
 * 10-second tumbling event-time windows, and writes the aggregated rows to the
 * ClickHouse table {@code dws_keyword_table}.
 *
 * <p>Pipeline: Kafka source (SQL DDL) → UDTF word split → windowed count (SQL)
 * → keyed rolling sum (DataStream) → ClickHouse sink.
 */
public class Dws_evaluate_source_keyword_window {
    public static void main(String[] args) throws Exception {
        // Parallelism 1 keeps ordering simple for this DWS job.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment().setParallelism(1);
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);

        String topic = "sku_eval_topic";
        String groupId = "EvaluateSourceKeyWord"; // fixed typo: was "gourpid"

        // TODO 1. Source table over Kafka: skuId, evaluation text, and event ts (epoch millis).
        // `rt` is derived as an event-time attribute with a 2-second watermark delay.
        tableEnv.executeSql("create table keyword_table ( " +
                "  `skuId` string, " +
                "  `evaluate` string, " +
                "  `ts` bigint, " +
                "  `rt` AS TO_TIMESTAMP(FROM_UNIXTIME(ts / 1000)), " +
                "    WATERMARK FOR rt AS rt - INTERVAL '2' SECOND " +
                ")" + MyKafkaUtil.getKafkaDDL(topic, groupId));

        // Register the word-splitting table function (UDTF) used in the lateral join below.
        tableEnv.createTemporaryFunction("SplitFunction", SplitFunction.class);

        // TODO 2. Split each evaluation into words; one output row per (skuId, word, rt).
        Table splitword = tableEnv.sqlQuery("SELECT " +
                " skuId, " +
                "    word, " +
                "    rt " +
                "FROM keyword_table,  " +
                "LATERAL TABLE(SplitFunction(evaluate))");
        tableEnv.createTemporaryView("split_table", splitword);

        // TODO 3. 10-second tumbling window count per (skuId, word).
        // NOTE(review): the comment in the original said "group by the FIRST LETTER of
        // skuId", and the SELECT projects LEFT(skuId, 1) — but the GROUP BY uses the full
        // skuId, so distinct skuIds sharing a first letter produce separate rows here and
        // are only merged by the keyed reduce below. Confirm which behavior is intended.
        // NOTE(review): ts uses UNIX_TIMESTAMP() (processing time), not the event time.
        Table keywordtable = tableEnv.sqlQuery("select " +
                "    DATE_FORMAT(TUMBLE_START(rt, INTERVAL '10' SECOND),'yyyy-MM-dd HH:mm:ss') stt, " +
                "    DATE_FORMAT(TUMBLE_END(rt, INTERVAL '10' SECOND),'yyyy-MM-dd HH:mm:ss') edt, " +
                "    LEFT(split_table.skuId, 1) AS skuId, " +
                "    word AS keyword, " +
                "    count(*) AS keywordCount, " +
                "    UNIX_TIMESTAMP()*1000 ts " +
                "from split_table " +
                "group by skuId,word,TUMBLE(rt, INTERVAL '10' SECOND)");

        // TODO 4. Convert to a DataStream of KeywordBean and merge counts that share the
        // same (first-letter skuId, keyword) key, then sink to ClickHouse.
        DataStream<KeywordBean> keywordBeanDS = tableEnv.toAppendStream(keywordtable, KeywordBean.class);
        keywordBeanDS.print("k========");

        // NOTE(review): an unwindowed keyed reduce emits a cumulative partial result for
        // every incoming element, so counts per key grow across windows for the lifetime
        // of the job — verify the ClickHouse table/engine is set up to absorb these
        // incremental updates (e.g. ReplacingMergeTree), otherwise this over-counts.
        SingleOutputStreamOperator<KeywordBean> keyBySkuandKeyDS = keywordBeanDS
                .keyBy(new KeySelector<KeywordBean, Tuple2<String, String>>() {
                    @Override
                    public Tuple2<String, String> getKey(KeywordBean keywordBean) throws Exception {
                        return Tuple2.of(keywordBean.getSkuId(), keywordBean.getKeyword());
                    }
                })
                .reduce(new ReduceFunction<KeywordBean>() {
                    @Override
                    public KeywordBean reduce(KeywordBean acc, KeywordBean next) throws Exception {
                        // Fold the new window's count into the running total for this key.
                        acc.setKeywordCount(acc.getKeywordCount() + next.getKeywordCount());
                        return acc;
                    }
                });
        keyBySkuandKeyDS.print("sink==");
        keyBySkuandKeyDS.addSink(MyClickHouseUtil.getSinkFunction("insert into dws_keyword_table values(?,?,?,?,?,?)"));

        env.execute("DwsEvaluateSourceKeyWordWindow");
    }
}
