package flink;

import bean.ckBean;
import function.splitWordsFunction;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import utils.ClickHouseUtils;

/**
 * Flink streaming job: reads search-query events and item metadata from Kafka,
 * splits each query's text into words with a UDTF, counts words per item over
 * 10-second tumbling event-time windows, enriches the counts with the item's
 * province/city, and writes the result rows to ClickHouse.
 */
public class KeyWordsApp {
    public static void main(String[] args) throws Exception {
        // Create the execution environment. Parallelism 1 keeps console output
        // ordered and is sufficient for this single-node pipeline.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);
        EnvironmentSettings settings = EnvironmentSettings.newInstance()
                .useBlinkPlanner()
                .inStreamingMode()
                .build();
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env, settings);

        // Kafka-backed source tables. In queryTable, `rt` is an event-time
        // attribute computed from the epoch-millis `ts` column, with a
        // 1-second out-of-orderness bound on the watermark.
        String querySql = "CREATE TABLE queryTable (\n" +
                "  `itemid` BIGINT,\n" +
                "  `describestr` STRING,\n" +
                "  `ts` BIGINT,\n" +
                "  `rt` as TO_TIMESTAMP(FROM_UNIXTIME(ts/1000)),\n" +
                "   WATERMARK FOR rt AS rt - INTERVAL '1' SECOND\n" +
                ") WITH (\n" +
                "  'connector' = 'kafka',\n" +
                "  'topic' = 'query',\n" +
                "  'properties.bootstrap.servers' = '192.168.1.10:9092',\n" +
                "  'properties.group.id' = 'query',\n" +
                "  'scan.startup.mode' = 'latest-offset',\n" +
                "  'format' = 'csv'\n" +
                ")";
        String itemSql = "CREATE TABLE itemTable (\n" +
                "  `itemid` BIGINT,\n" +
                "  `provience` STRING,\n" +
                "  `city` STRING\n" +
                ") WITH (\n" +
                "  'connector' = 'kafka',\n" +
                "  'topic' = 'item',\n" +
                "  'properties.bootstrap.servers' = '192.168.1.10:9092',\n" +
                "  'properties.group.id' = 'item',\n" +
                "  'scan.startup.mode' = 'latest-offset',\n" +
                "  'format' = 'csv'\n" +
                ")";
        tableEnv.executeSql(querySql);
        tableEnv.executeSql(itemSql);

        // Register the word-splitting UDTF and explode `describestr` into one
        // row per word, keeping the event-time attribute for windowing.
        tableEnv.createTemporarySystemFunction("splitWords", splitWordsFunction.class);
        String splitSql = "select \n" +
                "\titemid,\n" +
                "\tword,\n" +
                "\trt\n" +
                "\tfrom queryTable , LATERAL TABLE(splitWords(describestr)) ";
        Table wordTable = tableEnv.sqlQuery(splitSql);
        // Register intermediate results under explicit names rather than
        // concatenating Table.toString() into later SQL, which depends on
        // implicit registration under an opaque generated name.
        tableEnv.createTemporaryView("wordTable", wordTable);

        // Per-item word counts over 10-second tumbling event-time windows.
        String groupSql = "  select \n" +
                "   DATE_FORMAT(TUMBLE_START(rt, INTERVAL '10' SECOND), 'yyyy-MM-dd HH:mm:ss') as startDate,\n" +
                "   DATE_FORMAT(TUMBLE_END(rt, INTERVAL '10' SECOND), 'yyyy-MM-dd HH:mm:ss') as endDate,\n" +
                "   itemid,\n" +
                "   word,\n" +
                "   count(word) count_word\n" +
                "  from wordTable\n" +
                "  GROUP BY \n" +
                "    itemid,\n" +
                "    word,\n" +
                "    TUMBLE(rt, INTERVAL '10' SECOND)";
        Table groupResult = tableEnv.sqlQuery(groupSql);
        tableEnv.createTemporaryView("groupResult", groupResult);

        // Enrich the window counts with item metadata. The GROUP BY carries no
        // aggregate function, so it behaves as a DISTINCT, de-duplicating rows
        // when itemTable delivers the same itemid more than once. `timestr` is
        // a processing-time tag (epoch millis) added at emission time.
        String joinSql = "  select \n" +
                "   a.startDate,\n" +
                "   a.endDate,\n" +
                "   a.itemid,\n" +
                "   a.word,\n" +
                "   b.provience,\n" +
                "   b.city,\n" +
                "   a.count_word,\n" +
                "   UNIX_TIMESTAMP()*1000 timestr\n" +
                "  from groupResult a \n" +
                "       inner join itemTable b on a.itemid=b.itemid\n" +
                "  GROUP BY \n" +
                "    a.itemid,\n" +
                "    a.word,\n" +
                "    b.provience,\n" +
                "    a.startDate,\n" +
                "    a.endDate,\n" +
                "    a.count_word,\n" +
                "    b.city";
        Table result = tableEnv.sqlQuery(joinSql);

        // Convert to a retract stream (the regular join + aggregation can emit
        // updates; f0=true marks an insert, f0=false a retraction) and write to
        // ClickHouse. NOTE(review): retraction rows are written to the sink as
        // well — presumably the ClickHouse table de-duplicates downstream;
        // otherwise a filter on line.f0 is needed. Verify against the sink.
        DataStream<Tuple2<Boolean, ckBean>> dataStream = tableEnv.toRetractStream(result, ckBean.class);
        dataStream.print(); // debug output; consider removing in production
        dataStream.map(line -> line.f1)
                .addSink(ClickHouseUtils.getSink("insert into test values(?,?,?,?,?,?,?,?)"));

        // Submit the job.
        env.execute("KeyWordsApp");
    }
}
