package yuekao2.dws;

import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.annotation.DataTypeHint;
import org.apache.flink.table.annotation.FunctionHint;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.table.functions.TableFunction;
import org.apache.flink.types.Row;
import yuekao2.entity.Tm1_3_6;
import yuekao2.util.ClickHouseUtil;
import yuekao2.util.MyIkmoeney;

import java.util.ArrayList;

/**
 * UDTF that splits a search-keyword string into individual words, emitting one
 * row per word (single STRING column {@code word}).
 *
 * <p>The {@code main} method wires a complete Flink SQL job:
 * Kafka topic {@code dwd_union_log} (event time from epoch-millis {@code ts})
 * → filter to search-keyword page events and split the keyword text
 * → 10-second tumbling-window word counts
 * → ClickHouse sink.
 */
@FunctionHint(output = @DataTypeHint("ROW<word STRING>"))
public class KeyByTable extends TableFunction<Row> {

    /**
     * Tokenizes {@code str} via the IK-analyzer helper and collects one output
     * row per token.
     *
     * @param str raw keyword text; no rows are emitted when the splitter
     *            returns null or an empty list
     */
    public void eval(String str) {
        ArrayList<String> splitword = MyIkmoeney.Splitword(str);
        if (splitword == null) {
            return; // defensive: tolerate a null result from the splitter helper
        }
        for (String s : splitword) {
            collect(Row.of(s));
        }
    }

    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);
        StreamTableEnvironment tEnv = StreamTableEnvironment.create(env);

        // Kafka source table. Event time `times` is derived from the epoch-millis
        // `ts` column; watermark allows 0s lateness.
        tEnv.executeSql("CREATE TABLE dwd_union (\n" +
                "  `common` Map<String,String>,\n" +
                "  `page` Map<String,String>,\n" +
                "  `ts` BIGINT,\n" +
                "  `times` AS TO_TIMESTAMP(FROM_UNIXTIME(ts/1000)),\n" +
                "  WATERMARK FOR times AS times - INTERVAL '0' SECOND\n" +
                ") WITH (\n" +
                " 'connector' = 'kafka',\n" +
                " 'topic' = 'dwd_union_log',\n" +
                " 'properties.bootstrap.servers' = 'hadoop-single:9092',\n" +
                " 'properties.group.id' = 'testGroup',\n" +
                " 'scan.startup.mode' = 'latest-offset',\n" +
                " 'format' = 'json'\n" +
                ")");

        // Register this class as the splitting UDTF under the name 'biao'.
        tEnv.createTemporarySystemFunction("biao",KeyByTable.class);

        // Keep only search-keyword page events, then explode the keyword text:
        // 1) page['item'] is not null;
        // 2) page['last_page_id'] = 'search';
        // 3) page['item_type'] = 'keyword'.
        Table table = tEnv.sqlQuery(
                "SELECT ts, times, word\n" +
                        "        FROM dwd_union, LATERAL TABLE(biao(page['item']))\n" +
                        "        where page['item'] is not null\n" +
                        "        and page['last_page_id'] = 'search'\n" +
                        "        and page['item_type'] = 'keyword'");

        tEnv.createTemporaryView("t1",table);

        // Count each word per 10-second tumbling window, adding window start/end
        // and a source tag. unix_timestamp() (seconds) * 1000 supplies the
        // ClickHouse version column used for deduplication.
        Table table1 = tEnv.sqlQuery("select \n" +
                "   DATE_FORMAT(TUMBLE_START(times, INTERVAL '10' SECOND),'yyyy-MM-dd HH:mm:ss') AS wstart,\n" +
                "   DATE_FORMAT(TUMBLE_END(times, INTERVAL '10' SECOND),'yyyy-MM-dd HH:mm:ss') AS wend,\n" +
                "   'source' AS source,\n" +
                "   word,\n" +
                "   count(1) AS cnt,\n" +
                "   unix_timestamp() * 1000 AS ts\n" +
                "   from t1 \n" +
                "   group by word,TUMBLE(times, INTERVAL '10' SECOND)");

        // toRetractStream emits Tuple2<isInsert, row>; f0 == false marks a
        // retraction. Keep only inserts so retraction messages are never written
        // to ClickHouse as if they were regular rows. (A no-op for this
        // append-only window aggregate, but correct should retractions occur.)
        SingleOutputStreamOperator<Tm1_3_6> map = tEnv.toRetractStream(table1, Tm1_3_6.class)
                .filter(x -> x.f0)
                .map(x -> x.f1);

        map.print();

        map.addSink(new ClickHouseUtil());

        env.execute();
    }
}
