package cn.doitedu.sql;

import com.alibaba.fastjson.JSON;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.Schema;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

import java.util.Map;

public class Demo15_StreamToTable {

    /**
     * Demo: convert an already-processed {@code DataStream} into a temporary table view,
     * then continue with SQL (a filtered count per (url, uid), followed by a per-url
     * top-N via {@code row_number()}) and print the result stream to stdout.
     *
     * <p>Input topic {@code tpc-a} carries JSON records shaped like:
     * {@code {"uid":1,"event_id":"page_load","properties":{"url":"/a","ref":"/x"},"action_time":1704719574000}}
     */
    public static void main(String[] args) throws Exception {

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.enableCheckpointing(5000, CheckpointingMode.EXACTLY_ONCE);
        env.getCheckpointConfig().setCheckpointStorage("file:///d:/ckpt");

        env.setParallelism(1);

        StreamTableEnvironment tenv = StreamTableEnvironment.create(env);

        // Build the Kafka source: value-only String records, starting from the latest offsets.
        KafkaSource<String> source = KafkaSource.<String>builder()
                .setTopics("tpc-a")
                .setBootstrapServers("doitedu:9092")
                .setGroupId("ggg")
                .setClientIdPrefix("ccc")
                .setValueOnlyDeserializer(new SimpleStringSchema())
                .setStartingOffsets(OffsetsInitializer.latest())
                .build();

        DataStreamSource<String> stream = env.fromSource(source, WatermarkStrategy.noWatermarks(), "s");

        // Stand-in for "complex DataStream-API processing": parse each JSON record
        // into a structured bean.
        // NOTE(review): fastjson parses external input here; a malformed record will
        // fail the job — consider catching/filtering parse errors if that matters.
        SingleOutputStreamOperator<UserEvent> beanStream = stream.map(json -> JSON.parseObject(json, UserEvent.class));

        // Switch from the DataStream API to SQL: register the structured stream
        // as a temporary view with an explicit schema.
        tenv.createTemporaryView(
                "v_user",    // view name
                beanStream,  // source stream
                Schema.newBuilder()  // table schema
                        .column("uid", DataTypes.BIGINT())
                        .column("event_id", DataTypes.STRING())
                        .column("action_time", DataTypes.BIGINT())
                        .column("properties", DataTypes.MAP(DataTypes.STRING(), DataTypes.STRING()))
                        // derived columns
                        .columnByExpression("url", "properties['url']")
                        .columnByExpression("rt", "to_timestamp_ltz(action_time,3)")  // event time (ms epoch -> timestamp_ltz)
                        .columnByExpression("pt", "proctime()")                        // processing time
                        .watermark("rt", "rt")  // watermark = rt with no delay
                        .build());

        // Per (url, uid): running count of 'page_load' events.
        tenv.executeSql(
                "create temporary view tmp2 as \n" +
                "SELECT\n" +
                "    url,\n" +
                "    uid,\n" +
                "    count(event_id) filter(where event_id = 'page_load') as cnt\n" +
                "from v_user\n" +
                "group by url,uid");

        // Top-N preparation: rank uids within each url by cnt (descending).
        tenv.executeSql("create temporary view tmp3 as \n" +
                "SELECT\n" +
                "\n" +
                "url,\n" +
                "uid,\n" +
                "cnt,\n" +
                "row_number() over(partition by url order by cnt desc) as rn\n" +
                "\n" +
                "from tmp2");

        // Keep the top 2 per url. executeSql(...).print() submits the Table pipeline
        // and streams results to stdout; it blocks indefinitely for this unbounded source.
        tenv.executeSql(
                "SELECT\n" +
                "*\n" +
                "from tmp3\n" +
                "where rn<=2").print();

        // BUGFIX: removed the trailing env.execute(). The DataStream topology is
        // consumed into the Table pipeline submitted by executeSql(...).print(), so
        // env.execute() would find no remaining operators and throw
        // IllegalStateException ("No operators defined in streaming topology").
    }

    /** Structured user-event bean parsed (via fastjson) from the Kafka JSON records. */
    @Data
    @NoArgsConstructor
    @AllArgsConstructor
    public static class UserEvent {
        private Long uid;
        private String event_id;
        private Long action_time;
        private Map<String, String> properties;
    }

}
