import com.nepu.gmall.realtime.bean.UnknowBean;
import io.debezium.data.Envelope;
import org.apache.flink.api.common.eventtime.SerializableTimestampAssigner;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

import java.time.Duration;

/**
 * Demonstrates Flink SQL joins on two socket streams.
 * By default, join state in the Table API never expires; this class shows how to
 * configure idle-state retention and documents the state TTL type per join kind.
 * @author chenshuaijun
 * @create 2023-02-24 20:14
 */
public class Flink_SQL_InnerJoin {

    /**
     * Entry point: builds two socket-backed streams of {@link UnknowBean}, registers
     * them as temporary views, and runs a SQL join whose state expires after 10s of
     * idleness. Input lines on each socket are expected as "id,uv,score" where the
     * third field ("score") is parsed as the event-time timestamp in epoch millis.
     *
     * @param args unused
     * @throws Exception if job submission or execution fails
     */
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);

        // Two identical sources on different ports; construction logic is shared
        // by the socketBeanStream helper to avoid duplicating the map/watermark code.
        SingleOutputStreamOperator<UnknowBean> streamOne = socketBeanStream(env, 8888);
        SingleOutputStreamOperator<UnknowBean> streamTwo = socketBeanStream(env, 9999);

        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);

        // By default, join state in the Table API never expires. Print the current
        // retention (Duration.ZERO = keep forever), then set a 10s idle-state TTL so
        // stale join state is cleaned up.
        System.out.println(tableEnv.getConfig().getIdleStateRetention());
        tableEnv.getConfig().setIdleStateRetention(Duration.ofSeconds(10));
        tableEnv.createTemporaryView("t1", streamOne);
        tableEnv.createTemporaryView("t2", streamTwo);

        // Inner join state TTL types: left table OnCreateAndWrite, right table OnCreateAndWrite
        // Table table = tableEnv.sqlQuery("select t1.id ,t2.id, t1.uv, t2.uv from t1 INNER join t2 on t1.id = t2.id");

        // Left join state TTL types:  left table OnReadAndWrite,   right table OnCreateAndWrite
        // Table table = tableEnv.sqlQuery("select t1.id ,t2.id, t1.uv, t2.uv from t1 left join t2 on t1.id = t2.id");

        // Right join state TTL types: left table OnCreateAndWrite, right table OnReadAndWrite
        // Table table = tableEnv.sqlQuery("select t1.id ,t2.id, t1.uv, t2.uv from t1 right join t2 on t1.id = t2.id");

        // Full join state TTL types:  left table OnReadAndWrite,   right table OnReadAndWrite
        // NOTE(fix): the original query used "right join" here, contradicting this
        // comment and duplicating the right-join case above; changed to "full join".
        Table table = tableEnv.sqlQuery("select t1.id ,t2.id, t1.uv, t2.uv from t1 full join t2 on t1.id = t2.id");
        tableEnv.toChangelogStream(table).print();

        env.execute();
    }

    /**
     * Builds a socket source on the given port, parsing each CSV line into an
     * {@link UnknowBean} and assigning event-time watermarks with zero allowed
     * out-of-orderness.
     *
     * @param env  the stream execution environment to attach the source to
     * @param port the socket port on host "hadoop102" to read from
     * @return a watermarked stream of parsed beans
     */
    private static SingleOutputStreamOperator<UnknowBean> socketBeanStream(
            StreamExecutionEnvironment env, int port) {
        return env.socketTextStream("hadoop102", port)
                .map(new MapFunction<String, UnknowBean>() {
                    @Override
                    public UnknowBean map(String value) throws Exception {
                        String[] fields = value.split(",");
                        return new UnknowBean(fields[0], fields[1], fields[2]);
                    }
                })
                .assignTimestampsAndWatermarks(
                        WatermarkStrategy.<UnknowBean>forBoundedOutOfOrderness(Duration.ZERO)
                                .withTimestampAssigner(new SerializableTimestampAssigner<UnknowBean>() {
                                    @Override
                                    public long extractTimestamp(UnknowBean element, long recordTimestamp) {
                                        // Third CSV field ("score") doubles as the event-time
                                        // timestamp — assumed to be epoch millis; TODO confirm.
                                        return Long.parseLong(element.getScore());
                                    }
                                }));
    }

}
