package com.atguigu.day11;

import com.atguigu.bean.Table1;
import com.atguigu.bean.Table2;
import org.apache.flink.api.common.state.StateTtlConfig;
import org.apache.flink.api.common.state.ValueState;
import org.apache.flink.api.common.state.ValueStateDescriptor;
import org.apache.flink.api.common.time.Time;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.KeyedProcessFunction;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.util.Collector;

import java.time.Duration;

/**
 * Demo: how Flink SQL joins keep per-key state, and how idle-state retention
 * (state TTL) bounds that state. Reads "id,value" lines from two sockets,
 * registers them as tables, and runs a streaming full join. The trailing
 * DataStream snippet shows the equivalent low-level {@code StateTtlConfig} API.
 */
public class FlinkSQL20_JOIN {

    public static void main(String[] args) {

        // 1. Execution environment + Table API bridge, single parallelism for a readable demo.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);

        // Enable idle-state retention so per-key join state is cleaned up after
        // 10 s of inactivity instead of growing without bound. The println shows
        // the default retention before we change it.
        System.out.println(tableEnv.getConfig().getIdleStateRetention());
        tableEnv.getConfig().setIdleStateRetention(Duration.ofSeconds(10));

        // 2. Read "id,value" text lines from two sockets and map them to JavaBeans.
        //    NOTE(review): fields[1] throws ArrayIndexOutOfBoundsException on lines
        //    without a comma — acceptable for an nc-fed demo, not for real input.
        SingleOutputStreamOperator<Table1> table1 = env.socketTextStream("hadoop102", 8888)
                .map(line -> {
                    String[] fields = line.split(",");
                    return new Table1(fields[0], fields[1]);
                });
        SingleOutputStreamOperator<Table2> table2 = env.socketTextStream("hadoop102", 9999)
                .map(line -> {
                    String[] fields = line.split(",");
                    return new Table2(fields[0], fields[1]);
                });

        // 3. Register both streams as temporary views for SQL.
        tableEnv.createTemporaryView("table1", table1);
        tableEnv.createTemporaryView("table2", table2);

        // TTL update semantics Flink applies to each side's join state:
        // inner join — left: OnCreateAndWrite, right: OnCreateAndWrite
//        tableEnv.sqlQuery("select table1.id,table1.name,table2.sex from table1 join table2 on table1.id=table2.id").execute().print();

        // left join — left: OnReadAndWrite, right: OnCreateAndWrite
//        tableEnv.sqlQuery("select * from table1 left join table2 on table1.id=table2.id").execute().print();

        // full join — left: OnReadAndWrite, right: OnReadAndWrite
        // NOTE(review): execute().print() blocks the main thread for the lifetime
        // of the streaming job, so nothing below this line is ever reached.
        tableEnv.sqlQuery("select * from table1 full join table2 on table1.id=table2.id").execute().print();

        // Illustration of the low-level state-TTL API that the SQL joins use
        // internally. NOTE(review): this pipeline is only *defined* — it never
        // runs, because main() blocks on print() above and there is no
        // env.execute() call for this DataStream job.
        table1.keyBy(Table1::getId).process(new KeyedProcessFunction<String, Table1, Object>() {

            // Demo state handle; intentionally never read or written in processElement.
            private ValueState<Long> valueState;

            @Override
            public void open(Configuration parameters) throws Exception {

                ValueStateDescriptor<Long> longValueStateDescriptor = new ValueStateDescriptor<>("value-state", Long.class);
                // FIX: setUpdateType() is a setter, not additive — the original
                // chained OnReadAndWrite followed by OnCreateAndWrite, silently
                // discarding the first call. Keep the setting that was actually
                // in effect:
                //   OnCreateAndWrite — TTL timer resets on state creation and write
                //   (OnReadAndWrite would additionally reset it on every read)
                StateTtlConfig ttlConfig = StateTtlConfig.newBuilder(Time.seconds(10))
                        .setUpdateType(StateTtlConfig.UpdateType.OnCreateAndWrite)
                        .build();
                longValueStateDescriptor.enableTimeToLive(ttlConfig);

                valueState = getRuntimeContext().getState(longValueStateDescriptor);
            }

            @Override
            public void processElement(Table1 value, Context ctx, Collector<Object> out) throws Exception {
                // Intentionally empty: the operator exists only to demonstrate
                // the StateTtlConfig setup in open().
            }
        });
    }

}
