package com.atguigu.wuliu.app.dwd;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.atguigu.wuliu.app.func.BeanToJsonStrFunction;
import com.atguigu.wuliu.bean.DwdTransTransFinishBean;
import com.atguigu.wuliu.utils.DateFormatUtil;
import com.atguigu.wuliu.utils.KafkaUtil;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.functions.FilterFunction;
import org.apache.flink.connector.kafka.sink.KafkaSink;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.ProcessFunction;
import org.apache.flink.util.Collector;

/**
 * 运输完成事务事实表
 */
public class DwdTransTransFinish {
    public static void main(String[] args) {
        // HDFS user for checkpoint storage (when an HDFS checkpoint backend is enabled).
        System.setProperty("HADOOP_USER_NAME", "atguigu");
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(4);
        env.enableCheckpointing(5000L);
        // TODO(review): production deployments should also configure checkpoint storage
        // (e.g. hdfs://hadoop100:8020/wuliu/ck), timeout, externalized-checkpoint retention,
        // min pause between checkpoints, state backend, and a restart strategy.

        // Read the raw CDC change stream from the topic_db Kafka topic.
        String groupId = "dwd_transport_group_xfw";
        String topicDb = "topic_db";
        KafkaSource<String> kafkaSource = KafkaUtil.getKafkaSource(topicDb, groupId);
        DataStreamSource<String> ds = env.fromSource(kafkaSource, WatermarkStrategy.noWatermarks(), "kafka_source");

        // Keep only update events on the transport_task table whose status just became
        // 67004 (transport finished). topic_db carries many tables and operation types,
        // and some records (e.g. bootstrap/DDL events) have no "data" payload at all,
        // so every level must be null-guarded before dereferencing.
        SingleOutputStreamOperator<String> filterDs = ds.filter(new FilterFunction<String>() {
            @Override
            public boolean filter(String jsonStr) throws Exception {
                JSONObject jsonObject = JSON.parseObject(jsonStr);
                if (jsonObject == null) {
                    return false;
                }
                if (!"transport_task".equals(jsonObject.getString("table"))
                        || !"update".equals(jsonObject.getString("type"))) {
                    return false;
                }
                JSONObject dataObj = jsonObject.getJSONObject("data");
                // Guard: records without a data payload would otherwise NPE and kill the job.
                return dataObj != null && "67004".equals(dataObj.getString("status"));
            }
        });

        // Map the surviving change records to the DWD fact bean and stamp the event
        // timestamp derived from the actual transport end time.
        SingleOutputStreamOperator<DwdTransTransFinishBean> dataDs = filterDs.process(new ProcessFunction<String, DwdTransTransFinishBean>() {
            @Override
            public void processElement(String jsonStr, ProcessFunction<String, DwdTransTransFinishBean>.Context ctx, Collector<DwdTransTransFinishBean> out) throws Exception {
                JSONObject jsonObject = JSON.parseObject(jsonStr);
                JSONObject dataObj = jsonObject.getJSONObject("data");
                DwdTransTransFinishBean finishBean = JSON.toJavaObject(dataObj, DwdTransTransFinishBean.class);
                // NOTE(review): assumes actual_end_time is always present on 67004 updates —
                // confirm against the upstream schema; toTs(…, true) parses a full datetime.
                finishBean.setTs(DateFormatUtil.toTs(finishBean.getActualEndTime(), true));
                out.collect(finishBean);
            }
        });
        dataDs.print(">>>");

        // Serialize the beans back to JSON and publish them to the DWD fact topic.
        String topic = "dwd_trans_trans_finish";
        KafkaSink<String> kafkaSink = KafkaUtil.getKafkaSink(topic);
        dataDs.map(new BeanToJsonStrFunction<>())
                .sinkTo(kafkaSink);

        try {
            env.execute();
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }
}
