package com.raylu.realtime.app.dwd;

import com.alibaba.fastjson.JSONObject;
import com.raylu.realtime.app.template.MySqlTwoPhaseCommitSink2;
import com.raylu.realtime.bean.MetaBean;
import com.raylu.realtime.utils.DateUtils;
import com.raylu.realtime.utils.KafkaSourceUtil;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.time.Time;
import org.apache.flink.runtime.state.filesystem.FsStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

import java.sql.Timestamp;

/**
 * Description:
 * <p>
 * Created by lucienoz on 2022/4/7.
 * Copyright © 2022 lucienoz. All rights reserved.
 */
/**
 * Flink streaming job: consumes raw JSON records from the Kafka topic
 * {@code t_meta} (consumer group {@code kafka-warn-app}), deserializes each
 * record into a {@link MetaBean}, derives the {@code datatime} field from the
 * bean's string timestamp, and writes the result to MySQL through an
 * exactly-once two-phase-commit sink.
 */
public class KafkaWarnApp {
    public static void main(String[] args) throws Exception {
        // Must be set BEFORE any HDFS access (e.g. the FsStateBackend below),
        // otherwise the checkpoint filesystem may be opened as the wrong user.
        System.setProperty("HADOOP_USER_NAME", "raylu");

        // 1. Create the execution environment. getExecutionEnvironment() picks a
        // local environment when run in the IDE and the cluster environment when
        // submitted to a cluster, so the same jar works in both settings.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // 2. Default parallelism
        env.setParallelism(1);

        // 2.1 Enable checkpointing every 5s with exactly-once semantics
        // (required for the two-phase-commit sink to give end-to-end exactly-once).
        env.enableCheckpointing(5000L, CheckpointingMode.EXACTLY_ONCE);

        // 2.2 Checkpoint timeout: 60s
        env.getCheckpointConfig().setCheckpointTimeout(60 * 1000L);

        // 2.3 Retain externalized checkpoints when the job is cancelled,
        // so the job can be restarted from the last checkpoint.
        env.getCheckpointConfig().enableExternalizedCheckpoints(
                CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);

        // 2.4 State backend: checkpoints stored on HDFS
        env.setStateBackend(new FsStateBackend("hdfs://hadoop130.local:8020/kafka-warn-app/ck"));

        // 3. Restart strategy: up to 3 attempts, 3s apart
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(3, Time.seconds(3)));

        // 4. Pipeline: Kafka source -> parse JSON -> derive datatime -> MySQL sink
        env.addSource(KafkaSourceUtil.getKafkaSource("t_meta", "kafka-warn-app"))
                .map(r -> JSONObject.parseObject(r, MetaBean.class))
                .map(new MapFunction<MetaBean, MetaBean>() {
                    @Override
                    public MetaBean map(MetaBean value) throws Exception {
                        String timestamp = value.getTimestamp();
                        // NOTE(review): getEpochMilli suggests the value is already in
                        // milliseconds, in which case *1000L would overshoot by 1000x.
                        // Kept as-is to preserve behavior — confirm the unit returned
                        // by DateUtils.getEpochMilli (seconds vs. millis).
                        value.setDatatime(new Timestamp(DateUtils.getEpochMilli(timestamp) * 1000L));
                        return value;
                    }
                })
                .addSink(new MySqlTwoPhaseCommitSink2<MetaBean>());

        env.execute("kafka-warn-app");
    }
}
