package com.zallds.flink.stream;

import com.alibaba.fastjson.JSON;
import com.zallds.flink.bean.UserBrowseLog;
import org.apache.flink.api.common.serialization.SimpleStringEncoder;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.core.fs.Path;
import org.apache.flink.core.io.SimpleVersionedSerializer;
import org.apache.flink.runtime.state.StateBackend;
import org.apache.flink.runtime.state.filesystem.FsStateBackend;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.ProcessFunction;
import org.apache.flink.streaming.api.functions.sink.filesystem.BucketAssigner;
import org.apache.flink.streaming.api.functions.sink.filesystem.OutputFileConfig;
import org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSink;
import org.apache.flink.streaming.api.functions.sink.filesystem.bucketassigners.SimpleVersionedStringSerializer;
import org.apache.flink.streaming.api.functions.sink.filesystem.rollingpolicies.DefaultRollingPolicy;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.java.StreamTableEnvironment;
import org.apache.flink.util.Collector;

import java.text.SimpleDateFormat;
import java.time.LocalDate;
import java.time.LocalDateTime;
import java.time.OffsetDateTime;
import java.time.ZoneOffset;
import java.time.format.DateTimeFormatter;
import java.util.Date;
import java.util.Properties;
import java.util.concurrent.TimeUnit;

/**
 * Created by liujian on 2020/5/6.
 */
public class TestKafka2HdfsWithName {

    /**
     * Entry point: consumes JSON browse events from Kafka, enriches each record
     * with a millisecond event-time timestamp, and writes the results to HDFS
     * in day-named buckets via a {@link StreamingFileSink}.
     *
     * @param args optional overrides: --kafkaBootstrapServers, --browseTopic,
     *             --browseTopicGroupID (defaults target the test cluster)
     * @throws Exception if job submission or execution fails
     */
    public static void main(String[] args) throws Exception {

        ParameterTool fromArgs = ParameterTool.fromArgs(args);

        // 1. Parse command-line arguments, falling back to test-cluster defaults.
        String kafkaBootstrapServers = fromArgs.get("kafkaBootstrapServers", "TV-ZDS-TMC-005:6667,TV-ZDS-TMC-004:6667");
        String browseTopic = fromArgs.get("browseTopic", "tableSink");
        String browseTopicGroupID = fromArgs.get("browseTopicGroupID", "testGroup");

        // 2. Set up the runtime environment (Blink planner, streaming mode).
        EnvironmentSettings settings = EnvironmentSettings.newInstance().inStreamingMode().useBlinkPlanner().build();
        StreamExecutionEnvironment streamEnv = StreamExecutionEnvironment.getExecutionEnvironment();
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(streamEnv, settings);

        // Checkpoint every 10s — StreamingFileSink only commits (finalizes)
        // in-progress part files on checkpoint, so this is required for output
        // files to ever leave the "in-progress" state.
        streamEnv.enableCheckpointing(10000);

        // The cast is load-bearing: it selects the non-deprecated
        // setStateBackend(StateBackend) overload over the deprecated
        // setStateBackend(AbstractStateBackend) one.
        streamEnv.setStateBackend((StateBackend) new FsStateBackend("hdfs:///test/flinkCheckPoint"));

        // 3. Register the Kafka source.
        Properties browseProperties = new Properties();
        browseProperties.put("bootstrap.servers", kafkaBootstrapServers);
        browseProperties.put("group.id", browseTopicGroupID);

        FlinkKafkaConsumer<String> consumer =
                new FlinkKafkaConsumer<>(browseTopic, new SimpleStringSchema(), browseProperties);

        // 4. Parse each raw JSON record into an enriched UserBrowseLog string.
        DataStream<String> browseStream = streamEnv
                .addSource(consumer)
                .process(new BrowseKafkaProcessFunction());

        browseStream.print();

        // 5. Sink to HDFS: one bucket (directory) per day, parts rolled every
        // 30 min, after 10 min of inactivity, or at 100 MB.
        StreamingFileSink<String> sink = StreamingFileSink
                .forRowFormat(new Path("hdfs:///test/flinkOut"), new SimpleStringEncoder<String>("UTF-8"))
                .withRollingPolicy(
                        DefaultRollingPolicy.builder()
                                .withRolloverInterval(TimeUnit.MINUTES.toMillis(30))
                                .withInactivityInterval(TimeUnit.MINUTES.toMillis(10))
                                .withMaxPartSize(1024 * 1024 * 100)
                                .build()
                )
                .withBucketAssigner(new DayBucketAssigner())
                .withOutputFileConfig(OutputFileConfig.builder().withPartPrefix("0000_").build())
                .build();
        browseStream.addSink(sink);

        // 6. Submit the job. The pipeline is built entirely on the DataStream
        // API (source/process/print/addSink on streamEnv), so execute the
        // StreamExecutionEnvironment directly — with the Blink planner,
        // tableEnv.execute() does not reliably include sinks that were
        // registered straight on streamEnv.
        streamEnv.execute(TestKafka2HdfsWithName.class.getSimpleName());

    }


    /**
     * Parses a Kafka JSON record into a {@link UserBrowseLog}, stamps it with a
     * millisecond epoch timestamp derived from its eventTime field (interpreted
     * as Beijing wall-clock time, UTC+8), and emits the bean's toString().
     *
     * Records that fail to parse are logged to stderr and dropped.
     *
     * UserBrowseLog(String userID, String eventTime, String eventType,
     *               String productID, int productPrice, long eventTimeTimestamp)
     */
    private static class BrowseKafkaProcessFunction extends ProcessFunction<String, String> {

        // DateTimeFormatter is immutable and thread-safe: build it once instead
        // of re-creating it for every element.
        private static final DateTimeFormatter EVENT_TIME_FORMAT =
                DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss");

        @Override
        public void processElement(String value, Context ctx, Collector<String> out) throws Exception {
            try {
                UserBrowseLog log = JSON.parseObject(value, UserBrowseLog.class);

                // eventTime is "yyyy-MM-dd HH:mm:ss" wall-clock time in UTC+8;
                // convert it to a millisecond epoch timestamp.
                OffsetDateTime eventTime =
                        LocalDateTime.parse(log.getEventTime(), EVENT_TIME_FORMAT).atOffset(ZoneOffset.of("+08:00"));
                log.setEventTimeTimestamp(eventTime.toInstant().toEpochMilli());

                out.collect(log.toString());
            } catch (Exception ex) {
                // Keep the offending record and the cause instead of swallowing
                // them — a bare fixed message makes bad records undiagnosable.
                System.err.println("解析Kafka数据异常... record=" + value + " cause=" + ex);
            }
        }
    }

   /**
    * Assigns every record to a bucket (HDFS subdirectory) named after the
    * current processing date, e.g. "2020-05-06".
    */
   private static class DayBucketAssigner implements BucketAssigner<String, String> {

        @Override
        public String getBucketId(String element, Context context) {
            // LocalDate.toString() is ISO-8601 "yyyy-MM-dd" — the same format the
            // old SimpleDateFormat produced, but without allocating a fresh
            // (non-thread-safe) formatter per element. Still uses the JVM
            // default time zone, matching the original behavior.
            return LocalDate.now().toString();
        }

        @Override
        public SimpleVersionedSerializer<String> getSerializer() {
            return SimpleVersionedStringSerializer.INSTANCE;
        }
    }


}
