package com.ruyuan.event.streaming.process;
import com.ruyuan.event.streaming.pojo.EventClientLog;
import com.ruyuan.event.streaming.pojo.EventJoinLog;
import com.ruyuan.event.streaming.pojo.EventServerLog;
import com.ruyuan.event.streaming.schema.EventClientLogSchema;
import com.ruyuan.event.streaming.schema.EventJoinLogSchema;
import com.ruyuan.event.streaming.schema.EventServerLogSchema;
import com.ruyuan.event.streaming.utils.Constants;
import com.ruyuan.event.streaming.utils.FlinkKafkaUtils;
import com.twitter.chill.protobuf.ProtobufSerializer;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.streaming.api.TimeCharacteristic;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;
import java.util.Properties;
/**
 * Job entry point for the streaming-join pipeline.
 *
 * <p>Reads server-side and client-side event logs from Kafka, persists the
 * server log (Redis + HBase via {@code EventServerLogRichFlatMap}), joins the
 * client log against it, and writes the joined result to two downstream Kafka
 * topics: the raw joined record and a JSON report representation.
 */
public class StreamingJoinEntryPoint {
    // Kafka topic names, centralised in Constants so every job agrees on them.
    private static final String KAFKA_EVENT_SERVER_LOG = Constants.KAFKA_EVENT_SERVER_LOG;
    private static final String KAFKA_EVENT_CLIENT_LOG = Constants.KAFKA_EVENT_CLIENT_LOG;
    private static final String KAFKA_EVENT_JOIN_LOG = Constants.KAFKA_EVENT_JOIN_LOG;

    public static void main(String[] args) throws Exception {
        String groupId = "StreamingJoinMain1";
        StreamExecutionEnvironment environment = StreamExecutionEnvironment.getExecutionEnvironment();
        // Checkpoint every 60s so Kafka offsets / operator state can be recovered.
        environment.enableCheckpointing(60000);
        environment.setParallelism(1);

        // If a checkpoint fails, do NOT fail the task: with false, the task
        // declines the checkpoint and keeps running (availability over exactness).
        environment.getCheckpointConfig().setFailOnCheckpointingErrors(false);
        environment.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);
        environment.getConfig().enableForceAvro();
        // Register a Kryo serializer for the protobuf-generated log classes so
        // Flink can (de)serialize them between operators.
        environment.getConfig().registerTypeWithKryoSerializer(EventServerLog.class, ProtobufSerializer.class);
        environment.getConfig().registerTypeWithKryoSerializer(EventClientLog.class, ProtobufSerializer.class);
        environment.getConfig().registerTypeWithKryoSerializer(EventJoinLog.class, ProtobufSerializer.class);

        DataStreamSource<EventServerLog> eventServerLogDataStreamSource = environment.addSource(FlinkKafkaUtils.getKafkaEventServerSource(KAFKA_EVENT_SERVER_LOG, new EventServerLogSchema(), groupId));
        DataStreamSource<EventClientLog> eventClientLogDataStreamSource = environment.addSource(FlinkKafkaUtils.getKafkaEventClientSource(KAFKA_EVENT_CLIENT_LOG, new EventClientLogSchema(), groupId));

        /*
         * Persist the server log to Redis + HBase so the client-log join below
         * can look it up.
         */
        eventServerLogDataStreamSource.flatMap(new EventServerLogRichFlatMap()).name("writeServerLog");

        /*
         * Join logic: enrich each client-log record with the matching
         * server-log data.
         */
        SingleOutputStreamOperator<EventJoinLog> eventJoinLogSingleOutputStreamOperator = eventClientLogDataStreamSource.flatMap(new EventClientLogRichFlatMap());

        /*
         * Write the joined data downstream in two copies:
         *   1. the raw joined record;
         *   2. a report record (converted to JSON for reporting consumers).
         */
        Properties producerProperties = FlinkKafkaUtils.getProducerProperties(Constants.BROKERS);
        eventJoinLogSingleOutputStreamOperator.addSink(new FlinkKafkaProducer<EventJoinLog>(KAFKA_EVENT_JOIN_LOG, new EventJoinLogSchema(), producerProperties));

        eventJoinLogSingleOutputStreamOperator.flatMap(new EventJoinReportFlatMap()).addSink(new FlinkKafkaProducer<String>(Constants.KAFKA_EVENT_JOIN_REPORT, new SimpleStringSchema(), producerProperties));
        // Name the job so it is identifiable in the Flink dashboard.
        environment.execute("StreamingJoinEntryPoint");
    }
}