package com.iot.app.flink.processor;


import com.iot.app.flink.Source.RichSource;
import com.iot.app.flink.entity.POITrafficData;
import com.iot.app.flink.entity.TotalTrafficData;
import com.iot.app.flink.entity.WindowTrafficData;
import com.iot.app.flink.sink.RichSink;
import com.iot.app.flink.vo.AggregateKey;
import com.iot.app.flink.vo.IoTData;
import com.iot.app.flink.vo.KafkaProp;
import com.iot.app.flink.vo.POIData;
import com.iot.app.flink.util.IotDataStreamUtils;
import com.iot.app.flink.util.UniqueVehicleState;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.datastream.WindowedStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.streaming.api.windowing.windows.TimeWindow;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer011;
import org.apache.log4j.Logger;

import java.util.Map;
import java.util.Properties;

import static com.iot.app.flink.util.IotDataStreamUtils.tokeyedStreamForCounting;

public class IoTDataProcessor {
    private static final Logger logger = Logger.getLogger(IoTDataProcessor.class);

    /**
     * Builds and executes the IoT monitoring topology: consumes raw events from Kafka,
     * deduplicates them per vehicle id with keyed state, and runs the POI analysis
     * against a broadcast Redis lookup stream.
     *
     * @param env  Flink streaming execution environment the operators are attached to
     * @param conf Kafka consumer settings (bootstrap servers, group id, topic, ...)
     * @throws Exception if job construction or execution fails
     */
    public static void process(StreamExecutionEnvironment env, KafkaProp conf) throws Exception {

        // Kafka consumer configuration, taken entirely from the supplied KafkaProp.
        Properties consumerProperties = new Properties();
        consumerProperties.put("bootstrap.servers", conf.getBootstrapServers());
        consumerProperties.put("group.id", conf.getGroupId());
        consumerProperties.put("enable.auto.commit", conf.getCommitment());
        consumerProperties.put("auto.offset.reset", conf.getReset());

        logger.info("flink stream start ");

        FlinkKafkaConsumer011<String> kafkaConsumer = new FlinkKafkaConsumer011<>(conf.getTopic(),
                new SimpleStringSchema(),
                consumerProperties);

        // Commit Kafka offsets together with Flink checkpoints so a restart resumes
        // from the last consistent position.
        kafkaConsumer.setCommitOffsetsOnCheckpoints(true);

        // Source: parse raw Kafka strings into IoTData events.
        DataStream<IoTData> rawIotDataStream = env.addSource(kafkaConsumer)
                .flatMap(IotDataStreamUtils.streamToIotDataMap());

        // Redis lookup data, broadcast to every parallel task (acts like a broadcast variable).
        DataStream<Map<String, String>> redisSourceStream = env.addSource(RichSource.RedisSouce()).broadcast();

        // Partition the stream by vehicle id and reduce duplicates sharing the same key:
        // differing records with the same id are collapsed into one representative record.
        DataStream<IoTData> reduceKeyedStream = rawIotDataStream.keyBy(IotDataStreamUtils.iotDatakeySelector())
                .reduce(IotDataStreamUtils.dedupByKey());

        // Pass the keyed stream through a stateful mapper that flags whether a
        // vehicle id has already been processed (f1 == true only for first-seen ids).
        DataStream<Tuple2<IoTData, Boolean>> dedupedKeyedStream = reduceKeyedStream
                .keyBy(IotDataStreamUtils.iotDatakeySelector())
                .flatMap(new UniqueVehicleState());

        // Keep only first-seen vehicles, then strip the boolean flag.
        DataStream<Tuple2<IoTData, Boolean>> uniqueVehicleStreams = dedupedKeyedStream.filter(p -> p.f1);
        DataStream<IoTData> filteredIotDataStream = uniqueVehicleStreams.map(p -> p.f0);

        //processTotalTrafficData(filteredIotDataStream);
        //processWindowTrafficData(filteredIotDataStream);
        // FIX: feed the deduplicated stream (not the raw one) into the POI analysis.
        // Previously rawIotDataStream was passed here, leaving the whole dedup
        // pipeline above without a consumer; the commented-out calls show the
        // filtered stream was the intended input throughout.
        processPOIData(filteredIotDataStream, redisSourceStream);

        env.execute("iot-Monitor-app");
    }

    /**
     * Counts vehicles grouped by (routeId, vehicleType) over the full stream and
     * writes the running totals to Cassandra.
     *
     * @param dataStream deduplicated IoT events to aggregate
     * @throws Exception if sink wiring fails
     */
    public static void processTotalTrafficData(DataStream<IoTData> dataStream) throws Exception {

        // Running count per AggregateKey; sum(1) accumulates the Long count field.
        DataStream<Tuple2<AggregateKey, Long>> groupStream = tokeyedStreamForCounting(dataStream).sum(1);
        DataStream<TotalTrafficData> totalTrafficDataStream = groupStream.flatMap(IotDataStreamUtils.streamToTotalTrafficDataMap());
        RichSink.outPutToCassandra(totalTrafficDataStream);

    }

    /**
     * Counts vehicles grouped by (routeId, vehicleType) inside 30-second event-time
     * tumbling windows and writes each window's totals to Cassandra.
     *
     * @param dataStream deduplicated IoT events to window and aggregate
     * @throws Exception if sink wiring fails
     */
    public static void processWindowTrafficData(DataStream<IoTData> dataStream) throws Exception {
        // Build a 30-second tumbling window per AggregateKey; timestamps/watermarks
        // come from the event payload via eventTimeExtractor().
        WindowedStream<Tuple2<AggregateKey, Long>, AggregateKey, TimeWindow> tumbleWindowedStream =
                tokeyedStreamForCounting(dataStream.assignTimestampsAndWatermarks(IotDataStreamUtils.eventTimeExtractor()))
                        .timeWindow(Time.seconds(30));

        SingleOutputStreamOperator<Tuple2<AggregateKey, Long>> reducedWindowStream = tumbleWindowedStream.sum(1);
        DataStream<WindowTrafficData> windowTrafficDataStream = reducedWindowStream.flatMap(IotDataStreamUtils.streamToWindowTrafficDataMap());
        RichSink.outPutToCassandra(windowTrafficDataStream);

    }

    /**
     * Joins the IoT event stream with the broadcast Redis lookup stream to pick out
     * events near points of interest, and maps them to POI traffic records.
     *
     * @param dataStream        IoT events to check against POIs
     * @param redisSourceStream broadcast Redis key/value snapshots used as POI lookup data
     * @throws Exception if stream wiring fails
     */
    public static void processPOIData(DataStream<IoTData> dataStream, DataStream<Map<String, String>> redisSourceStream) throws Exception {

        // Connect the event stream with the broadcast lookup stream and filter by POI.
        SingleOutputStreamOperator<Tuple2<IoTData, POIData>> unionStream = dataStream.connect(redisSourceStream).flatMap(IotDataStreamUtils.streamToFilterByPoi());
        DataStream<POITrafficData> poiTrafficDataStream = unionStream.flatMap(IotDataStreamUtils.streamToPOITrafficDataMap());
        // NOTE(review): the Cassandra sink is disabled, so this pipeline currently
        // produces no output — confirm whether it should be re-enabled.
        //RichSink.outPutToCassandra(poiTrafficDataStream);
    }

}
