package com.lee;

/*
 * Flink streaming job: consumes Avro-encoded network packets from Kafka,
 * computes time- and count-windowed traffic statistics, and reassembles
 * TCP sessions (routed through a side output) with their own statistics.
 */


import com.lee.trigger.*;
import com.lee.windowFunction.*;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

import org.apache.flink.streaming.api.functions.ProcessFunction;
import org.apache.flink.streaming.api.windowing.assigners.ProcessingTimeSessionWindows;
import org.apache.flink.streaming.api.windowing.assigners.TumblingProcessingTimeWindows;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer010;
import org.apache.flink.util.Collector;
import org.apache.flink.util.OutputTag;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.HashMap;
import java.util.Properties;

public class App {

    // Kept package-private (not narrowed to private) to preserve the original
    // visibility in case other classes in this package reference it.
    static final Logger logger = LoggerFactory.getLogger(App.class);

    /**
     * Side-output tag that routes TCP-over-IP records into the session
     * reassembly branch. The anonymous subclass ({}) is required so Flink can
     * capture the generic type parameter at runtime.
     */
    private static final OutputTag<networkData> outputTag = new OutputTag<networkData>("tcpdata") {};

    /**
     * Entry point. Builds and executes the streaming topology:
     * Kafka source -> time-window statistics -> count-window statistics
     * -> side-output split; the TCP side output is then keyed per session,
     * reassembled, and run through its own statistics windows.
     *
     * @param args unused
     * @throws Exception if the Flink job fails to execute
     */
    public static void main(String[] args) throws Exception {
        // set up the execution environment
        final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        Properties properties = new Properties();

        properties.setProperty("bootstrap.servers", "10.36.8.128:9092");
        // NOTE(review): setStartFromEarliest() below makes the Flink consumer
        // ignore committed offsets, so this property is effectively redundant.
        properties.setProperty("auto.offset.reset", "earliest");
        properties.setProperty("fetch.max.bytes", "1048576000");
        // Lowercase "false" per Kafka convention (was "False"; Kafka parses
        // booleans case-insensitively, but lowercase is the documented form).
        properties.setProperty("enable.auto.commit", "false");

        AvroDeserializationSchema<networkData> avroSchema = new AvroDeserializationSchema<>(networkData.class);

        FlinkKafkaConsumer010<networkData> consumer = new FlinkKafkaConsumer010<>("multirecord2", avroSchema, properties);
        consumer.setStartFromEarliest();

        // NOTE(review): these maps are shared by Java reference between each
        // trigger and its window function. That sharing survives only if both
        // ends are (de)serialized from the same object graph on the same task;
        // in a distributed deployment each operator instance may get its own
        // copy, silently breaking the coupling. Consider Flink managed state
        // instead — TODO confirm intended deployment.
        HashMap<String, Integer> timeDstMap = new HashMap<>();
        HashMap<String, Integer> timeServiceMap = new HashMap<>();
        HashMap<String, Integer> countDstMap = new HashMap<>();
        HashMap<String, Integer> countServiceMap = new HashMap<>();

        HashMap<String, Integer> statisticTimeDstMap = new HashMap<>();
        HashMap<String, Integer> statisticTimeServiceMap = new HashMap<>();
        HashMap<String, Integer> statisticCountDstMap = new HashMap<>();
        HashMap<String, Integer> statisticCountServiceMap = new HashMap<>();

        // Per-packet statistics: a 2s processing-time window with a custom
        // time trigger, then another 2s window with a count-based (1000)
        // trigger, then a process function that duplicates TCP records into
        // the side output.
        SingleOutputStreamOperator<networkData> stream = env
                .addSource(consumer)
                .windowAll(TumblingProcessingTimeWindows.of(Time.seconds(2)))
                .trigger(new TimeWindowTrigger<>(timeDstMap, timeServiceMap))
                .apply(new StatisticalTimeWindowFunction(timeDstMap, timeServiceMap))
                .windowAll(TumblingProcessingTimeWindows.of(Time.seconds(2)))
                .trigger(new CountWindowTrigger(1000, countDstMap, countServiceMap))
                .apply(new StatisticalCountWindowFunction(countDstMap, countServiceMap))
                .process(new TCPOutputProcessionFunction());

        // TCP session reassembly and session-level statistics.
        DataStream<networkData> sessionStream = stream.getSideOutput(outputTag)
                .keyBy(new KeySelector<networkData, String>() {
                    @Override
                    public String getKey(networkData data) throws Exception {
                        IP ip = (IP) data.getInternetLayer();
                        TCP tcp = (TCP) data.getTransportLayer();
                        // Separators prevent key collisions that plain
                        // concatenation allowed, e.g.
                        // "10.0.0.1"+"2345" == "10.0.0.12"+"345".
                        String srcUrl = ip.getSrcIP().toString() + ":" + tcp.getSrcPort();
                        String dstUrl = ip.getDstIP().toString() + ":" + tcp.getDstPort();
                        // Order the endpoints deterministically so both
                        // directions of a session map to the same key.
                        return srcUrl.compareTo(dstUrl) > 0
                                ? srcUrl + "|" + dstUrl
                                : dstUrl + "|" + srcUrl;
                    }
                })
                // Sessions close after 5s of processing-time inactivity.
                .window(ProcessingTimeSessionWindows.withGap(Time.seconds(5)))
                .trigger(new SessionDataTrigger())
                .apply(new TCPreorganizetionFunction())
                .windowAll(TumblingProcessingTimeWindows.of(Time.seconds(2)))
                .trigger(new SessionTimeWindowTrigger(statisticTimeDstMap, statisticTimeServiceMap))
                .apply(new SessionStatisticalTimeWindowFunction(statisticTimeDstMap, statisticTimeServiceMap))
                .windowAll(TumblingProcessingTimeWindows.of(Time.seconds(2)))
                .trigger(new SessionCountWindowTrigger(1000, statisticCountDstMap, statisticCountServiceMap))
                .apply(new SessionStatisticalCountWindowFunction(statisticCountDstMap, statisticCountServiceMap));

        sessionStream.print();

        env.execute("Flink avro kafka test!");
    }

    /**
     * Forwards every record downstream unchanged, and additionally copies
     * records that carry TCP over IP into the {@code outputTag} side output
     * for the session-reassembly branch.
     */
    public static final class TCPOutputProcessionFunction extends ProcessFunction<networkData, networkData> {

        private static final long serialVersionUID = 1L;

        @Override
        public void processElement(networkData value, Context ctx, Collector<networkData> out) throws Exception {
            out.collect(value);
            if (value.getInternetLayer() instanceof IP && value.getTransportLayer() instanceof TCP) {
                ctx.output(outputTag, value);
            }
        }
    }
}





