package com.renrendoc.network.flow;

import com.google.gson.Gson;
import com.google.gson.JsonObject;
import com.renrendoc.network.flow.ana.*;
import com.renrendoc.network.flow.beans.NginxLogEntry;
import com.renrendoc.network.flow.beans.NginxLogEvent;
import com.renrendoc.network.flow.beans.out.ClickEvent;
import com.renrendoc.network.flow.beans.out.PageEvent;
import com.renrendoc.network.flow.beans.out.SpiderEvent;
import com.renrendoc.network.flow.beans.out.TopNPage;
import com.renrendoc.network.flow.sink.RpcSink;
import com.renrendoc.network.flow.trigger.CustomTimeWindowTrigger;
import com.renrendoc.network.flow.util.Common;
import com.renrendoc.network.flow.util.PropertiesUtil;
import org.apache.flink.api.common.eventtime.SerializableTimestampAssigner;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.functions.FilterFunction;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.functions.ReduceFunction;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.client.program.StreamContextEnvironment;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer;
import org.apache.flink.contrib.streaming.state.EmbeddedRocksDBStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.TimeCharacteristic;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.KeyedProcessFunction;
import org.apache.flink.streaming.api.functions.ProcessFunction;
import org.apache.flink.streaming.api.functions.windowing.ProcessWindowFunction;
import org.apache.flink.streaming.api.windowing.assigners.SlidingEventTimeWindows;
import org.apache.flink.streaming.api.windowing.assigners.TumblingEventTimeWindows;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.streaming.api.windowing.windows.TimeWindow;
import org.apache.flink.util.Collector;
import org.apache.flink.util.OutputTag;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;

import java.time.Duration;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.Map;

import static com.renrendoc.network.flow.source.KafkaNginxEntrySource.parseLogEntry;
import static com.renrendoc.network.flow.util.Common.*;

public class AnaStream {

    public static void main(String[] args) throws Exception {
        // Bootstrap and run the analytics streaming job.
        AnaStream job = new AnaStream();
        job.start();
    }

    /**
     * Builds and executes the full analytics pipeline: reads raw nginx access-log
     * events from Kafka, parses and filters them, splits the stream into
     * page-view / click / custom / spider sub-streams via side outputs, and wires
     * each sub-stream into its dedicated aggregation handler.
     *
     * @throws Exception if the Flink job fails to start or execute
     */
    public void start() throws Exception {
        // Set up the base execution environment.
        StreamExecutionEnvironment env = StreamContextEnvironment.getExecutionEnvironment();
//        env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);
        // Start a checkpoint every 1000 ms (checkpoint interval), exactly-once semantics.
        env.enableCheckpointing(1000, CheckpointingMode.EXACTLY_ONCE);
        // Ensure at least 500 ms between checkpoints (minimum pause), leaving time for actual processing.
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(500);
        // A checkpoint must complete within one minute or it is discarded (checkpoint timeout).

        env.getCheckpointConfig().setCheckpointTimeout(60000);
        // Allow only one checkpoint to be in flight at a time.
        env.getCheckpointConfig().setMaxConcurrentCheckpoints(1);
        // only two consecutive checkpoint failures are tolerated
        env.getCheckpointConfig().setTolerableCheckpointFailureNumber(2);
        // Retain checkpoint data after the job is cancelled, so the job can later be restored from a chosen checkpoint.
        env.getCheckpointConfig().setExternalizedCheckpointCleanup(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
        env.setStateBackend(new EmbeddedRocksDBStateBackend());
        // Set the Hadoop user for HDFS checkpoint storage access.
        System.setProperty("HADOOP_USER_NAME", "hdfs");
        env.getCheckpointConfig().setCheckpointStorage("hdfs://hdp200:8020/user/flink/AnaStream");
        env.setParallelism(1);
        // Kafka source: raw JSON log records, one per message.
        DataStreamSource<String> kafkaDs = env.fromSource(KafkaSource.<String>builder()
                .setBootstrapServers(PropertiesUtil.getProperty("kafka.bootstrap_servers"))
                .setTopics("renrendoc_nginx_event")
                .setGroupId("AnaStream")
                .setStartingOffsets(OffsetsInitializer.committedOffsets(OffsetResetStrategy.EARLIEST))
                .setValueOnlyDeserializer(new SimpleStringSchema())
                .build(), WatermarkStrategy.noWatermarks(), "Kafka Source");
//        kafkaDs.print("kafka");
//        env.setParallelism(2);
        // Parse each Kafka record: extract the nginx log line from the JSON envelope,
        // parse it into an NginxLogEntry, and (for HTTP 200 hits only) copy the
        // tracking query-string parameters into an NginxLogEvent.
        SingleOutputStreamOperator<NginxLogEvent> map = kafkaDs.rebalance().map(new MapFunction<String, NginxLogEvent>() {
            @Override
            public NginxLogEvent map(String s) throws Exception {
                // NOTE(review): a new Gson is allocated per record; Gson is thread-safe
                // and could be held in a field/static — consider hoisting.
                Gson gson = new Gson();
                JsonObject jsonObject = gson.fromJson(s, JsonObject.class);
                String message = jsonObject.get("message").getAsString();
                NginxLogEntry entry = parseLogEntry(message);
                NginxLogEvent ngxEvent = new NginxLogEvent();
                if (entry != null) {
                    Map<String, String> urlMap = parseQueryString(entry.getUrl());
                    // Only successful requests are tracked; everything else yields an
                    // empty event that the filter below drops (sk stays null).
                    if (200 == entry.getStatus()) {
                        ngxEvent.setSk(urlMap.get("sk"))
                                .setSi(urlMap.get("si"))
                                .setCi(urlMap.get("ci"))
                                .setU(urlMap.get("u"))
                                .setRu(urlMap.get("ru"))
                                .setEt(urlMap.get("et"))
                                .setTt(urlMap.get("tt"))
                                .setUa(entry.getUserAgent())
                                .setCs(urlMap.get("cs"))
                                .setV(urlMap.get("v"))
                                .setEp(urlMap.get("ep"))
                                .setWh(urlMap.get("wh"))
                                .setRnd(urlMap.get("rnd"))
                                .setIp(entry.getIp())
                                .setTs(convertToDate(entry.getTimestamp()).getTime());
                    }
                }

                return ngxEvent;
            }
        }).filter(x -> null != x.sk && x.sk.length() == 32); // keep only events with a valid 32-char session key

//        env.setParallelism(1);
        // Assign event-time timestamps/watermarks (5 s bounded out-of-orderness),
        // using the log timestamp carried in each event.
        SingleOutputStreamOperator<NginxLogEvent> ds = map
                .assignTimestampsAndWatermarks(
                        WatermarkStrategy.<NginxLogEvent>forBoundedOutOfOrderness(Duration.ofSeconds(5))
                                .withTimestampAssigner(new SerializableTimestampAssigner<NginxLogEvent>() {
                                    @Override
                                    public long extractTimestamp(NginxLogEvent nginxLogEvent, long l) {
                                        return nginxLogEvent.ts;
                                    }
                                })
                );
//        ds.print("ds test");
        // Side output: click events.
        OutputTag<NginxLogEvent> clickTag = new OutputTag<NginxLogEvent>("click") {
        };
        // Side output: custom events — TODO: to be handled later.
        OutputTag<NginxLogEvent> customTag = new OutputTag<NginxLogEvent>("custom") {
        };
        // Side output: spider/crawler traffic.
        OutputTag<NginxLogEvent> spiderTag = new OutputTag<NginxLogEvent>("spider") {
        };

        // Main output: regular page-view (PV) stream; everything else is routed
        // to the side outputs above based on the user agent and the "et" field.
        SingleOutputStreamOperator<NginxLogEvent> pvDs = ds.process(new ProcessFunction<NginxLogEvent, NginxLogEvent>() {
            @Override
            public void processElement(NginxLogEvent value, ProcessFunction<NginxLogEvent, NginxLogEvent>.Context ctx, Collector<NginxLogEvent> out) throws Exception {
                // Route spider traffic out first ("n" means "not a spider").
                if (!"n".equals(Common.getSpider(value.ua))) {
                    ctx.output(spiderTag, value);
                } else if ("0".equals(value.et)) {
                    // et == "0": page view.
                    out.collect(value);
                } else if ("1".equals(value.et) || "2".equals(value.et)) {
                    // et == "1"/"2": click event.
                    ctx.output(clickTag, value);
                } else if ("3".equals(value.et)) {
                    // et == "3": custom event.
                    ctx.output(customTag, value);
                }
                // NOTE(review): any other et value is silently dropped — confirm intended.
            }
        });

        pvDs.print("page---");

        // Click stream.
        DataStream<NginxLogEvent> clickDs = pvDs.getSideOutput(clickTag);
        // Custom-event stream — TODO: may be handled later.
        DataStream<NginxLogEvent> customDs = pvDs.getSideOutput(customTag);
        // Spider stream.
        DataStream<NginxLogEvent> spiderDs = pvDs.getSideOutput(spiderTag);


        // Aggregation logic.
        // Page statistics.
        handlePageDs(pvDs);

        // Page top-N ranking.
        handleTopNPage(pvDs);
        // Page statistics end.


        // Click-stream statistics; clicks could later be stored separately (low volume).
        handleClickDs(clickDs);
        // Click-stream statistics end.

        // Custom events.
        //////

        // Spiders.
        handleSpider(spiderDs);

        env.execute("AnaStream");
    }

    /**
     * Aggregates page PV statistics per endpoint (ep) over a tumbling
     * event-time window, firing early via the custom trigger, and pushes
     * the results to the RPC sink.
     *
     * @param ds stream of page-view events
     */
    public void handlePageDs(DataStream<NginxLogEvent> ds)
    {
        // Window length and early-fire interval come from configuration.
        long windowSeconds = Long.parseLong(PropertiesUtil.getProperty("ana_stream.tumble.window"));
        long earlyFireInterval = Long.parseLong(PropertiesUtil.getProperty("ana_stream.tumble.window.out"));

        SingleOutputStreamOperator<PageEvent> pageStats = ds
                .keyBy((KeySelector<NginxLogEvent, String>) event -> event.ep)
                .window(TumblingEventTimeWindows.of(Time.seconds(windowSeconds)))
                .trigger(new CustomTimeWindowTrigger(earlyFireInterval))
                .aggregate(new PageAnaAgg(), new PageAnaProcess());

        pageStats.print("page_agg");
        // Ship aggregated page events to the downstream RPC service.
        pageStats.addSink(new RpcSink<>("RenrenV2\\EventTracking\\ETPageEventStatService", "addData"));
    }

    /**
     * Computes the top-N most-visited detail pages over a 7-day tumbling
     * event-time window (with periodic early firing), then ranks them per
     * window and caches the ranking through the RPC sink.
     *
     * @param ds stream of page-view events
     */
    public void handleTopNPage(DataStream<NginxLogEvent> ds)
    {
        // Valid document ids are positive and fit into 32 bits. Using a long
        // constant avoids recomputing Math.pow(2, 32) (a double) per record.
        final long maxDocId = 1L << 32;

        // Detail-page URL access top-N.
        SingleOutputStreamOperator<TopNPage> topAgg = ds
                .filter((FilterFunction<NginxLogEvent>) event -> {
                    if (null == event.u) {
                        return false;
                    }
                    String idText = extractNumber(event.u);
                    if (null == idText) {
                        return false;
                    }
                    long id = Long.parseLong(idText);
                    return id > 0 && id < maxDocId;
                })
                // Extract the numeric document id as the stream element.
                .map((MapFunction<NginxLogEvent, Long>) event -> Long.parseLong(extractNumber(event.u)))
                // Explicit type hint: Flink cannot always extract output types from lambdas.
                .returns(Types.LONG)
                .keyBy((KeySelector<Long, Long>) docId -> docId)
                .window(TumblingEventTimeWindows.of(Time.days(7)))
                // NOTE(review): early-fire interval is hard-coded to 5 here while the
                // other windows read "ana_stream.tumble.window.out" — confirm intended.
                .trigger(new CustomTimeWindowTrigger(5))
                .aggregate(new TopPageAgg(), new TopPageProcess());

        // Rank the per-page counts within each window, keeping the top 300.
        SingleOutputStreamOperator<ArrayList<TopNPage>> topN = topAgg
                .keyBy(x -> x.window)
                .process(new TopNHotPages(300));
        topN.print("topN");
        topN.addSink(new RpcSink<>("RenrenV2\\EventTracking\\ETTopNStatService", "setTopXCache"));
    }

    /**
     * Counts click events per "ident-position" key over a tumbling event-time
     * window (with early firing) and forwards the per-window totals to the
     * RPC sink.
     *
     * @param ds stream of click events (et == "1" or "2")
     */
    public void handleClickDs(DataStream<NginxLogEvent> ds)
    {
        SingleOutputStreamOperator<ClickEvent> process = ds.map((MapFunction<NginxLogEvent, ClickEvent>) ngx -> {
                    // ep carries "ident-position"; guard against a missing ep so a
                    // single malformed event cannot crash the whole job with an NPE.
                    String identAndPos = ngx.ep;
                    if (null == identAndPos) {
                        identAndPos = "unknown";
                    }
                    if (!identAndPos.contains("-")) {
                        // No position parameter supplied — default to position 0.
                        identAndPos = identAndPos + "-0";
                    }
                    String[] split = identAndPos.split("-");

                    return new ClickEvent(split[0], split[1], ngx.cs, 0L, 0L, 0);
                }).keyBy((KeySelector<ClickEvent, String>) clickEvent -> clickEvent.ident + "-" + clickEvent.position)
                .window(TumblingEventTimeWindows.of(Time.seconds(Long.parseLong(PropertiesUtil.getProperty("ana_stream.tumble.window")))))
                .trigger(new CustomTimeWindowTrigger(Long.parseLong(PropertiesUtil.getProperty("ana_stream.tumble.window.out"))))
                .process(new ProcessWindowFunction<ClickEvent, ClickEvent, String, TimeWindow>() {
                    @Override
                    public void process(String key, ProcessWindowFunction<ClickEvent, ClickEvent, String, TimeWindow>.Context context, Iterable<ClickEvent> elements, Collector<ClickEvent> out) throws Exception {
                        // Count elements, keeping the last one as the output carrier
                        // (same behavior as before, without an unguarded null deref).
                        int total = 0;
                        ClickEvent last = null;
                        for (ClickEvent event : elements) {
                            last = event;
                            total += 1;
                        }
                        // Flink only invokes process() for non-empty windows, but be defensive.
                        if (null == last) {
                            return;
                        }
                        last.startTime = context.window().getStart();
                        last.endTime = context.window().getEnd();
                        last.total = total;
                        out.collect(last);
                    }
                });

        process.print("click");

        // Push window totals to the downstream RPC service.
        process.addSink(new RpcSink<>("RenrenV2\\EventTracking\\ETClickEventStatService", "addData"));


        // Custom-parameter handling for the click stream may be added later.
    }

    /**
     * Counts spider/crawler hits per spider name over a tumbling event-time
     * window (with early firing) and forwards the totals to the RPC sink.
     *
     * @param ds stream of spider traffic (routed via the spider side output)
     */
    public void handleSpider(DataStream<NginxLogEvent> ds)
    {
        SingleOutputStreamOperator<SpiderEvent> reduce = ds.map((MapFunction<NginxLogEvent, Tuple2<String, Long>>) ngx -> {
                    // One (spiderName, 1) pair per hit.
                    String spider = Common.getSpider(ngx.ua);
                    return Tuple2.of(spider, 1L);
                }).returns(Types.TUPLE(Types.STRING, Types.LONG))
                .keyBy(new KeySelector<Tuple2<String, Long>, String>() {
                    @Override
                    public String getKey(Tuple2<String, Long> stringLongTuple2) throws Exception {
                        return stringLongTuple2.f0;
                    }
                })
                .window(TumblingEventTimeWindows.of(Time.seconds(Long.parseLong(PropertiesUtil.getProperty("ana_stream.tumble.window")))))
                .trigger(new CustomTimeWindowTrigger(Long.parseLong(PropertiesUtil.getProperty("ana_stream.tumble.window.out"))))
                .reduce(new ReduceFunction<Tuple2<String, Long>>() {
                    @Override
                    public Tuple2<String, Long> reduce(Tuple2<String, Long> v1, Tuple2<String, Long> v2) throws Exception {
                        // BUGFIX: combine BOTH partial counts. The previous "v1.f1 + 1"
                        // only worked because every incoming v2.f1 happened to be 1;
                        // a ReduceFunction must be associative over partial aggregates.
                        return Tuple2.of(v1.f0, v1.f1 + v2.f1);
                    }
                }, new ProcessWindowFunction<Tuple2<String, Long>, SpiderEvent, String, TimeWindow>() {

                    @Override
                    public void process(String s, ProcessWindowFunction<Tuple2<String, Long>, SpiderEvent, String, TimeWindow>.Context context, Iterable<Tuple2<String, Long>> elements, Collector<SpiderEvent> out) throws Exception {
                        // The reduce leaves exactly one aggregate per key/window.
                        long start = context.window().getStart();
                        long end = context.window().getEnd();
                        Tuple2<String, Long> next = elements.iterator().next();
                        out.collect(new SpiderEvent(start, end, next.f0, next.f1));
                    }
                });

        reduce.print("spider");
        // Push per-spider totals to the downstream RPC service.
        reduce.addSink(new RpcSink<>("RenrenV2\\EventTracking\\ETSpiderEventStatService", "addData"));
    }

}
