package com.bigdata.flink.consumer;

import com.bigdata.flink.conf.ConfigurationManager;
import com.bigdata.flink.conf.KafkaProperties;

// NOTE(review): unused internal-JDK import; kept only because it may not be removed here — delete when convenient.
import jdk.nashorn.internal.codegen.types.Type;

import org.apache.flink.api.common.eventtime.*;
import org.apache.flink.api.common.functions.FilterFunction;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.functions.RuntimeContext;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.api.java.tuple.Tuple;
import org.apache.flink.api.java.tuple.Tuple3;
import org.apache.flink.api.java.tuple.Tuple4;
import org.apache.flink.streaming.api.TimeCharacteristic;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.AssignerWithPeriodicWatermarks;
import org.apache.flink.streaming.api.functions.windowing.WindowFunction;
import org.apache.flink.streaming.api.watermark.Watermark;
import org.apache.flink.streaming.api.windowing.assigners.TumblingEventTimeWindows;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.streaming.api.windowing.windows.TimeWindow;
import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkFunction;
import org.apache.flink.streaming.connectors.elasticsearch.RequestIndexer;
import org.apache.flink.streaming.connectors.elasticsearch6.ElasticsearchSink;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.apache.flink.util.Collector;
import org.apache.http.HttpHost;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.client.Requests;

import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.time.Duration;
import java.util.*;

/**
 * Flink streaming job: consumes tab-separated access-log lines from Kafka,
 * keeps only records with level {@code "E"}, sums traffic per domain over
 * 1-minute event-time tumbling windows, and writes the per-minute totals
 * to Elasticsearch.
 *
 * Expected input line layout (split on {@code "\t"}):
 * values[2] = level, values[3] = "yyyy-MM-dd HH:mm:ss" timestamp,
 * values[5] = domain, values[6] = traffic (long).
 */
public class LogAnalysis {

    // static final: the class is only used via the static main(), so an
    // instance field would never be reachable; the per-record parse error
    // handling below relies on this logger.
    private static final Logger logger = LogManager.getLogger(LogAnalysis.class);

    public static void main(String[] args) throws Exception {
        // Stream-processing execution environment.
        final StreamExecutionEnvironment senv = StreamExecutionEnvironment.getExecutionEnvironment();

        // Use EventTime as the time standard (default would be processing time).
        // Deprecated since Flink 1.12 (event time is the default there), kept for
        // compatibility with the Flink version this project targets.
        senv.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);

        // Consume the records produced by the Kafka producer.
        Properties props = KafkaProperties.getKafkaProperties();

        DataStream<String> sourceStream = senv.addSource(new FlinkKafkaConsumer<>(
                KafkaProperties.getTopic(),
                new SimpleStringSchema(),
                props));

        /*
         * Production note: always consider robustness and data accuracy.
         * Dirty data, or data that violates the business rules, must be filtered
         * out completely before any business logic runs.
         *
         * For this statistic only level == "E" records are counted; every other
         * level is out of scope.
         */
        DataStream<Tuple3<Long, String, Long>> logData = sourceStream.map((MapFunction<String, Tuple4<String, Long, String, Long>>) value -> {
            String[] values = value.split("\t");
            String level = values[2];
            String timeStr = values[3];
            long time = 0L;
            SimpleDateFormat sourceFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
            try {
                time = sourceFormat.parse(timeStr).getTime();
            } catch (ParseException e) {
                // Leave time at 0 so the f1 != 0 filter below drops the record,
                // instead of one malformed line failing the whole job.
                logger.warn("unparseable timestamp '{}', record will be dropped", timeStr, e);
            }
            String domain = values[5];
            long traffic = Long.parseLong(values[6]);
            return new Tuple4<>(level, time, domain, traffic);
        }).returns(Types.TUPLE(Types.STRING, Types.LONG, Types.STRING, Types.LONG))
                // drop records whose timestamp failed to parse (time stayed 0)
                .filter((FilterFunction<Tuple4<String, Long, String, Long>>) value -> value.f1 != 0)
                // only level "E" is counted
                .filter((FilterFunction<Tuple4<String, Long, String, Long>>) value -> value.f0.equals("E"))
                // project to (eventTime, domain, traffic)
                .map((MapFunction<Tuple4<String, Long, String, Long>, Tuple3<Long, String, Long>>) value -> {
                    Long time = value.f1;
                    String domain = value.f2;
                    Long traffic = value.f3;
                    return new Tuple3<>(time, domain, traffic);
                }).returns(Types.TUPLE(Types.LONG, Types.STRING, Types.LONG));

        //logData.print().setParallelism(1);
        // Watermarks tolerate 10 s of out-of-orderness; event time comes from f0.
        // Key by domain (f1) via a type-safe KeySelector instead of the positional
        // keyBy(1), which is deprecated in the Flink versions that provide
        // WatermarkStrategy (1.11+).
        KeyedStream<Tuple3<Long, String, Long>, String> keyedStream = logData.assignTimestampsAndWatermarks(WatermarkStrategy
                .<Tuple3<Long, String, Long>>forBoundedOutOfOrderness(Duration.ofSeconds(10))
                .withTimestampAssigner((SerializableTimestampAssigner<Tuple3<Long, String, Long>>) (element, recordTimestamp) -> element.f0)
                ).keyBy((KeySelector<Tuple3<Long, String, Long>, String>) value -> value.f1); // key by domain

        //keyedStream.print().setParallelism(1);
        // Per-domain 1-minute tumbling event-time windows: sum the traffic and
        // render the minute bucket as "yyyy-MM-dd HH:mm".
        DataStream<Tuple3<String, String, Long>> resultData = keyedStream.window(TumblingEventTimeWindows.of(Time.seconds(60)))
                .apply(new WindowFunction<Tuple3<Long, String, Long>, Tuple3<String, String, Long>, String, TimeWindow>() {
                    @Override
                    public void apply(String domain, TimeWindow window, Iterable<Tuple3<Long, String, Long>> input, Collector<Tuple3<String, String, Long>> out) throws Exception {
                        long sum = 0;
                        String time = "";
                        for (Tuple3<Long, String, Long> next : input) {
                            sum += next.f2;
                            // minute label taken from the last element's event time
                            time = new SimpleDateFormat("yyyy-MM-dd HH:mm").format(new Date(next.f0));
                        }
                        /*
                         * f0: the minute, e.g. "2020-09-09 20:20"
                         * f1: the domain
                         * f2: the sum of traffic for that minute
                         */
                        out.collect(new Tuple3<>(time, domain, sum));
                    }
                });

        //resultData.print().setParallelism(1);

        /*
            Run from the command line: curl -XPUT 'http://workstation:9200/cdn' to create index cdn
            curl -H "Content-Type: application/json" -XPOST 'http://workstation:9200/cdn/traffic/_mapping' -d '{
            "traffic":{
                "properties":{
                    "domain":{"type":"text"},
                    "traffics":{"type":"long"},
                    "time":{"type":"date","format":"yyyy-MM-dd HH:mm"}
                }
             }
           }'
           Run from the command line: curl -XDELETE 'http://workstation:9200/cdn'
           curl -H "Content-Type: application/json" -XPOST 'http://workstation:9200/cdn/traffic/_mapping' -d '{
            "traffic":{
                "properties":{
                    "domain":{"type":"keyword"},
                    "traffics":{"type":"long"},
                    "time":{"type":"date","format":"yyyy-MM-dd HH:mm"}
                }
             }
           }'
        */
        // Write the windowed results to Elasticsearch.
        List<HttpHost> httpHosts = new ArrayList<>();
        httpHosts.add(new HttpHost(ConfigurationManager.getProperty("es_host"),
                ConfigurationManager.getInteger("es_port"), "http"));
        ElasticsearchSink.Builder<Tuple3<String, String, Long>> esSinkBuilder = new ElasticsearchSink.Builder<>(
                httpHosts,
                new ElasticsearchSinkFunction<Tuple3<String, String, Long>>() {
                    /** Builds an index request; id = "time-domain" so re-emitted windows upsert. */
                    public IndexRequest createIndexRequest(Tuple3<String, String, Long> element) {
                        Map<String, Object> json = new HashMap<>();
                        json.put("time", element.f0);
                        json.put("domain", element.f1);
                        json.put("traffics", element.f2);

                        String id = element.f0 + "-" + element.f1;
                        return Requests.indexRequest()
                                .index(ConfigurationManager.getProperty("es_index"))
                                .type(ConfigurationManager.getProperty("es_type"))
                                .id(id)
                                .source(json);
                    }

                    @Override
                    public void process(Tuple3<String, String, Long> element, RuntimeContext runtimeContext, RequestIndexer requestIndexer) {
                        requestIndexer.add(createIndexRequest(element));
                    }
                }
        );
        // Flush after every action so results show up immediately (tune up for throughput).
        esSinkBuilder.setBulkFlushMaxActions(1);

        resultData.addSink(esSinkBuilder.build());

        senv.execute("LogAnalysis");
    }
}
