package com.jiuzhi.logger;

import com.jiuzhi.logger.util.NginxStrSplitter;
import org.apache.flink.api.common.functions.ReduceFunction;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.streaming.api.TimeCharacteristic;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer010;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer011;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer010;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer011;
import org.apache.flink.streaming.util.serialization.KeyedSerializationSchemaWrapper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.Properties;

/**
 * Created with IDEA
 * author:tangsixiang@163.com
 * Date:2018/11/27
 * Time:11:26
 */


/**
 * Flink streaming job: consumes raw Nginx access-log lines from a Kafka topic,
 * splits them into (key, count) pairs via {@link NginxStrSplitter}, aggregates
 * counts per key over 30-second processing windows, and writes the results to
 * timestamped text files under {@code /home/flink/log/}.
 *
 * <p>Usage: an optional first CLI argument overrides the Kafka bootstrap
 * servers; topic and consumer group are fixed.
 */
public class KafkaMessageStreaming {

    private static final Logger logger = LoggerFactory.getLogger(KafkaMessageStreaming.class);

    /** Default Kafka topic carrying raw Nginx log lines. */
    private static final String DEFAULT_TOPIC = "jiuzhi_nginx_log";
    /** Default Kafka bootstrap servers, used when no CLI argument is given. */
    private static final String DEFAULT_BOOTSTRAP_SERVERS = "172.16.1.151:9092";

    public static void main(String[] args) {

        // Allow the Kafka broker address to be overridden from the command line.
        String topic = DEFAULT_TOPIC;
        String kafkaIP = DEFAULT_BOOTSTRAP_SERVERS;
        if (args.length >= 1) {
            kafkaIP = args[0];
        }

        StreamExecutionEnvironment environment = StreamExecutionEnvironment.getExecutionEnvironment();
        // Checkpointing is essential for exactly-once Kafka offsets: every 10 seconds.
        environment.enableCheckpointing(10000);
        environment.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);
        environment.setParallelism(1); // default job parallelism

        // Kafka consumer configuration.
        Properties properties = new Properties();
        properties.setProperty("bootstrap.servers", kafkaIP);
        properties.setProperty("zookeeper.connect", "172.16.1.151:2181,172.16.1.75:2181,172.16.1.74:2181");
        properties.setProperty("group.id", "bigData-flink");

        // Flink-wrapped Kafka 0.11 consumer, starting from the latest offsets
        // (previously buffered records are skipped on each fresh start).
        FlinkKafkaConsumer011<String> consumer =
                new FlinkKafkaConsumer011<>(topic, new SimpleStringSchema(), properties);
        consumer.setStartFromLatest();

        // Split each log line into (key, 1) pairs, key by the string field,
        // and sum counts within 30-second windows.
        DataStream<Tuple2<String, Integer>> dataStream = environment.addSource(consumer)
                .flatMap(new NginxStrSplitter())
                .keyBy(0)
                .timeWindow(Time.seconds(30))
                .reduce(new ReduceFunction<Tuple2<String, Integer>>() {
                    @Override
                    public Tuple2<String, Integer> reduce(Tuple2<String, Integer> value1, Tuple2<String, Integer> value2) throws Exception {
                        return new Tuple2<>(value1.f0, value1.f1 + value2.f1);
                    }
                });
        // One output file per job launch, named by the submission timestamp.
        dataStream.writeAsText("/home/flink/log/" + System.currentTimeMillis() + ".log");
        try {
            environment.execute("Nginx访问记录无边界数据统计");
        } catch (Exception e) {
            // execute() blocks for the job's lifetime; log any failure instead
            // of dumping to stderr so it reaches the configured log backend.
            logger.error("Flink job execution failed", e);
        }
    }
}
