package top.birdhk.stream;

import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.api.java.utils.MultipleParameterTool;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.apache.flink.util.Collector;
import org.apache.flink.util.Preconditions;

import java.util.Properties;
import java.util.UUID;

import org.apache.kafka.common.serialization.ByteArrayDeserializer;

/**
 * Created by yuanfq on 2021-05-21.
 */
/**
 * Streaming word count over a Kafka topic.
 *
 * <p>Reads lines from Kafka, tokenizes them into lowercase words, and prints
 * running {@code (word, count)} totals to stdout.
 *
 * <p>Created by yuanfq on 2021-05-21.
 */
public class KafkaConsumer {

    /**
     * Job entry point.
     *
     * <p>With {@code --ip <bootstrap.servers>} and {@code --topic <name>} present,
     * consumes the given topic using the security properties supplied by
     * {@code SecurityUtils}; otherwise falls back to a hard-coded local broker
     * ({@code 10.20.7.1:9092}) and topic {@code "topic35"}.
     *
     * @param args command-line arguments, parsed by {@link MultipleParameterTool}
     * @throws Exception if job construction or execution fails
     */
    public static void main(String[] args) throws Exception {

        final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Single parallel instance keeps the printed counts in a readable order.
        env.setParallelism(1);
        final MultipleParameterTool params = MultipleParameterTool.fromArgs(args);
        DataStreamSource<String> kafkaSource;

        if (params.has("ip")) {
            System.out.println(params.get("ip"));
            System.out.println(params.get("topic"));
            // Start from the project's security settings, then layer consumer config on top.
            Properties properties = SecurityUtils.getSecurityProperties();
            properties.setProperty("bootstrap.servers", params.get("ip"));
            // Random group id so each run re-reads the topic from the beginning
            // (combined with auto.offset.reset=earliest below).
            properties.put("group.id", UUID.randomUUID().toString());
            properties.put("enable.auto.commit", "true");
            properties.put("auto.offset.reset", "earliest");
            properties.put("auto.commit.interval.ms", "1000");
            properties.put("session.timeout.ms", "30000");
            properties.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
            properties.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

            kafkaSource = env.addSource(new FlinkKafkaConsumer<String>(params.get("topic"), new SimpleStringSchema(), properties));
            // NOTE(review): addSource never returns null, so this check is effectively a no-op;
            // kept for parity with the original flow.
            Preconditions.checkNotNull(kafkaSource, "Input DataStream should not be null.");
        } else {
            System.out.println("没有配置参数使用10.20.7.1");
            // Fallback: plain local consumer config with a fixed group id,
            // reading only new records (auto.offset.reset=latest).
            Properties properties = new Properties();
            properties.setProperty("bootstrap.servers", "10.20.7.1:9092");
            properties.setProperty("group.id", "test");
            properties.setProperty("key.deserializer",
                    "org.apache.kafka.common.serialization.StringDeserializer");
            properties.setProperty("value.deserializer",
                    "org.apache.kafka.common.serialization.StringDeserializer");
            properties.setProperty("auto.offset.reset", "latest");
            kafkaSource = env.addSource(new FlinkKafkaConsumer<String>("topic35", new SimpleStringSchema(), properties));
        }

        DataStream<Tuple2<String, Integer>> counts =
                // Split each line into (word, 1) pairs. Use the Tokenizer defined in
                // this class rather than WordCount.Tokenizer — the original referenced
                // an external duplicate and left the local Tokenizer as dead code.
                kafkaSource.flatMap(new Tokenizer())
                        // Group by the word (tuple field 0) and sum the counts (field 1).
                        // NOTE(review): index-based keyBy is deprecated in newer Flink
                        // versions; migrate to keyBy(t -> t.f0) when upgrading.
                        .keyBy(0).sum(1);

        // Emit the running counts to stdout.
        System.out.println("Printing result to stdout. Use --output to specify output path.");
        counts.print();
        // Lazily-built pipeline only runs once execute() is called.
        env.execute("kafka Count");
    }

    // *************************************************************************
    // USER FUNCTIONS
    // *************************************************************************

    /**
     * Implements the string tokenizer that splits sentences into words as a
     * user-defined FlatMapFunction. The function takes a line (String) and
     * splits it into multiple pairs in the form of "(word,1)" ({@code Tuple2<String,
     * Integer>}).
     */
    public static final class Tokenizer implements FlatMapFunction<String, Tuple2<String, Integer>> {

        /**
         * Lowercases the line, splits on non-word characters, and emits a
         * {@code (token, 1)} pair for every non-empty token.
         *
         * @param value the input line
         * @param out   collector receiving one pair per word
         */
        @Override
        public void flatMap(String value, Collector<Tuple2<String, Integer>> out) {
            // Normalize and split the line; "\\W+" drops punctuation and whitespace.
            String[] tokens = value.toLowerCase().split("\\W+");

            // Emit the pairs, skipping empty tokens produced by leading separators.
            for (String token : tokens) {
                if (token.length() > 0) {
                    out.collect(new Tuple2<>(token, 1));
                }
            }
        }
    }
}
