package com.aisun.flinkservice.tofile;

import com.aisun.flinkservice.tomessage.MessageTransform;
import com.aisun.flinkservice.tomessage.MessageWaterEmitter;
import com.alibaba.fastjson.JSONObject;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.java.tuple.Tuple;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.TimeCharacteristic;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.windowing.WindowFunction;
import org.apache.flink.streaming.api.windowing.assigners.TumblingEventTimeWindows;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.streaming.api.windowing.windows.TimeWindow;
import org.apache.flink.streaming.connectors.fs.StringWriter;
import org.apache.flink.streaming.connectors.fs.bucketing.BucketingSink;
import org.apache.flink.streaming.connectors.fs.bucketing.DateTimeBucketer;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer010;
import org.apache.flink.util.Collector;

import java.time.ZoneId;
import java.util.Properties;

/**
 * @Author: LiuYuanQi
 * @Date: 2020/4/29 10:08
 * Reads Kafka messages and writes them to HDFS files.
 */
public class Kafka2Hdfs {

    public static void main(String[] args) throws Exception {

        // set up the streaming execution environment
        final StreamExecutionEnvironment env =StreamExecutionEnvironment.getExecutionEnvironment();
            env.enableCheckpointing(5000);
            env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);
            env.setParallelism(2);
        Properties properties = new Properties();

        // kafka目标环境的IP地址和端口号
        properties.setProperty("bootstrap.servers",  "server1:9092");

        // zookeeper目标环境的IP地址和端口号
        properties.setProperty("zookeeper.connect", "server1:2181");

        //  填写hdfs
        properties.setProperty("fs.default-scheme","hdfs://server1:8020");

        // 分区group.id
        properties.setProperty("group.id", "test-consumer-group");

        properties.put("enable.auto.commit", false);
        properties.put("max.poll.records", 1000);

        FlinkKafkaConsumer010<String> flinkKafkaConsumer010 = new FlinkKafkaConsumer010<String>("foo", new SimpleStringSchema(), properties);
        DataStream<Tuple2<String, Long>> keyedStream = env.addSource(flinkKafkaConsumer010)
                .map(new MessageTransform())
                .assignTimestampsAndWatermarks(new MessageWaterEmitter()).keyBy(0)
                .window(TumblingEventTimeWindows.of(Time.seconds(5)))
                .apply(new WindowFunction<Tuple2<String, Long>, Tuple2<String, Long>, Tuple, TimeWindow>() {
                    public void apply(Tuple tuple, TimeWindow window, Iterable<Tuple2<String, Long>> input,
                                      Collector<Tuple2<String, Long>> out) throws Exception {
                        long sum = 0L;
                        int count = 0;
                        for (Tuple2<String, Long> record: input) {
                            sum += record.f1;
                            count++;
                        }
                        System.out.println("****************** " + JSONObject.toJSONString(input));
                        Tuple2<String, Long> result = input.iterator().next();
                        result.f1 = sum / count;
                        out.collect(result);

                    }
                });

        keyedStream.print();
        // execute program

        System.out.println("*********** hdfs ***********************");
        BucketingSink<Tuple2<String, Long>> bucketingSink = new BucketingSink<>("/haha"); //hdfs上的路径
        bucketingSink.setWriter(new StringWriter<>())
                .setBucketer(new DateTimeBucketer<Tuple2<String, Long>>("yyyy-MM-dd", ZoneId.of("Asia/Shanghai")))
                .setBatchSize(1024 * 1024 * 400L) // this is 400 MB
                .setBatchRolloverInterval(60* 60 * 1000L)// this is 60 mins
                .setPendingPrefix("")
                .setPendingSuffix("")
                .setInProgressPrefix(".");

        keyedStream.addSink(bucketingSink);

        env.execute("Kafka2Hdfs");

    }

}
