package com.eeboot;

import com.alibaba.fastjson.JSONObject;
import com.eeboot.util.DateUtil;
import com.eeboot.util.KafKaUtil;
import com.eeboot.vo.LogCount;
import com.eeboot.vo.LogCountGroup;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.serialization.SerializationSchema;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.java.tuple.Tuple;
import org.apache.flink.api.java.tuple.Tuple1;
import org.apache.flink.streaming.api.TimeCharacteristic;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.timestamps.BoundedOutOfOrdernessTimestampExtractor;
import org.apache.flink.streaming.api.functions.windowing.WindowFunction;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.streaming.api.windowing.windows.TimeWindow;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer011;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer011;
import org.apache.flink.util.Collector;

import java.nio.charset.StandardCharsets;
import java.util.Properties;

/**
 * Flink streaming job that reads per-profile {@link LogCount} records (JSON) from a Kafka
 * topic, aggregates them into event-time sliding windows keyed by {@code profile}, and
 * writes one {@link LogCountGroup} per profile/window to a sink Kafka topic as JSON.
 *
 * <p>Usage:
 * {@code LogJoiner <bootstrapServers> <groupId> <topic> <sinkTopic>
 * [<windowSize(sec)>] [<windowSlide(sec)>] [<delay(sec)>]}
 */
public class LogJoiner {

    /**
     * Offset (ms) subtracted from an incoming record's {@code windowEnd} to recover its
     * event timestamp. NOTE(review): this assumes the upstream producer's window length
     * is 30 seconds — confirm against the producing job.
     */
    private static final long WINDOW_END_OFFSET_MS = 30000L;

    public static void main(String[] args) throws Exception {
        // Argument check: the first four positional args are mandatory, the rest optional.
        if (args.length < 4) {
            // Adjacent constant literals are folded at compile time; no StringBuilder needed.
            System.err.println("USAGE:\n"
                    + "LogJoiner <bootstrapServers> <groupId> <topic> <sinkTopic> [<windowSize(sec)>] [<windowSlide(sec)>] [<delay(sec)>]"
                    + "\n"
                    + "LogJoiner <bootstrapServers> <groupId> <读取topic> <输出topic> [<窗口长度(sec)>] [<滑动长度(sec)>] [<窗口等待时间(sec)>]");
            return;
        }

        String bootstrapServers = args[0];
        String groupId = args[1];
        String topic = args[2];
        String sinkTopic = args[3];
        // Defaults: 60s window, 60s slide (i.e. effectively tumbling), 60s allowed out-of-orderness.
        long windowSize = 60;
        long windowSlide = 60;
        long delay = 60;
        if (args.length > 4) {
            windowSize = Long.parseLong(args[4]);
        }
        if (args.length > 5) {
            windowSlide = Long.parseLong(args[5]);
        }
        if (args.length > 6) {
            delay = Long.parseLong(args[6]);
        }

        final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        Properties props = KafKaUtil.genKafkaProperties(bootstrapServers, groupId);

        // Windows are driven by the records' own timestamps, not by arrival time.
        env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);
        env.setParallelism(1);
        DataStreamSource<String> dataStreamSource = env.addSource(new FlinkKafkaConsumer011<>(
                topic,
                new SimpleStringSchema(),
                props
        )).setParallelism(1);

        dataStreamSource
                // Deserialize each raw JSON payload into a LogCount.
                .map(new MapFunction<String, LogCount>() {
                        @Override
                        public LogCount map(String value) throws Exception {
                            LogCount logCount = JSONObject.parseObject(value, LogCount.class);
                            // Debug trace: raw payload, derived event time, wall-clock receive time.
                            System.out.println("===receive===" + value
                                    + DateUtil.getStringFromLong("yyyy-MM-dd HH:mm:ss.SSS", logCount.windowEnd - WINDOW_END_OFFSET_MS)
                                    + "/" + DateUtil.getNow(DateUtil.getDefaultFormatter()));
                            return logCount;
                        }
                    })
                // Watermarks lag the max observed timestamp by `delay` seconds to tolerate
                // out-of-order records before a window fires.
                .assignTimestampsAndWatermarks(new BoundedOutOfOrdernessTimestampExtractor<LogCount>(Time.seconds(delay)) {
                    @Override
                    public long extractTimestamp(LogCount element) {
                        return element.windowEnd - WINDOW_END_OFFSET_MS;
                    }
                })
                .keyBy("profile")
                .timeWindow(Time.seconds(windowSize), Time.seconds(windowSlide))
                // Fold every LogCount of one profile/window into a single LogCountGroup.
                .apply(new WindowFunction<LogCount, LogCountGroup, Tuple, TimeWindow>() {
                    @Override
                    public void apply(Tuple tuple, TimeWindow window, Iterable<LogCount> input, Collector<LogCountGroup> out) throws Exception {
                        LogCountGroup group = new LogCountGroup(((Tuple1<String>) tuple).f0, window.getEnd());
                        for (LogCount logCount : input) {
                            group.putLogCount(logCount);
                            System.out.println(logCount.toJsonString());
                        }
                        out.collect(group);
                    }
                })
                .addSink(new FlinkKafkaProducer011<LogCountGroup>(bootstrapServers, sinkTopic, new SerializationSchema<LogCountGroup>() {
                    @Override
                    public byte[] serialize(LogCountGroup element) {
                        // Fix: explicit charset — the no-arg getBytes() uses the platform
                        // default, which can corrupt non-ASCII JSON on misconfigured hosts.
                        return element.toJsonString().getBytes(StandardCharsets.UTF_8);
                    }
                }));

        env.execute("log count join by window");
    }

}
