package com.behavioranalysis.flinkprogram.flink.tablesql;

import com.behavioranalysis.flinkprogram.conf.ConfigurationManager;
import com.behavioranalysis.flinkprogram.constant.Constants;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.java.tuple.Tuple;
import org.apache.flink.api.java.tuple.Tuple6;
import org.apache.flink.streaming.api.TimeCharacteristic;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.AssignerWithPeriodicWatermarks;
import org.apache.flink.streaming.api.watermark.Watermark;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer011;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.TableSchema;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.table.descriptors.Kafka;

import javax.annotation.Nullable;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.Properties;


/**
 * Flink job: real-time statistics over the ad-click log stream, built on the
 * Table/SQL API with a Kafka source.
 */
public class AdClickRealTimeStatKafka2FlinkTableSql {

    /**
     * Job entry point.
     *
     * Pipeline: Kafka source -> parse each space-separated log line
     * ("timestamp province city userid adid") into a Tuple6 with a composite
     * key "yyyyMMdd_userid_adid" -> assign event-time timestamps and periodic
     * watermarks -> register the stream as table "adRealTimeLog" -> convert
     * the table back to an append stream and print it.
     *
     * @param args command-line arguments (unused)
     * @throws Exception if the job graph cannot be built or execution fails
     */
    public static void main(String[] args) throws Exception {
        // Build the stream execution environment (local or cluster, per config).
        StreamExecutionEnvironment sEnv = getEnv();
        System.out.println("---------------1-------------------");
        // Use event time so downstream windows follow the log timestamps.
        sEnv.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);
        System.out.println("---------------2-------------------");

        // Create the streaming table environment on top of the stream env.
        StreamTableEnvironment sTableEnv = StreamTableEnvironment.create(sEnv);
        System.out.println("---------------3-------------------");

        // Configure the Kafka consumer.
        Properties props = new Properties();
        props.setProperty(Constants.ZOOKEEPER_CONNECT, ConfigurationManager.getProperty(Constants.ZOOKEEPER_CONNECT));
        props.setProperty(Constants.BOOTSTRAP_SERVERS, ConfigurationManager.getProperty(Constants.BOOTSTRAP_SERVERS));
        props.setProperty("group.id", "adRealTimeLogGroup");
        props.setProperty("auto.offset.reset", "earliest");

        // Create a Kafka consumer that reads raw log lines as strings.
        FlinkKafkaConsumer011<String> consumer =
                new FlinkKafkaConsumer011<>(
                        ConfigurationManager.getProperty(Constants.KAFKA_TOPICS),
                        new SimpleStringSchema(),
                        props);

        DataStream<String> adRealTimeLog = sEnv.addSource(consumer);

        // Parse "timestamp province city userid adid" lines into Tuple6 records.
        DataStream<Tuple6<Long, String, String, String, String, String>> adRealTimeLogTuple = adRealTimeLog.map(
                new MapFunction<String, Tuple6<Long, String, String, String, String, String>>() {
                    // One instance per subtask, so this non-thread-safe
                    // formatter is not shared across threads.
                    private final SimpleDateFormat simpleDateFormat =
                            new SimpleDateFormat("yyyyMMdd");

                    @Override
                    public Tuple6<Long, String, String, String, String, String> map(String log) throws Exception {
                        String[] fields = log.split(" ");
                        // parseLong: the target is a primitive, no boxing needed.
                        long timestamp = Long.parseLong(fields[0]);
                        String province = fields[1];
                        String city = fields[2];
                        String userid = fields[3];
                        String adid = fields[4];
                        // Derive the day key (yyyyMMdd) from the event timestamp.
                        Date date = new Date(timestamp);
                        String datekey = simpleDateFormat.format(date);

                        // Composite key: <yyyyMMdd>_<userid>_<adid>.
                        String key = datekey + "_" + userid + "_" + adid;

                        return new Tuple6<>(timestamp, province, city, userid, adid, key);
                    }
                }
        );

        /*
         * Step 1: assign event-time timestamps and periodic watermarks so a
         * tumbling window (e.g. 2 minutes / 120 s) can later be used for
         * blacklist filtering. The blacklist table is expected to be read as
         * static data, not as a stream.
         */
        DataStream<Tuple6<Long, String, String, String, String, String>> watermarkStream = adRealTimeLogTuple
                .assignTimestampsAndWatermarks(
                        // Assign timestamps from field f0 and emit bounded-out-of-orderness watermarks.
                        new AssignerWithPeriodicWatermarks<Tuple6<Long, String, String, String, String, String>>() {
                            private long currentMaxTimestamp = 0L;
                            // Maximum allowed out-of-orderness: 5 seconds.
                            private long maxOutOfOrderness = 5000L;

                            SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS");

                            @Nullable
                            @Override
                            public Watermark getCurrentWatermark() {
                                // The watermark trails the highest timestamp seen
                                // so far by the allowed lateness.
                                return new Watermark(currentMaxTimestamp - maxOutOfOrderness);
                            }

                            @Override
                            public long extractTimestamp(Tuple6<Long, String, String, String, String, String> element, long previousElementTimestamp) {
                                long timestamp = element.f0;
                                currentMaxTimestamp = Math.max(timestamp, currentMaxTimestamp);

                                // Debug trace: element time, running max and current watermark.
                                System.out.println("timestamp:"
                                        + element.f0 + "|" + format.format(element.f0) + ","
                                        + currentMaxTimestamp + "|" + format.format(currentMaxTimestamp) + ","
                                        + getCurrentWatermark().toString());
                                return timestamp;
                            }
                        }
                );

        // Register the stream as table "adRealTimeLog"; the ".rowtime" suffix
        // turns the first tuple field into the event-time attribute "rowtime".
        sTableEnv.registerDataStream(
                "adRealTimeLog",
                watermarkStream,
                "rowtime.rowtime, province, city, userid, adid, key"
        );

        Table adRealTimeLogTable = sTableEnv.scan("adRealTimeLog");

        TableSchema watermarkTableSchema = adRealTimeLogTable.getSchema();
        System.out.println("------schema-----" + watermarkTableSchema.toString());

        // Convert the (append-only) table back into a typed tuple stream.
        DataStream<Tuple> appendStream = sTableEnv.toAppendStream(
                adRealTimeLogTable,
                Types.TUPLE(
                        Types.SQL_TIMESTAMP,
                        Types.STRING,
                        Types.STRING,
                        Types.STRING,
                        Types.STRING,
                        Types.STRING)
        );

        System.out.println("---------------4-------------------");

        appendStream.print();

        sEnv.execute("ad");
    }

    /**
     * Registers a descriptor-based Kafka table source named "adRealTimeLog".
     *
     * NOTE(review): currently unused and incomplete — the required
     * withFormat(...) and withSchema(...) sections were commented out, and the
     * Kafka connector needs both before registerTableSource(...) can succeed
     * at runtime. Kept for reference until the descriptor source is finished.
     *
     * @param sTableEnv the table environment in which to register the source
     */
    private static void connectKafka(StreamTableEnvironment sTableEnv) {
        sTableEnv
                .connect(
                        new Kafka()
                                // Valid connector versions: "0.8", "0.9",
                                // "0.10", "0.11", and "universal".
                                .version("universal")
                                // Required: topic from which the table is read.
                                .topic(ConfigurationManager.getProperty(Constants.KAFKA_TOPICS))

                                // Optional: connector-specific properties.
                                .property(Constants.ZOOKEEPER_CONNECT, ConfigurationManager.getProperty(Constants.ZOOKEEPER_CONNECT))
                                .property(Constants.BOOTSTRAP_SERVERS, ConfigurationManager.getProperty(Constants.BOOTSTRAP_SERVERS))
                                .property(Constants.GROUP_ID, ConfigurationManager.getProperty(Constants.GROUP_ID))
                )

                // TODO: declare .withFormat(...) (e.g. Csv with a " " field
                // delimiter) and .withSchema(...) (rowtime + the string fields)
                // before this source can actually be used.

                // Append-only update mode (alternatives: inUpsertMode(), inRetractMode()).
                .inAppendMode()

                // Register as a table source under the name "adRealTimeLog".
                .registerTableSource("adRealTimeLog");
    }

    /**
     * Builds the stream execution environment: a local environment when the
     * configuration flag {@code Constants.FLINK_LOCAL} is true (local testing),
     * otherwise the environment supplied by the cluster/runtime context.
     *
     * @return the {@link StreamExecutionEnvironment} to build the job on
     */
    private static StreamExecutionEnvironment getEnv() {
        if (ConfigurationManager.getBoolean(Constants.FLINK_LOCAL)) {
            return StreamExecutionEnvironment.createLocalEnvironment();
        }
        return StreamExecutionEnvironment.getExecutionEnvironment();
    }
}
