package com.apex.flink.source;

import com.alibaba.fastjson.JSONObject;
import com.apex.flink.FlinkEnvironment;
import com.apex.flink.stream.FlinkStreamSource;
import com.apex.flink.utils.ConfigKeyName;
import com.apex.flink.utils.SchemaUtil;
import com.apex.flink.utils.TableUtil;
import com.typesafe.config.Config;
import org.apache.commons.lang3.StringUtils;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.table.descriptors.FormatDescriptor;
import org.apache.flink.table.descriptors.Kafka;
import org.apache.flink.table.descriptors.Rowtime;
import org.apache.flink.table.descriptors.Schema;
import org.apache.flink.types.Row;

import java.util.HashMap;
import java.util.Properties;

/**
 * Kafka source: reads source-side Kafka configuration and registers the topic
 * as a Flink table (via the table descriptor API) for downstream consumption.
 */
public class KafkaSource implements FlinkStreamSource<Row> {

    private Config config;
    /** Properties handed to the Kafka descriptor (bootstrap servers, group id, SASL settings). */
    private final Properties kafkaParams = new Properties();
    private String bootstrapServers;
    /** Path to krb5.conf; only populated when Kerberos is enabled. */
    private String krbConf;
    /** Path to the JAAS login config file; only populated when Kerberos is enabled. */
    private String jaasFile;
    private boolean enableKerberos = false;
    private String groupId;
    private String topics;
    /** Name under which the Kafka stream is registered as a temporary table. */
    private String tableName;
    /** Offset reset strategy value: "latest", "earliest" or "specific". */
    private String offsetReset;
    /** Event-time field used for the rowtime attribute; blank means no rowtime. */
    private String rowTimeField;
    /** Bounded out-of-orderness (ms) for the periodic watermark. */
    private long watermark;
    /** Raw schema description consumed by SchemaUtil (format depends on {@link #format}). */
    private Object schemaInfo;
    /** Message format of the topic payload (e.g. json). */
    private String format;

    /**
     * Reads all source-side Kafka settings out of the config.
     *
     * @param plugin execution environment (unused here; settings come from {@link #config})
     */
    @Override
    public void prepare(FlinkEnvironment plugin) {
        if (config.hasPath(ConfigKeyName.SOURCE_KAFKA_CLUSTER_INFO)) {
            bootstrapServers = config.getString(ConfigKeyName.SOURCE_KAFKA_CLUSTER_INFO);
            groupId = config.getString(ConfigKeyName.SOURCE_KAFKA_GROUPID);
            topics = config.getString(ConfigKeyName.SOURCE_KAFKA_TOPICS);
            tableName = config.getString(ConfigKeyName.SOURCE_KAFKA_TABLE_NAME);
            offsetReset = config.getString(ConfigKeyName.SOURCE_KAFKA_OFFSET_RESET);
            rowTimeField = config.getString(ConfigKeyName.SOURCE_KAFKA_ROW_TIME_FIELD);
            watermark = config.getLong(ConfigKeyName.SOURCE_KAFKA_WATERMARK);
            schemaInfo = config.getString(ConfigKeyName.SOURCE_KAFKA_SCHEMA_INFO);
            format = config.getString(ConfigKeyName.SOURCE_KAFKA_MESSAGE_FORMAT);
        }
        // BUG FIX: the original read this flag but never stored it, so
        // enableKerberos stayed false and the Kerberos branches in getData()
        // and getKafkaConnect() were unreachable.
        enableKerberos = config.getBoolean(ConfigKeyName.SOURCE_KAFKA_ENABLE_KERBEROS);
        if (enableKerberos) {
            krbConf = config.getString(ConfigKeyName.SOURCE_KAFKA_KRB5CONF);
            jaasFile = config.getString(ConfigKeyName.SOURCE_KAFKA_JAAS);
        }
    }

    /**
     * @return the raw config this source was built from
     */
    @Override
    public Config getConfig() {
        return config;
    }

    /**
     * @param config source configuration to use
     */
    @Override
    public void setConfig(Config config) {
        this.config = config;
    }

    /**
     * Registers the Kafka topic as a temporary table and returns it as a
     * {@code DataStream<Row>}.
     *
     * <p>NOTE(review): this relies on the deprecated descriptor API
     * ({@code connect(...)}); upstream plans to replace it with SQL DDL based
     * connector registration.
     *
     * @param env Flink execution environment
     * @return append-only row stream for the configured topic
     */
    @Override
    public DataStream<Row> getData(FlinkEnvironment env) {
        if (enableKerberos) {
            System.setProperty("java.security.krb5.conf", krbConf);
            System.setProperty("java.security.auth.login.config", jaasFile);
            System.setProperty("javax.security.auth.useSubjectCredsOnly", "false");
            System.setProperty("sun.security.krb5.debug", "true");
        }
        StreamTableEnvironment tableEnvironment = env.getStreamTableEnvironment();
        tableEnvironment
                .connect(getKafkaConnect())
                .withFormat(setFormat())
                .withSchema(getSchema())
                .inAppendMode()
                .createTemporaryTable(tableName);
        Table table = tableEnvironment.from(tableName);
        return TableUtil.tableToDataStream(tableEnvironment, table, true);
    }

    /**
     * Builds the Kafka connector descriptor: cluster/group properties,
     * optional SASL/Kerberos settings and the start-offset strategy.
     *
     * @return configured Kafka descriptor
     */
    private Kafka getKafkaConnect() {
        kafkaParams.put("bootstrap.servers", bootstrapServers);
        kafkaParams.put("group.id", groupId);
        if (enableKerberos) {
            kafkaParams.put("security.protocol", "SASL_PLAINTEXT");
            kafkaParams.put("sasl.kerberos.service.name", "kafka");
        }
        Kafka kafka = new Kafka().version("universal");
        kafka.topic(topics);
        kafka.properties(kafkaParams);
        // BUG FIX: the original did config.hasPath(OFFSET_RESET)/getString(OFFSET_RESET),
        // i.e. it used the already-resolved value ("latest"/"earliest"/"specific") as a
        // config *path*; that path never exists, so the strategy was silently skipped.
        // Switch on the value itself instead.
        if (StringUtils.isNotBlank(offsetReset)) {
            switch (offsetReset) {
                case "latest":
                    kafka.startFromLatest();
                    break;
                case "earliest":
                    kafka.startFromEarliest();
                    break;
                case "specific":
                    // Expected JSON of the form {"partition": offset, ...}
                    String offset = config.getString("offset.reset.specific");
                    HashMap<Integer, Long> map = new HashMap<>(16);
                    JSONObject.parseObject(offset)
                            .forEach((k, v) -> map.put(Integer.valueOf(k), Long.valueOf(v.toString())));
                    kafka.startFromSpecificOffsets(map);
                    break;
                default:
                    // Unknown value: keep the connector's default start position.
                    break;
            }
        }
        return kafka;
    }

    /**
     * Builds the table schema, attaching a rowtime attribute with a bounded
     * periodic watermark when an event-time field is configured.
     *
     * @return schema descriptor for the registered table
     */
    private Schema getSchema() {
        Schema schema = new Schema();
        SchemaUtil.setSchema(schema, schemaInfo, format);
        if (StringUtils.isNotBlank(rowTimeField)) {
            Rowtime rowtime = new Rowtime();
            rowtime.timestampsFromField(rowTimeField);
            rowtime.watermarksPeriodicBounded(watermark);
            schema.rowtime(rowtime);
        }
        return schema;
    }

    /**
     * Resolves the message format descriptor from the configuration.
     *
     * @return format descriptor for the topic payload
     * @throws RuntimeException if the format configuration is invalid
     */
    private FormatDescriptor setFormat() {
        try {
            return SchemaUtil.setFormat(format, config);
        } catch (Exception e) {
            // BUG FIX: the original printed the stack trace and then threw a new
            // RuntimeException without the cause, losing the failure context.
            throw new RuntimeException("format配置错误", e);
        }
    }
}
