package com.linkedin.metadata.kafka;

import com.linkedin.common.urn.Urn;
import com.linkedin.data.template.RecordTemplate;
import com.linkedin.metadata.EventUtils;
import com.linkedin.metadata.dao.internal.BaseRemoteWriterDAO;
import com.linkedin.metadata.dao.internal.RestliRemoteWriterDAO;
import com.linkedin.metadata.dao.utils.ModelUtils;
import com.linkedin.metadata.dao.utils.RecordUtils;
import com.linkedin.metadata.restli.DefaultRestliClientFactory;
import com.linkedin.metadata.snapshot.Snapshot;
import com.linkedin.mxe.FailedMetadataChangeEvent;
import com.linkedin.mxe.MetadataChangeEvent;
import com.linkedin.mxe.Topics;
import com.linkedin.restli.client.Client;
import com.linkedin.util.Configuration;

import io.confluent.kafka.serializers.AbstractKafkaAvroSerDeConfig;
import io.confluent.kafka.streams.serdes.avro.GenericAvroSerde;
import io.confluent.kafka.streams.serdes.avro.GenericAvroSerializer;
import lombok.extern.slf4j.Slf4j;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecord;
import org.apache.commons.lang.exception.ExceptionUtils;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.common.serialization.StringSerializer;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.errors.LogAndContinueExceptionHandler;
import org.apache.kafka.streams.kstream.KStream;

import javax.annotation.Nonnull;

import java.io.IOException;
import java.util.Properties;


@Slf4j
public class MceStreamTask {

  private static final String DEFAULT_MCE_KAFKA_TOPIC_NAME = Topics.METADATA_CHANGE_EVENT;
  private static final String DEFAULT_FMCE_KAFKA_TOPIC_NAME = Topics.FAILED_METADATA_CHANGE_EVENT;
  private static final String DEFAULT_GMS_HOST = "localhost";
  private static final String DEFAULT_GMS_PORT = "8080";
  private static final String DEFAULT_KAFKA_BOOTSTRAP_SERVER = "localhost:9092";
  private static final String DEFAULT_KAFKA_SCHEMAREGISTRY_URL = "http://localhost:8081";

  // Producer used to emit FailedMetadataChangeEvents when MCE processing fails.
  private static KafkaProducer<String, GenericRecord> producer;
  private static BaseRemoteWriterDAO _remoteWriterDAO;

  // All-static task entry point; never instantiated.
  private MceStreamTask() {
  }

  /**
   * Entry point: wires up the remote writer DAO, the failed-MCE producer, and the
   * Kafka Streams topology, then starts processing until SIGTERM.
   *
   * @param args unused
   */
  public static void main(final String[] args) {
    log.info("Creating MCE consumer task");
    final Client restClient = DefaultRestliClientFactory.getRestLiClient(
        Configuration.getEnvironmentVariable("GMS_HOST", DEFAULT_GMS_HOST),
        Integer.parseInt(Configuration.getEnvironmentVariable("GMS_PORT", DEFAULT_GMS_PORT))
    );
    _remoteWriterDAO = new RestliRemoteWriterDAO(restClient);
    log.info("RemoteWriterDAO built successfully");

    // Configure the Streams application.
    final Properties streamsConfiguration = getStreamsConfiguration();

    // Configure the Kafka producer for sending failed MCEs.
    final Properties producerProperties = getProducerProperties();

    // Define the producer for sending FailedMetadataChangeEvents.
    producer = new KafkaProducer<>(producerProperties);

    // Define the processing topology of the Streams application.
    final StreamsBuilder builder = new StreamsBuilder();
    createProcessingTopology(builder);
    final KafkaStreams streams = new KafkaStreams(builder.build(), streamsConfiguration);

    // Clean local state prior to starting the processing topology.
    streams.cleanUp();

    // Now run the processing topology via `start()` to begin processing its input data.
    streams.start();

    // Respond to SIGTERM by closing the Streams application and flushing/closing
    // the failed-MCE producer so buffered records are not lost on shutdown.
    Runtime.getRuntime().addShutdownHook(new Thread(() -> {
      streams.close();
      producer.flush();
      producer.close();
    }));
  }

  /**
   * Configure the Streams application.
   *
   * @return Properties for the Kafka Streams application
   */
  static Properties getStreamsConfiguration() {
    final Properties streamsConfiguration = new Properties();
    // Give the Streams application a unique name.  The name must be unique in the Kafka cluster
    // against which the application is run.
    streamsConfiguration.put(StreamsConfig.APPLICATION_ID_CONFIG, "mce-consuming-job");
    streamsConfiguration.put(StreamsConfig.CLIENT_ID_CONFIG, "mce-consuming-job-client");
    // Where to find Kafka broker(s).
    streamsConfiguration.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG,
        Configuration.getEnvironmentVariable("KAFKA_BOOTSTRAP_SERVER", DEFAULT_KAFKA_BOOTSTRAP_SERVER));
    // Specify default (de)serializers for record keys and for record values.
    streamsConfiguration.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
    streamsConfiguration.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, GenericAvroSerde.class.getName());
    streamsConfiguration.put("schema.registry.url",
        Configuration.getEnvironmentVariable("KAFKA_SCHEMAREGISTRY_URL", DEFAULT_KAFKA_SCHEMAREGISTRY_URL));
    // Continue handling events after a deserialization exception instead of dying.
    streamsConfiguration.put(StreamsConfig.DEFAULT_DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG,
        LogAndContinueExceptionHandler.class);
    // Records will be flushed every 10 seconds.
    streamsConfiguration.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 10000);
    // Disable record caches.
    streamsConfiguration.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 0);
    return streamsConfiguration;
  }

  /**
   * KafkaProducer properties used to produce FailedMetadataChangeEvents.
   *
   * @return Properties for the failed-MCE producer
   */
  static Properties getProducerProperties() {
    final Properties producerConfig = new Properties();

    producerConfig.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG,
        Configuration.getEnvironmentVariable("KAFKA_BOOTSTRAP_SERVER", DEFAULT_KAFKA_BOOTSTRAP_SERVER));
    producerConfig.put(AbstractKafkaAvroSerDeConfig.SCHEMA_REGISTRY_URL_CONFIG,
        Configuration.getEnvironmentVariable("KAFKA_SCHEMAREGISTRY_URL", DEFAULT_KAFKA_SCHEMAREGISTRY_URL));
    producerConfig.put(ProducerConfig.CLIENT_ID_CONFIG, "failed-mce-producer");
    producerConfig.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
    producerConfig.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, GenericAvroSerializer.class);

    return producerConfig;
  }

  /**
   * Define the processing topology for the job: a single KStream over the MCE topic
   * whose every record is handed to {@link #processSingleMCE(GenericData.Record)}.
   *
   * @param builder StreamsBuilder to use
   */
  static void createProcessingTopology(final StreamsBuilder builder) {
    // Construct a `KStream` from the input topic.
    // The default key and value serdes will be used.
    final KStream<String, GenericData.Record> messages = builder.stream(
        Configuration.getEnvironmentVariable("KAFKA_TOPIC_NAME", DEFAULT_MCE_KAFKA_TOPIC_NAME)
    );
    messages.foreach((k, v) -> processSingleMCE(v));
  }

  /**
   * Process a single MCE and write it to the underlying store via the remote writer DAO.
   * On any failure (including deserialization), the event is forwarded to the failed-MCE
   * topic rather than rethrown, so the stream keeps running.
   *
   * @param record single Avro MCE message
   */
  static void processSingleMCE(final GenericData.Record record) {
    log.debug("Got MCE");
    // Pre-initialize so a conversion failure still produces a (mostly empty) failed event.
    MetadataChangeEvent event = new MetadataChangeEvent();
    try {
      event = EventUtils.avroToPegasusMCE(record);

      if (event.hasProposedSnapshot()) {
        processProposedSnapshot(event.getProposedSnapshot());
      }
    } catch (Throwable throwable) {
      // Catch Throwable deliberately: this is the stream's per-record error boundary.
      log.error("MCE Processor Error", throwable);
      log.error("Message: {}", record);
      sendFailedMCE(event, throwable);
    }
  }

  /**
   * Send a FailedMetadataChangeEvent (the original MCE plus the error stack trace)
   * to the failed-MCE Kafka topic.
   *
   * @param event the MCE that failed to process
   * @param throwable the failure cause
   */
  private static void sendFailedMCE(@Nonnull MetadataChangeEvent event, @Nonnull Throwable throwable) {
    final FailedMetadataChangeEvent failedMetadataChangeEvent = createFailedMCEEvent(event, throwable);
    try {
      final GenericRecord genericFailedMCERecord = EventUtils.pegasusToAvroFailedMCE(failedMetadataChangeEvent);
      log.debug("FailedMetadataChangeEvent: {}", failedMetadataChangeEvent);
      producer.send(new ProducerRecord<>(
          Configuration.getEnvironmentVariable("FAILED_MCE_KAFKA_TOPIC_NAME", DEFAULT_FMCE_KAFKA_TOPIC_NAME),
          genericFailedMCERecord));
    } catch (IOException e) {
      // Best-effort: log (don't rethrow) so the failure to report a failure
      // doesn't kill the stream thread.
      log.error("Error while sending FailedMetadataChangeEvent to Kafka", e);
    }
  }

  /**
   * Populate a FailedMetadataChangeEvent from an MCE and the error that killed it.
   *
   * @param event the original MCE
   * @param throwable the failure cause; its stack trace is stored in the error field
   * @return FailedMetadataChangeEvent wrapping the MCE and the stack trace
   */
  @Nonnull
  private static FailedMetadataChangeEvent createFailedMCEEvent(@Nonnull MetadataChangeEvent event, @Nonnull Throwable throwable) {
    final FailedMetadataChangeEvent fmce = new FailedMetadataChangeEvent();
    fmce.setError(ExceptionUtils.getStackTrace(throwable));
    fmce.setMetadataChangeEvent(event);
    return fmce;
  }

  /**
   * Extract the concrete snapshot and its URN from the snapshot union and write
   * them through the remote writer DAO.
   *
   * @param snapshotUnion union carrying exactly one selected snapshot type
   */
  static void processProposedSnapshot(@Nonnull Snapshot snapshotUnion) {
    final RecordTemplate snapshot = RecordUtils.getSelectedRecordTemplateFromUnion(snapshotUnion);
    final Urn urn = ModelUtils.getUrnFromSnapshotUnion(snapshotUnion);
    _remoteWriterDAO.create(urn, snapshot);
  }
}
