
namespace Stee.CAP8.MessageBus.Kafka.Admin
{
    using System;
    using System.Collections.Generic;
    using System.Linq;
    using System.Threading;
    using Confluent.Kafka;
    using NLog;
    using Stee.CAP8.MessageBus.Interfaces.Delegates;
    using Stee.CAP8.MessageBus.Interfaces.Interfaces;
    using Stee.CAP8.MessageBus.Kafka.Constants;
    using Stee.CAP8.MessageBus.Kafka.Factories;
    using Stee.CAP8.MessageBus.Kafka.Models;
    using Stee.CAP8.MessageBus.Kafka.Utility;

    /*
    Dead-letter definition
    - message timestamp is more than specified DT from current DT, OK (Added published date time)
    - consumer groups' current offset is behind expired messages' offset, Not yet ()
    
    
    Topic queue retention time
    - there is a hard limit and soft limit to the message 'deletion'
        - hard limit value of more than 1 week
        - soft limit value of just under 1 week but less than hard limit
    
    
    Topic partition offset
    - only actual consumer groups will have the offset to compare
    
    
    Processing flow
    *) create X microservices to do the following where all microservices use the same DLQ consumer group
    1) get all topics
    2) get all topic partitions
    3) get all consumer groups (to get the groups' current offset)
        - consumer groups should be fixed/known as no new groups will be created during runtime
        - create a DLQ consumer group, equivalent to actual group
    4) create a consumer using known groups and get offset position using method, IConsumer.Position(TopicPartition partition)
        - since auto commit is ENABLED, consumption as known group must be avoided
        - get all offsets of ALL consumer groups
    5) using the DLQ consumer group, assign the current offset value start consumption
        - consumption under DLQ consumer group does not affect actual group
        - use message publish timestamp & offset value to compare
    6) maintain offset of this DLQ consumer group
    
    
    Questions
    1) could a message be skipped by DLQ as message order is not guaranteed?
    yes, message 1 & 3 could meet DLQ definition while message 2 doesn't and changing this offset (offset to message 4) will skip message 2
    - disable auto commit for DLQ consumer group
    - run at 1/2/4/6/8/12 hour interval (or X hours, preferably less than 24 hours)
    - if a message does not meet the definition, stop consuming (using IConsumer.Pause() & IConsumer.Resume()) till the next interval

    2) can knowing only the topic be sufficient in determining expired messages?
    yes & no
    yes, this is enough to determine expired messages with 1 consumer per topic handling ALL messages.
    no, without knowing the partition & offset, messages are repeatedly read from 'earliest' resulting in duplicates in DLQ.
    when starting each read, service must seek messages from current consumer offset to 1st non-expired message.

    3) as multiple consumer groups can consume identical topic, if at least 2 consumer groups have expired reads,
       how are duplicated messages prevented?
    for a topic, the group with the smallest offset value will be used. this groups' offset is compared with the DLQ group offset and the
    larger value used, meaning only the latest recorded expired message is defined as the read start position.

    4) if monitor tool checks only at periodic intervals and operation consumer will simply skip late messages by changing offset,
       monitor tool when triggered will not have the correct consumer offset thus missing expired messages.
    to prevent this, operation consumer needs to have the ability to publish to DLQ topic if expired message is encountered instead of
    merely skipping late messages.

    */
    /// <summary>
    /// Tool to monitor operational topic queues and consume expired messages to a
    /// dead-letter topic queue. Messages will be consumed as a different consumer group to
    /// prevent a conflict with operational consumer groups.
    /// 
    /// Dead-letter definition:
    /// - Message timestamp is older than soft limit DT with respect to current DT (messages will be deleted after hard limit)
    /// - Consumer groups' current offset is earlier than expired messages' offset
    /// </summary>
    public class DeadLetterQueueImpl : IDeadLetterQueue
    {
        private static readonly Logger Logger = LogManager.GetCurrentClassLogger();


        #region Private Fields

        /// <summary>
        /// Broker connection configuration for admin, publisher & subscriber.
        /// Set once in Initialize and read by every client factory call
        /// </summary>
        private string brokerConfigurationJSON;

        /// <summary>
        /// Operational topic retention time before messages deemed expired.
        /// Messages older than (now - retention) meet the dead-letter definition
        /// </summary>
        private TimeSpan topicRetention;

        /// <summary>
        /// Admin client error handler supplied by the caller via Initialize
        /// </summary>
        private HandleBrokerError adminClientErrorHandler;

        /// <summary>
        /// Publisher error handler supplied by the caller via Initialize
        /// </summary>
        private HandleBrokerError publisherErrorHandler;

        /// <summary>
        /// Subscriber error handler supplied by the caller via Initialize
        /// </summary>
        private HandleBrokerError subscriberErrorHandler;

        /// <summary>
        /// Cancellation token source (note: a source, not a token, despite the name) used to
        /// abort an in-progress expired-message check. Created per CheckForExpiredMessages
        /// run, cancelled and disposed by CancelCheck
        /// </summary>
        private CancellationTokenSource cancellationToken;

        #endregion Private Fields


        #region Properties

        /// <summary>
        /// Gets the flag indicating that admin client is initialized and ready
        /// (set by Initialize, cleared on dispose)
        /// </summary>
        public bool IsInitialized { get; private set; } = false;

        /// <summary>
        /// Gets the flag indicating if admin client is disposed; once set, the
        /// instance is unusable and a new instance must be created
        /// </summary>
        public bool IsDisposed { get; private set; } = false;

        #endregion Properties


        #region Constructor

        /// <summary>
        /// Initializes a new instance of the DeadLetterQueueImpl class.
        /// All configuration is supplied later via Initialize
        /// </summary>
        public DeadLetterQueueImpl() { }

        #endregion Constructor


        #region Public Methods

        /// <summary>
        /// Initialize the dead-letter queue monitor tool
        /// </summary>
        /// <param name="brokerConfigurationJSON">JSON configuration file path</param>
        /// <param name="topicRetention">Operational topic retention time before messages deemed expired</param>
        /// <param name="adminErrorHandler">Admin client error handler</param>
        /// <param name="publishErrorHandler">Publisher error handler</param>
        /// <param name="subscribeErrorHandler">Subscriber error handler</param>
        /// <exception cref="InvalidOperationException">Thrown when the tool is already initialized</exception>
        /// <exception cref="ArgumentNullException">Thrown when any error handler is null</exception>
        /// <returns>Return TRUE if initialized else FALSE</returns>
        public bool Initialize(string brokerConfigurationJSON, TimeSpan topicRetention,
            HandleBrokerError adminErrorHandler, HandleBrokerError publishErrorHandler, HandleBrokerError subscribeErrorHandler)
        {
            if (this.IsInitialized)
            {
                throw new InvalidOperationException($"Dead-letter tool is already initialized. IsInit: {this.IsInitialized}");
            }
            else if (adminErrorHandler == null)
            {
                //// ArgumentNullException(paramName, message): the first argument is the parameter
                //// name, not the message (original passed the message text as the parameter name)
                throw new ArgumentNullException(nameof(adminErrorHandler), "Error handler for broker is null.");
            }
            else if (publishErrorHandler == null)
            {
                throw new ArgumentNullException(nameof(publishErrorHandler), "Error handler for broker is null.");
            }
            else if (subscribeErrorHandler == null)
            {
                throw new ArgumentNullException(nameof(subscribeErrorHandler), "Error handler for broker is null.");
            }

            //// CheckString is expected to validate and throw on null/empty input — TODO confirm against ValidateHelper
            this.brokerConfigurationJSON = ValidateHelper.CheckString(brokerConfigurationJSON);
            this.topicRetention = topicRetention;
            this.adminClientErrorHandler = adminErrorHandler;
            this.publisherErrorHandler = publishErrorHandler;
            this.subscriberErrorHandler = subscribeErrorHandler;

            return this.IsInitialized = true;
        }


        /// <summary>
        /// Check for & process all expired messages in all message broker topics
        /// </summary>
        /// <param name="topicSubscribeGroupMapping">Configured topic to subscriber/consumer group mapping</param>
        /// <returns>Return TRUE if processed successfully regardless of whether new expired message(s) was found else FALSE</returns>
        public bool CheckForExpiredMessages(IDictionary<string, HashSet<string>> topicSubscribeGroupMapping)
        {
            try
            {
                this.CheckDLQToolState();

                if (topicSubscribeGroupMapping?.Any() != true)
                {
                    //// ArgumentNullException(paramName, message): the first argument is the parameter
                    //// name, not the message (original passed the message text as the parameter name)
                    throw new ArgumentNullException(nameof(topicSubscribeGroupMapping), "Topic subscriber group mapping is null or empty.");
                }

                //// Dispose any token source left over from a previous completed run to
                //// avoid leaking it when a fresh source is created below
                this.cancellationToken?.Dispose();
                this.cancellationToken = new CancellationTokenSource();

                //// To get offset, the following are required:
                //// - topic
                //// - partition
                //// - consumer group
                //// - operational mapping between topic -> consumer groups to get the smallest offset of a topic partition
                //// Get operational consumer group details (topic, partition & offset)
                var consumerGroupDetails = this.GetConsumerGroupDetails(topicSubscribeGroupMapping);

                //// Get DLQ consumer group details (topic, partition & offset)
                var topics = topicSubscribeGroupMapping.Keys.ToList();
                var dlqGroupDetails = this.GetDLQGroupDetails(topics);

                this.cancellationToken.Token.ThrowIfCancellationRequested();

                //// ***Note: DLQ must take the smallest operational offset across all consumers but
                ////          the latest offset between operational & DLQ consumer groups (both consume the same topic).
                ////          If this rule is not satisfied, DLQ topic will have message duplication.
                //// Process operation topics for dead-letter messages
                this.GetExpiredMessages(consumerGroupDetails, dlqGroupDetails);

                return true;
            }
            catch (OperationCanceledException ex)
            {
                //// Cancellation requested via CancelCheck(), informational only
                Logger.Info(ex, ex.Message);
                return false;
            }
            catch (KafkaException ex)
            {
                //// Broker-side failure is logged and reported via the return value
                Logger.Error(ex, $"{ex.Error.Reason}: {ex.Message}");
                return false;
            }
            catch (Exception ex)
            {
                //// Unexpected failure: log and let the caller decide
                Logger.Error(ex, ex.Message);
                throw;
            }
        }

        /// <summary>
        /// Get the expired messages using cut-off time and largest offset of topic partition between consumer & DLQ groups
        /// </summary>
        /// <param name="consumerGroupDetails">Consumer group details (at most one entry per topic, see GetConsumerGroupDetails)</param>
        /// <param name="dlqGroupDetails">DLQ group details. The DLQ group count may be smaller than consumer group count</param>
        private void GetExpiredMessages(IList<GroupDetail> consumerGroupDetails, IList<GroupDetail> dlqGroupDetails)
        {
            //// Get the cut-off date time, messages earlier or equal to this are expired.
            //// Must be UTC: Consume() compares against Timestamp.UtcDateTime, so using
            //// DateTime.Now (local) would skew the cut-off by the machine's UTC offset
            var cutoffDT = DateTime.UtcNow.Subtract(this.topicRetention);
            var offsetHashset = new HashSet<TopicPartitionOffset>();

            foreach (var groupDetail in consumerGroupDetails)
            {
                //// groupDetail IS the operational entry for its topic (the list holds one entry
                //// per topic), so the original re-lookup of the same list was redundant
                var dlqDetail = dlqGroupDetails
                    .FirstOrDefault(x => ValidateHelper.StringEquals(x.TopicName, groupDetail.TopicName));

                //// Resume from the later of the operational & DLQ offsets to avoid duplicates in the DLQ topic
                var largerOffset = (dlqDetail == null) ?
                    groupDetail.Offset : Math.Max(groupDetail.Offset, dlqDetail.Offset);

                offsetHashset.Add(new TopicPartitionOffset(groupDetail.TopicName, groupDetail.PartitionID, largerOffset));
            }

            this.cancellationToken.Token.ThrowIfCancellationRequested();

            //// Create subscriber (auto commit disabled so offsets advance only on explicit Commit)
            using (var dlqSubscriber = SubscriberFactory.CreateNewSubscriber(
                this.brokerConfigurationJSON, ConfigurationConstants.DLQConsumerGroup, false, this.SubscriberErrorHandler))
            {
                //// Consume messages for each detail
                foreach (var offsetDetail in offsetHashset)
                {
                    dlqSubscriber.Assign(offsetDetail);
                    var isStopCheck = false;

                    this.cancellationToken.Token.ThrowIfCancellationRequested();

                    //// Consume until the first non-expired message or timeout
                    while (!isStopCheck)
                    {
                        var tuple = this.Consume(dlqSubscriber, cutoffDT, out isStopCheck);  //// Blocking consume, not an error

                        if (tuple?.Item1 == true)
                        {
                            //// Generate DLQ topic name and produce
                            var dlqTopic = ConfigurationHelper.GenerateDLQTopicName(offsetDetail.Topic);

                            if (this.Produce(dlqTopic, tuple.Item2.Message))
                            {
                                //// Commit only after the message is safely in the DLQ topic
                                Commit(dlqSubscriber, tuple.Item2);
                            }
                            else
                            {
                                //// Produce DLQ message fail; offset not committed so the message is retried next interval
                                var msg = tuple.Item2;
                                Logger.Error($"Publish dead-letter message fail for topic. Topic: {msg.Topic}, Partition: {msg.Partition}, Offset: {msg.Offset}");
                            }
                        }
                        else
                        {
                            //// No expired message (timeout, partition EOF, or first non-expired message)
                            Logger.Info($"Non-expired message consumed. Stopping check on topic: {offsetDetail.Topic}");
                        }
                    }
                }
            }
        }

        /// <summary>
        /// Cancel an in-progress check for new expired messages.
        /// Safe to call when no check is running
        /// </summary>
        /// <returns>Return TRUE if cancelled successfully else FALSE</returns>
        public bool CancelCheck()
        {
            try
            {
                var tokenSource = this.cancellationToken;

                //// Signal and release the token source only when one exists and has not
                //// already been cancelled
                if (tokenSource?.IsCancellationRequested == false)
                {
                    tokenSource.Cancel();
                    tokenSource.Dispose();
                }

                this.cancellationToken = null;
                return true;
            }
            catch (Exception ex)
            {
                Logger.Error(ex, ex.Message);
                throw;
            }
        }

        /// <summary>
        /// Implement IDisposable.
        /// Releases managed resources via Dispose(true), then suppresses finalization
        /// per the standard dispose pattern
        /// </summary>
        public void Dispose()
        {
            this.Dispose(true);
            GC.SuppressFinalize(this);
        }

        #endregion Public Methods


        #region Private Methods

        /// <summary>
        /// Consume/poll for messages from assigned offset
        /// </summary>
        /// <param name="dlqSubscriber">Subscriber with assigned offset</param>
        /// <param name="cutoffDT">Cut-off date time (UTC) for comparison where
        /// if the message timestamp is earlier or equal, message has expired</param>
        /// <param name="isStopCheck">Flag indicating to stop checking/consuming</param>
        /// <returns>Return tuple indicating if message is expired and Kafka message data,
        /// or NULL on consume timeout / partition EOF</returns>
        private Tuple<bool, ConsumeResult<string, byte[]>> Consume(IConsumer<string, byte[]> dlqSubscriber, DateTime cutoffDT, out bool isStopCheck)
        {
            isStopCheck = false;

            if (dlqSubscriber == null)
            {
                throw new ArgumentNullException(nameof(dlqSubscriber));
            }

            //// Start consuming from assigned offset.
            //// If consumer offset has 0 offset latency/lag, consume call will block. Set timeout to stop consuming.
            var consumeResult = dlqSubscriber.Consume(TimeSpan.FromSeconds(ConfigurationConstants.AdminQueryTimeoutSeconds));

            if (consumeResult == null)
            {
                //// Consume timeout.
                //// The consumer is Assign()ed, not Subscribe()d, so its partition is in
                //// Assignment; Subscription (used originally) is empty and logged nothing
                Logger.Info($"DLQ consumer has timed-out. Topic: {dlqSubscriber.Assignment.FirstOrDefault()}");
                isStopCheck = true;
                return null;
            }
            else if (consumeResult.IsPartitionEOF)
            {
                //// Reached end of topic X in partition Y at offset Z.
                //// No more messages in this partition — stop now instead of looping into
                //// another blocking consume that can only time out
                Logger.Info($"Reached end of topic: {consumeResult.Topic}, Partition: {consumeResult.Partition}, Offset: {consumeResult.Offset}.");
                isStopCheck = true;
                return null;
            }

            //// Check DLQ definition, offset & timestamp
            var utcMessageDT = consumeResult.Message.Timestamp.UtcDateTime;
            var compare = utcMessageDT.CompareTo(cutoffDT);

            if (compare <= 0)
            {
                //// Message timestamp is earlier than or equal to the cut-off, Expired
                //// Publish message to respective DLQ topic
                return new Tuple<bool, ConsumeResult<string, byte[]>>(true, consumeResult);
            }
            else
            {
                //// Message timestamp is later than the cut-off, Not expired
                //// Stop consuming/checking for this topic partition
                isStopCheck = true;
                return new Tuple<bool, ConsumeResult<string, byte[]>>(false, null);
            }
        }

        /// <summary>
        /// Publish Kafka message to specific topic
        /// </summary>
        /// <param name="dlqTopicName">DLQ message topic</param>
        /// <param name="message">Kafka message to publish</param>
        /// <exception cref="ArgumentNullException">Thrown when message is null</exception>
        /// <returns>Return TRUE if published successfully else FALSE</returns>
        private bool Produce(string dlqTopicName, Message<string, byte[]> message)
        {
            var topic = ValidateHelper.CheckString(dlqTopicName);

            if (message == null)
            {
                throw new ArgumentNullException(nameof(message));
            }

            var isDelivered = false;

            //// Publish to DLQ topic
            using (var producer = PublisherFactory.CreateNewPublisher(this.brokerConfigurationJSON, this.PublishErrorHandler))
            {
                producer.Produce(topic, message, report =>
                {
                    this.HandleDeliveryReport(report);
                    isDelivered = report?.Error?.Code == ErrorCode.NoError;
                });

                //// Produce() is asynchronous: flush so the delivery report arrives before the
                //// producer is disposed. The original returned TRUE unconditionally, which made
                //// the caller commit offsets for messages that may never have been delivered
                producer.Flush(TimeSpan.FromSeconds(ConfigurationConstants.AdminQueryTimeoutSeconds));
            }

            return isDelivered;
        }

        /// <summary>
        /// Handle delivery report callback.
        /// Callback for synchronous publish.
        /// </summary>
        /// <param name="report">Delivery report</param>
        private void HandleDeliveryReport(DeliveryReport<string, byte[]> report)
        {
            //// A missing report means the broker never acknowledged the publish
            if (report == null)
            {
                Logger.Error("Publish DLQ message failed, no delivery report returned from broker.");
                return;
            }

            //// Anything other than an explicit NoError code (including a null Error) is a failure
            if (report.Error?.Code != ErrorCode.NoError)
            {
                Logger.Error($"Publish DLQ message failed. Topic: {report.Topic}, Key: {report.Message.Key}");
                return;
            }

            Logger.Info($"DLQ message published successfully. Topic: {report.Topic}");
        }

        /// <summary>
        /// Handler for admin client error events e.g. connection failures or all brokers down.
        /// Note that the client will try to automatically recover from errors that are not marked as fatal.
        /// Non-fatal errors should be interpreted as informational rather than catastrophic.
        /// </summary>
        /// <param name="admin">Admin client instance</param>
        /// <param name="error">Kafka error</param>
        private void AdminClientErrorHandler(IAdminClient admin, Error error)
        {
            //// Forward the broker error to the caller-supplied delegate, if one was registered
            var handler = this.adminClientErrorHandler;

            if (handler != null)
            {
                handler(error.Code.ToString(), error.Reason, error.IsFatal);
            }
        }

        /// <summary>
        /// Handler for producer error events e.g. connection failures or all brokers down.
        /// Note that the client will try to automatically recover from errors that are not marked as fatal.
        /// Non-fatal errors should be interpreted as informational rather than catastrophic.
        /// </summary>
        /// <param name="iproducer">Producer instance</param>
        /// <param name="error">Kafka error</param>
        private void PublishErrorHandler(IProducer<string, byte[]> iproducer, Error error)
        {
            //// Forward the broker error to the caller-supplied delegate, if one was registered
            var handler = this.publisherErrorHandler;

            if (handler != null)
            {
                handler(error.Code.ToString(), error.Reason, error.IsFatal);
            }
        }

        /// <summary>
        /// Handler for consumer error events e.g. connection failures or all brokers down.
        /// Note that the client will try to automatically recover from errors that are not marked as fatal.
        /// Non-fatal errors should be interpreted as informational rather than catastrophic.
        /// </summary>
        /// <param name="consumer">Consumer instance</param>
        /// <param name="error">Kafka error</param>
        private void SubscriberErrorHandler(IConsumer<string, byte[]> consumer, Error error)
        {
            //// Forward the broker error to the caller-supplied delegate, if one was registered
            var handler = this.subscriberErrorHandler;

            if (handler != null)
            {
                handler(error.Code.ToString(), error.Reason, error.IsFatal);
            }
        }

        /// <summary>
        /// Check current DLQ tool state
        /// </summary>
        /// <exception cref="InvalidOperationException">Thrown when the tool is not initialized</exception>
        /// <exception cref="ObjectDisposedException">Thrown when the tool has been disposed</exception>
        private void CheckDLQToolState()
        {
            if (!this.IsInitialized)
            {
                throw new InvalidOperationException($"Dead-letter tool is not initialized. IsInit: {this.IsInitialized}");
            }
            else if (this.IsDisposed)
            {
                Logger.Error("Dead-letter tool have been disposed, create new instance required.");
                //// ObjectDisposedException is the conventional exception for use-after-dispose;
                //// TypeInitializationException (used originally) is reserved by the runtime for
                //// static-constructor failures and was misleading here
                throw new ObjectDisposedException(typeof(DeadLetterQueueImpl).FullName);
            }
        }

        /// <summary>
        /// Dispose
        /// </summary>
        /// <param name="disposing">If disposing equals true, the method has been called directly
        /// or indirectly by a user's code. Managed and unmanaged resources can be disposed.
        /// If disposing equals false, the method has been called by the runtime from inside the
        /// finalizer and you should not reference other objects. Only unmanaged resources can be disposed.</param>
        protected virtual void Dispose(bool disposing)
        {
            try
            {
                if (!this.IsDisposed && disposing)
                {
                    //// Abort any in-progress check and release the cancellation token source
                    this.CancelCheck();
                }
            }
            catch (Exception ex)
            {
                //// Dispose must not throw (the original rethrew here, which can crash callers
                //// in using-blocks and finally-paths); log and continue tear-down
                Logger.Info(ex, ex.Message);
            }
            finally
            {
                //// Mark disposed even if cancellation failed so the instance cannot be reused
                this.IsDisposed = true;
                this.IsInitialized = false;
            }
        }


        #region Dead Letter Operation

        /// <summary>
        /// Get active operational consumer group details.
        /// For each configured topic, resolves the committed offset of every consumer group
        /// and keeps only the group detail with the smallest valid offset
        /// </summary>
        /// <param name="topicSubscribeGroupMapping">Configured topic to subscriber group mapping</param>
        /// <returns>Return operational consumer group details (at most one entry per topic)</returns>
        private IList<GroupDetail> GetConsumerGroupDetails(IDictionary<string, HashSet<string>> topicSubscribeGroupMapping)
        {
            var consumerGroupDetails = new List<GroupDetail>();

            //// Get all topics & related partitions
            var topicList = this.GetAllTopicsMetadata();

            if (topicList?.Any() == true)
            {
                //// Loop through configuration mapping to get consumers' offsets
                foreach (var mapping in topicSubscribeGroupMapping)
                {
                    //// Get topic & consumer groups
                    var topicName = mapping.Key;
                    var consumerGroups = mapping.Value;
                    var groupDetails = new List<GroupDetail>();

                    //// The topic lookup does not depend on the group; resolve it once per
                    //// topic instead of once per group (hoisted out of the inner loop)
                    var topicDetail = topicList.FirstOrDefault(x => ValidateHelper.StringEquals(x.TopicName, topicName));

                    if (topicDetail == null)
                    {
                        //// Topic not found in message broker
                        Logger.Error($"Failed to get consumer offset. Topic \"{topicName}\" not found. ");
                    }
                    else
                    {
                        //// Find each consumer group's committed offset
                        foreach (var group in consumerGroups)
                        {
                            groupDetails.AddRange(this.BuildConsumerGroupDetails(topicDetail, group));
                        }
                    }

                    //// Find smallest/earliest offset in topic partition.
                    //// Negative offsets (e.g. Offset.Unset == -1001, no committed offset) are
                    //// excluded as the original comment intended — without the filter they
                    //// always sort first and would be selected
                    var smallestOffset = groupDetails
                        .Where(x => x.Offset >= 0)
                        .OrderBy(x => x.Offset)
                        .FirstOrDefault();

                    if (smallestOffset != null)
                    {
                        consumerGroupDetails.Add(smallestOffset);
                    }
                }
            }

            return consumerGroupDetails;
        }

        /// <summary>
        /// Get active DLQ consumer group details for every configured topic
        /// </summary>
        /// <param name="topics">Configured topics</param>
        /// <returns>Return active DLQ consumer group details</returns>
        private IList<GroupDetail> GetDLQGroupDetails(IList<string> topics)
        {
            var dlqGroupDetails = new List<GroupDetail>();

            //// Get all topics & related partitions from the broker
            var topicList = this.GetAllTopicsMetadata();

            if (topicList == null || !topicList.Any())
            {
                //// No broker metadata available, nothing to resolve
                return dlqGroupDetails;
            }

            //// Build DLQ group details per configured topic
            foreach (var topicName in topics)
            {
                var topicDetail = topicList.FirstOrDefault(x => ValidateHelper.StringEquals(x.TopicName, topicName));

                if (topicDetail != null)
                {
                    dlqGroupDetails.AddRange(this.BuildConsumerGroupDetails(topicDetail, ConfigurationConstants.DLQConsumerGroup));
                }
                else
                {
                    //// Topic not found in message broker
                    Logger.Error($"Failed to get consumer offset. Topic \"{topicName}\" not found. ");
                }
            }

            return dlqGroupDetails;
        }

        /// <summary>
        /// Get all topics' metadata
        /// </summary>
        /// <returns>Return collection of topic metadata; empty collection on broker (Kafka) error</returns>
        private IList<TopicDetail> GetAllTopicsMetadata()
        {
            try
            {
                //// GetMetadata behaviour
                //// =====================
                //// If auto-create topic is enabled, get method will will create the non-existent topic.
                //// If auto-create topic is disabled, get method will return an empty topic list if
                //// specified does not exist and list of 1 if specified topic exist.
                //// Topic name will NOT be treated as substring where "topic1" will return topics: "topic1" & "topic1_dlq"

                //// Query the broker through a short-lived admin client
                using (var adminClient = AdminClientFactory.CreateNewAdminClient(this.brokerConfigurationJSON, this.AdminClientErrorHandler))
                {
                    var queryTimeout = TimeSpan.FromSeconds(ConfigurationConstants.AdminQueryTimeoutSeconds);
                    var metadata = adminClient.GetMetadata(queryTimeout);
                    return ExtractTopics(metadata);
                }
            }
            catch (KafkaException ex)
            {
                //// Broker error is non-fatal here; report an empty topic list
                Logger.Error(ex, ex.Message);
                return new List<TopicDetail>();
            }
            catch (Exception ex)
            {
                Logger.Error(ex, ex.Message);
                throw;
            }
        }

        /// <summary>
        /// Build collection of consumer group offset details, one per topic partition
        /// </summary>
        /// <param name="topicDetail">Topic details</param>
        /// <param name="group">Consumer group name</param>
        /// <returns>Return collection of consumer offsets</returns>
        private IList<GroupDetail> BuildConsumerGroupDetails(TopicDetail topicDetail, string group)
        {
            //// Partitions without a resolvable committed offset yield null and are dropped
            return topicDetail.GetAllPartitions()
                .Select(partitionID => this.GetConsumerGroupDetail(topicDetail, group, partitionID))
                .Where(detail => detail != null)
                .ToList();
        }

        /// <summary>
        /// Get details about consumer group for specific topic & partition by querying
        /// the group's committed offset
        /// </summary>
        /// <param name="topicDetail">Topic detail</param>
        /// <param name="group">Consumer group name</param>
        /// <param name="partitionID">Partition ID</param>
        /// <returns>Return consumer group details or NULL if no committed offset was found</returns>
        private GroupDetail GetConsumerGroupDetail(TopicDetail topicDetail, string group, int partitionID)
        {
            var groupDetail = default(GroupDetail);

            //// Get offset value
            using (var subscriber =
                SubscriberFactory.CreateNewSubscriber(this.brokerConfigurationJSON, group, null, this.SubscriberErrorHandler))
            {
                var partition = new Partition(partitionID);
                var topicPartition = new TopicPartition(topicDetail.TopicName, partition);
                //// Use the shared admin query timeout constant; the original hard-coded 15s here,
                //// inconsistent with every other broker query in this class
                var offset = subscriber.Committed(
                    new TopicPartition[] { topicPartition }, TimeSpan.FromSeconds(ConfigurationConstants.AdminQueryTimeoutSeconds));
                ////var offset = subscriber.Position(topicPartition); // offset value is always -1001

                if (offset?.Any() == true)
                {
                    var value = offset.First().Offset.Value;
                    Logger.Info($"Offset value, {value}, found for topic: {topicDetail.TopicName}, consumer group: {group}, partition: {partitionID}.");
                    groupDetail = new GroupDetail(topicDetail.TopicName, group, partitionID, value);
                }
                else
                {
                    //// Offset not found
                    Logger.Warn($"Offset not found for topic: {topicDetail.TopicName}, consumer group: {group}, partition: {partitionID}.");
                }
            }

            return groupDetail;
        }

        #endregion Dead Letter Operation

        #endregion Private Methods


        #region Static Methods

        /// <summary>
        /// Commit the offset of a consumed Kafka message under the subscriber's consumer group
        /// </summary>
        /// <param name="dlqSubscriber">Subscriber that consumed the message</param>
        /// <param name="consumeResult">Consume result whose offset is committed</param>
        /// <exception cref="ArgumentNullException">Thrown when either argument is null</exception>
        private static void Commit(IConsumer<string, byte[]> dlqSubscriber, ConsumeResult<string, byte[]> consumeResult)
        {
            if (dlqSubscriber == null)
            {
                throw new ArgumentNullException(nameof(dlqSubscriber));
            }

            if (consumeResult == null)
            {
                throw new ArgumentNullException(nameof(consumeResult));
            }

            dlqSubscriber.Commit(consumeResult);
        }

        /// <summary>
        /// Extract all topic details
        /// </summary>
        /// <param name="metadata">Broker metadata</param>
        /// <returns>Return collection of topic details; empty collection when no metadata/topics</returns>
        private static IList<TopicDetail> ExtractTopics(Metadata metadata)
        {
            //// Guard explicitly against a null topic list: the original check
            //// `metadata.Topics?.Count < 1` evaluates to FALSE when Topics is null
            //// (lifted `null < 1` is false), falling through to the foreach and
            //// throwing NullReferenceException
            if (metadata?.Topics == null || metadata.Topics.Count < 1)
            {
                //// No metadata, possibly broker error or no topic found
                return new List<TopicDetail>();
            }

            var details = new List<TopicDetail>();

            foreach (var topicMetadata in metadata.Topics)
            {
                var topicDetail = ExtractTopic(topicMetadata);

                //// Topics with errors extract to null and are skipped
                if (topicDetail != null)
                {
                    details.Add(topicDetail);
                }
            }

            return details;
        }

        /// <summary>
        /// Extract a single topic's details from its broker metadata.
        /// </summary>
        /// <param name="topicMetadata">Topic metadata</param>
        /// <returns>Return topic detail if no errors encountered else NULL</returns>
        private static TopicDetail ExtractTopic(TopicMetadata topicMetadata)
        {
            //// Nothing to extract from a missing entry.
            if (topicMetadata == null)
            {
                return null;
            }

            //// Topics the broker reports as faulted are logged and excluded.
            if (topicMetadata?.Error?.IsError == true)
            {
                //// Log topic error
                Logger.Error($"Error found in topic. Topic {topicMetadata?.Topic}, Error: {topicMetadata?.Error} ");
                return null;
            }

            var detail = new TopicDetail(topicMetadata.Topic);
            ExtractPartitionIDs(detail, topicMetadata.Partitions);
            return detail;
        }

        /// <summary>
        /// Extract healthy partition IDs from the partition metadata into the topic detail.
        /// </summary>
        /// <param name="topicDetail">Topic detail</param>
        /// <param name="partitions">List of partition metadata</param>
        private static void ExtractPartitionIDs(TopicDetail topicDetail, List<PartitionMetadata> partitions)
        {
            if (topicDetail == null)
            {
                throw new ArgumentNullException(nameof(topicDetail));
            }

            if (partitions == null)
            {
                throw new ArgumentNullException(nameof(partitions));
            }

            foreach (var partitionMetadata in partitions)
            {
                //// A null entry carries no partition ID; note it and continue.
                if (partitionMetadata == null)
                {
                    Logger.Warn($"Null partition data encountered.");
                    continue;
                }

                //// Faulted partitions are logged and excluded from the detail.
                if (partitionMetadata?.Error?.IsError == true)
                {
                    //// Log partition error
                    Logger.Warn($"Error found in topic partition. Topic {topicDetail.TopicName}, Partition: {partitionMetadata.PartitionId}, Error: {partitionMetadata.Error} ");
                    continue;
                }

                topicDetail.AddPartitionID(partitionMetadata.PartitionId);
            }
        }

        /// <summary>
        /// Extract the set of unique, error-free consumer group names.
        /// </summary>
        /// <param name="groups">Kafka consumer group</param>
        /// <returns>Return a collection of unique groups</returns>
        private static HashSet<string> ExtractGroups(IList<GroupInfo> groups)
        {
            if (groups == null)
            {
                throw new ArgumentNullException(nameof(groups));
            }

            //// HashSet de-duplicates group names automatically.
            var uniqueGroups = new HashSet<string>();

            foreach (var groupInfo in groups)
            {
                //// A null entry has no group name; note it and continue.
                if (groupInfo == null)
                {
                    Logger.Warn($"Null group info encountered.");
                    continue;
                }

                //// Faulted groups are logged and excluded from the result.
                if (groupInfo?.Error?.IsError == true)
                {
                    //// Log group error
                    Logger.Warn($"Error found in consumer group. Group {groupInfo.Group}, Error: {groupInfo.Error}");
                    continue;
                }

                uniqueGroups.Add(groupInfo.Group);
            }

            return uniqueGroups;
        }

        #endregion Static Methods
    }
}
