﻿using Confluent.Kafka;
using Confluent.Kafka.Serialization;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;

namespace log4net.extension.kafka.consumer
{
    /// <summary>
    /// Console host that consumes log4net log entries from a Kafka topic and
    /// echoes them (plus consumer lifecycle events) to the console.
    /// Uses the legacy Confluent.Kafka 0.x event-based <c>Consumer</c> API.
    /// </summary>
    class Program
    {
        // Topic the consumer subscribes to.
        static readonly string TopicName = "TestLog";

        // Broker endpoints; a readonly field so the list is allocated once,
        // not on every access as the previous expression-bodied property did.
        static readonly List<string> brokerList = new List<string> { "172.16.58.99:9092" };

        /// <summary>
        /// Builds the librdkafka configuration dictionary for the consumer.
        /// </summary>
        /// <param name="brokerList">Comma-separated broker endpoints for <c>bootstrap.servers</c>.</param>
        /// <param name="enableAutoCommit">Whether offsets are auto-committed every <c>auto.commit.interval.ms</c>.</param>
        /// <returns>Configuration suitable for the <c>Consumer</c> constructor.</returns>
        static Dictionary<string, object> constructConfig(string brokerList, bool enableAutoCommit) =>
                new Dictionary<string, object>
                {
                            { "group.id", "test-consumer" },
                            { "enable.auto.commit", enableAutoCommit },
                            { "auto.commit.interval.ms", 2000 },
                            { "statistics.interval.ms", 60000 },
                            { "bootstrap.servers", brokerList },
                            { "default.topic.config", new Dictionary<string, object>()
                                {
                                    // Start from the latest offset when no committed offset exists.
                                    { "auto.offset.reset", "latest" }
                                }
                            }
                };

        static void Main(string[] args)
        {
            Run_Consumer();
        }

        /// <summary>
        /// Runs the poll loop until the user presses Ctrl-C, printing every
        /// consumed message and consumer event to the console.
        /// </summary>
        static void Run_Consumer()
        {
            Console.WriteLine("kafka consumer ready to start.");

            using (var consumer = new Consumer<Null, Log4netLogEntity>(constructConfig(string.Join(",", brokerList), true),
                null, new LogEntityDeserializer()))
            {
                consumer.OnMessage += (_, msg)
                    => Console.WriteLine($"Topic: {msg.Topic} Partition: {msg.Partition} Offset: {msg.Offset} {msg.Value.Message}");

                consumer.OnPartitionEOF += (_, end)
                    => Console.WriteLine($"Reached end of topic {end.Topic} partition {end.Partition}, next message will be at offset {end.Offset}");

                consumer.OnError += (_, error)
                    => Console.WriteLine($"Error: {error}");

                consumer.OnConsumeError += (_, msg)
                    => Console.WriteLine($"Error consuming from topic/partition/offset {msg.Topic}/{msg.Partition}/{msg.Offset}: {msg.Error}");

                consumer.OnOffsetsCommitted += (_, commit) =>
                {
                    // BUGFIX: the original printed "Successfully committed" even
                    // when the commit had failed; success and failure are now
                    // mutually exclusive branches.
                    if (commit.Error)
                    {
                        Console.WriteLine($"Failed to commit offsets: {commit.Error}");
                    }
                    else
                    {
                        Console.WriteLine($"Successfully committed offsets: [{string.Join(", ", commit.Offsets)}]");
                    }
                };

                consumer.OnPartitionsAssigned += (_, partitions) =>
                {
                    Console.WriteLine($"Assigned partitions: [{string.Join(", ", partitions)}], member id: {consumer.MemberId}");
                    // In the 0.x API, once this handler is attached the app MUST
                    // call Assign to complete the rebalance.
                    consumer.Assign(partitions);
                };

                consumer.OnPartitionsRevoked += (_, partitions) =>
                {
                    Console.WriteLine($"Revoked partitions: [{string.Join(", ", partitions)}]");
                    // Counterpart of Assign: required to release the partitions.
                    consumer.Unassign();
                };

                consumer.OnStatistics += (_, json)
                    => Console.WriteLine($"Statistics: {json}");

                consumer.Subscribe(TopicName);

                Console.WriteLine($"Subscribed to: [{string.Join(", ", consumer.Subscription)}]");

                var cancelled = false;
                Console.CancelKeyPress += (_, e) =>
                {
                    e.Cancel = true; // prevent the process from terminating.
                    cancelled = true;
                };

                Console.WriteLine("Ctrl-C to exit.");
                while (!cancelled)
                {
                    // Poll drives all the event handlers registered above.
                    consumer.Poll(TimeSpan.FromMilliseconds(100));
                }
            }

            Console.WriteLine("kafka consumer end.");
        }
    }
}
