﻿using Confluent.Kafka;
using KafkaUtil.Consumers;
using KafkaUtil.Integration;
using KafkaUtil.Interface;
using KafkaUtil.Logger;
using Microsoft.Extensions.Logging;
using System;
using System.Threading;
using System.Threading.Tasks;

namespace KafkaUtil
{
    /// <summary>
    /// Demonstration driver for the KafkaUtil wrappers: producing messages,
    /// routing <see cref="ILogger"/> output through Kafka, and consuming with
    /// manual watermark queries. The example entry points are wired up
    /// (commented out) in <see cref="Main"/> — uncomment one to run it.
    /// </summary>
    class Program
    {
        // Broker endpoint(s) shared by every example below.
        public static string[] hosts = new string[] { "10.170.1.234:9094" };

        // Separate wrapper instances for the producing and consuming sides.
        public static Kafka producerKafkaUtil = new Kafka();
        public static Kafka consumerKafkaUtil = new Kafka();

        // Written by the consumer callback thread and polled by the loop in
        // consumerExample(); volatile so that loop observes the assignment promptly.
        public static volatile TopicPartition topicPartition = null;

        // NOTE(review): written on the consumer callback thread and read on the
        // polling/watermark threads without synchronization. 'long' cannot be
        // declared volatile, so a torn read is possible on a 32-bit runtime.
        // Acceptable for a demo; use Interlocked.Read/Exchange in production.
        public static long offset = 0;

        static void Main(string[] args)
        {
            //Console.WriteLine("Hello World!");

            Console.WriteLine("begin");

            // Uncomment one or more examples to run them on background threads.
            //new Thread(producerExample).Start();
            //new Thread(loggerExample).Start();
            //new Thread(consumerExample).Start();

            //new Thread(producer).Start();
            //new Thread(consumer).Start();

            Console.WriteLine("end");
        }

        /// <summary>
        /// Publishes a timestamped message through the KafkaUtil producer wrapper
        /// in a tight loop (1 ms pause between sends). Runs forever.
        /// </summary>
        public static void producerExample()
        {
            KafkaProducerOptions producerOptions = new KafkaProducerOptions
            {
                BootstrapServers = hosts,
                InitializeCount = 3,
                Key = "kafka",
                Topic = "testysw"
            };
            IClientProducer producer = producerKafkaUtil.GetProducer(producerOptions);
            while (true)
            {
                //Thread.Sleep(1 * 1000);
                Thread.Sleep(1);
                // Payload prefix "消息" ("message") kept as-is: it is runtime data.
                string msg = "消息" + DateTime.Now;
                //Console.WriteLine("Publish msg:" + msg);
                producer.Publish(msg);
                //producer.PublishAsync(msg);
            }
        }

        /// <summary>
        /// Obtains an <see cref="ILogger"/> backed by Kafka and emits a warning
        /// once per second. Runs forever. Levels below
        /// <see cref="KafkaLoggerOptions"/>.MinLevel (Warning here) are dropped.
        /// </summary>
        public static void loggerExample()
        {
            KafkaLoggerOptions loggerOptions = new KafkaLoggerOptions
            {
                BootstrapServers = hosts,
                Category = "Home",
                InitializeCount = 10,
                Key = "log",
                MinLevel = LogLevel.Warning,
                Topic = "testyswlog",
                ApplicationName = "kafka.log"
            };
            ILogger logger = producerKafkaUtil.GetLogger(loggerOptions);

            // Change the configuration at runtime:
            //loggerOptions.MinLevel = LogLevel.Error;
            //producerKafkaUtil.SetLoggerServicesOptions(loggerOptions);

            while (true)
            {
                Thread.Sleep(1 * 1000);
                // Payload prefix "日志" ("log") kept as-is: it is runtime data.
                string log = "日志" + DateTime.Now;

                //Console.WriteLine("log log:" + log);
                logger.LogWarning($"logger1(LogWarning):{log}");

                //logger.LogTrace($"logger1(LogTrace):{log}");
                //logger.LogDebug($"logger1(LogDebug):{log}");
                //logger.LogInformation($"logger1(LogInformation):{log}");
                //logger.LogWarning($"logger1(LogWarning):{log}");
                //logger.LogError($"logger1(LogError):{log}");
                //logger.LogCritical($"logger1(LogCritical):{log}");
            }
        }

        /// <summary>
        /// Starts a consumer (manual commit) whose callback records the latest
        /// topic/partition and offset, then — once the first message has been
        /// seen — fires a single watermark query for that partition.
        /// Runs forever; the returned tasks are intentionally not awaited
        /// (NOTE(review): any exception they throw goes unobserved).
        /// </summary>
        public static void consumerExample()
        {
            KafkaConsumerOptions consumerOptions = new KafkaConsumerOptions
            {
                BootstrapServers = hosts,
                EnableAutoCommit = false,
                GroupId = "group.1",
                Subscribers = new KafkaSubscriber[] { new KafkaSubscriber() { Topic = "testysw" } }
            };
            DefaultKafkaConsumerProvider consumerProvider = consumerKafkaUtil.GetConsumer(consumerOptions, callback);
            //consumerProvider.OnMessageRecieved += callback;
            Task task = consumerProvider.ListenAsync();
            Task task1 = null;
            while (true)
            {
                // Wait until callback() has captured a partition, then query once.
                if (topicPartition != null && task1 == null)
                {
                    task1 = consumerProvider.QueryWatermarkOffsetsAsync(topicPartition, 1000, callback1);
                }
                Thread.Sleep(100);
            }
        }

        /// <summary>
        /// Watermark callback: prints the last consumed offset alongside the
        /// partition's low/high watermarks.
        /// </summary>
        public static void callback1(TopicPartition topicPartition, WatermarkOffsets watermarkOffsets)
        {
            // Fixed output typo: "hight" -> "high".
            Console.WriteLine("offset = " + offset + " low = " + watermarkOffsets.Low + " high = " + watermarkOffsets.High);
        }

        /// <summary>
        /// Message callback: records where the latest message came from so
        /// consumerExample() can issue its watermark query.
        /// </summary>
        public static void callback(RecieveResult recieveResult)
        {
            topicPartition = recieveResult.TopicPartition;
            offset = recieveResult.Offset;

            //Console.WriteLine("callback KafkaConsumerListener:" + recieveResult.Message);
        }

        //public static void producer()
        //{
        //    //Method 1 (raw Confluent.Kafka builder API)
        //    Dictionary<string, string> config = new Dictionary<string, string>();
        //    config["bootstrap.servers"] = "10.170.1.234:9094";
        //    var builder = new ProducerBuilder<string, object>(config);
        //    builder.SetValueSerializer(new KafkaConverter());//set the value serializer
        //    var producer = builder.Build();
        //    while (true)
        //    {
        //        Thread.Sleep(3 * 1000);
        //        string msg = "hello world!" + DateTime.Now;
        //        Console.WriteLine($"send message:" + msg);
        //        producer.Produce("testysw", new Message<string, object>() { Key = "testysw", Value = msg });
        //    }
        //    Console.ReadKey();

        //    //Method 2 (ProducerConfig + delivery-report handler)
        //    //var config = new ProducerConfig { BootstrapServers = "10.170.1.234:9094" };
        //    //Action<DeliveryReport<Null, string>> handler = r => 
        //    //    Console.WriteLine(!r.Error.IsError ? $"Delivered message to {r.TopicPartitionOffset}" : $"Delivery Error: {r.Error.Reason}");
        //    //using (var p = new ProducerBuilder<Null, string>(config).Build())
        //    //{
        //    //    try
        //    //    {
        //    //        for (var i = 1; i <= 10000; i++)
        //    //        {
        //    //Thread.Sleep(3 * 1000);
        //    //            p.Produce("testysw", new Message<Null, string> { Value = $"my message: {i}" }, handler);
        //    //        }

        //    //        p.Flush(TimeSpan.FromSeconds(10));
        //    //    }
        //    //    catch (ProduceException<Null, string> e)
        //    //    {
        //    //        Console.WriteLine($"Delivery failed: {e.Error.Reason}");
        //    //    }
        //    //}
        //    //Console.WriteLine("Done!");
        //    //Console.ReadKey();
        //}

        //public static void consumer()
        //{
        //    //Method 1 (raw Confluent.Kafka builder API, manual commit)
        //    ConsumerConfig config = new ConsumerConfig();
        //    config.BootstrapServers = "10.170.1.234:9094";
        //    config.GroupId = "test-consumer-group";
        //    config.AutoOffsetReset = AutoOffsetReset.Earliest;
        //    config.EnableAutoCommit = false;

        //    var builder = new ConsumerBuilder<string, object>(config);
        //    builder.SetValueDeserializer(new KafkaConverter());//set the value deserializer
        //    var consumer = builder.Build();
        //    consumer.Subscribe("testysw");//use Subscribe to subscribe to a topic
        //    //consumer.Assign(new TopicPartition("test", new Partition(1)));//use Assign to consume from a specific partition

        //    while (true)
        //    {
        //        var result = consumer.Consume();
        //        Console.WriteLine($"recieve message:{result.Message.Value}");
        //        consumer.Commit(result);//manual commit; unnecessary when EnableAutoCommit = true above
        //    }

        //    //Method 2 (auto-commit with Ctrl+C cancellation)
        //    //var conf = new ConsumerConfig
        //    //{
        //    //    GroupId = "test-consumer-group",
        //    //    BootstrapServers = "10.170.1.234:9094",
        //    //    AutoOffsetReset = AutoOffsetReset.Earliest,
        //    //    AutoCommitIntervalMs = 5000,
        //    //    EnableAutoCommit = true
        //    //};

        //    //using (var c = new ConsumerBuilder<Ignore, string>(conf).Build())
        //    //{
        //    //    c.Subscribe("testysw");
        //    //    CancellationTokenSource cts = new CancellationTokenSource();
        //    //    Console.CancelKeyPress += (_, e) => {
        //    //        e.Cancel = true; // prevent the process from terminating.
        //    //        cts.Cancel();
        //    //    };

        //    //    try
        //    //    {
        //    //        while (true)
        //    //        {
        //    //            try
        //    //            {
        //    //                var consumeResult = c.Consume(cts.Token);
        //    //                Console.WriteLine($"Consumed message '{consumeResult.Message.Value}' at: '{consumeResult.TopicPartitionOffset}'.");
        //    //            }
        //    //            catch (ConsumeException e)
        //    //            {
        //    //                Console.WriteLine($"Error occured: {e.Error.Reason}");
        //    //            }
        //    //        }
        //    //    }
        //    //    catch (OperationCanceledException)
        //    //    {
        //    //        // Ensure the consumer leaves the group cleanly and final offsets are committed.
        //    //        c.Close();
        //    //    }
        //    //}
        //}
    }
}
