﻿using Confluent.Kafka;
using Confluent.Kafka.SyncOverAsync;
using Confluent.SchemaRegistry;
using Confluent.SchemaRegistry.Serdes;
using kafkalearn.ProducerTest1.AvroSpecific;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading;
using System.Threading.Tasks;

namespace ConsumerTest1
{
    /// <summary>
    /// Consumes Avro-encoded <see cref="User"/> messages from a Kafka topic, deserializing
    /// them with schemas fetched from a Confluent Schema Registry. Auto-commit is disabled;
    /// offsets are committed manually every <c>commitPeriod</c> messages.
    /// </summary>
    public class AvroSpecificConsumer : IConsumer
    {
        private readonly string _schemaUri;
        private readonly string _topic;

        /// <summary>
        /// Creates the consumer.
        /// </summary>
        /// <param name="schemaUri">Base URL of the Confluent Schema Registry.</param>
        /// <param name="topic">Topic to subscribe to.</param>
        public AvroSpecificConsumer(string schemaUri, string topic)
        {
            _topic = topic;
            _schemaUri = schemaUri;
        }

        /// <summary>
        /// Runs the consume loop until <paramref name="token"/> is cancelled.
        /// Note: the loop runs synchronously on the calling thread; the returned task
        /// completes only after the loop exits.
        /// </summary>
        /// <param name="token">Cancelling this token makes <c>Consume</c> throw
        /// <see cref="OperationCanceledException"/>, which ends the loop.</param>
        public Task ExcuteAsync(CancellationToken token = default)
        {
            SchemaRegistryConfig schemaRegistryConfig = new SchemaRegistryConfig
            {
                Url = _schemaUri
            };

            ConsumerConfig config = BuildConsumerConfig();
            // Commit once every N messages rather than per message: a synchronous commit
            // round-trips to the cluster and is slow relative to consumption speed.
            const int commitPeriod = 5;

            using (var schemaRegistry = new CachedSchemaRegistryClient(schemaRegistryConfig))
            {
                // The User type must match the producer's exactly (including the Avro
                // namespace) — similar to how gRPC requires identical .proto files.
                using (IConsumer<string, User> consumer = new ConsumerBuilder<string, User>(config)
                    .SetErrorHandler((_, e) => Console.WriteLine($"Error:{e.Reason}"))
                    //.SetStatisticsHandler((_, json) => Console.WriteLine($"Statistics:{json}"))
                    .SetKeyDeserializer(new AvroDeserializer<string>(schemaRegistry).AsSyncOverAsync())
                    .SetValueDeserializer(new AvroDeserializer<User>(schemaRegistry).AsSyncOverAsync())
                    // Invoked after partitions are (re)assigned and before consumption starts.
                    .SetPartitionsAssignedHandler((c, partitions) =>
                    {
                        // If offsets were persisted externally (e.g. in a database), this is
                        // the place to restore the position with Seek, e.g.:
                        //c.Seek(new TopicPartitionOffset(new TopicPartition("Banana", 0), new Offset(1234)));
                        Console.WriteLine($"Incremental partition assignment:[{string.Join(", ", partitions)}]");
                    })
                    // Invoked when the group coordinator starts a rebalance: after this consumer
                    // stops reading and before it gives up its partitions. This is the best spot
                    // to commit/persist offsets so the next owner knows where to resume.
                    .SetPartitionsRevokedHandler((_, partitions) =>
                    {
                        // Offset-persistence logic could go here.
                        Console.WriteLine($"Partitions were revoked: [{string.Join(", ", partitions)}]");
                    })
                    .Build())
                {
                    // Subscribe to the topic; the starting position within each partition is
                    // governed by committed offsets and AutoOffsetReset.
                    consumer.Subscribe(_topic);
                    try
                    {
                        // Trick to seek before the main loop: call Consume once so the consumer
                        // joins the group and receives partitions, then Seek to the desired
                        // offset. An invalid seek target makes the subsequent Consume throw.
                        //consumer.Consume(0);
                        //consumer.Seek(...)
                        while (true)
                        {
                            try
                            {
                                var consumeResult = consumer.Consume(token);

                                if (consumeResult.IsPartitionEOF)
                                {
                                    Console.WriteLine($"Reached end of topic {consumeResult.Topic}, partition {consumeResult.Partition}, offset {consumeResult.Offset}.");
                                    continue;
                                }

                                string content = System.Text.Json.JsonSerializer.Serialize(consumeResult.Message.Value);
                                Console.WriteLine($"Received message at {consumeResult.TopicPartitionOffset}: {content}");

                                // Offsets could also be persisted here; under high throughput this
                                // may be skipped if recovery guarantees do not require it.
                                if (consumeResult.Offset % commitPeriod == 0)
                                {
                                    // Commit sends a synchronous "commit offsets" request and waits
                                    // for the response — very slow compared to consuming. High-
                                    // performance apps commit infrequently and are designed to
                                    // handle duplicate messages after a failure.
                                    try
                                    {
                                        consumer.Commit(consumeResult);
                                    }
                                    catch (KafkaException e)
                                    {
                                        Console.WriteLine($"Commit error: {e.Error.Reason}");
                                    }
                                }
                            }
                            catch (ConsumeException e)
                            {
                                Console.WriteLine($"Consume error: {e.Error.Reason}");
                            }
                        }
                    }
                    catch (OperationCanceledException)
                    {
                        Console.WriteLine("Closing consumer.");
                    }
                    finally
                    {
                        // Close() leaves the group cleanly (prompting an immediate rebalance)
                        // and commits final offsets where applicable. Run it in finally so an
                        // unexpected exception — not just cancellation — still leaves cleanly.
                        consumer.Close();
                    }
                }
            }
            return Task.CompletedTask;
        }

        /// <summary>
        /// Builds the consumer configuration: manual commits (auto-commit off) and
        /// partition-EOF events enabled so the loop can log when it catches up.
        /// </summary>
        private static ConsumerConfig BuildConsumerConfig() => new ConsumerConfig
        {
            // TODO(review): broker list and group id are hard-coded; consider injecting
            // them via the constructor. GroupId is left as-is (changing it would move the
            // consumer to a different group and reset its committed offsets).
            BootstrapServers = "192.168.3.68:9092,192.168.3.66:9092,192.168.3.69:9092",
            GroupId = "charp-consumer",
            EnableAutoCommit = false,
            EnablePartitionEof = true,
            PartitionAssignmentStrategy = PartitionAssignmentStrategy.CooperativeSticky,
            AutoOffsetReset = AutoOffsetReset.Earliest,
            SessionTimeoutMs = 6000,
            // Interval for the SetStatisticsHandler callback (currently commented out above).
            StatisticsIntervalMs = 5000
        };
    }
}
