﻿#region 从另外一个topic收数据  
using Confluent.Kafka;
using CSRedis;
using Newtonsoft.Json;
using QskjHistoryDataKafka.Dto;
using QskjLibrary;
#region 消费数据
// Background task that runs the Kafka consumer for the VEHICLE_ALARM topic.
// NOTE(review): Consumer()/Run_Consume() block inside their poll loop, so the
// catch below only fires if the consumer fails; after a failure the task is
// kept alive by the sleep loop with NO retry — confirm this is intended.
Task.Factory.StartNew(() =>
{
    string brokerList = "192.168.3.52:9092,192.168.3.146:9092,192.168.3.164:9092";
    List<string> topics = new List<string>();
    topics.Add("VEHICLE_ALARM");
    Console.WriteLine(DateTime.Now.ToString() + "qskj设备状态GPS告警数据kafka消费开始4");

    //Thread.Sleep(1000 * 20);
    try
    {
        Consumer(brokerList, topics);
    }
    catch (Exception e)
    {
        Console.WriteLine("数据消费异常" + e);
    }
    // Keep the task alive after the consumer loop exits; no restart is attempted.
    while (true)
    {
        Thread.Sleep(1000 * 200);
    }
});
#endregion
#endregion
// Keep the main thread alive so the background consumer task keeps running.
while (true)
{
    Thread.Sleep(5000);
}

/// <summary>
/// Persists one vehicle alarm into the day-partitioned MySQL table
/// qskjrecord.B_Vehicle_Alarm{yyyyMMdd}, enriched with driver info looked up
/// from Redis by plate number. Only alarms whose alarmTime falls inside the
/// hard-coded backfill window are written; everything else is skipped.
/// Exceptions are logged and swallowed so one bad record cannot stop the consumer.
/// </summary>
/// <param name="status">Deserialized alarm message from the VEHICLE_ALARM topic.</param>
/// <param name="redisDb5">Redis client holding "plate -> driverId;driverWId;driverName" entries.</param>
void VehicleAlarmDataToMysql(VehicleAlarm status, CSRedisClient redisDb5)
{
    try
    {
        DateTime dt = Convert.ToDateTime(status.alarmTime);
        // Backfill window: only this historical slice is (re)imported.
        DateTime dts = Convert.ToDateTime("2024-02-26 00:00:00");
        DateTime dte = Convert.ToDateTime("2024-02-26 13:22:01");
        if (dt < dts || dt >= dte)
        {
            return; // outside the backfill window — nothing to do
        }

        // NOTE(review): the target table suffix is *today's* date, not the alarm's
        // date — for a historical backfill these can differ; confirm intended.
        string dtStr = DateTime.Now.ToString("yyyyMMdd");

        // Driver lookup: taxi plates are stored without the leading "沪",
        // bus plates with it — try the stripped key first, then the full plate.
        // Guards added: the old code could throw on a null/1-char plate
        // (Substring) or a malformed Redis value (unchecked array indexing),
        // losing the whole insert via the catch-all below.
        string drId = string.Empty, drWId = string.Empty, drNa = string.Empty;
        string plate = status.vehicleNo ?? string.Empty;
        string drStr = plate.Length > 1 ? redisDb5.Get(plate.Substring(1)) : null;
        if (string.IsNullOrEmpty(drStr))
        {
            drStr = redisDb5.Get(plate);
        }
        if (!string.IsNullOrEmpty(drStr))
        {
            // Expected value format: "driverId;driverWId;driverName".
            string[] drArr = drStr.Split(';');
            if (drArr.Length >= 3)
            {
                drId = drArr[0]; drWId = drArr[1]; drNa = drArr[2];
            }
        }

        // SECURITY(review): message fields are interpolated straight into the SQL
        // text — any embedded quote breaks the statement (injection/corruption
        // risk). Parameterize once MySqlHelper exposes a parameterized overload;
        // the current (CommandType, string) signature does not accept parameters.
        string sql = $@"insert into qskjrecord.B_Vehicle_Alarm{dtStr}(devGbId,alarmId,vehicleNo,alarmMainType,alarmSubType,alarmType,alarmTime,latitude,longitude,speed,url,platform,timestamp,createtime,driverId,driverWId,driverName)
                    values('{status.devGbId}','{status.alarmId}','{status.vehicleNo}',{status.alarmMainType},{status.alarmSubType},{status.alarmType},'{status.alarmTime}','{status.latitude}','{status.longitude}','{status.speed}','{status.url}','{status.platform}',{status.timestamp},'{DateTime.Now.ToString("yyyy-MM-dd HH:mm:ss")}','{drId}','{drWId}','{drNa}')";
        int res = MySqlHelper.ExecuteNonQuery(System.Data.CommandType.Text, sql);
    }
    catch (Exception e)
    {
        Console.WriteLine("设备报警信息写入数据库异常" + JsonConvert.SerializeObject(e));
    }
}
/// <summary>
/// Wires Ctrl+C to a cancellation token and runs the blocking consume loop.
/// Blocks until the loop exits; exceptions propagate to the caller.
/// NOTE: this was 'async void' with no awaits (compiler warning CS1998) —
/// made synchronous so failures reliably reach the caller's try/catch.
/// </summary>
void Consumer(string brokerList, List<string> topics)
{
    CancellationTokenSource cts = new CancellationTokenSource();
    Console.CancelKeyPress += (_, e) =>
    {
        e.Cancel = true; // prevent the process from terminating.
        cts.Cancel();    // let the consume loop shut down gracefully instead.
    };
    Run_Consume(brokerList, topics, cts.Token);
}
/// <summary>
///     Blocking consume loop for the configured topics (VEHICLE_ALARM).
///         - offsets are committed automatically by a background thread
///           (EnableAutoCommit = true), but only after a message has been
///           processed and explicitly stored (EnableAutoOffsetStore = false),
///           giving at-least-once semantics.
///         - no extra thread is created for the Poll (Consume) loop.
///     Runs until <paramref name="cancellationToken"/> is cancelled.
/// </summary>
void Run_Consume(string brokerList, List<string> topics, CancellationToken cancellationToken)
{
    // Redis db5 holds the plate-number -> driver-info mapping used when persisting alarms.
    // (The old 'if (redisDb5 == null)' check right after assigning null was dead code.)
    CSRedisClient redisDb5 = RedisHelp.GetInstance5();

    var config = new ConsumerConfig
    {
        BootstrapServers = brokerList,
        GroupId = "VEHICLE_ALARM_History",
        EnableAutoOffsetStore = false,
        EnableAutoCommit = true,
        StatisticsIntervalMs = 5000,
        SessionTimeoutMs = 6000,
        AutoOffsetReset = AutoOffsetReset.Earliest,
        EnablePartitionEof = true,
        // A good introduction to the CooperativeSticky assignor and incremental rebalancing:
        // https://www.confluent.io/blog/cooperative-rebalancing-in-kafka-streams-consumer-ksqldb/
        PartitionAssignmentStrategy = PartitionAssignmentStrategy.CooperativeSticky
    };

    // Note: If a key or value deserializer is not set (as is the case below), the
    // deserializer corresponding to the appropriate type from Confluent.Kafka.Deserializers
    // will be used automatically (where available). The default deserializer for string
    // is UTF8. The default deserializer for Ignore returns null for all input data
    // (including non-null data).
    using (var consumer = new ConsumerBuilder<Ignore, string>(config)
        // Note: All handlers are called on the main .Consume thread.
        .SetErrorHandler((_, e) => Console.WriteLine($"Error: {e.Reason}"))
        .SetStatisticsHandler((_, json) =>
            // Fires every StatisticsIntervalMs; the JSON payload itself is unused.
            Console.WriteLine("rw")
            )
        .SetPartitionsAssignedHandler((c, partitions) =>
        {
            // Since a cooperative assignor (CooperativeSticky) has been configured, the
            // partition assignment is incremental (adds partitions to any existing assignment).
            // Start offsets could be chosen manually here by returning a list of
            // TopicPartitionOffset; returning nothing accepts the assignment as-is.
        })
        .SetPartitionsRevokedHandler((c, partitions) =>
        {
            // Since a cooperative assignor (CooperativeSticky) has been configured, the revoked
            // assignment is incremental (may remove only some partitions of the current assignment).
            // No per-partition cleanup is required here. (An unused 'remaining'
            // LINQ computation was removed from this handler.)
        })
        .SetPartitionsLostHandler((c, partitions) =>
        {
            // Called when the consumer detects it has lost ownership of its
            // assignment (fallen out of the group). Nothing to do here.
        })
        .Build())
    {
        consumer.Subscribe(topics);

        try
        {
            while (true)
            {
                try
                {
                    var consumeResult = consumer.Consume(cancellationToken);

                    if (consumeResult.IsPartitionEOF)
                    {
                        // Reached the current end of a partition; keep polling for new data.
                        continue;
                    }

                    if (consumeResult.Topic == "VEHICLE_ALARM")
                    {
                        try
                        {
                            // Deserialize directly; the previous 'new VehicleAlarm()'
                            // pre-allocation was immediately overwritten and is gone.
                            VehicleAlarm vehicle = JsonConvert.DeserializeObject<VehicleAlarm>(consumeResult.Message.Value);
                            VehicleAlarmDataToMysql(vehicle, redisDb5);
                        }
                        catch (Exception e)
                        {
                            // Bad payloads are logged and skipped so the loop keeps running.
                            Console.WriteLine("数据转换失败" + consumeResult.Message.Value + "-------" + e.Message);
                        }
                    }

                    try
                    {
                        // Store the offset associated with consumeResult to a local cache. Stored offsets are committed to Kafka by a background thread every AutoCommitIntervalMs. 
                        // The offset stored is actually the offset of the consumeResult + 1 since by convention, committed offsets specify the next message to consume. 
                        // If EnableAutoOffsetStore had been set to the default value true, the .NET client would automatically store offsets immediately prior to delivering messages to the application. 
                        // Explicitly storing offsets after processing gives at-least once semantics, the default behavior does not.
                        consumer.StoreOffset(consumeResult);
                    }
                    catch (KafkaException e)
                    {
                        Console.WriteLine($"Store Offset error: {e.Error.Reason}");
                    }
                }
                catch (ConsumeException e)
                {
                    Console.WriteLine($"Consume error: {e.Error.Reason}");
                }
            }
        }
        catch (OperationCanceledException)
        {
            // Graceful shutdown: commit final offsets and leave the group cleanly.
            Console.WriteLine("Closing consumer.");
            consumer.Close();
        }
    }
}