package tc.alert.tckejisoc.kafka.service;

import com.alibaba.fastjson.JSONObject;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Bean;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor;
import org.springframework.stereotype.Component;
import tc.alert.tckejisoc.domain.EventBase;
import tc.alert.tckejisoc.es.EventSingleWriter;
import tc.alert.tckejisoc.pool.ThreadPoolConfig;
import tc.alert.tckejisoc.rule.LogRuleManager;
import tc.alert.tckejisoc.utils.EsUtils;

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.RejectedExecutionException;

// MessageProcessor.java
@Component
@Slf4j
public class MessageProcessor {


    @Autowired
    private ThreadPoolTaskExecutor dataProcessingPool;
    @Autowired
    private LogRuleManager logRuleManager;
    @Autowired
    private DataWork dataWork;
    @Autowired
    private EventSingleWriter eventSingleWriter;

    public void process(ConsumerRecord<String, String> record) {
        dataProcessingPool.execute(() -> {
            try {
                // 业务处理逻辑
                processRecord(record);
                // 异步提交偏移量  暂未实现异步提交
//                commitOffsetAsync(record);
            } catch (Exception e) {
                //可扩展为 重试   逻辑  增加死信队列  根据重试次数消费队列数据 N次后写入 异常数据库
                handleProcessingError(record, e);
            }
        });
    }

    private void processRecord(ConsumerRecord<String, String> record) throws InterruptedException {
        long start = System.currentTimeMillis();
        JSONObject jsonObject = JSONObject.parseObject(record.value());
        //deviceos  是logstash 打的标签
        String str = jsonObject.getString("deviceos");
        if(StringUtils.isBlank(str)){

             str = jsonObject.getString("payload_index");
        }
        if(StringUtils.isBlank(str)){
            log.error("无法匹配解析规则请关注 --------------------------------------"+record.value());
            return;
        }

        EventBase unifiedLog = logRuleManager.parseLog(record.value(), str);
        //解析ip
        dataWork.ip(unifiedLog);
        //ip 关联资产补充


        eventSingleWriter.saveSingle(unifiedLog, EsUtils.getindex("event-base-"));
        log.debug(Thread.currentThread().getName()+"线程，使用时间："+(System.currentTimeMillis()-start)+"ms，kafka offset"+record.offset());
    }

    private void commitOffsetAsync(ConsumerRecord<String, String> record) {
        // 实现异步提交逻辑
    }

    private void handleProcessingError(ConsumerRecord<String, String> record, Exception ex) {
        log.error("Error processing record [topic={}, partition={}, offset={}]",
                record.topic(), record.partition(), record.offset(), ex);
        // 实现重试或死信队列逻辑
    }


    @Bean
    public KafkaTemplate<String, String> dlqTemplate() {
        return new KafkaTemplate<>(new DefaultKafkaProducerFactory<>(dlqProducerConfigs()));
    }

    private Map<String, Object> dlqProducerConfigs() {
        Map<String, Object> props = new HashMap<>();
        // 配置死信队列生产者参数
        return props;
    }
}