package com.example.archivemanagement.audit;

import com.example.archivemanagement.common.CommonConstant;
import com.example.archivemanagement.config.KafkaAuditConfig;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.springframework.kafka.config.TopicBuilder;
import org.springframework.kafka.core.KafkaAdmin;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.support.SendResult;
import org.springframework.kafka.support.serializer.JsonDeserializer;
import org.springframework.stereotype.Service;
import org.springframework.util.concurrent.ListenableFuture;
import org.springframework.util.concurrent.ListenableFutureCallback;

import java.time.Duration;
import java.time.format.DateTimeFormatter;
import java.util.*;
import java.util.stream.Collectors;

@Service
public class AuditEventProducer {

    /** Package whose classes the JSON deserializer is allowed to instantiate. */
    private static final String TRUSTED_PACKAGE = "com.example.archivemanagement.audit";

    /** Upper bound on how long {@link #getLatestEvents(int)} polls Kafka. */
    private static final Duration READ_TIMEOUT = Duration.ofSeconds(5);

    private final KafkaTemplate<String, AuditEvent> kafkaTemplate;
    private final KafkaAdmin kafkaAdmin;
    private final short replicationFactor;
    private final int partitions;

    /**
     * Creates the producer and eagerly ensures the audit topic exists with the
     * configured partition count and replication factor.
     *
     * @param kafkaTemplate template used to publish audit events
     * @param kafkaAdmin    admin facade used for topic creation and for reading
     *                      the {@code bootstrap.servers} configuration
     * @param auditConfig   source of partition/replication settings
     */
    public AuditEventProducer(KafkaTemplate<String, AuditEvent> kafkaTemplate, KafkaAdmin kafkaAdmin, KafkaAuditConfig auditConfig) {
        this.kafkaTemplate = kafkaTemplate;
        this.kafkaAdmin = kafkaAdmin;
        this.replicationFactor = auditConfig.getReplicationFactor();
        this.partitions = auditConfig.getPartitions();
        createTopicIfNotExists();
    }

    /** Idempotently creates (or adjusts) the audit topic via the Kafka admin client. */
    private void createTopicIfNotExists() {
        NewTopic newTopic = TopicBuilder.name(CommonConstant.TOPIC)
                .partitions(partitions)
                .replicas(replicationFactor)
                .build();
        kafkaAdmin.createOrModifyTopics(newTopic);
    }

    /**
     * Publishes an audit event asynchronously. The record key is
     * {@code entityType-entityId} so events for the same entity land on the
     * same partition (preserving per-entity ordering). Send outcome is logged;
     * failures are not rethrown to the caller.
     *
     * @param event the audit event to publish; must have entityType and entityId set
     */
    public void sendAuditEvent(AuditEvent event) {
        // ListenableFuture-based callback API (Java 8 / pre-Spring-6 compatible).
        ListenableFuture<SendResult<String, AuditEvent>> future = kafkaTemplate.send(
                CommonConstant.TOPIC,
                event.getEntityType() + "-" + event.getEntityId(),
                event
        );

        future.addCallback(new ListenableFutureCallback<SendResult<String, AuditEvent>>() {
            @Override
            public void onSuccess(SendResult<String, AuditEvent> result) {
                System.out.printf("消息发送成功: 主题=%s, 分区=%d, 偏移量=%d%n",
                        result.getRecordMetadata().topic(),
                        result.getRecordMetadata().partition(),
                        result.getRecordMetadata().offset());
            }

            @Override
            public void onFailure(Throwable ex) {
                System.err.printf("消息发送失败: 主题=%s, 原因=%s%n", CommonConstant.TOPIC, ex.getMessage());
            }
        });
    }

    /**
     * Reads up to {@code maxCount} of the most recent audit events directly
     * from the topic (no consumer-group offset management) and renders each as
     * a human-readable string, newest first.
     *
     * <p>Implementation: a throwaway consumer is assigned all partitions, each
     * partition's position is seeked to {@code endOffset - maxCount}, and
     * records are polled until {@code maxCount} events are collected or a
     * 5-second deadline passes. Errors are logged and yield a (possibly
     * partial) result rather than an exception.
     *
     * @param maxCount maximum number of events to return; non-positive yields an empty list
     * @return formatted event descriptions, most recent first; never {@code null}
     */
    public List<String> getLatestEvents(int maxCount) {
        if (maxCount <= 0) {
            return Collections.emptyList();
        }
        DateTimeFormatter formatter = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss");
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaAdmin.getConfigurationProperties().get("bootstrap.servers"));
        // Fresh random group id: this reader must never share/commit offsets.
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "audit-event-reader-" + UUID.randomUUID());
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
        // NOTE: deserializer classes are deliberately NOT set in props — explicit
        // instances are passed to the KafkaConsumer constructor below, and
        // config-based entries would be ignored anyway.

        JsonDeserializer<AuditEvent> deserializer = new JsonDeserializer<>(
                AuditEvent.class,  // target type
                true               // useHeadersIfPresent: honor type-info headers when available
        );
        // Trusted packages are matched by PACKAGE name, not class name; passing a
        // fully-qualified class name here would reject every type-headed record.
        deserializer.addTrustedPackages(TRUSTED_PACKAGE);

        List<AuditEvent> events = new ArrayList<>();
        try (KafkaConsumer<String, AuditEvent> consumer = new KafkaConsumer<>(props,
                new StringDeserializer(),
                deserializer)) {
            // partitionsFor may return null if the topic does not exist yet.
            List<org.apache.kafka.common.PartitionInfo> partitionInfos = consumer.partitionsFor(CommonConstant.TOPIC);
            if (partitionInfos == null || partitionInfos.isEmpty()) {
                return Collections.emptyList();
            }
            List<TopicPartition> topicPartitions = partitionInfos.stream()
                    .map(info -> new TopicPartition(info.topic(), info.partition()))
                    .collect(Collectors.toList());

            // assign() instead of subscribe(): avoids group rebalancing and
            // offset-commit semantics for this one-shot read.
            consumer.assign(topicPartitions);

            // Seek each partition to (end - maxCount) so at most maxCount recent
            // records per partition are readable.
            Map<TopicPartition, Long> endOffsets = consumer.endOffsets(topicPartitions);
            for (TopicPartition partition : topicPartitions) {
                long endOffset = endOffsets.get(partition);
                consumer.seek(partition, Math.max(0, endOffset - maxCount));
            }

            // Poll until enough events are collected or the deadline passes.
            long deadline = System.currentTimeMillis() + READ_TIMEOUT.toMillis();
            while (events.size() < maxCount && System.currentTimeMillis() < deadline) {
                ConsumerRecords<String, AuditEvent> records = consumer.poll(Duration.ofMillis(100));
                for (ConsumerRecord<String, AuditEvent> record : records) {
                    // Skip tombstones / records the deserializer could not map;
                    // a null here would NPE in the sort below.
                    if (record.value() != null) {
                        events.add(record.value());
                    }
                    if (events.size() >= maxCount) {
                        break;
                    }
                }
            }
        } catch (Exception e) {
            System.err.println("获取Kafka消息时出错: " + e.getMessage());
            e.printStackTrace();
        }

        // Newest first.
        events.sort(Comparator.comparing(AuditEvent::getTimestamp).reversed());
        return events.stream()
                .map(value -> String.format("%s 在 %s 时执行了 %s 操作",
                        value.getUsername(),
                        value.getTimestamp().format(formatter),
                        value.getAction()))
                .collect(Collectors.toList());
    }

}