package com.pk.flink.apps.userCountCalc;

import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.serialization.LongDeserializer;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.time.Duration;
import java.util.ArrayDeque;
import java.util.Arrays;
import java.util.Deque;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.NavigableMap;
import java.util.Properties;
import java.util.Set;
import java.util.TreeMap;


/**
 * Consumes (userId, eventTimestampMs) records from Kafka and periodically prints
 * two metrics: the total number of distinct users ever seen, and the number of
 * distinct users whose events fall in the last {@link #LOOKBACK_MS} milliseconds.
 *
 * <p>Events are grouped into {@link #BUCKET_MS}-wide buckets keyed by event time,
 * so the sliding-window count is computed from at most a handful of small sets
 * instead of every raw event. Not thread-safe: all state is owned by the single
 * polling thread inside {@link #run()}.
 */
public class KafkaConsumerApp {

    private static final String SERVERS = "master:9092,slave1:9092,slave2:9092";
    private static final String TOPIC = "pk-2-2";
    KafkaConsumer<String, Long> consumer = null;

    // window/reporting configuration
    private static final long BUCKET_MS = 60_000L;           // 1 minute buckets
    private static final long REPORT_EVERY_MS = 5 * 60_000L; // report every 5 minutes
    private static final long LOOKBACK_MS = 10 * 60_000L;    // sliding window: last 10 minutes

    // state (accessed only by the polling thread in run())
    private final Set<String> allUsersEverSeen = new HashSet<>();
    // Sorted by bucket start time. A sorted map makes eviction and window scans
    // correct even when event timestamps arrive out of order — an insertion-order
    // Deque would let one early-arriving "new" bucket shield older buckets from
    // eviction forever.
    private final NavigableMap<Long, Set<String>> bucketToUsers = new TreeMap<>();

    public static void main(String[] args){
        new KafkaConsumerApp().run();
    }

    /**
     * Polls Kafka forever, folding each record into the unique-user state and
     * printing a report at every {@link #REPORT_EVERY_MS} wall-clock boundary.
     * A fatal {@link KafkaException} ends the loop; the consumer is always closed.
     */
    private void run(){
        Properties props = new Properties();
        props.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, SERVERS);
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "user-count-consumer");
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, true);
        props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, 1000);
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, LongDeserializer.class.getName());
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");

        consumer = new KafkaConsumer<>(props);

        try {
            // subscribe() inside the try so a failure here still closes the consumer
            consumer.subscribe(Arrays.asList(TOPIC));

            long nextReportAt = alignToNextBoundary(System.currentTimeMillis(), REPORT_EVERY_MS);

            while(true) {
                ConsumerRecords<String, Long> records = consumer.poll(Duration.ofMillis(500));
                for (ConsumerRecord<String, Long> record : records) {
                    // record layout: key = userId, value = event timestamp (epoch millis)
                    String userId = record.key();
                    Long eventTs = record.value();
                    if (userId == null || eventTs == null) {
                        continue; // skip malformed records instead of throwing NPE
                    }
                    allUsersEverSeen.add(userId);
                    // align event time down to the start of its minute bucket
                    long bucketStart = (eventTs / BUCKET_MS) * BUCKET_MS;
                    bucketToUsers.computeIfAbsent(bucketStart, k -> new HashSet<>()).add(userId);
                }

                long now = System.currentTimeMillis();
                // periodic reporting every 5 minutes
                if (now >= nextReportAt) {
                    // Demand 1: total unique users ever seen up to now
                    int totalEver = allUsersEverSeen.size();

                    // Demand 2: distinct users in the last 10 minutes (by event time)
                    long windowStartInclusive = now - LOOKBACK_MS;
                    cleanupOldBuckets(windowStartInclusive);
                    int last10minDistinct = computeLastWindowDistinct(windowStartInclusive);

                    System.out.println(
                        String.format("[Report @ %tF %tT] totalEver=%d, last10min=%d",
                                      now, now, totalEver, last10minDistinct)
                    );

                    // advance to the next boundary strictly in the future, skipping
                    // any boundaries we slept through during a long poll/GC pause
                    do {
                        nextReportAt += REPORT_EVERY_MS;
                    } while (nextReportAt <= now);
                }
            }
        } catch (KafkaException e) {
            // Fatal client error: nothing to retry at this level, so report it and
            // fall through to close the consumer.
            System.err.println(e.getMessage());
        } finally {
            consumer.close();
        }

    }

    /**
     * Returns the first multiple of {@code periodMs} strictly after {@code ts}.
     * Assumes {@code ts >= 0} (wall-clock millis), where integer division floors.
     */
    private static long alignToNextBoundary(long ts, long periodMs) {
        long base = (ts / periodMs) * periodMs;
        return base + periodMs;
    }

    /**
     * Evicts every bucket whose start time is before {@code windowStartInclusive}.
     * Correct for out-of-order bucket creation because the map is sorted by key.
     */
    private void cleanupOldBuckets(long windowStartInclusive) {
        // headMap(.., false) is a live view of all entries with key < windowStartInclusive
        bucketToUsers.headMap(windowStartInclusive, false).clear();
    }

    /**
     * Counts distinct users across all buckets starting at or after
     * {@code windowStartInclusive}; returns 0 when no bucket qualifies.
     */
    private int computeLastWindowDistinct(long windowStartInclusive) {
        Set<String> distinct = new HashSet<>();
        for (Set<String> users : bucketToUsers.tailMap(windowStartInclusive, true).values()) {
            distinct.addAll(users);
        }
        return distinct.size();
    }

}
