package com.lm.flink.example;

import org.apache.kafka.clients.producer.*;
import org.apache.kafka.common.serialization.StringSerializer;

import java.util.Properties;
import java.util.Random;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;

/**
 * Generates JSON log-style test messages and sends them to a Kafka topic,
 * manually retrying individual sends that fail because the partition leader
 * moved (on top of the producer's own client-side retries).
 */
public class KafkaDataGenerator {

    /** Original hard-coded defaults, overridable via CLI args. */
    private static final String DEFAULT_BOOTSTRAP_SERVERS = "10.2.0.230:9092";
    private static final String DEFAULT_TOPIC = "logtest-topic";
    private static final int DEFAULT_MESSAGE_COUNT = 100;

    /** Manual-retry ceiling for a single message on a leader-change error. */
    private static final int MAX_SEND_RETRIES = 5;

    /**
     * Entry point. Optional args, each defaulting to the original hard-coded
     * value: {@code [bootstrapServers] [topic] [messageCount]}.
     */
    public static void main(String[] args) {
        String bootstrapServers = args.length > 0 ? args[0] : DEFAULT_BOOTSTRAP_SERVERS;
        String topic = args.length > 1 ? args[1] : DEFAULT_TOPIC;
        int messageCount = args.length > 2 ? Integer.parseInt(args[2]) : DEFAULT_MESSAGE_COUNT;

        // KafkaProducer implements Closeable: try-with-resources flushes and
        // closes it even if message generation throws.
        try (KafkaProducer<String, String> producer =
                new KafkaProducer<>(buildProducerConfig(bootstrapServers))) {
            generateTestDataWithRetry(producer, topic, messageCount);
        }
    }

    /** Builds producer settings tuned to ride out transient broker problems. */
    private static Properties buildProducerConfig(String bootstrapServers) {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers); // may list multiple brokers
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());

        // Client-side retry tuning: let the producer's own retries absorb
        // transient failures before the manual loop below sees an exception.
        props.put(ProducerConfig.RETRIES_CONFIG, 10);               // retry count
        props.put(ProducerConfig.RETRY_BACKOFF_MS_CONFIG, 1000);    // 1 s between retries
        props.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, 60000);       // max 60 s blocking in send()/partitionsFor()
        props.put(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG, 30000); // 30 s per request
        //props.put(ProducerConfig.DELIVERY_TIMEOUT_MS_CONFIG, 120000); // 120 s delivery timeout

        // Metadata refresh and reconnect pacing.
        props.put(ProducerConfig.METADATA_MAX_AGE_CONFIG, 300000); // force metadata refresh every 5 min
        props.put(ProducerConfig.RECONNECT_BACKOFF_MS_CONFIG, 1000);
        props.put(ProducerConfig.RECONNECT_BACKOFF_MAX_MS_CONFIG, 10000);
        return props;
    }

    /**
     * Sends {@code messageCount} JSON test messages to {@code topic}, blocking
     * on each send and pausing 100 ms between messages. An interrupt stops
     * generation and re-asserts the thread's interrupt flag.
     *
     * @param producer     producer to send with; the caller retains ownership and closes it
     * @param topic        destination topic
     * @param messageCount number of messages to produce
     */
    private static void generateTestDataWithRetry(KafkaProducer<String, String> producer,
                                                  String topic, int messageCount) {
        Random random = new Random();
        String[] messageTypes = {"INFO", "WARN", "ERROR", "DEBUG"};

        for (int i = 0; i < messageCount; i++) {
            String messageType = messageTypes[random.nextInt(messageTypes.length)];
            ProducerRecord<String, String> record =
                    new ProducerRecord<>(topic, buildMessage(messageType, i));

            if (!sendWithRetry(producer, topic, record, i)) {
                System.err.printf("Failed to send message %d after %d attempts%n", i, MAX_SEND_RETRIES);
            }

            // Throttle message production; stop generating if interrupted.
            try {
                Thread.sleep(100);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                break;
            }
        }
    }

    /** Formats one JSON test payload carrying level, a per-run sequence number, and a timestamp. */
    private static String buildMessage(String level, int seq) {
        return String.format(
                "{\"level\":\"%s\",\"service\":\"order_%d\",\"message\":\"test_%d\",\"timestamp\":%d}",
                level, seq, seq, System.currentTimeMillis());
    }

    /**
     * Blocks on a single send, retrying with linearly growing backoff when the
     * broker reports a leadership change for the record's partition.
     *
     * @param producer     producer to send with
     * @param topic        topic, used to force a metadata refresh between attempts
     * @param record       record to send
     * @param messageIndex sequence number, for log output only
     * @return {@code true} if the record was acknowledged, {@code false} on
     *         non-retriable failure, interruption, or retry exhaustion
     */
    private static boolean sendWithRetry(KafkaProducer<String, String> producer, String topic,
                                         ProducerRecord<String, String> record, int messageIndex) {
        int retryCount = 0;
        while (retryCount < MAX_SEND_RETRIES) {
            try {
                RecordMetadata metadata = producer.send(record).get();
                System.out.printf("Successfully sent message %d (attempt %d): partition=%d, offset=%d%n",
                        messageIndex, retryCount + 1, metadata.partition(), metadata.offset());
                return true;

            } catch (ExecutionException e) {
                // NOTE(review): NotLeaderForPartitionException is deprecated in
                // kafka-clients >= 2.4 in favor of NotLeaderOrFollowerException —
                // confirm against the client version on the classpath.
                if (e.getCause() instanceof org.apache.kafka.common.errors.NotLeaderForPartitionException) {
                    retryCount++;
                    System.out.printf("NotLeaderForPartitionException for message %d, retrying (%d/%d)...%n",
                            messageIndex, retryCount, MAX_SEND_RETRIES);

                    // Linear backoff before the next attempt.
                    try {
                        TimeUnit.MILLISECONDS.sleep(500L * retryCount);
                    } catch (InterruptedException ie) {
                        Thread.currentThread().interrupt();
                        return false;
                    }

                    // Force a metadata refresh so the next attempt sees the new
                    // leader (may block up to MAX_BLOCK_MS_CONFIG).
                    producer.partitionsFor(topic);

                } else {
                    // Non-retriable failure; give up on this message.
                    System.err.printf("Failed to send message %d: %s%n", messageIndex, e.getCause().getMessage());
                    return false;
                }
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                return false;
            }
        }
        return false;
    }
}