package org.xxd.kafka.clients.producer;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.xxd.kafka.clients.NetworkClient;
import org.xxd.kafka.clients.common.Cluster;
import org.xxd.kafka.clients.common.Metadata;
import org.xxd.kafka.clients.common.network.ChannelBuilder;
import org.xxd.kafka.clients.common.network.ChannelBuilders;
import org.xxd.kafka.clients.common.network.Selector;
import org.xxd.kafka.clients.common.util.KafkaThread;
import org.xxd.kafka.clients.common.util.SystemTime;
import org.xxd.kafka.clients.common.util.Time;
import org.xxd.kafka.clients.producer.internals.DefaultPartitioner;
import org.xxd.kafka.clients.producer.internals.RecordAccumulator;
import org.xxd.kafka.clients.producer.internals.Sender;

import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.atomic.AtomicInteger;

/**
 * Simplified Kafka producer client: wires up the record accumulator, the
 * network client and the background sender I/O thread from a
 * {@code Properties}-based configuration.
 *
 * @author XiaoDong.Xie
 * @create 2020-09-27 10:05
 */
public class KafkaProducer<K, V> implements Producer<K, V> {
    private static final Logger log = LoggerFactory.getLogger(KafkaProducer.class);
    /** Sequence used to build a unique default client id per producer instance. */
    private static final AtomicInteger PRODUCER_CLIENT_ID_SEQUENCE = new AtomicInteger(1);
    /** Broker port assumed when a bootstrap address omits an explicit ":port". */
    private static final int DEFAULT_BROKER_PORT = 9092;

    private final Thread ioThread;
    private final Sender sender;
    private final RecordAccumulator accumulator;
    private final Metadata metadata;
    private final ProducerConfig producerConfig;
    private final String clientId;
    private final Partitioner partitioner;
    private final int maxRequestSize;
    private final long totalMemorySize;
    private final String compressionType;
    private final long maxBlockTimeMs;
    private final int requestTimeoutMs;
    private final Time time;

    /**
     * Creates a producer from the given configuration and starts the daemon
     * network I/O thread that drives the {@link Sender} loop.
     *
     * @param properties producer configuration; see {@link ProducerConfig} for the keys
     * @throws IllegalArgumentException if the {@code acks} setting is neither
     *         {@code "all"} nor an integer
     */
    public KafkaProducer(Properties properties) {
        log.info("Starting the kafka producer");
        this.producerConfig = new ProducerConfig(properties);
        this.time = new SystemTime();
        this.clientId = producerConfig.getString(ProducerConfig.CLIENT_ID_CONFIG,
                "producer-" + PRODUCER_CLIENT_ID_SEQUENCE.getAndIncrement());

        this.partitioner = producerConfig.getConfiguredInstance(ProducerConfig.PARTITIONER_CLASS_CONFIG,
                DefaultPartitioner.class);
        // Back-off between retries of a failed send.
        long retryBackoffMs = producerConfig.getLong(ProducerConfig.RETRY_BACKOFF_MS_CONFIG, 100L);

        this.metadata = new Metadata(retryBackoffMs,
                // Periodic metadata refresh interval; default 5 minutes.
                producerConfig.getLong(ProducerConfig.METADATA_MAX_AGE_CONFIG, 5 * 60 * 1000L));

        // Maximum size of a single request; default 1 MB.
        this.maxRequestSize = producerConfig.getInt(ProducerConfig.MAX_REQUEST_SIZE_CONFIG,
                1 * 1024 * 1024);
        // Upper bound on the record buffer memory; default 32 MB.
        this.totalMemorySize = producerConfig.getLong(ProducerConfig.BUFFER_MEMORY_CONFIG,
                32 * 1024 * 1024L);
        this.compressionType = producerConfig.getString(ProducerConfig.COMPRESSION_TYPE_CONFIG,
                "none");

        // How long a send may block once the buffer is full; default 60 s.
        this.maxBlockTimeMs = producerConfig.getLong(ProducerConfig.MAX_BLOCK_MS_CONFIG, 60 * 1000L);

        // Timeout for an in-flight request before it is considered failed; default 30 s.
        this.requestTimeoutMs = producerConfig.getInt(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG, 30 * 1000);
        this.accumulator = new RecordAccumulator(
                producerConfig.getInt(ProducerConfig.BATCH_SIZE_CONFIG, 16 * 1024),
                totalMemorySize,
                compressionType,
                producerConfig.getLong(ProducerConfig.LINGER_MS_CONFIG, 0),
                retryBackoffMs,
                time
        );

        List<InetSocketAddress> addresses = new ArrayList<>();
        for (String address : producerConfig.getList(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG)) {
            // BUGFIX: honor an explicit "host:port" entry instead of always forcing port 9092.
            addresses.add(parseBootstrapAddress(address));
        }
        this.metadata.update(Cluster.bootstrap(addresses), time.milliseconds());

        ChannelBuilder channelBuilder = ChannelBuilders.create();
        // Read once so the ordering guarantee below uses the same limit as the client.
        int maxInFlightRequests =
                producerConfig.getInt(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, 5);
        NetworkClient client = new NetworkClient(
                new Selector(producerConfig.getLong(ProducerConfig.CONNECTIONS_MAX_IDLE_MS_CONFIG,
                        9 * 60 * 1000),
                        time,
                        channelBuilder),
                this.metadata,
                clientId,
                // A broker may have at most this many outstanding (unacknowledged) requests.
                maxInFlightRequests,
                // Back-off before re-attempting a broker connection.
                producerConfig.getLong(ProducerConfig.RECONNECT_BACKOFF_MS_CONFIG, 50L),
                // TCP send buffer size.
                producerConfig.getInt(ProducerConfig.SEND_BUFFER_CONFIG, 128 * 1024),
                // TCP receive buffer size.
                producerConfig.getInt(ProducerConfig.RECEIVE_BUFFER_CONFIG, 32 * 1024),
                this.requestTimeoutMs, time);

        this.sender = new Sender(client,
                this.metadata,
                this.accumulator,
                // Message ordering can only be guaranteed with one in-flight request at a time.
                maxInFlightRequests == 1,
                this.maxRequestSize,
                (short) parseAcks(producerConfig.getString(ProducerConfig.ACKS_CONFIG, "1")),
                producerConfig.getInt(ProducerConfig.RETRIES_CONFIG, 0),
                this.time,
                clientId,
                this.requestTimeoutMs);

        String ioThreadName = "kafka-producer-network-thread" + (clientId.isEmpty() ? "" : " | " + clientId);
        this.ioThread = new KafkaThread(ioThreadName, this.sender, true);
        this.ioThread.start();
        log.info("Kafka producer started");
    }

    /**
     * Parses one bootstrap server entry of the form {@code host[:port]}.
     * If no (numeric) port suffix is present the whole string is treated as a
     * host name and {@link #DEFAULT_BROKER_PORT} is used.
     *
     * @param address a single entry from {@code bootstrap.servers}
     * @return the unresolved socket address for the broker
     */
    private static InetSocketAddress parseBootstrapAddress(String address) {
        String trimmed = address.trim();
        int colon = trimmed.lastIndexOf(':');
        if (colon > 0) {
            try {
                int port = Integer.parseInt(trimmed.substring(colon + 1));
                return new InetSocketAddress(trimmed.substring(0, colon), port);
            } catch (NumberFormatException ignored) {
                // Suffix after ':' is not numeric; fall through and treat the whole
                // string as a bare host name on the default port.
            }
        }
        return new InetSocketAddress(trimmed, DEFAULT_BROKER_PORT);
    }

    /**
     * Converts the {@code acks} config value to its numeric form:
     * {@code "all"} maps to {@code -1}; anything else must parse as an integer.
     *
     * @param acksString the raw configured value
     * @return the numeric acks value
     * @throws IllegalArgumentException if the value is neither {@code "all"} nor an integer
     */
    private static int parseAcks(String acksString) {
        String normalized = acksString.trim();
        if (normalized.equalsIgnoreCase("all")) {
            return -1;
        }
        try {
            return Integer.parseInt(normalized);
        } catch (NumberFormatException e) {
            // BUGFIX: fail fast instead of silently defaulting to acks=0 (fire-and-forget),
            // which would weaken durability on a simple typo.
            throw new IllegalArgumentException(
                    "Invalid value for " + ProducerConfig.ACKS_CONFIG + ": " + acksString, e);
        }
    }

    @Override
    public void send(ProducerRecord record) {
        // TODO: not yet implemented.
    }

    @Override
    public void send(ProducerRecord record, Callback callback) {
        // TODO: not yet implemented.
    }

    @Override
    public void close() {
        // TODO: not yet implemented (should stop the sender and join the I/O thread).
    }
}
