package org.xxd.kafka.clients.producer.internals;

import com.sun.javafx.font.Metrics;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.xxd.kafka.clients.KafkaClient;
import org.xxd.kafka.clients.common.Cluster;
import org.xxd.kafka.clients.common.Metadata;
import org.xxd.kafka.clients.common.util.Time;

/**
 * @author: XiaoDong.Xie
 * @create: 2020-09-27 10:36
 * @description: A background thread dedicated to sending NIO requests to the brokers.
 */
public class Sender implements Runnable {

    private static final Logger log = LoggerFactory.getLogger(Sender.class);

    /* the state of each nodes connection */
    private final KafkaClient client;

    /* the record accumulator that batches records */
    private final RecordAccumulator accumulator;

    /* the metadata for the client */
    private final Metadata metadata;

    /* the flag indicating whether the producer should guarantee the message order on the broker or not. */
    private final boolean guaranteeMessageOrder;

    /* the maximum request size to attempt to send to the server */
    private final int maxRequestSize;

    /* the number of acknowledgements to request from the server */
    private final short acks;

    /* the number of times to retry a failed request before giving up */
    private final int retries;

    /* the clock instance used for getting the time */
    private final Time time;

    /* true while the sender thread is still running; flipped to false by initiateClose()/forceClose() */
    private volatile boolean running;

    /* true when the caller wants to ignore all unsent/inflight messages and force close. */
    private volatile boolean forceClose;

    /* param clientId of the client */
    private final String clientId;

    /* the max time to wait for the server to respond to the request */
    private final int requestTimeout;


    /**
     * Creates the sender thread.
     *
     * @param client                the network client used to talk to the brokers
     * @param metadata              cluster metadata shared with the producer
     * @param accumulator           the accumulator that batches records per partition
     * @param guaranteeMessageOrder whether broker-side message ordering must be preserved
     * @param maxRequestSize        the maximum request size to attempt to send to the server
     * @param acks                  the number of acknowledgements to request from the server
     * @param retries               the number of times to retry a failed request before giving up
     * @param time                  the clock instance used for getting the time
     * @param clientId              the client id reported to the broker
     * @param requestTimeout        the max time (ms) to wait for the server to respond to a request
     */
    public Sender(KafkaClient client,
                  Metadata metadata,
                  RecordAccumulator accumulator,
                  boolean guaranteeMessageOrder,
                  int maxRequestSize,
                  short acks,
                  int retries,
                  Time time,
                  String clientId,
                  int requestTimeout) {
        this.client = client;
        this.accumulator = accumulator;
        this.metadata = metadata;
        this.guaranteeMessageOrder = guaranteeMessageOrder;
        this.maxRequestSize = maxRequestSize;
        this.running = true;
        this.acks = acks;
        this.retries = retries;
        this.time = time;
        this.clientId = clientId;
        this.requestTimeout = requestTimeout;
    }

    /**
     * The main run loop for the sender thread. Repeatedly drives {@link #run(long)} until
     * {@link #initiateClose()} or {@link #forceClose()} is called, then performs the shutdown
     * sequence exactly once: drain any remaining records (unless force-closing), abort
     * incomplete batches on force close, and finally close the network client.
     *
     * BUGFIX: the shutdown sequence used to live INSIDE the {@code while (running)} loop,
     * which closed the client and logged shutdown after every single iteration. It now runs
     * once, after the main loop exits.
     */
    @Override
    public void run() {
        log.info("Kafka producer IO 线程 正在启动");

        // Main loop: run until initiateClose()/forceClose() flips `running` to false.
        while (running) {
            try {
                run(time.milliseconds());
            } catch (Exception e) {
                log.error("Uncaught error in kafka producer I/O thread: ", e);
            }
        }

        log.info("Beginning shutdown of Kafka producer I/O thread, sending remaining records.");

        // okay we stopped accepting requests but there may still be
        // requests in the accumulator or waiting for acknowledgment,
        // wait until these are completed.
        // If the producer is being closed gracefully, keep running until all buffered
        // data has been sent and acknowledged before actually shutting down.
        while (!forceClose && (this.accumulator.hasUnsent() || this.client.inFlightRequestCount() > 0)) {
            try {
                run(time.milliseconds());
            } catch (Exception e) {
                log.error("Uncaught error in kafka producer I/O thread: ", e);
            }
        }

        if (forceClose) {
            // We need to fail all the incomplete batches and wake up the threads waiting on
            // the futures.
            this.accumulator.abortIncompleteBatches();
        }

        try {
            this.client.close();
        } catch (Exception e) {
            log.error("Failed to close network client", e);
        }

        log.debug("Shutdown of Kafka producer I/O thread has completed.");
    }

    /**
     * Start closing the sender gracefully: stop the main loop, but let the shutdown
     * sequence in {@link #run()} drain all unsent/inflight records before the client closes.
     * This method needs to be called before interrupting the sender thread.
     */
    public void initiateClose() {
        this.running = false;
    }

    /**
     * Close the sender forcefully: stop the main loop AND skip the drain phase, aborting
     * any incomplete batches instead of waiting for them to be sent.
     */
    public void forceClose() {
        this.forceClose = true;
        this.running = false;
    }

    /**
     * One iteration of the sender's work: fetch the current cluster view, ask the
     * accumulator which partitions have batches ready to send, and request a metadata
     * update if any batch's leader is unknown.
     *
     * @param now the current POSIX time in milliseconds
     */
    private void run(long now) {
        Cluster cluster = metadata.fetch();
        RecordAccumulator.ReadyCheckResult result = this.accumulator.ready(cluster, now);

        // If we don't know the leader for some ready partitions, force a metadata refresh
        // so the next iteration can route those batches.
        if (result.unknownLeadersExist) {
            this.metadata.requestUpdate();
        }



    }
}
