#include "pch.h"

#include "KafkaClient.h"

/* librdkafka log severities (syslog-style values delivered to the log_cb
 * registered via rd_kafka_conf_set_log_cb). constexpr instead of macros:
 * typed, scoped, debugger-visible — values are identical. */
constexpr int RDKAFKA_LOG_EMERG = 0;
constexpr int RDKAFKA_LOG_ALERT = 1;
constexpr int RDKAFKA_LOG_CRIT = 2;
constexpr int RDKAFKA_LOG_ERR = 3;
constexpr int RDKAFKA_LOG_WARNING = 4;
constexpr int RDKAFKA_LOG_NOTICE = 5;
constexpr int RDKAFKA_LOG_INFO = 6;
constexpr int RDKAFKA_LOG_DEBUG = 7;

/* Reset the client to a clean, idle baseline and record the connection
 * parameters. All librdkafka handles are cleared here and created lazily
 * in Start().
 * NOTE(review): calling Init() while a session is active would drop live
 * handles without releasing them — callers should Stop() first; confirm. */
void KafkaClient::Init(const std::string &uri, const std::string &topic) {
    brokers_ = uri;
    topic_ = topic;

    /* No handles exist yet; everything is allocated by Start(). */
    rk_ = nullptr;
    rkt_ = nullptr;
    conf_ = nullptr;
    topic_conf_ = nullptr;
    message_ = nullptr;

    state_ = STATE::IDLE;
}

bool KafkaClient::Start() {

    char signal[16];
    char errstr[512];

    state_ = (type_ == TYPE::CONSUMER) ? STATE::PRE_CONSUME : STATE::PRE_PRODUCE;

    SPDLOG_INFO("rd_kafka_conf_new");
    conf_ = rd_kafka_conf_new();
    if (!conf_) {
        SPDLOG_ERROR("rd_kafka_conf_new.error");
        Stop();
        return false;
    }

    SPDLOG_INFO("rd_kafka_conf_set_log_cb");
    /* Set logger */
    rd_kafka_conf_set_log_cb(conf_, logger);

    /* Quick termination */
    //   snprintf(signal, sizeof(signal), "%i", SIGIO);
    //   rd_kafka_conf_set(conf_, "internal.termination.signal", signal, nullptr, 0);

    /* Topic configuration */
    SPDLOG_INFO("rd_kafka_topic_conf_new");
    topic_conf_ = rd_kafka_topic_conf_new();
    if (!topic_conf_) {
        SPDLOG_ERROR("rd_kafka_topic_conf_new.error");
        Stop();
        return false;
    }

    if (type_ == TYPE::CONSUMER) {
        rd_kafka_conf_set(conf_, "enable.partition.eof", "true", nullptr, 0);
        rd_kafka_conf_set(conf_, "group.id", "kafka_test_group", nullptr, 0);

        /* Create Kafka consumer */
        if (!(rk_ = rd_kafka_new(RD_KAFKA_CONSUMER, conf_, errstr, sizeof(errstr)))) {
            SPDLOG_ERROR("Failed to create new consumer: {}", errstr);
            Stop();
            return false;
        }
    } else {
        /* Set up a mesage delivery report callback.
         * It will be called once for each message, either on successful
         * delivery to brokers, or upon failure to deliver to brokers */
        SPDLOG_INFO("rd_kafka_conf_set_dr_msg_cb");
        rd_kafka_conf_set_dr_msg_cb(conf_, KafkaClient::msg_delivered);

        rd_kafka_conf_set(conf_, "debug", "all", nullptr, 0);

        /* Create Kafka producer */
        SPDLOG_INFO("rd_kafka_new");
        if (!(rk_ = rd_kafka_new(RD_KAFKA_PRODUCER, conf_, errstr, sizeof(errstr)))) {
            SPDLOG_ERROR("Failed to create new producer: {}", errstr);
            Stop();
            return false;
        }
    }

    /* Add brokers */
    SPDLOG_INFO("rd_kafka_brokers_add");
    if (rd_kafka_brokers_add(rk_, brokers_.c_str()) == 0) {
        SPDLOG_ERROR("No valid brokers specified");
        Stop();
        return false;
    }

    /* Create topic */
    SPDLOG_INFO("rd_kafka_topic_new");
    rkt_ = rd_kafka_topic_new(rk_, topic_.c_str(), topic_conf_);
    if (!rkt_) {
        SPDLOG_ERROR("rd_kafka_topic_new.error");
        Stop();
        return false;
    }

    /* Now topic conf owned by topic */
    topic_conf_ = nullptr;

    if (type_ == TYPE::CONSUMER) {
        /* Start consuming */
        int64_t start_offset = RD_KAFKA_OFFSET_STORED;
        if (rd_kafka_consume_start(rkt_, partition_, start_offset) == -1) {
            SPDLOG_ERROR("Failed to start consuming");
            Stop();
            return false;
        }
        state_ = STATE::CONSUME;
    } else {
        state_ = STATE::PRODUCE;
    }

    return true;
}

/* Tear down the active session: stop consumption (consumer), flush pending
 * delivery reports (producer), then destroy topic, handle and any retained
 * message. Idempotent: a second call is a no-op once state_ is IDLE.
 * Always returns true. */
bool KafkaClient::Stop() {
    if (state_ == STATE::IDLE) {
        SPDLOG_INFO("Already stopped");
        return true;
    }

    if (state_ == STATE::CONSUME) {
        /* Stop consuming */
        if (rkt_) {
            rd_kafka_consume_stop(rkt_, partition_);
        }
    } else {
        if (rk_) {
            /* Poll once (non-blocking) to serve outstanding delivery reports */
            rd_kafka_poll(rk_, 0);
            /* Wait for messages to be delivered */
            //   while (!instant && rd_kafka_outq_len(rk_) > 0) {
            //     rd_kafka_poll(rk_, 100);
            //   }
        }
    }

    /* Destroy topic; null the pointer so nothing can touch the dead handle. */
    if (rkt_) {
        rd_kafka_topic_destroy(rkt_);
        rkt_ = nullptr;
    }

    /* Destroy handle */
    if (rk_) {
        rd_kafka_destroy(rk_);
        rk_ = nullptr;
    }

    /* Destroy retained message, if any */
    if (message_) {
        rd_kafka_message_destroy(message_);
        message_ = nullptr;
    }

    /* NOTE(review): conf_/topic_conf_ are intentionally not freed here —
     * after a successful Start() they are owned by librdkafka; confirm the
     * Start() failure paths release them before reaching this point. */

    state_ = STATE::IDLE;

    return true;
}

bool KafkaClient::Publish(const std::string &message) {
    if (state_ != STATE::PRODUCE) {
        return false;
    }

    /* Send/Produce message. */
    if (rd_kafka_produce(
            rkt_, partition_, RD_KAFKA_MSG_F_COPY, (void *)message.c_str(), message.size(), nullptr, 0,
            nullptr)
        == -1) {
        SPDLOG_ERROR("Failed to produce to topic: {}  partition: {}", rd_kafka_topic_name(rkt_), partition_);
        return false;
    }

    /* Poll to handle delivery reports */
    rd_kafka_poll(rk_, 0);
    return true;
}

/* Kafka logger callback (optional) */
/* Kafka logger callback (optional): routes librdkafka log lines to spdlog
 * at a severity matching the syslog-style `level`.
 * Fix: the original format dropped `fac` (the librdkafka facility string)
 * and printed the instance name under the "fac:" label instead; both are
 * now logged, as the commented-out legacy LOG lines intended. */
void KafkaClient::logger(const rd_kafka_t *rk, int level, const char *fac, const char *buf) {
    /* Instance name, when a handle accompanies the log line. */
    const std::string name = rk ? (": " + std::string(rd_kafka_name(rk))) : "";

    if (level <= RDKAFKA_LOG_ERR) {
        SPDLOG_ERROR("log: fac: {}{} : {}", fac, name, buf);
    } else if (level == RDKAFKA_LOG_NOTICE) {
        SPDLOG_WARN("log: fac: {}{} : {}", fac, name, buf);
    } else if (level == RDKAFKA_LOG_INFO) {
        SPDLOG_INFO("log: fac: {}{} : {}", fac, name, buf);
    } else {
        SPDLOG_DEBUG("log: fac: {}{} : {}", fac, name, buf);
    }
}

/* Extract the payload from a consumed message.
 * On success, *p_payload/*p_len point INTO `msg` (no copy): they are only
 * valid until the message is destroyed. On any error both are cleared and
 * false is returned; partition EOF is reported but is not a hard failure. */
bool KafkaClient::msg_consume(rd_kafka_message_t *msg, uint8_t **p_payload, size_t *p_len) {
    if (msg->err) {
        *p_payload = nullptr;
        *p_len = 0;
        if (msg->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) {
            /* End of partition: no payload, but the consumer may keep polling. */
            SPDLOG_ERROR(
                "Consumer reached end of {} message queue at offset {}", rd_kafka_topic_name(msg->rkt),
                msg->offset);
            return false;
        }

        SPDLOG_ERROR(
            "Consume error for topic: {}   offset {} {}", rd_kafka_topic_name(msg->rkt), msg->offset,
            rd_kafka_message_errstr(msg));

        if (msg->err == RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION
            || msg->err == RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC) {
            /* "%%" was a printf-escape leftover; spdlog/fmt prints '%' as-is. */
            SPDLOG_ERROR("% Exit read process");
        }

        return false;
    }

    /* Success: expose the message payload in place. */
    *p_payload = reinterpret_cast<uint8_t *>(msg->payload);
    *p_len = msg->len;

    return true;
}

/* Message delivery report callback using the richer rd_kafka_message_t object. */
void KafkaClient::msg_delivered(rd_kafka_t *rk, const rd_kafka_message_t *msg, void *opaque) {
    if (msg->err) {
        // LOGE(Kafka) << "%% Message delivery failed: " << rd_kafka_err2str(msg->err);

        SPDLOG_ERROR("%% Message delivery failed: {}", rd_kafka_err2str(msg->err));
    }
}