#include "kafka_consumer.h"

#include <cstdint>
#include <cstdio>
#include <memory>
#include <string>

#include <librdkafka/rdkafkacpp.h>

using namespace std;

// Construct a consumer bound to a broker list and consumer group.
// No connection is made here: the rdkafka handle is created lazily in
// init(), and the topic handle in subscribe().
KafkaConsumer::KafkaConsumer(const std::string &brokers, const std::string &groupId) : m_brokers(brokers), m_groupId(groupId)
{
    m_eventcb   = new RdKafkaEventCb();             // owned; released in the destructor
    m_consumer  = nullptr;                          // created by init()
    m_topic     = nullptr;                          // created by subscribe()
    m_partition = RdKafka::Topic::PARTITION_UA;     // "unassigned" until subscribe()
}

// Tear down in reverse order of construction: stop/release the topic
// first (unsubscribe), then the consumer handle, then the event callback.
KafkaConsumer::~KafkaConsumer()
{
    if (m_topic != nullptr) {
        unsubscribe();
    }
    // delete on a null pointer is a no-op, so no guards are needed here.
    delete m_consumer;
    delete m_eventcb;
}

bool KafkaConsumer::init(bool               with_sasl,
                         const std::string &protocol,
                         const std::string &mechanisms,
                         const std::string &username,
                         const std::string &password,
                         bool               with_ssl,
                         const std::string &ca_location)
{
    string                    errstr;
    shared_ptr<RdKafka::Conf> conf(RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL));
    if (conf->set("bootstrap.servers", m_brokers, errstr) != RdKafka::Conf::CONF_OK) {
        fprintf(stderr, "kafka consumer set bootstrap.servers failed: %s\n", errstr.c_str());
        return false;
    }

    // 添加SASL认证
    if (with_sasl) {
        if (conf->set("security.protocol", protocol, errstr) != RdKafka::Conf::CONF_OK) {
            fprintf(stderr, "kafka consumer set security.protocol failed: %s\n", errstr.c_str());
            return false;
        }
        if (conf->set("sasl.mechanisms", mechanisms, errstr) != RdKafka::Conf::CONF_OK) {
            fprintf(stderr, "kafka consumer set sasl.mechanisms failed: %s\n", errstr.c_str());
            return false;
        }
        if (conf->set("sasl.username", username, errstr) != RdKafka::Conf::CONF_OK) {
            fprintf(stderr, "kafka consumer set sasl.username failed: %s\n", errstr.c_str());
            return false;
        }
        if (conf->set("sasl.password", password, errstr) != RdKafka::Conf::CONF_OK) {
            fprintf(stderr, "kafka consumer set sasl.password failed: %s\n", errstr.c_str());
            return false;
        }
    }
    if (with_ssl) {
        if (conf->set("ssl.ca.location", ca_location, errstr) != RdKafka::Conf::CONF_OK) {
            fprintf(stderr, "kafka consumer set ssl.ca.location failed: %s\n", errstr.c_str());
            return false;
        }
    }
    if (conf->set("group.id", m_groupId, errstr) != RdKafka::Conf::CONF_OK) {
        fprintf(stderr, "kafka consumer set group.id failed : %s\n", errstr.c_str());
        return false;
    }
    if (conf->set("event_cb", m_eventcb, errstr) != RdKafka::Conf::CONF_OK) {
        fprintf(stderr, "kafka consumer set eventcb failed : %s\n", errstr.c_str());
        return false;
    }
    if (conf->set("socket.keepalive.enable", "true", errstr) != RdKafka::Conf::CONF_OK) {
        fprintf(stderr, "kafka consumer set socket.keepalive.enable failed : %s\n", errstr.c_str());
        return false;
    }
    if (conf->set("enable.partition.eof", "false", errstr) != RdKafka::Conf::CONF_OK) {
        fprintf(stderr, "kafka consumer set enable.partition.eof failed : %s\n", errstr.c_str());
        return false;
    }

    m_consumer = RdKafka::Consumer::create(conf.get(), errstr);
    if (!m_consumer) {
        fprintf(stderr, "create kafka consumer failed : %s\n", errstr.c_str());
        return false;
    }
    return true;
}

// Subscribe with the default offset policy "stored" (resume from the
// offset last committed for this consumer group).
bool KafkaConsumer::subscribe(const std::string &topic_name, int32_t partition)
{
    const std::string defaultOffsetPolicy = "stored";
    return subscribe(topic_name, partition, defaultOffsetPolicy);
}

bool KafkaConsumer::subscribe(const std::string &topic_name, int32_t partition, const std::string &offset)
{
    if (m_topic != nullptr) {
        fprintf(stderr, "this consumer already subscribed!\n");
        return false;
    }
    m_partition = partition;

    string                    errstr;
    shared_ptr<RdKafka::Conf> conf(RdKafka::Conf::create(RdKafka::Conf::CONF_TOPIC));
    if (!conf) {
        fprintf(stderr, "create kafka topic config failed!\n");
        return false;
    }

    m_topic = RdKafka::Topic::create(m_consumer, topic_name.c_str(), conf.get(), errstr);
    if (!m_topic) {
        fprintf(stderr, "create kafka topic[%s] failed: %s\n", topic_name.c_str(), errstr.c_str());
        return false;
    }

    int64_t offsetVal = RdKafka::Topic::OFFSET_INVALID;
    if (!offset.compare("begin")) {
        offsetVal = RdKafka::Topic::OFFSET_BEGINNING;
    } else if (!offset.compare("end")) {
        offsetVal = RdKafka::Topic::OFFSET_END;
    } else {
        offsetVal = RdKafka::Topic::OFFSET_STORED;
    }
    RdKafka::ErrorCode errCode = m_consumer->start(m_topic, m_partition, offsetVal);
    if (errCode != RdKafka::ERR_NO_ERROR) {
        fprintf(stderr, "consumer->start() error: %d\n", errCode);
        if (m_topic) {
            delete m_topic;
            m_topic = nullptr;
        }
        return false;
    }
    return true;
}

bool KafkaConsumer::unsubscribe()
{
    if (!m_topic) {
        return true;
    }

    RdKafka::ErrorCode errCode = m_consumer->stop(m_topic, m_partition);
    if (m_topic) {
        delete m_topic;
        m_topic = nullptr;
    }
    if (errCode != RdKafka::ERR_NO_ERROR) {
        fprintf(stderr, "consumer->stop() error: %d\n", errCode);
        return false;
    }
    return true;
}

std::shared_ptr<RdKafka::Message> KafkaConsumer::consume(int timeout_ms)
{
    RdKafka::Message *msg = m_consumer->consume(m_topic, m_partition, timeout_ms);
    if (msg != nullptr && msg->err() == RdKafka::ERR_NO_ERROR) {
        return shared_ptr<RdKafka::Message>(msg);
    }

    if (msg->err() != RdKafka::ERR__TIMED_OUT) {
        fprintf(stderr, "consume failed, %d: %s\n", msg->err(), msg->errstr().c_str());
    }
    return nullptr;
}
