// g++ test_kafka.cpp kafka_*.cpp -lrdkafka++
#include <librdkafka/rdkafkacpp.h>

#include <unistd.h>

#include <cstdio>
#include <cstring>
#include <ctime>
#include <iostream>
#include <string>
#include <thread>

// Smoke test for the legacy "simple" API: a detached thread consumes
// partition 0 of topic "test" while the main thread produces the current
// timestamp once per second.  Both loops run forever, so the trailing
// cleanup only executes if a loop is broken while debugging.
void test_simple()
{
    std::string brokers = "localhost:9092";
    std::string topic_name = "test";

    std::thread([brokers, topic_name]()
                {
                    std::string error;
                    auto conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
                    conf->set("bootstrap.servers", brokers, error);
                    //conf->set("socket.keepalive.enable", "true", error);

                    auto consumer = RdKafka::Consumer::create(conf, error);
                    // create() copies the configuration; the caller keeps
                    // ownership, so free it on the failure path too
                    // (BUGFIX: conf was leaked when create() failed).
                    delete conf;
                    if(!consumer)
                    {
                        std::cout << "create consumer failed:" << error << std::endl;
                        return;
                    }

                    std::cout << "consumer created" << std::endl;

                    auto topic_conf = RdKafka::Conf::create(RdKafka::Conf::CONF_TOPIC);
                    auto topic = RdKafka::Topic::create(consumer, topic_name, topic_conf, error);
                    delete topic_conf; // BUGFIX: Topic::create() does not take ownership; was leaked
                    if(!topic)
                    {
                        std::cout << "create topic failed:" << error << std::endl;
                        delete consumer; // BUGFIX: consumer was leaked on this path
                        return;
                    }

                    std::cout << "topic created" << std::endl;

                    auto ret = consumer->start(topic, 0, RdKafka::Topic::OFFSET_BEGINNING);
                    if(ret != RdKafka::ERR_NO_ERROR)
                    {
                        std::cout << "start consumer failed:" << ret << std::endl;
                        delete topic;    // BUGFIX: was leaked on this path
                        delete consumer; // BUGFIX: was leaked on this path
                        return;
                    }

                    while(true)
                    {
                        consumer->poll(0);

                        // consume() always returns a Message object; errors
                        // are reported through msg->err().
                        auto msg = consumer->consume(topic, 0, 200);

                        switch(msg->err())
                        {
                        case RdKafka::ERR_NO_ERROR:
                        {
                            std::cout << "Read msg at offset " << msg->offset() << std::endl;
                            if (msg->key())
                            {
                                std::cout << "Key: " << *msg->key() << std::endl;
                            }
                            printf("%.*s\n", static_cast<int>(msg->len()), static_cast<const char *>(msg->payload()));
                        }
                        break;
                        case RdKafka::ERR__TIMED_OUT:
                        {
                            // Partition drained: further reads report
                            // RdKafka::ERR__TIMED_OUT until new data arrives.
                            //std::cout << "error:" << msg->errstr() << std::endl;
                        }
                        break;
                        case RdKafka::ERR__PARTITION_EOF:
                        break;
                        default:
                            std::cout << "error:" << msg->errstr() <<msg->err() << std::endl;
                            break;
                        }

                        delete msg;
                    }

                    // Unreachable while the loop above runs forever.
                    // BUGFIX: stop the partition that was started (0), not PARTITION_UA.
                    consumer->stop(topic, 0);

                    delete topic;
                    delete consumer; })
        .detach();

    std::string error;
    auto conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
    conf->set("bootstrap.servers", brokers, error);

    auto producer = RdKafka::Producer::create(conf, error);
    delete conf; // create() copied the config (BUGFIX: was leaked on failure)
    if (!producer)
    {
        std::cout << "create producer failed:" << error << std::endl;
        return;
    }

    time_t t;
    while (true)
    {
        time(&t);
        std::string timestr = ctime(&t);

        producer->produce(topic_name, RdKafka::Topic::PARTITION_UA, RdKafka::Producer::MSG_COPY, (void *)timestr.c_str(), timestr.length(), NULL, 0, 0, NULL);

        sleep(1);
    }

    delete producer; // unreachable while the produce loop runs forever
}

namespace producer_ts
{
    static bool prun = true;       // cleared by p_event_cb when all brokers are reported down
    static bool pexit_eof = false; // unused here; kept for symmetry with the consumer namespace

    // Prints every "name = value" pair of a configuration object.
    // Conf::dump() returns a flat list of alternating name/value strings.
    void p_dump_config(RdKafka::Conf *conf)
    {
        auto dump = conf->dump(); // caller owns the returned list

        printf("config dump(%d):\n", (int32_t)dump->size());

        // entries come in (name, value) pairs, so advance two at a time
        for (auto it = dump->begin(); it != dump->end();)
        {
            std::string name = *it++;
            std::string value = *it++;
            printf("%s = %s\n", name.c_str(), value.c_str());
        }

        printf("---------------------------------------------\n");

        delete dump; // BUGFIX: the dump list was leaked
    }

    // Receives librdkafka events (errors, stats, logs); clears `prun` when
    // every broker is reported down so the producer loop can stop.
    class p_event_cb : public RdKafka::EventCb
    {
    public:
        void event_cb(RdKafka::Event &event) override
        {
            const auto kind = event.type();

            if (kind == RdKafka::Event::EVENT_ERROR)
            {
                std::cerr << "ERROR (" << RdKafka::err2str(event.err()) << "): " << event.str() << std::endl;
                if (event.err() == RdKafka::ERR__ALL_BROKERS_DOWN)
                    prun = false;
            }
            else if (kind == RdKafka::Event::EVENT_STATS)
            {
                std::cerr << "\"STATS\": " << event.str() << std::endl;
            }
            else if (kind == RdKafka::Event::EVENT_LOG)
            {
                fprintf(stderr, "LOG-%i-%s: %s\n", event.severity(), event.fac().c_str(), event.str().c_str());
            }
            else
            {
                std::cerr << "EVENT " << kind << " (" << RdKafka::err2str(event.err()) << "): " << event.str() << std::endl;
            }
        }
    };

    // Deterministic partitioner: djb2 hash of the message key modulo the
    // partition count.
    class p_hash_partitioner_cb : public RdKafka::PartitionerCb
    {
    public:
        int32_t partitioner_cb(const RdKafka::Topic *topic, const std::string *key, int32_t partition_cnt, void *msg_opaque) override
        {
            // BUGFIX: messages produced without a key pass key == NULL;
            // hash an empty string instead of dereferencing a null pointer.
            if (!key)
                return djb_hash("", 0) % partition_cnt;
            return djb_hash(key->c_str(), key->size()) % partition_cnt;
        }

    private:
        // djb2 string hash: hash = hash * 33 + c
        static inline unsigned int djb_hash(const char *str, size_t len)
        {
            unsigned int hash = 5381;
            for (size_t i = 0; i < len; i++)
                hash = ((hash << 5) + hash) + str[i];
            return hash;
        }
    };

    // Delivery-report hook: logs size, error text and key of each message
    // once the broker has acknowledged (or rejected) it.
    class p_delivery_report_cb : public RdKafka::DeliveryReportCb
    {
    public:
        void dr_cb(RdKafka::Message &message) override
        {
            const char *key_text = message.key() ? message.key()->c_str() : "";
            printf("message delivery %d bytes, error:%s, key: %s\n",
                   (int32_t)message.len(), message.errstr().c_str(), key_text);
        }
    };

    // Drives the legacy producer API: publishes the current timestamp once a
    // second to topic "test" (partition chosen below) until an event callback
    // clears `prun` (e.g. all brokers down), then drains the delivery queue
    // and shuts down.
    void producer_test()
    {
        printf("producer test\n");

        auto partition = RdKafka::Topic::PARTITION_UA;

        printf("input brokers list(127.0.0.1:9092;127.0.0.1:9093;127.0.0.1:9094):\n");
        std::string broker_list = "127.0.0.1:9092";

        printf("input partition:");

        // std::cin >> partition;
        partition = 0;

        // config
        auto global_conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
        auto topic_conf = RdKafka::Conf::create(RdKafka::Conf::CONF_TOPIC);

        p_hash_partitioner_cb hash_partitioner;
        p_event_cb event_cb;
        p_delivery_report_cb delivery_cb;

        std::string err_string;
        if (topic_conf->set("partitioner_cb", &hash_partitioner, err_string) != RdKafka::Conf::CONF_OK)
        {
            printf("set partitioner_cb error: %s\n", err_string.c_str());
            delete global_conf; // BUGFIX: both confs were leaked on this path
            delete topic_conf;
            return;
        }

        global_conf->set("metadata.broker.list", broker_list, err_string);
        global_conf->set("event_cb", &event_cb, err_string);
        global_conf->set("dr_cb", &delivery_cb, err_string);
        // global_conf->set("retry.backoff.ms", "10", err_string);
        // global_conf->set("debug", "all", err_string);
        // global_conf->set("debug", "topic,msg", err_string);
        // global_conf->set("debug", "msg,queue", err_string);

        p_dump_config(global_conf);
        p_dump_config(topic_conf);

        // create producer; create() copies the conf, so we keep ownership
        RdKafka::Producer *producer = RdKafka::Producer::create(global_conf, err_string);
        delete global_conf; // BUGFIX: was never freed
        if (!producer)
        {
            printf("failed to create producer, %s\n", err_string.c_str());
            delete topic_conf;
            return;
        }

        printf("created producer %s\n", producer->name().c_str());

        std::string topic_name = "test";
        time_t t;

        while (true)
        {
            // create topic handle (topic_conf is reused, not owned by the topic)
            auto topic = RdKafka::Topic::create(producer, topic_name, topic_conf, err_string);

            if (!topic)
            {
                printf("try create topic[%s] failed, %s\n",
                       topic_name.c_str(), err_string.c_str());
                delete topic_conf; // BUGFIX: conf and producer were leaked on this path
                delete producer;
                return;
            }

            // BUGFIX: was `while (true)`, which made every line below this
            // loop unreachable; exit once the event callback clears `prun`.
            while (prun)
            {
                time(&t);
                std::string timestr = ctime(&t);

                std::string key = "kafka_test";

                RdKafka::ErrorCode res = producer->produce(topic, partition,
                                                           RdKafka::Producer::RK_MSG_COPY,
                                                           (char *)timestr.c_str(), timestr.size(), key.c_str(), key.size(), NULL);

                if (res != RdKafka::ERR_NO_ERROR)
                {
                    printf("produce failed, %s\n", RdKafka::err2str(res).c_str());
                }
                else
                {
                    printf("produced msg, bytes %d\n", (int32_t)timestr.size());
                }

                // do socket io / serve delivery-report callbacks
                producer->poll(0);

                printf("outq_len: %d\n", producer->outq_len());

                sleep(1);
            }

            delete topic;

            if (!prun)
            {
                break;
            }
        }

        prun = true;

        // wait for queued messages to be delivered before tearing down
        while (prun && producer->outq_len())
        {
            printf("wait for write queue( size %d) write finish\n", producer->outq_len());
            producer->poll(1000);
        }

        delete topic_conf; // BUGFIX: was never freed
        delete producer;
    }
}

namespace comsumer_ts
{
    static bool mrun = true;       // cleared by m_event_cb / msg_consume on fatal errors
    static bool mexit_eof = false; // when true, stop consuming at partition EOF

    // Prints every "name = value" pair of a configuration object.
    // Conf::dump() returns a flat list of alternating name/value strings.
    void m_dump_config(RdKafka::Conf *conf)
    {
        auto dump = conf->dump(); // caller owns the returned list

        printf("config dump(%d):\n", (int32_t)dump->size());

        // entries come in (name, value) pairs, so advance two at a time
        for (auto it = dump->begin(); it != dump->end();)
        {
            std::string name = *it++;
            std::string value = *it++;
            printf("%s = %s\n", name.c_str(), value.c_str());
        }

        printf("---------------------------------------------\n");

        delete dump; // BUGFIX: the dump list was leaked
    }

    // Receives librdkafka events (errors, stats, logs); clears `mrun` when
    // every broker is reported down so the consume loop can stop.
    class m_event_cb : public RdKafka::EventCb
    {
    public:
        void event_cb(RdKafka::Event &event) override
        {
            const auto kind = event.type();

            if (kind == RdKafka::Event::EVENT_ERROR)
            {
                std::cerr << "ERROR (" << RdKafka::err2str(event.err()) << "): " << event.str() << std::endl;
                if (event.err() == RdKafka::ERR__ALL_BROKERS_DOWN)
                    mrun = false;
            }
            else if (kind == RdKafka::Event::EVENT_STATS)
            {
                std::cerr << "\"STATS\": " << event.str() << std::endl;
            }
            else if (kind == RdKafka::Event::EVENT_LOG)
            {
                fprintf(stderr, "LOG-%i-%s: %s\n", event.severity(), event.fac().c_str(), event.str().c_str());
            }
            else
            {
                std::cerr << "EVENT " << kind << " (" << RdKafka::err2str(event.err()) << "): " << event.str() << std::endl;
            }
        }
    };

    // Deterministic partitioner: djb2 hash of the message key modulo the
    // partition count.
    class m_hash_partitioner_cb : public RdKafka::PartitionerCb
    {
    public:
        int32_t partitioner_cb(const RdKafka::Topic *topic, const std::string *key, int32_t partition_cnt, void *msg_opaque) override
        {
            // BUGFIX: messages produced without a key pass key == NULL;
            // hash an empty string instead of dereferencing a null pointer.
            if (!key)
                return djb_hash("", 0) % partition_cnt;
            return djb_hash(key->c_str(), key->size()) % partition_cnt;
        }

    private:
        // djb2 string hash: hash = hash * 33 + c
        static inline unsigned int djb_hash(const char *str, size_t len)
        {
            unsigned int hash = 5381;
            for (size_t i = 0; i < len; i++)
                hash = ((hash << 5) + hash) + str[i];
            return hash;
        }
    };

    // Handles one consumed message: prints offset/key/payload on success,
    // silently ignores timeouts, honours `mexit_eof` at partition EOF, and
    // clears `mrun` on any real error so the consume loop terminates.
    void msg_consume(RdKafka::Message *message, void *opaque)
    {
        const RdKafka::ErrorCode code = message->err();

        if (code == RdKafka::ERR__TIMED_OUT)
        {
            // no message arrived within the timeout window — nothing to do
        }
        else if (code == RdKafka::ERR_NO_ERROR)
        {
            /* Real message */
            std::cout << "Read msg at offset " << message->offset() << std::endl;
            if (message->key())
            {
                std::cout << "Key: " << *message->key() << std::endl;
            }
            printf("%.*s\n", static_cast<int>(message->len()), static_cast<const char *>(message->payload()));
        }
        else if (code == RdKafka::ERR__PARTITION_EOF)
        {
            /* Last message */
            if (mexit_eof)
            {
                mrun = false;
            }
        }
        else if (code == RdKafka::ERR__UNKNOWN_TOPIC || code == RdKafka::ERR__UNKNOWN_PARTITION)
        {
            std::cerr << "Consume failed: " << message->errstr() << std::endl;
            mrun = false;
        }
        else
        {
            /* Errors */
            std::cerr << "Consume failed: " << message->errstr() << std::endl;
            mrun = false;
        }
    }

    // Adapter so consume_callback() can route each message into msg_consume().
    class m_consumer_cb : public RdKafka::ConsumeCb
    {
    public:
        void consume_cb(RdKafka::Message &msg, void *opaque) override
        {
            RdKafka::Message *message = &msg;
            msg_consume(message, opaque);
        }
    };

    // Consumes topic "test" partition 0 from the beginning and prints each
    // message via msg_consume() until `mrun` is cleared by a fatal error.
    void consumer_test()
    {
        printf("conumer test\n");

        int32_t partition = RdKafka::Topic::PARTITION_UA;

        printf("input brokers list(127.0.0.1:9092;127.0.0.1:9093;127.0.0.1:9094):\n");
        std::string broker_list;

        // std::cin >> broker_list;
        broker_list = "127.0.0.1:9092";

        printf("inpute partition:");

        // std::cin >> partition;
        partition = 0;

        // config
        auto global_conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
        auto topic_conf = RdKafka::Conf::create(RdKafka::Conf::CONF_TOPIC);

        m_hash_partitioner_cb hash_partitioner;
        m_event_cb event_cb;
        m_consumer_cb consume_cb; // used by the commented consume_callback() variant below

        int64_t start_offset = RdKafka::Topic::OFFSET_BEGINNING;

        std::string err_string;
        if (topic_conf->set("partitioner_cb", &hash_partitioner, err_string) != RdKafka::Conf::CONF_OK)
        {
            printf("set partitioner_cb error: %s\n", err_string.c_str());
            delete global_conf; // BUGFIX: both confs were leaked on this path
            delete topic_conf;
            return;
        }

        global_conf->set("metadata.broker.list", broker_list, err_string);
        global_conf->set("event_cb", &event_cb, err_string);
        // global_conf->set("debug", "all", err_string);
        // global_conf->set("debug", "topic,msg", err_string);
        // global_conf->set("debug", "topic,msg,queue", err_string);

        m_dump_config(global_conf);
        m_dump_config(topic_conf);

        // create consumer; create() copies the conf, so we keep ownership
        RdKafka::Consumer *consumer = RdKafka::Consumer::create(global_conf, err_string);
        delete global_conf; // BUGFIX: was never freed
        if (!consumer)
        {
            printf("failed to create consumer, %s\n", err_string.c_str());
            delete topic_conf;
            return;
        }

        printf("created consumer %s\n", consumer->name().c_str());

        // create topic
        printf("input topic name:\n");

        std::string topic_name = "test";
        // std::cin >> topic_name;

        auto topic = RdKafka::Topic::create(consumer, topic_name, topic_conf, err_string);
        delete topic_conf; // BUGFIX: Topic::create() does not take ownership; was leaked
        if (!topic)
        {
            printf("try create topic[%s] failed, %s\n", topic_name.c_str(), err_string.c_str());
            delete consumer; // BUGFIX: was leaked on this path
            return;
        }

        // Start consumer for topic+partition at start offset
        auto resp = consumer->start(topic, partition, start_offset);
        if (resp != RdKafka::ERR_NO_ERROR)
        {
            printf("Failed to start consumer: %s\n",
                   RdKafka::err2str(resp).c_str());
            delete topic;    // BUGFIX: was leaked on this path
            delete consumer; // BUGFIX: was leaked on this path
            return;
        }

        while (mrun)
        {
            // consume->consume_callback(topic, partition, 1000, &consume_cb, NULL)
            // is the callback-driven alternative to the explicit consume below.

            // consume() always returns a Message; errors are carried in err()
            auto msg = consumer->consume(topic, partition, 2000);
            msg_consume(msg, NULL);
            delete msg;
        }

        // stop consumer and serve any outstanding callbacks
        consumer->stop(topic, partition);
        consumer->poll(1000);

        delete topic;
        delete consumer;
    }
}

namespace metadata_ts
{
    static bool run = false;

    // Receives librdkafka events (errors, stats, logs); clears `run` when
    // every broker is reported down.
    class my_event_cb : public RdKafka::EventCb
    {
    public:
        void event_cb(RdKafka::Event &event) override
        {
            const auto kind = event.type();

            if (kind == RdKafka::Event::EVENT_ERROR)
            {
                std::cerr << "ERROR (" << RdKafka::err2str(event.err()) << "): " << event.str() << std::endl;
                if (event.err() == RdKafka::ERR__ALL_BROKERS_DOWN)
                    run = false;
            }
            else if (kind == RdKafka::Event::EVENT_STATS)
            {
                std::cerr << "\"STATS\": " << event.str() << std::endl;
            }
            else if (kind == RdKafka::Event::EVENT_LOG)
            {
                fprintf(stderr, "LOG-%i-%s: %s\n",
                        event.severity(), event.fac().c_str(), event.str().c_str());
            }
            else
            {
                std::cerr << "EVENT " << kind << " (" << RdKafka::err2str(event.err()) << "): " << event.str() << std::endl;
            }
        }
    };

    // Deterministic partitioner: djb2 hash of the message key modulo the
    // partition count.
    class my_hash_partitioner_cb : public RdKafka::PartitionerCb
    {
    public:
        int32_t partitioner_cb(const RdKafka::Topic *topic, const std::string *key,
                               int32_t partition_cnt, void *msg_opaque) override
        {
            // BUGFIX: messages produced without a key pass key == NULL;
            // hash an empty string instead of dereferencing a null pointer.
            if (!key)
                return djb_hash("", 0) % partition_cnt;
            return djb_hash(key->c_str(), key->size()) % partition_cnt;
        }

    private:
        // djb2 string hash: hash = hash * 33 + c
        static inline unsigned int djb_hash(const char *str, size_t len)
        {
            unsigned int hash = 5381;
            for (size_t i = 0; i < len; i++)
                hash = ((hash << 5) + hash) + str[i];
            return hash;
        }
    };

    // Pretty-prints a metadata snapshot: the broker list, then every topic
    // with its partitions, replica sets and ISRs.  `topic` is only used for
    // labelling the header ("all topic" when empty).
    static void metadata_print(const std::string &topic, const RdKafka::Metadata *metadata)
    {
        if (!metadata)
        {
            printf("try metadata_print for topic: %s failed.\n", topic.empty() ? "all topic" : topic.c_str());
            return;
        }

        printf("Metadata for %s ( from broker %d:%s)\n",
               topic.empty() ? "all topic" : topic.c_str(),
               metadata->orig_broker_id(), metadata->orig_broker_name().c_str());

        /* Iterate brokers */
        printf("brokers(%d):\n", (int32_t)metadata->brokers()->size());
        for (const auto *broker : *metadata->brokers())
        {
            printf("broker[%d] at %s:%d\n", broker->id(), broker->host().c_str(), broker->port());
        }

        /* Iterate topics */
        printf("topics(%d):\n", (int32_t)metadata->topics()->size());
        for (const auto *topic_meta : *metadata->topics())
        {
            printf("    topic\"%s\" with %d partitions:",
                   topic_meta->topic().c_str(), (int32_t)topic_meta->partitions()->size());

            if (topic_meta->err() != RdKafka::ERR_NO_ERROR)
            {
                printf("  %s", err2str(topic_meta->err()).c_str());
                if (topic_meta->err() == RdKafka::ERR_LEADER_NOT_AVAILABLE)
                    printf(" (try again)");
            }
            printf("\n");

            /* Iterate topic's partitions */
            for (const auto *part : *topic_meta->partitions())
            {
                printf("      partition %d, leader %d, replicas:", part->id(), part->leader());

                /* Iterate partition's replicas (comma-separated) */
                bool first = true;
                for (int32_t replica : *part->replicas())
                {
                    printf("%s%d", first ? "" : ",", replica);
                    first = false;
                }

                /* Iterate partition's ISRs (comma-separated) */
                printf(", isrs: ");
                first = true;
                for (int32_t isr : *part->isrs())
                {
                    printf("%s%d", first ? "" : ",", isr);
                    first = false;
                }

                if (part->err() != RdKafka::ERR_NO_ERROR)
                    printf(", %s\n", RdKafka::err2str(part->err()).c_str());
                else
                    printf("\n");
            }
        }
    }

    // Interactive metadata browser: type "ls" to fetch and print the full
    // cluster metadata through a throw-away producer handle.
    void metadata_test()
    {
        printf("metadata_test\n");

        printf("input brokers list(127.0.0.1:9092;127.0.0.1:9093;127.0.0.1:9094):\n");
        std::string broker_list;

        // std::cin >> broker_list;
        broker_list = "127.0.0.1:9092";

        // config
        RdKafka::Conf *global_conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
        std::string err_string;
        my_event_cb event_cb;
        global_conf->set("metadata.broker.list", broker_list, err_string);
        global_conf->set("event_cb", &event_cb, err_string);

        // create producer; create() copies the conf, so release ours
        RdKafka::Producer *producer = RdKafka::Producer::create(global_conf, err_string);
        delete global_conf; // BUGFIX: conf was leaked
        if (!producer)
        {
            printf("failed to create producer, %s\n", err_string.c_str());
            return;
        }

        printf("created producer %s\n", producer->name().c_str());

        // BUGFIX: `run` is statically initialised to false, so the command
        // loop below never executed; arm it before entering the loop.
        run = true;

        while (run)
        {
            std::string cmd;
            if (!(std::cin >> cmd)) // stop on EOF instead of spinning forever
            {
                break;
            }

            if (cmd == "ls")
            {
                RdKafka::Metadata *metadata = nullptr;
                /* Fetch metadata for all topics (5s timeout) */
                RdKafka::ErrorCode err = producer->metadata(true, NULL,
                                                            &metadata, 5000);
                if (err != RdKafka::ERR_NO_ERROR)
                {
                    std::cerr << "%% Failed to acquire metadata: "
                              << RdKafka::err2str(err) << std::endl;
                    run = false;
                    break;
                }

                std::string topic_name;
                metadata_print(topic_name, metadata);

                delete metadata;
            }
            // run = false;
        }

        delete producer; // BUGFIX: producer was leaked
    }
}

// Runs the consumer and metadata demos on detached background threads while
// the producer demo occupies the calling thread.
void test_async()
{
    std::thread consumer_thread(comsumer_ts::consumer_test);
    consumer_thread.detach();

    std::thread metadata_thread(metadata_ts::metadata_test);
    metadata_thread.detach();

    producer_ts::producer_test();
}

#include "kafka_producer.h"
#include "kafka_comsumer.h"

void test_class()
{
    std::thread([]()
                {
        kafka_consumer_client consumer("localhost:9092", "test", "asdfbds11", 0);
        consumer.initClient();
        consumer.consume(200); })
        .detach();

    auto Kafkapr_ = new KafkaProducer("localhost:9092", "test", 0);
    Kafkapr_->Init();
    Kafkapr_->Send("hello world!");

    char str_msg[] = "Hello Kafka!";

    while (fgets(str_msg, sizeof(str_msg), stdin))
    {
        size_t len = strlen(str_msg);
        if (str_msg[len - 1] == '\n')
        {
            str_msg[--len] = '\0';
        }

        if (strcmp(str_msg, "end") == 0)
        {
            break;
        }

        Kafkapr_->Send(str_msg);
    }
}

// Entry point: runs one of the three demo drivers; the alternatives are kept
// commented out for manual experimentation.
int main(int argc, char *argv[])
{
    (void)argc;
    (void)argv;

    // test_simple();
    // test_async();
    test_class();

    return 0;
}
