﻿#include "kafka_client.h"
#include <iostream>
#include <string>
#include <cstdlib>
#include <cstdio>
#include <QFile>
#include <QDebug>
#include <QDateTime>

// Forwards librdkafka client events (errors, logs, stats, throttling) to the
// owning KafkaClient.  Error and severe log events are relayed to the
// "kafkaFailedMsg" member via QMetaObject::invokeMethod, since event_cb runs
// on librdkafka's internal thread, not on the KafkaClient thread.
class ExampleEventCb : public RdKafka::EventCb {
public:
    // `context` is a non-owning pointer to the client to notify; it may be
    // null, in which case events are only logged.
    explicit ExampleEventCb(KafkaClient* context): context(context)
    {
    }

    void event_cb (RdKafka::Event &event) {
        switch (event.type())
        {
        case RdKafka::Event::EVENT_ERROR:
            {
                QString error_info = QString("ERROR (%1): %2")
                        .arg(QString::fromStdString(RdKafka::err2str(event.err())))
                        .arg(QString::fromStdString(event.str()));
                qDebug() << error_info;
                // Guard against a null context (mirrors the EVENT_LOG branch).
                // Q_ARG uses the plain value type, as documented for invokeMethod.
                if (context)
                    QMetaObject::invokeMethod(context, "kafkaFailedMsg", Q_ARG(QString, error_info));
                if (event.err() == RdKafka::ERR__ALL_BROKERS_DOWN)
                {
                    error_info = QString("ERROR (ERR__ALL_BROKERS_DOWN): %1").arg(QString::fromStdString(event.str()));
                    qDebug() << error_info;
                    if (context)
                        QMetaObject::invokeMethod(context, "kafkaFailedMsg", Q_ARG(QString, error_info));
                }
            }
            break;

        case RdKafka::Event::EVENT_STATS:
            qDebug() << "\"STATS\": " << QString::fromStdString(event.str());
            break;

        case RdKafka::Event::EVENT_LOG:
            {
                QString log_info = QString("RdKafkaLOG-%1-%2: %3").arg(event.severity()).arg(event.fac().c_str()).arg(event.str().c_str());
                qDebug() << log_info;
                // Only relay messages at EMERG..ERROR severity (lower value = more severe).
                if (event.severity() <= RdKafka::Event::EVENT_SEVERITY_ERROR && context)
                {
                    QMetaObject::invokeMethod(context, "kafkaFailedMsg", Q_ARG(QString, log_info));
                }
            }
            break;

        case RdKafka::Event::EVENT_THROTTLE:
            qDebug() << "THROTTLED: " << event.throttle_time() << "ms by " << QString::fromStdString(event.broker_name()) << " id " << (int)event.broker_id();
            break;

        default:
            qDebug() << "EVENT " << event.type() <<
                         " (" << QString::fromStdString(RdKafka::err2str(event.err())) << "): " << QString::fromStdString(event.str());
            break;
        }
    }

private:
    KafkaClient* context;  // non-owning; target of the failure notifications
};




// Constructs the consumer thread.
// `ip`     : Kafka bootstrap broker address (host or host:port).
// `parent` : standard Qt parent object for ownership.
// The thread is not started here; callers start it explicitly, and the
// consumer/config pointers stay null until run() creates them.
KafkaClient::KafkaClient(std::string ip, QObject *parent)
    : QThread(parent)
    , g_bUdpThreadRun(true)
    , msg_cnt(0)
    , msg_bytes(0)
    , consumer(nullptr)
    , global_conf(nullptr)
    , topic_conf(nullptr)
{
    m_kafkaIp = ip;
}

// Tears down the consumer and releases the configuration objects.
// Note: the Conf objects are created in run() before the consumer, so they
// must be deleted even when consumer creation failed — the original code
// leaked them whenever `consumer` was null.
KafkaClient::~KafkaClient()
{
    if (consumer)
    {
        consumer->close();   // leave the group / commit final offsets
        delete consumer;
        consumer = nullptr;
    }
    // KafkaConsumer::create() copies the configuration, so deleting these
    // after the consumer is safe; delete unconditionally to avoid leaks on
    // the early-exit paths in run().
    delete global_conf;
    global_conf = nullptr;
    delete topic_conf;
    topic_conf = nullptr;
}

// Requests cooperative shutdown: clears the flag that keeps the consume loop
// in run() alive, so the thread exits after the current poll completes.
// NOTE(review): g_bUdpThreadRun is written here from the caller's thread and
// read in run() on the worker thread with no synchronization — presumably it
// should be std::atomic<bool>; confirm against its declaration in the header.
void KafkaClient::notify_thread_exit()
{
    g_bUdpThreadRun = false;
}

// Dispatches one result of consumer->consume() from the run() loop.
// Real messages with a key are forwarded via the kafkaMessageReceived signal;
// unrecoverable errors clear g_bUdpThreadRun to stop the loop.
// `opaque` is unused (kept for the librdkafka callback shape); `message` is
// owned by the caller.
void KafkaClient::msg_consume(RdKafka::Message* message, void* opaque)
{
    switch (message->err())
    {
    case RdKafka::ERR__TIMED_OUT:
        // No message arrived within the consume() timeout — not an error.
        break;

    case RdKafka::ERR_NO_ERROR:
    {
        /* Real message */
        msg_cnt++;
        msg_bytes += message->len();
        if (!message->key())
            break;  // key-less messages are ignored
        QDateTime now = QDateTime::currentDateTime();
        // NOTE(review): payload() is not guaranteed to be NUL-terminated;
        // passing it as a C string assumes the producer writes a terminator.
        // Consider forwarding message->len() too — confirm with the producers.
        emit kafkaMessageReceived(static_cast<const char *>(message->payload()), *message->key(), now);
        break;
    }

    case RdKafka::ERR__PARTITION_EOF:
        /* Last message in the partition — keep polling. */
        break;

    case RdKafka::ERR__UNKNOWN_TOPIC:
    case RdKafka::ERR__UNKNOWN_PARTITION:
        qDebug() << "Consume failed: " << QString::fromStdString(message->errstr());
        g_bUdpThreadRun = false;  // unrecoverable: stop the consume loop
        break;

    default:
        /* Any other error is treated as fatal for this consumer. */
        qDebug() << "Consume failed: " << QString::fromStdString(message->errstr());
        g_bUdpThreadRun = false;
    }
}

//重写线程执行函数
void KafkaClient::run()
{
    QDateTime current_date_time = QDateTime::currentDateTime();
    QString id = current_date_time.toString("yyyyMMddHHmmss");
    
    std::string brokers = m_kafkaIp;
    qDebug()<<"kafka Ip地址"<<m_kafkaIp.c_str();
    //std::string brokers = "127.0.0.1";
    std::string errstr;
    std::string topic_str="ui-responses";
    std::vector<std::string> topics;

    // 测试
    
    //QFile file("C:/Optima/okng/kafka_groupid.txt");
    //if (file.open(QIODevice::ReadOnly))
    {
    //    id = file.readLine();
    //    file.close();
    }
    // 只要不更改group.id，每次重新消费kafka，都是从上次消费结束的地方继续开始，不论"auto.offset.reset”属性设置的是什么
    std::string group_id=id.toStdString();
    global_conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
    topic_conf = RdKafka::Conf::create(RdKafka::Conf::CONF_TOPIC);

    //group.id必须设置
    if (global_conf->set("group.id", group_id, errstr) != RdKafka::Conf::CONF_OK) {
           qDebug() << QString::fromStdString(errstr);
           exit(1);
         }

    topics.push_back(topic_str);
   //bootstrap.servers可以替换为metadata.broker.list
    global_conf->set("bootstrap.servers", brokers, errstr);
    
    //关闭自动提交offset，改提交offset为手动提交
    if (global_conf->set("enable.auto.offset.store", "false", errstr) != RdKafka::Conf::CONF_OK) {
        qWarning()<<QString("Failed to set enable.auto.offset.store:%1").arg(errstr.c_str());
    }
    
    //consumer失活时间配置
    global_conf->set("session.timeout.ms", "12000", errstr);    // 失活时间设置大一点，避免ERR__TIMED_OUT
    //consumer 心跳频率
    global_conf->set("heartbeat.interval.ms","2000",errstr);
    //避免消费耗时过长导致 进程失活,设置poll间隔时间
    global_conf->set("max.poll.interval.ms","420000", errstr);   //此配置应该大于消费时间

//    conf->set("partition", "1", errstr);
    ExampleEventCb ex_event_cb(this);
    global_conf->set("event_cb", &ex_event_cb, errstr);
    global_conf->set("default_topic_conf", topic_conf, errstr);

    consumer = RdKafka::KafkaConsumer::create(global_conf, errstr);
    if (!consumer) {
        qDebug() << "Failed to create consumer: " << QString::fromStdString(errstr);
        exit(1);
    }

    qDebug() << "% Created consumer " << QString::fromStdString(consumer->name());
    RdKafka::ErrorCode err = consumer->subscribe(topics);

    if (err) {
        qDebug() << "Failed to subscribe to " << topics.size() << " topics: "
                  << QString::fromStdString(RdKafka::err2str(err));
        exit(1);
    }
    //基本思路为先获取server端的状态信息，将与订阅相关的topic找出来，根据分区，创建TopicPartion；最后使用assign消费
    RdKafka::Metadata* metadataMap{ nullptr };
    err = consumer->metadata(true, nullptr, &metadataMap, 2000);
    if (err != RdKafka::ERR_NO_ERROR) {
        std::cout << RdKafka::err2str(err) << std::endl;
    }
    else
    {
        const RdKafka::Metadata::TopicMetadataVector* topicList = metadataMap->topics();
        std::cout << "broker topic size: " << topicList->size() << std::endl;
        RdKafka::Metadata::TopicMetadataVector subTopicMetaVec;
        std::copy_if(topicList->begin(), topicList->end(), std::back_inserter(subTopicMetaVec), [&topics](const RdKafka::TopicMetadata* data) {
            return std::find_if(topics.begin(), topics.end(), [data](const std::string& tname) {return data->topic() == tname; }) != topics.end();
            });
        std::vector<RdKafka::TopicPartition*> topicpartions;
        std::for_each(subTopicMetaVec.begin(), subTopicMetaVec.end(), [&topicpartions](const RdKafka::TopicMetadata* data) {
            auto parVec = data->partitions();
            std::for_each(parVec->begin(), parVec->end(), [&](const RdKafka::PartitionMetadata* value) {
                std::cout << data->topic() << " has partion: " << value->id() << " Leader is : " << value->leader() << std::endl;
                topicpartions.push_back(RdKafka::TopicPartition::create(data->topic(), value->id(), RdKafka::Topic::OFFSET_END));
                });
            });
        consumer->assign(topicpartions);
    }

    while (g_bUdpThreadRun)
    {
        //0毫秒未订阅到消息，触发RdKafka::ERR__TIMED_OUT
        RdKafka::Message *msg = consumer->consume(6000);
        
        consumer->commitSync();//阻塞
        msg_consume(msg, NULL);
        
        qDebug() << "msg.offset" << msg->offset();
    }
    qDebug() << "% Consumed " << msg_cnt << " messages (" << msg_bytes << " bytes)";
    quit();

}



