#include <ctype.h>
#include <errno.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/eventfd.h>

#include "kafka_thread.h"
#include "game_event.h"
#include "game_thread.h"
#include "librdkafka/rdkafka.h"
#include "msg_queue.h"

/* Parameters consumed by the Kafka receive (consumer) thread. */
struct thread_kafka_recv_param
{
	char *             brokers;   /* bootstrap broker list, "host:port,..." */
	char *             groupid;   /* consumer group id */
	int                partid;    /* partition passed to topic_partition_list_add */
	int                topic_cnt; /* number of entries in topics[] */
	const char **      topics;    /* heap-allocated array of topic names */
	kafka_msg_callback callback;  /* invoked for each received message */
};
/* Parameters consumed by the Kafka send (producer) thread. */
struct thread_kafka_send_param
{
	char *               brokers;  /* bootstrap broker list, "host:port,..." */
	struct normal_queue nq;        /* queue of outgoing messages (see send_msg) */
	pthread_cond_t       cond;     /* signaled by wakeup_send_thread() */
	pthread_mutex_t      mutex;    /* guards the cond wait */
	kafka_msg_callback   callback; /* NOTE(review): never read in this file — confirm use */
};

/* File-scope singletons: one producer thread and one consumer thread per
 * process; initialized by the create_kafka_*_thread() entry points. */
static struct thread_kafka_send_param send_thread_param;
static struct thread_kafka_recv_param recv_thread_param;

// extern struct thread_send_param send_param;
// extern int wakeup_send_thread(struct thread_send_param *param);

// /**
//  * @brief Signal termination of program
//  */
// static void stop(int sig)
// {
// 	g_thread_run = 0;
// 	global_el->stop = 1;
// 	pthread_cond_signal(&send_param.cond);
// }

// static int is_printable(const char *buf, size_t size)
// {
// 	size_t i;

// 	for (i = 0; i < size; i++)
// 		if (!isprint((int)buf[i]))
// 			return 0;

// 	return 1;
// }

/**
 * Drain one chain of queued messages into the Kafka producer.
 *
 * For each node: enqueue the payload with rd_kafka_producev() (librdkafka
 * copies the payload because of RD_KAFKA_MSG_F_COPY).  On QUEUE_FULL the
 * same node is retried after serving delivery reports for up to 1s.
 *
 * Fix: the original leaked every queue node and its send_msg wrapper —
 * nothing ever freed them after produce.  They are freed here once the
 * node has been handed to librdkafka (or failed for a non-retryable
 * reason).  msg->data (the caller's payload) is deliberately NOT freed:
 * ownership stays with the send_msg() caller — TODO confirm against callers.
 *
 * @param q   head of the message_queue chain popped by the producer thread
 * @param rk  producer instance handle
 */
static void send_message_queue(struct message_queue *q, rd_kafka_t *rk)
{
	rd_kafka_resp_err_t err; /* librdkafka API error code */

	while (q)
	{
		struct send_msg *msg = (struct send_msg *)q->data;

		err = rd_kafka_producev(
		    /* Producer handle */
		    rk,
		    /* Topic name */
		    RD_KAFKA_V_TOPIC(msg->topic),
		    /* Make a copy of the payload. */
		    RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY),
		    /* Message value and length */
		    RD_KAFKA_V_VALUE(msg->data, q->len),
		    RD_KAFKA_V_PARTITION(msg->partid),
		    /* Per-message opaque, provided in delivery report
		     * callback as msg_opaque. */
		    RD_KAFKA_V_OPAQUE(NULL),
		    /* End sentinel */
		    RD_KAFKA_V_END);

		if (err == RD_KAFKA_RESP_ERR__QUEUE_FULL)
		{
			/* The internal queue (bounded by
			 * queue.buffering.max.messages) is full: serve
			 * delivery reports so it drains, then retry the
			 * SAME node — do not advance or free it. */
			rd_kafka_poll(rk, 1000 /*block for max 1000ms*/);
			continue;
		}

		if (err)
		{
			/* Failed to *enqueue* the message; it is dropped. */
			LOG_ERR(
			        "%% Failed to produce to topic %s: %s\n",
			        msg->topic, rd_kafka_err2str(err));
		}
		else
		{
			/* Cast to long: q->len's exact type is declared in
			 * msg_queue.h; %ld with an explicit cast is safe
			 * either way (original used %zd). */
			LOG_DEBUG(
			        "%% Enqueued message (%ld bytes) "
			        "for topic %s\n",
			        (long)q->len, msg->topic);
		}

		/* Node fully handled (payload copied or dropped):
		 * release the wrapper and the queue node. */
		struct message_queue *next = q->next;
		free(msg);
		free(q);
		q = next;
	}
}

/**
 * Delivery-report callback installed via rd_kafka_conf_set_dr_msg_cb().
 * Logs failures only; rkmessage is destroyed automatically by librdkafka
 * after this callback returns.
 */
static void dr_msg_cb(rd_kafka_t *rk,
                      const rd_kafka_message_t *rkmessage, void *opaque)
{
	if (!rkmessage->err)
		return; /* delivered successfully — nothing to report */

	LOG_ERR("%% Message delivery failed: %s\n",
	        rd_kafka_err2str(rkmessage->err));
}

/**
 * Producer thread entry point.
 *
 * Creates a producer from send_thread_param.brokers, then loops: pop a
 * message chain from the queue and produce it, or — when the queue is
 * empty — poll delivery reports and sleep on the condition variable until
 * wakeup_send_thread() signals.  Exits when g_thread_run drops to 0.
 *
 * Fixes vs. original:
 *  - the empty-queue path did `continue` while still HOLDING param->mutex
 *    when the queue became non-empty, deadlocking the next lock attempt;
 *    the mutex is now always unlocked before looping.
 *  - conf is destroyed on rd_kafka_conf_set failure (the recv thread
 *    already did this; the send thread leaked it).
 *  - outstanding messages are flushed and the producer destroyed on
 *    shutdown instead of being leaked.
 *
 * @param p  unused (parameters come from the static send_thread_param)
 * @return   NULL always
 */
static void *thread_send_kafka(void *p)
{
	struct thread_kafka_send_param *param = &send_thread_param;
	rd_kafka_t *                    rk;          /* Producer instance handle */
	rd_kafka_conf_t *               conf;        /* Temporary configuration object */
	char                            errstr[512]; /* librdkafka API error reporting buffer */

	/* Create Kafka client configuration place-holder. */
	conf = rd_kafka_conf_new();
	if (rd_kafka_conf_set(conf, "bootstrap.servers", param->brokers,
	                      errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK)
	{
		LOG_ERR("%s\n", errstr);
		rd_kafka_conf_destroy(conf); /* fix: don't leak conf on failure */
		return NULL;
	}
	rd_kafka_conf_set_dr_msg_cb(conf, dr_msg_cb);

	/* rd_kafka_new() takes ownership of conf on success. */
	rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
	if (!rk)
	{
		LOG_ERR("%% Failed to create new producer: %s\n", errstr);
		return NULL;
	}

	while (g_thread_run)
	{
		struct message_queue *q = normal_queue_getmq(&param->nq);
		if (q)
		{
			send_message_queue(q, rk);
			continue;
		}

		/* Queue empty: serve delivery reports without blocking. */
		rd_kafka_poll(rk, 0 /*non-blocking*/);

		if (pthread_mutex_lock(&param->mutex) == 0)
		{
			/* Fix: the original `continue`d here with the mutex
			 * still held when the queue was non-empty.  Re-check
			 * under the lock and only wait when there really is
			 * nothing to do; always unlock before looping. */
			if (is_normal_queue_empty(&param->nq) && g_thread_run)
				pthread_cond_wait(&param->cond, &param->mutex);
			if (pthread_mutex_unlock(&param->mutex))
			{
				LOG_ERR("unlock mutex error");
				exit(1);
			}
		}
	}

	/* Fix: drain in-flight messages and release the producer on exit. */
	rd_kafka_flush(rk, 10 * 1000 /* wait at most 10s */);
	rd_kafka_destroy(rk);

	return NULL;
}

/**
 * Consumer thread entry point.
 *
 * Builds a consumer from recv_thread_param (brokers, group id, topics,
 * partition), subscribes, then polls in a loop and hands each received
 * message to param->callback until g_thread_run drops to 0.
 *
 * NOTE(review): configuration failures call exit(0) — a process-wide exit
 * with a "success" status from a worker thread; confirm this is intended.
 *
 * @param p  unused (parameters come from the static recv_thread_param)
 * @return   NULL (only on subscribe failure or normal shutdown)
 */
static void *thread_recv_kafka(void *p)
{
	struct thread_kafka_recv_param * param = &recv_thread_param;
	rd_kafka_t *                     rk;           /* Consumer instance handle */
	rd_kafka_conf_t *                conf;         /* Temporary configuration object */
	rd_kafka_resp_err_t              err;          /* librdkafka API error code */
	char                             errstr[512];  /* librdkafka API error reporting buffer */
	const char *                     brokers;      /* Argument: broker list */
	const char *                     groupid;      /* Argument: Consumer group id */
	const char **                    topics;       /* Argument: list of topics to subscribe to */
	int                              topic_cnt;    /* Number of topics to subscribe to */
	rd_kafka_topic_partition_list_t *subscription; /* Subscribed topics */
	int                              i;

	brokers   = param->brokers;
	groupid   = param->groupid;
	topics    = param->topics;
	topic_cnt = param->topic_cnt;

	/*
         * Create Kafka client configuration place-holder
         */
	conf = rd_kafka_conf_new();

	// rd_kafka_conf_set_log_cb(conf, kafka_log_func);

	/* Set bootstrap broker(s) as a comma-separated list of
         * host or host:port (default port 9092).
         * librdkafka will use the bootstrap brokers to acquire the full
         * set of brokers from the cluster. */
	if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers,
	                      errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK)
	{
		LOG_ERR( "%s\n", errstr);
		rd_kafka_conf_destroy(conf);
		exit(0);
	}

	/* Set the consumer group id.
         * All consumers sharing the same group id will join the same
         * group, and the subscribed topic' partitions will be assigned
         * according to the partition.assignment.strategy
         * (consumer config property) to the consumers in the group. */
	if (rd_kafka_conf_set(conf, "group.id", groupid,
	                      errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK)
	{
		LOG_ERR( "%s\n", errstr);
		rd_kafka_conf_destroy(conf);
		exit(0);
	}

	/* If there is no previously committed offset for a partition
         * the auto.offset.reset strategy will be used to decide where
         * in the partition to start fetching messages.
         * By setting this to earliest the consumer will read all messages
         * in the partition if there was no previously committed offset. */
	if (rd_kafka_conf_set(conf, "auto.offset.reset", "earliest",
	                      errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK)
	{
		LOG_ERR( "%s\n", errstr);
		rd_kafka_conf_destroy(conf);
		exit(0);
	}

	/*
         * Create consumer instance.
         *
         * NOTE: rd_kafka_new() takes ownership of the conf object
         *       and the application must not reference it again after
         *       this call.
         */
	rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, sizeof(errstr));
	if (!rk)
	{
		LOG_ERR(
		        "%% Failed to create new consumer: %s\n", errstr);
		exit(0);
	}

	conf = NULL; /* Configuration object is now owned, and freed,
                      * by the rd_kafka_t instance. */


	/* Redirect all messages from per-partition queues to
         * the main queue so that messages can be consumed with one
         * call from all assigned partitions.
         *
         * The alternative is to poll the main queue (for events)
         * and each partition queue separately, which requires setting
         * up a rebalance callback and keeping track of the assignment:
         * but that is more complex and typically not recommended. */
	rd_kafka_poll_set_consumer(rk);

	/* Convert the list of topics to a format suitable for librdkafka.
	 * Each topic is pinned to param->partid rather than letting the
	 * broker assign partitions (RD_KAFKA_PARTITION_UA). */
	subscription = rd_kafka_topic_partition_list_new(topic_cnt);
	for (i = 0; i < topic_cnt; i++)
		rd_kafka_topic_partition_list_add(subscription, topics[i], param->partid);		
//		rd_kafka_topic_partition_list_add(subscription, topics[i], RD_KAFKA_PARTITION_UA);

	/* Subscribe to the list of topics */
	err = rd_kafka_subscribe(rk, subscription);
	if (err)
	{
		LOG_ERR("%% Failed to subscribe to %d topics: %s\n", subscription->cnt, rd_kafka_err2str(err));
		rd_kafka_topic_partition_list_destroy(subscription);
		rd_kafka_destroy(rk);
		return NULL;
	}

	LOG_DEBUG("%% Subscribed to %d topic(s), waiting for rebalance and messages...\n", subscription->cnt);

	rd_kafka_topic_partition_list_destroy(subscription);

	/* Signal handler for clean shutdown */
	// signal(SIGINT, stop);
	// signal(SIGUSR2, stop);	

	/* Subscribing to topics will trigger a group rebalance
         * which may take some time to finish, but there is no need
         * for the application to handle this idle period in a special way
         * since a rebalance may happen at any time.
         * Start polling for messages. */

	/* Adaptive poll timeout: 0ms while messages keep arriving (drain as
	 * fast as possible), 1000ms after a miss so an idle consumer still
	 * re-checks g_thread_run about once a second. */
	int wait_time = 0;
	while (g_thread_run)
	{
		rd_kafka_message_t *rkm;

		rkm = rd_kafka_consumer_poll(rk, wait_time);
		if (!rkm)
		{
			wait_time = 1000;
			continue; /* Timeout: no message within wait_time ms,
                                   *  try again. The bounded timeout allows
                                   *  checking g_thread_run at frequent
                                   *  intervals. */
		}
		wait_time = 0;

		/* consumer_poll() will return either a proper message
                 * or a consumer error (rkm->err is set). */
		if (rkm->err)
		{
			/* Consumer errors are generally to be considered
                         * informational as the consumer will automatically
                         * try to recover from all types of errors. */
			LOG_ERR("%% Consumer error: %s\n", rd_kafka_message_errstr(rkm));
			rd_kafka_message_destroy(rkm);
			continue;
		}
		// rd_kafka_message_destroy(rkm);
		
		/* NOTE(review): rkm is never destroyed here on the success
		 * path — presumably the callback takes ownership and calls
		 * rd_kafka_message_destroy(); verify, otherwise this leaks
		 * one message per poll. */
		param->callback(rkm);
		// struct message_queue *msg = (struct message_queue *)malloc(sizeof(struct message_queue));
		// if (!msg)
		// {
		// 	LOG_ERR("%% mallooc error: %d\n", errno);
		// 	rd_kafka_message_destroy(rkm);
		// 	continue;
		// }
		// msg->cmd = 0;
		// msg->len = 0;
		// msg->data = rkm;
		// msg->next = NULL;
		// normal_queue_push(param->nq, msg);
		// uint64_t t = 1;
		// write(param->efd, &t, sizeof(uint64_t));
	}


	/* Close the consumer: commit final offsets and leave the group. */
	LOG_INFO( "%% Closing consumer\n");
	rd_kafka_consumer_close(rk);

	/* Destroy the consumer */
	rd_kafka_destroy(rk);

	return NULL;
}

/**
 * Populate the static consumer-thread parameters for a single topic.
 *
 * @return 0 on success, -1 if the topic array cannot be allocated.
 *         (Fix: the original dereferenced the malloc result without
 *         checking it for NULL.)
 */
static int init_thread_recv_param(char *brokers, char *groupid, int partid, char *topic, kafka_msg_callback callback)
{
	/* One-element topic array; freed never — lives for the thread. */
	const char **topics = (const char **)malloc(sizeof(char *) * 1);
	if (!topics)
	{
		LOG_ERR("%% malloc error: %d\n", errno);
		return (-1);
	}
	topics[0] = topic;

	recv_thread_param.brokers   = brokers;
	recv_thread_param.groupid   = groupid;
	recv_thread_param.partid    = partid;
	recv_thread_param.topic_cnt = 1;
	recv_thread_param.topics    = topics;
	recv_thread_param.callback  = callback;

	return (0);
}
/**
 * Spawn the Kafka consumer thread for one topic.
 * Fills the static recv_thread_param, then starts thread_recv_kafka.
 *
 * @return the pthread id of the new thread
 */
pthread_t create_kafka_recv_thread(char *brokers, char *groupid, int partid, char *topic, kafka_msg_callback callback, void *param)
{
	pthread_t tid;

	init_thread_recv_param(brokers, groupid, partid, topic, callback);
	create_thread(&tid, thread_recv_kafka, param);

	return tid;
}

static int init_thread_send_param(char *brokers)
{
	normal_queue_init(&(send_thread_param.nq));	
	send_thread_param.brokers = brokers;

	if (pthread_mutex_init(&send_thread_param.mutex, NULL)) {
		fprintf(stderr, "Init mutex error");
		exit(1);
	}
	if (pthread_cond_init(&send_thread_param.cond, NULL)) {
		fprintf(stderr, "Init cond error");
		exit(1);
	}
	return (0);
}
/**
 * Spawn the Kafka producer thread.
 * Fills the static send_thread_param, then starts thread_send_kafka.
 *
 * @return the pthread id of the new thread
 */
pthread_t create_kafka_send_thread(char *brokers, void *param)
{
	pthread_t tid;

	init_thread_send_param(brokers);
	create_thread(&tid, thread_send_kafka, param);

	return tid;
}


/**
 * Nudge the producer thread out of its pthread_cond_wait().
 * Signals only when forced or when there is queued work.
 *
 * @param force  >0 to signal unconditionally
 * @return 0 always
 */
int wakeup_send_thread(int force)
{
	if (force <= 0 && is_normal_queue_empty(&send_thread_param.nq))
		return (0); /* nothing queued, nothing to wake up for */

	if (pthread_mutex_lock(&send_thread_param.mutex) == 0)
	{
		pthread_cond_signal(&send_thread_param.cond);
		if (pthread_mutex_unlock(&send_thread_param.mutex))
		{
			LOG_ERR( "unlock mutex error");
			exit(1);
		}
	}

	return (0);
}

/**
 * Queue a payload for the producer thread.
 *
 * Wraps (data, len, topic, partid) in a send_msg + message_queue node and
 * pushes it onto the send queue.  `data` and `topic` are referenced, not
 * copied — they must stay valid until the producer thread has consumed
 * the node (librdkafka copies the payload at produce time).
 *
 * Fixes vs. original: the msg_data allocation was never checked (NULL
 * dereference on OOM), msg_data leaked when the node allocation failed,
 * and the error message misspelled "malloc".
 *
 * @return 0 on success, -1 on allocation failure
 */
int send_msg(void *data, int len, char *topic, int partid)
{
	struct send_msg *msg_data = (struct send_msg *)malloc(sizeof(struct send_msg));
	struct message_queue *msg = (struct message_queue *)malloc(sizeof(struct message_queue));

	if (!msg_data || !msg)
	{
		fprintf(stderr,
		        "%% malloc error: %d\n",
		        errno);
		free(msg_data); /* free(NULL) is a no-op */
		free(msg);
		return -1;
	}

	msg_data->topic  = topic;
	msg_data->partid = partid;
	msg_data->data   = data;

	msg->cmd  = 0;
	msg->len  = len;
	msg->data = msg_data;
	msg->next = NULL;

	normal_queue_push(&send_thread_param.nq, msg);
	return (0);
}
