#include <linux/workqueue.h>
#include <linux/list.h>
#include <linux/percpu.h>
#include <linux/stdarg.h>
#include <linux/stdlib.h>

#include "internal.h"

/* Return the first idle worker.  Called with pool->lock held. */
/*
 * Pick the worker at the head of @pool's idle list, or NULL when every
 * worker is busy.  Called with pool->lock held.
 */
static struct worker *first_idle_worker(struct worker_pool *pool)
{
	struct list_head *idle = &pool->idle_list;

	return unlikely(list_empty(idle)) ?
		NULL : list_first_entry(idle, struct worker, entry);
}

/**
 * insert_work - insert a work into a pool
 * @pwq: pwq @work belongs to
 * @work: work to insert
 * @head: insertion point
 * @extra_flags: extra WORK_STRUCT_* flags to set
 *
 * Insert @work which belongs to @pwq after @head.  @extra_flags is or'd to
 * work_struct flags.
 *
 * NOTE(review): in this simplified version only the list linkage is done;
 * @pwq and @extra_flags are accepted for interface compatibility but are
 * never read — no work->data/flags bookkeeping happens here.  Confirm that
 * is intentional before callers rely on @extra_flags taking effect.
 *
 * CONTEXT:
 * raw_spin_lock_irq(pool->lock).
 */
static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
						struct list_head *head, unsigned int extra_flags)
{
	/* Link @work immediately before @head, i.e. at the tail when @head
	 * is the list head. */
	list_add_tail(&work->entry, head);
}

/*
 * Need to wake up a worker?  Called from anything but currently
 * running workers.
 *
 * Note that, because unbound workers never contribute to nr_running, this
 * function will always return %true for unbound pools as long as the
 * worklist isn't empty.
 */
static bool need_more_worker(struct worker_pool *pool)
{
	/* Nothing queued — no worker needed regardless of nr_running. */
	if (list_empty(&pool->worklist))
		return false;

	/* Work is pending; a worker is needed only if none is running. */
	return pool->nr_running == 0;
}

/**
 * kick_pool - wake up an idle worker if necessary
 * @pool: pool to kick
 *
 * @pool may have pending work items. Wake up worker if necessary. Returns
 * whether a worker was woken up.
 *
 * CONTEXT:
 * raw_spin_lock_irq(pool->lock) — presumably, matching first_idle_worker();
 * confirm against callers.
 */
static bool kick_pool(struct worker_pool *pool)
{
	struct worker *worker = first_idle_worker(pool);

	/* No pending work, or nobody idle to take it. */
	if (!need_more_worker(pool) || !worker)
		return false;

	/*
	 * TODO(review): the actual wake-up (wake_up_process() on the idle
	 * worker's task) is not implemented yet, so we cannot truthfully
	 * report a wake-up.  The previously declared-but-unused
	 * 'struct task_struct *p' local (removed here) suggests this path
	 * was planned but left unfinished.
	 */
	return false;
}

/*
 * __queue_work - internal work submission path for queue_work_on()
 * @cpu: target CPU (semantics TODO — presumably a CPU id or an "unbound"
 *       sentinel; confirm when implemented)
 * @wq: workqueue to queue on
 * @work: work item to queue
 *
 * NOTE(review): stub — pwq selection and insertion are not implemented.
 * The four declared-but-unused locals (pwq, last_pool/pool, work_flags,
 * req_cpu) were removed to keep the build warning-free; reintroduce them
 * with the real implementation.
 */
static void __queue_work(int cpu, struct workqueue_struct *wq,
						 struct work_struct *work)
{
	/* TODO: pick a pool_workqueue and insert_work() under pool->lock. */
}

/**
 * queue_work_on - queue work on specific cpu
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Return: %true if @work was queued.  This simplified version has no
 * already-pending check and queues unconditionally, so it always returns
 * %true.  (It previously queued the work yet reported %false, misleading
 * any caller that tests the return value.)
 */
bool queue_work_on(int cpu, struct workqueue_struct *wq,
				   struct work_struct *work)
{
	__queue_work(cpu, wq, work);

	return true;
}

/*
 * alloc_and_link_pwqs - allocate @wq's per-CPU pool_workqueue pointer array
 * @wq: workqueue being set up
 *
 * Return: 0 on success, -ENOMEM if the percpu allocation fails.
 *
 * NOTE(review): the "link" half (populating each per-CPU slot) is not
 * implemented yet.  Reindented with tabs and early-return to match the
 * rest of the file (the original mixed spaces/tabs and Allman braces).
 */
static int alloc_and_link_pwqs(struct workqueue_struct *wq)
{
	wq->cpu_pwq = alloc_percpu(struct pool_workqueue *);
	if (!wq->cpu_pwq)
		return -ENOMEM;

	return 0;
}

/*
 * __alloc_workqueue - common allocation path behind alloc_workqueue_noprof()
 * @fmt: printf-style name format (currently unused — naming is TODO)
 * @flags: WQ_* flags (currently unused)
 * @max_active: max in-flight work items (currently unused)
 * @args: arguments for @fmt
 *
 * Return: a zero-initialized workqueue on success, %NULL on allocation
 * failure.
 *
 * NOTE(review): removed the unused 'name_len' local and the 'wq_size'
 * indirection (it was always exactly sizeof(*wq)); both presumably
 * anticipated inline name storage — restore them with that feature.
 */
static struct workqueue_struct *__alloc_workqueue(const char *fmt,
												  unsigned int flags,
												  int max_active, va_list args)
{
	struct workqueue_struct *wq;

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return NULL;

	/* TODO: format the name from @fmt/@args; honor @flags/@max_active. */
	return wq;
}

/**
 * alloc_workqueue_noprof - allocate a workqueue
 * @fmt: printf-style format for the workqueue name
 * @flags: WQ_* flags
 * @max_active: max in-flight work items
 *
 * Variadic front-end: collects the name arguments and hands everything to
 * __alloc_workqueue().  Returns the new workqueue, or %NULL on failure.
 */
struct workqueue_struct *alloc_workqueue_noprof(const char *fmt, unsigned int flags,
												int max_active, ...)
{
	struct workqueue_struct *ret;
	va_list ap;

	va_start(ap, max_active);
	ret = __alloc_workqueue(fmt, flags, max_active, ap);
	va_end(ap);

	return ret;
}
