#include <stdlib.h>
#include <stdlib.h>
#include <string.h>
#include <syserr.h>
#include <thread.h>
#include <utils.h>
#include <dlist.h>
#include <thread.h>
#include <threadpool.h>
#include <printk.h>

#define MAX_THREADPOOL_SIZE 128

static svc_once_t once = SVC_ONCE_INIT;
static svc_cond_t cond;
static svc_mutex_t mutex;
static u32 idle_threads;
static svc_thread_t default_threads[4];
static LIST_HEAD(exit_message);
static LIST_HEAD(wq);

/* Sentinel work callback installed by svc_cancel() on a cancelled
 * request.  It is compared by address in svc__work_done() to detect
 * cancellation and must never actually run: a cancelled request goes
 * straight to the loop's done-queue without passing through worker(),
 * so being called here means the pool's state is corrupt. */
static void svc__cancelled(struct svc__work *w)
{
    (void) w;  /* only the function's address is meaningful */
    abort();
}


/* Worker thread main loop.
 *
 * To avoid deadlock with svc_cancel() it's crucial that the worker
 * never holds the global mutex and the loop-local mutex at the same time.
 */
static void worker(void *arg)
{
    struct svc__work *w;
    struct list_head *q;

    /* `arg` is the startup semaphore living on init_threads()'s stack.
     * Post it to report that this thread is up, then drop the pointer:
     * init_threads() destroys the semaphore once every worker has
     * checked in, so it must not be touched again. */
    svc_sem_post((svc_sem_t *) arg);
    arg = NULL;

    svc_mutex_lock(&mutex);
    for (;;)
    {
        /* `mutex` should always be locked at this point. */

        /* Sleep until something is queued.  post() only signals `cond`
         * when `idle_threads` is non-zero, so the counter must be
         * updated around the wait. */
        while (list_empty(&wq))
        {
            idle_threads += 1;
            svc_cond_wait(&cond, &mutex);
            idle_threads -= 1;
        }

        q = wq.next;
        if (q == &exit_message)
        {
            /* Shutdown sentinel posted by uv__threadpool_cleanup().
             * Leave it on the queue and re-signal so the remaining
             * workers also see it, then exit this thread. */
            svc_cond_signal(&cond);
            svc_mutex_unlock(&mutex);
            break;
        }

        list_del_init(q);  /* Signal svc_cancel() that the work req is executing. */

        svc_mutex_unlock(&mutex);

        w = container_of(q, struct svc__work, wq);
        w->work(w);

        /* Hand the finished request to its owning loop's done-queue and
         * wake that loop through its async handle; svc__work_done()
         * will run the `done` callback on the loop thread. */
        svc_mutex_lock(&w->loop->wq_mutex);
        w->work = NULL;  /* Signal svc_cancel() that the work req is done
                        executing. */
        list_add_tail(&w->wq, &w->loop->wq);
        svc_async_send(&w->loop->wq_async);
        svc_mutex_unlock(&w->loop->wq_mutex);

        /* Lock `mutex` since that is expected at the start of the next
         * iteration. */
        svc_mutex_lock(&mutex);
    }
}


/* Append one entry to the global work queue and, if any worker is
 * parked in svc_cond_wait(), wake exactly one of them. */
static void post(struct list_head *q)
{
    svc_mutex_lock(&mutex);

    list_add_tail(q, &wq);
    if (idle_threads != 0)
        svc_cond_signal(&cond);

    svc_mutex_unlock(&mutex);
}

/* Tear down the thread pool: post the exit sentinel, join every worker,
 * then destroy the synchronization primitives.
 *
 * The sentinel is never dequeued; each worker re-signals the condition
 * before exiting so the request cascades through the whole pool.
 *
 * NOTE(review): assumes the pool was initialized (svc__work_submit()
 * ran at least once) — otherwise `mutex`/`cond` are destroyed without
 * ever being initialized.  Confirm callers guarantee this.  No work may
 * be submitted after this returns: `once` is already consumed, so the
 * pool cannot re-initialize. */
void uv__threadpool_cleanup(void)
{
    u32 i;

    post(&exit_message);

    for (i = 0; i < array_size(default_threads); i++)
        svc_thread_join(&default_threads[i]);

    svc_mutex_destroy(&mutex);
    svc_cond_destroy(&cond);
}

/* Bring up the pool: initialize the global primitives and spawn the
 * worker threads, then wait for every worker to check in before the
 * startup semaphore (a stack variable handed to each thread) is
 * destroyed. */
static void init_threads(void)
{
    svc_sem_t ready;
    u32 n;

    svc_cond_init(&cond);
    svc_mutex_init(&mutex);
    svc_sem_init(&ready, 0);

    for (n = 0; n < array_size(default_threads); n++)
        svc_thread_create(&default_threads[n], worker, &ready);

    /* Each worker posts `ready` exactly once on startup; after this
     * loop no thread holds a pointer to it, so it is safe to destroy. */
    for (n = 0; n < array_size(default_threads); n++)
        svc_sem_wait(&ready);

    svc_sem_destroy(&ready);
}

#if 0
/* Disabled fork-safety hook: would reset `once` in the child process so
 * the pool re-initializes after fork().  See the matching disabled
 * pthread_atfork() registration in init_once(). */
static void reset_once(void)
{
    svc_once_t child_once = SVC_ONCE_INIT;
    memcpy(&once, &child_once, sizeof(child_once));
}
#endif
/* One-shot pool initializer, executed through svc_once() on the first
 * call to svc__work_submit(). */
static void init_once(void)
{
#if 0
    /* Re-initialize the threadpool after fork.
     * Note that this discards the global mutex and condition as well
     * as the work queue.
     */
    pthread_atfork(NULL, NULL, &reset_once);
#endif
    init_threads();
}
/* Fill in a work descriptor and queue it on the thread pool.
 *
 * `work` runs on a worker thread; `done` later runs on `loop`'s thread
 * via svc__work_done().  The pool itself is started lazily on the very
 * first submission. */
void svc__work_submit(ev_loop_t *loop,
                      struct svc__work *w,
                      void (*work)(struct svc__work *w),
                      void (*done)(struct svc__work *w, int status))
{
    svc_once(&once, init_once);

    w->loop = loop;
    w->work = work;
    w->done = done;

    post(&w->wq);
}

/* Async callback, run on the loop thread: drain the loop's done-queue
 * and invoke each finished request's `done` callback.
 *
 * Fix: the previous version released `wq_mutex` *before* calling
 * list_del(&w->wq), so the list was unlinked without the lock while
 * worker threads concurrently list_add_tail() to the same list under
 * wq_mutex — a data race that can corrupt the queue.  Entries are now
 * dequeued while the mutex is held; only the user callback runs
 * unlocked (it may free or resubmit the request, so it must not be
 * invoked under the lock). */
void svc__work_done(svc_async_t *handle)
{
    struct svc__work *w;
    struct list_head *q;
    int err;
    ev_loop_t *loop = container_of(handle, ev_loop_t, wq_async);

    svc_mutex_lock(&loop->wq_mutex);
    while (!list_empty(&loop->wq))
    {
        /* Unlink under the lock so workers appending to loop->wq
         * cannot race with the removal. */
        q = loop->wq.next;
        list_del_init(q);
        svc_mutex_unlock(&loop->wq_mutex);

        w = container_of(q, struct svc__work, wq);
        /* svc_cancel() leaves the sentinel in w->work; a normally
         * completed request has w->work == NULL (set by worker()). */
        err = (w->work == svc__cancelled) ? -1 : 0;
        w->done(w, err);

        svc_mutex_lock(&loop->wq_mutex);
    }
    svc_mutex_unlock(&loop->wq_mutex);
}


/* Pool-side trampoline: recover the public request from its embedded
 * work descriptor and run the user's work callback on this worker
 * thread. */
static void svc__queue_work(struct svc__work *w)
{
    svc_work_t *req;

    req = container_of(w, svc_work_t, work_req);
    req->work_cb(req);
}
static void svc__queue_done(struct svc__work *w, int err)
{
    svc_work_t *req = container_of(w, svc_work_t, work_req);

    ev__req_del(req->loop, req);

    if (req->after_work_cb)
        req->after_work_cb(req, err);
}


/* Public API: schedule `work_cb` on the thread pool, then run
 * `after_work_cb` (optional) on `loop`'s thread when it finishes.
 *
 * Returns 0 on success or -EINVAL when no work callback is supplied. */
int svc_queue_work(ev_loop_t *loop, svc_work_t *w, svc_work_cb_t work_cb,
                   svc_after_work_cb_t after_work_cb)
{
    if (work_cb == NULL)
        return -EINVAL;

    ev__req_add(loop, w);

    w->loop          = loop;
    w->work_cb       = work_cb;
    w->after_work_cb = after_work_cb;

    svc__work_submit(loop, &w->work_req, svc__queue_work, svc__queue_done);

    return 0;
}
/* Attempt to cancel a pending work request.
 *
 * Cancellation succeeds only while the request is still linked on the
 * global queue and has not begun executing: worker() unlinks it with
 * list_del_init() just before running it, and clears w->work when it
 * finishes.  On success the request is rerouted to the loop's
 * done-queue with w->work set to the svc__cancelled sentinel, so
 * svc__work_done() passes status -1 to the `done` callback.
 *
 * Returns 0 on success, -EBUSY if the request is already running or
 * has completed.
 *
 * Lock order: global `mutex` first, then the loop's `wq_mutex`.  The
 * workers never hold both locks at once (see worker()), so this
 * ordering cannot deadlock.
 *
 * NOTE(review): `loop` is assumed to be the same as `_w->loop` (set by
 * svc__work_submit()); both are dereferenced below — confirm callers
 * never pass a different loop. */
int svc_cancel(ev_loop_t *loop, svc_work_t *w)
{
    int cancelled;
    struct svc__work *_w = &w->work_req;

    svc_mutex_lock(&mutex);
    svc_mutex_lock(&_w->loop->wq_mutex);

    /* Still pending iff linked (worker unlinks before executing) and
     * work is non-NULL (worker NULLs it after executing). */
    cancelled = !list_empty(&_w->wq) && _w->work != NULL;
    if (cancelled) list_del(&_w->wq);

    svc_mutex_unlock(&_w->loop->wq_mutex);
    svc_mutex_unlock(&mutex);

    if (!cancelled) return -EBUSY;

    /* Mark as cancelled and deliver straight to the done-queue. */
    _w->work = svc__cancelled;
    svc_mutex_lock(&loop->wq_mutex);
    list_add_tail(&_w->wq, &loop->wq);
    svc_async_send(&loop->wq_async);
    svc_mutex_unlock(&loop->wq_mutex);

    return 0;
}
