#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#include "../hle.h"

#include "__SysMemUserForUser.h"
#include "__ThreadManForUser.h"
#include "../blkalloc.h"

/*************
 * CALLBACKS *
 *************/

/* Create a kernel callback object.
   Guest args: PARAM(0) = name pointer, PARAM(1) = handler entry point,
   PARAM(2) = user argument passed to the handler.
   Returns the new callback UID (or error) via RETURN. */
void sceKernelCreateCallback()
{
    char *cbName = mem_readStr(PARAM(0));
    u32 cbEntry = PARAM(1);
    u32 cbArg = PARAM(2);

    RETURN(__KernelCreateCallback(cbName, cbEntry, cbArg));
    mem_freeStr(cbName);
}

/* Trampoline hit when a guest callback handler returns; hands control
   back to the kernel-side callback machinery. */
void _sceKernelReturnFromCallback()
{
    SceUID cbId = PARAM(0);

    _log(ERR, HLE, "_sceKernelReturnFromCallback(%d)", cbId);
    __KernelReturnFromCallback();
}

/* Delete the callback object identified by PARAM(0).
   Returns 0 on success, SCE_ERROR_NOT_FOUND_CALLBACK for a bad UID. */
void sceKernelDeleteCallback()
{
    SceUID cbId = PARAM(0);

    if (!ko_get(cbId))
    {
        _log(ERR, HLE, "sceKernelDeleteCallback(uid = %d): bad callback UID", cbId);
        RETURN(SCE_ERROR_NOT_FOUND_CALLBACK);
        return;
    }
    _log(INF, HLE, "sceKernelDeleteCallback(uid = %d)", cbId);
    ko_free(cbId);
    RETURN(0);
}

/* Notify (fire) a callback — not implemented.
   FIX: unlike the other UNIMPL stubs in this file (e.g. sceKernelCancelSema),
   this one left the guest return register unset; report success explicitly. */
void sceKernelNotifyCallback()
{
    SceUID id = PARAM(0);
    _log(ERR, HLE, "UNIMPL sceKernelNotifyCallback(%d)", id);
    RETURN(0);
}

/* Poll for pending callbacks on the current thread.
   This implementation never reports a pending callback (returns 0). */
void sceKernelCheckCallback()
{
    _log(DBG, HLE, "sceKernelCheckCallback()");
    RETURN(0);
}

/* Register callback PARAM(0) as the process exit callback.
   Returns 0 on success, SCE_ERROR_NOT_FOUND_CALLBACK for a bad UID.
   BUG FIX: the error log's "%d" had no matching argument (undefined
   behavior in the varargs call); pass `id`.  Also trimmed trailing
   whitespace on the success branch. */
void sceKernelRegisterExitCallback()
{
    SceUID id = PARAM(0);
    Callback *c = ko_get(id);
    if (c)
    {
        _log(INF, HLE, "sceKernelRegisterExitCallback(uid = %d)", id);
        __KernelRegisterCallback(CBTYPE_EXIT, id);
        RETURN(0);
    }
    else {
        _log(ERR, HLE, "sceKernelRegisterExitCallback(uid = %d): bad callback UID", id);
        RETURN(SCE_ERROR_NOT_FOUND_CALLBACK);
    }
}

/***********
 * MUTEXES *
 ***********/

/* Guest-visible mutex state (the part a refer-status call would copy out). */
typedef struct
{
    char name[32];         /* object name; creation code forces NUL termination */
    u32 attr;              /* creation attribute bits */
    u32 initCount;         /* count given at creation */
    u32 curCount;          /* current lock count; 0 means unlocked (see sceKernelLockMutex) */
    u32 numWaitThreads;    /* number of entries in use in waitingThreads below */
} NativeMutex;

/* Host-side mutex object: native part plus the FIFO wait queue. */
typedef struct
{
    NativeMutex n;
    SceUID waitingThreads[MAX_THREAD]; /* TODO: STATIC */
} Mutex;

/* Create a mutex.
   Guest args: name ptr, attributes, initial lock count, option block (ignored).
   Returns the new UID via RETURN. */
void sceKernelCreateMutex()
{
    char *mtxName = mem_readStr(PARAM(0));
    u32 mtxAttr = PARAM(1);
    u32 initialCount = PARAM(2);
    u32 optAddr = PARAM(3);
    Mutex *m = malloc(sizeof(Mutex));
    SceUID uid = ko_init(m, KO_MUTEX);

    strncpy(m->n.name, mtxName, 32);
    m->n.name[31] = '\0';
    m->n.attr = mtxAttr;
    m->n.initCount = initialCount;
    m->n.curCount = initialCount;
    m->n.numWaitThreads = 0;

    _log(INF, HLE, "%d = sceKernelCreateMutex(name = %s, attr = %08x, count = %08x, options_addr = %08x)", uid, mtxName, mtxAttr, initialCount, optAddr);

    RETURN(uid);
    mem_freeStr(mtxName);
}

/* Delete the mutex identified by PARAM(0).
   Returns 0 or SCE_ERROR_MUTEX_NOT_FOUND. */
void sceKernelDeleteMutex()
{
    SceUID uid = PARAM(0);

    if (!ko_get(uid))
    {
        _log(ERR, HLE, "sceKernelDeleteMutex(%d): mutex not found", uid);
        RETURN(SCE_ERROR_MUTEX_NOT_FOUND);
        return;
    }
    ko_free(uid);
    _log(INF, HLE, "0 = sceKernelDeleteMutex(%d)", uid);
    RETURN(0);
}

/* Lock mutex PARAM(0), raising its count by PARAM(1).
   PARAM(2) optionally points at a timeout value; 0 means wait forever.
   If the mutex is free (curCount == 0) it is taken immediately; otherwise
   the caller is appended to the wait queue and blocked (final flag 0 =
   callbacks not serviced while waiting).
   NOTE(review): RETURN(0) is issued even on the blocking path — presumably
   the real result is delivered when the thread is resumed; confirm. */
void sceKernelLockMutex()
{
    SceUID id = PARAM(0);
    u32 count = PARAM(1);
    u32 timeoutAddr = PARAM(2);
    u32 timeout;
    Mutex *mtx = ko_get(id);
    /* Read the optional timeout from guest memory when the pointer is valid. */
    if (mem_isOk(timeoutAddr))
        timeout = mem_read32(timeoutAddr);
    else
        timeout = 0;
    if (mtx)
    {
        if (mtx->n.curCount == 0)
            mtx->n.curCount += count;
        else {
           mtx->waitingThreads[mtx->n.numWaitThreads++] = __KernelGetCurThread();
            __KernelWaitCurThread(WAITTYPE_MUTEX, id, 0, timeout, 0);
        }
        _log(INF, HLE, "0 = sceKernelLockMutex(id = %d, count = %d, timeoutAddr = %08x)", id, count, timeoutAddr);
        RETURN(0);
    }
    else {
        _log(ERR, HLE, "sceKernelLockMutex(id = %d, count = %d, timeoutAddr = %08x): mutex not found", id, count, timeoutAddr);
        RETURN(SCE_ERROR_MUTEX_NOT_FOUND);
    }
}

/* Callback-enabled variant of sceKernelLockMutex: identical logic except
   the wait is entered with the callback flag set (final argument 1), so
   callbacks can run while the thread is blocked. */
void sceKernelLockMutexCB()
{
    SceUID id = PARAM(0);
    u32 count = PARAM(1);
    u32 timeoutAddr = PARAM(2);
    u32 timeout;
    Mutex *mtx = ko_get(id);
    /* Read the optional timeout from guest memory when the pointer is valid. */
    if (mem_isOk(timeoutAddr))
        timeout = mem_read32(timeoutAddr);
    else
        timeout = 0;
    if (mtx)
    {
        if (mtx->n.curCount == 0)
            mtx->n.curCount += count;
        else {
           mtx->waitingThreads[mtx->n.numWaitThreads++] = __KernelGetCurThread();
            __KernelWaitCurThread(WAITTYPE_MUTEX, id, 0, timeout, 1);
        }
        _log(INF, HLE, "0 = sceKernelLockMutexCB(id = %d, count = %d, timeoutAddr = %08x)", id, count, timeoutAddr);
        RETURN(0);
    }
    else {
        _log(ERR, HLE, "sceKernelLockMutexCB(id = %d, count = %d, timeoutAddr = %08x): mutex not found", id, count, timeoutAddr);
        RETURN(SCE_ERROR_MUTEX_NOT_FOUND);
    }
}

/* Unlock mutex PARAM(0) by PARAM(1) counts.
   When the count drops to 0 and threads are queued, the head of the wait
   queue is resumed, removed (queue shifted down), and a reschedule runs.
   Errors: underflow (count larger than held), already unlocked, not found. */
void sceKernelUnlockMutex()
{
    SceUID id = PARAM(0);
    u32 count = PARAM(1);
    Mutex *mtx = ko_get(id);
    if (mtx)
    {
        /* Signed compare so an over-release is detected rather than wrapping. */
        if ((s32)mtx->n.curCount - (s32)count >= 0)
        {
            u32 oldval = mtx->n.curCount;
            mtx->n.curCount -= count;
            if (mtx->n.curCount == 0 && mtx->n.numWaitThreads != 0)
            {
                /* TODO: use attribute to know which mutex to restart */
                u32 i;
                SceUID id;

                /* Wake the oldest waiter (FIFO). */
                id = mtx->waitingThreads[0];
                __KernelResumeThread(id);

                /* Drop slot 0 and compact the queue. */
                mtx->n.numWaitThreads--;
                for (i = 0; i < mtx->n.numWaitThreads; i++)
                    mtx->waitingThreads[i] = mtx->waitingThreads[i + 1];
                __KernelReSchedule();
            }
            _log(INF, HLE, "sceKernelUnlockMutex(id = %d, count = %d) [old: %d, new: %d]", id, count, oldval, mtx->n.curCount);
            RETURN(0);
        }
        else if (mtx->n.curCount != 0) {
            _log(ERR, HLE, "sceKernelUnlockMutex(id = %d, count = %d): mutex underflow", id, count);
            RETURN(SCE_ERROR_MUTEX_UNLOCK_UNDERFLOW);
        }
        else {
            _log(ERR, HLE, "sceKernelUnlockMutex(id = %d, count = %d): mutex already unlocked", id, count);
            RETURN(SCE_ERROR_MUTEX_UNLOCKED);
        }
    }
    else {
        _log(ERR, HLE, "sceKernelUnlockMutex(id = %d, count = %d): mutex not found", id, count);
        RETURN(SCE_ERROR_MUTEX_NOT_FOUND);
    }
}

/*************
 * LWMUTEXES *
 *************/

/* Create a lightweight mutex.
   Guest args: work-area ptr, name ptr, attributes, initial count, options.
   The new UID is stored into the guest work area (when valid) and also
   returned.  LwMutex is declared elsewhere in the project. */
void sceKernelCreateLwMutex()
{
    u32 workAddr = PARAM(0);
    char *name = mem_readStr(PARAM(1));
    u32 attr = PARAM(2);
    u32 count = PARAM(3);
    u32 optaddr = PARAM(4);
    LwMutex *lwmtx = malloc(sizeof(LwMutex));
    SceUID id = ko_init(lwmtx, KO_LWMUTEX);

    strncpy(lwmtx->n.name, name, 32);
    lwmtx->n.name[31] = '\0';
    lwmtx->n.attr = attr;
    lwmtx->n.uid = id;
    lwmtx->n.curCount = lwmtx->n.initCount = count;
    lwmtx->n.numWaitThreads = 0;
    lwmtx->thread = 0;

    /* Publish the UID into the guest-side work area for later calls. */
    if (mem_isOk(workAddr))
        mem_write32(workAddr, id);

    _log(INF, HLE, "%d = sceKernelCreateLwMutex(workAddr = %08x, name = %s, attr = %08x, count = %08x, options_addr = %08x)", id, workAddr, name, attr, count, optaddr);

    RETURN(id);
    mem_freeStr(name);
}

/* Delete a lightweight mutex.  PARAM(0) is the guest work-area address;
   the UID is read from its first word. */
void sceKernelDeleteLwMutex()
{
    SceUID uid = mem_read32(PARAM(0));

    if (!ko_get(uid))
    {
        _log(ERR, HLE, "sceKernelDeleteLwMutex(%08x [%d]): lwmutex not found", PARAM(0), uid);
        RETURN(SCE_ERROR_LWMUTEX_NOT_FOUND);
        return;
    }
    ko_free(uid);
    _log(INF, HLE, "0 = sceKernelDeleteLwMutex(%08x [%d])", PARAM(0), uid);
    RETURN(0);
}

/**************
 * SEMAPHORES *
 **************/

/* Guest-visible semaphore state; `size` is read back from the guest in
   sceKernelReferSemaStatus to decide how many bytes to copy out. */
typedef struct
{
    SceSize size;          /* size of this struct as the guest knows it */
    char    name[32];      /* object name */
    SceUInt attr;          /* creation attribute bits */
    s32     initCount;     /* count at creation */
    s32     currentCount;  /* current count */
    s32     maxCount;      /* upper bound requested at creation */
} NativeSemaphore;

/* Host-side semaphore: native part plus the wait queue. */
typedef struct
{
    NativeSemaphore ns;
    SceUID waitingThreads[MAX_THREAD]; /* TODO: STATIC */
    u32 numWaitThreads;    /* entries in use in waitingThreads */
} Semaphore;

/* Cancel waits on a semaphore — not implemented; pretend success. */
void sceKernelCancelSema()
{
    /* SceUID id = PARAM(0); */
    _log(ERR, HLE, "UNIMPL: sceKernelCancelSema()");
    RETURN(0);
}

/* SceUID sceKernelCreateSema(const char *name, SceUInt attr, s32 initVal, s32 maxVal, SceKernelSemaOptParam *option); */
void sceKernelCreateSema()
{
    char *name = mem_readStr(PARAM(0));

    Semaphore *s = malloc(sizeof(Semaphore));
    SceUID id = ko_init(s, KO_SEMAPHORE);

    strncpy(s->ns.name, name, 32);
    s->ns.attr = PARAM(1);
    s->ns.initCount = PARAM(2);
    s->ns.currentCount = s->ns.initCount;
    s->ns.maxCount = PARAM(3);
    s->numWaitThreads = 0;

    _log(INF, HLE, "%d = sceKernelCreateSema(%s, %08x, %d, %d, %08x)", id, s->ns.name, s->ns.attr, s->ns.initCount, s->ns.maxCount, PARAM(4));

    RETURN(id);
    mem_freeStr(name);
}

/* s32 sceKernelDeleteSema(SceUID semaid); */
/* s32 sceKernelDeleteSema(SceUID semaid);
   Delete a semaphore; 0 on success, error for a bad UID. */
void sceKernelDeleteSema()
{
    SceUID uid = PARAM(0);

    if (!ko_get(uid))
    {
        _log(ERR, HLE, "sceKernelDeleteSema(%d): bad semaphore", uid);
        RETURN(SCE_ERROR_NOT_FOUND_SEMAPHORE);
        return;
    }
    _log(INF, HLE, "sceKernelDeleteSema(%d)", uid);
    ko_free(uid);
    RETURN(0);
}

void sceKernelReferSemaStatus()
{
    SceUID id = PARAM(0);
    Semaphore *s = ko_get(id);
    if (s)
    {
        NativeSemaphore outptr;
        s32 szToCopy;
        _log(INF, HLE, "sceKernelReferSemaStatus(%d, %08x)", id, PARAM(1));
        mem_readStruct(PARAM(1), &outptr, sizeof(NativeSemaphore));
        szToCopy = outptr.size - 4;
        mem_writeStruct(PARAM(1) + 4, s + 4, szToCopy);
        RETURN(0);
    }
    else {
        _log(ERR, HLE, "sceKernelReferSemaStatus(%d, %08x): bad semaphore", id, PARAM(1));
        RETURN(SCE_ERROR_NOT_FOUND_SEMAPHORE);
    }
}

/* s32 sceKernelSignalSema(SceUID semaid, s32 signal); */
/* s32 sceKernelSignalSema(SceUID semaid, s32 signal);
   Raise the semaphore count by `signal`, then wake every queued thread
   whose requested wait value is now satisfied, consuming the count as
   each one is released.  Reschedules once if anything woke. */
void sceKernelSignalSema()
{
    /* TODO: check that this thing really works :) */
    SceUID id = PARAM(0);
    u32 signal = PARAM(1);
    Semaphore *s = ko_get(id);
    if (s)
    {
        s32 oldval = s->ns.currentCount;
        u8 wokeThreads = 0;
        u32 i, j;
        s->ns.currentCount += signal;
        _log(DBG, HLE, "sceKernelSignalSema(%d, %d) (old: %d, new: %d)", id, signal, oldval, s->ns.currentCount);

        /* Check for threads to wake up - wake them */
        for (i = 0; i < s->numWaitThreads; i++)
        {
            SceUID id = s->waitingThreads[i];
            s32 wVal = __KernelGetWaitValue(id);
            if (wVal <= s->ns.currentCount)
            {
                __KernelResumeThread(id);
                s->ns.currentCount -= wVal;
                wokeThreads = 1;
                /* Remove slot i by shifting the tail down; i-- so the
                   element that moved into slot i is examined next. */
                s->numWaitThreads--;
                for (j = i; j < s->numWaitThreads; j++)
                    s->waitingThreads[j] = s->waitingThreads[j + 1];
                i--;
            }
        }

        /* pop the thread that were released from waiting */
        if (wokeThreads)
            __KernelReSchedule();

        RETURN(0);
    }
    else {
        _log(ERR, HLE, "sceKernelSignalSema(%d, %d): bad semaphore", id, signal);
        RETURN(SCE_ERROR_NOT_FOUND_SEMAPHORE);
    }
}

/* s32 sceKernelWaitSema(SceUID semaid, s32 signal, SceUInt *timeout); */
/* s32 sceKernelWaitSema(SceUID semaid, s32 signal, SceUInt *timeout);
   Take `signal` counts from the semaphore, or queue the current thread
   and block (no callbacks) until sceKernelSignalSema satisfies it.
   BUG FIX: the bad-UID log passed the timeout VALUE where the pointer
   belonged and then dereferenced that value again via mem_read32 — use
   the already-computed timeoutPtr/timeout pair like the success path. */
void sceKernelWaitSema()
{
    SceUID id = PARAM(0);
    s32 signal = PARAM(1);
    Semaphore *s = ko_get(id);
    u32 timeoutPtr = PARAM(2);
    u32 timeout;
    /* Optional timeout: read it only when the guest pointer is valid. */
    if (mem_isOk(timeoutPtr))
        timeout = mem_read32(timeoutPtr);
    else
        timeout = 0;
    if (s)
    {
        _log(DBG, HLE, "sceKernelWaitSema(%d, %d, %08x [%d])", id, signal, timeoutPtr, timeout);
        if (s->ns.currentCount >= signal)
            s->ns.currentCount -= signal;
        else {
            s->waitingThreads[s->numWaitThreads++] = __KernelGetCurThread();
            __KernelWaitCurThread(WAITTYPE_SEMA, id, signal, timeout, 0);
        }
        RETURN(0);
    }
    else {
        _log(ERR, HLE, "sceKernelWaitSema(%d, %d, %08x [%d]): bad semaphore", id, signal, timeoutPtr, timeout);
        RETURN(SCE_ERROR_NOT_FOUND_SEMAPHORE);
    }
}

/* Should be same as WaitSema but without the wait */
void sceKernelPollSema()
{
    SceUID id = PARAM(0);
    s32 signal = PARAM(1);
    Semaphore *s = ko_get(id);
    if (s)
    {
        if (s->ns.currentCount - signal < 0) {
            _log(INF, HLE, "sceKernelPollSema(%d, %d): actual %d => error", id, signal, s->ns.currentCount);
            RETURN(SCE_ERROR_SEMA_ZERO);
        }
        else
        {
            s->ns.currentCount -= signal;
            _log(INF, HLE, "sceKernelPollSema(%d, %d): new %d", id, signal, s->ns.currentCount);
            RETURN(0);
        }
    }
    else {
        _log(ERR, HLE, "%d = sceKernelPollSema(%d, %d): bad semaphore", SCE_ERROR_NOT_FOUND_SEMAPHORE, id, signal);
        RETURN(SCE_ERROR_NOT_FOUND_SEMAPHORE);
    }
}

/* s32 sceKernelWaitSemaCB(SceUID semaid, s32 signal, SceUInt *timeout); */
/* s32 sceKernelWaitSemaCB(SceUID semaid, s32 signal, SceUInt *timeout);
   Same as sceKernelWaitSema but the wait is entered with the callback
   flag set (callbacks may run while blocked). */
void sceKernelWaitSemaCB()
{
    SceUID uid = PARAM(0);
    s32 wanted = PARAM(1);
    u32 toutPtr = PARAM(2);
    Semaphore *sem = ko_get(uid);
    u32 toutVal = mem_isOk(toutPtr) ? mem_read32(toutPtr) : 0;

    if (!sem)
    {
        _log(ERR, HLE, "sceKernelWaitSemaCB(%d, %d, %08x [%d]): bad semaphore", uid, wanted, toutPtr, toutVal);
        RETURN(SCE_ERROR_NOT_FOUND_SEMAPHORE);
        return;
    }
    _log(DBG, HLE, "sceKernelWaitSemaCB(%d, %d, %08x [%d])", uid, wanted, toutPtr, toutVal);
    if (sem->ns.currentCount >= wanted)
    {
        sem->ns.currentCount -= wanted;
    }
    else
    {
        sem->waitingThreads[sem->numWaitThreads++] = __KernelGetCurThread();
        __KernelWaitCurThread(WAITTYPE_SEMA, uid, wanted, toutVal, 1);
    }
    RETURN(0);
}

/***************
 * EVENT FLAGS *
 ***************/

/* One queued waiter on an event flag: which thread, what bit pattern it
   wants, its wait mode, and where to store the matched bits. */
typedef struct
{
    SceUID tid;     /* waiting thread */
    u32 bits;       /* pattern the thread is waiting for */
    u32 wait;       /* wait mode passed to __KernelEventFlagMatches */
    u32 outAddr;    /* guest address for the matched pattern (may be invalid) */
} EventFlagTh;

/* Guest-visible event flag state. */
typedef struct
{
    SceSize     size;
    char        name[KERNELOBJECT_MAX_NAME_LENGTH + 1];
    SceUInt     attr;
    SceUInt     initPattern;     /* bit pattern at creation */
    SceUInt     currentPattern;  /* live bit pattern */
    u32         numWaitThreads;  /* entries in use in waitingThreads below */
} NativeEventFlag;

/* Host-side event flag: native part plus the wait queue. */
typedef struct
{
    NativeEventFlag nt;
    EventFlagTh waitingThreads[MAX_THREAD]; /* TODO: STATIC */
} EventFlag;

/** Event flag creation attributes */
enum PspEventFlagAttributes
{
    /** Allow the event flag to be waited upon by multiple threads */
    PSP_EVENT_WAITMULTIPLE = 0x200
};

/* SceUID sceKernelCreateEventFlag(const char *name, s32 attr, s32 bits, SceKernelEventFlagOptParam *opt); */
void sceKernelCreateEventFlag()
{
    char *name = mem_readStr(PARAM(0));

    EventFlag *e = malloc(sizeof(EventFlag));
    SceUID id = ko_init(e, KO_EVENTFLAG);
    e->nt.size = sizeof(EventFlag);
    strncpy(e->nt.name, name, 32);
    e->nt.attr = PARAM(1);
    e->nt.initPattern = PARAM(2);
    e->nt.currentPattern = e->nt.initPattern;
    e->nt.numWaitThreads = 0;

    _log(INF, HLE, "%d = sceKernelCreateEventFlag(\"%s\", %08x, %08x, %08x)", id, e->nt.name, e->nt.attr, e->nt.currentPattern, PARAM(3));
    RETURN(id);
    mem_freeStr(name);
}

/* s32 sceKernelClearEventFlag(SceUID evid, u32 bits); */
/* s32 sceKernelClearEventFlag(SceUID evid, u32 bits);
   Clear the given bits from the flag's current pattern (AND with ~bits). */
void sceKernelClearEventFlag()
{
    SceUID uid = PARAM(0);
    u32 mask = PARAM(1);
    EventFlag *flag = ko_get(uid);

    if (!flag)
    {
        _log(ERR, HLE, "sceKernelClearEventFlag(%d, %08x): bad event flag", uid, mask);
        RETURN(SCE_ERROR_NOT_FOUND_EVENT_FLAG);
        return;
    }
    _log(INF, HLE, "sceKernelClearEventFlag(%d, %08x)", uid, mask);
    flag->nt.currentPattern &= ~mask;
    RETURN(0);
}

/* s32 sceKernelDeleteEventFlag(s32 evid); */
/* s32 sceKernelDeleteEventFlag(s32 evid);
   Delete an event flag; 0 on success, error for a bad UID. */
void sceKernelDeleteEventFlag()
{
    SceUID uid = PARAM(0);

    if (!ko_get(uid))
    {
        _log(ERR, HLE, "sceKernelDeleteEventFlag(%d): bad event flag", uid);
        RETURN(SCE_ERROR_NOT_FOUND_EVENT_FLAG);
        return;
    }
    _log(INF, HLE, "sceKernelDeleteEventFlag(%d)", uid);
    ko_free(uid);
    RETURN(0);
}

/* s32 sceKernelSetEventFlag(SceUID evid, u32 bits); */
/* s32 sceKernelSetEventFlag(SceUID evid, u32 bits);
   OR `bits` into the flag's pattern, then wake every queued waiter whose
   condition now matches (matching may also consume bits, per
   __KernelEventFlagMatches).  Reschedules once if anything woke. */
void sceKernelSetEventFlag()
{
    SceUID id = PARAM(0);
    u32 bits = PARAM(1);
    EventFlag *e = ko_get(id);

    if (e)
    {
        u8 wokeThreads = 0;
        u32 i, j;

        _log(INF, HLE, "sceKernelSetEventFlag(%d, %08x)", id, bits);

        e->nt.currentPattern |= bits;

        /* Check if threads waiting for the event flag now match */
        for (i = 0; i < e->nt.numWaitThreads; i++)
        {
            EventFlagTh *t = &e->waitingThreads[i];
            if (__KernelEventFlagMatches(&e->nt.currentPattern, t->bits, t->wait, t->outAddr))
            {
                __KernelResumeThread(t->tid);
                wokeThreads = 1;
                /* Remove slot i by shifting the tail down; i-- so the
                   element that moved into slot i is examined next. */
                e->nt.numWaitThreads--;
                for (j = i; j < e->nt.numWaitThreads; j++)
                    e->waitingThreads[j] = e->waitingThreads[j + 1];
                i--;
            }
        }

        if (wokeThreads)
            __KernelReSchedule();

        RETURN(0);
    }
    else {
        _log(ERR, HLE, "sceKernelSetEventFlag(%d, %08x): bad event flag", id, bits);
        RETURN(SCE_ERROR_NOT_FOUND_EVENT_FLAG);
    }
}

/* s32 sceKernelWaitEventFlag(SceUID evid, u32 bits, u32 wait, u32 *outBits, SceUInt *timeout); */
/* s32 sceKernelWaitEventFlag(SceUID evid, u32 bits, u32 wait, u32 *outBits, SceUInt *timeout);
   If the flag already matches, return immediately (match may write
   outBits / consume bits).  Otherwise queue the current thread with its
   wanted pattern and block without callback servicing. */
void sceKernelWaitEventFlag()
{
    SceUID id = PARAM(0);
    u32 bits = PARAM(1);
    u32 wait = PARAM(2);
    u32 outBitsPtr = PARAM(3);
    u32 timeoutPtr = PARAM(4);
    EventFlag *e = ko_get(id);

    if (e)
    {
        EventFlagTh *th;
        _log(INF, HLE, "sceKernelWaitEventFlag(%d, %08x, %d, %08x, %08x)", id, bits, wait, outBitsPtr, timeoutPtr);
        if (!__KernelEventFlagMatches(&e->nt.currentPattern, bits, wait, outBitsPtr))
        {
            u32 timeout = 0;
            /* Record what this thread is waiting for so SetEventFlag can
               re-evaluate the match later. */
            th = &e->waitingThreads[e->nt.numWaitThreads++];
            th->tid = __KernelGetCurThread();
            th->bits = bits;
            th->wait = wait;
            th->outAddr = outBitsPtr;
            if (mem_isOk(timeoutPtr))
                timeout = mem_read32(timeoutPtr);

            __KernelWaitCurThread(WAITTYPE_EVENTFLAG, id, 0, timeout, 0);
        }
        RETURN(0);
    }
    else {
        _log(ERR, HLE, "sceKernelWaitEventFlag(%d, %08x, %d, %08x, %08x): bad event flag", id, bits, wait, outBitsPtr, timeoutPtr);
        RETURN(SCE_ERROR_NOT_FOUND_EVENT_FLAG);
    }
}

/* s32 sceKernelWaitEventFlagCB(SceUID evid, u32 bits, u32 wait, u32 *outBits, SceUInt *timeout); */
/* s32 sceKernelWaitEventFlagCB(SceUID evid, u32 bits, u32 wait, u32 *outBits, SceUInt *timeout);
   Identical to sceKernelWaitEventFlag except the wait is entered with
   the callback flag set (final argument 1). */
void sceKernelWaitEventFlagCB()
{
    SceUID id = PARAM(0);
    u32 bits = PARAM(1);
    u32 wait = PARAM(2);
    u32 outBitsPtr = PARAM(3);
    u32 timeoutPtr = PARAM(4);
    EventFlag *e = ko_get(id);

    if (e)
    {
        EventFlagTh *th;
        _log(INF, HLE, "sceKernelWaitEventFlagCB(%d, %08x, %d, %08x, %08x)", id, bits, wait, outBitsPtr, timeoutPtr);
        if (!__KernelEventFlagMatches(&e->nt.currentPattern, bits, wait, outBitsPtr))
        {
            u32 timeout = 0;
            /* Record what this thread is waiting for so SetEventFlag can
               re-evaluate the match later. */
            th = &e->waitingThreads[e->nt.numWaitThreads++];
            th->tid = __KernelGetCurThread();
            th->bits = bits;
            th->wait = wait;
            th->outAddr = outBitsPtr;
            if (mem_isOk(timeoutPtr))
                timeout = mem_read32(timeoutPtr);

            __KernelWaitCurThread(WAITTYPE_EVENTFLAG, id, 0, timeout, 1);
        }
        RETURN(0);
    }
    else {
        _log(ERR, HLE, "sceKernelWaitEventFlagCB(%d, %08x, %d, %08x, %08x): bad event flag", id, bits, wait, outBitsPtr, timeoutPtr);
        RETURN(SCE_ERROR_NOT_FOUND_EVENT_FLAG);
    }
}

/* s32 sceKernelPollEventFlag(s32 evid, u32 bits, u32 wait, u32 *outBits); */
/* s32 sceKernelPollEventFlag(s32 evid, u32 bits, u32 wait, u32 *outBits);
   Non-blocking event flag test; on failure the current pattern is still
   written to outBits (when valid). */
void sceKernelPollEventFlag()
{
    SceUID uid = PARAM(0);
    u32 wanted = PARAM(1);
    u32 waitMode = PARAM(2);
    u32 outPtr = PARAM(3);
    EventFlag *flag = ko_get(uid);

    if (!flag)
    {
        _log(ERR, HLE, "sceKernelPollEventFlag(%d, %08x, %d, %08x): bad event flag", uid, wanted, waitMode, outPtr);
        RETURN(SCE_ERROR_NOT_FOUND_EVENT_FLAG);
        return;
    }
    if (__KernelEventFlagMatches(&flag->nt.currentPattern, wanted, waitMode, outPtr))
    {
        _log(INF, HLE, "sceKernelPollEventFlag(%d, %08x, %d, %08x)", uid, wanted, waitMode, outPtr);
        RETURN(0);
    }
    else
    {
        if (mem_isOk(outPtr))
            mem_write32(outPtr, flag->nt.currentPattern);
        _log(INF, HLE, "sceKernelPollEventFlag(%d, %08x, %d, %08x): polling failed", uid, wanted, waitMode, outPtr);
        RETURN(SCE_ERROR_EVENT_FLAG_POLL_FAILED);
    }
}

/* s32 sceKernelReferEventFlagStatus(SceUID event, SceKernelEventFlagInfo *status); */
/* s32 sceKernelReferEventFlagStatus(SceUID event, SceKernelEventFlagInfo *status);
   Copy the native event-flag struct to guest memory at `ptr` (when valid). */
void sceKernelReferEventFlagStatus()
{
    SceUID uid = PARAM(0);
    u32 statusPtr = PARAM(1);
    EventFlag *flag = ko_get(uid);

    _log(INF, HLE, "sceKernelReferEventFlagStatus(%d, %08x)", uid, statusPtr);
    if (!flag)
    {
        _log(ERR, HLE, "sceKernelReferEventFlagStatus(%d, %08x): bad event flag", uid, statusPtr);
        RETURN(SCE_ERROR_NOT_FOUND_EVENT_FLAG);
        return;
    }
    if (mem_isOk(statusPtr))
        mem_writeStruct(statusPtr, &flag->nt, sizeof(NativeEventFlag));
    RETURN(0);
}

/* Cancel waits on an event flag — not implemented; pretend success. */
void sceKernelCancelEventFlag()
{
    _log(ERR, HLE, "UNIMPL: sceKernelCancelEventFlag()");
    RETURN(0);
}

/*******
 * VPL *
 *******/

/* Guest-visible VPL (variable-length pool) info struct. */
typedef struct
{
    SceSize size;
    char name[KERNELOBJECT_MAX_NAME_LENGTH + 1];
    SceUInt attr;
    s32 poolSize;        /* total pool size in bytes */
    s32 freeSize;        /* refreshed from the allocator on refer-status */
    s32 numWaitThreads;
} SceKernelVplInfo;

/* Host-side VPL: info struct plus the backing sub-allocator carved out
   of userMemory. */
typedef struct
{
    SceKernelVplInfo nv;
    s32 size;            /* requested pool size */
    s32 *freeBlocks;     /* NOTE(review): never assigned in this file */
    BlkAlloc alloc;      /* allocator managing the pool's interior */
    u32 address;         /* base address of the pool inside userMemory */
} VPL;

/* Create a variable-length memory pool: reserve `size` bytes from user
   memory and set up a private block allocator over that region.
   Guest args: name, partition id (unused), attr, size, (options ignored). */
void sceKernelCreateVpl()
{
    char *name = mem_readStr(PARAM(0));
    VPL *vpl = malloc(sizeof(VPL));
    SceUID id = ko_init(vpl, KO_VPL);
    /* "VPL: " (5) + 31-char name + NUL fits exactly in 37 bytes. */
    char blkName[37] = {0};

    strncpy(vpl->nv.name, name, 32);
    vpl->nv.name[31] = '\0';
    /* vpl->nv.mpid = PARAM(1); seems to be the standard memory partition (user, kernel etc) */
    vpl->nv.attr = PARAM(2);
    vpl->size = PARAM(3);
    vpl->nv.poolSize = vpl->size;
    vpl->nv.size = sizeof(vpl->nv);
    vpl->nv.numWaitThreads = 0;
    vpl->nv.freeSize = vpl->nv.poolSize;

    /* Carve the pool out of user memory, then run a sub-allocator in it. */
    vpl->address = blkAlloc_alloc(&userMemory, vpl->size, 0, "VPL", 1);
    sprintf(blkName, "VPL: %s", vpl->nv.name);
    blkAlloc_init(&vpl->alloc, vpl->address, vpl->size, blkName);

    _log(INF, HLE, "sceKernelCreateVpl(\"%s\", block = %d, attr = %d, size = %d)", 
                      name, PARAM(1), vpl->nv.attr, vpl->size);

    RETURN(id);
    mem_freeStr(name);
}

/* Delete a VPL: tear down its sub-allocator, return its backing block to
   user memory, then drop the kernel object. */
void sceKernelDeleteVpl()
{
    SceUID uid = PARAM(0);
    VPL *pool = ko_get(uid);

    if (!pool)
    {
        _log(ERR, HLE, "sceKernelDeleteVpl(%d): bad VPL", uid);
        RETURN(SCE_ERROR_NOT_FOUND_VPOOL);
        return;
    }
    _log(INF, HLE, "sceKernelDeleteVpl(%d)", uid);
    blkAlloc_shutdown(&pool->alloc);
    blkAlloc_free(&userMemory, pool->address);
    ko_free(uid);
    RETURN(0);
}

/* Allocate `size` bytes from a VPL and store the address at PARAM(2).
   The timeout argument is accepted but never used (allocation either
   succeeds now or fails with SCE_ERROR_NO_MEMORY). */
void sceKernelAllocateVpl()
{
    SceUID uid = PARAM(0);
    u32 reqSize = PARAM(1);
    u32 outPtr = PARAM(2);
    u32 waitArg = PARAM(3);
    VPL *pool = ko_get(uid);
    u32 block;

    if (!pool)
    {
        _log(ERR, HLE, "sceKernelAllocateVpl(vpl = %d, size = %d, ptrout = %08x, timeout = %d): invalid VPL", uid, reqSize, outPtr, waitArg);
        RETURN(SCE_ERROR_NOT_FOUND_VPOOL);
        return;
    }
    block = blkAlloc_alloc(&pool->alloc, reqSize, 0, NULL, 1);
    if (block)
    {
        _log(INF, HLE, "0 = sceKernelAllocateVpl(vpl = %d, size = %d, ptrout = %08x, timeout = %d): 0x%08x", uid, reqSize, outPtr, waitArg, block);
        mem_write32(outPtr, block);
        RETURN(0);
    }
    else
    {
        _log(ERR, HLE, "sceKernelAllocateVpl(vpl = %d, size = %d, ptrout = %08x, timeout = %d): no memory left", uid, reqSize, outPtr, waitArg);
        RETURN(SCE_ERROR_NO_MEMORY);
    }
}

/* sceKernelAllocateVplCB(SceUID uid, u32 size, void **dataPtr, u32 *timeout)
   — same argument layout as sceKernelAllocateVpl.
   BUG FIX: the timeout was read from PARAM(2), which is actually the
   output data pointer; the timeout argument is PARAM(3). */
void sceKernelAllocateVplCB()
{
    SceUID id = PARAM(0);
    u32 size = PARAM(1);
    u32 addrPtr = PARAM(2);
    u32 timeOut = PARAM(3);
    VPL *vpl = ko_get(id);
    if (vpl)
    {
        u32 addr = blkAlloc_alloc(&vpl->alloc, size, 0, NULL, 1);
        if (addr)
        {
            _log(INF, HLE, "sceKernelAllocateVplCB(vpl = %d, size = %d, ptrout = %08x, timeout = %d)", id, size, addrPtr, timeOut);
            mem_write32(addrPtr, addr);
            RETURN(0);
        }
        else {
            _log(ERR, HLE, "sceKernelAllocateVplCB(vpl = %d, size = %d, ptrout = %08x, timeout = %d): no memory left", id, size, addrPtr, timeOut);
            RETURN(SCE_ERROR_NO_MEMORY);
        }
    }
    else {
        _log(ERR, HLE, "sceKernelAllocateVplCB(vpl = %d, size = %d, ptrout = %08x, timeout = %d): bad VPL", id, size, addrPtr, timeOut);
        RETURN(SCE_ERROR_NOT_FOUND_VPOOL);
    }
}

/* sceKernelTryAllocateVpl(SceUID uid, u32 size, void **dataPtr)
   — non-blocking allocate; this variant takes NO timeout argument.
   BUG FIXES: PARAM(2) is the output data pointer (it was mislabeled as a
   timeout), and all log messages wrongly named sceKernelAllocateVplCB. */
void sceKernelTryAllocateVpl()
{
    SceUID id = PARAM(0);
    u32 size = PARAM(1);
    u32 addrPtr = PARAM(2);
    VPL *vpl = ko_get(id);
    if (vpl)
    {
        u32 addr = blkAlloc_alloc(&vpl->alloc, size, 0, NULL, 1);
        if (addr)
        {
            _log(INF, HLE, "sceKernelTryAllocateVpl(vpl = %d, size = %d, ptrout = %08x)", id, size, addrPtr);
            mem_write32(addrPtr, addr);
            RETURN(0);
        }
        else {
            _log(ERR, HLE, "sceKernelTryAllocateVpl(vpl = %d, size = %d, ptrout = %08x): no memory left", id, size, addrPtr);
            RETURN(SCE_ERROR_NO_MEMORY);
        }
    }
    else {
        _log(ERR, HLE, "sceKernelTryAllocateVpl(vpl = %d, size = %d, ptrout = %08x): bad VPL", id, size, addrPtr);
        RETURN(SCE_ERROR_NOT_FOUND_VPOOL);
    }
}

/* Return a block previously allocated from a VPL.  Rejects addresses that
   do not belong to the pool. */
void sceKernelFreeVpl()
{
    SceUID uid = PARAM(0);
    u32 blockAddr = PARAM(1);
    VPL *pool = ko_get(uid);

    if (!pool)
    {
        _log(ERR, HLE, "sceKernelFreeVpl(vpl = %d, ptr = %08x): bad VPL", uid, blockAddr);
        RETURN(SCE_ERROR_NOT_FOUND_VPOOL);
        return;
    }
    if (!blkAlloc_addrInBlock(&pool->alloc, blockAddr))
    {
        _log(ERR, HLE, "sceKernelFreeVpl(vpl = %d, ptr = %08x): bad address", uid, blockAddr);
        RETURN(SCE_ERROR_ILLEGAL_MEMBLOCK);
        return;
    }
    blkAlloc_free(&pool->alloc, blockAddr);
    _log(INF, HLE, "0 = sceKernelFreeVpl(vpl = %d, ptr = %08x)", uid, blockAddr);
    RETURN(0);
}

/* Cancel waits on a VPL — not implemented; pretend success. */
void sceKernelCancelVpl()
{
    _log(ERR, HLE, "UNIMPL: sceKernelCancelVpl()");
    RETURN(0);
}

void sceKernelReferVplStatus()
{
    SceUID id = PARAM(0);
    VPL *v = ko_get(id);
    if (v)
    {
        _log(INF, HLE, "sceKernelReferVplStatus(%d, %08x)", id, PARAM(1));
        v->nv.freeSize = blkAlloc_getFreeSize(&v->alloc);
        mem_writeStruct(PARAM(1), &v->nv, sizeof(SceKernelVplInfo));
        RETURN(0);
    }
    else {
        _log(ERR, HLE, "sceKernelReferVplStatus(%d, %08x): bad VPL", id, PARAM(1));
        RETURN(SCE_ERROR_NOT_FOUND_VPOOL);
    }
}

/*******
 * FPL *
 *******/

/* FPL - Fixed Length Dynamic Memory Pool - every item has the same length */
/* FPL - Fixed Length Dynamic Memory Pool - every item has the same length */
typedef struct
{
    char name[KERNELOBJECT_MAX_NAME_LENGTH + 1];
    SceUID mpid;          /* memory partition id */
    u32 attr;             /* creation attribute bits */
    s32 blocksize;        /* size of each fixed block */
    s32 numBlocks;        /* number of blocks in the pool */
    s32 numFreeBlocks;    /* NOTE(review): not updated by alloc/free in this file */
    s32 numWaitThreads;
    s32 *freeBlocks;      /* per-block bookkeeping, allocated in CreateFpl */
    u32 address;          /* pool base inside userMemory */
} FPL;

/* sceKernelCreateFpl(const char *name, SceUID mpid, SceUs32 attr, SceSize blocksize, s32 numBlocks, optparam) */
void sceKernelCreateFpl()
{
    char *name = mem_readStr(PARAM(0));
    s32 totalSize;

    FPL *fpl = malloc(sizeof(FPL));
    SceUID id = ko_init(fpl, KO_FPL);
    strncpy(fpl->name, name, 32);
    fpl->mpid = PARAM(1); /* partition */
    fpl->attr = PARAM(2);
    fpl->blocksize = PARAM(3);
    fpl->numBlocks = PARAM(4);
    fpl->numWaitThreads = 0;
    fpl->freeBlocks = malloc(sizeof(char) * fpl->numBlocks);
    
    totalSize = fpl->blocksize * fpl->numBlocks;
    fpl->address = blkAlloc_alloc(&userMemory, totalSize, 0, "FPL", 1);

    memset(fpl->freeBlocks, 0, fpl->numBlocks * sizeof(char));
    _log(INF, HLE, "%d = sceKernelCreateFpl(\"%s\", partition = %d, attr = %d, bsize = %d, nb = %d)", 
                     id, name, fpl->mpid, fpl->attr, fpl->blocksize, fpl->numBlocks);

    RETURN(id);
    mem_freeStr(name);
}

void sceKernelDeleteFpl()
{
    SceUID id = PARAM(0);
    FPL *fpl = ko_get(id);
    if (fpl)
    {
        _log(INF, HLE, "sceKernelDeleteFpl(%d)", id);
        blkAlloc_free(&userMemory, fpl->address);
        ko_free(id);
        RETURN(0);
    }
    else {
        _log(ERR, HLE, "sceKernelDeleteFpl(%d): bad FPL", id);
        RETURN(SCE_ERROR_NOT_FOUND_FPOOL);
    }
}

/* Simplified FPL allocate: always hands out the pool's base address
   (no per-block bookkeeping yet) and ignores the timeout. */
void sceKernelAllocateFpl()
{
    SceUID uid = PARAM(0);
    u32 toutPtr = PARAM(2);
    FPL *pool = ko_get(uid);

    if (!pool)
    {
        _log(ERR, HLE, "sceKernelAllocateFpl(%d, %08x, %08x [%d]): bad FPL", uid, PARAM(1), toutPtr, mem_isOk(toutPtr) ? mem_read32(toutPtr) : -1);
        RETURN(SCE_ERROR_NOT_FOUND_FPOOL);
        return;
    }
    _log(INF, HLE, "sceKernelAllocateFpl(%d, %08x, %08x [%d])", uid, PARAM(1), toutPtr, mem_isOk(toutPtr) ? mem_read32(toutPtr) : -1);
    mem_write32(PARAM(1), pool->address);
    RETURN(0);
}

/* FPL allocate with callbacks — not implemented; report failure. */
void sceKernelAllocateFplCB()
{
    SceUID uid = PARAM(0);

    _log(ERR, HLE, "UNIMPL: sceKernelAllocateFplCB(%d)", uid);
    RETURN(-1);
}

/* Non-blocking FPL allocate; like sceKernelAllocateFpl, simply hands out
   the pool base address.
   BUG FIX: the error log wrongly named sceKernelAllocateFpl. */
void sceKernelTryAllocateFpl()
{
    SceUID id = PARAM(0);
    FPL *fpl = ko_get(id);
    if (fpl)
    {
        _log(INF, HLE, "sceKernelTryAllocateFpl(%d, %08x)", id, PARAM(1));
        mem_write32(PARAM(1), fpl->address);
        RETURN(0);
    }
    else {
        _log(ERR, HLE, "sceKernelTryAllocateFpl(%d, %08x): bad FPL", id, PARAM(1));
        RETURN(SCE_ERROR_NOT_FOUND_FPOOL);
    }
}

/* Free an FPL block — not implemented; report failure. */
void sceKernelFreeFpl()
{
    SceUID uid = PARAM(0);

    _log(ERR, HLE, "UNIMPL: sceKernelFreeFpl(%d)", uid);
    RETURN(-1);
}

/* Cancel waits on an FPL — not implemented; report failure. */
void sceKernelCancelFpl()
{
    SceUID uid = PARAM(0);

    _log(ERR, HLE, "UNIMPL: sceKernelCancelFpl(%d)", uid);
    RETURN(-1);
}

/* Query FPL status — not implemented; report failure. */
void sceKernelReferFplStatus()
{
    SceUID uid = PARAM(0);

    _log(ERR, HLE, "UNIMPL: sceKernelReferFplStatus(%d)", uid);
    RETURN(-1);
}

/*********
 * TIMER *
 *********/

/* Trampoline hit when a guest timer handler returns; currently a no-op. */
void _sceKernelReturnFromTimerHandler()
{
    _log(ERR, HLE, "_sceKernelReturnFromTimerHandler");
    RETURN(0);
}

/* Write the current system time (approximated by the host clock()) into
   the guest SceKernelSysClock struct at PARAM(0); high word is 0. */
void sceKernelGetSystemTime()
{
    SceKernelSysClock sysclk;
    clock_t now = clock();

    sysclk.lo = now;
    sysclk.hi = 0;
    mem_writeStruct(PARAM(0), &sysclk, sizeof(SceKernelSysClock));
    _log(INF, HLE, "(%08x, %08x) = sceKernelGetSystemTime(%08x)", sysclk.hi, sysclk.lo, PARAM(0));
}

/* Return the low 32 bits of the system time (host clock()). */
void sceKernelGetSystemTimeLow()
{
    clock_t now = clock();

    _log(DBG, HLE, "%08x = sceKernelGetSystemTimeLow()", now);
    RETURN(now & 0xffffffff);
}

void sceKernelGetSystemTimeWide()
{
    clock_t t = clock();
    _log(INF, HLE, "%016lx = sceKernelGetSystemTimeWide()", t);
    RETURN(t);
    RETURN1(0);
}

void sceKernelUSec2SysClock()
{
    u32 microseconds = PARAM(0);
    SceKernelSysClock clock;
    clock.lo = microseconds;
    mem_writeStruct(PARAM(1), &clock, sizeof(SceKernelSysClock));
    _log(WRN, HLE, "sceKernelUSec2SysClock(%d, %08x)", PARAM(0), PARAM(1));
    RETURN(0);
}

void sceKernelUSec2SysClockWide()
{
    _log(INF, HLE, "sceKernelUSec2SysClockWide(%08x)", PARAM(0));
    RETURN(PARAM(0));
    RETURN1(0);
}

void sceKernelSysClock2USec()
{
    SceKernelSysClock clock;
    mem_readStruct(PARAM(0), &clock, sizeof(SceKernelSysClock));
    u64 time = clock.lo | ((u64)clock.hi << 32);
    if (mem_isOk(PARAM(1)))
        mem_write32(PARAM(1), time / 1000000);
    if (mem_isOk(PARAM(2)))
        mem_write32(PARAM(2), time % 1000000);
    RETURN(0);
}

/* Wide variant: the 64-bit clock arrives in two registers (PARAM(0) low,
   PARAM(1) high); seconds go to PARAM(2), remainder usecs to PARAM(3). */
void sceKernelSysClock2USecWide()
{
    u64 wide = ((u64)PARAM(1) << 32) | PARAM(0);

    _log(INF, HLE, "sceKernelSysClock2USecWide(clock = %16lx, lo = %08x, hi = %08x)", wide, PARAM(2), PARAM(3));
    if (mem_isOk(PARAM(2)))
        mem_write32(PARAM(2), wide / 1000000);
    if (mem_isOk(PARAM(3)))
        mem_write32(PARAM(3), wide % 1000000);
    RETURN(0);
}

/**********
 * VTIMER *
 **********/

/* Host-side virtual timer object (mostly unimplemented elsewhere). */
typedef struct
{
    SceSize size;        /* NOTE(review): never initialized in this file */
    char name[KERNELOBJECT_MAX_NAME_LENGTH + 1];
    u64 startTime;       /* set to 0 at creation */
    u8 running;          /* set to 1 at creation */
    u32 handler;         /* guest handler address (unset here) */
    u64 handlerTime;     /* when the handler should fire (unset here) */
    u32 argument;        /* handler argument (unset here) */
} VTimer;

void sceKernelCreateVTimer()
{
    char *name = mem_readStr(PARAM(0));
    VTimer *vt = malloc(sizeof(VTimer));

    SceUID id = ko_init(vt, KO_VTIMER);

    _log(ERR, HLE, "%d = sceKernelCreateVTimer(%s)", id, name);
    strncpy(vt->name, name, 8);
    vt->running = 1;
    vt->startTime = 0;
    RETURN(id);
    mem_freeStr(name);
}

/* Stub — reports success without actually starting the vtimer; the ERR log
   flags the call so missing functionality is visible in traces. */
void sceKernelStartVTimer()
{
    _log(ERR, HLE, "sceKernelStartVTimer");
    RETURN(0);
}

/* Stub — accepts the handler registration and reports success without
   storing anything; the ERR log flags the call in traces. */
void sceKernelSetVTimerHandler()
{
    _log(ERR, HLE, "sceKernelSetVTimerHandler");
    RETURN(0);
}

/***********
 * THREADS *
 ***********/

void sceKernelReferThreadStatus()
{
    SceUID threadID = PARAM(0);
    SceKernelThread *t;

    if (threadID == 0)
        threadID = __KernelGetCurThread();

    t = ko_get(threadID);
    if (t)
    {
        _log(INF, THREAD, "sceKernelReferThreadStatus(%d, %08x)", threadID, PARAM(1));
        t->nt.nativeSize = sizeof(NativeThread);
        mem_writeStruct(PARAM(1), (void*)&t->nt, sizeof(NativeThread));
        RETURN(0);
    }
    else {
        _log(ERR, THREAD, "sceKernelReferThreadStatus(%d, %08x): bad thread", threadID, PARAM(1));
        RETURN(SCE_ERROR_NOT_FOUND_THREAD);
    }
}

/* Stub — always reports 64 KiB of free stack instead of measuring the real
   headroom of the calling thread. */
void sceKernelCheckThreadStack()
{
    _log(ERR, THREAD, "sceKernelCheckThreadStack() (returned 65536 (lie))");
    RETURN(65536); /* fixed, optimistic answer */
}

/* sceKernelCreateThread(namePtr, entry, prio, stacksize, attr, optPtr) —
   create a new (dormant) thread object and return its UID.  The option
   struct in PARAM(5) is ignored. */
void sceKernelCreateThread()
{
    u32 namePtr   = PARAM(0);
    u32 entry     = PARAM(1);
    u32 prio      = PARAM(2);
    u32 stacksize = PARAM(3);
    u32 attr      = PARAM(4);

    if (!mem_isOk(namePtr)) {
        /* NOTE(review): bails out without setting a return value, so v0 keeps
           whatever it held before the call — confirm this is intended. */
        _log(ERR, THREAD, "WTF, sceKernelCreateThread ran with a not valid name?");
        return;
    }

    char *threadName = mem_readStr(namePtr);
    SceUID id = __KernelCreateThread(threadName, entry, prio, stacksize, attr);
    _log(INF, THREAD, "%d = sceKernelCreateThread(name = \"%s\", entry = %08x, prio = %08x, stacksize = %d)", id, threadName, entry, prio, stacksize);
    RETURN(id);
    mem_freeStr(threadName);
}

/* sceKernelStartThread(uid, argSize, argPtr) — move a dormant thread to the
   READY state and copy an argument block onto its stack.  Fails if the UID
   is unknown or the thread is not dormant. */
void sceKernelStartThread()
{
    s32 threadToStartID = PARAM(0);
    u32 argSize = PARAM(1);
    u32 argBlockPtr = PARAM(2);
    SceKernelThread *t = ko_get(threadToStartID);
    if (t)
    {
        if (t->nt.status == THREADSTATUS_DORMANT)
        {
            s32 i;

            _log(INF, THREAD, "sceKernelStartThread(thread = %d, argSize = %d, argPtr = %08x)", threadToStartID, argSize, argBlockPtr);

            RETURN(0); /* return success (this does not exit this function) */

            /* NOTE(review): RETURN is issued before the reset below —
               presumably so the caller's v0 is written before any context
               state changes; confirm before reordering. */
            __KernelResetThread(t);

            t->nt.status = THREADSTATUS_READY;
            /* the argument block size is handed to the new thread in a0 */
            t->context.r[A0] = argSize;
            /* now copy argument to stack */
            for (i = 0; i < (s32)argSize; i++)
                mem_write8(t->context.r[SP] + i, mem_read8(argBlockPtr + i));
        }
        else {
            _log(ERR, THREAD, "sceKernelStartThread(thread = %d, argSize = %d, argPtr = %08x): thread not dormant", threadToStartID, argSize, argBlockPtr);
            RETURN(SCE_ERROR_THREAD_IS_NOT_DORMANT);
        }
    }
    else {
        _log(ERR, THREAD, "sceKernelStartThread(thread = %d, argSize = %d, argPtr = %08x): bad thread", threadToStartID, argSize, argBlockPtr);
        RETURN(SCE_ERROR_NOT_FOUND_THREAD);
    }
}

/* sceKernelExitThread(exitStatus) — terminate the calling thread: mark it
   dormant, record the exit status from PARAM(0), wake any thread blocked in
   sceKernelWaitThreadEnd on it, then switch to another thread.  Control
   never returns to the exiting thread. */
void sceKernelExitThread()
{
    SceKernelThread *t;

    _log(INF, THREAD, "sceKernelExitThread(%d)", PARAM(0));

    /* NOTE(review): no NULL check on ko_get — assumes the current thread's
       object always exists. */
    t = ko_get(__KernelGetCurThread());
    t->nt.status = THREADSTATUS_DORMANT;
    t->nt.exitStatus = PARAM(0);

    /* Find threads that waited for me
       Wake them */
    __KernelResumeThreads(WAITTYPE_THREADEND, __KernelGetCurThread(), RESUMETYPE_ID);
    __KernelReSchedule();
}

/* sceKernelExitDeleteThread() — exit the calling thread AND delete its
   kernel object in a single call.  The exit status is taken from the
   thread's own v0 register. */
void sceKernelExitDeleteThread()
{
    SceKernelThread *t;
    u32 delThread = __KernelGetCurThread();

    _log(INF, THREAD, "[%d] sceKernelExitDeleteThread()", __KernelGetCurThread());
    /* Find threads that waited for me
       Wake them */

    t = ko_get(__KernelGetCurThread());
    t->nt.status = THREADSTATUS_DORMANT;
    t->nt.exitStatus = t->context.r[V0];
    __KernelResumeThreads(WAITTYPE_THREADEND, __KernelGetCurThread(), RESUMETYPE_ID);
    __KernelReSchedule();
    /* NOTE(review): the delete (and RETURN) happen only after rescheduling,
       so the RETURN writes into whichever context is current by then —
       confirm this ordering is intentional before changing it. */
    RETURN(__KernelDeleteThread(delThread));
    _log(INF, THREAD, "Thread removed");
}

/* _sceKernelExitThread() — internal exit path (no explicit status argument):
   the thread's current v0 becomes its exit status.  Marks the caller
   dormant, wakes its joiners and switches away. */
void _sceKernelExitThread()
{
    SceKernelThread *self;

    _log(INF, THREAD, "_sceKernelExitThread()");

    self = ko_get(__KernelGetCurThread());
    self->nt.status = THREADSTATUS_DORMANT;
    self->nt.exitStatus = self->context.r[V0];

    /* Release anyone blocked in sceKernelWaitThreadEnd on this thread,
       then hand the CPU to the scheduler. */
    __KernelResumeThreads(WAITTYPE_THREADEND, __KernelGetCurThread(), RESUMETYPE_ID);
    __KernelReSchedule();
}

/* sceKernelRotateThreadReadyQueue() — approximated as a plain yield: we do
   not model per-priority ready queues, so just invoke the scheduler. */
void sceKernelRotateThreadReadyQueue()
{
    _log(WRN, THREAD, "sceKernelRotateThreadReadyQueue : rescheduling");
    __KernelReSchedule();
}

/* sceKernelDeleteThread(uid) — delete another thread's kernel object after
   waking its joiners.  Self-deletion is rejected with -1. */
void sceKernelDeleteThread()
{
    s32 target = PARAM(0);

    if (target == __KernelGetCurThread())
    {
        SceKernelThread *self = ko_get(__KernelGetCurThread());
        _log(WRN, THREAD, "Thread \"%s\" tries to delete itself! :(", self->nt.name);
        RETURN(-1);
        return;
    }

    _log(INF, THREAD, "sceKernelDeleteThread(%d)", target);
    /* Release anyone waiting on this thread's end before tearing it down. */
    __KernelResumeThreads(WAITTYPE_THREADEND, target, RESUMETYPE_ID);
    __KernelReSchedule();
    RETURN(__KernelDeleteThread(target));
}

/* sceKernelTerminateDeleteThread(uid) — terminate and delete another thread.
   Currently identical to sceKernelDeleteThread: wake joiners, reschedule,
   free the object.  Self-deletion is rejected with -1. */
void sceKernelTerminateDeleteThread()
{
    s32 target = PARAM(0);

    if (target == __KernelGetCurThread())
    {
        SceKernelThread *self = ko_get(__KernelGetCurThread());
        _log(WRN, THREAD, "Thread \"%s\" tries to delete itself! :(", self->nt.name);
        RETURN(-1);
        return;
    }

    _log(INF, THREAD, "sceKernelTerminateDeleteThread(%d)", target);
    __KernelResumeThreads(WAITTYPE_THREADEND, target, RESUMETYPE_ID);
    __KernelReSchedule();
    RETURN(__KernelDeleteThread(target));
}

void sceKernelGetThreadId()
{
    _log(DBG, THREAD, "%d = sceKernelGetThreadId()", __KernelGetCurThread());
    RETURN(__KernelGetCurThread());
}

/* sceKernelChangeCurrentThreadAttr(clearAttr, setAttr) — clear then set
   attribute bits on the calling thread. */
void sceKernelChangeCurrentThreadAttr()
{
    s32 clearAttr = PARAM(0);
    s32 setAttr = PARAM(1);
    SceKernelThread *self = ko_get(__KernelGetCurThread());

    if (!self)
    {
        _log(ERR, THREAD, "sceKernelChangeCurrentThreadAttr(clear = %08x, set = %08x): bad thread", clearAttr, setAttr);
        RETURN(SCE_ERROR_NOT_FOUND_THREAD);
        return;
    }

    _log(INF, THREAD, "0 = sceKernelChangeCurrentThreadAttr(clear = %08x, set = %08x)", clearAttr, setAttr);
    self->nt.attr = (self->nt.attr & ~clearAttr) | setAttr;
    RETURN(0);
}

/* sceKernelChangeThreadPriority(uid, priority) — set a thread's current
   priority.  UID 0 targets the calling thread. */
void sceKernelChangeThreadPriority()
{
    s32 tid = PARAM(0);

    if (tid == 0)
        tid = __KernelGetCurThread(); /* 0 is shorthand for "self" */

    SceKernelThread *target = ko_get(tid);
    if (!target)
    {
        _log(ERR, THREAD, "sceKernelChangeThreadPriority(%d, %08x): bad thread", tid, PARAM(1));
        RETURN(SCE_ERROR_NOT_FOUND_THREAD);
        return;
    }

    _log(INF, THREAD, "sceKernelChangeThreadPriority(%d, %08x)", tid, PARAM(1));
    target->nt.currentPriority = PARAM(1);
    RETURN(0);
}

/* sceKernelDelayThreadCB(usec) — delay the calling thread; callbacks may run
   during the wait (last argument 1 enables callback handling). */
void sceKernelDelayThreadCB()
{
    u32 delayUsec = PARAM(0);
    _log(DBG, THREAD, "sceKernelDelayThreadCB(%d usec)", delayUsec);
    __KernelDelayCurThread(delayUsec, 1);
}

/* sceKernelDelayThread(usec) — delay the calling thread without servicing
   callbacks (last argument 0). */
void sceKernelDelayThread()
{
    u32 delayUsec = PARAM(0);
    _log(DBG, THREAD, "sceKernelDelayThread(%d usec)", delayUsec);
    __KernelDelayCurThread(delayUsec, 0);
}

/* sceKernelWakeupThread(uid) — credit one wakeup to the target thread and
   release it if it is currently sleeping with a non-negative balance. */
void sceKernelWakeupThread()
{
    SceUID tid = PARAM(0);
    SceKernelThread *target = ko_get(tid);

    if (!target)
    {
        _log(ERR, THREAD, "sceKernelWakeupThread(%d): bad thread", tid);
        RETURN(SCE_ERROR_NOT_FOUND_THREAD);
        return;
    }

    target->nt.wakeupCount++;
    _log(INF, THREAD, "sceKernelWakeupThread(%d) - wakeupCount incremented to %d", tid, target->nt.wakeupCount);
    if (target->nt.waitType == WAITTYPE_SLEEP && target->nt.wakeupCount >= 0)
        __KernelResumeThread(tid);
    RETURN(0);
}

/* sceKernelSleepThread() — consume one wakeup credit; if the balance goes
   negative, block until a matching sceKernelWakeupThread arrives.  A banked
   wakeup (balance still >= 0) makes the call return immediately. */
void sceKernelSleepThread()
{
    SceKernelThread *self = ko_get(__KernelGetCurThread());

    self->nt.wakeupCount--;
    _log(WRN, THREAD, "sceKernelSleepThread() - wakeupCount decremented to %d", self->nt.wakeupCount);

    if (self->nt.wakeupCount >= 0)
        RETURN(0);
    else
        __KernelWaitCurThread(WAITTYPE_SLEEP, 0, 0, 0, 0);
}

/* sceKernelSleepThreadCB() — put the calling thread to sleep with callback
   handling enabled (last argument 1).  NOTE(review): unlike
   sceKernelSleepThread, this variant does not touch wakeupCount — confirm. */
void sceKernelSleepThreadCB()
{
    _log(INF, THREAD, "sceKernelSleepThreadCB()");
    __KernelWaitCurThread(WAITTYPE_SLEEP, 0, 0, 0, 1);
}

/* sceKernelWaitThreadEnd(uid) — block until the target thread goes dormant.
   Returns 0 immediately when the thread is unknown or has already ended. */
void sceKernelWaitThreadEnd()
{
    SceUID tid = PARAM(0);
    SceKernelThread *target = ko_get(tid);

    _log(INF, THREAD, "sceKernelWaitThreadEnd(%d)", tid);

    if (!target)
        _log(WRN, THREAD, "sceKernelWaitThreadEnd - bad thread %d", tid);
    else if (target->nt.status == THREADSTATUS_DORMANT)
        _log(WRN, THREAD, "sceKernelWaitThreadEnd - thread %d already ended. Doing nothing.", tid);
    else
        __KernelWaitCurThread(WAITTYPE_THREADEND, tid, 0, 0, 0);
    RETURN(0);
}

/* Unimplemented stub — claims success without suspending anything. */
void sceKernelSuspendThread()
{
    _log(ERR, THREAD, "UNIMPL sceKernelSuspendThread");
    RETURN(0);
}

/* Unimplemented stub — claims success without resuming anything. */
void sceKernelResumeThread()
{
    _log(ERR, THREAD, "UNIMPL sceKernelResumeThread");
    RETURN(0);
}

void sceKernelGetThreadCurrentPriority()
{
    SceKernelThread *t = ko_get(__KernelGetCurThread());
    _log(INF, THREAD, "0x%08x = sceKernelGetThreadCurrentPriority", t->nt.currentPriority);
    RETURN(t->nt.currentPriority);
}

/* sceKernelGetThreadExitStatus(uid) — report the exit status a dormant
   thread recorded when it terminated.  Errors out for unknown UIDs and for
   threads that have not finished yet. */
void sceKernelGetThreadExitStatus()
{
    SceUID tid = PARAM(0);
    SceKernelThread *target = ko_get(tid);

    if (!target)
    {
        _log(ERR, THREAD, "sceKernelGetThreadExitStatus(%d): thread not found!", tid);
        RETURN(SCE_ERROR_NOT_FOUND_THREAD);
        return;
    }

    if (target->nt.status != THREADSTATUS_DORMANT)
    {
        _log(ERR, THREAD, "sceKernelGetThreadExitStatus(%d): thread is not dormant!", tid);
        RETURN(SCE_ERROR_THREAD_IS_NOT_DORMANT);
        return;
    }

    _log(INF, THREAD, "%d = sceKernelGetThreadExitStatus(%d)", target->nt.exitStatus, tid);
    RETURN(target->nt.exitStatus);
}

