//==========================================================================
// Copyright (c) 2000-2008,  Elastos, Inc.  All Rights Reserved.
//==========================================================================

/*
 * abc.c
 *    Asynchronous Buffer Cache
 *
 * Buffered block I/O interface down to a physical device.  Provides
 * both read-ahead and write-behind, while providing the illusion of
 * a synchronous block I/O device to its caller.
 */
#include "ddk.h"
#include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include <misc.h>
#include <llist.h>
#include <hash.h>
#include <abc.h>
#include <fat.h>
#include "string.h"
#include <bufcache.h>
#ifndef _win32
#include <event.h>
#endif

/*
 * stob()
 *    Convert a sector count to a byte count.
 */
__inline UInt32 abc::
stob(UInt32 nsec)
{
    /* Sector size is a power of two; shift instead of multiplying. */
    UInt32 nbytes = nsec << p_Sharedata->secshift;
    return nbytes;
}

/*
 * read_secs()
 *    Read "nsec" sectors starting at sector "start" into "buf".
 *
 * Returns 0 on success, 1 on failure.  A zero-length read trivially
 * succeeds; a NULL buffer or an ejected card fails before touching
 * the device.
 */
int abc::
read_secs(daddr_t start, unsigned char *buf, UInt32 nsec)
{
    /* Nothing to transfer -> trivial success */
    if (nsec == 0) {
        return 0;
    }

    /* No destination, or the media is gone -> fail */
    if ((buf == NULL) || p_Sharedata->m_sdCardEjected) {
        return 1;
    }

    ECode ec = ReadDevice(pBlkDev,
                          stob(start + p_Sharedata->bpb_base),
                          stob(nsec),
                          (PByte)buf, NULL, NULL);
    return (ec == NOERROR) ? 0 : 1;
}

/*
 * write_secs()
 *    Write "nsec" sectors starting at sector "start" from "buf".
 *
 * Returns 0 on success, 1 on failure.  Guards now mirror read_secs():
 * a zero-length write trivially succeeds, and a NULL buffer or an
 * ejected card fails before the request reaches the device (the old
 * code passed a NULL buffer straight to WriteDevice).
 */
int abc::
write_secs(daddr_t start, unsigned char *buf, UInt32 nsec)
{
    if (0 == nsec) {
        return 0;
    }
    if (NULL == buf) {
        return 1;
    }
    if (p_Sharedata->m_sdCardEjected)
        return 1; // the sd card has been ejected...

    ECode ec = WriteDevice(
                    pBlkDev, stob(start+p_Sharedata->bpb_base), stob(nsec),
                    (PByte)buf, NULL, NULL);
    return ((ec == NOERROR)? 0 : 1);
}

/*
 * free_buf()
 *    Release buffer storage, remove from hash
 *
 * Unlinks "b" from the hash pool and the LRU list, debits its sector
 * count from the cache total, and returns it to the BufCache
 * allocator.  Callers (age_buf, abc_clearbuf) already hold
 * allbufs_lock, which is why it must not be taken here.
 */
void abc::
free_buf(struct buf *b)
{
    ASSERT_DEBUG(b->b_list, "free_buf: null b_list");
    //ASSERT_DEBUG(b->b_locks == 0, "free_buf: locks");

    hash_delete(bufpool, b->b_start);
    /*
     * Can not lock allbufs_lock, if do that then deadLock,
     * age_buf and abc_clearbuf have to use it.
     */
    ll_delete(b->b_list);
    bufsize -= b->b_nsec;       /* cache no longer holds these sectors */
    ASSERT_DEBUG(b->b_data, "free_buf: null b_data");
    pBufCache->Put(b);
}

/*
 * exec_qio()
 *    Carry out one queued-I/O action against buffer "b".
 *
 * Q_FILLBUF reads the extent from the device, skipping sector 0 when
 * it is already resident, then marks the whole buffer present.
 * Q_FLUSHBUF writes a dirty buffer back via _sync_buf().  Returns
 * TRUE on success, FALSE on I/O failure or an unknown op code.
 */
Boolean abc::
exec_qio(struct buf *b, int op)
{
    if (op == Q_FILLBUF) {
        unsigned char *data = (unsigned char *)b->b_data;
        int failed;

        if (b->b_flags & B_SEC0) {
            /* Sector 0 is resident: fetch only the remainder */
            failed = read_secs(b->b_start + 1,
                               data + p_Sharedata->secsz,
                               b->b_nsec - 1);
        }
        else {
            failed = read_secs(b->b_start, data, b->b_nsec);
        }
        if (failed) {
            return FALSE;
        }
        b->b_flags |= (B_SEC0 | B_SECS);
        return TRUE;
    }

    if (op == Q_FLUSHBUF) {
        return _sync_buf(b, 1) ? FALSE : TRUE;
    }

    /* Unrecognized operation */
    return FALSE;
}

/*
 * busywait()
 *    Poll a location, sleeping between checks, until it reads zero.
 */
void
busywait(volatile UInt32 *ptr)
{
    for (;;) {
        if (*ptr == 0) {
            break;
        }
        DzSleep(100, NULL);
    }
}

/*
 * age_buf()
 *    Find the next available buf header, flush and free it
 *
 * Since this is basically a paging algorithm, it can become arbitrarily
 * complex.  The algorithm here tries to be simple, yet somewhat fair.
 *
 * Scans the LRU list oldest-first.  Returns:
 *    1  - a clean, unpinned buffer was freed (or dirty data flushed)
 *    0  - card ejected, or too many consecutive write-back failures
 *   -1  - made no progress at all this pass
 * Caller must hold allbufs_lock (see free_buf()).
 */
int abc::
age_buf(void)
{
    struct llist *l;
    struct buf *b;
    int flag = -1;      /* stays -1 unless something was flushed */
    int errCount = 0;   /* consecutive write-back failures */

    /*
     * Sync out the old dirty buf until found a clean one.
     */
    for (l = allbufs.l_back; l != &allbufs ; l = l->l_back) {

        if (p_Sharedata->m_sdCardEjected) {
            return 0;
        }
        /*
         * Only skip if wired or active
         */
        b =(struct buf *) l->l_data;
        if (b->b_locks) {
            continue;
        }

        if (!(b->b_flags & B_DIRTY)) {
            /*
             * Remove from list, update data structures
             */
            free_buf(b);
            return 1;
        }

        ASSERT_DEBUG(b->b_flags & B_DIRTY, "age_buf failed.");

        /*
         * Sync out data, if sync failed just skip it.
         */
        if (!exec_qio(b, Q_FLUSHBUF)) {
            // write back failed; bail out after repeated failures
            ++errCount;
            if (errCount > 5) return 0;
            continue;
        }
        errCount = 0;
        flag = 1;       /* flushed something; caller may retry */
        ASSERT_DEBUG(!(b->b_flags & B_DIRTY), "age_buf failed.");
    }

    return flag;
}

/*
 * find_buf()
 *    Given starting sector #, return pointer to buf
 *
 * On a hash hit the cached buf is locked and returned.  On a miss,
 * old buffers are aged out until the extent fits, a fresh buf is
 * allocated, registered in the LRU list and hash pool, and (for
 * ABC_BG) its fill I/O is started immediately.  The returned buf is
 * lock_buf()'ed; callers must unlock_buf() it.  Errors come back
 * ERR_PTR-encoded: EIO, EAGAIN, or ENOMEM.
 *
 * BUGFIX: the EIO/EAGAIN returns in the aging loop used to bail out
 * with allbufs_lock still held, deadlocking every later cache
 * operation.  The lock is now released on every exit path.
 */
struct buf * abc::
find_buf(daddr_t d, UInt32 nsec, int flags)
{
    struct buf *b;
    int ret = 0;

    ASSERT_DEBUG(nsec > 0, "find_buf: zero");
    ASSERT_DEBUG(nsec <= EXTSIZ, "find_buf: too big");

    /*
     * If we can find it, this is easy
     */
    Ktfs_Lock(&allbufs_lock);

    b = (struct buf *)hash_lookup(bufpool, d);
    if (b) {
        lock_buf(b);
        Ktfs_Unlock(&allbufs_lock);
        return(b);
    }

    /*
     * Make room in our buffer cache if needed
     */
    while ((bufsize+nsec) > NCACHE) {
        ret = age_buf();
        if (ret == 0) {
            Ktfs_Unlock(&allbufs_lock);
            return ERR_PTR(struct buf *, EIO);
        }
        if (ret == -1) {
            Ktfs_Unlock(&allbufs_lock);
            return ERR_PTR(struct buf *, EAGAIN);
        }
    }

    /*
     * Get a buf struct
     */
    b = pBufCache->Get(nsec);
    if (NULL == b) {
        Ktfs_Unlock(&allbufs_lock);
        return ERR_PTR(struct buf *, ENOMEM);
    }

    b->b_start = d;
    b->b_nsec = nsec;
    b->b_locks = 0;
    b->b_handles = 0;
    b->b_nhandle = 0;
    if (flags & ABC_FILL) {
        /* Nothing resident yet; index_buf() will fault data in */
        b->b_flags = 0;
    }
    else {
        /* Treat the extent as already resident (no device read) */
        b->b_flags = B_SEC0 | B_SECS;
    }

    /*
     * Add us to pool, and mark us very new
     */
    b->b_list = ll_insert(&allbufs, b);
    if (NULL == b->b_list) {
        pBufCache->Put(b);
        Ktfs_Unlock(&allbufs_lock);
        return ERR_PTR(struct buf *, ENOMEM);
    }
    if (hash_insert(bufpool, d, b)) {
        ll_delete(b->b_list);
        pBufCache->Put(b);
        Ktfs_Unlock(&allbufs_lock);
        return ERR_PTR(struct buf *, ENOMEM);
    }

    /*
     * If ABC_BG, initiate fill now
     */
    if (flags & ABC_BG) {
        if (!exec_qio(b, Q_FILLBUF)) {
            /* Undo the list/hash registration before failing */
            ll_delete(b->b_list);
            hash_delete(bufpool, b->b_start);
            pBufCache->Put(b);
            Ktfs_Unlock(&allbufs_lock);
            return ERR_PTR(struct buf *, EIO);
        }
    }

    lock_buf(b);
    bufsize += nsec;
    Ktfs_Unlock(&allbufs_lock);

    return(b);
}

/*
 * index_buf()
 *    Get a pointer to a run of data under a particular buf entry
 *
 * As a side effect, move us to front of list to make us relatively
 * undesirable for aging.
 *
 * Sectors are faulted in lazily: a request for just the first sector
 * reads only sector 0; anything else reads the whole extent (minus
 * sector 0 when it is already resident).  Returns a pointer
 * stob(index) bytes into the buffer data, or NULL if a device read
 * failed.
 */
void * abc::
index_buf(struct buf *b, UInt32 index, UInt32 nsec)
{
    ASSERT_DEBUG((index+nsec) <= b->b_nsec, "index_buf: too far");

    /* Promote to LRU head so aging picks this buffer last */
    Ktfs_Lock(&allbufs_lock);
    ll_movehead(&allbufs, b->b_list);
    Ktfs_Unlock(&allbufs_lock);

    Ktfs_Lock(&b->b_lock);
    if ((index == 0) && (nsec == 1)) {
        /*
         * Only looking at 1st sector.  See about reading
         * only 1st sector, if we don't yet have it.
         */
        if ((b->b_flags & B_SEC0) == 0) {
            /*
             * Load the sector, mark it as present
             */
            if (read_secs(b->b_start, (unsigned char *)b->b_data, 1)) {
                Ktfs_Unlock(&b->b_lock);
                return NULL;
            }
            b->b_flags |= B_SEC0;
        }
    }
    else if ((b->b_flags & B_SECS) == 0) {
        /*
         * Otherwise if we don't have the whole buffer, get
         * it now.  Don't read in sector 0 if we already
         * have it.
         */
        if (b->b_flags & B_SEC0) {
            if (read_secs(b->b_start + 1, (unsigned char *)b->b_data + p_Sharedata->secsz,
                b->b_nsec - 1)) {
                Ktfs_Unlock(&b->b_lock);
                return NULL;
            }
        }
        else {
            if (read_secs(b->b_start, (unsigned char *)b->b_data, b->b_nsec)) {
                Ktfs_Unlock(&b->b_lock);
                return NULL;
            }
        }
        b->b_flags |= (B_SEC0|B_SECS);
    }
    Ktfs_Unlock(&b->b_lock);
    return((char *)b->b_data + stob(index));
}

/*
 * init_buf()
 *    Initialize the buffering system
 *
 * Records the device and shared-state handles (taking a reference on
 * the latter) and sets up an empty LRU list and hash pool.
 */
void abc::
init_buf(void *pDevice, fatfs_sharedata* pdata)
{
    pBlkDev = (PDevDriver)pDevice;
    p_Sharedata = pdata;
    p_Sharedata->AddRef();

    /* Empty cache: no buffers resident yet */
    ll_init(&allbufs);
    bufsize = 0;
    bufpool = hash_alloc(NCACHE / 8);
    ASSERT_DEBUG(bufpool, "init_buf: bufpool");
}

/*
 * dirty_buf()
 *    Mark the given buffer dirty
 *
 * If a handle is given, mark the dirty buffer with this handle
 *
 * NOTE(review): the assert below requires handle == 0 and the
 * handle-tagging code is compiled out (#if 0), so in practice this
 * only sets B_DIRTY -- confirm before re-enabling handle support.
 */
void abc::
dirty_buf(struct buf *b, void *handle)
{

    assert(handle == 0);
    Ktfs_Lock(&b->b_lock);
    /*
     * Mark buffer dirty
     */
    b->b_flags |= B_DIRTY;

    /*
     * No handle -> done
     */
    if (!handle) {
        Ktfs_Unlock(&b->b_lock);
        return;
    }
#if 0 // no use codes
    void **p, **zp;
    UInt32 x;
    void **temp = NULL;
    /*
     * See if this handle is already tagged
     */
    p = b->b_handles;
    zp = 0;
    for (x = 0; x < b->b_nhandle;++x) {
        /*
         * Yup, all done
         */
        if (p[x] == handle) {
            Ktfs_Unlock(&b->b_lock);
            return;
        }

        /*
         * Record an open position if found
         */
        if (p[x] == 0) {
            zp = &p[x];
        }
    }

    /*
     * Not tagged, but there's a position here
     */
    if (zp) {
        *zp = handle;
        Ktfs_Unlock(&b->b_lock);
        return;
    }

    /*
     * Grow the array, and save us at the end
     * (manual realloc: allocate one slot larger, copy, free old)
     */
    //b->b_handles =
    //    (void **)realloc(b->b_handles,
    //        (b->b_nhandle + 1) * sizeof(void *));
    temp = (void **)malloc((b->b_nhandle + 1) * sizeof(void *));
    memcpy(temp, b->b_handles, (b->b_nhandle) * sizeof(void *));
    free(b->b_handles);
    b->b_handles = temp;
    b->b_handles[b->b_nhandle] = handle;
    b->b_nhandle += 1;
    Ktfs_Unlock(&b->b_lock);
#endif
}

/*
 * lock_buf()
 *    Take a reference on the buf so it cannot be aged out.
 */
void abc::
lock_buf(struct buf *b)
{
    ++b->b_locks;
    ASSERT_DEBUG(b->b_locks > 0, "lock_buf: overflow");
}

/*
 * unlock_buf()
 *    Drop a reference taken with lock_buf().
 */
void abc::
unlock_buf(struct buf *b)
{
    ASSERT_DEBUG(b->b_locks > 0, "unlock_buf: underflow");
    --b->b_locks;
}

/*
 * _sync_buf()
 *    Sync back buffer if dirty
 *
 * Write back the 1st sector, or the whole buffer, as appropriate
 */
int abc::
_sync_buf(struct buf *b, int from_qio)
{
    ASSERT_DEBUG(b->b_flags & (B_SEC0 | B_SECS), "sync_buf: not ref'ed");

    /*
     * Skip it if not dirty
     */
    if (!(b->b_flags & B_DIRTY)) {
        return 0;
    }

    /*
     * Do the I/O--whole buffer, or just 1st sector if that was
     * the only sector referenced.
     */

    if (b->b_flags & B_SECS) {
        if (write_secs(b->b_start, (unsigned char *)b->b_data, b->b_nsec)) {
            return(1);
        }
    }
    else {
        if (write_secs(b->b_start, (unsigned char *)b->b_data, 1)) {
            return(1);
        }
    }

    b->b_flags &= ~B_DIRTY;

    /*
     * If there are possible handles, clear them too
     */
    if (b->b_handles) {
        bzero(b->b_handles, b->b_nhandle * sizeof(void *));
    }
    return 0;
}

/*
 * sync_bufs()
 *    Write dirty buffers to disk
 *
 * If handle is not NULL, sync all buffers dirtied with this handle.
 * Otherwise sync all dirty buffers.
 *
 * BUGFIX: B_DIRTY used to be cleared unconditionally after the flush
 * attempt, so a failed write silently discarded the dirty state (and
 * eventually the data).  _sync_buf() already clears B_DIRTY on a
 * successful write; on failure we now leave it set so a later pass
 * can retry.
 */
void abc::
sync_bufs(void *handle)
{
    Boolean bRet = TRUE;
    struct llist *l;
    UInt32 x;
    Ktfs_Lock(&allbufs_lock);
    for (l = LL_NEXT(&allbufs); l != &allbufs; l = LL_NEXT(l)) {
        struct buf *b = (struct buf *)l->l_data;

        /*
         * Not dirty--easy
         */
        if (!(b->b_flags & B_DIRTY)) {
            continue;
        }

        Ktfs_Lock(&b->b_lock);

        /*
         * Not dirty after interlock--still easy
         */
        if (!(b->b_flags & B_DIRTY)) {
            Ktfs_Unlock(&b->b_lock);
            continue;
        }

        /*
         * No handle, just sync dirty buffers
         */
        if (!handle) {
            exec_qio(b, Q_FLUSHBUF);
            Ktfs_Unlock(&b->b_lock);
            continue;
        }

        /*
         * Check for match.
         */
        for (x = 0; x < b->b_nhandle;++x) {
            if (b->b_handles[x] == handle) {
                bRet = exec_qio(b, Q_FLUSHBUF);
                ASSERT_DEBUG(bRet, "Check for match");
                break;
            }
        }
        Ktfs_Unlock(&b->b_lock);
    }
    Ktfs_Unlock(&allbufs_lock);
}

/*
 * sync_one_block()
 *    Flush a single dirty, unpinned buffer to disk.
 *
 * Returns TRUE when a candidate buffer was found and a flush was
 * attempted, FALSE when no buffer qualified.
 *
 * BUGFIX: B_DIRTY was cleared even when the flush failed; _sync_buf()
 * clears it on success, so a failed write now leaves the buffer dirty
 * for a later retry instead of silently dropping the data.
 */
bool abc::
sync_one_block()
{
    struct llist *l;

    Ktfs_Lock(&allbufs_lock);
    for (l = LL_NEXT(&allbufs); l != &allbufs; l = LL_NEXT(l)) {
        struct buf *b = (struct buf *)l->l_data;

        /*
         * Skip clean or pinned buffers--easy
         */
        if (!(b->b_flags & B_DIRTY) || b->b_locks) {
            continue;
        }

        Ktfs_Lock(&b->b_lock);

        /*
         * Not dirty after interlock--still easy
         */
        if (!(b->b_flags & B_DIRTY)) {
            Ktfs_Unlock(&b->b_lock);
            continue;
        }

        lock_buf(b);
        /*
         * Drop the list lock before the (possibly slow) write;
         * lock_buf keeps the buffer from being aged out meanwhile.
         */
        Ktfs_Unlock(&allbufs_lock);
        exec_qio(b, Q_FLUSHBUF);
        unlock_buf(b);
        Ktfs_Unlock(&b->b_lock);
        return TRUE;
    }
    Ktfs_Unlock(&allbufs_lock);

    return FALSE;
}
/*
 * abc_clearbuf()
 *    Tear down every cached buffer.
 *
 * Walks the LRU list back-to-front, grabbing each node's successor
 * before free_buf() unlinks it.  NOTE(review): buffers are freed
 * without being synced -- presumably callers flush first; verify.
 */
void abc::abc_clearbuf()
{
    struct llist *cur, *next;

    Ktfs_Lock(&allbufs_lock);
    cur = allbufs.l_back;
    while (cur != &allbufs) {
        next = cur->l_back;
        free_buf((struct buf *)cur->l_data);
        cur = next;
    }
    Ktfs_Unlock(&allbufs_lock);
}

/*
 * abc::abc()
 *    Construct an empty, unattached buffer cache.
 */
abc::abc()
    : bufsize(0),
      bufpool(NULL),
      pBufCache(NULL),
      pBlkDev(NULL),
      m_nRef(0),
      p_Sharedata(NULL)
{
    Ktfs_Init_Lock(&allbufs_lock);
}

/*
 * abc::~abc()
 *    Discard all cached buffers and drop held references.
 */
abc::~abc()
{
    if (bufpool != NULL) {
        abc_clearbuf();
        hash_dealloc(bufpool);
    }
    Ktfs_Term_Lock(&allbufs_lock);
    if (pBufCache != NULL) {
        pBufCache->Release();
    }
    if (p_Sharedata != NULL) {
        p_Sharedata->Release();
    }
}

/*
 * AddRef()
 *    Atomically bump the object's reference count; returns the new count.
 */
UInt32 abc::AddRef()
{
    return InterlockedIncrement((PInt32)&m_nRef);
}

/*
 * Release()
 *    Atomically drop a reference; destroys the object when the count
 *    reaches zero.  Returns the remaining count.
 */
UInt32  abc::Release()
{
    const UInt32 remaining = InterlockedDecrement((PInt32)&m_nRef);

    if (0 == remaining) {
        delete this;
    }
    return remaining;
}

/*
 * setBufCache()
 *    Attach the backing BufCache allocator, taking a reference on it.
 *
 * A NULL cache is ignored rather than dereferenced (the old code
 * called AddRef() through a possibly-NULL pointer).
 */
void abc::setBufCache(BufCache *pCache)
{
    if (NULL == pCache) {
        return;
    }
    pBufCache = pCache;
    pBufCache->AddRef();
}

#if defined(_DEBUG)
/*
 * abc_bufs_stat()
 *    Debug statistics: *Data_secs receives the number of cached
 *    sectors currently dirty, *Cache_secs the total sectors resident.
 */
void abc::
abc_bufs_stat(UInt32 *Data_secs, UInt32 *Cache_secs)
{
    UInt32 slot;
    UInt32 dirty_secs = 0;
    struct hash_node *node;

    /* Walk every hash chain, totalling dirty buffers' sector counts */
    for (slot = 0; slot < (UInt32)bufpool->h_hashsize; ++slot) {
        for (node = bufpool->h_hash[slot]; node; node = node->h_next) {
            struct buf *b = (struct buf *)node->h_data;
            if (b->b_flags & B_DIRTY) {
                dirty_secs += b->b_nsec;
            }
        }
    }

    *Data_secs = dirty_secs;
    *Cache_secs = bufsize;
}
#endif
