#include "callother.h"

#define PERTURB_SHIFT 5

/* Object used as dummy key to fill deleted entries. Initialized
   by first call to CoDict_New().
 */
static CoValue *dummy = NULL;

/* Point `dict` at its embedded small table and mark it empty:
   zero the inline slots, reset the fill/used counters, and set the
   mask for a DICT_MIN_SIZE table.  Does NOT release any references
   a previous table may have held — callers handle that first. */
#define EMPTY_TO_MINSIZE(dict) do {   \
    memset((dict)->d_smalltable, 0, sizeof((dict)->d_smalltable));  \
    (dict)->d_used = (dict)->d_fill = 0;    \
    (dict)->d_table = (dict)->d_smalltable; \
    (dict)->d_mask = DICT_MIN_SIZE - 1;   \
    } while(0)

CoValue *CoDict_New(void)
{
    CoDict *dict;

    if (dummy == NULL) {
        dummy = CoString_FromString("<dummy key>");
        if (dummy == NULL) {
            /* TODO: [ERROR] no memory */
            return NULL;
        }
    }

    dict = (CoDict *)CO_MALLOC(sizeof(CoDict));
    if (dict == NULL) {
        /* TODO: [ERROR] no memory */
        return NULL;
    }
    COVALUE_INIT(dict, &CoDict_Type);

    EMPTY_TO_MINSIZE(dict);

    return (CoValue *)dict;
}

/*
The basic lookup function used by all operations.
This is based on Algorithm D from Knuth Vol. 3, Sec. 6.4.
Open addressing is preferred over chaining since the link overhead for
chaining would be substantial (100% with typical malloc overhead).

When the key isn't found a CoDictEntry* is returned for which the
de_value field is NULL; this is the slot in the dict at which the key
would have been found, and the caller can (if it wishes) add the
<key, value> pair to the returned CoDictEntry*.
*/
static CoDictEntry *lookup(CoDict *dict, CoValue *key, long hash)
{
	size_t i;
	size_t perturb;
	CoDictEntry *freeslot;   /* first dummy slot seen, reusable on insert */
	size_t mask = (size_t)dict->d_mask;
	CoDictEntry *ep0 = dict->d_table;
	CoDictEntry *entry;

	/* First probe: the hash masked down to the table size. */
	i = hash & mask;
	entry = &ep0[i];
	/* Hit on an empty slot or identical key object: done.  (An empty
	   slot has de_key == NULL; identity match skips CoValue_Equal.) */
	if (entry->de_key == NULL || entry->de_key == key) {
		return entry;
    }
	if (entry->de_key == dummy) {
		freeslot = entry;
    }
	else {
		/* Compare hashes first so CoValue_Equal runs only on
		   plausible matches. */
		if (entry->de_hash == hash && CoValue_Equal(entry->de_key, key))
			return entry;
		freeslot = NULL;
	}

	/* In the loop, de_key == dummy is by far (factor of 100s) the
	   least likely outcome, so test for that last. */
	for (perturb = hash; ; perturb >>= PERTURB_SHIFT) {
		/* i = i*5 + perturb + 1: the perturbed probe sequence.
		   `i` accumulates unmasked; only the index is masked. */
		i = (i << 2) + i + perturb + 1;
		entry = &ep0[i & mask];
		if (entry->de_key == NULL)
			/* Miss: hand back the first dummy slot if we saw one,
			   so inserts reuse deleted entries. */
			return freeslot == NULL ? entry : freeslot;
		if (entry->de_key == key
		    || (entry->de_hash == hash
		        && entry->de_key != dummy
			&& CoValue_Equal(entry->de_key, key)))
			return entry;
		if (entry->de_key == dummy && freeslot == NULL)
			freeslot = entry;
	}
}

/* Return the value mapped to `key`, or NULL if `self` is not a dict
   or the key is absent.  The returned reference is borrowed. */
CoValue *CoDict_GetItem(CoValue *self, CoValue *key)
{
    long hash;

    if (!CODICT_CHECK(self)) {
        return NULL;
    }

    /* Reuse a string key's cached hash when one is available. */
    if (COSTRING_CHECK(key)) {
        hash = ((CoString *) key)->s_hash;
        if (hash == -1)
            hash = CoValue_Hash(key);
    }
    else {
        hash = CoValue_Hash(key);
    }

    /* A missing key resolves to a slot whose de_value is NULL. */
    return lookup((CoDict *)self, key, hash)->de_value;
}

/*
Internal routine to insert a new item into the table.
Used both by the internal resize routine and by the public insert routine.
Eats a reference to key and one to value.
*/
static void insertdict(CoDict *dict, CoValue *key, long hash, CoValue *value)
{
	CoValue *old_value;
	CoDictEntry *entry;

	entry = lookup(dict, key, hash);
	if (entry->de_value != NULL) {
		/* Key already present: swap values, then drop the old
		   value and the caller's key reference (the entry keeps
		   its existing key). */
		old_value = entry->de_value;
		entry->de_value = value;
		CO_DECREF(old_value); /* which **CAN** re-enter */
		CO_DECREF(key);
	}
	else {
		/* New mapping.  A virgin slot grows d_fill; a dummy slot
		   is reclaimed instead (d_fill unchanged). */
		if (entry->de_key == NULL)
			dict->d_fill++;
		else {
			assert(entry->de_key == dummy);
			CO_DECREF(dummy);
		}
		entry->de_key = key;
		entry->de_hash = hash;
		entry->de_value = value;
		dict->d_used++;
	}
}

/*
Internal routine used by dictresize() to insert an item which is
known to be absent from the dict.  This routine also assumes that
the dict contains no deleted entries.  Besides the performance benefit,
using insertdict() in dictresize() is dangerous (SF bug #1456209).
Note that no refcounts are changed by this routine; if needed, the caller
is responsible for incref'ing `key` and `value`.
*/
static void insertdict_clean(CoDict *dict, CoValue *key, long hash, CoValue *value)
{
	CoDictEntry *table = dict->d_table;
	size_t mask = (size_t)dict->d_mask;
	size_t slot = hash & mask;
	size_t perturb = hash;
	CoDictEntry *entry = &table[slot];

	/* Probe until the first empty slot.  No key comparison and no
	   dummy handling are needed: the caller guarantees the key is
	   absent and the table holds no deleted entries. */
	while (entry->de_key != NULL) {
		slot = (slot << 2) + slot + perturb + 1;
		perturb >>= PERTURB_SHIFT;
		entry = &table[slot & mask];
	}
	assert(entry->de_value == NULL);
	dict->d_fill++;
	dict->d_used++;
	entry->de_key = key;
	entry->de_hash = hash;
	entry->de_value = value;
}

/*
Restructure the table by allocating a new table and reinserting all
items again.  When entries have been deleted, the new table may
actually be smaller than the old one.
*/
static int dictresize(CoDict *dict, ssize_t minused)
{
	ssize_t newsize;
	CoDictEntry *oldtable, *newtable, *entry;
	ssize_t i;
	int is_oldtable_malloced;
	CoDictEntry small_copy[DICT_MIN_SIZE];

	assert(minused >= 0);

	/* Find the smallest table size > minused. */
	for (newsize = DICT_MIN_SIZE;
	     newsize <= minused && newsize > 0;
	     newsize <<= 1)
		;
	/* newsize <= 0 means the doubling overflowed ssize_t. */
	if (newsize <= 0) {
		/* TODO: [ERROR] no memory */
		return -1;
	}

	/* Get space for a new table. */
	oldtable = dict->d_table;
	assert(oldtable != NULL);
	is_oldtable_malloced = oldtable != dict->d_smalltable;

	if (newsize == DICT_MIN_SIZE) {
		/* A large table is shrinking, or we can't get any smaller. */
		newtable = dict->d_smalltable;
		if (newtable == oldtable) {
			if (dict->d_fill == dict->d_used) {
				/* No dummies, so no point doing anything. */
				return 0;
			}
			/* We're not going to resize it, but rebuild the
			   table anyway to purge old dummy entries.
			   Subtle:  This is *necessary* if fill==size,
			   as lookdict needs at least one virgin slot to
			   terminate failing searches.  If fill < size, it's
			   merely desirable, as dummies slow searches. */
			assert(dict->d_fill > dict->d_used);
			/* Copy to a stack buffer so the small table can be
			   rebuilt in place from the copy. */
			memcpy(small_copy, oldtable, sizeof(small_copy));
			oldtable = small_copy;
		}
	}
	else {
		newtable = CO_NEW(CoDictEntry, newsize);
		if (newtable == NULL) {
            /* TODO: [ERROR] no memory */
			return -1;
		}
	}

	/* Make the dict empty, using the new table. */
	assert(newtable != oldtable);
	dict->d_table = newtable;
	dict->d_mask = newsize - 1;
	memset(newtable, 0, sizeof(CoDictEntry) * newsize);
	dict->d_used = 0;
	/* `i` counts the remaining occupied (active or dummy) old slots. */
	i = dict->d_fill;
	dict->d_fill = 0;

	/* Copy the data over; this is refcount-neutral for active entries;
	   dummy entries aren't copied over, of course */
	for (entry = oldtable; i > 0; entry++) {
		if (entry->de_value != NULL) {	/* active entry */
			--i;
			insertdict_clean(dict, entry->de_key, entry->de_hash,
					 entry->de_value);
		}
		else if (entry->de_key != NULL) {	/* dummy entry */
			--i;
			assert(entry->de_key == dummy);
			CO_DECREF(entry->de_key);
		}
		/* else key == value == NULL:  nothing to do */
	}

	if (is_oldtable_malloced) {
		CO_FREE(oldtable);
    }
	return 0;
}

int CoDict_SetItem(CoValue *self, CoValue *key, CoValue *value)
{
	CoDict *dict;
	long hash;
	ssize_t n_used;

	if (!CODICT_CHECK(self)) {
		/* TODO: [ERROR] bad internal call */
		return -1;
	}
	assert(key);
	assert(value);

	dict = (CoDict *)self;
	/* Reuse a string key's cached hash when one is available. */
    if (!COSTRING_CHECK(key) ||
            (hash = ((CoString *) key)->s_hash) == -1)
    {
        hash = CoValue_Hash(key);
	}
	assert(dict->d_fill <= dict->d_mask);  /* at least one empty slot */

	n_used = dict->d_used;
	/* insertdict() eats one reference to each of key and value. */
	CO_INCREF(value);
	CO_INCREF(key);
	insertdict(dict, key, hash, value);

    /* If we added a key, we can safely resize.  Otherwise just return!
	 * If fill >= 2/3 size, adjust size.  Normally, this doubles or
	 * quaduples the size, but it's also possible for the dict to shrink
	 * (if d_fill is much larger than d_used, meaning a lot of dict
	 * keys have been deleted).
	 *
	 * Quadrupling the size improves average dictionary sparseness
	 * (reducing collisions) at the cost of some memory and iteration
	 * speed (which loops over every possible entry).  It also halves
	 * the number of expensive resize operations in a growing dictionary.
	 *
	 * Very large dictionaries (over 50K items) use doubling instead.
	 * This may help applications with severe memory constraints.
	 */
	if (!(dict->d_used > n_used && dict->d_fill*3 >= (dict->d_mask+1)*2)) {
		return 0;
    }
	return dictresize(dict, (dict->d_used > 50000 ? 2 : 4) * dict->d_used);
}

/* Remove `key` from the dict.  Returns 0 on success, -1 if `self` is
   not a dict or the key is absent. */
int CoDict_DelItem(CoValue *self, CoValue *key)
{
	CoDict *dict;
	CoDictEntry *entry;
	CoValue *stale_key, *stale_value;
	long hash;

	if (!CODICT_CHECK(self)) {
		/* TODO: [ERROR] bad internal call */
		return -1;
	}
	assert(key);

	/* Reuse a string key's cached hash when one is available. */
	if (COSTRING_CHECK(key)) {
		hash = ((CoString *) key)->s_hash;
		if (hash == -1)
			hash = CoValue_Hash(key);
	}
	else {
		hash = CoValue_Hash(key);
	}

	dict = (CoDict *)self;
	entry = lookup(dict, key, hash);
	if (entry->de_value == NULL) {
		/* TODO: [ERROR] key error */
		return -1;
	}

	/* Detach the pair and plant the dummy sentinel BEFORE the
	   decrefs, since a decref can re-enter the dict. */
	stale_key = entry->de_key;
	stale_value = entry->de_value;
	CO_INCREF(dummy);
	entry->de_key = dummy;
	entry->de_value = NULL;
	dict->d_used--;
	CO_DECREF(stale_value);
	CO_DECREF(stale_key);
	return 0;
}

/* Remove every entry from the dict, releasing all key/value references
   and returning the dict to its inline small table. */
void CoDict_Clear(CoValue *self)
{
	CoDict *dict;
	CoDictEntry *entry, *table;
	int table_is_malloced;
	ssize_t fill;
	CoDictEntry small_copy[DICT_MIN_SIZE];

	if (!CODICT_CHECK(self)) {
		return;
    }

	dict = (CoDict *)self;

	table = dict->d_table;
	assert(table != NULL);
	table_is_malloced = table != dict->d_smalltable;

	/* This is delicate.  During the process of clearing the dict,
	 * decrefs can cause the dict to mutate.  To avoid fatal confusion
	 * (voice of experience), we have to make the dict empty before
	 * clearing the slots, and never refer to anything via dict->xxx while
	 * clearing.
	 */
	fill = dict->d_fill;
	if (table_is_malloced) {
		EMPTY_TO_MINSIZE(dict);
    }
	else if (fill > 0) {
		/* It's a small table with something that needs to be cleared.
		 * Afraid the only safe way is to copy the dict entries into
		 * another small table first.
		 */
		memcpy(small_copy, table, sizeof(small_copy));
		table = small_copy;
		EMPTY_TO_MINSIZE(dict);
	}
	/* else it's a small table that's already empty */

	/* Now we can finally clear things.  If C had refcounts, we could
	 * assert that the refcount on table is 1 now, i.e. that this function
	 * has unique access to it, so decref side-effects can't alter it.
	 */
	for (entry = table; fill > 0; ++entry) {
		/* de_key is non-NULL for both active and dummy entries;
		   de_value may be NULL (dummy), hence XDECREF. */
		if (entry->de_key) {
			--fill;
			CO_DECREF(entry->de_key);
			CO_XDECREF(entry->de_value);
		}
	}

	if (table_is_malloced) {
		CO_FREE(table);
    }
}

/* Type destructor: drop every stored key/value reference, free a
   heap-allocated table if present, then free the dict itself. */
static void dict_dealloc(CoValue *self)
{
    CoDict *d;
    CoDictEntry *ep;
    ssize_t remaining;

    assert(CODICT_CHECK(self));
    d = (CoDict *)self;

    /* Sweep occupied slots; d_fill counts active + dummy entries. */
    remaining = d->d_fill;
    ep = d->d_table;
    while (remaining > 0) {
        if (ep->de_key != NULL) {
            --remaining;
            CO_DECREF(ep->de_key);
            CO_XDECREF(ep->de_value);
        }
        ep++;
    }

    if (d->d_table != d->d_smalltable) {
        CO_FREE(d->d_table);
    }

    CoValue_Free(self);
}

CoType CoDict_Type = {
    "dict",                         /* type name */
	dict_dealloc,                   /* tp_dealloc */
    0,                              /* tp_hash */
    0,                              /* tp_equal */
    0,                              /* tp_str */
    0,                              /* tp_iter */
    0,                              /* tp_iternext */
};
