/*
 * Copyright (C) 2011 Christopher Probst
 * All rights reserved.
 * 
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * 
 * * Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 *  
 * * Neither the name of the 'FoxNet Codec' nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 * 
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
package com.foxnet.codec.impl;

import java.util.Arrays;

import com.foxnet.codec.Cache;

/**
 * Copied from java.io.ObjectOutputStream.
 * 
 * @author Christopher Probst
 */
public final class DefaultCache implements Cache {

    /** Number of mappings in the table; also the next available handle. */
    private int size;

    /** Size threshold determining when to expand the hash spine. */
    private int threshold;

    /** Factor for computing the size threshold. */
    private final float loadFactor;

    /** Maps hash value -> candidate handle value; -1 marks an empty bucket. */
    private int[] spine;

    /** Maps handle value -> next candidate handle value in the same bucket. */
    private int[] next;

    /** Maps handle value -> associated object. */
    private Object[] objs;

    /**
     * Creates a new cache with a default initial capacity of 16 and a load
     * factor of 0.7.
     */
    public DefaultCache() {
	this(16, 0.7f);
    }

    /**
     * Creates a new cache with the given capacity and load factor.
     * 
     * @param initialCapacity
     *            the initial number of buckets and entry slots; must not be
     *            negative.
     * @param loadFactor
     *            the factor used to compute the resize threshold; must be a
     *            positive, finite number (a non-positive or NaN value would
     *            force a spine rebuild on every single insertion).
     * @throws IllegalArgumentException
     *             if initialCapacity is negative or loadFactor is not a
     *             positive finite number.
     */
    public DefaultCache(int initialCapacity, float loadFactor) {
	if (initialCapacity < 0) {
	    throw new IllegalArgumentException("Illegal initial capacity: "
		    + initialCapacity);
	}
	if (loadFactor <= 0 || Float.isNaN(loadFactor)
		|| Float.isInfinite(loadFactor)) {
	    throw new IllegalArgumentException("Illegal load factor: "
		    + loadFactor);
	}
	this.loadFactor = loadFactor;
	spine = new int[initialCapacity];
	next = new int[initialCapacity];
	objs = new Object[initialCapacity];
	threshold = (int) (initialCapacity * loadFactor);
	clear();
    }

    /**
     * Assigns the next available handle to the given object and returns the
     * handle value. Handles are assigned in ascending order starting at 0.
     * Note that the same object may be assigned multiple handles; callers
     * typically guard with {@link #lookup(Object)} first.
     * 
     * @param obj
     *            the object to assign a handle to.
     * @return the newly assigned handle.
     */
    @Override
    public int assign(Object obj) {
	if (size >= next.length) {
	    growEntries();
	}
	if (size >= threshold) {
	    growSpine();
	}
	insert(obj, size);
	return size++;
    }

    /**
     * Looks up and returns the handle associated with the given object, or -1
     * if no mapping was found. Objects are compared by reference identity
     * (==), not by {@link Object#equals(Object)}.
     * 
     * @param obj
     *            the object to look up.
     * @return the associated handle, or -1 if none exists.
     */
    @Override
    public int lookup(Object obj) {
	if (size == 0) {
	    return -1;
	}
	// Walk the bucket's chain; chains are terminated by -1.
	int index = hash(obj) % spine.length;
	for (int i = spine[index]; i >= 0; i = next[i]) {
	    if (objs[i] == obj) {
		return i;
	    }
	}
	return -1;
    }

    /**
     * Resets the table to its initial (empty) state, releasing all object
     * references so they can be garbage collected.
     */
    @Override
    public void clear() {
	Arrays.fill(spine, -1);
	// Only the first 'size' slots can hold live references.
	Arrays.fill(objs, 0, size, null);
	size = 0;
    }

    /**
     * Returns the number of mappings currently in the table.
     * 
     * @return the current mapping count.
     */
    @Override
    public int size() {
	return size;
    }

    /**
     * Inserts an object -> handle mapping into the table. Assumes the table is
     * large enough to accommodate the new mapping. The new entry is linked in
     * at the head of its bucket's chain.
     */
    private void insert(Object obj, int handle) {
	int index = hash(obj) % spine.length;
	objs[handle] = obj;
	next[handle] = spine[index];
	spine[index] = handle;
    }

    /**
     * Expands the hash "spine" -- equivalent to increasing the number of
     * buckets in a conventional hash table -- and rehashes all existing
     * entries into the enlarged spine.
     */
    private void growSpine() {
	spine = new int[(spine.length << 1) + 1];
	threshold = (int) (spine.length * loadFactor);
	Arrays.fill(spine, -1);
	for (int i = 0; i < size; i++) {
	    insert(objs[i], i);
	}
    }

    /**
     * Increases the table's entry capacity by lengthening the handle-indexed
     * arrays. Does not affect the spine (bucket count).
     */
    private void growEntries() {
	int newLength = (next.length << 1) + 1;
	int[] newNext = new int[newLength];
	System.arraycopy(next, 0, newNext, 0, size);
	next = newNext;

	Object[] newObjs = new Object[newLength];
	System.arraycopy(objs, 0, newObjs, 0, size);
	objs = newObjs;
    }

    /**
     * Returns the hash value for the given object, based on its identity hash
     * code. The sign bit is masked off so the spine index is never negative.
     */
    private static int hash(Object obj) {
	return System.identityHashCode(obj) & 0x7FFFFFFF;
    }
}
