package com.fenyin.samples.study.jdk.collections.map;

import java.util.concurrent.locks.ReentrantLock;

import com.fenyin.samples.study.util.TimeUtil;

/**
 * A segmented, lock-striped LRU cache modeled on the JDK 6
 * {@code ConcurrentHashMap} design, with an additional global doubly-linked
 * list intended to maintain access order for eviction.
 * <p>
 * WARNING: the original author marked this implementation as defective and
 * abandoned it ("该实现存在问题，放弃!"). It is known to be broken — the public
 * {@code put}/{@code remove} are no-ops and the LRU bookkeeping is incorrect.
 * Do not use in production.
 *
 * @author fengzhichao
 * @since 2012-12-28
 * @version 1.1.0
 */
public class ConcurrentLRUCache<K, V> implements Cache<K,V>{

	/**
	 * Default maximum segment count.
	 * NOTE(review): never referenced in the visible code — dead constant.
	 */
	static final int DEFAULT_SEGEMENT_MAX_CAPACITY = 100;

	/**
	 * The default load factor for this table, used when not otherwise specified
	 * in a constructor.
	 * NOTE(review): never referenced in the visible code.
	 */
	static final float DEFAULT_LOAD_FACTOR = 0.75f;

	/**
	 * The default concurrency level for this table, used when not otherwise
	 * specified in a constructor.
	 * NOTE(review): never referenced in the visible code.
	 */
	static final int DEFAULT_CONCURRENCY_LEVEL = 16;

	/**
	 * The maximum capacity, used if a higher value is implicitly specified by
	 * either of the constructors with arguments. MUST be a power of two <=
	 * 1<<30 to ensure that entries are indexable using ints.
	 */
	static final int MAXIMUM_CAPACITY = 1 << 30;

	/**
	 * The maximum number of segments to allow; used to bound constructor
	 * arguments.
	 */
	static final int MAX_SEGMENTS = 1 << 16; // slightly conservative

	/**
	 * Mask value for indexing into segments. The upper bits of a key's hash
	 * code are used to choose the segment.
	 */
	final int segmentMask;

	/**
	 * Shift value for indexing within segments.
	 */
	final int segmentShift;

	/**
	 * The segments, each of which is a specialized hash table
	 */
	final Segment<K, V>[] segments;

	/** Current number of live entries across all segments (guarded by golabLock). */
	int size;
	
	/** Maximum number of entries the cache may hold (typo: "capacity"). */
	final int capation;
	
	/**
	 * Entry time-to-live. Compared against (System.currentTimeMillis() - createTime)
	 * in Segment.get(), so it is presumably milliseconds — but see the note on
	 * HashEntry.createTime about a likely unit mismatch.
	 */
	final long deadTime;

	/**
	 * Sentinel head of the global doubly-linked LRU list (linked via
	 * linkNext/linkPrev). Raw type — should be HashEntry&lt;K, V&gt;.
	 */
	HashEntry head;
	
	/** Global lock guarding size and the LRU list (typo: "globalLock"). */
	ReentrantLock golabLock = new ReentrantLock();

	/* ---------------- Small Utilities -------------- */
	/**
	 * Applies a supplemental hash function to a given hashCode, which defends
	 * against poor quality hash functions. This is critical because
	 * ConcurrentHashMap uses power-of-two length hash tables, that otherwise
	 * encounter collisions for hashCodes that do not differ in lower or upper
	 * bits.
	 */
	private static int hash(int h) {
		// Variant of single-word Wang/Jenkins hash, as used by JDK 6 ConcurrentHashMap.
		h += (h << 15) ^ 0xffffcd7d;
		h ^= (h >>> 10);
		h += (h << 3);
		h ^= (h >>> 6);
		h += (h << 2) + (h << 14);
		return h ^ (h >>> 16);
	}

	/**
	 * Returns the segment that should be used for key with given hash
	 * 
	 * @param hash
	 *            the hash code for the key
	 * @return the segment
	 */
	final Segment<K, V> segmentFor(int hash) {
		return segments[(hash >>> segmentShift) & segmentMask];
	}

	/* ---------------- Inner Classes -------------- */
	/**
	 * Hash-table entry, modified from the original ConcurrentHashMap.HashEntry:
	 * value is mutable (volatile) and two extra pointer pairs are added — one
	 * for the per-bucket collision chain and one for the global LRU list.
	 * Note the type parameters K, V shadow the outer class's.
	 */
	final class HashEntry<K, V> implements Cache.Entry<K,V>{
		final K key;
		final int hash;
		volatile V value;
		// Creation timestamp from TimeUtil.getCurrentTime17().
		// NOTE(review): Segment.get() subtracts this from System.currentTimeMillis();
		// the name "getCurrentTime17" suggests a 17-digit formatted timestamp
		// (yyyyMMddHHmmssSSS), not epoch millis — if so, the expiry check is
		// comparing incompatible units. Verify TimeUtil's contract.
		final long createTime;
		
		/** Per-bucket collision-chain pointers ("internal" doubly-linked list). */
		HashEntry<K, V> prev;
		HashEntry<K, V> next;

		/**
		 * Global LRU-list pointers ("external" doubly-linked list), threaded
		 * through the outer class's head sentinel.
		 */
		HashEntry<K, V> linkNext;
		HashEntry<K, V> linkPrev;

		HashEntry(K key, int hash,HashEntry<K, V> prev,HashEntry<K, V> next, V value) {
			this.key = key;
			this.hash = hash;
			this.prev = prev;
			this.next = next;
			this.value = value;
			this.createTime = TimeUtil.getCurrentTime17();
			this.linkNext=null;
			this.linkPrev=null;
		}
		
		// Convenience constructor: entry with no chain neighbours yet.
		HashEntry(K key,int hash,V value){
			this.key = key;
			this.hash = hash;
			this.prev = null;
			this.next = null;
			this.value = value;
			this.createTime = TimeUtil.getCurrentTime17();
			this.linkNext=null;
			this.linkPrev=null;
		}

	}

	/**
	 * One lock stripe: a small open-hash table guarded by its own ReentrantLock
	 * (this class IS the lock, as in JDK 6 ConcurrentHashMap.Segment).
	 * Non-static inner class: it reads the outer instance's capation, deadTime,
	 * segmentShift/segmentMask and LRU helpers through the hidden outer reference.
	 * NOTE(review): extends the serializable ReentrantLock but declares no
	 * serialVersionUID.
	 */
	final class Segment<K, V> extends ReentrantLock implements Cache.Bluck<K,V>{
		/**
		 * The number of elements in this segment's region.
		 */
		transient volatile int count;

		/**
		 * Number of updates that alter the size of the table. This is used
		 * during bulk-read methods to make sure they see a consistent snapshot:
		 * If modCounts change during a traversal of segments computing size or
		 * checking containsValue, then we might have an inconsistent view of
		 * state so (usually) must retry.
		 */
		transient int modCount;

		/**
		 * The table is rehashed when its size exceeds this threshold. (The
		 * value of this field is always <tt>(int)(capacity *
		 * loadFactor)</tt>.)
		 */
		transient int threshold;

		/**
		 * The per-segment table.
		 */
		transient volatile HashEntry<K, V>[] table;

		/**
		 * The load factor for the hash table. Even though this value is same
		 * for all segments, it is replicated to avoid needing links to outer
		 * object.
		 * 
		 * @serial
		 */
		final float loadFactor;
		
		/** This segment's index in the outer segments[] array. */
		int segmentIndex;

		Segment(int initialCapacity, float lf,int segmentIndex) {
			loadFactor = lf;
			table = new HashEntry[initialCapacity];
			threshold = (int) (initialCapacity * loadFactor);
			this.segmentIndex = segmentIndex;
		}

		/**
         * Returns properly casted first entry of bin for given hash.
         */
        HashEntry<K,V> getFirst(int hash) {
            HashEntry<K,V>[] tab = table;
            return tab[hash & (tab.length - 1)];
        }

        /**
         * Reads value field of an entry under lock. Called if value
         * field ever appears to be null. This is possible only if a
         * compiler happens to reorder a HashEntry initialization with
         * its table assignment, which is legal under memory model
         * but is not known to ever occur.
         */
        V readValueUnderLock(HashEntry<K,V> e) {
            lock();
            try {
                return e.value;
            } finally {
                unlock();
            }
        }
        
        /**
         * Doubles the table size and redistributes entries.
         * BUG(review): only the FIRST entry of each old bucket is moved; the
         * rest of its collision chain (e.next...) is silently dropped when
         * oldTable[i] is nulled, losing entries. Also, if two bucket heads map
         * to the same new index, the earlier one is overwritten. Compare with
         * JDK 6 ConcurrentHashMap.Segment.rehash(), which walks each chain.
         * Callers must hold this segment's lock (put() does).
         */
        void rehash() {
        	// Bail out if the table has already reached the maximum capacity.
            HashEntry<K,V>[] oldTable = table;
            int oldCapacity = oldTable.length;
            if (oldCapacity >= MAXIMUM_CAPACITY){
            	return;
            }
            // Allocate the doubled table.
            HashEntry<K,V>[] newTable = new HashEntry[oldCapacity<<1];
            // Compute the next resize threshold.
            threshold = (int)(newTable.length * loadFactor);
            int sizeMask = newTable.length - 1;
            for (int i = 0; i < oldCapacity ; i++) {
            	// Move each old bucket head (and ONLY the head — see BUG above).
                HashEntry<K,V> e = oldTable[i];
                if (e != null) {
                    // New bucket index under the doubled table's mask.
                    int idx = e.hash & sizeMask;
                    newTable[idx] = e;
                    oldTable[i] = null;
                }
            }
            table = newTable;
        }
		
		/**
		 * Looks up the value for key within this segment.
		 * NOTE(review, per the original author): this logic needs revision.
		 * Known problems: (1) expired entries return null but are never removed,
		 * so they occupy capacity forever; (2) addBefore(e) is called without
		 * holding this segment's lock, racing with put(); (3) addBefore itself
		 * is broken (see its comments).
		 */
		V get(K key, int hash) {
            if (count != 0) { // read-volatile
            	// Start at the bucket head for this hash.
                HashEntry<K, V> e = getFirst(hash);
                while (e != null) {// walk the collision chain
                    if (e.hash == hash && key.equals(e.key)){// found the matching node
                        V v = e.value;
                        // Expiry check (see the unit-mismatch note on createTime).
                        if((System.currentTimeMillis()-e.createTime) > deadTime){// a read does not remove the expired node
                        	return null;
                        }else{
                        	// Promote to the head of the LRU list; intended to be atomic.
                        	addBefore(e);
                        }
                        if(v!=null){
                        	return v;
                        }
                        // null value can only mean a reordered publication; re-read under lock.
                        return readValueUnderLock(e);
                    }
                    e = e.next;
                }
            }
            return null;
        }
		
		
		/**
		 * Inserts or updates (key, value) in this segment under the segment lock.
		 * BUG(review): in both "capacity full" branches, e is provably null
		 * (this else-arm runs only when the key was not found), so removeNode(e)
		 * always throws NullPointerException; the computed tail is what should
		 * have been evicted but is never used. The new entry is also never
		 * linked into the LRU list in ANY branch (no linkNext/linkPrev setup),
		 * the same-segment branch never writes count, and if first is null
		 * (empty bucket) first.prev dereferences null. onlyIfAbsent is ignored.
		 */
		void put(K key, int hash, V value, boolean onlyIfAbsent) {
            lock();
            try {
            	// Grow the table if this insert would exceed the threshold.
                int c = count;
                if (c++ > threshold){// ensure capacity
                	// Resize + redistribute (itself lossy — see rehash()).
                	rehash();
                } 
                HashEntry<K,V>[] tab = table;
                int index = hash & (tab.length - 1);
                HashEntry<K,V> first = tab[index];
                HashEntry<K,V> e = first;
                while (e != null && (e.hash != hash || !key.equals(e.key))){
                	e = e.next;
                }

                if (e != null) {// key already present: overwrite value
                	e.value = value;
                }else {
                    if(getSize() == capation ){// cache is at capacity: evict before insert
                    	HashEntry tail = head.linkPrev;// LRU victim (computed but never used — see BUG)
                    	boolean flag = ((hash >>> segmentShift) & segmentMask)==segmentIndex ? true : false;
                    	if(flag){// at capacity, victim in the SAME segment
                    		tab[index] = new HashEntry<K,V>(key, hash, null,first, value);// new bucket head
                    		first.prev = tab[index];
                    		++modCount;
                    		// unlink victim under the global lock (BUG: e is null here)
                    		removeNode(e);
                    	}else{// at capacity, victim in a DIFFERENT segment
                    		tab[index] = new HashEntry<K,V>(key, hash, null,first, value);// new bucket head
                    		first.prev = tab[index];
                    		++modCount;
                    		count = c; // write-volatile
                    		// unlink victim under the global lock (BUG: e is null here)
                    		removeNode(e);
                    	}
                    }else{// below capacity: plain insert at the bucket head
                    	tab[index] = new HashEntry<K,V>(key, hash, null,first, value);
                    	first.prev = tab[index];
                    	++modCount;
                        count = c; // write-volatile
                        setSize();
                    }
                }
            } finally {
                unlock();
            }
        }
		
	}
	
	
    /**
     * Moves entry e to the front of the global LRU list (most recently used).
     * BUG(review): the first statement writes e.linkPrev.next — the BUCKET
     * chain pointer — where e.linkPrev.linkNext was surely intended, corrupting
     * the collision chain of an unrelated entry. The splice is also incomplete:
     * e.linkNext is never pointed at the old head.linkNext, and the old
     * head.linkNext's linkPrev is never updated, so the list is severed.
     * NPEs if e was never linked (linkPrev/linkNext are null on construction).
     */
    void addBefore(HashEntry e){
		golabLock.lock();
		try{
			e.linkPrev.next     = e.linkNext;
        	e.linkNext.linkPrev = e.linkPrev;
        	e.linkPrev          = head;
        	head.linkNext       = e;
		}finally{
			golabLock.unlock();
		}
	}
    
    /**
     * Intended to unlink the LRU victim from the global list.
     * BUG(review): operates on the BUCKET chain pointers (prev/next) instead of
     * linkPrev/linkNext throughout; the second assignment to tail.prev.next
     * immediately overwrites the first; and size is never decremented, so
     * getSize() == capation stays true forever once reached.
     */
    void removeNode(HashEntry tail){
    	golabLock.lock();
		try{
        	head.linkPrev = tail.prev;
        	tail.prev.next = head;
        	tail.prev.next = tail.next;
    		tail.next.prev = tail.prev;
		}finally{
			golabLock.unlock();
		}
		
    }
    
    /** Returns the current entry count under the global lock. */
    int getSize(){
    	golabLock.lock();
		try{
        	return size;
		}finally{
			golabLock.unlock();
		}
    }
    
    /**
     * Increments the entry count under the global lock.
     * NOTE(review): there is no corresponding decrement anywhere.
     */
    void setSize(){
    	golabLock.lock();
		try{
        	size++;
		}finally{
			golabLock.unlock();
		}
    }
    
    

	/**
	 * Creates the cache, sizing the segment array to the smallest power of two
	 * >= concurrencyLevel and each segment's table to the smallest power of two
	 * >= initialCapacity / segmentCount (mirrors the JDK 6 ConcurrentHashMap
	 * constructor). The head sentinel is linked to itself to form an empty
	 * circular LRU list.
	 *
	 * @param initialCapacity  initial total table capacity hint (>= 0)
	 * @param loadFactor       per-segment load factor (> 0)
	 * @param concurrencyLevel desired number of lock stripes (> 0, capped at MAX_SEGMENTS)
	 * @param deadTime         entry time-to-live (> 0; see unit-mismatch note on createTime)
	 * @throws IllegalArgumentException on any out-of-range argument
	 */
	public ConcurrentLRUCache(int initialCapacity, float loadFactor,int concurrencyLevel,long deadTime) {
		if (!(loadFactor > 0) || initialCapacity < 0 || concurrencyLevel <= 0 || deadTime <= 0)
			throw new IllegalArgumentException();

		if (concurrencyLevel > MAX_SEGMENTS)
			concurrencyLevel = MAX_SEGMENTS;

		// Find power-of-two sizes best matching arguments
		int sshift = 0;
		int ssize = 1;
		while (ssize < concurrencyLevel) {
			++sshift;
			ssize <<= 1;
		}
		segmentShift = 32 - sshift;
		segmentMask = ssize - 1;
		this.segments = new Segment[ssize];

		if (initialCapacity > MAXIMUM_CAPACITY)
			initialCapacity = MAXIMUM_CAPACITY;
		// Per-segment capacity: ceil(initialCapacity / ssize), rounded up to a power of two.
		int c = initialCapacity / ssize;
		if (c * ssize < initialCapacity){
			++c;
		}
		int cap = 1;
		while (cap < c){
			cap <<= 1;
		}
		
		this.capation = initialCapacity;
		this.size = 0;
		this.deadTime = deadTime;
		this.head = new HashEntry<K,V>(null,0,null);
		// Self-linked sentinel: empty circular LRU list.
		this.head.linkNext = this.head.linkPrev = this.head;
		for (int i = 0; i < this.segments.length; ++i){
			this.segments[i] = new Segment<K, V>(cap, loadFactor,i);
		}
	}

	public V get(K key) {
		// Spread the key's hashCode, then route to the owning segment.
		int hash = hash(key.hashCode());
		// Delegate the lookup to that segment.
        return segmentFor(hash).get(key, hash);
	}

	/**
	 * BUG(review): the delegation to Segment.put is commented out, so this
	 * method only validates value and discards the pair — the cache never
	 * stores anything.
	 */
	public void put(K key, V value) {
		if (value == null){
			throw new NullPointerException();
		}
		// Spread the key's hashCode, then route to the owning segment.
        int hash = hash(key.hashCode());
//        segmentFor(hash).put(key, hash, value, false);
	}

	/**
	 * BUG(review): the delegation is commented out (Segment has no remove()
	 * implemented), so this method is a no-op.
	 */
	public void remove(K key) {
		// Spread the key's hashCode, then route to the owning segment.
		int hash = hash(key.hashCode());
//        segmentFor(hash).remove(key, hash);
	}

	
}
