/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * ORACLE PROPRIETARY/CONFIDENTIAL. Use is subject to license terms.
 *
 *
 *
 *
 *
 *
 *
 *
 *
 *
 *
 *
 *
 *
 *
 *
 *
 *
 *
 *
 */

package java.util;

import java.util.function.Consumer;
import java.util.function.BiConsumer;
import java.util.function.BiFunction;
import java.io.IOException;

/**
 * <p>hash table和linked list实现的<tt>Map</tt>接口, 具有可预测的迭代顺序. 此实现
 * 与<tt>HashMap</tt>的不同之处在于, 它维护一个贯穿所有条目的双链表. 这个链表定义迭代
 * 顺序, 通常是key插入到map中的顺序(<i>insertion-order</i>). 注意, 如果一个key重新
 * 插入到map中, 则插入顺序不受影响. (如果调用<tt>m.put(k, v)</tt>, 而
 * <tt>m.containsKey(k)</tt>将在调用前立即返回<tt>true</tt>, 则<tt>k</tt>将重新
 * 插入map<tt>m</tt>.)
 *
 * <p>此实现使其客户端免于{@link HashMap}(和{@link Hashtable})提供的未指定的、通常
 * 混乱的排序, 而不会导致与{@link TreeMap}相关的成本增加. 无论原始map的实现如何, 它都
 * 可以用于生产与原始map具有相同顺序的map副本:
 * <pre>
 *     void foo(Map m) {
 *         Map copy = new LinkedHashMap(m);
 *         ...
 *     }
 * </pre>
 * 如果模块接受输入上的map, 复制它, 然后返回由复制的顺序决定的结果, 那么这种技术特别有用.
 * (客户通常喜欢按物品原来提供的顺序返回.)
 *
 * <p>提供了一个特殊的{@link #LinkedHashMap(int,float,boolean) constructor}来创建
 * 一个linked hash map, 它的迭代顺序是它的条目最后访问的顺序, 从最近最少访问到最近最多访问
 * (<i>access-order</i>). 这种map非常适用于构建LRU缓存. 调用{@code put}, {@code putIfAbsent},
 * {@code get}, {@code getOrDefault}, {@code compute}, {@code computeIfAbsent},
 * {@code computeIfPresent}或{@code merge}方法会调用对相应条目的访问(假设它在调用完成后存在).
 * 如果该值被替换, {@code replace}方法只会导致对条目的访问. {@code putAll}方法为指定map中的每个
 * 映射生成一个条目访问, 按照指定map的entry set iterator提供key-value映射的顺序. 没有其他方法生成
 * 条目访问. (特别是, collection-views上的操作不会影响备份map的迭代顺序.)
 *
 * <p>可以重写{@link #removeEldestEntry(Map.Entry)}方法, 以强制实施在向map添加新映射时
 * 自动删除过时映射的策略.
 *
 * <p>该类提供所有可选的<tt>Map</tt>操作, 并允许null元素. 与<tt>HashMap</tt>类似,
 * 它为基本操作(<tt>add</tt>, <tt>contains</tt>和<tt>remove</tt>)提供恒定的时间
 * 性能, 假设hash函数将元素适当地分散在桶中. 由于维护链表的额外开销, 性能可能略低于
 * <tt>HashMap</tt>, 但有一个例外: 在<tt>LinkedHashMap</tt>的collection-views
 * 上迭代所需要的时间与map的<i>size</i>成正比, 而不管它的容量如何.  迭代<tt>HashMap</tt>
 * 的开销可能更大, 所需时间与<i>容量</i>成正比.
 *
 * <p>一个linked hash map有两个影响其性能的参数:
 * <i>初始容量</i>和<i>负载因子</i>. 他们的定义与<tt>HashMap</tt>完全相同. 然而, 请注意,
 * 为初始容量选择过高的值对该类的惩罚不如<tt>HashMap</tt>严重, 因为该类的迭代时间不受容量的
 * 影响.
 *
 * <p><strong>请注意, 此实现不同步.</strong>
 * 如果多个线程同时访问另一个linked hash map, 并且至少有一个线程在结构上修改了该map,
 * 那么, 它<em>必须</em>在外部进行同步. 这通常通过在自然封装该map的某个对象上进行同步来实现.
 *
 * 如果不存在这样的对象, 则应该使用
 * {@link Collections#synchronizedMap Collections.synchronizedMap}
 * 方法"包装"map. 这最好在创建时完成, 以防止对map的意外不同步访问:<pre>
 *   Map m = Collections.synchronizedMap(new LinkedHashMap(...));</pre>
 *
 * 结构修改是添加或删除一个或多个映射的任何操作, 或者在访问有序linked hash map的情况下,
 * 影响迭代顺序. 在插入有序linked hash map中, 仅更改与map中已包含的key关联的value
 * 并不是结构修改. <strong>在访问有序的linked hash map中, 仅使用<tt>get</tt>查询
 * map是一个结构修改.</strong>
 *
 * <p>这个类的所有collection view方法所返回的集合的<tt>iterator</tt>方法返回的
 * iterator都是<em>fail-fast</em>: 如果在iterator创建后的任何时候, 以除iterator
 * 自己的<tt>remove</tt>方法以外的任何方式对map进行了结构修改, iterator将抛出
 * {@link ConcurrentModificationException}. 因此, 在面对并发修改时, iterator
 * 会快速而干净地失败, 而不是在未来不确定的时间发生任意的、不确定的行为.
 *
 * <p>注意, iterator的快速失败行为不能得到保证, 因为一般来说, 在存在不同步的修改时,
 * 不可能做出任何硬保证. 快速失败iterator尽可能抛出
 * <tt>ConcurrentModificationException</tt>. 因此, 编写一个依赖于此异常的程序
 * 是错误的: <i>iterator的快速失败行为应该只用于检测错误.</i>
 *
 * <p>这个类的所有的collection view方法返回的collection的spliterator方法返回的
 * spliterator是<em><a href="Spliterator.html#binding">late-binding</a></em>,
 * <em>fail-fast</em>, 和附加报告{@link Spliterator#ORDERED}.
 *
 * <p>这个类是
 * <a href="{@docRoot}/../technotes/guides/collections/index.html">
 * Java Collections Framework的成员</a>.
 *
 * @implNote
 * 这个类的所有collection view方法返回的collection的spliterator方法返回的
 * spliterator是从相应collection的iterator创建的.
 *
 * @param <K> 此map维护的key的类型
 * @param <V> 映射value的类型
 *
 * @author  Josh Bloch
 * @see     Object#hashCode()
 * @see     Collection
 * @see     Map
 * @see     HashMap
 * @see     TreeMap
 * @see     Hashtable
 * @since   1.4
 */
public class LinkedHashMap<K,V>
		extends HashMap<K,V>
		implements Map<K,V>
{

	/*
	 * Implementation note.  A previous version of this class was internally
	 * structured a little differently.  Because superclass HashMap now uses
	 * trees for some of its nodes, class LinkedHashMap.Entry is now treated
	 * as an intermediary node class that can also be converted to tree form.
	 * The name of this class, LinkedHashMap.Entry, is confusing in several
	 * ways in its current context, but cannot be changed.  Otherwise, even
	 * though it is not exported outside this package, some existing source
	 * code is known to rely on a symbol-resolution corner-case rule in calls
	 * to removeEldestEntry that suppressed compilation errors due to
	 * ambiguous usages.  So, we keep the name to preserve unmodified
	 * compilability.
	 *
	 * The changes in node classes also require using two fields (head, tail)
	 * rather than a pointer to a header node to maintain the doubly-linked
	 * before/after list.  This class previously also used a different style
	 * of callback methods upon access, insertion, and removal.
	 */

	/**
	 * HashMap.Node subclass for normal LinkedHashMap entries; adds the
	 * before/after links that thread entries into the iteration-order list.
	 */
	static class Entry<K,V> extends HashMap.Node<K,V> {
		Entry<K,V> before, after;
		Entry(int hash, K key, V value, Node<K,V> next) {
			super(hash, key, value, next);
		}
	}

	private static final long serialVersionUID = 3801124242820219131L;

	/**
	 * The head (eldest entry) of the doubly linked list.
	 */
	transient LinkedHashMap.Entry<K,V> head;

	/**
	 * The tail (youngest entry) of the doubly linked list.
	 */
	transient LinkedHashMap.Entry<K,V> tail;

	/**
	 * The iteration ordering method for this linked hash map:
	 * <tt>true</tt> for access-order, <tt>false</tt> for insertion-order.
	 *
	 * @serial
	 */
	final boolean accessOrder;

	// internal utilities

	// Links the given entry at the tail (youngest end) of the list.
	private void linkNodeLast(LinkedHashMap.Entry<K,V> p) {
		LinkedHashMap.Entry<K,V> last = tail;
		tail = p;
		if (last == null)
			head = p;
		else {
			p.before = last;
			last.after = p;
		}
	}

	// Applies src's links to dst: in effect, replaces src with dst in the list.
	private void transferLinks(LinkedHashMap.Entry<K,V> src,
	                           LinkedHashMap.Entry<K,V> dst) {
		LinkedHashMap.Entry<K,V> b = dst.before = src.before;
		LinkedHashMap.Entry<K,V> a = dst.after = src.after;
		if (b == null)
			head = dst;
		else
			b.after = dst;
		if (a == null)
			tail = dst;
		else
			a.before = dst;
	}

	// overrides of HashMap hook methods

	// Resets superclass state and clears the linked list (used by clone/deserialization).
	void reinitialize() {
		super.reinitialize();
		head = tail = null;
	}

	// Overridden to create a LinkedHashMap.Entry instead of a plain node,
	// appending it to the end of the iteration-order list.
	Node<K,V> newNode(int hash, K key, V value, Node<K,V> e) {
		LinkedHashMap.Entry<K,V> p =
				new LinkedHashMap.Entry<K,V>(hash, key, value, e);
		linkNodeLast(p);
		return p;
	}

	// Overridden to preserve this class's doubly linked list: the replacement
	// node takes over the replaced node's position in the list.
	Node<K,V> replacementNode(Node<K,V> p, Node<K,V> next) {
		LinkedHashMap.Entry<K,V> q = (LinkedHashMap.Entry<K,V>)p;
		LinkedHashMap.Entry<K,V> t =
				new LinkedHashMap.Entry<K,V>(q.hash, q.key, q.value, next);
		transferLinks(q, t);
		return t;
	}

	// Overridden to append newly created tree nodes to the iteration-order list.
	TreeNode<K,V> newTreeNode(int hash, K key, V value, Node<K,V> next) {
		TreeNode<K,V> p = new TreeNode<K,V>(hash, key, value, next);
		linkNodeLast(p);
		return p;
	}

	// Overridden to preserve list position when a node is converted to tree form.
	TreeNode<K,V> replacementTreeNode(Node<K,V> p, Node<K,V> next) {
		LinkedHashMap.Entry<K,V> q = (LinkedHashMap.Entry<K,V>)p;
		TreeNode<K,V> t = new TreeNode<K,V>(q.hash, q.key, q.value, next);
		transferLinks(q, t);
		return t;
	}

	// Removes the given node from this class's doubly linked list.
	void afterNodeRemoval(Node<K,V> e) { // unlink
		LinkedHashMap.Entry<K,V> p =
				(LinkedHashMap.Entry<K,V>)e, b = p.before, a = p.after;
		p.before = p.after = null;
		if (b == null)
			head = a;
		else
			b.after = a;
		if (a == null)
			tail = b;
		else
			a.before = b;
	}

	void afterNodeInsertion(boolean evict) { // possibly remove eldest
		LinkedHashMap.Entry<K,V> first;
		// With the default removeEldestEntry (which always returns false) this
		// is a no-op; subclasses override removeEldestEntry to enable eviction.
		if (evict && (first = head) != null && removeEldestEntry(first)) {
			K key = first.key;
			removeNode(hash(key), key, null, false, true);
		}
	}

	void afterNodeAccess(Node<K,V> e) { // move node to last
		LinkedHashMap.Entry<K,V> last;
		// Only reorders in access-order mode, and only if e is not already the tail.
		if (accessOrder && (last = tail) != e) {
			LinkedHashMap.Entry<K,V> p =
					(LinkedHashMap.Entry<K,V>)e, b = p.before, a = p.after;
			p.after = null;
			if (b == null)
				head = a;
			else
				b.after = a;
			if (a != null)
				a.before = b;
			else
				last = b;
			if (last == null)
				head = p;
			else {
				p.before = last;
				last.after = p;
			}
			tail = p;
			// Reordering counts as a structural modification in access-order maps.
			++modCount;
		}
	}

	// Writes entries in iteration order for serialization (called by HashMap.writeObject).
	void internalWriteEntries(java.io.ObjectOutputStream s) throws IOException {
		for (LinkedHashMap.Entry<K,V> e = head; e != null; e = e.after) {
			s.writeObject(e.key);
			s.writeObject(e.value);
		}
	}

	/**
	 * Constructs an empty insertion-ordered <tt>LinkedHashMap</tt> instance
	 * with the specified initial capacity and load factor.
	 *
	 * @param  initialCapacity the initial capacity
	 * @param  loadFactor      the load factor
	 * @throws IllegalArgumentException if the initial capacity is negative
	 *         or the load factor is nonpositive
	 */
	public LinkedHashMap(int initialCapacity, float loadFactor) {
		super(initialCapacity, loadFactor);
		accessOrder = false;
	}

	/**
	 * Constructs an empty insertion-ordered <tt>LinkedHashMap</tt> instance
	 * with the specified initial capacity and a default load factor (0.75).
	 *
	 * @param  initialCapacity the initial capacity
	 * @throws IllegalArgumentException if the initial capacity is negative
	 */
	public LinkedHashMap(int initialCapacity) {
		super(initialCapacity);
		accessOrder = false;
	}

	/**
	 * Constructs an empty insertion-ordered <tt>LinkedHashMap</tt> instance
	 * with the default initial capacity (16) and load factor (0.75).
	 */
	public LinkedHashMap() {
		super();
		accessOrder = false;
	}

	/**
	 * Constructs an insertion-ordered <tt>LinkedHashMap</tt> instance with
	 * the same mappings as the specified map.  The <tt>LinkedHashMap</tt>
	 * instance is created with a default load factor (0.75) and an initial
	 * capacity sufficient to hold the mappings in the specified map.
	 *
	 * @param  m the map whose mappings are to be placed in this map
	 * @throws NullPointerException if the specified map is null
	 */
	public LinkedHashMap(Map<? extends K, ? extends V> m) {
		super();
		accessOrder = false;
		putMapEntries(m, false);
	}

	/**
	 * Constructs an empty <tt>LinkedHashMap</tt> instance with the
	 * specified initial capacity, load factor and ordering mode.
	 *
	 * @param  initialCapacity the initial capacity
	 * @param  loadFactor      the load factor
	 * @param  accessOrder     the ordering mode - <tt>true</tt> for
	 *         access-order, <tt>false</tt> for insertion-order
	 * @throws IllegalArgumentException if the initial capacity is negative
	 *         or the load factor is nonpositive
	 */
	public LinkedHashMap(int initialCapacity,
	                     float loadFactor,
	                     boolean accessOrder) {
		super(initialCapacity, loadFactor);
		this.accessOrder = accessOrder;
	}


	/**
	 * Returns <tt>true</tt> if this map maps one or more keys to the
	 * specified value.
	 *
	 * <p>Traverses the doubly linked list rather than the hash table, so the
	 * cost is proportional to size rather than capacity.
	 *
	 * @param value value whose presence in this map is to be tested
	 * @return <tt>true</tt> if this map maps one or more keys to the
	 *         specified value
	 */
	public boolean containsValue(Object value) {
		for (LinkedHashMap.Entry<K,V> e = head; e != null; e = e.after) {
			V v = e.value;
			if (v == value || (value != null && value.equals(v)))
				return true;
		}
		return false;
	}

	/**
	 * Returns the value to which the specified key is mapped,
	 * or {@code null} if this map contains no mapping for the key.
	 *
	 * <p>More formally, if this map contains a mapping from a key
	 * {@code k} to a value {@code v} such that {@code (key==null ? k==null :
	 * key.equals(k))}, then this method returns {@code v}; otherwise
	 * it returns {@code null}.  (There can be at most one such mapping.)
	 *
	 * <p>A return value of {@code null} does not <i>necessarily</i>
	 * indicate that the map contains no mapping for the key; it's also
	 * possible that the map explicitly maps the key to {@code null}.
	 * The {@link #containsKey containsKey} operation may be used to
	 * distinguish these two cases.
	 *
	 * <p>In access-order mode, a successful lookup moves the entry to the
	 * end of the iteration order.
	 */
	public V get(Object key) {
		Node<K,V> e;
		if ((e = getNode(hash(key), key)) == null)
			return null;
		if (accessOrder)
			afterNodeAccess(e);
		return e.value;
	}

	/**
	 * {@inheritDoc}
	 *
	 * <p>In access-order mode, a successful lookup moves the entry to the
	 * end of the iteration order.
	 */
	public V getOrDefault(Object key, V defaultValue) {
		Node<K,V> e;
		if ((e = getNode(hash(key), key)) == null)
			return defaultValue;
		if (accessOrder)
			afterNodeAccess(e);
		return e.value;
	}

	/**
	 * {@inheritDoc}
	 */
	public void clear() {
		super.clear();
		head = tail = null;
	}

	/**
	 * Returns <tt>true</tt> if this map should remove its eldest entry.
	 * This method is invoked by <tt>put</tt> and <tt>putAll</tt> after
	 * inserting a new entry into the map.  It provides the implementor
	 * with the opportunity to remove the eldest entry each time a new one
	 * is added.  This is useful if the map represents a cache: it allows
	 * the map to reduce memory consumption by deleting stale entries.
	 *
	 * <p>Sample use: this override will allow the map to grow up to 100
	 * entries and then delete the eldest entry each time a new entry is
	 * added, maintaining a steady state of 100 entries.
	 * <pre>
	 *     private static final int MAX_ENTRIES = 100;
	 *
	 *     protected boolean removeEldestEntry(Map.Entry eldest) {
	 *        return size() &gt; MAX_ENTRIES;
	 *     }
	 * </pre>
	 *
	 * <p>This method typically does not modify the map in any way,
	 * instead allowing the map to modify itself as directed by its
	 * return value.  It <i>is</i> permitted for this method to modify
	 * the map directly, but if it does so, it <i>must</i> return
	 * <tt>false</tt> (indicating that the map should not attempt any
	 * further modification).  The effects of returning <tt>true</tt>
	 * after modifying the map from within this method are unspecified.
	 *
	 * <p>This implementation merely returns <tt>false</tt> (so that this
	 * map acts like a normal map - the eldest element is never removed).
	 *
	 * @param    eldest The least recently inserted entry in the map, or if
	 *           this is an access-ordered map, the least recently accessed
	 *           entry.  This is the entry that will be removed if this
	 *           method returns <tt>true</tt>.  If the map was empty prior
	 *           to the <tt>put</tt> or <tt>putAll</tt> invocation resulting
	 *           in this invocation, this will be the entry that was just
	 *           inserted; in other words, if the map contains a single
	 *           entry, the eldest entry is also the newest.
	 * @return   <tt>true</tt> if the eldest entry should be removed
	 *           from the map; <tt>false</tt> if it should be retained.
	 */
	protected boolean removeEldestEntry(Map.Entry<K,V> eldest) {
		return false;
	}

	/**
	 * Returns a {@link Set} view of the keys contained in this map.
	 * The set is backed by the map, so changes to the map are
	 * reflected in the set, and vice-versa.  If the map is modified
	 * while an iteration over the set is in progress (except through
	 * the iterator's own <tt>remove</tt> operation), the results of
	 * the iteration are undefined.  The set supports element removal,
	 * which removes the corresponding mapping from the map, via the
	 * <tt>Iterator.remove</tt>, <tt>Set.remove</tt>,
	 * <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt>
	 * operations.  It does not support the <tt>add</tt> or <tt>addAll</tt>
	 * operations.
	 * Its {@link Spliterator} typically provides faster sequential
	 * performance but much poorer parallel performance than that of
	 * {@code HashMap}.
	 *
	 * @return a set view of the keys contained in this map
	 */
	public Set<K> keySet() {
		// Lazily created view, cached in the (inherited) keySet field.
		Set<K> ks = keySet;
		if (ks == null) {
			ks = new LinkedKeySet();
			keySet = ks;
		}
		return ks;
	}

	// Key-set view that iterates in linked-list (encounter) order.
	final class LinkedKeySet extends AbstractSet<K> {
		public final int size()                 { return size; }
		public final void clear()               { LinkedHashMap.this.clear(); }
		public final Iterator<K> iterator() {
			return new LinkedKeyIterator();
		}
		public final boolean contains(Object o) { return containsKey(o); }
		public final boolean remove(Object key) {
			return removeNode(hash(key), key, null, false, true) != null;
		}
		public final Spliterator<K> spliterator()  {
			return Spliterators.spliterator(this, Spliterator.SIZED |
					Spliterator.ORDERED |
					Spliterator.DISTINCT);
		}
		public final void forEach(Consumer<? super K> action) {
			if (action == null)
				throw new NullPointerException();
			// Fail fast if the action structurally modifies the map.
			int mc = modCount;
			for (LinkedHashMap.Entry<K,V> e = head; e != null; e = e.after)
				action.accept(e.key);
			if (modCount != mc)
				throw new ConcurrentModificationException();
		}
	}

	/**
	 * Returns a {@link Collection} view of the values contained in this map.
	 * The collection is backed by the map, so changes to the map are
	 * reflected in the collection, and vice-versa.  If the map is
	 * modified while an iteration over the collection is in progress
	 * (except through the iterator's own <tt>remove</tt> operation),
	 * the results of the iteration are undefined.  The collection
	 * supports element removal, which removes the corresponding
	 * mapping from the map, via the <tt>Iterator.remove</tt>,
	 * <tt>Collection.remove</tt>, <tt>removeAll</tt>,
	 * <tt>retainAll</tt> and <tt>clear</tt> operations.  It does not
	 * support the <tt>add</tt> or <tt>addAll</tt> operations.
	 * Its {@link Spliterator} typically provides faster sequential
	 * performance but much poorer parallel performance than that of
	 * {@code HashMap}.
	 *
	 * @return a view of the values contained in this map
	 */
	public Collection<V> values() {
		// Lazily created view, cached in the (inherited) values field.
		Collection<V> vs = values;
		if (vs == null) {
			vs = new LinkedValues();
			values = vs;
		}
		return vs;
	}

	// Values view that iterates in linked-list (encounter) order.
	final class LinkedValues extends AbstractCollection<V> {
		public final int size()                 { return size; }
		public final void clear()               { LinkedHashMap.this.clear(); }
		public final Iterator<V> iterator() {
			return new LinkedValueIterator();
		}
		public final boolean contains(Object o) { return containsValue(o); }
		public final Spliterator<V> spliterator() {
			return Spliterators.spliterator(this, Spliterator.SIZED |
					Spliterator.ORDERED);
		}
		public final void forEach(Consumer<? super V> action) {
			if (action == null)
				throw new NullPointerException();
			// Fail fast if the action structurally modifies the map.
			int mc = modCount;
			for (LinkedHashMap.Entry<K,V> e = head; e != null; e = e.after)
				action.accept(e.value);
			if (modCount != mc)
				throw new ConcurrentModificationException();
		}
	}

	/**
	 * Returns a {@link Set} view of the mappings contained in this map.
	 * The set is backed by the map, so changes to the map are
	 * reflected in the set, and vice-versa.  If the map is modified
	 * while an iteration over the set is in progress (except through
	 * the iterator's own <tt>remove</tt> operation, or through the
	 * <tt>setValue</tt> operation on a map entry returned by the
	 * iterator) the results of the iteration are undefined.  The set
	 * supports element removal, which removes the corresponding
	 * mapping from the map, via the <tt>Iterator.remove</tt>,
	 * <tt>Set.remove</tt>, <tt>removeAll</tt>, <tt>retainAll</tt> and
	 * <tt>clear</tt> operations.  It does not support the
	 * <tt>add</tt> or <tt>addAll</tt> operations.
	 * Its {@link Spliterator} typically provides faster sequential
	 * performance but much poorer parallel performance than that of
	 * {@code HashMap}.
	 *
	 * @return a set view of the mappings contained in this map
	 */
	public Set<Map.Entry<K,V>> entrySet() {
		Set<Map.Entry<K,V>> es;
		return (es = entrySet) == null ? (entrySet = new LinkedEntrySet()) : es;
	}

	// Entry-set view that iterates in linked-list (encounter) order.
	final class LinkedEntrySet extends AbstractSet<Map.Entry<K,V>> {
		public final int size()                 { return size; }
		public final void clear()               { LinkedHashMap.this.clear(); }
		public final Iterator<Map.Entry<K,V>> iterator() {
			return new LinkedEntryIterator();
		}
		public final boolean contains(Object o) {
			if (!(o instanceof Map.Entry))
				return false;
			Map.Entry<?,?> e = (Map.Entry<?,?>) o;
			Object key = e.getKey();
			Node<K,V> candidate = getNode(hash(key), key);
			return candidate != null && candidate.equals(e);
		}
		public final boolean remove(Object o) {
			if (o instanceof Map.Entry) {
				Map.Entry<?,?> e = (Map.Entry<?,?>) o;
				Object key = e.getKey();
				Object value = e.getValue();
				// matchValue=true: only remove when both key and value match.
				return removeNode(hash(key), key, value, true, true) != null;
			}
			return false;
		}
		public final Spliterator<Map.Entry<K,V>> spliterator() {
			return Spliterators.spliterator(this, Spliterator.SIZED |
					Spliterator.ORDERED |
					Spliterator.DISTINCT);
		}
		public final void forEach(Consumer<? super Map.Entry<K,V>> action) {
			if (action == null)
				throw new NullPointerException();
			// Fail fast if the action structurally modifies the map.
			int mc = modCount;
			for (LinkedHashMap.Entry<K,V> e = head; e != null; e = e.after)
				action.accept(e);
			if (modCount != mc)
				throw new ConcurrentModificationException();
		}
	}

	// Map overrides

	// Applies the action to each mapping in encounter order, failing fast on
	// concurrent structural modification.
	public void forEach(BiConsumer<? super K, ? super V> action) {
		if (action == null)
			throw new NullPointerException();
		int mc = modCount;
		for (LinkedHashMap.Entry<K,V> e = head; e != null; e = e.after)
			action.accept(e.key, e.value);
		if (modCount != mc)
			throw new ConcurrentModificationException();
	}

	// Replaces each value with the function's result, in encounter order,
	// failing fast on concurrent structural modification.
	public void replaceAll(BiFunction<? super K, ? super V, ? extends V> function) {
		if (function == null)
			throw new NullPointerException();
		int mc = modCount;
		for (LinkedHashMap.Entry<K,V> e = head; e != null; e = e.after)
			e.value = function.apply(e.key, e.value);
		if (modCount != mc)
			throw new ConcurrentModificationException();
	}

	// Iterators

	// Base iterator: walks the doubly linked list from head, so iteration
	// cost is proportional to size, not capacity.
	abstract class LinkedHashIterator {
		LinkedHashMap.Entry<K,V> next;
		LinkedHashMap.Entry<K,V> current;
		int expectedModCount;	// for fail-fast detection

		LinkedHashIterator() {
			next = head;
			expectedModCount = modCount;
			current = null;
		}

		public final boolean hasNext() {
			return next != null;
		}

		final LinkedHashMap.Entry<K,V> nextNode() {
			LinkedHashMap.Entry<K,V> e = next;
			if (modCount != expectedModCount)
				throw new ConcurrentModificationException();
			if (e == null)
				throw new NoSuchElementException();
			current = e;
			next = e.after;
			return e;
		}

		public final void remove() {
			Node<K,V> p = current;
			if (p == null)
				throw new IllegalStateException();
			if (modCount != expectedModCount)
				throw new ConcurrentModificationException();
			current = null;
			K key = p.key;
			// movable=false: don't disturb traversal order while removing.
			removeNode(hash(key), key, null, false, false);
			// Resync so this iterator's own removal isn't seen as concurrent.
			expectedModCount = modCount;
		}
	}

	final class LinkedKeyIterator extends LinkedHashIterator
			implements Iterator<K> {
		public final K next() { return nextNode().getKey(); }
	}

	final class LinkedValueIterator extends LinkedHashIterator
			implements Iterator<V> {
		public final V next() { return nextNode().value; }
	}

	final class LinkedEntryIterator extends LinkedHashIterator
			implements Iterator<Map.Entry<K,V>> {
		public final Map.Entry<K,V> next() { return nextNode(); }
	}


}
