/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * ORACLE PROPRIETARY/CONFIDENTIAL. Use is subject to license terms.
 *
 *
 *
 *
 *
 *
 *
 *
 *
 *
 *
 *
 *
 *
 *
 *
 *
 *
 *
 *
 */

package com.iskech.mycollection.map;

import java.io.IOException;
import java.io.InvalidObjectException;
import java.io.Serializable;
import java.lang.reflect.ParameterizedType;
import java.lang.reflect.Type;
import java.util.*;
import java.util.function.BiConsumer;
import java.util.function.BiFunction;
import java.util.function.Consumer;
import java.util.function.Function;

/**
 * Hash table based implementation of the <tt>Map</tt> interface. This implementation provides all
 * of the optional map operations, and permits <tt>null</tt> values and the <tt>null</tt> key. (The
 * <tt>MyHashMap</tt> class is roughly equivalent to <tt>Hashtable</tt>, except that it is
 * unsynchronized and permits nulls.) This class makes no guarantees as to the order of the map; in
 * particular, it does not guarantee that the order will remain constant over time.
 *
 * <p>This implementation provides constant-time performance for the basic operations (<tt>get</tt>
 * and <tt>put</tt>), assuming the hash function disperses the elements properly among the buckets.
 * Iteration over collection views requires time proportional to the "capacity" of the
 * <tt>MyHashMap</tt> instance (the number of buckets) plus its size (the number of key-value
 * mappings). Thus, it's very important not to set the initial capacity too high (or the load factor
 * too low) if iteration performance is important.
 *
 * <p>An instance of <tt>MyHashMap</tt> has two parameters that affect its performance: <i>initial
 * capacity</i> and <i>load factor</i>. The <i>capacity</i> is the number of buckets in the hash
 * table, and the initial capacity is simply the capacity at the time the hash table is created. The
 * <i>load factor</i> is a measure of how full the hash table is allowed to get before its capacity
 * is automatically increased. When the number of entries in the hash table exceeds the product of
 * the load factor and the current capacity, the hash table is <i>rehashed</i> (that is, internal
 * data structures are rebuilt) so that the hash table has approximately twice the number of
 * buckets.
 *
 * <p>As a general rule, the default load factor (.75) offers a good tradeoff between time and space
 * costs. Higher values decrease the space overhead but increase the lookup cost (reflected in most
 * of the operations of the <tt>MyHashMap</tt> class, including <tt>get</tt> and <tt>put</tt>). The
 * expected number of entries in the map and its load factor should be taken into account when
 * setting its initial capacity, so as to minimize the number of rehash operations. If the initial
 * capacity is greater than the maximum number of entries divided by the load factor, no rehash
 * operations will ever occur.
 *
 * <p>If many mappings are to be stored in a <tt>MyHashMap</tt> instance, creating it with a
 * sufficiently large capacity will allow the mappings to be stored more efficiently than letting it
 * perform automatic rehashing as needed to grow the table. Note that using many keys with the same
 * {@code hashCode()} is a sure way to slow down performance of any hash table. To ameliorate
 * impact, when keys are {@link Comparable}, this class may use comparison order among keys to help
 * break ties.
 *
 * <p><strong>Note that this implementation is not synchronized.</strong> If multiple threads access
 * a hash map concurrently, and at least one of the threads modifies the map structurally, it
 * <i>must</i> be synchronized externally. (A structural modification is any operation that adds or
 * deletes one or more mappings; merely changing the value associated with a key that an instance
 * already contains is not a structural modification.) This is typically accomplished by
 * synchronizing on some object that naturally encapsulates the map. If no such object exists, the
 * map should be "wrapped" using the {@link Collections#synchronizedMap Collections.synchronizedMap}
 * method. This is best done at creation time, to prevent accidental unsynchronized access to the
 * map:
 *
 * <pre>
 *   Map m = Collections.synchronizedMap(new MyHashMap(...));</pre>
 *
 * <p>The iterators returned by all of this class's "collection view methods" are <i>fail-fast</i>:
 * if the map is structurally modified at any time after the iterator is created, in any way except
 * through the iterator's own <tt>remove</tt> method, the iterator will throw a {@link
 * ConcurrentModificationException}. Thus, in the face of concurrent modification, the iterator
 * fails quickly and cleanly, rather than risking arbitrary, non-deterministic behavior at an
 * undetermined time in the future.
 *
 * <p>Note that the fail-fast behavior of an iterator cannot be guaranteed as it is, generally
 * speaking, impossible to make any hard guarantees in the presence of unsynchronized concurrent
 * modification. Fail-fast iterators throw <tt>ConcurrentModificationException</tt> on a best-effort
 * basis. Therefore, it would be wrong to write a program that depended on this exception for its
 * correctness: <i>the fail-fast behavior of iterators should be used only to detect bugs.</i>
 *
 * <p>This class is a member of the <a href="{@docRoot}/../technotes/guides/collections/index.html">
 * Java Collections Framework</a>.
 *
 * @param <K> the type of keys maintained by this map
 * @param <V> the type of mapped values
 * @author Doug Lea
 * @author Josh Bloch
 * @author Arthur van Hoff
 * @author Neal Gafter
 * @see Object#hashCode()
 * @see Collection
 * @see Map
 * @see TreeMap
 * @see Hashtable
 * @since 1.2
 */
public class MyHashMap<K, V> extends MyAbstractMap<K, V>
    implements Map<K, V>, Cloneable, Serializable {

  private static final long serialVersionUID = 362498820763181265L;

  /*
   * Implementation notes.
   *
   * This map usually acts as a binned (bucketed) hash table, but
   * when bins get too large, they are transformed into bins of
   * TreeNodes, each structured similarly to those in
   * java.util.TreeMap. Most methods try to use normal bins, but
   * relay to TreeNode methods when applicable (simply by checking
   * instanceof a node).  Bins of TreeNodes may be traversed and
   * used like any others, but additionally support faster lookup
   * when overpopulated. However, since the vast majority of bins in
   * normal use are not overpopulated, checking for existence of
   * tree bins may be delayed in the course of table methods.
   *
   * Tree bins (i.e., bins whose elements are all TreeNodes) are
   * ordered primarily by hashCode, but in the case of ties, if two
   * elements are of the same "class C implements Comparable<C>",
   * type then their compareTo method is used for ordering. (We
   * conservatively check generic types via reflection to validate
   * this -- see method comparableClassFor).  The added complexity
   * of tree bins is worthwhile in providing worst-case O(log n)
   * operations when keys either have distinct hashes or are
   * orderable, Thus, performance degrades gracefully under
   * accidental or malicious usages in which hashCode() methods
   * return values that are poorly distributed, as well as those in
   * which many keys share a hashCode, so long as they are also
   * Comparable. (If neither of these apply, we may waste about a
   * factor of two in time and space compared to taking no
   * precautions. But the only known cases stem from poor user
   * programming practices that are already so slow that this makes
   * little difference.)
   *
   * Because TreeNodes are about twice the size of regular nodes, we
   * use them only when bins contain enough nodes to warrant use
   * (see TREEIFY_THRESHOLD). And when they become too small (due to
   * removal or resizing) they are converted back to plain bins.  In
   * usages with well-distributed user hashCodes, tree bins are
   * rarely used.  Ideally, under random hashCodes, the frequency of
   * nodes in bins follows a Poisson distribution
   * (http://en.wikipedia.org/wiki/Poisson_distribution) with a
   * parameter of about 0.5 on average for the default resizing
   * threshold of 0.75, although with a large variance because of
   * resizing granularity. Ignoring variance, the expected
   * occurrences of list size k are (exp(-0.5) * pow(0.5, k) /
   * factorial(k)). The first values are:
   *
   * 0:    0.60653066
   * 1:    0.30326533
   * 2:    0.07581633
   * 3:    0.01263606
   * 4:    0.00157952
   * 5:    0.00015795
   * 6:    0.00001316
   * 7:    0.00000094
   * 8:    0.00000006
   * more: less than 1 in ten million
   *
   * The root of a tree bin is normally its first node.  However,
   * sometimes (currently only upon Iterator.remove), the root might
   * be elsewhere, but can be recovered following parent links
   * (method TreeNode.root()).
   *
   * All applicable internal methods accept a hash code as an
   * argument (as normally supplied from a public method), allowing
   * them to call each other without recomputing user hashCodes.
   * Most internal methods also accept a "tab" argument, that is
   * normally the current table, but may be a new or old one when
   * resizing or converting.
   *
   * When bin lists are treeified, split, or untreeified, we keep
   * them in the same relative access/traversal order (i.e., field
   * Node.next) to better preserve locality, and to slightly
   * simplify handling of splits and traversals that invoke
   * iterator.remove. When using comparators on insertion, to keep a
   * total ordering (or as close as is required here) across
   * rebalancings, we compare classes and identityHashCodes as
   * tie-breakers.
   *
   * The use and transitions among plain vs tree modes is
   * complicated by the existence of subclass MyLinkedHashMap. See
   * below for hook methods defined to be invoked upon insertion,
   * removal and access that allow MyLinkedHashMap internals to
   * otherwise remain independent of these mechanics. (This also
   * requires that a map instance be passed to some utility methods
   * that may create new nodes.)
   *
   * The concurrent-programming-like SSA-based coding style helps
   * avoid aliasing errors amid all of the twisty pointer operations.
   */

  /** The default initial capacity - MUST be a power of two. */
  static final int DEFAULT_INITIAL_CAPACITY = 1 << 4; // aka 16

  /**
   * The maximum capacity, used if a higher value is implicitly specified by either of the
   * constructors with arguments. MUST be a power of two <= 1<<30.
   */
  static final int MAXIMUM_CAPACITY = 1 << 30;

  /** The load factor used when none specified in constructor. */
  static final float DEFAULT_LOAD_FACTOR = 0.75f;

  /**
   * The bin count threshold for using a tree rather than list for a bin. Bins are converted to
   * trees when adding an element to a bin with at least this many nodes. The value must be greater
   * than 2 and should be at least 8 to mesh with assumptions in tree removal about conversion back
   * to plain bins upon shrinkage.
   */
  static final int TREEIFY_THRESHOLD = 8;

  /**
   * The bin count threshold for untreeifying a (split) bin during a resize operation. Should be
   * less than TREEIFY_THRESHOLD, and at most 6 to mesh with shrinkage detection under removal.
   */
  static final int UNTREEIFY_THRESHOLD = 6;

  /**
   * The smallest table capacity for which bins may be treeified. (Otherwise the table is resized if
   * too many nodes in a bin.) Should be at least 4 * TREEIFY_THRESHOLD to avoid conflicts between
   * resizing and treeification thresholds.
   */
  static final int MIN_TREEIFY_CAPACITY = 64;

  /**
   * Basic hash bin node, used for most entries. (See below for TreeNode subclass, and in
   * MyLinkedHashMap for its Entry subclass.)
   *
   * <p>Nodes in the same bucket are singly linked through {@link #next}; {@link #hash} caches the
   * spread hash so resize/lookup never recompute the key's hashCode.
   */
  static class Node<K, V> implements Entry<K, V> {
    final int hash; // spread hash of the key, fixed at insertion time
    final K key;
    V value;
    Node<K, V> next; // next node in this bucket's chain, or null

    Node(int hash, K key, V value, Node<K, V> next) {
      this.hash = hash;
      this.key = key;
      this.value = value;
      this.next = next;
    }

    public final K getKey() {
      return key;
    }

    public final V getValue() {
      return value;
    }

    public final String toString() {
      return key + "=" + value;
    }

    // Entry hash per the Map.Entry contract: key hash XOR value hash (null-safe).
    public final int hashCode() {
      return Objects.hashCode(key) ^ Objects.hashCode(value);
    }

    /** Replaces this entry's value and returns the previous one. */
    public final V setValue(V newValue) {
      V oldValue = value;
      value = newValue;
      return oldValue;
    }

    // Equal to any Map.Entry with an equal key and an equal value (null-safe on both).
    public final boolean equals(Object o) {
      if (o == this) return true;
      if (o instanceof Map.Entry) {
        Entry<?, ?> e = (Entry<?, ?>) o;
        if (Objects.equals(key, e.getKey()) && Objects.equals(value, e.getValue())) return true;
      }
      return false;
    }
  }

  /* ---------------- Static utilities -------------- */

  /**
   * Spreads (XORs) the higher bits of {@code key.hashCode()} into the lower bits. Because the
   * table uses power-of-two masking, hash sets varying only above the current mask would always
   * collide; folding the top 16 bits downward cheaply reduces that systematic collision while
   * still letting tree bins absorb the worst cases. A null key always hashes to 0.
   */
  static final int hash(Object key) {
    if (key == null) {
      return 0; // null key is always stored in bucket 0
    }
    int h = key.hashCode();
    return h ^ (h >>> 16); // fold high half into low half
  }

  /**
   * Returns x's Class if it directly declares "class C implements Comparable<C>", else null.
   *
   * <p>Used by tree bins to decide whether compareTo may break hash ties; the generic interface
   * is checked reflectively so that, e.g., a class implementing {@code Comparable<SomethingElse>}
   * is rejected.
   */
  static Class<?> comparableClassFor(Object x) {
    if (!(x instanceof Comparable)) {
      return null;
    }
    Class<?> c = x.getClass();
    if (c == String.class) {
      return c; // the overwhelmingly common case; skip the reflection walk
    }
    Type[] interfaces = c.getGenericInterfaces();
    if (interfaces != null) {
      for (Type t : interfaces) {
        if (!(t instanceof ParameterizedType)) {
          continue;
        }
        ParameterizedType p = (ParameterizedType) t;
        if (p.getRawType() != Comparable.class) {
          continue;
        }
        Type[] args = p.getActualTypeArguments();
        if (args != null && args.length == 1 && args[0] == c) {
          return c; // declares Comparable<C> for its own class C
        }
      }
    }
    return null;
  }

  /**
   * Returns {@code k.compareTo(x)} if x is non-null and of exactly class kc (k's screened
   * comparable class), else 0. The class check makes the unchecked cast safe at runtime.
   */
  @SuppressWarnings({"rawtypes", "unchecked"}) // cast guarded by the kc class check
  static int compareComparables(Class<?> kc, Object k, Object x) {
    if (x == null || x.getClass() != kc) {
      return 0;
    }
    return ((Comparable) k).compareTo(x);
  }

  /**
   * Returns the smallest power of two >= {@code cap}, clamped to [1, MAXIMUM_CAPACITY].
   *
   * <p>Starting from {@code cap - 1} (so an exact power of two maps to itself), OR-smearing by
   * 1, 2, 4, 8 and 16 copies the highest set bit into every lower position, producing a value of
   * the form 2^k - 1; adding one yields the power of two.
   */
  static final int tableSizeFor(int cap) {
    int n = cap - 1;
    for (int shift = 1; shift <= 16; shift <<= 1) {
      n |= n >>> shift;
    }
    if (n < 0) {
      return 1; // cap <= 0: the smear kept the sign bit, so floor the result at 1
    }
    return (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
  }

  /* ---------------- Fields -------------- */

  /**
   * The table, initialized on first use, and resized as necessary. When allocated, length is always
   * a power of two. (We also tolerate length zero in some operations to allow bootstrapping
   * mechanics that are currently not needed.)
   */
  transient Node<K, V>[] table;

  /** Holds cached entrySet(). Note that MyAbstractMap fields are used for keySet() and values(). */
  transient Set<Entry<K, V>> entrySet;

  /** The number of key-value mappings contained in this map. */
  transient int size;

  /**
   * The number of times this MyHashMap has been structurally modified Structural modifications are
   * those that change the number of mappings in the MyHashMap or otherwise modify its internal
   * structure (e.g., rehash). This field is used to make iterators on Collection-views of the
   * MyHashMap fail-fast. (See ConcurrentModificationException).
   */
  transient int modCount;

  /**
   * The next size value at which to resize (capacity * load factor).
   *
   * @serial
   */
  // (The javadoc description is true upon serialization.
  // Additionally, if the table array has not been allocated, this
  // field holds the initial array capacity, or zero signifying
  // DEFAULT_INITIAL_CAPACITY.)
  int threshold;

  /**
   * The load factor for the hash table.
   *
   * @serial
   */
  final float loadFactor;

  /* ---------------- Public operations -------------- */

  /**
   * Constructs an empty <tt>MyHashMap</tt> with the specified initial capacity and load factor.
   *
   * <p>The table itself is not allocated here; the requested capacity (rounded up to a power of
   * two, capped at MAXIMUM_CAPACITY) is parked in {@code threshold} until first insertion.
   *
   * @param initialCapacity the initial capacity
   * @param loadFactor the load factor
   * @throws IllegalArgumentException if the initial capacity is negative or the load factor is
   *     nonpositive
   */
  public MyHashMap(int initialCapacity, float loadFactor) {
    if (initialCapacity < 0) {
      throw new IllegalArgumentException("Illegal initial capacity: " + initialCapacity);
    }
    if (loadFactor <= 0 || Float.isNaN(loadFactor)) {
      throw new IllegalArgumentException("Illegal load factor: " + loadFactor);
    }
    this.loadFactor = loadFactor;
    this.threshold = tableSizeFor(Math.min(initialCapacity, MAXIMUM_CAPACITY));
  }

  /**
   * Constructs an empty <tt>MyHashMap</tt> with the specified initial capacity and the default load
   * factor (0.75). Delegates to {@link #MyHashMap(int, float)}.
   *
   * @param initialCapacity the initial capacity.
   * @throws IllegalArgumentException if the initial capacity is negative.
   */
  public MyHashMap(int initialCapacity) {
    this(initialCapacity, DEFAULT_LOAD_FACTOR);
  }

  /**
   * Constructs an empty <tt>MyHashMap</tt> with the default initial capacity (16) and the default
   * load factor (0.75). The backing table is allocated lazily on first insertion (see putVal /
   * resize); threshold stays 0 here, which resize interprets as "use the defaults".
   */
  public MyHashMap() {
    this.loadFactor = DEFAULT_LOAD_FACTOR; // all other fields defaulted
  }

  /**
   * Constructs a new <tt>MyHashMap</tt> with the same mappings as the specified <tt>Map</tt>. The
   * <tt>MyHashMap</tt> is created with default load factor (0.75) and an initial capacity
   * sufficient to hold the mappings in the specified <tt>Map</tt>.
   *
   * @param m the map whose mappings are to be placed in this map
   * @throws NullPointerException if the specified map is null
   */
  public MyHashMap(Map<? extends K, ? extends V> m) {
    this.loadFactor = DEFAULT_LOAD_FACTOR;
    // evict=false: we are in "creation mode" (relayed to afterNodeInsertion).
    putMapEntries(m, false);
  }

  /**
   * Implements Map.putAll and the Map-copying constructor.
   *
   * <p>Pre-sizes the (possibly not yet allocated) table for {@code m.size()} entries so the bulk
   * insert below triggers at most one resize, then inserts every entry via putVal.
   *
   * @param m the map
   * @param evict false when initially constructing this map, else true (relayed to method
   *     afterNodeInsertion).
   */
  final void putMapEntries(Map<? extends K, ? extends V> m, boolean evict) {
    int incoming = m.size();
    if (incoming <= 0) {
      return;
    }
    if (table == null) {
      // Table not yet allocated: stash a suitable capacity in threshold for resize() to use.
      float ideal = ((float) incoming / loadFactor) + 1.0F;
      int target = (ideal < (float) MAXIMUM_CAPACITY) ? (int) ideal : MAXIMUM_CAPACITY;
      if (target > threshold) {
        threshold = tableSizeFor(target);
      }
    } else if (incoming > threshold) {
      resize();
    }
    for (Entry<? extends K, ? extends V> entry : m.entrySet()) {
      K k = entry.getKey();
      putVal(hash(k), k, entry.getValue(), false, evict);
    }
  }

  /**
   * Returns the number of key-value mappings in this map.
   *
   * <p>O(1): the count is maintained incrementally by the mutating operations.
   *
   * @return the number of key-value mappings in this map
   */
  public int size() {
    return size;
  }

  /**
   * Returns <tt>true</tt> if this map contains no key-value mappings.
   *
   * @return <tt>true</tt> if this map contains no key-value mappings
   */
  public boolean isEmpty() {
    return size == 0;
  }

  /**
   * Returns the value to which the specified key is mapped, or {@code null} if this map contains no
   * mapping for the key.
   *
   * <p>More formally, if this map contains a mapping from a key {@code k} to a value {@code v} such
   * that {@code (key==null ? k==null : key.equals(k))}, then this method returns {@code v};
   * otherwise it returns {@code null}. (There can be at most one such mapping.)
   *
   * <p>A return value of {@code null} does not <i>necessarily</i> indicate that the map contains no
   * mapping for the key; the map may explicitly map the key to {@code null}. Use {@link
   * #containsKey containsKey} to distinguish the two cases.
   *
   * @see #put(Object, Object)
   */
  public V get(Object key) {
    Node<K, V> node = getNode(hash(key), key);
    return (node == null) ? null : node.value;
  }

  /**
   * Implements Map.get and related methods.
   *
   * <p>Locates the bucket via {@code (table.length - 1) & hash}, always checks the bucket head
   * first, then either delegates to the tree lookup (tree bins) or walks the linked chain. Key
   * equality is reference equality or {@code equals}, so a null key matches only by reference.
   *
   * @param hash hash for key
   * @param key the key
   * @return the node, or null if none
   */
  final Node<K, V> getNode(int hash, Object key) {
    Node<K, V>[] tab = table;
    if (tab == null || tab.length == 0) {
      return null;
    }
    Node<K, V> first = tab[(tab.length - 1) & hash];
    if (first == null) {
      return null;
    }
    K k = first.key;
    if (first.hash == hash && (k == key || (key != null && key.equals(k)))) {
      return first; // head of the bucket is the match
    }
    Node<K, V> e = first.next;
    if (e == null) {
      return null; // single-node bucket, head didn't match
    }
    if (first instanceof TreeNode) {
      return ((TreeNode<K, V>) first).getTreeNode(hash, key);
    }
    do {
      k = e.key;
      if (e.hash == hash && (k == key || (key != null && key.equals(k)))) {
        return e;
      }
      e = e.next;
    } while (e != null);
    return null;
  }

  /**
   * Returns <tt>true</tt> if this map contains a mapping for the specified key.
   *
   * <p>Unlike {@link #get}, this distinguishes "mapped to null" from "absent".
   *
   * @param key The key whose presence in this map is to be tested
   * @return <tt>true</tt> if this map contains a mapping for the specified key.
   */
  public boolean containsKey(Object key) {
    return getNode(hash(key), key) != null;
  }

  /**
   * Associates the specified value with the specified key in this map. If the map previously
   * contained a mapping for the key, the old value is replaced.
   *
   * @param key key with which the specified value is to be associated
   * @param value value to be associated with the specified key
   * @return the previous value associated with <tt>key</tt>, or <tt>null</tt> if there was no
   *     mapping for <tt>key</tt>. (A <tt>null</tt> return can also indicate that the map previously
   *     associated <tt>null</tt> with <tt>key</tt>.)
   */
  public V put(K key, V value) {
    // onlyIfAbsent=false: always overwrite; evict=true: normal (non-construction) insertion.
    return putVal(hash(key), key, value, false, true);
  }
	
  
  
  /**
   * Implements Map.put and related methods.
   *
   * <p>Lazily allocates the table, locates the bucket via {@code (cap - 1) & hash}, and then
   * either inserts a fresh node (empty bucket / end of chain), delegates to the tree insert
   * (tree bin), or overwrites the value of an existing node with an equal key. Appending makes
   * the chain long enough to cross TREEIFY_THRESHOLD triggers treeifyBin.
   *
   * @param hash hash for key
   * @param key the key
   * @param value the value to put
   * @param onlyIfAbsent if true, don't change an existing non-null value
   * @param evict if false, the table is in creation mode (relayed to afterNodeInsertion)
   * @return previous value, or null if none
   */
  final V putVal(int hash, K key, V value, boolean onlyIfAbsent, boolean evict) {
    Node<K, V>[] tab;
    int cap;
    // Lazily allocate / grow the table on first insertion.
    if ((tab = this.table) == null || (cap = tab.length) == 0) {
      cap = (tab = resize2()).length;
    }
    // Equivalent to hash % cap because cap is a power of two.
    int index = (cap - 1) & hash;
    Node<K, V> bucketHead = tab[index];
    if (bucketHead == null) {
      // Empty bucket: the new node becomes the head.
      tab[index] = new Node<>(hash, key, value, null);
    } else {
      Node<K, V> existing = null; // node whose key equals the inserted key, if any
      K k;
      if (bucketHead.hash == hash && ((k = bucketHead.key) == key || (k != null && k.equals(key)))) {
        // The bucket head itself carries the key.
        existing = bucketHead;
      } else if (bucketHead instanceof TreeNode) {
        // Tree bin: delegate; returns the clashing node or null if a new one was inserted.
        existing = ((TreeNode<K, V>) bucketHead).putTreeVal(this, tab, hash, key, value);
      } else {
        // Walk the chain looking for the key, counting nodes for the treeify decision.
        int binCount = 0;
        Node<K, V> cursor = bucketHead;
        while (cursor.next != null) {
          cursor = cursor.next;
          binCount++;
          if (cursor.hash == hash && ((k = cursor.key) == key || (k != null && k.equals(key)))) {
            existing = cursor;
            break;
          }
        }
        // BUG FIX: the original appended a new node unconditionally here. When the key was
        // found mid-chain, "cursor.next = newNode" severed the rest of the chain (losing
        // entries) and inserted a duplicate. Append only when the key is absent.
        if (existing == null) {
          cursor.next = new Node<>(hash, key, value, null);
          // cursor was the old tail, so binCount == oldChainLength - 1; the +2 accounts for
          // the head and the node just appended.
          if (binCount + 2 >= TREEIFY_THRESHOLD) {
            treeifyBin(tab, hash);
          }
        }
      }
      if (existing != null) {
        // Key already present: overwrite per onlyIfAbsent and report the old value.
        V oldValue = existing.value;
        if (!onlyIfAbsent || oldValue == null) {
          existing.value = value;
        }
        afterNodeAccess(existing); // MyLinkedHashMap hook (see implementation notes)
        return oldValue;
      }
    }
    // A structural modification happened: bump the fail-fast counter, grow if needed.
    modCount++;
    if (++size > threshold) {
      resize();
    }
    afterNodeInsertion(evict); // MyLinkedHashMap hook
    return null;
  }
	
	final Node<K,V>[] resize2() {
		Node<K,V>[] oldTab = table;
		int oldCap = (oldTab == null) ? 0 : oldTab.length;
		int oldThr = threshold;
		int newCap, newThr = 0;
		if (oldCap > 0) {
			if (oldCap >= MAXIMUM_CAPACITY) {
				threshold = Integer.MAX_VALUE;
				return oldTab;
			}
			else if ((newCap = oldCap << 1) < MAXIMUM_CAPACITY &&
					         oldCap >= DEFAULT_INITIAL_CAPACITY)
				newThr = oldThr << 1; // double threshold
		}
		else if (oldThr > 0) // initial capacity was placed in threshold
			newCap = oldThr;
		else {               // zero initial threshold signifies using defaults
			newCap = DEFAULT_INITIAL_CAPACITY;
			newThr = (int)(DEFAULT_LOAD_FACTOR * DEFAULT_INITIAL_CAPACITY);
		}
		if (newThr == 0) {
			float ft = (float)newCap * loadFactor;
			newThr = (newCap < MAXIMUM_CAPACITY && ft < (float)MAXIMUM_CAPACITY ?
					          (int)ft : Integer.MAX_VALUE);
		}
		threshold = newThr;
		@SuppressWarnings({"rawtypes","unchecked"})
		Node<K,V>[] newTab = (Node<K,V>[])new Node[newCap];
		table = newTab;
		if (oldTab != null) {
			for (int j = 0; j < oldCap; ++j) {
				Node<K,V> e;
				if ((e = oldTab[j]) != null) {
					oldTab[j] = null;
					if (e.next == null)
						newTab[e.hash & (newCap - 1)] = e;
					else if (e instanceof TreeNode)
						((TreeNode<K,V>)e).split(this, newTab, j, oldCap);
					else { // preserve order
						Node<K,V> loHead = null, loTail = null;
						Node<K,V> hiHead = null, hiTail = null;
						Node<K,V> next;
						do {
							next = e.next;
							if ((e.hash & oldCap) == 0) {
								if (loTail == null)
									loHead = e;
								else
									loTail.next = e;
								loTail = e;
							}
							else {
								if (hiTail == null)
									hiHead = e;
								else
									hiTail.next = e;
								hiTail = e;
							}
						} while ((e = next) != null);
						if (loTail != null) {
							loTail.next = null;
							newTab[j] = loHead;
						}
						if (hiTail != null) {
							hiTail.next = null;
							newTab[j + oldCap] = hiHead;
						}
					}
				}
			}
		}
		return newTab;
	}
  /**
   * Initializes or doubles table size. If null, allocates in accord with initial capacity target
   * held in field threshold. Otherwise, because we are using power-of-two expansion, the elements
   * from each bin must either stay at same index, or move with a power of two offset in the new
   * table.
   *
   * @return the table
   */
  final Node<K, V>[] resize3() {
    Node<K, V>[] oldTab = this.table;
    int oldCap = (oldTab == null) ? 0 : oldTab.length;
    int oldThr = this.threshold;
    int newCap = 0;
    int newThr = 0;
    if (oldCap > 0) {
      // Table already allocated: try to double it.
      if (oldCap >= MAXIMUM_CAPACITY) {
        // FIX: was 'oldCap > MAXIMUM_CAPACITY'; a table at exactly 1<<30 would fall through,
        // double into a negative capacity, and throw NegativeArraySizeException below.
        this.threshold = Integer.MAX_VALUE;
        return oldTab; // cannot grow any further
      }
      newCap = oldCap << 1;
      if (newCap < MAXIMUM_CAPACITY && oldCap >= DEFAULT_INITIAL_CAPACITY) {
        // FIX: was 'oldCap > DEFAULT_INITIAL_CAPACITY', which skipped doubling the
        // threshold on the first resize of a default-sized (16) table.
        newThr = oldThr << 1;
      }
    } else if (oldThr > 0) {
      // Initial capacity was parked in threshold by a sizing constructor.
      newCap = oldThr;
    } else {
      // Default-constructed map: use the default capacity and threshold (16 * 0.75 = 12).
      // FIX: was '(int) DEFAULT_LOAD_FACTOR * DEFAULT_INITIAL_CAPACITY' — the cast bound to
      // the load factor alone, truncating 0.75f to 0 and yielding a threshold of 0.
      newCap = DEFAULT_INITIAL_CAPACITY;
      newThr = (int) (DEFAULT_LOAD_FACTOR * DEFAULT_INITIAL_CAPACITY);
    }
    if (newThr == 0) {
      // Threshold not derived above: compute it from the instance load factor.
      float ft = (float) newCap * loadFactor;
      newThr =
          (newCap < MAXIMUM_CAPACITY && ft < (float) MAXIMUM_CAPACITY)
              ? (int) ft
              : Integer.MAX_VALUE;
    }
    this.threshold = newThr;
    @SuppressWarnings({"rawtypes", "unchecked"})
    Node<K, V>[] newTab = (Node<K, V>[]) new Node[newCap];
    this.table = newTab;
    if (oldTab == null) {
      // FIX: was 'if (this.size <= 0)'; whether there is anything to migrate depends on the
      // old table's existence, not on the entry count.
      return newTab;
    }
    // Migrate every bucket from the old table into the new one.
    for (int j = 0; j < oldCap; j++) {
      Node<K, V> head = oldTab[j];
      if (head == null) {
        // FIX: was 'break', which abandoned migration of every bucket after the first
        // empty one, silently dropping entries.
        continue;
      }
      oldTab[j] = null; // help GC: drop the old table's reference
      if (head instanceof TreeNode) {
        // Tree bin: the tree knows how to split itself across the two target buckets.
        ((TreeNode<K, V>) head).split(this, newTab, j, oldCap);
      } else if (head.next == null) {
        // Single node, no chain to rebuild: rehash it directly.
        newTab[head.hash & (newCap - 1)] = head;
      } else {
        // Chain: split into "lo" (same index j) and "hi" (index j + oldCap) lists, chosen by
        // the bit 'hash & oldCap', preserving relative order.
        Node<K, V> loHead = null, loTail = null;
        Node<K, V> hiHead = null, hiTail = null;
        Node<K, V> next;
        Node<K, V> e = head;
        do {
          next = e.next;
          if ((e.hash & oldCap) == 0) {
            if (loTail == null) {
              loHead = e;
            } else {
              loTail.next = e;
            }
            loTail = e;
          } else {
            if (hiTail == null) {
              hiHead = e;
            } else {
              hiTail.next = e;
            }
            hiTail = e;
          }
        } while ((e = next) != null);
        // FIX: the original loop never advanced its cursor ('currentOldNode' was not
        // reassigned to next), re-processing the same node forever.
        if (loTail != null) {
          loTail.next = null; // terminate: the old tail may still point into the hi list
          newTab[j] = loHead;
        }
        if (hiTail != null) {
          hiTail.next = null;
          newTab[j + oldCap] = hiHead;
        }
      }
    }
    return newTab;
  }

  /**
   * Initializes or doubles the table size. If the table is null, allocates in accord with the
   * initial capacity target held in the {@code threshold} field. Otherwise, because capacities are
   * powers of two, each element either stays at the same index or moves by exactly {@code oldCap}
   * in the new table.
   *
   * @return the (possibly new) table
   */
  final Node<K, V>[] resize() {
    Node<K, V>[] oldTab = table; // current backing array
    int oldCap = (oldTab == null) ? 0 : oldTab.length; // current capacity (0 before first use)
    int oldThr = threshold; // current resize threshold
    int newCap, newThr = 0; // capacity and threshold for the new table
    if (oldCap > 0) { // table already exists, so this is a real expansion
      // PS1
      if (oldCap >= MAXIMUM_CAPACITY) { // cannot grow past 2^30
        // Doubling would overflow; stop all future resizes by maxing out the threshold.
        threshold = Integer.MAX_VALUE;
        return oldTab; // keep the existing table
      }

      /*
       * Otherwise double the capacity (left shift by 1). If the doubled capacity is still in range
       * and the old capacity was at least the default (16), the threshold can simply be doubled
       * too. (oldCap >= 16 means either a larger explicit initial capacity or at least one
       * earlier resize.)
       */
      else if ((newCap = oldCap << 1) < MAXIMUM_CAPACITY && oldCap >= DEFAULT_INITIAL_CAPACITY)
        newThr = oldThr << 1; // double threshold
    }

    // PS2
    // Reached only when the table is still empty.
    // A positive threshold means the constructor stashed the requested initial capacity there,
    // so use it as the first capacity.
    else if (oldThr > 0) // initial capacity was placed in threshold
    newCap = oldThr;
    else { // zero initial threshold signifies using defaults
      // No-arg constructor and first insertion: fall back to the defaults.
      newCap = DEFAULT_INITIAL_CAPACITY; // 16
      newThr = (int) (DEFAULT_LOAD_FACTOR * DEFAULT_INITIAL_CAPACITY); // 16 * 0.75 = 12
      // (0.75 load factor: resize once three quarters of the capacity is occupied)
    }

    // newThr is still 0 on the PS2 path: derive it from the chosen capacity and load factor.
    if (newThr == 0) {
      float ft = (float) newCap * loadFactor;
      newThr =
          (newCap < MAXIMUM_CAPACITY && ft < (float) MAXIMUM_CAPACITY
              ? (int) ft
              : Integer.MAX_VALUE); // see PS2
    }
    threshold = newThr; // install the new threshold
    @SuppressWarnings({"rawtypes", "unchecked"})
    // Allocate the new array (first array on first insertion, doubled array on expansion).
    Node<K, V>[] newTab = (Node<K, V>[]) new Node[newCap];
    table = newTab; // publish the new array
    if (oldTab != null) { // expansion: every existing entry must be transferred
      for (int j = 0; j < oldCap; ++j) { // walk the old buckets
        Node<K, V> e;
        if ((e = oldTab[j]) != null) { // non-empty bucket: move its contents
          oldTab[j] = null; // drop the old reference so the old array can be collected
          if (e.next == null) // single node, no collision chain
            // PS3
            // Place it directly. The index is hash % capacity, computed as hash & (capacity - 1),
            // which is valid only because the capacity is always a power of two; tableSizeFor
            // rounds any user-supplied capacity up to the next power of two (15 -> 16, 17 -> 32).
            newTab[e.hash & (newCap - 1)] = e;

          // Otherwise this bucket holds a chain (or tree) of colliding entries; they must be
          // redistributed because doubling the capacity changes the index of some of them
          // (e.g. hash 1 and hash 17 share bucket 1 at capacity 16, but at capacity 32 hash 17
          // moves to bucket 17 while hash 1 stays at bucket 1).

          else if (e instanceof TreeNode) // tree bin: delegate to the tree split logic
          ((TreeNode<K, V>) e).split(this, newTab, j, oldCap); // discussed separately
          else { // preserve order
            Node<K, V> loHead = null, loTail = null; // chain that keeps index j ("low")
            Node<K, V> hiHead = null, hiTail = null; // chain that moves to j + oldCap ("high")
            // "Low" covers new indices 0..oldCap-1, "high" covers oldCap..newCap-1.
            Node<K, V> next;
            // Walk the chain and partition its nodes into the two new chains.
            do {
              next = e.next;
              // (e.hash & oldCap) isolates the single hash bit that becomes significant after
              // doubling: oldCap is a power of two (binary 10...0), so the AND tests exactly that
              // bit. When the bit is 0, hash & (newCap - 1) equals hash & (oldCap - 1): the index
              // is unchanged and the node belongs on the low chain.

              if ((e.hash & oldCap) == 0) {
                // PS4
                if (loTail == null) // chain is still empty
                loHead = e; // first node becomes the head
                else loTail.next = e; // otherwise append at the tail
                loTail = e; // the appended node is the new tail
              }

              // When the bit is 1 the new index is the old index plus oldCap, so the node goes on
              // the high chain (e.g. oldCap 16: hash 17 moves from index 1 to index 17).
              else { // same appending logic as PS4
                if (hiTail == null) hiHead = e;
                else hiTail.next = e;
                hiTail = e;
              }
            } while ((e = next) != null);
            if (loTail != null) { // the low chain keeps the original bucket index
              loTail.next = null; // sever any stale link left from the old chain
              newTab[j] = loHead;
            }
            if (hiTail != null) { // the high chain lands exactly oldCap slots further
              hiTail.next = null;
              newTab[j + oldCap] =
                  hiHead; // e.g. hash 17: old index 0 -> new index 16; hash 18: 1 -> 17
            }
          }
        }
      }
    }
    return newTab; // the new table
  }

  /**
   * Replaces all linked nodes in the bin at the index for the given hash unless the table is too
   * small, in which case it resizes instead.
   */
  final void treeifyBin(Node<K, V>[] tab, int hash) {
    int n;
    // Below the treeify capacity floor, growing the table is preferred over treeifying a bin.
    if (tab == null || (n = tab.length) < MIN_TREEIFY_CAPACITY) {
      resize();
      return;
    }
    int index = (n - 1) & hash;
    Node<K, V> e = tab[index];
    if (e == null) {
      return; // nothing in this bin
    }
    // Convert the chain of plain nodes into a doubly linked chain of tree nodes.
    TreeNode<K, V> head = null;
    TreeNode<K, V> tail = null;
    do {
      TreeNode<K, V> replacement = replacementTreeNode(e, null);
      if (tail == null) {
        head = replacement;
      } else {
        replacement.prev = tail;
        tail.next = replacement;
      }
      tail = replacement;
    } while ((e = e.next) != null);
    // Install the converted chain and build the red-black tree structure over it.
    tab[index] = head;
    if (head != null) {
      head.treeify(tab);
    }
  }

  /**
   * Copies all of the mappings from the specified map to this map. These mappings will replace any
   * mappings that this map had for any of the keys currently in the specified map.
   *
   * @param m mappings to be stored in this map
   * @throws NullPointerException if the specified map is null
   */
  public void putAll(Map<? extends K, ? extends V> m) {
    // Second argument is forwarded as the evict flag to putMapEntries — presumably enables
    // LinkedHashMap-style post-insertion hooks; TODO confirm against putMapEntries.
    putMapEntries(m, true);
  }

  /**
   * Removes the mapping for the specified key from this map if present.
   *
   * @param key key whose mapping is to be removed from the map
   * @return the previous value associated with <tt>key</tt>, or <tt>null</tt> if there was no
   *     mapping for <tt>key</tt>. (A <tt>null</tt> return can also indicate that the map previously
   *     associated <tt>null</tt> with <tt>key</tt>.)
   */
  @Override
  public V remove(Object key) {
    // matchValue = false: remove unconditionally; movable = true: tree bins may be restructured.
    Node<K, V> removed = removeNode(hash(key), key, null, false, true);
    return removed == null ? null : removed.value;
  }

  /**
   * Implements Map.remove and related methods.
   *
   * <p>Locates the bucket via {@code hash & (length - 1)}. If the bucket head matches the key, it
   * is the target; otherwise the chain (or tree, for TreeNode bins) is searched, remembering the
   * predecessor so the chain can be relinked. The node is unlinked only when found and, if
   * {@code matchValue} is set, only when its value equals the given one. The bookkeeping
   * ({@code modCount}, {@code size}, {@code afterNodeRemoval}) runs only on an actual removal.
   *
   * @param hash hash for key
   * @param key the key
   * @param value the value to match if matchValue, else ignored
   * @param matchValue if true only remove if value is equal
   * @param movable if false do not move other nodes while removing
   * @return the node, or null if none
   */
  final Node<K, V> removeNode(
      int hash, Object key, Object value, boolean matchValue, boolean movable) {
    Node<K, V>[] tab = this.table;
    Node<K, V> first;
    int n, index;
    if (tab != null && (n = tab.length) > 0 && (first = tab[index = (n - 1) & hash]) != null) {
      K k;
      V v;
      Node<K, V> node = null; // the node to remove, if found
      Node<K, V> prev = null; // predecessor of node within a chain (null when node is the head)
      // Key comparison allows a null key: reference equality first, then equals on a non-null key.
      if (first.hash == hash && ((k = first.key) == key || (key != null && key.equals(k)))) {
        // Head of the bucket is the target.
        node = first;
      } else if (first.next != null) {
        if (first instanceof TreeNode) {
          // Tree bin: delegate the lookup to the red-black tree.
          node = ((TreeNode<K, V>) first).getTreeNode(hash, key);
        } else {
          // Collision chain: walk it, keeping the predecessor for relinking.
          Node<K, V> e = first;
          do {
            if (e.hash == hash && ((k = e.key) == key || (key != null && key.equals(k)))) {
              node = e;
              break;
            }
            prev = e;
          } while ((e = e.next) != null);
        }
      }
      // Remove only if found and (when matchValue) the current value equals the expected one.
      if (node != null
          && (!matchValue
              || (v = node.value) == value
              || (value != null && value.equals(v)))) {
        if (node instanceof TreeNode) {
          ((TreeNode<K, V>) node).removeTreeNode(this, tab, movable);
        } else if (node == first) {
          // Unlinking the head must keep the rest of the chain, not clear the bucket.
          tab[index] = node.next;
        } else {
          prev.next = node.next;
        }
        // Structural-change bookkeeping happens only when something was actually removed.
        ++this.modCount;
        --this.size;
        this.afterNodeRemoval(node);
        return node;
      }
    }
    return null;
  }

  /** Removes all of the mappings from this map. The map will be empty after this call returns. */
  public void clear() {
    modCount++; // clearing is a structural modification even if the map was already empty
    Node<K, V>[] tab = table;
    if (tab != null && size > 0) {
      size = 0;
      Arrays.fill(tab, null); // drop every bucket so all entries become collectable
    }
  }

  /**
   * Returns <tt>true</tt> if this map maps one or more keys to the specified value.
   *
   * @param value value whose presence in this map is to be tested
   * @return <tt>true</tt> if this map maps one or more keys to the specified value
   */
  public boolean containsValue(Object value) {
    Node<K, V>[] tab = table;
    if (tab != null && size > 0) {
      // Exhaustive scan: every bucket, then every node in that bucket's chain.
      for (Node<K, V> bucket : tab) {
        for (Node<K, V> e = bucket; e != null; e = e.next) {
          V v = e.value;
          if (v == value || (value != null && value.equals(v))) {
            return true;
          }
        }
      }
    }
    return false;
  }

  /**
   * Returns a {@link Set} view of the keys contained in this map. The set is backed by the map, so
   * changes to the map are reflected in the set, and vice-versa. If the map is modified while an
   * iteration over the set is in progress (except through the iterator's own <tt>remove</tt>
   * operation), the results of the iteration are undefined. The set supports element removal, which
   * removes the corresponding mapping from the map, via the <tt>Iterator.remove</tt>,
   * <tt>Set.remove</tt>, <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt> operations. It
   * does not support the <tt>add</tt> or <tt>addAll</tt> operations.
   *
   * @return a set view of the keys contained in this map
   */
  public Set<K> keySet() {
    // Lazily create and cache the view on first use.
    Set<K> ks = keySet;
    return (ks != null) ? ks : (keySet = new KeySet());
  }

  /** Set view over this map's keys; all mutations write through to the backing map. */
  final class KeySet extends AbstractSet<K> {
    public final int size() {
      return size;
    }

    public final boolean contains(Object o) {
      return containsKey(o);
    }

    public final boolean remove(Object key) {
      // Removes the whole mapping for the key; the value is not matched.
      Node<K, V> removed = removeNode(hash(key), key, null, false, true);
      return removed != null;
    }

    public final void clear() {
      MyHashMap.this.clear();
    }

    public final Iterator<K> iterator() {
      return new KeyIterator();
    }

    public final Spliterator<K> spliterator() {
      return new KeySpliterator<>(MyHashMap.this, 0, -1, 0, 0);
    }

    public final void forEach(Consumer<? super K> action) {
      if (action == null) {
        throw new NullPointerException();
      }
      Node<K, V>[] tab = table;
      if (size > 0 && tab != null) {
        int expected = modCount;
        for (Node<K, V> bucket : tab) {
          for (Node<K, V> e = bucket; e != null; e = e.next) {
            action.accept(e.key);
          }
        }
        // Fail fast if the action structurally modified the map.
        if (modCount != expected) {
          throw new ConcurrentModificationException();
        }
      }
    }
  }

  /**
   * Returns a {@link Collection} view of the values contained in this map. The collection is backed
   * by the map, so changes to the map are reflected in the collection, and vice-versa. If the map
   * is modified while an iteration over the collection is in progress (except through the
   * iterator's own <tt>remove</tt> operation), the results of the iteration are undefined. The
   * collection supports element removal, which removes the corresponding mapping from the map, via
   * the <tt>Iterator.remove</tt>, <tt>Collection.remove</tt>, <tt>removeAll</tt>,
   * <tt>retainAll</tt> and <tt>clear</tt> operations. It does not support the <tt>add</tt> or
   * <tt>addAll</tt> operations.
   *
   * @return a view of the values contained in this map
   */
  public Collection<V> values() {
    // Lazily create and cache the view on first use.
    Collection<V> vs = values;
    return (vs != null) ? vs : (values = new Values());
  }

  /** Collection view over this map's values; all mutations write through to the backing map. */
  final class Values extends AbstractCollection<V> {
    public final int size() {
      return size;
    }

    public final boolean contains(Object o) {
      return containsValue(o);
    }

    public final void clear() {
      MyHashMap.this.clear();
    }

    public final Iterator<V> iterator() {
      return new ValueIterator();
    }

    public final Spliterator<V> spliterator() {
      return new ValueSpliterator<>(MyHashMap.this, 0, -1, 0, 0);
    }

    public final void forEach(Consumer<? super V> action) {
      if (action == null) {
        throw new NullPointerException();
      }
      Node<K, V>[] tab = table;
      if (size > 0 && tab != null) {
        int expected = modCount;
        for (Node<K, V> bucket : tab) {
          for (Node<K, V> e = bucket; e != null; e = e.next) {
            action.accept(e.value);
          }
        }
        // Fail fast if the action structurally modified the map.
        if (modCount != expected) {
          throw new ConcurrentModificationException();
        }
      }
    }
  }

  /**
   * Returns a {@link Set} view of the mappings contained in this map. The set is backed by the map,
   * so changes to the map are reflected in the set, and vice-versa. If the map is modified while an
   * iteration over the set is in progress (except through the iterator's own <tt>remove</tt>
   * operation, or through the <tt>setValue</tt> operation on a map entry returned by the iterator)
   * the results of the iteration are undefined. The set supports element removal, which removes the
   * corresponding mapping from the map, via the <tt>Iterator.remove</tt>, <tt>Set.remove</tt>,
   * <tt>removeAll</tt>, <tt>retainAll</tt> and <tt>clear</tt> operations. It does not support the
   * <tt>add</tt> or <tt>addAll</tt> operations.
   *
   * @return a set view of the mappings contained in this map
   */
  public Set<Entry<K, V>> entrySet() {
    // Lazily create and cache the view on first use.
    Set<Entry<K, V>> es = entrySet;
    if (es == null) {
      es = new EntrySet();
      entrySet = es;
    }
    return es;
  }

  /** Set view over this map's entries; all mutations write through to the backing map. */
  final class EntrySet extends AbstractSet<Entry<K, V>> {
    public final int size() {
      return size;
    }

    public final boolean contains(Object o) {
      if (!(o instanceof Map.Entry)) {
        return false;
      }
      Entry<?, ?> entry = (Entry<?, ?>) o;
      Object key = entry.getKey();
      // An entry is contained only if the stored node for the key equals the given entry
      // (key AND value must match per the Map.Entry equals contract).
      Node<K, V> candidate = getNode(hash(key), key);
      return candidate != null && candidate.equals(entry);
    }

    public final boolean remove(Object o) {
      if (!(o instanceof Map.Entry)) {
        return false;
      }
      Entry<?, ?> entry = (Entry<?, ?>) o;
      Object key = entry.getKey();
      Object value = entry.getValue();
      // matchValue = true: remove only when the stored value equals the entry's value.
      return removeNode(hash(key), key, value, true, true) != null;
    }

    public final void clear() {
      MyHashMap.this.clear();
    }

    public final Iterator<Entry<K, V>> iterator() {
      return new EntryIterator();
    }

    public final Spliterator<Entry<K, V>> spliterator() {
      return new EntrySpliterator<>(MyHashMap.this, 0, -1, 0, 0);
    }

    public final void forEach(Consumer<? super Entry<K, V>> action) {
      if (action == null) {
        throw new NullPointerException();
      }
      Node<K, V>[] tab = table;
      if (size > 0 && tab != null) {
        int expected = modCount;
        for (Node<K, V> bucket : tab) {
          for (Node<K, V> e = bucket; e != null; e = e.next) {
            action.accept(e);
          }
        }
        // Fail fast if the action structurally modified the map.
        if (modCount != expected) {
          throw new ConcurrentModificationException();
        }
      }
    }
  }

  // Overrides of JDK8 Map extension methods

  /** Returns the value mapped to {@code key}, or {@code defaultValue} when no node exists. */
  @Override
  public V getOrDefault(Object key, V defaultValue) {
    Node<K, V> e = getNode(hash(key), key);
    return (e != null) ? e.value : defaultValue;
  }

  /** Inserts the mapping only when the key is not already present; returns the existing value. */
  @Override
  public V putIfAbsent(K key, V value) {
    int h = hash(key);
    // onlyIfAbsent = true: an existing mapping for the key is left untouched.
    return putVal(h, key, value, true, true);
  }

  /** Removes the mapping only when the key is currently mapped to the given value. */
  @Override
  public boolean remove(Object key, Object value) {
    // matchValue = true: removal happens only on a value match.
    return null != removeNode(hash(key), key, value, true, true);
  }

  /** Replaces the value for {@code key} only when it is currently mapped to {@code oldValue}. */
  @Override
  public boolean replace(K key, V oldValue, V newValue) {
    Node<K, V> e = getNode(hash(key), key);
    if (e == null) {
      return false;
    }
    V current = e.value;
    if (current == oldValue || (current != null && current.equals(oldValue))) {
      e.value = newValue;
      afterNodeAccess(e);
      return true;
    }
    return false;
  }

  /** Replaces the value for {@code key} when present, returning the previous value or null. */
  @Override
  public V replace(K key, V value) {
    Node<K, V> e = getNode(hash(key), key);
    if (e == null) {
      return null;
    }
    V previous = e.value;
    e.value = value;
    afterNodeAccess(e);
    return previous;
  }

  /**
   * Computes and stores a value for {@code key} when no non-null mapping exists, using the mapping
   * function. A null result from the function leaves the map unchanged.
   */
  @Override
  public V computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction) {
    if (mappingFunction == null) throw new NullPointerException();
    int hash = hash(key);
    Node<K, V>[] tab;
    Node<K, V> first;
    int n, i;
    int binCount = 0; // nodes traversed in the chain; drives the treeify decision
    TreeNode<K, V> t = null; // non-null when the target bucket is a tree bin
    Node<K, V> old = null; // existing node for the key, if any
    // Resize up-front when over threshold or the table does not exist yet.
    if (size > threshold || (tab = table) == null || (n = tab.length) == 0)
      n = (tab = resize()).length;
    if ((first = tab[i = (n - 1) & hash]) != null) {
      if (first instanceof TreeNode) old = (t = (TreeNode<K, V>) first).getTreeNode(hash, key);
      else {
        Node<K, V> e = first;
        K k;
        do {
          if (e.hash == hash && ((k = e.key) == key || (key != null && key.equals(k)))) {
            old = e;
            break;
          }
          ++binCount;
        } while ((e = e.next) != null);
      }
      V oldValue;
      // A non-null existing value short-circuits: nothing to compute.
      if (old != null && (oldValue = old.value) != null) {
        afterNodeAccess(old);
        return oldValue;
      }
    }
    V v = mappingFunction.apply(key);
    if (v == null) {
      return null; // function declined to supply a value; map is unchanged
    } else if (old != null) {
      // Node exists but held null: just fill in the computed value.
      old.value = v;
      afterNodeAccess(old);
      return v;
    } else if (t != null) t.putTreeVal(this, tab, hash, key, v);
    else {
      // Insert at the head of the bucket chain.
      tab[i] = newNode(hash, key, v, first);
      if (binCount >= TREEIFY_THRESHOLD - 1) treeifyBin(tab, hash);
    }
    ++modCount;
    ++size;
    afterNodeInsertion(true);
    return v;
  }

  /**
   * Recomputes the value for {@code key} when a non-null mapping exists. A null result from the
   * remapping function removes the mapping; otherwise the new value replaces the old one.
   *
   * @param key key whose mapping is to be recomputed
   * @param remappingFunction function producing the new value from the key and current value
   * @return the new value, or {@code null} when the key was absent or the mapping was removed
   * @throws NullPointerException if the remapping function is null
   */
  @Override
  public V computeIfPresent(
      K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
    if (remappingFunction == null) throw new NullPointerException();
    Node<K, V> e;
    V oldValue;
    int hash = hash(key);
    // Only act when a node exists AND currently holds a non-null value.
    if ((e = getNode(hash, key)) != null && (oldValue = e.value) != null) {
      V v = remappingFunction.apply(key, oldValue);
      if (v != null) {
        e.value = v;
        afterNodeAccess(e);
        return v;
      } else removeNode(hash, key, null, false, true); // null result removes the mapping
    }
    return null;
  }

  /**
   * Computes a new mapping for {@code key} from the key and its current value (or null). A null
   * result removes any existing mapping; a non-null result is stored.
   */
  @Override
  public V compute(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
    if (remappingFunction == null) throw new NullPointerException();
    int hash = hash(key);
    Node<K, V>[] tab;
    Node<K, V> first;
    int n, i;
    int binCount = 0; // nodes traversed in the chain; drives the treeify decision
    TreeNode<K, V> t = null; // non-null when the target bucket is a tree bin
    Node<K, V> old = null; // existing node for the key, if any
    // Resize up-front when over threshold or the table does not exist yet.
    if (size > threshold || (tab = table) == null || (n = tab.length) == 0)
      n = (tab = resize()).length;
    if ((first = tab[i = (n - 1) & hash]) != null) {
      if (first instanceof TreeNode) old = (t = (TreeNode<K, V>) first).getTreeNode(hash, key);
      else {
        Node<K, V> e = first;
        K k;
        do {
          if (e.hash == hash && ((k = e.key) == key || (key != null && key.equals(k)))) {
            old = e;
            break;
          }
          ++binCount;
        } while ((e = e.next) != null);
      }
    }
    // The function always runs, with null standing in for "no current value".
    V oldValue = (old == null) ? null : old.value;
    V v = remappingFunction.apply(key, oldValue);
    if (old != null) {
      if (v != null) {
        old.value = v;
        afterNodeAccess(old);
      } else removeNode(hash, key, null, false, true); // null result removes the mapping
    } else if (v != null) {
      // No existing node: insert into the tree bin or at the head of the chain.
      if (t != null) t.putTreeVal(this, tab, hash, key, v);
      else {
        tab[i] = newNode(hash, key, v, first);
        if (binCount >= TREEIFY_THRESHOLD - 1) treeifyBin(tab, hash);
      }
      ++modCount;
      ++size;
      afterNodeInsertion(true);
    }
    return v;
  }

  /**
   * Merges {@code value} with the current mapping for {@code key}: stores {@code value} when the
   * key is absent (or mapped to null), otherwise stores the remapping function's combination of
   * the old and new values. A null combination removes the mapping.
   */
  @Override
  public V merge(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction) {
    if (value == null) throw new NullPointerException();
    if (remappingFunction == null) throw new NullPointerException();
    int hash = hash(key);
    Node<K, V>[] tab;
    Node<K, V> first;
    int n, i;
    int binCount = 0; // nodes traversed in the chain; drives the treeify decision
    TreeNode<K, V> t = null; // non-null when the target bucket is a tree bin
    Node<K, V> old = null; // existing node for the key, if any
    // Resize up-front when over threshold or the table does not exist yet.
    if (size > threshold || (tab = table) == null || (n = tab.length) == 0)
      n = (tab = resize()).length;
    if ((first = tab[i = (n - 1) & hash]) != null) {
      if (first instanceof TreeNode) old = (t = (TreeNode<K, V>) first).getTreeNode(hash, key);
      else {
        Node<K, V> e = first;
        K k;
        do {
          if (e.hash == hash && ((k = e.key) == key || (key != null && key.equals(k)))) {
            old = e;
            break;
          }
          ++binCount;
        } while ((e = e.next) != null);
      }
    }
    if (old != null) {
      V v;
      // Combine only when the old value is non-null; otherwise the new value wins directly.
      if (old.value != null) v = remappingFunction.apply(old.value, value);
      else v = value;
      if (v != null) {
        old.value = v;
        afterNodeAccess(old);
      } else removeNode(hash, key, null, false, true); // null combination removes the mapping
      return v;
    }
    // Key absent: plain insertion. (value was already checked non-null above, so this always runs.)
    if (value != null) {
      if (t != null) t.putTreeVal(this, tab, hash, key, value);
      else {
        tab[i] = newNode(hash, key, value, first);
        if (binCount >= TREEIFY_THRESHOLD - 1) treeifyBin(tab, hash);
      }
      ++modCount;
      ++size;
      afterNodeInsertion(true);
    }
    return value;
  }

  /** Applies {@code action} to every key/value pair; fails fast on structural modification. */
  @Override
  public void forEach(BiConsumer<? super K, ? super V> action) {
    if (action == null) {
      throw new NullPointerException();
    }
    Node<K, V>[] tab = table;
    if (size > 0 && tab != null) {
      int expected = modCount;
      for (Node<K, V> bucket : tab) {
        for (Node<K, V> e = bucket; e != null; e = e.next) {
          action.accept(e.key, e.value);
        }
      }
      if (modCount != expected) {
        throw new ConcurrentModificationException();
      }
    }
  }

  /** Replaces every value with {@code function(key, value)}; fails fast on structural change. */
  @Override
  public void replaceAll(BiFunction<? super K, ? super V, ? extends V> function) {
    if (function == null) {
      throw new NullPointerException();
    }
    Node<K, V>[] tab = table;
    if (size > 0 && tab != null) {
      int expected = modCount;
      for (Node<K, V> bucket : tab) {
        for (Node<K, V> e = bucket; e != null; e = e.next) {
          e.value = function.apply(e.key, e.value);
        }
      }
      if (modCount != expected) {
        throw new ConcurrentModificationException();
      }
    }
  }

  /* ------------------------------------------------------------ */
  // Cloning and serialization

  /**
   * Returns a shallow copy of this <tt>MyHashMap</tt> instance: the keys and values themselves are
   * not cloned.
   *
   * @return a shallow copy of this map
   */
  @SuppressWarnings("unchecked")
  @Override
  public Object clone() {
    MyHashMap<K, V> copy;
    try {
      copy = (MyHashMap<K, V>) super.clone();
    } catch (CloneNotSupportedException e) {
      // Cannot happen: this class is Cloneable.
      throw new InternalError(e);
    }
    // Reset the cloned instance's internal state, then repopulate it from this map.
    copy.reinitialize();
    copy.putMapEntries(this, false);
    return copy;
  }

  // These methods are also used when serializing HashSets
  /** Returns this map's configured load factor. */
  final float loadFactor() {
    return loadFactor;
  }

  /** Returns the current capacity: table length, pending threshold, or the default, in that order. */
  final int capacity() {
    if (table != null) {
      return table.length;
    }
    // No table yet: a positive threshold holds the requested initial capacity.
    return (threshold > 0) ? threshold : DEFAULT_INITIAL_CAPACITY;
  }

  /**
   * Save the state of the <tt>MyHashMap</tt> instance to a stream (i.e., serialize it).
   *
   * @param s the stream to write to
   * @throws IOException if an I/O error occurs while writing
   * @serialData The <i>capacity</i> of the MyHashMap (the length of the bucket array) is emitted
   *     (int), followed by the <i>size</i> (an int, the number of key-value mappings), followed by
   *     the key (Object) and value (Object) for each key-value mapping. The key-value mappings are
   *     emitted in no particular order.
   */
  private void writeObject(java.io.ObjectOutputStream s) throws IOException {
    int buckets = capacity();
    // Write out the threshold, loadfactor, and any hidden stuff
    s.defaultWriteObject();
    s.writeInt(buckets);
    s.writeInt(size);
    internalWriteEntries(s);
  }

  /**
   * Reconstitute the {@code MyHashMap} instance from a stream (i.e., deserialize it).
   *
   * @param s the stream to read from
   * @throws IOException if an I/O error occurs while reading
   * @throws ClassNotFoundException if a serialized key or value class cannot be found
   * @throws InvalidObjectException if the stream carries an illegal load factor or mapping count
   */
  private void readObject(java.io.ObjectInputStream s) throws IOException, ClassNotFoundException {
    // Read in the threshold (ignored), loadfactor, and any hidden stuff
    s.defaultReadObject();
    reinitialize();
    if (loadFactor <= 0 || Float.isNaN(loadFactor))
      throw new InvalidObjectException("Illegal load factor: " + loadFactor);
    s.readInt(); // Read and ignore number of buckets
    int mappings = s.readInt(); // Read number of mappings (size)
    if (mappings < 0) throw new InvalidObjectException("Illegal mappings count: " + mappings);
    else if (mappings > 0) { // (if zero, use defaults)
      // Size the table using given load factor only if within
      // range of 0.25...4.0 — an extreme serialized load factor must not blow up the capacity.
      float lf = Math.min(Math.max(0.25f, loadFactor), 4.0f);
      float fc = (float) mappings / lf + 1.0f;
      // Clamp the computed capacity between the default and the maximum, rounded up to a power
      // of two by tableSizeFor.
      int cap =
          ((fc < DEFAULT_INITIAL_CAPACITY)
              ? DEFAULT_INITIAL_CAPACITY
              : (fc >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : tableSizeFor((int) fc));
      float ft = (float) cap * lf;
      threshold =
          ((cap < MAXIMUM_CAPACITY && ft < MAXIMUM_CAPACITY) ? (int) ft : Integer.MAX_VALUE);
      @SuppressWarnings({"rawtypes", "unchecked"})
      Node<K, V>[] tab = (Node<K, V>[]) new Node[cap];
      table = tab;

      // Read the keys and values, and put the mappings in the MyHashMap
      for (int i = 0; i < mappings; i++) {
        @SuppressWarnings("unchecked")
        K key = (K) s.readObject();
        @SuppressWarnings("unchecked")
        V value = (V) s.readObject();
        putVal(hash(key), key, value, false, false);
      }
    }
  }

  /* ------------------------------------------------------------ */
  // iterators

  /** Base iterator over the table; subclasses return the key, value, or entry from nextNode(). */
  abstract class HashIterator {
    Node<K, V> next; // next entry to return
    Node<K, V> current; // current entry
    int expectedModCount; // for fast-fail
    int index; // current slot

    HashIterator() {
      expectedModCount = modCount;
      Node<K, V>[] t = table;
      current = next = null;
      index = 0;
      if (t != null && size > 0) { // advance to first entry
        do {} while (index < t.length && (next = t[index++]) == null);
      }
    }

    public final boolean hasNext() {
      return next != null;
    }

    /** Returns the next node, scanning forward across buckets; fails fast on concurrent change. */
    final Node<K, V> nextNode() {
      Node<K, V>[] t;
      Node<K, V> e = next;
      if (modCount != expectedModCount) throw new ConcurrentModificationException();
      if (e == null) throw new NoSuchElementException();
      // When the current chain is exhausted, skip ahead to the next non-empty bucket.
      if ((next = (current = e).next) == null && (t = table) != null) {
        do {} while (index < t.length && (next = t[index++]) == null);
      }
      return e;
    }

    /** Removes the entry returned by the last nextNode() call. */
    public final void remove() {
      Node<K, V> p = current;
      if (p == null) throw new IllegalStateException();
      if (modCount != expectedModCount) throw new ConcurrentModificationException();
      current = null;
      K key = p.key;
      // movable = false: do not restructure bins while iteration is in progress.
      removeNode(hash(key), key, null, false, false);
      // Removal through the iterator is legal, so resync the expected count.
      expectedModCount = modCount;
    }
  }

  /** Iterator over this map's keys; traversal is inherited from HashIterator. */
  final class KeyIterator extends HashIterator implements Iterator<K> {
    public final K next() {
      return nextNode().key;
    }
  }

  /** Iterator over this map's values; traversal is inherited from HashIterator. */
  final class ValueIterator extends HashIterator implements Iterator<V> {
    public final V next() {
      return nextNode().value;
    }
  }

  /** Iterator over this map's entries; traversal is inherited from HashIterator. */
  final class EntryIterator extends HashIterator implements Iterator<Entry<K, V>> {
    public final Entry<K, V> next() {
      return nextNode();
    }
  }

  /* ------------------------------------------------------------ */
  // spliterators

  /** State shared by the key/value/entry spliterators: traversal window and fast-fail counters. */
  static class HashMapSpliterator<K, V> {
    final MyHashMap<K, V> map;
    Node<K, V> current; // current node
    int index; // current index, modified on advance/split
    int fence; // one past last index
    int est; // size estimate
    int expectedModCount; // for comodification checks

    HashMapSpliterator(MyHashMap<K, V> m, int origin, int fence, int est, int expectedModCount) {
      this.map = m;
      this.index = origin;
      this.fence = fence;
      this.est = est;
      this.expectedModCount = expectedModCount;
    }

    /** Lazily initializes fence, estimate and mod count; a negative fence marks "not yet set". */
    final int getFence() { // initialize fence and size on first use
      int hi;
      if ((hi = fence) < 0) {
        MyHashMap<K, V> m = map;
        est = m.size;
        expectedModCount = m.modCount;
        Node<K, V>[] tab = m.table;
        hi = fence = (tab == null) ? 0 : tab.length;
      }
      return hi;
    }

    public final long estimateSize() {
      getFence(); // force init
      return (long) est;
    }
  }

  /** Spliterator over this map's keys, splitting on the bucket index range. */
  static final class KeySpliterator<K, V> extends HashMapSpliterator<K, V>
      implements Spliterator<K> {
    KeySpliterator(MyHashMap<K, V> m, int origin, int fence, int est, int expectedModCount) {
      super(m, origin, fence, est, expectedModCount);
    }

    /** Splits off the lower half of the remaining bucket range; null if too small or mid-chain. */
    public KeySpliterator<K, V> trySplit() {
      int hi = getFence(), lo = index, mid = (lo + hi) >>> 1;
      return (lo >= mid || current != null)
          ? null
          : new KeySpliterator<>(map, lo, index = mid, est >>>= 1, expectedModCount);
    }

    public void forEachRemaining(Consumer<? super K> action) {
      int i, hi, mc;
      if (action == null) throw new NullPointerException();
      MyHashMap<K, V> m = map;
      Node<K, V>[] tab = m.table;
      // Initialize the fence inline if getFence() has not run yet.
      if ((hi = fence) < 0) {
        mc = expectedModCount = m.modCount;
        hi = fence = (tab == null) ? 0 : tab.length;
      } else mc = expectedModCount;
      // (index = hi) claims the whole remaining range so this spliterator is exhausted after.
      if (tab != null
          && tab.length >= hi
          && (i = index) >= 0
          && (i < (index = hi) || current != null)) {
        Node<K, V> p = current;
        current = null;
        // Walk every chain in the claimed bucket range.
        do {
          if (p == null) p = tab[i++];
          else {
            action.accept(p.key);
            p = p.next;
          }
        } while (p != null || i < hi);
        if (m.modCount != mc) throw new ConcurrentModificationException();
      }
    }

    public boolean tryAdvance(Consumer<? super K> action) {
      int hi;
      if (action == null) throw new NullPointerException();
      Node<K, V>[] tab = map.table;
      if (tab != null && tab.length >= (hi = getFence()) && index >= 0) {
        // Advance across empty buckets until a node is found, then consume exactly one key.
        while (current != null || index < hi) {
          if (current == null) current = tab[index++];
          else {
            K k = current.key;
            current = current.next;
            action.accept(k);
            if (map.modCount != expectedModCount) throw new ConcurrentModificationException();
            return true;
          }
        }
      }
      return false;
    }

    public int characteristics() {
      // SIZED only before a split (or when the estimate still equals the map size); keys are
      // always DISTINCT.
      return (fence < 0 || est == map.size ? Spliterator.SIZED : 0) | Spliterator.DISTINCT;
    }
  }

  /** Spliterator over this map's values, splitting on the bucket index range. */
  static final class ValueSpliterator<K, V> extends HashMapSpliterator<K, V>
      implements Spliterator<V> {
    ValueSpliterator(MyHashMap<K, V> m, int origin, int fence, int est, int expectedModCount) {
      super(m, origin, fence, est, expectedModCount);
    }

    /** Splits off the lower half of the remaining bucket range; null if too small or mid-chain. */
    public ValueSpliterator<K, V> trySplit() {
      int hi = getFence(), lo = index, mid = (lo + hi) >>> 1;
      return (lo >= mid || current != null)
          ? null
          : new ValueSpliterator<>(map, lo, index = mid, est >>>= 1, expectedModCount);
    }

    public void forEachRemaining(Consumer<? super V> action) {
      int i, hi, mc;
      if (action == null) throw new NullPointerException();
      MyHashMap<K, V> m = map;
      Node<K, V>[] tab = m.table;
      // Initialize the fence inline if getFence() has not run yet.
      if ((hi = fence) < 0) {
        mc = expectedModCount = m.modCount;
        hi = fence = (tab == null) ? 0 : tab.length;
      } else mc = expectedModCount;
      // (index = hi) claims the whole remaining range so this spliterator is exhausted after.
      if (tab != null
          && tab.length >= hi
          && (i = index) >= 0
          && (i < (index = hi) || current != null)) {
        Node<K, V> p = current;
        current = null;
        // Walk every chain in the claimed bucket range.
        do {
          if (p == null) p = tab[i++];
          else {
            action.accept(p.value);
            p = p.next;
          }
        } while (p != null || i < hi);
        if (m.modCount != mc) throw new ConcurrentModificationException();
      }
    }

    public boolean tryAdvance(Consumer<? super V> action) {
      int hi;
      if (action == null) throw new NullPointerException();
      Node<K, V>[] tab = map.table;
      if (tab != null && tab.length >= (hi = getFence()) && index >= 0) {
        // Advance across empty buckets until a node is found, then consume exactly one value.
        while (current != null || index < hi) {
          if (current == null) current = tab[index++];
          else {
            V v = current.value;
            current = current.next;
            action.accept(v);
            if (map.modCount != expectedModCount) throw new ConcurrentModificationException();
            return true;
          }
        }
      }
      return false;
    }

    public int characteristics() {
      // SIZED only before a split; values are not DISTINCT (duplicates are allowed).
      return (fence < 0 || est == map.size ? Spliterator.SIZED : 0);
    }
  }

  /** Spliterator over the map's entries, handing out the internal Node objects themselves. */
  static final class EntrySpliterator<K, V> extends HashMapSpliterator<K, V>
      implements Spliterator<Entry<K, V>> {
    EntrySpliterator(MyHashMap<K, V> m, int origin, int fence, int est, int expectedModCount) {
      super(m, origin, fence, est, expectedModCount);
    }

    /**
     * Splits off the lower half of the remaining bin range. Returns null when the range is too
     * small to split or a bin chain is partially consumed.
     */
    public EntrySpliterator<K, V> trySplit() {
      int hi = getFence(), lo = index, mid = (lo + hi) >>> 1;
      return (lo >= mid || current != null)
          ? null
          : new EntrySpliterator<>(map, lo, index = mid, est >>>= 1, expectedModCount);
    }

    /** Applies {@code action} to every remaining entry, then checks for concurrent modification. */
    public void forEachRemaining(Consumer<? super Entry<K, V>> action) {
      int i, hi, mc;
      if (action == null) throw new NullPointerException();
      MyHashMap<K, V> m = map;
      Node<K, V>[] tab = m.table;
      if ((hi = fence) < 0) {
        // Late-binding: capture modCount and the fence on first use.
        mc = expectedModCount = m.modCount;
        hi = fence = (tab == null) ? 0 : tab.length;
      } else mc = expectedModCount;
      if (tab != null
          && tab.length >= hi
          && (i = index) >= 0
          && (i < (index = hi) || current != null)) {
        Node<K, V> p = current;
        current = null; // this spliterator is exhausted after this call
        do {
          if (p == null) p = tab[i++]; // advance to next bin
          else {
            action.accept(p);
            p = p.next;
          }
        } while (p != null || i < hi);
        // Fail fast if the map was structurally modified during traversal.
        if (m.modCount != mc) throw new ConcurrentModificationException();
      }
    }

    /** Consumes the next entry, if any; returns true if an element was consumed. */
    public boolean tryAdvance(Consumer<? super Entry<K, V>> action) {
      int hi;
      if (action == null) throw new NullPointerException();
      Node<K, V>[] tab = map.table;
      if (tab != null && tab.length >= (hi = getFence()) && index >= 0) {
        while (current != null || index < hi) {
          if (current == null) current = tab[index++];
          else {
            Node<K, V> e = current;
            current = current.next;
            action.accept(e);
            if (map.modCount != expectedModCount) throw new ConcurrentModificationException();
            return true;
          }
        }
      }
      return false;
    }

    /** Entries are distinct; SIZED is reported only while the size estimate is still exact. */
    public int characteristics() {
      return (fence < 0 || est == map.size ? Spliterator.SIZED : 0) | Spliterator.DISTINCT;
    }
  }

  /* ------------------------------------------------------------ */
  // MyLinkedHashMap support

  /*
   * The following package-protected methods are designed to be
   * overridden by MyLinkedHashMap, but not by any other subclass.
   * Nearly all other internal methods are also package-protected
   * but are declared final, so can be used by MyLinkedHashMap, view
   * classes, and HashSet.
   */

  /** Creates a regular (non-tree) bin node; designed to be overridden by MyLinkedHashMap. */
  Node<K, V> newNode(int hash, K key, V value, Node<K, V> next) {
    final Node<K, V> node = new Node<>(hash, key, value, next);
    return node;
  }

  /** Builds a plain node from {@code p} when converting a tree bin back to a list bin. */
  Node<K, V> replacementNode(Node<K, V> p, Node<K, V> next) {
    final Node<K, V> plain = new Node<>(p.hash, p.key, p.value, next);
    return plain;
  }

  /** Creates a tree-bin node; designed to be overridden by MyLinkedHashMap. */
  TreeNode<K, V> newTreeNode(int hash, K key, V value, Node<K, V> next) {
    final TreeNode<K, V> node = new TreeNode<>(hash, key, value, next);
    return node;
  }

  /** Builds a tree node from {@code p} when treeifying a bin (see treeifyBin). */
  TreeNode<K, V> replacementTreeNode(Node<K, V> p, Node<K, V> next) {
    final TreeNode<K, V> treed = new TreeNode<>(p.hash, p.key, p.value, next);
    return treed;
  }

  /**
   * Restores this map to its pristine default state. Invoked by clone and readObject before the
   * instance is repopulated.
   */
  void reinitialize() {
    // Drop the bucket array and all cached collection views.
    table = null;
    keySet = null;
    values = null;
    entrySet = null;
    // Reset the bookkeeping counters.
    size = 0;
    threshold = 0;
    modCount = 0;
  }

  // Callbacks to allow MyLinkedHashMap post-actions
  /** Post-access hook for MyLinkedHashMap; intentionally a no-op here. */
  void afterNodeAccess(Node<K, V> p) {}

  /** Post-insertion hook for MyLinkedHashMap; intentionally a no-op here. */
  void afterNodeInsertion(boolean evict) {}

  /** Post-removal hook for MyLinkedHashMap; intentionally a no-op here. */
  void afterNodeRemoval(Node<K, V> p) {}

  /**
   * Writes each key/value pair to the stream in bucket-chain order. Called only from writeObject,
   * to ensure compatible serialized ordering.
   */
  void internalWriteEntries(java.io.ObjectOutputStream s) throws IOException {
    Node<K, V>[] tab = table;
    if (size > 0 && tab != null) {
      for (Node<K, V> bucket : tab) {
        // Walk each bin's chain, emitting key then value for every node.
        for (Node<K, V> e = bucket; e != null; e = e.next) {
          s.writeObject(e.key);
          s.writeObject(e.value);
        }
      }
    }
  }

  /* ------------------------------------------------------------ */
  // Tree bins

  /**
   * Entry for Tree bins. Extends MyLinkedHashMap.Entry (which in turn extends Node) so can be used
   * as extension of either regular or linked node. Each tree bin simultaneously maintains a
   * red-black tree (parent/left/right/red) and the ordinary next/prev linkage used by iterators.
   */
  static final class TreeNode<K, V> extends MyLinkedHashMap.Entry<K, V> {
    TreeNode<K, V> parent; // red-black tree links
    TreeNode<K, V> left;
    TreeNode<K, V> right;
    TreeNode<K, V> prev; // needed to unlink next upon deletion
    boolean red;

    TreeNode(int hash, K key, V val, Node<K, V> next) {
      super(hash, key, val, next);
    }

    /** Returns root of tree containing this node. */
    final TreeNode<K, V> root() {
      for (TreeNode<K, V> r = this, p; ; ) {
        if ((p = r.parent) == null) return r;
        r = p;
      }
    }

    /**
     * Ensures that the given root is the first node of its bin, splicing it to the head of the
     * bin's next/prev chain so that table lookups land on the tree root.
     */
    static <K, V> void moveRootToFront(Node<K, V>[] tab, TreeNode<K, V> root) {
      int n;
      if (root != null && tab != null && (n = tab.length) > 0) {
        int index = (n - 1) & root.hash;
        TreeNode<K, V> first = (TreeNode<K, V>) tab[index];
        if (root != first) {
          Node<K, V> rn;
          tab[index] = root;
          TreeNode<K, V> rp = root.prev;
          // Unlink root from its current position in the doubly-linked list...
          if ((rn = root.next) != null) ((TreeNode<K, V>) rn).prev = rp;
          if (rp != null) rp.next = rn;
          // ...then splice it in ahead of the old first node.
          if (first != null) first.prev = root;
          root.next = first;
          root.prev = null;
        }
        assert checkInvariants(root);
      }
    }

    /**
     * Finds the node starting at root p with the given hash and key. The kc argument caches
     * comparableClassFor(key) upon first use comparing keys.
     */
    final TreeNode<K, V> find(int h, Object k, Class<?> kc) {
      TreeNode<K, V> p = this;
      do {
        int ph, dir;
        K pk;
        TreeNode<K, V> pl = p.left, pr = p.right, q;
        if ((ph = p.hash) > h) p = pl;
        else if (ph < h) p = pr;
        else if ((pk = p.key) == k || (k != null && k.equals(pk))) return p;
        else if (pl == null) p = pr;
        else if (pr == null) p = pl;
        else if ((kc != null || (kc = comparableClassFor(k)) != null)
            && (dir = compareComparables(kc, k, pk)) != 0) p = (dir < 0) ? pl : pr;
        // Equal hashes, non-comparable keys: search the right subtree recursively,
        // then continue down the left.
        else if ((q = pr.find(h, k, kc)) != null) return q;
        else p = pl;
      } while (p != null);
      return null;
    }

    /** Calls find for root node. */
    final TreeNode<K, V> getTreeNode(int h, Object k) {
      return ((parent != null) ? root() : this).find(h, k, null);
    }

    /**
     * Tie-breaking utility for ordering insertions when equal hashCodes and non-comparable. We
     * don't require a total order, just a consistent insertion rule to maintain equivalence across
     * rebalancings. Tie-breaking further than necessary simplifies testing a bit.
     */
    static int tieBreakOrder(Object a, Object b) {
      int d;
      if (a == null
          || b == null
          || (d = a.getClass().getName().compareTo(b.getClass().getName())) == 0)
        d = (System.identityHashCode(a) <= System.identityHashCode(b) ? -1 : 1);
      return d;
    }

    /**
     * Forms a red-black tree of the nodes linked from this node and installs its root as the head
     * of the containing bin in {@code tab}.
     */
    final void treeify(Node<K, V>[] tab) {
      TreeNode<K, V> root = null;
      for (TreeNode<K, V> x = this, next; x != null; x = next) {
        next = (TreeNode<K, V>) x.next;
        x.left = x.right = null;
        if (root == null) {
          x.parent = null;
          x.red = false;
          root = x;
        } else {
          K k = x.key;
          int h = x.hash;
          Class<?> kc = null;
          // Standard BST insertion ordered by hash, then Comparable, then tie-break.
          for (TreeNode<K, V> p = root; ; ) {
            int dir, ph;
            K pk = p.key;
            if ((ph = p.hash) > h) dir = -1;
            else if (ph < h) dir = 1;
            else if ((kc == null && (kc = comparableClassFor(k)) == null)
                || (dir = compareComparables(kc, k, pk)) == 0) dir = tieBreakOrder(k, pk);

            TreeNode<K, V> xp = p;
            if ((p = (dir <= 0) ? p.left : p.right) == null) {
              x.parent = xp;
              if (dir <= 0) xp.left = x;
              else xp.right = x;
              root = balanceInsertion(root, x);
              break;
            }
          }
        }
      }
      moveRootToFront(tab, root);
    }

    /** Returns a list of non-TreeNodes replacing those linked from this node. */
    final Node<K, V> untreeify(MyHashMap<K, V> map) {
      Node<K, V> hd = null, tl = null;
      for (Node<K, V> q = this; q != null; q = q.next) {
        Node<K, V> p = map.replacementNode(q, null);
        if (tl == null) hd = p;
        else tl.next = p;
        tl = p;
      }
      return hd;
    }

    /**
     * Tree version of putVal. Returns the existing node for the key (caller decides whether to
     * overwrite its value), or null if a new node was inserted.
     */
    final TreeNode<K, V> putTreeVal(MyHashMap<K, V> map, Node<K, V>[] tab, int h, K k, V v) {
      Class<?> kc = null;
      boolean searched = false;
      TreeNode<K, V> root = (parent != null) ? root() : this;
      for (TreeNode<K, V> p = root; ; ) {
        int dir, ph;
        K pk;
        if ((ph = p.hash) > h) dir = -1;
        else if (ph < h) dir = 1;
        else if ((pk = p.key) == k || (k != null && k.equals(pk))) return p;
        else if ((kc == null && (kc = comparableClassFor(k)) == null)
            || (dir = compareComparables(kc, k, pk)) == 0) {
          // Non-comparable tie: scan both subtrees once for an equal key before
          // falling back to the identity-based tie-break order.
          if (!searched) {
            TreeNode<K, V> q, ch;
            searched = true;
            if (((ch = p.left) != null && (q = ch.find(h, k, kc)) != null)
                || ((ch = p.right) != null && (q = ch.find(h, k, kc)) != null)) return q;
          }
          dir = tieBreakOrder(k, pk);
        }

        TreeNode<K, V> xp = p;
        if ((p = (dir <= 0) ? p.left : p.right) == null) {
          // Insert as leaf, also splicing the new node into xp's next/prev chain.
          Node<K, V> xpn = xp.next;
          TreeNode<K, V> x = map.newTreeNode(h, k, v, xpn);
          if (dir <= 0) xp.left = x;
          else xp.right = x;
          xp.next = x;
          x.parent = x.prev = xp;
          if (xpn != null) ((TreeNode<K, V>) xpn).prev = x;
          moveRootToFront(tab, balanceInsertion(root, x));
          return null;
        }
      }
    }

    /**
     * Removes the given node, that must be present before this call. This is messier than typical
     * red-black deletion code because we cannot swap the contents of an interior node with a leaf
     * successor that is pinned by "next" pointers that are accessible independently during
     * traversal. So instead we swap the tree linkages. If the current tree appears to have too few
     * nodes, the bin is converted back to a plain bin. (The test triggers somewhere between 2 and 6
     * nodes, depending on tree structure).
     */
    final void removeTreeNode(MyHashMap<K, V> map, Node<K, V>[] tab, boolean movable) {
      int n;
      if (tab == null || (n = tab.length) == 0) return;
      int index = (n - 1) & hash;
      TreeNode<K, V> first = (TreeNode<K, V>) tab[index], root = first, rl;
      TreeNode<K, V> succ = (TreeNode<K, V>) next, pred = prev;
      // First unlink this node from the bin's next/prev chain.
      if (pred == null) tab[index] = first = succ;
      else pred.next = succ;
      if (succ != null) succ.prev = pred;
      if (first == null) return;
      if (root.parent != null) root = root.root();
      if (root == null || root.right == null || (rl = root.left) == null || rl.left == null) {
        tab[index] = first.untreeify(map); // too small
        return;
      }
      TreeNode<K, V> p = this, pl = left, pr = right, replacement;
      if (pl != null && pr != null) {
        // Two children: swap tree linkages (not contents) with the in-order successor s.
        TreeNode<K, V> s = pr, sl;
        while ((sl = s.left) != null) // find successor
        s = sl;
        boolean c = s.red;
        s.red = p.red;
        p.red = c; // swap colors
        TreeNode<K, V> sr = s.right;
        TreeNode<K, V> pp = p.parent;
        if (s == pr) { // p was s's direct parent
          p.parent = s;
          s.right = p;
        } else {
          TreeNode<K, V> sp = s.parent;
          if ((p.parent = sp) != null) {
            if (s == sp.left) sp.left = p;
            else sp.right = p;
          }
          if ((s.right = pr) != null) pr.parent = s;
        }
        p.left = null;
        if ((p.right = sr) != null) sr.parent = p;
        if ((s.left = pl) != null) pl.parent = s;
        if ((s.parent = pp) == null) root = s;
        else if (p == pp.left) pp.left = s;
        else pp.right = s;
        if (sr != null) replacement = sr;
        else replacement = p;
      } else if (pl != null) replacement = pl;
      else if (pr != null) replacement = pr;
      else replacement = p;
      if (replacement != p) {
        // Splice the single child (or successor's child) into p's place.
        TreeNode<K, V> pp = replacement.parent = p.parent;
        if (pp == null) root = replacement;
        else if (p == pp.left) pp.left = replacement;
        else pp.right = replacement;
        p.left = p.right = p.parent = null;
      }

      // Removing a red node never violates black-height; otherwise rebalance.
      TreeNode<K, V> r = p.red ? root : balanceDeletion(root, replacement);

      if (replacement == p) { // detach
        TreeNode<K, V> pp = p.parent;
        p.parent = null;
        if (pp != null) {
          if (p == pp.left) pp.left = null;
          else if (p == pp.right) pp.right = null;
        }
      }
      if (movable) moveRootToFront(tab, r);
    }

    /**
     * Splits nodes in a tree bin into lower and upper tree bins, or untreeifies if now too small.
     * Called only from resize; see above discussion about split bits and indices.
     *
     * @param map the map
     * @param tab the table for recording bin heads
     * @param index the index of the table being split
     * @param bit the bit of hash to split on
     */
    final void split(MyHashMap<K, V> map, Node<K, V>[] tab, int index, int bit) {
      TreeNode<K, V> b = this;
      // Relink into lo and hi lists, preserving order
      TreeNode<K, V> loHead = null, loTail = null;
      TreeNode<K, V> hiHead = null, hiTail = null;
      int lc = 0, hc = 0;
      for (TreeNode<K, V> e = b, next; e != null; e = next) {
        next = (TreeNode<K, V>) e.next;
        e.next = null;
        if ((e.hash & bit) == 0) {
          if ((e.prev = loTail) == null) loHead = e;
          else loTail.next = e;
          loTail = e;
          ++lc;
        } else {
          if ((e.prev = hiTail) == null) hiHead = e;
          else hiTail.next = e;
          hiTail = e;
          ++hc;
        }
      }

      if (loHead != null) {
        if (lc <= UNTREEIFY_THRESHOLD) tab[index] = loHead.untreeify(map);
        else {
          tab[index] = loHead;
          if (hiHead != null) // (else is already treeified)
          loHead.treeify(tab);
        }
      }
      if (hiHead != null) {
        if (hc <= UNTREEIFY_THRESHOLD) tab[index + bit] = hiHead.untreeify(map);
        else {
          tab[index + bit] = hiHead;
          if (loHead != null) hiHead.treeify(tab);
        }
      }
    }

    /* ------------------------------------------------------------ */
    // Red-black tree methods, all adapted from CLR

    /** Left-rotates the subtree at p; returns the (possibly new) tree root. */
    static <K, V> TreeNode<K, V> rotateLeft(TreeNode<K, V> root, TreeNode<K, V> p) {
      TreeNode<K, V> r, pp, rl;
      if (p != null && (r = p.right) != null) {
        if ((rl = p.right = r.left) != null) rl.parent = p;
        if ((pp = r.parent = p.parent) == null) (root = r).red = false;
        else if (pp.left == p) pp.left = r;
        else pp.right = r;
        r.left = p;
        p.parent = r;
      }
      return root;
    }

    /** Right-rotates the subtree at p; returns the (possibly new) tree root. */
    static <K, V> TreeNode<K, V> rotateRight(TreeNode<K, V> root, TreeNode<K, V> p) {
      TreeNode<K, V> l, pp, lr;
      if (p != null && (l = p.left) != null) {
        if ((lr = p.left = l.right) != null) lr.parent = p;
        if ((pp = l.parent = p.parent) == null) (root = l).red = false;
        else if (pp.right == p) pp.right = l;
        else pp.left = l;
        l.right = p;
        p.parent = l;
      }
      return root;
    }

    /** Restores red-black invariants after inserting x; returns the (possibly new) root. */
    static <K, V> TreeNode<K, V> balanceInsertion(TreeNode<K, V> root, TreeNode<K, V> x) {
      x.red = true; // new nodes start red
      for (TreeNode<K, V> xp, xpp, xppl, xppr; ; ) {
        if ((xp = x.parent) == null) {
          x.red = false;
          return x;
        } else if (!xp.red || (xpp = xp.parent) == null) return root;
        if (xp == (xppl = xpp.left)) {
          if ((xppr = xpp.right) != null && xppr.red) {
            // Case: red uncle — recolor and move the violation up.
            xppr.red = false;
            xp.red = false;
            xpp.red = true;
            x = xpp;
          } else {
            if (x == xp.right) {
              root = rotateLeft(root, x = xp);
              xpp = (xp = x.parent) == null ? null : xp.parent;
            }
            if (xp != null) {
              xp.red = false;
              if (xpp != null) {
                xpp.red = true;
                root = rotateRight(root, xpp);
              }
            }
          }
        } else {
          if (xppl != null && xppl.red) {
            // Mirror case: red uncle on the left.
            xppl.red = false;
            xp.red = false;
            xpp.red = true;
            x = xpp;
          } else {
            if (x == xp.left) {
              root = rotateRight(root, x = xp);
              xpp = (xp = x.parent) == null ? null : xp.parent;
            }
            if (xp != null) {
              xp.red = false;
              if (xpp != null) {
                xpp.red = true;
                root = rotateLeft(root, xpp);
              }
            }
          }
        }
      }
    }

    /** Restores red-black invariants after a deletion, starting the fixup at x; returns the root. */
    static <K, V> TreeNode<K, V> balanceDeletion(TreeNode<K, V> root, TreeNode<K, V> x) {
      for (TreeNode<K, V> xp, xpl, xpr; ; ) {
        if (x == null || x == root) return root;
        else if ((xp = x.parent) == null) {
          x.red = false;
          return x;
        } else if (x.red) {
          // A red node can simply be recolored black to restore black-height.
          x.red = false;
          return root;
        } else if ((xpl = xp.left) == x) {
          if ((xpr = xp.right) != null && xpr.red) {
            // Red sibling: rotate to give x a black sibling.
            xpr.red = false;
            xp.red = true;
            root = rotateLeft(root, xp);
            xpr = (xp = x.parent) == null ? null : xp.right;
          }
          if (xpr == null) x = xp;
          else {
            TreeNode<K, V> sl = xpr.left, sr = xpr.right;
            if ((sr == null || !sr.red) && (sl == null || !sl.red)) {
              // Black sibling with black children: recolor and move up.
              xpr.red = true;
              x = xp;
            } else {
              if (sr == null || !sr.red) {
                if (sl != null) sl.red = false;
                xpr.red = true;
                root = rotateRight(root, xpr);
                xpr = (xp = x.parent) == null ? null : xp.right;
              }
              if (xpr != null) {
                xpr.red = (xp == null) ? false : xp.red;
                if ((sr = xpr.right) != null) sr.red = false;
              }
              if (xp != null) {
                xp.red = false;
                root = rotateLeft(root, xp);
              }
              x = root;
            }
          }
        } else { // symmetric
          if (xpl != null && xpl.red) {
            xpl.red = false;
            xp.red = true;
            root = rotateRight(root, xp);
            xpl = (xp = x.parent) == null ? null : xp.left;
          }
          if (xpl == null) x = xp;
          else {
            TreeNode<K, V> sl = xpl.left, sr = xpl.right;
            if ((sl == null || !sl.red) && (sr == null || !sr.red)) {
              xpl.red = true;
              x = xp;
            } else {
              if (sl == null || !sl.red) {
                if (sr != null) sr.red = false;
                xpl.red = true;
                root = rotateLeft(root, xpl);
                xpl = (xp = x.parent) == null ? null : xp.left;
              }
              if (xpl != null) {
                xpl.red = (xp == null) ? false : xp.red;
                if ((sl = xpl.left) != null) sl.red = false;
              }
              if (xp != null) {
                xp.red = false;
                root = rotateRight(root, xp);
              }
              x = root;
            }
          }
        }
      }
    }

    /** Recursive invariant check: verifies list links, parent links, BST hash order, no red-red. */
    static <K, V> boolean checkInvariants(TreeNode<K, V> t) {
      TreeNode<K, V> tp = t.parent,
          tl = t.left,
          tr = t.right,
          tb = t.prev,
          tn = (TreeNode<K, V>) t.next;
      if (tb != null && tb.next != t) return false;
      if (tn != null && tn.prev != t) return false;
      if (tp != null && t != tp.left && t != tp.right) return false;
      if (tl != null && (tl.parent != t || tl.hash > t.hash)) return false;
      if (tr != null && (tr.parent != t || tr.hash < t.hash)) return false;
      if (t.red && tl != null && tl.red && tr != null && tr.red) return false;
      if (tl != null && !checkInvariants(tl)) return false;
      if (tr != null && !checkInvariants(tr)) return false;
      return true;
    }
  }
}
