package org.alex.synchronizedAnalyse;

/**
 * monitorenter 对应的 HotSpot C++ 源码分析
 */
public class MonitorenterCpp {
    /**
     * monitorenter分析
     * monitorenter  for locking/unlocking an object
     * CASE(_monitorenter): {
     *         oop lockee = STACK_OBJECT(-1);                                                               # 获取栈顶元素（就是对应的对象）
     *         // derefing's lockee ought to provoke implicit null check
     *         CHECK_NULL(lockee);
     *         // find a free monitor or one already allocated for this object
     *         // if we find a matching object then we need a new monitor
     *         // since this is recursive enter
     *         BasicObjectLock* limit = istate->monitor_base();                                             # 遍历线程的BOLs对象 获取空闲的BOL对象
     *         BasicObjectLock* most_recent = (BasicObjectLock*) istate->stack_base();                      # 注：BOL对象里面有2个属性
     *         BasicObjectLock* entry = NULL;                                                               # 1个是_lock(jvm对象头){cpp类型是markOopDesc}
     *         while (most_recent != limit ) {                                                              # 1个是_obj(锁住的对象)
     *           if (most_recent->obj() == NULL) entry = most_recent;
     *           else if (most_recent->obj() == lockee) break;
     *           most_recent++;
     *         }
     *         if (entry != NULL) {                                                                         # 获取空闲BOL对象成功
     *           entry->set_obj(lockee);                                                                    # 给BOL设置对象
     *           int success = false;
     *           uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place;
     *
     *           markOop mark = lockee->mark();                                                             # 获取对象中的对象头
     *           intptr_t hash = (intptr_t) markOopDesc::no_hash;
     *           // implies UseBiasedLocking
 *           if (mark->has_bias_pattern()) {                                                            ## 判断对象头是否处于偏向模式
     *             uintptr_t thread_ident;
     *             uintptr_t anticipated_bias_locking_value;
 *             thread_ident = (uintptr_t)istate->thread();                                              ## 获取当前线程的指针(作为线程ID)
     *             anticipated_bias_locking_value =                                                         ## 做一系列的& | ^ 计算出一个值
     *               (((uintptr_t)lockee->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) &  ## lockee->klass()->prototype_header()=>该类的class的对象头
     *               ~((uintptr_t) markOopDesc::age_mask_in_place);                                         ## 与当前线程id做或计算（|） 结果就是让该对象头里面的线程id修改为当前线程id（由于从类获取的对象头线程id为空）
 *                                                                                                      ## 得到的结果再与加锁对象头进行异或运算  如果当前线程已持有该偏向锁,线程id区域一定是0
 *                                                                                                      ## 再与markOopDesc::age_mask_in_place取反后相与,age_mask_in_place是markword中分代年龄的掩码, 所以最后一步操作会使分代年龄区域置0
     *             if  (anticipated_bias_locking_value == 0) {                                              ## 分支1 如果该值=0则标识当前对象偏向锁偏向自己
     *               // already biased towards this thread, nothing to do
     *               if (PrintBiasedLockingStatistics) {
     *                 (* BiasedLocking::biased_lock_entry_count_addr())++;
     *               }
     *               success = true;
     *             }
 *             else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) { ## 分支2 类头部偏向模式是关闭的 则撤销偏向并升级为轻量级锁
     *               // try revoke bias
     *               markOop header = lockee->klass()->prototype_header();                                      ## 构造新的对象头（轻量级锁头部）
     *               if (hash != markOopDesc::no_hash) {
     *                 header = header->copy_set_hash(hash);
     *               }
     *               if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), mark) == mark) {                      ## 使用cas替换对象的markword为类中的markword.接下来由于success仍然为false,
     *                 if (PrintBiasedLockingStatistics)                                                        ## 所以这个分支会进入下面的轻量级锁逻辑. (这里的这个轻量级锁会判断重入)
     *                   (*BiasedLocking::revoked_lock_entry_count_addr())++;
     *               }
     *             }
     *             else if ((anticipated_bias_locking_value & epoch_mask_in_place) !=0) {                       ## 分支3 epoch不等于class中的epoch，尝试重偏向
     *                                                                                                          ## 经过上面的判断,已经可以确定类的偏向模式开启并且也没有偏向当前线程. 接下来判断epoch是否与class中的不一致, 为什么会不一致呢,
     *                                                                                                          ## 是因为Jvm有批量重偏向的机制, 在批量重偏向中会更新class中的epoch 并且会更新所有正在使用的锁对象上的epoch,
     *                                                                                                          ## 那么会出现epoch过期的情况就只有一个:该对象不是正在使用的锁对象,所以它可以被重偏向到 当前线程.
     *               // try rebias
     *               markOop new_header = (markOop) ( (intptr_t) lockee->klass()->prototype_header() | thread_ident);  ## 构造新的对象头（偏向锁头部）
     *               if (hash != markOopDesc::no_hash) {
     *                 new_header = new_header->copy_set_hash(hash);
     *               }
     *               if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), mark) == mark) {           ## 通过cas尝试重偏向
     *                 if (PrintBiasedLockingStatistics)
     *                   (* BiasedLocking::rebiased_lock_entry_count_addr())++;
     *               }
     *               else {                                                                                     ## 重偏向失败  进行锁膨胀
     *                 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
     *               }
     *               success = true;
     *             }
     *             else {                                                                                         ## 分支4 线程id不一致,可能是匿名偏向
     *               // try to bias towards thread in case object is anonymously biased
     *               markOop header = (markOop) ((uintptr_t) mark & ((uintptr_t)markOopDesc::biased_lock_mask_in_place |
     *                                                               (uintptr_t)markOopDesc::age_mask_in_place |
     *                                                               epoch_mask_in_place));
     *               if (hash != markOopDesc::no_hash) {
     *                 header = header->copy_set_hash(hash);
     *               }
     *               markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
     *               // debugging hint
     *               DEBUG_ONLY(entry->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
     *               if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), header) == header) {         ## cas判断是否是匿名偏向
     *                 if (PrintBiasedLockingStatistics)
     *                   (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
     *               }
     *               else {                                                                                       ## 不是的话 直接锁升级
     *                 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
     *               }
     *               success = true;
     *             }
     *           }
     *
     *           // traditional lightweight locking
 *           if (!success) {                                                                                   ## 对象头本来就没有偏向模式 或 走了分支2(撤销偏向)时 success仍为false 会走到这里 里面会判断锁的可重入
     *             markOop displaced = lockee->mark()->set_unlocked();                                             ## 这里将displaced header设置为unlocked的原因是等到解锁时,会将displaced header中的markword替换回对象头上,
 *             entry->lock()->set_displaced_header(displaced);                                                 ## 这时对象应该是无锁的所以直接设置为 unlocked.
     *             bool call_vm = UseHeavyMonitors;                                                                ## 是否直接使用重量级锁 （可以通过-XX:+UseHeavyMonitors 直接禁用轻量级和偏向锁）
     *             if (call_vm || Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) {       ## cmpxchg_ptr中如果失败, 则意味着对象已经被锁住,
     *               // Is it simple recursive case?
     *               if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {              ## 这里使用is_lock_owned判断是否重入,如果是,那么直接设置displaced header为null (在轻量级锁解锁时会解释为什么这么做),如果被其他线程持有,那么进入InterpreterRuntime::monitorenter逻辑中.
     *                 entry->lock()->set_displaced_header(NULL);
     *               } else {
     *                 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
     *               }
     *             }
     *           }
     *           UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
     *         } else {
     *           istate->set_msg(more_monitors);
     *           UPDATE_PC_AND_RETURN(0); // Re-execute
     *         }
     *       }
     */




    /**
     * # monitorenter方法
     *
     * IRT_ENTRY_NO_ASYNC(void, InterpreterRuntime::monitorenter(JavaThread* thread, BasicObjectLock* elem))
     * #ifdef ASSERT
     *   thread->last_frame().interpreter_frame_verify_monitor(elem);
     * #endif
     *   if (PrintBiasedLockingStatistics) {
     *     Atomic::inc(BiasedLocking::slow_path_entry_count_addr());
     *   }
     *   Handle h_obj(thread, elem->obj());                                          # 将java线程和锁对象封装起来
     *   assert(Universe::heap()->is_in_reserved_or_null(h_obj()),
     *          "must be NULL or an object");
 *   if (UseBiasedLocking) {                                                     # 是否开启偏向锁 （jdk15后偏向锁默认关闭 可以通过-XX:+UseBiasedLocking 重新开启）
     *     // Retry fast entry if bias is revoked to avoid unnecessary inflation
     *     ObjectSynchronizer::fast_enter(h_obj, elem->lock(), true, CHECK);
     *   } else {
     *     ObjectSynchronizer::slow_enter(h_obj, elem->lock(), CHECK);
     *   }
     *   assert(Universe::heap()->is_in_reserved_or_null(elem->obj()),
     *          "must be NULL or an object");
     * #ifdef ASSERT
     *   thread->last_frame().interpreter_frame_verify_monitor(elem);
     * #endif
     * IRT_END
     */





    /**
     * # fast_enter 方法
     *
     * void ObjectSynchronizer::fast_enter(Handle obj, BasicLock* lock, bool attempt_rebias, TRAPS) {
 *  if (UseBiasedLocking) {                                                             # 是否开启偏向锁 （jdk15后偏向锁默认关闭 可以通过-XX:+UseBiasedLocking 重新开启）
 *     if (!SafepointSynchronize::is_at_safepoint()) {                                   # 当前不处于安全点（普通Java线程调用）：尝试撤销并重偏向
     *       BiasedLocking::Condition cond = BiasedLocking::revoke_and_rebias(obj, attempt_rebias, THREAD);  # 撤销并重偏向
     *       if (cond == BiasedLocking::BIAS_REVOKED_AND_REBIASED) {                              ## 是否取消偏向并重偏向
     *         return;
     *       }
 *     } else {                                                                                 # 当前处于安全点（VM线程调用）：直接在安全点撤销偏向
     *       assert(!attempt_rebias, "can not rebias toward VM thread");
     *       BiasedLocking::revoke_at_safepoint(obj);
     *     }
     *     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
     *  }
     *
     *  slow_enter (obj, lock, THREAD) ;                                                             # 进行锁升级
     * }
     */





    /**
     * # revoke_and_rebias 方法
     * BiasedLocking::Condition BiasedLocking::revoke_and_rebias(Handle obj, bool attempt_rebias, TRAPS) {
     *   assert(!SafepointSynchronize::is_at_safepoint(), "must not be called while at safepoint");
     *
     *   // We can revoke the biases of anonymously-biased objects
     *   // efficiently enough that we should not cause these revocations to
     *   // update the heuristics because doing so may cause unwanted bulk
     *   // revocations (which are expensive) to occur.
     *   markOop mark = obj->mark();                                                # 获取锁对象的对象头
     *   if (mark->is_biased_anonymously() && !attempt_rebias) {
     *     // We are probably trying to revoke the bias of this object due to
     *     // an identity hash code computation. Try to revoke the bias
     *     // without a safepoint. This is possible if we can successfully
     *     // compare-and-exchange an unbiased header into the mark word of
     *     // the object, meaning that no other thread has raced to acquire
     *     // the bias of the object.
     *     markOop biased_value       = mark;
     *     markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age());
     *     markOop res_mark = (markOop) Atomic::cmpxchg_ptr(unbiased_prototype, obj->mark_addr(), mark);
     *     if (res_mark == biased_value) {
     *       return BIAS_REVOKED;
     *     }
     *   } else if (mark->has_bias_pattern()) {                                              # 是否是偏向锁
     *     Klass* k = obj->klass();
     *     markOop prototype_header = k->prototype_header();
     *     if (!prototype_header->has_bias_pattern()) {                                     # 原始类头是否未含有偏向锁
     *       // This object has a stale bias from before the bulk revocation
     *       // for this data type occurred. It's pointless to update the
     *       // heuristics at this point so simply update the header with a
     *       // CAS. If we fail this race, the object's bias has been revoked
     *       // by another thread so we simply return and let the caller deal
     *       // with it.
     *       markOop biased_value       = mark;                                                    # 锁撤销
     *       markOop res_mark = (markOop) Atomic::cmpxchg_ptr(prototype_header, obj->mark_addr(), mark);
     *       assert(!(*(obj->mark_addr()))->has_bias_pattern(), "even if we raced, should still be revoked");
     *       return BIAS_REVOKED;
     *     } else if (prototype_header->bias_epoch() != mark->bias_epoch()) {                          # epoch过期
     *       // The epoch of this biasing has expired indicating that the
     *       // object is effectively unbiased. Depending on whether we need
     *       // to rebias or revoke the bias of this object we can do it
     *       // efficiently enough with a CAS that we shouldn't update the
     *       // heuristics. This is normally done in the assembly code but we
     *       // can reach this point due to various points in the runtime
     *       // needing to revoke biases.
     *       if (attempt_rebias) {                                                                   # 进行锁的撤销和重偏向
     *         assert(THREAD->is_Java_thread(), "");
     *         markOop biased_value       = mark;
     *         markOop rebiased_prototype = markOopDesc::encode((JavaThread*) THREAD, mark->age(), prototype_header->bias_epoch());
     *         markOop res_mark = (markOop) Atomic::cmpxchg_ptr(rebiased_prototype, obj->mark_addr(), mark);
     *         if (res_mark == biased_value) {
     *           return BIAS_REVOKED_AND_REBIASED;
     *         }
     *       } else {
     *         markOop biased_value       = mark;
     *         markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age());
     *         markOop res_mark = (markOop) Atomic::cmpxchg_ptr(unbiased_prototype, obj->mark_addr(), mark);
     *         if (res_mark == biased_value) {
     *           return BIAS_REVOKED;
     *         }
     *       }
     *     }
     *   }
     *
     *  # 不是偏向锁或者偏向锁重偏向失败
     *   HeuristicsResult heuristics = update_heuristics(obj(), attempt_rebias); # 计算需要怎么样的偏向锁撤销  不撤销、单撤销、批量撤销等
     *   if (heuristics == HR_NOT_BIASED) {                                 # 1 不是偏向锁
     *     return NOT_BIASED;
     *   } else if (heuristics == HR_SINGLE_REVOKE) {                       # 2 单撤销
     *     Klass *k = obj->klass();
     *     markOop prototype_header = k->prototype_header();
     *     if (mark->biased_locker() == THREAD &&
     *         prototype_header->bias_epoch() == mark->bias_epoch()) {
     *       // A thread is trying to revoke the bias of an object biased
     *       // toward it, again likely due to an identity hash code
     *       // computation. We can again avoid a safepoint in this case
     *       // since we are only going to walk our own stack. There are no
     *       // races with revocations occurring in other threads because we
     *       // reach no safepoints in the revocation path.
     *       // Also check the epoch because even if threads match, another thread
     *       // can come in with a CAS to steal the bias of an object that has a
     *       // stale epoch.
     *       ResourceMark rm;
     *       if (TraceBiasedLocking) {
     *         tty->print_cr("Revoking bias by walking my own stack:");
     *       }
     *       BiasedLocking::Condition cond = revoke_bias(obj(), false, false, (JavaThread*) THREAD);   ## 进入撤销逻辑
     *       ((JavaThread*) THREAD)->set_cached_monitor_info(NULL);
     *       assert(cond == BIAS_REVOKED, "why not?");
     *       return cond;
     *     } else {
     *       VM_RevokeBias revoke(&obj, (JavaThread*) THREAD);
     *       VMThread::execute(&revoke);
     *       return revoke.status_code();
     *     }
     *   }
     *
     *   assert((heuristics == HR_BULK_REVOKE) ||
     *          (heuristics == HR_BULK_REBIAS), "?");
     *   VM_BulkRevokeBias bulk_revoke(&obj, (JavaThread*) THREAD,
     *                                 (heuristics == HR_BULK_REBIAS),
     *                                 attempt_rebias);               # 3 批量撤销
     *   VMThread::execute(&bulk_revoke);
     *   return bulk_revoke.status_code();
     * }
     */


    /**
     * revoke_bias 方法
     *
     * static BiasedLocking::Condition revoke_bias(oop obj, bool allow_rebias, bool is_bulk, JavaThread* requesting_thread) {
     *   markOop mark = obj->mark();                                        # 获取对象头
     *   if (!mark->has_bias_pattern()) {                                   # 如果未开启偏向模式
     *     if (TraceBiasedLocking) {
     *       ResourceMark rm;
     *       tty->print_cr("  (Skipping revocation of object of type %s because it's no longer biased)",
     *                     obj->klass()->external_name());
     *     }
     *     return BiasedLocking::NOT_BIASED;
     *   }
     *
     *   uint age = mark->age();                                            # 获取分代年龄
     *   markOop   biased_prototype = markOopDesc::biased_locking_prototype()->set_age(age);   # 构建匿名偏向锁头部
     *   markOop unbiased_prototype = markOopDesc::prototype()->set_age(age);                  # 构建无锁头部
     *
     *   if (TraceBiasedLocking && (Verbose || !is_bulk)) {
     *     ResourceMark rm;
     *     tty->print_cr("Revoking bias of object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s , prototype header " INTPTR_FORMAT " , allow rebias %d , requesting thread " INTPTR_FORMAT,
     *                   p2i((void *)obj), (intptr_t) mark, obj->klass()->external_name(), (intptr_t) obj->klass()->prototype_header(), (allow_rebias ? 1 : 0), (intptr_t) requesting_thread);
     *   }
     *
     *   JavaThread* biased_thread = mark->biased_locker();                                 # 从对象头中获取持有锁的线程
     *   if (biased_thread == NULL) {                                                       # 如果是匿名偏向
     *     // Object is anonymously biased. We can get here if, for
     *     // example, we revoke the bias due to an identity hash code
     *     // being computed for an object.
     *     if (!allow_rebias) {
     *       obj->set_mark(unbiased_prototype);
     *     }
     *     if (TraceBiasedLocking && (Verbose || !is_bulk)) {
     *       tty->print_cr("  Revoked bias of anonymously-biased object");
     *     }
     *     return BiasedLocking::BIAS_REVOKED;
     *   }
     *
     *   // Handle case where the thread toward which the object was biased has exited
     *   bool thread_is_alive = false;
     *   if (requesting_thread == biased_thread) {                                             ## 如果对象头中持有锁的线程是当前线程  （该流程目的判断当前持有锁的线程是否存活）
     *     thread_is_alive = true;
     *   } else {                                                                               ## 其他线程持有该锁
     *     for (JavaThread* cur_thread = Threads::first(); cur_thread != NULL; cur_thread = cur_thread->next()) {
     *       if (cur_thread == biased_thread) {
     *         thread_is_alive = true;
     *         break;
     *       }
     *     }
     *   }
     *   if (!thread_is_alive) {                                       ## 如果持有锁的其他线程已死亡
     *     if (allow_rebias) {
     *       obj->set_mark(biased_prototype);
     *     } else {
     *       obj->set_mark(unbiased_prototype);
     *     }
     *     if (TraceBiasedLocking && (Verbose || !is_bulk)) {
     *       tty->print_cr("  Revoked bias of object biased toward dead thread");
     *     }
     *     return BiasedLocking::BIAS_REVOKED;
     *   }
     *
     *   // Thread owning bias is alive.
     *   // Check to see whether it currently owns the lock and, if so,
     *   // write down the needed displaced headers to the thread's stack.
     *   // Otherwise, restore the object's header either to the unlocked
     *   // or unbiased state.
     *   GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(biased_thread); # 获取持有锁线程的monitor
     *   BasicLock* highest_lock = NULL;
     *   for (int i = 0; i < cached_monitor_info->length(); i++) {
     *     MonitorInfo* mon_info = cached_monitor_info->at(i);
     *     if (mon_info->owner() == obj) {                                                              # 判断monitor是否还持有锁对象
     *       if (TraceBiasedLocking && Verbose) {
     *         tty->print_cr("   mon_info->owner (" PTR_FORMAT ") == obj (" PTR_FORMAT ")",
     *                       p2i((void *) mon_info->owner()),
     *                       p2i((void *) obj));
     *       }
     *       // Assume recursive case and fix up highest lock later
     *       markOop mark = markOopDesc::encode((BasicLock*) NULL);                                     # 重新生成一个对象头 并将其设置成为monitor里面的lock
     *       highest_lock = mon_info->lock();
     *       highest_lock->set_displaced_header(mark);
     *     } else {
     *       if (TraceBiasedLocking && Verbose) {
     *         tty->print_cr("   mon_info->owner (" PTR_FORMAT ") != obj (" PTR_FORMAT ")",
     *                       p2i((void *) mon_info->owner()),
     *                       p2i((void *) obj));
     *       }
     *     }
     *   }
     *   if (highest_lock != NULL) {                                                          # 持有锁线程的monitor还在使用该对象
     *     // Fix up highest lock to contain displaced header and point
     *     // object at it
     *     highest_lock->set_displaced_header(unbiased_prototype);                            # 撤销锁
     *     // Reset object header to point to displaced mark.
     *     // Must release storing the lock address for platforms without TSO
     *     // ordering (e.g. ppc).
     *     obj->release_set_mark(markOopDesc::encode(highest_lock));                          #
     *     assert(!obj->mark()->has_bias_pattern(), "illegal mark state: stack lock used bias bit");
     *     if (TraceBiasedLocking && (Verbose || !is_bulk)) {
     *       tty->print_cr("  Revoked bias of currently-locked object");
     *     }
     *   } else {                                                                              # 持有锁线程的monitor未使用该对象
     *     if (TraceBiasedLocking && (Verbose || !is_bulk)) {
     *       tty->print_cr("  Revoked bias of currently-unlocked object");
     *     }
     *     if (allow_rebias) {
     *       obj->set_mark(biased_prototype);
     *     } else {
     *       // Store the unlocked value into the object's header.
     *       obj->set_mark(unbiased_prototype);
     *     }
     *   }
     *
     *   return BiasedLocking::BIAS_REVOKED;
     * }
     */


    /**
     * slow_enter 方法
     *
     * void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) {
     *   markOop mark = obj->mark();                                                            # 获取锁对象的对象头
     *   assert(!mark->has_bias_pattern(), "should not see bias pattern here");
     *
     *   if (mark->is_neutral()) {                                                              # 是否是无锁状态
     *     // Anticipate successful CAS -- the ST of the displaced mark must
     *     // be visible <= the ST performed by the CAS.
     *     lock->set_displaced_header(mark);                                                    # 轻量级锁设置
     *     if (mark == (markOop) Atomic::cmpxchg_ptr(lock, obj()->mark_addr(), mark)) {
     *       TEVENT (slow_enter: release stacklock) ;
     *       return ;
     *     }
     *     // Fall through to inflate() ...
     *   } else
     *   if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {            # 有锁状态 但是是锁重入
     *     assert(lock != mark->locker(), "must not re-lock the same lock");
     *     assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock");
     *     lock->set_displaced_header(NULL);
     *     return;
     *   }
     *
     * #if 0
     *   // The following optimization isn't particularly useful.
     *   if (mark->has_monitor() && mark->monitor()->is_entered(THREAD)) {
     *     lock->set_displaced_header (NULL) ;
     *     return ;
     *   }
     * #endif
     *
     *   // The object header will never be displaced to this lock,
     *   // so it does not matter what the value is, except that it
     *   // must be non-zero to avoid looking like a re-entrant lock,
     *   // and must not look locked either.
     *   lock->set_displaced_header(markOopDesc::unused_mark());
     *   ObjectSynchronizer::inflate(THREAD, obj())->enter(THREAD);   # 获取监视器后加锁  重量级锁
     * }
     */


    /**
     * inflate 方法
     *
     * ObjectMonitor * ATTR ObjectSynchronizer::inflate (Thread * Self, oop object) {
     *   // Inflate mutates the heap ...
     *   // Relaxing assertion for bug 6320749.
     *   assert (Universe::verify_in_progress() ||
     *           !SafepointSynchronize::is_at_safepoint(), "invariant") ;
     *
     *   for (;;) {                                                                 # 死循环
     *       const markOop mark = object->mark() ;                                   # 获取对象头
     *       assert (!mark->has_bias_pattern(), "invariant") ;
     *
     *       // The mark can be in one of the following states:
     *       // *  Inflated     - just return
     *       // *  Stack-locked - coerce it to inflated
     *       // *  INFLATING    - busy wait for conversion to complete
     *       // *  Neutral      - aggressively inflate the object.
     *       // *  BIASED       - Illegal.  We should never see this
     *
     *       // CASE: inflated
     *       if (mark->has_monitor()) {                                             # 如果是重量级锁  获取该monitor并返回
     *           ObjectMonitor * inf = mark->monitor() ;
     *           assert (inf->header()->is_neutral(), "invariant");
     *           assert (inf->object() == object, "invariant") ;
     *           assert (ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
     *           return inf ;
     *       }
     *
     *       // CASE: inflation in progress - inflating over a stack-lock.
     *       // Some other thread is converting from stack-locked to inflated.
     *       // Only that thread can complete inflation -- other threads must wait.
     *       // The INFLATING value is transient.
     *       // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
     *       // We could always eliminate polling by parking the thread on some auxiliary list.
     *       if (mark == markOopDesc::INFLATING()) {                                    # 锁是否在膨胀（从无锁/轻量级锁 膨胀到重量级锁）状态
     *          TEVENT (Inflate: spin while INFLATING) ;
     *          ReadStableMark(object) ;
     *          continue ;
     *       }
     *
     *       // CASE: stack-locked
     *       // Could be stack-locked either by this thread or by some other thread.
     *       //
     *       // Note that we allocate the objectmonitor speculatively, _before_ attempting
     *       // to install INFLATING into the mark word.  We originally installed INFLATING,
     *       // allocated the objectmonitor, and then finally STed the address of the
     *       // objectmonitor into the mark.  This was correct, but artificially lengthened
     *       // the interval in which INFLATED appeared in the mark, thus increasing
     *       // the odds of inflation contention.
     *       //
     *       // We now use per-thread private objectmonitor free lists.
     *       // These list are reprovisioned from the global free list outside the
     *       // critical INFLATING...ST interval.  A thread can transfer
     *       // multiple objectmonitors en-mass from the global free list to its local free list.
     *       // This reduces coherency traffic and lock contention on the global free list.
     *       // Using such local free lists, it doesn't matter if the omAlloc() call appears
     *       // before or after the CAS(INFLATING) operation.
     *       // See the comments in omAlloc().
     *
     *       if (mark->has_locker()) {              # 如果是有锁状态
     *           ObjectMonitor * m = omAlloc (Self) ;   # 开辟空间 获取Monitor对象
     *           // Optimistically prepare the objectmonitor - anticipate successful CAS
     *           // We do this before the CAS in order to minimize the length of time
     *           // in which INFLATING appears in the mark.
     *           m->Recycle();                          # 初始化Monitor
     *           m->_Responsible  = NULL ;
     *           m->OwnerIsThread = 0 ;
     *           m->_recursions   = 0 ;
     *           m->_SpinDuration = ObjectMonitor::Knob_SpinLimit ;   // Consider: maintain by type/class
     *
     *           markOop cmp = (markOop) Atomic::cmpxchg_ptr (markOopDesc::INFLATING(), object->mark_addr(), mark) ; # 修改对象状态cas
     *           if (cmp != mark) {                                                 # cas失败
     *              omRelease (Self, m, true) ;                                     # 释放monitor
     *              continue ;       // Interference -- just retry                  # 重新循环
     *           }
     *
     *           // We've successfully installed INFLATING (0) into the mark-word.
     *           // This is the only case where 0 will appear in a mark-work.
     *           // Only the singular thread that successfully swings the mark-word
     *           // to 0 can perform (or more precisely, complete) inflation.
     *           //
     *           // Why do we CAS a 0 into the mark-word instead of just CASing the
     *           // mark-word from the stack-locked value directly to the new inflated state?
     *           // Consider what happens when a thread unlocks a stack-locked object.
     *           // It attempts to use CAS to swing the displaced header value from the
     *           // on-stack basiclock back into the object header.  Recall also that the
     *           // header value (hashcode, etc) can reside in (a) the object header, or
     *           // (b) a displaced header associated with the stack-lock, or (c) a displaced
     *           // header in an objectMonitor.  The inflate() routine must copy the header
     *           // value from the basiclock on the owner's stack to the objectMonitor, all
     *           // the while preserving the hashCode stability invariants.  If the owner
     *           // decides to release the lock while the value is 0, the unlock will fail
     *           // and control will eventually pass from slow_exit() to inflate.  The owner
     *           // will then spin, waiting for the 0 value to disappear.   Put another way,
     *           // the 0 causes the owner to stall if the owner happens to try to
     *           // drop the lock (restoring the header from the basiclock to the object)
     *           // while inflation is in-progress.  This protocol avoids races that
     *           // would otherwise permit hashCode values to change or "flicker" for an object.
     *           // Critically, while object->mark is 0 mark->displaced_mark_helper() is stable.
     *           // 0 serves as a "BUSY" inflate-in-progress indicator.
     *
     *
     *           // fetch the displaced mark from the owner's stack.
     *           // The owner can't die or unwind past the lock while our INFLATING
     *           // object is in the mark.  Furthermore the owner can't complete
     *           // an unlock on the object, either.
     *           markOop dmw = mark->displaced_mark_helper() ;          # 获取对象头
     *           assert (dmw->is_neutral(), "invariant") ;
     *
     *           // Setup monitor fields to proper values -- prepare the monitor
     *           m->set_header(dmw) ;                                    # 将displaced对象头设置到monitor中
     *
     *           // Optimization: if the mark->locker stack address is associated
     *           // with this thread we could simply set m->_owner = Self and
     *           // m->OwnerIsThread = 1. Note that a thread can inflate an object
     *           // that it has stack-locked -- as might happen in wait() -- directly
     *           // with CAS.  That is, we can avoid the xchg-NULL .... ST idiom.
     *           m->set_owner(mark->locker());
     *           m->set_object(object);                                # 设置锁对象
     *           // TODO-FIXME: assert BasicLock->dhw != 0.
     *
     *           // Must preserve store ordering. The monitor state must
     *           // be stable at the time of publishing the monitor address.
     *           guarantee (object->mark() == markOopDesc::INFLATING(), "invariant") ;
     *           object->release_set_mark(markOopDesc::encode(m));  # 将锁对象的对象头设置成指向monitor的指针（重量级锁状态）
     *
     *           // Hopefully the performance counters are allocated on distinct cache lines
     *           // to avoid false sharing on MP systems ...
     *           if (ObjectMonitor::_sync_Inflations != NULL) ObjectMonitor::_sync_Inflations->inc() ;
     *           TEVENT(Inflate: overwrite stacklock) ;
     *           if (TraceMonitorInflation) {
     *             if (object->is_instance()) {
     *               ResourceMark rm;
     *               tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
     *                 (void *) object, (intptr_t) object->mark(),
     *                 object->klass()->external_name());
     *             }
     *           }
     *           return m ;
     *       }
     *
     *       // CASE: neutral
     *       // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
     *       // If we know we're inflating for entry it's better to inflate by swinging a
     *       // pre-locked objectMonitor pointer into the object header.   A successful
     *       // CAS inflates the object *and* confers ownership to the inflating thread.
     *       // In the current implementation we use a 2-step mechanism where we CAS()
     *       // to inflate and then CAS() again to try to swing _owner from NULL to Self.
     *       // An inflateTry() method that we could call from fast_enter() and slow_enter()
     *       // would be useful.
     *
     *       assert (mark->is_neutral(), "invariant");   # 对象是无锁状态
     *       ObjectMonitor * m = omAlloc (Self) ;         # 创建monitor对象并初始化
     *       // prepare m for installation - set monitor to initial state
     *       m->Recycle();
     *       m->set_header(mark);
     *       m->set_owner(NULL);
     *       m->set_object(object);
     *       m->OwnerIsThread = 1 ;
     *       m->_recursions   = 0 ;
     *       m->_Responsible  = NULL ;
     *       m->_SpinDuration = ObjectMonitor::Knob_SpinLimit ;       // consider: keep metastats by type/class
     *
     *       if (Atomic::cmpxchg_ptr (markOopDesc::encode(m), object->mark_addr(), mark) != mark) {  # CAS将对象头替换为指向monitor的指针
     *           m->set_object (NULL) ;                                             # cas 失败 重新执行
     *           m->set_owner  (NULL) ;
     *           m->OwnerIsThread = 0 ;
     *           m->Recycle() ;
     *           omRelease (Self, m, true) ;
     *           m = NULL ;
     *           continue ;
     *           // interference - the markword changed - just retry.
     *           // The state-transitions are one-way, so there's no chance of
     *           // live-lock -- "Inflated" is an absorbing state.
     *       }
     *
     *       // Hopefully the performance counters are allocated on distinct
     *       // cache lines to avoid false sharing on MP systems ...
     *       if (ObjectMonitor::_sync_Inflations != NULL) ObjectMonitor::_sync_Inflations->inc() ;
     *       TEVENT(Inflate: overwrite neutral) ;
     *       if (TraceMonitorInflation) {
     *         if (object->is_instance()) {
     *           ResourceMark rm;
     *           tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
     *             (void *) object, (intptr_t) object->mark(),
     *             object->klass()->external_name());
     *         }
     *       }
     *       return m ;
     *   }
     * }
     */


    /**
     * ReadStableMark 方法
     *
     * static markOop ReadStableMark (oop obj) {
     *   markOop mark = obj->mark() ;
     *   if (!mark->is_being_inflated()) {    # 判断是否正在膨胀中(INFLATING)  不在膨胀中则直接返回
     *     return mark ;       // normal fast-path return
     *   }
     *
     *   int its = 0 ;
     *   for (;;) {
     *     markOop mark = obj->mark() ;
     *     if (!mark->is_being_inflated()) {  # 判断是否正在膨胀中(INFLATING)  不在膨胀中则直接返回
     *       return mark ;    // normal fast-path return
     *     }
     *
     *     // The object is being inflated by some other thread.
     *     // The caller of ReadStableMark() must wait for inflation to complete.
     *     // Avoid live-lock
     *     // TODO: consider calling SafepointSynchronize::do_call_back() while
     *     // spinning to see if there's a safepoint pending.  If so, immediately
     *     // yielding or blocking would be appropriate.  Avoid spinning while
     *     // there is a safepoint pending.
     *     // TODO: add inflation contention performance counters.
     *     // TODO: restrict the aggregate number of spinners.
     *
     *     ++its ;
     *     if (its > 10000 || !os::is_MP()) {  # 自旋10000次 或者cpu是单核的
     *        if (its & 1) {
     *          os::NakedYield() ;                       # 让出cpu执行权
     *          TEVENT (Inflate: INFLATING - yield) ;
     *        } else {
     *          // Note that the following code attenuates the livelock problem but is not
     *          // a complete remedy.  A more complete solution would require that the inflating
     *          // thread hold the associated inflation lock.  The following code simply restricts
     *          // the number of spinners to at most one.  We'll have N-2 threads blocked
     *          // on the inflationlock, 1 thread holding the inflation lock and using
     *          // a yield/park strategy, and 1 thread in the midst of inflation.
     *          // A more refined approach would be to change the encoding of INFLATING
     *          // to allow encapsulation of a native thread pointer.  Threads waiting for
     *          // inflation to complete would use CAS to push themselves onto a singly linked
     *          // list rooted at the markword.  Once enqueued, they'd loop, checking a per-thread flag
     *          // and calling park().  When inflation was complete the thread that accomplished inflation
     *          // would detach the list and set the markword to inflated with a single CAS and
     *          // then for each thread on the list, set the flag and unpark() the thread.
     *          // This is conceptually similar to muxAcquire-muxRelease, except that muxRelease
     *          // wakes at most one thread whereas we need to wake the entire list.
     *          int ix = (cast_from_oop<intptr_t>(obj) >> 5) & (NINFLATIONLOCKS-1) ;
     *          int YieldThenBlock = 0 ;
     *          assert (ix >= 0 && ix < NINFLATIONLOCKS, "invariant") ;
     *          assert ((NINFLATIONLOCKS & (NINFLATIONLOCKS-1)) == 0, "invariant") ;
     *          Thread::muxAcquire (InflationLocks + ix, "InflationLock") ;
     *          while (obj->mark() == markOopDesc::INFLATING()) {
     *            // Beware: NakedYield() is advisory and has almost no effect on some platforms
     *            // so we periodically call Self->_ParkEvent->park(1).
     *            // We use a mixed spin/yield/block mechanism.
     *            if ((YieldThenBlock++) >= 16) {
     *               Thread::current()->_ParkEvent->park(1) ;
     *            } else {
     *               os::NakedYield() ;
     *            }
     *          }
     *          Thread::muxRelease (InflationLocks + ix ) ;
     *          TEVENT (Inflate: INFLATING - yield/park) ;
     *        }
     *     } else {   # 多核cpu
     *        SpinPause() ;       // SMP-polite spinning   # 自旋
     *     }
     *   }
     * }
     */


    /**
     * ObjectMonitor::enter 方法
     *
     * void ATTR ObjectMonitor::enter(TRAPS) {
     *   // The following code is ordered to check the most common cases first
     *   // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
     *   Thread * const Self = THREAD ;
     *   void * cur ;
     *
     *   cur = Atomic::cmpxchg_ptr (Self, &_owner, NULL) ;  # CAS将当前线程设置为monitor的owner
     *   if (cur == NULL) {                                # 加锁成功
     *      // Either ASSERT _recursions == 0 or explicitly set _recursions = 0.
     *      assert (_recursions == 0   , "invariant") ;
     *      assert (_owner      == Self, "invariant") ;
     *      // CONSIDER: set or assert OwnerIsThread == 1
     *      return ;
     *   }
     *
     *   if (cur == Self) {                               # 如果返回的值是当前线程  则进行锁的重入
     *      // TODO-FIXME: check for integer overflow!  BUGID 6557169.
     *      _recursions ++ ;
     *      return ;
     *   }
     *
     *   if (Self->is_lock_owned ((address)cur)) {         # 判断该地址是否在本线程栈中  如果是则表示 是轻量级锁重入
     *     assert (_recursions == 0, "internal state error");
     *     _recursions = 1 ;
     *     // Commute owner from a thread-specific on-stack BasicLockObject address to
     *     // a full-fledged "Thread *".
     *     _owner = Self ;
     *     OwnerIsThread = 1 ;
     *     return ;
     *   }
     *
     *   // We've encountered genuine contention.
     *   assert (Self->_Stalled == 0, "invariant") ;
     *   Self->_Stalled = intptr_t(this) ;
     *
     *   // Try one round of spinning *before* enqueueing Self
     *   // and before going through the awkward and expensive state
     *   // transitions.  The following spin is strictly optional ...
     *   // Note that if we acquire the monitor from an initial spin
     *   // we forgo posting JVMTI events and firing DTRACE probes.
     *   if (Knob_SpinEarly && TrySpin (Self) > 0) {                # 如果执行到这里代表加锁失败   TrySpin为自适应自旋算法 进行自适应自旋如果设置了固定自旋则在内部代替
     *      assert (_owner == Self      , "invariant") ;
     *      assert (_recursions == 0    , "invariant") ;
     *      assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
     *      Self->_Stalled = 0 ;
     *      return ;
     *   }
     *
     *   assert (_owner != Self          , "invariant") ;
     *   assert (_succ  != Self          , "invariant") ;
     *   assert (Self->is_Java_thread()  , "invariant") ;
     *   JavaThread * jt = (JavaThread *) Self ;                            # 执行到者代表自旋之后也未拿到锁
     *   assert (!SafepointSynchronize::is_at_safepoint(), "invariant") ;
     *   assert (jt->thread_state() != _thread_blocked   , "invariant") ;
     *   assert (this->object() != NULL  , "invariant") ;
     *   assert (_count >= 0, "invariant") ;
     *
     *   // Prevent deflation at STW-time.  See deflate_idle_monitors() and is_busy().
     *   // Ensure the object-monitor relationship remains stable while there's contention.
     *   Atomic::inc_ptr(&_count);
     *
     *   EventJavaMonitorEnter event;
     *
     *   { // Change java thread status to indicate blocked on monitor enter.
     *     JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this);
     *
     *     DTRACE_MONITOR_PROBE(contended__enter, this, object(), jt);
     *     if (JvmtiExport::should_post_monitor_contended_enter()) {
     *       JvmtiExport::post_monitor_contended_enter(jt, this);
     *
     *       // The current thread does not yet own the monitor and does not
     *       // yet appear on any queues that would get it made the successor.
     *       // This means that the JVMTI_EVENT_MONITOR_CONTENDED_ENTER event
     *       // handler cannot accidentally consume an unpark() meant for the
     *       // ParkEvent associated with this ObjectMonitor.
     *     }
     *
     *     OSThreadContendState osts(Self->osthread());
     *     ThreadBlockInVM tbivm(jt);
     *
     *     Self->set_current_pending_monitor(this);
     *
     *     // TODO-FIXME: change the following for(;;) loop to straight-line code.
     *     for (;;) {                                               # 死循环
     *       jt->set_suspend_equivalent();
     *       // cleared by handle_special_suspend_equivalent_condition()
     *       // or java_suspend_self()
     *
     *       EnterI (THREAD) ;          # 阻塞+自旋+cas获取锁
     *
     *       if (!ExitSuspendEquivalent(jt)) break ;      # 未被挂起则退出循环（已成功获取monitor）  否则释放锁并挂起自己后重试
     *
     *       //
     *       // We have acquired the contended monitor, but while we were
     *       // waiting another thread suspended us. We don't want to enter
     *       // the monitor while suspended because that would surprise the
     *       // thread that suspended us.
     *       //
     *           _recursions = 0 ;
     *       _succ = NULL ;
     *       exit (false, Self) ;
     *
     *       jt->java_suspend_self();
     *     }
     *     Self->set_current_pending_monitor(NULL);
     *
     *     // We cleared the pending monitor info since we've just gotten past
     *     // the enter-check-for-suspend dance and we now own the monitor free
     *     // and clear, i.e., it is no longer pending. The ThreadBlockInVM
     *     // destructor can go to a safepoint at the end of this block. If we
     *     // do a thread dump during that safepoint, then this thread will show
     *     // as having "-locked" the monitor, but the OS and java.lang.Thread
     *     // states will still report that the thread is blocked trying to
     *     // acquire it.
     *   }
     *
     *   Atomic::dec_ptr(&_count);
     *   assert (_count >= 0, "invariant") ;
     *   Self->_Stalled = 0 ;
     *
     *   // Must either set _recursions = 0 or ASSERT _recursions == 0.
     *   assert (_recursions == 0     , "invariant") ;
     *   assert (_owner == Self       , "invariant") ;
     *   assert (_succ  != Self       , "invariant") ;
     *   assert (((oop)(object()))->mark() == markOopDesc::encode(this), "invariant") ;
     *
     *   // The thread -- now the owner -- is back in vm mode.
     *   // Report the glorious news via TI,DTrace and jvmstat.
     *   // The probe effect is non-trivial.  All the reportage occurs
     *   // while we hold the monitor, increasing the length of the critical
     *   // section.  Amdahl's parallel speedup law comes vividly into play.
     *   //
     *   // Another option might be to aggregate the events (thread local or
     *   // per-monitor aggregation) and defer reporting until a more opportune
     *   // time -- such as next time some thread encounters contention but has
     *   // yet to acquire the lock.  While spinning, that thread could
     *   // increment JVMStat counters, etc.
     *
     *   DTRACE_MONITOR_PROBE(contended__entered, this, object(), jt);
     *   if (JvmtiExport::should_post_monitor_contended_entered()) {
     *     JvmtiExport::post_monitor_contended_entered(jt, this);
     *
     *     // The current thread already owns the monitor and is not going to
     *     // call park() for the remainder of the monitor enter protocol. So
     *     // it doesn't matter if the JVMTI_EVENT_MONITOR_CONTENDED_ENTERED
     *     // event handler consumed an unpark() issued by the thread that
     *     // just exited the monitor.
     *   }
     *
     *   if (event.should_commit()) {
     *     event.set_klass(((oop)this->object())->klass());
     *     event.set_previousOwner((TYPE_JAVALANGTHREAD)_previous_owner_tid);
     *     event.set_address((TYPE_ADDRESS)(uintptr_t)(this->object_addr()));
     *     event.commit();
     *   }
     *
     *   if (ObjectMonitor::_sync_ContendedLockAttempts != NULL) {
     *      ObjectMonitor::_sync_ContendedLockAttempts->inc() ;
     *   }
     * }
     */

}
