// Copyright (C) 2024 Kumo inc.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//
#include <pthread.h>
#include <execinfo.h>
#include <dlfcn.h>                               // dlsym
#include <fcntl.h>                               // O_RDONLY
#include <atomic>
#include <turbo/container/flat_map.h>
#include <turbo/base/fd_guard.h>
#include <turbo/files/file_util.h>
#include <memory>
#include <turbo/hash/m3.h>
#include <turbo/log/logging.h>
#include <turbo/memory/object_pool.h>
#include <kthread/internal/kutex.h>                       // kutex_*
#include <kthread/internal/processor.h>                   // cpu_relax, barrier
#include <kthread/internal/mutex.h>                       // kthread_mutex_t
#include <kthread/internal/sys_futex.h>
#include <kthread/internal/log.h>
#include <tally/tally.h>
#include <turbo/files/file_util.h>
#include <turbo/strings/cord.h>

// Weak declaration of glibc's private _dl_sym: lets us resolve the real
// pthread_mutex_{lock,unlock} without dlsym's malloc-backed dlerror path
// (see the deadlock backtrace above init_sys_mutex_lock below).
extern "C" {
extern void *__attribute__((weak)) _dl_sym(void *handle, const char *symbol, void *caller);
}

// Weak declaration of a fast stack unwinder (e.g. from tcmalloc/gperftools,
// see the backtrace below); null when not linked, then glibc backtrace() is used.
extern int __attribute__((weak)) GetStackTrace(void **result, int max_depth, int skip_count);

namespace kthread {
    // Warm up backtrace before main(). The first backtrace()/GetStackTrace call
    // may do one-time initialization; doing it here keeps that work out of the
    // sampled unlock path.
    void *dummy_buf[4];
    const int ALLOW_UNUSED dummy_bt = GetStackTrace
                                      ? GetStackTrace(dummy_buf, TURBO_ARRAYSIZE(dummy_buf), 0)
                                      : backtrace(dummy_buf, TURBO_ARRAYSIZE(dummy_buf));

// For controlling contentions collected per second.
    static tally::CollectorSpeedLimit g_cp_sl = {0, false, 0, 0};

    // Max entries kept in the dedup map before a flush to disk is forced.
    const size_t MAX_CACHED_CONTENTIONS = 512;
// Skip frames which are always same: the unlock function and submit_contention()
    const int SKIPPED_STACK_FRAMES = 2;

    // One sampled contention event (or, after deduplication, the aggregate of
    // all events sharing the same call stack). Handed to tally's collector and
    // consumed by ContentionProfiler::dump_and_destroy().
    struct SampledContention : public tally::Collected {
        // time taken by lock and unlock, normalized according to sampling_range
        int64_t duration_ns;
        // number of samples, normalized according to sampling_range
        double count;
        void *stack[26];      // backtrace; 26 keeps sizeof at 256, see static_assert below.
        int nframes;          // #elements in stack

        // Implement tally::Collected
        void dump_and_destroy(size_t round) override;

        void destroy() override;

        tally::CollectorSpeedLimit *speed_limit() override { return &g_cp_sl; }

        // MurmurHash3 of the stack frames, computed lazily and cached in
        // _hash_code. 0 means "not computed yet"; a computed hash of exactly 0
        // would simply be recomputed next call (harmless, result is deterministic).
        size_t hash_code() const {
            if (nframes == 0) {
                return 0;
            }
            if (_hash_code == 0) {
                _hash_code = 1;
                uint32_t seed = nframes;
                turbo::MurmurHash3_x86_32(stack, sizeof(void *) * nframes, seed, &_hash_code);
            }
            return _hash_code;
        }

    private:
        // Only creatable/recyclable through turbo::ObjectPool.
        friend turbo::ObjectPool<SampledContention>;

        SampledContention()
                : duration_ns(0), count(0), stack{nullptr}, nframes(0), _hash_code(0) {}

        ~SampledContention() override = default;

        mutable uint32_t _hash_code; // For combining samples with hashmap.
    };

    static_assert(sizeof(SampledContention) == 256, "be_friendly_to_allocator");

// Functor to compare contentions.
    struct ContentionEqual {
        bool operator()(const SampledContention *c1,
                        const SampledContention *c2) const {
            return c1->hash_code() == c2->hash_code() &&
                   c1->nframes == c2->nframes &&
                   memcmp(c1->stack, c2->stack, sizeof(void *) * c1->nframes) == 0;
        }
    };

// Functor to hash contentions.
    struct ContentionHash {
        size_t operator()(const SampledContention *c) const {
            return c->hash_code();
        }
    };

// The global context for contention profiler.
    class ContentionProfiler {
    public:
        // Maps a contention to itself; keyed by stack hash (ContentionHash)
        // with full-stack equality (ContentionEqual) to merge duplicates.
        typedef turbo::FlatMap<SampledContention *, SampledContention *,
                ContentionHash, ContentionEqual> ContentionMap;

        explicit ContentionProfiler(const char *name);

        ~ContentionProfiler();

        // Merge `c' into _dedup_map (destroying it) or store it as the
        // representative for its stack; flush when the map grows too large.
        void dump_and_destroy(SampledContention *c);

        // Write buffered data into resulting file. If `ending' is true, append
        // content of /proc/self/maps and retry writing until buffer is empty.
        void flush_to_disk(bool ending);

        // Write the file header and initialize _dedup_map on first use.
        void init_if_needed();

    private:
        bool _init;  // false before first dump_and_destroy is called
        bool _first_write;      // true if buffer was not written to file yet.
        std::string _filename;  // the file storing profiling result.
        turbo::Cord _disk_buf;  // temp buf before saving the file.
        ContentionMap _dedup_map; // combining same samples to make result smaller.
    };

    // Construct a profiler that will write its result to `name'. All file
    // I/O is deferred to the first flush, so construction is cheap.
    ContentionProfiler::ContentionProfiler(const char *name)
            : _init(false), _first_write(true), _filename(name) {}

    ContentionProfiler::~ContentionProfiler() {
        // Persist data only if dump_and_destroy() ran at least once; unused
        // instances (e.g. created then discarded by ContentionProfilerStart)
        // must not touch the output file.
        if (_init) {
            flush_to_disk(true);
        }
    }

    void ContentionProfiler::init_if_needed() {
        if (_init) {
            return;  // header already written, map already initialized
        }
        // Output is already in nanoseconds, so report cycles/second as 1e9.
        _disk_buf.append("--- contention\ncycles/second=1000000000\n");
        KCHECK_EQ(0, _dedup_map.init(1024, 60));
        _init = true;
    }

    // Merge `c' with a previously-seen contention sharing the same stack, or
    // remember it as the representative for that stack. Flushes to disk once
    // the dedup map exceeds MAX_CACHED_CONTENTIONS entries.
    void ContentionProfiler::dump_and_destroy(SampledContention *c) {
        init_if_needed();
        SampledContention **existing = _dedup_map.seek(c);
        if (existing != nullptr) {
            // Hot path: most contentions come from a handful of hotspots.
            (*existing)->duration_ns += c->duration_ns;
            (*existing)->count += c->count;
            c->destroy();
        } else {
            _dedup_map.insert(c, c);
        }
        if (_dedup_map.size() > MAX_CACHED_CONTENTIONS) {
            flush_to_disk(false);
        }
    }

    void ContentionProfiler::flush_to_disk(bool ending) {
        BT_VLOG << "flush_to_disk(ending=" << ending << ")";

        // Serialize contentions in _dedup_map into _disk_buf, one line per
        // unique stack: "<duration_ns> <count> @ <frame> <frame> ...".
        if (!_dedup_map.empty()) {
            BT_VLOG << "dedup_map=" << _dedup_map.size();
            for (ContentionMap::const_iterator
                         it = _dedup_map.begin(); it != _dedup_map.end(); ++it) {
                std::stringstream os;
                SampledContention *c = it->second;
                os << c->duration_ns << ' ' << (size_t) ceil(c->count) << " @";
                // Skip the constant top frames (unlock + submit_contention).
                for (int i = SKIPPED_STACK_FRAMES; i < c->nframes; ++i) {
                    os << ' ' << (void *) c->stack[i];
                }
                os << '\n';
                c->destroy();
                _disk_buf.append(os.str());
            }
            _dedup_map.clear();
        }

        // Append /proc/self/maps to the end of the contention file, required by
        // pprof.pl, otherwise the functions in sys libs are not interpreted.
        if (ending) {
            BT_VLOG << "Append /proc/self/maps";
            // Failures are not critical, don't return directly.
            std::string mem_maps;
            auto  r = turbo::read_file_to_string("/proc/self/maps", &mem_maps);
            if (r.ok()) {
                _disk_buf.append(mem_maps);
            } else {
                PKLOG(ERROR) << "Fail to open /proc/self/maps";
            }
        }
        // Write _disk_buf into _filename
        std::error_code error;
        turbo::FilePath path(_filename);
        auto dir = path.parent_path();
        if (!turbo::create_directories(dir, error)) {
            KLOG(ERROR) << "Fail to create directory=`" << dir
                       << "', " << error;
            return;
        }
        // Truncate on first write, append on later writes.
        // NOTE(review): `flag' is computed but never passed to
        // turbo::write_to_file below, so the O_TRUNC/O_APPEND intent is not
        // applied — later flushes may overwrite earlier data instead of
        // appending. Confirm write_to_file's truncate/append semantics and
        // wire `flag' through (or remove this dead computation).
        int flag = O_APPEND;
        if (_first_write) {
            _first_write = false;
            flag = O_TRUNC;
        }
        auto content = _disk_buf.flatten();
        auto r = turbo::write_to_file(_filename, content);
        if(!r.ok()) {
            PKLOG(ERROR) << "Fail to write into " << _filename;
        }
        _disk_buf.clear();
    }

    // If contention profiler is on, this variable will be set with a valid
    // instance. nullptr otherwise.
    TURBO_CACHELINE_ALIGNED static ContentionProfiler *g_cp = nullptr;
    // Need this version to solve an issue that non-empty entries left by
    // previous contention profilers should be detected and overwritten.
    static uint64_t g_cp_version = 0;
    // Protecting accesses to g_cp.
    static std::mutex g_cp_mutex;

    // The map storing information for profiling pthread_mutex. Different from
    // kthread_mutex, we can't save stuff into pthread_mutex, we neither can
    // save the info in TLS reliably, since a mutex can be unlocked in a different
    // thread from the one locked (although rare)
    // This map must be very fast, since it's accessed inside the lock.
    // Layout of the map:
    //  * Align each entry by cacheline so that different threads do not collide.
    //  * Hash the mutex into the map by its address. If the entry is occupied,
    //    cancel sampling.
    // The canceling rate should be small provided that programs are unlikely to
    // lock a lot of mutexes simultaneously.
    const size_t MUTEX_MAP_SIZE = 1024;
    static_assert((MUTEX_MAP_SIZE & (MUTEX_MAP_SIZE - 1)) == 0, "must_be_power_of_2");
    struct TURBO_CACHELINE_ALIGNED MutexMapEntry {
        // High 16 bits: rolling g_cp_version; low 48 bits: mutex address;
        // 0 means the slot is free. See add_pthread_contention_site().
        std::atomic<uint64_t> versioned_mutex;
        kthread_contention_site_t csite;
    };
    static MutexMapEntry g_mutex_map[MUTEX_MAP_SIZE] = {}; // zero-initialize

    void SampledContention::dump_and_destroy(size_t /*round*/) {
        // Racy pre-check avoids taking g_cp_mutex when profiling is off; the
        // pointer is re-checked under the mutex before use.
        if (g_cp) {
            // Must be protected with mutex to avoid race with deletion of ctx.
            // dump_and_destroy is called from dumping thread only so this mutex
            // is not contended at most of time.
            std::unique_lock lk(g_cp_mutex);
            if (g_cp) {
                g_cp->dump_and_destroy(this);
                return;
            }
        }
        // Profiler stopped between sampling and dumping: just recycle.
        destroy();
    }

    void SampledContention::destroy() {
        // Reset the cached hash so a recycled pooled object recomputes it.
        _hash_code = 0;
        turbo::return_object(this);
    }

    // Remember the conflict hashes for troubleshooting, should be 0 at most of time.
    static std::atomic<int64_t> g_nconflicthash = 0;

    // Reader for the conflict counter. NOTE(review): not referenced anywhere
    // in this file (the gauge in ContentionProfilerStart uses a lambda);
    // confirm it is used elsewhere or remove it.
    static int64_t get_nconflicthash(void *) {
        return g_nconflicthash.load(std::memory_order_relaxed);
    }

    // Start profiling contention. Returns false if `filename' is null or a
    // profiler is already running.
    bool ContentionProfilerStart(const char *filename) {
        if (filename == nullptr) {
            KLOG(ERROR) << "Parameter [filename] is nullptr";
            return false;
        }
        // g_cp is also the flag marking start/stop.
        if (g_cp) {
            return false;
        }

        // Create related global tally lazily.
        static tally::FuncGauge<int64_t> g_nconflicthash_var
                ("contention_profiler_conflict_hash", "help", []() {
                    return g_nconflicthash.load(std::memory_order_relaxed);
                });
        static tally::DisplaySamplingRatio g_sampling_ratio_var(
                "contention_profiler_sampling_ratio", &g_cp_sl);

        // Optimistic locking. A not-used ContentionProfiler does not write file.
        // (Its destructor skips the file when dump_and_destroy never ran, so
        // losing the race below and destroying `ctx' is harmless.)
        std::unique_ptr<ContentionProfiler> ctx(new ContentionProfiler(filename));
        {
            std::unique_lock lk(g_cp_mutex);
            if (g_cp) {
                return false;
            }
            g_cp = ctx.release();
            ++g_cp_version;  // invalidate non-empty entries that may exist.
        }
        return true;
    }

    // Stop contention profiler. Writes out the result file (destructor of the
    // profiler flushes); logs an error if no profiler is running.
    void ContentionProfilerStop() {
        ContentionProfiler *ctx = nullptr;
        if (g_cp) {
            std::unique_lock mu(g_cp_mutex);
            if (g_cp) {
                ctx = g_cp;
                g_cp = nullptr;
                mu.unlock();

                // make sure it's initialized in case no sample was gathered,
                // otherwise nothing will be written and succeeding pprof will fail.
                ctx->init_if_needed();
                // Deletion is safe because usages of g_cp are inside g_cp_mutex.
                delete ctx;
                return;
            }
        }
        KLOG(ERROR) << "Contention profiler is not started!";
    }

    // A contention site is valid iff sampling was enabled for it.
    bool is_contention_site_valid(const kthread_contention_site_t &cs) {
        return cs.sampling_range != 0;
    }

    // Invalidate a contention site by clearing its sampling range.
    void make_contention_site_invalid(kthread_contention_site_t *cs) {
        cs->sampling_range = 0;
    }

// Replace pthread_mutex_lock and pthread_mutex_unlock:
// First call to sys_pthread_mutex_lock sets sys_pthread_mutex_lock to the
// real function so that next calls go to the real function directly. This
// technique avoids calling pthread_once each time.
    typedef int (*MutexOp)(pthread_mutex_t *);

    int first_sys_pthread_mutex_lock(pthread_mutex_t *mutex);

    int first_sys_pthread_mutex_unlock(pthread_mutex_t *mutex);

    // Start at the bootstrap stubs; rebound to the real pthread functions by
    // init_sys_mutex_lock() (driven by the pthread_once below).
    static MutexOp sys_pthread_mutex_lock = first_sys_pthread_mutex_lock;
    static MutexOp sys_pthread_mutex_unlock = first_sys_pthread_mutex_unlock;
    static pthread_once_t init_sys_mutex_lock_once = PTHREAD_ONCE_INIT;

// dlsym may call malloc to allocate space for dlerror and causes contention
// profiler to deadlock at boostraping when the program is linked with
// libunwind. The deadlock bt:
//   #0  0x00007effddc99b80 in __nanosleep_nocancel () at ../sysdeps/unix/syscall-template.S:81
//   #1  0x00000000004b4df7 in kutil::internal::SpinLockDelay(int volatile*, int, int) ()
//   #2  0x00000000004b4d57 in SpinLock::SlowLock() ()
//   #3  0x00000000004b4a63 in tcmalloc::ThreadCache::InitModule() ()
//   #4  0x00000000004aa2b5 in tcmalloc::ThreadCache::GetCache() ()
//   #5  0x000000000040c6c5 in (anonymous namespace)::do_malloc_no_errno(unsigned long) [clone.part.16] ()
//   #6  0x00000000006fc125 in tc_calloc ()
//   #7  0x00007effdd245690 in _dlerror_run (operate=operate@entry=0x7effdd245130 <dlsym_doit>, args=args@entry=0x7fff483dedf0) at dlerror.c:141
//   #8  0x00007effdd245198 in __dlsym (handle=<optimized out>, name=<optimized out>) at dlsym.c:70
//   #9  0x0000000000666517 in kthread::init_sys_mutex_lock () at kthread/mutex.cpp:358
//   #10 0x00007effddc97a90 in pthread_once () at ../nptl/sysdeps/unix/sysv/linux/x86_64/pthread_once.S:103
//   #11 0x000000000066649f in kthread::first_sys_pthread_mutex_lock (mutex=0xbaf880 <_ULx86_64_lock>) at kthread/mutex.cpp:366
//   #12 0x00000000006678bc in pthread_mutex_lock_impl (mutex=0xbaf880 <_ULx86_64_lock>) at kthread/mutex.cpp:489
//   #13 pthread_mutex_lock (__mutex=__mutex@entry=0xbaf880 <_ULx86_64_lock>) at kthread/mutex.cpp:751
//   #14 0x00000000004c6ea1 in _ULx86_64_init () at x86_64/Gglobal.c:83
//   #15 0x00000000004c44fb in _ULx86_64_init_local (cursor=0x7fff483df340, uc=0x7fff483def90) at x86_64/Ginit_local.c:47
//   #16 0x00000000004b5012 in GetStackTrace(void**, int, int) ()
//   #17 0x00000000004b2095 in tcmalloc::PageHeap::GrowHeap(unsigned long) ()
//   #18 0x00000000004b23a3 in tcmalloc::PageHeap::New(unsigned long) ()
//   #19 0x00000000004ad457 in tcmalloc::CentralFreeList::Populate() ()
//   #20 0x00000000004ad628 in tcmalloc::CentralFreeList::FetchFromSpansSafe() ()
//   #21 0x00000000004ad6a3 in tcmalloc::CentralFreeList::RemoveRange(void**, void**, int) ()
//   #22 0x00000000004b3ed3 in tcmalloc::ThreadCache::FetchFromCentralCache(unsigned long, unsigned long) ()
//   #23 0x00000000006fbb9a in tc_malloc ()
// Call _dl_sym which is a private function in glibc to workaround the malloc
// causing deadlock temporarily. This fix is hardly portable.

    // Resolve the real pthread_mutex_{lock,unlock} from the next object in
    // the link order. Runs exactly once via pthread_once.
    static void init_sys_mutex_lock() {
#if defined(OS_LINUX)
        // TODO: may need dlvsym when GLIBC has multiple versions of a same symbol.
        // http://blog.fesnel.com/blog/2009/08/25/preloading-with-multiple-symbol-versions
        if (_dl_sym) {
            // Use glibc's private _dl_sym to avoid dlsym's malloc-ing dlerror
            // bookkeeping (see the deadlock backtrace above).
            sys_pthread_mutex_lock = (MutexOp) _dl_sym(RTLD_NEXT, "pthread_mutex_lock", (void *) init_sys_mutex_lock);
            sys_pthread_mutex_unlock = (MutexOp) _dl_sym(RTLD_NEXT, "pthread_mutex_unlock",
                                                         (void *) init_sys_mutex_lock);
        } else {
            // _dl_sym may be undefined reference in some system, fallback to dlsym
            sys_pthread_mutex_lock = (MutexOp) dlsym(RTLD_NEXT, "pthread_mutex_lock");
            sys_pthread_mutex_unlock = (MutexOp) dlsym(RTLD_NEXT, "pthread_mutex_unlock");
        }
#elif defined(OS_MACOSX)
        // TODO: look workaround for dlsym on mac
        sys_pthread_mutex_lock = (MutexOp)dlsym(RTLD_NEXT, "pthread_mutex_lock");
        sys_pthread_mutex_unlock = (MutexOp)dlsym(RTLD_NEXT, "pthread_mutex_unlock");
#endif
    }

// Make sure pthread functions are ready before main().
    const int ALLOW_UNUSED dummy = pthread_once(&init_sys_mutex_lock_once, init_sys_mutex_lock);

    // Bootstrap stub: resolve the real symbols once, then forward. After
    // init_sys_mutex_lock runs, sys_pthread_mutex_lock points at the real
    // function and this stub is no longer reached.
    int first_sys_pthread_mutex_lock(pthread_mutex_t *mutex) {
        pthread_once(&init_sys_mutex_lock_once, init_sys_mutex_lock);
        return sys_pthread_mutex_lock(mutex);
    }

    // Same bootstrap for unlock.
    int first_sys_pthread_mutex_unlock(pthread_mutex_t *mutex) {
        pthread_once(&init_sys_mutex_lock_once, init_sys_mutex_lock);
        return sys_pthread_mutex_unlock(mutex);
    }

    // Hash a mutex address into g_mutex_map through the murmur 64-bit
    // finalizer, spreading nearby addresses across the table.
    template<typename Mutex>
    inline uint64_t hash_mutex_ptr(const Mutex *m) {
        return turbo::fmix64(reinterpret_cast<uint64_t>(m));
    }

// Mark being inside locking so that pthread_mutex calls inside collecting
// code are never sampled, otherwise deadlock may occur.
    static __thread bool tls_inside_lock = false;

// Speed up with TLS:
//   Most pthread_mutex are locked and unlocked in the same thread. Putting
//   contention information in TLS avoids collisions that may occur in
//   g_mutex_map. However when user unlocks in another thread, the info cached
//   in the locking thread is not removed, making the space bloated. We use a
//   simple strategy to solve the issue: If a thread has enough thread-local
//   space to store the info, save it, otherwise save it in g_mutex_map. For
//   a program that locks and unlocks in the same thread and does not lock a
//   lot of mutexes simultaneously, this strategy always uses the TLS.
#ifndef DONT_SPEEDUP_PTHREAD_CONTENTION_PROFILER_WITH_TLS
    const int TLS_MAX_COUNT = 3;
    struct MutexAndContentionSite {
        void *mutex;
        kthread_contention_site_t csite;
    };
    struct TLSPthreadContentionSites {
        int count;              // #valid entries in `list'
        uint64_t cp_version;    // g_cp_version at last use; mismatch drops stale entries
        MutexAndContentionSite list[TLS_MAX_COUNT];
    };
    static __thread TLSPthreadContentionSites tls_csites = {0, 0, {}};
#endif  // DONT_SPEEDUP_PTHREAD_CONTENTION_PROFILER_WITH_TLS

// Guaranteed in linux/win.
    const int PTR_BITS = 48;

    // Try to reserve the hash-mapped slot for `mutex' and return its
    // contention site; returns nullptr (and bumps the conflict counter) when
    // the slot is held by another live mutex.
    template<typename Mutex>
    inline kthread_contention_site_t *
    add_pthread_contention_site(const Mutex *mutex) {
        MutexMapEntry &entry = g_mutex_map[hash_mutex_ptr(mutex) & (MUTEX_MAP_SIZE - 1)];
        std::atomic<uint64_t> &m = entry.versioned_mutex;
        uint64_t expected = m.load(std::memory_order_relaxed);
        // If the entry is not used or used by previous profiler, try to CAS it.
        // Word layout: high 16 bits = rolling g_cp_version, low 48 bits =
        // mutex address (pointers fit in 48 bits, see PTR_BITS).
        if (expected == 0 ||
            (expected >> PTR_BITS) != (g_cp_version & ((1 << (64 - PTR_BITS)) - 1))) {
            uint64_t desired = (g_cp_version << PTR_BITS) | (uint64_t) mutex;
            if (m.compare_exchange_strong(
                    expected, desired, std::memory_order_acquire)) {
                return &entry.csite;
            }
        }
        g_nconflicthash.fetch_add(1, std::memory_order_relaxed);
        return nullptr;
    }

    // If `mutex' owns its hash-mapped slot, copy the csite into *saved_csite,
    // release the slot and return true; otherwise return false.
    template<typename Mutex>
    inline bool remove_pthread_contention_site(const Mutex *mutex,
                                               kthread_contention_site_t *saved_csite) {
        MutexMapEntry &entry = g_mutex_map[hash_mutex_ptr(mutex) & (MUTEX_MAP_SIZE - 1)];
        std::atomic<uint64_t> &m = entry.versioned_mutex;
        if ((m.load(std::memory_order_relaxed) & ((((uint64_t) 1) << PTR_BITS) - 1))
            != (uint64_t) mutex) {
            // This branch should be the most common case since most locks are
            // neither contended nor sampled. We have one memory indirection and
            // several bitwise operations here, the cost should be ~ 5-50ns
            return false;
        }
        // Although this branch is inside a contended lock, we should also make it
        // as simple as possible because altering the critical section too much
        // may make unpredictable impact to thread interleaving status, which
        // makes profiling result less accurate.
        *saved_csite = entry.csite;
        make_contention_site_invalid(&entry.csite);
        m.store(0, std::memory_order_release);
        return true;
    }

    // Submit the contention along with the callsite('s stacktrace)
    void submit_contention(const kthread_contention_site_t &csite, int64_t now_ns) {
        // Guard against re-entrant sampling: backtrace()/submit() below may
        // themselves take pthread mutexes.
        tls_inside_lock = true;
        auto sc = turbo::get_object<SampledContention>();
        // Normalize duration_us and count so that they're addable in later
        // processings. Notice that sampling_range is adjusted periodically by
        // collecting thread.
        sc->duration_ns = csite.duration_ns * tally::COLLECTOR_SAMPLING_BASE
                          / csite.sampling_range;
        sc->count = tally::COLLECTOR_SAMPLING_BASE / (double) csite.sampling_range;
        sc->nframes = GetStackTrace
                      ? GetStackTrace(sc->stack, TURBO_ARRAYSIZE(sc->stack), 0)
                      : backtrace(sc->stack, TURBO_ARRAYSIZE(sc->stack)); // may lock
        sc->submit(now_ns / 1000);  // may lock
        tls_inside_lock = false;
    }

    namespace internal {
        // Raw lock/trylock/unlock for pthread_mutex_t, bypassing sampling.
        int pthread_mutex_lock_internal(pthread_mutex_t *mutex) {
            return sys_pthread_mutex_lock(mutex);
        }

        int pthread_mutex_trylock_internal(pthread_mutex_t *mutex) {
            return ::pthread_mutex_trylock(mutex);
        }

        int pthread_mutex_unlock_internal(pthread_mutex_t *mutex) {
            return sys_pthread_mutex_unlock(mutex);
        }

        // Same operations for FastPthreadMutex so the templates below work
        // with either mutex type.
        int pthread_mutex_lock_internal(FastPthreadMutex *mutex) {
            mutex->lock();
            return 0;
        }

        int pthread_mutex_trylock_internal(FastPthreadMutex *mutex) {
            return mutex->try_lock() ? 0 : EBUSY;
        }

        int pthread_mutex_unlock_internal(FastPthreadMutex *mutex) {
            mutex->unlock();
            return 0;
        }

        // Profiling-aware lock: try-lock first so uncontended locks pay almost
        // nothing; when contended (and the collector agrees to sample), record
        // the waiting time into a TLS slot or the global g_mutex_map.
        template<typename Mutex>
        int pthread_mutex_lock_impl(Mutex *mutex) {
            // Don't change behavior of lock when profiler is off.
            if (!g_cp ||
                // collecting code including backtrace() and submit() may call
                // pthread_mutex_lock and cause deadlock. Don't sample.
                tls_inside_lock) {
                return pthread_mutex_lock_internal(mutex);
            }
            // Don't slow down non-contended locks.
            int rc = pthread_mutex_trylock_internal(mutex);
            if (rc != EBUSY) {
                return rc;
            }
            // Ask tally::Collector if this (contended) locking should be sampled
            const size_t sampling_range = tally::is_collectable(&g_cp_sl);

            kthread_contention_site_t *csite = nullptr;
#ifndef DONT_SPEEDUP_PTHREAD_CONTENTION_PROFILER_WITH_TLS
            TLSPthreadContentionSites &fast_alt = tls_csites;
            if (fast_alt.cp_version != g_cp_version) {
                // Entries belong to a previous profiler run; drop them all.
                fast_alt.cp_version = g_cp_version;
                fast_alt.count = 0;
            }
            if (fast_alt.count < TLS_MAX_COUNT) {
                // Reserve a TLS slot even when not sampling, so the matching
                // unlock finds (and clears) the entry cheaply.
                MutexAndContentionSite &entry = fast_alt.list[fast_alt.count++];
                entry.mutex = mutex;
                csite = &entry.csite;
                if (!sampling_range) {
                    make_contention_site_invalid(&entry.csite);
                    return pthread_mutex_lock_internal(mutex);
                }
            }
#endif
            if (!sampling_range) {  // don't sample
                return pthread_mutex_lock_internal(mutex);
            }
            // Lock and monitor the waiting time.
            const int64_t start_ns = turbo::Time::current_nanoseconds();
            rc = pthread_mutex_lock_internal(mutex);
            if (!rc) { // Inside lock
                if (!csite) {
                    // TLS was full; fall back to the global hash map. On a
                    // hash conflict the sample is silently dropped.
                    csite = add_pthread_contention_site(mutex);
                    if (csite == nullptr) {
                        return rc;
                    }
                }
                csite->duration_ns = turbo::Time::current_nanoseconds() - start_ns;
                csite->sampling_range = sampling_range;
            } // else rare
            return rc;
        }

        // Profiling-aware unlock: retrieve the csite saved by the lock side
        // (TLS first, then g_mutex_map), release the mutex, then submit the
        // sample including the unlock cost itself.
        template<typename Mutex>
        int pthread_mutex_unlock_impl(Mutex *mutex) {
            // Don't change behavior of unlock when profiler is off.
            if (!g_cp || tls_inside_lock) {
                // This branch brings an issue that an entry created by
                // add_pthread_contention_site may not be cleared. Thus we add a
                // 16-bit rolling version in the entry to find out such entry.
                return pthread_mutex_unlock_internal(mutex);
            }
            int64_t unlock_start_ns = 0;
            bool miss_in_tls = true;
            kthread_contention_site_t saved_csite = {0, 0};
#ifndef DONT_SPEEDUP_PTHREAD_CONTENTION_PROFILER_WITH_TLS
            // Search TLS backwards: the most recently locked mutex is the most
            // likely to be unlocked first.
            TLSPthreadContentionSites &fast_alt = tls_csites;
            for (int i = fast_alt.count - 1; i >= 0; --i) {
                if (fast_alt.list[i].mutex == mutex) {
                    if (is_contention_site_valid(fast_alt.list[i].csite)) {
                        saved_csite = fast_alt.list[i].csite;
                        unlock_start_ns = turbo::Time::current_nanoseconds();
                    }
                    // Compact the array by moving the last entry into the hole.
                    fast_alt.list[i] = fast_alt.list[--fast_alt.count];
                    miss_in_tls = false;
                    break;
                }
            }
#endif
            // Check the map to see if the lock is sampled. Notice that we're still
            // inside critical section.
            if (miss_in_tls) {
                if (remove_pthread_contention_site(mutex, &saved_csite)) {
                    unlock_start_ns = turbo::Time::current_nanoseconds();
                }
            }
            const int rc = pthread_mutex_unlock_internal(mutex);
            // [Outside lock]
            if (unlock_start_ns) {
                // Fold the unlock cost into the sampled duration.
                const int64_t unlock_end_ns =turbo::Time::current_nanoseconds();
                saved_csite.duration_ns += unlock_end_ns - unlock_start_ns;
                submit_contention(saved_csite, unlock_end_ns);
            }
            return rc;
        }

    }

    // Non-template entry points used by the pthread interceptors at the
    // bottom of this file.
    int pthread_mutex_lock_impl(pthread_mutex_t *mutex) {
        return internal::pthread_mutex_lock_impl(mutex);
    }

    int pthread_mutex_unlock_impl(pthread_mutex_t *mutex) {
        return internal::pthread_mutex_unlock_impl(mutex);
    }

    // Implement kthread_mutex_t related functions
    // Byte-wise overlay of the 32-bit kutex word: byte 0 = locked flag,
    // byte 1 = contended flag (a waiter may be sleeping), rest is padding.
    struct MutexInternal {
        std::atomic<unsigned char> locked;
        std::atomic<unsigned char> contended;
        unsigned short padding;
    };

    const MutexInternal MUTEX_CONTENDED_RAW = {{1}, {1}, 0};
    const MutexInternal MUTEX_LOCKED_RAW = {{1}, {0}, 0};
// Define as macros rather than constants which can't be put in read-only
// section and affected by initialization-order fiasco.
#define KTHREAD_MUTEX_CONTENDED (*(const unsigned*)&kthread::MUTEX_CONTENDED_RAW)
#define KTHREAD_MUTEX_LOCKED (*(const unsigned*)&kthread::MUTEX_LOCKED_RAW)

    static_assert(sizeof(unsigned) == sizeof(MutexInternal),
                  "sizeof_mutex_internal_must_equal_unsigned");

    // Slow path of kthread_mutex_lock: mark the whole word contended and
    // sleep on the kutex until the lock is released to us.
    inline int mutex_lock_contended(kthread_mutex_t *m) {
        std::atomic<unsigned> *whole = (std::atomic<unsigned> *) m->kutex;
        while (whole->exchange(KTHREAD_MUTEX_CONTENDED) & KTHREAD_MUTEX_LOCKED) {
            if (kthread::kutex_wait(whole, KTHREAD_MUTEX_CONTENDED, nullptr) < 0 &&
                errno != EWOULDBLOCK && errno != EINTR/*note*/) {
                // a mutex lock should ignore interruptions in general since
                // user code is unlikely to check the return value.
                return errno;
            }
        }
        return 0;
    }

    // Same as mutex_lock_contended but bounded by an absolute deadline;
    // returns errno (e.g. ETIMEDOUT) when the wait fails non-retryably.
    inline int mutex_timedlock_contended(
            kthread_mutex_t *m, const struct timespec *__restrict abstime) {
        std::atomic<unsigned> *whole = (std::atomic<unsigned> *) m->kutex;
        while (whole->exchange(KTHREAD_MUTEX_CONTENDED) & KTHREAD_MUTEX_LOCKED) {
            if (kthread::kutex_wait(whole, KTHREAD_MUTEX_CONTENDED, abstime) < 0 &&
                errno != EWOULDBLOCK && errno != EINTR/*note*/) {
                // a mutex lock should ignore interruptions in general since
                // user code is unlikely to check the return value.
                return errno;
            }
        }
        return 0;
    }

    namespace internal {

        // Slow path: mark the word contended and sleep on the process-private
        // futex until the lock is released.
        int FastPthreadMutex::lock_contended() {
            std::atomic<unsigned> *whole = (std::atomic<unsigned> *) &_futex;
            while (whole->exchange(KTHREAD_MUTEX_CONTENDED) & KTHREAD_MUTEX_LOCKED) {
                if (futex_wait_private(whole, KTHREAD_MUTEX_CONTENDED, nullptr) < 0
                    && errno != EWOULDBLOCK) {
                    return errno;
                }
            }
            return 0;
        }

        void FastPthreadMutex::lock() {
            kthread::MutexInternal *split = (kthread::MutexInternal *) &_futex;
            // Fast path: set the locked byte; fall into the futex wait only
            // when it was already held.
            if (split->locked.exchange(1, std::memory_order_acquire)) {
                (void) lock_contended();
            }
        }

        bool FastPthreadMutex::try_lock() {
            kthread::MutexInternal *split = (kthread::MutexInternal *) &_futex;
            return !split->locked.exchange(1, std::memory_order_acquire);
        }

        void FastPthreadMutex::unlock() {
            std::atomic<unsigned> *whole = (std::atomic<unsigned> *) &_futex;
            const unsigned prev = whole->exchange(0, std::memory_order_release);
            // CAUTION: the mutex may be destroyed, check comments before kutex_create
            if (prev != KTHREAD_MUTEX_LOCKED) {
                // Contended flag was set: wake one waiter.
                futex_wake_private(whole, 1);
            }
        }

    } // namespace internal

    // Sampled variants: route through the profiling-aware impl (which has
    // FastPthreadMutex overloads) so contention on the inner mutex is also
    // recorded when the profiler is on.
    void FastPthreadMutex::lock() {
        internal::pthread_mutex_lock_impl(&_mutex);
    }

    void FastPthreadMutex::unlock() {
        internal::pthread_mutex_unlock_impl(&_mutex);
    }

} // namespace kthread

extern "C" {

int kthread_mutex_init(kthread_mutex_t *__restrict m,
                       const kthread_mutexattr_t *__restrict) {
    // Attributes are currently ignored.
    kthread::make_contention_site_invalid(&m->csite);
    m->kutex = kthread::kutex_create_checked<unsigned>();
    if (!m->kutex) {
        return ENOMEM;
    }
    *m->kutex = 0;  // unlocked, uncontended
    return 0;
}

int kthread_mutex_destroy(kthread_mutex_t *m) {
    kthread::kutex_destroy(m->kutex);
    return 0;
}

int kthread_mutex_trylock(kthread_mutex_t *m) {
    // Attempt the lock without waiting: set the `locked' byte and inspect
    // its previous value.
    kthread::MutexInternal *split = (kthread::MutexInternal *) m->kutex;
    return split->locked.exchange(1, std::memory_order_acquire) ? EBUSY : 0;
}

// Exposed slow path, see mutex_lock_contended above.
int kthread_mutex_lock_contended(kthread_mutex_t *m) {
    return kthread::mutex_lock_contended(m);
}

int kthread_mutex_lock(kthread_mutex_t *m) {
    kthread::MutexInternal *split = (kthread::MutexInternal *) m->kutex;
    // Fast path: uncontended acquisition.
    if (!split->locked.exchange(1, std::memory_order_acquire)) {
        return 0;
    }
    // Don't sample when contention profiler is off.
    if (!kthread::g_cp) {
        return kthread::mutex_lock_contended(m);
    }
    // Ask Collector if this (contended) locking should be sampled.
    const size_t sampling_range = tally::is_collectable(&kthread::g_cp_sl);
    if (!sampling_range) { // Don't sample
        return kthread::mutex_lock_contended(m);
    }
    // Start sampling.
    const int64_t start_ns = turbo::Time::current_nanoseconds();
    // NOTE: Don't modify m->csite outside lock since multiple threads are
    // still contending with each other.
    const int rc = kthread::mutex_lock_contended(m);
    if (!rc) { // Inside lock
        m->csite.duration_ns = turbo::Time::current_nanoseconds() - start_ns;
        m->csite.sampling_range = sampling_range;
    } // else rare
    return rc;
}

int kthread_mutex_timedlock(kthread_mutex_t *__restrict m,
                            const struct timespec *__restrict abstime) {
    kthread::MutexInternal *split = (kthread::MutexInternal *) m->kutex;
    // Fast path: uncontended acquisition.
    if (!split->locked.exchange(1, std::memory_order_acquire)) {
        return 0;
    }
    // Don't sample when contention profiler is off.
    if (!kthread::g_cp) {
        return kthread::mutex_timedlock_contended(m, abstime);
    }
    // Ask Collector if this (contended) locking should be sampled.
    const size_t sampling_range = tally::is_collectable(&kthread::g_cp_sl);
    if (!sampling_range) { // Don't sample
        return kthread::mutex_timedlock_contended(m, abstime);
    }
    // Start sampling.
    const int64_t start_ns = turbo::Time::current_nanoseconds();
    // NOTE: Don't modify m->csite outside lock since multiple threads are
    // still contending with each other.
    const int rc = kthread::mutex_timedlock_contended(m, abstime);
    if (!rc) { // Inside lock
        m->csite.duration_ns = turbo::Time::current_nanoseconds() - start_ns;
        m->csite.sampling_range = sampling_range;
    } else if (rc == ETIMEDOUT) {
        // Failed to lock due to ETIMEDOUT, submit the elapse directly.
        const int64_t end_ns = turbo::Time::current_nanoseconds();
        const kthread_contention_site_t csite = {end_ns - start_ns, sampling_range};
        kthread::submit_contention(csite, end_ns);
    }
    return rc;
}

int kthread_mutex_unlock(kthread_mutex_t *m) {
    std::atomic<unsigned> *whole = (std::atomic<unsigned> *) m->kutex;
    kthread_contention_site_t saved_csite = {0, 0};
    // Save and invalidate the csite while still holding the lock; after the
    // exchange below another thread may own the mutex.
    if (kthread::is_contention_site_valid(m->csite)) {
        saved_csite = m->csite;
        kthread::make_contention_site_invalid(&m->csite);
    }
    const unsigned prev = whole->exchange(0, std::memory_order_release);
    // CAUTION: the mutex may be destroyed, check comments before kutex_create
    if (prev == KTHREAD_MUTEX_LOCKED) {
        return 0;  // no contention flag set, nobody to wake
    }
    // Wakeup one waiter
    if (!kthread::is_contention_site_valid(saved_csite)) {
        kthread::kutex_wake(whole);
        return 0;
    }
    // Sampled: time the wakeup and fold it into the contention duration.
    const int64_t unlock_start_ns = turbo::Time::current_nanoseconds();
    kthread::kutex_wake(whole);
    const int64_t unlock_end_ns = turbo::Time::current_nanoseconds();
    saved_csite.duration_ns += unlock_end_ns - unlock_start_ns;
    kthread::submit_contention(saved_csite, unlock_end_ns);
    return 0;
}

// Interpose the global pthread symbols so every pthread_mutex in the
// process goes through the contention-profiling implementation.
int pthread_mutex_lock(pthread_mutex_t *__mutex) {
    return kthread::pthread_mutex_lock_impl(__mutex);
}
int pthread_mutex_unlock(pthread_mutex_t *__mutex) {
    return kthread::pthread_mutex_unlock_impl(__mutex);
}

}  // extern "C"
