
/**
  ******************************************************************************
  * Copyright 2021 The Microbee Authors. All Rights Reserved.
  * 
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
  * You may obtain a copy of the License at
  * 
  * http://www.apache.org/licenses/LICENSE-2.0
  * 
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  * See the License for the specific language governing permissions and
  * limitations under the License.
  * 
  * @file       perf_counter.c
  * @author     baiyang
  * @date       2023-3-20
  ******************************************************************************
  */

/*----------------------------------include-----------------------------------*/
#include "perf_counter.h"

#include <inttypes.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>

#include <rtthread.h>

#include <common/microbee.h>
#include <common/time/gp_time.h>
#include <common/console/console.h>
/*-----------------------------------macro------------------------------------*/
#define PERF_PRINTF      console_printf

/*
 * Common leading fields for every concrete counter struct.
 * NOTE: this must stay layout-identical to struct perf_ctr_header below,
 * since all counters are accessed through a perf_ctr_header pointer.
 */
#define PERF_COUNTER_HEADER struct \
{ \
    rt_slist_t       link;        \
    enum perf_counter_type type; \
    const char      *name;       \
}
/*----------------------------------typedef-----------------------------------*/
/**
 * Header common to all counters.
 * Every concrete counter embeds these fields first (via PERF_COUNTER_HEADER),
 * so any counter can be viewed through this struct.
 */
struct perf_ctr_header {
    rt_slist_t      link;           /**< linkage into the global perf_counters list */
    enum perf_counter_type type;    /**< discriminates the concrete counter struct */
    const char      *name;          /**< counter name; pointer is stored, not copied */
};

/**
 * PC_COUNT counter: a plain event counter.
 */
struct perf_ctr_count {
    PERF_COUNTER_HEADER;

    uint64_t        event_count;    /**< number of times perf_count() fired */
};

/**
 * PC_ELAPSED counter: measures time spent between perf_begin()/perf_end().
 */
struct perf_ctr_elapsed {
    PERF_COUNTER_HEADER;

    uint64_t        event_count;    /**< number of completed measurements */
    uint64_t        time_start;     /**< us timestamp of pending perf_begin(); 0 = not armed */
    uint64_t        time_total;     /**< accumulated elapsed time, us */
    uint32_t        time_least;     /**< shortest single measurement, us */
    uint32_t        time_most;      /**< longest single measurement, us */
    float           mean;           /**< Welford running mean of elapsed time, seconds */
    float           M2;             /**< Welford running sum of squared deviations */
};

/**
 * PC_INTERVAL counter: measures the interval between consecutive events.
 */
struct perf_ctr_interval {
    PERF_COUNTER_HEADER;

    uint64_t        event_count;    /**< number of recorded events */
    uint64_t        time_event;     /**< NOTE(review): only written by perf_reset(); apparently unused by the update path */
    uint64_t        time_first;     /**< us timestamp of the first event */
    uint64_t        time_last;      /**< us timestamp of the most recent event */
    uint32_t        time_least;     /**< shortest observed interval, us */
    uint32_t        time_most;      /**< longest observed interval, us */
    float           mean;           /**< Welford running mean of interval, seconds */
    float           M2;             /**< Welford running sum of squared deviations */
};

/*---------------------------------prototype----------------------------------*/

/*----------------------------------variable----------------------------------*/
/**
 * List of all known counters.
 */
static rt_slist_t perf_counters = { NULL };

/**
 * mutex protecting access to the perf_counters linked list (which is read from & written to by different threads)
 */
struct rt_mutex perf_counters_mutex;

// FIXME: the mutex does **not** protect against access to/from the perf
// counter's data. It can still happen that a counter is updated while it is
// printed. This can lead to inconsistent output, or completely bogus values
// (especially the 64bit values which are in general not atomically updated).
// The same holds for shared perf counters (perf_alloc_once), that can be updated
// concurrently (this affects the 'ctrl_latency' counter).
/*-------------------------------------os-------------------------------------*/

/*----------------------------------function----------------------------------*/
/**
 * Allocate a new counter of the given type and register it in the
 * global counter list.
 *
 * @param type  counter type (PC_COUNT, PC_ELAPSED or PC_INTERVAL)
 * @param name  counter name; the pointer is stored, not copied, so it
 *              must outlive the counter (typically a string literal)
 * @return      the new counter, or NULL on unknown type or allocation
 *              failure
 */
perf_counter_t
perf_alloc(enum perf_counter_type type, const char *name)
{
    perf_counter_t ctr = NULL;
    uint32_t ctr_len = 0;

    /* determine the size of the concrete counter struct */
    switch (type) {
    case PC_COUNT:
        ctr_len = sizeof(struct perf_ctr_count);
        break;

    case PC_ELAPSED:
        ctr_len = sizeof(struct perf_ctr_elapsed);
        break;

    case PC_INTERVAL:
        ctr_len = sizeof(struct perf_ctr_interval);
        break;

    default:
        /* unknown type: nothing to allocate */
        return NULL;
    }

    /* single allocation path keeps size and allocation consistent */
    ctr = (perf_counter_t)rt_malloc(ctr_len);

    if (ctr != NULL) {
        rt_memset(ctr, 0, ctr_len);

        ctr->type = type;
        ctr->name = name;
        rt_mutex_take(&perf_counters_mutex, RT_WAITING_FOREVER);
        rt_slist_insert(&perf_counters, &ctr->link);
        rt_mutex_release(&perf_counters_mutex);
    }

    return ctr;
}

/**
 * Return an existing counter with the given name and type if one is
 * already registered; otherwise allocate a fresh one.
 *
 * @return the shared or new counter, or NULL if a counter with the same
 *         name but a different type exists (treated as a usage error)
 */
perf_counter_t
perf_alloc_once(enum perf_counter_type type, const char *name)
{
    rt_mutex_take(&perf_counters_mutex, RT_WAITING_FOREVER);

    struct rt_slist_node *it =
        rt_slist_isempty(&perf_counters) ? NULL : rt_slist_first(&perf_counters);

    for (; it != NULL; it = rt_slist_next(it)) {
        perf_counter_t cand = rt_slist_entry(it, struct perf_ctr_header, link);

        if (strcmp(cand->name, name) == 0) {
            /* name match: share only when the type also matches; a type
             * mismatch is assumed to be an error, not intended */
            perf_counter_t result = (cand->type == type) ? cand : NULL;
            rt_mutex_release(&perf_counters_mutex);
            return result;
        }
    }

    rt_mutex_release(&perf_counters_mutex);

    /* no existing counter of that name was found */
    return perf_alloc(type, name);
}

/**
 * Remove a counter from the global list and release its memory.
 *
 * @param handle counter to free; NULL is ignored
 */
void
perf_free(perf_counter_t handle)
{
    if (handle == NULL) {
        return;
    }

    /* unlink under the list mutex before touching the memory */
    rt_mutex_take(&perf_counters_mutex, RT_WAITING_FOREVER);
    rt_slist_remove(&perf_counters, &handle->link);
    rt_mutex_release(&perf_counters_mutex);

    /* only free memory for the known counter types */
    if ((handle->type == PC_COUNT)
        || (handle->type == PC_ELAPSED)
        || (handle->type == PC_INTERVAL)) {
        rt_free(handle);
    }
}

/**
 * Register one event on the counter.
 * PC_COUNT counters are incremented; PC_INTERVAL counters record the
 * current time via perf_count_interval(). Other types are ignored.
 *
 * @param handle counter handle; NULL is ignored
 */
void
perf_count(perf_counter_t handle)
{
    if (handle == NULL) {
        return;
    }

    if (handle->type == PC_COUNT) {
        ((struct perf_ctr_count *)handle)->event_count++;
    } else if (handle->type == PC_INTERVAL) {
        perf_count_interval(handle, time_micros64());
    }
}

/**
 * Start a PC_ELAPSED measurement by recording the current time.
 * A no-op for NULL handles and all other counter types.
 *
 * @param handle counter handle
 */
void
perf_begin(perf_counter_t handle)
{
    if ((handle != NULL) && (handle->type == PC_ELAPSED)) {
        ((struct perf_ctr_elapsed *)handle)->time_start = time_micros64();
    }
}

/**
 * Finish a PC_ELAPSED measurement started with perf_begin() and fold
 * the elapsed time into the counter's statistics.
 * A no-op for NULL handles, other counter types, and counters that were
 * not armed (time_start == 0, e.g. after perf_cancel()).
 *
 * @param handle counter handle
 */
void
perf_end(perf_counter_t handle)
{
    if ((handle == NULL) || (handle->type != PC_ELAPSED)) {
        return;
    }

    struct perf_ctr_elapsed *pce = (struct perf_ctr_elapsed *)handle;

    /* only record when a matching perf_begin() armed the counter */
    if (pce->time_start != 0) {
        perf_set_elapsed(handle, time_micros64() - pce->time_start);
    }
}

/**
 * Record an externally measured elapsed time on a PC_ELAPSED counter,
 * updating totals, min/max and the running mean/variance.
 * Negative values and non-PC_ELAPSED counters are ignored.
 *
 * @param handle  counter handle; NULL is ignored
 * @param elapsed measured duration in microseconds
 */
void
perf_set_elapsed(perf_counter_t handle, int64_t elapsed)
{
    if ((handle == NULL) || (handle->type != PC_ELAPSED)) {
        return;
    }

    /* discard negative (invalid) measurements */
    if (elapsed < 0) {
        return;
    }

    struct perf_ctr_elapsed *ctr = (struct perf_ctr_elapsed *)handle;

    ctr->event_count++;
    ctr->time_total += elapsed;

    if ((ctr->time_least > (uint32_t)elapsed) || (ctr->time_least == 0)) {
        ctr->time_least = elapsed;
    }

    if (ctr->time_most < (uint32_t)elapsed) {
        ctr->time_most = elapsed;
    }

    /* Knuth/Welford online mean and variance of the elapsed time, in
     * seconds (event_count was already incremented above) */
    float dt = elapsed / 1e6f;
    float delta = dt - ctr->mean;
    ctr->mean += delta / ctr->event_count;
    ctr->M2 += delta * (dt - ctr->mean);

    /* disarm, so a perf_end() without a new perf_begin() is ignored */
    ctr->time_start = 0;
}

/**
 * Record an event occurrence at timestamp @p now on a PC_INTERVAL
 * counter, updating min/max/mean/variance of the interval between
 * consecutive events. Non-PC_INTERVAL counters are ignored.
 *
 * @param handle counter handle; NULL is ignored
 * @param now    event timestamp in microseconds
 */
void
perf_count_interval(perf_counter_t handle, uint64_t now)
{
    if (handle == NULL) {
        return;
    }

    switch (handle->type) {
    case PC_INTERVAL: {
            struct perf_ctr_interval *pci = (struct perf_ctr_interval *)handle;

            switch (pci->event_count) {
            case 0:
                /* first event ever: only remember when it happened */
                pci->time_first = now;
                break;

            case 1:
                /* first complete interval: seed min/max/mean, zero variance */
                pci->time_least = (uint32_t)(now - pci->time_last);
                pci->time_most = (uint32_t)(now - pci->time_last);
                pci->mean = pci->time_least / 1e6f;
                pci->M2 = 0;
                break;

            default: {
                    /* subsequent intervals: running min/max and statistics */
                    uint64_t interval = now - pci->time_last;

                    if ((uint32_t)interval < pci->time_least) {
                        pci->time_least = (uint32_t)interval;
                    }

                    if ((uint32_t)interval > pci->time_most) {
                        pci->time_most = (uint32_t)interval;
                    }

                    // maintain mean and variance of interval in seconds
                    // Knuth/Welford recursive mean and variance of update intervals (via Wikipedia)
                    float dt = interval / 1e6f;
                    float delta_intvl = dt - pci->mean;
                    pci->mean += delta_intvl / pci->event_count;
                    pci->M2 += delta_intvl * (dt - pci->mean);
                    break;
                }
            }

            /* note: time_last/event_count are updated AFTER the statistics
             * above, which rely on the previous values */
            pci->time_last = now;
            pci->event_count++;
            break;
        }

    default:
        break;
    }
}

/**
 * Overwrite the event count of a PC_COUNT counter.
 * A no-op for NULL handles and all other counter types.
 *
 * @param handle counter handle
 * @param count  new event count value
 */
void
perf_set_count(perf_counter_t handle, uint64_t count)
{
    if ((handle != NULL) && (handle->type == PC_COUNT)) {
        ((struct perf_ctr_count *)handle)->event_count = count;
    }
}

/**
 * Abort an in-progress PC_ELAPSED measurement without recording it.
 * A no-op for NULL handles and all other counter types.
 *
 * @param handle counter handle
 */
void
perf_cancel(perf_counter_t handle)
{
    if ((handle != NULL) && (handle->type == PC_ELAPSED)) {
        /* drop the pending perf_begin() timestamp */
        ((struct perf_ctr_elapsed *)handle)->time_start = 0;
    }
}

/**
 * Reset a counter to its freshly-allocated state.
 *
 * @param handle counter to reset; NULL is ignored
 */
void
perf_reset(perf_counter_t handle)
{
    if (handle == NULL) {
        return;
    }

    switch (handle->type) {
    case PC_COUNT:
        ((struct perf_ctr_count *)handle)->event_count = 0;
        break;

    case PC_ELAPSED: {
            struct perf_ctr_elapsed *pce = (struct perf_ctr_elapsed *)handle;
            pce->event_count = 0;
            pce->time_start = 0;
            pce->time_total = 0;
            pce->time_least = 0;
            pce->time_most = 0;
            /* BUGFIX: also clear the running statistics; otherwise stale
             * mean/variance survive a reset and corrupt subsequent output */
            pce->mean = 0.0f;
            pce->M2 = 0.0f;
            break;
        }

    case PC_INTERVAL: {
            struct perf_ctr_interval *pci = (struct perf_ctr_interval *)handle;
            pci->event_count = 0;
            pci->time_event = 0;
            pci->time_first = 0;
            pci->time_last = 0;
            pci->time_least = 0;
            pci->time_most = 0;
            /* BUGFIX: see PC_ELAPSED above */
            pci->mean = 0.0f;
            pci->M2 = 0.0f;
            break;
        }

    default:
        break;
    }
}

/**
 * Print one counter's statistics to the console.
 *
 * @param handle counter to print; NULL is ignored
 */
void
perf_print_counter(perf_counter_t handle)
{
    if (handle == NULL) {
        return;
    }

    switch (handle->type) {
    case PC_COUNT:
        PERF_PRINTF("%s: %" PRIu64 " events\n",
                 handle->name,
                 ((struct perf_ctr_count *)handle)->event_count);
        break;

    case PC_ELAPSED: {
            struct perf_ctr_elapsed *pce = (struct perf_ctr_elapsed *)handle;
            /* BUGFIX: sample variance needs >= 2 events; the old code
             * divided by zero at event_count == 1 (NaN rms) and by a
             * wrapped huge value at event_count == 0 */
            float rms = (pce->event_count > 1)
                    ? sqrtf(pce->M2 / (pce->event_count - 1))
                    : 0.0f;
            PERF_PRINTF("%s: %" PRIu64 " events, %" PRIu64 "us elapsed, %.2fus avg, min %" PRIu32 "us max %" PRIu32
                     "us %5.3fus rms\n",
                     handle->name,
                     pce->event_count,
                     pce->time_total,
                     (pce->event_count == 0) ? 0 : (double)pce->time_total / (double)pce->event_count,
                     pce->time_least,
                     pce->time_most,
                     (double)(1e6f * rms));
            break;
        }

    case PC_INTERVAL: {
            struct perf_ctr_interval *pci = (struct perf_ctr_interval *)handle;
            /* BUGFIX: same guard as PC_ELAPSED above */
            float rms = (pci->event_count > 1)
                    ? sqrtf(pci->M2 / (pci->event_count - 1))
                    : 0.0f;

            PERF_PRINTF("%s: %" PRIu64 " events, %.2fus avg, min %" PRIu32 "us max %" PRIu32 "us %5.3fus rms\n",
                     handle->name,
                     pci->event_count,
                     (pci->event_count == 0) ? 0 : (double)(pci->time_last - pci->time_first) / (double)pci->event_count,
                     pci->time_least,
                     pci->time_most,
                     (double)(1e6f * rms));
            break;
        }

    default:
        break;
    }
}

/**
 * Format one counter's statistics into a caller-supplied buffer.
 * The output is always NUL-terminated (if length > 0).
 *
 * @param buffer destination buffer
 * @param length size of @p buffer in bytes
 * @param handle counter to format; NULL is ignored
 * @return number of characters snprintf would have written (may exceed
 *         length on truncation), or 0 for NULL/invalid arguments
 */
int
perf_print_counter_buffer(char *buffer, int length, perf_counter_t handle)
{
    int num_written = 0;

    /* BUGFIX: also reject NULL/zero-size buffers; the old code wrote
     * buffer[length - 1] unconditionally (out of bounds for length <= 0) */
    if ((handle == NULL) || (buffer == NULL) || (length <= 0)) {
        return 0;
    }

    switch (handle->type) {
    case PC_COUNT:
        num_written = snprintf(buffer, length, "%s: %" PRIu64 " events",
                       handle->name,
                       ((struct perf_ctr_count *)handle)->event_count);
        break;

    case PC_ELAPSED: {
            struct perf_ctr_elapsed *pce = (struct perf_ctr_elapsed *)handle;
            /* BUGFIX: sample variance needs >= 2 events; avoid the
             * division by zero / unsigned wrap of (event_count - 1) */
            float rms = (pce->event_count > 1)
                    ? sqrtf(pce->M2 / (pce->event_count - 1))
                    : 0.0f;
            num_written = snprintf(buffer, length,
                           "%s: %" PRIu64 " events, %" PRIu64 "us elapsed, %.2fus avg, min %" PRIu32 "us max %" PRIu32 "us %5.3fus rms",
                           handle->name,
                           pce->event_count,
                           pce->time_total,
                           (pce->event_count == 0) ? 0 : (double)pce->time_total / (double)pce->event_count,
                           pce->time_least,
                           pce->time_most,
                           (double)(1e6f * rms));
            break;
        }

    case PC_INTERVAL: {
            struct perf_ctr_interval *pci = (struct perf_ctr_interval *)handle;
            /* BUGFIX: same guard as PC_ELAPSED above */
            float rms = (pci->event_count > 1)
                    ? sqrtf(pci->M2 / (pci->event_count - 1))
                    : 0.0f;

            /* BUGFIX: "%.2fus avg" — the "us" unit was missing here,
             * inconsistent with perf_print_counter() */
            num_written = snprintf(buffer, length,
                           "%s: %" PRIu64 " events, %.2fus avg, min %" PRIu32 "us max %" PRIu32 "us %5.3fus rms",
                           handle->name,
                           pci->event_count,
                           (pci->event_count == 0) ? 0 : (double)(pci->time_last - pci->time_first) / (double)pci->event_count,
                           pci->time_least,
                           pci->time_most,
                           (double)(1e6f * rms));
            break;
        }

    default:
        break;
    }

    buffer[length - 1] = 0; // ensure 0-termination
    return num_written;
}

/**
 * Return the number of events recorded by a counter.
 *
 * @param handle counter handle
 * @return event count, or 0 for NULL handles and unknown types
 */
uint64_t
perf_event_count(perf_counter_t handle)
{
    if (handle == NULL) {
        return 0;
    }

    switch (handle->type) {
    case PC_COUNT:
        return ((struct perf_ctr_count *)handle)->event_count;

    case PC_ELAPSED:
        return ((struct perf_ctr_elapsed *)handle)->event_count;

    case PC_INTERVAL:
        return ((struct perf_ctr_interval *)handle)->event_count;

    default:
        return 0;
    }
}

/**
 * Return the running mean (in seconds) maintained by a PC_ELAPSED or
 * PC_INTERVAL counter.
 *
 * @param handle counter handle
 * @return mean in seconds, or 0.0f for NULL handles and other types
 */
float
perf_mean(perf_counter_t handle)
{
    if (handle == NULL) {
        return 0;
    }

    switch (handle->type) {
    case PC_ELAPSED:
        return ((struct perf_ctr_elapsed *)handle)->mean;

    case PC_INTERVAL:
        return ((struct perf_ctr_interval *)handle)->mean;

    default:
        return 0.0f;
    }
}

/**
 * Invoke @p cb for every registered counter while holding the list
 * mutex (the callback must not allocate/free counters).
 *
 * @param cb   callback invoked per counter
 * @param user opaque pointer passed through to the callback
 */
void
perf_iterate_all(perf_callback cb, void *user)
{
    rt_mutex_take(&perf_counters_mutex, RT_WAITING_FOREVER);

    struct rt_slist_node *it =
        rt_slist_isempty(&perf_counters) ? NULL : rt_slist_first(&perf_counters);

    for (; it != NULL; it = rt_slist_next(it)) {
        cb(rt_slist_entry(it, struct perf_ctr_header, link), user);
    }

    rt_mutex_release(&perf_counters_mutex);
}

/**
 * Print every registered counter to the console.
 * Exported as the msh command "perf_print".
 */
void
perf_print_all(void)
{
    rt_mutex_take(&perf_counters_mutex, RT_WAITING_FOREVER);

    struct rt_slist_node *it =
        rt_slist_isempty(&perf_counters) ? NULL : rt_slist_first(&perf_counters);

    for (; it != NULL; it = rt_slist_next(it)) {
        perf_print_counter(rt_slist_entry(it, struct perf_ctr_header, link));
    }

    rt_mutex_release(&perf_counters_mutex);
}
MSH_CMD_EXPORT_ALIAS(perf_print_all, perf_print, Output all performance counter.);

/**
 * Reset every registered counter.
 * Exported as the msh command "perf_reset".
 */
void
perf_reset_all(void)
{
    rt_mutex_take(&perf_counters_mutex, RT_WAITING_FOREVER);

    struct rt_slist_node *it =
        rt_slist_isempty(&perf_counters) ? NULL : rt_slist_first(&perf_counters);

    for (; it != NULL; it = rt_slist_next(it)) {
        perf_reset(rt_slist_entry(it, struct perf_ctr_header, link));
    }

    rt_mutex_release(&perf_counters_mutex);
}
MSH_CMD_EXPORT_ALIAS(perf_reset_all, perf_reset, reset all performance counter.);

/**
 * Board-init hook: create the mutex guarding the global counter list.
 *
 * @return 0 on success, RT-Thread error code otherwise
 */
static int perf_counters_mutex_init(void)
{
    int ret = (int)rt_mutex_init(&perf_counters_mutex, "perf", RT_IPC_FLAG_PRIO);

    return ret;
}
MB_INIT_BOARD_PREV_EXPORT(perf_counters_mutex_init);
/*------------------------------------test------------------------------------*/


