#include <uapi/linux/ptrace.h>
#include <uapi/linux/bpf_perf_event.h>

struct perf_delta {
    u64 time_delta;
};

BPF_PERF_OUTPUT(output);
BPF_PERCPU_ARRAY(data, u64);
BPF_HASH(miss_count,u32);
BPF_HASH(ref_count, u32);

#define TIME_ID 0

/*
 * fun_start - probe attached at entry of the traced user function.
 *
 * Stamps the current monotonic time (ns) into the per-CPU array slot
 * TIME_ID so fun_end() can compute the elapsed time, and so the cache
 * event handlers know the function is currently executing.
 */
void fun_start(struct pt_regs *ctx)
{
    u32 key = TIME_ID;
    u64 time_start = bpf_ktime_get_ns();

    /* BPF_PERCPU_ARRAY slots are preallocated: lookup() on a valid index
     * cannot fail and insert() (BPF_NOEXIST) is rejected on array maps.
     * A single update() replaces the original lookup/insert branching. */
    data.update(&key, &time_start);
}

/*
 * fun_end - probe attached at return of the traced user function.
 *
 * Computes the ns elapsed since fun_start() stored its timestamp, pushes
 * the delta to user space through the perf buffer, and zeroes the slot so
 * the cache-event handlers stop counting until the next fun_start().
 */
void fun_end(struct pt_regs *ctx)
{
    u32 key = TIME_ID;
    u64 zero = 0;
    u64 time_end = bpf_ktime_get_ns();
    struct perf_delta perf_data = {};

    u64 *kptr = data.lookup(&key);
    /* Array-map lookup on a valid index cannot fail, but the BPF verifier
     * still requires the NULL check before dereferencing. */
    if (!kptr)
        return;

    /* A zero timestamp means fun_end fired without a matching fun_start
     * (e.g. the probes were attached mid-call, or the slot was already
     * consumed); submitting time_end - 0 would be a bogus huge delta. */
    if (*kptr == 0)
        return;

    perf_data.time_delta = time_end - *kptr;
    data.update(&key, &zero);   /* disarm the cache-event counters */

    output.perf_submit(ctx, &perf_data, sizeof(perf_data));
}

#define CACHE_KEY 0
/*
 * on_cache_miss - perf-event handler for hardware cache-miss samples.
 *
 * Accumulates the sample period into miss_count, but only while the
 * traced function is running (i.e. fun_start() has stored a non-zero
 * timestamp that fun_end() has not yet cleared).
 */
int on_cache_miss(struct bpf_perf_event_data *ctx)
{
    u32 count_key = CACHE_KEY;
    u32 time_key = TIME_ID;

    u64 *started = data.lookup(&time_key);
    if (started == 0 || *started == 0)
        return 0;   /* traced function not currently executing */

    miss_count.increment(count_key, ctx->sample_period);
    return 0;
}

/*
 * on_cache_ref - perf-event handler for hardware cache-reference samples.
 *
 * Mirrors on_cache_miss(): adds the sample period to ref_count only while
 * the traced function is running (non-zero timestamp in the TIME_ID slot).
 */
int on_cache_ref(struct bpf_perf_event_data *ctx)
{
    u32 count_key = CACHE_KEY;
    u32 time_key = TIME_ID;

    u64 *started = data.lookup(&time_key);
    if (started == 0 || *started == 0)
        return 0;   /* traced function not currently executing */

    ref_count.increment(count_key, ctx->sample_period);
    return 0;
}