#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/mman.h>
#include <unistd.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include "sample.h"

#define PAGE_SIZE 4096
#define MMAP_PAGES 32

/*
 * In-memory layout of a PERF_RECORD_SAMPLE record for the sample_type
 * requested in perf_open() (IP | TID | TIME | ADDR | PERIOD | DATA_SRC).
 * The field order must match the kernel's fixed ordering for exactly
 * those flags -- do not reorder or insert fields without also changing
 * attr.sample_type.
 */
struct sample_event {
	struct perf_event_header header;
	__u64    ip;                          /* PERF_SAMPLE_IP */
	__u32    pid, tid;                    /* PERF_SAMPLE_TID */
	__u64    time;                        /* PERF_SAMPLE_TIME */
	__u64    addr;                        /* PERF_SAMPLE_ADDR: sampled data address */
	__u64    period;                      /* PERF_SAMPLE_PERIOD */
	union perf_mem_data_src    data_src;  /* PERF_SAMPLE_DATA_SRC: memory level/hit info */
	//union perf_sample_weight weight;
};

/*
 * View of a ring-buffer record either as a bare header (to read
 * type/size first) or as a full sample.  Also sized to bound how many
 * bytes read_event() copies for records that wrap the ring.
 */
union base_event {
    struct perf_event_header header;
    struct sample_event sample;
};

/*
 * Per-CPU state for one perf event's mmap'd ring buffer.
 */
struct mmap_context {
        struct perf_event_mmap_page *buffer;  /* metadata page; data pages follow it */
        __u64 prev;                           /* NOTE(review): never read or written here -- confirm it is used elsewhere */
        __u64 mask;                           /* data-area size minus one (power-of-two mask) */
        int fd;                               /* perf event fd backing the mapping */
        __u64 start;                          /* our read cursor (free-running, not masked) */
        __u64 end;                            /* snapshot of kernel data_head */
        int cpu;                              /* index into the caller's cache_stat array */
	union base_event wrap_event;          /* reassembly buffer for records that wrap the ring */
};

static struct mmap_context *ctx_list = NULL;

/*
 * Open one sampling perf event for `pid` on `cpu`.
 *
 * Uses a raw PMU encoding (config=0x1cd, config1=0xb) sampled at
 * 4000 Hz with IP/TID/TIME/ADDR/PERIOD/DATA_SRC in each record --
 * NOTE(review): raw encodings are CPU-model specific; confirm 0x1cd
 * against the target PMU's event list.
 *
 * Returns the event fd on success, -1 on failure (errno from the
 * perf_event_open syscall is reported via perror).
 */
static int perf_open(int pid, int cpu)
{
    struct perf_event_attr attr = {0};
    attr.type = PERF_TYPE_RAW;   /* was a magic literal 4 */
    attr.config = 0x1cd;
    attr.config1 = 0xb;
    attr.sample_freq = 4000;     /* interpreted as Hz because attr.freq = 1 */
    attr.size = sizeof(struct perf_event_attr);
    attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME | PERF_SAMPLE_ADDR | PERF_SAMPLE_PERIOD | PERF_SAMPLE_DATA_SRC;
    attr.read_format = PERF_FORMAT_ID;
    attr.disabled = 0;
    attr.inherit = 1;            /* follow children of the traced pid */
    attr.exclude_kernel = 0;
    attr.exclude_user = 0;
    attr.exclude_hv = 1;
    attr.mmap = 1;               /* emit PERF_RECORD_MMAP records */
    attr.comm = 1;               /* emit PERF_RECORD_COMM records */
    attr.freq = 1;
    attr.enable_on_exec = 1;
    attr.task = 1;               /* emit fork/exit records */
    attr.precise_ip = 3;         /* request maximum skid precision (PEBS-like) */
    attr.mmap_data = 1;
    attr.sample_id_all = 1;
    attr.mmap2 = 1;
    attr.comm_exec = 1;

    int fd = syscall(__NR_perf_event_open, &attr, pid, cpu, -1, 0);
    if (fd < 0) {
	/* perror appends ": <error>" itself; the old trailing colon doubled it */
	perror("failed to open perf event");
	return -1;
    }
    return fd;
}

/*
 * Map the ring buffer for a perf event fd: one metadata page followed
 * by MMAP_PAGES data pages (data area must be a power of two of pages).
 * On success fills ctx->buffer, ctx->mask and ctx->fd and returns 0;
 * returns -1 on failure.
 *
 * NOTE(review): PAGE_SIZE is hard-coded to 4096; on a kernel with a
 * different page size this mapping fails -- consider sysconf(_SC_PAGESIZE).
 */
static int perf_init_mmap(int *fd, struct mmap_context *ctx)
{
    long mmapLen = (MMAP_PAGES + 1) * PAGE_SIZE;
    /* Keep the sanity check in signed arithmetic: the original stored
     * the mask in a __u64 first, which made `mask < 0` dead code. */
    long mask = mmapLen - PAGE_SIZE - 1;
    if (mask < 0) {
        return -1;
    }

    ctx->mask = (__u64)mask;
    void *currentMap =
            mmap(NULL, ctx->mask + 1 + PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, *fd, 0);
    if (currentMap == MAP_FAILED) {
	/* perror appends ": <error>" itself; drop the old trailing colon */
	perror("failed to mmap");
        return -1;
    }
    ctx->buffer = (struct perf_event_mmap_page *)(currentMap);
    ctx->fd = *fd;
    return 0;
}

/*
 * Publish our read cursor to the kernel: storing ctx->start into
 * data_tail tells the kernel it may overwrite the consumed bytes.
 */
static void mmap_consume(struct mmap_context *ctx)
{
	struct perf_event_mmap_page *page = ctx->buffer;

	// TODO: check atomic (this should be a release store of data_tail)
	page->data_tail = ctx->start;
}

/*
 * Copy `len` bytes of a record out of the ring buffer into *evt,
 * handling records that wrap around the end of the data area.
 *
 * `offset` is a free-running ring position (masked per chunk with
 * ctx->mask); `data` points at the first data page.  Always returns 0.
 */
static int copy_event(struct mmap_context *ctx, union base_event *evt, __u64 offset, unsigned char* data, __u64 len)
{
	char *dst = (char *)evt;

	while (len) {
		__u64 pos = offset & ctx->mask;        /* position inside the ring */
		__u64 until_wrap = ctx->mask + 1 - pos; /* bytes left before the wrap point */
		__u64 chunk = (until_wrap < len) ? until_wrap : len;

		memcpy(dst, &data[pos], chunk);

		offset += chunk;
		dst += chunk;
		len -= chunk;
	}
	return 0;
}

/*
 * Pull the next record out of the ring buffer.
 *
 * On success, *event points either directly into the ring (fast path)
 * or at ctx->wrap_event when the record wraps the end of the data area;
 * in the wrap case at most sizeof(union base_event) bytes are copied,
 * so larger records are truncated -- callers only read the fixed-size
 * sample prefix, which fits.  ctx->start is advanced past the record.
 *
 * Returns 0 when a record was produced, -1 when the ring is empty or
 * the header is malformed (caller treats -1 as "stop draining").
 */
static int read_event(struct mmap_context *ctx, union base_event** event)
{
	unsigned char *data = (unsigned char *)ctx->buffer + PAGE_SIZE;
	__u64 diff = ctx->end >= ctx->start ? ctx->end - ctx->start : 0;
	if (diff >= sizeof(struct perf_event_header)) {
	    *event = (union base_event *)&data[ctx->start & ctx->mask];
	    __u64 size = (*event)->header.size;

	    /* Reject impossible sizes and partially-written records.
	     * (The original wrapped this in __glibc_unlikely(), a
	     * glibc-internal reserved macro; a plain test is portable.) */
	    if (size < sizeof(struct perf_event_header) || diff < size) {
	        return -1;
	    }

	    size_t event_size = sizeof(union base_event);
	    if ((ctx->start & ctx->mask) + size != ((ctx->start + size) & ctx->mask)) {
	        /* Record wraps the end of the data area: reassemble a
	         * contiguous (possibly truncated) copy in wrap_event. */
	        __u64 offset = ctx->start;
	        __u64 len = event_size < size ? event_size : size;
	        copy_event(ctx, &ctx->wrap_event, offset, data, len);
		*event = &ctx->wrap_event;
	    }

	    ctx->start += size;
	}

	/* *event stays NULL (caller initializes it) when nothing was read. */
	return *event != NULL ? 0 : -1;
}

/*
 * Drain all pending records from one CPU's ring buffer and accumulate
 * cache hit/miss counts into stat[ctx->cpu].
 *
 * Hits are tallied at cnt[mem_lvl_num], misses at cnt[mem_lvl_num +
 * LVL_NUM].  NOTE(review): mem_lvl_num is a 4-bit kernel field (0-15);
 * confirm cache_stat.cnt is sized for 2*LVL_NUM entries.
 *
 * Always returns 0.
 */
static int read_buffer(struct mmap_context *ctx, struct cache_stat *stat)
{
	// TODO: check atomic (should be an acquire load of data_head)
	__u64 head = ctx->buffer->data_head;
	ctx->end = head;

	/* Ring overrun: the kernel lapped our cursor, so the unread data
	 * is gone.  Drop it all and resynchronize at the current head. */
	unsigned long size = ctx->end - ctx->start;
	if (size > (unsigned long)(ctx->mask) + 1) {
		ctx->start = head;
		mmap_consume(ctx);
	}

	while (1) {
		// TODO: check atomic
		ctx->end = ctx->buffer->data_head;

		union base_event *evt = NULL;
		if (read_event(ctx, &evt) < 0) {
			break;
		}

		/* The attr enables mmap/comm/task records too; only
		 * PERF_RECORD_SAMPLE has the sample_event layout.  The
		 * original read every record as a sample, corrupting the
		 * counters with MMAP/COMM/FORK/EXIT payload bytes. */
		if (evt->header.type == PERF_RECORD_SAMPLE) {
			union perf_mem_data_src data_src = evt->sample.data_src;
			if (data_src.mem_lvl & PERF_MEM_LVL_HIT) {
				stat[ctx->cpu].cnt[data_src.mem_lvl_num]++;
			} else {
				stat[ctx->cpu].cnt[data_src.mem_lvl_num + LVL_NUM]++;
			}
		}

		mmap_consume(ctx);
	}

	return 0;
}

/*
 * Start sampling: open one perf event per CPU for `pid`, mmap each
 * ring buffer into the global ctx_list, then reset and enable every
 * counter.  Returns 0 on success, -1 on failure (already-opened fds
 * are closed on the error path).
 *
 * Fixes over the original: malloc results and perf_open/ioctl-RESET
 * return values are now checked (RESET's result was overwritten before
 * being tested), errors abort instead of continuing on a bad mapping,
 * and the fd_list scratch array is no longer leaked.
 */
int start_sample(int pid)
{
        int rc = -1;

        int *fd_list = malloc(cpu_nums * sizeof *fd_list);
        if (fd_list == NULL) {
                return -1;
        }
        memset(fd_list, 0, cpu_nums * sizeof *fd_list);

        for (int i = 0; i < cpu_nums; ++i) {
                fd_list[i] = perf_open(pid, i);
                if (fd_list[i] < 0) {
                        goto out;
                }
        }

        ctx_list = malloc(cpu_nums * sizeof *ctx_list);
        if (ctx_list == NULL) {
                goto out;
        }
        memset(ctx_list, 0, cpu_nums * sizeof *ctx_list);

        for (int i = 0; i < cpu_nums; ++i) {
                ctx_list[i].cpu = i;
                if (perf_init_mmap(&fd_list[i], &ctx_list[i]) != 0) {
                        printf("failed to init mmap\n");
                        goto out;
                }
        }

        for (int i = 0; i < cpu_nums; ++i) {
                if (ioctl(fd_list[i], PERF_EVENT_IOC_RESET, 0) != 0 ||
                    ioctl(fd_list[i], PERF_EVENT_IOC_ENABLE, 0) != 0) {
                        printf("failed to ioctl\n");
                        goto out;
                }
        }

        rc = 0;
out:
        if (rc != 0) {
                /* Close whatever was opened; entries never assigned are 0.
                 * NOTE(review): mappings made before the failure are not
                 * munmap'd here -- acceptable for a one-shot init, but
                 * worth a teardown helper if init can be retried. */
                for (int i = 0; i < cpu_nums; ++i) {
                        if (fd_list[i] > 0) {
                                close(fd_list[i]);
                        }
                }
        }
        /* The fds live on in ctx_list[i].fd; the scratch array does not. */
        free(fd_list);
        return rc;
}

/*
 * Fold the pending samples from every per-CPU ring buffer into the
 * caller-supplied stat array.  Always returns 0.
 */
int get_cache_stat(struct cache_stat* stat)
{
	for (int cpu = 0; cpu < cpu_nums; ++cpu) {
		read_buffer(&ctx_list[cpu], stat);
	}
	return 0;
}
