#include "common.h"
#include "memory/memory.h"
#include "memory/cache.h"

extern uint32_t dram_read(hwaddr_t addr, size_t len);
extern void dram_write(hwaddr_t addr, size_t len, uint32_t data);
#include "memory/cache2.h"

/* Single module-private L1 instance plus statistics counters. */
static l1_cache_t g_cache;
static uint64_t g_hits = 0;   /* lookups that hit a valid line (updated by reads) */
static uint64_t g_misses = 0; /* lookups that required a fill from L2 (updated by reads) */
static uint64_t g_cycles = 0; /* +2 on hit, +200 on miss */

/* Set index: the address bits immediately above the block offset. */
static inline uint32_t l1_get_set_index(hwaddr_t addr) {
    hwaddr_t without_offset = addr >> L1_OFFSET_BITS;
    return (uint32_t)(without_offset & L1_SET_MASK);
}

/* Tag: all address bits above the index and offset fields. */
static inline uint32_t l1_get_tag(hwaddr_t addr) {
    unsigned tag_shift = L1_OFFSET_BITS + L1_INDEX_BITS;
    return (uint32_t)(addr >> tag_shift);
}

/* Advance a Numerical-Recipes-style LCG and return the new state. */
static inline uint32_t lcg_next(uint32_t *state) {
    uint32_t next = (*state) * 1664525u + 1013904223u;
    *state = next;
    return next;
}

void cache_init(void) {
    uint32_t s;
    uint32_t w;
    for (s = 0; s < L1_SETS; s++) {
        for (w = 0; w < L1_WAYS; w++) {
            g_cache.sets[s].ways[w].valid = 0;
            g_cache.sets[s].ways[w].tag = 0;
        }
    }
    g_cache.rng_state = 2463534242u;
    g_hits = g_misses = g_cycles = 0;
}

/* Return the way index holding (set_idx, tag), or -1 when not cached. */
static int cache_find_way(uint32_t set_idx, uint32_t tag) {
    cache_line_t *ways = g_cache.sets[set_idx].ways;
    int w;
    for (w = 0; w < (int)L1_WAYS; w++) {
        if (ways[w].valid && ways[w].tag == tag) {
            return w;
        }
    }
    return -1;
}

/* Pick a way to fill: prefer any invalid way; otherwise evict at random. */
static int cache_choose_victim(uint32_t set_idx) {
    const cache_set_t *set = &g_cache.sets[set_idx];
    int w;
    for (w = 0; w < (int)L1_WAYS; w++) {
        if (set->ways[w].valid == 0) {
            return w;
        }
    }
    /* every way is valid: random replacement */
    return (int)(lcg_next(&g_cache.rng_state) % L1_WAYS);
}

/* Fill the given way with the block-aligned data at aligned_addr,
 * fetched from L2 (not DRAM), then mark the line valid. */
static void cache_fill_line(uint32_t set_idx, int way, hwaddr_t aligned_addr) {
    cache_line_t *dst = &g_cache.sets[set_idx].ways[way];
    cache2_read_block(aligned_addr, dst->data);
    dst->tag = l1_get_tag(aligned_addr);
    dst->valid = 1;
}

/* Assemble 4 bytes at p into a uint32_t, little-endian. */
static inline uint32_t load_u32_le(const uint8_t *p) {
    uint32_t value = 0;
    int i;
    for (i = 3; i >= 0; i--) {
        value = (value << 8) | (uint32_t)p[i];
    }
    return value;
}

/* Scatter v into 4 bytes at p, little-endian. */
static inline void store_u32_le(uint8_t *p, uint32_t v) {
    int i;
    for (i = 0; i < 4; i++) {
        p[i] = (uint8_t)(v & 0xFFu);
        v >>= 8;
    }
}

/*
 * Read `len` bytes (1..4) starting at `addr`, which must not cross an L1
 * block boundary (cache_read() splits crossing accesses beforehand).
 * On a miss the whole aligned block is filled from L2 into a victim way.
 * Returns the bytes assembled little-endian in the low bits of the result.
 * Stats: +2 cycles on hit, +200 on miss.
 */
static uint32_t cache_read_single(hwaddr_t addr, size_t len) {
    uint32_t set_idx = l1_get_set_index(addr);
    uint32_t tag = l1_get_tag(addr);
    uint32_t off = addr & L1_BLOCK_MASK;
    int way = cache_find_way(set_idx, tag);
    if (way < 0) {
        /* miss -> fill the aligned block from L2 */
        hwaddr_t aligned = addr & ~((hwaddr_t)L1_BLOCK_MASK);
        int victim = cache_choose_victim(set_idx);
        cache_fill_line(set_idx, victim, aligned);
        way = victim;
        g_misses++;
        g_cycles += 200;
    }
    else {
        g_hits++;
        g_cycles += 2;
    }
    cache_line_t *line = &g_cache.sets[set_idx].ways[way];
    /*
     * Assemble exactly `len` bytes little-endian. The previous code
     * treated any len other than 1 or 2 as a 4-byte load, so the 3-byte
     * reads produced by cache_read()'s split path fetched one byte past
     * the requested range — out of the line buffer entirely when
     * off == L1_BLOCK_SIZE - 3.
     */
    uint32_t val = 0;
    size_t i;
    for (i = 0; i < len; i++) {
        val |= (uint32_t)line->data[off + i] << (8u * i);
    }
    return val;
}

/*
 * Read len bytes at addr through the L1. An access that straddles a
 * block boundary is split into two aligned sub-reads whose results are
 * stitched back together little-endian.
 */
uint32_t cache_read(hwaddr_t addr, size_t len) {
    uint32_t off = addr & L1_BLOCK_MASK;
    if (off + len <= L1_BLOCK_SIZE) {
        /* common case: entirely inside one block */
        return cache_read_single(addr, len);
    }
    /* crossing access: low part from this block, high part from the next */
    size_t first = L1_BLOCK_SIZE - off;   /* 1..3 bytes remaining here */
    size_t second = len - first;          /* bytes taken from next block */
    hwaddr_t next_block = (addr & ~((hwaddr_t)L1_BLOCK_MASK)) + L1_BLOCK_SIZE;
    uint32_t lo = cache_read_single(addr, first);
    uint32_t hi = cache_read_single(next_block, second);
    uint32_t shift = (uint32_t)(first * 8);
    uint32_t lo_mask = (1u << shift) - 1u; /* keep only the low `first` bytes */
    return (lo & lo_mask) | (hi << shift);
}

/*
 * Write the low `len` bytes (1..4) of `data` at `addr`, which must not
 * cross an L1 block boundary (cache_write() splits crossing accesses).
 * Policy: write-through (L2 is always updated) and write-no-allocate
 * (a write miss does not fill the line).
 */
static void cache_write_single(hwaddr_t addr, size_t len, uint32_t data) {
    uint32_t set_idx = l1_get_set_index(addr);
    uint32_t tag = l1_get_tag(addr);
    uint32_t off = addr & L1_BLOCK_MASK;
    int way = cache_find_way(set_idx, tag);
    if (way >= 0) {
        /*
         * Write hit: update exactly `len` bytes little-endian. The
         * previous code treated any len other than 1 or 2 as a 4-byte
         * store, so the 3-byte writes produced by cache_write()'s split
         * path clobbered a fourth byte with zero — past the line buffer
         * when off == L1_BLOCK_SIZE - 3, and silently corrupting an
         * unaddressed byte otherwise.
         */
        cache_line_t *line = &g_cache.sets[set_idx].ways[way];
        size_t i;
        for (i = 0; i < len; i++) {
            line->data[off + i] = (uint8_t)((data >> (8u * i)) & 0xFFu);
        }
    }
    /* write-through always to L2 */
    cache2_write_bytes(addr, len, data);
}

/*
 * Write len bytes of data at addr through the L1. An access that
 * straddles a block boundary is split little-endian: the low bytes go
 * to the current block, the high bytes to the next one.
 */
void cache_write(hwaddr_t addr, size_t len, uint32_t data) {
    uint32_t off = addr & L1_BLOCK_MASK;
    if (off + len > L1_BLOCK_SIZE) {
        /* crossing access: split into two aligned sub-writes */
        size_t first = L1_BLOCK_SIZE - off; /* 1..3 bytes in this block */
        size_t second = len - first;
        uint32_t shift = (uint32_t)(first * 8);
        uint32_t lo_mask = (1u << shift) - 1u;
        hwaddr_t next_block = (addr & ~((hwaddr_t)L1_BLOCK_MASK)) + L1_BLOCK_SIZE;
        cache_write_single(addr, first, data & lo_mask);
        cache_write_single(next_block, second, data >> shift);
        return;
    }
    /* common case: entirely inside one block */
    cache_write_single(addr, len, data);
}

/* Zero the hit/miss/cycle counters without touching cache contents. */
void cache_stats_reset(void) {
    g_hits = 0;
    g_misses = 0;
    g_cycles = 0;
}

void cache_stats_dump(void) {
    uint64_t total = g_hits + g_misses;
    double hit_rate = (total ? (double)g_hits / (double)total : 0.0);
    printf("[L1] hits=%llu misses=%llu total=%llu hit_rate=%.2f%% cycles=%llu\n",
           (unsigned long long)g_hits,
           (unsigned long long)g_misses,
           (unsigned long long)total,
           hit_rate * 100.0,
           (unsigned long long)g_cycles);
}


