#define pr_fmt(fmt) "poc-test: " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/jiffies.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/io.h>
#include <linux/random.h>
#include <asm/pgtable.h>

/*
 * One physically contiguous test buffer plus several virtual aliases of it
 * with different memory attributes, used to probe Point-of-Coherency
 * behaviour between the CPU caches and the DMA engine.
 */
struct mem_region {
    phys_addr_t pa;         /* physical address of the buffer */
    size_t size;
    void __iomem *va_nc;        /* non-cacheable mapping (from dmam_alloc_coherent) */
    void __iomem *va_c_sh;      /* cacheable + shareable alias */
    void __iomem *va_c_nsh;     /* cacheable + non-shareable alias */
    void __iomem *va_nGnRnE;     /* Device nGnRnE alias */
};
/* Source and destination test buffers, shared by all threads. */
struct mem_region src_region, dst_region;

/* Per-CPU kthreads created in poc_probe(); indexed by CPU number. */
static struct task_struct **threads;

/* Sink so the loads in do_cacheline_fill() cannot be optimized away. */
static volatile unsigned long use_result_dummy;

static void use_int(int result) { use_result_dummy += result; }

/* When defined, copies use a real dmaengine channel instead of memcpy(). */
#define USE_HW_DMA

#ifdef USE_HW_DMA
static struct dma_chan *chan = NULL;
#endif

/*
 * Acquire a DMA_MEMCPY-capable dmaengine channel (hardware path only;
 * the software fallback needs no setup).
 * Returns 0 on success, -ENODEV when no suitable channel is available.
 */
static int dma_init(void)
{
#ifdef USE_HW_DMA
    dma_cap_mask_t mask;

    dma_cap_zero(mask);
    dma_cap_set(DMA_MEMCPY, mask);

    chan = dma_request_channel(mask, NULL, NULL);
    if (!chan) {
        pr_err("failed to request dma channel\n");
        return -ENODEV;
    }
#endif
    return 0;
}

/*
 * Release the dmaengine channel acquired by dma_init().
 * Safe to call when no channel was acquired. Always returns 0.
 */
static int dma_deinit(void)
{
#ifdef USE_HW_DMA
    if (chan) {
        dma_release_channel(chan);
        chan = NULL;    /* avoid a dangling pointer across init/deinit cycles */
    }
#endif

    return 0;
}

/*
 * Copy @size bytes from @src to @dst.
 *
 * Hardware path: issue a dmaengine memcpy between the regions' physical
 * addresses and busy-wait for completion.  Software fallback: memcpy()
 * through the non-cacheable mappings.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int dma_memcopy(struct mem_region *dst, struct mem_region *src, size_t size)
{
#ifdef USE_HW_DMA
    struct dma_async_tx_descriptor *tx;
    dma_cookie_t cookie;
    enum dma_status status;

    tx = dmaengine_prep_dma_memcpy(chan, dst->pa, src->pa, size, DMA_CTRL_ACK);
    if (!tx) {
        /* prep can fail (e.g. no descriptors); original deref'd NULL here */
        pr_err("failed to prepare dma memcpy descriptor\n");
        return -EIO;
    }

    cookie = dmaengine_submit(tx);
    if (dma_submit_error(cookie)) {
        pr_err("failed to submit dma descriptor\n");
        return -EIO;
    }

    status = dma_sync_wait(chan, cookie);
    dmaengine_terminate_sync(chan);
    if (status != DMA_COMPLETE) {
        pr_err("dma transfer did not complete, status %d\n", status);
        return -EIO;
    }
#else
    memcpy(dst->va_nc, src->va_nc, size);
#endif
    return 0;
}

/* true: skip the actual loads in do_cacheline_fill(), probing speculation only. */
static volatile bool speculative = false;
/* true: scratch threads disable their core's HW data prefetcher for the run. */
static volatile bool data_prefetch_disable = false;

/*
 * Read CPUECTLR_EL1 via its raw encoding S3_0_C15_C1_4.
 * NOTE(review): this register is IMPLEMENTATION DEFINED and the encoding
 * is core-specific (matches several Arm Cortex-A cores) — confirm it is
 * valid for the target CPU before running.
 */
static inline u64 read_cpuectlr_el1(void)
{
    u64 val;

    __asm__ __volatile__("mrs %0, S3_0_C15_C1_4" : "=r"(val));

    return val;
}

/* Write CPUECTLR_EL1 and ISB so the new setting takes effect immediately. */
static inline void write_cpuectlr_el1(u64 val)
{
    __asm__ __volatile__("msr S3_0_C15_C1_4, %0" : : "r"(val));
    isb();
}

/*
 * Enable (@en = true) or disable (@en = false) the hardware data
 * prefetcher of the calling core by toggling the PF_DIS bit (bit 15)
 * of CPUECTLR_EL1.  Affects only the CPU this code runs on.
 */
static void enable_hw_data_prefetch(bool en)
{
    const u64 pf_dis = 1 << 15;    /* PF_DIS */
    u64 ctl = read_cpuectlr_el1();

    ctl = en ? (ctl & ~pf_dis) : (ctl | pf_dis);
    write_cpuectlr_el1(ctl);
}

/*
 * Touch dst_region through its cacheable + shareable alias.
 * - speculative == false: actually execute the loads, forcing cacheline
 *   allocations for dst_region in the caches/SLC.
 * - speculative == true:  never execute the loads; the intent is to tempt
 *   the CPU into speculatively prefetching dst_region instead.
 *   NOTE(review): in that case the loop body never advances p, so this
 *   spins forever until speculative is cleared — confirm that is intended.
 */
static void do_cacheline_fill(void)
{
    register int *p = dst_region.va_c_sh;
    register int *lastone = dst_region.va_c_sh + dst_region.size;
    register int sum = 0;

    while (p < lastone) {
        if (likely(speculative == false)) {
            sum +=
            /* manually unrolled: one load every 4 ints, 128 ints per pass */
            #define DOIT(i) p[i]+
            DOIT(0) DOIT(4) DOIT(8) DOIT(12) DOIT(16) DOIT(20) DOIT(24)
            DOIT(28) DOIT(32) DOIT(36) DOIT(40) DOIT(44) DOIT(48) DOIT(52)
            DOIT(56) DOIT(60) DOIT(64) DOIT(68) DOIT(72) DOIT(76)
            DOIT(80) DOIT(84) DOIT(88) DOIT(92) DOIT(96) DOIT(100)
            DOIT(104) DOIT(108) DOIT(112) DOIT(116) DOIT(120)
            p[124];
            p += 128;
        }
    };

    /* keep sum observable so the compiler cannot elide the loads */
    use_int(sum);
}
#undef  DOIT

/*
 * Scratch thread body, one instance pinned to every CPU except 0:
 * wakes every 5 ms and hammers dst_region's cacheable alias via
 * do_cacheline_fill() to create cache interference for the DMA test.
 * Runs until kthread_stop(); always returns 0.
 */
static int cacheline_fill_thread(void *data)
{

    /* NOTE(review): smp_processor_id() in preemptible context; the value is
     * informational only, and the thread is bound to its CPU before wake-up. */
    int cpu = smp_processor_id();
    bool pf_disabled = false;

    pr_info("Scratch thread running on CPU %d\n", cpu);

    /* Optionally switch off this core's HW data prefetcher for the whole run. */
    if (data_prefetch_disable) {
        enable_hw_data_prefetch(false);
        pf_disabled = true;
    }

    while (!kthread_should_stop()) {
        set_current_state(TASK_INTERRUPTIBLE);
        schedule_timeout(msecs_to_jiffies(5));

        do_cacheline_fill();
    }

    /* restore when exit */
    if (pf_disabled)
        enable_hw_data_prefetch(true);

    pr_info("Thread/%d exiting\n", cpu);
    return 0;
}

/*
 * Read @region byte-by-byte through its non-cacheable mapping and count
 * how many bytes differ from the expected pattern @val.
 * Returns the mismatch count (0 means the region fully matches).
 */
static int do_compare_region(struct mem_region *region, char val)
{
    char *cur = region->va_nc;
    char *end = region->va_nc + region->size;
    int mismatches = 0;

    for (; cur < end; cur++) {
        if (*cur != val)
            mismatches++;
    }

    return mismatches;
}

/*
 * DMA test thread body, pinned to CPU 0.  Every 10 ms round: fill src with
 * a random pattern byte, zero dst, DMA-copy src -> dst, then verify dst
 * through its non-cacheable alias.  Runs until kthread_stop(); returns 0.
 */
static int test_thread(void *data)
{
    struct platform_device *pdev = (struct platform_device *)data;
    int cpu = smp_processor_id();
    unsigned long round = 0;
    int err;
    static char random;    /* this round's expected pattern byte */

    pr_info("Test thread running on CPU %d\n", cpu);

    dma_init();

    while (!kthread_should_stop()) {
        set_current_state(TASK_INTERRUPTIBLE);
        schedule_timeout(msecs_to_jiffies(10));

        /*
         * 1. Initialize src_region with a random pattern byte and clear
         *    dst_region, both through the cacheable+shareable aliases.
         *    NOTE(review): src is written by the CPU and then read by the
         *    DMA device, which would normally need a DMA_TO_DEVICE clean;
         *    DMA_FROM_DEVICE invalidates instead.  Confirm the direction
         *    is a deliberate part of the coherency experiment.
         */
        prandom_bytes(&random, sizeof(random));
        memset(src_region.va_c_sh, random, src_region.size);
        memset(dst_region.va_c_sh, 0x00, src_region.size);
        dma_sync_single_for_device(&pdev->dev, src_region.pa, src_region.size, DMA_FROM_DEVICE);
        dma_sync_single_for_device(&pdev->dev, dst_region.pa, dst_region.size, DMA_FROM_DEVICE);

        /*
         * 2. DMA copy: src ---> dst
         */
        dma_memcopy(&dst_region, &src_region, src_region.size);

        /*
         * 3. Compare dst against the expected pattern via the NC alias.
         */
        err = do_compare_region(&dst_region, random);
        if (err)
            pr_err("round[%ld]: do_compare_region() failed count: %d\n", round, err);

        if (!(round % 400))
            pr_info("Test_thread: round %ld\n", round);

        round++;
    }

    dma_deinit();

    pr_info("Thread/%d exiting\n", cpu);
    return 0;
}

/*
 * Create an extra kernel virtual mapping of the physically contiguous
 * range [@phys, @phys + @size) with page protection @prot.
 * Returns the new virtual address (release with vunmap()), or NULL on
 * allocation/mapping failure.
 */
static void *mem_remap(phys_addr_t phys, size_t size, pgprot_t prot)
{
    unsigned int npages = PAGE_ALIGN(size) >> PAGE_SHIFT;
    struct page *first = phys_to_page(phys);
    struct page **page_list;
    void *mapped;
    unsigned int i;

    page_list = kmalloc_array(npages, sizeof(*page_list), GFP_KERNEL);
    if (!page_list)
        return NULL;

    for (i = 0; i < npages; i++)
        page_list[i] = nth_page(first, i);

    mapped = vmap(page_list, npages, VM_DMA_COHERENT, prot);
    kfree(page_list);

    return mapped;
}

#define PTE_SHARE_MASK          (_AT(pteval_t, 3) << 8)
#define PTE_NONSHARED           (_AT(pteval_t, 0) << 8)

/*
 * Allocate the source and destination DMA buffers and build the extra
 * cacheable-shareable, cacheable-non-shareable and Device-nGnRnE aliases
 * for each.  Returns 0 on success, negative errno on failure.
 *
 * NOTE(review): 0x200000 bytes are allocated but only PAGE_SIZE is
 * remapped and used — confirm the oversized allocation is intentional.
 */
static int poc_memory_alloc(struct platform_device *pdev)
{
    int ret;
    size_t size = PAGE_SIZE;

    ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
    if (ret)
        return -EIO;

    src_region.va_nc = dmam_alloc_coherent(&pdev->dev, 0x200000, &src_region.pa, GFP_KERNEL);
    if (!src_region.va_nc ||
        WARN_ON(src_region.pa & (PAGE_SIZE - 1))) {
        pr_err("Memory allocation failed\n");
        return -ENOMEM;
    }
    src_region.va_c_sh = mem_remap(src_region.pa, size, PAGE_KERNEL);
    src_region.va_c_nsh = mem_remap(src_region.pa, size,
                        __pgprot_modify(PAGE_KERNEL, PTE_SHARE_MASK, PTE_NONSHARED));
    /* BUGFIX: was dst_region.pa, which is still uninitialized at this point */
    src_region.va_nGnRnE = mem_remap(src_region.pa, size, pgprot_noncached(PAGE_KERNEL));
    src_region.size = size;


    dst_region.va_nc = dmam_alloc_coherent(&pdev->dev, 0x200000, &dst_region.pa, GFP_KERNEL);
    if (!dst_region.va_nc ||
        WARN_ON(dst_region.pa & (PAGE_SIZE - 1))) {
        pr_err("Memory allocation failed\n");
        return -ENOMEM;
    }
    dst_region.va_c_sh = mem_remap(dst_region.pa, size, PAGE_KERNEL);
    dst_region.va_c_nsh = mem_remap(dst_region.pa, size,
                        __pgprot_modify(PAGE_KERNEL, PTE_SHARE_MASK, PTE_NONSHARED));
    dst_region.va_nGnRnE = mem_remap(dst_region.pa, size, pgprot_noncached(PAGE_KERNEL));
    dst_region.size = size;

    return 0;
}

static int poc_probe(struct platform_device *pdev)
{
    int cpu, ret = 0;
    int num_cpus;

    if (poc_memory_alloc(pdev))
        return -ENOMEM;

    num_cpus = num_online_cpus();
    threads = kmalloc_array(num_cpus, sizeof(struct task_struct *), GFP_KERNEL);
    if (!threads)
        return -ENOMEM;

    pr_info("Creating %d kernel threads\n", num_cpus);

    /*
     * core0上运行测试线程
     * 其他core上运行cacheline"碰瓷"线程
     */
    for_each_online_cpu(cpu) {
        if (cpu == 0) {
            threads[cpu] = kthread_create(test_thread, pdev, "poctest_test_thread/%d", cpu);
        } else {
            threads[cpu] = kthread_create(cacheline_fill_thread, NULL, "poctest_scratch_thread/%d", cpu);
        }

        if (IS_ERR(threads[cpu])) {
            ret = PTR_ERR(threads[cpu]);
            pr_err("Failed to create thread for CPU %d: %d\n", cpu, ret);
            threads[cpu] = NULL;
            goto cleanup;
        }

        kthread_bind(threads[cpu], cpu);
        wake_up_process(threads[cpu]);
    }

    return 0;

cleanup:
    for_each_online_cpu(cpu) {
        if (threads[cpu] && !IS_ERR(threads[cpu])) {
            kthread_stop(threads[cpu]);
        }
    }
    kfree(threads);
    return ret;
}

static int poc_remove(struct platform_device *pdev)
{
    int cpu;

    pr_info("Stopping all threads\n");

    for_each_online_cpu(cpu) {
        if (threads[cpu]) {
            kthread_stop(threads[cpu]);
        }
    }

    kfree(threads);
    pr_info("Module unloaded\n");

    return 0;
}

/* Dummy platform device registered at module init so the driver probes. */
static struct platform_device *poc_test_dev;
static struct platform_driver poc_test_driver = {
    .probe          = poc_probe,
    .remove         = poc_remove,
    .driver = {
        .name       = "poc_test",
        .owner      = THIS_MODULE,  /* redundant since platform_driver_register sets it */
    },
};

/*
 * Module init: register the dummy platform device and then the driver
 * (probe fires on the name match).  Returns 0 on success or the real
 * negative errno from the failing step (previously a bare -1).
 */
static int __init poc_test_module_init(void)
{
    int ret;

    poc_test_dev = platform_device_register_simple("poc_test", PLATFORM_DEVID_NONE, NULL, 0);
    if (IS_ERR(poc_test_dev))
        return PTR_ERR(poc_test_dev);

    ret = platform_driver_register(&poc_test_driver);
    if (ret) {
        platform_device_unregister(poc_test_dev);
        return ret;
    }

    return 0;
}

/* Module exit: unregister driver first (triggers remove), then the device. */
static void __exit poc_test_module_exit(void)
{
    platform_driver_unregister(&poc_test_driver);
    platform_device_unregister(poc_test_dev);
}

module_init(poc_test_module_init);
module_exit(poc_test_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Point of Coherency Test");
