/*
 * vendor/amlogic/media/common/codec_mm/codec_mm.c
 *
 * Copyright (C) 2017 Amlogic, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */

#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/cma.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/libfdt_env.h>
#include <linux/of_reserved_mem.h>
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/genalloc.h>
#include <linux/dma-mapping.h>
#include <linux/dma-map-ops.h>
#include <linux/delay.h>

#include <linux/amlogic/media/codec_mm/codec_mm.h>
#include <linux/amlogic/media/codec_mm/codec_mm_scatter.h>
#include <linux/amlogic/media/codec_mm/configs.h>

#include <linux/highmem.h>
#include <linux/page-flags.h>
#include <linux/vmalloc.h>
#include "codec_mm_priv.h"
#include "codec_mm_scatter_priv.h"
#include "codec_mm_keeper_priv.h"
#ifdef CONFIG_AMLOGIC_TEE
#include <linux/amlogic/tee.h>
#endif

#define TVP_POOL_NAME "TVP_POOL"
#define CMA_RES_POOL_NAME "CMA_RES"

#define CONFIG_PATH "media.codec_mm"
#define CONFIG_PREFIX "media"

#define MM_ALIGN_DOWN(addr, size) ((addr) & (~((size)-1)))
#define MM_ALIGN_UP2N(addr, alg2n) (((addr) + (1 << (alg2n)) - 1) & (~((1 << (alg2n)) - 1)))

#define RES_IS_MAPED
#define DEFAULT_TVP_SIZE_FOR_4K (236 * SZ_1M)
#define DEFAULT_TVP_SIZE_FOR_NO4K (160 * SZ_1M)

#define ALLOC_MAX_RETRY 1

#define CODEC_MM_FOR_DMA_ONLY(flags) (((flags)&CODEC_MM_FLAGS_FROM_MASK) == CODEC_MM_FLAGS_DMA)

#define CODEC_MM_FOR_CPU_ONLY(flags) (((flags)&CODEC_MM_FLAGS_FROM_MASK) == CODEC_MM_FLAGS_CPU)

#define CODEC_MM_FOR_DMACPU(flags) (((flags)&CODEC_MM_FLAGS_FROM_MASK) == CODEC_MM_FLAGS_DMA_CPU)

#define RESERVE_MM_ALIGNED_2N 17

#define RES_MEM_FLAGS_HAVE_MAPED 0x4
static int dump_mem_infos(void *buf, int size);
static int dump_free_mem_infos(void *buf, int size);

#if 1
/*
 * Map or unmap @count lowmem pages starting at @page in the kernel's
 * init_mm page tables.
 *
 * @page:  first page; must be a lowmem page (has a kernel direct mapping).
 * @count: number of consecutive pages to operate on.
 * @set:   true to install PAGE_KERNEL ptes, false to clear them.
 *
 * Returns 0 on success (the walk stops silently at a missing table
 * level), -EINVAL for a NULL or highmem page.
 */
int cma_mmu_op(struct page *page, int count, bool set)
{
    pgd_t *pgd;
    pud_t *pud;
    pmd_t *pmd;
    pte_t *pte;
    unsigned long addr, end;
    struct mm_struct *mm;

    if (!page || PageHighMem(page)) {
        return -EINVAL;
    }

    addr = (unsigned long)page_address(page);
    end = addr + count * PAGE_SIZE;
    mm = &init_mm;
    for (; addr < end; addr += PAGE_SIZE) {
        pgd = pgd_offset(mm, addr);
        if (pgd_none(*pgd) || pgd_bad(*pgd)) {
            break;
        }

        pud = pud_offset((p4d_t *)pgd, addr);
        if (pud_none(*pud) || pud_bad(*pud)) {
            break;
        }

        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd)) {
            break;
        }

        pte = pte_offset_map(pmd, addr);
        if (set) {
            set_pte_at(mm, addr, pte, mk_pte(page, PAGE_KERNEL));
        } else {
            pte_clear(mm, addr, pte);
        }
#ifdef CONFIG_ARM
        pr_debug("%s, add:%lx, pgd:%p %x, pmd:%p %x, pte:%p %x\n", __func__, addr, pgd, (int)pgd_val(*pgd), pmd,
                 (int)pmd_val(*pmd), pte, (int)pte_val(*pte));
#elif defined(CONFIG_ARM64)
        pr_debug("%s, add:%lx, pgd:%p %llx, pmd:%p %llx, pte:%p %llx\n", __func__, addr, pgd, pgd_val(*pgd), pmd,
                 pmd_val(*pmd), pte, pte_val(*pte));
#endif
        /*
         * Unmap only after the debug prints: the previous code called
         * pte_unmap() first and then dereferenced the stale pte pointer,
         * which is a use-after-unmap when CONFIG_HIGHPTE is enabled.
         */
        pte_unmap(pte);
        page++;
    }
    return 0;
}
#endif

/*
 * debug_mode:
 *
 * disable reserved:1
 * disable cma:2
 * disable sys memory:4
 * disable half memory:8,
 * only used half memory,for debug.
 * return nomem,if alloced > total/2;
 * dump memory info on failed:0x10,
 * trace memory alloc/free info:0x20,
 */
static u32 debug_mode;

static u32 debug_sc_mode;
/* Expose the scatter-allocator debug bitmask to the scatter sub-module. */
u32 codec_mm_get_sc_debug_mode(void)
{
    return debug_sc_mode;
}
EXPORT_SYMBOL(codec_mm_get_sc_debug_mode);

static u32 debug_keep_mode;
/* Expose the keeper debug bitmask to the keeper sub-module. */
u32 codec_mm_get_keep_debug_mode(void)
{
    return debug_keep_mode;
}
EXPORT_SYMBOL(codec_mm_get_keep_debug_mode);

static int default_tvp_size;
static int default_tvp_4k_size;
static int default_cma_res_size;

#define TVP_MAX_SLOT 8
/*
 * tvp_mode == 0 means protect secure memory in secmem ta
 * tvp_mode == 1 means use protect secure memory in codec_mm
 */
static u32 tvp_mode;

/*
 * A bank of up to TVP_MAX_SLOT gen_pool allocators (used for the TVP
 * and CMA-reserved pools) managed as one logical pool.
 */
struct extpool_mgt_s {
    struct gen_pool *gen_pool[TVP_MAX_SLOT]; /* one sub-allocator per slot */
    struct codec_mm_s *mm[TVP_MAX_SLOT];     /* backing allocation per slot */
    int slot_num;      /* number of populated slots */
    int alloced_size;  /* bytes currently handed out from this pool */
    int total_size;    /* total capacity across all slots */
    struct mutex pool_lock;
};

/* Global state of the codec memory manager (singleton, see get_mem_mgt()). */
struct codec_mm_mgt_s {
    struct cma *cma;
    struct device *dev;
    struct list_head mem_list;           /* all live codec_mm_s allocations */
    struct gen_pool *res_pool;           /* reserved-memory sub-allocator */
    struct extpool_mgt_s tvp_pool;       /* secure (TVP) pool */
    struct extpool_mgt_s cma_res_pool;   /* CMA carved out as reserved */
    struct reserved_mem rmem;
    int total_codec_mem_size;
    int total_alloced_size;              /* excludes CODEC_MM_FLAGS_FOR_LOCAL_MGR */
    int total_cma_size;
    int total_reserved_size;
    int max_used_mem_size;               /* high-water mark of total_alloced_size */

    /* per-source accounting, updated under 'lock' */
    int alloced_res_size;
    int alloced_cma_size;
    int alloced_sys_size;
    int alloced_for_sc_size;             /* bytes held by the scatter allocator */
    int alloced_for_sc_cnt;
    int alloced_from_coherent;
    int phys_vmaped_page_cnt;            /* highmem pages currently vmap()ed */

    int alloc_from_sys_pages_max;        /* largest request served by __get_free_pages */
    int enable_kmalloc_on_nomem;         /* allow sys-pages fallback on OOM */
    int res_mem_flags;
    int global_memid;                    /* monotonically increasing mem_id source */
    /* 1:for 1080p,2:for 4k */
    int tvp_enable;
    /* 1:for 1080p,2:for 4k */
    int fastplay_enable;
    spinlock_t lock;                     /* protects mem_list and the counters above */
    atomic_t tvp_user_count;
    /* for tvp operator used */
    struct mutex tvp_protect_lock;
};

#define PHY_OFF() offsetof(struct codec_mm_s, phy_addr)
#define HANDLE_OFF() offsetof(struct codec_mm_s, mem_handle)
#define VADDR_OFF() offsetof(struct codec_mm_s, vbuffer)
#define VAL_OFF_VAL(mem, off) (*(unsigned long *)((unsigned long)(mem) + (off)))

#ifndef CONFIG_AMLOGIC_TEE
/*
 * Fallback stubs used when TEE support is not built in.  The 0xFFFFFFFF
 * return value is recognized by get_mem_mgt() as "not implemented" and
 * selects tvp_mode 0 (protection handled by the secmem TA).
 */
uint32_t tee_protect_tvp_mem(uint32_t start, uint32_t size, uint32_t *handle)
{
    return 0xFFFFFFFF;
}

void tee_unprotect_tvp_mem(uint32_t handle)
{
}
#endif

static int codec_mm_extpool_pool_release(struct extpool_mgt_s *tvp_pool);

/*
 * Return the singleton codec_mm manager, initializing it on first use.
 *
 * The first call probes tee_protect_tvp_mem(): the stub implementation
 * returns 0xFFFFFFFF, which selects tvp_mode 0 (protection handled in
 * the secmem TA); a real TEE implementation selects tvp_mode 1
 * (protection performed inside codec_mm).
 *
 * NOTE(review): the first-use initialization is unlocked; it assumes the
 * first call happens before any concurrent access -- TODO confirm.
 */
static struct codec_mm_mgt_s *get_mem_mgt(void)
{
    int ret = 0;
    int handle = 0;
    static struct codec_mm_mgt_s mgt;
    static int inited;

    if (!inited) {
        memset(&mgt, 0, sizeof(struct codec_mm_mgt_s));
        ret = tee_protect_tvp_mem(0, 0, &handle);
        if (ret == 0xFFFFFFFF) {
            tvp_mode = 0;
        } else {
            tvp_mode = 1;
        }
        mutex_init(&mgt.tvp_protect_lock);
        inited++;
    }
    return &mgt;
} /* stray ';' after the body removed (constraint violation under -pedantic) */

/*
 * Carve @size bytes out of the first slot of @tvp_pool that can satisfy
 * the request.  On success the serving gen_pool is stored in @from_pool
 * (needed later by codec_mm_extpool_free()).  Returns the chunk address
 * as an opaque handle, or NULL when no slot has room.
 */
static void *codec_mm_extpool_alloc(struct extpool_mgt_s *tvp_pool, void **from_pool, int size)
{
    int slot;

    for (slot = 0; slot < tvp_pool->slot_num; slot++) {
        struct gen_pool *pool = tvp_pool->gen_pool[slot];
        unsigned long chunk;

        /* Slots are filled in order; a NULL entry means no more pools. */
        if (!pool) {
            return NULL;
        }
        chunk = gen_pool_alloc(pool, size);
        if (chunk) {
            *from_pool = pool;
            return (void *)chunk;
        }
    }
    return NULL;
}

static void *codec_mm_extpool_free(struct gen_pool *from_pool, void *handle, int size)
{
    gen_pool_free(from_pool, (unsigned long)handle, size);
    return 0;
}

/*
 * Report whether @mmhandle is still registered in the global allocation
 * list.  Caller must hold mgt->lock.  Returns 1 if found, 0 otherwise.
 */
static int codec_mm_valid_mm_locked(struct codec_mm_s *mmhandle)
{
    struct codec_mm_mgt_s *mgt = get_mem_mgt();
    struct codec_mm_s *entry;

    list_for_each_entry(entry, &mgt->mem_list, list)
    {
        if (entry == mmhandle) {
            return 1;
        }
    }
    return 0;
}

/*
 * 1:  can alloced from reserved
 * 2:  can alloced from cma
 * 4:  can alloced from sys.
 * 8:  can alloced from tvp.
 *
 * is tvp = (flags & 1)
 */
/*
 * Probe which memory sources could satisfy @need_size.
 *
 * Returned bitmask:
 *   1: reserved pool (or the cma-res pool) has room
 *   2: cma has room
 *   4: request small enough for system pages
 *   8: tvp pool has room
 *
 * flags bit0 set means the request is TVP-only, so only bit 8 survives.
 * debug_mode bits 0-2 force-disable reserved/cma/sys respectively, and
 * bit 3 limits total usage to half of codec memory for debugging.
 */
static int codec_mm_alloc_pre_check_in(struct codec_mm_mgt_s *mgt, int need_size, int flags)
{
    int have_space = 0;
    int aligned_size = PAGE_ALIGN(need_size);

    if (aligned_size <= mgt->total_reserved_size - mgt->alloced_res_size) {
        have_space |= 1;
    }
    if (aligned_size <= mgt->cma_res_pool.total_size - mgt->cma_res_pool.alloced_size) {
        have_space |= 1;
    }

    if (aligned_size <= mgt->total_cma_size - mgt->alloced_cma_size) {
        have_space |= 2L;
    }

    if (aligned_size / PAGE_SIZE <= mgt->alloc_from_sys_pages_max) {
        have_space |= 4L;
    }
    if (aligned_size <= mgt->tvp_pool.total_size - mgt->tvp_pool.alloced_size) {
        have_space |= 8L;
    }

    if (flags & 1) {
        have_space = have_space & 8L;
    }

    if (debug_mode & 0xf) {
        have_space = have_space & (~(debug_mode & 1));
        have_space = have_space & (~(debug_mode & 2L));
        have_space = have_space & (~(debug_mode & 4L));
        if (debug_mode & 8L) {
            if (mgt->total_alloced_size > mgt->total_codec_mem_size / 2L) {
                /* %u: debug_mode is a u32 bitmask (was %d, -Wformat mismatch) */
                pr_info("codec mm memory is limited by %u, (bit8 enable)\n", debug_mode);
                have_space = 0;
            }
        }
    }

    return have_space;
}

/*
 * Translate a kernel virtual address inside one of our allocations back
 * to its physical address.  Returns 0 when @vaddr belongs to no tracked
 * allocation (or the match has no physical address recorded).
 */
static ulong codec_mm_search_phy_addr(char *vaddr)
{
    struct codec_mm_mgt_s *mgt = get_mem_mgt();
    struct codec_mm_s *entry;
    ulong result = 0;
    unsigned long flags;

    spin_lock_irqsave(&mgt->lock, flags);

    list_for_each_entry(entry, &mgt->mem_list, list)
    {
        long off = vaddr - entry->vbuffer;

        if (off >= 0 && off < entry->buffer_size) {
            if (entry->phy_addr) {
                result = entry->phy_addr + off;
            }
            break;
        }
    }

    spin_unlock_irqrestore(&mgt->lock, flags);

    return result;
}

/*
 * Translate a physical address into a kernel virtual address.  Lowmem is
 * answered directly from the linear mapping; highmem is looked up in the
 * allocation list.  Returns NULL when no tracked mapping covers the
 * address.
 */
static void *codec_mm_search_vaddr(unsigned long phy_addr)
{
    struct codec_mm_mgt_s *mgt = get_mem_mgt();
    struct codec_mm_s *entry;
    void *result = NULL;
    unsigned long flags;

    if (!PageHighMem(phys_to_page(phy_addr))) {
        return phys_to_virt(phy_addr);
    }

    spin_lock_irqsave(&mgt->lock, flags);

    list_for_each_entry(entry, &mgt->mem_list, list)
    {
        if (phy_addr >= entry->phy_addr && phy_addr < entry->phy_addr + entry->buffer_size) {
            if (entry->vbuffer) {
                result = entry->vbuffer + (phy_addr - entry->phy_addr);
            }
            break;
        }
    }

    spin_unlock_irqrestore(&mgt->lock, flags);

    return result;
}

/*
 * Map @size bytes of physical memory at @addr into kernel virtual space.
 * Lowmem is answered from the linear mapping; highmem is vmap()ed page
 * by page.  Returns the virtual address honoring the original in-page
 * offset, or NULL on failure.  Highmem mappings must later be released
 * with codec_mm_unmap_phyaddr().
 */
u8 *codec_mm_vmap(ulong addr, u32 size)
{
    struct page **pages;
    ulong base;
    u32 off, npages, i;
    u8 *vaddr;

    if (!PageHighMem(phys_to_page(addr))) {
        return phys_to_virt(addr);
    }

    off = offset_in_page(addr);
    base = addr - off;
    npages = DIV_ROUND_UP(size + off, PAGE_SIZE);

    pages = kmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
    if (!pages) {
        return NULL;
    }

    for (i = 0; i < npages; i++) {
        pages[i] = pfn_to_page((base + i * PAGE_SIZE) >> PAGE_SHIFT);
    }

    vaddr = vmap(pages, npages, VM_MAP, PAGE_KERNEL);
    /* the page array is only needed while building the mapping */
    kfree(pages);

    if (!vaddr) {
        pr_err("the phy(%lx) vmaped fail, size: %d\n", base, npages << PAGE_SHIFT);
        return NULL;
    }

    if (debug_mode & 0x20) {
        pr_info("[HIGH-MEM-MAP] %s, pa(%lx) to va(%p), size: %d\n", __func__, base, vaddr, npages << PAGE_SHIFT);
    }

    return vaddr + off;
}
EXPORT_SYMBOL(codec_mm_vmap);

/*
 * Tear down a mapping created by codec_mm_vmap().  Only vmap()ed highmem
 * addresses are vunmap()ed; a lowmem linear-map address is a no-op.
 */
void codec_mm_unmap_phyaddr(u8 *vaddr)
{
    if (is_vmalloc_or_module_addr(vaddr)) {
        vunmap((void *)((ulong)vaddr & PAGE_MASK));
    }
}
EXPORT_SYMBOL(codec_mm_unmap_phyaddr);

static void *codec_mm_map_phyaddr(struct codec_mm_s *mem)
{
    void *vaddr = NULL;
    unsigned int phys = mem->phy_addr;
    unsigned int size = mem->buffer_size;

    if (!PageHighMem(phys_to_page(phys))) {
        return phys_to_virt(phys);
    }

    vaddr = codec_mm_vmap(phys, size);

    mem->flags |= CODEC_MM_FLAGS_FOR_PHYS_VMAPED;

    return vaddr;
}

/*
 * Core allocator: try each permitted memory source in priority order and
 * fill in mem->mem_handle / phy_addr / vbuffer / from_flags on success.
 *
 * Source order tried: small system-page requests, CMA (when
 * CODEC_MM_FLAGS_CMA_FIRST), reserved gen_pool, CMA-reserved ext pool,
 * CMA, TVP ext pool, and finally system pages again when
 * enable_kmalloc_on_nomem is set.
 *
 * Returns 0 on success; -10001 when no source has space at all, -10002
 * when no *permitted* source has space, -10003 when every attempt failed.
 */
static int codec_mm_alloc_in(struct codec_mm_mgt_s *mgt, struct codec_mm_s *mem)
{
    int try_alloced_from_sys = 0;
    int try_alloced_from_reserved = 0;
    /* never allocate with finer than page alignment */
    int align_2n = mem->align2n < PAGE_SHIFT ? PAGE_SHIFT : mem->align2n;
    int try_cma_first = mem->flags & CODEC_MM_FLAGS_CMA_FIRST;
    int max_retry = ALLOC_MAX_RETRY;
    int have_space;
    /* one bit per attempted path; dumped on failure for diagnosis */
    int alloc_trace_mask = 0;

    int can_from_res = (((mgt->res_pool != NULL) || (mgt->cma_res_pool.total_size > 0)) && /* have res */
                        !(mem->flags & CODEC_MM_FLAGS_CMA)) ||                             /* must not CMA */
                       ((mem->flags & CODEC_MM_FLAGS_RESERVED) ||                          /* need RESERVED */
                        CODEC_MM_FOR_DMA_ONLY(mem->flags) ||                               /* NO CPU */
                        ((mem->flags & CODEC_MM_FLAGS_CPU) && (mgt->res_mem_flags & RES_MEM_FLAGS_HAVE_MAPED)));
    /* CPU */
    int can_from_cma = ((mgt->total_cma_size > 0) && /* have cma */
                        !(mem->flags & CODEC_MM_FLAGS_RESERVED)) ||
                       (mem->flags & CODEC_MM_FLAGS_CMA); /* can from CMA */
    /* not always reserved. */

    int can_from_sys = (mem->flags & CODEC_MM_FLAGS_DMA_CPU) && (mem->page_count <= mgt->alloc_from_sys_pages_max);

    int can_from_tvp = (mem->flags & CODEC_MM_FLAGS_TVP);

    /* TVP requests must come from the TVP pool exclusively. */
    if (can_from_tvp) {
        can_from_sys = 0;
        can_from_res = 0;
        can_from_cma = 0;
    }

    have_space = codec_mm_alloc_pre_check_in(mgt, mem->buffer_size, 0);
    if (!have_space) {
        return -10001L;
    }

    /* intersect the request's permissions with the sources that have room */
    can_from_res = can_from_res && (have_space & 1);
    can_from_cma = can_from_cma && (have_space & 2L);
    can_from_sys = can_from_sys && (have_space & 4L);
    can_from_tvp = can_from_tvp && (have_space & 8L);
    if (!can_from_res && !can_from_cma && !can_from_sys && !can_from_tvp) {
        if (debug_mode & 0x10) {
            pr_info("error, codec mm have space:%x\n", have_space);
        }
        return -10002L;
    }

    do {
        /* 1) small DMA+CPU requests: plain system pages */
        if ((mem->flags & CODEC_MM_FLAGS_DMA_CPU) && mem->page_count <= mgt->alloc_from_sys_pages_max &&
            align_2n <= PAGE_SHIFT) {
            alloc_trace_mask |= 1 << 0;
            mem->mem_handle = (void *)__get_free_pages(GFP_KERNEL, get_order(mem->buffer_size));
            mem->from_flags = AMPORTS_MEM_FLAGS_FROM_GET_FROM_PAGES;
            if (mem->mem_handle) {
                mem->vbuffer = mem->mem_handle;
                mem->phy_addr = virt_to_phys(mem->mem_handle);
                break;
            }
            try_alloced_from_sys = 1;
        }
        /* cma first. */
        if (try_cma_first && can_from_cma) {
            /*
             * normal cma.
             */
            alloc_trace_mask |= 1 << 1;
            mem->mem_handle = dma_alloc_from_contiguous(mgt->dev, mem->page_count, align_2n - PAGE_SHIFT, false);
            mem->from_flags = AMPORTS_MEM_FLAGS_FROM_GET_FROM_CMA;
            if (mem->mem_handle) {
                /* mem_handle is a struct page * on the CMA path */
                mem->vbuffer = mem->mem_handle;
                mem->phy_addr = page_to_phys((struct page *)mem->mem_handle);
#ifdef CONFIG_ARM64
                if (mem->flags & CODEC_MM_FLAGS_CMA_CLEAR) {
                }
#endif
                break;
            }
        }
        /* reserved alloc.. */
        if ((can_from_res && mgt->res_pool) && (align_2n <= RESERVE_MM_ALIGNED_2N)) {
            /* pool granule is 2^RESERVE_MM_ALIGNED_2N (128 KiB) */
            int aligned_buffer_size = ALIGN(mem->buffer_size, (1 << RESERVE_MM_ALIGNED_2N));
            alloc_trace_mask |= 1 << 2L;
            mem->mem_handle = (void *)gen_pool_alloc(mgt->res_pool, aligned_buffer_size);
            mem->from_flags = AMPORTS_MEM_FLAGS_FROM_GET_FROM_REVERSED;
            if (mem->mem_handle) {
                /* default is no maped */
                mem->vbuffer = NULL;
                mem->phy_addr = (unsigned long)mem->mem_handle;
                mem->buffer_size = aligned_buffer_size;
                break;
            }
            try_alloced_from_reserved = 1;
        }
        /* can_from_res is reserved.. */
        if (can_from_res) {
            if (mgt->cma_res_pool.total_size > 0 &&
                (mgt->cma_res_pool.alloced_size + mem->buffer_size) < mgt->cma_res_pool.total_size) {
                /*
                 * from cma res first.
                 */
                int aligned_buffer_size = ALIGN(mem->buffer_size, (1 << RESERVE_MM_ALIGNED_2N));
                alloc_trace_mask |= 1 << 3L;
                mem->mem_handle =
                    (void *)codec_mm_extpool_alloc(&mgt->cma_res_pool, &mem->from_ext, aligned_buffer_size);
                mem->from_flags = AMPORTS_MEM_FLAGS_FROM_GET_FROM_CMA_RES;
                if (mem->mem_handle) {
                    /* no vaddr for TVP MEMORY */
                    mem->vbuffer = NULL;
                    mem->phy_addr = (unsigned long)mem->mem_handle;
                    mem->buffer_size = aligned_buffer_size;
                    break;
                }
            }
        }
        if (can_from_cma) {
            /*
             * normal cma.
             */
            alloc_trace_mask |= 1 << 4L;
            mem->mem_handle = dma_alloc_from_contiguous(mgt->dev, mem->page_count, align_2n - PAGE_SHIFT, false);
            mem->from_flags = AMPORTS_MEM_FLAGS_FROM_GET_FROM_CMA;
            if (mem->mem_handle) {
                mem->phy_addr = page_to_phys((struct page *)mem->mem_handle);
                /* map for the CPU only when the caller asked for CPU access */
                if (mem->flags & CODEC_MM_FLAGS_CPU) {
                    mem->vbuffer = codec_mm_map_phyaddr(mem);
                }
#ifdef CONFIG_ARM64
                if (mem->flags & CODEC_MM_FLAGS_CMA_CLEAR) {
                }
#endif
                break;
            }
        }
        if (can_from_tvp && align_2n <= RESERVE_MM_ALIGNED_2N) {
            /* 64k,aligend */
            int aligned_buffer_size = ALIGN(mem->buffer_size, (1 << RESERVE_MM_ALIGNED_2N));
            alloc_trace_mask |= 1 << 5L;
            mem->mem_handle = (void *)codec_mm_extpool_alloc(&mgt->tvp_pool, &mem->from_ext, aligned_buffer_size);
            mem->from_flags = AMPORTS_MEM_FLAGS_FROM_GET_FROM_TVP;
            if (mem->mem_handle) {
                /* no vaddr for TVP MEMORY */
                mem->vbuffer = NULL;
                mem->phy_addr = (unsigned long)mem->mem_handle;
                mem->buffer_size = aligned_buffer_size;
                break;
            }
        }

        /* last resort: system pages, if enabled and not already tried */
        if ((mem->flags & CODEC_MM_FLAGS_DMA_CPU) && mgt->enable_kmalloc_on_nomem && !try_alloced_from_sys) {
            alloc_trace_mask |= 1 << 6L;
            mem->mem_handle = (void *)__get_free_pages(GFP_KERNEL, get_order(mem->buffer_size));
            mem->from_flags = AMPORTS_MEM_FLAGS_FROM_GET_FROM_PAGES;
            if (mem->mem_handle) {
                mem->vbuffer = mem->mem_handle;
                mem->phy_addr = virt_to_phys((void *)mem->mem_handle);
                break;
            }
        }
    } while (--max_retry > 0);
    if (mem->mem_handle) {
        return 0;
    } else {
        if (debug_mode & 0x10) {
            pr_info("codec mm have space:%x\n", have_space);
            pr_info("canfrom: %d,%d,%d,%d\n", can_from_tvp, can_from_sys, can_from_res, can_from_cma);
            pr_info("alloc flags:%d,align=%d,%d,pages:%d,s:%d\n", mem->flags, mem->align2n, align_2n, mem->page_count,
                    mem->buffer_size);
            pr_info("try alloc mask:%x\n", alloc_trace_mask);
        }
        return -10003L;
    }
}

/*
 * Drop TEE protection on every slot of @tvp_pool once nothing is
 * allocated from it any more.
 *
 * Returns 0 when the pool was unprotected, -1 while allocations are
 * still outstanding.
 *
 * NOTE(review): the emptiness check reads the global mgt->tvp_pool while
 * the loop walks the @tvp_pool argument; the visible caller passes
 * &mgt->tvp_pool so both refer to the same pool -- confirm before adding
 * other callers.
 */
static int codec_mm_tvp_pool_unprotect(struct extpool_mgt_s *tvp_pool)
{
    struct codec_mm_mgt_s *mgt = get_mem_mgt();
    int ret = -1;
    int i = 0;

    if (mgt->tvp_pool.alloced_size <= 0) {
        for (i = 0; i < tvp_pool->slot_num; i++) {
            pr_info("unprotect tvp %d handle is %d\n", i, tvp_pool->mm[i]->tvp_handle);
            /* handle > 0 means this slot currently holds TEE protection */
            if (tvp_pool->mm[i]->tvp_handle > 0) {
                tee_unprotect_tvp_mem(tvp_pool->mm[i]->tvp_handle);
                tvp_pool->mm[i]->tvp_handle = -1;
            }
        }
        ret = 0;
    }
    return ret;
}

/*
 * Return the backing memory of @mem to whichever source it came from and
 * roll back the per-source accounting under mgt->lock.
 *
 * For TVP allocations (with tvp_mode >= 1): once the last TVP user is
 * gone and the pool has drained, drop TEE protection and release the
 * pool, disabling TVP until it is re-enabled.
 */
static void codec_mm_free_in(struct codec_mm_mgt_s *mgt, struct codec_mm_s *mem)
{
    unsigned long flags;

    /* Stage 1: hand the pages back to their source (lock not needed). */
    if (mem->from_flags == AMPORTS_MEM_FLAGS_FROM_GET_FROM_CMA) {
        if (mem->flags & CODEC_MM_FLAGS_FOR_PHYS_VMAPED) {
            codec_mm_unmap_phyaddr(mem->vbuffer);
        }

        dma_release_from_contiguous(mgt->dev, mem->mem_handle, mem->page_count);
    } else if (mem->from_flags == AMPORTS_MEM_FLAGS_FROM_GET_FROM_REVERSED) {
        gen_pool_free(mgt->res_pool, (unsigned long)mem->mem_handle, mem->buffer_size);
    } else if (mem->from_flags == AMPORTS_MEM_FLAGS_FROM_GET_FROM_TVP) {
        codec_mm_extpool_free((struct gen_pool *)mem->from_ext, mem->mem_handle, mem->buffer_size);
    } else if (mem->from_flags == AMPORTS_MEM_FLAGS_FROM_GET_FROM_PAGES) {
        free_pages((unsigned long)mem->mem_handle, get_order(mem->buffer_size));
    } else if (mem->from_flags == AMPORTS_MEM_FLAGS_FROM_GET_FROM_CMA_RES) {
        codec_mm_extpool_free((struct gen_pool *)mem->from_ext, mem->mem_handle, mem->buffer_size);
    }

    /* Stage 2: update the usage counters atomically. */
    spin_lock_irqsave(&mgt->lock, flags);
    if (!(mem->flags & CODEC_MM_FLAGS_FOR_LOCAL_MGR)) {
        mgt->total_alloced_size -= mem->buffer_size;
    }
    if (mem->flags & CODEC_MM_FLAGS_FOR_SCATTER) {
        mgt->alloced_for_sc_size -= mem->buffer_size;
        mgt->alloced_for_sc_cnt--;
    }

    if (mem->flags & CODEC_MM_FLAGS_FOR_PHYS_VMAPED) {
        mgt->phys_vmaped_page_cnt -= mem->page_count;
    }

    if (mem->from_flags == AMPORTS_MEM_FLAGS_FROM_GET_FROM_CMA) {
        mgt->alloced_cma_size -= mem->buffer_size;
    } else if (mem->from_flags == AMPORTS_MEM_FLAGS_FROM_GET_FROM_REVERSED) {
        mgt->alloced_res_size -= mem->buffer_size;
    } else if (mem->from_flags == AMPORTS_MEM_FLAGS_FROM_GET_FROM_TVP) {
        mgt->tvp_pool.alloced_size -= mem->buffer_size;
    } else if (mem->from_flags == AMPORTS_MEM_FLAGS_FROM_GET_FROM_PAGES) {
        mgt->alloced_sys_size -= mem->buffer_size;
    } else if (mem->from_flags == AMPORTS_MEM_FLAGS_FROM_GET_FROM_CMA_RES) {
        mgt->cma_res_pool.alloced_size -= mem->buffer_size;
    }

    spin_unlock_irqrestore(&mgt->lock, flags);

    /* Stage 3: tear down TVP protection when it is no longer needed. */
    if ((mem->from_flags == AMPORTS_MEM_FLAGS_FROM_GET_FROM_TVP) && (tvp_mode >= 1)) {
        mutex_lock(&mgt->tvp_protect_lock);
        if (atomic_read(&mgt->tvp_user_count) == 0) {
            if (codec_mm_tvp_pool_unprotect(&mgt->tvp_pool) == 0) {
                codec_mm_extpool_pool_release(&mgt->tvp_pool);
                mgt->tvp_enable = 0;
                /* log typo fixed: was "disalbe tvp" */
                pr_info("disable tvp\n");
            }
        }
        mutex_unlock(&mgt->tvp_protect_lock);
    }
}

/*
 * Allocate @size bytes of codec memory for @owner.
 *
 * @owner:    name recorded in the allocation's owner table.
 * @size:     requested bytes (rounded up to whole pages).
 * @align2n:  requested alignment as a power of two.
 * @memflags: CODEC_MM_FLAGS_* source/usage flags.
 *
 * On failure with scatter caches in use, the scatter caches are flushed
 * and the allocation retried once.  Returns the tracked handle, or NULL.
 */
struct codec_mm_s *codec_mm_alloc(const char *owner, int size, int align2n, int memflags)
{
    struct codec_mm_mgt_s *mgt = get_mem_mgt();
    struct codec_mm_s *mem = kmalloc(sizeof(struct codec_mm_s), GFP_KERNEL);
    int count;
    int ret;
    unsigned long flags;

    if (!mem) {
        pr_err("not enough mem for struct codec_mm_s\n");
        return NULL;
    }

    if (mgt->tvp_enable & 3L) {
        /* if tvp & video decoder used tvp memory. */
        /* Audio don't protect for default now. */
        if (memflags & CODEC_MM_FLAGS_TVP) {
            /* clear other flags, when tvp mode. */
            memflags = memflags & (~CODEC_MM_FLAGS_FROM_MASK);
            memflags |= CODEC_MM_FLAGS_TVP;
        }
    } else { /* tvp not enabled */
        if (memflags & CODEC_MM_FLAGS_TVP) {
            pr_err("TVP not enabled, when alloc from tvp %s need %d\n", owner, size);
        }
    }
    /* default to DMA when no source was requested */
    if ((memflags & CODEC_MM_FLAGS_FROM_MASK) == 0) {
        memflags |= CODEC_MM_FLAGS_DMA;
    }

    memset(mem, 0, sizeof(struct codec_mm_s));
    mem->buffer_size = PAGE_ALIGN(size);
    count = mem->buffer_size / PAGE_SIZE;
    mem->page_count = count;
    mem->align2n = align2n;
    mem->flags = memflags;
    ret = codec_mm_alloc_in(mgt, mem);
    if (ret < 0 && mgt->alloced_for_sc_cnt > 0 && /* have used for scatter. */
        !(memflags & CODEC_MM_FLAGS_FOR_SCATTER)) {
        /* if not scatter, free scatter caches. */
        pr_err(" No mem ret=%d, clear scatter cache!!\n", ret);
        dump_free_mem_infos(NULL, 0);
        if (memflags & CODEC_MM_FLAGS_TVP) {
            codec_mm_scatter_free_all_ignorecache(2L);
        } else {
            codec_mm_scatter_free_all_ignorecache(1);
        }
        ret = codec_mm_alloc_in(mgt, mem);
    }
    if (ret < 0) {
        pr_err("not enough mem for %s size %d, ret=%d\n", owner, size, ret);
        pr_err("mem flags %d %d, %d\n", memflags, mem->flags, align2n);
        kfree(mem);
        if (debug_mode & 0x10) {
            dump_mem_infos(NULL, 0);
        }
        return NULL;
    }

    /* register the allocation and update per-source accounting */
    atomic_set(&mem->use_cnt, 1);
    mem->owner[0] = owner;
    spin_lock_init(&mem->lock);
    spin_lock_irqsave(&mgt->lock, flags);
    mem->mem_id = mgt->global_memid++;
    list_add_tail(&mem->list, &mgt->mem_list);
    switch (mem->from_flags) {
        case AMPORTS_MEM_FLAGS_FROM_GET_FROM_PAGES:
            mgt->alloced_sys_size += mem->buffer_size;
            break;
        case AMPORTS_MEM_FLAGS_FROM_GET_FROM_CMA:
            mgt->alloced_cma_size += mem->buffer_size;
            break;
        case AMPORTS_MEM_FLAGS_FROM_GET_FROM_TVP:
            mgt->tvp_pool.alloced_size += mem->buffer_size;
            break;
        case AMPORTS_MEM_FLAGS_FROM_GET_FROM_REVERSED:
            mgt->alloced_res_size += mem->buffer_size;
            break;
        case AMPORTS_MEM_FLAGS_FROM_GET_FROM_CMA_RES:
            mgt->cma_res_pool.alloced_size += mem->buffer_size;
            break;
        default:
            pr_err("error alloc flags %d\n", mem->from_flags);
    }
    if (!(mem->flags & CODEC_MM_FLAGS_FOR_LOCAL_MGR)) {
        mgt->total_alloced_size += mem->buffer_size;
        /* track the high-water mark */
        if (mgt->total_alloced_size > mgt->max_used_mem_size) {
            mgt->max_used_mem_size = mgt->total_alloced_size;
        }
    }
    if ((mem->flags & CODEC_MM_FLAGS_FOR_SCATTER)) {
        mgt->alloced_for_sc_size += mem->buffer_size;
        mgt->alloced_for_sc_cnt++;
    }

    if (mem->flags & CODEC_MM_FLAGS_FOR_PHYS_VMAPED) {
        mgt->phys_vmaped_page_cnt += mem->page_count;
    }

    spin_unlock_irqrestore(&mgt->lock, flags);
    mem->alloced_jiffies = get_jiffies_64();
    if (debug_mode & 0x20) {
        pr_err("%s alloc size %d at %lx from %d,2n:%d,flags:%d\n", owner, size, mem->phy_addr, mem->from_flags, align2n,
               memflags);
    }
    return mem;
}
EXPORT_SYMBOL(codec_mm_alloc);

/*
 * Drop @owner's reference on @mem; free the allocation when the last
 * reference goes away.
 *
 * The owner table is kept contiguous with use_cnt by moving the
 * highest-indexed owner down into any slot matching @owner.
 */
void codec_mm_release(struct codec_mm_s *mem, const char *owner)
{
    int index;
    unsigned long flags;
    int i;
    const char *max_owner;
    struct codec_mm_mgt_s *mgt = get_mem_mgt();

    if (!mem) {
        return;
    }

    spin_lock_irqsave(&mgt->lock, flags);
    if (!codec_mm_valid_mm_locked(mem)) {
        /* message typo fixed: was "not valied" */
        pr_err("codec mm not valid!\n");
        spin_unlock_irqrestore(&mgt->lock, flags);
        return;
    }
    index = atomic_dec_return(&mem->use_cnt);
    max_owner = mem->owner[index];
    /* compact the owner table: overwrite @owner's slot with the last owner */
    for (i = 0; i < index; i++) {
        if (mem->owner[i] && strcmp(owner, mem->owner[i]) == 0) {
            mem->owner[i] = max_owner;
        }
    }
    if (debug_mode & 0x20) {
        pr_err("%s free mem size %d at %lx from %d,index =%d\n", owner, mem->buffer_size, mem->phy_addr,
               mem->from_flags, index);
    }
    mem->owner[index] = NULL;
    if (index == 0) {
        /* last reference gone: unlink and return the memory to its source */
        list_del(&mem->list);
        spin_unlock_irqrestore(&mgt->lock, flags);
        codec_mm_free_in(mgt, mem);
        kfree(mem);
        return;
    }
    spin_unlock_irqrestore(&mgt->lock, flags);
}
EXPORT_SYMBOL(codec_mm_release);

/*
 * Thin pass-through to dma_alloc_coherent() on the codec_mm device.
 * @owner and @memflags are accepted for API symmetry but unused here.
 */
void *codec_mm_dma_alloc_coherent(const char *owner, int size, dma_addr_t *dma_handle, gfp_t flag, int memflags)
{
    struct codec_mm_mgt_s *mgt = get_mem_mgt();

    return dma_alloc_coherent(mgt->dev, size, dma_handle, flag);
}
EXPORT_SYMBOL(codec_mm_dma_alloc_coherent);

/*
 * Counterpart of codec_mm_dma_alloc_coherent(); frees via the codec_mm
 * device.  @owner and @memflags are unused, kept for API symmetry.
 */
void codec_mm_dma_free_coherent(const char *owner, int size, void *cpu_addr, dma_addr_t dma_handle, int memflags)
{
    dma_free_coherent(get_mem_mgt()->dev, size, cpu_addr, dma_handle);
}
EXPORT_SYMBOL(codec_mm_dma_free_coherent);

/*
 * Like codec_mm_release(), but silently ignore handles that are no
 * longer (or never were) registered in the allocation list.
 */
void codec_mm_release_with_check(struct codec_mm_s *mem, const char *owner)
{
    struct codec_mm_mgt_s *mgt = get_mem_mgt();
    unsigned long flags;
    int ret;

    spin_lock_irqsave(&mgt->lock, flags);
    ret = codec_mm_valid_mm_locked(mem);
    spin_unlock_irqrestore(&mgt->lock, flags);
    if (ret) {
        /* 'return <void expression>;' is a C constraint violation; call
         * the void function plainly instead.
         */
        codec_mm_release(mem, owner);
    }
}
EXPORT_SYMBOL(codec_mm_release_with_check);

/*
 * Sync @size bytes at @vaddr for device access in direction @dir.
 *
 * vmalloc/vmap addresses are translated back to a physical address via
 * the codec_mm allocation list, falling back to vmalloc_to_page(); on
 * that path only highmem pages are synced (lowmem vmaps are presumed
 * already coherent via the mapping below -- NOTE(review): confirm).
 * Linear-map addresses go through a map/sync/unmap cycle on the
 * codec_mm device.
 */
void codec_mm_dma_flush(void *vaddr, int size, enum dma_data_direction dir)
{
    struct codec_mm_mgt_s *mgt = get_mem_mgt();
    dma_addr_t dma_addr;
    ulong phy_addr;

    if (is_vmalloc_or_module_addr(vaddr)) {
        phy_addr = codec_mm_search_phy_addr(vaddr);
        if (!phy_addr) {
            phy_addr = page_to_phys(vmalloc_to_page(vaddr)) + offset_in_page(vaddr);
        }
        if (phy_addr && PageHighMem(phys_to_page(phy_addr))) {
            dma_sync_single_for_device(mgt->dev, phy_addr, size, dir);
        }
        return;
    }

    /* only apply to the lowmem. */
    dma_addr = dma_map_single(mgt->dev, vaddr, size, dir);
    if (dma_mapping_error(mgt->dev, dma_addr)) {
        pr_err("dma map %d bytes error\n", size);
        return;
    }

    dma_sync_single_for_device(mgt->dev, dma_addr, size, dir);
    dma_unmap_single(mgt->dev, dma_addr, size, dir);
}
EXPORT_SYMBOL(codec_mm_dma_flush);

/*
 * Report whether @owner currently holds a reference on @mem.
 * Returns 1 when found in the owner table, 0 otherwise (including when
 * @mem is NULL or no longer registered).
 */
int codec_mm_has_owner(struct codec_mm_s *mem, const char *owner)
{
    int index;
    int i;
    unsigned long flags;
    int is_owner = 0;

    struct codec_mm_mgt_s *mgt = get_mem_mgt();

    if (mem) {
        spin_lock_irqsave(&mgt->lock, flags);
        if (!codec_mm_valid_mm_locked(mem)) {
            spin_unlock_irqrestore(&mgt->lock, flags);
            /* message typo fixed: was "not valied" */
            pr_err("codec mm %p not valid!\n", mem);
            return 0;
        }

        /* the first use_cnt entries of owner[] are live */
        index = atomic_read(&mem->use_cnt);

        for (i = 0; i < index; i++) {
            if (mem->owner[i] && strcmp(owner, mem->owner[i]) == 0) {
                is_owner = 1;
                break;
            }
        }
        spin_unlock_irqrestore(&mgt->lock, flags);
    }

    return is_owner;
}

/*
 * Register @owner as an additional reference holder on @mem.
 *
 * Returns 0 on success, -2 if @mem is not a live allocation, -3 when the
 * 8-entry owner table is already full.
 */
int codec_mm_request_shared_mem(struct codec_mm_s *mem, const char *owner)
{
    struct codec_mm_mgt_s *mgt = get_mem_mgt();
    unsigned long flags;
    int ret = -1;

    spin_lock_irqsave(&mgt->lock, flags);
    if (!codec_mm_valid_mm_locked(mem)) {
        ret = -2L;
        goto out;
    }
    if (atomic_read(&mem->use_cnt) > 7L) {
        ret = -3L;
        goto out;
    }
    ret = 0;
    mem->owner[atomic_inc_return(&mem->use_cnt) - 1] = owner;

out:
    spin_unlock_irqrestore(&mgt->lock, flags);

    /* Propagate the real status: the previous code always returned 0,
     * hiding both failure paths from callers.
     */
    return ret;
}
EXPORT_SYMBOL(codec_mm_request_shared_mem);

/*
 * Look up an allocation whose field at byte offset @off equals @val
 * (used with PHY_OFF()/HANDLE_OFF()/VADDR_OFF()).  The whole list is
 * scanned, so if several allocations share the value the LAST match
 * wins.  Returns NULL when nothing matches.
 */
static struct codec_mm_s *codec_mm_get_by_val_off(unsigned long val, int off)
{
    struct codec_mm_mgt_s *mgt = get_mem_mgt();
    struct codec_mm_s *entry;
    struct codec_mm_s *found = NULL;
    unsigned long flags;

    spin_lock_irqsave(&mgt->lock, flags);
    list_for_each_entry(entry, &mgt->mem_list, list)
    {
        if (VAL_OFF_VAL(entry, off) == val) {
            found = entry;
        }
    }
    spin_unlock_irqrestore(&mgt->lock, flags);
    return found;
}

/*
 * Allocate @page_cnt pages for DMA on behalf of @owner and return the
 * physical address, or 0 on failure.
 */
unsigned long codec_mm_alloc_for_dma(const char *owner, int page_cnt, int align2n, int memflags)
{
    struct codec_mm_s *mm;

    mm = codec_mm_alloc(owner, page_cnt << PAGE_SHIFT, align2n, memflags);
    return mm ? mm->phy_addr : 0;
}
EXPORT_SYMBOL(codec_mm_alloc_for_dma);

/*
 * Like codec_mm_alloc_for_dma() but also tags the allocation with a
 * decoder instance id and buffer id for per-instance accounting.
 * Returns the physical address, or 0 on failure.
 */
unsigned long codec_mm_alloc_for_dma_ex(const char *owner, int page_cnt, int align2n, int memflags, int ins_id,
                                        int buffer_id)
{
    struct codec_mm_s *mm;

    mm = codec_mm_alloc(owner, page_cnt << PAGE_SHIFT, align2n, memflags);
    if (!mm) {
        return 0;
    }
    mm->ins_id = ins_id;
    mm->ins_buffer_id = buffer_id;
    if (debug_mode & 0x20) {
        pr_err("%s, for ins %d, buffer id:%d\n", mm->owner[0] ? mm->owner[0] : "no", mm->ins_id, buffer_id);
    }
    return mm->phy_addr;
}
EXPORT_SYMBOL(codec_mm_alloc_for_dma_ex);

/*
 * Release the DMA allocation at @phy_addr previously obtained by
 * @owner.  Returns 0 on success, -1 when no entry matches the address.
 */
int codec_mm_free_for_dma(const char *owner, unsigned long phy_addr)
{
    struct codec_mm_s *mm = codec_mm_get_by_val_off(phy_addr, PHY_OFF());

    if (!mm) {
        return -1;
    }
    codec_mm_release(mm, owner);
    return 0;
}
EXPORT_SYMBOL(codec_mm_free_for_dma);

/*
 * Wrap the allocation @mm in a fresh gen_pool and install it in the
 * next free slot of @tvp_pool.  The caller increments slot_num on
 * success.  Returns 0, -ENOMEM if the pool cannot be created, or -1
 * if the chunk cannot be added.
 */
static int codec_mm_init_tvp_pool(struct extpool_mgt_s *tvp_pool, struct codec_mm_s *mm)
{
    struct gen_pool *new_pool;
    int err;

    new_pool = gen_pool_create(RESERVE_MM_ALIGNED_2N, -1);
    if (!new_pool) {
        return -ENOMEM;
    }
    err = gen_pool_add(new_pool, mm->phy_addr, mm->buffer_size, -1);
    if (err < 0) {
        gen_pool_destroy(new_pool);
        return -1;
    }
    /* -1 marks the slot as not yet protected by the TEE */
    mm->tvp_handle = -1;
    tvp_pool->gen_pool[tvp_pool->slot_num] = new_pool;
    tvp_pool->mm[tvp_pool->slot_num] = mm;
    return 0;
}

static int codec_mm_tvp_pool_protect(struct extpool_mgt_s *tvp_pool)
{
    int ret = 0;
    int i = 0;

    for (i = 0; i < tvp_pool->slot_num; i++) {
        if (tvp_pool->mm[i]->tvp_handle == -1) {
            ret = tee_protect_tvp_mem((uint32_t)tvp_pool->mm[i]->phy_addr, (uint32_t)tvp_pool->mm[i]->buffer_size,
                                      &tvp_pool->mm[i]->tvp_handle);
            pr_info("protect tvp %d %d ret %d\n", i, tvp_pool->mm[i]->tvp_handle, ret);
        } else {
            pr_info("protect tvp %d %d ret %d\n", i, tvp_pool->mm[i]->tvp_handle, ret);
        }
    }
    return ret;
}

/*
 * Grow an extension pool (TVP or CMA_RES) toward a total of @size bytes.
 *
 * @tvp_pool: pool to grow; its current total_size is the starting point.
 * @size:     desired total size in bytes.
 * @memflags: not referenced by this function.
 * @for_tvp:  non-zero to fill the TVP pool: reserved memory is tried
 *            first, then CMA (with kernel mappings of the CMA pages
 *            dropped via cma_mmu_op); zero fills the CMA_RES pool from
 *            CMA only.
 *
 * Each successful codec_mm_alloc becomes one pool slot (gen_pool).
 * On shortage the request shrinks 4MB at a time, down to a 16MB floor,
 * for at most 10 retries.  Returns the pool's resulting total size.
 */
int codec_mm_extpool_pool_alloc(struct extpool_mgt_s *tvp_pool, int size, int memflags, int for_tvp)
{
    struct codec_mm_mgt_s *mgt = get_mem_mgt();
    struct codec_mm_s *mem;
    int alloced_size = tvp_pool->total_size;
    int try_alloced_size = size;
    int ret;

    /* alloced from reserved */
    mutex_lock(&tvp_pool->pool_lock);
    try_alloced_size = mgt->total_reserved_size - mgt->alloced_res_size;
    if (try_alloced_size > 0 && for_tvp) {
        int retry = 0;
        /* clamp to what is still missing, aligned down to pool granularity */
        try_alloced_size = min_t(int, size - alloced_size, try_alloced_size);
        try_alloced_size = MM_ALIGN_DOWN(try_alloced_size, RESERVE_MM_ALIGNED_2N);
        do {
            mem = codec_mm_alloc(TVP_POOL_NAME, try_alloced_size, RESERVE_MM_ALIGNED_2N,
                                 CODEC_MM_FLAGS_FOR_LOCAL_MGR | CODEC_MM_FLAGS_RESERVED);
            if (mem) {
                ret = codec_mm_init_tvp_pool(tvp_pool, mem);
                if (ret < 0) {
                    codec_mm_release(mem, TVP_POOL_NAME);
                } else {
                    alloced_size += try_alloced_size;
                    tvp_pool->slot_num++;
                }
                break;
            } else {
                /* shrink by 4MB and retry; give up below the 16MB floor */
                try_alloced_size = try_alloced_size - 4L * SZ_1M;
                if (try_alloced_size < 16L * SZ_1M) {
                    break;
                }
            }
        } while (retry++ < 10L);
    }
    if (alloced_size >= size) {
        /* alloc finished. */
        goto alloced_finished;
    }

    /* alloced from cma: */
    try_alloced_size = mgt->total_cma_size - mgt->alloced_cma_size;
    if (try_alloced_size > 0) {
        int retry = 0;

        try_alloced_size = min_t(int, size - alloced_size, try_alloced_size);
        try_alloced_size = MM_ALIGN_DOWN(try_alloced_size, RESERVE_MM_ALIGNED_2N);
        do {
            mem = codec_mm_alloc(for_tvp ? TVP_POOL_NAME : CMA_RES_POOL_NAME, try_alloced_size, RESERVE_MM_ALIGNED_2N,
                                 CODEC_MM_FLAGS_FOR_LOCAL_MGR | CODEC_MM_FLAGS_CMA);
            if (mem) {
                struct page *mm = mem->mem_handle;

                /* CMA_RES handles carry a phys addr; convert to a page */
                if (mem->from_flags == AMPORTS_MEM_FLAGS_FROM_GET_FROM_CMA_RES) {
                    mm = phys_to_page((unsigned long)mm);
                }
                if (for_tvp) {
                    /* drop kernel mappings of the soon-protected pages */
                    cma_mmu_op(mm, mem->page_count, 0);
                }
                ret = codec_mm_init_tvp_pool(tvp_pool, mem);
                if (ret < 0) {
                    if (for_tvp) {
                        /* undo the unmap before giving the pages back */
                        cma_mmu_op(mm, mem->page_count, 1);
                    }
                    codec_mm_release(mem, TVP_POOL_NAME);
                } else {
                    alloced_size += try_alloced_size;
                    tvp_pool->slot_num++;
                }
                break;
            } else {
                try_alloced_size = try_alloced_size - 4L * SZ_1M;
                if (try_alloced_size < 16L * SZ_1M) {
                    break;
                }
            }
        } while (retry++ < 10L);
    }

alloced_finished:
    if (alloced_size > 0) {
        tvp_pool->total_size = alloced_size;
    }
    if (tvp_mode >= 1 && for_tvp) {
        /* hand any newly added slots to the TEE for protection */
        codec_mm_tvp_pool_protect(tvp_pool);
    }
    mutex_unlock(&tvp_pool->pool_lock);
    return alloced_size;
}
EXPORT_SYMBOL(codec_mm_extpool_pool_alloc);

/*
 * Release all TVP memory allocated into the extension pool.
 * Call this only after all users have freed their TVP buffers;
 * slots that are still in use are skipped, not freed.
 */
/*
 * Release every slot of an extension pool back to codec_mm.
 *
 * A slot is freed only when its gen_pool is fully unused
 * (avail == size); busy slots are skipped and counted.  Surviving
 * slots are then compacted toward index 0 and slot_num is set to the
 * number of slots that could not be freed (so 0 means full release).
 */
static int codec_mm_extpool_pool_release(struct extpool_mgt_s *tvp_pool)
{
    struct codec_mm_mgt_s *mgt = get_mem_mgt();
    int i;
    int ignored = 0;
    mutex_lock(&tvp_pool->pool_lock);
    for (i = 0; i < tvp_pool->slot_num; i++) {
        struct gen_pool *gpool = tvp_pool->gen_pool[i];
        int slot_mem_size = 0;

        if (gpool) {
            /* any outstanding allocation keeps the whole slot alive */
            if (gen_pool_avail(gpool) != gen_pool_size(gpool)) {
                pr_err("ERROR: TVP pool is not free.\n");
                ignored++;
                continue; /* ignore this free now, */
            }
            slot_mem_size = gen_pool_size(gpool);
            gen_pool_destroy(tvp_pool->gen_pool[i]);
            if (tvp_pool->mm[i]) {
                struct page *mm = tvp_pool->mm[i]->mem_handle;

                if (tvp_pool->mm[i]->from_flags == AMPORTS_MEM_FLAGS_FROM_GET_FROM_CMA_RES) {
                    mm = phys_to_page((unsigned long)mm);
                }
                /* restore kernel mappings before releasing the pages */
                cma_mmu_op(mm, tvp_pool->mm[i]->page_count, 1);
                codec_mm_release(tvp_pool->mm[i], TVP_POOL_NAME);
            }
        }
        /*
         * NOTE(review): this always debits mgt->tvp_pool.total_size,
         * even when @tvp_pool is the cma_res_pool (see the fastplay
         * store handler) -- confirm that is intended.
         */
        mgt->tvp_pool.total_size -= slot_mem_size;
        tvp_pool->gen_pool[i] = NULL;
        tvp_pool->mm[i] = NULL;
    }
    if (ignored > 0) {
        /* compact the surviving (busy) slots toward the front */
        int before_free_slot = tvp_pool->slot_num + 1;

        for (i = 0; i < tvp_pool->slot_num; i++) {
            if (tvp_pool->gen_pool[i] && before_free_slot < i) {
                tvp_pool->gen_pool[before_free_slot] = tvp_pool->gen_pool[i];
                tvp_pool->mm[before_free_slot] = tvp_pool->mm[i];
                tvp_pool->gen_pool[i] = NULL;
                tvp_pool->mm[i] = NULL;
                before_free_slot++;
            }
            /* remember the earliest free slot seen so far */
            if (!tvp_pool->gen_pool[i] && before_free_slot > i) {
                before_free_slot = i;
            }
        }
    }
    tvp_pool->slot_num = ignored;
    mutex_unlock(&tvp_pool->pool_lock);
    return ignored;
}

/*
 * Export the TVP slot ranges as (start, end-inclusive) pairs into
 * @res, which holds @victor_size ulongs (so victor_size / 2 pairs at
 * most).  Returns the number of pairs written.
 */
static int codec_mm_tvp_get_mem_resource(ulong *res, int victor_size)
{
    struct codec_mm_mgt_s *mgt = get_mem_mgt();
    struct extpool_mgt_s *tvp_pool = &mgt->tvp_pool;
    int i;

    mutex_lock(&tvp_pool->pool_lock);
    for (i = 0; i < tvp_pool->slot_num && i < victor_size / 2; i++) {
        struct codec_mm_s *slot = tvp_pool->mm[i];

        if (slot) {
            res[2 * i] = slot->phy_addr;
            res[2 * i + 1] = slot->phy_addr + slot->buffer_size - 1;
        }
    }
    mutex_unlock(&tvp_pool->pool_lock);
    return i;
}

/*
 * Return 1 when @phy_addr falls inside any TVP pool slot's
 * [phy_addr, phy_addr + buffer_size - 1] range, 0 otherwise.
 */
static int codec_mm_is_in_tvp_region(ulong phy_addr)
{
    struct codec_mm_mgt_s *mgt = get_mem_mgt();
    struct extpool_mgt_s *tvp_pool = &mgt->tvp_pool;
    int found = 0;
    int i;

    mutex_lock(&tvp_pool->pool_lock);
    for (i = 0; i < tvp_pool->slot_num; i++) {
        struct codec_mm_s *slot = tvp_pool->mm[i];

        if (slot && phy_addr >= slot->phy_addr && phy_addr <= slot->phy_addr + slot->buffer_size - 1) {
            found = 1;
            break;
        }
    }
    mutex_unlock(&tvp_pool->pool_lock);
    return found;
}

/*
 * Translate a codec_mm physical address to a kernel virtual address.
 * TVP (protected) memory never has a mapping; reserved memory only has
 * one when RES_MEM_FLAGS_HAVE_MAPED is set.  Returns NULL otherwise.
 */
void *codec_mm_phys_to_virt(unsigned long phy_addr)
{
    struct codec_mm_mgt_s *mgt = get_mem_mgt();
    int in_reserved;

    if (codec_mm_is_in_tvp_region(phy_addr)) {
        return NULL; /* no virt for tvp memory; */
    }

    in_reserved = phy_addr >= mgt->rmem.base && phy_addr < mgt->rmem.base + mgt->rmem.size;
    if (in_reserved && !(mgt->res_mem_flags & RES_MEM_FLAGS_HAVE_MAPED)) {
        return NULL; /* no virt for reserved memory; */
    }

    return codec_mm_search_vaddr(phy_addr);
}
EXPORT_SYMBOL(codec_mm_phys_to_virt);

/*
 * Convert to a physical address.
 *
 * NOTE(review): the argument is cast directly to a struct page
 * pointer, so this only works if callers actually pass a page pointer
 * rather than a mapped kernel virtual address -- confirm against the
 * callers before relying on the name's implied contract.
 */
unsigned long codec_mm_virt_to_phys(void *vaddr)
{
    return page_to_phys((struct page *)vaddr);
}
EXPORT_SYMBOL(codec_mm_virt_to_phys);

/*
 * Return the size in bytes of @dev's CMA area, or 0 when @dev is NULL
 * or has no CMA region attached.
 */
unsigned long dma_get_cma_size_int_byte(struct device *dev)
{
    struct cma *cma;

    if (!dev) {
        pr_err("CMA: NULL DEV\n");
        return 0;
    }

    cma = dev_get_cma_area(dev);
    if (!cma) {
        pr_err("CMA:  NO CMA region\n");
        return 0;
    }

    return cma_get_size(cma);
}
EXPORT_SYMBOL(dma_get_cma_size_int_byte);

/*
 * Return the size of the codec's private CMA area in bytes, computed
 * once and cached.  The default (global) CMA pool is deliberately
 * reported as 0 so it is not counted as codec memory.
 */
static int codec_mm_get_cma_size_int_byte(struct device *dev)
{
    static int static_size = -1;
    struct cma *cma;

    /* fast path: already computed */
    if (static_size >= 0) {
        return static_size;
    }

    if (!dev) {
        pr_err("CMA: NULL DEV\n");
        return 0;
    }

    cma = dev_get_cma_area(dev);
    if (!cma) {
        pr_err("CMA:  NO CMA region\n");
        return 0;
    }

    static_size = (cma == dev_get_cma_area(NULL)) ? 0 /* ignore default cma pool */ : cma_get_size(cma);

    return static_size;
}

/*
 * Write a human-readable summary of codec_mm state (totals, per-source
 * pools) followed by one line per live allocation.
 *
 * @buf:  destination buffer, or NULL to emit via pr_info() instead,
 *        using a 512-byte stack buffer in chunks.
 * @size: capacity of @buf in bytes (ignored when @buf is NULL).
 *
 * Returns the number of bytes written to @buf (tsize), or the leftover
 * chunk length when printing to the log.
 */
static int dump_mem_infos(void *buf, int size)
{
    struct codec_mm_mgt_s *mgt = get_mem_mgt();
    struct codec_mm_s *mem;
    unsigned long flags;
    int size_tmp = size;

    char *pbuf = buf;
    char sbuf[512];
    int tsize = 0;
    int s;

    if (!pbuf) {
        /* log mode: stage output through the stack buffer */
        pbuf = sbuf;
        size_tmp = 512L;
    }
    s = snprintf(pbuf, size_tmp - tsize, "codec mem info:\n\ttotal codec mem size:%d MB\n",
                 mgt->total_codec_mem_size / SZ_1M);
    tsize += s;
    pbuf += s;

    s = snprintf(pbuf, size_tmp - tsize, "\talloced size= %d MB\n\tmax alloced: %d MB\n",
                 mgt->total_alloced_size / SZ_1M, mgt->max_used_mem_size / SZ_1M);
    tsize += s;
    pbuf += s;

    s = snprintf(pbuf, size_tmp - tsize, "\tCMA:%d,RES:%d,TVP:%d,SYS:%d,VMAPED:%d MB\n", mgt->alloced_cma_size / SZ_1M,
                 mgt->alloced_res_size / SZ_1M, mgt->tvp_pool.alloced_size / SZ_1M, mgt->alloced_sys_size / SZ_1M,
                 (mgt->phys_vmaped_page_cnt << PAGE_SHIFT) / SZ_1M);
    tsize += s;
    pbuf += s;

    if (mgt->res_pool) {
        s = snprintf(pbuf, size_tmp - tsize, "\t[%d]RES size:%d MB,alloced:%d MB free:%d MB\n",
                     AMPORTS_MEM_FLAGS_FROM_GET_FROM_REVERSED, (int)(gen_pool_size(mgt->res_pool) / SZ_1M),
                     (int)(mgt->alloced_res_size / SZ_1M), (int)(gen_pool_avail(mgt->res_pool) / SZ_1M));
        tsize += s;
        pbuf += s;
    }

    s = snprintf(pbuf, size_tmp - tsize, "\t[%d]CMA size:%d MB:alloced: %d MB,free:%d MB\n",
                 AMPORTS_MEM_FLAGS_FROM_GET_FROM_CMA, (int)(mgt->total_cma_size / SZ_1M),
                 (int)(mgt->alloced_cma_size / SZ_1M), (int)((mgt->total_cma_size - mgt->alloced_cma_size) / SZ_1M));
    tsize += s;
    pbuf += s;

    if (mgt->tvp_pool.slot_num > 0) {
        s = snprintf(pbuf, size_tmp - tsize, "\t[%d]TVP size:%d MB,alloced:%d MB free:%d MB\n",
                     AMPORTS_MEM_FLAGS_FROM_GET_FROM_TVP, (int)(mgt->tvp_pool.total_size / SZ_1M),
                     (int)(mgt->tvp_pool.alloced_size / SZ_1M),
                     (int)((mgt->tvp_pool.total_size - mgt->tvp_pool.alloced_size) / SZ_1M));
        tsize += s;
        pbuf += s;
    }
    if (mgt->cma_res_pool.slot_num > 0) {
        s = snprintf(pbuf, size_tmp - tsize, "\t[%d]CMA_RES size:%d MB,alloced:%d MB free:%d MB\n",
                     AMPORTS_MEM_FLAGS_FROM_GET_FROM_CMA_RES, (int)(mgt->cma_res_pool.total_size / SZ_1M),
                     (int)(mgt->cma_res_pool.alloced_size / SZ_1M),
                     (int)((mgt->cma_res_pool.total_size - mgt->cma_res_pool.alloced_size) / SZ_1M));
        tsize += s;
        pbuf += s;
    }

    if (!buf && tsize > 0) {
        /* log mode: flush the summary and reuse the stack buffer */
        pr_info("%s", sbuf);
        pbuf = sbuf;
        sbuf[0] = '\0';
        tsize = 0;
    }
    spin_lock_irqsave(&mgt->lock, flags);
    if (list_empty(&mgt->mem_list)) {
        spin_unlock_irqrestore(&mgt->lock, flags);
        return tsize;
    }
    list_for_each_entry(mem, &mgt->mem_list, list)
    {
        s = snprintf(pbuf, size_tmp - tsize, "\t[%d].%d:%s.%d,addr=%p,size=%d,from=%d,cnt=%d,", mem->mem_id,
                     mem->ins_id, mem->owner[0] ? mem->owner[0] : "no", mem->ins_buffer_id, (void *)mem->phy_addr,
                     mem->buffer_size, mem->from_flags, atomic_read(&mem->use_cnt));
        s += snprintf(pbuf + s, size_tmp - tsize, "flags=%d,used:%u ms\n", mem->flags,
                      jiffies_to_msecs(get_jiffies_64() - mem->alloced_jiffies));

        tsize += s;
        if (buf) {
            pbuf += s;
            if (tsize > size_tmp - 256L) {
                /*
                 * NOTE(review): pbuf was already advanced by s just
                 * above, so writing at pbuf + s and re-adding the whole
                 * accumulated s to tsize looks off by one line's worth
                 * -- verify the truncation marker placement.
                 */
                s += snprintf(pbuf + s, size_tmp - tsize, "\n\t\t**NOT END**\n");
                tsize += s;
                break; /* no memory for dump now. */
            }
        } else {
            /* log mode: one allocation line per pr_info call */
            pr_info("%s", sbuf);
            tsize = 0;
        }
    }
    spin_unlock_irqrestore(&mgt->lock, flags);

    return tsize;
}

/*
 * Infer and log the free gaps between codec_mm allocations inside the
 * CMA area.
 *
 * Snapshots up to 128 allocations into the first half of a single
 * 256-entry scratch buffer (usedb); the second half (freeb) aliases
 * the same allocation and receives one inferred free span per used
 * entry: from the end of that allocation up to the next-higher used
 * start address (or the end of CMA).
 *
 * @buf/@size are accepted for symmetry with dump_mem_infos() but are
 * unused; output always goes to the kernel log.  Returns 0.
 */
static int dump_free_mem_infos(void *buf, int size)
{
    struct codec_mm_mgt_s *mgt = get_mem_mgt();
    struct codec_mm_s *mem;
    unsigned long flags;
    int i = 0, j, k;
    unsigned long minphy;
    u32 cma_base_phy, cma_end_phy;
    struct dump_buf_s {
        unsigned long phy_addr;
        int buffer_size;
        int align2n;
    } * usedb, *freeb;
    int total_free_size = 0;

    usedb = kzalloc(sizeof(struct dump_buf_s) * 256L, GFP_KERNEL);
    if (usedb == NULL) {
        return 0;
    }
    /* freeb is the upper half of the same allocation */
    freeb = usedb + 128L;

    spin_lock_irqsave(&mgt->lock, flags);
    list_for_each_entry(mem, &mgt->mem_list, list)
    {
        usedb[i].phy_addr = mem->phy_addr;
        usedb[i].buffer_size = mem->buffer_size;
        usedb[i].align2n = mem->align2n;
        if (++i >= 128L) {
            pr_info("too many memlist in codec_mm, break;\n");
            break;
        }
    };
    cma_base_phy = cma_get_base(dev_get_cma_area(mgt->dev));
    cma_end_phy = cma_base_phy + dma_get_cma_size_int_byte(mgt->dev);
    spin_unlock_irqrestore(&mgt->lock, flags);
    pr_info("free cma idea[%x-%x] from codec_mm items %d\n", cma_base_phy, cma_end_phy, i);
    for (j = 0; j < i; j++) { /* free */
        /* the gap starts right after allocation j, rounded up to its alignment */
        freeb[j].phy_addr = usedb[j].phy_addr + MM_ALIGN_UP2N(usedb[j].buffer_size, usedb[j].align2n);
        minphy = cma_end_phy;
        for (k = 0; k < i; k++) { /* used */
            /* find the lowest used start address at or above the gap start */
            if (usedb[k].phy_addr >= freeb[j].phy_addr && usedb[k].phy_addr < minphy) {
                minphy = usedb[k].phy_addr;
            }
        }
        freeb[j].buffer_size = minphy - freeb[j].phy_addr;
        total_free_size += freeb[j].buffer_size;
        if (freeb[j].buffer_size > 0) {
            pr_info("\t[%d] free buf phyaddr=%p, used [%p,%x], size=%x\n", j, (void *)freeb[j].phy_addr,
                    (void *)usedb[j].phy_addr, usedb[j].buffer_size, freeb[j].buffer_size);
        }
    }
    pr_info("total_free_size %x\n", total_free_size);
    kfree(usedb);
    return 0;
}

/*
 * Take a reference on TVP; the first user also backs the TVP pool with
 * the default 4k-sized allocation.  Only valid when tvp_mode != 0.
 * Returns the new user count, or -1 when tvp_mode is 0.
 */
int codec_mm_enable_tvp(void)
{
    struct codec_mm_mgt_s *mgt = get_mem_mgt();
    int users;

    mutex_lock(&mgt->tvp_protect_lock);
    if (tvp_mode == 0) {
        pr_err("not support in tvp_mode 0\n");
        mutex_unlock(&mgt->tvp_protect_lock);
        return -1;
    }
    users = atomic_add_return(1, &mgt->tvp_user_count);
    if (users == 1) {
        /* first user: populate the pool */
        codec_mm_extpool_pool_alloc(&mgt->tvp_pool, default_tvp_4k_size, 0, 1);
        mgt->tvp_enable = 2;
    }
    pr_info("tvp_user_count is %d\n", atomic_read(&mgt->tvp_user_count));
    mutex_unlock(&mgt->tvp_protect_lock);
    return users;
}
EXPORT_SYMBOL(codec_mm_enable_tvp);

/*
 * Drop a TVP reference; when the count reaches zero and the TEE
 * unprotect succeeds, the whole TVP pool is released.
 *
 * Returns the (possibly non-zero) slot count left by the pool release
 * when the last user disabled TVP, 0 when a reference remains, or -1
 * when tvp_mode is 0.
 */
int codec_mm_disable_tvp(void)
{
    int ret = -1;
    struct codec_mm_mgt_s *mgt = get_mem_mgt();

    mutex_lock(&mgt->tvp_protect_lock);
    if (tvp_mode == 0) {
        pr_err("not support in tvp_mode 0\n");
        mutex_unlock(&mgt->tvp_protect_lock);
        return ret;
    }
    if (atomic_dec_and_test(&mgt->tvp_user_count)) {
        if (codec_mm_tvp_pool_unprotect(&mgt->tvp_pool) == 0) {
            ret = codec_mm_extpool_pool_release(&mgt->tvp_pool);
            mgt->tvp_enable = 0;
            pr_info("disable tvp\n"); /* typo fix: was "disalbe tvp" */
            mutex_unlock(&mgt->tvp_protect_lock);
            return ret;
        }
    }
    ret = 0;
    /* clamp underflow from unbalanced disable calls */
    if (atomic_read(&mgt->tvp_user_count) < 0) {
        atomic_set(&mgt->tvp_user_count, 0);
    }
    pr_info("tvp_user_count is %d\n", atomic_read(&mgt->tvp_user_count));
    mutex_unlock(&mgt->tvp_protect_lock);
    return ret;
}
EXPORT_SYMBOL(codec_mm_disable_tvp);

int codec_mm_video_tvp_enabled(void)
{
    struct codec_mm_mgt_s *mgt = get_mem_mgt();

    return mgt->tvp_enable;
}
EXPORT_SYMBOL(codec_mm_video_tvp_enabled);

/*
 * Total codec memory in bytes, reduced according to the debug_mode
 * low-nibble bits: 1 drops reserved (and CMA_RES), 2 drops CMA,
 * 4 would drop sys memory, 8 halves everything.  Never negative.
 */
int codec_mm_get_total_size(void)
{
    struct codec_mm_mgt_s *mgt = get_mem_mgt();
    int total = mgt->total_codec_mem_size;

    /* no debug memory mode: report the full size */
    if ((debug_mode & 0xf) == 0) {
        return total;
    }

    if (debug_mode & 0x8) {
        total -= mgt->total_codec_mem_size / 2;
    }
    if (debug_mode & 0x1) {
        total -= mgt->total_reserved_size;
        total -= mgt->cma_res_pool.total_size;
    }
    if (debug_mode & 0x2) {
        total -= mgt->total_cma_size;
    }
    return total < 0 ? 0 : total;
}
EXPORT_SYMBOL(codec_mm_get_total_size);

/* Bytes of codec memory still available (total minus allocated). */
int codec_mm_get_free_size(void)
{
    struct codec_mm_mgt_s *mgt = get_mem_mgt();
    int free_size = codec_mm_get_total_size() - mgt->total_alloced_size;

    return free_size;
}
EXPORT_SYMBOL(codec_mm_get_free_size);

int codec_mm_get_reserved_size(void)
{
    struct codec_mm_mgt_s *mgt = get_mem_mgt();

    return mgt->total_reserved_size;
}
EXPORT_SYMBOL(codec_mm_get_reserved_size);

struct device *v4l_get_dev_from_codec_mm(void)
{
    struct codec_mm_mgt_s *mgt = get_mem_mgt();

    return mgt->dev;
}
EXPORT_SYMBOL(v4l_get_dev_from_codec_mm);

/*
 * Register an externally allocated coherent V4L buffer with codec_mm
 * bookkeeping so it appears in the statistics and memory list.
 *
 * @owner: name recorded as the buffer's first owner.
 * @addr:  physical address of the buffer.
 * @size:  buffer size in bytes (page-aligned internally).
 * @index: V4L buffer index, stored as ins_buffer_id.
 *
 * Returns the tracking entry, or NULL on allocation failure.  The
 * caller returns it via v4l_freebufs_back_to_codec_mm().
 */
struct codec_mm_s *v4l_reqbufs_from_codec_mm(const char *owner, unsigned int addr, unsigned int size,
                                             unsigned int index)
{
    unsigned long flags;
    struct codec_mm_mgt_s *mgt = get_mem_mgt();
    struct codec_mm_s *mem = NULL;
    int buf_size = PAGE_ALIGN(size);

    mem = kzalloc(sizeof(struct codec_mm_s), GFP_KERNEL);
    if (!mem) { /* kzalloc returns NULL on failure, never an ERR_PTR */
        goto out;
    }

    mem->owner[0] = owner;
    mem->mem_handle = NULL;
    mem->buffer_size = buf_size;
    mem->page_count = buf_size / PAGE_SIZE;
    mem->phy_addr = addr;
    mem->ins_buffer_id = index;
    mem->alloced_jiffies = get_jiffies_64();
    mem->from_flags = AMPORTS_MEM_FLAGS_FROM_GET_FROM_COHERENT;

    spin_lock_irqsave(&mgt->lock, flags);

    mem->mem_id = mgt->global_memid++;
    mgt->alloced_from_coherent += buf_size;
    mgt->total_alloced_size += buf_size;
    mgt->alloced_cma_size += buf_size;
    list_add_tail(&mem->list, &mgt->mem_list);

    spin_unlock_irqrestore(&mgt->lock, flags);

    if (debug_mode & 0x20) {
        pr_info("%s alloc coherent size %d at %lx from %d.\n", owner, buf_size, mem->phy_addr, mem->from_flags);
    }
out:
    return mem;
}
EXPORT_SYMBOL(v4l_reqbufs_from_codec_mm);

/*
 * Return a tracking entry created by v4l_reqbufs_from_codec_mm().
 * The accounting is only undone when @owner matches the recorded first
 * owner and the entry has a size; the entry itself is always freed.
 */
void v4l_freebufs_back_to_codec_mm(const char *owner, struct codec_mm_s *mem)
{
    struct codec_mm_mgt_s *mgt = get_mem_mgt();
    unsigned long flags;

    if (IS_ERR_OR_NULL(mem)) {
        return;
    }

    if (mem->owner[0] && strcmp(owner, mem->owner[0]) == 0 && mem->buffer_size) {
        spin_lock_irqsave(&mgt->lock, flags);

        mgt->alloced_from_coherent -= mem->buffer_size;
        mgt->total_alloced_size -= mem->buffer_size;
        mgt->alloced_cma_size -= mem->buffer_size;
        list_del(&mem->list);

        spin_unlock_irqrestore(&mgt->lock, flags);

        if (debug_mode & 0x20) {
            pr_info("%s free mem size %d at %lx from %d\n", mem->owner[0], mem->buffer_size, mem->phy_addr,
                    mem->from_flags);
        }
    }
    kfree(mem);
}
EXPORT_SYMBOL(v4l_freebufs_back_to_codec_mm);

/*
 * with_wait == 1: if memory is short, free some cached memory and wait
 * briefly before reporting failure.
 * with_wait == 0: report the current state without waiting.
 */
/*
 * Check whether a linear allocation of @size bytes can currently
 * succeed.
 *
 * @with_wait == 1: on shortage, flush the scatter cache, re-check, and
 * sleep 50ms before reporting failure.  @with_wait == 0: just report
 * the pre-check result.
 *
 * Returns 1 when memory is available, 0 otherwise.
 */
int codec_mm_enough_for_size(int size, int with_wait)
{
    struct codec_mm_mgt_s *mgt = get_mem_mgt();
    int have_mem = codec_mm_alloc_pre_check_in(mgt, size, 0);

    if (!have_mem && with_wait && mgt->alloced_for_sc_cnt > 0) {
        pr_err(" No mem, clear scatter cache!!\n");
        codec_mm_scatter_free_all_ignorecache(1);
        have_mem = codec_mm_alloc_pre_check_in(mgt, size, 0);
        if (have_mem) {
            return 1;
        }
        if (debug_mode & 0x20) {
            dump_mem_infos(NULL, 0);
        }
        msleep(50L);
        return 0;
    }
    /*
     * Bug fix: previously returned 1 unconditionally here, claiming
     * memory was available even when the pre-check failed with
     * with_wait == 0 (or with no scatter cache to reclaim).
     */
    return have_mem ? 1 : 0;
}
EXPORT_SYMBOL(codec_mm_enough_for_size);

/*
 * One-time initialization of the codec_mm manager for @dev.
 *
 * Builds the reserved-memory gen_pool (aligned to RESERVE_MM_ALIGNED_2N),
 * accounts the private CMA area, derives the default TVP pool sizes,
 * and initializes the locks.  Returns 0, or -ENOMEM if the reserved
 * pool cannot be created.
 */
int codec_mm_mgt_init(struct device *dev)
{
    struct codec_mm_mgt_s *mgt = get_mem_mgt();

    INIT_LIST_HEAD(&mgt->mem_list);
    mgt->dev = dev;
    mgt->alloc_from_sys_pages_max = 4L;
    if (mgt->rmem.size > 0) {
        unsigned long aligned_addr;
        int aligned_size;
        /* mem aligned, */
        mgt->res_pool = gen_pool_create(RESERVE_MM_ALIGNED_2N, -1);
        if (!mgt->res_pool) {
            return -ENOMEM;
        }
        /* round the region base up to the pool alignment and shrink
         * the usable size accordingly */
        aligned_addr = ((unsigned long)mgt->rmem.base + ((1 << RESERVE_MM_ALIGNED_2N) - 1)) &
                       (~((1 << RESERVE_MM_ALIGNED_2N) - 1));
        aligned_size = mgt->rmem.size - (int)(aligned_addr - (unsigned long)mgt->rmem.base);
        gen_pool_add(mgt->res_pool, aligned_addr, aligned_size, -1);
        pr_debug("add reserve memory %p(aligned %p) size=%x(aligned %x)\n", (void *)mgt->rmem.base,
                 (void *)aligned_addr, (int)mgt->rmem.size, (int)aligned_size);
        mgt->total_reserved_size = aligned_size;
        mgt->total_codec_mem_size = aligned_size;
#ifdef RES_IS_MAPED
        mgt->res_mem_flags |= RES_MEM_FLAGS_HAVE_MAPED;
#endif
    }
    mgt->total_cma_size = codec_mm_get_cma_size_int_byte(mgt->dev);
    mgt->total_codec_mem_size += mgt->total_cma_size;
    /* 2M for audio not protect. */
    default_tvp_4k_size = mgt->total_codec_mem_size - SZ_1M * 2L;
    if (default_tvp_4k_size > DEFAULT_TVP_SIZE_FOR_4K) {
        default_tvp_4k_size = DEFAULT_TVP_SIZE_FOR_4K;
    }
    /* 97MB -> 160MB, may not enough for h265 */
    default_tvp_size = (mgt->total_codec_mem_size - (SZ_1M * 2L)) > DEFAULT_TVP_SIZE_FOR_NO4K
                           ? DEFAULT_TVP_SIZE_FOR_NO4K
                           : default_tvp_4k_size;

    default_cma_res_size = mgt->total_cma_size;
    mgt->global_memid = 0;
    mutex_init(&mgt->tvp_pool.pool_lock);
    mutex_init(&mgt->cma_res_pool.pool_lock);
    spin_lock_init(&mgt->lock);
    return 0;
}
EXPORT_SYMBOL(codec_mm_mgt_init);

/* Placeholder module-init hook; nothing to set up yet. */
static int __init amstream_test_init(void)
{
    return 0;
}

/* sysfs read: fill the page with the codec_mm allocation summary. */
static ssize_t codec_mm_dump_show(struct class *class, struct class_attribute *attr, char *buf)
{
    return dump_mem_infos(buf, PAGE_SIZE);
}

/* sysfs read: fill the page with the scatter-allocator statistics. */
static ssize_t codec_mm_scatter_dump_show(struct class *class, struct class_attribute *attr, char *buf)
{
    return codec_mm_scatter_info_dump(buf, PAGE_SIZE);
}

/* sysfs read: fill the page with the keeper subsystem's state. */
static ssize_t codec_mm_keeper_dump_show(struct class *class, struct class_attribute *attr, char *buf)
{
    return codec_mm_keeper_dump_info(buf, PAGE_SIZE);
}

/* sysfs read: current TVP flag/refcount plus usage help. */
static ssize_t tvp_enable_show(struct class *class, struct class_attribute *attr, char *buf)
{
    struct codec_mm_mgt_s *mgt = get_mem_mgt();
    ssize_t n = 0;

    mutex_lock(&mgt->tvp_protect_lock);
    n += sprintf(buf + n, "tvp_flag=%d\n", (tvp_mode << 4) + mgt->tvp_enable);
    n += sprintf(buf + n, "tvp ref count=%d\n", atomic_read(&mgt->tvp_user_count));
    n += sprintf(buf + n, "tvp enable help:\n");
    n += sprintf(buf + n, "echo n > tvp_enable\n");
    n += sprintf(buf + n, "0: disable tvp(tvp size to 0)\n");
    n += sprintf(buf + n, "1: enable tvp for 1080p playing(use default size)\n");
    n += sprintf(buf + n, "2: enable tvp for 4k playing(use default 4k size)\n");
    mutex_unlock(&mgt->tvp_protect_lock);
    return n;
}

/*
 * sysfs write handler for tvp_enable.
 *
 * Accepts 0 (disable tvp), 1 (enable for 1080p) or 2 (enable for 4k).
 * In tvp_mode 0 the pool is resized directly after flushing keeper and
 * scatter caches; in tvp_mode 1 the ref-counted enable/disable helpers
 * are used.  Returns @size, or -EINVAL for unparseable input.
 */
static ssize_t tvp_enable_store(struct class *class, struct class_attribute *attr, const char *buf, size_t size)
{
    struct codec_mm_mgt_s *mgt = get_mem_mgt();
    int val; /* bug fix: was unsigned int, but kstrtoint() takes int * */
    ssize_t ret;

    val = -1;
    ret = kstrtoint(buf, 0, &val);
    if (ret != 0) {
        return -EINVAL;
    }
    switch (tvp_mode) {
        case 0: {
            /*
             * always free all scatter cache for
             * tvp changes.
             */
            mutex_lock(&mgt->tvp_protect_lock);
            codec_mm_keeper_free_all_keep(2);
            codec_mm_scatter_free_all_ignorecache(3);
            switch (val) {
                case 0:
                    ret = codec_mm_extpool_pool_release(&mgt->tvp_pool);
                    mgt->tvp_enable = 0;
                    pr_info("disable tvp\n"); /* typo fix: was "disalbe tvp" */
                    break;
                case 1:
                    codec_mm_extpool_pool_alloc(&mgt->tvp_pool, default_tvp_size, 0, 1);
                    mgt->tvp_enable = 1;
                    pr_info("enable tvp for 1080p\n");
                    break;
                case 2:
                    codec_mm_extpool_pool_alloc(&mgt->tvp_pool, default_tvp_4k_size, 0, 1);
                    mgt->tvp_enable = 2;
                    pr_info("enable tvp for 4k\n");
                    break;
                default:
                    pr_err("unknown cmd! %d\n", val);
            }
            mutex_unlock(&mgt->tvp_protect_lock);
            break;
        }
        case 1: {
            switch (val) {
                case 0:
                    codec_mm_disable_tvp();
                    break;
                case 1:
                case 2:
                    codec_mm_enable_tvp();
                    break;
                default:
                    pr_err("unknown cmd! %d\n", val);
            }
            break;
        }
        default:
            break;
    }
    return size;
}

/* sysfs read: usage help for the fastplay control. */
static ssize_t fastplay_enable_show(struct class *class, struct class_attribute *attr, char *buf)
{
    ssize_t n = 0;

    n += sprintf(buf + n, "fastplay enable help:\n");
    n += sprintf(buf + n, "echo n > tvp_enable\n");
    n += sprintf(buf + n, "0: disable fastplay\n");
    n += sprintf(buf + n, "1: enable fastplay for 1080p playing(use default size)\n");
    return n;
}

/*
 * sysfs write handler for fastplay: 0 releases the CMA_RES pool,
 * 1 fills it to default_cma_res_size.  Returns @size, or -EINVAL for
 * unparseable input.
 */
static ssize_t fastplay_enable_store(struct class *class, struct class_attribute *attr, const char *buf, size_t size)
{
    struct codec_mm_mgt_s *mgt = get_mem_mgt();
    int val; /* bug fix: was unsigned int, but kstrtoint() takes int * */
    ssize_t ret;

    val = -1;
    ret = kstrtoint(buf, 0, &val);
    if (ret != 0) {
        return -EINVAL;
    }
    switch (val) {
        case 0:
            ret = codec_mm_extpool_pool_release(&mgt->cma_res_pool);
            mgt->fastplay_enable = 0;
            pr_err("disable fastplay\n"); /* typo fix: was "disalbe fastplay" */
            break;
        case 1:
            codec_mm_extpool_pool_alloc(&mgt->cma_res_pool, default_cma_res_size, 0, 0);
            mgt->fastplay_enable = 1;
            pr_err("enable fastplay\n");
            break;
        default:
            pr_err("unknown cmd! %d\n", val);
    }
    return size;
}

/* sysfs read: list all config nodes under the codec_mm config path. */
static ssize_t config_show(struct class *class, struct class_attribute *attr, char *buf)
{
    return configs_list_path_nodes(CONFIG_PATH, buf, PAGE_SIZE, LIST_MODE_NODE_CMDVAL_ALL);
}

/* sysfs write: apply a "path=value" config setting; log on failure. */
static ssize_t config_store(struct class *class, struct class_attribute *attr, const char *buf, size_t size)
{
    if (configs_set_prefix_path_valonpath(CONFIG_PREFIX, buf) < 0) {
        pr_err("set config failed %s\n", buf);
    }
    return size;
}

/* sysfs read: list the physical ranges of the TVP pool segments. */
static ssize_t tvp_region_show(struct class *class, struct class_attribute *attr, char *buf)
{
    ulong region[16];
    int segs;
    int i;
    int pos = 0;

    segs = codec_mm_tvp_get_mem_resource(region, 8);
    for (i = 0; i < segs; i++) {
        ulong start = region[2 * i];
        ulong end = region[2 * i + 1]; /* inclusive */

        pos += sprintf(buf + pos, "segment%d:0x%p - 0x%p (size:0x%x)\n", i, (void *)start, (void *)end,
                       (int)(end - start + 1));
    }
    return pos;
}

/* sysfs read: usage help for the debug command interface. */
static ssize_t debug_show(struct class *class, struct class_attribute *attr, char *buf)
{
    static const char *const help_lines[] = {
        "mm_scatter help:\n",
        "echo n > mm_scatter_debug\n",
        "n==0: clear all debugs)\n",
        "n=1: dump all alloced scatters\n",
        "n=2: dump all slots\n",
        "n=3: dump all free slots\n",
        "n=4: dump all sid hash table\n",
        "n=5: free all free slot now!\n",
        "n=6: clear all time infos!\n",
        "n=10: force free all keeper\n",
        "n=20: dump memory,# 20 #addr(hex) #len\n",
        "n==100: cmd mode p1 p ##mode:0,dump, 1,alloc 2,more,3,free some,4,free all\n",
    };
    ssize_t n = 0;
    int i;

    for (i = 0; i < ARRAY_SIZE(help_lines); i++) {
        n += sprintf(buf + n, "%s", help_lines[i]);
    }
    return n;
}

/*
 * Hex-dump @len bytes starting at @addr to the kernel log, 32 bytes
 * (8 words) per line.
 *
 * @addr:  virtual address when @isphy is 0; physical address otherwise
 *         (mapped with ioremap, falling back to codec_mm_phys_to_virt).
 * @len:   number of bytes to dump.
 *
 * Returns 0.
 */
static int codec_mm_mem_dump(unsigned long addr, int isphy, int len)
{
    void *vaddr;
    int is_map = 0;

    pr_info("start dump addr: %p %d\n", (void *)addr, len);
    if (!isphy) {
        vaddr = (void *)addr;
    } else {
        vaddr = ioremap(addr, len);
        if (!vaddr) {
            pr_info("map addr: %p len: %d, failed\n", (void *)addr, len);
            vaddr = codec_mm_phys_to_virt(addr);
        } else {
            is_map = 1;
        }
    }
    if (vaddr) {
        unsigned int *p;
        int i;

        /*
         * Bug fix: the old loop advanced a WORD index by 32 per line
         * (a byte offset of 128), so it read ~4x past the mapped or
         * requested range.  Step by 32 BYTES per printed line instead.
         */
        for (i = 0; i + 32 <= len; i += 32) {
            p = (unsigned int *)((unsigned char *)vaddr + i);
            pr_info("%p: %08x %08x %08x %08x %08x %08x %08x %08x\n", p, p[0], p[1], p[2], p[3], p[4], p[5], p[6],
                    p[7]);
        }
    }
    if (vaddr && is_map) {
        /* maped free... */
        iounmap(vaddr);
    }
    return 0;
}

static ssize_t debug_store(struct class *class, struct class_attribute *attr, const char *buf, size_t size)
{
    /*
     * Parse and run one debug command (see debug_show() for the menu).
     *
     * Fixes vs. the original:
     *  - @val was unsigned int but was passed to kstrtoint(), which
     *    takes an int * (incompatible pointer type); use a plain int.
     *  - kstrtoint() rejects any trailing text, so the commands that
     *    carry arguments ("20 <addr> <len>", "100 <mode> <p1> <p2>")
     *    could never get past the initial parse; read the leading
     *    integer with sscanf() instead.
     *
     * Return: @size on success (whole write consumed), -EINVAL when no
     * leading integer could be parsed.
     */
    int val;

    if (sscanf(buf, "%d", &val) != 1) {
        return -EINVAL;
    }
    switch (val) {
        case 0:
            pr_info("clear debug!\n");
            break;
        case 1:
            codec_mm_dump_all_scatters();
            break;
        case 2:
            codec_mm_dump_all_slots();
            break;
        case 3:
            codec_mm_dump_free_slots();
            break;
        case 4:
            codec_mm_dump_all_hash_table();
            break;
        case 5:
            codec_mm_free_all_free_slots();
            break;
        case 6:
            codec_mm_clear_alloc_infos();
            break;
        case 10:
            codec_mm_keeper_free_all_keep(1);
            break;
        case 11:
            dump_mem_infos(NULL, 0);
            break;
        case 12:
            dump_free_mem_infos(NULL, 0);
            break;
        case 20: { /* dump memory: "20 <addr-hex> <len>" */
            int cmd, len;
            unsigned int addr;

            cmd = len = 0;
            addr = 0;
            /* Require all three fields so a partial line is ignored. */
            if (sscanf(buf, "%d %x %d", &cmd, &addr, &len) == 3 && addr > 0 && len > 0) {
                codec_mm_mem_dump(addr, 1, len);
            }
        }
            break;
        case 100: { /* scatter self-test: "100 <mode> <p1> <p2>" */
            int cmd, mode, p1, p2;

            mode = p1 = p2 = 0;
            /* Missing fields keep their zero defaults, as before. */
            if (sscanf(buf, "%d %d %d %d", &cmd, &mode, &p1, &p2) >= 1) {
                codec_mm_scatter_test(mode, p1, p2);
            }
        }
            break;
        default:
            pr_err("unknown cmd! %d\n", val);
    }
    return size;
}

static ssize_t debug_sc_mode_show(struct class *class, struct class_attribute *attr, char *buf)
{
    /* Report the current scatter-debug mode as a decimal line. */
    return sprintf(buf, "%u\n", debug_sc_mode);
}

static ssize_t debug_sc_mode_store(struct class *class, struct class_attribute *attr, const char *buf, size_t size)
{
    /*
     * Update debug_sc_mode from a sysfs write.
     *
     * Fix: @val is unsigned int, but the original called kstrtoint(),
     * which takes an int * (incompatible pointer type); kstrtouint()
     * is the matching parser for an unsigned value.
     *
     * Return: @size on success, -EINVAL on a malformed number.
     */
    unsigned int val;
    ssize_t ret;

    ret = kstrtouint(buf, 0, &val);
    if (ret != 0) {
        return -EINVAL;
    }

    debug_sc_mode = val;

    return size;
}

static ssize_t debug_keep_mode_show(struct class *class, struct class_attribute *attr, char *buf)
{
    /* Report the current keeper-debug mode as a decimal line. */
    return sprintf(buf, "%u\n", debug_keep_mode);
}

static ssize_t debug_keep_mode_store(struct class *class, struct class_attribute *attr, const char *buf, size_t size)
{
    /*
     * Update debug_keep_mode from a sysfs write.
     *
     * Fix: @val is unsigned int, but the original called kstrtoint(),
     * which takes an int * (incompatible pointer type); kstrtouint()
     * is the matching parser for an unsigned value.
     *
     * Return: @size on success, -EINVAL on a malformed number.
     */
    unsigned int val;
    ssize_t ret;

    ret = kstrtouint(buf, 0, &val);
    if (ret != 0) {
        return -EINVAL;
    }

    debug_keep_mode = val;

    return size;
}

/* sysfs class attributes: *_show()/*_store() handlers above are bound
 * by name via these macros (CLASS_ATTR_RO = read-only, _RW = read-write).
 */
static CLASS_ATTR_RO(codec_mm_dump);
static CLASS_ATTR_RO(codec_mm_scatter_dump);
static CLASS_ATTR_RO(codec_mm_keeper_dump);
static CLASS_ATTR_RO(tvp_region);
static CLASS_ATTR_RW(tvp_enable);
static CLASS_ATTR_RW(fastplay_enable);
static CLASS_ATTR_RW(config);
static CLASS_ATTR_RW(debug);
static CLASS_ATTR_RW(debug_sc_mode);
static CLASS_ATTR_RW(debug_keep_mode);

/* Attribute list published under /sys/class/codec_mm/ (NULL-terminated);
 * ATTRIBUTE_GROUPS() generates the codec_mm_class_groups wrapper.
 */
static struct attribute *codec_mm_class_attrs[] = {
    &class_attr_codec_mm_dump.attr,
    &class_attr_codec_mm_scatter_dump.attr,
    &class_attr_codec_mm_keeper_dump.attr,
    &class_attr_tvp_region.attr,
    &class_attr_tvp_enable.attr,
    &class_attr_fastplay_enable.attr,
    &class_attr_config.attr,
    &class_attr_debug.attr,
    &class_attr_debug_sc_mode.attr,
    &class_attr_debug_keep_mode.attr,
    NULL,
};

ATTRIBUTE_GROUPS(codec_mm_class);

/* The /sys/class/codec_mm device class, registered in codec_mm_probe(). */
static struct class codec_mm_class = {
    .name = "codec_mm",
    .class_groups = codec_mm_class_groups,
};

/* Integer tunables exported under the media.codec_mm config path
 * (registered via REG_PATH_CONFIGS() in codec_mm_probe()).
 */
static struct mconfig codec_mm_configs[] = {
    MC_PI32("default_tvp_size", &default_tvp_size),
    MC_PI32("default_tvp_4k_size", &default_tvp_4k_size),
    MC_PI32("default_cma_res_size", &default_cma_res_size),
};
/* Parent node for the "trigger" config entries registered in probe. */
static struct mconfig_node codec_mm_trigger_node;
/*
 * Write hook shared by the "tvp_enable", "fastplay" and "debug" config
 * triggers; dispatches on the first character of the trigger name to
 * the matching *_store() handler.
 *
 * Return: @size (bytes consumed) on a recognized trigger, -1 otherwise.
 */
int codec_mm_trigger_fun(const char *trigger, int id, const char *buf, int size)
{
    int ret = size;

    switch (trigger[0]) {
        case 't':
            tvp_enable_store(NULL, NULL, buf, size);
            break;
        case 'f':
            fastplay_enable_store(NULL, NULL, buf, size);
            break;
        case 'd':
            debug_store(NULL, NULL, buf, size);
            break;
        default:
            ret = -1;
    }
    /* Fix: the original set ret = -1 for unknown triggers but then
     * returned size unconditionally, silently swallowing the error.
     */
    return ret;
}
/*
 * Read hook shared by the "tvp_enable", "fastplay" and "debug" config
 * triggers; dispatches on the first character of the trigger name to
 * the matching *_show() handler and copies the output into @sbuf.
 *
 * Return: number of bytes written to @sbuf, -1 for an unknown trigger,
 * -ENOMEM if a scratch page could not be allocated.
 */
int codec_mm_trigger_help_fun(const char *trigger, int id, char *sbuf, int size)
{
    int ret = -1;
    void *buf, *getbuf = NULL;

    /* The *_show() handlers format into a sysfs-style buffer; if the
     * caller's buffer is smaller than a page, render into a scratch
     * page first and copy the (possibly truncated) result back below.
     */
    if (size < PAGE_SIZE) {
        getbuf = (void *)__get_free_page(GFP_KERNEL);
        if (!getbuf) {
            return -ENOMEM;
        }
        buf = getbuf;
    } else {
        buf = sbuf;
    }
    switch (trigger[0]) {
        case 't':
            ret = tvp_enable_show(NULL, NULL, buf);
            break;
        case 'f':
            ret = fastplay_enable_show(NULL, NULL, buf);
            break;
        case 'd':
            ret = debug_show(NULL, NULL, buf);
            break;
        default:
            pr_err("unknown trigger:[%s]\n", trigger);
            ret = -1;
    }
    /* Copy back only when we rendered into the scratch page. */
    if (ret > 0 && getbuf != NULL) {
        ret = min_t(int, ret, size);

        /* NOTE(review): when ret == size the copy is not NUL-terminated;
         * callers presumably treat sbuf as length-bounded — confirm.
         */
        strncpy(sbuf, buf, ret);
    }
    if (getbuf != NULL) {
        free_page((unsigned long)getbuf);
    }
    return ret;
}

/* Trigger entries registered under media.codec_mm/trigger: each pairs
 * the shared help (read) and trigger (write) dispatch functions.
 */
static struct mconfig codec_mm_trigger[] = {
    MC_FUN("tvp_enable", codec_mm_trigger_help_fun, codec_mm_trigger_fun),
    MC_FUN("fastplay", codec_mm_trigger_help_fun, codec_mm_trigger_fun),
    MC_FUN("debug", codec_mm_trigger_help_fun, codec_mm_trigger_fun),
};

static int codec_mm_probe(struct platform_device *pdev)
{
    /*
     * Platform probe: bind the DT reserved-memory region, initialize
     * the memory manager, register the sysfs class, and bring up the
     * scatter/keeper sub-modules and config entries.
     *
     * Fix: of_reserved_mem_device_init() was called a second time
     * after class_register(); the duplicate call has been removed.
     *
     * Return: 0 on success, or the class_register() error code.
     */
    int r;

    pdev->dev.platform_data = get_mem_mgt();
    /* Bind the DT reserved-memory region (if any) to this device. */
    r = of_reserved_mem_device_init(&pdev->dev);
    if (r == 0) {
        pr_debug("codec_mm_probe mem init done\n");
    }

    codec_mm_mgt_init(&pdev->dev);
    r = class_register(&codec_mm_class);
    if (r) {
        pr_err("vdec class create fail.\n");
        return r;
    }

    pr_info("codec_mm_probe ok\n");

    codec_mm_scatter_mgt_init();
    codec_mm_keeper_mgr_init();
    amstream_test_init();
    codec_mm_scatter_mgt_test();
    REG_PATH_CONFIGS(CONFIG_PATH, codec_mm_configs);
    INIT_REG_NODE_CONFIGS(CONFIG_PATH, &codec_mm_trigger_node, "trigger", codec_mm_trigger,
                          CONFIG_FOR_RW | CONFIG_FOR_T);
    return 0;
}

/* Device-tree match table. The compatible string (including its
 * embedded spaces) must stay byte-exact to match existing DTs.
 */
static const struct of_device_id amlogic_mem_dt_match[] = {
    {
        .compatible = "amlogic, codec, mm",
    },
    {},
};

/* Platform driver; no remove hook — the driver is never unbound. */
static struct platform_driver codec_mm_driver = {.probe = codec_mm_probe,
                                                 .remove = NULL,
                                                 .driver = {
                                                     .owner = THIS_MODULE,
                                                     .name = "codec_mm",
                                                     .of_match_table = amlogic_mem_dt_match,

                                                 }};

/* Module entry: register the platform driver (runs at arch_initcall). */
static int __init codec_mm_module_init(void)
{
    int err;

    pr_err("codec_mm_module_init\n");

    err = platform_driver_register(&codec_mm_driver);
    if (err) {
        pr_err("failed to register amports mem module\n");
        return -ENODEV;
    }
    return 0;
}

/* arch_initcall: runs earlier than regular module_init/device_initcall. */
arch_initcall(codec_mm_module_init);
MODULE_DESCRIPTION("AMLOGIC amports mem  driver");
MODULE_LICENSE("GPL");

/* reserved_mem device_init callback: capture the DT reserved-memory
 * region into the global manager when a device claims it.
 */
static int codec_mm_reserved_init(struct reserved_mem *rmem, struct device *dev)
{
    struct codec_mm_mgt_s *mgt = get_mem_mgt();
    void *start;
    void *end;

    mgt->rmem = *rmem;
    start = (void *)mgt->rmem.base;
    end = start + mgt->rmem.size; /* void* arithmetic: GNU extension, fine in-kernel */
    pr_debug("codec_mm_reserved_init %p->%p\n", start, end);
    return 0;
}

/* Ops attached in codec_mm_res_setup(); device_init fires when a
 * device binds to the reserved region.
 */
static const struct reserved_mem_ops codec_mm_rmem_vdec_ops = {
    .device_init = codec_mm_reserved_init,
};

/* Early reserved-memory setup: attach the ops so device_init runs
 * once a device claims this region. Always succeeds.
 */
static int __init codec_mm_res_setup(struct reserved_mem *rmem)
{
    pr_debug("vdec: reserved mem setup\n");
    rmem->ops = &codec_mm_rmem_vdec_ops;

    return 0;
}

/* Register codec_mm_res_setup() for the matching DT reserved-memory node. */
RESERVEDMEM_OF_DECLARE(codec_mm_reserved, "amlogic, codec-mm-reserved", codec_mm_res_setup);

/* Module parameters (mode 0664): runtime debug switches for the core,
 * scatter allocator, keeper, and TVP paths; also writable via sysfs.
 */
module_param(debug_mode, uint, 0664);
MODULE_PARM_DESC(debug_mode, "\n debug module\n");
module_param(debug_sc_mode, uint, 0664);
MODULE_PARM_DESC(debug_sc_mode, "\n debug scatter module\n");
module_param(debug_keep_mode, uint, 0664);
MODULE_PARM_DESC(debug_keep_mode, "\n debug keep module\n");
module_param(tvp_mode, uint, 0664);
MODULE_PARM_DESC(tvp_mode, "\n tvp module\n");
