// SPDX-License-Identifier: GPL-2.0-only
/*
 * Dynamic DMA mapping support.
 *
 * This implementation is a fallback for platforms that do not support
 * I/O TLBs (aka DMA address translation hardware).
 * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
 * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
 * Copyright (C) 2000, 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 03/05/07 davidm	Switch from PCI-DMA to generic device DMA API.
 * 00/12/13 davidm	Rename to swiotlb.c and add mark_clean() to avoid
 *			unnecessary i-cache flushing.
 * 04/07/.. ak		Better overflow handling. Assorted fixes.
 * 05/09/10 linville	Add support for syncing ranges, support syncing for
 *			DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
 * 08/12/11 beckyb	Add highmem support
 */

#define pr_fmt(fmt) "software IO TLB: " fmt

#include <linux/cache.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/swiotlb.h>
#include <linux/pfn.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/rculist.h>
#include <linux/scatterlist.h>
#include <linux/mem_encrypt.h>
#include <linux/set_memory.h>
#include <linux/slab.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#endif

#include <asm/io.h>
#include <asm/dma.h>

#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/iommu-helper.h>

#define CREATE_TRACE_POINTS
#include <trace/events/swiotlb.h>

#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))

/*
 * Minimum IO TLB size to bother booting with.  Systems with mainly
 * 64-bit capable cards will only lightly use the swiotlb.  If we can't
 * allocate a contiguous 1MB, we're probably in trouble anyway.
 */
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
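
/*
 * With the usual IO_TLB_SHIFT of 11 (2 KiB slots), this works out to
 * (1 << 20) >> 11 == 512 slabs.
 */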

#define INVALID_PHYS_ADDR (~(phys_addr_t)0)

enum swiotlb_force swiotlb_force;

#ifdef CONFIG_SWIOTLB_DYNAMIC

static void swiotlb_dyn_alloc(struct work_struct *work);

struct io_tlb_mem io_tlb_default_mem = {
	.lock = __SPIN_LOCK_UNLOCKED(io_tlb_default_mem.lock),
	.pools = LIST_HEAD_INIT(io_tlb_default_mem.pools),
	.dyn_alloc = __WORK_INITIALIZER(io_tlb_default_mem.dyn_alloc,
					swiotlb_dyn_alloc),
};

#else  /* !CONFIG_SWIOTLB_DYNAMIC */

struct io_tlb_mem io_tlb_default_mem;

#endif	/* CONFIG_SWIOTLB_DYNAMIC */

/*
 * Maximum segment that we can provide which (if pages are contiguous) will
 * not be bounced (unless SWIOTLB_FORCE is set).
 */
static unsigned int max_segment;

/**
 * struct io_tlb_area - IO TLB memory area descriptor
 *
 * This is a single area with a single lock.
 *
 * @used:	The number of used IO TLB blocks.
 * @index:	The slot index to start searching in this area for next round.
 * @lock:	The lock to protect the above data structures in the map and
 *		unmap calls.
 */
struct io_tlb_area {
	unsigned long used;
	unsigned int index;
	spinlock_t lock;
};

static void swiotlb_adjust_nareas(unsigned int nareas)
{
	struct io_tlb_pool *mem = &io_tlb_default_mem.defpool;
	unsigned int default_nslabs;

	/* use a single area when none is specified */
	if (!nareas)
		nareas = 1;
	else if (!is_power_of_2(nareas))
		nareas = roundup_pow_of_two(nareas);

	mem->nareas = nareas;

	pr_info("area num %d.\n", nareas);
	/*
	 * Round up the number of slabs to the next power of 2.
	 * The last area is going to be smaller than the rest if
	 * default_nslabs is not a power of two.
	 */
	default_nslabs = swiotlb_size_or_default() >> IO_TLB_SHIFT;
	if (nareas && !is_power_of_2(default_nslabs)) {
		mem->nslabs = roundup_pow_of_two(default_nslabs);
		pr_info("SWIOTLB bounce buffer size roundup to %luMB",
			(mem->nslabs << IO_TLB_SHIFT) >> 20);
	}
}

/**
 * limit_nareas() - get the maximum number of areas for a given memory pool size
 * @nareas:	Desired number of areas.
 * @nslots:	Total number of slots in the memory pool.
 *
 * Limit the number of areas to the maximum possible number of areas in
 * a memory pool of the given size.
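 *
 * For example, assuming the usual IO_TLB_SEGSIZE of 128: a pool of 512
 * slots can host at most 512 / 128 == 4 areas, so a request for 16 areas
 * is limited to 4, while a request for 2 areas passes through unchanged.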
 *
 * Return: Maximum possible number of areas.
 */
static unsigned int limit_nareas(unsigned int nareas, unsigned long nslots)
{
	if (nslots < nareas * IO_TLB_SEGSIZE)
		return nslots / IO_TLB_SEGSIZE;
	return nareas;
}

static int __init
setup_io_tlb_npages(char *str)
{
	struct io_tlb_pool *mem = &io_tlb_default_mem.defpool;

	if (isdigit(*str)) {
		mem->nslabs = simple_strtoul(str, &str, 0);
		/* avoid tail segment of size < IO_TLB_SEGSIZE */
		mem->nslabs = ALIGN(mem->nslabs, IO_TLB_SEGSIZE);
	}
	if (*str == ',')
		++str;
	if (isdigit(*str))
		swiotlb_adjust_nareas(simple_strtoul(str, &str, 0));
	if (*str == ',')
		++str;
	if (!strcmp(str, "force")) {
		swiotlb_force = SWIOTLB_FORCE;
	} else if (!strcmp(str, "noforce")) {
		swiotlb_force = SWIOTLB_NO_FORCE;
		mem->nslabs = 1;
	}

	return 0;
}
early_param("swiotlb", setup_io_tlb_npages);
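
/*
 * Example (hypothetical values): booting with "swiotlb=65536,4,force"
 * requests 65536 slabs (128 MiB with the usual 2 KiB slots), 4 areas, and
 * forces bouncing even for devices that could DMA to all of memory.
 */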

static bool no_iotlb_memory;

unsigned long swiotlb_nr_tbl(void)
{
	return unlikely(no_iotlb_memory) ? 0 : io_tlb_default_mem.nslabs;
}
EXPORT_SYMBOL_GPL(swiotlb_nr_tbl);

unsigned int swiotlb_max_segment(void)
{
	return unlikely(no_iotlb_memory) ? 0 : max_segment;
}
EXPORT_SYMBOL_GPL(swiotlb_max_segment);

void swiotlb_set_max_segment(unsigned int val)
{
	if (swiotlb_force == SWIOTLB_FORCE)
		max_segment = 1;
	else
		max_segment = rounddown(val, PAGE_SIZE);
}

/* default to 64MB */
#define IO_TLB_DEFAULT_SIZE (64UL<<20)
unsigned long swiotlb_size_or_default(void)
{
	unsigned long size;

	size = io_tlb_default_mem.defpool.nslabs << IO_TLB_SHIFT;

	return size ? size : (IO_TLB_DEFAULT_SIZE);
}

void swiotlb_print_info(void)
{
	struct io_tlb_pool *mem = &io_tlb_default_mem.defpool;
	unsigned long bytes = mem->nslabs << IO_TLB_SHIFT;

	if (no_iotlb_memory) {
		pr_warn("No low mem\n");
		return;
	}

	pr_info("mapped [mem %pa-%pa] (%luMB)\n", &mem->start, &mem->end,
	       bytes >> 20);
}

static inline unsigned long io_tlb_offset(unsigned long val)
{
	return val & (IO_TLB_SEGSIZE - 1);
}

static inline unsigned long nr_slots(u64 val)
{
	return DIV_ROUND_UP(val, IO_TLB_SIZE);
}
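
/*
 * For instance, with the usual 2 KiB slots (IO_TLB_SHIFT == 11),
 * nr_slots(4097) rounds up to 3 slots, and io_tlb_offset(130) is
 * 130 & 127 == 2 for an IO_TLB_SEGSIZE of 128.
 */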

/*
 * Early SWIOTLB allocation may be too early to allow an architecture to
 * perform the desired operations.  This function allows the architecture to
 * call SWIOTLB when the operations are possible.  It needs to be called
 * before the SWIOTLB memory is used.
 */
void __init swiotlb_update_mem_attributes(void)
{
	struct io_tlb_pool *mem = &io_tlb_default_mem.defpool;
	void *vaddr;
	unsigned long bytes;

	if (no_iotlb_memory || mem->late_alloc)
		return;

	vaddr = phys_to_virt(mem->start);
	bytes = PAGE_ALIGN(mem->nslabs << IO_TLB_SHIFT);
	set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
	memset(vaddr, 0, bytes);
}

static void swiotlb_init_io_tlb_pool(struct io_tlb_pool *mem, phys_addr_t start,
				    unsigned long nslabs, bool late_alloc,
				    unsigned int nareas)
{
	void *vaddr = phys_to_virt(start);
	unsigned long i, bytes;

	bytes = nslabs << IO_TLB_SHIFT;

	mem->nslabs = nslabs;
	mem->start = start;
	mem->end = mem->start + bytes;
	mem->late_alloc = late_alloc;
	mem->nareas = nareas;
	mem->area_nslabs = nslabs / mem->nareas;

	for (i = 0; i < mem->nareas; i++) {
		spin_lock_init(&mem->areas[i].lock);
		mem->areas[i].index = 0;
		mem->areas[i].used = 0;
	}

	for (i = 0; i < mem->nslabs; i++) {
		mem->list[i] = min(IO_TLB_SEGSIZE - io_tlb_offset(i),
				   mem->nslabs - i);
		mem->orig_addr[i] = INVALID_PHYS_ADDR;
	}
	memset(vaddr, 0, bytes);
}

/**
 * add_mem_pool() - add a memory pool to the allocator
 * @mem:	Software IO TLB allocator.
 * @pool:	Memory pool to be added.
 */
static void add_mem_pool(struct io_tlb_mem *mem, struct io_tlb_pool *pool)
{
#ifdef CONFIG_SWIOTLB_DYNAMIC
	spin_lock(&mem->lock);
	list_add_rcu(&pool->node, &mem->pools);
	mem->nslabs += pool->nslabs;
	spin_unlock(&mem->lock);
#else
	mem->nslabs = pool->nslabs;
#endif
}

int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
{
	struct io_tlb_pool *mem = &io_tlb_default_mem.defpool;
	size_t alloc_size;

	/*
	 * Allocate and initialize the free list array.  This array is used
	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
	 * between mem->start and mem->end.
	 */
	alloc_size = PAGE_ALIGN(nslabs * sizeof(int));
	mem->list = memblock_alloc(alloc_size, PAGE_SIZE);
	if (!mem->list)
		panic("%s: Failed to allocate %zu bytes align=0x%lx\n",
		      __func__, alloc_size, PAGE_SIZE);

	alloc_size = PAGE_ALIGN(nslabs * sizeof(phys_addr_t));
	mem->orig_addr = memblock_alloc(alloc_size, PAGE_SIZE);
	if (!mem->orig_addr)
		panic("%s: Failed to allocate %zu bytes align=0x%lx\n",
		      __func__, alloc_size, PAGE_SIZE);

	/* swiotlb_init_with_tbl() may be called by area-unaware code */
	if (!mem->nareas)
		mem->nareas = 1;
	mem->nareas = limit_nareas(mem->nareas, nslabs);

	mem->areas = memblock_alloc(array_size(sizeof(struct io_tlb_area),
		mem->nareas), SMP_CACHE_BYTES);
	if (!mem->areas)
		panic("%s: Failed to allocate mem->areas.\n", __func__);

	swiotlb_init_io_tlb_pool(mem, __pa(tlb), nslabs, false,
				 mem->nareas);
	add_mem_pool(&io_tlb_default_mem, mem);
	no_iotlb_memory = false;

	if (verbose)
		swiotlb_print_info();

	swiotlb_set_max_segment(mem->nslabs << IO_TLB_SHIFT);
	return 0;
}

/*
 * Statically reserve bounce buffer space and initialize bounce buffer data
 * structures for the software IO TLB used to implement the DMA API.
 */
void  __init
swiotlb_init(int verbose)
{
	struct io_tlb_pool *mem = &io_tlb_default_mem.defpool;
	size_t default_size = IO_TLB_DEFAULT_SIZE;
	unsigned char *vstart;
	unsigned long bytes;

#ifdef CONFIG_SWIOTLB_DYNAMIC
	io_tlb_default_mem.can_grow = true;
	io_tlb_default_mem.phys_limit = ARCH_LOW_ADDRESS_LIMIT;
#endif

	if (!mem->nslabs) {
		mem->nslabs = (default_size >> IO_TLB_SHIFT);
		mem->nslabs = ALIGN(mem->nslabs, IO_TLB_SEGSIZE);
	}
	/*
	 * mem->nslabs may change when the number of areas is adjusted,
	 * so allocate the bounce buffer only after adjusting the area count.
	 */
	if (!mem->nareas)
		swiotlb_adjust_nareas(num_possible_cpus());

	bytes = mem->nslabs << IO_TLB_SHIFT;

	/* Get IO TLB memory from the low pages */
	vstart = memblock_alloc_low(PAGE_ALIGN(bytes), PAGE_SIZE);
	if (vstart && !swiotlb_init_with_tbl(vstart, mem->nslabs, verbose))
		return;

	if (mem->start) {
		memblock_free_early(mem->start,
				    PAGE_ALIGN(mem->nslabs << IO_TLB_SHIFT));
		mem->start = 0;
	}
	pr_warn("Cannot allocate buffer");
	no_iotlb_memory = true;
}

/*
 * Systems with larger DMA zones (those that don't support ISA) can
 * initialize the swiotlb later using the page allocator if needed.
 * This should be just like above, but with some error catching.
 */
int
swiotlb_late_init_with_default_size(size_t default_size)
{
	struct io_tlb_pool *mem = &io_tlb_default_mem.defpool;
	unsigned long bytes, req_nslabs = mem->nslabs;
	unsigned char *vstart = NULL;
	unsigned int order;
	int rc = 0;

#ifdef CONFIG_SWIOTLB_DYNAMIC
	io_tlb_default_mem.can_grow = true;
	if (IS_ENABLED(CONFIG_ZONE_DMA))
		io_tlb_default_mem.phys_limit = DMA_BIT_MASK(zone_dma_bits);
	else if (IS_ENABLED(CONFIG_ZONE_DMA32))
		io_tlb_default_mem.phys_limit = DMA_BIT_MASK(32);
	else
		io_tlb_default_mem.phys_limit = virt_to_phys(high_memory - 1);
#endif

	if (!mem->nslabs) {
		mem->nslabs = (default_size >> IO_TLB_SHIFT);
		mem->nslabs = ALIGN(mem->nslabs, IO_TLB_SEGSIZE);
	}
	if (!mem->nareas)
		swiotlb_adjust_nareas(num_possible_cpus());

	/*
	 * Get IO TLB memory from the low pages
	 */
	order = get_order(mem->nslabs << IO_TLB_SHIFT);
	mem->nslabs = SLABS_PER_PAGE << order;
	bytes = mem->nslabs << IO_TLB_SHIFT;

	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
		vstart = (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
						  order);
		if (vstart)
			break;
		order--;
	}

	if (!vstart) {
		mem->nslabs = req_nslabs;
		return -ENOMEM;
	}
	if (order != get_order(bytes)) {
		pr_warn("only able to allocate %ld MB\n",
			(PAGE_SIZE << order) >> 20);
		mem->nslabs = SLABS_PER_PAGE << order;
	}
	rc = swiotlb_late_init_with_tbl(vstart, mem->nslabs);
	if (rc)
		free_pages((unsigned long)vstart, order);

	return rc;
}

static void swiotlb_cleanup(void)
{
	struct io_tlb_pool *mem = &io_tlb_default_mem.defpool;

	mem->end = 0;
	mem->start = 0;
	mem->nslabs = 0;
	io_tlb_default_mem.nslabs = 0;
#ifdef CONFIG_SWIOTLB_DYNAMIC
	atomic_long_set(&io_tlb_default_mem.transient_nslabs, 0);
#endif
	max_segment = 0;
}

int
swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
{
	struct io_tlb_pool *mem = &io_tlb_default_mem.defpool;
	unsigned long bytes;
	unsigned int area_order;

	bytes = nslabs << IO_TLB_SHIFT;

	memset(mem, 0, sizeof(*mem));
	set_memory_decrypted((unsigned long)tlb, bytes >> PAGE_SHIFT);

	/* swiotlb_late_init_with_tbl() may be called by area-unaware code */
	if (!mem->nareas)
		mem->nareas = 1;
	mem->nareas = limit_nareas(mem->nareas, nslabs);

	area_order = get_order(array_size(sizeof(*mem->areas), mem->nareas));
	mem->areas = (struct io_tlb_area *)
		__get_free_pages(GFP_KERNEL | __GFP_ZERO, area_order);
	if (!mem->areas)
		goto cleanup2;

	/*
	 * Allocate and initialize the free list array.  This array is used
	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
	 * between mem->start and mem->end.
	 */
	mem->list = (unsigned int *)__get_free_pages(GFP_KERNEL,
					get_order(nslabs * sizeof(int)));
	if (!mem->list)
		goto cleanup3;

	mem->orig_addr = (phys_addr_t *)
		__get_free_pages(GFP_KERNEL,
				 get_order(nslabs *
					   sizeof(phys_addr_t)));
	if (!mem->orig_addr)
		goto cleanup4;

	swiotlb_init_io_tlb_pool(mem, virt_to_phys(tlb), nslabs, true,
				 mem->nareas);
	add_mem_pool(&io_tlb_default_mem, mem);
	no_iotlb_memory = false;

	swiotlb_print_info();
	swiotlb_set_max_segment(mem->nslabs << IO_TLB_SHIFT);
	return 0;

cleanup4:
	free_pages((unsigned long)mem->list,
		   get_order(mem->nslabs * sizeof(int)));
	mem->list = NULL;
cleanup3:
	free_pages((unsigned long)mem->areas, area_order);
cleanup2:
	swiotlb_cleanup();
	return -ENOMEM;
}

void __init swiotlb_exit(void)
{
	struct io_tlb_pool *mem = &io_tlb_default_mem.defpool;
	unsigned int area_order;

	if (!mem->orig_addr)
		return;

	if (mem->late_alloc) {
		area_order = get_order(array_size(sizeof(*mem->areas),
			mem->nareas));
		free_pages((unsigned long)mem->areas, area_order);
		free_pages((unsigned long)mem->orig_addr,
			   get_order(mem->nslabs * sizeof(phys_addr_t)));
		free_pages((unsigned long)mem->list,
			   get_order(mem->nslabs * sizeof(int)));
		free_pages((unsigned long)phys_to_virt(mem->start),
			   get_order(mem->nslabs << IO_TLB_SHIFT));
	} else {
		memblock_free_late(__pa(mem->areas),
			array_size(sizeof(*mem->areas), mem->nareas));
		memblock_free_late(__pa(mem->orig_addr),
				   PAGE_ALIGN(mem->nslabs * sizeof(phys_addr_t)));
		memblock_free_late(__pa(mem->list),
				   PAGE_ALIGN(mem->nslabs * sizeof(int)));
		memblock_free_late(mem->start,
				   PAGE_ALIGN(mem->nslabs << IO_TLB_SHIFT));
	}
	swiotlb_cleanup();
}

#ifdef CONFIG_SWIOTLB_DYNAMIC

/**
 * alloc_dma_pages() - allocate pages to be used for DMA
 * @gfp:	GFP flags for the allocation.
 * @bytes:	Size of the buffer.
 * @phys_limit:	Maximum allowed physical address of the buffer.
 *
 * Allocate pages from the buddy allocator. If successful, mark the allocated
 * pages as decrypted so that they can be used for DMA.
 *
 * Return: Decrypted pages, %NULL on allocation failure, or ERR_PTR(-EAGAIN)
 * if the allocated physical address was above @phys_limit.
 */
static struct page *alloc_dma_pages(gfp_t gfp, size_t bytes, u64 phys_limit)
{
	unsigned int order = get_order(bytes);
	struct page *page;
	phys_addr_t paddr;
	void *vaddr;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	paddr = page_to_phys(page);
	if (paddr + bytes - 1 > phys_limit) {
		__free_pages(page, order);
		return ERR_PTR(-EAGAIN);
	}

	vaddr = phys_to_virt(paddr);
	if (set_memory_decrypted((unsigned long)vaddr, PFN_UP(bytes)))
		goto error;
	return page;

error:
	/* Intentional leak if pages cannot be encrypted again. */
	if (!set_memory_encrypted((unsigned long)vaddr, PFN_UP(bytes)))
		__free_pages(page, order);
	return NULL;
}

/**
 * swiotlb_alloc_tlb() - allocate a dynamic IO TLB buffer
 * @dev:	Device for which a memory pool is allocated.
 * @bytes:	Size of the buffer.
 * @phys_limit:	Maximum allowed physical address of the buffer.
 * @gfp:	GFP flags for the allocation.
 *
 * Return: Allocated pages, or %NULL on allocation failure.
 */
static struct page *swiotlb_alloc_tlb(struct device *dev, size_t bytes,
		u64 phys_limit, gfp_t gfp)
{
	struct page *page;

	/*
	 * Allocate from the atomic pools if memory is encrypted and
	 * the allocation is atomic, because decrypting may block.
	 */
	if (!gfpflags_allow_blocking(gfp) && dev && force_dma_unencrypted(dev)) {
		void *vaddr;

		if (!IS_ENABLED(CONFIG_DMA_COHERENT_POOL))
			return NULL;

		return dma_alloc_from_pool(dev, bytes, &vaddr, gfp,
					   dma_coherent_ok);
	}

	gfp &= ~GFP_ZONEMASK;
	if (phys_limit <= DMA_BIT_MASK(zone_dma_bits))
		gfp |= __GFP_DMA;
	else if (phys_limit <= DMA_BIT_MASK(32))
		gfp |= __GFP_DMA32;

	while (IS_ERR(page = alloc_dma_pages(gfp, bytes, phys_limit))) {
		if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
		    phys_limit < DMA_BIT_MASK(64) &&
		    !(gfp & (__GFP_DMA32 | __GFP_DMA)))
			gfp |= __GFP_DMA32;
		else if (IS_ENABLED(CONFIG_ZONE_DMA) &&
			 !(gfp & __GFP_DMA))
			gfp = (gfp & ~__GFP_DMA32) | __GFP_DMA;
		else
			return NULL;
	}

	return page;
}

/**
 * swiotlb_free_tlb() - free a dynamically allocated IO TLB buffer
 * @vaddr:	Virtual address of the buffer.
 * @bytes:	Size of the buffer.
 */
static void swiotlb_free_tlb(void *vaddr, size_t bytes)
{
	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
	    dma_free_from_pool(NULL, vaddr, bytes))
		return;

	/* Intentional leak if pages cannot be encrypted again. */
	if (!set_memory_encrypted((unsigned long)vaddr, PFN_UP(bytes)))
		__free_pages(virt_to_page(vaddr), get_order(bytes));
}

/**
 * swiotlb_alloc_pool() - allocate a new IO TLB memory pool
 * @dev:	Device for which a memory pool is allocated.
 * @minslabs:	Minimum number of slabs.
 * @nslabs:	Desired (maximum) number of slabs.
 * @nareas:	Number of areas.
 * @phys_limit:	Maximum DMA buffer physical address.
 * @gfp:	GFP flags for the allocations.
 *
 * Allocate and initialize a new IO TLB memory pool. The actual number of
 * slabs may be reduced if allocation of @nslabs fails. If even
 * @minslabs cannot be allocated, this function fails.
 *
 * Return: New memory pool, or %NULL on allocation failure.
 */
static struct io_tlb_pool *swiotlb_alloc_pool(struct device *dev,
		unsigned long minslabs, unsigned long nslabs,
		unsigned int nareas, u64 phys_limit, gfp_t gfp)
{
	struct io_tlb_pool *pool;
	unsigned int order;
	struct page *tlb;
	size_t pool_size;
	size_t tlb_size;

	if (nslabs > SLABS_PER_PAGE << (MAX_ORDER - 1)) {
		nslabs = SLABS_PER_PAGE << (MAX_ORDER - 1);
		nareas = limit_nareas(nareas, nslabs);
	}

	pool_size = sizeof(*pool) + array_size(sizeof(*pool->areas), nareas);
	pool = kzalloc(pool_size, gfp);
	if (!pool)
		goto error;
	pool->areas = (void *)pool + sizeof(*pool);

	tlb_size = nslabs << IO_TLB_SHIFT;
	while (!(tlb = swiotlb_alloc_tlb(dev, tlb_size, phys_limit, gfp))) {
		if (nslabs <= minslabs)
			goto error_tlb;
		nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
		nareas = limit_nareas(nareas, nslabs);
		tlb_size = nslabs << IO_TLB_SHIFT;
	}

	order = get_order(array_size(sizeof(*pool->list), nslabs) +
			  array_size(sizeof(*pool->orig_addr), nslabs));
	pool->list = (unsigned int *)__get_free_pages(gfp, order);
	if (!pool->list)
		goto error_slots;
	pool->orig_addr = (void *)pool->list +
		array_size(sizeof(*pool->list), nslabs);

	swiotlb_init_io_tlb_pool(pool, page_to_phys(tlb), nslabs, true, nareas);
	return pool;

error_slots:
	swiotlb_free_tlb(page_address(tlb), tlb_size);
error_tlb:
	kfree(pool);
error:
	return NULL;
}

/**
 * swiotlb_dyn_alloc() - dynamic memory pool allocation worker
 * @work:	Pointer to dyn_alloc in struct io_tlb_mem.
 */
static void swiotlb_dyn_alloc(struct work_struct *work)
{
	struct io_tlb_mem *mem =
		container_of(work, struct io_tlb_mem, dyn_alloc);
	struct io_tlb_pool *pool;

	pool = swiotlb_alloc_pool(NULL, IO_TLB_MIN_SLABS,
				  io_tlb_default_mem.defpool.nslabs,
				  io_tlb_default_mem.defpool.nareas,
				  mem->phys_limit, GFP_KERNEL);
	if (!pool) {
		pr_warn_ratelimited("Failed to allocate new pool");
		return;
	}

	add_mem_pool(mem, pool);
}

/**
 * swiotlb_dyn_free() - RCU callback to free a memory pool
 * @rcu:	RCU head in the corresponding struct io_tlb_pool.
 */
static void swiotlb_dyn_free(struct rcu_head *rcu)
{
	struct io_tlb_pool *pool = container_of(rcu, struct io_tlb_pool, rcu);
	size_t slots_size = array_size(sizeof(*pool->list), pool->nslabs) +
		array_size(sizeof(*pool->orig_addr), pool->nslabs);
	size_t tlb_size = pool->end - pool->start;

	free_pages((unsigned long)pool->list, get_order(slots_size));
	swiotlb_free_tlb(phys_to_virt(pool->start), tlb_size);
	kfree(pool);
}

/**
 * swiotlb_find_pool() - find the IO TLB pool for a physical address
 * @dev:        Device which has mapped the DMA buffer.
 * @paddr:      Physical address within the DMA buffer.
 *
 * Find the IO TLB memory pool descriptor which contains the given physical
 * address, if any.
 *
 * Return: Memory pool which contains @paddr, or %NULL if none.
 */
struct io_tlb_pool *swiotlb_find_pool(struct device *dev, phys_addr_t paddr)
{
	struct dev_dma_io_tlb *dev_iotlb = dev->iotlb;
	struct io_tlb_mem *mem = &io_tlb_default_mem;
	struct io_tlb_pool *pool;

	rcu_read_lock();
	list_for_each_entry_rcu(pool, &mem->pools, node) {
		if (paddr >= pool->start && paddr < pool->end)
			goto out;
	}

	list_for_each_entry_rcu(pool, &dev_iotlb->dma_io_tlb_pools, node) {
		if (paddr >= pool->start && paddr < pool->end)
			goto out;
	}
	pool = NULL;
out:
	rcu_read_unlock();
	return pool;
}

/**
 * swiotlb_del_pool() - remove an IO TLB pool from a device
 * @dev:	Owning device.
 * @pool:	Memory pool to be removed.
 */
static void swiotlb_del_pool(struct device *dev, struct io_tlb_pool *pool)
{
	unsigned long flags;
	struct dev_dma_io_tlb *dev_iotlb = dev->iotlb;

	spin_lock_irqsave(&dev_iotlb->dma_io_tlb_lock, flags);
	list_del_rcu(&pool->node);
	spin_unlock_irqrestore(&dev_iotlb->dma_io_tlb_lock, flags);

	call_rcu(&pool->rcu, swiotlb_dyn_free);
}

int swiotlb_dev_init(struct device *dev)
{
	struct dev_dma_io_tlb *dev_iotlb;

	dev_iotlb = kzalloc(sizeof(*dev_iotlb), GFP_KERNEL);
	if (!dev_iotlb)
		return -ENOMEM;
	INIT_LIST_HEAD(&dev_iotlb->dma_io_tlb_pools);
	spin_lock_init(&dev_iotlb->dma_io_tlb_lock);
	dev_iotlb->dma_uses_io_tlb = false;
	dev->iotlb = dev_iotlb;
	return 0;
}

void swiotlb_dev_remove(struct device *dev)
{
	kfree(dev->iotlb);
	dev->iotlb = NULL;
}

#endif	/* CONFIG_SWIOTLB_DYNAMIC */

/*
 * Bounce: copy the swiotlb buffer from or back to the original dma location
 */
static void swiotlb_bounce(phys_addr_t orig_addr, phys_addr_t tlb_addr,
			   size_t size, enum dma_data_direction dir)
{
	unsigned long pfn = PFN_DOWN(orig_addr);
	unsigned char *vaddr = phys_to_virt(tlb_addr);

	if (PageHighMem(pfn_to_page(pfn))) {
		/* The buffer does not have a mapping.  Map it in and copy */
		unsigned int offset = orig_addr & ~PAGE_MASK;
		char *buffer;
		unsigned int sz = 0;
		unsigned long flags;

		while (size) {
			sz = min_t(size_t, PAGE_SIZE - offset, size);

			local_irq_save(flags);
			buffer = kmap_atomic(pfn_to_page(pfn));
			if (dir == DMA_TO_DEVICE)
				memcpy(vaddr, buffer + offset, sz);
			else
				memcpy(buffer + offset, vaddr, sz);
			kunmap_atomic(buffer);
			local_irq_restore(flags);

			size -= sz;
			pfn++;
			vaddr += sz;
			offset = 0;
		}
	} else if (dir == DMA_TO_DEVICE) {
		memcpy(vaddr, phys_to_virt(orig_addr), size);
	} else {
		memcpy(phys_to_virt(orig_addr), vaddr, size);
	}
}

static inline phys_addr_t slot_addr(phys_addr_t start, phys_addr_t idx)
{
	return start + (idx << IO_TLB_SHIFT);
}

/*
 * Return the offset into an IO TLB slot required to keep the device happy.
 */
static unsigned int swiotlb_align_offset(struct device *dev, u64 addr)
{
	return addr & dma_get_min_align_mask(dev) & (IO_TLB_SIZE - 1);
}
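
/*
 * E.g. a device with a min_align_mask of 0xfff (4 KiB granularity) that
 * maps orig_addr 0x12345678 needs an in-slot offset of 0x678, because
 * 0x12345678 & 0xfff & (IO_TLB_SIZE - 1) == 0x678 with 2 KiB slots.
 */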

/*
 * Carefully handle integer overflow which can occur when boundary_mask == ~0UL.
 */
static inline unsigned long get_max_slots(unsigned long boundary_mask)
{
	if (boundary_mask == ~0UL)
		return 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
	return nr_slots(boundary_mask + 1);
}
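
/*
 * Sample values: a 4 GiB segment boundary (boundary_mask == 0xffffffff)
 * yields nr_slots(1ULL << 32) == 1 << 21 slots with 2 KiB slots, while
 * boundary_mask == ~0UL takes the overflow-safe path above.
 */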

static unsigned int wrap_area_index(struct io_tlb_pool *mem, unsigned int index)
{
	if (index >= mem->area_nslabs)
		return 0;
	return index;
}

#ifdef CONFIG_SWIOTLB_DYNAMIC
#ifdef CONFIG_DEBUG_FS
static void inc_transient_used(struct io_tlb_mem *mem, unsigned int nslots)
{
	atomic_long_add(nslots, &mem->transient_nslabs);
}

static void dec_transient_used(struct io_tlb_mem *mem, unsigned int nslots)
{
	atomic_long_sub(nslots, &mem->transient_nslabs);
}

#else /* !CONFIG_DEBUG_FS */
static void inc_transient_used(struct io_tlb_mem *mem, unsigned int nslots)
{
}
static void dec_transient_used(struct io_tlb_mem *mem, unsigned int nslots)
{
}
#endif /* CONFIG_DEBUG_FS */
#endif /* CONFIG_SWIOTLB_DYNAMIC */

/*
 * Find a contiguous run of IO TLB slots that will fit this request and
 * allocate the bounce buffer from the given IO TLB area.
 */
static int swiotlb_area_find_slots(struct device *dev, struct io_tlb_pool *pool,
		int area_index, phys_addr_t orig_addr, size_t alloc_size)
{
	struct io_tlb_area *area = pool->areas + area_index;
	unsigned long boundary_mask = dma_get_seg_boundary(dev);
	dma_addr_t tbl_dma_addr =
		phys_to_dma_unencrypted(dev, pool->start) & boundary_mask;
	unsigned long max_slots = get_max_slots(boundary_mask);
	unsigned int iotlb_align_mask =
		dma_get_min_align_mask(dev) & ~(IO_TLB_SIZE - 1);
	unsigned int nslots = nr_slots(alloc_size), stride;
	unsigned int index, wrap, count = 0, i;
	unsigned long flags;
	unsigned int slot_base;
	unsigned int slot_index;

	BUG_ON(!nslots);
	BUG_ON(area_index >= pool->nareas);

	/*
	 * For mappings with an alignment requirement don't bother looping to
	 * unaligned slots once we have found an aligned one.  For allocations
	 * of PAGE_SIZE or larger only look for page aligned allocations.
	 */
	stride = (iotlb_align_mask >> IO_TLB_SHIFT) + 1;
	if (alloc_size >= PAGE_SIZE)
		stride = max(stride, stride << (PAGE_SHIFT - IO_TLB_SHIFT));
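	/*
	 * Example, assuming 4 KiB pages and 2 KiB slots: a min_align_mask of
	 * 0xfff gives iotlb_align_mask == 0x800 and hence a stride of 2;
	 * allocations of PAGE_SIZE or more raise the stride to 4 so that only
	 * page-aligned candidates are tried.
	 */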

	spin_lock_irqsave(&area->lock, flags);
	if (unlikely(nslots > pool->area_nslabs - area->used))
		goto not_found;

	slot_base = area_index * pool->area_nslabs;
	index = wrap = wrap_area_index(pool, ALIGN(area->index, stride));

	do {
		slot_index = slot_base + index;

		if ((slot_addr(tbl_dma_addr, slot_index) &
		     iotlb_align_mask) != (orig_addr & iotlb_align_mask)) {
			index = wrap_area_index(pool, index + 1);
			continue;
		}

		/*
		 * If we find a slot that indicates we have 'nslots' number of
		 * contiguous buffers, we allocate the buffers from that slot
		 * onwards and mark the entries as '0', indicating they are
		 * unavailable.
		 */
		if (!iommu_is_span_boundary(slot_index, nslots,
					    nr_slots(tbl_dma_addr),
					    max_slots)) {
			if (pool->list[slot_index] >= nslots)
				goto found;
		}
		index = wrap_area_index(pool, index + stride);
	} while (index != wrap);

not_found:
	spin_unlock_irqrestore(&area->lock, flags);
	return -1;

found:
	for (i = slot_index; i < slot_index + nslots; i++)
		pool->list[i] = 0;
	for (i = slot_index - 1;
	     io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 &&
	     pool->list[i]; i--)
		pool->list[i] = ++count;

	/*
	 * Update the indices to avoid searching in the next round.
	 */
	if (index + nslots < pool->area_nslabs)
		area->index = index + nslots;
	else
		area->index = 0;
	area->used += nslots;
	spin_unlock_irqrestore(&area->lock, flags);
	return slot_index;
}

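/**
 * swiotlb_pool_find_slots() - search one memory pool for free slots
 * @dev:	Device which maps the buffer.
 * @pool:	Memory pool to be searched.
 * @orig_addr:	Original (non-bounced) physical IO buffer address.
 * @alloc_size:	Total requested size of the bounce buffer.
 *
 * Search through one memory pool to find a sequence of slots that match the
 * allocation constraints. Areas are tried round-robin, starting with the
 * area that corresponds to the calling CPU.
 *
 * Return: Index of the first allocated slot, or -1 on error.
 */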
static int swiotlb_pool_find_slots(struct device *dev, struct io_tlb_pool *pool,
		phys_addr_t orig_addr, size_t alloc_size)
{
	int start = raw_smp_processor_id() & (pool->nareas - 1);
	int i = start, index;

	do {
		index = swiotlb_area_find_slots(dev, pool, i, orig_addr,
						alloc_size);
		if (index >= 0)
			return index;
		if (++i >= pool->nareas)
			i = 0;
	} while (i != start);

	return -1;
}

#ifdef CONFIG_SWIOTLB_DYNAMIC

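/**
 * swiotlb_find_slots() - search for slots in the whole swiotlb
 * @dev:	Device which maps the buffer.
 * @orig_addr:	Original (non-bounced) physical IO buffer address.
 * @alloc_size:	Total requested size of the bounce buffer.
 * @retpool:	Used memory pool, updated on return.
 *
 * Search through the whole software IO TLB to find a sequence of slots that
 * match the allocation constraints. If no suitable sequence is found, grow
 * the default allocator in the background and fall back to allocating a
 * transient pool attached to the device.
 *
 * Return: Index of the first allocated slot, or -1 on error.
 */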
static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
		size_t alloc_size, struct io_tlb_pool **retpool)
{
	struct dev_dma_io_tlb *dev_iotlb = dev->iotlb;
	struct io_tlb_mem *mem = &io_tlb_default_mem;
	struct io_tlb_pool *pool;
	unsigned long nslabs;
	unsigned long flags;
	u64 phys_limit;
	int index;

	if (alloc_size > IO_TLB_SEGSIZE * IO_TLB_SIZE)
		return -1;

	rcu_read_lock();
	list_for_each_entry_rcu(pool, &mem->pools, node) {
		index = swiotlb_pool_find_slots(dev, pool, orig_addr,
						alloc_size);
		if (index >= 0) {
			rcu_read_unlock();
			goto found;
		}
	}
	rcu_read_unlock();
	if (!mem->can_grow)
		return -1;

	schedule_work(&mem->dyn_alloc);

	nslabs = nr_slots(alloc_size);
	phys_limit = min_not_zero(*dev->dma_mask, dev->bus_dma_limit);
	pool = swiotlb_alloc_pool(dev, nslabs, nslabs, 1, phys_limit,
				  GFP_NOWAIT | __GFP_NOWARN);
	if (!pool)
		return -1;

	index = swiotlb_pool_find_slots(dev, pool, orig_addr,
					alloc_size);
	if (index < 0) {
		swiotlb_dyn_free(&pool->rcu);
		return -1;
	}

	pool->transient = true;
	spin_lock_irqsave(&dev_iotlb->dma_io_tlb_lock, flags);
	list_add_rcu(&pool->node, &dev_iotlb->dma_io_tlb_pools);
	spin_unlock_irqrestore(&dev_iotlb->dma_io_tlb_lock, flags);
	inc_transient_used(mem, pool->nslabs);

found:
	WRITE_ONCE(dev->iotlb->dma_uses_io_tlb, true);

	/*
	 * The general barrier orders reads and writes against a presumed store
	 * of the SWIOTLB buffer address by a device driver (to a driver private
	 * data structure). It serves two purposes.
	 *
	 * First, the store to dma_uses_io_tlb must be ordered before the
	 * presumed store. This guarantees that the returned buffer address
	 * cannot be passed to another CPU before updating dma_uses_io_tlb.
	 *
	 * Second, the load from mem->pools must be ordered before the same
	 * presumed store. This guarantees that the returned buffer address
	 * cannot be observed by another CPU before an update of the RCU list
	 * that was made by swiotlb_dyn_alloc() on a third CPU (cf. multicopy
	 * atomicity).
	 *
	 * See also the comment in is_swiotlb_buffer().
	 */
	smp_mb();

	*retpool = pool;
	return index;
}

#else  /* !CONFIG_SWIOTLB_DYNAMIC */

static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
		size_t alloc_size, struct io_tlb_pool **retpool)
{
	*retpool = &io_tlb_default_mem.defpool;
	return swiotlb_pool_find_slots(dev, *retpool,
				       orig_addr, alloc_size);
}

#endif /* CONFIG_SWIOTLB_DYNAMIC */

/**
 * mem_pool_used() - get number of used slots in a memory pool
 * @pool:	Software IO TLB memory pool.
 *
 * The result is not accurate, see mem_used().
 *
 * Return: Approximate number of used slots.
 */
static unsigned long mem_pool_used(struct io_tlb_pool *pool)
{
	int i;
	unsigned long used = 0;

	for (i = 0; i < pool->nareas; i++)
		used += pool->areas[i].used;
	return used;
}

static unsigned long mem_used(struct io_tlb_mem *mem)
{
#ifdef CONFIG_SWIOTLB_DYNAMIC
	struct io_tlb_pool *pool;
	unsigned long used = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(pool, &mem->pools, node)
		used += mem_pool_used(pool);
	rcu_read_unlock();

	return used;
#else
	return mem_pool_used(&mem->defpool);
#endif
}

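/*
 * Bounce-map @mapping_size bytes at @orig_addr into a software IO TLB
 * allocation of @alloc_size bytes (plus any device alignment offset) and
 * return the bounce buffer's physical address, or
 * (phys_addr_t)DMA_MAPPING_ERROR on failure.
 */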
phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
		size_t mapping_size, size_t alloc_size,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct io_tlb_mem *mem = &io_tlb_default_mem;
	unsigned int offset = swiotlb_align_offset(dev, orig_addr);
	struct io_tlb_pool *pool;
	unsigned int i;
	int index;
	phys_addr_t tlb_addr;

	if (no_iotlb_memory)
		panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");

	if (mem_encrypt_active())
		pr_warn_once("Memory encryption is active and system is using DMA bounce buffers\n");

	if (mapping_size > alloc_size) {
		dev_warn_once(dev, "Invalid sizes (mapping: %zd bytes, alloc: %zd bytes)",
			      mapping_size, alloc_size);
		return (phys_addr_t)DMA_MAPPING_ERROR;
	}

	index = swiotlb_find_slots(dev, orig_addr, alloc_size + offset, &pool);
	if (index == -1) {
		if (!(attrs & DMA_ATTR_NO_WARN))
			dev_warn_ratelimited(dev,
	"swiotlb buffer is full (sz: %zd bytes), total %lu (slots), used %lu (slots)\n",
				 alloc_size, mem->nslabs, mem_used(mem));
		return (phys_addr_t)DMA_MAPPING_ERROR;
	}

	/*
	 * Save away the mapping from the original address to the DMA address.
	 * This is needed when we sync the memory.  Then we sync the buffer if
	 * needed.
	 */
	for (i = 0; i < nr_slots(alloc_size + offset); i++)
		pool->orig_addr[index + i] = slot_addr(orig_addr, i);

	tlb_addr = slot_addr(pool->start, index) + offset;
	/*
	 * When dir == DMA_FROM_DEVICE we could omit the copy from the original
	 * buffer to the TLB buffer, if we knew for sure the device would
	 * overwrite the entire current content. But we don't. Thus an
	 * unconditional bounce may prevent leaking swiotlb content (i.e.
	 * kernel memory) to user-space.
	 */
	swiotlb_bounce(orig_addr, tlb_addr, mapping_size, DMA_TO_DEVICE);
	return tlb_addr;
}

#ifdef CONFIG_SWIOTLB_DYNAMIC

/**
 * swiotlb_del_transient() - delete a transient memory pool
 * @dev:	Device which mapped the buffer.
 * @tlb_addr:	Physical address within a bounce buffer.
 *
 * Check whether the address belongs to a transient SWIOTLB memory pool.
 * If yes, then delete the pool.
 *
 * Return: %true if @tlb_addr belonged to a transient pool that was released.
 */
static bool swiotlb_del_transient(struct device *dev, phys_addr_t tlb_addr)
{
	struct io_tlb_pool *pool;

	pool = swiotlb_find_pool(dev, tlb_addr);
	if (!pool->transient)
		return false;

	swiotlb_del_pool(dev, pool);
	dec_transient_used(&io_tlb_default_mem, pool->nslabs);
	return true;
}

#else  /* !CONFIG_SWIOTLB_DYNAMIC */

static inline bool swiotlb_del_transient(struct device *dev,
					 phys_addr_t tlb_addr)
{
	return false;
}

#endif	/* CONFIG_SWIOTLB_DYNAMIC */

/*
 * tlb_addr is the physical address of the bounce buffer to unmap.
 */
void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
			      size_t mapping_size, size_t alloc_size,
			      enum dma_data_direction dir, unsigned long attrs)
{
	struct io_tlb_pool *mem = swiotlb_find_pool(hwdev, tlb_addr);
	unsigned long flags;
	unsigned int offset = swiotlb_align_offset(hwdev, tlb_addr);
	int i, count, nslots = nr_slots(alloc_size + offset);
	int index = (tlb_addr - offset - mem->start) >> IO_TLB_SHIFT;
	phys_addr_t orig_addr = mem->orig_addr[index];
	int aindex = index / mem->area_nslabs;
	struct io_tlb_area *area = &mem->areas[aindex];

	/*
	 * First, sync the memory before unmapping the entry
	 */
	if (orig_addr != INVALID_PHYS_ADDR &&
	    !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
	    ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
		swiotlb_bounce(orig_addr, tlb_addr, mapping_size, DMA_FROM_DEVICE);

	if (swiotlb_del_transient(hwdev, tlb_addr))
		return;
	/*
	 * Return the buffer to the free list by setting the corresponding
	 * entries to indicate the number of contiguous entries available.
	 * While returning the entries to the free list, we merge the entries
	 * with slots below and above the range being returned.
	 */
	BUG_ON(aindex >= mem->nareas);

	spin_lock_irqsave(&area->lock, flags);
	if (index + nslots < ALIGN(index + 1, IO_TLB_SEGSIZE))
		count = mem->list[index + nslots];
	else
		count = 0;

	/*
	 * Step 1: return the slots to the free list, merging the slots with
	 * succeeding slots
	 */
	for (i = index + nslots - 1; i >= index; i--) {
		mem->list[i] = ++count;
		mem->orig_addr[i] = INVALID_PHYS_ADDR;
	}

	/*
	 * Step 2: merge the returned slots with the preceding slots, if
	 * available (non-zero)
	 */
	for (i = index - 1;
	     io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 && mem->list[i];
	     i--)
		mem->list[i] = ++count;
	area->used -= nslots;
	spin_unlock_irqrestore(&area->lock, flags);
}

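/*
 * Sync a bounce buffer with its original buffer: bounce the data towards the
 * CPU for SYNC_FOR_CPU, or towards the device for SYNC_FOR_DEVICE, according
 * to the DMA direction of the original mapping.
 */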
void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
			     size_t size, enum dma_data_direction dir,
			     enum dma_sync_target target)
{
	struct io_tlb_pool *mem = swiotlb_find_pool(hwdev, tlb_addr);
	int index = (tlb_addr - mem->start) >> IO_TLB_SHIFT;
	phys_addr_t orig_addr = mem->orig_addr[index];

	if (orig_addr == INVALID_PHYS_ADDR)
		return;

	orig_addr += (tlb_addr & (IO_TLB_SIZE - 1)) -
		swiotlb_align_offset(hwdev, orig_addr);

	switch (target) {
	case SYNC_FOR_CPU:
		if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
			swiotlb_bounce(orig_addr, tlb_addr,
				       size, DMA_FROM_DEVICE);
		else
			BUG_ON(dir != DMA_TO_DEVICE);
		break;
	case SYNC_FOR_DEVICE:
		if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
			swiotlb_bounce(orig_addr, tlb_addr,
				       size, DMA_TO_DEVICE);
		else
			BUG_ON(dir != DMA_FROM_DEVICE);
		break;
	default:
		BUG();
	}
}

/*
 * Create a swiotlb mapping for the buffer at @paddr, and in case of DMAing
 * to the device copy the data into it as well.
 */
dma_addr_t swiotlb_map(struct device *dev, phys_addr_t paddr, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t swiotlb_addr;
	dma_addr_t dma_addr;

	trace_swiotlb_bounced(dev, phys_to_dma(dev, paddr), size,
			      swiotlb_force);

	swiotlb_addr = swiotlb_tbl_map_single(dev, paddr, size, size, dir,
			attrs);
	if (swiotlb_addr == (phys_addr_t)DMA_MAPPING_ERROR)
		return DMA_MAPPING_ERROR;

	/* Ensure that the address returned is DMA'ble */
	dma_addr = phys_to_dma_unencrypted(dev, swiotlb_addr);
	if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
		swiotlb_tbl_unmap_single(dev, swiotlb_addr, size, size, dir,
			attrs | DMA_ATTR_SKIP_CPU_SYNC);
		dev_WARN_ONCE(dev, 1,
			"swiotlb addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
			&dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
		return DMA_MAPPING_ERROR;
	}

	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		arch_sync_dma_for_device(swiotlb_addr, size, dir);
	return dma_addr;
}

size_t swiotlb_max_mapping_size(struct device *dev)
{
	int min_align_mask = dma_get_min_align_mask(dev);
	int min_align = 0;

	/*
	 * swiotlb_find_slots() skips slots according to
	 * min align mask. This affects the max mapping size.
	 * Take it into account here.
	 */
	if (min_align_mask)
		min_align = roundup(min_align_mask, IO_TLB_SIZE);

	return ((size_t)IO_TLB_SIZE) * IO_TLB_SEGSIZE - min_align;
}

bool is_swiotlb_active(void)
{
	/*
	 * When SWIOTLB is initialized, even if mem->start points to physical
	 * address zero, mem->end surely doesn't.
	 */
	return io_tlb_default_mem.defpool.end != 0;
}

#ifdef CONFIG_DEBUG_FS
#ifdef CONFIG_SWIOTLB_DYNAMIC
static unsigned long mem_transient_used(struct io_tlb_mem *mem)
{
	return atomic_long_read(&mem->transient_nslabs);
}

static int io_tlb_transient_used_get(void *data, u64 *val)
{
	*val = mem_transient_used(&io_tlb_default_mem);
	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_io_tlb_transient_used, io_tlb_transient_used_get,
			 NULL, "%llu\n");
#endif /* CONFIG_SWIOTLB_DYNAMIC */

static int io_tlb_used_get(void *data, u64 *val)
{
	*val = mem_used(&io_tlb_default_mem);
	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_io_tlb_used, io_tlb_used_get, NULL, "%llu\n");

static int __init swiotlb_create_debugfs(void)
{
	struct io_tlb_mem *mem = &io_tlb_default_mem;

	mem->debugfs = debugfs_create_dir("swiotlb", NULL);
	debugfs_create_ulong("io_tlb_nslabs", 0400, mem->debugfs, &mem->nslabs);
	debugfs_create_file("io_tlb_used", 0400, mem->debugfs, NULL,
			&fops_io_tlb_used);
#ifdef CONFIG_SWIOTLB_DYNAMIC
	atomic_long_set(&mem->transient_nslabs, 0);
	debugfs_create_file("io_tlb_transient_nslabs", 0400, mem->debugfs,
			    NULL, &fops_io_tlb_transient_used);
#endif
	return 0;
}

late_initcall(swiotlb_create_debugfs);

#endif
