#include <rdma/ib_verbs.h>
#include "hns_roce_peer_mem.h"

/* Serializes every traversal/modification of peer_memory_list below. */
static DEFINE_MUTEX(peer_memory_mutex);
/* All currently registered peer-memory clients, linked via core_peer_list. */
static LIST_HEAD(peer_memory_list);

static struct ib_umem *peer_umem_get(struct ib_peer_memory_client *ib_peer_mem,
				     struct ib_umem *umem, unsigned long addr)
{
	int ret;
	struct hns_umem *humem = to_hns_umem(umem);
	const struct peer_memory_client *peer_mem = ib_peer_mem->peer_mem;

	humem->ib_peer_mem = ib_peer_mem;
	/*
	 * We always request write permissions to the pages, to force breaking of any CoW
	 * during the registration of the MR. For read-only MRs we use the "force" flag to
	 * indicate that CoW breaking is required but the registration should not fail if
	 * referencing read-only areas.
	 */
	ret = peer_mem->get_pages(addr, humem->umem.length,
				  1, !humem->umem.writable,
				  &humem->umem.sg_head,
				  humem->peer_mem_client_context,
				  0);
	if (ret)
		goto end;

	humem->page_shift = ilog2(peer_mem->get_page_size(humem->peer_mem_client_context));
	if (BIT(humem->page_shift) <= 0)
		goto put_pages;

	ret = peer_mem->dma_map(&humem->umem.sg_head,
				humem->peer_mem_client_context,
				humem->umem.ibdev->dma_device,
				&humem->umem.nmap);
	if (ret)
		goto put_pages;

	atomic64_add(humem->umem.nmap, &ib_peer_mem->stats.num_reg_pages);
	atomic64_add(humem->umem.nmap * BIT(humem->page_shift), &ib_peer_mem->stats.num_reg_bytes);
	atomic64_inc(&ib_peer_mem->stats.num_alloc_mrs);
	return umem;

put_pages:
	peer_mem->put_pages(&humem->umem.sg_head, humem->peer_mem_client_context);
end:
	ib_put_peer_client(ib_peer_mem, humem->peer_mem_client_context);
	kfree(humem);
	return ERR_PTR(ret);
}

/*
 * hns_peer_umem_release() - undo peer_umem_get() for a peer-backed umem.
 *
 * Unmaps and unpins the peer pages in reverse acquisition order, updates
 * the deregistration statistics, drops the peer client reference and
 * frees the hns_umem wrapper. @umem must not be used afterwards.
 */
void hns_peer_umem_release(struct ib_umem *umem)
{
	struct hns_umem *humem = to_hns_umem(umem);
	struct ib_peer_memory_client *peer_client = humem->ib_peer_mem;
	const struct peer_memory_client *ops = peer_client->peer_mem;
	void *ctx = humem->peer_mem_client_context;

	/* Tear down in reverse order of peer_umem_get(): unmap, then unpin. */
	ops->dma_unmap(&umem->sg_head, ctx, umem->ibdev->dma_device);
	ops->put_pages(&umem->sg_head, ctx);

	/* Deregistration accounting, mirroring the registration counters. */
	atomic64_add(umem->nmap, &peer_client->stats.num_dereg_pages);
	atomic64_add(umem->nmap * BIT(humem->page_shift),
		     &peer_client->stats.num_dereg_bytes);
	atomic64_inc(&peer_client->stats.num_dealloc_mrs);

	ib_put_peer_client(peer_client, ctx);
	kfree(humem);
}
#ifdef ROCE_AI_CUSTOM_SUPPORT
EXPORT_SYMBOL(hns_peer_umem_release);
#endif

/*
 * hns_umem_get() - pin userspace memory for an MR, preferring a peer client.
 *
 * If @peer_mem_private_data identifies a registered peer-memory client that
 * claims [@addr, @addr + @size), the region is pinned through that client;
 * otherwise falls back to the core ib_umem_get(). Returns a valid umem or
 * an ERR_PTR() on failure.
 */
struct ib_umem* hns_umem_get(struct ib_device *device, unsigned long addr,
			        size_t size, int access, void* peer_mem_private_data, char* peer_mem_name)
{
	struct ib_umem *umem;
	struct hns_umem *humem;

	/*
	 * If the combination of the addr and size requested for this memory
	 * region causes an integer overflow, return error.
	 */
	if (((addr + size) < addr) ||
	    PAGE_ALIGN(addr + size) < (addr + size))
		return ERR_PTR(-EINVAL);

	if (!can_do_mlock())
		return ERR_PTR(-EPERM);

	humem = kzalloc(sizeof(*humem), GFP_KERNEL);
	if (!humem) {
		ibdev_err(device, "failed to alloc humem\n");
		return ERR_PTR(-ENOMEM);
	}

	humem->umem.ibdev    = device;
	humem->umem.length     = size;
	humem->umem.address    = addr;
	humem->page_shift = PAGE_SHIFT;
	humem->umem.writable   = ib_access_writable(access);

	/* Embedded-member address instead of a cast that relies on layout. */
	umem = &humem->umem;

	if (peer_mem_private_data) {
		struct ib_peer_memory_client *peer_mem_client;

		peer_mem_client = ib_get_peer_client(addr, size, peer_mem_private_data, peer_mem_name,
					&humem->peer_mem_client_context);
		/* peer_umem_get() consumes humem on both success and failure. */
		if (peer_mem_client)
			return peer_umem_get(peer_mem_client, umem, addr);
	}

	/* No peer client matched: discard the wrapper, use the core path. */
	kfree(humem);

	return ib_umem_get(device, addr, size, access);
}
EXPORT_SYMBOL(hns_umem_get);

/*
 * complete_peer() - kref release callback freeing a peer client.
 *
 * Invoked by kref_put() when the last reference to the
 * ib_peer_memory_client is dropped.
 */
static void complete_peer(struct kref *kref)
{
	struct ib_peer_memory_client *ib_peer_client =
		container_of(kref, struct ib_peer_memory_client, ref);

	/* Removed dead store: NULL-ing a local after kfree() has no effect. */
	kfree(ib_peer_client);
}

/*
 * ib_register_peer_memory_client() - register a peer-memory client.
 *
 * Allocates the core-side bookkeeping for @peer_client and adds it to the
 * global client list. Note that this implementation does not support
 * invalidation: @invalidate_callback is unconditionally cleared and
 * invalidation_required stays 0. Returns an opaque registration handle
 * for ib_unregister_peer_memory_client(), or NULL on allocation failure.
 */
void *ib_register_peer_memory_client(const struct peer_memory_client *peer_client,
				     invalidate_peer_memory *invalidate_callback)
{
	struct ib_peer_memory_client *client;

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return NULL;

	client->peer_mem = peer_client;
	client->last_ticket = 1;
	client->invalidation_required = 0;
	kref_init(&client->ref);
	init_completion(&client->unload_comp);
	INIT_LIST_HEAD(&client->core_ticket_list);

	/* Invalidation callbacks are not wired up in this implementation. */
	*invalidate_callback = NULL;

	mutex_lock(&peer_memory_mutex);
	list_add_tail(&client->core_peer_list, &peer_memory_list);
	mutex_unlock(&peer_memory_mutex);

	return client;
}
EXPORT_SYMBOL(ib_register_peer_memory_client);

/*
 * ib_unregister_peer_memory_client() - remove a registered peer client.
 *
 * @reg_handle: handle returned by ib_register_peer_memory_client().
 *
 * Unlinks the client from the global list and drops the registration
 * reference; the client is freed once the last reference is gone.
 */
void ib_unregister_peer_memory_client(void *reg_handle)
{
	struct ib_peer_memory_client *client = reg_handle;

	mutex_lock(&peer_memory_mutex);
	list_del(&client->core_peer_list);
	mutex_unlock(&peer_memory_mutex);

	kref_put(&client->ref, complete_peer);
}
EXPORT_SYMBOL(ib_unregister_peer_memory_client);

/*
 * ib_get_peer_client() - find a peer client claiming [@addr, @addr + @size).
 *
 * Iterates the registered clients under peer_memory_mutex and asks each to
 * acquire the range; the first client whose acquire() returns > 0 wins.
 * On success takes a reference (drop with ib_put_peer_client()) and stores
 * the client's context in *@peer_client_context. Returns NULL when no
 * client claims the range.
 */
struct ib_peer_memory_client *ib_get_peer_client(unsigned long addr,
						 size_t size, void *peer_mem_private_data,
						 char *peer_mem_name, void **peer_client_context)
{
	struct ib_peer_memory_client *ib_peer_client;
	int ret;

	mutex_lock(&peer_memory_mutex);
	list_for_each_entry(ib_peer_client, &peer_memory_list, core_peer_list) {
		ret = ib_peer_client->peer_mem->acquire(addr, size,
						   peer_mem_private_data,
						   peer_mem_name,
						   peer_client_context);
		if (ret > 0)
			goto found;
	}

	/* Loop fell through: the cursor is invalid, report "no client". */
	ib_peer_client = NULL;

found:
	if (ib_peer_client)
		kref_get(&ib_peer_client->ref);

	mutex_unlock(&peer_memory_mutex);
	return ib_peer_client;
}
EXPORT_SYMBOL(ib_get_peer_client);

void ib_put_peer_client(struct ib_peer_memory_client *ib_peer_client,
			void *peer_client_context)
{
	if (ib_peer_client->peer_mem->release)
		ib_peer_client->peer_mem->release(peer_client_context);

	kref_put(&ib_peer_client->ref, complete_peer);
}
EXPORT_SYMBOL(ib_put_peer_client);
