/*
 * Copyright (c) 2023 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/storage/disk_access.h>
#include <zephyr/sys/byteorder.h>

#include <zephyr/usb/usbd.h>
#include <zephyr/usb/usb_ch9.h>
#include <zephyr/usb/class/usbd_msc.h>
#include <zephyr/sys/iterable_sections.h>
#include <zephyr/drivers/usb/udc.h>

#include "usbd_msc_scsi.h"

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(usbd_msc, CONFIG_USBD_MSC_LOG_LEVEL);

/* Subclass and Protocol codes */
#define SCSI_TRANSPARENT_COMMAND_SET	0x06
#define BULK_ONLY_TRANSPORT		0x50

/* Control requests */
#define GET_MAX_LUN			0xFE
#define BULK_ONLY_MASS_STORAGE_RESET	0xFF

/* Command wrapper */
#define CBW_SIGNATURE			0x43425355

#define CBW_FLAGS_DIRECTION_IN		0x80
#define CBW_FLAGS_RESERVED_MASK		0x3F

/* Command Block Wrapper (USB MSC Bulk-Only Transport, 5.1).
 * Multi-byte fields are little-endian on the wire; only
 * dCBWDataTransferLength is byte-swapped after reception (see
 * msc_handle_bulk_out()).
 */
struct CBW {
	uint32_t dCBWSignature;		/* Must equal CBW_SIGNATURE ("USBC") */
	uint32_t dCBWTag;		/* Host tag, echoed back in dCSWTag */
	uint32_t dCBWDataTransferLength; /* Bytes host expects to transfer */
	uint8_t bmCBWFlags;		/* Bit 7 set = Data-In (device to host) */
	uint8_t bCBWLUN;		/* Target Logical Unit Number */
	uint8_t bCBWCBLength;		/* Valid bytes in CBWCB, legal range 1..16 */
	uint8_t CBWCB[16];		/* SCSI Command Block */
} __packed;

/* Status wrapper */
#define CSW_SIGNATURE			0x53425355

#define CSW_STATUS_COMMAND_PASSED	0x00
#define CSW_STATUS_COMMAND_FAILED	0x01
#define CSW_STATUS_PHASE_ERROR		0x02

/* Command Status Wrapper (USB MSC Bulk-Only Transport, 5.2).
 * Sent on Bulk-In after command/data phase; fields little-endian.
 */
struct CSW {
	uint32_t dCSWSignature;		/* Must equal CSW_SIGNATURE ("USBS") */
	uint32_t dCSWTag;		/* Copied from matching dCBWTag */
	uint32_t dCSWDataResidue;	/* Expected minus actually processed bytes */
	uint8_t bCSWStatus;		/* One of CSW_STATUS_* codes */
} __packed;

/* Single instance is likely enough because it can support multiple LUNs */
#define MSC_NUM_INSTANCES CONFIG_USBD_MSC_INSTANCES_COUNT

#define MSC_NUM_BUFFERS UTIL_INC(IS_ENABLED(CONFIG_USBD_MSC_DOUBLE_BUFFERING))

#if USBD_MAX_BULK_MPS > CONFIG_USBD_MSC_SCSI_BUFFER_SIZE
#error "SCSI buffer must be at least USB bulk endpoint wMaxPacketSize"
#endif

UDC_BUF_POOL_DEFINE(msc_ep_pool, MSC_NUM_INSTANCES * (1 + MSC_NUM_BUFFERS),
		    0, sizeof(struct udc_buf_info), NULL);

/* Work item passed from USB stack callbacks to the MSC worker thread */
struct msc_event {
	/* Class instance the event belongs to */
	struct usbd_class_data *c_data;
	/* NULL to request Bulk-Only Mass Storage Reset
	 * Otherwise must point to previously enqueued endpoint buffer
	 */
	struct net_buf *buf;
	/* Transfer completion status forwarded to usbd_msc_handle_request() */
	int err;
};

/* Each instance has 2 endpoints and can receive bulk only reset command */
K_MSGQ_DEFINE(msc_msgq, sizeof(struct msc_event), MSC_NUM_INSTANCES * 3, 4);

/* Make supported vendor request visible for the device stack */
static const struct usbd_cctx_vendor_req msc_bot_vregs =
	USBD_VENDOR_REQ(GET_MAX_LUN, BULK_ONLY_MASS_STORAGE_RESET);

/* Complete descriptor set for one MSC BOT interface. Full-Speed and
 * High-Speed endpoint descriptors are kept side by side; the speed-aware
 * accessors pick the right pair at runtime. The list is terminated by
 * the zero-length nil descriptor.
 */
struct msc_bot_desc {
	struct usb_if_descriptor if0;
	struct usb_ep_descriptor if0_in_ep;
	struct usb_ep_descriptor if0_out_ep;
	struct usb_ep_descriptor if0_hs_in_ep;
	struct usb_ep_descriptor if0_hs_out_ep;
	struct usb_desc_header nil_desc;
};

/* Bit positions for the atomic struct msc_bot_ctx::bits word */
enum {
	MSC_CLASS_ENABLED,	/* Configuration with this class is active */
	MSC_BULK_IN_WEDGED,	/* Bulk-In stays halted until Reset Recovery */
	MSC_BULK_OUT_WEDGED,	/* Bulk-Out stays halted until Reset Recovery */
};

/* Bulk-Only Transport state machine, driven by usbd_msc_thread() */
enum msc_bot_state {
	MSC_BBB_EXPECT_CBW,		/* Idle, waiting for a CBW on Bulk-Out */
	MSC_BBB_PROCESS_CBW,		/* CBW received, issue SCSI command */
	MSC_BBB_PROCESS_READ,		/* Data-In phase (device to host) */
	MSC_BBB_PROCESS_WRITE,		/* Data-Out phase (host to device) */
	MSC_BBB_SEND_CSW,		/* Ready to queue Command Status Wrapper */
	MSC_BBB_WAIT_FOR_CSW_SENT,	/* CSW queued, awaiting IN completion */
	MSC_BBB_WAIT_FOR_RESET_RECOVERY, /* Both pipes wedged until reset */
};

/* Per-instance runtime context for the MSC Bulk-Only Transport class */
struct msc_bot_ctx {
	/* Back pointer to the owning class instance */
	struct usbd_class_data *class_node;
	/* Descriptor storage and speed-specific descriptor pointer arrays */
	struct msc_bot_desc *const desc;
	const struct usb_desc_header **const fs_desc;
	const struct usb_desc_header **const hs_desc;
	/* Statically allocated SCSI data buffers (1 or 2, see MSC_NUM_BUFFERS) */
	uint8_t *scsi_bufs[MSC_NUM_BUFFERS];
	/* Atomic flag word, bit positions from the anonymous enum above */
	atomic_t bits;
	/* Current Bulk-Only Transport state */
	enum msc_bot_state state;
	/* Bitmask of scsi_bufs entries currently handed out */
	uint8_t scsi_bufs_used;
	/* Number of transfers currently enqueued per direction */
	uint8_t num_in_queued;
	uint8_t num_out_queued;
	/* Number of valid entries in luns[] */
	uint8_t registered_luns;
	struct scsi_ctx luns[CONFIG_USBD_MSC_LUNS_PER_INSTANCE];
	/* Active command wrapper and status wrapper under construction */
	struct CBW cbw;
	struct CSW csw;
	/* Bytes moved on the bus for the current command's data phase */
	uint32_t transferred_data;
	/* Bytes produced directly by scsi_cmd() still pending in scsi_bufs[0] */
	size_t scsi_bytes;
};

/* Wrap an existing SCSI buffer into a net_buf bound to endpoint ep.
 * Returns NULL when the endpoint buffer pool is exhausted.
 */
static struct net_buf *msc_buf_alloc_data(const uint8_t ep, uint8_t *data, size_t len)
{
	struct udc_buf_info *info;
	struct net_buf *nb;

	nb = net_buf_alloc_with_data(&msc_ep_pool, data, len, K_NO_WAIT);
	if (!nb) {
		return NULL;
	}

	if (USB_EP_DIR_IS_OUT(ep)) {
		/* Buffer is empty, USB stack will write data from host */
		nb->len = 0;
	}

	info = udc_get_buf_info(nb);
	info->ep = ep;

	return nb;
}

/* Hand out the first SCSI buffer not currently marked as used */
static uint8_t *msc_alloc_scsi_buf(struct msc_bot_ctx *ctx)
{
	for (int i = 0; i < MSC_NUM_BUFFERS; i++) {
		const uint8_t mask = BIT(i);

		if ((ctx->scsi_bufs_used & mask) == 0) {
			ctx->scsi_bufs_used |= mask;
			return ctx->scsi_bufs[i];
		}
	}

	/* Code must not attempt to queue more than MSC_NUM_BUFFERS at once */
	__ASSERT(false, "MSC ran out of SCSI buffers");
	return NULL;
}

/* Return a buffer obtained from msc_alloc_scsi_buf() to the free set.
 *
 * Pointers that do not match any SCSI buffer are silently ignored; the
 * request completion path calls this unconditionally on completed
 * transfers (including the CSW buffer path).
 *
 * Marked static: it is only used within this translation unit, matching
 * the linkage of every other helper in this file.
 */
static void msc_free_scsi_buf(struct msc_bot_ctx *ctx, uint8_t *buf)
{
	for (int i = 0; i < MSC_NUM_BUFFERS; i++) {
		if (buf == ctx->scsi_bufs[i]) {
			ctx->scsi_bufs_used &= ~BIT(i);
			return;
		}
	}
}

/* Clamp a requested data-phase length so it fits one SCSI buffer, is a
 * whole multiple of the bulk endpoint wMaxPacketSize for the active bus
 * speed, and (when known) a whole multiple of the LUN's sector size.
 */
static size_t clamp_transfer_length(struct usbd_context *uds_ctx,
				    struct scsi_ctx *lun,
				    size_t len)
{
	size_t mps;

	/* Never exceed the SCSI buffer capacity */
	if (len > CONFIG_USBD_MSC_SCSI_BUFFER_SIZE) {
		len = CONFIG_USBD_MSC_SCSI_BUFFER_SIZE;
	}

	/* Limit transfer to bulk endpoint wMaxPacketSize multiple */
	if (USBD_SUPPORTS_HIGH_SPEED &&
	    usbd_bus_speed(uds_ctx) == USBD_SPEED_HS) {
		mps = 512;
	} else {
		/* Full-Speed */
		mps = 64;
	}
	len = ROUND_DOWN(len, mps);

	/* Round down to sector size multiple */
	if (lun->sector_size != 0) {
		len = ROUND_DOWN(len, lun->sector_size);
	}

	return len;
}

/* Length of the next Bulk-In transfer for the active command's LUN */
static size_t msc_next_in_transfer_length(struct usbd_class_data *const c_data)
{
	struct msc_bot_ctx *ctx = usbd_class_get_private(c_data);
	struct scsi_ctx *lun = &ctx->luns[ctx->cbw.bCBWLUN];

	return clamp_transfer_length(usbd_class_get_ctx(c_data), lun,
				     scsi_cmd_remaining_data_len(lun));
}

/* Length of the next Bulk-Out transfer for the active command's LUN.
 * With one transfer already in flight the returned value estimates the
 * transfer that would follow it.
 */
static size_t msc_next_out_transfer_length(struct usbd_class_data *const c_data)
{
	struct usbd_context *uds_ctx = usbd_class_get_ctx(c_data);
	struct msc_bot_ctx *ctx = usbd_class_get_private(c_data);
	struct scsi_ctx *lun = &ctx->luns[ctx->cbw.bCBWLUN];
	const size_t remaining = scsi_cmd_remaining_data_len(lun);
	const size_t first = clamp_transfer_length(uds_ctx, lun, remaining);

	/* This function can only estimate one more transfer after the current
	 * one. Queueing more buffers is not supported.
	 */
	__ASSERT_NO_MSG(ctx->num_out_queued < 2);

	if (ctx->num_out_queued == 0) {
		return first;
	}

	/* MSC BOT specification requires host to send all the data it intends
	 * to send. Therefore it should be safe to use "remaining - first" here.
	 */
	return clamp_transfer_length(uds_ctx, lun, remaining - first);
}

static uint8_t msc_get_bulk_in(struct usbd_class_data *const c_data)
{
	struct usbd_context *uds_ctx = usbd_class_get_ctx(c_data);
	struct msc_bot_ctx *ctx = usbd_class_get_private(c_data);
	struct msc_bot_desc *desc = ctx->desc;

	if (USBD_SUPPORTS_HIGH_SPEED &&
	    usbd_bus_speed(uds_ctx) == USBD_SPEED_HS) {
		return desc->if0_hs_in_ep.bEndpointAddress;
	}

	return desc->if0_in_ep.bEndpointAddress;
}

static uint8_t msc_get_bulk_out(struct usbd_class_data *const c_data)
{
	struct usbd_context *uds_ctx = usbd_class_get_ctx(c_data);
	struct msc_bot_ctx *ctx = usbd_class_get_private(c_data);
	struct msc_bot_desc *desc = ctx->desc;

	if (USBD_SUPPORTS_HIGH_SPEED &&
	    usbd_bus_speed(uds_ctx) == USBD_SPEED_HS) {
		return desc->if0_hs_out_ep.bEndpointAddress;
	}

	return desc->if0_out_ep.bEndpointAddress;
}

/* Halt the Bulk-Out pipe on the speed-appropriate endpoint */
static void msc_stall_bulk_out_ep(struct usbd_class_data *const c_data)
{
	usbd_ep_set_halt(usbd_class_get_ctx(c_data), msc_get_bulk_out(c_data));
}

/* Halt the Bulk-In pipe on the speed-appropriate endpoint */
static void msc_stall_bulk_in_ep(struct usbd_class_data *const c_data)
{
	usbd_ep_set_halt(usbd_class_get_ctx(c_data), msc_get_bulk_in(c_data));
}

/* Wedge both bulk pipes and park the state machine until the host
 * performs Reset Recovery. The feature-halt handler keeps the endpoints
 * halted while the wedge bits are set.
 */
static void msc_stall_and_wait_for_recovery(struct msc_bot_ctx *ctx)
{
	struct usbd_class_data *c_data = ctx->class_node;

	atomic_set_bit(&ctx->bits, MSC_BULK_IN_WEDGED);
	atomic_set_bit(&ctx->bits, MSC_BULK_OUT_WEDGED);
	msc_stall_bulk_in_ep(c_data);
	msc_stall_bulk_out_ep(c_data);
	ctx->state = MSC_BBB_WAIT_FOR_RESET_RECOVERY;
}

/* Keep the Bulk-Out pipe primed during the Data-Out phase.
 *
 * Enqueues OUT transfers until either all SCSI buffers are in flight or
 * msc_next_out_transfer_length() reports nothing left to receive. Each
 * transfer borrows one SCSI buffer; on enqueue failure the buffer is
 * returned and Reset Recovery is entered (6.6.2 Internal Device Error).
 */
static void msc_queue_write(struct msc_bot_ctx *ctx)
{
	struct net_buf *buf;
	uint8_t *scsi_buf;
	uint8_t ep;
	size_t len;
	int ret;

	ep = msc_get_bulk_out(ctx->class_node);

	/* Ensure there are as many OUT transfers queued as possible */
	while ((ctx->num_out_queued < MSC_NUM_BUFFERS) &&
	       (len = msc_next_out_transfer_length(ctx->class_node))) {
		scsi_buf = msc_alloc_scsi_buf(ctx);
		buf = msc_buf_alloc_data(ep, scsi_buf, len);

		/* The pool is large enough to support all allocations. Failing
		 * alloc indicates either a memory leak or logic error.
		 */
		__ASSERT_NO_MSG(buf);

		ret = usbd_ep_enqueue(ctx->class_node, buf);
		if (ret) {
			LOG_ERR("Failed to enqueue net_buf for 0x%02x", ep);
			net_buf_unref(buf);
			msc_free_scsi_buf(ctx, scsi_buf);
			/* 6.6.2 Internal Device Error */
			msc_stall_and_wait_for_recovery(ctx);
			return;
		}

		ctx->num_out_queued++;
	}
}

/* Queue a single OUT transfer to receive the next Command Block Wrapper.
 *
 * Uses one SCSI buffer sized to a full bulk packet; a no-op when an OUT
 * transfer is already pending. On enqueue failure the buffer is freed
 * and Reset Recovery is entered (6.6.2 Internal Device Error).
 */
static void msc_queue_cbw(struct usbd_class_data *const c_data)
{
	struct msc_bot_ctx *ctx = usbd_class_get_private(c_data);
	struct net_buf *buf;
	uint8_t *scsi_buf;
	uint8_t ep;
	int ret;

	if (ctx->num_out_queued) {
		/* Already queued */
		return;
	}

	__ASSERT(ctx->scsi_bufs_used == 0,
		 "CBW can only be queued when SCSI buffers are free");

	LOG_DBG("Queuing OUT");
	ep = msc_get_bulk_out(c_data);
	scsi_buf = msc_alloc_scsi_buf(ctx);
	buf = msc_buf_alloc_data(ep, scsi_buf, USBD_MAX_BULK_MPS);

	/* The pool is large enough to support all allocations. Failing alloc
	 * indicates either a memory leak or logic error.
	 */
	__ASSERT_NO_MSG(buf);

	ret = usbd_ep_enqueue(c_data, buf);
	if (ret) {
		LOG_ERR("Failed to enqueue net_buf for 0x%02x", ep);
		net_buf_unref(buf);
		msc_free_scsi_buf(ctx, scsi_buf);
		/* 6.6.2 Internal Device Error */
		msc_stall_and_wait_for_recovery(ctx);
	} else {
		ctx->num_out_queued++;
	}
}

/* Handle Bulk-Only Mass Storage Reset: return the state machine to
 * idle, reset every registered LUN and release the endpoint wedges so
 * Reset Recovery can complete.
 */
static void msc_reset_handler(struct usbd_class_data *c_data)
{
	struct msc_bot_ctx *ctx = usbd_class_get_private(c_data);

	LOG_INF("Bulk-Only Mass Storage Reset");

	ctx->state = MSC_BBB_EXPECT_CBW;
	for (int i = 0; i < ctx->registered_luns; i++) {
		scsi_reset(&ctx->luns[i]);
	}

	atomic_clear_bit(&ctx->bits, MSC_BULK_IN_WEDGED);
	atomic_clear_bit(&ctx->bits, MSC_BULK_OUT_WEDGED);
}

static bool is_cbw_meaningful(struct msc_bot_ctx *const ctx)
{
	if (ctx->cbw.bmCBWFlags & CBW_FLAGS_RESERVED_MASK) {
		/* Reserved bits are set = not meaningful */
		return false;
	}

	if (ctx->cbw.bCBWLUN >= ctx->registered_luns) {
		/* Either not registered LUN or invalid (> 0x0F) */
		return false;
	}

	if (ctx->cbw.bCBWCBLength < 1 || ctx->cbw.bCBWCBLength > 16) {
		/* Only legal values are 1 to 16, other are reserved */
		return false;
	}

	return true;
}

/* Queue a Bulk-In transfer carrying len bytes from a SCSI buffer.
 *
 * dCSWDataResidue is decremented before the enqueue; the buffer is
 * handed over to the USB stack on success. On failure the SCSI buffer
 * is released and Reset Recovery is entered (6.6.2).
 */
static void msc_queue_bulk_in_ep(struct msc_bot_ctx *ctx, uint8_t *data, int len)
{
	struct net_buf *buf;
	uint8_t ep;
	int ret;

	ep = msc_get_bulk_in(ctx->class_node);
	buf = msc_buf_alloc_data(ep, data, len);
	/* The pool is large enough to support all allocations. Failing alloc
	 * indicates either a memory leak or logic error.
	 */
	__ASSERT_NO_MSG(buf);

	/* Either the net buf is full or there is no more SCSI data */
	ctx->csw.dCSWDataResidue -= len;
	ret = usbd_ep_enqueue(ctx->class_node, buf);
	if (ret) {
		LOG_ERR("Failed to enqueue net_buf for 0x%02x", ep);
		msc_free_scsi_buf(ctx, data);
		net_buf_unref(buf);
		/* 6.6.2 Internal Device Error */
		msc_stall_and_wait_for_recovery(ctx);
	} else {
		ctx->num_in_queued++;
	}
}

/* Drive the Data-In phase: push SCSI data towards the host.
 *
 * First flushes any data that scsi_cmd() already placed in scsi_bufs[0],
 * then keeps the Bulk-In pipe primed with scsi_read_data() output while
 * buffers are available and the state remains MSC_BBB_PROCESS_READ
 * (msc_queue_bulk_in_ep() may switch state on enqueue failure).
 */
static void msc_process_read(struct msc_bot_ctx *ctx)
{
	struct scsi_ctx *lun = &ctx->luns[ctx->cbw.bCBWLUN];
	int bytes_queued = 0;
	uint8_t *scsi_buf;
	size_t len;

	/* Data can be already in scsi_buf0 only on first call after CBW */
	if (ctx->scsi_bytes) {
		__ASSERT_NO_MSG(ctx->scsi_bufs_used == 0);
		/* Claim buffer 0 directly, it already holds the data */
		ctx->scsi_bufs_used = BIT(0);
		msc_queue_bulk_in_ep(ctx, ctx->scsi_bufs[0], ctx->scsi_bytes);
		/* All data is submitted in one go. Any potential new data will
		 * have to be retrieved using scsi_read_data() later.
		 */
		ctx->scsi_bytes = 0;
	}

	/* Fill SCSI Data IN buffer if there is avaialble buffer and data */
	while ((ctx->num_in_queued < MSC_NUM_BUFFERS) &&
	       (ctx->state == MSC_BBB_PROCESS_READ) &&
	       (len = msc_next_in_transfer_length(ctx->class_node))) {
		scsi_buf = msc_alloc_scsi_buf(ctx);
		bytes_queued = scsi_read_data(lun, scsi_buf, len);
		msc_queue_bulk_in_ep(ctx, scsi_buf, bytes_queued);
	}
}

/* Execute the SCSI command from the received CBW and classify the
 * transfer against the thirteen host/device expectation cases of MSC
 * BOT 6.7, choosing the next state (data phase or CSW) and stalling
 * pipes where the specification requires it.
 */
static void msc_process_cbw(struct msc_bot_ctx *ctx)
{
	struct scsi_ctx *lun = &ctx->luns[ctx->cbw.bCBWLUN];
	bool cmd_is_data_read, cmd_is_data_write;
	size_t data_len;
	int cb_len;

	/* All SCSI buffers must be available */
	__ASSERT_NO_MSG(ctx->scsi_bufs_used == 0);

	/* Run the command; immediate response data lands in scsi_bufs[0] */
	cb_len = scsi_usb_boot_cmd_len(ctx->cbw.CBWCB, ctx->cbw.bCBWCBLength);
	data_len = scsi_cmd(lun, ctx->cbw.CBWCB, cb_len, ctx->scsi_bufs[0]);
	ctx->scsi_bytes = data_len;
	cmd_is_data_read = scsi_cmd_is_data_read(lun);
	cmd_is_data_write = scsi_cmd_is_data_write(lun);
	/* Total device-side data = immediate bytes + outstanding transfer */
	data_len += scsi_cmd_remaining_data_len(lun);

	/* Write commands must not return any data to initiator (host) */
	__ASSERT_NO_MSG(cmd_is_data_read || ctx->scsi_bytes == 0);

	if (ctx->cbw.dCBWDataTransferLength == 0) {
		/* 6.7.1 Hn - Host expects no data transfers */
		if (data_len == 0) {
			/* Case (1) Hn = Dn */
			if (scsi_cmd_get_status(lun) == GOOD) {
				ctx->csw.bCSWStatus = CSW_STATUS_COMMAND_PASSED;
			} else {
				ctx->csw.bCSWStatus = CSW_STATUS_COMMAND_FAILED;
			}
		} else {
			/* Case (2) Hn < Di or (3) Hn < Do */
			ctx->csw.bCSWStatus = CSW_STATUS_PHASE_ERROR;
		}

		ctx->state = MSC_BBB_SEND_CSW;
	} else if (data_len == 0) {
		/* SCSI target does not want any data, but host either wants to
		 * send or receive data. Note that SCSI target data direction is
		 * irrelevant, because opcode can simply be not supported. Even
		 * if host maliciously issues 0 sectors read and wants to write
		 * data as indicated in CB it is still Case (9) Ho > Dn.
		 */
		if (ctx->cbw.bmCBWFlags & CBW_FLAGS_DIRECTION_IN) {
			/* Case (4) Hi > Dn */
			msc_stall_bulk_in_ep(ctx->class_node);
		} else {
			/* Case (9) Ho > Dn */
			msc_stall_bulk_out_ep(ctx->class_node);
		}

		if (scsi_cmd_get_status(lun) == GOOD) {
			ctx->csw.bCSWStatus = CSW_STATUS_COMMAND_PASSED;
		} else {
			ctx->csw.bCSWStatus = CSW_STATUS_COMMAND_FAILED;
		}

		ctx->state = MSC_BBB_SEND_CSW;
	} else if (ctx->cbw.bmCBWFlags & CBW_FLAGS_DIRECTION_IN) {
		/* 6.7.2 Hi - Host expects to receive data from device */
		if ((data_len > ctx->cbw.dCBWDataTransferLength) ||
		    !cmd_is_data_read) {
			/* Case (7) Hi < Di or (8) Hi <> Do */
			msc_stall_bulk_in_ep(ctx->class_node);
			ctx->csw.bCSWStatus = CSW_STATUS_PHASE_ERROR;
			ctx->state = MSC_BBB_SEND_CSW;
		} else {
			/* Case (5) Hi > Di or (6) Hi = Di */
			ctx->state = MSC_BBB_PROCESS_READ;
		}
	} else {
		/* 6.7.3 Ho - Host expects to send data to the device */
		if ((data_len > ctx->cbw.dCBWDataTransferLength) ||
		    !cmd_is_data_write) {
			/* Case (10) Ho <> Di or (13) Ho < Do */
			msc_stall_bulk_out_ep(ctx->class_node);
			ctx->csw.bCSWStatus = CSW_STATUS_PHASE_ERROR;
			ctx->state = MSC_BBB_SEND_CSW;
		} else {
			/* Case (11) Ho > Do or (12) Ho = Do */
			ctx->state = MSC_BBB_PROCESS_WRITE;
		}
	}
}

/* Consume one completed Bulk-Out transfer during the Data-Out phase.
 *
 * Forwards the payload to the SCSI layer, updates bus and residue
 * accounting, and moves to CSW (stalling Bulk-Out in the Ho > Do case)
 * once the host has sent everything or the SCSI layer needs no more.
 */
static void msc_process_write(struct msc_bot_ctx *ctx,
			      uint8_t *buf, size_t len)
{
	size_t tmp;
	struct scsi_ctx *lun = &ctx->luns[ctx->cbw.bCBWLUN];

	/* Count bytes actually moved over the bus, processed or not */
	ctx->transferred_data += len;

	if ((len > 0) && (scsi_cmd_remaining_data_len(lun) > 0)) {
		/* Pass data to SCSI layer. */
		tmp = scsi_write_data(lun, buf, len);
		__ASSERT(tmp <= len, "Processed more data than requested");
		if (tmp == 0) {
			LOG_WRN("SCSI handler didn't process %zu bytes", len);
		} else {
			LOG_DBG("SCSI processed %zu out of %zu bytes", tmp, len);
		}

		/* Residue shrinks only by what SCSI actually accepted */
		ctx->csw.dCSWDataResidue -= tmp;
	}

	if ((ctx->transferred_data >= ctx->cbw.dCBWDataTransferLength) ||
	    (scsi_cmd_remaining_data_len(lun) == 0)) {
		if (ctx->transferred_data < ctx->cbw.dCBWDataTransferLength) {
			/* Case (11) Ho > Do and the transfer is still in
			 * progress. We do not intend to process more data so
			 * stall the Bulk-Out pipe.
			 */
			msc_stall_bulk_out_ep(ctx->class_node);
		}

		if (scsi_cmd_get_status(lun) == GOOD) {
			ctx->csw.bCSWStatus = CSW_STATUS_COMMAND_PASSED;
		} else {
			ctx->csw.bCSWStatus = CSW_STATUS_COMMAND_FAILED;
		}

		ctx->state = MSC_BBB_SEND_CSW;
	}
}

/* Dispatch a completed Bulk-Out transfer based on current state.
 *
 * In MSC_BBB_EXPECT_CBW a packet is accepted as CBW only if it has the
 * exact CBW size and signature (6.2.1 CBW validity); anything else
 * triggers 6.6.1 Reset Recovery. In MSC_BBB_PROCESS_WRITE the payload
 * is handed to msc_process_write(). Other states ignore the data.
 */
static void msc_handle_bulk_out(struct msc_bot_ctx *ctx,
				uint8_t *buf, size_t len)
{
	if (ctx->state == MSC_BBB_EXPECT_CBW) {
		if (len == sizeof(struct CBW) && sys_get_le32(buf) == CBW_SIGNATURE) {
			memcpy(&ctx->cbw, buf, sizeof(struct CBW));
			/* Convert dCBWDataTransferLength endianness, other
			 * fields are either single byte or not relevant.
			 */
			ctx->cbw.dCBWDataTransferLength =
				sys_le32_to_cpu(ctx->cbw.dCBWDataTransferLength);
			/* Fill CSW with relevant information */
			ctx->csw.dCSWSignature = sys_cpu_to_le32(CSW_SIGNATURE);
			ctx->csw.dCSWTag = ctx->cbw.dCBWTag;
			ctx->csw.dCSWDataResidue = ctx->cbw.dCBWDataTransferLength;
			ctx->transferred_data = 0;
			if (is_cbw_meaningful(ctx)) {
				ctx->csw.bCSWStatus = CSW_STATUS_COMMAND_FAILED;
				ctx->state = MSC_BBB_PROCESS_CBW;
			} else {
				LOG_INF("Not meaningful CBW");
				/* Mass Storage Class - Bulk Only Transport
				 * does not specify response to not meaningful
				 * CBW. Stall Bulk IN and Report Phase Error.
				 */
				msc_stall_bulk_in_ep(ctx->class_node);
				ctx->csw.bCSWStatus = CSW_STATUS_PHASE_ERROR;
				ctx->state = MSC_BBB_SEND_CSW;
			}
		} else {
			/* 6.6.1 CBW Not Valid */
			LOG_INF("Invalid CBW");
			msc_stall_and_wait_for_recovery(ctx);
		}
	} else if (ctx->state == MSC_BBB_PROCESS_WRITE) {
		msc_process_write(ctx, buf, len);
	}
}

/* Handle a completed Bulk-In transfer.
 *
 * A completed transfer in MSC_BBB_WAIT_FOR_CSW_SENT means the CSW
 * reached the host and the machine returns to idle. During the Data-In
 * phase, when no more data will be queued, the pipe is stalled if
 * residue remains (Case 5, Hi > Di) and status moves to CSW.
 */
static void msc_handle_bulk_in(struct msc_bot_ctx *ctx,
			       uint8_t *buf, size_t len)
{
	if (ctx->state == MSC_BBB_WAIT_FOR_CSW_SENT) {
		LOG_DBG("CSW sent");
		ctx->state = MSC_BBB_EXPECT_CBW;
	} else if (ctx->state == MSC_BBB_PROCESS_READ) {
		struct scsi_ctx *lun = &ctx->luns[ctx->cbw.bCBWLUN];

		ctx->transferred_data += len;
		if (msc_next_in_transfer_length(ctx->class_node) == 0) {
			if (ctx->csw.dCSWDataResidue > 0) {
				/* Case (5) Hi > Di
				 * While we may have sent short packet, device
				 * shall STALL the Bulk-In pipe (if it does not
				 * send padding data).
				 */
				msc_stall_bulk_in_ep(ctx->class_node);
			}
			if (scsi_cmd_get_status(lun) == GOOD) {
				ctx->csw.bCSWStatus = CSW_STATUS_COMMAND_PASSED;
			} else {
				ctx->csw.bCSWStatus = CSW_STATUS_COMMAND_FAILED;
			}
			ctx->state = MSC_BBB_SEND_CSW;
		}
	}
}

/* Queue the Command Status Wrapper on Bulk-In.
 *
 * Requires an idle IN pipe and all SCSI buffers free; the CSW is copied
 * into a SCSI buffer (residue converted to little-endian first) and the
 * state advances to MSC_BBB_WAIT_FOR_CSW_SENT. Enqueue failure enters
 * Reset Recovery (6.6.2 Internal Device Error).
 */
static void msc_send_csw(struct msc_bot_ctx *ctx)
{
	struct net_buf *buf;
	uint8_t *scsi_buf;
	uint8_t ep;
	int ret;

	if (ctx->num_in_queued) {
		__ASSERT_NO_MSG(false);
		LOG_ERR("IN already queued");
		return;
	}

	__ASSERT(ctx->scsi_bufs_used == 0,
		 "CSW can be sent only if SCSI buffers are free");

	/* Convert dCSWDataResidue to LE, other fields are already set */
	ctx->csw.dCSWDataResidue = sys_cpu_to_le32(ctx->csw.dCSWDataResidue);
	ep = msc_get_bulk_in(ctx->class_node);
	scsi_buf = msc_alloc_scsi_buf(ctx);
	memcpy(scsi_buf, &ctx->csw, sizeof(ctx->csw));
	buf = msc_buf_alloc_data(ep, scsi_buf, sizeof(ctx->csw));
	/* The pool is large enough to support all allocations. Failing alloc
	 * indicates either a memory leak or logic error.
	 */
	__ASSERT_NO_MSG(buf);

	ret = usbd_ep_enqueue(ctx->class_node, buf);
	if (ret) {
		LOG_ERR("Failed to enqueue net_buf for 0x%02x", ep);
		net_buf_unref(buf);
		msc_free_scsi_buf(ctx, scsi_buf);
		/* 6.6.2 Internal Device Error */
		msc_stall_and_wait_for_recovery(ctx);
	} else {
		ctx->num_in_queued++;
		ctx->state = MSC_BBB_WAIT_FOR_CSW_SENT;
	}
}

/* Process a completed (or failed) endpoint transfer in thread context.
 *
 * Successful transfers are dispatched to the state machine; failed ones
 * skip dispatch. In both cases the per-direction queued counters are
 * decremented and the SCSI buffer(s) released — a fragment chain counts
 * as two transfers/buffers (double buffering), hence the frags checks.
 */
static void usbd_msc_handle_request(struct usbd_class_data *c_data,
				    struct net_buf *buf, int err)
{
	struct usbd_context *uds_ctx = usbd_class_get_ctx(c_data);
	struct msc_bot_ctx *ctx = usbd_class_get_private(c_data);
	struct udc_buf_info *bi;

	bi = udc_get_buf_info(buf);
	if (err) {
		if (err == -ECONNABORTED) {
			LOG_WRN("request ep 0x%02x, len %u cancelled",
				bi->ep, buf->len);
		} else {
			LOG_ERR("request ep 0x%02x, len %u failed",
				bi->ep, buf->len);
		}

		goto ep_request_error;
	}

	if (bi->ep == msc_get_bulk_out(c_data)) {
		msc_handle_bulk_out(ctx, buf->data, buf->len);
	} else if (bi->ep == msc_get_bulk_in(c_data)) {
		msc_handle_bulk_in(ctx, buf->data, buf->len);
	}

ep_request_error:
	/* Account for and free the buffer(s), even on the error path */
	if (bi->ep == msc_get_bulk_out(c_data)) {
		ctx->num_out_queued--;
		if (buf->frags) {
			ctx->num_out_queued--;
		}
	} else if (bi->ep == msc_get_bulk_in(c_data)) {
		ctx->num_in_queued--;
		if (buf->frags) {
			ctx->num_in_queued--;
		}
	}
	msc_free_scsi_buf(ctx, buf->__buf);
	if (buf->frags) {
		msc_free_scsi_buf(ctx, buf->frags->__buf);
	}
	usbd_ep_buf_free(uds_ctx, buf);
}

/* Mass storage worker thread shared by all MSC instances.
 *
 * Consumes events from msc_msgq (reset requests and completed
 * transfers), then advances the state machine of the affected instance:
 * keeps the pipes primed, runs queued CBWs and generates the CSW once
 * no IN data is pending pickup by the host.
 */
static void usbd_msc_thread(void *arg1, void *arg2, void *arg3)
{
	ARG_UNUSED(arg1);
	ARG_UNUSED(arg2);
	ARG_UNUSED(arg3);
	struct msc_event evt;
	struct msc_bot_ctx *ctx;

	while (1) {
		k_msgq_get(&msc_msgq, &evt, K_FOREVER);

		ctx = usbd_class_get_private(evt.c_data);
		if (evt.buf == NULL) {
			/* NULL buffer marks a Bulk-Only Mass Storage Reset */
			msc_reset_handler(evt.c_data);
		} else {
			usbd_msc_handle_request(evt.c_data, evt.buf, evt.err);
		}

		if (!atomic_test_bit(&ctx->bits, MSC_CLASS_ENABLED)) {
			continue;
		}

		/* Keep transfers queued for the current phase */
		switch (ctx->state) {
		case MSC_BBB_EXPECT_CBW:
			msc_queue_cbw(evt.c_data);
			break;
		case MSC_BBB_PROCESS_WRITE:
			/* Ensure we can accept next OUT packet */
			msc_queue_write(ctx);
			break;
		case MSC_BBB_PROCESS_READ:
			msc_process_read(ctx);
			break;
		default:
			break;
		}

		/* Skip (potentially) response generating code if there is
		 * IN data already available for the host to pick up.
		 */
		if (ctx->num_in_queued) {
			continue;
		}

		if (ctx->state == MSC_BBB_PROCESS_CBW) {
			msc_process_cbw(ctx);
		}

		/* msc_process_cbw() may have changed state, re-dispatch */
		if (ctx->state == MSC_BBB_PROCESS_READ) {
			msc_process_read(ctx);
		} else if (ctx->state == MSC_BBB_PROCESS_WRITE) {
			msc_queue_write(ctx);
		} else if (ctx->state == MSC_BBB_SEND_CSW) {
			msc_send_csw(ctx);
		}
	}
}

static void msc_bot_schedule_reset(struct usbd_class_data *c_data)
{
	struct msc_event request = {
		.c_data = c_data,
		.buf = NULL, /* Bulk-Only Mass Storage Reset */
	};

	k_msgq_put(&msc_msgq, &request, K_FOREVER);
}

/* Feature endpoint halt state handler */
static void msc_bot_feature_halt(struct usbd_class_data *const c_data,
				 const uint8_t ep, const bool halted)
{
	struct msc_bot_ctx *ctx = usbd_class_get_private(c_data);

	if (ep == msc_get_bulk_in(c_data) && !halted &&
	    atomic_test_bit(&ctx->bits, MSC_BULK_IN_WEDGED)) {
		/* Endpoint shall remain halted until Reset Recovery */
		usbd_ep_set_halt(usbd_class_get_ctx(c_data), ep);
	} else if (ep == msc_get_bulk_out(c_data) && !halted &&
	    atomic_test_bit(&ctx->bits, MSC_BULK_OUT_WEDGED)) {
		/* Endpoint shall remain halted until Reset Recovery */
		usbd_ep_set_halt(usbd_class_get_ctx(c_data), ep);
	}
}

/* USB control request handler to device */
static int msc_bot_control_to_dev(struct usbd_class_data *const c_data,
				  const struct usb_setup_packet *const setup,
				  const struct net_buf *const buf)
{
	if (setup->bRequest == BULK_ONLY_MASS_STORAGE_RESET &&
	    setup->wValue == 0 && setup->wLength == 0) {
		msc_bot_schedule_reset(c_data);
	} else {
		errno = -ENOTSUP;
	}

	return 0;
}

/* USB control request handler to host */
/* USB control request handler to host.
 * Answers Get Max LUN with the highest registered LUN index; all other
 * requests are rejected via errno (stack interprets it as STALL).
 */
static int msc_bot_control_to_host(struct usbd_class_data *const c_data,
				   const struct usb_setup_packet *const setup,
				   struct net_buf *const buf)
{
	struct msc_bot_ctx *ctx = usbd_class_get_private(c_data);
	uint8_t max_lun;

	if (setup->bRequest != GET_MAX_LUN ||
	    setup->wValue != 0 || setup->wLength < 1) {
		errno = -ENOTSUP;
		return 0;
	}

	/* If there is no LUN registered we cannot really do anything,
	 * because STALLing this request means that device does not
	 * support multiple LUNs and host should only address LUN 0.
	 */
	max_lun = (ctx->registered_luns > 0) ? (ctx->registered_luns - 1) : 0;
	net_buf_add_mem(buf, &max_lun, 1);

	return 0;
}

/* Endpoint request completion event handler */
static int msc_bot_request_handler(struct usbd_class_data *const c_data,
				   struct net_buf *buf, int err)
{
	struct msc_event request = {
		.c_data = c_data,
		.buf = buf,
		.err = err,
	};

	/* Defer request handling to mass storage thread */
	k_msgq_put(&msc_msgq, &request, K_FOREVER);

	return 0;
}

/* Class associated configuration is selected */
/* Class associated configuration is selected.
 * Marks the instance enabled and schedules a reset so the worker thread
 * starts from a clean Bulk-Only Transport state.
 */
static void msc_bot_enable(struct usbd_class_data *const c_data)
{
	struct msc_bot_ctx *ctx = usbd_class_get_private(c_data);

	LOG_INF("Enable");
	atomic_set_bit(&ctx->bits, MSC_CLASS_ENABLED);
	msc_bot_schedule_reset(c_data);
}

/* Class associated configuration is disabled */
/* Class associated configuration is disabled.
 * Clearing MSC_CLASS_ENABLED makes the worker thread stop advancing the
 * state machine for this instance.
 */
static void msc_bot_disable(struct usbd_class_data *const c_data)
{
	struct msc_bot_ctx *ctx = usbd_class_get_private(c_data);

	LOG_INF("Disable");
	atomic_clear_bit(&ctx->bits, MSC_CLASS_ENABLED);
}

static void *msc_bot_get_desc(struct usbd_class_data *const c_data,
			      const enum usbd_speed speed)
{
	struct msc_bot_ctx *ctx = usbd_class_get_private(c_data);

	if (USBD_SUPPORTS_HIGH_SPEED && speed == USBD_SPEED_HS) {
		return ctx->hs_desc;
	}

	return ctx->fs_desc;
}

/* Initialization of the class implementation */
/* Initialization of the class implementation.
 * Binds the context to the class node and registers every LUN declared
 * via the usbd_msc_lun iterable section, up to the per-instance limit.
 * Returns -ENOMEM when more LUNs are declared than fit one instance.
 */
static int msc_bot_init(struct usbd_class_data *const c_data)
{
	struct msc_bot_ctx *ctx = usbd_class_get_private(c_data);

	ctx->class_node = c_data;
	ctx->state = MSC_BBB_EXPECT_CBW;
	ctx->registered_luns = 0;

	STRUCT_SECTION_FOREACH(usbd_msc_lun, lun) {
		if (ctx->registered_luns >= CONFIG_USBD_MSC_LUNS_PER_INSTANCE) {
			LOG_ERR("Cannot register LUN %s", lun->disk);
			return -ENOMEM;
		}

		scsi_init(&ctx->luns[ctx->registered_luns++], lun->disk,
			  lun->vendor, lun->product, lun->revision);
	}

	return 0;
}

/* Instantiate the descriptor storage for MSC instance n: one interface
 * descriptor, Full-Speed endpoint pair (64 byte wMaxPacketSize) and
 * High-Speed endpoint pair (512 byte wMaxPacketSize), plus separate
 * FS/HS descriptor pointer arrays terminated by the nil descriptor.
 */
#define DEFINE_MSC_BOT_DESCRIPTOR(n, _)						\
static struct msc_bot_desc msc_bot_desc_##n = {					\
	.if0 = {								\
		.bLength = sizeof(struct usb_if_descriptor),			\
		.bDescriptorType = USB_DESC_INTERFACE,				\
		.bInterfaceNumber = 0,						\
		.bAlternateSetting = 0,						\
		.bNumEndpoints = 2,						\
		.bInterfaceClass = USB_BCC_MASS_STORAGE,			\
		.bInterfaceSubClass = SCSI_TRANSPARENT_COMMAND_SET,		\
		.bInterfaceProtocol = BULK_ONLY_TRANSPORT,			\
		.iInterface = 0,						\
	},									\
	.if0_in_ep = {								\
		.bLength = sizeof(struct usb_ep_descriptor),			\
		.bDescriptorType = USB_DESC_ENDPOINT,				\
		.bEndpointAddress = 0x81,					\
		.bmAttributes = USB_EP_TYPE_BULK,				\
		.wMaxPacketSize = sys_cpu_to_le16(64U),				\
		.bInterval = 0,							\
	},									\
	.if0_out_ep = {								\
		.bLength = sizeof(struct usb_ep_descriptor),			\
		.bDescriptorType = USB_DESC_ENDPOINT,				\
		.bEndpointAddress = 0x01,					\
		.bmAttributes = USB_EP_TYPE_BULK,				\
		.wMaxPacketSize = sys_cpu_to_le16(64U),				\
		.bInterval = 0,							\
	},									\
	.if0_hs_in_ep = {							\
		.bLength = sizeof(struct usb_ep_descriptor),			\
		.bDescriptorType = USB_DESC_ENDPOINT,				\
		.bEndpointAddress = 0x81,					\
		.bmAttributes = USB_EP_TYPE_BULK,				\
		.wMaxPacketSize = sys_cpu_to_le16(512U),			\
		.bInterval = 0,							\
	},									\
	.if0_hs_out_ep = {							\
		.bLength = sizeof(struct usb_ep_descriptor),			\
		.bDescriptorType = USB_DESC_ENDPOINT,				\
		.bEndpointAddress = 0x01,					\
		.bmAttributes = USB_EP_TYPE_BULK,				\
		.wMaxPacketSize = sys_cpu_to_le16(512U),			\
		.bInterval = 0,							\
	},									\
										\
	.nil_desc = {								\
		.bLength = 0,							\
		.bDescriptorType = 0,						\
	},									\
};										\
										\
const static struct usb_desc_header *msc_bot_fs_desc_##n[] = {			\
	(struct usb_desc_header *) &msc_bot_desc_##n.if0,			\
	(struct usb_desc_header *) &msc_bot_desc_##n.if0_in_ep,			\
	(struct usb_desc_header *) &msc_bot_desc_##n.if0_out_ep,		\
	(struct usb_desc_header *) &msc_bot_desc_##n.nil_desc,			\
};										\
										\
const static struct usb_desc_header *msc_bot_hs_desc_##n[] = {			\
	(struct usb_desc_header *) &msc_bot_desc_##n.if0,			\
	(struct usb_desc_header *) &msc_bot_desc_##n.if0_hs_in_ep,		\
	(struct usb_desc_header *) &msc_bot_desc_##n.if0_hs_out_ep,		\
	(struct usb_desc_header *) &msc_bot_desc_##n.nil_desc,			\
};


/* Class API operations registered with the USB device stack via
 * USBD_DEFINE_CLASS() below.
 */
struct usbd_class_api msc_bot_api = {
	.feature_halt = msc_bot_feature_halt,
	.control_to_dev = msc_bot_control_to_dev,
	.control_to_host = msc_bot_control_to_host,
	.request = msc_bot_request_handler,
	.enable = msc_bot_enable,
	.disable = msc_bot_disable,
	.get_desc = msc_bot_get_desc,
	.init = msc_bot_init,
};

/* Name of SCSI buffer i belonging to instance x */
#define BUF_NAME(x, i) scsi_buf##i##_##x

/* Define one statically allocated, UDC-suitable SCSI buffer */
#define DEFINE_SCSI_BUF(x, i)							\
	UDC_STATIC_BUF_DEFINE(BUF_NAME(x, i), CONFIG_USBD_MSC_SCSI_BUFFER_SIZE);

/* Define 1 buffer, or 2 when double buffering is enabled */
#define DEFINE_SCSI_BUFS(x)							\
	DEFINE_SCSI_BUF(x, 0)							\
	IF_ENABLED(CONFIG_USBD_MSC_DOUBLE_BUFFERING, (DEFINE_SCSI_BUF(x, 1)))

/* Comma-separated buffer names for the scsi_bufs[] initializer */
#define NAME_SCSI_BUFS(x)							\
	BUF_NAME(x, 0)								\
	IF_ENABLED(CONFIG_USBD_MSC_DOUBLE_BUFFERING, (, BUF_NAME(x, 1)))

/* Instantiate SCSI buffers, runtime context and class registration for
 * MSC instance x. Descriptors come from DEFINE_MSC_BOT_DESCRIPTOR(x).
 */
#define DEFINE_MSC_BOT_CLASS_DATA(x, _)						\
	DEFINE_SCSI_BUFS(x)							\
										\
	static struct msc_bot_ctx msc_bot_ctx_##x = {				\
		.desc = &msc_bot_desc_##x,					\
		.fs_desc = msc_bot_fs_desc_##x,					\
		.hs_desc = msc_bot_hs_desc_##x,					\
		.scsi_bufs = { NAME_SCSI_BUFS(x) },				\
	};									\
										\
	USBD_DEFINE_CLASS(msc_##x, &msc_bot_api, &msc_bot_ctx_##x,		\
			  &msc_bot_vregs);

/* Instantiate descriptors and class data for every configured instance */
LISTIFY(MSC_NUM_INSTANCES, DEFINE_MSC_BOT_DESCRIPTOR, ())
LISTIFY(MSC_NUM_INSTANCES, DEFINE_MSC_BOT_CLASS_DATA, ())

/* Single worker thread services all MSC instances via msc_msgq */
K_THREAD_DEFINE(usbd_msc, CONFIG_USBD_MSC_STACK_SIZE,
		usbd_msc_thread, NULL, NULL, NULL,
		CONFIG_SYSTEM_WORKQUEUE_PRIORITY, 0, 0);
