/* One structure per video node */
struct rkisp_vdev_node {
	struct vb2_queue buf_queue;	/* videobuf2 queue backing this node */
	struct video_device vdev;	/* the exposed /dev/videoX device */
	struct media_pad pad;		/* single pad used for media links */
};

/*
 * Per-capture-stream state: video node, negotiated formats, buffer
 * bookkeeping and run-time flags.  The union at the end holds the
 * type-specific state selected by @id.
 */
struct rkisp_stream {
	unsigned int id;			/* stream identifier (selects u member) */
	unsigned interlaced:1;			/* set for interlaced input */
	struct rkisp_device *ispdev;		/* back-pointer to owning ISP device */
	struct rkisp_vdev_node vnode;		/* video node for this stream */
	struct capture_fmt out_isp_fmt;		/* ISP-side output format descriptor */
	struct v4l2_pix_format_mplane out_fmt;	/* user-visible multiplanar format */
	struct v4l2_rect dcrop;			/* digital crop rectangle */
	struct streams_ops *ops;		/* per-stream-type operations */
	struct stream_config *config;		/* per-stream-type configuration */
	spinlock_t vbq_lock;			/* presumably guards buf_queue/curr_buf/next_buf — confirm */
	struct list_head buf_queue;		/* buffers queued and waiting for hw */
	struct rkisp_buffer *curr_buf;		/* buffer currently in flight */
	struct rkisp_buffer *next_buf;		/* buffer programmed for the next frame */
	struct mutex apilock;			/* serializes stream API calls */
	bool streaming;				/* stream is running */
	bool stopping;				/* stop requested, not yet complete */
	bool frame_end;
	bool linked;				/* media link to the ISP is enabled */
	bool start_stream;
	wait_queue_head_t done;			/* wait queue for stop/frame completion */
	unsigned int burst;			/* burst setting — TODO confirm semantics */
	atomic_t sequence;			/* frame sequence counter */
	struct frame_debug_info dbg;		/* per-frame debug statistics */
	u32 memory;
	union {					/* type-specific state, keyed by id */
		struct rkisp_stream_sp sp;
		struct rkisp_stream_mp mp;
		struct rkisp_stream_dmarx dmarx;
		struct rkisp_stream_dmatx dmatx;
	} u;
};

/* Aggregates every capture stream belonging to one ISP device. */
struct rkisp_capture_device {
	struct rkisp_device *ispdev;			/* owning ISP device */
	struct rkisp_stream stream[RKISP_MAX_STREAM];	/* per-stream state */
	struct rkisp_buffer *rdbk_buf[RDBK_MAX];	/* readback buffers — TODO confirm usage */
	atomic_t refcnt;				/* open/usage reference count */
	u32 wait_line;					/* early-done line threshold (see "wait-line" DT prop) */
	bool is_done_early;
};


/*
 * Top-level per-virtual-ISP device state.  One instance exists per
 * "rockchip,rkisp-vir" node; several instances may share one hardware
 * device (see rkisp_attach_hw()).
 */
struct rkisp_device {
	struct list_head list;		/* entry in the global rkisp_device_list */
	void __iomem *base_addr;	/* register base, mirrored from hw_dev */
	struct device *dev;
	char name[128];			/* device name, copied from dev_name() */
	void *sw_base_addr;		/* software shadow area (RKISP_ISP_SW_MAX_SIZE) */
	struct rkisp_hw_dev *hw_dev;	/* shared hardware instance */
	/* v4l2 / media-controller core objects */
	struct v4l2_device v4l2_dev;
	struct v4l2_ctrl_handler ctrl_handler;
	struct media_device media_dev;
	struct v4l2_async_notifier notifier;
	struct v4l2_subdev *subdevs[RKISP_SD_MAX];
	/* sensors discovered through the async notifier */
	struct rkisp_sensor_info *active_sensor;
	struct rkisp_sensor_info sensors[RKISP_MAX_SENSOR];
	int num_sensors;
	/* sub-devices and video nodes owned by this ISP */
	struct rkisp_isp_subdev isp_sdev;
	struct rkisp_capture_device cap_dev;
	struct rkisp_isp_stats_vdev stats_vdev;
	struct rkisp_isp_params_vdev params_vdev;
	struct rkisp_dmarx_device dmarx_dev;
	struct rkisp_csi_device csi_dev;
	struct rkisp_bridge_device br_dev;
	struct rkisp_luma_vdev luma_vdev;
	struct proc_dir_entry *procfs;	/* procfs entry created by rkisp_proc_init() */
	struct rkisp_pipeline pipe;	/* open/close/set_stream pipeline callbacks */
	enum rkisp_isp_ver isp_ver;	/* hardware revision, mirrored from hw_dev */
	/* embedded-data (metadata) capture state */
	struct rkisp_emd_data emd_data_fifo[RKISP_EMDDATA_FIFO_MAX];
	unsigned int emd_data_idx;
	unsigned int emd_vc;
	unsigned int emd_dt;
	/* vertical-sync interrupt, optionally gpio-driven */
	int vs_irq;
	struct gpio_desc *vs_irq_gpio;
	struct rkisp_hdr hdr;		/* HDR queue state (ISP v2.0/v2.1 only) */
	unsigned int isp_state;
	unsigned int isp_err_cnt;
	unsigned int isp_isr_cnt;
	unsigned int isp_inp;		/* selected input path (INP_DVP/INP_CSI/...) */
	struct mutex apilock; /* mutex to serialize the calls of stream */
	struct mutex iqlock; /* mutex to serialize the calls of iq */
	wait_queue_head_t sync_onoff;
	/* reserved memory for thunderboot fast-start */
	dma_addr_t resmem_addr;
	phys_addr_t resmem_pa;
	size_t resmem_size;
	int dev_id;			/* index into hw_dev->isp[] */
	unsigned int skip_frame;
	unsigned int irq_ends;
	unsigned int irq_ends_mask;
	bool send_fbcgain;
	struct rkisp_ispp_buf *cur_fbcgain;
	struct rkisp_buffer *cur_spbuf;
	bool is_thunderboot;		/* fast-boot handover from loader is active */
	/* readback bookkeeping */
	struct kfifo rdbk_kfifo;
	spinlock_t rdbk_lock;		/* presumably guards rdbk_kfifo/counters — confirm */
	int rdbk_cnt;
	int rdbk_cnt_x1;
	int rdbk_cnt_x2;
	int rdbk_cnt_x3;
	u32 rd_mode;
	u8 filt_state[RDBK_F_MAX];
};


/*
 * Device-tree match table for the virtual ISP nodes; the sentinel entry
 * terminates the list.  Note: the original was missing the comma between
 * the two initializer entries, which does not compile.
 */
static const struct of_device_id rkisp_plat_of_match[] = {
	{
		.compatible = "rockchip,rkisp-vir",
	},
	{},
};

/* Platform driver glue: probe/remove plus PM ops; registered elsewhere. */
struct platform_driver rkisp_plat_drv = {
	.driver = {
		   .name = DRIVER_NAME,
		   .of_match_table = of_match_ptr(rkisp_plat_of_match),
		   .pm = &rkisp_plat_pm_ops,
	},
	.probe = rkisp_plat_probe,
	.remove = rkisp_plat_remove,
};

/* Use the framework's default link notification to manage pipeline power. */
static const struct media_device_ops rkisp_media_ops = {
	.link_notify = v4l2_pipeline_link_notify,
};

int rkisp_attach_hw(struct rkisp_device *isp)
{
	struct device_node *np;
	struct platform_device *pdev;
	struct rkisp_hw_dev *hw;

	/* rockchip,hw 既rockchip,hw = <&rkisp>; */
	np = of_parse_phandle(isp->dev->of_node, "rockchip,hw", 0);

	pdev = of_find_device_by_node(np);
	of_node_put(np);

	hw = platform_get_drvdata(pdev);

	if (hw->dev_num)
		hw->is_single = false;

	isp->dev_id = hw->dev_num;
	hw->isp[hw->dev_num] = isp;
	hw->dev_num++;
	isp->hw_dev = hw;
	isp->isp_ver = hw->isp_ver;
	isp->base_addr = hw->base_addr;

	return 0;
}


static int rkisp_plat_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct v4l2_device *v4l2_dev;
	struct rkisp_device *isp_dev;
	int i, ret;

	sprintf(rkisp_version, "v%02x.%02x.%02x",
		RKISP_DRIVER_VERSION >> 16,
		(RKISP_DRIVER_VERSION & 0xff00) >> 8,
		RKISP_DRIVER_VERSION & 0x00ff);

	dev_info(dev, "rkisp driver version: %s\n", rkisp_version);

	isp_dev = devm_kzalloc(dev, sizeof(*isp_dev), GFP_KERNEL);

	isp_dev->sw_base_addr = devm_kzalloc(dev, RKISP_ISP_SW_MAX_SIZE, GFP_KERNEL);

	dev_set_drvdata(dev, isp_dev);
	isp_dev->dev = dev;

	ret = rkisp_vs_irq_parse(dev);

	ret = rkisp_attach_hw(isp_dev);

	sprintf(isp_dev->media_dev.model, "%s%d", DRIVER_NAME, isp_dev->dev_id);

	if (isp_dev->hw_dev->is_thunderboot) {
		ret = rkisp_get_reserved_mem(isp_dev);
		if (ret)
			return ret;
	}

	mutex_init(&isp_dev->apilock);
	mutex_init(&isp_dev->iqlock);
	atomic_set(&isp_dev->pipe.power_cnt, 0);
	atomic_set(&isp_dev->pipe.stream_cnt, 0);
	init_waitqueue_head(&isp_dev->sync_onoff);
	isp_dev->pipe.open = rkisp_pipeline_open;
	isp_dev->pipe.close = rkisp_pipeline_close;
	isp_dev->pipe.set_stream = rkisp_pipeline_set_stream;

	if (isp_dev->isp_ver == ISP_V20 || isp_dev->isp_ver == ISP_V21) {
		atomic_set(&isp_dev->hdr.refcnt, 0);
		for (i = 0; i < HDR_DMA_MAX; i++) {
			INIT_LIST_HEAD(&isp_dev->hdr.q_tx[i]);
			INIT_LIST_HEAD(&isp_dev->hdr.q_rx[i]);
		}
	}

	strscpy(isp_dev->name, dev_name(dev), sizeof(isp_dev->name));
	strscpy(isp_dev->media_dev.driver_name, isp_dev->name,
		sizeof(isp_dev->media_dev.driver_name));

	isp_dev->media_dev.dev = dev;
	isp_dev->media_dev.ops = &rkisp_media_ops;

	v4l2_dev = &isp_dev->v4l2_dev;
	v4l2_dev->mdev = &isp_dev->media_dev;
	strlcpy(v4l2_dev->name, isp_dev->name, sizeof(v4l2_dev->name));
	v4l2_ctrl_handler_init(&isp_dev->ctrl_handler, 5);
	v4l2_dev->ctrl_handler = &isp_dev->ctrl_handler;

	ret = v4l2_device_register(isp_dev->dev, &isp_dev->v4l2_dev);


	media_device_init(&isp_dev->media_dev);
	ret = media_device_register(&isp_dev->media_dev);

	/* create & register platefom subdev (from of_node) */
	ret = rkisp_register_platform_subdevs(isp_dev);

	rkisp_wait_line = 0;
	of_property_read_u32(dev->of_node, "wait-line", &rkisp_wait_line);

	rkisp_proc_init(isp_dev);

	mutex_lock(&rkisp_dev_mutex);
	list_add_tail(&isp_dev->list, &rkisp_device_list);
	mutex_unlock(&rkisp_dev_mutex);

	pm_runtime_enable(dev);
	if (isp_dev->hw_dev->is_thunderboot && isp_dev->is_thunderboot)
		pm_runtime_get_noresume(isp_dev->hw_dev->dev);
	return 0;
}

/*
 * Register every sub-device and video node of this ISP, then register the
 * async notifier.  If the notifier fails, fall back to registering the
 * subdev nodes directly so dmarx can still be used for image input.
 *
 * Each step is checked and unwound in reverse order on failure; the
 * original version ignored all return values and jumped to a label that
 * did not exist.
 */
static int rkisp_register_platform_subdevs(struct rkisp_device *dev)
{
	int ret;

	ret = rkisp_register_isp_subdev(dev, &dev->v4l2_dev);
	if (ret < 0)
		return ret;

	ret = rkisp_register_csi_subdev(dev, &dev->v4l2_dev);
	if (ret < 0)
		goto err_unreg_isp_subdev;

	/* rkisp_register_bridge_subdev may be a no-op on some platforms */
	ret = rkisp_register_bridge_subdev(dev, &dev->v4l2_dev);
	if (ret < 0)
		goto err_unreg_csi_subdev;

	ret = rkisp_register_stream_vdevs(dev);
	if (ret < 0)
		goto err_unreg_bridge_subdev;

	ret = rkisp_register_dmarx_vdev(dev);
	if (ret < 0)
		goto err_unreg_stream_vdev;

	ret = rkisp_register_stats_vdev(&dev->stats_vdev, &dev->v4l2_dev, dev);
	if (ret < 0)
		goto err_unreg_dmarx_vdev;

	ret = rkisp_register_params_vdev(&dev->params_vdev, &dev->v4l2_dev,
					 dev);
	if (ret < 0)
		goto err_unreg_stats_vdev;

	ret = rkisp_register_luma_vdev(&dev->luma_vdev, &dev->v4l2_dev, dev);
	if (ret < 0)
		goto err_unreg_params_vdev;

	ret = isp_subdev_notifier(dev);
	if (ret < 0) {
		v4l2_err(&dev->v4l2_dev,
			 "Failed to register subdev notifier(%d)\n", ret);
		/* maybe use dmarx to input image */
		ret = v4l2_device_register_subdev_nodes(&dev->v4l2_dev);
		if (ret == 0)
			return 0;
		goto err_unreg_luma_vdev;
	}

	return 0;

err_unreg_luma_vdev:
	rkisp_unregister_luma_vdev(&dev->luma_vdev);
err_unreg_params_vdev:
	rkisp_unregister_params_vdev(&dev->params_vdev);
err_unreg_stats_vdev:
	rkisp_unregister_stats_vdev(&dev->stats_vdev);
err_unreg_dmarx_vdev:
	rkisp_unregister_dmarx_vdev(dev);
err_unreg_stream_vdev:
	rkisp_unregister_stream_vdevs(dev);
err_unreg_bridge_subdev:
	rkisp_unregister_bridge_subdev(dev);
err_unreg_csi_subdev:
	rkisp_unregister_csi_subdev(dev);
err_unreg_isp_subdev:
	rkisp_unregister_isp_subdev(dev);
	return ret;
}



/* Async notifier callbacks: record bound subdevs, then create media links. */
static const struct v4l2_async_notifier_operations subdev_notifier_ops = {
	.bound = subdev_notifier_bound,
	.complete = subdev_notifier_complete,
};


/*
 * Parse the fwnode endpoints of this ISP's device node and register an
 * async notifier for the sub-devices (dphys/sensors) referenced there.
 *
 * Returns 0 on success, -ENODEV when no endpoint exists, or a negative
 * errno from parsing or registration (the original ignored parse errors).
 */
static int isp_subdev_notifier(struct rkisp_device *isp_dev)
{
	struct v4l2_async_notifier *ntf = &isp_dev->notifier;
	struct device *dev = isp_dev->dev;
	int ret;

	ret = v4l2_async_notifier_parse_fwnode_endpoints(
		dev, ntf, sizeof(struct rkisp_async_subdev),
		rkisp_fwnode_parse);
	if (ret < 0)
		return ret;

	if (!ntf->num_subdevs)
		return -ENODEV;	/* no endpoint */

	ntf->ops = &subdev_notifier_ops;

	return v4l2_async_notifier_register(&isp_dev->v4l2_dev, ntf);
}


static int subdev_notifier_bound(struct v4l2_async_notifier *notifier,
				 struct v4l2_subdev *subdev,
				 struct v4l2_async_subdev *asd)
{
	struct rkisp_device *isp_dev = container_of(notifier, struct rkisp_device, notifier);
	struct rkisp_async_subdev *s_asd = container_of(asd, struct rkisp_async_subdev, asd);

	if (isp_dev->num_sensors == ARRAY_SIZE(isp_dev->sensors))
		return -EBUSY;

	/* 这里的sd是csi2-dphy */
	isp_dev->sensors[isp_dev->num_sensors].mbus = s_asd->mbus;
	isp_dev->sensors[isp_dev->num_sensors].sd = subdev;
	++isp_dev->num_sensors;

	v4l2_dbg(1, rkisp_debug, subdev, "Async registered subdev\n");

	return 0;
}

/* Get sensor by enabled media link */
static struct v4l2_subdev *get_remote_sensor(struct v4l2_subdev *sd)
{
	struct media_pad *local, *remote;
	struct media_entity *sensor_me;
	struct v4l2_subdev *remote_sd = NULL;

	local = &sd->entity.pads[0];
	if (!local)
		goto end;
	remote = rkisp_media_entity_remote_pad(local);
	if (!remote)
		goto end;

	//skip csi subdev
	/* 如果是csi2    subdev */
	if (!strcmp(remote->entity->name, CSI_DEV_NAME)) {
		/* csi的sink端是sensor */
		local = &remote->entity->pads[CSI_SINK];
		if (!local)
			goto end;
		remote = media_entity_remote_pad(local);
		if (!remote)
			goto end;
	}

	sensor_me = remote->entity;
	remote_sd = media_entity_to_v4l2_subdev(sensor_me);
end:
	return remote_sd;
}


/*
 * Called once every awaited sub-device has bound: create the media links,
 * expose the subdev nodes and program a default pipeline format.
 *
 * The original overwrote @ret at every step, silently discarding failures,
 * and declared an "unlock" label that was never jumped to; each step is now
 * checked and bails out under the graph mutex.
 */
static int subdev_notifier_complete(struct v4l2_async_notifier *notifier)
{
	struct rkisp_device *dev;
	int ret;

	dev = container_of(notifier, struct rkisp_device, notifier);

	mutex_lock(&dev->media_dev.graph_mutex);

	ret = rkisp_create_links(dev);
	if (ret < 0)
		goto unlock;

	ret = v4l2_device_register_subdev_nodes(&dev->v4l2_dev);
	if (ret < 0)
		goto unlock;

	ret = rkisp_update_sensor_info(dev);
	if (ret < 0)
		goto unlock;

	ret = _set_pipeline_default_fmt(dev);
	if (ret < 0)
		goto unlock;

	v4l2_info(&dev->v4l2_dev, "Async subdev notifier completed\n");

unlock:
	mutex_unlock(&dev->media_dev.graph_mutex);
	return ret;
}

/*
 * Create media links from every discovered sensor (parallel sensor, cif,
 * lvds phy or mipi dphy via csi) to the ISP entity, recording the selected
 * input path in dev->isp_inp.  Only the first sensor's link is enabled.
 *
 * Fix: the link flags were held in a bool and passed where a u32 flags
 * argument is expected — this only worked because MEDIA_LNK_FL_ENABLED
 * happens to be bit 0.  Use a u32 so any flag value survives.
 *
 * Returns 0 on success or the last media_create_pad_link() error.
 */
static int rkisp_create_links(struct rkisp_device *dev)
{
	unsigned int s, pad;
	int ret = 0;

	/* sensor links (or mipi-phy) */
	for (s = 0; s < dev->num_sensors; ++s) {
		struct rkisp_sensor_info *sensor = &dev->sensors[s];
		u32 type = sensor->sd->entity.function;
		/* only the first sensor gets an enabled link */
		u32 flags = s ? 0 : MEDIA_LNK_FL_ENABLED;

		/* locate the sensor's source pad */
		for (pad = 0; pad < sensor->sd->entity.num_pads; pad++)
			if (sensor->sd->entity.pads[pad].flags &
			    MEDIA_PAD_FL_SOURCE)
				break;

		if (pad == sensor->sd->entity.num_pads) {
			dev_err(dev->dev, "failed to find src pad for %s\n",
				sensor->sd->name);
			return -ENXIO;
		}

		if (type == MEDIA_ENT_F_CAM_SENSOR) {
			/* parallel sensor link -> isp */
			dev->isp_inp = INP_DVP;
			ret = media_create_pad_link(&sensor->sd->entity, pad,
				&dev->isp_sdev.sd.entity,
				RKISP_ISP_PAD_SINK, flags);
		} else if (type == MEDIA_ENT_F_PROC_VIDEO_COMPOSER) {
			/* cif link -> isp */
			dev->isp_inp = INP_CIF;
			ret = media_create_pad_link(&sensor->sd->entity, pad,
				&dev->isp_sdev.sd.entity,
				RKISP_ISP_PAD_SINK, flags);
		} else {
			v4l2_subdev_call(sensor->sd, video, g_mbus_config,
					 &sensor->mbus);
			if (sensor->mbus.type == V4L2_MBUS_CCP2) {
				/* mipi-phy lvds link -> isp */
				dev->isp_inp = INP_LVDS;
				ret = media_create_pad_link(
					&sensor->sd->entity, pad,
					&dev->isp_sdev.sd.entity,
					RKISP_ISP_PAD_SINK, flags);
			} else {
				/* mipi-phy link -> csi -> isp */
				dev->isp_inp = INP_CSI;
				/* csi2-dphy -> csi */
				ret = media_create_pad_link(
					&sensor->sd->entity, pad,
					&dev->csi_dev.sd.entity,
					CSI_SINK, flags);
				/* csi -> isp */
				ret |= media_create_pad_link(
					&dev->csi_dev.sd.entity, CSI_SRC_CH0,
					&dev->isp_sdev.sd.entity,
					RKISP_ISP_PAD_SINK, flags);

				dev->csi_dev.sink[0].linked = flags;
				dev->csi_dev.sink[0].index = BIT(0);
			}
		}
		if (ret)
			dev_err(dev->dev, "failed to create link for %s\n",
				sensor->sd->name);
	}
	return ret;
}


/*
 * Endpoint parse callback for the async notifier: capture the media bus
 * flags/type for parallel (BT.656 / raw parallel) endpoints only.
 */
static int rkisp_fwnode_parse(struct device *dev,
			       struct v4l2_fwnode_endpoint *vep,
			       struct v4l2_async_subdev *asd)
{
	struct rkisp_async_subdev *rk_asd =
			container_of(asd, struct rkisp_async_subdev, asd);
	struct v4l2_fwnode_bus_parallel *bus = &vep->bus.parallel;

	/*
	 * A MIPI sensor is linked through a mipi dphy, so its media bus
	 * config cannot be obtained here — leave it for later.
	 */
	if (vep->bus_type != V4L2_MBUS_BT656 &&
	    vep->bus_type != V4L2_MBUS_PARALLEL)
		return 0;

	rk_asd->mbus.flags = bus->flags;
	rk_asd->mbus.type = vep->bus_type;

	return 0;
}


/*
 * Recursively drop every sub-device still waiting on @notifier (and on the
 * notifiers of its already-bound subdevs).  Returns the number cleared.
 */
static int __v4l2_async_notifier_clr_unready_dev(
	struct v4l2_async_notifier *notifier)
{
	struct v4l2_subdev *sd, *tmp;
	int clr_num = 0;

	list_for_each_entry_safe(sd, tmp, &notifier->done, async_list) {
		/* find the notifier this bound sd itself owns, if any */
		struct v4l2_async_notifier *subdev_notifier =
			v4l2_async_find_subdev_notifier(sd);

		if (subdev_notifier) /* recurse into sub-notifiers */
			clr_num += __v4l2_async_notifier_clr_unready_dev(
					subdev_notifier);
	}

	/* If sds are still on notifier->waiting, remove them. */
	/* NOTE(review): v4l2_async_notifier_clr_unready_dev does not exist
	 * upstream — it appears to be a Rockchip addition.  Iterating
	 * notifier->waiting with sd->async_list looks wrong: nothing in the
	 * v4l2-async code ever puts sd->async_list on the waiting list
	 * (waiting holds struct v4l2_async_subdev via asd->list).  It happens
	 * to work in practice, but the arguably correct form would be:
	 * struct v4l2_async_subdev *asd, *asd_tmp;
	 * list_for_each_entry_safe(asd, asd_tmp, &notifier->waiting, list) {
	 *		list_del_init(&asd->list);
	 *		clr_num++;
	 * }
	 */
	list_for_each_entry_safe(sd, tmp, &notifier->waiting, async_list) {
		list_del_init(&sd->async_list);
		sd->asd = NULL;
		sd->dev = NULL;
		clr_num++;
	}

	return clr_num;
}


/*
 * Walk up to the root notifier, clear every sub-device that never probed,
 * then try to complete the notifier so the pipeline can come up with the
 * devices that did show up.
 *
 * Fix: removed leftover debugging ("wyj" pr_info and dump_stack()) that
 * spammed the log on every call.
 *
 * Returns 0, or a negative errno from v4l2_async_notifier_try_complete().
 */
int v4l2_async_notifier_clr_unready_dev(struct v4l2_async_notifier *notifier)
{
	int ret = 0;
	int clr_num = 0;

	mutex_lock(&list_lock);

	/* always operate on the root notifier */
	while (notifier->parent)
		notifier = notifier->parent;

	if (!notifier->v4l2_dev)
		goto out;

	clr_num = __v4l2_async_notifier_clr_unready_dev(notifier);
	dev_info(notifier->v4l2_dev->dev,
		 "clear unready subdev num: %d\n", clr_num);

	/* something was cleared: the root notifier may now be complete */
	if (clr_num > 0)
		ret = v4l2_async_notifier_try_complete(notifier);

out:
	mutex_unlock(&list_lock);

	return ret;
}



/*
 * For every registered ISP device, clear async sub-devices that never
 * probed so the pipeline can complete without them.
 */
static int __maybe_unused __rkisp_clr_unready_dev(void)
{
	struct rkisp_device *isp_dev;

	mutex_lock(&rkisp_dev_mutex);
	list_for_each_entry(isp_dev, &rkisp_device_list, list)
		/* isp_dev->notifier is the root notifier */
		v4l2_async_notifier_clr_unready_dev(&isp_dev->notifier);
	mutex_unlock(&rkisp_dev_mutex);

	return 0;
}


#ifndef MODULE
/* Built-in only: drop never-probed subdevs once all initcalls have run. */
static int __init rkisp_clr_unready_dev(void)
{
	__rkisp_clr_unready_dev();

	return 0;
}
/* late_initcall_sync runs after every module_init has completed */
late_initcall_sync(rkisp_clr_unready_dev);

