/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/* Kvaser PCICan-4HS CAN FD specific stuff
 * 
 * (c) 2006-2021 Heinz-Jürgen Oertel hj.oertel@t-online.de
 * 
 * Code is based heavily on the Kvaser SocketCAN driver pciefd.c  
 */


/* Some commands to the CAN controller need time to complete.  Linux
 * wait loops are used in those cases:
 *
 * unsigned long __sched wait_for_completion_timeout(struct completion *x,
 *						     unsigned long timeout);
 *     x        holds the state of this particular completion
 *     timeout  timeout value in jiffies
 * The timeout is always the same, 1000 ms:
 * #define KVASER_PCIEFD_WAIT_TIMEOUT msecs_to_jiffies(1000)
 * This waits either for the completion of a specific task to be signaled
 * or for the specified timeout to expire.  The timeout is in jiffies.
 * It is not interruptible.
 * Two different wait states are used:
 * struct completion	start_comp,
 * 			flush_comp;
 *
 * Test whether a completion has any waiters:
 * bool completion_done(struct completion *x);
 *     0 if there are waiters (wait_for_completion in progress)
 *     1 if there are no waiters
 *
 * */

/* use it for pr_info() and consorts */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/can/dev.h>		/* SocketCAN */
#include <linux/timer.h>
// #include <linux/netdevice.h>
#include <linux/crc32.h>
#include <linux/iopoll.h>
#include "linux/delay.h"

#include <linux/pci.h>
#include "defs.h"

# ifndef CONFIG_PCI
#   error "trying to compile a PCI driver for a kernel without CONFIG_PCI"
# endif


/* used for storing the global pci register address */
/* one element more than needed for marking the end */
struct	pci_dev *can_pcidev[MAX_CHANNELS + 1] = { NULL };


/* PCI Bridge AMCC 5920 registers */
#define S5920_OMB    0x0C
#define S5920_IMB    0x1C
#define S5920_MBEF   0x34
#define S5920_INTCSR 0x38
#define S5920_RCR    0x3C
#define S5920_PTCR   0x60

#define INTCSR_ADDON_INTENABLE_M        0x2000
#define INTCSR_INTERRUPT_ASSERTED_M     0x800000



#define PCIEFD_VENDOR (0x1a07)
#define PCIEFD_4HS_ID     (0x000d)
#define PCIEFD_2HS_ID     (0x000e)
#define PCIEFD_HS_ID      (0x000f)
#define MINIPCIEFD_HS_ID  (0x0010)
#define MINIPCIEFD_2HS_ID (0x0011)


#define KVASER_PCIEFD_VENDOR 0x1a07
#define KVASER_PCIEFD_4HS_ID 0x0d
#define KVASER_PCIEFD_2HS_ID 0x0e
#define KVASER_PCIEFD_HS_ID 0x0f
#define KVASER_PCIEFD_MINIPCIE_HS_ID 0x10
#define KVASER_PCIEFD_MINIPCIE_2HS_ID 0x11

/* PCIe IRQ registers */
#define KVASER_PCIEFD_IRQ_REG 0x40
#define KVASER_PCIEFD_IEN_REG 0x50
/* DMA map */
#define KVASER_PCIEFD_DMA_MAP_BASE 0x1000
/* Loopback control register */
#define KVASER_PCIEFD_LOOP_REG 0x1f000
/* System identification and information registers */
#define KVASER_PCIEFD_SYSID_BASE 0x1f020
#define KVASER_PCIEFD_SYSID_VERSION_REG (KVASER_PCIEFD_SYSID_BASE + 0x8)
#define KVASER_PCIEFD_SYSID_CANFREQ_REG (KVASER_PCIEFD_SYSID_BASE + 0xc)
#define KVASER_PCIEFD_SYSID_BUSFREQ_REG (KVASER_PCIEFD_SYSID_BASE + 0x10)
#define KVASER_PCIEFD_SYSID_BUILD_REG (KVASER_PCIEFD_SYSID_BASE + 0x14)
/* Shared receive buffer registers */
#define KVASER_PCIEFD_SRB_BASE 0x1f200
/* The KCAN manual locates these registers at 0x80, so here they are
 * multiplied with 4: 0x80 * 4 = 0x200, 0x81 *4 = 0x204 ... */
#define KVASER_PCIEFD_SRB_CMD_REG (KVASER_PCIEFD_SRB_BASE + 0x200) /* Command */ 
#define KVASER_PCIEFD_SRB_IEN_REG (KVASER_PCIEFD_SRB_BASE + 0x204) /* Interrupt Enable */
#define KVASER_PCIEFD_SRB_IRQ_REG (KVASER_PCIEFD_SRB_BASE + 0x20c) /* Interrupt Request / Interrupt Clear */
#define KVASER_PCIEFD_SRB_STAT_REG (KVASER_PCIEFD_SRB_BASE + 0x210)/* Status */
#define KVASER_PCIEFD_SRB_CTRL_REG (KVASER_PCIEFD_SRB_BASE + 0x218)/* Control */ 
/* EPCS flash controller registers */
#define KVASER_PCIEFD_SPI_BASE 0x1fc00
#define KVASER_PCIEFD_SPI_RX_REG KVASER_PCIEFD_SPI_BASE
#define KVASER_PCIEFD_SPI_TX_REG (KVASER_PCIEFD_SPI_BASE + 0x4)
#define KVASER_PCIEFD_SPI_STATUS_REG (KVASER_PCIEFD_SPI_BASE + 0x8)
#define KVASER_PCIEFD_SPI_CTRL_REG (KVASER_PCIEFD_SPI_BASE + 0xc)
#define KVASER_PCIEFD_SPI_SSEL_REG (KVASER_PCIEFD_SPI_BASE + 0x14)

#define KVASER_PCIEFD_IRQ_ALL_MSK 0x1f
#define KVASER_PCIEFD_IRQ_SRB BIT(4) /* Shared Receive Buffer */

#define KVASER_PCIEFD_SYSID_NRCHAN_SHIFT 24
#define KVASER_PCIEFD_SYSID_MAJOR_VER_SHIFT 16
#define KVASER_PCIEFD_SYSID_BUILD_VER_SHIFT 1

/* Reset DMA buffer 0, 1 and FIFO offset */
#define KVASER_PCIEFD_SRB_CMD_RDB0 BIT(4)
#define KVASER_PCIEFD_SRB_CMD_RDB1 BIT(5)
#define KVASER_PCIEFD_SRB_CMD_FOR BIT(0)

/* DMA packet done, buffer 0 and 1 */
#define KVASER_PCIEFD_SRB_IRQ_DPD0 BIT(8)
#define KVASER_PCIEFD_SRB_IRQ_DPD1 BIT(9)
/* DMA overflow, buffer 0 and 1 */
#define KVASER_PCIEFD_SRB_IRQ_DOF0 BIT(10)
#define KVASER_PCIEFD_SRB_IRQ_DOF1 BIT(11)
/* DMA underflow, buffer 0 and 1 */
#define KVASER_PCIEFD_SRB_IRQ_DUF0 BIT(12)
#define KVASER_PCIEFD_SRB_IRQ_DUF1 BIT(13)

/* DMA idle */
#define KVASER_PCIEFD_SRB_STAT_DI BIT(15)
/* DMA support */
#define KVASER_PCIEFD_SRB_STAT_DMA BIT(24)

/* DMA Enable */
#define KVASER_PCIEFD_SRB_CTRL_DMA_ENABLE BIT(0)

/* EPCS flash controller definitions */
#define KVASER_PCIEFD_CFG_IMG_SZ (64 * 1024)
#define KVASER_PCIEFD_CFG_IMG_OFFSET (31 * 65536L)
#define KVASER_PCIEFD_CFG_MAX_PARAMS 256
#define KVASER_PCIEFD_CFG_MAGIC 0xcafef00d
#define KVASER_PCIEFD_CFG_PARAM_MAX_SZ 24
#define KVASER_PCIEFD_CFG_SYS_VER 1
#define KVASER_PCIEFD_CFG_PARAM_NR_CHAN 130
#define KVASER_PCIEFD_SPI_TMT BIT(5)
#define KVASER_PCIEFD_SPI_TRDY BIT(6)
#define KVASER_PCIEFD_SPI_RRDY BIT(7)
#define KVASER_PCIEFD_FLASH_ID_EPCS16 0x14
/* Commands for controlling the onboard flash */
#define KVASER_PCIEFD_FLASH_RES_CMD 0xab
#define KVASER_PCIEFD_FLASH_READ_CMD 0x3
#define KVASER_PCIEFD_FLASH_STATUS_CMD 0x5

/* Kvaser KCAN definitions */
#define KVASER_PCIEFD_KCAN_CTRL_EFLUSH (4 << 29)
#define KVASER_PCIEFD_KCAN_CTRL_EFRAME (5 << 29)

#define KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT 16
/* Request status packet */
#define KVASER_PCIEFD_KCAN_CMD_SRQ BIT(0)
/* Abort, flush and reset */
#define KVASER_PCIEFD_KCAN_CMD_AT BIT(1)

#if 0
/* include/uapi/linux/can/netlink.h
 * CAN bus error counters
 */
struct can_berr_counter {
	__u16 txerr;
	__u16 rxerr;
};
#endif

struct kvaser_pciefd_cfg_param {
	__le32 magic;
	__le32 nr;
	__le32 len;
	u8 data[KVASER_PCIEFD_CFG_PARAM_MAX_SZ];   /* 24 */
};

struct kvaser_pciefd_cfg_img {
	__le32 version;
	__le32 magic;
	__le32 crc;					/* 256 */
	struct kvaser_pciefd_cfg_param params[KVASER_PCIEFD_CFG_MAX_PARAMS];
};


#if 0  /* not used */
static struct pci_device_id id_table[] = {
  {
    .vendor    = PCIEFD_VENDOR,
    .device    = PCIEFD_4HS_ID,
    .subvendor = PCI_ANY_ID,
    .subdevice = PCI_ANY_ID
  },
 {0,},
};
#endif


/* SocketCAN defined functions */
/* Onboard flash memory functions */
/* Poll the SPI status register until any bit of @msk is set.
 *
 * Polls back-to-back (no sleep) for at most 10 us.
 * Returns 0 on success or a negative errno on timeout.
 */
static int kvaser_pciefd_spi_wait_loop(struct kvaser_pciefd *pcie, int msk)
{
	u32 status;

	return readl_poll_timeout(pcie->reg_base + KVASER_PCIEFD_SPI_STATUS_REG,
				  status, status & msk, 0, 10);
}

/* Run one command/response transaction on the EPCS flash SPI controller.
 *
 * @tx:     command bytes to clock out
 * @tx_len: number of command bytes
 * @rx:     buffer receiving the response bytes (may alias @tx)
 * @rx_len: number of response bytes to clock in
 *
 * Each byte is shifted out individually (dummy 0x00 bytes during the
 * receive phase) and the shift register is drained after every byte, so
 * the controller never overruns.  Returns 0 on success or -EIO when any
 * status poll times out or the transfer did not complete.
 */
static int kvaser_pciefd_spi_cmd(struct kvaser_pciefd *pcie, const u8 *tx,
				 u32 tx_len, u8 *rx, u32 rx_len)
{
	int c;
// printk(" spi_cmd 01\n");

	/* Select slave 0 and assert SS for the whole transfer (SSO bit),
	 * then drain any stale byte from the receive register. */
	iowrite32(BIT(0), pcie->reg_base + KVASER_PCIEFD_SPI_SSEL_REG);
	iowrite32(BIT(10), pcie->reg_base + KVASER_PCIEFD_SPI_CTRL_REG);
	ioread32(pcie->reg_base + KVASER_PCIEFD_SPI_RX_REG);

// printk(" spi_cmd 02\n");
	/* Command phase: clock out each tx byte, discard the reply byte. */
	c = tx_len;
	while (c--) {
		if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_TRDY))
			return -EIO;

		iowrite32(*tx++, pcie->reg_base + KVASER_PCIEFD_SPI_TX_REG);

		if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_RRDY))
			return -EIO;

		ioread32(pcie->reg_base + KVASER_PCIEFD_SPI_RX_REG);
	}

// printk(" spi_cmd 03\n");
	/* Response phase: clock out dummy bytes, keep the reply bytes. */
	c = rx_len;
	while (c-- > 0) {
		if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_TRDY))
			return -EIO;

		iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SPI_TX_REG);

		if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_RRDY))
			return -EIO;

		*rx++ = ioread32(pcie->reg_base + KVASER_PCIEFD_SPI_RX_REG);
	}
// printk(" spi_cmd 04\n");

	/* Wait until the shift register is empty (transfer finished). */
	if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_TMT))
		return -EIO;
// printk(" spi_cmd 05\n");

	iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SPI_CTRL_REG);

	/* Sanity check: the rx loop counter runs down to -1 when the loop
	 * drained every requested byte; any other value means it exited
	 * abnormally. */
	if (c != -1) {
		dev_err(&pcie->pci->dev, "Flash SPI transfer failed\n");
		return -EIO;
	}

	return 0;
}

/* read the FLASH image */
static int kvaser_pciefd_cfg_read_and_verify(struct kvaser_pciefd *pcie,
					     struct kvaser_pciefd_cfg_img *img)
{
	int offset = KVASER_PCIEFD_CFG_IMG_OFFSET;
	int res, crc;
	u8 *crc_buff;

	u8 cmd[] = {
		KVASER_PCIEFD_FLASH_READ_CMD,
		(u8)((offset >> 16) & 0xff),
		(u8)((offset >> 8) & 0xff),
		(u8)(offset & 0xff)
	};
// printk(" cfg_read_and_verify 01\n");

	/* returns FLASH content in img
	 * KVASER_PCIEFD_CFG_IMG_SZ (64 * 1024) byte
	 * */
	res = kvaser_pciefd_spi_cmd(pcie, cmd, ARRAY_SIZE(cmd), (u8 *)img,
				    KVASER_PCIEFD_CFG_IMG_SZ);
	if (res)
		return res;
// printk(" cfg_read_and_verify 02, got 64K FLASH Image\n");

	crc_buff = (u8 *)img->params;

	if (le32_to_cpu(img->version) != KVASER_PCIEFD_CFG_SYS_VER) {
		dev_err(&pcie->pci->dev,
			"Config flash corrupted, version number is wrong\n");
		return -ENODEV;
	}

	if (le32_to_cpu(img->magic) != KVASER_PCIEFD_CFG_MAGIC) {
		dev_err(&pcie->pci->dev,
			"Config flash corrupted, magic number is wrong\n");
		return -ENODEV;
	}

	crc = ~crc32_be(0xffffffff, crc_buff, sizeof(img->params));
	if (le32_to_cpu(img->crc) != crc) {
		dev_err(&pcie->pci->dev,
			"Stored CRC does not match flash image contents\n");
		return -EIO;
	}

	return 0;
}

static void kvaser_pciefd_cfg_read_params(struct kvaser_pciefd *pcie,
					  struct kvaser_pciefd_cfg_img *img)
{
	struct kvaser_pciefd_cfg_param *param;
// printk("pciefd_cfg_read_params()\n");
	param = &img->params[KVASER_PCIEFD_CFG_PARAM_NR_CHAN];
/*
	magic sollte 
	KVASER_PCIEFD_CFG_MAGIC 0xcafef00d sein
printk("magic %0X, nr %d, len %d, data[0]= %d\n",
 	param->magic, 
 	param->nr,
 	param->len,
 	param->data[0] );
*/
	memcpy(&pcie->nr_channels, param->data, le32_to_cpu(param->len));
}

/* Probe the onboard EPCS16 flash, read the configuration image and
 * store the board's channel count in pcie->nr_channels.
 *
 * Returns 0 on success, -EIO/-ENODEV on flash problems, -ENOMEM when
 * the image buffer cannot be allocated, or the verification error from
 * kvaser_pciefd_cfg_read_and_verify().
 */
static int kvaser_pciefd_read_cfg(struct kvaser_pciefd *pcie)
{
	int minor = -1;		/* needed for the DBGIN()/DBGOUT() macros */
	int res;
	struct kvaser_pciefd_cfg_img *img;
	/* Read electronic signature (flash id in cmd[0] afterwards) */
	u8 cmd[] = {KVASER_PCIEFD_FLASH_RES_CMD, 0, 0, 0};

	DBGIN();

	res = kvaser_pciefd_spi_cmd(pcie, cmd, ARRAY_SIZE(cmd), cmd, 1);
	if (res)
		return -EIO;

	img = kmalloc(KVASER_PCIEFD_CFG_IMG_SZ, GFP_KERNEL);
	if (!img)
		return -ENOMEM;

	if (cmd[0] != KVASER_PCIEFD_FLASH_ID_EPCS16) {
		dev_err(&pcie->pci->dev,
			"Flash id is 0x%x instead of expected EPCS16 (0x%x)\n",
			cmd[0], KVASER_PCIEFD_FLASH_ID_EPCS16);

		res = -ENODEV;
		goto image_free;
	}

	/* Read the flash status register.  WIP (bit 0) must be clear:
	 * this driver never writes to the flash. */
	cmd[0] = KVASER_PCIEFD_FLASH_STATUS_CMD;
			    /*  (*pcie, *tx, tx_len, *rx, rx_len) */
	res = kvaser_pciefd_spi_cmd(pcie, cmd, 1, cmd, 1);
	if (res) { /* spi error */
		goto image_free;
	} else if (cmd[0] & 1) {
		res = -EIO;
		/* No write is ever done, the WIP should never be set */
		dev_err(&pcie->pci->dev, "Unexpected WIP bit set in flash\n");
		goto image_free;
	}

	/* Read and verify the FLASH image.  Propagate the specific error
	 * code instead of squashing everything to -EIO. */
	res = kvaser_pciefd_cfg_read_and_verify(pcie, img);
	if (res)
		goto image_free;

	/* result is stored in pcie->nr_channels */
	kvaser_pciefd_cfg_read_params(pcie, img);

image_free:
	kfree(img);
	return res;
}

/* Ask the KCAN controller to emit a status packet on the Rx path.
 *
 * Each command carries a fresh sequence number so the resulting status
 * packet can be matched to this request.
 */
static void kvaser_pciefd_request_status(struct kvaser_pciefd_can *can)
{
	u32 cmd = KVASER_PCIEFD_KCAN_CMD_SRQ |
		  (++can->cmd_seq << KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT);

	iowrite32(cmd, can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG);
}

/* Set the Error Packet Enable (EPEN) bit in the KCAN mode register if
 * it is not already set; serialized by can->lock.
 */
static void kvaser_pciefd_enable_err_gen(struct kvaser_pciefd_can *can)
{
	unsigned long flags;
	u32 mode;

	spin_lock_irqsave(&can->lock, flags);
	mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	if (!(mode & KVASER_PCIEFD_KCAN_MODE_EPEN))
		iowrite32(mode | KVASER_PCIEFD_KCAN_MODE_EPEN,
			  can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	spin_unlock_irqrestore(&can->lock, flags);
}

/* Clear the Error Packet Enable (EPEN) bit in the KCAN mode register;
 * serialized by can->lock.
 */
static void kvaser_pciefd_disable_err_gen(struct kvaser_pciefd_can *can)
{
	unsigned long flags;
	u32 mode;

	spin_lock_irqsave(&can->lock, flags);
	mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	iowrite32(mode & ~KVASER_PCIEFD_KCAN_MODE_EPEN,
		  can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	spin_unlock_irqrestore(&can->lock, flags);
}

#if 0
static int kvaser_pciefd_set_tx_irq(struct kvaser_pciefd_can *can)
{
	u32 msk;
printk("set tx irq mask\n");

	msk = KVASER_PCIEFD_KCAN_IRQ_TE | /* KVASER_PCIEFD_KCAN_IRQ_ROF |*/ 
	      KVASER_PCIEFD_KCAN_IRQ_TOF | KVASER_PCIEFD_KCAN_IRQ_ABD |
	      KVASER_PCIEFD_KCAN_IRQ_TAE | KVASER_PCIEFD_KCAN_IRQ_TAL |
	      KVASER_PCIEFD_KCAN_IRQ_FDIC | KVASER_PCIEFD_KCAN_IRQ_BPP |
	      KVASER_PCIEFD_KCAN_IRQ_TAR | KVASER_PCIEFD_KCAN_IRQ_TFD;

	iowrite32(msk, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);

	return 0;
}
#endif

/* Program the KCAN mode register for one channel.
 *
 * The first part derives a mode word from can->can.ctrlmode (CAN FD vs
 * classic CAN, ISO/non-ISO, listen-only, error-packet/ACK handling).
 *
 * NOTE(review): the computed value is then discarded -- `mode` is
 * hard-overwritten with CCM | APT | DWH just before the register write,
 * so everything above that override is currently dead code.  This looks
 * like a debugging leftover; confirm intent before removing either part.
 */
void kvaser_pciefd_setup_controller(struct kvaser_pciefd_can *can)
{
	u32 mode;
	unsigned long irq;

	printk("kvaser_pciefd_setup_controller()\n");

	spin_lock_irqsave(&can->lock, irq);

	mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);

/* can->can is a socketcan struct, not used in can4linux */
// printk(" can.ctrlmode = %0X\n", can->can.ctrlmode);

	if (can->can.ctrlmode & CAN_CTRLMODE_FD) {
	        /* reset classic CAN mode: set CAN FD mode and handle ISO/non
		 * ISO */
		mode &= ~KVASER_PCIEFD_KCAN_MODE_CCM;
		if (can->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
			mode |= KVASER_PCIEFD_KCAN_MODE_NIFDEN;
		else
			mode &= ~KVASER_PCIEFD_KCAN_MODE_NIFDEN;
	} else {
	    /* set classic CAN mode and reset Non-ISO FD enable */
		mode |= KVASER_PCIEFD_KCAN_MODE_CCM;
		mode &= ~KVASER_PCIEFD_KCAN_MODE_NIFDEN;
	}

	if (can->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
		mode |= KVASER_PCIEFD_KCAN_MODE_LOM;

	/* Active Error Flag Enable.
	 * Clear to force error passive behavior to avoid sending active
	 * error flags */
	mode |= KVASER_PCIEFD_KCAN_MODE_EEN;
	/* Error Packet Enable. */
	mode |= KVASER_PCIEFD_KCAN_MODE_EPEN;
	/* Use ACK packet type */
	mode &= ~KVASER_PCIEFD_KCAN_MODE_APT;
	/* remove reset mode */
	mode &= ~KVASER_PCIEFD_KCAN_MODE_RM;
/* NOTE(review): hard override -- everything computed above is discarded */
mode = 0;
mode = 
	KVASER_PCIEFD_KCAN_MODE_CCM
	| KVASER_PCIEFD_KCAN_MODE_APT
	| KVASER_PCIEFD_KCAN_MODE_DWH
	;

	iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);

	spin_unlock_irqrestore(&can->lock, irq);
}

/* Begin an abort/flush/reset cycle on one KCAN controller.
 *
 * Arms the Abort Done (ABD) and Tx Flush Done (TFD) interrupts, then
 * either issues the abort/flush/reset command directly (controller
 * idle) or requests reset mode so the cycle runs when the controller
 * becomes idle.  Completion is signalled via can->flush_comp by the
 * interrupt handler.
 */
void kvaser_pciefd_start_controller_flush(struct kvaser_pciefd_can *can)
{
	unsigned long flags;
	u32 stat;

	spin_lock_irqsave(&can->lock, flags);

	/* Drop all pending interrupts, then enable only ABD and TFD. */
	iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
	iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD | KVASER_PCIEFD_KCAN_IRQ_TFD,
		  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);

	stat = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
	if (stat & KVASER_PCIEFD_KCAN_STAT_IDLE) {
		/* Controller already idle: run abort, flush and reset now. */
		u32 cmd = KVASER_PCIEFD_KCAN_CMD_AT |
			  (++can->cmd_seq << KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT);

		iowrite32(cmd, can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG);
	} else if (!(stat & KVASER_PCIEFD_KCAN_STAT_RMR)) {
		/* Busy and not yet in reset mode: request reset mode. */
		u32 mode = ioread32(can->reg_base +
				    KVASER_PCIEFD_KCAN_MODE_REG);

		iowrite32(mode | KVASER_PCIEFD_KCAN_MODE_RM,
			  can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	}

	spin_unlock_irqrestore(&can->lock, flags);
}

/* Bring one CAN channel to bus-on (CAN_STATE_ERROR_ACTIVE).
 *
 * Sequence: stop the error-counter poll timer, flush the controller and
 * wait on flush_comp, then leave reset mode and wait on start_comp
 * (both completions are signalled by the interrupt handler), finally
 * mask all KCAN interrupt sources again and clear the cached error
 * counters.
 *
 * Returns 0 on success or -ETIMEDOUT when either completion does not
 * arrive within KVASER_PCIEFD_WAIT_TIMEOUT.
 */
int kvaser_pciefd_bus_on(struct kvaser_pciefd_can *can)
{
	u32 mode;
	int minor = -1;  /* needed for DBGIN/DBGOUT or DBGPRINT macro */
	unsigned long irq;

	DBGPRINT(DBG_ENTRY, ("  ============== bus_on ==============\n"));

	del_timer(&can->bec_poll_timer);

	/* Don't interrupt an already-running flush; otherwise start one. */
	if (!completion_done(&can->flush_comp))
		kvaser_pciefd_start_controller_flush(can);

	if (!wait_for_completion_timeout(&can->flush_comp,
					 KVASER_PCIEFD_WAIT_TIMEOUT)) {
		netdev_err(can->can.dev, "Timeout during bus on flush\n");
		return -ETIMEDOUT;
	}

	spin_lock_irqsave(&can->lock, irq);
	/* Mask everything, clear pending irqs, then enable only the
	 * Abort Done and Tx Flush Done interrupts. */
	iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
	iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);

	iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD | KVASER_PCIEFD_KCAN_IRQ_TFD,
		  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);

	/* Leave reset mode; start_comp is signalled when the controller
	 * has come up. */
	mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	mode &= ~KVASER_PCIEFD_KCAN_MODE_RM;
	iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	spin_unlock_irqrestore(&can->lock, irq);

	if (!wait_for_completion_timeout(&can->start_comp,
					 KVASER_PCIEFD_WAIT_TIMEOUT)) {
		netdev_err(can->can.dev, "Timeout during bus on reset\n");
		return -ETIMEDOUT;
	}
	/* Reset interrupt handling */
	/* disable all int sources */
	iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
	/* reset pending irqs */
	iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);

	// kvaser_pciefd_set_tx_irq(can);
	// kvaser_pciefd_setup_controller(can);

	can->can.state = CAN_STATE_ERROR_ACTIVE;
	// netif_wake_queue(can->can.dev);
	can->bec.txerr = 0;
	can->bec.rxerr = 0;
	can->err_rep_cnt = 0;

	return 0;
}

/* Stop the CAN FD transceiver power-control PWM by setting the duty
 * cycle to zero.
 *
 * NOTE(review): the whole body is compiled out (#if 0), and even inside
 * it an unconditional `return;` precedes the register access -- so this
 * function is currently a no-op.  kvaser_pciefd_pwm_start() still calls
 * it; confirm whether the stop sequence should be re-enabled.
 */
void kvaser_pciefd_pwm_stop(struct kvaser_pciefd_can *can)
{
#if 0
	u8 top;
	u32 pwm_ctrl;
	unsigned long irq;
/* ========== */
return;

printk("kvaser_pciefd_pwm_stop(%p)\n", can);
	spin_lock_irqsave(&can->lock, irq);
	pwm_ctrl = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
	top = (pwm_ctrl >> KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT) & 0xff;

	/* Set duty cycle to zero */
	pwm_ctrl |= top;
	iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
	spin_unlock_irqrestore(&can->lock, irq);
#endif
}

/* Start the transceiver power-control PWM: 500 kHz at ~95 % duty cycle.
 *
 * The period (TOP) is derived from the FPGA bus clock and written to the
 * upper byte of the PWM register; the trigger value in the lower byte
 * sets the duty cycle.  Both writes go to the same register.
 */
void kvaser_pciefd_pwm_start(struct kvaser_pciefd_can *can)
{
	unsigned long flags;
	u32 reg;
	int period, duty_trigger;

	kvaser_pciefd_pwm_stop(can);
	spin_lock_irqsave(&can->lock, flags);

	/* Period value for a 500 kHz PWM frequency. */
	period = can->kv_pcie->bus_freq / (2 * 500000) - 1;

	reg = period & 0xff;
	reg |= (period & 0xff) << KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT;
	iowrite32(reg, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);

	/* Trigger point for a 95 % duty cycle (rounded to nearest). */
	duty_trigger = (100 * period - 95 * (period + 1) + 50) / 100;
	reg = duty_trigger & 0xff;
	reg |= (period & 0xff) << KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT;
	iowrite32(reg, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);

	spin_unlock_irqrestore(&can->lock, flags);
}


/* .ndo_stop callback: flush the controller and disable its interrupts.
 *
 * An already-running flush (flush_comp pending) is left to finish
 * rather than being restarted.  Returns 0 on success or -ETIMEDOUT when
 * the flush never completes; in the timeout case interrupts are left
 * untouched.
 */
static int kvaser_pciefd_stop(struct net_device *netdev)
{
	struct kvaser_pciefd_can *can = netdev_priv(netdev);
	int ret = 0;

	/* was a bare printk() without a KERN_ level; use the rate-limited
	 * per-device debug helper instead */
	netdev_dbg(netdev, "%s()\n", __func__);

	/* Don't interrupt ongoing flush */
	if (!completion_done(&can->flush_comp))
		kvaser_pciefd_start_controller_flush(can);

	if (!wait_for_completion_timeout(&can->flush_comp,
					 KVASER_PCIEFD_WAIT_TIMEOUT)) {
		netdev_err(can->can.dev, "Timeout during stop\n");
		ret = -ETIMEDOUT;
	} else {
		/* Flush done: mask all KCAN interrupt sources and stop
		 * the error-counter poll timer. */
		iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
		del_timer(&can->bec_poll_timer);
	}
//	close_candev(netdev);

	return ret;
}

#if 0
static int kvaser_pciefd_prepare_tx_packet(struct kvaser_pciefd_tx_packet *p,
					   struct kvaser_pciefd_can *can,
					   struct sk_buff *skb)
{
	struct canfd_frame *cf = (struct canfd_frame *)skb->data;
	int packet_size;
	int seq = can->echo_idx;

	memset(p, 0, sizeof(*p));

	if (can->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
		p->header[1] |= KVASER_PCIEFD_TPACKET_SMS;

	if (cf->can_id & CAN_RTR_FLAG)
		p->header[0] |= KVASER_PCIEFD_RPACKET_RTR;

	if (cf->can_id & CAN_EFF_FLAG)
		p->header[0] |= KVASER_PCIEFD_RPACKET_IDE;

	p->header[0] |= cf->can_id & CAN_EFF_MASK;
	p->header[1] |= len2dlc(cf->len) << KVASER_PCIEFD_RPACKET_DLC_SHIFT;
	p->header[1] |= KVASER_PCIEFD_TPACKET_AREQ;

	if (can_is_canfd_skb(skb)) {
		p->header[1] |= KVASER_PCIEFD_RPACKET_FDF;
		if (cf->flags & CANFD_BRS)
			p->header[1] |= KVASER_PCIEFD_RPACKET_BRS;
		if (cf->flags & CANFD_ESI)
			p->header[1] |= KVASER_PCIEFD_RPACKET_ESI;
	}

	p->header[1] |= seq & KVASER_PCIEFD_PACKET_SEQ_MSK;

	packet_size = cf->len;
	memcpy(p->data, cf->data, packet_size);

	return DIV_ROUND_UP(packet_size, 4);
}
#endif

#if 0
static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb,
					    struct net_device *netdev)
{
	struct kvaser_pciefd_can *can = netdev_priv(netdev);
	unsigned long irq_flags;
	struct kvaser_pciefd_tx_packet packet;
	int nwords;
	u8 count;

	if (can_dropped_invalid_skb(netdev, skb))
		return NETDEV_TX_OK;

	nwords = kvaser_pciefd_prepare_tx_packet(&packet, can, skb);

	spin_lock_irqsave(&can->echo_lock, irq_flags);

	/* Prepare and save echo skb in internal slot */
// can_put_echo_skb(skb, netdev, can->echo_idx);

	/* Move echo index to the next slot */
	can->echo_idx = (can->echo_idx + 1) % can->can.echo_skb_max;

	/* Write header to fifo */
	iowrite32(packet.header[0],
		  can->reg_base + KVASER_PCIEFD_KCAN_FIFO_REG);
	iowrite32(packet.header[1],
		  can->reg_base + KVASER_PCIEFD_KCAN_FIFO_REG);

	if (nwords) {
		u32 data_last = ((u32 *)packet.data)[nwords - 1];

		/* Write data to fifo, except last word */
		iowrite32_rep(can->reg_base +
			      KVASER_PCIEFD_KCAN_FIFO_REG, packet.data,
			      nwords - 1);
		/* Write last word to end of fifo */
		__raw_writel(data_last, can->reg_base +
			     KVASER_PCIEFD_KCAN_FIFO_LAST_REG);
	} else {
		/* Complete write to fifo */
		__raw_writel(0, can->reg_base +
			     KVASER_PCIEFD_KCAN_FIFO_LAST_REG);
	}

	count = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NPACKETS_REG);
	/* No room for a new message, stop the queue until at least one
	 * successful transmit
	 */
	if (count >= KVASER_PCIEFD_CAN_TX_MAX_COUNT ||
	    can->can.echo_skb[can->echo_idx])
		netif_stop_queue(netdev);

	spin_unlock_irqrestore(&can->echo_lock, irq_flags);

	return NETDEV_TX_OK;
}
#endif



#if 0
static int kvaser_pciefd_set_mode(struct net_device *ndev, enum can_mode mode)
{
	struct kvaser_pciefd_can *can = netdev_priv(ndev);
	int ret = 0;

	switch (mode) {
	case CAN_MODE_START:
		if (!can->can.restart_ms)
			ret = kvaser_pciefd_bus_on(can);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return ret;
}
#endif

static int kvaser_pciefd_get_berr_counter(const struct net_device *ndev,
					  struct can_berr_counter *bec)
{
	struct kvaser_pciefd_can *can = netdev_priv(ndev);

	bec->rxerr = can->bec.rxerr;
	bec->txerr = can->bec.txerr;
	return 0;
}

/* Periodic timer callback: re-enable error packet generation and
 * request a fresh status packet so the cached bus error counters stay
 * current; resets the error-report counter for the next interval.
 */
static void kvaser_pciefd_bec_poll_timer(struct timer_list *t)
{
	struct kvaser_pciefd_can *can =
		from_timer(can, t, bec_poll_timer);

	kvaser_pciefd_enable_err_gen(can);
	kvaser_pciefd_request_status(can);
	can->err_rep_cnt = 0;
}

/* SocketCAN net_device callbacks.  Only .ndo_stop is active in this
 * can4linux build; the remaining handlers are stubbed out. */
static const struct net_device_ops kvaser_pciefd_netdev_ops = {
//	.ndo_open = kvaser_pciefd_open,
	.ndo_stop = kvaser_pciefd_stop,
//	.ndo_start_xmit = kvaser_pciefd_start_xmit,
//	.ndo_change_mtu = can_change_mtu,
};

/* Allocate and initialise the per-channel controller structures.
 *
 * For every channel: map its register window, initialise completions
 * and the error-counter poll timer, sanity-check the Tx FIFO depth and
 * CAN FD capability, populate the SocketCAN-style can_priv fields that
 * can4linux still consults, then clear/arm the KCAN interrupts and
 * start the transceiver PWM.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or -ENODEV when
 * the hardware does not report the expected capabilities.
 */
static int kvaser_pciefd_setup_can_ctrls(struct pci_dev *pdev, struct kvaser_pciefd *pcie)
{
	int i;

	for (i = 0; i < pcie->nr_channels; i++) {
		struct kvaser_pciefd_can *can;
		u32 status, tx_npackets;
		/* get kernel memory for all CAN controller structures */
		can = devm_kzalloc(&pdev->dev, sizeof(*can), GFP_KERNEL);
		if (!can)
		    return -ENOMEM;

		can->minor = i;
		/* each channel's register window follows KCAN0 at a
		 * fixed stride */
		can->reg_base = pcie->reg_base + KVASER_PCIEFD_KCAN0_BASE +
				i * KVASER_PCIEFD_KCAN_BASE_OFFSET;

		can->kv_pcie = pcie;
		can->cmd_seq = 0;
		can->err_rep_cnt = 0;
		can->bec.txerr = 0;
		can->bec.rxerr = 0;

		init_completion(&can->start_comp);
		init_completion(&can->flush_comp);
		timer_setup(&can->bec_poll_timer, kvaser_pciefd_bec_poll_timer,
			    0);

		/* verify the hardware Tx FIFO is at least as deep as the
		 * driver assumes */
		tx_npackets = ioread32(can->reg_base +
				       KVASER_PCIEFD_KCAN_TX_NPACKETS_REG);
		if (((tx_npackets >> KVASER_PCIEFD_KCAN_TX_NPACKETS_MAX_SHIFT) &
		      0xff) < KVASER_PCIEFD_CAN_TX_MAX_COUNT) {
			dev_err(&pcie->pci->dev,
				"Max Tx count is smaller than expected\n");
			return -ENODEV;
		}


/* the information in can->can.* is not really needed by can4linux */
		can->can.clock.freq = pcie->freq;
		can->can.echo_skb_max = KVASER_PCIEFD_CAN_TX_MAX_COUNT;
		can->echo_idx = 0;
		spin_lock_init(&can->echo_lock);
		spin_lock_init(&can->lock);
		can->can.bittiming_const = &kvaser_pciefd_bittiming_const;
		can->can.data_bittiming_const = &kvaser_pciefd_bittiming_const;

//		can->can.do_set_bittiming = kvaser_pciefd_set_nominal_bittiming;
//		can->can.do_set_data_bittiming =
//			kvaser_pciefd_set_data_bittiming;

//	can->can.do_set_mode = kvaser_pciefd_set_mode;
		can->can.do_get_berr_counter = kvaser_pciefd_get_berr_counter;

		can->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY |
					      CAN_CTRLMODE_FD |
					      CAN_CTRLMODE_FD_NON_ISO;

/* can4linux specific */
		proc_clock = pcie->freq;
		status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
		if (!(status & KVASER_PCIEFD_KCAN_STAT_FD)) {
			dev_err(&pcie->pci->dev,
				"CAN FD not supported as expected %d\n", i);
			return -ENODEV;
		}

		/* single-shot capability is optional per channel */
		if (status & KVASER_PCIEFD_KCAN_STAT_CAP)
			can->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT;

/* set can.ctrlmode = 0; */
can->can.ctrlmode = CAN_CTRLMODE_FD; 
		/* reset all pending Kcan irq */
		iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);

		/* enable Abort done and Tx buffer flush done interrupts */
		iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD |
			  KVASER_PCIEFD_KCAN_IRQ_TFD,
			  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);

		can_pcidev[i] = pdev;
		pcie->can[i]  = can;
		kvaser_pciefd_pwm_start(can);
	}  /* end for( pcie->nr_channels) */ 
	return 0;
}

#if 0
/* only useful for SocketCAN */
static int kvaser_pciefd_reg_candev(struct kvaser_pciefd *pcie)
{
	int i;

	for (i = 0; i < pcie->nr_channels; i++) {
		int err = register_candev(pcie->can[i]->can.dev);

		if (err) {
			int j;

			/* Unregister all successfully registered devices. */
			for (j = 0; j < i; j++)
				unregister_candev(pcie->can[j]->can.dev);
			return err;
		}
	}

	return 0;
}
#endif

/* Program one DMA buffer address into the board's address-map table at
 * @offset.
 *
 * With 64-bit dma_addr_t the low word carries the 64-bit marker bit and
 * the high word holds the upper address half; otherwise the high word
 * is written as zero.
 */
static void kvaser_pciefd_write_dma_map(struct kvaser_pciefd *pcie,
					dma_addr_t addr, int offset)
{
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	u32 lo = addr | KVASER_PCIEFD_64BIT_DMA_BIT;
	u32 hi = addr >> 32;
#else
	u32 lo = addr;
	u32 hi = 0;
#endif

	iowrite32(lo, pcie->reg_base + offset);
	iowrite32(hi, pcie->reg_base + offset + 4);
}

/* Allocate the shared-Rx DMA buffers, program them into the board's
 * address map and enable DMA on the shared receive buffer (SRB) block.
 *
 * Returns 0 on success, -ENOMEM when a coherent buffer cannot be
 * allocated, or -EIO when the SRB engine is not idle before enabling.
 */
static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie)
{
	dma_addr_t dma_handle[KVASER_PCIEFD_DMA_COUNT];
	u32 status;
	int i;

	/* Keep DMA disabled while the buffers are set up. */
	iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);

	for (i = 0; i < KVASER_PCIEFD_DMA_COUNT; i++) {
		unsigned int map_offset = KVASER_PCIEFD_DMA_MAP_BASE + 8 * i;

		pcie->dma_data[i] = dmam_alloc_coherent(&pcie->pci->dev,
							KVASER_PCIEFD_DMA_SIZE,
							&dma_handle[i],
							GFP_KERNEL);
		if (!pcie->dma_data[i] || !dma_handle[i]) {
			dev_err(&pcie->pci->dev, "Rx dma_alloc(%u) failure\n",
				KVASER_PCIEFD_DMA_SIZE);
			return -ENOMEM;
		}

		kvaser_pciefd_write_dma_map(pcie, dma_handle[i], map_offset);
	}

	/* Reset the Rx FIFO and both DMA buffers. */
	iowrite32(KVASER_PCIEFD_SRB_CMD_FOR | KVASER_PCIEFD_SRB_CMD_RDB0 |
		  KVASER_PCIEFD_SRB_CMD_RDB1,
		  pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);

	status = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_STAT_REG);
	if (!(status & KVASER_PCIEFD_SRB_STAT_DI)) {
		dev_err(&pcie->pci->dev, "DMA not idle before enabling\n");
		return -EIO;
	}

	/* Now switch DMA on. */
	iowrite32(KVASER_PCIEFD_SRB_CTRL_DMA_ENABLE,
		  pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);

	return 0;
}

/* Read the board configuration and system-id registers and initialise
 * the global board state (channel count, clock frequencies, can4linux
 * proc_base[]/IRQ[] tables).
 *
 * Returns 0 on success or a negative errno on configuration mismatch,
 * missing DMA support, or flash read failure.
 */
static int kvaser_pciefd_setup_board(struct kvaser_pciefd *pcie)
{
	u32 sysid, srb_status, build;
	u8 sysid_nr_chan;
	int ret, i;
	int minor = -1;  /* needed for DBGIN/DBGOUT macro */

	DBGIN();
	ret = kvaser_pciefd_read_cfg(pcie);
	if (ret)
		return ret;

	sysid = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_VERSION_REG);
	sysid_nr_chan = (sysid >> KVASER_PCIEFD_SYSID_NRCHAN_SHIFT) & 0xff;

	dev_info(&pcie->pci->dev, "Board has %d CAN channels\n", sysid_nr_chan);

	if (pcie->nr_channels != sysid_nr_chan) {
		dev_err(&pcie->pci->dev,
			"Number of channels does not match: %u vs %u\n",
			pcie->nr_channels,
			sysid_nr_chan);
		return -ENODEV;
	}

	/* Clamp BEFORE indexing proc_base[]/IRQ[] below, so a bogus
	 * channel count from the flash cannot overrun those arrays
	 * (previously the clamp came after the loop). */
	if (pcie->nr_channels > KVASER_PCIEFD_MAX_CAN_CHANNELS)
		pcie->nr_channels = KVASER_PCIEFD_MAX_CAN_CHANNELS;

	/* initialize the can4linux proc_base and IRQ tables */
	for (i = 0; i < pcie->nr_channels; i++) {
	    proc_base[i] = (upointer_t) pcie->reg_base
		+ KVASER_PCIEFD_KCAN0_BASE +
				i * KVASER_PCIEFD_KCAN_BASE_OFFSET;
	    IRQ[i]	    = pcie->pci->irq;
	}

	build = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_BUILD_REG);
	dev_dbg(&pcie->pci->dev, "Version %u.%u.%u\n",
		(sysid >> KVASER_PCIEFD_SYSID_MAJOR_VER_SHIFT) & 0xff,
		sysid & 0xff,
		(build >> KVASER_PCIEFD_SYSID_BUILD_VER_SHIFT) & 0x7fff);

	srb_status = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_STAT_REG);
	if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DMA)) {
		dev_err(&pcie->pci->dev,
			"Hardware without DMA is not supported\n");
		return -ENODEV;
	}

	pcie->bus_freq = ioread32(pcie->reg_base +
				  KVASER_PCIEFD_SYSID_BUSFREQ_REG);
	pcie->freq = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_CANFREQ_REG);
	/* timestamp divider: CAN clock ticks per microsecond, at least 1 */
	pcie->freq_to_ticks_div = pcie->freq / 1000000;
	if (pcie->freq_to_ticks_div == 0)
		pcie->freq_to_ticks_div = 1;

	/* Turn off all loopback functionality */
	iowrite32(0, pcie->reg_base + KVASER_PCIEFD_LOOP_REG);

	DBGOUT();
	return ret;
}

/* Deliver one received CAN / CAN FD data packet to every subscribed
 * can4linux receive FIFO of the addressed channel.
 *
 * @p:    packet header words taken from the DMA buffer
 * @data: payload words following the header
 *
 * Returns 0, or -EIO when the packet addresses a channel beyond the
 * number of configured channels.
 */
static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie,
					    struct kvaser_pciefd_rx_packet *p,
					    __le32 *data)
{
	int minor;		/* CAN channel the packet belongs to */
	int rx_fifo;
	msg_fifo_t *RxFifo;
	u32 id;			/* received CAN id */
	int dlc;		/* received DLC - data length code */
	int edl = 0;		/* CAN FD (extended data length) frame */
	int rtr = 0;		/* RTR frame received - carries no data */
	int flags = 0;
	unsigned int length;	/* real number of data bytes received */
	struct timeval timestamp;
	struct can_priv *priv;

	/* u8 ch_id */
	/* if we have more than one board, minor has to be accumulated */
	minor = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;
	/* validate the channel BEFORE any rx_buf[minor] indexing */
	if (minor >= pcie->nr_channels)
		return -EIO;

	priv = &pcie->can[minor]->can;
	/* stats = &priv->dev->stats; */

	get_timestamp(minor, &timestamp);

	/* Stamp every FIFO of this channel and preset the overrun flag;
	 * subscribed FIFOs get their flags overwritten below. */
	for (rx_fifo = 0; rx_fifo < CAN_MAX_OPEN; rx_fifo++) {
		RxFifo = &rx_buf[minor][rx_fifo];

		RxFifo->data[RxFifo->head].timestamp = timestamp;
		(RxFifo->data[RxFifo->head]).flags =
				(RxFifo->status & BUF_OVERRUN ? MSG_BOVR : 0);
	}

	/* ---------- decode frame attributes from the header ---------- */
	if (p->header[1] & KVASER_PCIEFD_RPACKET_FDF) {
		/* CAN FD - Flexible Data Rate Format frame */
		flags |= MSG_CANFD;
		edl = 1;
		if (p->header[1] & KVASER_PCIEFD_RPACKET_BRS)
			flags |= MSG_RBRS;	/* Bit Rate Switch */
		if (p->header[1] & KVASER_PCIEFD_RPACKET_ESI)
			flags |= MSG_RESI;	/* Error Status Indicator */
	}

	id = p->header[0] & CAN_EFF_MASK;

	if (p->header[0] & KVASER_PCIEFD_RPACKET_IDE)
		flags |= MSG_EXT;
	if (p->header[0] & KVASER_PCIEFD_RPACKET_RTR) {
		flags |= MSG_RTR;
		/* FIX: rtr was declared but never set, so remote frames
		 * copied stale payload bytes below. */
		rtr = 1;
	}
	dlc = (p->header[1] >> KVASER_PCIEFD_RPACKET_DLC_SHIFT) & 0x0f;

	/* ---------- fill frame data ----------------------- */
	/* handle all subscribed rx fifos */
	for (rx_fifo = 0; rx_fifo < CAN_MAX_OPEN; rx_fifo++) {
		if (can_waitflag[minor][rx_fifo] != 1)
			continue;	/* this FIFO is not in use */

		RxFifo = &rx_buf[minor][rx_fifo];

		(RxFifo->data[RxFifo->head]).flags = flags;
		(RxFifo->data[RxFifo->head]).id = id;
		/* note: length field stores the raw DLC */
		(RxFifo->data[RxFifo->head]).length = dlc;

		if (edl)
			length = dlc2len(dlc);	/* CAN FD: DLC to length */
		else
			length = (dlc > 8) ? 8 : dlc;	/* classic CAN */

		/* RTR frames carry no payload */
		if (length > 0 && !rtr) {
			memcpy(&(RxFifo->data[RxFifo->head]).data,
				data, length);
		}
		/* mark just written entry as OK and full */
		RxFifo->status = BUF_OK;
		/* Handle buffer wrap-around */
		++(RxFifo->head);
		RxFifo->head %= MAX_BUFSIZE;
		if (RxFifo->head == RxFifo->tail) {
			pr_err("CAN[%d][%d] RX: FIFO overrun\n",
					minor, rx_fifo);
			RxFifo->status = BUF_OVERRUN;
		}

		/* Kick the select() call: wake every process sleeping
		 * interruptibly on this FIFO's wait queue. */
		wake_up_interruptible(&can_wait[minor][rx_fifo]);
	}
	/* ---------- / fill frame data -------------------------------- */

	return 0;
}

/* React to a CAN controller state transition.
 * When the controller goes bus off and no automatic restart is
 * configured (restart_ms == 0), start the controller flush so it
 * does not recover on its own.
 */
static void kvaser_pciefd_change_state(struct kvaser_pciefd_can *can,
				       struct can_frame *cf,
				       enum can_state new_state,
				       enum can_state tx_state,
				       enum can_state rx_state)
{
//	can_change_state(can->can.dev, cf, tx_state, rx_state);

	/* Prevent CAN controller from auto recovering from bus off */
	if (new_state == CAN_STATE_BUS_OFF && !can->can.restart_ms) {
		kvaser_pciefd_start_controller_flush(can);
//		can_bus_off(ndev);
	}
}

static void kvaser_pciefd_packet_to_state(struct kvaser_pciefd_rx_packet *p,
					  struct can_berr_counter *bec,
					  enum can_state *new_state,
					  enum can_state *tx_state,
					  enum can_state *rx_state)
{
	if (p->header[0] & KVASER_PCIEFD_SPACK_BOFF ||
	    p->header[0] & KVASER_PCIEFD_SPACK_IRM)
		*new_state = CAN_STATE_BUS_OFF;
	else if (bec->txerr >= 255 ||  bec->rxerr >= 255)
		*new_state = CAN_STATE_BUS_OFF;
	else if (p->header[1] & KVASER_PCIEFD_SPACK_EPLR)
		*new_state = CAN_STATE_ERROR_PASSIVE;
	else if (bec->txerr >= 128 || bec->rxerr >= 128)
		*new_state = CAN_STATE_ERROR_PASSIVE;
	else if (p->header[1] & KVASER_PCIEFD_SPACK_EWLR)
		*new_state = CAN_STATE_ERROR_WARNING;
	else if (bec->txerr >= 96 || bec->rxerr >= 96)
		*new_state = CAN_STATE_ERROR_WARNING;
	else
		*new_state = CAN_STATE_ERROR_ACTIVE;

	*tx_state = bec->txerr >= bec->rxerr ? *new_state : 0;
	*rx_state = bec->txerr <= bec->rxerr ? *new_state : 0;
}

static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can,
					struct kvaser_pciefd_rx_packet *p)
{
	struct can_berr_counter bec;
	enum can_state old_state, new_state, tx_state, rx_state;
	struct net_device *ndev = can->can.dev;
// struct sk_buff *skb;
	struct can_frame *cf = NULL;
//	struct skb_shared_hwtstamps *shhwtstamps;
	struct net_device_stats *stats = &ndev->stats;

printk("rx_error_frame() \n");
/* ============= */
udelay(10000);
return 0;

	old_state = can->can.state;

	bec.txerr = p->header[0] & 0xff;
	bec.rxerr = (p->header[0] >> KVASER_PCIEFD_SPACK_RXERR_SHIFT) & 0xff;

	kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state,
				      &rx_state);

//	skb = alloc_can_err_skb(ndev, &cf);

	if (new_state != old_state) {
		kvaser_pciefd_change_state(can, cf, new_state, tx_state,
					   rx_state);

		if (old_state == CAN_STATE_BUS_OFF &&
		    new_state == CAN_STATE_ERROR_ACTIVE &&
		    can->can.restart_ms) {
			can->can.can_stats.restarts++;
//			if (skb)
//				cf->can_id |= CAN_ERR_RESTARTED;
		}
	}

	can->err_rep_cnt++;
	can->can.can_stats.bus_error++;
	stats->rx_errors++;

	can->bec.txerr = bec.txerr;
	can->bec.rxerr = bec.rxerr;

//	if (!skb) {
//		stats->rx_dropped++;
//		return -ENOMEM;
//	}

// shhwtstamps = skb_hwtstamps(skb);
// shhwtstamps->hwtstamp =
// 	ns_to_ktime(div_u64(p->timestamp * 1000,
// 			    can->kv_pcie->freq_to_ticks_div));
	cf->can_id |= CAN_ERR_BUSERROR;

	cf->data[6] = bec.txerr;
	cf->data[7] = bec.rxerr;

	stats->rx_packets++;
	stats->rx_bytes += cf->can_dlc;

//netif_rx(skb);
	return 0;
}

/* Dispatch an error packet to the per-channel error frame handler.
 *
 * NOTE(review): error handling below the early return is disabled
 * during bring-up; only the channel id validation is active.
 *
 * Returns 0, or -EIO for an out-of-range channel id.
 */
static int kvaser_pciefd_handle_error_packet(struct kvaser_pciefd *pcie,
					     struct kvaser_pciefd_rx_packet *p)
{
	struct kvaser_pciefd_can *can;
	u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;
printk("handle_error_packet() %d\n", ch_id);

	/* FIX: validate the channel id before the early debug return;
	 * previously the check sat in dead code and a bad id
	 * was silently reported as success */
	if (ch_id >= pcie->nr_channels)
		return -EIO;

/* ============ */
return 0;


	can = pcie->can[ch_id];

	kvaser_pciefd_rx_error_frame(can, p);
	if (can->err_rep_cnt >= KVASER_PCIEFD_MAX_ERR_REP)
		/* Do not report more errors, until bec_poll_timer expires */
		kvaser_pciefd_disable_err_gen(can);
	/* Start polling the error counters */
	mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ);
	return 0;
}

static int kvaser_pciefd_handle_status_resp(struct kvaser_pciefd_can *can,
					    struct kvaser_pciefd_rx_packet *p)
{
	struct can_berr_counter bec;
	enum can_state old_state, new_state, tx_state, rx_state;

printk("handle_status_resp()\n");
	old_state = can->can.state;

	bec.txerr = p->header[0] & 0xff;
	bec.rxerr = (p->header[0] >> KVASER_PCIEFD_SPACK_RXERR_SHIFT) & 0xff;

	kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state,
				      &rx_state);

	if (new_state != old_state) {
//		struct net_device *ndev = can->can.dev;
		struct sk_buff *skb;
		struct can_frame *cf;
		struct skb_shared_hwtstamps *shhwtstamps;
printk("  CAN state changed\n");

//		skb = alloc_can_err_skb(ndev, &cf);
//		if (!skb) {
//			struct net_device_stats *stats = &ndev->stats;
//
//			stats->rx_dropped++;
//			return -ENOMEM;
//		}

		kvaser_pciefd_change_state(can, cf, new_state, tx_state,
					   rx_state);

		if (old_state == CAN_STATE_BUS_OFF &&
		    new_state == CAN_STATE_ERROR_ACTIVE &&
		    can->can.restart_ms) {
			can->can.can_stats.restarts++;
			cf->can_id |= CAN_ERR_RESTARTED;
		}

		shhwtstamps = skb_hwtstamps(skb);
		shhwtstamps->hwtstamp =
			ns_to_ktime(div_u64(p->timestamp * 1000,
					    can->kv_pcie->freq_to_ticks_div));

		cf->data[6] = bec.txerr;
		cf->data[7] = bec.rxerr;

//		netif_rx(skb);
	}
	can->bec.txerr = bec.txerr;
	can->bec.rxerr = bec.rxerr;
	/* Check if we need to poll the error counters */
	if (bec.txerr || bec.rxerr) {
//	mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ);
	; }

	return 0;
}

/* Handle a status packet from the shared receive buffer.
 *
 * Depending on the header flag bits this either
 *  - acknowledges a reset and starts an abort/flush sequence,
 *  - signals end-of-flush after a detected reset,
 *  - forwards a response to an explicit status request, or
 *  - completes start_comp when the controller returns to bus on.
 *
 * Returns 0, or -EIO for an out-of-range channel id.
 */
static int kvaser_pciefd_handle_status_packet(struct kvaser_pciefd *pcie,
					      struct kvaser_pciefd_rx_packet *p)
{
struct kvaser_pciefd_can *can;
u8 cmdseq;		/* command sequence number read back from the HW */
u32 status;		/* current KCAN status register */
	u8 minor = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;

	if (minor >= pcie->nr_channels)
		return -EIO;

	can = pcie->can[minor];

// can_showstat(minor);
	status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
	cmdseq = (status >> KVASER_PCIEFD_KCAN_STAT_SEQNO_SHIFT) & 0xff;
// printk("handle_status_packet() minor = %d, status = 0x%08x\n", minor, status);

	/* Reset done, start abort and flush */
	if (p->header[0] & KVASER_PCIEFD_SPACK_IRM &&
	    p->header[0] & KVASER_PCIEFD_SPACK_RMCD &&
	    p->header[1] & KVASER_PCIEFD_SPACK_AUTO &&
	    cmdseq == (p->header[1] & KVASER_PCIEFD_PACKET_SEQ_MSK) &&
	    status & KVASER_PCIEFD_KCAN_STAT_IDLE) {
		u32 cmd;

		/* ack the abort-done IRQ, issue an abort command with
		 * the next sequence number, then enable the
		 * transmit-flush-done interrupt */
		iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD,
			  can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
		cmd = KVASER_PCIEFD_KCAN_CMD_AT;
		cmd |= ++can->cmd_seq << KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT;
		iowrite32(cmd, can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG);

		iowrite32(KVASER_PCIEFD_KCAN_IRQ_TFD,
			  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
	} else if (p->header[0] & KVASER_PCIEFD_SPACK_IDET &&
		   p->header[0] & KVASER_PCIEFD_SPACK_IRM &&
		   cmdseq == (p->header[1] & KVASER_PCIEFD_PACKET_SEQ_MSK) &&
		   status & KVASER_PCIEFD_KCAN_STAT_IDLE) {
		/* Reset detected, send end of flush if no packet are in FIFO */
		u8 count = ioread32(can->reg_base +
				    KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;

		if (!count)
			iowrite32(KVASER_PCIEFD_KCAN_CTRL_EFLUSH,
				  can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG);
	} else if (!(p->header[1] & KVASER_PCIEFD_SPACK_AUTO) &&
		   cmdseq == (p->header[1] & KVASER_PCIEFD_PACKET_SEQ_MSK)) {
		/* Response to status request received */
		kvaser_pciefd_handle_status_resp(can, p);
		if (can->can.state != CAN_STATE_BUS_OFF &&
		    can->can.state != CAN_STATE_ERROR_ACTIVE) {
			/* keep polling the error counters while the
			 * controller is in an error state */
			mod_timer(&can->bec_poll_timer,
				  KVASER_PCIEFD_BEC_POLL_FREQ);
		}
	} else if (p->header[0] & KVASER_PCIEFD_SPACK_RMCD &&
		   !(status & KVASER_PCIEFD_KCAN_STAT_BUS_OFF_MSK)) {
		/* Reset to bus on detected */
		if (!completion_done(&can->start_comp))
			complete(&can->start_comp);
	}
	return 0;
}

/* Handle an extended acknowledge packet.
 * A flushed packet may complete the flush sequence; a regular one
 * updates the tx statistics and restarts a stopped tx queue.
 */
static int kvaser_pciefd_handle_eack_packet(struct kvaser_pciefd *pcie,
					    struct kvaser_pciefd_rx_packet *p)
{
	struct kvaser_pciefd_can *can;
	u8 chan = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;

	if (chan >= pcie->nr_channels)
		return -EIO;

printk("handle_eack() %d\n", chan);
	can = pcie->can[chan];

	/* If this is the last flushed packet, send end of flush */
	if (p->header[0] & KVASER_PCIEFD_APACKET_FLU) {
		u8 npackets;

		npackets = ioread32(can->reg_base +
				    KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;
		if (!npackets)
			iowrite32(KVASER_PCIEFD_KCAN_CTRL_EFLUSH,
				  can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG);
	} else {
//		int echo_idx = p->header[0] & KVASER_PCIEFD_PACKET_SEQ_MSK;
// int dlc = can_get_echo_skb(can->can.dev, echo_idx);
		struct net_device_stats *stats = &can->can.dev->stats;

		/* echo skb handling not ported: account 8 bytes per frame */
		stats->tx_bytes += 8;
		stats->tx_packets++;

		if (netif_queue_stopped(can->can.dev))
			netif_wake_queue(can->can.dev);
	}

	return 0;
}

/* Handle a NACK (no acknowledge / arbitration lost) for a transmitted
 * frame. Currently only logs; the error statistics and error frame
 * generation below are compiled out with #if 0.
 */
static void kvaser_pciefd_handle_nack_packet(struct kvaser_pciefd_can *can,
					     struct kvaser_pciefd_rx_packet *p)
{
printk("handle_nack()\n");
#if 0
	struct sk_buff *skb;
	struct net_device_stats *stats = &can->can.dev->stats;
	struct can_frame *cf;

	/* NOTE(review): skb/cf are read below but alloc_can_err_skb()
	 * is commented out - initialize them before re-enabling this */
//	skb = alloc_can_err_skb(can->can.dev, &cf);

	stats->tx_errors++;
	if (p->header[0] & KVASER_PCIEFD_APACKET_ABL) {
		if (skb)
			cf->can_id |= CAN_ERR_LOSTARB;
		can->can.can_stats.arbitration_lost++;
	} else if (skb) {
		cf->can_id |= CAN_ERR_ACK;
	}

	if (skb) {
		cf->can_id |= CAN_ERR_BUSERROR;
		stats->rx_bytes += cf->can_dlc;
		stats->rx_packets++;
		netif_rx(skb);
	} else {
		stats->rx_dropped++;
		netdev_warn(can->can.dev, "No memory left for err_skb\n");
	}
#endif
}

/* Handle an acknowledge packet for a transmitted frame:
 * ignore control packet ACKs, forward NACKs, and update the tx
 * statistics for successfully sent frames.
 *
 * Returns 0, or -EIO for an out-of-range channel id.
 */
static int kvaser_pciefd_handle_ack_packet(struct kvaser_pciefd *pcie,
					   struct kvaser_pciefd_rx_packet *p)
{
	struct kvaser_pciefd_can *can;
	bool one_shot_fail = false;
	u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;

	if (ch_id >= pcie->nr_channels)
		return -EIO;

printk("handle_ack_resp() %d: p[0]= 0x%04x\n", ch_id, p->header[0] & 0xffff);
printk("handle_ack_resp() %d: p[0]= 0x%04x\n", ch_id, p->header[0]);
printk("handle_ack_resp() %d: p[1]= 0x%04x\n", ch_id, p->header[1]);
/*
 *  0x0100 == FLU bit
Flushed Packet. If this bit is set it indicates that the packet was not handled by the
hardware because of a flush or abort operation.
 */
	can = pcie->can[ch_id];
	/* Ignore control packet ACK */
	if (p->header[0] & KVASER_PCIEFD_APACKET_CT)
		return 0;

	if (p->header[0] & KVASER_PCIEFD_APACKET_NACK) {
		kvaser_pciefd_handle_nack_packet(can, p);
		one_shot_fail = true;
	}

	if (p->header[0] & KVASER_PCIEFD_APACKET_FLU) {
		netdev_dbg(can->can.dev, "Packet was flushed\n");
	} else {
//	int echo_idx = p->header[0] & KVASER_PCIEFD_PACKET_SEQ_MSK;
//		int dlc = can_get_echo_skb(can->can.dev, echo_idx);
//
//  preliminary!! (fixed DLC until echo skb handling is ported)
		int dlc = 8;
		u8 count = ioread32(can->reg_base +
				    KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;

		(void)count;

/*		if (count < KVASER_PCIEFD_CAN_TX_MAX_COUNT &&
		    netif_queue_stopped(can->can.dev))
			netif_wake_queue(can->can.dev);
*/

		if (!one_shot_fail) {
			struct net_device_stats *stats = &can->can.dev->stats;

			stats->tx_bytes += dlc;
			stats->tx_packets++;
		}
	}

	return 0;
}

/* Handle an end-of-flush packet: signal flush completion to any
 * waiter on the channel's flush_comp.
 * Returns 0, or -EIO for an out-of-range channel id.
 */
static int kvaser_pciefd_handle_eflush_packet(struct kvaser_pciefd *pcie,
					      struct kvaser_pciefd_rx_packet *p)
{
	u8 chan = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;
	struct kvaser_pciefd_can *can;

	if (chan >= pcie->nr_channels)
		return -EIO;

	can = pcie->can[chan];

	if (!completion_done(&can->flush_comp))
		complete(&can->flush_comp);

	return 0;
}

/* Read and dispatch one packet from a completed DMA buffer.
 * dma_buf selects the buffer that finished (0 or 1).
 */
/* Read and dispatch one packet from DMA buffer dma_buf (0 or 1),
 * starting at word offset *start_pos.
 *
 * On success *start_pos is advanced to the next packet header;
 * a size word of 0 resets *start_pos to 0 (end of buffer).
 *
 * Returns 0 on success, -EIO on an unknown packet type or when the
 * consumed word count does not match the packet's size field.
 */
static int kvaser_pciefd_read_packet(struct kvaser_pciefd *pcie, int *start_pos,
				     int dma_buf)
{
	__le32 *buffer = pcie->dma_data[dma_buf];
	__le64 timestamp;
	struct kvaser_pciefd_rx_packet packet;
	struct kvaser_pciefd_rx_packet *p = &packet;
	u8 type;
	int pos = *start_pos;	/* running word offset in the buffer */
	int size;		/* packet size field from the buffer */
	int ret = 0;

	/* a size of 0 marks the end of the buffer */
	size = le32_to_cpu(buffer[pos++]);
	if (!size) {
		*start_pos = 0;
		return 0;
	}

	p->header[0] = le32_to_cpu(buffer[pos++]);
	p->header[1] = le32_to_cpu(buffer[pos++]);

#if 1 /* even if we don't need this timestamp for now, pos must be incremented */
	/* Read 64-bit timestamp */
	memcpy(&timestamp, &buffer[pos], sizeof(__le64));
	pos += 2;
	p->timestamp = le64_to_cpu(timestamp);
#endif
	/* the packet type selects the handler below */
	type = (p->header[1] >> KVASER_PCIEFD_PACKET_TYPE_SHIFT) & 0xf;

// pr_info(" dma buf %d\n", dma_buf);
// can_showpackagetype(type);
	switch (type) {
	case KVASER_PCIEFD_PACK_TYPE_DATA:
		ret = kvaser_pciefd_handle_data_packet(pcie, p, &buffer[pos]);
		/* payload words follow the header except for RTR frames */
		if (!(p->header[0] & KVASER_PCIEFD_RPACKET_RTR)) {
			u8 data_len;

			data_len = dlc2len(p->header[1] >>
					       KVASER_PCIEFD_RPACKET_DLC_SHIFT & 0x0f);
			pos += DIV_ROUND_UP(data_len, 4);
		}
		break;

	case KVASER_PCIEFD_PACK_TYPE_ACK:
		ret = kvaser_pciefd_handle_ack_packet(pcie, p);
		break;

	case KVASER_PCIEFD_PACK_TYPE_STATUS:
		ret = kvaser_pciefd_handle_status_packet(pcie, p);
		break;

	case KVASER_PCIEFD_PACK_TYPE_ERROR:
		ret = kvaser_pciefd_handle_error_packet(pcie, p);
		break;

	case KVASER_PCIEFD_PACK_TYPE_EFRAME_ACK:
		ret = kvaser_pciefd_handle_eack_packet(pcie, p);
		break;

	case KVASER_PCIEFD_PACK_TYPE_EFLUSH_ACK:
		ret = kvaser_pciefd_handle_eflush_packet(pcie, p);
		break;

	case KVASER_PCIEFD_PACK_TYPE_ACK_DATA:
	case KVASER_PCIEFD_PACK_TYPE_BUS_LOAD:
	case KVASER_PCIEFD_PACK_TYPE_TXRQ:
		dev_info(&pcie->pci->dev,
			 "Received unexpected packet type 0x%08X\n", type);
		break;

	default:
		dev_err(&pcie->pci->dev, "Unknown packet type 0x%08X\n", type);
		ret = -EIO;
		break;
	}

	if (ret)
		return ret;

	/* Position does not point to the end of the package,
	 * corrupted packet size?
	 */
	if ((*start_pos + size) != pos)
		return -EIO;

	/* Point to the next packet header, if any */
	*start_pos = pos;
	return ret;
}

/* dma_buf DMA packet done, buffer 0 or buffer 1 */
/* Drain DMA buffer dma_buf (0 or 1): read packets until an error,
 * a position wrap back to 0, or the end of the buffer is reached.
 * Returns the last kvaser_pciefd_read_packet() result.
 */
static int kvaser_pciefd_read_buffer(struct kvaser_pciefd *pcie, int dma_buf)
{
	int offset = 0;
	int rc;

	do {
		rc = kvaser_pciefd_read_packet(pcie, &offset, dma_buf);
	} while (rc == 0 && offset > 0 && offset < KVASER_PCIEFD_DMA_SIZE);

	return rc;
}

static int kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie)
{
	u32 irq;
// printk("got receive FIFO IRQ\n");

	irq = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG);

// printk("irq = 	0x%08x\n", irq);


	/* DMA packet done, buffer 0 DPD0 */
	if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0) {
		kvaser_pciefd_read_buffer(pcie, 0);
		/* Reset DMA buffer 0 */
		iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0,
			  pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
	}

	/* DMA packet done, buffer 1 DPD1 */
	if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1) {
		kvaser_pciefd_read_buffer(pcie, 1);
		/* Reset DMA buffer 1 */
		iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1,
			  pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
	}

	if (irq & KVASER_PCIEFD_SRB_IRQ_DOF0 ||
	    irq & KVASER_PCIEFD_SRB_IRQ_DOF1 ||
	    irq & KVASER_PCIEFD_SRB_IRQ_DUF0 ||
	    irq & KVASER_PCIEFD_SRB_IRQ_DUF1)
		dev_err(&pcie->pci->dev, "DMA IRQ error 0x%08X\n", irq);

	iowrite32(irq, pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG);
	return 0;
}


/*
 * If the TX FIFO is empty, the previous frame was sent successfully:
 * copy that frame into all open rx queues (self reception).
 *
 * After that, check the software queue for anything else to transmit.
 */
static int kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can)
{
msg_fifo_t   *RxFifo;
msg_fifo_t   *TxFifo;
struct timeval  timestamp;	/* oder raus aus rx und tx in ten int eintritt */
unsigned long irq_flags;		/* irq save */		
u32 irq;
u32 dlc;
int minor;
int nwords;	/* number of 4 byte words to be written to tx data FIFO */
u8 count;
struct kvaser_pciefd_tx_packet packet;
// struct kvaser_pciefd *pcie;
int seq;


	irq = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
	minor = can->minor;
	get_timestamp(minor, &timestamp);

printk(" transmit interrupt 0x%08x\n", irq);
// can_showstat(can->minor);
// udelay(10000);

	if (irq & KVASER_PCIEFD_KCAN_IRQ_TOF)
		netdev_err(can->can.dev, "Tx FIFO overflow\n");

	if (irq & KVASER_PCIEFD_KCAN_IRQ_TFD) {
		/* Transmit Buffer Flush Done */
		u8 count = ioread32(can->reg_base +
				    KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;

// pr_info(" tx buffer flash done: packets count %d\n", count);
		if (count == 0)
			iowrite32(KVASER_PCIEFD_KCAN_CTRL_EFLUSH,
				  can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG);
	}

	if (irq & KVASER_PCIEFD_KCAN_IRQ_BPP)
		netdev_err(can->can.dev,
			   "Fail to change bittiming, when not in reset mode\n");

	if (irq & KVASER_PCIEFD_KCAN_IRQ_FDIC)
		netdev_err(can->can.dev, "CAN FD frame in CAN mode\n");

	if (irq & KVASER_PCIEFD_KCAN_IRQ_ROF)
		netdev_err(can->can.dev, "Rx FIFO overflow\n");

	/* is this the right place now for do the can4linux tx int handling ? */
	if (irq & KVASER_PCIEFD_KCAN_IRQ_TE) {
printk("IRQ TX FIFO empty - look for more \n");
	    /* TX FIFO empty */
	    // unsigned int id;
	    RxFifo = &rx_buf[minor][0];
	    TxFifo = &tx_buf[minor];

	    /* use time stamp sampled with last INT
	     * last_tx_object[] is global */
	    last_tx_object[minor].timestamp = timestamp;

	    /* depending on the number of open processes
	    * the TX data has to be copied in different
	    * rx fifos
	    */

	    int rx_fifo;
	    for (rx_fifo = 0; rx_fifo < CAN_MAX_OPEN; rx_fifo++) {
//printk("selfreception irxfifo %d\n", selfreception[minor][rx_fifo]);
		if (selfreception[minor][rx_fifo] != 0) {
			/* for every rx fifo */
			if (can_waitflag[minor][rx_fifo] == 1) {
			    /* this FIFO is in use */
			    pr_info("self copy to [%d][%d]\n", minor, rx_fifo);

			    /*
			    * Don't copy the message in the receive queue
			    * of the process that sent the message unless
			    * this process requested selfreception.
			    */
			    if ((last_tx_object[minor].cob == rx_fifo)
				&& (selfreception[minor][rx_fifo] == 0)) {
			
				pr_info("CAN[%d][%d] Don't copy message in my queue\n",
					minor, rx_fifo);

				continue;
			    }

#ifdef VERBOSE
			    pr_info(
				"CAN[%d][%d] Copy message from %d in queue id 0x%lx 0x%x\n",
					minor, rx_fifo,
					last_tx_object[minor].cob,
					last_tx_object[minor].id,
					last_tx_object[minor].data[0]);
#endif
			    /* prepare buffer to be used */
			    RxFifo = &rx_buf[minor][rx_fifo];

			    /*
			    prinfo("ISR[%d] dlc= %d flags= 0x%03x\n",
				minor, last_tx_object[minor].length,
				last_tx_object[minor].flags);
			    */
	
			    /* copying into the receive queue is like receiving it directly
				from CAN.
				Take care here of the data bytes length and DLC code CAN FD
				in classic CAN dlc == length
				with CAN FD that is different.
			    */
			    memcpy(
				(void *)&RxFifo->data[RxFifo->head],
				(void *)&last_tx_object[minor],
				sizeof(canmsg_t));
			    /* correct .length fill to next fitting CAN FD frame length */
			    RxFifo->data[RxFifo->head].length =
			    dlc2len(RxFifo->data[RxFifo->head].length);
	
			    /* Mark message as 'self sent/received' */
			    RxFifo->data[RxFifo->head].flags |= MSG_SELF;
	
			    /* increment write index */
			    RxFifo->status = BUF_OK;
			    ++(RxFifo->head);
			    RxFifo->head %= MAX_BUFSIZE;
	
			    if (RxFifo->head == RxFifo->tail) {
				pr_err("CAN[%d][%d] RX: FIFO overrun\n",
					minor, rx_fifo);
				RxFifo->status = BUF_OVERRUN;
			    }
			    /*---------- kick the select() call  -*/
			    /* This function will wake up all processes
				that are waiting on this event queue,
				that are in interruptible sleep
			    */
			    wake_up_interruptible(&can_wait[minor][rx_fifo]);
			} /* this FIFO is in use */
		    }
	    } /* end for loop filling all rx-fifos */

	    if (TxFifo->free[TxFifo->tail] == BUF_EMPTY) {
		    /* TX FIFO empty, nothing more to sent */
		    /* pr_info("TXE\n"); */
		    TxFifo->status = BUF_EMPTY;
		    TxFifo->active = 0;
		    /* This function will wake up all processes
		    that are waiting on this event queue,
		    that are in interruptible sleep
		    */
		    wake_up_interruptible(&canout_wait[minor]);
		    goto tx_done;
	    }
	    
	    memcpy(
		    (void *)&last_tx_object[minor],
		    (void *)&TxFifo->data[TxFifo->tail],
		    sizeof(canmsg_t));
/* something like can_send_message() here */

// printk("send next message <==================\n");
	   /* data is in (TxFifo->data[TxFifo->tail]).*  */
	   seq = can->echo_idx;
	   /* To send a message, the TX FIFO must be filled
	   * kvaser uses for this the tx package struct
	   * fill in message id, message data, .... */
	
	/* prepare tx packet */
	memset(&packet, 0, sizeof(packet));
	/* message id */
	packet.header[0] = (TxFifo->data[TxFifo->tail]).id;
	/* RTR */
	if ((TxFifo->data[TxFifo->tail].flags) & MSG_RTR)
	    packet.header[0] |= KVASER_PCIEFD_RPACKET_RTR;
	/* extended id frame */
	if ((TxFifo->data[TxFifo->tail].flags) & MSG_EXT)
	    packet.header[0] |= KVASER_PCIEFD_RPACKET_IDE;
	/* DLC */
	dlc = len2dlc((TxFifo->data[TxFifo->tail]).length); 
	packet.header[1] |= (dlc & 0x0f) << KVASER_PCIEFD_RPACKET_DLC_SHIFT;

	packet.header[1] |= seq & KVASER_PCIEFD_PACKET_SEQ_MSK;
/* handle CAN FD Flags  BRS and ESI */

	if ((TxFifo->data[TxFifo->tail].flags) & MSG_CANFD)
		packet.header[1] |= KVASER_PCIEFD_RPACKET_FDF;

/* /handle CAN FD Flags  BRS and ESI */
	if ((TxFifo->data[TxFifo->tail].flags) & MSG_RBRS)
		packet.header[1] |= KVASER_PCIEFD_RPACKET_BRS;

	nwords = DIV_ROUND_UP((TxFifo->data[TxFifo->tail]).length, 4);

	/* sequence counter, 8bit, , used in the acknowlege receive */
	/* not used yet */
	packet.header[1] |= 0; /* |= seq & KVASER_PCIEFD_PACKET_SEQ_MSK; */


	/* copy data bytes in the packet struct */
	memcpy(packet.data,
		(TxFifo->data[TxFifo->tail]).data, (TxFifo->data[TxFifo->tail]).length);

	spin_lock_irqsave(&can->echo_lock, irq_flags);

	/* Write header to fifo */
	iowrite32(packet.header[0],
		  (void *)proc_base[minor] + KVASER_PCIEFD_KCAN_FIFO_REG);
	iowrite32(packet.header[1],
		  (void *)proc_base[minor] + KVASER_PCIEFD_KCAN_FIFO_REG);

	if (nwords) {
	    u32 data_last = ((u32 *)packet.data)[nwords - 1];

	    /* Write data to fifo, except last word */
	    iowrite32_rep((void *)proc_base[minor] +
			  KVASER_PCIEFD_KCAN_FIFO_REG, packet.data,
			  nwords - 1);
	    /* Write last word to end of fifo */
	    __raw_writel(data_last, (void *)proc_base[minor] +
			 KVASER_PCIEFD_KCAN_FIFO_LAST_REG);
	} else {
	    /* Complete write to fifo */
	    __raw_writel(0, (void *)proc_base[minor] +
			 KVASER_PCIEFD_KCAN_FIFO_LAST_REG);
	}

	count = ioread32((void *)proc_base[minor] + KVASER_PCIEFD_KCAN_TX_NPACKETS_REG);

/* ------------------------- */
	    TxFifo->free[TxFifo->tail] = BUF_EMPTY; /* now this entry is EMPTY */
	    ++(TxFifo->tail);
	    TxFifo->tail %= MAX_BUFSIZE;

	    /* leave critical section */
	    /* pr_info("CAN[%d][%d] leave\n", minor, rx_fifo); */
	// local_irq_restore(flags);
	    spin_unlock_irqrestore(&can->echo_lock, irq_flags);
	}

tx_done:
	/* reset all interrupt sources */
	iowrite32(irq, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
	return 0;
}

/* Top level (shared) PCI interrupt handler for one board.
 * Dispatches the shared-receive-buffer interrupt and the per-channel
 * transmit interrupts, then acknowledges the board interrupt bits.
 */
static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev)
{
	struct kvaser_pciefd *pcie = (struct kvaser_pciefd *)dev;
	u32 board_irq;
	int i;

	board_irq = ioread32(pcie->reg_base + KVASER_PCIEFD_IRQ_REG);
// printk("got Irq 0x%08x\n", board_irq);

	/* not one of ours - the IRQ line is shared */
	if (!(board_irq & KVASER_PCIEFD_IRQ_ALL_MSK)) {
// printk("=========> irq 1,  got unknown irq 0x%08x\n", board_irq);
		return IRQ_NONE;
	}

	/* shared receive buffer (DMA) interrupt */
	if (board_irq & KVASER_PCIEFD_IRQ_SRB) {
		kvaser_pciefd_receive_irq(pcie);
// printk("=========> irq 2\n");
	}

	for (i = 0; i < pcie->nr_channels; i++) {
		if (!pcie->can[i]) {
			dev_err(&pcie->pci->dev,
				"IRQ mask points to unallocated controller\n");
			break;
// printk("=========> irq 3\n");
		}

		/* Check that mask matches channel (i) IRQ mask */
		if (board_irq & (1 << i)) {
// printk("=========> irq 4\n");
// udelay(10000);
			kvaser_pciefd_transmit_irq(pcie->can[i]);
		}
	}

	/* acknowledge all board level interrupt bits we saw */
	iowrite32(board_irq, pcie->reg_base + KVASER_PCIEFD_IRQ_REG);
// printk("=========> irq 5: irq handled\n");
	return IRQ_HANDLED;
}

/* Tear down all allocated CAN controllers of a board:
 * disable their interrupts and stop the PWM output.
 */
static void kvaser_pciefd_teardown_can_ctrls(struct kvaser_pciefd *pcie)
{
	int i;

	for (i = 0; i < pcie->nr_channels; i++) {
		struct kvaser_pciefd_can *can = pcie->can[i];

		if (!can)
			continue;

		iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
		kvaser_pciefd_pwm_stop(can);
//		free_candev(can->can.dev);
	}
}



/* /end SocketCAN defined functions */

/* this is a global interface board structure of the kvaser PCIe FD */
//struct kvaser_pciefd *pcie;

/*
 * This function scans the PCI bus for Kvaser interface boards.
 * If one is found, the board specific data structures are initialized:
 * the board interrupt is assigned to an irq_handler,
 * the CAN controller registers are mapped into kernel memory,
 * and the board DMA is initialized (and more).
 *
 * Note: it may currently not work for more than one interface board,
 * especially if an error is detected while initializing the first board.
 *
 */
/* Scan the PCI bus for supported Kvaser PCIe FD boards and initialize
 * each one (BARs, DMA, CAN controllers, interrupt handler).
 *
 * Returns the number of CAN channels found (>= 0), or a negative
 * errno when the initialization of a board failed.
 *
 * NOTE(review): can_pcidev[0] is overwritten for every board found,
 * so only the last board is remembered - multi-board support is
 * incomplete (see also the function comment above).
 */
int pcimod_scan(void)
{
struct	pci_dev *pdev = NULL;
int	candev = 0;			/* number of devices found so far */
int	board  = 0;			/* number of boards found so far */
int	minor  = -1;			/* need for pr_info() */

int err;
struct kvaser_pciefd *pcie;

DBGIN();

    for_each_pci_dev(pdev) { /* loop through all PCI interface boards */
	if(
	   (pdev->vendor == PCI_VENDOR_CAN_KVASER_ID2)
	&& (
	    (pdev->device == PCI_DEVICE_CAN_KVASER_ID2_4)
	||  (pdev->device == PCI_DEVICE_CAN_KVASER_ID2_2)
	   )
	  ) {
	    board++;
	    pr_info("  found new KVASER PCI CAN FD board %d", board);
	    pr_info("  found KVASER-PCICAN_FD: %s : %s\n",
	    		pci_pretty_name(pdev), pci_name(pdev));

/* include code from  kvaser_pciefd_probe(struct pci_dev *pdev,
			       const struct pci_device_id *id)
*/

	/* devm_kzalloc() is managed kzalloc().
	 * The memory allocated with managed functions is associated with the device.
	 * When the device is detached from the system
	 * or the driver for the device is unloaded,
	 * that memory is freed automatically.
    	 */
	pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;
/*
pr_info("sizeof pcie %d, %08lX\n",
		(int)sizeof(*pcie),
		sizeof(*pcie));
	// pcie is the pointer to the pcie board structure
pr_info("address of pcie 0x%p\n", pcie);
*/


// pr_info("--> 01\n");

	/* function to save a pointer to a local
	 * dynamically allocated device context
	 * in the device probe callback
	 * and then retrieve it back with pci_get_drvdata
	 * in the device remove callback and do a proper cleanup
	 * of the context. */
	pci_set_drvdata(pdev, pcie);
	pcie->pci = pdev;
	/* index counts can channels, but pdev is the same for all
	 * CAN channels found on ONE board */
	can_pcidev[0] = pdev;

	err = pci_enable_device(pdev);
	if (err)
		return err;
/* --- */
	err = pci_request_regions(pdev, KVASER_PCIEFD_DRV_NAME);
	if (err) {
	    pr_err("Check if another driver is using this ressource\n");
	    goto err_disable_pci;
	}
	/* map BAR 0 - all board and controller registers */
	pcie->reg_base = pci_iomap(pdev, 0, 0);
	if (!pcie->reg_base) {
	    err = -ENOMEM;
	    goto err_release_regions;
	}

// pr_info("--> 02\n");
	err = kvaser_pciefd_setup_board(pcie);
	if (err)
		goto err_pci_iounmap;

// pr_info("--> 03\n");
	err = kvaser_pciefd_setup_dma(pcie);
	if (err)
		goto err_pci_iounmap;

// pr_info("--> 04\n");
	pci_set_master(pdev);


// pr_info("--> 05\n");
	err = kvaser_pciefd_setup_can_ctrls(pdev, pcie);
	if (err)
		goto err_teardown_can_ctrls;

// pr_info("--> 06\n");
	/* acknowledge any stale DMA-done bits, then enable the
	 * DMA done and over-/underflow interrupts */
	iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1,
		  pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG);

	iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1
		 |
		  KVASER_PCIEFD_SRB_IRQ_DOF0 | KVASER_PCIEFD_SRB_IRQ_DOF1 |
		  KVASER_PCIEFD_SRB_IRQ_DUF0 | KVASER_PCIEFD_SRB_IRQ_DUF1 ,
		  pcie->reg_base + KVASER_PCIEFD_SRB_IEN_REG);
// pr_info("--> 07\n");
	/* Reset IRQ handling, expected to be off before */
	iowrite32(KVASER_PCIEFD_IRQ_ALL_MSK,
		  pcie->reg_base + KVASER_PCIEFD_IRQ_REG);
	iowrite32(KVASER_PCIEFD_IRQ_ALL_MSK,
		  pcie->reg_base + KVASER_PCIEFD_IEN_REG);

// pr_info("--> 08\n");
	/* Ready the DMA buffers */
	iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0,
		  pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
	iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1,
		  pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);

// pr_info("--> 09\n");
	err = request_irq(pcie->pci->irq, kvaser_pciefd_irq_handler,
			  IRQF_SHARED, KVASER_PCIEFD_DRV_NAME, pcie);
	if (err)
		goto err_teardown_can_ctrls;

	candev += pcie->nr_channels;
	}
    }
    return candev;

/* ============= */
	/* error unwinding - only reachable via the goto labels below */

// err_free_irq:
	free_irq(pcie->pci->irq, pcie);

err_teardown_can_ctrls:
	kvaser_pciefd_teardown_can_ctrls(pcie);
	iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);
	pci_clear_master(pdev);

err_pci_iounmap:
	pci_iounmap(pdev, pcie->reg_base);

err_release_regions:
	pci_release_regions(pdev);

err_disable_pci:
	pci_disable_device(pdev);

	DBGOUT();
	return err;
}

/* Called from __init,  once when driver is loaded
   set up physical addresses, irq number
   and initialize clock source for the CAN module

   take care it will be called only once
   because it is called for every CAN channel out of MAX_CHANNELS

   n is the CAN channel number
*/
/* Called from __init, once when driver is loaded:
 * scan the PCI bus for boards and lock down the sysctl entries that
 * the PCI BIOS fixes (IRQ number, base address, access mode).
 *
 * It is called for every CAN channel out of MAX_CHANNELS, so
 * already_called guards against repeated initialization.
 *
 * n is the CAN channel number (unused here).
 * Returns 0 on success, a negative errno when no board was found
 * or the scan failed.
 */
int init_board_hw(int n)
{
static int already_called;
int ret;
int minor = -1;			/* required by the DBG* macros */

	DBGIN();
	ret = 0;
	if (!already_called && virtual == 0) {
		/* make some sysctl entries read only
		 * IRQ number
		 * Base address
		 * and access mode
		 * are fixed and provided by the PCI BIOS
		 */
		can_sysctl_table[CAN_SYSCTL_IRQ - 1].mode = 0444;
		can_sysctl_table[CAN_SYSCTL_BASE - 1].mode = 0444;

		ret = pcimod_scan();
		/* FIX: pcimod_scan() returns a negative errno on failure;
		 * the old `if (ret)` treated that as success */
		if (ret > 0) {
			pr_info("  pci scan success, found %d CAN\n", ret);
			ret = 0;
		} else {
			pr_err("  no valid PCI CAN found");
			if (ret == 0)
				ret = -EIO;	/* keep scan's errno otherwise */
		}

		already_called = 1;
	}
	DBGOUT();
	return ret;
}

/* this is called when the driver is unloaded
 * should give free all allocated ressources
 * */
void exit_board_hw(void)
{
int minor = -1;
struct kvaser_pciefd *pcie;
// u32 mode;

    DBGIN()

    /* more or less what is done in SocketCAN kvaser_pciefd_remove() */
// struct kvaser_pciefd *pcie;

	pcie = pci_get_drvdata(can_pcidev[0]);
// pr_info("address of pcie 0x%p\n", pcie);
// pr_info("address to pcie 0x%p\n", &pcie);
// pr_info("& can 0x%p\n", &(pcie->can[0]->reg_base));
// pr_info("proc_base[0] 0x%p\n", (void *)proc_base[0]);

//  Kann weg hier, ist sowieso nur von can0 
//    mode = ioread32((void *)proc_base[0] + KVASER_PCIEFD_KCAN_MODE_REG);
//    DBGPRINT(DBG_DATA, (KERN_CONT "CAN mode 0x%08X = %d", mode, mode & 0xFF));

	/* Turn off IRQ generation */
	iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);
	iowrite32(KVASER_PCIEFD_IRQ_ALL_MSK,
		  pcie->reg_base + KVASER_PCIEFD_IRQ_REG);
	iowrite32(0, pcie->reg_base + KVASER_PCIEFD_IEN_REG);
	
	free_irq(pcie->pci->irq, pcie);
	pci_clear_master(pcie->pci);

// err_pci_iounmap:
	pci_iounmap(pcie->pci, pcie->reg_base);

// err_release_regions:
	pci_release_regions(pcie->pci);

// err_disable_pci:
	// disable_pci_interrupt(pci_resource_start(can_pcidev[minor], 0));
	pci_disable_device(pcie->pci);
}

#if 0
/* Legacy AMCC S5920 bridge interrupt helpers, disabled.
 * Their bodies were never implemented (both are empty apart from a
 * commented-out trace); PCIe interrupt masking is done elsewhere via
 * the controller registers.  Kept for reference only.
 */
inline void disable_pci_interrupt(unsigned int base)
{
unsigned long tmp;

    /* pr_info("disable pci int add 0x%x, 0x%x", base, base + S5920_INTCSR); */
    /* Disable PCI interrupts from card */
}

inline void enable_pci_interrupt(unsigned int base)
{
unsigned long tmp;

    /* Enable PCI interrupts from card */
}
#endif





/* Reset all CAN controllers on the Kvaser-PCI board.
 *
 * NOTE(review): not implemented for this board -- the function only
 * logs that it did nothing.  The 'address' argument is unused.
 */
void reset_kvaser_pci(unsigned long address)
{
int minor = -1;	/* NOTE(review): looks unused, but the DBG* macros
		 * may reference it -- confirm before removing */
    DBGIN();
    pr_err("did nothing here\n");
    DBGOUT();
}




/* Board-specific per-channel initialization, called for each minor.
 *
 * For this board only the controller's address-range bookkeeping needs
 * to be set up here:
 *  - no address-space request is necessary (the Kvaser PCICAN uses
 *    io-addresses; can_base holds an unsigned char * and CAN_PORT_IO
 *    only uses proc_base[]),
 *  - the IRQ was already requested in pcimod_scan().
 *
 * Always returns 0.
 */
int can_vendor_init(int minor)
{
    DBGIN();

    /* record the size of this channel's register window */
    can_range[minor] = CAN_RANGE;

    DBGOUT();
    return 0;
}




/* Per-channel interrupt acknowledge hook.
 * Intentionally empty: nothing board-specific to clear for this
 * hardware.  The 'minor' argument is unused.
 */
void board_clear_interrupts(int minor)
{
	/* nothing to do */
}

/* Release the IRQ of CAN channel 'minor'.
 *
 * For this driver the shared board interrupt is freed once for the
 * whole board at unload time (see exit_board_hw()), so this
 * per-channel hook is a no-op that always reports success.  The old
 * per-channel IRQ bookkeeping is kept below, disabled, for reference.
 *
 * Always returns 0.
 */
int can_freeirq(int minor, int irq )
{
    DBGIN();
#if 0
    /* legacy per-channel bookkeeping from older boards, disabled */
    irq_requested[minor] = 0;
    pr_info(" Free IRQ %d  minor %d", irq, minor);

    /* Disable Interrupt on the PCI board only if all channels
     * are not in use */
    if(    irq_requested[0] == 0
        && irq_requested[1] == 0 
        && irq_requested[2] == 0 
        && irq_requested[3] == 0 )
    /* and what happens if we only have 2 channels on the board,
       or we have minor == 4, that's a second board ??) */
    {
	disable_pci_interrupt(pci_resource_start(can_pcidev[minor], 0));
    }
    /* free irq is called with this implementation at unloading the driver */
//    free_irq(irq, &can_minors[minor]);
#endif
    DBGOUT();
    return 0;
}
/* called from close() */
int can_release(int minor)
{
struct kvaser_pciefd *pcie;
struct kvaser_pciefd_can *can; 
int ret = 0;

    DBGIN();
    /* get controller data */
    pcie = pci_get_drvdata(can_pcidev[minor]);
    can  =  pcie->can[minor];

    /* stop CAN FD transceiver * Transmitter power control  */ 

    // may be enough when done in can_stopchip() ?
    // kvaser_pciefd_pwm_stop(can);

/* function from 
 * int kvaser_pciefd_stop(struct net_device *netdev)  */

    /* Don't interrupt ongoing flush */
    if (!completion_done(&can->flush_comp))
	    kvaser_pciefd_start_controller_flush(can);

    if (!wait_for_completion_timeout(&can->flush_comp,
				     KVASER_PCIEFD_WAIT_TIMEOUT)) {
	    netdev_err(can->can.dev, "Timeout during stop\n");
	    ret = -ETIMEDOUT;
    } else {
	    iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
	    del_timer(&can->bec_poll_timer);
    }
    DBGOUT()
    return ret;
}

