// Linux Device Driver Template/Skeleton with mmap
// Kernel Module
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/if_arp.h>
#include <asm/8xx_immap.h>
#include <asm/pgtable.h>
#include <asm/mpc8xx.h>
#include <asm/bitops.h>
#include <asm/uaccess.h>
#include <asm/commproc.h>
/* my hdlc.h with board specific defines */
#include "hdlc.h"
#include <linux/module.h>
#include <linux/init.h>
#include <linux/version.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/slab.h>

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
#include <linux/wrapper.h>
#endif

#define SKELETON_MAJOR 240
#define SKELETON_NAME "skeleton"
#define CASE1 1
#define CASE2 2
#define CASE3 3
#define CASE4 4

#define MODE_HDLC   0
#define MODE_TRANS  1

#define TX_TIMEOUT  (2*HZ)

#ifdef CONFIG_ENET_BIG_BUFFERS
#define CPM_ENET_RX_PAGES   32
#define CPM_ENET_RX_FRSIZE  2048
#define CPM_ENET_RX_FRPPG   (PAGE_SIZE / CPM_ENET_RX_FRSIZE)
#define RX_RING_SIZE        (CPM_ENET_RX_FRPPG * CPM_ENET_RX_PAGES)
#define TX_RING_SIZE        64  /* Must be power of two */
#define TX_RING_MOD_MASK    63  /*   for this to work */
#else
#define CPM_ENET_RX_PAGES   4
#define CPM_ENET_RX_FRSIZE  2048
#define CPM_ENET_RX_FRPPG   (PAGE_SIZE / CPM_ENET_RX_FRSIZE)
#define RX_RING_SIZE        (CPM_ENET_RX_FRPPG * CPM_ENET_RX_PAGES)
#define TX_RING_SIZE        8   /* Must be power of two */
#define TX_RING_MOD_MASK    7   /*   for this to work */
#endif

/* The CPM stores dest/src/type, data, and checksum for receive packets.
 */
#define PKT_MAXBUF_SIZE     1518
#define PKT_MINBUF_SIZE     64
#define PKT_MAXBLR_SIZE     1520



#ifdef CONFIG_TQM860L
/* TQM860L uses SCC2 */
#define CPM_CR_ENET CPM_CR_CH_SCC2
#define PROFF_ENET  PROFF_SCC2
#define SCC_ENET    1       /* Index, not number! */
#define CPMVEC_ENET CPMVEC_SCC2
#else
/* IDS uses SCC4 */
/*
#define CPM_CR_ENET CPM_CR_CH_SCC4
#define PROFF_ENET  PROFF_SCC4
#define SCC_ENET    3       // Index, not number! 
#define CPMVEC_ENET CPMVEC_SCC4
*/
#define CPM_CR_ENET CPM_CR_CH_SCC3
#define PROFF_ENET  PROFF_SCC3
#define SCC_ENET    2       // Index, not number! 
#define CPMVEC_ENET CPMVEC_SCC3
#endif

/* flags in the buffer descriptor not defined anywhere else */
#define BD_SC_CT 0x01
#define BD_SC_DE 0x80
#define BD_SC_FIRST 0x400
#define BD_SC_STATS (BD_SC_BR | BD_SC_FR | BD_SC_PR | BD_SC_NAK | BD_SC_CD | BD_SC_OV | BD_SC_DE)


#define DEBUG_INTR 1
#define DEBUG_INIT 2
#define DEBUG_READ 4
#define DEBUG_WRIT 8
#define DEBUG_IOCT 16
#define DEBUG_RX 32
#define DEBUG_TX   64
#define IDSARP_USES_ETHADDR

/* Per-channel driver state for the SCC HDLC controller.
 * One instance is kmalloc'd in scc_init(); it tracks the DPRAM buffer
 * descriptor rings, the in-flight TX skbuffs, and RX buffer virtual
 * addresses (the CPM only knows their DMA addresses).
 */
struct hdlc_enet_private {
    /* addresses in DPRAM so we can free them */
    uint rxdp_addr, txdp_addr;
    /* The saved address of a sent-in-place packet/buffer, for skfree(). */
    struct  sk_buff* tx_skbuff[TX_RING_SIZE];
    ushort  skb_cur;        /* next tx_skbuff slot to fill */
    ushort  skb_dirty;      /* next tx_skbuff slot to free after TX completes */

    /* CPM dual port RAM relative addresses.
    */
    cbd_t   *rx_bd_base;        /* Address of Rx and Tx buffers. */
    cbd_t   *tx_bd_base;
    cbd_t   *cur_rx, *cur_tx;   /* The next free ring entry */
    cbd_t   *dirty_tx;      /* The ring entries to be free()ed. */
    scc_t   *sccp;          /* SCC register block for this channel */
    /*
     * Virtual addresses of the RX buffers because we can't do a __va()
     * on them anymore.
     */
    unsigned char *rx_vaddr[RX_RING_SIZE];
    struct  net_device_stats stats;     /* standard netdev counters */
    uint    tx_full;        /* nonzero while the TX ring has no free BD */
    spinlock_t lock;        /* guards the TX ring / stats in IRQ context */
    /* keep track of my HADDR */
    unsigned char myhaddr;
};

/* Module-wide state and forward declarations. */
static int devel_debug = 0; /*MAC ADDED*/  /* bitmask of DEBUG_* flags */
static int eaddl       = 0; /*MAC ADDED*/
static int drvmode     = MODE_HDLC;        /* current framing: MODE_HDLC or MODE_TRANS */

static int scc_init(void);
static void hdlc_enet_interrupt(void *dev_id, struct pt_regs *regs);
static unsigned char *rx_pages[CPM_ENET_RX_PAGES];  /* RX DMA pages, kept so a module could free them */
int static switch_mode( int mode );

/* Simple read/write scratch channel exposed through /dev/skeleton. */
static unsigned int counter = 0;    /* number of valid bytes in string[] */
static char string [128];           /* backing buffer for read()/write() */
static int data;                    /* single int exchanged via ioctl CASE1/CASE2 */

//#define USEASCII

#ifdef USEASCII
static char *kmalloc_area = NULL;   /* page-aligned start of the mmap'able region */
static char *kmalloc_ptr = NULL;    /* raw kmalloc pointer (what we kfree) */
#else
static unsigned int *kmalloc_area = NULL;   /* page-aligned start of the mmap'able region */
static unsigned int *kmalloc_ptr = NULL;    /* raw kmalloc pointer (what we kfree) */
#endif

#define LEN (64*1024)   /* size of the mmap'able buffer */
unsigned long virt_addr;

DECLARE_WAIT_QUEUE_HEAD(skeleton_wait);     /* readers sleep here until data arrives */
static int data_not_ready = 0;              /* gate for skeleton_read() */

// open function - called when the "file" /dev/skeleton is opened in userspace
// open function - called when the "file" /dev/skeleton is opened in userspace
static int skeleton_open (struct inode *inode, struct file *file) {
	// No per-open state is kept; just record the event.
	// The flags passed to open() (e.g. O_NONBLOCK) could be inspected
	// here to disable the interruptible_sleep_on in skeleton_read.
	printk("skeleton_open\n");
	return 0;
}

// close function - called when the "file" /dev/skeleton is closed in userspace  
// close function - called when the "file" /dev/skeleton is closed in userspace  
static int skeleton_release (struct inode *inode, struct file *file) {
	// Nothing to tear down; log the close and report success.
	printk("skeleton_release\n");
	return 0;
}

// read function called when from /dev/skeleton is read
// read function called when from /dev/skeleton is read
// Copies the buffered bytes out to userspace, honoring the caller's
// buffer size. Returns the number of bytes delivered, 0 on empty,
// or -EFAULT if the user buffer is bad.
static ssize_t skeleton_read (struct file *file, char *buf,
		size_t count, loff_t *ppos) {
	size_t len;
	int err;

	// check if we have data - if not, sleep
	// wake up in interrupt_handler
	while (data_not_ready) {
		interruptible_sleep_on(&skeleton_wait);
	}

	if (counter == 0)	// counter is unsigned; "<= 0" was misleading
		return 0;
	// BUG FIX: the original copied `counter` bytes regardless of `count`,
	// overrunning a user buffer smaller than the pending data.
	len = (counter < count) ? counter : count;
	err = copy_to_user(buf, string, len);
	if (err != 0)
		return -EFAULT;
	counter -= len;		// leave any undelivered bytes for the next read
	return len;
}

// write function called when to /dev/skeleton is written
// write function called when to /dev/skeleton is written
// Copies up to sizeof(string) bytes from userspace into the scratch
// buffer. Returns the number of bytes accepted or -EFAULT.
static ssize_t skeleton_write (struct file *file, const char *buf,
		size_t count, loff_t *ppos) {
	int err;

	// BUG FIX: the original copied `count` bytes into the fixed
	// 128-byte `string` with no bound check - a kernel buffer overflow
	// for any write larger than 128 bytes.
	if (count > sizeof(string))
		count = sizeof(string);
	err = copy_from_user(string, buf, count);
	if (err != 0)
		return -EFAULT;
	counter += count;
	// Each write starts at offset 0, so never claim more valid bytes
	// than the buffer can actually hold.
	if (counter > sizeof(string))
		counter = sizeof(string);
	return count;
}

// ioctl - I/O control
// ioctl - I/O control
// CASE1/CASE2 exchange a single int with userspace via `data`;
// CASE3/CASE4 flip the SCC between HDLC and transparent framing.
static int skeleton_ioctl(struct inode *inode, struct file *file,
		unsigned int cmd, unsigned long arg) {
	switch (cmd) {
	case CASE1:	/* for writing data to arg */
		if (copy_from_user(&data, (int *)arg, sizeof(int)))
			return -EFAULT;
		return 0;
	case CASE2:	/* for reading data from arg */
		if (copy_to_user((int *)arg, &data, sizeof(int)))
			return -EFAULT;
		return 0;
	case CASE3:
		printk("\n %ld:HDLC mode....(Driver)",arg);
		switch_mode(MODE_HDLC);
		return 0;
	case CASE4:
		printk("\n %ld:TRANS mode....(Driver)",arg);
		switch_mode(MODE_TRANS);
		return 0;
	default:
		return -EINVAL;
	}
}

#ifndef VMALLOC_VMADDR
#define VMALLOC_VMADDR(x) ((unsigned long)(x))
#endif

// From: http://www.scs.ch/~frey/linux/memorymap.html
// Walk the kernel page tables and resolve `address` (e.g. a vmalloc or
// kmalloc pointer) to its directly-mapped kernel-segment address.
// Returns NULL (0) when no present page backs the address.
volatile void *virt_to_kseg(volatile void *address) {
	pgd_t *pgd; pmd_t *pmd; pte_t *ptep, pte;
	unsigned long va, ret = 0UL;
	va=VMALLOC_VMADDR((unsigned long)address);
	/* get the page directory. Use the kernel memory map. */
	pgd = pgd_offset_k(va);
	/* check whether we found an entry */
	if (!pgd_none(*pgd)) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
		/* get the page middle directory */
		pmd = pmd_offset(pgd, va);
#else
		/* 2.6 inserted the PUD level between PGD and PMD */
		pud_t *pud = pud_offset(pgd, va);		
		pmd = pmd_offset(pud, va);
#endif
		/* check whether we found an entry */
		if (!pmd_none(*pmd)) {
			/* get a pointer to the page table entry */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
			ptep = pte_offset(pmd, va);
			pte = *ptep;
#else
			ptep = pte_offset_map(pmd, va);
			pte = *ptep;
			/* BUG FIX: pte_offset_map() may take a highmem kmap;
			 * the original never released it with pte_unmap(). */
			pte_unmap(ptep);
#endif
			/* check for a valid page */
			if (pte_present(pte)) {
				/* get the address the page is referring to */
				ret = (unsigned long)page_address(pte_page(pte));
				/* add the offset within the page to the page address */
				ret |= (va & (PAGE_SIZE -1));
			}
		}
	}
	return((volatile void *)ret);
}

static int skeleton_mmap(struct file * filp, struct vm_area_struct * vma) {
	int ret;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
	ret = remap_page_range(vma->vm_start,
			virt_to_phys((void*)((unsigned long)kmalloc_area)),
			vma->vm_end-vma->vm_start,
			PAGE_SHARED);
//			       vma->vm_page_prot);
#else
        ret = remap_pfn_range(vma,
               vma->vm_start,
               virt_to_phys((void*)((unsigned long)kmalloc_area)) >> PAGE_SHIFT,
               vma->vm_end-vma->vm_start,
               PAGE_SHARED);
//               vma->vm_page_prot); 
#endif
	if(ret != 0) {
		return -EAGAIN;
	}
	return 0;
}

// define which file operations are supported
// define which file operations are supported
// (designated initializers zero every member not named, so the explicit
// NULL entries of the original are unnecessary)
struct file_operations skeleton_fops = {
	.owner   = THIS_MODULE,
	.read    = skeleton_read,
	.write   = skeleton_write,
	.ioctl   = skeleton_ioctl,
	.mmap    = skeleton_mmap,
	.open    = skeleton_open,
	.release = skeleton_release,
};

// initialize module
static int skeleton_init_module (void) {
	int i;
#ifndef USEASCII
	int tmp, tmp2;
#endif
	printk("initializing module\n");
	
	i = register_chrdev (SKELETON_MAJOR, SKELETON_NAME, &skeleton_fops);
	if (i != 0) return - EIO;
	
	// reserve memory with kmalloc - Allocating Memory in the Kernel
	kmalloc_ptr = kmalloc(LEN + 2 * PAGE_SIZE, GFP_KERNEL);
	if (!kmalloc_ptr) {
		printk("kmalloc failed\n");
		return 0;
	}
#ifdef USEASCII
	kmalloc_area = (char *)(((unsigned long)kmalloc_ptr + PAGE_SIZE -1) & PAGE_MASK);
#else
	kmalloc_area = (unsigned int *)(((unsigned long)kmalloc_ptr + PAGE_SIZE -1) & PAGE_MASK);
#endif
	for (virt_addr=(unsigned long)kmalloc_area; virt_addr < (unsigned long)kmalloc_area + LEN;
		virt_addr+=PAGE_SIZE) {
			// reserve all pages to make them remapable
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
			mem_map_reserve(virt_to_page(virt_addr));
#else
			SetPageReserved(virt_to_page(virt_addr));
#endif
	}
	printk("kmalloc_area at 0x%p (phys 0x%lx)\n", kmalloc_area,
		virt_to_phys((void *)virt_to_kseg(kmalloc_area)));

#ifdef USEASCII
	// fill allocated memory with 0123456789 ascii
	for( i = 48; i < 58; i++) {
		kmalloc_ptr[i-48] = (char)i;
	}
	i = 0;
	kmalloc_ptr[58-48] = (char)i;
#else
	// fill allocated memory with integers
	tmp = sizeof(int);
	for( i = 0; i < (10 * tmp); i = i + tmp) {
		kmalloc_ptr[i] = (unsigned int)i;
      
		tmp2 = (unsigned int)kmalloc_ptr[i];
		printk("kmalloc_ptr[%d]=%d\n", i, tmp2);
	}
#endif

	scc_init();

	return 0;
}

// close and cleanup module
static void skeleton_cleanup_module (void) {
	printk("cleaning up module\n");
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
	for (virt_addr=(unsigned long)kmalloc_area; virt_addr < (unsigned long)kmalloc_area + LEN;
		virt_addr+=PAGE_SIZE) {
			// clear all pages
			ClearPageReserved(virt_to_page(virt_addr));
	}
#endif
	kfree(kmalloc_ptr);
	unregister_chrdev (SKELETON_MAJOR, SKELETON_NAME);
}

static int scc_init(void)
{
    unsigned char *ba;
    dma_addr_t  mem_addr;
    struct hdlc_enet_private *cep;
    int i, j, k;
    bd_t        *bd;
    volatile    cbd_t       *bdp;
    volatile    cpm8xx_t    *cp;
    volatile    scc_t       *sccp;
    volatile    scc_hdlc_t  *sup;
    volatile    iop8xx_t    *iop;

    cp = cpmp;  /* Get pointer to Communication Processor */
    iop = (iop8xx_t *)&(((volatile immap_t *)IMAP_ADDR)->im_ioport);

    bd = (bd_t *)__res;

    /* Allocate some private information.
    */
    cep = (struct hdlc_enet_private *)kmalloc(sizeof(*cep), GFP_KERNEL);
    if (cep == NULL)
        return -ENOMEM;

    __clear_user(cep, sizeof(*cep));
    spin_lock_init(&cep->lock);

    /* Get pointer to SCC area in parameter RAM.
    */
    sup = (scc_hdlc_t *)(&cp->cp_dparam[PROFF_ENET]);

    /* And another to the SCC register area.
    */
    sccp = (volatile scc_t *)(&cp->cp_scc[SCC_ENET]);
    cep->sccp = (scc_t *)sccp;      /* Keep the pointer handy */

    /* Disable receive and transmit just in case.
    */
    sccp->scc_gsmrl &= ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT);

//  0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
//  1 1 x x x x x x x x 1  1  x  x  x  x

    iop->iop_papar = iop->iop_papar | 0xc030;

    /* port A Data Direction reg */
    iop->iop_padir = iop->iop_padir & 0x3fcf;

    cp->cp_sicr = ((cp->cp_sicr & 0xff00ffff) | 0x00370000);

    /* Allocate space for the buffer descriptors in the DP ram.
     * These are relative offsets in the DP ram address space.
     * Initialize base addresses for the buffer descriptors.
     */
    cep->rxdp_addr = i = m8xx_cpm_dpalloc(sizeof(cbd_t) * RX_RING_SIZE);
    sup->shd_genscc.scc_rbase = i;
    cep->rx_bd_base = (cbd_t *)&cp->cp_dpmem[i];

    cep->txdp_addr = i = m8xx_cpm_dpalloc(sizeof(cbd_t) * TX_RING_SIZE);
    sup->shd_genscc.scc_tbase = i;
    cep->tx_bd_base = (cbd_t *)&cp->cp_dpmem[i];

    cep->dirty_tx = cep->cur_tx = cep->tx_bd_base;
    cep->cur_rx = cep->rx_bd_base;

    /* Issue init Rx BD command for SCC.
     * Manual says to perform an Init Rx parameters here.  We have
     * to perform both Rx and Tx because the SCC may have been
     * already running.
     * In addition, we have to do it later because we don't yet have
     * all of the BD control/status set properly.
    cp->cp_cpcr = mk_cr_cmd(CPM_CR_ENET, CPM_CR_INIT_RX) | CPM_CR_FLG;
    while (cp->cp_cpcr & CPM_CR_FLG);
     */

    /* Initialize function code registers for big-endian.
    */
    sup->shd_genscc.scc_rfcr = SCC_EB;
    sup->shd_genscc.scc_tfcr = SCC_EB;

    /* Set maximum bytes per receive buffer.
     * This appears to be an Ethernet frame size, not the buffer
     * fragment size.  It must be a multiple of four.
     */
    sup->shd_genscc.scc_mrblr = PKT_MAXBLR_SIZE;

    /* Setup CRC generator values for HDLC */
    sup->scc_c_mask = 0x0000F0B8;
    sup->scc_c_pres = 0x0000FFFF;

    /* Initialize all error counters to 0 */
    sup->scc_disfc = 0;
    sup->scc_crcec = 0;
    sup->scc_abtsc = 0;
    sup->scc_nmarc = 0;
    sup->scc_retrc = 0;

    /* Set maximum frame length size */
    sup->scc_mflr = PKT_MAXBLR_SIZE;

    /* set to 1 for per frame processing change later if needed */
    sup->scc_rfthr = 1;

    sup->scc_hmask  = 0xfd;

	/* broadcast */
    sup->scc_haddr1 = 0x80;
	
    sup->scc_haddr2 = MKIDSADDR(cep->myhaddr);
    sup->scc_haddr3 = sup->scc_haddr2;
    sup->scc_haddr4 = sup->scc_haddr2;
    
    /* Now allocate the host memory pages and initialize the
     * buffer descriptors.
     */
    bdp = cep->tx_bd_base;
    for (i = 0; i < TX_RING_SIZE; i++) {
        /* Initialize the BD for every fragment in the page.
        */
        bdp->cbd_sc = 0;
        bdp->cbd_bufaddr = 0;
        bdp++;
    }

    /* Set the last buffer to wrap.
    */
    bdp--;
    bdp->cbd_sc |= BD_SC_WRAP;

    bdp = cep->rx_bd_base;
    k = 0;
    for (i = 0; i < CPM_ENET_RX_PAGES; i++) {

        /* Allocate a page.
        */
        ba = (unsigned char *)consistent_alloc(GFP_KERNEL, PAGE_SIZE, &mem_addr);
#ifdef MODULE
        rx_pages[i] = ba;
#endif

        /* Initialize the BD for every fragment in the page.
        */
        for (j = 0; j < CPM_ENET_RX_FRPPG; j++) {
            bdp->cbd_sc = BD_SC_EMPTY | BD_SC_INTRPT;
            bdp->cbd_bufaddr = mem_addr;
            cep->rx_vaddr[k++] = ba;
            ba += CPM_ENET_RX_FRSIZE;
            mem_addr += CPM_ENET_RX_FRSIZE;
            bdp++;
        }
    }

    /* Set the last buffer to wrap.
    */

    bdp--;
    bdp->cbd_sc |= BD_SC_WRAP;

    /* Let's re-initialize the channel now.  We have to do it later
     * than the manual describes because we have just now finished
     * the BD initialization.
     */
    cp->cp_cpcr = mk_cr_cmd(CPM_CR_ENET, CPM_CR_INIT_TRX) | CPM_CR_FLG;
    while (cp->cp_cpcr & CPM_CR_FLG);

    cep->skb_cur = cep->skb_dirty = 0;

    sccp->scc_gsmrh = 0x00000000;
    sccp->scc_gsmrl = 0x20000000; // to set rx in rising edge VP
    if (devel_debug & DEBUG_INIT)
        printk("%s: NOT using Busmode\n", __FUNCTION__);
    /* NOF=2, RTE=0, DRT=1, BUS=0 */
    //sccp->scc_pmsr = 0x0040;
    sccp->scc_pmsr = 0x0000; // Need to rx while tx

    /* Disable all interrupts and clear all pending
     * events.
     */
    sccp->scc_sccm = 0;
    sccp->scc_scce = 0xffff;
    sccp->scc_dsr  = 0x7e7e;

    if (devel_debug & DEBUG_INIT)
        printk("%s: NOT using Busmode\n", __FUNCTION__);
    /* NOF=2, RTE=0, DRT=1, BUS=0 */
    //sccp->scc_pmsr = 0x0040;
    sccp->scc_pmsr = 0x0000; // Need to rx while tx

    /* Install our interrupt handler.
    */
    cpm_install_handler(CPMVEC_ENET, hdlc_enet_interrupt, &skeleton_fops);

    /* enable interrupts */
    /* txbuf empty, rxbuf full, tx underrun, bsy */
    sccp->scc_sccm = 0x1f;

    /* And last, enable the transmit and receive processing.
    */

	sccp->scc_gsmrl |= (SCC_GSMRL_ENR | SCC_GSMRL_ENT);
    printk("%s: HDLC ENET Version 0.3 on SCC%d, ", "skeleton", SCC_ENET+1);
    
    return 0;
}                                              

/* The interrupt handler.
 * This is called from the CPM handler, not the MPC core interrupt.
 */
#define SCCM_RF 0x08
/* CPM interrupt handler for the SCC channel.
 * Acknowledges the SCC event register, dispatches RX frames, reclaims
 * completed TX buffer descriptors (restarting the transmitter after
 * CTS-lost/underrun), and counts BSY (no-RX-buffer) events.
 * NOTE(review): dev_id is registered in scc_init() as &skeleton_fops but
 * is cast to a struct net_device * here - confirm what dev_id actually
 * carries before trusting dev->priv.
 */
static void hdlc_enet_interrupt (void *dev_id, struct pt_regs *regs)
{
    struct net_device *dev = dev_id;
    volatile struct hdlc_enet_private *cep;
    volatile cbd_t *bdp;
    ushort int_events;
    int must_restart;

    if (devel_debug & DEBUG_INTR)
        printk ("%s\n", __FUNCTION__);
    cep = (struct hdlc_enet_private *) dev->priv;

    /* Get the interrupt events that caused us to be here.
     */
    int_events = cep->sccp->scc_scce;
    /* NOTE(review): this printk is unconditional (not gated on
     * devel_debug) and fires on every interrupt. */
    printk ("scce %04x\n", int_events);

    if (devel_debug & DEBUG_INTR)
        printk ("scce %#x\n", int_events);
    /* writing the events back acknowledges (clears) them */
    cep->sccp->scc_scce = int_events;
    must_restart = 0;

    /* Handle receive event in its own function.
     * NOTE(review): hdlc_enet_rx has no prototype visible in this file -
     * presumably defined elsewhere; verify.
     */
    if (int_events & (SCCM_RF | SCCM_RX))
        hdlc_enet_rx (dev_id);

    /* Transmit OK, or non-fatal error.  Update the buffer descriptors.
     */
    if (int_events & SCCM_TX) {
        spin_lock (&cep->lock);
        bdp = cep->dirty_tx;
        if (devel_debug & DEBUG_INTR)
            printk ("TX:cbd_sc %#x\n", bdp->cbd_sc);

        /* walk all completed (not-READY) descriptors */
        while ((bdp->cbd_sc & BD_SC_READY) == 0) {
            /* ring empty: caught up with cur_tx and not full */
            if ((bdp == cep->cur_tx) && (cep->tx_full == 0))
                break;

            if ((bdp->cbd_sc & (BD_SC_CT | BD_SC_UN)) != 0) {
                /* CTS lost - map to carrier lost */
                if ((bdp->cbd_sc & BD_SC_CT) != 0) {
                    if (devel_debug & DEBUG_INTR)
                        printk("TX CTS lost - must restart\n");
                    cep->stats.tx_carrier_errors++;
                }
                /* underrun */
                if ((bdp->cbd_sc & BD_SC_UN) != 0) {
                    if (devel_debug & DEBUG_INTR)
                        printk("TX UNDERRUN - must restart\n");
                    cep->stats.tx_fifo_errors++;
                }
                /* both errors cause the transmitter to stop */
                must_restart = 1;
                cep->stats.tx_errors++;
            }

            cep->stats.tx_packets++;

            /* Free the sk buffer associated with this last transmit.
             */
            dev_kfree_skb_irq (cep->tx_skbuff[cep->skb_dirty]);
            cep->skb_dirty =
                (cep->skb_dirty + 1) & TX_RING_MOD_MASK;

            /* Update pointer to next buffer descriptor to be transmitted.
             */
            if (bdp->cbd_sc & BD_SC_WRAP)
                bdp = cep->tx_bd_base;
            else
                bdp++;

            /* Since we have freed up a buffer, the ring is no longer
             * full.
             */
            if (cep->tx_full) {
                cep->tx_full = 0;
                if (netif_queue_stopped (dev))
                    netif_wake_queue (dev);
            }

            cep->dirty_tx = (cbd_t *) bdp;
        }

        if (must_restart) {
            volatile cpm8xx_t *cp;

            /* Some transmit errors cause the transmitter to shut
             * down.  We now issue a restart transmit.  Since the
             * errors close the BD and update the pointers, the restart
             * _should_ pick up without having to reset any of our
             * pointers either.
             */
            cp = cpmp;
            cp->cp_cpcr = mk_cr_cmd (CPM_CR_ENET, CPM_CR_RESTART_TX) |
                    CPM_CR_FLG;
            /* spin until the CPM consumes the command */
            while (cp->cp_cpcr & CPM_CR_FLG)
                ;
        }
        spin_unlock (&cep->lock);
    }

    /* Check for receive busy, i.e. packets coming but no place to
	* put them.  This "can't happen" because the receive interrupt
     * is tossing previous frames.
     */
    if (int_events & SCCM_BSY) {
        cep->stats.rx_dropped++;
        printk ("HDLC ENET: BSY can't happen.\n");
    }

    return;
}

/* Switch the SCC between HDLC framing and transparent (raw bit-pipe)
 * mode. Disables the channel, reprograms GSMR-H (and optionally the
 * CTS/CD handshake pins), then re-enables RX/TX.
 * Returns the mode now in effect (updates the global `drvmode`).
 */
int static switch_mode( int mode ) {

    volatile    cpm8xx_t    *cp;
    volatile    scc_t       *sccp;
    volatile    iop8xx_t    *iop;

    iop = (iop8xx_t *)&(((volatile immap_t *)IMAP_ADDR)->im_ioport);

    cp = cpmp;  /* Get pointer to Communication Processor */
    sccp = (volatile scc_t *)(&cp->cp_scc[SCC_ENET]);
    /* Disable receive and transmit just in case.
    */
    sccp->scc_gsmrl &= ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT);
    /* Disable all interrupts and clear all pending
     * events.
     */
    sccp->scc_sccm = 0;
    sccp->scc_scce = 0xffff;

    if(mode == MODE_TRANS) {
        drvmode=MODE_TRANS;
        printk("switching to trans mode\n");
//       sccp->scc_gsmrh = 0x00001800; //set ttx and trx to make trans
         sccp->scc_gsmrh = 0x00001888; //set ttx and trx to make trans

//       iop->iop_pcpar = iop->iop_pcpar & 0xfcf7;
#ifdef HW_HANDSHAKE
            //temp gpio used to set cd and cts status   
         iop->iop_pcpar = (iop->iop_pcpar & 0xfff7);
         iop->iop_pcdir = (iop->iop_pcdir | 0x0008);
         iop->iop_pcso  = (iop->iop_pcso  & 0xfff7);
            //setting up for CTS and CD for SCC3
         iop->iop_pcpar = (iop->iop_pcpar | 0x0300);
         iop->iop_pcdir = (iop->iop_pcdir & 0xfcff);
         iop->iop_pcso  = (iop->iop_pcso  | 0x0300);
#endif
	     sccp->scc_dsr  = 0x8080;
         sccp->scc_sccm = 0x1f;
         sccp->scc_gsmrl |= (SCC_GSMRL_ENR | SCC_GSMRL_ENT);

#ifdef HW_HANDSHAKE
         /* pulse the GPIO-driven CD line to kick the remote side */
         printk("pcdat = %04x\n",iop->iop_pcdat);
         iop->iop_pcdat = iop->iop_pcdat | 0x0008;
         printk("pcdat = %04x\n",iop->iop_pcdat);
         udelay(20);
         iop->iop_pcdat = iop->iop_pcdat & 0xfff7;
         printk("pcdat = %04x\n",iop->iop_pcdat);
#endif
    }
    else {
        drvmode=MODE_HDLC;
        printk("switching to hdlc_mode\n");
        sccp->scc_gsmrh = 0x00000000; //reset ttx and trx to make hdlc

#ifdef HW_HANDSHAKE
        iop->iop_pcpar = iop->iop_pcpar & 0xfff7;
        iop->iop_pcdir = iop->iop_pcdir | 0x0008;
        iop->iop_pcdat = iop->iop_pcdat | 0x0008;
#endif
        /* And last, enable the transmit and receive processing. */
        sccp->scc_dsr  = 0x7e7e;  /* restore the HDLC flag pattern in DSR */
        sccp->scc_sccm = 0x1f;
        sccp->scc_gsmrl |= (SCC_GSMRL_ENR | SCC_GSMRL_ENT);
    }
    return drvmode;
}


module_init(skeleton_init_module);
module_exit(skeleton_cleanup_module);
MODULE_AUTHOR("www.captain.at");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Linux Device Driver Template with MMAP");
