#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <linux/if_ether.h>
#include <net/net_namespace.h>

#include "type1_type2_defs.h"
#include "config.h"

MODULE_AUTHOR("xiejiawu_nL");
MODULE_LICENSE("GPL");
/* An SRIO packet is carried in an ordinary sk_buff. */
typedef struct sk_buff SRIO_PKT;
#define SRIO 0xf4f7         /* custom ethertype for SRIO-over-ethernet frames */
#define SRIO_CORE_OPEN 0    /* success return value of srio_open() */
#define SRIO_EXEC_SUCC 0    /* generic success return value */
#define MAX_STORED_PKT 100  /* capacity of srio_sv.pkts[] */
#define NETLINK_TEST 31     /* netlink protocol number for the user-space link */
#define NL_DEST_PID 100     /* netlink portid of the user-space peer */
#define CORE_CLOSED 0       /* srio_core.state: core torn down */
#define CORE_OPENED 1       /* srio_core.state: core initialized and running */

/*
 * Reassembly buffer for type1 messages that arrive from the MAC split into
 * PACKET_SIZE fragments (see the 'more' flag handling in nl_sendtouser()).
 */
struct type1_buffer_t
{
	u_int8_t buffer[NLMSG_SPACE(MAX_PAYLOAD)]; /* accumulated fragment bytes */
	u_int16_t offset;    /* bytes accumulated so far == next write position */
	u_int16_t total_len; /* nlmsg_len recorded from the first fragment */
};

/* Fragment reassembly buffer; allocated in core_netlink_init(). */
struct type1_buffer_t *type1_buffer;

//------------------------functions declaration---------------------------//
/* Serializes netlink_rcv_skb() processing in nl_recfromuser(). */
static DEFINE_MUTEX(nl_srio_mutex);
void nl_recfromuser(struct sk_buff *skb);
int nl_sendtouser(struct sk_buff *skb);
int core_netlink_init(void);
void netlink_release(void);
void nl_send_packet_pending(struct sk_buff *skb);
int nl_sendtodriver(struct sk_buff *skb, struct nlmsghdr *nlh);

int srio_open(struct net_device *dev);
int srio_close(struct net_device *dev);
struct net_device_stats *srio_dev_get_stats(struct net_device *dev);
int srio_dev_set_config(struct net_device *dev, struct ifmap *p_ifmap);
//int srio_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
int srio_tx(struct sk_buff *skb, struct net_device *dev); /* also called directly from nl_sendtodriver() */
void srio_send_packet_pending(SRIO_PKT *pkptr, struct net_device *dev, u16 protocol);
void srio_send_packet(unsigned long data);
void srio_tx_time_out(struct net_device *dev);
void srio_sendtouser(unsigned long data);
void srio_rx_pending(SRIO_PKT *pkptr, struct net_device *dev, u16 protocol);
int srio_recv(struct sk_buff *skb, struct net_device *dev1, struct packet_type *pktype, struct net_device *dev2);
void dev_init(struct net_device *dev);
int srio_init_module(void);
void srio_release(void);
//-----------------------------------------------------------------------//
struct sock *nl_sk = NULL;   /* kernel-side netlink socket */
void *type2 = NULL;          /* scratch buffer for type1->type2 conversion */
/* Netlink kernel socket configuration: nl_recfromuser() handles input. */
struct netlink_kernel_cfg cfg = {
	.input = nl_recfromuser,
	.cb_mutex = &nl_srio_mutex,
};
/* Protocol handler: steal frames with the SRIO ethertype off the eth device. */
struct packet_type pk_t = {
	.type = __constant_htons(SRIO),
	.func = srio_recv,
};
/*
 * Central driver state: the virtual SRIO netdev, the underlying ethernet
 * device, and the pending-packet queues with their tasklets.
 * Queues and tasklets are kmalloc'd in dev_init() and freed in srio_release().
 */
typedef struct SRIO_CORE
{
	int state;       /* CORE_OPENED / CORE_CLOSED */
	void *dev;       /* the virtual SRIO net_device */
	SRIO_PKT *pkptr; /* NOTE(review): set to 0 in dev_init() and never used after */
	spinlock_t lock; //lock the core
	struct sk_buff_head *tx_queue;    /* frames pending transmission to eth */
	struct sk_buff_head *rx_queue;    /* frames pending delivery to user space */
	struct sk_buff_head *nl_tx_queue; //try
	struct tasklet_struct *tx_tsklt;  /* runs srio_send_packet() */
	struct tasklet_struct *rx_tsklt;  /* runs srio_sendtouser() */
	struct tasklet_struct *nl_tx_tsklt; /* NOTE(review): never tasklet_init'd in this file */
	//struct tasklet_struct*  do_ioctl_tsklt;
	spinlock_t tx_queue_lock; //lock the pending tx queue
	spinlock_t rx_queue_lock;
	spinlock_t nl_tx_queue_lock;
	void *dev_eth; /* underlying ethernet net_device (dev_get_by_name ref) */

} SRIO_CORE;
static SRIO_CORE srio_core;

/* net_device_ops for the virtual SRIO interface. */
const struct net_device_ops srio_netdev_ops = {
	.ndo_open = srio_open,
	.ndo_stop = srio_close,
	.ndo_start_xmit = srio_tx,
	.ndo_get_stats = srio_dev_get_stats,
	//.ndo_do_ioctl = srio_dev_ioctl,
	.ndo_set_config = srio_dev_set_config,
	.ndo_tx_timeout = srio_tx_time_out,
};

/* Per-device statistics / supervision counters (mostly for testing). */
typedef struct srio_sv
{
	int packet_count;
	int recv_count;
	unsigned int interval;
	bool racomplete;
	u16 sfn;       /* system frame number */
	u16 subframeN; /* subframe number */
	SRIO_PKT *pkts[MAX_STORED_PKT]; //for 5ms send to srio test
	u16 stored_pkt_count;			//for 5ms send to srio test
	u32 recv_upper_pkt_num;
	u32 recv_pkt_from_enb;
	char *mem_zero;
	u32 pre_pkt_no;
	u32 recv_interval;
	u32 send_interval;
	u32 lost_pkt_count;
	u32 send_count;
	u32 recvFromUp;
	u32 sendToLower;
	//u32 sendToDsp;
} srio_sv;
/* Private area embedded in the net_device (see alloc_netdev in srio_init_module). */
typedef struct srio_priv
{
	struct net_device_stats stats;
	srio_sv *srio_sv_ptr; /* kmalloc'd in dev_init(), freed in srio_release() */
} srio_priv; //defined here but not fully used yet
//-----------------------------------------------------------------------//
/* ndo_get_stats callback: expose the stats embedded in the private area. */
struct net_device_stats *srio_dev_get_stats(struct net_device *dev)
{
	srio_priv *private_area = (srio_priv *)netdev_priv(dev);

	return &private_area->stats;
}
/* ndo_set_config callback: no runtime reconfiguration supported; accept and ignore. */
int srio_dev_set_config(struct net_device *dev, struct ifmap *p_ifmap)
{
	return 0;
}

int srio_open(struct net_device *dev)
{
	unsigned long flags;
	srio_core.dev_eth = dev_get_by_name(&init_net, net_dev_name); //transmix data to eth driver
	netif_start_queue(dev);
	//pk_t.dev=dev_get_by_name(&init_net,"eth0");
	dev_add_pack(&pk_t);
	//srio_core open
	if (srio_core.state == CORE_CLOSED)
	{
		spin_lock_irqsave(&srio_core.lock, flags);
		//Initialize queues
		skb_queue_head_init(srio_core.tx_queue);
		skb_queue_head_init(srio_core.rx_queue);
		skb_queue_head_init(srio_core.nl_tx_queue);
		spin_lock_init(&srio_core.tx_queue_lock);
		spin_lock_init(&srio_core.rx_queue_lock);
		spin_lock_init(&srio_core.nl_tx_queue_lock);
		//Initialize tasklets
		tasklet_init(srio_core.tx_tsklt, srio_send_packet, 0);
		tasklet_init(srio_core.rx_tsklt, srio_sendtouser, 0);
		//tasklet_init(srio_core.nl_tx_tsklt, nl_sendtodriver, 0);
		srio_core.state = CORE_OPENED;
		spin_unlock_irqrestore(&srio_core.lock, flags);
		printk("core has opened.\n");
	}

	return SRIO_CORE_OPEN;
}
int srio_close(struct net_device *dev)
{
	unsigned long flags;
	if (srio_core.state == CORE_CLOSED)
		return SRIO_EXEC_SUCC;
	spin_lock_irqsave(&srio_core.lock, flags);
	tasklet_kill(srio_core.tx_tsklt);
	tasklet_kill(srio_core.rx_tsklt);
	tasklet_kill(srio_core.nl_tx_tsklt);
	if (srio_core.dev_eth)
	{
		dev_put(srio_core.dev_eth);
	}
	while (!skb_queue_empty(srio_core.tx_queue))
	{
		kfree_skb(skb_dequeue(srio_core.tx_queue));
	}
	while (!skb_queue_empty(srio_core.nl_tx_queue))
	{
		kfree_skb(skb_dequeue(srio_core.nl_tx_queue));
	}
	srio_core.state = CORE_CLOSED;
	spin_unlock_irqrestore(&srio_core.lock, flags);
	dev_remove_pack(&pk_t);
	netif_stop_queue(dev);
	return SRIO_EXEC_SUCC;
}

/*
 * srio_tx - fragment an upper-layer packet into PACKET_SIZE chunks and queue
 * each chunk (prefixed with a 1-byte 'more' flag and an ethernet header) for
 * transmission on the underlying eth device.
 *
 * 'more' flag: 3 = unfragmented, 2 = first fragment, 1 = middle fragment,
 * 0 = last fragment.  The last chunk may be shorter than PACKET_SIZE.
 */
int srio_tx(SRIO_PKT *pkptr, struct net_device *dev)
{
	unsigned int size;
	SRIO_PKT *ptr_cpy;
	struct ethhdr *head_ptr;
	u_int8_t more;
	u_int16_t raw_len = pkptr->len;

	while (pkptr->len != 0)
	{
		//payload fits in one chunk: no fragmentation needed
		if (raw_len <= PACKET_SIZE)
		{
			more = 3;
			size = pkptr->len;
		}
		//remaining length equals total length (nothing sent yet): first fragment
		else if (pkptr->len == raw_len)
		{
			more = 2;
			size = PACKET_SIZE;
		}
		//middle fragment
		else if (pkptr->len > PACKET_SIZE)
		{
			more = 1;
			size = PACKET_SIZE;
		}
		//last fragment
		else
		{
			more = 0;
			size = pkptr->len;
		}
		printk("more: %d, size:%d, pkptr->len:%d, raw_len:%d\n", more, size, pkptr->len, raw_len);

		//the last chunk may be shorter than PACKET_SIZE, but we still allocate a fixed size
		ptr_cpy = (SRIO_PKT *)dev_alloc_skb(sizeof(more) + PACKET_SIZE);
		if (ptr_cpy == NULL)
		{
			/* BUG fix: the original fell through and dereferenced the
			 * NULL skb (memset on ptr_cpy->head).  Drop the remainder
			 * and report the allocation failure instead. */
			printk(KERN_ERR "srio_tx: dev_alloc_skb failed, dropping packet\n");
			return -ENOMEM;
		}
		memset(ptr_cpy->head, 0, sizeof(more) + size + sizeof(struct ethhdr));
		//advance tail by 'size' + flag so the payload can be filled in
		skb_put(ptr_cpy, size + sizeof(more));
		//write the 'more' flag
		memcpy((void *)ptr_cpy->data, &more, sizeof(more));
		//copy the upper-layer payload chunk
		memcpy((void *)ptr_cpy->data + sizeof(more), pkptr->data, size);

		//build the ethernet header
		skb_push(ptr_cpy, ETH_HLEN);
		head_ptr = (struct ethhdr *)ptr_cpy->data;
		memcpy(head_ptr->h_dest, dst_addr, ETH_ALEN);
		memcpy(head_ptr->h_source, sour_addr, ETH_ALEN);
		head_ptr->h_proto = htons(SRIO);

		srio_send_packet_pending(ptr_cpy, srio_core.dev_eth, SRIO);

		skb_pull(pkptr, size); //this chunk has been consumed; advance data pointer
	}

	return 0;
}
//wait to send to eth when receive from netlink
/*
 * Queue a fully built frame for transmission on the underlying eth device
 * and kick the tx tasklet.
 * NOTE(review): the dev/protocol parameters are ignored; the global
 * srio_core.dev_eth and the SRIO ethertype are always used — confirm intended.
 */
void srio_send_packet_pending(SRIO_PKT *pkptr, struct net_device *dev, u16 protocol)
{
	unsigned long irq_flags;
	struct sk_buff *frame = (struct sk_buff *)pkptr;

	frame->dev = srio_core.dev_eth;
	frame->protocol = htons(SRIO);

	spin_lock_irqsave(&srio_core.tx_queue_lock, irq_flags);
	skb_queue_tail(srio_core.tx_queue, frame);
	spin_unlock_irqrestore(&srio_core.tx_queue_lock, irq_flags);

	tasklet_schedule(srio_core.tx_tsklt);
}
//send to eth by netlink after pending
void srio_send_packet(unsigned long data)
{

	SRIO_PKT *pkptr;
	unsigned long flags;
	int ret;
	while (1)
	{
		spin_lock_irqsave(&srio_core.tx_queue_lock, flags);
		if (skb_queue_empty(srio_core.tx_queue))
		{
			spin_unlock_irqrestore(&srio_core.tx_queue_lock, flags);
			break;
		}
		else
		{
			pkptr = skb_dequeue(srio_core.tx_queue);
			spin_unlock_irqrestore(&srio_core.tx_queue_lock, flags);
			ret = dev_queue_xmit(pkptr);
			if (ret != 0)
			{
				srio_close(srio_core.dev);
				printk(" srio send to eth fail ret:%d\n",ret);
			}
		}
	}

	return;
}

/* ndo_tx_timeout callback: intentionally a no-op for this device. */
void srio_tx_time_out(struct net_device *dev)
{
}
//wait to send to uplayer when receive from eth
void srio_rx_pending(SRIO_PKT *pkptr, struct net_device *dev, u16 protocol)
{
	struct sk_buff *skb;
	unsigned long flags;
	skb = (struct sk_buff *)pkptr;
	skb->protocol = protocol;
	skb->dev = dev;
	if (skb->protocol == htons(ETH_P_IP))
	{
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	struct ethhdr *eth;
	eth = (struct ethhdr *)eth_hdr(pkptr);
	int is_broadcast = 1;
	int i;
	printk("dst mac: ");
	for (i = 0; i < 6; i++){
		printk("%02x ",eth->h_dest[i]);
		if(eth->h_dest[i] != 0xff){
			is_broadcast = 0;
			break;
		}
	}
	printk("\n");
	if(is_broadcast == 1)
		return;

	spin_lock_irqsave(&srio_core.rx_queue_lock, flags);
	skb_queue_tail(srio_core.rx_queue, pkptr);
	spin_unlock_irqrestore(&srio_core.rx_queue_lock, flags);
	tasklet_schedule(srio_core.rx_tsklt);

	return;
}
//send to uplayer by netlink after pending
void srio_sendtouser(unsigned long data)
{
	SRIO_PKT *pkptr;
	unsigned long flags;

	while (1)
	{
		spin_lock_irqsave(&srio_core.rx_queue_lock, flags);
		if (skb_queue_empty(srio_core.rx_queue))
		{

			spin_unlock_irqrestore(&srio_core.rx_queue_lock, flags);
			break;
		}
		else
		{
			pkptr = skb_dequeue(srio_core.rx_queue);
			spin_unlock_irqrestore(&srio_core.rx_queue_lock, flags);
			nl_sendtouser(pkptr);
		}
	}
	return;
}
/*
 * srio_recv - packet_type handler for the SRIO ethertype: hand the frame to
 * the rx pending queue.  Always returns 0 (frame consumed).
 */
int srio_recv(SRIO_PKT *pkptr, struct net_device *dev1, struct packet_type *pktype, struct net_device *dev2)
{
	/* fix: removed unused local 'flags' (compiler warning) */
	if (pkptr != NULL)
	{
		srio_rx_pending(pkptr, srio_core.dev, SRIO);
	}
	return 0;
}

void dev_init(struct net_device *dev)
{
	srio_priv *priv;
	//dev->init=devinti;
	dev->tx_queue_len = 128; /* the queue length */
	dev->mtu = 1500;		 /* now we do not fragmentate the packet */
	dev->header_ops = 0;	 /* cancle eth_header function for not building ethnet mac header */
	dev->addr_len = 0;
	dev->hard_header_len = 0;
	dev->watchdog_timeo = 1000; /* the time unit is jiffy */
	dev->flags = IFF_BROADCAST | IFF_MULTICAST | IFF_NOARP;
	dev->netdev_ops = &srio_netdev_ops;
	core_netlink_init();
	//dev->features |= NETIF_F_NO_CSUM;
	priv = (srio_priv *)netdev_priv(dev);
	memset(priv, 0, sizeof(srio_priv));
	//init srio_core
	spin_lock_init(&srio_core.lock);
	srio_core.tx_queue = (struct sk_buff_head *)kmalloc(sizeof(struct sk_buff_head), GFP_ATOMIC);
	srio_core.rx_queue = (struct sk_buff_head *)kmalloc(sizeof(struct sk_buff_head), GFP_ATOMIC);
	srio_core.nl_tx_queue = (struct sk_buff_head *)kmalloc(sizeof(struct sk_buff_head), GFP_ATOMIC);
	srio_core.tx_tsklt = (struct tasklet_struct *)kmalloc(sizeof(struct tasklet_struct), GFP_ATOMIC);
	srio_core.rx_tsklt = (struct tasklet_struct *)kmalloc(sizeof(struct tasklet_struct), GFP_ATOMIC);
	srio_core.nl_tx_tsklt = (struct tasklet_struct *)kmalloc(sizeof(struct tasklet_struct), GFP_ATOMIC);
	srio_core.dev = dev;
	srio_core.dev_eth = 0;
	srio_core.pkptr = 0;
	priv->srio_sv_ptr = kmalloc(sizeof(srio_sv), GFP_ATOMIC);
	memset(priv->srio_sv_ptr, 0, sizeof(srio_sv));
	srio_core.state = CORE_CLOSED;

	return;
}
int srio_init_module(void)
{
	srio_priv *priv;
	const char *name = "SRIO%d";
	char dev_addr[6] = {0x01, 0x12, 0x34, 0x56, 0x78, 0x9a};
	struct net_device *dev = NULL;

	dev = alloc_netdev(sizeof(srio_priv), name, NET_NAME_UNKNOWN, dev_init);
	if (dev == NULL) /* allocation error */
	{
		return -1;
	}

	memcpy(dev->dev_addr, &dev_addr, 6);
	/* ---------ADD some private data into the net_device struct */
	priv = netdev_priv(dev);
	if (priv == NULL)
	{
		return -2;
	}

	/* register the net device */
	if (register_netdev(dev)) /* register error */
	{
		free_netdev(dev); /* release the struct net_device */
		dev = NULL;
		printk("register failled\n");
		return -3;
	}
	//printk("register success\n");
	return 0;
}
void srio_release(void)
{
	//struct net_device* dev;
	srio_priv *priv;
	unsigned long flags;
	if (srio_core.state != CORE_CLOSED)
	{
		spin_lock_irqsave(&srio_core.lock, flags);
		tasklet_kill(srio_core.tx_tsklt);
		tasklet_kill(srio_core.rx_tsklt);
		tasklet_kill(srio_core.nl_tx_tsklt);
		if (srio_core.dev_eth)
		{
			dev_put(srio_core.dev_eth);
		}
		while (!skb_queue_empty(srio_core.tx_queue))
		{
			kfree_skb(skb_dequeue(srio_core.tx_queue));
		}
		while (!skb_queue_empty(srio_core.rx_queue))
		{
			kfree_skb(skb_dequeue(srio_core.rx_queue));
		}
		while (!skb_queue_empty(srio_core.nl_tx_queue))
		{
			kfree_skb(skb_dequeue(srio_core.nl_tx_queue));
		}
		srio_core.state = CORE_CLOSED;
		spin_unlock_irqrestore(&srio_core.lock, flags);
	};
	priv = netdev_priv(srio_core.dev);
	kfree(priv->srio_sv_ptr);
	kfree(srio_core.tx_queue);
	kfree(srio_core.rx_queue);
	kfree(srio_core.nl_tx_queue);
	kfree(srio_core.tx_tsklt);
	kfree(srio_core.rx_tsklt);
	kfree(srio_core.nl_tx_tsklt);
	unregister_netdev(srio_core.dev);
	free_netdev(srio_core.dev);
	netlink_release();
}
//--------------------------------netlink----------------------------//
void nl_recfromuser(struct sk_buff *skb)
{
	unsigned long flags;

	struct nlmsghdr *nlh;
	int ret;

	void *packet_data = (void *)NLMSG_DATA(nlmsg_hdr(skb));
	u_int32_t ul_parameters_flag;
	memcpy(&ul_parameters_flag, packet_data, sizeof(u_int32_t));
	ul_parameters_flag = ntohl(ul_parameters_flag);
	//skb_pull(skb, sizeof(u_int32_t));
	if (ul_parameters_flag == 0)
	{
		printk("[nl_recfromuser] cl get normal data, send it\n");
		mutex_lock(&nl_srio_mutex);
		netlink_rcv_skb(skb, &nl_sendtodriver);
		mutex_unlock(&nl_srio_mutex);
	}
	else{
		printk("[nl_recfromuser] cl get ul parameters, drop it\n");
	}
}

/*
 * Queue an skb on the netlink tx pending queue and kick its tasklet.
 * NOTE(review): nl_tx_tsklt is never tasklet_init'd anywhere in this file
 * (the init call in srio_open() is commented out) — scheduling it would use
 * an uninitialized tasklet; confirm whether this path is actually reachable.
 */
void nl_send_packet_pending(struct sk_buff *skb)
{
	unsigned long irq_flags;

	skb->dev = srio_core.dev;
	skb->protocol = htons(SRIO);

	spin_lock_irqsave(&srio_core.nl_tx_queue_lock, irq_flags);
	skb_queue_tail(srio_core.nl_tx_queue, skb);
	spin_unlock_irqrestore(&srio_core.nl_tx_queue_lock, irq_flags);

	tasklet_schedule(srio_core.nl_tx_tsklt);
}
int nl_sendtodriver(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	if (skb)
	{
		srio_tx(skb, srio_core.dev);
		return 0;
	}
	return 0;
}

//cl add将type1格式转换成type2
void type1_to_type2_ul(void *src, void *dst)
{
	uint16_t current_offset = sizeof(ENBULPHYtoPHYADPType2);
	/***** UEULPHYADPtoPHYType1 -> ENBULPHYtoPHYADPType2 ************/
	UEULPHYADPtoPHYType1 *type1 = (UEULPHYADPtoPHYType1 *)src;
	ENBULPHYtoPHYADPType2 *type2 = (ENBULPHYtoPHYADPType2 *)dst;
	//GeneralHeaderUL -> GeneralHeader12
	//完全一致 不需要转换
	memcpy(type2, type1, sizeof(GeneralHeader12));

	//UE_UL_TYPE1_PUBLIC_C -> ENB_UL_TYPE2_PUBLIC_C
	UE_UL_TYPE1_PUBLIC_C *ue_publicc = (UE_UL_TYPE1_PUBLIC_C *)((void *)type1 + sizeof(GeneralHeaderUL));
	ENB_UL_TYPE2_PUBLIC_C *enb_publicc = (ENB_UL_TYPE2_PUBLIC_C *)((void *)type2 + sizeof(GeneralHeader12));
	uint32_t ue_prach_offset = ntohl(ue_publicc->PRACHOffset);

	printk("[type1_to_type2_ul] cl ue_prach_offset:%d\n", ue_prach_offset);

	//填写PRACH 以及PRACHOffset
	if (ue_prach_offset != 0)
	{
		uint16_t enb_prach_offset = current_offset;
		ENB_UL_TYPE2_PRACH *enb_prach = (ENB_UL_TYPE2_PRACH *)(dst + enb_prach_offset);
		UE_UL_TYPE1_PRACH *ue_prach = (UE_UL_TYPE1_PRACH *)(src + ue_prach_offset);
		enb_prach->ra_RNTI = ue_prach->ra_RNTI; //字段长度相同 不需要转换
		printk("[type1_to_type2_ul] cl ue_prach->ra_RNTI:%d\n", ntohs(ue_prach->ra_RNTI));
		printk("[type1_to_type2_ul] cl enb_prach->ra_RNTI:%d\n", ntohs(enb_prach->ra_RNTI));
		enb_prach->ra_PreambleIndex = ue_prach->ra_PreambleIndex;

		enb_publicc->PRACHOffset = htons(enb_prach_offset);

		current_offset += sizeof(ENB_UL_TYPE2_PRACH);
	}

	//填写PUCCH_C 以及PUCCH_C_Offset
	if (ntohl(ue_publicc->PUCCHDOffset) != 0)
	{
		enb_publicc->PUCCHCOffset = htons(current_offset);
		ENB_UL_TYPE2_PUCCH_C *enb_pucch_c = (ENB_UL_TYPE2_PUCCH_C *)(dst + current_offset);
		UE_UL_TYPE1_PUCCH_D *ue_pucch_d = (UE_UL_TYPE1_PUCCH_D *)(dst + ntohl(ue_publicc->PUCCHDOffset));
		enb_pucch_c->rnti = ue_pucch_d->RNTI;
		enb_pucch_c->UlChSel = ue_pucch_d->UlChSel;
		enb_pucch_c->SR = ue_pucch_d->SR;
		enb_pucch_c->cqi = ue_pucch_d->CQI;
		current_offset += sizeof(ENB_UL_TYPE2_PUCCH_C);
		printk("[type1_to_type2_ul] cl ue_publicc->PUCCHDOffset:%d\n", ntohl(ue_publicc->PUCCHDOffset));
		printk("[type1_to_type2_ul] cl enb_publicc->PUCCHCOffset:%d\n", ntohs(enb_publicc->PUCCHCOffset));
	}
	else
	{
		//固定填写pucchc
		enb_publicc->PUCCHCOffset = htons(current_offset);
		current_offset += sizeof(ENB_UL_TYPE2_PUCCH_C);
	}
	

	//填写PUSCH_C
	UE_UL_TYPE1_PUSCH_C *ue_pusch_c;
	if (ntohl(ue_publicc->PUSCHCOffset) != 0)
	{
		enb_publicc->PUSCHCOffset = htons(current_offset);
		ENB_UL_TYPE2_PUSCH_C *enb_pusch_c = (ENB_UL_TYPE2_PUSCH_C *)(dst + current_offset);
		ue_pusch_c = (UE_UL_TYPE1_PUSCH_C *)(src + ntohl(ue_publicc->PUSCHCOffset));
		enb_pusch_c->rnti = ue_pusch_c->RNTI;
		enb_pusch_c->UlChSel = ue_pusch_c->UlChSel;
		enb_pusch_c->NumofNP = ue_pusch_c->NumofNP;
		current_offset += sizeof(ENB_UL_TYPE2_PUSCH_C);
		printk("[type1_to_type2_ul] cl ue_publicc->PUSCHCOffset:%d\n", ntohl(ue_publicc->PUSCHCOffset));
		printk("[type1_to_type2_ul] cl enb_publicc->PUSCHCOffset:%d\n", ntohs(enb_publicc->PUSCHCOffset));
	}
	else
	{
		//固定填写puschc
		enb_publicc->PUSCHCOffset = htons(current_offset);
		current_offset += sizeof(ENB_UL_TYPE2_PUSCH_C);
	}
	

	//填写PUSCH_D
	if (ntohl(ue_publicc->PUSCHDOffset) != 0)
	{
		enb_publicc->PUSCHDOffset = htons(current_offset);
		ENB_UL_TYPE2_PUSCH_D *enb_pusch_d = (ENB_UL_TYPE2_PUSCH_D *)(dst + current_offset);
		UE_UL_TYPE1_PUSCH_D *ue_pusch_d = (UE_UL_TYPE1_PUSCH_D *)(src + ntohl(ue_publicc->PUSCHDOffset));
		enb_pusch_d->rach_flag = ue_pusch_d->RACH_FLAG;
		enb_pusch_d->rnti = ue_pusch_d->RNTI;
		enb_pusch_d->UlChSel = ue_pusch_d->UlChSel;
		uint32_t tbSize = ntohl(ue_pusch_c->tbSize);
		uint32_t datalen = tbSize / 8;
		enb_pusch_d->datalen = htonl(datalen);
		current_offset += sizeof(ENB_UL_TYPE2_PUSCH_D);
		printk("[type1_to_type2_ul] cl ue_publicc->PUSCHDOffset:%d\n", ntohl(ue_publicc->PUSCHDOffset));
		printk("[type1_to_type2_ul] cl enb_publicc->PUSCHDOffset:%d\n", ntohs(enb_publicc->PUSCHDOffset));

		//填写数据部分
		memcpy(dst + current_offset, (char *)ue_pusch_d + sizeof(UE_UL_TYPE1_PUSCH_D), datalen);

		enb_publicc->UserNum = htons(1);
	}
	else
	{
		enb_publicc->UserNum = htons(0);
	}
	
}

// send data to user by netlink
int nl_sendtouser(struct sk_buff *skb_from_mac)
{
	int len;
	int status;
	/***
	 * more字段说明：
	 * 	3: 未分段（包含nl头部，且不需等待后续分段） 
	 *  2：第一个分段（包含nl头部 需要继续等待） 
	 *  1：中间分段（不包含头部 且需要等待） 
	 *  0：最后一个分段（不包含头部，不需要等待，组装后发送到user）
	***/
	u_int8_t more;
	struct nlmsghdr *nlh_from_mac;
	struct sk_buff *nl_skb_to_user;
	struct nlmsghdr *nlh_to_user;
	if (skb_from_mac)
	{

		memcpy(&more, skb_from_mac->data, sizeof(more));
		skb_pull(skb_from_mac, sizeof(more));
		if (more == 3)
		{
			nlh_from_mac = (struct nlmsghdr *)skb_from_mac->data;
			len = nlh_from_mac->nlmsg_len;
			nl_skb_to_user = alloc_skb(NLMSG_SPACE(len), GFP_ATOMIC);
			nlh_to_user = (struct nlmsghdr *)nl_skb_to_user->data;
			skb_put(nl_skb_to_user, NLMSG_SPACE(len));

			//cl add type1totype2
			memset(type2, 0, len);
			type1_to_type2_ul(NLMSG_DATA(nlh_from_mac), type2);
			memcpy(NLMSG_DATA(nlh_to_user), type2, len);

			kfree_skb(skb_from_mac);
			nlh_to_user->nlmsg_len = NLMSG_SPACE(len);
			nlh_to_user->nlmsg_pid = 0;
			NETLINK_CB(nl_skb_to_user).portid = 0;
			if (nlh_to_user == NULL)
			{
				nlmsg_free(nl_skb_to_user);
				return -1;
			}
			status = netlink_unicast(nl_sk, nl_skb_to_user, NL_DEST_PID, MSG_DONTWAIT);
		}
		else if (more == 2)
		{
			memcpy(type1_buffer->buffer, skb_from_mac->data, PACKET_SIZE);
			type1_buffer->offset += PACKET_SIZE;
			type1_buffer->total_len = ((struct nlmsghdr*)skb_from_mac->data)->nlmsg_len;
		}
		else if (more == 1)
		{
			memcpy(type1_buffer->buffer + type1_buffer->offset, skb_from_mac->data, PACKET_SIZE);
			type1_buffer->offset += PACKET_SIZE;
		}
		else if (more == 0)
		{
			memcpy(type1_buffer->buffer + type1_buffer->offset, skb_from_mac->data, PACKET_SIZE);
			type1_buffer->offset += PACKET_SIZE;
			len = type1_buffer->offset;

			//发送
			nl_skb_to_user = alloc_skb(NLMSG_SPACE(MAX_PAYLOAD), GFP_ATOMIC);
			nlh_to_user = (struct nlmsghdr *)nl_skb_to_user->data;
			skb_put(nl_skb_to_user, NLMSG_SPACE(MAX_PAYLOAD));

			//cl add type1totype2
			memset(type2, 0, NLMSG_SPACE(MAX_PAYLOAD));
			type1_to_type2_ul(NLMSG_DATA(type1_buffer->buffer), type2);
			memcpy(NLMSG_DATA(nlh_to_user), type2, MAX_PAYLOAD);

			kfree_skb(skb_from_mac);
			nlh_to_user->nlmsg_len = NLMSG_SPACE(MAX_PAYLOAD);
			nlh_to_user->nlmsg_pid = 0;
			NETLINK_CB(nl_skb_to_user).portid = 0;
			status = netlink_unicast(nl_sk, nl_skb_to_user, NL_DEST_PID, MSG_DONTWAIT);
			memset(type1_buffer->buffer, 0, type1_buffer->offset);
			type1_buffer->offset = 0;
		}
	}
	return status;
}

//creat kernel netlink
int core_netlink_init(void)
{
	nl_sk = netlink_kernel_create(&init_net, NETLINK_TEST, &cfg);
	//可以改成使用skb管理缓存   减少拷贝工作
	type2 = (void *)kmalloc(NLMSG_SPACE(MAX_PAYLOAD), GFP_ATOMIC);
	type1_buffer = (struct type1_buffer_t *)kmalloc(sizeof(struct type1_buffer_t), GFP_ATOMIC);

	if (!nl_sk)
	{
		printk(KERN_ERR "net_link: Cannot create netlink socket.\n");
		return -1;
	}
	printk("net_link: create socket ok.\n");
	return 0;
}
//netlink release
void netlink_release(void)
{

	printk("[NETLINK] Releasing netlink socket\n");
	if (nl_sk)
	{
		netlink_kernel_release(nl_sk);
	}
	if (type2)
	{
		kfree(type2);
	}
	if(type1_buffer)
	{
		kfree(type1_buffer);
	}
}
/* Module entry/exit points. */
module_init(srio_init_module);
module_exit(srio_release);
