/*
 *

背景信息:
linux内核版本: 4.4.19
bluez版本: 5.4
 
kernel 蓝牙协议栈关键队列说明:
蓝牙控制器链表
hci_dev_list(head) =>list(struct hci_dev) =>list(struct hci_dev)
通过hci_register_dev()将hci_dev->list加入进hci_dev_list

蓝牙控制器已连接设备信息链表
struct hci_conn_hash conn_hash->list  =>list(struct hci_conn) =>list(struct hci_conn)
hci_conn_hash_add() 将连接信息结构体加入conn_hash->list链表里.
连接设备信息结构体struct hci_conn 包含host和对端蓝牙设备的地址等信息.

从蓝牙控制器接收到的蓝牙协议包队列
hdev->rx_q => bcsp->rx_skb => bcsp->rx_skb
hci_uart_register_dev()中初始化rx_q队列.
bcsp->rx_skb是已经剥去BCSP协议外壳的原始蓝牙协议包, skb_queue_tail()函数将skb加入rx_q队列.

希望发送给蓝牙控制器的蓝牙命令包队列
hdev->cmd_q => skb =>skb
hci_uart_register_dev()中初始化cmd_q队列.
hci_req_add()-> ... ->skb_queue_tail(). 通过hci_req_add命令, 将蓝牙命令包加入cmd_q队列,
等待BCSP uart协议驱动加上BCSP协议外壳, 写给蓝牙控制器.

总体测试流程:
APP:
insmod bluetooth.ko
insmod hci_uart.ko
hciattach /dev/bttty BCSP
hciconfig hci0 up
hcitool lecc <BLE设备地址> //已知BLE设备地址,所以跳过lescan扫描步骤

各步骤分步说明:

蓝牙协议驱动模块安装:
APP: insmod bluetooth.ko
kernel:
bt_init
	=>sock_register(&bt_sock_family_ops)
	=>hci_sock_init
		=>bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops)

蓝牙控制器uart协议驱动安装:
APP: insmod hci_uart.ko
kernel:
hci_uart_init
	=>tty_register_ldisc(N_HCI, &hci_uart_ldisc)
	=>bcsp_init

执行流程:
APP 
hciattach /dev/bttty BCSP
	=>ioctl(tty, HCIUARTSETPROTO)
Kernel
hci_uart_tty_ioctl
	=>case HCIUARTSETPROTO:
		=>hci_uart_set_proto
			=>将uart数据的回调函数, 设置为BCSP,即通过BCSP协议封装蓝牙协议数据.
			=>hci_uart_register_dev
				=>hci_alloc_dev 创建新蓝牙控制器
					=>hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
					=>INIT_WORK(&hdev->rx_work, hci_rx_work);
						INIT_WORK(&hdev->cmd_work, hci_cmd_work);
						INIT_WORK(&hdev->tx_work, hci_tx_work);
						INIT_WORK(&hdev->power_on, hci_power_on);
						INIT_WORK(&hdev->error_reset, hci_error_reset);
						INIT_DELAYED_WORK(&hdev->power_off, hci_power_off); 控制器自动关机延时队列.
						skb_queue_head_init(&hdev->rx_q);
						skb_queue_head_init(&hdev->cmd_q);
						init_waitqueue_head(&hdev->req_wait_q);
				=>hci_register_dev(struct hci_dev *hdev)
					=>hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND | WQ_MEM_RECLAIM, 1, hdev->name);
					=>list_add(&hdev->list, &hci_dev_list); 将新建的uart接口的蓝牙控制器加入hci_dev链表.
				=>INIT_WORK(&hdev->power_on, hci_power_on);
			=>hci_sock_dev_event(hdev, HCI_DEV_REG);
				=>向AP发送sock event<HCI_DEV_REG>
			=>queue_work(hdev->req_workqueue, &hdev->power_on);
				=>将会运行hdev->power_on()函数,即hci_power_on().
				=>hci_power_on
					=>hci_dev_do_open
						=>hdev->open(hdev)=>hci_uart_open
							=> 是个空函数...
						=>设置hdev->flags:HCI_RUNNING| HCI_INIT
						=>hdev->setup(hdev)=>hci_uart_setup
							=>如果设置了tty->disc_data中的波特率成员, 这里会使用该
							波特率去配置uart
						=>__hci_init
							=>hci_init1_req
								=>检查hdev->dev_type类型为HCI_BREDR, 则 (另一种蓝牙控制器类型是HCI_AMP)
									=>bredr_init, 与蓝牙控制器进行交互,获取蓝牙控制器信息
										=>Read Local Supported Features,通过HCI_OP_READ_LOCAL_FEATURES命令
											=>hci_req_add()将蓝牙命令请求加入发送队列里
											=>发送完成后, 接收到蓝牙控制器返回的complete_event
											  由hci_cmd_complete_evt()去异步处理这些event
												=>hci_cc_read_local_features(), 对应event处理函数.
										=>Read Local Version,通过HCI_OP_READ_LOCAL_VERSION命令
											=>hci_cc_read_local_version()
												=> 获取蓝牙控制器版本, 厂商信息等.
												=>每种命令的返回内容,可以查看源码或蓝牙协议文档.
										=>Read BD Address,通过HCI_OP_READ_BD_ADDR命令
											=>hci_cc_read_bd_addr
												=>获取蓝牙控制器地址.
										(与蓝牙控制器的通信是异步通信, cpu发送命令给控制器后, 控制器会
										返回compltete_event, 由hci_cmd_complete_evt()去处理不同的_event.)
							=>hci_init2_req
								=>判断蓝牙控制器类型为BREDR,则
									=>bredr_setup
										=>Read Buffer Size (ACL mtu, max pkt, etc.), 通过命令
											HCI_OP_READ_BUFFER_SIZE
											=>hci_cc_read_buffer_size
										=>Read Class of Device, by cmd HCI_OP_READ_CLASS_OF_DEV
											=>hci_cc_read_class_of_dev
										=>Read Local Name, by cmd HCI_OP_READ_LOCAL_NAME
											=>hci_cc_read_local_name
										=>Read Voice Setting, by cmd HCI_OP_READ_VOICE_SETTING
											=>hci_cc_read_voice_setting
										=>Read Number of Supported IAC, by cmd HCI_OP_READ_NUM_SUPPORTED_IAC
											=>hci_cc_read_num_supported_iac
										=>Read Current IAC LAP, by cmd HCI_OP_READ_CURRENT_IAC_LAP
										=>Clear Event Filters, HCI_OP_SET_EVENT_FLT
										=>Connection accept timeout ~20 secs,HCI_OP_WRITE_CA_TIMEOUT
								=>判断蓝牙控制器支持BLE, 通过之前发送HCI_OP_READ_LOCAL_FEATURES, 获取
								  控制器的feature得知. 这里使用的控制器支持BLE
									=>le_setup
										=>Read LE Buffer Size, HCI_OP_LE_READ_BUFFER_SIZE
											=>hci_cc_le_read_buffer_size
										=>Read LE Local Supported Features, HCI_OP_LE_READ_LOCAL_FEATURES
											=>hci_cc_le_read_local_features
										=>Read LE Supported States, HCI_OP_LE_READ_SUPPORTED_STATES
											=>hci_cc_le_read_supported_states
										=>Read Local Supported Commands command,HCI_OP_READ_LOCAL_COMMANDS
											=>hci_cc_read_local_commands
							=>hci_init3_req
								=>HCI_OP_READ_STORED_LINK_KEY
									=>hci_cc_read_stored_link_key
								=>hci_setup_link_policy
									=>HCI_OP_WRITE_DEF_LINK_POLICY
										=>hci_cc_write_def_link_policy
								=>HCI_OP_READ_PAGE_SCAN_ACTIVITY
									=>hci_cc_read_page_scan_activity
								=>HCI_OP_READ_PAGE_SCAN_TYPE
									=>hci_cc_read_page_scan_type
								=>判断支持BLE, 则
									=>Read LE Advertising Channel TX Power,HCI_OP_LE_READ_ADV_TX_POWER
										=>hci_cc_le_read_adv_tx_power
									=>Read LE White List Size,HCI_OP_LE_READ_WHITE_LIST_SIZE
										=>hci_cc_le_read_white_list_size
									=>Clear LE White List,HCI_OP_LE_CLEAR_WHITE_LIST
										=>hci_cc_le_clear_white_list
							=>hci_init4_req
								=>HCI_OP_DELETE_STORED_LINK_KEY
									=>hci_cc_delete_stored_link_key
						=>hdev->post_init(hdev) 当前没有定义, 空函数指针.
						=>hdev->flags, 清除HCI_INIT, 设置HCI_UP
					=>将hdev->power_off函数加入延时工作队列, 一定时间未执行hciconfig hci0 up, 就关闭控制器.
						=>hci_power_off
								
hciattach 命令执行后, 许久未执行hciconfig hci0 up, 就会执行延时工作队列内的hdev->power_off函数
hci_power_off
	=> 主要是关闭各种命令发送队列,等待event队列..
	=> 关闭蓝牙扫描, 取消discover

APP 执行
hciconfig hci0 up
1. main()
	=>socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI)
		与AF_BLUETOOTH建立socket连接.
	=>根据参数 "hci0", 得知需要操作的蓝牙设备id为0.
		这里id的判断是直接读取"hci0"[3]("hci0"字符串的第4个字节,即"0"),因此, "hci0"参数替换为"DNF0", 也不影响.
		=>ioctl(ctl, HCIGETDEVINFO, (void *) &di), 这di的值就是0, 0号蓝牙控制器
			获取蓝牙控制器设备信息.
	=>从命令列表里面,找到与参数"up" 对应的命令=>cmd_up()
		=>ioctl(ctl, HCIDEVUP, hdev), 拉起对应的蓝牙控制器.

对应的KERNEL 执行代码:
APP: socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI)
=>bt_sock_create
	=>bt_proto[proto]->create
		=>sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern)

APP: ioctl(ctl, HCIGETDEVINFO, (void *) &di)
APP: ioctl(ctl, HCIDEVUP, hdev)
=>hci_sock_ioctl
	=>case HCIGETDEVINFO
		=>hci_get_dev_info
			=>hci_dev_get
			=> 将蓝牙设备信息copy to user.
	=>case HCIDEVUP
		=>hci_dev_open
			=>遍历hci_dev_list链表,找到id为0的蓝牙控制器.
			=>清除hdev->flags:HCI_AUTO_OFF, 清除 AUTO_OFF 标志, 否则HCI_UP标志无效,AP无法使用该HCI
			=>设置hdev->flags: HCI_BONDABLE
			=>hci_dev_do_open, 之前在执行hci_power_on()时,也会执行该函数.
				实践后证实, 将hci_power_on函数的调用取消掉, 蓝牙控制器依旧可以正常初始化.

APP执行BLE设备连接
hcitool lecc <BLE设备地址>
=>main->cmd_lecc
	=>hci_get_route
		=>hci_for_each_dev(HCI_UP, __other_bdaddr,(long) (bdaddr ? bdaddr : BDADDR_ANY));
			这里在调用hcitool命令时没有指定蓝牙控制器id, 则默认返回第一个带HCI_UP flags 的蓝牙控制器.
			=>ioctl(sk, HCIGETDEVLIST, (void *) dl)
				在遍历之前, 会获取蓝牙控制器列表.
	=>socket(AF_BLUETOOTH, SOCK_RAW | SOCK_CLOEXEC, BTPROTO_HCI), bind()
		创建socket连接, 与对应id的蓝牙控制器建立连接
	=>hci_le_create_conn
		=>配置BLE连接参数, 目标BLE设备的地址
		=>hci_send_req(OCF_LE_CREATE_CONN) 发送BLE连接命令, 通过socket.
			=>这里发送的OCF_LE_CREATE_CONN命令就是BLE连接命令, 值为0x000D, 是蓝牙规范里的值, 而发送的参数结构体
			也符合蓝牙规范. 即不存在这里发送的cmd到达kernel 蓝牙协议层后需要转换为蓝牙规范的cmd的过程, 因此
			内核协议层中的封装HCI_OP_LE_CREATE_CONN的BLE连接命令的BLE连接函数并没有被调用.

对应kernel 的执行代码:
APP: cmd_lecc->hci_get_route->hci_for_each_dev->ioctl(sk, HCIGETDEVLIST, (void *) dl)
kernel: 
hci_sock_ioctl
	=>case HCIGETDEVLIST
		=>hci_get_dev_list
			=>遍历hci_dev_list链表, 将链表里所有的蓝牙控制器返回给user.
				若是蓝牙控制器hdev->flags:HCI_AUTO_OFF标志位存在,则将HCI_UP标志位取消.
				因为该控制器会自动关闭, 因此UP标志位显得无意义.
				注释原文:
				 When the auto-off is configured it means the transport
				 is running, but in that case still indicate that the
				 device is actually down.
				 
APP: cmd_lecc->hci_le_create_conn
根据蓝牙协议手册Core6.1.pdf 的 7.8.12 LE Create Connection command章节,
BLE连接命令发送给控制器后,控制器会发送HCI_Command_Status event 给mcu.
当BLE设备连接成功后, 控制器会发送HCI_LE_Connection_Complete or HCI_LE_Enhanced_Connection_Complete event
给mcu.
原文:
Event(s) generated (unless masked away):
	When the Controller receives the HCI_LE_Create_Connection command, the Controller
	sends the HCI_Command_Status event to the Host. An HCI_LE_Connection_Complete
	or HCI_LE_Enhanced_Connection_Complete event shall be generated when a
	connection is created because of this command or the connection creation procedure
	is cancelled; until one of these events is generated, the command is considered
	pending. If a connection is created and the Controller supports the LE Channel
	Selection Algorithm #2 feature, this event shall be immediately followed by an
	HCI_LE_Channel_Selection_Algorithm event.

mcu接收到的HCI_Command_Status event, 由hci_cmd_status_evt函数处理.
	status event 的说明在蓝牙协议规范Core6.1.pdf 的 7.7.15 Command Status event章节.
mcu接收到的HCI_LE_Connection_Complete 或 HCI_LE_Enhanced_Connection_Complete event 是LE_META event,
由hci_le_meta_evt函数处理.

hci_cmd_status_evt
	=>通过event->opcode判断event类型
	=>case HCI_OP_LE_CREATE_CONN
		=>hci_cs_le_create_conn
			=>hci_conn_hash_lookup_le
				=>遍历hdev->conn_hash->list链表, 寻找相同地址相同的BLE设备, 表明BLE设备已经连接过了.
				hdev表示控制器, conn_hash表示该控制器下连接了的BLE设备.
				这里还没有连接对应的BLE设备, 返回了NULL.

BLE设备连接完成后, 蓝牙控制器返回该event.
hci_le_meta_evt
	=>根据le_ev->subevent, 判断LE_META event的类型
		=>case HCI_EV_LE_CONN_COMPLETE
			=>hci_le_conn_complete_evt
				=>hci_dev_clear_flag(hdev, HCI_LE_ADV) 清除蓝牙控制器BLE广播标志位.
				=>hci_lookup_le_connect, 确认目标BLE设备是否之前就连接上了?
				=>hci_conn_add, BLE设备是新连接的, 在蓝牙系统里添加该新连接的信息.
					=>hci_conn_hash_add, 将BLE设备添加进hdev->conn_hash->list队列内.
					=>hdev->notify(hdev, HCI_NOTIFY_CONN_ADD), 通知APP层, 有新设备add了.
				=>hci_bdaddr_list_lookup, 黑名单查找, 若对应了当前的BLE设备, 则将该设备断开连接.
					=>hci_conn_drop(conn)
				=>hci_pend_le_action_lookup, 查找hdev->pend_le_conns BLE等待连接队列,
					如果当前设备还有等待连接的请求在队列里的话, 就将这些请求取消, 毕竟设备已经连接上了.

mcu接收uart 蓝牙控制器event流程:
初始化:
insmod hci_uart.ko
hci_uart_init
	=>tty_register_ldisc(N_HCI, &hci_uart_ldisc)
		=>hci_uart_ldisc.receive_buf	= hci_uart_tty_receive; 注册接收uart数据的回调函数
	=>bcsp_init
		=>hci_uart_register_proto(&bcsp)
			=>hup[p->id] = &bcsp
				=>static const struct hci_uart_proto bcsp = {
					.recv		= bcsp_recv,  BCSP协议的蓝牙协议包的接收解析回调函数注册.
				}

hciattach /dev/bttty BCSP
hci_uart_tty_ioctl
	=>case HCIUARTSETPROTO:
		=>hci_uart_set_proto
			=>将uart数据的回调函数, 设置为BCSP,即通过BCSP协议封装蓝牙协议数据.
			=>hci_uart_register_dev
				=>hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
				=>INIT_WORK(&hdev->cmd_work, hci_cmd_work);
				=>INIT_WORK(&hdev->rx_work, hci_rx_work);
				=>skb_queue_head_init(&hdev->rx_q); 初始化蓝牙控制器数据包接收队列.
				=>skb_queue_head_init(&hdev->cmd_q); 蓝牙控制器 蓝牙命令发送队列.
				=>init_waitqueue_head(&hdev->req_wait_q);
			=>hci_register_dev(struct hci_dev *hdev)
				=>hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND | WQ_MEM_RECLAIM, 1, hdev->name);

接收uart蓝牙控制器event:
hci_uart_tty_receive
	=>hu->proto->recv(hu, data, count) => bcsp_recv
		=>bcsp_complete_rx_pkt(hu)
			=>将BCSP格式的event脱壳为原始的蓝牙协议数据包, 存放进bcsp->rx_skb. 我想是这样的..
			=>hci_recv_frame(hu->hdev, bcsp->rx_skb)
				=>__net_timestamp(skb);
				=>skb_queue_tail(&hdev->rx_q, skb); 将bcsp->rx_skb原始蓝牙协议数据包, 加入rx_q队列.
				=>queue_work(hdev->workqueue, &hdev->rx_work) => hci_rx_work 将rx_work加入workqueue, work要运作起来了.
					=>while ((skb = skb_dequeue(&hdev->rx_q))) 遍历所有的蓝牙协议数据包.
						switch (bt_cb(skb)->pkt_type) {
						case HCI_EVENT_PKT:{
							.....
							case HCI_EV_CMD_COMPLETE: 获取蓝牙控制器的信息等交互, 都是返回这种包.
								hci_cmd_complete_evt(hdev, skb, &opcode, &status,
											 &req_complete, &req_complete_skb);
							case HCI_EV_CMD_STATUS: 耗时较长的命令, 蓝牙控制器会先返回一个status包, 等处理完成后再返回complete包
													例如BLE设备连接命令.
								hci_cmd_status_evt(hdev, skb, &opcode, &status, &req_complete,
										   &req_complete_skb);
							case HCI_EV_LE_META:  BLE设备连接成功后, 蓝牙控制器会返回LE_META包.
								hci_le_meta_evt(hdev, skb);
							.....
						}
						case HCI_ACLDATA_PKT:
						case HCI_SCODATA_PKT:

向uart蓝牙控制器发送命令流程:
req 的初始化, req只是一个临时变量, 非hdev的成员.
struct hci_request req;
skb_queue_head_init(&req->cmd_q);

wait等待队列的初始化:
DECLARE_WAITQUEUE(wait, current);
add_wait_queue(&hdev->req_wait_q, &wait);

蓝牙命令发送请求函数:
hci_req_add
	=>hci_req_add_ev(req, opcode, plen, param, 0)
		=>skb = hci_prepare_cmd(hdev, opcode, plen, param)
			=>skb = bt_skb_alloc(len, GFP_ATOMIC)
			=>hdr->opcode = cpu_to_le16(opcode)  //填充skb, 设置要发送的蓝牙命令
		=>skb_queue_tail(&req->cmd_q, skb);

hci_req_run_skb(&req, hci_req_sync_complete);
	=>req_run(req, NULL, complete)
		=>skb = skb_peek_tail(&req->cmd_q) 取出队列的最后一个skb
		=>bt_cb(skb)->hci.req_complete_skb = complete_skb --->hci_req_sync_complete
				设置最后一个skb完成后的回调函数为 hci_req_sync_complete()
				当skb完成后, hci_req_sync_complete 会唤醒req_wait_q, 但似乎没有地方在执行wait_event_xxx()
				函数阻塞等待?
		=>skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q) 将hdev->cmd_q作为req->cmd_q的队列头,
			这样, 就可以通过hdev->cmd_q遍历整个req->cmd_q
		=>queue_work(hdev->workqueue, &hdev->cmd_work) -->  hci_cmd_work  将cmd_work加入延时工作队列
			=>if (atomic_read(&hdev->cmd_cnt))  判断有待处理的蓝牙命令要发送
			=>skb = skb_dequeue(&hdev->cmd_q) 从cmd_q队列里取出skb, skb内就包含则要发送的蓝牙命令
			//=>hdev->sent_cmd = skb_clone(skb, GFP_KERNEL) 将skb的内容复制到sent_cmd.
			=>hci_send_frame(hdev, skb)
				=>__net_timestamp(skb) 加时间戳
				=>hci_send_to_monitor(hdev, skb) 我们在应用层, 可以使用btmon监听蓝牙命令的收发, 是多亏这一个步骤?
				=>hdev->send(hdev, skb) --> hci_uart_send_frame()
					=>hu->proto->enqueue(hu, skb) --> bcsp_enqueue
						=>switch (bt_cb(skb)->pkt_type) 区分skb的类型, 归属不同的发送队列
							case HCI_ACLDATA_PKT:
							case HCI_COMMAND_PKT:
								skb_queue_tail(&bcsp->rel, skb)
								break;
							case HCI_SCODATA_PKT:
								skb_queue_tail(&bcsp->unrel, skb)
								break;
					=>hci_uart_tx_wakeup(hu);
						=>schedule_work(&hu->write_work) ---> hci_uart_write_work ()
							=>while 不断地从bcsp->rel和bcsp->unrel队列中, 取出等待处理的skb.
									=>skb = hci_uart_dequeue(hu)
										=>skb = hu->proto->dequeue(hu) ---> bcsp_dequeue 
											=>skb = skb_dequeue(&bcsp->unrel) 原代码注释: unrel下的是sco语音数据包, 处理优先级高
												=>nskb = bcsp_prepare_pkt(bcsp,skb->data, skb->len, bt_cb(skb)->pkt_type)
													将原始skb包,封装一层BCSP协议壳, 返回封装后的skb包(nskb)
											=>skb = skb_dequeue(&bcsp->rel);
												=>nskb = bcsp_prepare_pkt(bcsp, skb->data, skb->len,bt_cb(skb)->pkt_type);
								=>tty->ops->write(tty, skb->data, skb->len) 将带BCSP协议壳的skb通过串口发送给蓝牙控制器.
									=>在串口 写操作完成后, 会导致tty_wakeup被调用
									=>hci_uart_tty_wakeup
										=>hci_uart_tx_complete
											=> 对每种完成的蓝牙协议包类型进行计数.
												switch (pkt_type)
												case HCI_COMMAND_PKT:
													hdev->stat.cmd_tx++;
													break;

												case HCI_ACLDATA_PKT:
													hdev->stat.acl_tx++;
													break;

												case HCI_SCODATA_PKT:
													hdev->stat.sco_tx++;
													break;

*/
		

/* AF_BLUETOOTH / BTPROTO_HCI socket family ops.  Installed into
 * bt_proto[BTPROTO_HCI] by bt_sock_register() (see <tag1>, L606);
 * bt_sock_create() then dispatches
 * socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI) to .create = hci_sock_create. */
static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};

static const struct net_proto_family *bt_proto[BT_MAX_PROTO];
	=>[BTPROTO_HCI] = hci_sock_family_ops;<tag1>
	=>[BTPROTO_L2CAP] = l2cap_sock_family_ops;<tag2>

/* L2CAP callback table.  hci_register_cb() links l2cap_cb.list into
 * hci_cb_list (see <tag3>, L699).  The *_cfm members are the connect /
 * disconnect / security confirmation callbacks (presumably invoked by the
 * HCI core on the matching connection events — the call sites are not
 * shown in these notes). */
static struct hci_cb l2cap_cb = {
	.name		= "L2CAP",
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
};

LIST_HEAD(hci_cb_list);
	=> l2cap_cb->list <tag3>

/* HCI management channel descriptor (HCI_CHANNEL_CONTROL).
 * mgmt_init() -> hci_mgmt_chan_register() adds chan.list to
 * mgmt_chan_list (see <tag4>, L754).  handler_count is derived from the
 * mgmt_handlers[] table with ARRAY_SIZE so the two cannot drift apart. */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};

static LIST_HEAD(mgmt_chan_list);
	=> chan.list <tag4>

/* BCSP protocol ops for the HCI UART line discipline.  bcsp_init()
 * registers this table into hup[HCI_UART_BCSP] via
 * hci_uart_register_proto() (see <tag5>, L809/L819).
 * .recv parses BCSP-framed packets arriving from the UART;
 * .enqueue/.dequeue wrap outgoing HCI packets in the BCSP shell. */
static const struct hci_uart_proto bcsp = {
	.id		= HCI_UART_BCSP,
	.name		= "BCSP",
	.open		= bcsp_open,
	.close		= bcsp_close,
	.enqueue	= bcsp_enqueue,
	.dequeue	= bcsp_dequeue,
	.recv		= bcsp_recv,
	.flush		= bcsp_flush
};

static const struct hci_uart_proto *hup[HCI_UART_MAX_PROTO];
	=> [bcsp.id] = bcsp <tag5> <obj6>

struct tty_struct *tty <tag6> <obj2>
	->disc_data = <obj1> <tag7>

struct hci_uart *hu; <obj1> <tag0>
	->tty = <obj2> <tag8>
		->dev <obj8>
	->init_ready = hci_uart_init_work <tag9>
	->write_work = hci_uart_write_work
	->priv = <obj3> <tag10>
	->const struct hci_uart_proto *proto = <obj6> <tag13>
		->unsigned int init_speed; /* 可以替代应用层的串口设置, 但实际没有设置value */
		->setup() /* 可以替代应用层的setup(), 但实际没有设置 */
	->struct hci_dev		*hdev = <obj5> <tag17>
	->unsigned long		flags = HCI_UART_REGISTERED <tag27>
	->unsigned int init_speed;/* 可以替代应用层的初始时串口设置, 但实际没有设置value */
	->unsigned int oper_speed;/* 可以替代应用层的运行时串口设置, 但实际没有设置value */

struct bcsp_struct *bcsp; <obj3>
	->struct	timer_list tbcsp;
		->function = bcsp_timed_event <tag11>
		->unsigned long		data = <obj1> 
		->rx_state = BCSP_W4_PKT_DELIMITER <tag12>

LIST_HEAD(hci_dev_list); <list1>
	<obj5>->list <tag25>

struct hci_dev *hdev; <tag14> <obj5>
	-> name = "hci0" <tag22>
	->id  = 0<tag22>
	->__u8		dev_type = HCI_BREDR <tag21>
	->__u8		bus = HCI_UART <tag18>
	->struct device		dev;
		->driver_data =  <obj1> <tag19>
		->parent = <obj8> <tag20>
	->struct discovery_state	discovery;
		->state = DISCOVERY_STOPPED <tag15>
	->dev_flags = HCI_SETUP|HCI_AUTO_OFF <tag23> HCI_BREDR_ENABLED<tag24> HCI_RPA_EXPIRED <tag33> (clear HCI_SETUP)<tag34>
							(clear HCI_AUTO_OFF) <tag68>
	->unsigned long	flags=HCI_RUNNING <tag29> | HCI_INIT <tag30> (clear HCI_INIT) <tag32> | HCI_UP <tag33>
	->__u16		pkt_type <tag35>
	->__u16		esco_type; <tag36>
	->__u8		hci_ver; <tag37>
	->hci_rev <tag38>
	->lmp_ver <tag39>
	->manufacturer <tag40>
	->lmp_subver <tag41>
	->bdaddr <tag42>
	->setup_addr <tag43>
	->unsigned int	acl_mtu; <tag44>
	->unsigned int	sco_mtu;<tag44>
	->acl_pkts <tag44>
	->sco_pkts <tag44>
	->acl_cnt <tag44>
	->sco_cnt <tag44>
	->__u8		dev_class[3]; <tag45>
	->__u8		dev_name[HCI_MAX_NAME_LENGTH]; <tag46>
	->voice_setting <tag47>
	->num_iac <tag48>
	->le_mtu <tag49> /* GM: 最大MTU */
	->le_pkts <tag49>
	->le_cnt <tag49>
	->le_features <tag50>
	->le_states <tag51>
	->commands <tag52>
 	->max_page =0x1 <tag53>
	->inq_tx_power <tag54>
	->max_page <tag55>
	->features <tag56>
	->stored_max_keys <tag57>
	->stored_num_keys <tag57>
	->link_policy <tag58>
	->page_scan_interval <tag59>
	->page_scan_window <tag59>
	->page_scan_type <tag60>
	->adv_tx_power <tag61>
	->le_white_list_size <tag62>
	->struct list_head list; <tag25 add to list1>
	->struct hci_conn_hash	conn_hash;
		->struct list_head list; <list2>

/* conn表示连接信息, 包含连接双方的设备地址. */
struct hci_conn *conn; <tag63>
	->atomic_t	refcnt  0x0 <tag64>
	->list <tag65 add to list2>

bluetooth.ko insmod
static int __init bt_init(void)
{
	pr_err("joker %s:in %d.\n",__func__,__LINE__);
	int err;

	sock_skb_cb_check_size(sizeof(struct bt_skb_cb));

	BT_INFO("Core ver %s", VERSION);

	err = bt_selftest();
	if (err < 0)
		return err;

	bt_debugfs = debugfs_create_dir("bluetooth", NULL);

	err = bt_sysfs_init();
	if (err < 0)
		return err;
	int __init bt_sysfs_init(void)
	{
		pr_err("joker %s:in %d.\n",__func__,__LINE__);
		bt_class = class_create(THIS_MODULE, "bluetooth");

		return PTR_ERR_OR_ZERO(bt_class);
	}

	err = sock_register(&bt_sock_family_ops);
	if (err < 0) {
		bt_sysfs_cleanup();
		return err;
	}
	int sock_register(const struct net_proto_family *ops)
	{
		int err;

		if (ops->family >= NPROTO) {
			pr_crit("protocol %d >= NPROTO(%d)\n", ops->family, NPROTO);
			return -ENOBUFS;
		}

		spin_lock(&net_family_lock);
		if (rcu_dereference_protected(net_families[ops->family],
						  lockdep_is_held(&net_family_lock)))
			err = -EEXIST;
		else {
			rcu_assign_pointer(net_families[ops->family], ops);
			err = 0;
		}
		spin_unlock(&net_family_lock);

		/* NET: Registered protocol family 31 */
		pr_info("NET: Registered protocol family %d\n", ops->family);
		return err;
	}

	BT_INFO("HCI device and connection manager initialized");

	err = hci_sock_init();
	if (err < 0)
		goto error;
	int __init hci_sock_init(void)
	{
		pr_err("joker %s:in %d.\n",__func__,__LINE__);
		int err;

		BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));

		err = proto_register(&hci_sk_proto, 0);
		if (err < 0)
			return err;

		err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);<tag1>
		int bt_sock_register(int proto, const struct net_proto_family *ops)
		{
			pr_err("joker %s:in %d.\n",__func__,__LINE__);
			int err = 0;

			if (proto < 0 || proto >= BT_MAX_PROTO)
				return -EINVAL;

			write_lock(&bt_proto_lock);

			if (bt_proto[proto])
				err = -EEXIST;
			else
				bt_proto[proto] = ops;

			write_unlock(&bt_proto_lock);

			return err;
		}

		err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
		int bt_procfs_init(struct net *net, const char *name,
				   struct bt_sock_list* sk_list,
				   int (* seq_show)(struct seq_file *, void *))
		{
			pr_err("joker %s:in %d.\n",__func__,__LINE__);
			sk_list->custom_seq_show = seq_show;

			if (!proc_create_data(name, 0, net->proc_net, &bt_fops, sk_list))
				return -ENOMEM;
			return 0;
		}

		BT_INFO("HCI socket layer initialized");

		return 0;

	error:
		proto_unregister(&hci_sk_proto);
		return err;
	}

	err = l2cap_init();
	int __init l2cap_init(void)
	{
		pr_err("joker %s:in %d.\n",__func__,__LINE__);
		int err;

		err = l2cap_init_sockets();
		int __init l2cap_init_sockets(void)
		{
			pr_err("joker %s:in %d.\n",__func__,__LINE__);
			int err;

			BUILD_BUG_ON(sizeof(struct sockaddr_l2) > sizeof(struct sockaddr));

			err = proto_register(&l2cap_proto, 0);
			if (err < 0)
				return err;

			err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops); <tag2>
			if (err < 0) {
				BT_ERR("L2CAP socket registration failed");
				goto error;
			}

			err = bt_procfs_init(&init_net, "l2cap", &l2cap_sk_list,
						 NULL);
			if (err < 0) {
				BT_ERR("Failed to create L2CAP proc file");
				bt_sock_unregister(BTPROTO_L2CAP);
				goto error;
			}

			BT_INFO("L2CAP socket layer initialized");

			return 0;

		error:
			proto_unregister(&l2cap_proto);
			return err;
		}

		hci_register_cb(&l2cap_cb);
		int hci_register_cb(struct hci_cb *cb)
		{
			pr_err("joker %s:in %d.\n",__func__,__LINE__);

			/* bf050138 name L2CAP */
			BT_DBG("%p name %s", cb, cb->name);

			mutex_lock(&hci_cb_list_lock);
			list_add_tail(&cb->list, &hci_cb_list); <tag3>
			mutex_unlock(&hci_cb_list_lock);

			return 0;
		}

		if (IS_ERR_OR_NULL(bt_debugfs))
			return 0;

		l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
							NULL, &l2cap_debugfs_fops);

		debugfs_create_u16("l2cap_le_max_credits", 0644, bt_debugfs,
				   &le_max_credits);
		debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs,
				   &le_default_mps);

		return 0;
	}

	err = sco_init();
	static inline int sco_init(void) /* 只有使能经典蓝牙,才有sco */
	{
		return 0;
	}

	err = mgmt_init();
	int mgmt_init(void)
	{
		pr_err("joker %s:in %d.\n",__func__,__LINE__);
		return hci_mgmt_chan_register(&chan);
		int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
		{
			pr_err("joker %s:in %d.\n",__func__,__LINE__);
			if (c->channel < HCI_CHANNEL_CONTROL)
				return -EINVAL;

			mutex_lock(&mgmt_chan_list_lock);
			if (__hci_mgmt_chan_find(c->channel)) {
				mutex_unlock(&mgmt_chan_list_lock);
				return -EALREADY;
			}
			static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
			{
				pr_err("joker %s:in %d.\n",__func__,__LINE__);
				struct hci_mgmt_chan *c;

				list_for_each_entry(c, &mgmt_chan_list, list) {
					if (c->channel == channel)
						return c;
				}

				return NULL;
			}

			list_add_tail(&c->list, &mgmt_chan_list); <tag4>

			mutex_unlock(&mgmt_chan_list_lock);

			return 0;
		}
	}

	return 0;

sock_err:
	hci_sock_cleanup();

error:
	sock_unregister(PF_BLUETOOTH);
	bt_sysfs_cleanup();

	return err;
}

hci_uart.ko insmod
/* Module init for hci_uart.ko: registers the N_HCI tty line discipline,
 * then registers the BCSP protocol handler.  (Study-notes version: the
 * bodies of called functions are inlined below as nested definitions,
 * which is not valid C — they document the call chain only.) */
static int __init hci_uart_init(void)
{
	pr_err("joker %s:in %d.\n",__func__,__LINE__);
	static struct tty_ldisc_ops hci_uart_ldisc; /* GM: a physical channel has only one ldisc instance; its data is then routed through the different protocol handlers and sent out. */
	int err;

	/* HCI UART driver ver 2.3 */
	BT_INFO("HCI UART driver ver %s", VERSION);

	/* Register the tty discipline */

	memset(&hci_uart_ldisc, 0, sizeof(hci_uart_ldisc));
	hci_uart_ldisc.magic		= TTY_LDISC_MAGIC;
	hci_uart_ldisc.name		= "n_hci";
	hci_uart_ldisc.open		= hci_uart_tty_open;
	hci_uart_ldisc.close		= hci_uart_tty_close;
	hci_uart_ldisc.read		= hci_uart_tty_read;
	hci_uart_ldisc.write		= hci_uart_tty_write;
	hci_uart_ldisc.ioctl		= hci_uart_tty_ioctl;
	hci_uart_ldisc.poll		= hci_uart_tty_poll;
	hci_uart_ldisc.receive_buf	= hci_uart_tty_receive;
	hci_uart_ldisc.write_wakeup	= hci_uart_tty_wakeup;
	hci_uart_ldisc.owner		= THIS_MODULE;

	err = tty_register_ldisc(N_HCI, &hci_uart_ldisc);
	if (err) {
		BT_ERR("HCI line discipline registration failed. (%d)", err);
		return err;
	}

	/* NOTE(review): bcsp_init()'s return value is ignored here — a
	 * registration failure would go unnoticed.  Verify against the
	 * upstream driver. */
	bcsp_init();
	/* Inlined body of bcsp_init() (notes expansion, not valid C): */
	int __init bcsp_init(void)
	{
		pr_err("joker %s:in %d.\n",__func__,__LINE__);
		return hci_uart_register_proto(&bcsp); <tag5>
		/* Inlined body of hci_uart_register_proto(): stores the
		 * protocol table in the hup[] slot keyed by p->id. */
		int hci_uart_register_proto(const struct hci_uart_proto *p)
		{
			pr_err("joker %s:in %d.\n",__func__,__LINE__);
			if (p->id >= HCI_UART_MAX_PROTO)
				return -EINVAL;

			if (hup[p->id])
				return -EEXIST;

			hup[p->id] = p;

			/* HCI UART protocol BCSP registered */
			BT_INFO("HCI UART protocol %s registered", p->name);

			return 0;
		}
	}

	return 0;
}


hciattach /dev/bttty BCSP
/* N_HCI ldisc .open callback, reached when hciattach attaches the line
 * discipline to /dev/bttty.  Allocates the per-tty struct hci_uart and
 * cross-links it with the tty (tty->disc_data <-> hu->tty, see <tag7>/
 * <tag8>), then initializes the init_ready/write_work work items. */
static int hci_uart_tty_open(struct tty_struct *tty) <tag6>
{
	pr_err("joker %s:in %d.\n",__func__,__LINE__);
	struct hci_uart *hu;

	/* tty c5de3c00 */
	BT_DBG("tty %p", tty);

	/* Error if the tty has no write op instead of leaving an exploitable
	   hole */
	if (tty->ops->write == NULL)
		return -EOPNOTSUPP;

	hu = kzalloc(sizeof(struct hci_uart), GFP_KERNEL); <tag0>
	if (!hu) {
		BT_ERR("Can't allocate control structure");
		return -ENFILE;
	}

	tty->disc_data = hu; <tag7>
	hu->tty = tty; <tag8>
	/* Advertise a large receive buffer so the tty layer does not
	 * throttle incoming HCI traffic. */
	tty->receive_room = 65536;

	INIT_WORK(&hu->init_ready, hci_uart_init_work); <tag9>
	INIT_WORK(&hu->write_work, hci_uart_write_work);

	/* Flush any pending characters in the driver and line discipline. */

	/* FIXME: why is this needed. Note don't use ldisc_ref here as the
	   open path is before the ldisc is referencable */

	if (tty->ldisc->ops->flush_buffer)
		tty->ldisc->ops->flush_buffer(tty);
	tty_driver_flush_buffer(tty);

	return 0;
}
static int hci_uart_tty_ioctl(struct tty_struct *tty, struct file *file,
			      unsigned int cmd, unsigned long arg)
{
	pr_err("joker %s:in %d.\n",__func__,__LINE__);
	struct hci_uart *hu = tty->disc_data;
	int err = 0;

	BT_DBG("");

	/* Verify the status of the device */
	if (!hu)
		return -EBADF;

	switch (cmd) {
	case HCIUARTSETPROTO:
		if (!test_and_set_bit(HCI_UART_PROTO_SET, &hu->flags)) {
			err = hci_uart_set_proto(hu, arg);
		} else
			return -EBUSY;
		break;
		static int hci_uart_set_proto(struct hci_uart *hu, int id)
		{
			pr_err("joker %s:in %d.\n",__func__,__LINE__);
			const struct hci_uart_proto *p;
			int err;

			p = hci_uart_get_proto(id);
			/* Looks up a registered protocol by id in the hup[] table;
			 * returns NULL for an out-of-range or unregistered id. */
			static const struct hci_uart_proto *hci_uart_get_proto(unsigned int id)
			{
				pr_err("joker %s:in %d.\n",__func__,__LINE__);
				if (id >= HCI_UART_MAX_PROTO)
					return NULL;

				return hup[id];
			}

			err = p->open(hu);
			if (err)
				return err;
			/* BCSP per-link open: allocates the bcsp_struct, hangs it off
			 * hu->priv, and initialises the three TX queues (unack = sent
			 * but not yet acknowledged, rel = reliable, unrel = unreliable),
			 * the retransmit timer and the initial RX parser state.
			 * Returns 0 on success or -ENOMEM.
			 */
			static int bcsp_open(struct hci_uart *hu)
			{
				pr_err("joker %s:in %d.\n",__func__,__LINE__);
				struct bcsp_struct *bcsp; <obj3>

				/* hu c5df9900 */
				BT_DBG("hu %p", hu);

				bcsp = kzalloc(sizeof(*bcsp), GFP_KERNEL);
				if (!bcsp)
					return -ENOMEM;

				hu->priv = bcsp; <tag10>
				skb_queue_head_init(&bcsp->unack);
				skb_queue_head_init(&bcsp->rel);
				skb_queue_head_init(&bcsp->unrel);

				/* Timer callback gets hu back through ->data */
				init_timer(&bcsp->tbcsp);
				bcsp->tbcsp.function = bcsp_timed_event; <tag11>
				bcsp->tbcsp.data     = (u_long) hu;

				/* RX parser starts by hunting for a packet delimiter */
				bcsp->rx_state = BCSP_W4_PKT_DELIMITER; <tag12>

				/* txcrc is presumably a module parameter enabling CRC on
				 * outgoing BCSP frames — confirm against bcsp.c top. */
				if (txcrc)
					bcsp->use_crc = 1;

				return 0;
			}

			hu->proto = p; <tag13>

			err = hci_uart_register_dev(hu);
			static int hci_uart_register_dev(struct hci_uart *hu)
			{
				pr_err("joker %s:in %d.\n",__func__,__LINE__);
				struct hci_dev *hdev; <tag14>

				BT_DBG("");

				/* Initialize and register HCI device */
				hdev = hci_alloc_dev();
				/* Allocates and initialises a new HCI controller object:
				 * default BR/EDR packet types, LE parameters, list heads,
				 * work items, skb queues (rx_q/cmd_q/raw_q) and the sysfs
				 * device.  Returns NULL on allocation failure; the device
				 * is registered later via hci_register_dev().
				 */
				struct hci_dev *hci_alloc_dev(void) /* GM: sets the various default BR/EDR and BLE parameters. */
				{
					pr_err("joker %s:in %d.\n",__func__,__LINE__);
					struct hci_dev *hdev;

					hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
					if (!hdev)
						return NULL;

					/* Default packet types / link policy */
					hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
					hdev->esco_type = (ESCO_HV1);
					hdev->link_mode = (HCI_LM_ACCEPT);
					hdev->num_iac = 0x01;		/* One IAC support is mandatory */
					hdev->io_capability = 0x03;	/* No Input No Output */
					hdev->manufacturer = 0xffff;	/* Default to internal use */
					hdev->inq_tx_power = HCI_TX_POWER_INVALID;
					hdev->adv_tx_power = HCI_TX_POWER_INVALID;
					hdev->adv_instance_cnt = 0;
					hdev->cur_adv_instance = 0x00;
					hdev->adv_instance_timeout = 0;

					hdev->sniff_max_interval = 800;
					hdev->sniff_min_interval = 80;

					/* LE advertising / scan / connection defaults */
					hdev->le_adv_channel_map = 0x07;
					hdev->le_adv_min_interval = 0x0800;
					hdev->le_adv_max_interval = 0x0800;
					hdev->le_scan_interval = 0x0060;
					hdev->le_scan_window = 0x0030;
					hdev->le_conn_min_interval = 0x0028;
					hdev->le_conn_max_interval = 0x0038;
					hdev->le_conn_latency = 0x0000;
					hdev->le_supv_timeout = 0x002a;
					hdev->le_def_tx_len = 0x001b;
					hdev->le_def_tx_time = 0x0148;
					hdev->le_max_tx_len = 0x001b;
					hdev->le_max_tx_time = 0x0148;
					hdev->le_max_rx_len = 0x001b;
					hdev->le_max_rx_time = 0x0148;

					hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
					hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
					hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
					hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;

					mutex_init(&hdev->lock);
					mutex_init(&hdev->req_lock);

					/* Per-controller bookkeeping lists, incl. conn_hash.list
					 * (the connected-device list described in the header notes) */
					INIT_LIST_HEAD(&hdev->mgmt_pending);
					INIT_LIST_HEAD(&hdev->blacklist);
					INIT_LIST_HEAD(&hdev->whitelist);
					INIT_LIST_HEAD(&hdev->uuids);
					INIT_LIST_HEAD(&hdev->link_keys);
					INIT_LIST_HEAD(&hdev->long_term_keys);
					INIT_LIST_HEAD(&hdev->identity_resolving_keys);
					INIT_LIST_HEAD(&hdev->remote_oob_data);
					INIT_LIST_HEAD(&hdev->le_white_list);
					INIT_LIST_HEAD(&hdev->le_conn_params);
					INIT_LIST_HEAD(&hdev->pend_le_conns);
					INIT_LIST_HEAD(&hdev->pend_le_reports);
					INIT_LIST_HEAD(&hdev->conn_hash.list);
					INIT_LIST_HEAD(&hdev->adv_instances);

					/* Work items driving the RX/CMD/TX paths */
					INIT_WORK(&hdev->rx_work, hci_rx_work);
					INIT_WORK(&hdev->cmd_work, hci_cmd_work);
					INIT_WORK(&hdev->tx_work, hci_tx_work);
					INIT_WORK(&hdev->power_on, hci_power_on);
					INIT_WORK(&hdev->error_reset, hci_error_reset);

					INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
					INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
					INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
					INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
					INIT_DELAYED_WORK(&hdev->adv_instance_expire, hci_adv_timeout_expire);

					/* rx_q: packets from the controller; cmd_q: commands
					 * waiting to be sent to the controller */
					skb_queue_head_init(&hdev->rx_q);
					skb_queue_head_init(&hdev->cmd_q);
					skb_queue_head_init(&hdev->raw_q);

					init_waitqueue_head(&hdev->req_wait_q);

					INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);

					hci_init_sysfs(hdev);
					/* Initialises the embedded struct device for sysfs; takes
					 * a module reference so the module outlives the device. */
					void hci_init_sysfs(struct hci_dev *hdev)
					{
						pr_err("joker %s:in %d.\n",__func__,__LINE__);
						struct device *dev = &hdev->dev;

						dev->type = &bt_host;
						dev->class = bt_class;

						__module_get(THIS_MODULE);
						device_initialize(dev);
					}
					discovery_init(hdev);
					/* Resets inquiry/LE-scan discovery state and result lists. */
					static inline void discovery_init(struct hci_dev *hdev)
					{
						hdev->discovery.state = DISCOVERY_STOPPED; <tag15>
						INIT_LIST_HEAD(&hdev->discovery.all);
						INIT_LIST_HEAD(&hdev->discovery.unknown);
						INIT_LIST_HEAD(&hdev->discovery.resolve);
						hdev->discovery.report_invalid_rssi = true;
						hdev->discovery.rssi = HCI_RSSI_INVALID;
					}

					return hdev;
				}

				hu->hdev = hdev; <tag17>

				hdev->bus = HCI_UART; <tag18>
				hci_set_drvdata(hdev, hu); <tag19>
				/* Stores transport-private data (here the hci_uart) in the
				 * embedded device, retrievable via hci_get_drvdata(). */
				static inline void hci_set_drvdata(struct hci_dev *hdev, void *data)
				{
					dev_set_drvdata(&hdev->dev, data);
				}

				/* Only when vendor specific setup callback is provided, consider
				 * the manufacturer information valid. This avoids filling in the
				 * value for Ericsson when nothing is specified.
				 */
				if (hu->proto->setup)
					hdev->manufacturer = hu->proto->manufacturer;

				hdev->open  = hci_uart_open;
				hdev->close = hci_uart_close;
				hdev->flush = hci_uart_flush;
				hdev->send  = hci_uart_send_frame;
				hdev->setup = hci_uart_setup;
				SET_HCIDEV_DEV(hdev, hu->tty->dev);
				#define SET_HCIDEV_DEV(hdev, pdev) ((hdev)->dev.parent = (pdev)) <tag20>

				if (test_bit(HCI_UART_RAW_DEVICE, &hu->hdev_flags))
					set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks);

				if (test_bit(HCI_UART_EXT_CONFIG, &hu->hdev_flags))
					set_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks);

				if (!test_bit(HCI_UART_RESET_ON_INIT, &hu->hdev_flags))
					set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);

				if (test_bit(HCI_UART_CREATE_AMP, &hu->hdev_flags))
					hdev->dev_type = HCI_AMP;
				else
					hdev->dev_type = HCI_BREDR; <tag21>

				if (test_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags))
					return 0;

				if (hci_register_dev(hdev) < 0) {
					BT_ERR("Can't register HCI device");
					hci_free_dev(hdev);
					return -ENODEV;
				}
				int hci_register_dev(struct hci_dev *hdev)
				{
					pr_err("joker %s:in %d.\n",__func__,__LINE__);
					int id, error;

					if (!hdev->open || !hdev->close || !hdev->send)
						return -EINVAL;

					/* Do not allow HCI_AMP devices to register at index 0,
					 * so the index can be used as the AMP controller ID.
					 */
					switch (hdev->dev_type) {
					case HCI_BREDR:
						id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);/* GM:从0~0x8000000中获取ID */
						break;
					case HCI_AMP:
						id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
						break;
					default:
						return -EINVAL;
					}

					if (id < 0)
						return id;

					sprintf(hdev->name, "hci%d", id);
					hdev->id = id; <tag22>

					/* c5e61000 name hci0 bus 3 */
					BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

					hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
									  WQ_MEM_RECLAIM, 1, hdev->name);
					if (!hdev->workqueue) {
						error = -ENOMEM;
						goto err;
					}

					hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
										  WQ_MEM_RECLAIM, 1, hdev->name);
					if (!hdev->req_workqueue) {
						destroy_workqueue(hdev->workqueue);
						error = -ENOMEM;
						goto err;
					}

					if (!IS_ERR_OR_NULL(bt_debugfs))
						hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

					dev_set_name(&hdev->dev, "%s", hdev->name);

					error = device_add(&hdev->dev);
					if (error < 0)
						goto err_wqueue;

					hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
									RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
									hdev);
					if (hdev->rfkill) {
						if (rfkill_register(hdev->rfkill) < 0) {
							rfkill_destroy(hdev->rfkill);
							hdev->rfkill = NULL;
						}
					}

					if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
						hci_dev_set_flag(hdev, HCI_RFKILLED);

					hci_dev_set_flag(hdev, HCI_SETUP);
					hci_dev_set_flag(hdev, HCI_AUTO_OFF); <tag23>
					#define hci_dev_set_flag(hdev, nr)             set_bit((nr), (hdev)->dev_flags)

					if (hdev->dev_type == HCI_BREDR) {
						/* Assume BR/EDR support until proven otherwise (such as
						 * through reading supported features during init.
						 */
						hci_dev_set_flag(hdev, HCI_BREDR_ENABLED); <tag24>
					}

					write_lock(&hci_dev_list_lock);
					list_add(&hdev->list, &hci_dev_list); <tag25>
					write_unlock(&hci_dev_list_lock);

					/* Devices that are marked for raw-only usage are unconfigured
					 * and should not be included in normal operation.
					 */
					if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
						hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

					hci_sock_dev_event(hdev, HCI_DEV_REG);
					void hci_sock_dev_event(struct hci_dev *hdev, int event)
					{
						pr_err("joker %s:in %d.\n",__func__,__LINE__);

						/* hdev hci0 event 1 */
						BT_DBG("hdev %s event %d", hdev->name, event);

						if (atomic_read(&monitor_promisc)) {
							struct sk_buff *skb;

							/* Send event to monitor */
							skb = create_monitor_event(hdev, event);
							if (skb) {
								hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
											HCI_SOCK_TRUSTED, NULL);
								kfree_skb(skb);
							}
						}

						if (event <= HCI_DEV_DOWN) {
							struct hci_ev_si_device ev;

							/* Send event to sockets */
							ev.event  = event;
							ev.dev_id = hdev->id;
							hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
						}
							static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
							{
								pr_err("joker %s:in %d.\n",__func__,__LINE__);
								struct hci_event_hdr *hdr;
								struct hci_ev_stack_internal *ev;
								struct sk_buff *skb;

								skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
								if (!skb)
									return;

								hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
								hdr->evt  = HCI_EV_STACK_INTERNAL;
								hdr->plen = sizeof(*ev) + dlen;

								ev  = (void *) skb_put(skb, sizeof(*ev) + dlen);
								ev->type = type;
								memcpy(ev->data, data, dlen);

								bt_cb(skb)->incoming = 1;
								__net_timestamp(skb);

								bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
								hci_send_to_sock(hdev, skb);
								void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
								{
									pr_err("joker %s:in %d.\n",__func__,__LINE__);
									struct sock *sk;
									struct sk_buff *skb_copy = NULL;

									/* hdev   (null) len 8 */
									BT_DBG("hdev %p len %d", hdev, skb->len);

									read_lock(&hci_sk_list.lock);

									sk_for_each(sk, &hci_sk_list.head) {
										struct sk_buff *nskb;

										if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
											continue;

										/* Don't send frame to the socket it came from */
										if (skb->sk == sk)
											continue;

										if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
											if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
												bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
												bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
												bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
												continue;
											if (is_filtered_packet(sk, skb))
												continue;
										} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
											if (!bt_cb(skb)->incoming)
												continue;
											if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
												bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
												bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
												continue;
										} else {
											/* Don't send frame to other channel types */
											continue;
										}

										if (!skb_copy) {
											/* Create a private copy with headroom */
											skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
											if (!skb_copy)
												continue;

											/* Put type byte before the data */
											memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
										}

										nskb = skb_clone(skb_copy, GFP_ATOMIC);
										if (!nskb)
											continue;

										if (sock_queue_rcv_skb(sk, nskb))
											kfree_skb(nskb);
									}

									read_unlock(&hci_sk_list.lock);

									kfree_skb(skb_copy);
								}
								kfree_skb(skb);
							}

						if (event == HCI_DEV_UNREG) {
							struct sock *sk;

							/* Detach sockets from device */
							read_lock(&hci_sk_list.lock);
							sk_for_each(sk, &hci_sk_list.head) {
								bh_lock_sock_nested(sk);
								if (hci_pi(sk)->hdev == hdev) {
									hci_pi(sk)->hdev = NULL;
									sk->sk_err = EPIPE;
									sk->sk_state = BT_OPEN;
									sk->sk_state_change(sk);

									hci_dev_put(hdev);
								}
								bh_unlock_sock(sk);
							}
							read_unlock(&hci_sk_list.lock);
						}
					}
					hci_dev_hold(hdev);
					/* Takes a reference on the controller via its embedded
					 * device refcount; paired with hci_dev_put(). */
					static inline struct hci_dev *hci_dev_hold(struct hci_dev *d)
					{
						/*hci0 orig refcnt 3 */
						BT_DBG("%s orig refcnt %d", d->name,
							   atomic_read(&d->dev.kobj.kref.refcount));

						get_device(&d->dev);
						return d;
					}

					queue_work(hdev->req_workqueue, &hdev->power_on);

					return id;

				err_wqueue:
					destroy_workqueue(hdev->workqueue);
					destroy_workqueue(hdev->req_workqueue);
				err:
					ida_simple_remove(&hci_index_ida, hdev->id);

					return error;
				}

				set_bit(HCI_UART_REGISTERED, &hu->flags); <tag27>

				return 0;
			}

			return 0;
		}

	case HCIUARTGETPROTO:
		if (test_bit(HCI_UART_PROTO_SET, &hu->flags))
			return hu->proto->id;
		return -EUNATCH;

	case HCIUARTGETDEVICE:
		if (test_bit(HCI_UART_REGISTERED, &hu->flags))
			return hu->hdev->id;
		return -EUNATCH;

	case HCIUARTSETFLAGS:
		if (test_bit(HCI_UART_PROTO_SET, &hu->flags))
			return -EBUSY;
		err = hci_uart_set_flags(hu, arg);
		if (err)
			return err;
		break;

	case HCIUARTGETFLAGS:
		return hu->hdev_flags;

	default:
		err = n_tty_ioctl_helper(tty, file, cmd, arg);
		break;
	}

	return err;
}

static void hci_power_on(struct work_struct *work)
{
	pr_err("joker %s:in %d.\n",__func__,__LINE__);
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		hci_dev_lock(hdev);
		mgmt_set_powered_failed(hdev, err);
		hci_dev_unlock(hdev);
		return;
	}
	static int hci_dev_do_open(struct hci_dev *hdev)
	{
		pr_err("joker %s:in %d.\n",__func__,__LINE__);
		int ret = 0;

		/* hci0 c5e61000 */
		BT_DBG("%s %p", hdev->name, hdev);

		hci_req_lock(hdev);

		if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
			ret = -ENODEV;
			goto done;
		}

		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
			!hci_dev_test_flag(hdev, HCI_CONFIG)) {
			/* Check for rfkill but allow the HCI setup stage to
			 * proceed (which in itself doesn't cause any RF activity).
			 */
			if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
				ret = -ERFKILL;
				goto done;
			}

			/* Check for valid public address or a configured static
			 * random adddress, but let the HCI setup proceed to
			 * be able to determine if there is a public address
			 * or not.
			 *
			 * In case of user channel usage, it is not important
			 * if a public address or static random address is
			 * available.
			 *
			 * This check is only valid for BR/EDR controllers
			 * since AMP controllers do not have an address.
			 */
			if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
				hdev->dev_type == HCI_BREDR &&
				!bacmp(&hdev->bdaddr, BDADDR_ANY) &&
				!bacmp(&hdev->static_addr, BDADDR_ANY)) {
				ret = -EADDRNOTAVAIL;
				goto done;
			}
		}

		if (test_bit(HCI_UP, &hdev->flags)) {
			ret = -EALREADY;
			goto done;
		}

		if (hdev->open(hdev)) { //hci_uart_open
			ret = -EIO;
			goto done;
		}
		/* hdev->open callback for the UART transport — the tty was already
		 * opened by the line discipline, so nothing is left to do here. */
		static int hci_uart_open(struct hci_dev *hdev)
		{
			pr_err("joker %s:in %d.\n",__func__,__LINE__);
			BT_DBG("%s %p", hdev->name, hdev);

			/* Nothing to do for UART driver */
			return 0;
		}

		set_bit(HCI_RUNNING, &hdev->flags); <tag29>
		hci_sock_dev_event(hdev, HCI_DEV_OPEN);

		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags); <tag30>

		if (hci_dev_test_flag(hdev, HCI_SETUP)) {
			hci_sock_dev_event(hdev, HCI_DEV_SETUP);

			if (hdev->setup) {
				ret = hdev->setup(hdev);
			}
			/* hdev->setup callback: programs init/operational baudrates,
			 * defers to the protocol's own setup hook if present, and —
			 * only when HCI_UART_VND_DETECT is set — reads the local
			 * version to detect Intel/Broadcom parts and install their
			 * set_bdaddr handlers.  Always returns 0 except when the
			 * protocol setup hook itself fails.
			 */
			static int hci_uart_setup(struct hci_dev *hdev)
			{
				pr_err("joker %s:in %d.\n",__func__,__LINE__);
				struct hci_uart *hu = hci_get_drvdata(hdev);
				struct hci_rp_read_local_version *ver;
				struct sk_buff *skb;
				unsigned int speed;
				int err;

				/* Init speed if any: per-device value wins over the
				 * protocol default */
				if (hu->init_speed)
					speed = hu->init_speed;
				else if (hu->proto->init_speed)
					speed = hu->proto->init_speed;
				else
					speed = 0;

				if (speed)
					hci_uart_set_baudrate(hu, speed);

				/* Operational speed if any */
				if (hu->oper_speed)
					speed = hu->oper_speed;
				else if (hu->proto->oper_speed)
					speed = hu->proto->oper_speed;
				else
					speed = 0;

				/* Ask the controller to switch first, then retune the
				 * host uart only if the controller accepted */
				if (hu->proto->set_baudrate && speed) {
					err = hu->proto->set_baudrate(hu, speed);
					if (!err)
						hci_uart_set_baudrate(hu, speed);
				}

				if (hu->proto->setup)
					return hu->proto->setup(hu);

				if (!test_bit(HCI_UART_VND_DETECT, &hu->hdev_flags))
					return 0;

				skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
							 HCI_INIT_TIMEOUT);
				if (IS_ERR(skb)) {
					BT_ERR("%s: Reading local version information failed (%ld)",
						   hdev->name, PTR_ERR(skb));
					return 0;
				}

				if (skb->len != sizeof(*ver)) {
					BT_ERR("%s: Event length mismatch for version information",
						   hdev->name);
					goto done;
				}

				ver = (struct hci_rp_read_local_version *)skb->data;

				/* Vendor dispatch by Bluetooth SIG company id
				 * (2 = Intel, 15 = Broadcom) */
				switch (le16_to_cpu(ver->manufacturer)) {
				#ifdef CONFIG_BT_HCIUART_INTEL
				case 2:
					hdev->set_bdaddr = btintel_set_bdaddr;
					btintel_check_bdaddr(hdev);
					break;
				#endif
				#ifdef CONFIG_BT_HCIUART_BCM
				case 15:
					hdev->set_bdaddr = btbcm_set_bdaddr;
					btbcm_check_bdaddr(hdev);
					break;
				#endif
				}

			done:
				kfree_skb(skb);
				return 0;
			}

			/* The transport driver can set these quirks before
			 * creating the HCI device or in its setup callback.
			 *
			 * In case any of them is set, the controller has to
			 * start up as unconfigured.
			 */
			if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
				test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
				hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

			/* For an unconfigured controller it is required to
			 * read at least the version information provided by
			 * the Read Local Version Information command.
			 *
			 * If the set_bdaddr driver callback is provided, then
			 * also the original Bluetooth public device address
			 * will be read using the Read BD Address command.
			 */
			if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
				ret = __hci_unconf_init(hdev);
		}

		if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
			/* If public address change is configured, ensure that
			 * the address gets programmed. If the driver does not
			 * support changing the public address, fail the power
			 * on procedure.
			 */
			if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
				hdev->set_bdaddr)
				ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
			else
				ret = -EADDRNOTAVAIL;
		}

		if (!ret) {
			if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
				!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
				ret = __hci_init(hdev);
				static int __hci_init(struct hci_dev *hdev)
				{
					pr_err("joker %s:in %d.\n",__func__,__LINE__);
					int err;

					err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
					static int __hci_req_sync(struct hci_dev *hdev,
								  void (*func)(struct hci_request *req,
										  unsigned long opt),
								  unsigned long opt, __u32 timeout)
					{
						pr_err("joker %s:in %d.\n",__func__,__LINE__);
						struct hci_request req;
						DECLARE_WAITQUEUE(wait, current);
						int err = 0;

						/* hci0 start */
						BT_DBG("%s start", hdev->name);

						hci_req_init(&req, hdev);
						/* Initialises a request: empty per-request command
						 * queue (spliced into hdev->cmd_q at run time),
						 * target device, cleared error. */
						void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
						{
							pr_err("joker %s:in %d.\n",__func__,__LINE__);
							skb_queue_head_init(&req->cmd_q);
							req->hdev = hdev;
							req->err = 0;
						}

						hdev->req_status = HCI_REQ_PEND;

						func(&req, opt); //hci_init1_req
						static void hci_init1_req(struct hci_request *req, unsigned long opt)
						{
							pr_err("joker %s:in %d.\n",__func__,__LINE__);
							struct hci_dev *hdev = req->hdev;

							/* hci0 0 */
							BT_DBG("%s %ld", hdev->name, opt);

							/* Reset */
							if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
								hci_reset_req(req, 0);

							switch (hdev->dev_type) {
							case HCI_BREDR:
								bredr_init(req);
								break;
							static void bredr_init(struct hci_request *req)
							{
								pr_err("joker %s:in %d.\n",__func__,__LINE__);
								req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

								/* Read Local Supported Features */
								hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

								/* Read Local Version */
								hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

								/* Read BD Address */
								hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
								void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
										 const void *param)
								{
									pr_err("joker %s:in %d.\n",__func__,__LINE__);
									hci_req_add_ev(req, opcode, plen, param, 0);
									void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
												const void *param, u8 event)
									{
										pr_err("joker %s:in %d.\n",__func__,__LINE__);
										struct hci_dev *hdev = req->hdev;
										struct sk_buff *skb;

										/* hci0 opcode 0x1003 plen 0 HCI_OP_READ_LOCAL_FEATURES */
										/* hci0 opcode 0x1001 plen 0 HCI_OP_READ_LOCAL_VERSION */
										/* hci0 opcode 0x1009 plen 0 HCI_OP_READ_BD_ADDR */
										BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

										/* If an error occurred during request building, there is no point in
										 * queueing the HCI command. We can simply return.
										 */
										if (req->err)
											return;

										skb = hci_prepare_cmd(hdev, opcode, plen, param);
										if (!skb) {
											BT_ERR("%s no memory for command (opcode 0x%4.4x)",
												   hdev->name, opcode);
											req->err = -ENOMEM;
											return;
										}
										/* Builds an HCI command skb: little-endian
										 * opcode + length header, optional parameter
										 * payload, and control-block metadata marking
										 * it a command packet.  GFP_ATOMIC because
										 * callers may hold spinlocks.  Returns NULL
										 * on allocation failure.
										 */
										struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
														const void *param)
										{
											pr_err("joker %s:in %d.\n",__func__,__LINE__);
											int len = HCI_COMMAND_HDR_SIZE + plen;
											struct hci_command_hdr *hdr;
											struct sk_buff *skb;

											skb = bt_skb_alloc(len, GFP_ATOMIC);
											if (!skb)
												return NULL;

											hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
											hdr->opcode = cpu_to_le16(opcode);
											hdr->plen   = plen;

											if (plen)
												memcpy(skb_put(skb, plen), param, plen);

											/* skb len 3 */
											BT_DBG("skb len %d", skb->len);

											bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
											bt_cb(skb)->hci.opcode = opcode;

											return skb;
										}

										if (skb_queue_empty(&req->cmd_q))
											bt_cb(skb)->hci.req_start = true;

										bt_cb(skb)->hci.req_event = event;

										skb_queue_tail(&req->cmd_q, skb);
									}
								}
							}

							case HCI_AMP:
								amp_init1(req);
								break;

							default:
								BT_ERR("Unknown device type %d", hdev->dev_type);
								break;
							}
						}

						add_wait_queue(&hdev->req_wait_q, &wait);
						set_current_state(TASK_INTERRUPTIBLE);

						err = hci_req_run_skb(&req, hci_req_sync_complete);
						int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
						{
							pr_err("joker %s:in %d.\n",__func__,__LINE__);
							return req_run(req, NULL, complete);
							static int req_run(struct hci_request *req, hci_req_complete_t complete,
									   hci_req_complete_skb_t complete_skb)
							{
								pr_err("joker %s:in %d.\n",__func__,__LINE__);
								struct hci_dev *hdev = req->hdev;
								struct sk_buff *skb;
								unsigned long flags;

								/* length 3 */
								BT_DBG("length %u", skb_queue_len(&req->cmd_q));

								/* If an error occurred during request building, remove all HCI
								 * commands queued on the HCI request queue.
								 */
								if (req->err) {
									skb_queue_purge(&req->cmd_q);
									return req->err;
								}

								/* Do not allow empty requests */
								if (skb_queue_empty(&req->cmd_q))
									return -ENODATA;

								skb = skb_peek_tail(&req->cmd_q);
								bt_cb(skb)->hci.req_complete = complete;
								bt_cb(skb)->hci.req_complete_skb = complete_skb;

								spin_lock_irqsave(&hdev->cmd_q.lock, flags);
								skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
								spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

								queue_work(hdev->workqueue, &hdev->cmd_work); /* hci_cmd_work */
								static void hci_cmd_work(struct work_struct *work)
								{
									pr_err("joker %s:in %d.\n",__func__,__LINE__);
									struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
									struct sk_buff *skb;

									/* hci0 cmd_cnt 1 cmd queued 3 */
									BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
										   atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

									/* Send queued commands */
									if (atomic_read(&hdev->cmd_cnt)) {
										skb = skb_dequeue(&hdev->cmd_q);
										if (!skb)
											return;

										kfree_skb(hdev->sent_cmd);

										hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
										if (hdev->sent_cmd) {
											atomic_dec(&hdev->cmd_cnt);
											hci_send_frame(hdev, skb);
											static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
											{
												pr_err("joker %s:in %d.\n",__func__,__LINE__);
												int err;

												/* hci0 type 1 len 3 */
												BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

												/* Time stamp */
												__net_timestamp(skb);

												/* Send copy to monitor */
												hci_send_to_monitor(hdev, skb);
												/* Mirrors a packet to HCI monitor sockets
												 * (btmon): maps the packet type to a monitor
												 * opcode, prepends the monitor header to a
												 * private copy, and broadcasts it on
												 * HCI_CHANNEL_MONITOR.  No-op when no monitor
												 * socket is in promiscuous mode.
												 */
												void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
												{
													pr_err("joker %s:in %d.\n",__func__,__LINE__);
													struct sk_buff *skb_copy = NULL;
													struct hci_mon_hdr *hdr;
													__le16 opcode;

													if (!atomic_read(&monitor_promisc))
														return;

													BT_DBG("hdev %p len %d", hdev, skb->len);

													/* ACL/SCO split into RX/TX opcodes by direction;
													 * unknown packet types are silently dropped */
													switch (bt_cb(skb)->pkt_type) {
													case HCI_COMMAND_PKT:
														opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
														break;
													case HCI_EVENT_PKT:
														opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
														break;
													case HCI_ACLDATA_PKT:
														if (bt_cb(skb)->incoming)
															opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
														else
															opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
														break;
													case HCI_SCODATA_PKT:
														if (bt_cb(skb)->incoming)
															opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
														else
															opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
														break;
													case HCI_DIAG_PKT:
														opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
														break;
													default:
														return;
													}

													/* Create a private copy with headroom */
													skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
													if (!skb_copy)
														return;

													/* Put header before the data */
													hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
													hdr->opcode = opcode;
													hdr->index = cpu_to_le16(hdev->id);
													hdr->len = cpu_to_le16(skb->len);

													hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
																HCI_SOCK_TRUSTED, NULL);
													kfree_skb(skb_copy);
												}

												if (atomic_read(&hdev->promisc)) {
													/* Send copy to the sockets */
													hci_send_to_sock(hdev, skb);
												}

												/* Get rid of skb owner, prior to sending to the driver. */
												skb_orphan(skb);

												if (!test_bit(HCI_RUNNING, &hdev->flags)) {
													kfree_skb(skb);
													return;
												}

												err = hdev->send(hdev, skb); /* hci_uart_send_frame */
												if (err < 0) {
													BT_ERR("%s sending frame failed (%d)", hdev->name, err);
													kfree_skb(skb);
												}
												static int hci_uart_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
												{
													pr_err("joker %s:in %d.\n",__func__,__LINE__);
													struct hci_uart *hu = hci_get_drvdata(hdev);

													/* hci0: type 1 len 3 */
													BT_DBG("%s: type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

													hu->proto->enqueue(hu, skb); /* bcsp_enqueue */
													/* BCSP TX enqueue: sorts outgoing packets by
													 * type — ACL and commands go on the reliable
													 * queue, SCO on the unreliable one.  Packets
													 * over 0xFFF bytes are dropped (the BCSP
													 * header length field is 12 bits).  Always
													 * returns 0; rejected skbs are freed here.
													 */
													static int bcsp_enqueue(struct hci_uart *hu, struct sk_buff *skb)
													{
														pr_err("joker %s:in %d.\n",__func__,__LINE__);
														struct bcsp_struct *bcsp = hu->priv;

														if (skb->len > 0xFFF) {
															BT_ERR("Packet too long");
															kfree_skb(skb);
															return 0;
														}

														switch (bt_cb(skb)->pkt_type) {
														case HCI_ACLDATA_PKT:
														case HCI_COMMAND_PKT:
															skb_queue_tail(&bcsp->rel, skb);
															break;

														case HCI_SCODATA_PKT:
															skb_queue_tail(&bcsp->unrel, skb);
															break;

														default:
															BT_ERR("Unknown packet type");
															kfree_skb(skb);
															break;
														}

														return 0;
													}

													hci_uart_tx_wakeup(hu);
													/* Kick the UART transmit path. If another context already holds
													 * HCI_UART_SENDING, only set HCI_UART_TX_WAKEUP so the running
													 * writer loops once more; otherwise schedule the write worker.
													 */
													int hci_uart_tx_wakeup(struct hci_uart *hu)
													{
														pr_err("joker %s:in %d.\n",__func__,__LINE__);
														if (test_and_set_bit(HCI_UART_SENDING, &hu->tx_state)) {
															/* A writer is already active; ask it to restart. */
															set_bit(HCI_UART_TX_WAKEUP, &hu->tx_state);
															return 0;
														}

														BT_DBG("");

														schedule_work(&hu->write_work); /* hci_uart_write_work */
														/* Worker: drain frames from the protocol driver into the tty. */
														static void hci_uart_write_work(struct work_struct *work)
														{
															pr_err("joker %s:in %d.\n",__func__,__LINE__);
															struct hci_uart *hu = container_of(work, struct hci_uart, write_work);
															struct tty_struct *tty = hu->tty;
															struct hci_dev *hdev = hu->hdev;
															struct sk_buff *skb;

															/* REVISIT: should we cope with bad skbs or ->write() returning
															 * an error value?
															 */

														restart:
															clear_bit(HCI_UART_TX_WAKEUP, &hu->tx_state);

															while ((skb = hci_uart_dequeue(hu)))
															/* Fetch the next frame to write: either the leftover,
															 * partially-written hu->tx_skb, or a fresh one from the
															 * protocol driver's dequeue hook. */
															static inline struct sk_buff *hci_uart_dequeue(struct hci_uart *hu)
															{
																pr_err("joker %s:in %d.\n",__func__,__LINE__);
																struct sk_buff *skb = hu->tx_skb;

																if (!skb) {
																	skb = hu->proto->dequeue(hu); /* bcsp_dequeue */
																	/* Pick the next BCSP frame in priority order:
																	 * 1) unreliable queue, 2) reliable queue (if the
																	 * unacked window has room), 3) a bare ACK frame. */
																	static struct sk_buff *bcsp_dequeue(struct hci_uart *hu)
																	{
																		pr_err("joker %s:in %d.\n",__func__,__LINE__);
																		struct bcsp_struct *bcsp = hu->priv;
																		unsigned long flags;
																		struct sk_buff *skb;

																		/* First of all, check for unreliable messages in the queue,
																		   since they have priority */

																		skb = skb_dequeue(&bcsp->unrel);
																		if (skb != NULL) {
																			struct sk_buff *nskb = bcsp_prepare_pkt(bcsp,
																					skb->data, skb->len, bt_cb(skb)->pkt_type);
																			/* Wrap a raw HCI payload in a BCSP frame:
																			 * SLIP delimiters + 4-byte header (+ optional CRC),
																			 * with 0xc0/0xdb bytes escaped. Returns a newly
																			 * allocated skb, or NULL on bad type / no memory. */
																			static struct sk_buff *bcsp_prepare_pkt(struct bcsp_struct *bcsp, u8 *data,
																					int len, int pkt_type)
																			{
																				pr_err("joker %s:in %d.\n",__func__,__LINE__);
																				struct sk_buff *nskb;
																				u8 hdr[4], chan;
																				u16 BCSP_CRC_INIT(bcsp_txmsg_crc);
																				int rel, i;

																				/* Map the HCI packet type to a BCSP channel and
																				 * decide whether it needs the reliable service. */
																				switch (pkt_type) {
																				case HCI_ACLDATA_PKT:
																					chan = 6;	/* BCSP ACL channel */
																					rel = 1;	/* reliable channel */
																					break;
																				case HCI_COMMAND_PKT:
																					chan = 5;	/* BCSP cmd/evt channel */
																					rel = 1;	/* reliable channel */
																					break;
																				case HCI_SCODATA_PKT:
																					chan = 7;	/* BCSP SCO channel */
																					rel = 0;	/* unreliable channel */
																					break;
																				case BCSP_LE_PKT:
																					chan = 1;	/* BCSP LE channel */
																					rel = 0;	/* unreliable channel */
																					break;
																				case BCSP_ACK_PKT:
																					chan = 0;	/* BCSP internal channel */
																					rel = 0;	/* unreliable channel */
																					break;
																				default:
																					BT_ERR("Unknown packet type");
																					return NULL;
																				}

																				if (hciextn && chan == 5) {
																					__le16 opcode = ((struct hci_command_hdr *)data)->opcode;

																					/* Vendor specific commands */
																					if (hci_opcode_ogf(__le16_to_cpu(opcode)) == 0x3f) {
																						u8 desc = *(data + HCI_COMMAND_HDR_SIZE);
																						if ((desc & 0xf0) == 0xc0) {
																							/* Strip the HCI header and route by the
																							 * vendor descriptor's low nibble. */
																							data += HCI_COMMAND_HDR_SIZE + 1;
																							len  -= HCI_COMMAND_HDR_SIZE + 1;
																							chan = desc & 0x0f;
																						}
																					}
																				}

																				/* Max len of packet: (original len +4(bcsp hdr) +2(crc))*2
																				   (because bytes 0xc0 and 0xdb are escaped, worst case is
																				   when the packet is all made of 0xc0 and 0xdb :) )
																				   + 2 (0xc0 delimiters at start and end). */

																				nskb = alloc_skb((len + 6) * 2 + 2, GFP_ATOMIC);
																				if (!nskb)
																					return NULL;

																				bt_cb(nskb)->pkt_type = pkt_type;

																				/* Opening SLIP frame delimiter (0xc0). */
																				bcsp_slip_msgdelim(nskb);
																				static void bcsp_slip_msgdelim(struct sk_buff *skb)
																				{
																					pr_err("joker %s:in %d.\n",__func__,__LINE__);
																					const char pkt_delim = 0xc0;

																					memcpy(skb_put(skb, 1), &pkt_delim, 1);
																				}

																				/* Byte 0: ack number (bits 3-5); piggybacking the ack
																				 * here clears any pending explicit-ack request. */
																				hdr[0] = bcsp->rxseq_txack << 3;
																				bcsp->txack_req = 0;

																				/* We request packet no 1 to card */
																				BT_DBG("We request packet no %u to card", bcsp->rxseq_txack);

																				if (rel) {
																					/* Reliable frames carry a 3-bit tx sequence number. */
																					hdr[0] |= 0x80 + bcsp->msgq_txseq;
																					/* Sending packet with seqno 0 */
																					BT_DBG("Sending packet with seqno %u", bcsp->msgq_txseq);
																					bcsp->msgq_txseq = (bcsp->msgq_txseq + 1) & 0x07;
																				}

																				if (bcsp->use_crc)
																					hdr[0] |= 0x40;

																				/* Bytes 1-2: 12-bit payload length; byte 3: one's
																				 * complement checksum of the first three bytes. */
																				hdr[1] = ((len << 4) & 0xff) | chan;
																				hdr[2] = len >> 4;
																				hdr[3] = ~(hdr[0] + hdr[1] + hdr[2]);

																				/* Put BCSP header */
																				for (i = 0; i < 4; i++) {
																					bcsp_slip_one_byte(nskb, hdr[i]);

																					if (bcsp->use_crc)
																						bcsp_crc_update(&bcsp_txmsg_crc, hdr[i]);
																				}

																				/* Put payload */
																				for (i = 0; i < len; i++) {
																					bcsp_slip_one_byte(nskb, data[i]);

																					if (bcsp->use_crc)
																						bcsp_crc_update(&bcsp_txmsg_crc, data[i]);
																				}

																				/* Put CRC */
																				if (bcsp->use_crc) {
																					bcsp_txmsg_crc = bitrev16(bcsp_txmsg_crc);
																					bcsp_slip_one_byte(nskb, (u8) ((bcsp_txmsg_crc >> 8) & 0x00ff));
																					bcsp_slip_one_byte(nskb, (u8) (bcsp_txmsg_crc & 0x00ff));
																				}

																				/* Closing SLIP frame delimiter. */
																				bcsp_slip_msgdelim(nskb);
																				return nskb;
																			}
																			if (nskb) {
																				/* Unreliable: original skb no longer needed. */
																				kfree_skb(skb);
																				return nskb;
																			} else {
																				/* Allocation failed - put it back for a retry. */
																				skb_queue_head(&bcsp->unrel, skb);
																				BT_ERR("Could not dequeue pkt because alloc_skb failed");
																			}
																		}

																		/* Now, try to send a reliable pkt. We can only send a
																		   reliable packet if the number of packets sent but not yet ack'ed
																		   is < than the winsize */

																		spin_lock_irqsave_nested(&bcsp->unack.lock, flags, SINGLE_DEPTH_NESTING);

																		if (bcsp->unack.qlen < BCSP_TXWINSIZE) {
																			skb = skb_dequeue(&bcsp->rel);
																			if (skb != NULL) {
																				struct sk_buff *nskb = bcsp_prepare_pkt(bcsp, skb->data, skb->len,
																									bt_cb(skb)->pkt_type);
																				if (nskb) {
																					/* Keep the original on the unack queue and arm
																					 * the retransmit timer (250 ms). */
																					__skb_queue_tail(&bcsp->unack, skb);
																					mod_timer(&bcsp->tbcsp, jiffies + HZ / 4);
																					spin_unlock_irqrestore(&bcsp->unack.lock, flags);
																					return nskb;
																				} else {
																					skb_queue_head(&bcsp->rel, skb);
																					BT_ERR("Could not dequeue pkt because alloc_skb failed");
																				}
																			}
																		}

																		spin_unlock_irqrestore(&bcsp->unack.lock, flags);

																		/* We could not send a reliable packet, either because there are
																		   none or because there are too many unack'ed pkts. Did we receive
																		   any packets we have not acknowledged yet ? */

																		if (bcsp->txack_req) {
																			/* if so, craft an empty ACK pkt and send it on BCSP unreliable
																			   channel 0 */
																			struct sk_buff *nskb = bcsp_prepare_pkt(bcsp, NULL, 0, BCSP_ACK_PKT);
																			return nskb;
																		}

																		/* We have nothing to send */
																		return NULL;
																	}
																}
																else {
																	/* Resuming a partial write: consume the saved skb. */
																	hu->tx_skb = NULL;
																}

																return skb;
															}
															{
																int len;

																set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
																len = tty->ops->write(tty, skb->data, skb->len);
																/* This is the tty driver's write; it later invokes the
																 * ldisc wakeup callback below when it can accept more data. */
																static void hci_uart_tty_wakeup(struct tty_struct *tty)
																{
																	pr_err("joker %s:in %d.\n",__func__,__LINE__);
																	struct hci_uart *hu = tty->disc_data;

																	BT_DBG("");

																	if (!hu)
																		return;

																	clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);

																	if (tty != hu->tty)
																		return;

																	if (test_bit(HCI_UART_PROTO_SET, &hu->flags)) {
																		hci_uart_tx_wakeup(hu);
																		int hci_uart_tx_wakeup(struct hci_uart *hu)
																		{
																			pr_err("joker %s:in %d.\n",__func__,__LINE__);
																			if (test_and_set_bit(HCI_UART_SENDING, &hu->tx_state)) {
																				/* trace note: did the call bail out here? */
																				set_bit(HCI_UART_TX_WAKEUP, &hu->tx_state);
																				return 0;
																			}

																			BT_DBG("");

																			schedule_work(&hu->write_work);

																			return 0;
																		}
																	}
																}
																hdev->stat.byte_tx += len;

																/* Drop what was written; if anything is left the tty
																 * was full - park the remainder and exit the loop. */
																skb_pull(skb, len);
																if (skb->len) {
																	hu->tx_skb = skb;
																	break;
																}

																hci_uart_tx_complete(hu, bt_cb(skb)->pkt_type);
																static inline void hci_uart_tx_complete(struct hci_uart *hu, int pkt_type)
																{
																	pr_err("joker %s:in %d.\n",__func__,__LINE__);
																	struct hci_dev *hdev = hu->hdev;

																	/* Update HCI stat counters */
																	switch (pkt_type) {
																	case HCI_COMMAND_PKT:
																		hdev->stat.cmd_tx++;
																		break;

																	case HCI_ACLDATA_PKT:
																		hdev->stat.acl_tx++;
																		break;

																	case HCI_SCODATA_PKT:
																		hdev->stat.sco_tx++;
																		break;
																	}
																}
																kfree_skb(skb);
															}

															/* Someone requested a wakeup while we were sending. */
															if (test_bit(HCI_UART_TX_WAKEUP, &hu->tx_state))
																goto restart;

															clear_bit(HCI_UART_SENDING, &hu->tx_state);
														}

														return 0;
													}

													return 0;
												}
											}
											if (test_bit(HCI_RESET, &hdev->flags))
												cancel_delayed_work(&hdev->cmd_timer);
											else
												schedule_delayed_work(&hdev->cmd_timer,
															  HCI_CMD_TIMEOUT);
										} else {
											skb_queue_head(&hdev->cmd_q, skb);
											queue_work(hdev->workqueue, &hdev->cmd_work);
										}
									}
								}

								return 0;
							}
						}
						if (err < 0) {
							hdev->req_status = 0;

							remove_wait_queue(&hdev->req_wait_q, &wait);
							set_current_state(TASK_RUNNING);

							/* ENODATA means the HCI request command queue is empty.
							 * This can happen when a request with conditionals doesn't
							 * trigger any commands to be sent. This is normal behavior
							 * and should not trigger an error return.
							 */
							if (err == -ENODATA)
								return 0;

							return err;
						}

						schedule_timeout(timeout);

						remove_wait_queue(&hdev->req_wait_q, &wait);

						if (signal_pending(current))
							return -EINTR;

						switch (hdev->req_status) {
						case HCI_REQ_DONE:
							err = -bt_to_errno(hdev->req_result);
							break;

						case HCI_REQ_CANCELED:
							err = -hdev->req_result;
							break;

						default:
							err = -ETIMEDOUT;
							break;
						}

						hdev->req_status = hdev->req_result = 0;

						BT_DBG("%s end: err %d", hdev->name, err);

						return err;
					}

					if (hci_dev_test_flag(hdev, HCI_SETUP))
						hci_debugfs_create_basic(hdev);

					err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
					if (err < 0)
						return err;

					/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
					 * BR/EDR/LE type controllers. AMP controllers only need the
					 * first two stages of init.
					 */
					if (hdev->dev_type != HCI_BREDR)
						return 0;

					err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
					if (err < 0)
						return err;

					err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
					if (err < 0)
						return err;

					/* This function is only called when the controller is actually in
					 * configured state. When the controller is marked as unconfigured,
					 * this initialization procedure is not run.
					 *
					 * It means that it is possible that a controller runs through its
					 * setup phase and then discovers missing settings. If that is the
					 * case, then this function will not be called. It then will only
					 * be called during the config phase.
					 *
					 * So only when in setup phase or config phase, create the debugfs
					 * entries and register the SMP channels.
					 */
					if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
						!hci_dev_test_flag(hdev, HCI_CONFIG))
						return 0;

					hci_debugfs_create_common(hdev);

					if (lmp_bredr_capable(hdev))
						hci_debugfs_create_bredr(hdev);

					if (lmp_le_capable(hdev))
						hci_debugfs_create_le(hdev);

					return 0;
				}
				if (!ret && hdev->post_init)
					ret = hdev->post_init(hdev); /* GM: no into */
			}
		}

		/* If the HCI Reset command is clearing all diagnostic settings,
		 * then they need to be reprogrammed after the init procedure
		 * completed.
		 */
		if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
			hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
			ret = hdev->set_diag(hdev, true);

		clear_bit(HCI_INIT, &hdev->flags); <tag32>

		if (!ret) { //
			hci_dev_hold(hdev);
			hci_dev_set_flag(hdev, HCI_RPA_EXPIRED); <tag33>
			set_bit(HCI_UP, &hdev->flags);
			hci_sock_dev_event(hdev, HCI_DEV_UP);
			if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
				!hci_dev_test_flag(hdev, HCI_CONFIG) &&
				!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
				!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
				hdev->dev_type == HCI_BREDR) {
				hci_dev_lock(hdev); /* GM no into */
				mgmt_powered(hdev, 1);
				hci_dev_unlock(hdev);
			}
		} else {
			/* Init failed, cleanup */
			flush_work(&hdev->tx_work);
			flush_work(&hdev->cmd_work);
			flush_work(&hdev->rx_work);

			skb_queue_purge(&hdev->cmd_q);
			skb_queue_purge(&hdev->rx_q);

			if (hdev->flush)
				hdev->flush(hdev);

			if (hdev->sent_cmd) {
				kfree_skb(hdev->sent_cmd);
				hdev->sent_cmd = NULL;
			}

			clear_bit(HCI_RUNNING, &hdev->flags);
			hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

			hdev->close(hdev);
			hdev->flags &= BIT(HCI_RAW);
		}

	done:
		hci_req_unlock(hdev);
		return ret;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
		hci_dev_do_close(hdev);
	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) { //
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT); /* hci_power_off */
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {// <tag34>
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be send.
		 */
		mgmt_index_added(hdev);
		/* Announce a newly registered controller to management sockets.
		 * BR/EDR devices get a (possibly unconfigured) Index Added event;
		 * all device types then get the Extended Index Added event.
		 */
		void mgmt_index_added(struct hci_dev *hdev)
		{
			pr_err("joker %s:in %d.\n",__func__,__LINE__);
			struct mgmt_ev_ext_index ev;

			/* Raw devices are invisible to the management interface. */
			if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
				return;

			if (hdev->dev_type == HCI_BREDR) {
				if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
					mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
							 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
					ev.type = 0x01;
				} else {
					mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
							 HCI_MGMT_INDEX_EVENTS);
					ev.type = 0x00;
				}
			} else if (hdev->dev_type == HCI_AMP) {
				ev.type = 0x02;
			} else {
				/* Unknown transport type: nothing to announce. */
				return;
			}

			ev.bus = hdev->bus;

			mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
					 HCI_MGMT_EXT_INDEX_EVENTS);
		}
	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}

/* After an HCI command is sent, the controller's reply (event) arrives here. */
/* tty line-discipline receive hook: hand raw UART bytes to the BCSP
 * protocol parser, account the received bytes and unthrottle the tty.
 */
static void hci_uart_tty_receive(struct tty_struct *tty, const u8 *data,
				 char *flags, int count)
{
	pr_err("joker %s:in %d.\n",__func__,__LINE__);
	struct hci_uart *hu = tty->disc_data;

	if (!hu || tty != hu->tty)
		return;

	if (!test_bit(HCI_UART_PROTO_SET, &hu->flags))
		return;

	/* It does not need a lock here as it is already protected by a mutex in
	 * tty caller
	 */
	hu->proto->recv(hu, data, count);/* bcsp_recv */
	/* Incremental BCSP de-framing state machine. Bytes may arrive in
	 * arbitrary chunks; state and the partially assembled rx_skb live
	 * in bcsp_struct across calls. */
	static int bcsp_recv(struct hci_uart *hu, const void *data, int count)
	{
		pr_err("joker %s:in %d.\n",__func__,__LINE__);
		struct bcsp_struct *bcsp = hu->priv;
		const unsigned char *ptr;

		/* hu c5eb9e80 count 31 rx_state 1 rx_count 0 */
		BT_DBG("hu %p count %d rx_state %d rx_count %ld",
			hu, count, bcsp->rx_state, bcsp->rx_count);

		ptr = data;
		while (count) {
			/* rx_count > 0: still expecting payload/header bytes. */
			if (bcsp->rx_count) {
				if (*ptr == 0xc0) {
					/* A delimiter inside a frame: truncated packet. */
					BT_ERR("Short BCSP packet");
					kfree_skb(bcsp->rx_skb);
					bcsp->rx_state = BCSP_W4_PKT_START;
					bcsp->rx_count = 0;
				} else
					bcsp_unslip_one_byte(bcsp, *ptr);

				ptr++; count--;
				continue;
			}

			switch (bcsp->rx_state) {
			case BCSP_W4_BCSP_HDR:
				/* Byte 3 is the one's-complement checksum of bytes 0-2. */
				if ((0xff & (u8) ~ (bcsp->rx_skb->data[0] + bcsp->rx_skb->data[1] +
						bcsp->rx_skb->data[2])) != bcsp->rx_skb->data[3]) {
					BT_ERR("Error in BCSP hdr checksum");
					kfree_skb(bcsp->rx_skb);
					bcsp->rx_state = BCSP_W4_PKT_DELIMITER;
					bcsp->rx_count = 0;
					continue;
				}

				bcsp->rx_state = BCSP_W4_DATA;
				/* 12-bit length split across header bytes 1 and 2. */
				bcsp->rx_count = (bcsp->rx_skb->data[1] >> 4) +
						(bcsp->rx_skb->data[2] << 4);	/* May be 0 */
				continue;

			case BCSP_W4_DATA:
				if (bcsp->rx_skb->data[0] & 0x40) {	/* pkt with crc */
					bcsp->rx_state = BCSP_W4_CRC;
					bcsp->rx_count = 2;
				} else
					bcsp_complete_rx_pkt(hu);
				continue;

			case BCSP_W4_CRC:
				if (bitrev16(bcsp->message_crc) != bscp_get_crc(bcsp)) {
					BT_ERR ("Checksum failed: computed %04x received %04x",
						bitrev16(bcsp->message_crc),
						bscp_get_crc(bcsp));

					kfree_skb(bcsp->rx_skb);
					bcsp->rx_state = BCSP_W4_PKT_DELIMITER;
					bcsp->rx_count = 0;
					continue;
				}
				/* Drop the 2 CRC bytes before delivery. */
				skb_trim(bcsp->rx_skb, bcsp->rx_skb->len - 2);
				bcsp_complete_rx_pkt(hu);
				continue;

			case BCSP_W4_PKT_DELIMITER:
				switch (*ptr) {
				case 0xc0:
					bcsp->rx_state = BCSP_W4_PKT_START;
					break;
				default:
					/*BT_ERR("Ignoring byte %02x", *ptr);*/
					break;
				}
				ptr++; count--;
				break;

			case BCSP_W4_PKT_START:
				switch (*ptr) {
				case 0xc0:
					/* Repeated delimiters between frames are ignored. */
					ptr++; count--;
					break;

				default:
					bcsp->rx_state = BCSP_W4_BCSP_HDR;
					bcsp->rx_count = 4;
					bcsp->rx_esc_state = BCSP_ESCSTATE_NOESC;
					BCSP_CRC_INIT(bcsp->message_crc);

					/* Do not increment ptr or decrement count
					 * Allocate packet. Max len of a BCSP pkt=
					 * 0xFFF (payload) +4 (header) +2 (crc) */

					bcsp->rx_skb = bt_skb_alloc(0x1005, GFP_ATOMIC);
					if (!bcsp->rx_skb) {
						BT_ERR("Can't allocate mem for new packet");
						bcsp->rx_state = BCSP_W4_PKT_DELIMITER;
						bcsp->rx_count = 0;
						return 0;
					}
					break;
				}
				break;
			}
		}
		/* Loop only exits when count reached 0. */
		return count;
	}

	if (hu->hdev)
		hu->hdev->stat.byte_rx += count;

	tty_unthrottle(tty);
}

/* A complete, checksum-verified BCSP frame is in bcsp->rx_skb.
 * Validate the reliable-channel sequence number, process piggybacked
 * acks, then route the payload up by channel number.
 * pass_up: 0 = internal/unknown channel, 1 = deliver to HCI core,
 *          2 = out-of-order reliable frame (ack only, drop payload).
 */
static void bcsp_complete_rx_pkt(struct hci_uart *hu)
{
	pr_err("joker %s:in %d.\n",__func__,__LINE__);
	struct bcsp_struct *bcsp = hu->priv;
	int pass_up = 0;

	if (bcsp->rx_skb->data[0] & 0x80) {	/* reliable pkt */
		BT_DBG("Received seqno %u from card", bcsp->rxseq_txack);

		/* check the rx sequence number is as expected */
		if ((bcsp->rx_skb->data[0] & 0x07) == bcsp->rxseq_txack) {
			bcsp->rxseq_txack++;
			bcsp->rxseq_txack %= 0x8;
		} else {
			/* handle re-transmitted packet or
			 * when packet was missed
			 */
			BT_ERR("Out-of-order packet arrived, got %u expected %u, set pass_up to 2\n",
			       bcsp->rx_skb->data[0] & 0x07, bcsp->rxseq_txack);

			/* do not process out-of-order packet payload */
			pass_up = 2;
		}

		/* send current txack value to all received reliable packets */
		bcsp->txack_req = 1;

		/* If needed, transmit an ack pkt */
		hci_uart_tx_wakeup(hu);
	}

	/* Ack number the peer piggybacked in header bits 3-5. */
	bcsp->rxack = (bcsp->rx_skb->data[0] >> 3) & 0x07;
	BT_DBG("Request for pkt %u from card", bcsp->rxack);

	/* handle received ACK indications,
	 * including those from out-of-order packets
	 */
	bcsp_pkt_cull(bcsp);
	/* Release every unacked frame the peer's ack number covers and
	 * stop the retransmit timer once the unack queue drains. */
	static void bcsp_pkt_cull(struct bcsp_struct *bcsp)
	{
		pr_err("joker %s:in %d.\n",__func__,__LINE__);
		struct sk_buff *skb, *tmp;
		unsigned long flags;
		int i, pkts_to_be_removed;
		u8 seqno;

		spin_lock_irqsave(&bcsp->unack.lock, flags);

		pkts_to_be_removed = skb_queue_len(&bcsp->unack);
		seqno = bcsp->msgq_txseq;

		/* Walk seqnos backwards from the next tx seq until the
		 * peer's ack matches; that many head entries are acked. */
		while (pkts_to_be_removed) {
			if (bcsp->rxack == seqno)
				break;
			pkts_to_be_removed--;
			seqno = (seqno - 1) & 0x07;
		}

		if (bcsp->rxack != seqno)
			BT_ERR("Peer acked invalid packet");

		BT_DBG("Removing %u pkts out of %u, up to seqno %u",
			   pkts_to_be_removed, skb_queue_len(&bcsp->unack),
			   (seqno - 1) & 0x07);

		i = 0;
		skb_queue_walk_safe(&bcsp->unack, skb, tmp) {
			if (i >= pkts_to_be_removed)
				break;
			i++;

			__skb_unlink(skb, &bcsp->unack);
			kfree_skb(skb);
		}

		if (skb_queue_empty(&bcsp->unack))
			del_timer(&bcsp->tbcsp);

		spin_unlock_irqrestore(&bcsp->unack.lock, flags);

		if (i != pkts_to_be_removed)
			BT_ERR("Removed only %u out of %u pkts", i, pkts_to_be_removed);
	}

	/* Classify by BCSP channel (low nibble of header byte 1). */
	if (pass_up != 2) {
		if ((bcsp->rx_skb->data[1] & 0x0f) == 6 &&
		    (bcsp->rx_skb->data[0] & 0x80)) {
			bt_cb(bcsp->rx_skb)->pkt_type = HCI_ACLDATA_PKT;
			pass_up = 1;
		} else if ((bcsp->rx_skb->data[1] & 0x0f) == 5 &&
			   (bcsp->rx_skb->data[0] & 0x80)) {
			bt_cb(bcsp->rx_skb)->pkt_type = HCI_EVENT_PKT;
			pass_up = 1;
		} else if ((bcsp->rx_skb->data[1] & 0x0f) == 7) {
			bt_cb(bcsp->rx_skb)->pkt_type = HCI_SCODATA_PKT;
			pass_up = 1;
		} else if ((bcsp->rx_skb->data[1] & 0x0f) == 1 &&
				!(bcsp->rx_skb->data[0] & 0x80)) {
			/* Link-establishment channel, handled internally. */
			bcsp_handle_le_pkt(hu);
			pass_up = 0;
		} else
			pass_up = 0;
	}

	if (pass_up == 0) {
		struct hci_event_hdr hdr;
		u8 desc = (bcsp->rx_skb->data[1] & 0x0f);

		if (desc != 0 && desc != 1) {
			if (hciextn) {
				/* Wrap unknown-channel data in a synthetic 0xff
				 * vendor event so the HCI core can still see it. */
				desc |= 0xc0;
				skb_pull(bcsp->rx_skb, 4);
				memcpy(skb_push(bcsp->rx_skb, 1), &desc, 1);

				hdr.evt = 0xff;
				hdr.plen = bcsp->rx_skb->len;
				memcpy(skb_push(bcsp->rx_skb, HCI_EVENT_HDR_SIZE), &hdr, HCI_EVENT_HDR_SIZE);
				bt_cb(bcsp->rx_skb)->pkt_type = HCI_EVENT_PKT;

				hci_recv_frame(hu->hdev, bcsp->rx_skb);
			} else {
				BT_ERR("Packet for unknown channel (%u %s)",
					bcsp->rx_skb->data[1] & 0x0f,
					bcsp->rx_skb->data[0] & 0x80 ?
					"reliable" : "unreliable");
				kfree_skb(bcsp->rx_skb);
			}
		} else
			kfree_skb(bcsp->rx_skb);
	} else if (pass_up == 1) {
		/* Pull out BCSP hdr */
		skb_pull(bcsp->rx_skb, 4);

		hci_recv_frame(hu->hdev, bcsp->rx_skb);
	} else {
		/* ignore packet payload of already ACKed re-transmitted
		 * packets or when a packet was missed in the BCSP window
		 */
		kfree_skb(bcsp->rx_skb);
	}

	/* rx_skb ownership has passed on (or was freed) in every branch. */
	bcsp->rx_state = BCSP_W4_PKT_DELIMITER;
	bcsp->rx_skb = NULL;
}
/* Entry point for frames coming up from a transport driver.
 * Validates device state and packet type, timestamps the skb,
 * queues it on hdev->rx_q and kicks the rx worker.
 * Takes ownership of skb in all paths.
 */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	pr_err("joker %s:in %d.\n",__func__,__LINE__);
	/* Only accept frames while the device is up or initializing. */
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
	    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
	    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
		kfree_skb(skb);
		return -EINVAL;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work); /* hci_rx_work */
	/* Worker: drain rx_q and dispatch each frame by packet type. */
	static void hci_rx_work(struct work_struct *work)
	{
		pr_err("joker %s:in %d.\n",__func__,__LINE__);
		struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
		struct sk_buff *skb;

		BT_DBG("%s", hdev->name);

		while ((skb = skb_dequeue(&hdev->rx_q))) {
			/* Send copy to monitor */
			hci_send_to_monitor(hdev, skb);

			if (atomic_read(&hdev->promisc)) {
				/* Send copy to the sockets */
				hci_send_to_sock(hdev, skb);
			}

			/* User channel owns the device; kernel stack stays out. */
			if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
				kfree_skb(skb);
				continue;
			}

			if (test_bit(HCI_INIT, &hdev->flags)) {
				/* Don't process data packets in this states. */
				switch (bt_cb(skb)->pkt_type) {
				case HCI_ACLDATA_PKT:
				case HCI_SCODATA_PKT:
					kfree_skb(skb);
					continue;
				}
			}

			/* Process frame */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_EVENT_PKT:
				/* hci0 Event packet */
				BT_DBG("%s Event packet", hdev->name);
				hci_event_packet(hdev, skb);
				break;
				/* trace paste: full definition of hci_event_packet
				 * inserted inline after the call above. */
				void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
				{
					pr_err("joker %s:in %d.\n",__func__,__LINE__);
					struct hci_event_hdr *hdr = (void *) skb->data;
					hci_req_complete_t req_complete = NULL;
					hci_req_complete_skb_t req_complete_skb = NULL;
					struct sk_buff *orig_skb = NULL;
					u8 status = 0, event = hdr->evt, req_evt = 0;
					u16 opcode = HCI_OP_NOP;

					/* If this event is the one the pending command waits
					 * for, look up its completion callbacks. */
					if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) {
						struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
						opcode = __le16_to_cpu(cmd_hdr->opcode);
						hci_req_cmd_complete(hdev, opcode, status, &req_complete,
									 &req_complete_skb);
						req_evt = event;
					}

					/* If it looks like we might end up having to call
					 * req_complete_skb, store a pristine copy of the skb since the
					 * various handlers may modify the original one through
					 * skb_pull() calls, etc.
					 */
					if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
						event == HCI_EV_CMD_COMPLETE)
						orig_skb = skb_clone(skb, GFP_KERNEL);

					skb_pull(skb, HCI_EVENT_HDR_SIZE);

					/* Dispatch on the event code. */
					switch (event) {
					case HCI_EV_INQUIRY_COMPLETE:
						hci_inquiry_complete_evt(hdev, skb);
						break;

					case HCI_EV_INQUIRY_RESULT:
						hci_inquiry_result_evt(hdev, skb);
						break;

					case HCI_EV_CONN_COMPLETE:
						hci_conn_complete_evt(hdev, skb);
						break;

					case HCI_EV_CONN_REQUEST:
						hci_conn_request_evt(hdev, skb);
						break;

					case HCI_EV_DISCONN_COMPLETE:
						hci_disconn_complete_evt(hdev, skb);
						break;

					case HCI_EV_AUTH_COMPLETE:
						hci_auth_complete_evt(hdev, skb);
						break;

					case HCI_EV_REMOTE_NAME:
						hci_remote_name_evt(hdev, skb);
						break;

					case HCI_EV_ENCRYPT_CHANGE:
						hci_encrypt_change_evt(hdev, skb);
						break;

					case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
						hci_change_link_key_complete_evt(hdev, skb);
						break;

					case HCI_EV_REMOTE_FEATURES:
						hci_remote_features_evt(hdev, skb);
						break;

					case HCI_EV_CMD_COMPLETE:
						hci_cmd_complete_evt(hdev, skb, &opcode, &status,
									 &req_complete, &req_complete_skb);
						break;

					case HCI_EV_CMD_STATUS:
						hci_cmd_status_evt(hdev, skb, &opcode, &status, &req_complete,
								   &req_complete_skb);
						break;

					case HCI_EV_HARDWARE_ERROR:
						hci_hardware_error_evt(hdev, skb);
						break;

					case HCI_EV_ROLE_CHANGE:
						hci_role_change_evt(hdev, skb);
						break;

					case HCI_EV_NUM_COMP_PKTS:
						hci_num_comp_pkts_evt(hdev, skb);
						break;

					case HCI_EV_MODE_CHANGE:
						hci_mode_change_evt(hdev, skb);
						break;

					case HCI_EV_PIN_CODE_REQ:
						hci_pin_code_request_evt(hdev, skb);
						break;

					case HCI_EV_LINK_KEY_REQ:
						hci_link_key_request_evt(hdev, skb);
						break;

					case HCI_EV_LINK_KEY_NOTIFY:
						hci_link_key_notify_evt(hdev, skb);
						break;

					case HCI_EV_CLOCK_OFFSET:
						hci_clock_offset_evt(hdev, skb);
						break;

					case HCI_EV_PKT_TYPE_CHANGE:
						hci_pkt_type_change_evt(hdev, skb);
						break;

					case HCI_EV_PSCAN_REP_MODE:
						hci_pscan_rep_mode_evt(hdev, skb);
						break;

					case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
						hci_inquiry_result_with_rssi_evt(hdev, skb);
						break;

					case HCI_EV_REMOTE_EXT_FEATURES:
						hci_remote_ext_features_evt(hdev, skb);
						break;

					case HCI_EV_SYNC_CONN_COMPLETE:
						hci_sync_conn_complete_evt(hdev, skb);
						break;

					case HCI_EV_EXTENDED_INQUIRY_RESULT:
						hci_extended_inquiry_result_evt(hdev, skb);
						break;

					case HCI_EV_KEY_REFRESH_COMPLETE:
						hci_key_refresh_complete_evt(hdev, skb);
						break;

					case HCI_EV_IO_CAPA_REQUEST:
						hci_io_capa_request_evt(hdev, skb);
						break;

					case HCI_EV_IO_CAPA_REPLY:
						hci_io_capa_reply_evt(hdev, skb);
						break;

					case HCI_EV_USER_CONFIRM_REQUEST:
						hci_user_confirm_request_evt(hdev, skb);
						break;

					case HCI_EV_USER_PASSKEY_REQUEST:
						hci_user_passkey_request_evt(hdev, skb);
						break;

					case HCI_EV_USER_PASSKEY_NOTIFY:
						hci_user_passkey_notify_evt(hdev, skb);
						break;

					case HCI_EV_KEYPRESS_NOTIFY:
						hci_keypress_notify_evt(hdev, skb);
						break;

					case HCI_EV_SIMPLE_PAIR_COMPLETE:
						hci_simple_pair_complete_evt(hdev, skb);
						break;

					case HCI_EV_REMOTE_HOST_FEATURES:
						hci_remote_host_features_evt(hdev, skb);
						break;

					case HCI_EV_LE_META:
						hci_le_meta_evt(hdev, skb);
						break;

					case HCI_EV_REMOTE_OOB_DATA_REQUEST:
						hci_remote_oob_data_request_evt(hdev, skb);
						break;

#if IS_ENABLED(CONFIG_BT_HS)
					case HCI_EV_CHANNEL_SELECTED:
						hci_chan_selected_evt(hdev, skb);
						break;

					case HCI_EV_PHY_LINK_COMPLETE:
						hci_phy_link_complete_evt(hdev, skb);
						break;

					case HCI_EV_LOGICAL_LINK_COMPLETE:
						hci_loglink_complete_evt(hdev, skb);
						break;

					case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
						hci_disconn_loglink_complete_evt(hdev, skb);
						break;

					case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
						hci_disconn_phylink_complete_evt(hdev, skb);
						break;
#endif

					case HCI_EV_NUM_COMP_BLOCKS:
						hci_num_comp_blocks_evt(hdev, skb);
						break;

					default:
						BT_DBG("%s event 0x%2.2x", hdev->name, event);
						break;
					}

					if (req_complete) {
						req_complete(hdev, status, opcode);
					} else if (req_complete_skb) {
						/* Only hand over orig_skb if it really matches
						 * the pending request; otherwise free it here. */
						if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
							kfree_skb(orig_skb);
							orig_skb = NULL;
						}
						req_complete_skb(hdev, status, opcode, orig_skb);
					}

					/* kfree_skb(NULL) is a no-op, so this is safe even
					 * when orig_skb was consumed or never cloned. */
					kfree_skb(orig_skb);
					kfree_skb(skb);
					hdev->stat.evt_rx++;
				}

			case HCI_ACLDATA_PKT:
				BT_DBG("%s ACL data packet", hdev->name);
				hci_acldata_packet(hdev, skb);
				break;

			case HCI_SCODATA_PKT:
				BT_DBG("%s SCO data packet", hdev->name);
				hci_scodata_packet(hdev, skb);
				break;

			default:
				kfree_skb(skb);
				break;
			}
		}
	}

	return 0;
}

/* Handle the HCI Command Complete event.
 *
 * Dispatches the completed opcode to its hci_cc_* return-parameter
 * handler, cancels the command timeout, restores the command credit,
 * and resolves request-completion callbacks via hci_req_cmd_complete().
 * Finally re-kicks cmd_work if more commands are queued on hdev->cmd_q.
 *
 * @hdev:   controller that sent the event
 * @skb:    event payload (hci_ev_cmd_complete header + return params)
 * @opcode: out - opcode of the completed command
 * @status: out - first return-parameter byte (HCI status)
 * @req_complete/@req_complete_skb: out - request completion callbacks
 *
 * NOTE(review): this file is annotated study source -- copies of callee
 * functions are pasted inline right after their call sites for reading
 * convenience; they are not part of the upstream 4.4 code and the file
 * is not expected to compile as-is.
 */
static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
				 u16 *opcode, u8 *status,
				 hci_req_complete_t *req_complete,
				 hci_req_complete_skb_t *req_complete_skb)
{
	pr_err("joker %s:in %d.\n",__func__,__LINE__);
	struct hci_ev_cmd_complete *ev = (void *) skb->data;

	*opcode = __le16_to_cpu(ev->opcode);
	/* The status byte is the first return parameter, immediately
	 * after the fixed cmd_complete header.
	 */
	*status = skb->data[sizeof(*ev)];

	/* Strip the event header so the hci_cc_* handlers see only the
	 * command's return parameters.
	 */
	skb_pull(skb, sizeof(*ev));

	switch (*opcode) {
	case HCI_OP_INQUIRY_CANCEL:
		hci_cc_inquiry_cancel(hdev, skb);
		break;

	case HCI_OP_PERIODIC_INQ:
		hci_cc_periodic_inq(hdev, skb);
		break;

	case HCI_OP_EXIT_PERIODIC_INQ:
		hci_cc_exit_periodic_inq(hdev, skb);
		break;

	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
		hci_cc_remote_name_req_cancel(hdev, skb);
		break;

	case HCI_OP_ROLE_DISCOVERY:
		hci_cc_role_discovery(hdev, skb);
		break;

	case HCI_OP_READ_LINK_POLICY:
		hci_cc_read_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_LINK_POLICY:
		hci_cc_write_link_policy(hdev, skb);
		break;

	case HCI_OP_READ_DEF_LINK_POLICY:
		hci_cc_read_def_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_DEF_LINK_POLICY:
		hci_cc_write_def_link_policy(hdev, skb);
		break;

	case HCI_OP_RESET:
		hci_cc_reset(hdev, skb);
		break;

	case HCI_OP_READ_STORED_LINK_KEY:
		hci_cc_read_stored_link_key(hdev, skb);
		break;

	case HCI_OP_DELETE_STORED_LINK_KEY:
		hci_cc_delete_stored_link_key(hdev, skb);
		break;

	case HCI_OP_WRITE_LOCAL_NAME:
		hci_cc_write_local_name(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_NAME:
		hci_cc_read_local_name(hdev, skb);
		break;

	case HCI_OP_WRITE_AUTH_ENABLE:
		hci_cc_write_auth_enable(hdev, skb);
		break;

	case HCI_OP_WRITE_ENCRYPT_MODE:
		hci_cc_write_encrypt_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SCAN_ENABLE:
		hci_cc_write_scan_enable(hdev, skb);
		break;

	case HCI_OP_READ_CLASS_OF_DEV:
		hci_cc_read_class_of_dev(hdev, skb);
		break;

	case HCI_OP_WRITE_CLASS_OF_DEV:
		hci_cc_write_class_of_dev(hdev, skb);
		break;

	case HCI_OP_READ_VOICE_SETTING:
		hci_cc_read_voice_setting(hdev, skb);
		break;

	case HCI_OP_WRITE_VOICE_SETTING:
		hci_cc_write_voice_setting(hdev, skb);
		break;

	case HCI_OP_READ_NUM_SUPPORTED_IAC:
		hci_cc_read_num_supported_iac(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_MODE:
		hci_cc_write_ssp_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SC_SUPPORT:
		hci_cc_write_sc_support(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_VERSION:
		hci_cc_read_local_version(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_COMMANDS:
		hci_cc_read_local_commands(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_FEATURES:
		hci_cc_read_local_features(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_EXT_FEATURES:
		hci_cc_read_local_ext_features(hdev, skb);
		break;

	case HCI_OP_READ_BUFFER_SIZE:
		hci_cc_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_READ_BD_ADDR:
		hci_cc_read_bd_addr(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
		hci_cc_read_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
		hci_cc_write_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_TYPE:
		hci_cc_read_page_scan_type(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_TYPE:
		hci_cc_write_page_scan_type(hdev, skb);
		break;

	case HCI_OP_READ_DATA_BLOCK_SIZE:
		hci_cc_read_data_block_size(hdev, skb);
		break;

	case HCI_OP_READ_FLOW_CONTROL_MODE:
		hci_cc_read_flow_control_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_INFO:
		hci_cc_read_local_amp_info(hdev, skb);
		break;

	case HCI_OP_READ_CLOCK:
		hci_cc_read_clock(hdev, skb);
		break;

	case HCI_OP_READ_INQ_RSP_TX_POWER:
		hci_cc_read_inq_rsp_tx_power(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_REPLY:
		hci_cc_pin_code_reply(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_NEG_REPLY:
		hci_cc_pin_code_neg_reply(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_DATA:
		hci_cc_read_local_oob_data(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
		hci_cc_read_local_oob_ext_data(hdev, skb);
		break;

	case HCI_OP_LE_READ_BUFFER_SIZE:
		hci_cc_le_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_LE_READ_LOCAL_FEATURES:
		hci_cc_le_read_local_features(hdev, skb);
		break;

	case HCI_OP_LE_READ_ADV_TX_POWER:
		hci_cc_le_read_adv_tx_power(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_REPLY:
		hci_cc_user_confirm_reply(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_NEG_REPLY:
		hci_cc_user_confirm_neg_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_REPLY:
		hci_cc_user_passkey_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_NEG_REPLY:
		hci_cc_user_passkey_neg_reply(hdev, skb);
		break;

	case HCI_OP_LE_SET_RANDOM_ADDR:
		hci_cc_le_set_random_addr(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_ENABLE:
		hci_cc_le_set_adv_enable(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_PARAM:
		hci_cc_le_set_scan_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_ENABLE:
		hci_cc_le_set_scan_enable(hdev, skb);
		break;

	case HCI_OP_LE_READ_WHITE_LIST_SIZE:
		hci_cc_le_read_white_list_size(hdev, skb);
		break;

	case HCI_OP_LE_CLEAR_WHITE_LIST:
		hci_cc_le_clear_white_list(hdev, skb);
		break;

	case HCI_OP_LE_ADD_TO_WHITE_LIST:
		hci_cc_le_add_to_white_list(hdev, skb);
		break;

	case HCI_OP_LE_DEL_FROM_WHITE_LIST:
		hci_cc_le_del_from_white_list(hdev, skb);
		break;

	case HCI_OP_LE_READ_SUPPORTED_STATES:
		hci_cc_le_read_supported_states(hdev, skb);
		break;

	case HCI_OP_LE_READ_DEF_DATA_LEN:
		hci_cc_le_read_def_data_len(hdev, skb);
		break;

	case HCI_OP_LE_WRITE_DEF_DATA_LEN:
		hci_cc_le_write_def_data_len(hdev, skb);
		break;

	case HCI_OP_LE_READ_MAX_DATA_LEN:
		hci_cc_le_read_max_data_len(hdev, skb);
		break;

	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
		hci_cc_write_le_host_supported(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_PARAM:
		hci_cc_set_adv_param(hdev, skb);
		break;

	case HCI_OP_READ_RSSI:
		hci_cc_read_rssi(hdev, skb);
		break;

	case HCI_OP_READ_TX_POWER:
		hci_cc_read_tx_power(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_DEBUG_MODE:
		hci_cc_write_ssp_debug_mode(hdev, skb);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
		break;
	}

	/* HCI_OP_NOP means "no command actually completed" (spontaneous
	 * credit event) - keep the command timeout running in that case.
	 */
	if (*opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	/* NOTE(review): upstream also gates this on ev->ncmd (the command
	 * credit granted by the controller); here that check has been
	 * commented out, so one credit is always restored unless a reset
	 * is in flight -- TODO confirm this local change is intentional.
	 */
	if (/*ev->ncmd && */!test_bit(HCI_RESET, &hdev->flags))
		atomic_set(&hdev->cmd_cnt, 1);

	hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
			     req_complete_skb);
	/* (annotation) inline copy of hci_req_cmd_complete() for reading. */
	void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
				  hci_req_complete_t *req_complete,
				  hci_req_complete_skb_t *req_complete_skb)
	{
		pr_err("joker %s:in %d.\n",__func__,__LINE__);
		struct sk_buff *skb;
		unsigned long flags;

		/* opcode 0x1003 status 0x00 */
		BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

		/* If the completed command doesn't match the last one that was
		 * sent we need to do special handling of it.
		 */
		if (!hci_sent_cmd_data(hdev, opcode)) {
			/* Some CSR based controllers generate a spontaneous
			 * reset complete event during init and any pending
			 * command will never be completed. In such a case we
			 * need to resend whatever was the last sent
			 * command.
			 */
			if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
				hci_resend_last(hdev);

			return;
		}
		/* (annotation) inline copy of hci_sent_cmd_data(): returns a
		 * pointer to the parameters of the last sent command if its
		 * opcode matches, NULL otherwise.
		 */
		void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
		{
			pr_err("joker %s:in %d.\n",__func__,__LINE__);
			struct hci_command_hdr *hdr;

			if (!hdev->sent_cmd)
				return NULL;

			hdr = (void *) hdev->sent_cmd->data;

			if (hdr->opcode != cpu_to_le16(opcode))
				return NULL;

			/* hci0 opcode 0x1003 */
			BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

			return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
		}

		/* If the command succeeded and there's still more commands in
		 * this request the request is not yet complete.
		 */
		if (!status && !hci_req_is_complete(hdev))
			return;
		/* (annotation) inline copy of hci_req_is_complete(): the
		 * request is complete when cmd_q is empty or the next queued
		 * command starts a new request (req_start set).
		 */
		static bool hci_req_is_complete(struct hci_dev *hdev)
		{
			pr_err("joker %s:in %d.\n",__func__,__LINE__);
			struct sk_buff *skb;

			skb = skb_peek(&hdev->cmd_q);
			if (!skb)
				return true;

			return bt_cb(skb)->hci.req_start;
			#define bt_cb(skb) ((struct bt_skb_cb *)((skb)->cb))
		}

		/* If this was the last command in a request the complete
		 * callback would be found in hdev->sent_cmd instead of the
		 * command queue (hdev->cmd_q).
		 */
		if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
			*req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
			return;
		}

		if (bt_cb(hdev->sent_cmd)->hci.req_complete_skb) {
			*req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
			return;
		}

		/* Remove all pending commands belonging to this request */
		spin_lock_irqsave(&hdev->cmd_q.lock, flags);
		while ((skb = __skb_dequeue(&hdev->cmd_q))) {
			if (bt_cb(skb)->hci.req_start) {
				/* Start of the next request - put it back. */
				__skb_queue_head(&hdev->cmd_q, skb);
				break;
			}

			*req_complete = bt_cb(skb)->hci.req_complete;
			*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
			kfree_skb(skb);
		}
		spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
	}

	/* More commands queued and credit available: schedule cmd_work. */
	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
		queue_work(hdev->workqueue, &hdev->cmd_work);
}

static void hci_cc_read_local_features(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	pr_err("joker %s:in %d.\n",__func__,__LINE__);
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	/* hci0 status 0x00 */
	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3); <tag35>

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type  |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2); <tag36>
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type  |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
}

/* Command Complete handler for HCI_OP_READ_LOCAL_VERSION.
 *
 * Caches the controller's HCI/LMP version, revision and manufacturer
 * identifiers, but only while the device is still in the SETUP or
 * CONFIG phase (the values must not change afterwards).
 *
 * Fix: removed the stray "<tag37>".."<tag41>" extraction markers left
 * after the assignments, which broke compilation.
 */
static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
{
	pr_err("joker %s:in %d.\n",__func__,__LINE__);
	struct hci_rp_read_local_version *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hdev->hci_ver = rp->hci_ver;
		hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
		hdev->lmp_ver = rp->lmp_ver;
		hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
	}
}

static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
	pr_err("joker %s:in %d.\n",__func__,__LINE__);
	struct hci_rp_read_bd_addr *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_INIT, &hdev->flags))
		bacpy(&hdev->bdaddr, &rp->bdaddr); <tag42>

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		bacpy(&hdev->setup_addr, &rp->bdaddr); <tag43>
}

/* Stage-2 controller init request.
 *
 * Queues baseline BR/EDR and LE capability queries and configuration
 * commands onto @req (they are only queued here; cmd_work later sends
 * them to the controller and the matching hci_cc_* handlers consume
 * the replies).
 *
 * NOTE(review): annotated study source -- the nested function bodies
 * below each hci_req_add() call are inline copies of the corresponding
 * completion handlers pasted for reading convenience; the file is not
 * expected to compile as-is, and stray "<tagNN>" markers from the
 * extraction are preserved verbatim.
 */
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	pr_err("joker %s:in %d.\n",__func__,__LINE__);
	struct hci_dev *hdev = req->hdev;

	/* AMP controllers take a dedicated init path. */
	if (hdev->dev_type == HCI_AMP) {
	pr_err("joker %s:in %d.\n",__func__,__LINE__);
		return amp_init2(req);
	}

	if (lmp_bredr_capable(hdev)) {
	pr_err("joker %s:in %d.\n",__func__,__LINE__); //
		bredr_setup(req);
		/* (annotation) inline copy of bredr_setup(). */
		static void bredr_setup(struct hci_request *req)
		{
			pr_err("joker %s:in %d.\n",__func__,__LINE__);
			__le16 param;
			__u8 flt_type;

			/* Read Buffer Size (ACL mtu, max pkt, etc.) */
			hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
			/* GM: the handler that consumes the HCI reply to this command. */
			static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
			{
				pr_err("joker %s:in %d.\n",__func__,__LINE__);
				struct hci_rp_read_buffer_size *rp = (void *) skb->data;

				BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

				if (rp->status)
					return;

				hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu); <tag44>
				hdev->sco_mtu  = rp->sco_mtu;
				hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
				hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

				if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
					hdev->sco_mtu  = 64;
					hdev->sco_pkts = 8;
				}

				/* Start with full flow-control credit for both link types. */
				hdev->acl_cnt = hdev->acl_pkts;
				hdev->sco_cnt = hdev->sco_pkts;

				BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
					   hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
			}

			/* Read Class of Device */
			hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
			static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
			{
				pr_err("joker %s:in %d.\n",__func__,__LINE__);
				struct hci_rp_read_class_of_dev *rp = (void *) skb->data;

				BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

				if (rp->status)
					return;

				memcpy(hdev->dev_class, rp->dev_class, 3); <tag45>

				BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
					   hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
			}

			/* Read Local Name */
			hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
			static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
			{
				pr_err("joker %s:in %d.\n",__func__,__LINE__);
				struct hci_rp_read_local_name *rp = (void *) skb->data;

				BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

				if (rp->status)
					return;

				/* Name only cached during SETUP/CONFIG. */
				if (hci_dev_test_flag(hdev, HCI_SETUP) ||
					hci_dev_test_flag(hdev, HCI_CONFIG))
					memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH); <tag46>
			}

			/* Read Voice Setting */
			hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
			static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
			{
				pr_err("joker %s:in %d.\n",__func__,__LINE__);
				struct hci_rp_read_voice_setting *rp = (void *) skb->data;
				__u16 setting;

				BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

				if (rp->status)
					return;

				setting = __le16_to_cpu(rp->voice_setting);

				if (hdev->voice_setting == setting)
					return;

				hdev->voice_setting = setting; <tag47>

				BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);

				/* Let the transport driver react to the new setting. */
				if (hdev->notify)
					hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
			}

			/* Read Number of Supported IAC */
			hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
			static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
								  struct sk_buff *skb)
			{
				pr_err("joker %s:in %d.\n",__func__,__LINE__);
				struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;

				BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

				if (rp->status)
					return;

				hdev->num_iac = rp->num_iac; <tag48>

				BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
			}

			/* Read Current IAC LAP */
			hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

			/* Clear Event Filters */
			flt_type = HCI_FLT_CLEAR_ALL;
			hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

			/* Connection accept timeout ~20 secs */
			param = cpu_to_le16(0x7d00);
			hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
		}
	}
	else {
	pr_err("joker %s:in %d.\n",__func__,__LINE__);
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
	}

	/* (annotation) macro copied here for reference. */
	#define lmp_le_capable(dev)        ((dev)->features[0][4] & LMP_LE)
	if (lmp_le_capable(hdev)) {
	pr_err("joker %s:in %d.\n",__func__,__LINE__);//
		le_setup(req);
		/* (annotation) inline copy of le_setup(). */
		static void le_setup(struct hci_request *req)
		{
			pr_err("joker %s:in %d.\n",__func__,__LINE__);
			struct hci_dev *hdev = req->hdev;

			/* Read LE Buffer Size */
			hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
			static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
								   struct sk_buff *skb)
			{
				pr_err("joker %s:in %d.\n",__func__,__LINE__);
				struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;

				BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

				if (rp->status)
					return;

				hdev->le_mtu = __le16_to_cpu(rp->le_mtu); <tag49>
				hdev->le_pkts = rp->le_max_pkt;

				/* Start with full LE flow-control credit. */
				hdev->le_cnt = hdev->le_pkts;

				BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
			}

			/* Read LE Local Supported Features */
			hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL); /* GM: HCI_LE_Read_Local_Supported_Features */
			static void hci_cc_le_read_local_features(struct hci_dev *hdev,
								  struct sk_buff *skb)
			{
				pr_err("joker %s:in %d.\n",__func__,__LINE__);
				struct hci_rp_le_read_local_features *rp = (void *) skb->data;

				BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

				if (rp->status)
					return;

				memcpy(hdev->le_features, rp->features, 8); <tag50>
			}

			/* Read LE Supported States */
			hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
			static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
									struct sk_buff *skb)
			{
				pr_err("joker %s:in %d.\n",__func__,__LINE__);
				struct hci_rp_le_read_supported_states *rp = (void *) skb->data;

				BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

				if (rp->status)
					return;

				memcpy(hdev->le_states, rp->le_states, 8); <tag51>
			}

			/* LE-only controllers have LE implicitly enabled */
			if (!lmp_bredr_capable(hdev))
				hci_dev_set_flag(hdev, HCI_LE_ENABLED); /* GM: not entered in this trace */
		}
	}

	/* All Bluetooth 1.2 and later controllers should support the
	 * HCI command for reading the local supported commands.
	 *
	 * Unfortunately some controllers indicate Bluetooth 1.2 support,
	 * but do not have support for this command. If that is the case,
	 * the driver can quirk the behavior and skip reading the local
	 * supported commands.
	 */
	if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
	    !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks)) {
		pr_err("joker %s:in %d.\n",__func__,__LINE__); //
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL); /* GM: Read Local Supported Commands command */
	}
		static void hci_cc_read_local_commands(struct hci_dev *hdev,
							   struct sk_buff *skb)
		{
			pr_err("joker %s:in %d.\n",__func__,__LINE__);
			struct hci_rp_read_local_commands *rp = (void *) skb->data;

			BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

			if (rp->status)
				return;

			if (hci_dev_test_flag(hdev, HCI_SETUP) ||
				hci_dev_test_flag(hdev, HCI_CONFIG))
				memcpy(hdev->commands, rp->commands, sizeof(hdev->commands)); <tag52>
		}

	if (lmp_ssp_capable(hdev)) {
		pr_err("joker %s:in %d.\n",__func__,__LINE__); //
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01; <tag53>

		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			pr_err("joker %s:in %d.\n",__func__,__LINE__);
			u8 mode = 0x01;

			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			pr_err("joker %s:in %d.\n",__func__,__LINE__); //
			struct hci_cp_write_eir cp;

			/* SSP disabled: clear the cached EIR and wipe it
			 * on the controller too.
			 */
			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
		pr_err("joker %s:in %d.\n",__func__,__LINE__); //
		u8 mode;

		/* If Extended Inquiry Result events are supported, then
		 * they are clearly preferred over Inquiry Result with RSSI
		 * events.
		 */
		mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

		hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
	}

	if (lmp_inq_tx_pwr_capable(hdev)) {
		pr_err("joker %s:in %d.\n",__func__,__LINE__); //
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
	}
		static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
							 struct sk_buff *skb)
		{
			pr_err("joker %s:in %d.\n",__func__,__LINE__);
			struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;

			BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

			if (rp->status)
				return;

			hdev->inq_tx_power = rp->tx_power; <tag54>
		}

	if (lmp_ext_feat_capable(hdev)) {
		pr_err("joker %s:in %d.\n",__func__,__LINE__); //
		struct hci_cp_read_local_ext_features cp;

		/* Request extended feature page 1 (host features). */
		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,sizeof(cp), &cp);
	}
		static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
							   struct sk_buff *skb)
		{
			pr_err("joker %s:in %d.\n",__func__,__LINE__);
			struct hci_rp_read_local_ext_features *rp = (void *) skb->data;

			BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

			if (rp->status)
				return;

			if (hdev->max_page < rp->max_page)
				hdev->max_page = rp->max_page; <tag55>

			if (rp->page < HCI_MAX_PAGES)
				memcpy(hdev->features[rp->page], rp->features, 8); <tag56>
		}

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
		pr_err("joker %s:in %d.\n",__func__,__LINE__);
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),&enable);
	}
	pr_err("joker %s:in %d.\n",__func__,__LINE__); //
}

/* Stage-3 controller init request.
 *
 * Sets the event mask, queries stored link keys, link policy and page
 * scan parameters, and -- for LE-capable controllers -- programs the LE
 * event mask and reads advertising TX power, white list and data
 * length parameters.  Commands are queued on @req; cmd_work sends them.
 *
 * NOTE(review): annotated study source -- the nested function bodies
 * below are inline copies of the matching completion handlers pasted
 * for reading convenience; the file is not expected to compile as-is.
 */
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	pr_err("joker %s:in %d.\n",__func__,__LINE__);
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	/* commands[6] bit 5: Read Stored Link Key supported. */
	if (hdev->commands[6] & 0x20 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) { //
		struct hci_cp_read_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.read_all = 0x01;
		hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
	}
		static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
							struct sk_buff *skb)
		{
			pr_err("joker %s:in %d.\n",__func__,__LINE__);
			struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
			struct hci_cp_read_stored_link_key *sent;

			BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

			sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
			if (!sent)
				return;

			if (!rp->status && sent->read_all == 0x01) {
				hdev->stored_max_keys = rp->max_keys; <tag57>
				hdev->stored_num_keys = rp->num_keys;
			}
		}

	/* commands[5] bit 4: Write Default Link Policy supported. */
	if (hdev->commands[5] & 0x10) { //
		hci_setup_link_policy(req);
	}
		/* (annotation) inline copy of hci_setup_link_policy(). */
		static void hci_setup_link_policy(struct hci_request *req)
		{
			pr_err("joker %s:in %d.\n",__func__,__LINE__);
			struct hci_dev *hdev = req->hdev;
			struct hci_cp_write_def_link_policy cp;
			u16 link_policy = 0;

			/* Enable every policy the LMP features advertise. */
			if (lmp_rswitch_capable(hdev))
				link_policy |= HCI_LP_RSWITCH;
			if (lmp_hold_capable(hdev))
				link_policy |= HCI_LP_HOLD;
			if (lmp_sniff_capable(hdev))
				link_policy |= HCI_LP_SNIFF;
			if (lmp_park_capable(hdev))
				link_policy |= HCI_LP_PARK;

			cp.policy = cpu_to_le16(link_policy);
			hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
			static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
								 struct sk_buff *skb)
			{
				pr_err("joker %s:in %d.\n",__func__,__LINE__);
				__u8 status = *((__u8 *) skb->data);
				void *sent;

				BT_DBG("%s status 0x%2.2x", hdev->name, status);

				if (status)
					return;

				sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
				if (!sent)
					return;

				/* Cache the policy we actually sent. */
				hdev->link_policy = get_unaligned_le16(sent); <tag58>
			}
		}

	if (hdev->commands[8] & 0x01) {
		pr_err("joker %s:in %d.\n",__func__,__LINE__); //
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
	}
		static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
							   struct sk_buff *skb)
		{
			pr_err("joker %s:in %d.\n",__func__,__LINE__);
			struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;

			BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

			if (rp->status)
				return;

			if (test_bit(HCI_INIT, &hdev->flags)) {
				hdev->page_scan_interval = __le16_to_cpu(rp->interval); <tag59>
				hdev->page_scan_window = __le16_to_cpu(rp->window);
			}
		}

	/* Some older Broadcom based Bluetooth 1.2 controllers do not
	 * support the Read Page Scan Type command. Check support for
	 * this command in the bit mask of supported commands.
	 */
	if (hdev->commands[13] & 0x01) {
		pr_err("joker %s:in %d.\n",__func__,__LINE__);//
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
		static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
							   struct sk_buff *skb)
		{
			pr_err("joker %s:in %d.\n",__func__,__LINE__);
			struct hci_rp_read_page_scan_type *rp = (void *) skb->data;

			BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

			if (rp->status)
				return;

			if (test_bit(HCI_INIT, &hdev->flags))
				hdev->page_scan_type = rp->type; <tag60>
		}

	if (lmp_le_capable(hdev)) {
		pr_err("joker %s:in %d.\n",__func__,__LINE__);//
		u8 events[8];

		/* Base LE event mask: the four mandatory meta events. */
		memset(events, 0, sizeof(events));
		events[0] = 0x0f;

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			pr_err("joker %s:in %d.\n",__func__,__LINE__); //
			events[0] |= 0x10;	/* LE Long Term Key Request */
		}

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC) {
			pr_err("joker %s:in %d.\n",__func__,__LINE__);
			events[0] |= 0x20;	/* LE Remote Connection
						 * Parameter Request
						 */
		}

		/* If the controller supports the Data Length Extension
		 * feature, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
			pr_err("joker %s:in %d.\n",__func__,__LINE__);
			events[0] |= 0x40;	/* LE Data Length Change */
		}

		/* If the controller supports Extended Scanner Filter
		 * Policies, enable the correspondig event.
		 */
		if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY) {
			pr_err("joker %s:in %d.\n",__func__,__LINE__);
			events[1] |= 0x04;	/* LE Direct Advertising
						 * Report
						 */
		}

		/* If the controller supports the LE Read Local P-256
		 * Public Key command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x02) {
			pr_err("joker %s:in %d.\n",__func__,__LINE__);
			events[0] |= 0x80;	/* LE Read Local P-256
						 * Public Key Complete
						 */
		}

		/* If the controller supports the LE Generate DHKey
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x04) {
			pr_err("joker %s:in %d.\n",__func__,__LINE__);
			events[1] |= 0x01;	/* LE Generate DHKey Complete */
		}

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events); /* tell the controller which BLE events to report */

		if (hdev->commands[25] & 0x40) {
			pr_err("joker %s:in %d.\n",__func__,__LINE__);//
			/* Read LE Advertising Channel TX Power */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}
			static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
								struct sk_buff *skb)
			{
				pr_err("joker %s:in %d.\n",__func__,__LINE__);
				struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;

				BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

				if (rp->status)
					return;

				hdev->adv_tx_power = rp->tx_power; <tag61>
			}

		if (hdev->commands[26] & 0x40) {
			pr_err("joker %s:in %d.\n",__func__,__LINE__);//
			/* Read LE White List Size */
			hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
		}
			static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
								   struct sk_buff *skb)
			{
				pr_err("joker %s:in %d.\n",__func__,__LINE__);
				struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;

				BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);

				if (rp->status)
					return;

				hdev->le_white_list_size = rp->size; <tag62>
			}

		if (hdev->commands[26] & 0x80) {
			pr_err("joker %s:in %d.\n",__func__,__LINE__);//
			/* Clear LE White List */
			hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
		}
			static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
								   struct sk_buff *skb)
			{
				pr_err("joker %s:in %d.\n",__func__,__LINE__);
				__u8 status = *((__u8 *) skb->data);

				BT_DBG("%s status 0x%2.2x", hdev->name, status);

				if (status)
					return;

				/* Mirror the controller's cleared list in the host. */
				hci_bdaddr_list_clear(&hdev->le_white_list);
			}

		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
			pr_err("joker %s:in %d.\n",__func__,__LINE__);
			/* Read LE Maximum Data Length */
			hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

			/* Read LE Suggested Default Data Length */
			hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
		}

		hci_set_le_support(req);
		/* (annotation) inline copy of hci_set_le_support(). */
		static void hci_set_le_support(struct hci_request *req)
		{
			pr_err("joker %s:in %d.\n",__func__,__LINE__); //
			struct hci_dev *hdev = req->hdev;
			struct hci_cp_write_le_host_supported cp;

			/* LE-only devices do not support explicit enablement */
			if (!lmp_bredr_capable(hdev))
				return;

			memset(&cp, 0, sizeof(cp));

			if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
				cp.le = 0x01;
				cp.simul = 0x00;
			}

			/* Only send the command if the host setting changes. */
			if (cp.le != lmp_host_le_capable(hdev))
				hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),&cp);
		}
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) { /* GM: not entered in this trace */
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES, sizeof(cp), &cp);
	}
	pr_err("joker %s:in %d.\n",__func__,__LINE__); //
}

/* Stage-4 (final) controller init request.
 *
 * Deletes stale stored link keys, sets event mask page 2, reads the
 * codec list and MWS transport configuration, checks Synchronization
 * Train support, and enables Secure Connections -- each step only if
 * the supported-commands bitmap / feature bits say the controller can
 * handle it.
 *
 * NOTE(review): annotated study source -- the nested function body
 * below is an inline copy of the completion handler, pasted for
 * reading convenience; the file is not expected to compile as-is.
 */
static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	pr_err("joker %s:in %d.\n",__func__,__LINE__);
	struct hci_dev *hdev = req->hdev;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		pr_err("joker %s:in %d.\n",__func__,__LINE__);//
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
	}
		static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
							  struct sk_buff *skb)
		{
			pr_err("joker %s:in %d.\n",__func__,__LINE__);
			struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;

			BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

			if (rp->status)
				return;

			/* Decrement the cached count, clamping at zero. */
			if (rp->num_keys <= hdev->stored_num_keys)
				hdev->stored_num_keys -= rp->num_keys;
			else
				hdev->stored_num_keys = 0;
		}

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04) {
		pr_err("joker %s:in %d.\n",__func__,__LINE__);
		hci_set_event_mask_page_2(req);
	}

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20) {
		pr_err("joker %s:in %d.\n",__func__,__LINE__);
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
	}

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08) {
		pr_err("joker %s:in %d.\n",__func__,__LINE__);
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
	}

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev)) {
		pr_err("joker %s:in %d.\n",__func__,__LINE__);
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
	}

	/* Enable Secure Connections if supported and configured */
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    bredr_sc_enabled(hdev)) {
		pr_err("joker %s:in %d.\n",__func__,__LINE__);
		u8 support = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
	pr_err("joker %s:in %d.\n",__func__,__LINE__); //
}

/*
 * hci_power_off - delayed worker that powers the controller down.
 *
 * Scheduled on hdev->power_off (INIT_DELAYED_WORK in hci_alloc_dev, see the
 * notes at the top of this file) and fires when the HCI_AUTO_OFF grace
 * period expires.  All real teardown is done by hci_dev_do_close(); this
 * trace document inlines each callee body directly below its call site.
 */
static void hci_power_off(struct work_struct *work)
{
	pr_err("joker %s:in %d.\n",__func__,__LINE__);
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	/* hci0 */
	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
	/* Trace note: body of hci_dev_do_close() inlined below the call. */
	int hci_dev_do_close(struct hci_dev *hdev)
	{
		pr_err("joker %s:in %d.\n",__func__,__LINE__);
		bool auto_off;

		/* hci0 c5d45000 */
		BT_DBG("%s %p", hdev->name, hdev);

		/* Run the vendor shutdown hook only while the device is still
		 * registered, not claimed by a user channel, and actually up.
		 */
		if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
			!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
			test_bit(HCI_UP, &hdev->flags)) { //
			/* Execute vendor specific shutdown routine */
			if (hdev->shutdown) {
				hdev->shutdown(hdev);
			}
		}

		cancel_delayed_work(&hdev->power_off);

		/* Wake anyone blocked in a synchronous request (__hci_req_sync)
		 * so the request fails with -ENODEV instead of timing out.
		 */
		hci_req_cancel(hdev, ENODEV);
		/* Trace note: hci_req_cancel() body inlined below. */
		static void hci_req_cancel(struct hci_dev *hdev, int err)
		{
			pr_err("joker %s:in %d.\n",__func__,__LINE__);
			/* hci0 err 0x13 */
			BT_DBG("%s err 0x%2.2x", hdev->name, err);

			if (hdev->req_status == HCI_REQ_PEND) {
				hdev->req_result = err;
				hdev->req_status = HCI_REQ_CANCELED;
				wake_up_interruptible(&hdev->req_wait_q);
			}
		}
		hci_req_lock(hdev);
		#define hci_req_lock(d)		mutex_lock(&d->req_lock)

		/* Device already down: only stop the command timer and bail. */
		if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
			cancel_delayed_work_sync(&hdev->cmd_timer);
			hci_req_unlock(hdev);
			return 0;
		}

		/* Flush RX and TX works */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->rx_work);

		if (hdev->discov_timeout > 0) {
			cancel_delayed_work(&hdev->discov_off);
			hdev->discov_timeout = 0;
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		}

		if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
			cancel_delayed_work(&hdev->service_cache);
		}

		cancel_delayed_work_sync(&hdev->le_scan_disable);
		cancel_delayed_work_sync(&hdev->le_scan_restart);

		if (hci_dev_test_flag(hdev, HCI_MGMT)) {
			cancel_delayed_work_sync(&hdev->rpa_expired);
		}

		if (hdev->adv_instance_timeout) {
			cancel_delayed_work_sync(&hdev->adv_instance_expire);
			hdev->adv_instance_timeout = 0;
		}

		/* Avoid potential lockdep warnings from the *_flush() calls by
		 * ensuring the workqueue is empty up front.
		 */
		drain_workqueue(hdev->workqueue);

		hci_dev_lock(hdev);

		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		/* Trace note: hci_discovery_set_state() body inlined below. */
		void hci_discovery_set_state(struct hci_dev *hdev, int state)
		{
			pr_err("joker %s:in %d.\n",__func__,__LINE__);
			int old_state = hdev->discovery.state;

			/* hci0 state 0 -> 0 */
			BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

			/* No-op if already in the requested state (the traced
			 * run shows 0 -> 0, so the switch below is skipped). */
			if (old_state == state)
				return;

			hdev->discovery.state = state;

			switch (state) {
			case DISCOVERY_STOPPED:
				hci_update_background_scan(hdev);

				if (old_state != DISCOVERY_STARTING)
					mgmt_discovering(hdev, 0);
				break;
			case DISCOVERY_STARTING:
				break;
			case DISCOVERY_FINDING:
				mgmt_discovering(hdev, 1);
				break;
			case DISCOVERY_RESOLVING:
				break;
			case DISCOVERY_STOPPING:
				break;
			}
		}

		auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);

		if (!auto_off && hdev->dev_type == HCI_BREDR) {
			mgmt_powered(hdev, 0);
		}

		/* Free every cached inquiry result. */
		hci_inquiry_cache_flush(hdev);
		/* Trace note: hci_inquiry_cache_flush() body inlined below. */
		void hci_inquiry_cache_flush(struct hci_dev *hdev)
		{
			pr_err("joker %s:in %d.\n",__func__,__LINE__);
			struct discovery_state *cache = &hdev->discovery;
			struct inquiry_entry *p, *n;

			list_for_each_entry_safe(p, n, &cache->all, all) {
				list_del(&p->all);
				kfree(p);
			}

			INIT_LIST_HEAD(&cache->unknown);
			INIT_LIST_HEAD(&cache->resolve);
		}
		hci_pend_le_actions_clear(hdev);
		/* Trace note: hci_pend_le_actions_clear() body inlined below. */
		static void hci_pend_le_actions_clear(struct hci_dev *hdev)
		{
			pr_err("joker %s:in %d.\n",__func__,__LINE__);
			struct hci_conn_params *p;

			list_for_each_entry(p, &hdev->le_conn_params, list) {
				if (p->conn) {
					hci_conn_drop(p->conn);
					hci_conn_put(p->conn);
					p->conn = NULL;
				}
				list_del_init(&p->action);
			}

			BT_DBG("All LE pending actions cleared");
		}
		/* Drop every live connection tracked in hdev->conn_hash (see the
		 * conn_hash queue notes at the top of this file). */
		hci_conn_hash_flush(hdev);
		/* Trace note: hci_conn_hash_flush() body inlined below. */
		void hci_conn_hash_flush(struct hci_dev *hdev)
		{
			pr_err("joker %s:in %d.\n",__func__,__LINE__);
			struct hci_conn_hash *h = &hdev->conn_hash;
			struct hci_conn *c, *n;

			/* hdev hci0 */
			BT_DBG("hdev %s", hdev->name);

			list_for_each_entry_safe(c, n, &h->list, list) {
				c->state = BT_CLOSED;

				hci_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
				hci_conn_del(c);
			}
		}
		hci_dev_unlock(hdev);

		smp_unregister(hdev);
		/* Trace note: smp_unregister() body inlined below. */
		void smp_unregister(struct hci_dev *hdev)
		{
			pr_err("joker %s:in %d.\n",__func__,__LINE__);
			struct l2cap_chan *chan;

			if (hdev->smp_bredr_data) {
				chan = hdev->smp_bredr_data;
				hdev->smp_bredr_data = NULL;
				smp_del_chan(chan);
			}

			if (hdev->smp_data) {
				chan = hdev->smp_data;
				hdev->smp_data = NULL;
				smp_del_chan(chan);
			}
		}

		hci_sock_dev_event(hdev, HCI_DEV_DOWN);

		if (hdev->flush) { //
			hdev->flush(hdev); /* hci_uart_flush */
		}
			/* Trace note: hci_uart_flush() body inlined below. */
			static int hci_uart_flush(struct hci_dev *hdev)
			{
				pr_err("joker %s:in %d.\n",__func__,__LINE__);
				struct hci_uart *hu  = hci_get_drvdata(hdev);
				struct tty_struct *tty = hu->tty;

				/* hdev c5d43000 tty c5de7400 */
				BT_DBG("hdev %p tty %p", hdev, tty);

				if (hu->tx_skb) {
					kfree_skb(hu->tx_skb); hu->tx_skb = NULL;
				}

				/* Flush any pending characters in the driver and discipline. */
				tty_ldisc_flush(tty);
				tty_driver_flush_buffer(tty);

				if (test_bit(HCI_UART_PROTO_SET, &hu->flags)) {
					hu->proto->flush(hu); /* bcsp_flush */
				}
					/* Trace note: bcsp_flush() is a no-op for BCSP. */
					static int bcsp_flush(struct hci_uart *hu)
					{
						pr_err("joker %s:in %d.\n",__func__,__LINE__);
						BT_DBG("hu %p", hu);
						return 0;
					}

				return 0;
			}

		/* Reset device */
		skb_queue_purge(&hdev->cmd_q);
		atomic_set(&hdev->cmd_cnt, 1);
		if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
			!auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			set_bit(HCI_INIT, &hdev->flags);
			__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
			clear_bit(HCI_INIT, &hdev->flags);
		}

		/* flush cmd  work */
		flush_work(&hdev->cmd_work);

		/* Drop queues */
		skb_queue_purge(&hdev->rx_q);
		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->raw_q);

		/* Drop last sent command */
		if (hdev->sent_cmd) { //
			cancel_delayed_work_sync(&hdev->cmd_timer);
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		clear_bit(HCI_RUNNING, &hdev->flags);
		hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

		/* After this point our queues are empty
		 * and no tasks are scheduled. */
		hdev->close(hdev); /* hci_uart_close */
		/* Trace note: hci_uart_close() body inlined below. */
		static int hci_uart_close(struct hci_dev *hdev)
		{
			pr_err("joker %s:in %d.\n",__func__,__LINE__);
			BT_DBG("hdev %p", hdev);

			hci_uart_flush(hdev);
			hdev->flush = NULL;
			return 0;
		}

		/* Clear flags */
		hdev->flags &= BIT(HCI_RAW);
		hci_dev_clear_volatile_flags(hdev);

		/* Controller radio is available but is currently powered down */
		hdev->amp_status = AMP_STATUS_POWERED_DOWN;

		memset(hdev->eir, 0, sizeof(hdev->eir));
		memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
		bacpy(&hdev->random_addr, BDADDR_ANY);

		hci_req_unlock(hdev);

		/* Drop the reference matching the hold taken at open time. */
		hci_dev_put(hdev);
		pr_err("joker %s:in %d.\n",__func__,__LINE__); //
		return 0;
	}
}





############################################ hciconfig hci0 up ######################
/*
 * bt_sock_create - PF_BLUETOOTH socket() entry point.
 *
 * Validates the namespace and protocol number, autoloads the protocol
 * module if needed, then dispatches to the per-protocol create hook
 * (hci_sock_create for BTPROTO_HCI, inlined below for this trace).
 */
static int bt_sock_create(struct net *net, struct socket *sock, int proto,
			  int kern)
{
	pr_err("joker %s:in %d.\n",__func__,__LINE__);
	int err;

	/* Bluetooth sockets exist only in the initial network namespace. */
	if (net != &init_net)
		return -EAFNOSUPPORT;

	if (proto < 0 || proto >= BT_MAX_PROTO)
		return -EINVAL;

	/* Autoload the protocol module on first use. */
	if (!bt_proto[proto])
		request_module("bt-proto-%d", proto);

	err = -EPROTONOSUPPORT;

	read_lock(&bt_proto_lock);

	if (bt_proto[proto] && try_module_get(bt_proto[proto]->owner)) {
		err = bt_proto[proto]->create(net, sock, proto, kern); /* hci_sock_create */
		/* Trace note: hci_sock_create() body inlined below. */
		static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
					   int kern)
		{
			pr_err("joker %s:in %d.\n",__func__,__LINE__);
			struct sock *sk;

			BT_DBG("sock %p", sock);

			/* HCI sockets are raw only. */
			if (sock->type != SOCK_RAW)
				return -ESOCKTNOSUPPORT;

			sock->ops = &hci_sock_ops;

			sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
			if (!sk)
				return -ENOMEM;

			sock_init_data(sock, sk);

			sock_reset_flag(sk, SOCK_ZAPPED);

			sk->sk_protocol = protocol;

			sock->state = SS_UNCONNECTED;
			sk->sk_state = BT_OPEN;

			/* Track the new socket in the global HCI socket list. */
			bt_sock_link(&hci_sk_list, sk);
			/* Trace note: bt_sock_link() body inlined below. */
			void bt_sock_link(struct bt_sock_list *l, struct sock *sk)
			{
				pr_err("joker %s:in %d.\n",__func__,__LINE__);
				write_lock(&l->lock);
				sk_add_node(sk, &l->head);
				write_unlock(&l->lock);
			}
			return 0;
		}
		if (!err) {
			bt_sock_reclassify_lock(sock->sk, proto);
		}
		/* Trace note: bt_sock_reclassify_lock() gives each protocol its
		 * own lockdep class so nested socket locking doesn't warn. */
		void bt_sock_reclassify_lock(struct sock *sk, int proto)
		{
			pr_err("joker %s:in %d.\n",__func__,__LINE__);
			BUG_ON(!sk);
			BUG_ON(sock_owned_by_user(sk));

			sock_lock_init_class_and_name(sk,
					bt_slock_key_strings[proto], &bt_slock_key[proto],
						bt_key_strings[proto], &bt_lock_key[proto]);
			#define sock_lock_init_class_and_name(sk, sname, skey, name, key)	\
			do {									\
				sk->sk_lock.owned = 0;						\
				init_waitqueue_head(&sk->sk_lock.wq);				\
				spin_lock_init(&(sk)->sk_lock.slock);				\
				debug_check_no_locks_freed((void *)&(sk)->sk_lock,		\
						sizeof((sk)->sk_lock));				\
				lockdep_set_class_and_name(&(sk)->sk_lock.slock,		\
							(skey), (sname));				\
				lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0);	\
			} while (0)

		}
		module_put(bt_proto[proto]->owner);
	}

	read_unlock(&bt_proto_lock);

	return err;
}
/*
 * hci_sock_ioctl - ioctl handler for raw HCI sockets.
 *
 * The "hciconfig hci0 up" path of the trace goes through HCIDEVUP ->
 * hci_dev_open(), whose body is inlined below its call site.
 */
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	pr_err("joker %s:in %d.\n",__func__,__LINE__);
	void __user *argp = (void __user *) arg;
	struct sock *sk = sock->sk;
	int err;

	/* cmd 800448d3 arg 46d38 */
	BT_DBG("cmd %x arg %lx", cmd, arg);

	lock_sock(sk);

	/* Device ioctls are only valid on the raw channel. */
	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	/* Dropped before the switch: the per-device ioctls below take their
	 * own locks and may sleep. */
	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);
		/* Trace note: hci_get_dev_info() body inlined below. */
		int hci_get_dev_info(void __user *arg)
		{
			pr_err("joker %s:in %d.\n",__func__,__LINE__);
			struct hci_dev *hdev;
			struct hci_dev_info di;
			unsigned long flags;
			int err = 0;

			if (copy_from_user(&di, arg, sizeof(di)))
				return -EFAULT;

			hdev = hci_dev_get(di.dev_id);
			if (!hdev)
				return -ENODEV;

			/* When the auto-off is configured it means the transport
			 * is running, but in that case still indicate that the
			 * device is actually down.
			 */
			if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
				flags = hdev->flags & ~BIT(HCI_UP);
			else
				flags = hdev->flags;

			strcpy(di.name, hdev->name);
			di.bdaddr   = hdev->bdaddr;
			di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
			di.flags    = flags;
			di.pkt_type = hdev->pkt_type;
			/* BR/EDR controllers report ACL+SCO MTUs; LE-only
			 * controllers report LE MTUs in the ACL fields. */
			if (lmp_bredr_capable(hdev)) {
				di.acl_mtu  = hdev->acl_mtu;
				di.acl_pkts = hdev->acl_pkts;
				di.sco_mtu  = hdev->sco_mtu;
				di.sco_pkts = hdev->sco_pkts;
			} else {
				di.acl_mtu  = hdev->le_mtu;
				di.acl_pkts = hdev->le_pkts;
				di.sco_mtu  = 0;
				di.sco_pkts = 0;
			}
			di.link_policy = hdev->link_policy;
			di.link_mode   = hdev->link_mode;

			memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
			memcpy(&di.features, &hdev->features, sizeof(di.features));

			if (copy_to_user(arg, &di, sizeof(di)))
				err = -EFAULT;

			hci_dev_put(hdev);

			return err;
		}
	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);
		/* Trace note: hci_dev_open() body inlined below. */
		int hci_dev_open(__u16 dev)
		{
			pr_err("joker %s:in %d.\n",__func__,__LINE__);
			struct hci_dev *hdev;
			int err;

			hdev = hci_dev_get(dev);
			if (!hdev)
				return -ENODEV;
			/* Trace note: hci_dev_get() looks up hdev by id in
			 * hci_dev_list and takes a reference; body inlined. */
			struct hci_dev *hci_dev_get(int index)
			{
				pr_err("joker %s:in %d.\n",__func__,__LINE__);
				struct hci_dev *hdev = NULL, *d;

				BT_DBG("%d", index);

				if (index < 0)
					return NULL;

				read_lock(&hci_dev_list_lock);
				list_for_each_entry(d, &hci_dev_list, list) {
					if (d->id == index) {
						hdev = hci_dev_hold(d);
						break;
					}
				}
				read_unlock(&hci_dev_list_lock);
				return hdev;
			}

			/* Devices that are marked as unconfigured can only be powered
			 * up as user channel. Trying to bring them up as normal devices
			 * will result into a failure. Only user channel operation is
			 * possible.
			 *
			 * When this function is called for a user channel, the flag
			 * HCI_USER_CHANNEL will be set first before attempting to
			 * open the device.
			 */
			if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
				!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
				pr_err("joker %s:in %d.\n",__func__,__LINE__);
				err = -EOPNOTSUPP;
				goto done;
			}

			/* We need to ensure that no other power on/off work is pending
			 * before proceeding to call hci_dev_do_open. This is
			 * particularly important if the setup procedure has not yet
			 * completed.
			 */
			<tag68> /* NOTE(review): stray extraction artifact, not C code -- remove when cleaning up this trace */
			if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) { /* Clear the AUTO_OFF flag, otherwise the HCI_UP flag is masked and the AP cannot use this HCI. */
				pr_err("joker %s:in %d.\n",__func__,__LINE__);
				cancel_delayed_work(&hdev->power_off);
			}

			/* After this call it is guaranteed that the setup procedure
			 * has finished. This means that error conditions like RFKILL
			 * or no valid public or static random address apply.
			 */
			flush_workqueue(hdev->req_workqueue);

			/* For controllers not using the management interface and that
			 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
			 * so that pairing works for them. Once the management interface
			 * is in use this bit will be cleared again and userspace has
			 * to explicitly enable it.
			 */
			if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
				!hci_dev_test_flag(hdev, HCI_MGMT)) {
				pr_err("joker %s:in %d.\n",__func__,__LINE__);//
				hci_dev_set_flag(hdev, HCI_BONDABLE);
			}

			err = hci_dev_do_open(hdev);

		done:
			hci_dev_put(hdev);
			pr_err("joker %s:in %d.\n",__func__,__LINE__);//
			return err;
		}

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	/* Anything not handled above goes to the bound-socket ioctls. */
	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}

/*
 * hci_sock_release - close() handler for an HCI socket.
 *
 * Unlinks the sock from the global list, tears down user-channel
 * exclusive access if held, and drops the device and sock references.
 */
static int hci_sock_release(struct socket *sock)
{
	pr_err("joker %s:in %d.\n",__func__,__LINE__);
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	hdev = hci_pi(sk)->hdev;

	if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
		atomic_dec(&monitor_promisc);

	bt_sock_unlink(&hci_sk_list, sk);
	/* Trace note: bt_sock_unlink() body inlined below. */
	void bt_sock_unlink(struct bt_sock_list *l, struct sock *sk)
	{
		pr_err("joker %s:in %d.\n",__func__,__LINE__);
		write_lock(&l->lock);
		sk_del_node_init(sk);
		write_unlock(&l->lock);
	}

	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* When releasing an user channel exclusive access,
			 * call hci_dev_do_close directly instead of calling
			 * hci_dev_close to ensure the exclusive access will
			 * be released and the controller brought back down.
			 *
			 * The checking of HCI_AUTO_OFF is not needed in this
			 * case since it will have been cleared already when
			 * opening the user channel.
			 */
			hci_dev_do_close(hdev);
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			mgmt_index_added(hdev);
		}

		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	/* Free anything still queued on the socket before the final put. */
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	sock_put(sk);
	return 0;
}

#######################################  hcitool lecc ######################
bt_sock_create
/*
 * bt_sock_create - second traced copy, hit again during "hcitool lecc".
 * Identical to the copy in the "hciconfig hci0 up" section above; kept
 * here so the lecc call chain reads top to bottom.
 */
static int bt_sock_create(struct net *net, struct socket *sock, int proto,
			  int kern)
{
	pr_err("joker %s:in %d.\n",__func__,__LINE__);
	int err;

	if (net != &init_net)
		return -EAFNOSUPPORT;

	if (proto < 0 || proto >= BT_MAX_PROTO)
		return -EINVAL;

	if (!bt_proto[proto])
		request_module("bt-proto-%d", proto);

	err = -EPROTONOSUPPORT;

	read_lock(&bt_proto_lock);

	if (bt_proto[proto] && try_module_get(bt_proto[proto]->owner)) {
		err = bt_proto[proto]->create(net, sock, proto, kern);
		if (!err)
			bt_sock_reclassify_lock(sock->sk, proto);
		module_put(bt_proto[proto]->owner);
	}

	read_unlock(&bt_proto_lock);

	return err;
}

/*
 * hci_sock_ioctl - second traced copy, hit during "hcitool lecc".
 * This pass takes HCIGETDEVLIST; hci_get_dev_list() is inlined below
 * its call site.
 */
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	pr_err("joker %s:in %d.\n",__func__,__LINE__);
	void __user *argp = (void __user *) arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp); //
	/* Trace note: hci_get_dev_list() body inlined below. */
	int hci_get_dev_list(void __user *arg)
	{
		pr_err("joker %s:in %d.\n",__func__,__LINE__);
		struct hci_dev *hdev;
		struct hci_dev_list_req *dl;
		struct hci_dev_req *dr;
		int n = 0, size, err;
		__u16 dev_num;

		/* First __u16 of the user buffer is the requested count. */
		if (get_user(dev_num, (__u16 __user *) arg))
			return -EFAULT;

		/* Cap the count so the kzalloc below stays bounded. */
		if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
			return -EINVAL;

		size = sizeof(*dl) + dev_num * sizeof(*dr);

		dl = kzalloc(size, GFP_KERNEL);
		if (!dl)
			return -ENOMEM;

		dr = dl->dev_req;

		read_lock(&hci_dev_list_lock);
		list_for_each_entry(hdev, &hci_dev_list, list) {
			unsigned long flags = hdev->flags;

			/* When the auto-off is configured it means the transport
			 * is running, but in that case still indicate that the
			 * device is actually down.
			 */
			if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) /* With this flag set the hci powers off automatically, i.e. reverses the init. */
				flags &= ~BIT(HCI_UP);

			(dr + n)->dev_id  = hdev->id;
			(dr + n)->dev_opt = flags;

			if (++n >= dev_num)
				break;
		}
		read_unlock(&hci_dev_list_lock);

		/* Only copy back the entries actually filled in. */
		dl->dev_num = n;
		size = sizeof(*dl) + n * sizeof(*dr);

		err = copy_to_user(arg, dl, size);
		kfree(dl);

		return err ? -EFAULT : 0;
	}

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}

/*
 * hci_sock_bind - bind() handler for HCI sockets.
 *
 * Dispatches on haddr.hci_channel: RAW (the path taken in this lecc
 * trace), USER (exclusive device access), MONITOR, and the management
 * channels.  On success records the channel and moves sk to BT_BOUND.
 */
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	pr_err("joker %s:in %d.\n",__func__,__LINE__);
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	/* Copy at most sizeof(haddr); shorter user addresses are allowed
	 * and the remainder stays zeroed. */
	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH) {
		return -EINVAL;
	}

	lock_sock(sk);

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW: //
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		/* Binding to a specific device takes a reference on it;
		 * HCI_DEV_NONE binds the socket to all devices. */
		if (haddr.hci_dev != HCI_DEV_NONE) { //
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}
			/* Trace note: hci_dev_get() body inlined below. */
			struct hci_dev *hci_dev_get(int index)
			{
				pr_err("joker %s:in %d.\n",__func__,__LINE__);
				struct hci_dev *hdev = NULL, *d;

				BT_DBG("%d", index);

				if (index < 0)
					return NULL;

				read_lock(&hci_dev_list_lock);
				list_for_each_entry(d, &hci_dev_list, list) {
					if (d->id == index) {
						hdev = hci_dev_hold(d);
						break;
					}
				}
				read_unlock(&hci_dev_list_lock);
				return hdev;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		/* User channel requires a concrete device ... */
		if (haddr.hci_dev == HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		/* ... and admin privileges. */
		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		/* Refuse exclusive access while the device is initializing,
		 * being configured, or already up for normal use. */
		if (test_bit(HCI_INIT, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
		     test_bit(HCI_UP, &hdev->flags))) {
			err = -EBUSY;
			hci_dev_put(hdev);
			goto done;
		}

		/* Only one user-channel owner per device. */
		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
			err = -EUSERS;
			hci_dev_put(hdev);
			goto done;
		}

		mgmt_index_removed(hdev);

		err = hci_dev_open(hdev->id);
		if (err) {
			if (err == -EALREADY) {
				/* In case the transport is already up and
				 * running, clear the error here.
				 *
				 * This can happen when opening an user
				 * channel and HCI_AUTO_OFF grace period
				 * is still active.
				 */
				err = 0;
			} else {
				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
				mgmt_index_added(hdev);
				hci_dev_put(hdev);
				goto done;
			}
		}

		atomic_inc(&hdev->promisc);

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		/* The monitor interface is restricted to CAP_NET_RAW
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		send_monitor_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	default:
		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
			err = -EINVAL;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		/* Users with CAP_NET_ADMIN capabilities are allowed
		 * access to all management commands and events. For
		 * untrusted users the interface is restricted and
		 * also only untrusted events are sent.
		 */
		if (capable(CAP_NET_ADMIN)) {
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
		}

		/* At the moment the index and unconfigured index events
		 * are enabled unconditionally. Setting them on each
		 * socket when binding keeps this functionality. They
		 * however might be cleared later and then sending of these
		 * events will be disabled, but that is then intentional.
		 *
		 * This also enables generic events that are safe to be
		 * received by untrusted users. Example for such events
		 * are changes to settings, class of device, name etc.
		 */
		if (haddr.hci_channel == HCI_CHANNEL_CONTROL) {
			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_GENERIC_EVENTS);
		}
		break;
	}


	hci_pi(sk)->channel = haddr.hci_channel;
	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err; //
}
/*
 * hci_sock_getsockopt - read HCI socket options (raw channel only).
 *
 * Supports HCI_DATA_DIR / HCI_TIME_STAMP (cmsg flags) and HCI_FILTER
 * (the per-socket event filter); the lecc trace hits HCI_FILTER.
 */
static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	pr_err("joker %s:in %d.\n",__func__,__LINE__);
	struct hci_ufilter uf;
	struct sock *sk = sock->sk;
	int len, opt, err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
	pr_err("joker %s:in %d.HCI_DATA_DIR\n",__func__,__LINE__);
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_TIME_STAMP:
	pr_err("joker %s:in %d.HCI_TIME_STAMP\n",__func__,__LINE__);
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_FILTER:
	pr_err("joker %s:in %d.HCI_FILTER\n",__func__,__LINE__); //
		{
			/* Snapshot the kernel filter into the user-visible
			 * hci_ufilter layout. */
			struct hci_filter *f = &hci_pi(sk)->filter;

			memset(&uf, 0, sizeof(uf));
			uf.type_mask = f->type_mask;
			uf.opcode    = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		/* Never copy more than the caller asked for. */
		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_to_user(optval, &uf, len))
			err = -EFAULT;
		break;

	default:
	pr_err("joker %s:in %d.\n",__func__,__LINE__);
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	pr_err("joker %s:in %d.\n",__func__,__LINE__); //
	return err;
}

/*
 * hci_sock_setsockopt - set HCI socket options (raw channel only).
 *
 * HCI_FILTER installs the per-socket event filter; callers without
 * CAP_NET_RAW are clamped to hci_sec_filter so unprivileged sockets
 * cannot widen their view of HCI traffic.
 */
static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, unsigned int len)
{
	pr_err("joker %s:in %d.\n",__func__,__LINE__);
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
	pr_err("joker %s:in %d.HCI_DATA_DIR\n",__func__,__LINE__);
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
	pr_err("joker %s:in %d.HCI_TIME_STAMP\n",__func__,__LINE__);
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
	pr_err("joker %s:in %d.HCI_FILTER\n",__func__,__LINE__);//
		{
			/* Pre-fill uf with the current filter so a short user
			 * buffer only partially overwrites it. */
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode    = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_user(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		/* Unprivileged sockets may only narrow within the secure
		 * baseline filter, never widen past it. */
		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode    = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
	pr_err("joker %s:in %d.\n",__func__,__LINE__);
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	pr_err("joker %s:in %d.\n",__func__,__LINE__); //
	return err;
}

/*
 * hci_sock_sendmsg - sendmsg() handler for HCI sockets.
 *
 * For RAW-channel command packets (the lecc path in this trace) the
 * packet-type byte is stripped, permissions are checked against
 * hci_sec_filter, and non-vendor (ogf != 0x3f) commands are queued on
 * hdev->cmd_q for hci_cmd_work; everything else goes to hdev->raw_q
 * for hci_tx_work.  See the queue notes at the top of this file.
 */
static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t len)
{
	pr_err("joker %s:in %d.\n",__func__,__LINE__);
	struct sock *sk = sock->sk;
	struct hci_mgmt_chan *chan;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
		return -EINVAL;

	/* Minimum: 1-byte packet type + 3-byte HCI command header. */
	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER: //
		break;
	case HCI_CHANNEL_MONITOR:
		err = -EOPNOTSUPP;
		goto done;
	default:
		/* Management channels get their own command parser. */
		mutex_lock(&mgmt_chan_list_lock);
		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
		if (chan)
			err = hci_mgmt_cmd(chan, sk, msg, len);
		else
			err = -EINVAL;

		mutex_unlock(&mgmt_chan_list_lock);
		goto done;
	}

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;
	/* Trace note: bt_skb_send_alloc() body inlined below. */
	static inline struct sk_buff *bt_skb_send_alloc(struct sock *sk,
						unsigned long len, int nb, int *err)
	{
		struct sk_buff *skb;

		skb = sock_alloc_send_skb(sk, len + BT_SKB_RESERVE, nb, err);
		if (skb) {
			skb_reserve(skb, BT_SKB_RESERVE);
			bt_cb(skb)->incoming  = 0;
		}

		if (!skb && *err)
			return NULL;

		*err = sock_error(sk);
		if (*err)
			goto out;

		if (sk->sk_shutdown) {
			*err = -ECONNRESET;
			goto out;
		}

		return skb;

	out:
		kfree_skb(skb);
		return NULL;
	}

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	/* First byte selects the HCI packet type; strip it off. */
	bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However check that the packet type is valid.
		 */
		if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
		    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
		    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) { //
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		/* Unprivileged senders may only issue opcodes allowed by
		 * the secure OCF bitmap for that OGF. */
		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		/* Vendor-specific commands bypass the command scheduler. */
		if (ogf == 0x3f) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else { //
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->hci.req_start = true;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work); /* hci_cmd_work */
		}
	} else {
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
		    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk); //
	return err;

drop:
	kfree_skb(skb);
	goto done;
}
/*
 * hci_cmd_status_evt - handle the HCI Command Status event.
 *
 * Pulls the event header, records opcode/status for the caller,
 * dispatches to the per-command status handler, refreshes the command
 * credit, flags the pending request complete where appropriate, and
 * kicks cmd_work if more commands are queued on hdev->cmd_q.
 *
 * @hdev:             controller the event arrived on
 * @skb:              event payload (header consumed here)
 * @opcode:           out: opcode the status refers to
 * @status:           out: command status code
 * @req_complete:     out: request-complete callback, if any
 * @req_complete_skb: out: skb-carrying variant of the above
 */
static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
			       u16 *opcode, u8 *status,
			       hci_req_complete_t *req_complete,
			       hci_req_complete_skb_t *req_complete_skb)
{
	pr_err("joker %s:in %d.\n",__func__,__LINE__);
	struct hci_ev_cmd_status *ev = (void *) skb->data;

	skb_pull(skb, sizeof(*ev));

	*opcode = __le16_to_cpu(ev->opcode);
	*status = ev->status;

	switch (*opcode) {
	case HCI_OP_INQUIRY:
		hci_cs_inquiry(hdev, ev->status);
		break;

	case HCI_OP_CREATE_CONN:
		hci_cs_create_conn(hdev, ev->status);
		break;

	case HCI_OP_DISCONNECT:
		hci_cs_disconnect(hdev, ev->status);
		break;

	case HCI_OP_ADD_SCO:
		hci_cs_add_sco(hdev, ev->status);
		break;

	case HCI_OP_AUTH_REQUESTED:
		hci_cs_auth_requested(hdev, ev->status);
		break;

	case HCI_OP_SET_CONN_ENCRYPT:
		hci_cs_set_conn_encrypt(hdev, ev->status);
		break;

	case HCI_OP_REMOTE_NAME_REQ:
		hci_cs_remote_name_req(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_FEATURES:
		hci_cs_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_EXT_FEATURES:
		hci_cs_read_remote_ext_features(hdev, ev->status);
		break;

	case HCI_OP_SETUP_SYNC_CONN:
		hci_cs_setup_sync_conn(hdev, ev->status);
		break;

	case HCI_OP_SNIFF_MODE:
		hci_cs_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_EXIT_SNIFF_MODE:
		hci_cs_exit_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_SWITCH_ROLE:
		hci_cs_switch_role(hdev, ev->status);
		break;

	case HCI_OP_LE_CREATE_CONN:
		hci_cs_le_create_conn(hdev, ev->status);
		break;

	case HCI_OP_LE_READ_REMOTE_FEATURES:
		hci_cs_le_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_LE_START_ENC:
		hci_cs_le_start_enc(hdev, ev->status);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
		break;
	}

	/* A real command completed, so the command timeout is disarmed. */
	if (*opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	/* FIX: the trace had the ev->ncmd check commented out
	 * (if (/<asterisk>ev->ncmd && <asterisk>/!test_bit(...))), which resets the command
	 * credit even when the controller advertises zero free command
	 * slots (Num_HCI_Command_Packets == 0).  That breaks HCI command
	 * flow control and can flood a busy controller.  Restore the
	 * upstream condition: only replenish the credit when the
	 * controller says it can accept another command.
	 */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
		atomic_set(&hdev->cmd_cnt, 1);

	/* Indicate request completion if the command failed. Also, if
	 * we're not waiting for a special event and we get a success
	 * command status we should try to flag the request as completed
	 * (since for this kind of commands there will not be a command
	 * complete event).
	 */
	if (ev->status ||
	    (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->hci.req_event))
		hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
				     req_complete_skb);

	/* More queued commands and a free credit: keep the pipeline moving. */
	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
		queue_work(hdev->workqueue, &hdev->cmd_work);
}
/* Command Status handler for HCI_OP_LE_CREATE_CONN.
 *
 * Runs when the controller acknowledges the LE Create Connection command.
 * On success it records the initiator/responder address information that
 * SMP needs later, and arms the LE connection-attempt timeout (LE has no
 * BR/EDR-style page timeout).
 *
 * NOTE(review): this file is a code-reading transcript — the callees
 * (hci_sent_cmd_data, hci_conn_hash_lookup_le) are pasted inline below
 * their call sites. That nesting is not valid C and those bodies are never
 * executed at this position.
 *
 * NOTE(review): the pr_err() trace before the declarations violates the
 * kernel's declaration-after-statement rule — debug-only, remove before
 * upstreaming.
 */
static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
{
	pr_err("joker %s:in %d.\n",__func__,__LINE__);
	struct hci_cp_le_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* All connection failure handling is taken care of by the
	 * hci_le_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */
	if (status)
		return;

	/* Recover the parameters of the LE Create Connection command we
	 * sent; NULL when the last sent command was something else.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
	if (!cp)
		return;
	/* NOTE(review): callee pasted inline for reading — not executed here. */
	void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
	{
		pr_err("joker %s:in %d.\n",__func__,__LINE__);
		struct hci_command_hdr *hdr;

		if (!hdev->sent_cmd)
			return NULL;

		hdr = (void *) hdev->sent_cmd->data;

		/* Only return the payload when the saved command matches. */
		if (hdr->opcode != cpu_to_le16(opcode))
			return NULL;

		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

		/* Skip the command header; point at the parameter block. */
		return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
	}

	hci_dev_lock(hdev);

	/* Find the pending LE connection object for the peer address. */
	conn = hci_conn_hash_lookup_le(hdev, &cp->peer_addr, cp->peer_addr_type);
	if (!conn) { /* GM: into here*/
		goto unlock;
	}
	/* NOTE(review): callee pasted inline for reading — not executed here. */
	static inline struct hci_conn *hci_conn_hash_lookup_le(struct hci_dev *hdev,
								   bdaddr_t *ba,
								   __u8 ba_type)
	{
		struct hci_conn_hash *h = &hdev->conn_hash;
		struct hci_conn  *c;

		rcu_read_lock();

		/* Walk conn_hash under RCU for an LE link whose destination
		 * address and address type both match.
		 */
		list_for_each_entry_rcu(c, &h->list, list) {
			if (c->type != LE_LINK)
				   continue;

			if (ba_type == c->dst_type && !bacmp(&c->dst, ba)) {
				rcu_read_unlock();
				return c;
			}
		}

		rcu_read_unlock();

		return NULL;
	}

	/* Store the initiator and responder address information which
	 * is needed for SMP. These values will not change during the
	 * lifetime of the connection.
	 */
	conn->init_addr_type = cp->own_address_type;
	if (cp->own_address_type == ADDR_LE_DEV_RANDOM)
		bacpy(&conn->init_addr, &hdev->random_addr);
	else
		bacpy(&conn->init_addr, &hdev->bdaddr);

	conn->resp_addr_type = cp->peer_addr_type;
	bacpy(&conn->resp_addr, &cp->peer_addr);

	/* We don't want the connection attempt to stick around
	 * indefinitely since LE doesn't have a page timeout concept
	 * like BR/EDR. Set a timer for any connection that doesn't use
	 * the white list for connecting.
	 */
	if (cp->filter_policy == HCI_LE_USE_PEER_ADDR)
		queue_delayed_work(conn->hdev->workqueue,
				   &conn->le_conn_timeout,
				   conn->conn_timeout); /* GM: arm the connection-attempt timeout */

unlock:
	hci_dev_unlock(hdev);
}

/* recvmsg() implementation for HCI sockets.
 *
 * Dequeues one packet from the socket receive queue, copies up to @len
 * bytes into @msg (setting MSG_TRUNC on truncation), and attaches
 * per-channel ancillary data: HCI-specific cmsgs (direction/timestamp)
 * for RAW sockets, the standard SO_TIMESTAMP cmsg for the others.
 * Returns the number of bytes copied, or a negative errno.
 *
 * NOTE(review): hci_sock_cmsg() has been pasted inline inside the switch
 * for code reading — not valid C at that position and never executed there.
 */
static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
			    int flags)
{
	pr_err("joker %s:in %d.\n",__func__,__LINE__);
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;

	BT_DBG("sock %p, sk %p", sock, sk);

	/* Out-of-band data is not supported on HCI sockets. */
	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	/* Blocks unless MSG_DONTWAIT; err carries the reason on failure. */
	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	/* Copy at most @len bytes; flag truncation like a datagram socket. */
	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		/* RAW channel gets HCI-specific cmsgs (direction, timestamp). */
		hci_sock_cmsg(sk, msg, skb);
		break;
		/* NOTE(review): callee pasted inline for reading — not executed here. */
		static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
					  struct sk_buff *skb)
		{
			pr_err("joker %s:in %d.\n",__func__,__LINE__);
			__u32 mask = hci_pi(sk)->cmsg_mask;

			/* Report packet direction when the socket asked for it. */
			if (mask & HCI_CMSG_DIR) {
				int incoming = bt_cb(skb)->incoming;
				put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
					 &incoming);
			}

			if (mask & HCI_CMSG_TSTAMP) {
				#ifdef CONFIG_COMPAT
				struct compat_timeval ctv;
				#endif
				struct timeval tv;
				void *data;
				int len;

				skb_get_timestamp(skb, &tv);

				data = &tv;
				len = sizeof(tv);
				/* 32-bit userspace on a 64-bit kernel needs the
				 * compat timeval layout instead.
				 */
				#ifdef CONFIG_COMPAT
				if (!COMPAT_USE_64BIT_TIME &&
					(msg->msg_flags & MSG_CMSG_COMPAT)) {
					ctv.tv_sec = tv.tv_sec;
					ctv.tv_usec = tv.tv_usec;
					data = &ctv;
					len = sizeof(ctv);
				}
				#endif

				put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
			}
		}
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	default:
		/* Management channels also get the standard timestamp cmsg. */
		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
			sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	/* A copy error takes precedence; otherwise report bytes copied. */
	return err ? : copied;
}
/* hci_le_meta_evt - dispatch an HCI LE Meta event to its subevent handler.
 *
 * @hdev: controller the event arrived on
 * @skb:  event packet; on entry skb->data points at struct hci_ev_le_meta
 *
 * Strips the LE Meta header and routes the packet to the handler matching
 * le_ev->subevent; unknown subevents are silently ignored. For the
 * "hcitool lecc" flow the interesting path is
 * HCI_EV_LE_CONN_COMPLETE -> hci_le_conn_complete_evt().
 *
 * Fix vs. original: the local declaration now precedes the pr_err() debug
 * trace — the original had a declaration after a statement, which breaks
 * the kernel's C89/-Wdeclaration-after-statement rule.
 */
static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_meta *le_ev = (void *) skb->data;

	pr_err("joker %s:in %d.\n", __func__, __LINE__);

	/* Advance past the one-byte subevent header so every handler sees
	 * only its subevent-specific parameters at skb->data.
	 */
	skb_pull(skb, sizeof(*le_ev));

	switch (le_ev->subevent) {
	case HCI_EV_LE_CONN_COMPLETE:
		hci_le_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_CONN_UPDATE_COMPLETE:
		hci_le_conn_update_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_ADVERTISING_REPORT:
		hci_le_adv_report_evt(hdev, skb);
		break;

	case HCI_EV_LE_REMOTE_FEAT_COMPLETE:
		hci_le_remote_feat_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_LTK_REQ:
		hci_le_ltk_request_evt(hdev, skb);
		break;

	case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
		hci_le_remote_conn_param_req_evt(hdev, skb);
		break;

	case HCI_EV_LE_DIRECT_ADV_REPORT:
		hci_le_direct_adv_report_evt(hdev, skb);
		break;

	default:
		/* Unknown subevent: silently ignored. */
		break;
	}
}
/* Handler for the HCI LE Connection Complete event (LE Meta subevent).
 *
 * Called from hci_le_meta_evt() once the controller reports an LE link was
 * established (or failed). Visible responsibilities:
 *  - find (or create) the hci_conn for the peer and fill in the
 *    initiator/responder address info needed by SMP,
 *  - resolve a resolvable private address back to the identity address,
 *  - drop blacklisted peers and notify the management channel,
 *  - record handle/connection parameters, create debugfs/sysfs entries,
 *  - start the LE Read Remote Features exchange where applicable,
 *  - cancel a now-satisfied pending connection request and re-evaluate
 *    background scanning.
 *
 * NOTE(review): this file is a code-reading transcript — the callees
 * (hci_lookup_le_connect, hci_conn_add, hci_copy_identity_address,
 * hci_get_irk, hci_bdaddr_list_lookup, mgmt_device_connected,
 * hci_debugfs_create_conn, hci_conn_add_sysfs, hci_pend_le_action_lookup,
 * hci_update_background_scan, and their own callees) are pasted inline
 * after their call sites. That nesting is not valid C and those bodies are
 * never executed at this position. The stray "<tag63>"/"<tag64>"/"<tag65>"
 * marks below are annotation artifacts, not C tokens.
 */
static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	pr_err("joker %s:in %d.\n",__func__,__LINE__);
	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
	struct hci_conn_params *params;
	struct hci_conn *conn;
	struct smp_irk *irk;
	u8 addr_type;

	/* hci0 status 0x00 */
	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	/* All controllers implicitly stop advertising in the event of a
	 * connection, so ensure that the state bit is cleared.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	conn = hci_lookup_le_connect(hdev);
	/* NOTE(review): callee pasted inline for reading — not executed here. */
	static inline struct hci_conn *hci_lookup_le_connect(struct hci_dev *hdev)
	{
		struct hci_conn_hash *h = &hdev->conn_hash;
		struct hci_conn  *c;

		rcu_read_lock();

		/* Find the LE link currently in BT_CONNECT that is not merely
		 * scanning — i.e. the outstanding connection attempt.
		 */
		list_for_each_entry_rcu(c, &h->list, list) {
			if (c->type == LE_LINK && c->state == BT_CONNECT &&
				!test_bit(HCI_CONN_SCANNING, &c->flags)) {
				rcu_read_unlock();
				return c;
			}
		}

		rcu_read_unlock();

		return NULL;
	}
	/* No hci_conn exists yet for this device. */
	if (!conn) {
		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr, ev->role);
		if (!conn) {
			BT_ERR("No memory for new connection");
			goto unlock;
		}
		/* NOTE(review): callee pasted inline for reading — not executed here. */
		struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
						  u8 role)
		{
			pr_err("joker %s:in %d.\n",__func__,__LINE__);
			struct hci_conn *conn;

			BT_DBG("%s dst %pMR", hdev->name, dst); /* hci0 dst d7:23:19:5a:7a:e1 */

			conn = kzalloc(sizeof(*conn), GFP_KERNEL);  <tag63>
			if (!conn)
				return NULL;

			/* conn holds the connection info, including both peers'
			 * device addresses.
			 */
			bacpy(&conn->dst, dst);
			bacpy(&conn->src, &hdev->bdaddr);
			conn->hdev  = hdev;
			conn->type  = type;
			conn->role  = role;
			conn->mode  = HCI_CM_ACTIVE;
			conn->state = BT_OPEN;
			conn->auth_type = HCI_AT_GENERAL_BONDING;
			conn->io_capability = hdev->io_capability;
			conn->remote_auth = 0xff;
			conn->key_type = 0xff;
			conn->rssi = HCI_RSSI_INVALID;
			conn->tx_power = HCI_TX_POWER_INVALID;
			conn->max_tx_power = HCI_TX_POWER_INVALID;
			conn->pending_sec_level = BT_SECURITY_LOW;

			set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
			conn->disc_timeout = HCI_DISCONN_TIMEOUT;

			/* Master role means this is an outgoing connection. */
			if (conn->role == HCI_ROLE_MASTER) {
				conn->out = true;
			}

			switch (type) {
			case ACL_LINK:
				conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
				break;
			case LE_LINK:
				/* conn->src should reflect the local identity address */
				hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
				break;
				/* NOTE(review): callee pasted inline for reading — not executed here. */
				void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
								   u8 *bdaddr_type)
				{
					/* Use the static random address when forced, when no
					 * public address exists, or when BR/EDR is disabled
					 * and a static address is configured; otherwise use
					 * the public address.
					 */
					if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
						!bacmp(&hdev->bdaddr, BDADDR_ANY) ||
						(!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
						 bacmp(&hdev->static_addr, BDADDR_ANY))) {
						bacpy(bdaddr, &hdev->static_addr);
						*bdaddr_type = ADDR_LE_DEV_RANDOM;
					} else {
						bacpy(bdaddr, &hdev->bdaddr);
						*bdaddr_type = ADDR_LE_DEV_PUBLIC;
					}
				}
			case SCO_LINK:
				if (lmp_esco_capable(hdev))
					conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
							(hdev->esco_type & EDR_ESCO_MASK);
				else
					conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
				break;
			case ESCO_LINK:
				conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
				break;
			}

			skb_queue_head_init(&conn->data_q);

			INIT_LIST_HEAD(&conn->chan_list);

			INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
			INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
			INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
			INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);
			INIT_WORK(&conn->le_scan_cleanup, le_scan_cleanup);

			atomic_set(&conn->refcnt, 0); <tag64>

			hci_dev_hold(hdev);

			hci_conn_hash_add(hdev, conn);
			/* NOTE(review): callee pasted inline for reading — not executed here. */
			static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c)
			{
				struct hci_conn_hash *h = &hdev->conn_hash;
				list_add_rcu(&c->list, &h->list); <tag65>
				/* Keep the per-link-type counters in sync with the list. */
				switch (c->type) {
				case ACL_LINK:
					h->acl_num++;
					break;
				case AMP_LINK:
					h->amp_num++;
					break;
				case LE_LINK:
					h->le_num++;
					if (c->role == HCI_ROLE_SLAVE)
						h->le_num_slave++;
					break;
				case SCO_LINK:
				case ESCO_LINK:
					h->sco_num++;
					break;
				}
			}
			/* Notify the upper layer that a new connection was added. */
			if (hdev->notify) {
				hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);
			}

			hci_conn_init_sysfs(conn);
			/* NOTE(review): callee pasted inline for reading — not executed here. */
			void hci_conn_init_sysfs(struct hci_conn *conn)
			{
				pr_err("joker %s:in %d.\n",__func__,__LINE__);
				struct hci_dev *hdev = conn->hdev;

				BT_DBG("conn %p", conn);

				conn->dev.type = &bt_link;
				conn->dev.class = bt_class;
				conn->dev.parent = &hdev->dev;

				device_initialize(&conn->dev);
			}

			return conn;
		}

		conn->dst_type = ev->bdaddr_type;

		/* If we didn't have a hci_conn object previously
		 * but we're in master role this must be something
		 * initiated using a white list. Since white list based
		 * connections are not "first class citizens" we don't
		 * have full tracking of them. Therefore, we go ahead
		 * with a "best effort" approach of determining the
		 * initiator address based on the HCI_PRIVACY flag.
		 */
		if (conn->out) {
			conn->resp_addr_type = ev->bdaddr_type;
			bacpy(&conn->resp_addr, &ev->bdaddr);
			if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
				bacpy(&conn->init_addr, &hdev->rpa);
			} else {
				hci_copy_identity_address(hdev,
							  &conn->init_addr,
							  &conn->init_addr_type);
				/* NOTE(review): second inline paste of
				 * hci_copy_identity_address — same body as above.
				 */
				void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
								   u8 *bdaddr_type)
				{
					if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
						!bacmp(&hdev->bdaddr, BDADDR_ANY) ||
						(!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
						 bacmp(&hdev->static_addr, BDADDR_ANY))) {
						bacpy(bdaddr, &hdev->static_addr);
						*bdaddr_type = ADDR_LE_DEV_RANDOM;
					} else {
						bacpy(bdaddr, &hdev->bdaddr);
						*bdaddr_type = ADDR_LE_DEV_PUBLIC;
					}
				}
			}
		}
	} else {
		/* Connection established: the attempt can no longer time out. */
		cancel_delayed_work(&conn->le_conn_timeout);
	}

	if (!conn->out) {
		/* Set the responder (our side) address type based on
		 * the advertising address type.
		 */
		conn->resp_addr_type = hdev->adv_addr_type;
		if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
			bacpy(&conn->resp_addr, &hdev->random_addr);
		}
		else {
			bacpy(&conn->resp_addr, &hdev->bdaddr);
		}

		conn->init_addr_type = ev->bdaddr_type;
		bacpy(&conn->init_addr, &ev->bdaddr);

		/* For incoming connections, set the default minimum
		 * and maximum connection interval. They will be used
		 * to check if the parameters are in range and if not
		 * trigger the connection update procedure.
		 */
		conn->le_conn_min_interval = hdev->le_conn_min_interval;
		conn->le_conn_max_interval = hdev->le_conn_max_interval;
	}

	/* Lookup the identity address from the stored connection
	 * address and address type.
	 *
	 * When establishing connections to an identity address, the
	 * connection procedure will store the resolvable random
	 * address first. Now if it can be converted back into the
	 * identity address, start using the identity address from
	 * now on.
	 */
	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
	/* NOTE(review): callee pasted inline for reading — not executed here. */
	static inline struct smp_irk *hci_get_irk(struct hci_dev *hdev,
						  bdaddr_t *bdaddr, u8 addr_type)
	{
		/* Only resolvable private addresses can map to an IRK. */
		if (!hci_bdaddr_is_rpa(bdaddr, addr_type))
			return NULL;

		return hci_find_irk_by_rpa(hdev, bdaddr);
	}
	if (irk) {
		bacpy(&conn->dst, &irk->bdaddr);
		conn->dst_type = irk->addr_type;
	}

	if (ev->status) {
		hci_le_conn_failed(conn, ev->status);
		goto unlock;
	}

	/* Map the LE address type onto the generic bdaddr type used by
	 * the blacklist.
	 */
	if (conn->dst_type == ADDR_LE_DEV_PUBLIC) {
		addr_type = BDADDR_LE_PUBLIC;
	}
	else {
		addr_type = BDADDR_LE_RANDOM;
	}

	/* GM: blacklist lookup? */
	/* Drop the connection if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
		hci_conn_drop(conn);
		goto unlock;
	}
	/* NOTE(review): callee pasted inline for reading — not executed here. */
	struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
						 bdaddr_t *bdaddr, u8 type)
	{
		pr_err("joker %s:in %d.\n",__func__,__LINE__);
		struct bdaddr_list *b;

		/* Linear scan for a matching address + address type. */
		list_for_each_entry(b, bdaddr_list, list) {
			if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
				return b;
		}

		return NULL;
	}

	/* First time this connection is reported at the mgmt level. */
	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		mgmt_device_connected(hdev, conn, 0, NULL, 0);
    }
		/* NOTE(review): callee pasted inline for reading — not executed here. */
		void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
					   u32 flags, u8 *name, u8 name_len)
		{
			/* NOTE(review): fixed 512-byte event buffer — assumes
			 * sizeof(*ev) + EIR data always fits; TODO confirm bound.
			 */
			char buf[512];
			struct mgmt_ev_device_connected *ev = (void *) buf;
			u16 eir_len = 0;

			bacpy(&ev->addr.bdaddr, &conn->dst);
			ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

			ev->flags = __cpu_to_le32(flags);

			/* We must ensure that the EIR Data fields are ordered and
			 * unique. Keep it simple for now and avoid the problem by not
			 * adding any BR/EDR data to the LE adv.
			 */
			if (conn->le_adv_data_len > 0) {
				memcpy(&ev->eir[eir_len],
					   conn->le_adv_data, conn->le_adv_data_len);
				eir_len = conn->le_adv_data_len;
			} else {
				if (name_len > 0) {
					eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
								  name, name_len);
				}

				if (memcmp(conn->dev_class, "\0\0\0", 3) != 0) {
					eir_len = eir_append_data(ev->eir, eir_len,
								  EIR_CLASS_OF_DEV,
								  conn->dev_class, 3);
				}
			}

			ev->eir_len = cpu_to_le16(eir_len);

			mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
					sizeof(*ev) + eir_len, NULL);
			/* NOTE(review): callee pasted inline for reading — not executed here. */
			static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
						  struct sock *skip_sk)
			{
				pr_err("joker %s:in %d.\n",__func__,__LINE__);
				return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
							   HCI_SOCK_TRUSTED, skip_sk);
				/* NOTE(review): callee pasted inline (after the return) for reading. */
				int mgmt_send_event(u16 event, struct hci_dev *hdev, unsigned short channel,
							void *data, u16 data_len, int flag, struct sock *skip_sk)
				{
					pr_err("joker %s:in %d.\n",__func__,__LINE__);
					struct sk_buff *skb;
					struct mgmt_hdr *hdr;

					skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
					if (!skb)
						return -ENOMEM;

					/* Fill the mgmt header: opcode, controller index, length. */
					hdr = (void *) skb_put(skb, sizeof(*hdr));
					hdr->opcode = cpu_to_le16(event);
					if (hdev)
						hdr->index = cpu_to_le16(hdev->id);
					else
						hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
					hdr->len = cpu_to_le16(data_len);

					if (data)
						memcpy(skb_put(skb, data_len), data, data_len);

					/* Time stamp */
					__net_timestamp(skb);

					hci_send_to_channel(channel, skb, flag, skip_sk);
					/* NOTE(review): callee pasted inline for reading — not executed here. */
					void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
								 int flag, struct sock *skip_sk)
					{
						pr_err("joker %s:in %d.\n",__func__,__LINE__);
						struct sock *sk;

						/* channel 3 len 19 */
						BT_DBG("channel %u len %d", channel, skb->len);

						read_lock(&hci_sk_list.lock);

						/* Clone the event to every bound socket on this
						 * channel that carries the required flag, except
						 * skip_sk.
						 */
						sk_for_each(sk, &hci_sk_list.head) {
							struct sk_buff *nskb;

							/* Ignore socket without the flag set */
							if (!hci_sock_test_flag(sk, flag))
								continue;

							/* Skip the original socket */
							if (sk == skip_sk)
								continue;

							if (sk->sk_state != BT_BOUND)
								continue;

							if (hci_pi(sk)->channel != channel)
								continue;

							nskb = skb_clone(skb, GFP_ATOMIC);
							if (!nskb)
								continue;

							if (sock_queue_rcv_skb(sk, nskb))
								kfree_skb(nskb);
						}

						read_unlock(&hci_sk_list.lock);
					}
					kfree_skb(skb);

					return 0;
				}
			}
		}

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = __le16_to_cpu(ev->handle);
	conn->state = BT_CONFIG;

	conn->le_conn_interval = le16_to_cpu(ev->interval);
	conn->le_conn_latency = le16_to_cpu(ev->latency);
	conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);

	hci_debugfs_create_conn(conn);
	/* NOTE(review): callee pasted inline for reading — not executed here. */
	void hci_debugfs_create_conn(struct hci_conn *conn)
	{
		pr_err("joker %s:in %d.\n",__func__,__LINE__);
		struct hci_dev *hdev = conn->hdev;
		char name[6];

		if (IS_ERR_OR_NULL(hdev->debugfs))
			return;

		/* Per-connection directory named after the handle. */
		snprintf(name, sizeof(name), "%u", conn->handle);
		conn->debugfs = debugfs_create_dir(name, hdev->debugfs);
	}
	hci_conn_add_sysfs(conn);
	/* NOTE(review): callee pasted inline for reading — not executed here. */
	void hci_conn_add_sysfs(struct hci_conn *conn)
	{
		pr_err("joker %s:in %d.\n",__func__,__LINE__);
		struct hci_dev *hdev = conn->hdev;

		BT_DBG("conn %p", conn);

		dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle);

		if (device_add(&conn->dev) < 0) {
			BT_ERR("Failed to register connection device");
			return;
		}

		hci_dev_hold(hdev);
	}

	if (!ev->status) {
		/* The remote features procedure is defined for master
		 * role only. So only in case of an initiated connection
		 * request the remote features.
		 *
		 * If the local controller supports slave-initiated features
		 * exchange, then requesting the remote features in slave
		 * role is possible. Otherwise just transition into the
		 * connected state without requesting the remote features.
		 */
		if (conn->out ||
		    (hdev->le_features[0] & HCI_LE_SLAVE_FEATURES)) {
			struct hci_cp_le_read_remote_features cp;

			cp.handle = __cpu_to_le16(conn->handle);

			hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp); /* GM: fetch the remote LE features */

			hci_conn_hold(conn);
		} else {
			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, ev->status);
		}
	} else {
		hci_connect_cfm(conn, ev->status);
	}

	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
					   conn->dst_type);
	/* NOTE(review): callee pasted inline for reading — not executed here. */
	struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
							  bdaddr_t *addr, u8 addr_type)
	{
		pr_err("joker %s:in %d.\n",__func__,__LINE__);
		struct hci_conn_params *param;

		/* The pending list is linked through hci_conn_params.action. */
		list_for_each_entry(param, list, action) {
			if (bacmp(&param->addr, addr) == 0 &&
				param->addr_type == addr_type)
				return param;
		}

		return NULL;
	}
	/* If the pending list holds a connection request for this same
	 * device, cancel it — the target device is now connected.
	 */
	if (params) {
		list_del_init(&params->action);
		if (params->conn) {
			hci_conn_drop(params->conn);
			hci_conn_put(params->conn);
			params->conn = NULL;
		}
	}

unlock:
	hci_update_background_scan(hdev);
	/* NOTE(review): callee pasted inline for reading — not executed here. */
	void hci_update_background_scan(struct hci_dev *hdev)
	{
		pr_err("joker %s:in %d.\n",__func__,__LINE__);
		int err;
		struct hci_request req;

		hci_req_init(&req, hdev);
		/* NOTE(review): callee pasted inline for reading — not executed here. */
		void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
		{
			pr_err("joker %s:in %d.\n",__func__,__LINE__);
			skb_queue_head_init(&req->cmd_q);
			req->hdev = hdev;
			req->err = 0;
		}

		__hci_update_background_scan(&req);
		/* NOTE(review): callee pasted inline for reading — not executed here. */
		void __hci_update_background_scan(struct hci_request *req)
		{
			pr_err("joker %s:in %d.\n",__func__,__LINE__);
			struct hci_dev *hdev = req->hdev;

			/* Bail out while the controller is down, initializing,
			 * being set up/configured, auto-off, or unregistering.
			 */
			if (!test_bit(HCI_UP, &hdev->flags) ||
				test_bit(HCI_INIT, &hdev->flags) ||
				hci_dev_test_flag(hdev, HCI_SETUP) ||
				hci_dev_test_flag(hdev, HCI_CONFIG) ||
				hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
				hci_dev_test_flag(hdev, HCI_UNREGISTER))
				return;

			/* No point in doing scanning if LE support hasn't been enabled */
			if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
				return;

			/* If discovery is active don't interfere with it */
			if (hdev->discovery.state != DISCOVERY_STOPPED)
				return;

			/* Reset RSSI and UUID filters when starting background scanning
			 * since these filters are meant for service discovery only.
			 *
			 * The Start Discovery and Start Service Discovery operations
			 * ensure to set proper values for RSSI threshold and UUID
			 * filter list. So it is safe to just reset them here.
			 */
			hci_discovery_filter_clear(hdev);

			if (list_empty(&hdev->pend_le_conns) &&
				list_empty(&hdev->pend_le_reports)) { /* not into */
				/* If there is no pending LE connections or devices
				 * to be scanned for, we should stop the background
				 * scanning.
				 */

				/* If controller is not scanning we are done. */
				if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
					return;

				hci_req_add_le_scan_disable(req);

				BT_DBG("%s stopping background scanning", hdev->name);
			} else {/* not into */
				/* If there is at least one pending LE connection, we should
				 * keep the background scan running.
				 */

				/* If controller is connecting, we should not start scanning
				 * since some controllers are not able to scan and connect at
				 * the same time.
				 */
				if (hci_lookup_le_connect(hdev))
					return;

				/* If controller is currently scanning, we stop it to ensure we
				 * don't miss any advertising (due to duplicates filter).
				 */
				if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
					hci_req_add_le_scan_disable(req);

				hci_req_add_le_passive_scan(req);

				BT_DBG("%s starting background scanning", hdev->name);
			}
		}

		err = hci_req_run(&req, update_background_scan_complete);
		if (err && err != -ENODATA)
			BT_ERR("Failed to run HCI request: err %d", err);
	}
	hci_dev_unlock(hdev);
}
