text (string, length 213-7.14k) | idx (int64, 16-12.5k)
--- initial
+++ final
@@ -1,67 +1,67 @@
int setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw) {
ssize_t rc;
unsigned int cur_npages;
unsigned int npages = 0;
unsigned int i;
size_t len;
size_t count = iov_iter_count(iter);
unsigned int saved_len;
size_t start;
unsigned int max_pages = iov_iter_npages(iter, INT_MAX);
struct page **pages = NULL;
struct bio_vec *bv = NULL;
if (iov_iter_is_kvec(iter)) {
memcpy(&ctx->iter, iter, sizeof(struct iov_iter));
ctx->len = count;
iov_iter_advance(iter, count);
return 0;
}
if (max_pages * sizeof(struct bio_vec) <= CIFS_AIO_KMALLOC_LIMIT) bv = kmalloc_array(max_pages, sizeof(struct bio_vec), GFP_KERNEL);
if (!bv) {
bv = vmalloc(array_size(max_pages, sizeof(struct bio_vec)));
if (!bv) return -ENOMEM;
}
if (max_pages * sizeof(struct page *) <= CIFS_AIO_KMALLOC_LIMIT) pages = kmalloc_array(max_pages, sizeof(struct page *), GFP_KERNEL);
if (!pages) {
pages = vmalloc(array_size(max_pages, sizeof(struct page *)));
if (!pages) {
kvfree(bv);
return -ENOMEM;
}
}
saved_len = count;
while (count && npages < max_pages) {
rc = iov_iter_get_pages(iter, pages, count, max_pages, &start);
if (rc < 0) {
cifs_dbg(VFS, "couldn't get user pages (rc=%zd)\n", rc);
break;
}
if (rc > count) {
cifs_dbg(VFS, "get pages rc=%zd more than %zu\n", rc, count);
break;
}
iov_iter_advance(iter, rc);
count -= rc;
rc += start;
cur_npages = DIV_ROUND_UP(rc, PAGE_SIZE);
if (npages + cur_npages > max_pages) {
cifs_dbg(VFS, "out of vec array capacity (%u vs %u)\n", npages + cur_npages, max_pages);
break;
}
for (i = 0; i < cur_npages; i++) {
len = rc > PAGE_SIZE ? PAGE_SIZE : rc;
bv[npages + i].bv_page = pages[i];
bv[npages + i].bv_offset = start;
bv[npages + i].bv_len = len - start;
rc -= len;
start = 0;
}
npages += cur_npages;
}
kvfree(pages);
ctx->bv = bv;
ctx->len = saved_len - count;
ctx->npages = npages;
- iov_iter_bvec(&ctx->iter, ITER_BVEC | rw, ctx->bv, npages, ctx->len);
+ iov_iter_bvec(&ctx->iter, rw, ctx->bv, npages, ctx->len);
return 0;
}<sep>@@
expression e1,e2,e3,e4,e;
@@
iov_iter_bvec(e1,
- ITER_BVEC | e
+ e
, e2,e3,e4)
<|end_of_text|> | 12,145 |
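
Note: this record and the iov_iter records that follow capture the kernel rework that split the iterator type out of the direction argument: constructors such as iov_iter_bvec() and iov_iter_kvec() no longer take an ITER_* type bit OR'd into the direction, only a bare READ or WRITE, the type being implied by the constructor itself. The SmPL rule after <sep> strips the type bit mechanically, and because the direction is an expression metavariable it also handles non-constant directions such as the local rw above. A minimal before/after sketch, not from the dataset (page and len are assumed):

/* Illustrative caller only; page and len are hypothetical. */
struct bio_vec bv = { .bv_page = page, .bv_offset = 0, .bv_len = len };
struct iov_iter it;
/* old: iov_iter_bvec(&it, ITER_BVEC | READ, &bv, 1, len); */
iov_iter_bvec(&it, READ, &bv, 1, len);
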
--- initial
+++ final
@@ -1,44 +1,44 @@
static int v9fs_dir_readdir(struct file *file, struct dir_context *ctx) {
bool over;
struct p9_wstat st;
int err = 0;
struct p9_fid *fid;
int buflen;
int reclen = 0;
struct p9_rdir *rdir;
struct kvec kvec;
p9_debug(P9_DEBUG_VFS, "name %pD\n", file);
fid = file->private_data;
buflen = fid->clnt->msize - P9_IOHDRSZ;
rdir = v9fs_alloc_rdir_buf(file, buflen);
if (!rdir) return -ENOMEM;
kvec.iov_base = rdir->buf;
kvec.iov_len = buflen;
while (1) {
if (rdir->tail == rdir->head) {
struct iov_iter to;
int n;
- iov_iter_kvec(&to, READ | ITER_KVEC, &kvec, 1, buflen);
+ iov_iter_kvec(&to, READ, &kvec, 1, buflen);
n = p9_client_read(file->private_data, ctx->pos, &to, &err);
if (err) return err;
if (n == 0) return 0;
rdir->head = 0;
rdir->tail = n;
}
while (rdir->head < rdir->tail) {
p9stat_init(&st);
err = p9stat_read(fid->clnt, rdir->buf + rdir->head, rdir->tail - rdir->head, &st);
if (err) {
p9_debug(P9_DEBUG_VFS, "returned %d\n", err);
p9stat_free(&st);
return -EIO;
}
reclen = st.size + 2;
over = !dir_emit(ctx, st.name, strlen(st.name), v9fs_qid2ino(&st.qid), dt_type(&st));
p9stat_free(&st);
if (over) return 0;
rdir->head += reclen;
ctx->pos += reclen;
}
}
}<sep>@@
expression e1,e2,e3,e4,e;
@@
iov_iter_kvec(e1,
- ITER_KVEC | e
+ e
, e2,e3,e4)
<|end_of_text|> | 12,179 |
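
The kvec-backed constructor migrates the same way; a hedged sketch (buf and len are assumed):

/* Illustrative only; buf and len are hypothetical. */
struct kvec kv = { .iov_base = buf, .iov_len = len };
struct iov_iter to;
/* old: iov_iter_kvec(&to, READ | ITER_KVEC, &kv, 1, len); */
iov_iter_kvec(&to, READ, &kv, 1, len);
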
--- initial
+++ final
@@ -1,18 +1,18 @@
static void send_disconnects(struct nbd_device *nbd) {
struct nbd_config *config = nbd->config;
struct nbd_request request = {
.magic = htonl(NBD_REQUEST_MAGIC),
.type = htonl(NBD_CMD_DISC),
};
struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
struct iov_iter from;
int i, ret;
for (i = 0; i < config->num_connections; i++) {
struct nbd_sock *nsock = config->socks[i];
- iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request));
+ iov_iter_kvec(&from, WRITE, &iov, 1, sizeof(request));
mutex_lock(&nsock->tx_lock);
ret = sock_xmit(nbd, i, 1, &from, 0, NULL);
if (ret <= 0) dev_err(disk_to_dev(nbd->disk), "Send disconnect failed %d\n", ret);
mutex_unlock(&nsock->tx_lock);
}
}<sep>@@
expression e1,e2,e3,e4,e;
@@
iov_iter_kvec(e1,
- ITER_KVEC | e
+ e
, e2,e3,e4)
<|end_of_text|> | 12,148 |
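
Note that the rule's pattern reads ITER_KVEC | e while the call above passed WRITE | ITER_KVEC: Coccinelle's standard isomorphisms treat bit-or as commutative, so a single rule covers both operand orders found across these records.
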
--- initial
+++ final
@@ -1,21 +1,21 @@
static int send_pkt(struct l2cap_chan *chan, struct sk_buff *skb, struct net_device *netdev) {
struct msghdr msg;
struct kvec iv;
int err;
/* Remember the skb so that we can send EAGAIN to the caller if
* we run out of credits.
*/
chan->data = skb;
iv.iov_base = skb->data;
iv.iov_len = skb->len;
memset(&msg, 0, sizeof(msg));
- iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iv, 1, skb->len);
+ iov_iter_kvec(&msg.msg_iter, WRITE, &iv, 1, skb->len);
err = l2cap_chan_send(chan, &msg, skb->len);
if (err > 0) {
netdev->stats.tx_bytes += err;
netdev->stats.tx_packets++;
return 0;
}
if (err < 0) netdev->stats.tx_errors++;
return err;
}<sep>@@
expression e1,e2,e3,e4,e;
@@
iov_iter_kvec(e1,
- ITER_KVEC | e
+ e
, e2,e3,e4)
<|end_of_text|> | 12,120 |
--- initial
+++ final
@@ -1,14 +1,14 @@
ssize_t vmci_qpair_dequeue(struct vmci_qp *qpair, void *buf, size_t buf_size, int buf_type) {
ssize_t result;
struct iov_iter to;
struct kvec v = {.iov_base = buf, .iov_len = buf_size};
if (!qpair || !buf) return VMCI_ERROR_INVALID_ARGS;
- iov_iter_kvec(&to, READ | ITER_KVEC, &v, 1, buf_size);
+ iov_iter_kvec(&to, READ, &v, 1, buf_size);
qp_lock(qpair);
do {
result = qp_dequeue_locked(qpair->produce_q, qpair->consume_q, qpair->consume_q_size, &to, true);
if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY && !qp_wait_for_ready_queue(qpair)) result = VMCI_ERROR_WOULD_BLOCK;
} while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
qp_unlock(qpair);
return result;
}<sep>@@
expression e1,e2,e3,e4,e;
@@
iov_iter_kvec(e1,
- ITER_KVEC | e
+ e
, e2,e3,e4)
<|end_of_text|> | 12,182 |
--- initial
+++ final
@@ -1,21 +1,21 @@
static void afs_load_bvec(struct afs_call *call, struct msghdr *msg, struct bio_vec *bv, pgoff_t first, pgoff_t last, unsigned offset) {
struct page *pages[AFS_BVEC_MAX];
unsigned int nr, n, i, to, bytes = 0;
nr = min_t(pgoff_t, last - first + 1, AFS_BVEC_MAX);
n = find_get_pages_contig(call->mapping, first, nr, pages);
ASSERTCMP(n, ==, nr);
msg->msg_flags |= MSG_MORE;
for (i = 0; i < nr; i++) {
to = PAGE_SIZE;
if (first + i >= last) {
to = call->last_to;
msg->msg_flags &= ~MSG_MORE;
}
bv[i].bv_page = pages[i];
bv[i].bv_len = to - offset;
bv[i].bv_offset = offset;
bytes += to - offset;
offset = 0;
}
- iov_iter_bvec(&msg->msg_iter, WRITE | ITER_BVEC, bv, nr, bytes);
+ iov_iter_bvec(&msg->msg_iter, WRITE, bv, nr, bytes);
}<sep>@@
expression e1,e2,e3,e4,e;
@@
iov_iter_bvec(e1,
- ITER_BVEC | e
+ e
, e2,e3,e4)
<|end_of_text|> | 12,154 |
--- initial
+++ final
@@ -1,61 +1,61 @@
static int fd_do_rw(struct se_cmd *cmd, struct file *fd, u32 block_size, struct scatterlist *sgl, u32 sgl_nents, u32 data_length, int is_write) {
struct scatterlist *sg;
struct iov_iter iter;
struct bio_vec *bvec;
ssize_t len = 0;
loff_t pos = (cmd->t_task_lba * block_size);
int ret = 0, i;
bvec = kcalloc(sgl_nents, sizeof(struct bio_vec), GFP_KERNEL);
if (!bvec) {
pr_err("Unable to allocate fd_do_readv iov[]\n");
return -ENOMEM;
}
for_each_sg(sgl, sg, sgl_nents, i) {
bvec[i].bv_page = sg_page(sg);
bvec[i].bv_len = sg->length;
bvec[i].bv_offset = sg->offset;
len += sg->length;
}
- iov_iter_bvec(&iter, ITER_BVEC, bvec, sgl_nents, len);
+ iov_iter_bvec(&iter, READ, bvec, sgl_nents, len);
if (is_write)
ret = vfs_iter_write(fd, &iter, &pos, 0);
else
ret = vfs_iter_read(fd, &iter, &pos, 0);
if (is_write) {
if (ret < 0 || ret != data_length) {
pr_err("%s() write returned %d\n", __func__, ret);
if (ret >= 0) ret = -EINVAL;
}
} else {
/*
* Return zeros and GOOD status even if the READ did not return
* the expected virt_size for struct file w/o a backing struct
* block_device.
*/
if (S_ISBLK(file_inode(fd)->i_mode)) {
if (ret < 0 || ret != data_length) {
pr_err("%s() returned %d, expecting %u for "
"S_ISBLK\n",
__func__, ret, data_length);
if (ret >= 0) ret = -EINVAL;
}
} else {
if (ret < 0) {
pr_err("%s() returned %d for non S_ISBLK\n", __func__, ret);
} else if (ret != data_length) {
/*
* Short read case:
* Probably someone truncated the file under us.
* We must explicitly zero sg-pages to prevent
* exposing uninitialized pages to userspace.
*/
if (ret < data_length)
ret += iov_iter_zero(data_length - ret, &iter);
else
ret = -EINVAL;
}
}
}
kfree(bvec);
return ret;
}<sep>@@
expression e1,e2,e3,e4;
@@
iov_iter_bvec(e1,
- ITER_BVEC
+ READ
, e2,e3,e4)
<|end_of_text|> | 12,168 |
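
Unlike the ITER_BVEC | e rule used elsewhere in this set, this rule must supply a direction, because the old call passed only the type bit. Substituting READ is sound since READ is defined as 0, so a bare ITER_BVEC was always equivalent to ITER_BVEC | READ; fd_do_rw() hands the iterator to both vfs_iter_read() and vfs_iter_write(), and the old flag word never encoded the write case either.
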
--- initial
+++ final
@@ -1,46 +1,46 @@
static void ceph_aio_complete_req(struct ceph_osd_request *req) {
int rc = req->r_result;
struct inode *inode = req->r_inode;
struct ceph_aio_request *aio_req = req->r_priv;
struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_BVECS);
BUG_ON(!osd_data->num_bvecs);
dout("ceph_aio_complete_req %p rc %d bytes %u\n", inode, rc, osd_data->bvec_pos.iter.bi_size);
if (rc == -EOLDSNAPC) {
struct ceph_aio_work *aio_work;
BUG_ON(!aio_req->write);
aio_work = kmalloc(sizeof(*aio_work), GFP_NOFS);
if (aio_work) {
INIT_WORK(&aio_work->work, ceph_aio_retry_work);
aio_work->req = req;
queue_work(ceph_inode_to_client(inode)->wb_wq, &aio_work->work);
return;
}
rc = -ENOMEM;
} else if (!aio_req->write) {
if (rc == -ENOENT) rc = 0;
if (rc >= 0 && osd_data->bvec_pos.iter.bi_size > rc) {
struct iov_iter i;
int zlen = osd_data->bvec_pos.iter.bi_size - rc;
/*
* If read is satisfied by single OSD request,
* it can pass EOF. Otherwise read is within
* i_size.
*/
if (aio_req->num_reqs == 1) {
loff_t i_size = i_size_read(inode);
loff_t endoff = aio_req->iocb->ki_pos + rc;
if (endoff < i_size) zlen = min_t(size_t, zlen, i_size - endoff);
aio_req->total_len = rc + zlen;
}
- iov_iter_bvec(&i, ITER_BVEC, osd_data->bvec_pos.bvecs, osd_data->num_bvecs, osd_data->bvec_pos.iter.bi_size);
+ iov_iter_bvec(&i, READ, osd_data->bvec_pos.bvecs, osd_data->num_bvecs, osd_data->bvec_pos.iter.bi_size);
iov_iter_advance(&i, rc);
iov_iter_zero(zlen, &i);
}
}
put_bvecs(osd_data->bvec_pos.bvecs, osd_data->num_bvecs, aio_req->should_dirty);
ceph_osdc_put_request(req);
if (rc < 0) cmpxchg(&aio_req->error, 0, rc);
ceph_aio_complete(inode, aio_req);
return;
}<sep>@@
expression e1,e2,e3,e4;
@@
iov_iter_bvec(e1,
- ITER_BVEC
+ READ
, e2,e3,e4)
<|end_of_text|> | 12,127 |
--- initial
+++ final
@@ -1,9 +1,9 @@
int kernel_recvmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec, size_t num, size_t size, int flags) {
mm_segment_t oldfs = get_fs();
int result;
- iov_iter_kvec(&msg->msg_iter, READ | ITER_KVEC, vec, num, size);
+ iov_iter_kvec(&msg->msg_iter, READ, vec, num, size);
set_fs(KERNEL_DS);
result = sock_recvmsg(sock, msg, flags);
set_fs(oldfs);
return result;
}<sep>@@
expression e1,e2,e3,e4,e;
@@
iov_iter_kvec(e1,
- ITER_KVEC | e
+ e
, e2,e3,e4)
<|end_of_text|> | 12,161 |
--- initial
+++ final
@@ -1,61 +1,61 @@
static void pvcalls_conn_back_read(void *opaque) {
struct sock_mapping *map = (struct sock_mapping *)opaque;
struct msghdr msg;
struct kvec vec[2];
RING_IDX cons, prod, size, wanted, array_size, masked_prod, masked_cons;
int32_t error;
struct pvcalls_data_intf *intf = map->ring;
struct pvcalls_data *data = &map->data;
unsigned long flags;
int ret;
array_size = XEN_FLEX_RING_SIZE(map->ring_order);
cons = intf->in_cons;
prod = intf->in_prod;
error = intf->in_error;
/* read the indexes first, then deal with the data */
virt_mb();
if (error) return;
size = pvcalls_queued(prod, cons, array_size);
if (size >= array_size) return;
spin_lock_irqsave(&map->sock->sk->sk_receive_queue.lock, flags);
if (skb_queue_empty(&map->sock->sk->sk_receive_queue)) {
atomic_set(&map->read, 0);
spin_unlock_irqrestore(&map->sock->sk->sk_receive_queue.lock, flags);
return;
}
spin_unlock_irqrestore(&map->sock->sk->sk_receive_queue.lock, flags);
wanted = array_size - size;
masked_prod = pvcalls_mask(prod, array_size);
masked_cons = pvcalls_mask(cons, array_size);
memset(&msg, 0, sizeof(msg));
if (masked_prod < masked_cons) {
vec[0].iov_base = data->in + masked_prod;
vec[0].iov_len = wanted;
- iov_iter_kvec(&msg.msg_iter, ITER_KVEC | WRITE, vec, 1, wanted);
+ iov_iter_kvec(&msg.msg_iter, WRITE, vec, 1, wanted);
} else {
vec[0].iov_base = data->in + masked_prod;
vec[0].iov_len = array_size - masked_prod;
vec[1].iov_base = data->in;
vec[1].iov_len = wanted - vec[0].iov_len;
- iov_iter_kvec(&msg.msg_iter, ITER_KVEC | WRITE, vec, 2, wanted);
+ iov_iter_kvec(&msg.msg_iter, WRITE, vec, 2, wanted);
}
atomic_set(&map->read, 0);
ret = inet_recvmsg(map->sock, &msg, wanted, MSG_DONTWAIT);
WARN_ON(ret > wanted);
if (ret == -EAGAIN) /* shouldn't happen */
return;
if (!ret) ret = -ENOTCONN;
spin_lock_irqsave(&map->sock->sk->sk_receive_queue.lock, flags);
if (ret > 0 && !skb_queue_empty(&map->sock->sk->sk_receive_queue)) atomic_inc(&map->read);
spin_unlock_irqrestore(&map->sock->sk->sk_receive_queue.lock, flags);
/* write the data, then modify the indexes */
virt_wmb();
if (ret < 0)
intf->in_error = ret;
else
intf->in_prod = prod + ret;
/* update the indexes, then notify the other end */
virt_wmb();
notify_remote_via_irq(map->irq);
return;
}<sep>@@
expression e1,e2,e3,e4,e;
@@
iov_iter_kvec(e1,
- ITER_KVEC | e
+ e
, e2,e3,e4)
<|end_of_text|> | 12,150 |
--- initial
+++ final
@@ -1,21 +1,21 @@
static int tipc_conn_rcv_from_sock(struct tipc_conn *con) {
struct tipc_topsrv *srv = con->server;
struct sock *sk = con->sock->sk;
struct msghdr msg = {};
struct tipc_subscr s;
struct kvec iov;
int ret;
iov.iov_base = &s;
iov.iov_len = sizeof(s);
msg.msg_name = NULL;
- iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1, iov.iov_len);
+ iov_iter_kvec(&msg.msg_iter, READ, &iov, 1, iov.iov_len);
ret = sock_recvmsg(con->sock, &msg, MSG_DONTWAIT);
if (ret == -EWOULDBLOCK) return -EWOULDBLOCK;
if (ret > 0) {
read_lock_bh(&sk->sk_callback_lock);
ret = tipc_conn_rcv_sub(srv, con, &s);
read_unlock_bh(&sk->sk_callback_lock);
}
if (ret < 0) tipc_conn_close(con);
return ret;
}<sep>@@
expression e1,e2,e3,e4,e;
@@
iov_iter_kvec(e1,
- ITER_KVEC | e
+ e
, e2,e3,e4)
<|end_of_text|> | 12,174 |
--- initial
+++ final
@@ -1,56 +1,56 @@
static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd, loff_t pos, bool rw) {
struct iov_iter iter;
struct bio_vec *bvec;
struct request *rq = blk_mq_rq_from_pdu(cmd);
struct bio *bio = rq->bio;
struct file *file = lo->lo_backing_file;
unsigned int offset;
int segments = 0;
int ret;
if (rq->bio != rq->biotail) {
struct req_iterator iter;
struct bio_vec tmp;
__rq_for_each_bio(bio, rq) segments += bio_segments(bio);
bvec = kmalloc_array(segments, sizeof(struct bio_vec), GFP_NOIO);
if (!bvec) return -EIO;
cmd->bvec = bvec;
/*
* The bios of the request may be started from the middle of
* the 'bvec' because of bio splitting, so we can't directly
* copy bio->bi_iov_vec to new bvec. The rq_for_each_segment
* API will take care of all details for us.
*/
rq_for_each_segment(tmp, rq, iter) {
*bvec = tmp;
bvec++;
}
bvec = cmd->bvec;
offset = 0;
} else {
/*
* Same here, this bio may be started from the middle of the
* 'bvec' because of bio splitting, so offset from the bvec
* must be passed to iov iterator
*/
offset = bio->bi_iter.bi_bvec_done;
bvec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
segments = bio_segments(bio);
}
atomic_set(&cmd->ref, 2);
- iov_iter_bvec(&iter, ITER_BVEC | rw, bvec, segments, blk_rq_bytes(rq));
+ iov_iter_bvec(&iter, rw, bvec, segments, blk_rq_bytes(rq));
iter.iov_offset = offset;
cmd->iocb.ki_pos = pos;
cmd->iocb.ki_filp = file;
cmd->iocb.ki_complete = lo_rw_aio_complete;
cmd->iocb.ki_flags = IOCB_DIRECT;
cmd->iocb.ki_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0);
if (cmd->css) kthread_associate_blkcg(cmd->css);
if (rw == WRITE)
ret = call_write_iter(file, &cmd->iocb, &iter);
else
ret = call_read_iter(file, &cmd->iocb, &iter);
lo_rw_aio_do_completion(cmd);
kthread_associate_blkcg(NULL);
if (ret != -EIOCBQUEUED) cmd->iocb.ki_complete(&cmd->iocb, ret, 0);
return 0;
}<sep>@@
expression e1,e2,e3,e4,e;
@@
iov_iter_bvec(e1,
- ITER_BVEC | e
+ e
, e2,e3,e4)
<|end_of_text|> | 12,139 |
--- initial
+++ final
@@ -1,48 +1,48 @@
static ssize_t default_file_splice_read(struct file *in, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags) {
struct kvec *vec, __vec[PIPE_DEF_BUFFERS];
struct iov_iter to;
struct page **pages;
unsigned int nr_pages;
size_t offset, base, copied = 0;
ssize_t res;
int i;
if (pipe->nrbufs == pipe->buffers) return -EAGAIN;
/*
* Try to keep page boundaries matching to source pagecache ones -
* it probably won't be much help, but...
*/
offset = *ppos & ~PAGE_MASK;
- iov_iter_pipe(&to, ITER_PIPE | READ, pipe, len + offset);
+ iov_iter_pipe(&to, READ, pipe, len + offset);
res = iov_iter_get_pages_alloc(&to, &pages, len + offset, &base);
if (res <= 0) return -ENOMEM;
nr_pages = DIV_ROUND_UP(res + base, PAGE_SIZE);
vec = __vec;
if (nr_pages > PIPE_DEF_BUFFERS) {
vec = kmalloc_array(nr_pages, sizeof(struct kvec), GFP_KERNEL);
if (unlikely(!vec)) {
res = -ENOMEM;
goto out;
}
}
pipe->bufs[to.idx].offset = offset;
pipe->bufs[to.idx].len -= offset;
for (i = 0; i < nr_pages; i++) {
size_t this_len = min_t(size_t, len, PAGE_SIZE - offset);
vec[i].iov_base = page_address(pages[i]) + offset;
vec[i].iov_len = this_len;
len -= this_len;
offset = 0;
}
res = kernel_readv(in, vec, nr_pages, *ppos);
if (res > 0) {
copied = res;
*ppos += res;
}
if (vec != __vec) kfree(vec);
out:
for (i = 0; i < nr_pages; i++)
put_page(pages[i]);
kvfree(pages);
iov_iter_advance(&to, copied); /* truncates and discards */
return res;
}<sep>@@
expression e1,e2,e3,e;
@@
iov_iter_pipe(e1,
- ITER_PIPE | e
+ e
, e2,e3)
<|end_of_text|> | 12,164 |
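
iov_iter_pipe() follows the same pattern, and since a pipe iterator only ever acts as the destination of a read (data spliced into the pipe), the direction left behind is always READ. A minimal sketch, assuming pipe and len from a splice-read path:

struct iov_iter to;
/* old: iov_iter_pipe(&to, ITER_PIPE | READ, pipe, len); */
iov_iter_pipe(&to, READ, pipe, len);
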
--- initial
+++ final
@@ -1,4 +1,4 @@
int kernel_sendmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec, size_t num, size_t size) {
- iov_iter_kvec(&msg->msg_iter, WRITE | ITER_KVEC, vec, num, size);
+ iov_iter_kvec(&msg->msg_iter, WRITE, vec, num, size);
return sock_sendmsg(sock, msg);
}<sep>@@
expression e1,e2,e3,e4,e;
@@
iov_iter_kvec(e1,
- ITER_KVEC | e
+ e
, e2,e3,e4)
<|end_of_text|> | 12,162 |
--- initial
+++ final
@@ -1,48 +1,48 @@
static void pvcalls_conn_back_write(struct sock_mapping *map) {
struct pvcalls_data_intf *intf = map->ring;
struct pvcalls_data *data = &map->data;
struct msghdr msg;
struct kvec vec[2];
RING_IDX cons, prod, size, array_size;
int ret;
cons = intf->out_cons;
prod = intf->out_prod;
/* read the indexes before dealing with the data */
virt_mb();
array_size = XEN_FLEX_RING_SIZE(map->ring_order);
size = pvcalls_queued(prod, cons, array_size);
if (size == 0) return;
memset(&msg, 0, sizeof(msg));
msg.msg_flags |= MSG_DONTWAIT;
if (pvcalls_mask(prod, array_size) > pvcalls_mask(cons, array_size)) {
vec[0].iov_base = data->out + pvcalls_mask(cons, array_size);
vec[0].iov_len = size;
- iov_iter_kvec(&msg.msg_iter, ITER_KVEC | READ, vec, 1, size);
+ iov_iter_kvec(&msg.msg_iter, READ, vec, 1, size);
} else {
vec[0].iov_base = data->out + pvcalls_mask(cons, array_size);
vec[0].iov_len = array_size - pvcalls_mask(cons, array_size);
vec[1].iov_base = data->out;
vec[1].iov_len = size - vec[0].iov_len;
- iov_iter_kvec(&msg.msg_iter, ITER_KVEC | READ, vec, 2, size);
+ iov_iter_kvec(&msg.msg_iter, READ, vec, 2, size);
}
atomic_set(&map->write, 0);
ret = inet_sendmsg(map->sock, &msg, size);
if (ret == -EAGAIN || (ret >= 0 && ret < size)) {
atomic_inc(&map->write);
atomic_inc(&map->io);
}
if (ret == -EAGAIN) return;
/* write the data, then update the indexes */
virt_wmb();
if (ret < 0) {
intf->out_error = ret;
} else {
intf->out_error = 0;
intf->out_cons = cons + ret;
prod = intf->out_prod;
}
/* update the indexes, then notify the other end */
virt_wmb();
if (prod != cons + ret) atomic_inc(&map->write);
notify_remote_via_irq(map->irq);
}<sep>@@
expression e1,e2,e3,e4,e;
@@
iov_iter_kvec(e1,
- ITER_KVEC | e
+ e
, e2,e3,e4)
<|end_of_text|> | 12,151 |
--- initial
+++ final
@@ -1,75 +1,75 @@
ssize_t iter_file_splice_write(struct pipe_inode_info *pipe, struct file *out, loff_t *ppos, size_t len, unsigned int flags) {
struct splice_desc sd = {
.total_len = len,
.flags = flags,
.pos = *ppos,
.u.file = out,
};
int nbufs = pipe->buffers;
struct bio_vec *array = kcalloc(nbufs, sizeof(struct bio_vec), GFP_KERNEL);
ssize_t ret;
if (unlikely(!array)) return -ENOMEM;
pipe_lock(pipe);
splice_from_pipe_begin(&sd);
while (sd.total_len) {
struct iov_iter from;
size_t left;
int n, idx;
ret = splice_from_pipe_next(pipe, &sd);
if (ret <= 0) break;
if (unlikely(nbufs < pipe->buffers)) {
kfree(array);
nbufs = pipe->buffers;
array = kcalloc(nbufs, sizeof(struct bio_vec), GFP_KERNEL);
if (!array) {
ret = -ENOMEM;
break;
}
}
/* build the vector */
left = sd.total_len;
for (n = 0, idx = pipe->curbuf; left && n < pipe->nrbufs; n++, idx++) {
struct pipe_buffer *buf = pipe->bufs + idx;
size_t this_len = buf->len;
if (this_len > left) this_len = left;
if (idx == pipe->buffers - 1) idx = -1;
ret = pipe_buf_confirm(pipe, buf);
if (unlikely(ret)) {
if (ret == -ENODATA) ret = 0;
goto done;
}
array[n].bv_page = buf->page;
array[n].bv_len = this_len;
array[n].bv_offset = buf->offset;
left -= this_len;
}
- iov_iter_bvec(&from, ITER_BVEC | WRITE, array, n, sd.total_len - left);
+ iov_iter_bvec(&from, WRITE, array, n, sd.total_len - left);
ret = vfs_iter_write(out, &from, &sd.pos, 0);
if (ret <= 0) break;
sd.num_spliced += ret;
sd.total_len -= ret;
*ppos = sd.pos;
/* dismiss the fully eaten buffers, adjust the partial one */
while (ret) {
struct pipe_buffer *buf = pipe->bufs + pipe->curbuf;
if (ret >= buf->len) {
ret -= buf->len;
buf->len = 0;
pipe_buf_release(pipe, buf);
pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
pipe->nrbufs--;
if (pipe->files) sd.need_wakeup = true;
} else {
buf->offset += ret;
buf->len -= ret;
ret = 0;
}
}
}
done:
kfree(array);
splice_from_pipe_end(pipe, &sd);
pipe_unlock(pipe);
if (sd.num_spliced) ret = sd.num_spliced;
return ret;
}<sep>@@
expression e1,e2,e3,e4,e;
@@
iov_iter_bvec(e1,
- ITER_BVEC | e
+ e
, e2,e3,e4)
<|end_of_text|> | 12,166 |
--- initial
+++ final
@@ -1,14 +1,14 @@
static int ip_vs_receive(struct socket *sock, char *buffer, const size_t buflen) {
struct msghdr msg = {
NULL,
};
struct kvec iov = {buffer, buflen};
int len;
EnterFunction(7);
/* Receive a packet */
- iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1, buflen);
+ iov_iter_kvec(&msg.msg_iter, READ, &iov, 1, buflen);
len = sock_recvmsg(sock, &msg, MSG_DONTWAIT);
if (len < 0) return len;
LeaveFunction(7);
return len;
}<sep>@@
expression e1,e2,e3,e4,e;
@@
iov_iter_kvec(e1,
- ITER_KVEC | e
+ e
, e2,e3,e4)
<|end_of_text|> | 12,133 |
--- initial
+++ final
@@ -1,20 +1,20 @@
int tls_device_sendpage(struct sock *sk, struct page *page, int offset, size_t size, int flags) {
struct iov_iter msg_iter;
char *kaddr = kmap(page);
struct kvec iov;
int rc;
if (flags & MSG_SENDPAGE_NOTLAST) flags |= MSG_MORE;
lock_sock(sk);
if (flags & MSG_OOB) {
rc = -ENOTSUPP;
goto out;
}
iov.iov_base = kaddr + offset;
iov.iov_len = size;
- iov_iter_kvec(&msg_iter, WRITE | ITER_KVEC, &iov, 1, size);
+ iov_iter_kvec(&msg_iter, WRITE, &iov, 1, size);
rc = tls_push_data(sk, &msg_iter, size, flags, TLS_RECORD_TYPE_DATA);
kunmap(page);
out:
release_sock(sk);
return rc;
}<sep>@@
expression e1,e2,e3,e4,e;
@@
iov_iter_kvec(e1,
- ITER_KVEC | e
+ e
, e2,e3,e4)
<|end_of_text|> | 12,173 |
--- initial
+++ final
@@ -1,6 +1,6 @@
int cifs_read_from_socket(struct TCP_Server_Info *server, char *buf, unsigned int to_read) {
struct msghdr smb_msg;
struct kvec iov = {.iov_base = buf, .iov_len = to_read};
- iov_iter_kvec(&smb_msg.msg_iter, READ | ITER_KVEC, &iov, 1, to_read);
+ iov_iter_kvec(&smb_msg.msg_iter, READ, &iov, 1, to_read);
return cifs_readv_from_socket(server, &smb_msg);
}<sep>@@
expression e1,e2,e3,e4,e;
@@
iov_iter_kvec(e1,
- ITER_KVEC | e
+ e
, e2,e3,e4)
<|end_of_text|> | 12,123 |
--- initial
+++ final
@@ -1,19 +1,19 @@
static int lo_read_simple(struct loop_device *lo, struct request *rq, loff_t pos) {
struct bio_vec bvec;
struct req_iterator iter;
struct iov_iter i;
ssize_t len;
rq_for_each_segment(bvec, rq, iter) {
- iov_iter_bvec(&i, ITER_BVEC, &bvec, 1, bvec.bv_len);
+ iov_iter_bvec(&i, READ, &bvec, 1, bvec.bv_len);
len = vfs_iter_read(lo->lo_backing_file, &i, &pos, 0);
if (len < 0) return len;
flush_dcache_page(bvec.bv_page);
if (len != bvec.bv_len) {
struct bio *bio;
__rq_for_each_bio(bio, rq) zero_fill_bio(bio);
break;
}
cond_resched();
}
return 0;
}<sep>@@
expression e1,e2,e3,e4;
@@
iov_iter_bvec(e1,
- ITER_BVEC
+ READ
, e2,e3,e4)
<|end_of_text|> | 12,137 |
--- initial
+++ final
@@ -1,52 +1,52 @@
static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) {
struct net *net = sock_net(skb->sk);
int h, s_h;
int idx = 0, s_idx;
struct net_device *dev;
struct hlist_head *head;
struct nlattr *tb[IFLA_MAX + 1];
u32 ext_filter_mask = 0;
const struct rtnl_link_ops *kind_ops = NULL;
unsigned int flags = NLM_F_MULTI;
int master_idx = 0;
int err;
int hdrlen;
s_h = cb->args[0];
s_idx = cb->args[1];
cb->seq = net->dev_base_seq;
/* A hack to preserve kernel<->userspace interface.
* The correct header is ifinfomsg. It is consistent with rtnl_getlink.
* However, before Linux v3.9 the code here assumed rtgenmsg and that's
* what iproute2 < v3.9.0 used.
* We can detect the old iproute2. Even including the IFLA_EXT_MASK
* attribute, its netlink message is shorter than struct ifinfomsg.
*/
hdrlen = nlmsg_len(cb->nlh) < sizeof(struct ifinfomsg) ? sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);
- if (nlmsg_parse(cb->nlh, hdrlen, tb, IFLA_MAX, ifla_policy) >= 0) {
+ if (nlmsg_parse(cb->nlh, hdrlen, tb, IFLA_MAX, ifla_policy, NULL) >= 0) {
if (tb[IFLA_EXT_MASK]) ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
if (tb[IFLA_MASTER]) master_idx = nla_get_u32(tb[IFLA_MASTER]);
if (tb[IFLA_LINKINFO]) kind_ops = linkinfo_to_kind_ops(tb[IFLA_LINKINFO]);
if (master_idx || kind_ops) flags |= NLM_F_DUMP_FILTERED;
}
for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
idx = 0;
head = &net->dev_index_head[h];
hlist_for_each_entry(dev, head, index_hlist) {
if (link_dump_filtered(dev, master_idx, kind_ops)) goto cont;
if (idx < s_idx) goto cont;
err = rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, 0, flags, ext_filter_mask);
/* If we ran out of room on the first message,
* we're in trouble
*/
WARN_ON((err == -EMSGSIZE) && (skb->len == 0));
if (err < 0) goto out;
nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
idx++;
}
}
out:
cb->args[1] = idx;
cb->args[0] = h;
return skb->len;
}<sep>@@
expression e1,e2,e3,e4,e5;
@@
nlmsg_parse(e1,e2,e3,e4,e5
+ , NULL
)
<|end_of_text|> | 9,327 |
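
Note: from this record onward the corpus captures a different migration: the netlink helpers nlmsg_parse() and nlmsg_validate() gained a trailing struct netlink_ext_ack * argument for extended-ACK error reporting, and callers not yet wired for it pass NULL. A hedged sketch (MY_MAX and my_policy are illustrative, not from the dataset):

struct nlattr *tb[MY_MAX + 1]; /* MY_MAX and my_policy are hypothetical */
/* old: err = nlmsg_parse(nlh, sizeof(struct ifinfomsg), tb, MY_MAX, my_policy); */
err = nlmsg_parse(nlh, sizeof(struct ifinfomsg), tb, MY_MAX, my_policy, NULL);
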
--- initial
+++ final
@@ -1,44 +1,44 @@
static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n) {
struct net *net = sock_net(skb->sk);
struct tcmsg *tcm = nlmsg_data(n);
struct nlattr *tca[TCA_MAX + 1];
struct net_device *dev;
u32 clid;
struct Qdisc *q = NULL;
struct Qdisc *p = NULL;
int err;
if ((n->nlmsg_type != RTM_GETQDISC) && !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) return -EPERM;
- err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
+ err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL, NULL);
if (err < 0) return err;
dev = __dev_get_by_index(net, tcm->tcm_ifindex);
if (!dev) return -ENODEV;
clid = tcm->tcm_parent;
if (clid) {
if (clid != TC_H_ROOT) {
if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
p = qdisc_lookup(dev, TC_H_MAJ(clid));
if (!p) return -ENOENT;
q = qdisc_leaf(p, clid);
} else if (dev_ingress_queue(dev)) {
q = dev_ingress_queue(dev)->qdisc_sleeping;
}
} else {
q = dev->qdisc;
}
if (!q) return -ENOENT;
if (tcm->tcm_handle && q->handle != tcm->tcm_handle) return -EINVAL;
} else {
q = qdisc_lookup(dev, tcm->tcm_handle);
if (!q) return -ENOENT;
}
if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) return -EINVAL;
if (n->nlmsg_type == RTM_DELQDISC) {
if (!clid) return -EINVAL;
if (q->handle == 0) return -ENOENT;
err = qdisc_graft(dev, p, skb, n, clid, NULL, q);
if (err != 0) return err;
} else {
qdisc_notify(net, skb, n, clid, NULL, q);
}
return 0;
}<sep>@@
expression e1,e2,e3,e4,e5;
@@
nlmsg_parse(e1,e2,e3,e4,e5
+ , NULL
)
<|end_of_text|> | 9,335 |
--- initial
+++ final
@@ -1,14 +1,14 @@
static struct nlattr *find_dump_kind(const struct nlmsghdr *n) {
struct nlattr *tb1, *tb2[TCA_ACT_MAX + 1];
struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
struct nlattr *nla[TCAA_MAX + 1];
struct nlattr *kind;
- if (nlmsg_parse(n, sizeof(struct tcamsg), nla, TCAA_MAX, NULL) < 0) return NULL;
+ if (nlmsg_parse(n, sizeof(struct tcamsg), nla, TCAA_MAX, NULL, NULL) < 0) return NULL;
tb1 = nla[TCA_ACT_TAB];
if (tb1 == NULL) return NULL;
if (nla_parse(tb, TCA_ACT_MAX_PRIO, nla_data(tb1), NLMSG_ALIGN(nla_len(tb1)), NULL) < 0) return NULL;
if (tb[1] == NULL) return NULL;
if (nla_parse_nested(tb2, TCA_ACT_MAX, tb[1], NULL) < 0) return NULL;
kind = tb2[TCA_ACT_KIND];
return kind;
}<sep>@@
expression e1,e2,e3,e4,e5;
@@
nlmsg_parse(e1,e2,e3,e4,e5
+ , NULL
)
<|end_of_text|> | 9,273 |
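
Only the nlmsg_parse() call is rewritten here: the nla_parse() and nla_parse_nested() calls in the same function keep their old arity in this record and would need rules of their own in the same migration.
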
--- initial
+++ final
@@ -1,22 +1,22 @@
static int nl802154_dump_wpan_phy_parse(struct sk_buff *skb, struct netlink_callback *cb, struct nl802154_dump_wpan_phy_state *state) {
struct nlattr **tb = genl_family_attrbuf(&nl802154_fam);
- int ret = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl802154_fam.hdrsize, tb, nl802154_fam.maxattr, nl802154_policy);
+ int ret = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl802154_fam.hdrsize, tb, nl802154_fam.maxattr, nl802154_policy, NULL);
/* TODO check if we can handle error here,
* we have no backward compatibility
*/
if (ret) return 0;
if (tb[NL802154_ATTR_WPAN_PHY]) state->filter_wpan_phy = nla_get_u32(tb[NL802154_ATTR_WPAN_PHY]);
if (tb[NL802154_ATTR_WPAN_DEV]) state->filter_wpan_phy = nla_get_u64(tb[NL802154_ATTR_WPAN_DEV]) >> 32;
if (tb[NL802154_ATTR_IFINDEX]) {
struct net_device *netdev;
struct cfg802154_registered_device *rdev;
int ifidx = nla_get_u32(tb[NL802154_ATTR_IFINDEX]);
netdev = __dev_get_by_index(&init_net, ifidx);
if (!netdev) return -ENODEV;
if (netdev->ieee802154_ptr) {
rdev = wpan_phy_to_rdev(netdev->ieee802154_ptr->wpan_phy);
state->filter_wpan_phy = rdev->wpan_phy_idx;
}
}
return 0;
}<sep>@@
expression e1,e2,e3,e4,e5;
@@
nlmsg_parse(e1,e2,e3,e4,e5
+ , NULL
)
<|end_of_text|> | 9,318 |
--- initial
+++ final
@@ -1,44 +1,44 @@
static int inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh) {
struct net *net = sock_net(skb->sk);
struct ifaddrmsg *ifm;
struct nlattr *tb[IFA_MAX + 1];
struct in6_addr *pfx, *peer_pfx;
struct inet6_ifaddr *ifa;
struct net_device *dev;
u32 valid_lft = INFINITY_LIFE_TIME, preferred_lft = INFINITY_LIFE_TIME;
u32 ifa_flags;
int err;
- err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy);
+ err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy, NULL);
if (err < 0) return err;
ifm = nlmsg_data(nlh);
pfx = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer_pfx);
if (!pfx) return -EINVAL;
if (tb[IFA_CACHEINFO]) {
struct ifa_cacheinfo *ci;
ci = nla_data(tb[IFA_CACHEINFO]);
valid_lft = ci->ifa_valid;
preferred_lft = ci->ifa_prefered;
} else {
preferred_lft = INFINITY_LIFE_TIME;
valid_lft = INFINITY_LIFE_TIME;
}
dev = __dev_get_by_index(net, ifm->ifa_index);
if (!dev) return -ENODEV;
ifa_flags = tb[IFA_FLAGS] ? nla_get_u32(tb[IFA_FLAGS]) : ifm->ifa_flags;
/* We ignore other flags so far. */
ifa_flags &= IFA_F_NODAD | IFA_F_HOMEADDRESS | IFA_F_MANAGETEMPADDR | IFA_F_NOPREFIXROUTE | IFA_F_MCAUTOJOIN;
ifa = ipv6_get_ifaddr(net, pfx, dev, 1);
if (!ifa) {
/*
* It would be best to check for !NLM_F_CREATE here but
* userspace already relies on not having to provide this.
*/
return inet6_addr_add(net, ifm->ifa_index, pfx, peer_pfx, ifm->ifa_prefixlen, ifa_flags, preferred_lft, valid_lft);
}
if (nlh->nlmsg_flags & NLM_F_EXCL || !(nlh->nlmsg_flags & NLM_F_REPLACE))
err = -EEXIST;
else
err = inet6_addr_modify(ifa, ifa_flags, preferred_lft, valid_lft);
in6_ifa_put(ifa);
return err;
}<sep>@@
expression e1,e2,e3,e4,e5;
@@
nlmsg_parse(e1,e2,e3,e4,e5
+ , NULL
)
<|end_of_text|> | 9,278 |
--- initial
+++ final
@@ -1,107 +1,107 @@
static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n) {
struct net *net = sock_net(skb->sk);
struct tcmsg *tcm;
struct nlattr *tca[TCA_MAX + 1];
struct net_device *dev;
u32 clid;
struct Qdisc *q, *p;
int err;
if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) return -EPERM;
replay:
/* Reinit, just in case something touches this. */
- err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
+ err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL, NULL);
if (err < 0) return err;
tcm = nlmsg_data(n);
clid = tcm->tcm_parent;
q = p = NULL;
dev = __dev_get_by_index(net, tcm->tcm_ifindex);
if (!dev) return -ENODEV;
if (clid) {
if (clid != TC_H_ROOT) {
if (clid != TC_H_INGRESS) {
p = qdisc_lookup(dev, TC_H_MAJ(clid));
if (!p) return -ENOENT;
q = qdisc_leaf(p, clid);
} else if (dev_ingress_queue_create(dev)) {
q = dev_ingress_queue(dev)->qdisc_sleeping;
}
} else {
q = dev->qdisc;
}
/* It may be default qdisc, ignore it */
if (q && q->handle == 0) q = NULL;
if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
if (tcm->tcm_handle) {
if (q && !(n->nlmsg_flags & NLM_F_REPLACE)) return -EEXIST;
if (TC_H_MIN(tcm->tcm_handle)) return -EINVAL;
q = qdisc_lookup(dev, tcm->tcm_handle);
if (!q) goto create_n_graft;
if (n->nlmsg_flags & NLM_F_EXCL) return -EEXIST;
if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) return -EINVAL;
if (q == p || (p && check_loop(q, p, 0))) return -ELOOP;
atomic_inc(&q->refcnt);
goto graft;
} else {
if (!q) goto create_n_graft;
/* This magic test requires explanation.
*
* We know, that some child q is already
* attached to this parent and have choice:
* either to change it or to create/graft new one.
*
* 1. We are allowed to create/graft only
* if CREATE and REPLACE flags are set.
*
* 2. If EXCL is set, requestor wanted to say,
* that qdisc tcm_handle is not expected
* to exist, so that we choose create/graft too.
*
* 3. The last case is when no flags are set.
* Alas, it is sort of hole in API, we
* cannot decide what to do unambiguously.
* For now we select create/graft, if
* user gave KIND, which does not match existing.
*/
if ((n->nlmsg_flags & NLM_F_CREATE) && (n->nlmsg_flags & NLM_F_REPLACE) && ((n->nlmsg_flags & NLM_F_EXCL) || (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)))) goto create_n_graft;
}
}
} else {
if (!tcm->tcm_handle) return -EINVAL;
q = qdisc_lookup(dev, tcm->tcm_handle);
}
/* Change qdisc parameters */
if (q == NULL) return -ENOENT;
if (n->nlmsg_flags & NLM_F_EXCL) return -EEXIST;
if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) return -EINVAL;
err = qdisc_change(q, tca);
if (err == 0) qdisc_notify(net, skb, n, clid, NULL, q);
return err;
create_n_graft:
if (!(n->nlmsg_flags & NLM_F_CREATE)) return -ENOENT;
if (clid == TC_H_INGRESS) {
if (dev_ingress_queue(dev))
q = qdisc_create(dev, dev_ingress_queue(dev), p, tcm->tcm_parent, tcm->tcm_parent, tca, &err);
else
err = -ENOENT;
} else {
struct netdev_queue *dev_queue;
if (p && p->ops->cl_ops && p->ops->cl_ops->select_queue)
dev_queue = p->ops->cl_ops->select_queue(p, tcm);
else if (p)
dev_queue = p->dev_queue;
else
dev_queue = netdev_get_tx_queue(dev, 0);
q = qdisc_create(dev, dev_queue, p, tcm->tcm_parent, tcm->tcm_handle, tca, &err);
}
if (q == NULL) {
if (err == -EAGAIN) goto replay;
return err;
}
graft:
err = qdisc_graft(dev, p, skb, n, clid, q, NULL);
if (err) {
if (q) qdisc_destroy(q);
return err;
}
return 0;
}<sep>@@
expression e1,e2,e3,e4,e5;
@@
nlmsg_parse(e1,e2,e3,e4,e5
+ , NULL
)
<|end_of_text|> | 9,336 |
--- initial
+++ final
@@ -1,40 +1,40 @@
static int inet_netconf_get_devconf(struct sk_buff *in_skb, struct nlmsghdr *nlh) {
struct net *net = sock_net(in_skb->sk);
struct nlattr *tb[NETCONFA_MAX + 1];
struct netconfmsg *ncm;
struct sk_buff *skb;
struct ipv4_devconf *devconf;
struct in_device *in_dev;
struct net_device *dev;
int ifindex;
int err;
- err = nlmsg_parse(nlh, sizeof(*ncm), tb, NETCONFA_MAX, devconf_ipv4_policy);
+ err = nlmsg_parse(nlh, sizeof(*ncm), tb, NETCONFA_MAX, devconf_ipv4_policy, NULL);
if (err < 0) goto errout;
err = -EINVAL;
if (!tb[NETCONFA_IFINDEX]) goto errout;
ifindex = nla_get_s32(tb[NETCONFA_IFINDEX]);
switch (ifindex) {
case NETCONFA_IFINDEX_ALL: devconf = net->ipv4.devconf_all; break;
case NETCONFA_IFINDEX_DEFAULT: devconf = net->ipv4.devconf_dflt; break;
default:
dev = __dev_get_by_index(net, ifindex);
if (!dev) goto errout;
in_dev = __in_dev_get_rtnl(dev);
if (!in_dev) goto errout;
devconf = &in_dev->cnf;
break;
}
err = -ENOBUFS;
skb = nlmsg_new(inet_netconf_msgsize_devconf(NETCONFA_ALL), GFP_KERNEL);
if (!skb) goto errout;
err = inet_netconf_fill_devconf(skb, ifindex, devconf, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq, RTM_NEWNETCONF, 0, NETCONFA_ALL);
if (err < 0) {
/* -EMSGSIZE implies BUG in inet_netconf_msgsize_devconf() */
WARN_ON(err == -EMSGSIZE);
kfree_skb(skb);
goto errout;
}
err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
errout:
return err;
}<sep>@@
expression e1,e2,e3,e4,e5;
@@
nlmsg_parse(e1,e2,e3,e4,e5
+ , NULL
)
<|end_of_text|> | 9,289 |
--- initial
+++ final
@@ -1,29 +1,29 @@
static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) {
struct net *net = sock_net(skb->sk);
struct nlattr *attrs[XFRMA_MAX + 1];
const struct xfrm_link *link;
int type, err;
#ifdef CONFIG_COMPAT
if (in_compat_syscall()) return -EOPNOTSUPP;
#endif
type = nlh->nlmsg_type;
if (type > XFRM_MSG_MAX) return -EINVAL;
type -= XFRM_MSG_BASE;
link = &xfrm_dispatch[type];
/* All operations require privileges, even GET */
if (!netlink_net_capable(skb, CAP_NET_ADMIN)) return -EPERM;
if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) || type == (XFRM_MSG_GETPOLICY - XFRM_MSG_BASE)) && (nlh->nlmsg_flags & NLM_F_DUMP)) {
if (link->dump == NULL) return -EINVAL;
{
struct netlink_dump_control c = {
.dump = link->dump,
.done = link->done,
};
return netlink_dump_start(net->xfrm.nlsk, skb, nlh, &c);
}
}
- err = nlmsg_parse(nlh, xfrm_msg_min[type], attrs, link->nla_max ?: XFRMA_MAX, link->nla_pol ?: xfrma_policy);
+ err = nlmsg_parse(nlh, xfrm_msg_min[type], attrs, link->nla_max ?: XFRMA_MAX, link->nla_pol ?: xfrma_policy, NULL);
if (err < 0) return err;
if (link->doit == NULL) return -EINVAL;
return link->doit(skb, nlh, attrs);
}<sep>@@
expression e1,e2,e3,e4,e5;
@@
nlmsg_parse(e1,e2,e3,e4,e5
+ , NULL
)
<|end_of_text|> | 9,338 |
--- initial
+++ final
@@ -1,47 +1,47 @@
static int rtm_to_ipmr_mfcc(struct net *net, struct nlmsghdr *nlh, struct mfcctl *mfcc, int *mrtsock, struct mr_table **mrtret) {
struct net_device *dev = NULL;
u32 tblid = RT_TABLE_DEFAULT;
struct mr_table *mrt;
struct nlattr *attr;
struct rtmsg *rtm;
int ret, rem;
- ret = nlmsg_validate(nlh, sizeof(*rtm), RTA_MAX, rtm_ipmr_policy);
+ ret = nlmsg_validate(nlh, sizeof(*rtm), RTA_MAX, rtm_ipmr_policy, NULL);
if (ret < 0) goto out;
rtm = nlmsg_data(nlh);
ret = -EINVAL;
if (rtm->rtm_family != RTNL_FAMILY_IPMR || rtm->rtm_dst_len != 32 || rtm->rtm_type != RTN_MULTICAST || rtm->rtm_scope != RT_SCOPE_UNIVERSE || !ipmr_rtm_validate_proto(rtm->rtm_protocol)) goto out;
memset(mfcc, 0, sizeof(*mfcc));
mfcc->mfcc_parent = -1;
ret = 0;
nlmsg_for_each_attr(attr, nlh, sizeof(struct rtmsg), rem) {
switch (nla_type(attr)) {
case RTA_SRC: mfcc->mfcc_origin.s_addr = nla_get_be32(attr); break;
case RTA_DST: mfcc->mfcc_mcastgrp.s_addr = nla_get_be32(attr); break;
case RTA_IIF:
dev = __dev_get_by_index(net, nla_get_u32(attr));
if (!dev) {
ret = -ENODEV;
goto out;
}
break;
case RTA_MULTIPATH:
if (ipmr_nla_get_ttls(attr, mfcc) < 0) {
ret = -EINVAL;
goto out;
}
break;
case RTA_PREFSRC: ret = 1; break;
case RTA_TABLE: tblid = nla_get_u32(attr); break;
}
}
mrt = ipmr_get_table(net, tblid);
if (!mrt) {
ret = -ENOENT;
goto out;
}
*mrtret = mrt;
*mrtsock = rtm->rtm_protocol == RTPROT_MROUTED ? 1 : 0;
if (dev) mfcc->mfcc_parent = ipmr_find_vif(mrt, dev);
out:
return ret;
}<sep>@@
expression e1,e2,e3,e4;
@@
nlmsg_validate(e1,e2,e3,e4
+ , NULL
)
<|end_of_text|> | 9,303 |
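
nlmsg_validate() is widened identically, as the rule after <sep> shows; a minimal sketch (my_policy is assumed):

/* old: ret = nlmsg_validate(nlh, sizeof(struct rtmsg), RTA_MAX, my_policy); */
ret = nlmsg_validate(nlh, sizeof(struct rtmsg), RTA_MAX, my_policy, NULL);
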
--- initial
+++ final
@@ -1,68 +1,68 @@
static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh) {
int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE;
struct net *net = sock_net(skb->sk);
struct ndmsg *ndm;
struct nlattr *tb[NDA_MAX + 1];
struct neigh_table *tbl;
struct net_device *dev = NULL;
struct neighbour *neigh;
void *dst, *lladdr;
int err;
ASSERT_RTNL();
- err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
+ err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL, NULL);
if (err < 0) goto out;
err = -EINVAL;
if (tb[NDA_DST] == NULL) goto out;
ndm = nlmsg_data(nlh);
if (ndm->ndm_ifindex) {
dev = __dev_get_by_index(net, ndm->ndm_ifindex);
if (dev == NULL) {
err = -ENODEV;
goto out;
}
if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len) goto out;
}
tbl = neigh_find_table(ndm->ndm_family);
if (tbl == NULL) return -EAFNOSUPPORT;
if (nla_len(tb[NDA_DST]) < tbl->key_len) goto out;
dst = nla_data(tb[NDA_DST]);
lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;
if (ndm->ndm_flags & NTF_PROXY) {
struct pneigh_entry *pn;
err = -ENOBUFS;
pn = pneigh_lookup(tbl, net, dst, dev, 1);
if (pn) {
pn->flags = ndm->ndm_flags;
err = 0;
}
goto out;
}
if (dev == NULL) goto out;
neigh = neigh_lookup(tbl, dst, dev);
if (neigh == NULL) {
if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
err = -ENOENT;
goto out;
}
neigh = __neigh_lookup_errno(tbl, dst, dev);
if (IS_ERR(neigh)) {
err = PTR_ERR(neigh);
goto out;
}
} else {
if (nlh->nlmsg_flags & NLM_F_EXCL) {
err = -EEXIST;
neigh_release(neigh);
goto out;
}
if (!(nlh->nlmsg_flags & NLM_F_REPLACE)) flags &= ~NEIGH_UPDATE_F_OVERRIDE;
}
if (ndm->ndm_flags & NTF_USE) {
neigh_event_send(neigh, NULL);
err = 0;
} else
err = neigh_update(neigh, lladdr, ndm->ndm_state, flags, NETLINK_CB(skb).portid);
neigh_release(neigh);
out:
return err;
}<sep>@@
expression e1,e2,e3,e4,e5;
@@
nlmsg_parse(e1,e2,e3,e4,e5
+ , NULL
)
<|end_of_text|> | 9,306 |
--- initial
+++ final
@@ -1,21 +1,21 @@
static int nl80211_dump_wiphy_parse(struct sk_buff *skb, struct netlink_callback *cb, struct nl80211_dump_wiphy_state *state) {
struct nlattr **tb = genl_family_attrbuf(&nl80211_fam);
- int ret = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize, tb, nl80211_fam.maxattr, nl80211_policy);
+ int ret = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize, tb, nl80211_fam.maxattr, nl80211_policy, NULL);
/* ignore parse errors for backward compatibility */
if (ret) return 0;
state->split = tb[NL80211_ATTR_SPLIT_WIPHY_DUMP];
if (tb[NL80211_ATTR_WIPHY]) state->filter_wiphy = nla_get_u32(tb[NL80211_ATTR_WIPHY]);
if (tb[NL80211_ATTR_WDEV]) state->filter_wiphy = nla_get_u64(tb[NL80211_ATTR_WDEV]) >> 32;
if (tb[NL80211_ATTR_IFINDEX]) {
struct net_device *netdev;
struct cfg80211_registered_device *rdev;
int ifidx = nla_get_u32(tb[NL80211_ATTR_IFINDEX]);
netdev = __dev_get_by_index(sock_net(skb->sk), ifidx);
if (!netdev) return -ENODEV;
if (netdev->ieee80211_ptr) {
rdev = wiphy_to_rdev(netdev->ieee80211_ptr->wiphy);
state->filter_wiphy = rdev->wiphy_idx;
}
}
return 0;
}<sep>@@
expression e1,e2,e3,e4,e5;
@@
nlmsg_parse(e1,e2,e3,e4,e5
+ , NULL
)
<|end_of_text|> | 9,314 |
--- initial
+++ final
@@ -1,165 +1,165 @@
static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n) {
struct net *net = sock_net(skb->sk);
struct nlattr *tca[TCA_MAX + 1];
struct tcmsg *t;
u32 protocol;
u32 prio;
u32 nprio;
u32 parent;
struct net_device *dev;
struct Qdisc *q;
struct tcf_proto __rcu **back;
struct tcf_proto __rcu **chain;
struct tcf_proto *next;
struct tcf_proto *tp;
const struct Qdisc_class_ops *cops;
unsigned long cl;
unsigned long fh;
int err;
int tp_created;
if ((n->nlmsg_type != RTM_GETTFILTER) && !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) return -EPERM;
replay:
tp_created = 0;
- err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL);
+ err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL, NULL);
if (err < 0) return err;
t = nlmsg_data(n);
protocol = TC_H_MIN(t->tcm_info);
prio = TC_H_MAJ(t->tcm_info);
nprio = prio;
parent = t->tcm_parent;
cl = 0;
if (prio == 0) {
switch (n->nlmsg_type) {
case RTM_DELTFILTER:
if (protocol || t->tcm_handle || tca[TCA_KIND]) return -ENOENT;
break;
case RTM_NEWTFILTER:
/* If no priority is provided by the user,
* we allocate one.
*/
if (n->nlmsg_flags & NLM_F_CREATE) {
prio = TC_H_MAKE(0x80000000U, 0U);
break;
}
/* fall-through */
default: return -ENOENT;
}
}
/* Find head of filter chain. */
/* Find link */
dev = __dev_get_by_index(net, t->tcm_ifindex);
if (dev == NULL) return -ENODEV;
/* Find qdisc */
if (!parent) {
q = dev->qdisc;
parent = q->handle;
} else {
q = qdisc_lookup(dev, TC_H_MAJ(t->tcm_parent));
if (q == NULL) return -EINVAL;
}
/* Is it classful? */
cops = q->ops->cl_ops;
if (!cops) return -EINVAL;
if (cops->tcf_chain == NULL) return -EOPNOTSUPP;
/* Do we search for filter, attached to class? */
if (TC_H_MIN(parent)) {
cl = cops->get(q, parent);
if (cl == 0) return -ENOENT;
}
/* And the last stroke */
chain = cops->tcf_chain(q, cl);
if (chain == NULL) {
err = -EINVAL;
goto errout;
}
if (n->nlmsg_type == RTM_DELTFILTER && prio == 0) {
tfilter_notify_chain(net, skb, n, chain, RTM_DELTFILTER);
tcf_destroy_chain(chain);
err = 0;
goto errout;
}
/* Check the chain for existence of proto-tcf with this priority */
for (back = chain; (tp = rtnl_dereference(*back)) != NULL; back = &tp->next) {
if (tp->prio >= prio) {
if (tp->prio == prio) {
if (!nprio || (tp->protocol != protocol && protocol)) {
err = -EINVAL;
goto errout;
}
} else {
tp = NULL;
}
break;
}
}
if (tp == NULL) {
/* Proto-tcf does not exist, create new one */
if (tca[TCA_KIND] == NULL || !protocol) {
err = -EINVAL;
goto errout;
}
if (n->nlmsg_type != RTM_NEWTFILTER || !(n->nlmsg_flags & NLM_F_CREATE)) {
err = -ENOENT;
goto errout;
}
if (!nprio) nprio = TC_H_MAJ(tcf_auto_prio(rtnl_dereference(*back)));
tp = tcf_proto_create(nla_data(tca[TCA_KIND]), protocol, nprio, parent, q);
if (IS_ERR(tp)) {
err = PTR_ERR(tp);
goto errout;
}
tp_created = 1;
} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
err = -EINVAL;
goto errout;
}
fh = tp->ops->get(tp, t->tcm_handle);
if (fh == 0) {
if (n->nlmsg_type == RTM_DELTFILTER && t->tcm_handle == 0) {
next = rtnl_dereference(tp->next);
RCU_INIT_POINTER(*back, next);
tfilter_notify(net, skb, n, tp, fh, RTM_DELTFILTER, false);
tcf_proto_destroy(tp, true);
err = 0;
goto errout;
}
if (n->nlmsg_type != RTM_NEWTFILTER || !(n->nlmsg_flags & NLM_F_CREATE)) {
err = -ENOENT;
goto errout;
}
} else {
switch (n->nlmsg_type) {
case RTM_NEWTFILTER:
if (n->nlmsg_flags & NLM_F_EXCL) {
if (tp_created) tcf_proto_destroy(tp, true);
err = -EEXIST;
goto errout;
}
break;
case RTM_DELTFILTER:
err = tp->ops->delete (tp, fh);
if (err) goto errout;
next = rtnl_dereference(tp->next);
tfilter_notify(net, skb, n, tp, t->tcm_handle, RTM_DELTFILTER, false);
if (tcf_proto_destroy(tp, false)) RCU_INIT_POINTER(*back, next);
goto errout;
case RTM_GETTFILTER: err = tfilter_notify(net, skb, n, tp, fh, RTM_NEWTFILTER, true); goto errout;
default: err = -EINVAL; goto errout;
}
}
err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh, n->nlmsg_flags & NLM_F_CREATE ? TCA_ACT_NOREPLACE : TCA_ACT_REPLACE);
if (err == 0) {
if (tp_created) {
RCU_INIT_POINTER(tp->next, rtnl_dereference(*back));
rcu_assign_pointer(*back, tp);
}
tfilter_notify(net, skb, n, tp, fh, RTM_NEWTFILTER, false);
} else {
if (tp_created) tcf_proto_destroy(tp, true);
}
errout:
if (cl) cops->put(q, cl);
if (err == -EAGAIN) /* Replay the request. */
goto replay;
return err;
}<sep>@@
expression e1,e2,e3,e4,e5;
@@
nlmsg_parse(e1,e2,e3,e4,e5
+ , NULL
)
<|end_of_text|> | 9,286 |
--- initial
+++ final
@@ -1,39 +1,39 @@
static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb, struct netlink_callback *cb) {
struct net *net = sock_net(skb->sk);
const struct nlmsghdr *nlh = cb->nlh;
struct nlattr *tb[NDA_MAX + 1];
struct neighbour *n;
int rc, h, s_h = cb->args[1];
int idx, s_idx = idx = cb->args[2];
struct neigh_hash_table *nht;
int filter_master_idx = 0, filter_idx = 0;
unsigned int flags = NLM_F_MULTI;
int err;
- err = nlmsg_parse(nlh, sizeof(struct ndmsg), tb, NDA_MAX, NULL);
+ err = nlmsg_parse(nlh, sizeof(struct ndmsg), tb, NDA_MAX, NULL, NULL);
if (!err) {
if (tb[NDA_IFINDEX]) filter_idx = nla_get_u32(tb[NDA_IFINDEX]);
if (tb[NDA_MASTER]) filter_master_idx = nla_get_u32(tb[NDA_MASTER]);
if (filter_idx || filter_master_idx) flags |= NLM_F_DUMP_FILTERED;
}
rcu_read_lock_bh();
nht = rcu_dereference_bh(tbl->nht);
for (h = s_h; h < (1 << nht->hash_shift); h++) {
if (h > s_h) s_idx = 0;
for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0; n != NULL; n = rcu_dereference_bh(n->next)) {
if (idx < s_idx || !net_eq(dev_net(n->dev), net)) goto next;
if (neigh_ifindex_filtered(n->dev, filter_idx) || neigh_master_filtered(n->dev, filter_master_idx)) goto next;
if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, RTM_NEWNEIGH, flags) < 0) {
rc = -1;
goto out;
}
next:
idx++;
}
}
rc = skb->len;
out:
rcu_read_unlock_bh();
cb->args[1] = h;
cb->args[2] = idx;
return rc;
}<sep>@@
expression e1,e2,e3,e4,e5;
@@
nlmsg_parse(e1,e2,e3,e4,e5
+ , NULL
)
<|end_of_text|> | 9,307 |
--- initial
+++ final
@@ -1,75 +1,75 @@
int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh) {
struct net *net = sock_net(skb->sk);
struct fib_rule_hdr *frh = nlmsg_data(nlh);
struct fib_rules_ops *ops = NULL;
struct fib_rule *rule, *tmp;
struct nlattr *tb[FRA_MAX + 1];
struct fib_kuid_range range;
int err = -EINVAL;
if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh))) goto errout;
ops = lookup_rules_ops(net, frh->family);
if (ops == NULL) {
err = -EAFNOSUPPORT;
goto errout;
}
- err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy);
+ err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy, NULL);
if (err < 0) goto errout;
err = validate_rulemsg(frh, tb, ops);
if (err < 0) goto errout;
if (tb[FRA_UID_RANGE]) {
range = nla_get_kuid_range(tb);
if (!uid_range_set(&range)) goto errout;
} else {
range = fib_kuid_range_unset;
}
list_for_each_entry(rule, &ops->rules_list, list) {
if (frh->action && (frh->action != rule->action)) continue;
if (frh_get_table(frh, tb) && (frh_get_table(frh, tb) != rule->table)) continue;
if (tb[FRA_PRIORITY] && (rule->pref != nla_get_u32(tb[FRA_PRIORITY]))) continue;
if (tb[FRA_IIFNAME] && nla_strcmp(tb[FRA_IIFNAME], rule->iifname)) continue;
if (tb[FRA_OIFNAME] && nla_strcmp(tb[FRA_OIFNAME], rule->oifname)) continue;
if (tb[FRA_FWMARK] && (rule->mark != nla_get_u32(tb[FRA_FWMARK]))) continue;
if (tb[FRA_FWMASK] && (rule->mark_mask != nla_get_u32(tb[FRA_FWMASK]))) continue;
if (tb[FRA_TUN_ID] && (rule->tun_id != nla_get_be64(tb[FRA_TUN_ID]))) continue;
if (tb[FRA_L3MDEV] && (rule->l3mdev != nla_get_u8(tb[FRA_L3MDEV]))) continue;
if (uid_range_set(&range) && (!uid_eq(rule->uid_range.start, range.start) || !uid_eq(rule->uid_range.end, range.end))) continue;
if (!ops->compare(rule, frh, tb)) continue;
if (rule->flags & FIB_RULE_PERMANENT) {
err = -EPERM;
goto errout;
}
if (ops->delete) {
err = ops->delete (rule);
if (err) goto errout;
}
if (rule->tun_id) ip_tunnel_unneed_metadata();
list_del_rcu(&rule->list);
if (rule->action == FR_ACT_GOTO) {
ops->nr_goto_rules--;
if (rtnl_dereference(rule->ctarget) == NULL) ops->unresolved_rules--;
}
/*
* Check if this rule is a target to any of them. If so,
* disable them. As this operation is eventually very
* expensive, it is only performed if goto rules have
* actually been added.
*/
if (ops->nr_goto_rules > 0) {
list_for_each_entry(tmp, &ops->rules_list, list) {
if (rtnl_dereference(tmp->ctarget) == rule) {
RCU_INIT_POINTER(tmp->ctarget, NULL);
ops->unresolved_rules++;
}
}
}
notify_rule_change(RTM_DELRULE, rule, ops, nlh, NETLINK_CB(skb).portid);
fib_rule_put(rule);
flush_route_cache(ops);
rules_ops_put(ops);
return 0;
}
err = -ENOENT;
errout:
rules_ops_put(ops);
return err;
}<sep>@@
expression e1,e2,e3,e4,e5;
@@
nlmsg_parse(e1,e2,e3,e4,e5
+ , NULL
)
<|end_of_text|> | 9,298 |
--- initial
+++ final
@@ -1,17 +1,17 @@
static int inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh) {
struct net *net = sock_net(skb->sk);
struct ifaddrmsg *ifm;
struct nlattr *tb[IFA_MAX + 1];
struct in6_addr *pfx, *peer_pfx;
u32 ifa_flags;
int err;
- err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy);
+ err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy, NULL);
if (err < 0) return err;
ifm = nlmsg_data(nlh);
pfx = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer_pfx);
if (!pfx) return -EINVAL;
ifa_flags = tb[IFA_FLAGS] ? nla_get_u32(tb[IFA_FLAGS]) : ifm->ifa_flags;
/* We ignore other flags so far. */
ifa_flags &= IFA_F_MANAGETEMPADDR;
return inet6_addr_del(net, ifm->ifa_index, ifa_flags, pfx, ifm->ifa_prefixlen);
}<sep>@@
expression e1,e2,e3,e4,e5;
@@
nlmsg_parse(e1,e2,e3,e4,e5
+ , NULL
)
<|end_of_text|> | 9,276 |
--- initial
+++ final
@@ -1,28 +1,28 @@
static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh) {
struct net *net = sock_net(skb->sk);
struct nlattr *tb[IFA_MAX + 1];
struct in_device *in_dev;
struct ifaddrmsg *ifm;
struct in_ifaddr *ifa, **ifap;
int err = -EINVAL;
ASSERT_RTNL();
- err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv4_policy);
+ err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv4_policy, NULL);
if (err < 0) goto errout;
ifm = nlmsg_data(nlh);
in_dev = inetdev_by_index(net, ifm->ifa_index);
if (!in_dev) {
err = -ENODEV;
goto errout;
}
for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL; ifap = &ifa->ifa_next) {
if (tb[IFA_LOCAL] && ifa->ifa_local != nla_get_in_addr(tb[IFA_LOCAL])) continue;
if (tb[IFA_LABEL] && nla_strcmp(tb[IFA_LABEL], ifa->ifa_label)) continue;
if (tb[IFA_ADDRESS] && (ifm->ifa_prefixlen != ifa->ifa_prefixlen || !inet_ifa_match(nla_get_in_addr(tb[IFA_ADDRESS]), ifa))) continue;
if (ipv4_is_multicast(ifa->ifa_address)) ip_mc_config(net->ipv4.mc_autojoin_sk, false, ifa);
__inet_del_ifa(in_dev, ifap, 1, nlh, NETLINK_CB(skb).portid);
return 0;
}
err = -EADDRNOTAVAIL;
errout:
return err;
}<sep>@@
expression e1,e2,e3,e4,e5;
@@
nlmsg_parse(e1,e2,e3,e4,e5
+ , NULL
)
<|end_of_text|> | 9,290 |
--- initial
+++ final
@@ -1,13 +1,13 @@
static struct nfc_dev *__get_device_from_cb(struct netlink_callback *cb) {
struct nlattr **attrbuf = genl_family_attrbuf(&nfc_genl_family);
struct nfc_dev *dev;
int rc;
u32 idx;
- rc = nlmsg_parse(cb->nlh, GENL_HDRLEN + nfc_genl_family.hdrsize, attrbuf, nfc_genl_family.maxattr, nfc_genl_policy);
+ rc = nlmsg_parse(cb->nlh, GENL_HDRLEN + nfc_genl_family.hdrsize, attrbuf, nfc_genl_family.maxattr, nfc_genl_policy, NULL);
if (rc < 0) return ERR_PTR(rc);
if (!attrbuf[NFC_ATTR_DEVICE_INDEX]) return ERR_PTR(-EINVAL);
idx = nla_get_u32(attrbuf[NFC_ATTR_DEVICE_INDEX]);
dev = nfc_get_device(idx);
if (!dev) return ERR_PTR(-ENODEV);
return dev;
}<sep>@@
expression e1,e2,e3,e4,e5;
@@
nlmsg_parse(e1,e2,e3,e4,e5
+ , NULL
)
<|end_of_text|> | 9,313 |
--- initial
+++ final
@@ -1,32 +1,32 @@
static int mpls_netconf_get_devconf(struct sk_buff *in_skb, struct nlmsghdr *nlh) {
struct net *net = sock_net(in_skb->sk);
struct nlattr *tb[NETCONFA_MAX + 1];
struct netconfmsg *ncm;
struct net_device *dev;
struct mpls_dev *mdev;
struct sk_buff *skb;
int ifindex;
int err;
- err = nlmsg_parse(nlh, sizeof(*ncm), tb, NETCONFA_MAX, devconf_mpls_policy);
+ err = nlmsg_parse(nlh, sizeof(*ncm), tb, NETCONFA_MAX, devconf_mpls_policy, NULL);
if (err < 0) goto errout;
err = -EINVAL;
if (!tb[NETCONFA_IFINDEX]) goto errout;
ifindex = nla_get_s32(tb[NETCONFA_IFINDEX]);
dev = __dev_get_by_index(net, ifindex);
if (!dev) goto errout;
mdev = mpls_dev_get(dev);
if (!mdev) goto errout;
err = -ENOBUFS;
skb = nlmsg_new(mpls_netconf_msgsize_devconf(NETCONFA_ALL), GFP_KERNEL);
if (!skb) goto errout;
err = mpls_netconf_fill_devconf(skb, mdev, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq, RTM_NEWNETCONF, 0, NETCONFA_ALL);
if (err < 0) {
/* -EMSGSIZE implies BUG in mpls_netconf_msgsize_devconf() */
WARN_ON(err == -EMSGSIZE);
kfree_skb(skb);
goto errout;
}
err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
errout:
return err;
}<sep>@@
expression e1,e2,e3,e4,e5;
@@
nlmsg_parse(e1,e2,e3,e4,e5
+ , NULL
)
<|end_of_text|> | 9,281 |
--- initial
+++ final
@@ -1,41 +1,41 @@
static int ip6addrlbl_get(struct sk_buff *in_skb, struct nlmsghdr *nlh) {
struct net *net = sock_net(in_skb->sk);
struct ifaddrlblmsg *ifal;
struct nlattr *tb[IFAL_MAX + 1];
struct in6_addr *addr;
u32 lseq;
int err = 0;
struct ip6addrlbl_entry *p;
struct sk_buff *skb;
- err = nlmsg_parse(nlh, sizeof(*ifal), tb, IFAL_MAX, ifal_policy);
+ err = nlmsg_parse(nlh, sizeof(*ifal), tb, IFAL_MAX, ifal_policy, NULL);
if (err < 0) return err;
ifal = nlmsg_data(nlh);
if (ifal->ifal_family != AF_INET6 || ifal->ifal_prefixlen != 128) return -EINVAL;
if (ifal->ifal_index && !__dev_get_by_index(net, ifal->ifal_index)) return -EINVAL;
if (!tb[IFAL_ADDRESS]) return -EINVAL;
addr = nla_data(tb[IFAL_ADDRESS]);
rcu_read_lock();
p = __ipv6_addr_label(net, addr, ipv6_addr_type(addr), ifal->ifal_index);
if (p && !ip6addrlbl_hold(p)) p = NULL;
lseq = ip6addrlbl_table.seq;
rcu_read_unlock();
if (!p) {
err = -ESRCH;
goto out;
}
skb = nlmsg_new(ip6addrlbl_msgsize(), GFP_KERNEL);
if (!skb) {
ip6addrlbl_put(p);
return -ENOBUFS;
}
err = ip6addrlbl_fill(skb, p, lseq, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq, RTM_NEWADDRLABEL, 0);
ip6addrlbl_put(p);
if (err < 0) {
WARN_ON(err == -EMSGSIZE);
kfree_skb(skb);
goto out;
}
err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
out:
return err;
}<sep>@@
expression e1,e2,e3,e4,e5;
@@
nlmsg_parse(e1,e2,e3,e4,e5
+ , NULL
)
<|end_of_text|> | 9,279 |
--- initial
+++ final
@@ -1,101 +1,101 @@
static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod, u8 gwtype, void *gwtypeattr, u8 *limhops) {
struct nlattr *tb[CGW_MAX + 1];
struct cgw_frame_mod mb;
int modidx = 0;
int err = 0;
/* initialize modification & checksum data space */
memset(mod, 0, sizeof(*mod));
- err = nlmsg_parse(nlh, sizeof(struct rtcanmsg), tb, CGW_MAX, cgw_policy);
+ err = nlmsg_parse(nlh, sizeof(struct rtcanmsg), tb, CGW_MAX, cgw_policy, NULL);
if (err < 0) return err;
if (tb[CGW_LIM_HOPS]) {
*limhops = nla_get_u8(tb[CGW_LIM_HOPS]);
if (*limhops < 1 || *limhops > max_hops) return -EINVAL;
}
/* check for AND/OR/XOR/SET modifications */
if (tb[CGW_MOD_AND]) {
nla_memcpy(&mb, tb[CGW_MOD_AND], CGW_MODATTR_LEN);
canframecpy(&mod->modframe.and, &mb.cf);
mod->modtype.and = mb.modtype;
if (mb.modtype & CGW_MOD_ID) mod->modfunc[modidx++] = mod_and_id;
if (mb.modtype & CGW_MOD_DLC) mod->modfunc[modidx++] = mod_and_dlc;
if (mb.modtype & CGW_MOD_DATA) mod->modfunc[modidx++] = mod_and_data;
}
if (tb[CGW_MOD_OR]) {
nla_memcpy(&mb, tb[CGW_MOD_OR], CGW_MODATTR_LEN);
canframecpy(&mod->modframe.or, &mb.cf);
mod->modtype.or = mb.modtype;
if (mb.modtype & CGW_MOD_ID) mod->modfunc[modidx++] = mod_or_id;
if (mb.modtype & CGW_MOD_DLC) mod->modfunc[modidx++] = mod_or_dlc;
if (mb.modtype & CGW_MOD_DATA) mod->modfunc[modidx++] = mod_or_data;
}
if (tb[CGW_MOD_XOR]) {
nla_memcpy(&mb, tb[CGW_MOD_XOR], CGW_MODATTR_LEN);
canframecpy(&mod->modframe.xor, &mb.cf);
mod->modtype.xor = mb.modtype;
if (mb.modtype & CGW_MOD_ID) mod->modfunc[modidx++] = mod_xor_id;
if (mb.modtype & CGW_MOD_DLC) mod->modfunc[modidx++] = mod_xor_dlc;
if (mb.modtype & CGW_MOD_DATA) mod->modfunc[modidx++] = mod_xor_data;
}
if (tb[CGW_MOD_SET]) {
nla_memcpy(&mb, tb[CGW_MOD_SET], CGW_MODATTR_LEN);
canframecpy(&mod->modframe.set, &mb.cf);
mod->modtype.set = mb.modtype;
if (mb.modtype & CGW_MOD_ID) mod->modfunc[modidx++] = mod_set_id;
if (mb.modtype & CGW_MOD_DLC) mod->modfunc[modidx++] = mod_set_dlc;
if (mb.modtype & CGW_MOD_DATA) mod->modfunc[modidx++] = mod_set_data;
}
/* check for checksum operations after CAN frame modifications */
if (modidx) {
if (tb[CGW_CS_CRC8]) {
struct cgw_csum_crc8 *c = nla_data(tb[CGW_CS_CRC8]);
err = cgw_chk_csum_parms(c->from_idx, c->to_idx, c->result_idx);
if (err) return err;
nla_memcpy(&mod->csum.crc8, tb[CGW_CS_CRC8], CGW_CS_CRC8_LEN);
/*
* select dedicated processing function to reduce
* runtime operations in receive hot path.
*/
if (c->from_idx < 0 || c->to_idx < 0 || c->result_idx < 0)
mod->csumfunc.crc8 = cgw_csum_crc8_rel;
else if (c->from_idx <= c->to_idx)
mod->csumfunc.crc8 = cgw_csum_crc8_pos;
else
mod->csumfunc.crc8 = cgw_csum_crc8_neg;
}
if (tb[CGW_CS_XOR]) {
struct cgw_csum_xor *c = nla_data(tb[CGW_CS_XOR]);
err = cgw_chk_csum_parms(c->from_idx, c->to_idx, c->result_idx);
if (err) return err;
nla_memcpy(&mod->csum.xor, tb[CGW_CS_XOR], CGW_CS_XOR_LEN);
/*
* select dedicated processing function to reduce
* runtime operations in receive hot path.
*/
if (c->from_idx < 0 || c->to_idx < 0 || c->result_idx < 0)
mod->csumfunc.xor = cgw_csum_xor_rel;
else if (c->from_idx <= c->to_idx)
mod->csumfunc.xor = cgw_csum_xor_pos;
else
mod->csumfunc.xor = cgw_csum_xor_neg;
}
if (tb[CGW_MOD_UID]) { nla_memcpy(&mod->uid, tb[CGW_MOD_UID], sizeof(u32)); }
}
if (gwtype == CGW_TYPE_CAN_CAN) {
/* check CGW_TYPE_CAN_CAN specific attributes */
struct can_can_gw *ccgw = (struct can_can_gw *)gwtypeattr;
memset(ccgw, 0, sizeof(*ccgw));
/* check for can_filter in attributes */
if (tb[CGW_FILTER]) nla_memcpy(&ccgw->filter, tb[CGW_FILTER], sizeof(struct can_filter));
err = -ENODEV;
/* specifying two interfaces is mandatory */
if (!tb[CGW_SRC_IF] || !tb[CGW_DST_IF]) return err;
ccgw->src_idx = nla_get_u32(tb[CGW_SRC_IF]);
ccgw->dst_idx = nla_get_u32(tb[CGW_DST_IF]);
/* both indices set to 0 for flushing all routing entries */
if (!ccgw->src_idx && !ccgw->dst_idx) return 0;
/* only one index set to 0 is an error */
if (!ccgw->src_idx || !ccgw->dst_idx) return err;
}
/* add the checks for other gwtypes here */
return 0;
}<sep>@@
expression e1,e2,e3,e4,e5;
@@
nlmsg_parse(e1,e2,e3,e4,e5
+ , NULL
)
<|end_of_text|> | 9,301 |
--- initial
+++ final
@@ -1,41 +1,41 @@
static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh) {
struct net *net = sock_net(in_skb->sk);
struct ifaddrmsg *ifm;
struct nlattr *tb[IFA_MAX + 1];
struct in6_addr *addr = NULL, *peer;
struct net_device *dev = NULL;
struct inet6_ifaddr *ifa;
struct sk_buff *skb;
int err;
- err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy);
+ err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy, NULL);
if (err < 0) goto errout;
addr = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer);
if (!addr) {
err = -EINVAL;
goto errout;
}
ifm = nlmsg_data(nlh);
if (ifm->ifa_index) dev = __dev_get_by_index(net, ifm->ifa_index);
ifa = ipv6_get_ifaddr(net, addr, dev, 1);
if (!ifa) {
err = -EADDRNOTAVAIL;
goto errout;
}
skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_KERNEL);
if (!skb) {
err = -ENOBUFS;
goto errout_ifa;
}
err = inet6_fill_ifaddr(skb, ifa, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq, RTM_NEWADDR, 0);
if (err < 0) {
/* -EMSGSIZE implies BUG in inet6_ifaddr_msgsize() */
WARN_ON(err == -EMSGSIZE);
kfree_skb(skb);
goto errout_ifa;
}
err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
errout_ifa:
in6_ifa_put(ifa);
errout:
return err;
}<sep>@@
expression e1,e2,e3,e4,e5;
@@
nlmsg_parse(e1,e2,e3,e4,e5
+ , NULL
)
<|end_of_text|> | 9,277 |
--- initial
+++ final
@@ -1,27 +1,27 @@
static int route_doit(struct sk_buff *skb, struct nlmsghdr *nlh) {
struct net *net = sock_net(skb->sk);
struct nlattr *tb[RTA_MAX + 1];
struct net_device *dev;
struct rtmsg *rtm;
int err;
u8 dst;
if (!netlink_capable(skb, CAP_NET_ADMIN)) return -EPERM;
if (!netlink_capable(skb, CAP_SYS_ADMIN)) return -EPERM;
ASSERT_RTNL();
- err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_phonet_policy);
+ err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_phonet_policy, NULL);
if (err < 0) return err;
rtm = nlmsg_data(nlh);
if (rtm->rtm_table != RT_TABLE_MAIN || rtm->rtm_type != RTN_UNICAST) return -EINVAL;
if (tb[RTA_DST] == NULL || tb[RTA_OIF] == NULL) return -EINVAL;
dst = nla_get_u8(tb[RTA_DST]);
if (dst & 3) /* Phonet addresses only have 6 high-order bits */
return -EINVAL;
dev = __dev_get_by_index(net, nla_get_u32(tb[RTA_OIF]));
if (dev == NULL) return -ENODEV;
if (nlh->nlmsg_type == RTM_NEWROUTE)
err = phonet_route_add(dev, dst);
else
err = phonet_route_del(dev, dst);
if (!err) rtm_phonet_notify(nlh->nlmsg_type, dev, dst);
return err;
}<sep>@@
expression e1,e2,e3,e4,e5;
@@
nlmsg_parse(e1,e2,e3,e4,e5
+ , NULL
)
<|end_of_text|> | 9,321 |
--- initial
+++ final
@@ -1,31 +1,31 @@
static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh) {
struct net *net = sock_net(skb->sk);
struct nlattr *tb[NETNSA_MAX + 1];
struct sk_buff *msg;
struct net *peer;
int err, id;
- err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX, rtnl_net_policy);
+ err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX, rtnl_net_policy, NULL);
if (err < 0) return err;
if (tb[NETNSA_PID])
peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
else if (tb[NETNSA_FD])
peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
else
return -EINVAL;
if (IS_ERR(peer)) return PTR_ERR(peer);
msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
if (!msg) {
err = -ENOMEM;
goto out;
}
id = peernet2id(net, peer);
err = rtnl_net_fill(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0, RTM_NEWNSID, net, id);
if (err < 0) goto err_out;
err = rtnl_unicast(msg, net, NETLINK_CB(skb).portid);
goto out;
err_out:
nlmsg_free(msg);
out:
put_net(peer);
return err;
}<sep>@@
expression e1,e2,e3,e4,e5;
@@
nlmsg_parse(e1,e2,e3,e4,e5
+ , NULL
)
<|end_of_text|> | 9,309 |
--- initial
+++ final
@@ -1,25 +1,25 @@
static int dn_nl_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh) {
struct net *net = sock_net(skb->sk);
struct nlattr *tb[IFA_MAX + 1];
struct dn_dev *dn_db;
struct ifaddrmsg *ifm;
struct dn_ifaddr *ifa;
struct dn_ifaddr __rcu **ifap;
int err = -EINVAL;
if (!netlink_capable(skb, CAP_NET_ADMIN)) return -EPERM;
if (!net_eq(net, &init_net)) goto errout;
- err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, dn_ifa_policy);
+ err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, dn_ifa_policy, NULL);
if (err < 0) goto errout;
err = -ENODEV;
ifm = nlmsg_data(nlh);
if ((dn_db = dn_dev_by_index(ifm->ifa_index)) == NULL) goto errout;
err = -EADDRNOTAVAIL;
for (ifap = &dn_db->ifa_list; (ifa = rtnl_dereference(*ifap)) != NULL; ifap = &ifa->ifa_next) {
if (tb[IFA_LOCAL] && nla_memcmp(tb[IFA_LOCAL], &ifa->ifa_local, 2)) continue;
if (tb[IFA_LABEL] && nla_strcmp(tb[IFA_LABEL], ifa->ifa_label)) continue;
dn_dev_del_ifa(dn_db, ifap, 1);
return 0;
}
errout:
return err;
}<sep>@@
expression e1,e2,e3,e4,e5;
@@
nlmsg_parse(e1,e2,e3,e4,e5
+ , NULL
)
<|end_of_text|> | 9,292 |
--- initial
+++ final
@@ -1,53 +1,53 @@
static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh) {
struct net *net = sock_net(skb->sk);
struct ndmsg *ndm;
struct nlattr *tb[NDA_MAX + 1];
struct net_device *dev;
int err = -EINVAL;
__u8 *addr;
u16 vid;
if (!netlink_capable(skb, CAP_NET_ADMIN)) return -EPERM;
- err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
+ err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL, NULL);
if (err < 0) return err;
ndm = nlmsg_data(nlh);
if (ndm->ndm_ifindex == 0) {
pr_info("PF_BRIDGE: RTM_DELNEIGH with invalid ifindex\n");
return -EINVAL;
}
dev = __dev_get_by_index(net, ndm->ndm_ifindex);
if (dev == NULL) {
pr_info("PF_BRIDGE: RTM_DELNEIGH with unknown ifindex\n");
return -ENODEV;
}
if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
pr_info("PF_BRIDGE: RTM_DELNEIGH with invalid address\n");
return -EINVAL;
}
addr = nla_data(tb[NDA_LLADDR]);
err = fdb_vid_parse(tb[NDA_VLAN], &vid);
if (err) return err;
err = -EOPNOTSUPP;
/* Support fdb on master device the net/bridge default case */
if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) && (dev->priv_flags & IFF_BRIDGE_PORT)) {
struct net_device *br_dev = netdev_master_upper_dev_get(dev);
const struct net_device_ops *ops = br_dev->netdev_ops;
if (ops->ndo_fdb_del) err = ops->ndo_fdb_del(ndm, tb, dev, addr, vid);
if (err)
goto out;
else
ndm->ndm_flags &= ~NTF_MASTER;
}
/* Embedded bridge, macvlan, and any other device support */
if (ndm->ndm_flags & NTF_SELF) {
if (dev->netdev_ops->ndo_fdb_del)
err = dev->netdev_ops->ndo_fdb_del(ndm, tb, dev, addr, vid);
else
err = ndo_dflt_fdb_del(ndm, tb, dev, addr, vid);
if (!err) {
rtnl_fdb_notify(dev, addr, vid, RTM_DELNEIGH, ndm->ndm_state);
ndm->ndm_flags &= ~NTF_SELF;
}
}
out:
return err;
}<sep>@@
expression e1,e2,e3,e4,e5;
@@
nlmsg_parse(e1,e2,e3,e4,e5
+ , NULL
)
<|end_of_text|> | 9,329 |
--- initial
+++ final
@@ -1,31 +1,31 @@
static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh) {
struct net *net = sock_net(skb->sk);
struct ifinfomsg *ifm;
struct net_device *dev;
int err;
struct nlattr *tb[IFLA_MAX + 1];
char ifname[IFNAMSIZ];
- err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy);
+ err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, NULL);
if (err < 0) goto errout;
if (tb[IFLA_IFNAME])
nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
else
ifname[0] = '\0';
err = -EINVAL;
ifm = nlmsg_data(nlh);
if (ifm->ifi_index > 0)
dev = __dev_get_by_index(net, ifm->ifi_index);
else if (tb[IFLA_IFNAME])
dev = __dev_get_by_name(net, ifname);
else
goto errout;
if (dev == NULL) {
err = -ENODEV;
goto errout;
}
err = validate_linkmsg(dev, tb);
if (err < 0) goto errout;
err = do_setlink(skb, dev, ifm, tb, ifname, 0);
errout:
return err;
}<sep>@@
expression e1,e2,e3,e4,e5;
@@
nlmsg_parse(e1,e2,e3,e4,e5
+ , NULL
)
<|end_of_text|> | 9,332 |
--- initial
+++ final
@@ -1,32 +1,32 @@
static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh) {
struct net *net = sock_net(skb->sk);
struct ifinfomsg *ifm;
char ifname[IFNAMSIZ];
struct nlattr *tb[IFLA_MAX + 1];
struct net_device *dev = NULL;
struct sk_buff *nskb;
int err;
u32 ext_filter_mask = 0;
- err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy);
+ err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy, NULL);
if (err < 0) return err;
if (tb[IFLA_IFNAME]) nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
if (tb[IFLA_EXT_MASK]) ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
ifm = nlmsg_data(nlh);
if (ifm->ifi_index > 0)
dev = __dev_get_by_index(net, ifm->ifi_index);
else if (tb[IFLA_IFNAME])
dev = __dev_get_by_name(net, ifname);
else
return -EINVAL;
if (dev == NULL) return -ENODEV;
nskb = nlmsg_new(if_nlmsg_size(dev, ext_filter_mask), GFP_KERNEL);
if (nskb == NULL) return -ENOBUFS;
err = rtnl_fill_ifinfo(nskb, dev, RTM_NEWLINK, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0, 0, ext_filter_mask);
if (err < 0) {
/* -EMSGSIZE implies BUG in if_nlmsg_size */
WARN_ON(err == -EMSGSIZE);
kfree_skb(nskb);
} else
err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
return err;
}<sep>@@
expression e1,e2,e3,e4,e5;
@@
nlmsg_parse(e1,e2,e3,e4,e5
+ , NULL
)
<|end_of_text|> | 9,331 |
--- initial
+++ final
@@ -1,65 +1,65 @@
static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh) {
struct net *net = sock_net(in_skb->sk);
struct nlattr *tb[RTA_MAX + 1];
struct rt6_info *rt;
struct sk_buff *skb;
struct rtmsg *rtm;
struct flowi6 fl6;
int err, iif = 0, oif = 0;
- err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy);
+ err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy, NULL);
if (err < 0) goto errout;
err = -EINVAL;
memset(&fl6, 0, sizeof(fl6));
rtm = nlmsg_data(nlh);
fl6.flowlabel = ip6_make_flowinfo(rtm->rtm_tos, 0);
if (tb[RTA_SRC]) {
if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr)) goto errout;
fl6.saddr = *(struct in6_addr *)nla_data(tb[RTA_SRC]);
}
if (tb[RTA_DST]) {
if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr)) goto errout;
fl6.daddr = *(struct in6_addr *)nla_data(tb[RTA_DST]);
}
if (tb[RTA_IIF]) iif = nla_get_u32(tb[RTA_IIF]);
if (tb[RTA_OIF]) oif = nla_get_u32(tb[RTA_OIF]);
if (tb[RTA_MARK]) fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]);
if (tb[RTA_UID])
fl6.flowi6_uid = make_kuid(current_user_ns(), nla_get_u32(tb[RTA_UID]));
else
fl6.flowi6_uid = iif ? INVALID_UID : current_uid();
if (iif) {
struct net_device *dev;
int flags = 0;
dev = __dev_get_by_index(net, iif);
if (!dev) {
err = -ENODEV;
goto errout;
}
fl6.flowi6_iif = iif;
if (!ipv6_addr_any(&fl6.saddr)) flags |= RT6_LOOKUP_F_HAS_SADDR;
rt = (struct rt6_info *)ip6_route_input_lookup(net, dev, &fl6, flags);
} else {
fl6.flowi6_oif = oif;
rt = (struct rt6_info *)ip6_route_output(net, NULL, &fl6);
}
if (rt == net->ipv6.ip6_null_entry) {
err = rt->dst.error;
ip6_rt_put(rt);
goto errout;
}
skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
if (!skb) {
ip6_rt_put(rt);
err = -ENOBUFS;
goto errout;
}
skb_dst_set(skb, &rt->dst);
err = rt6_fill_node(net, skb, rt, &fl6.daddr, &fl6.saddr, iif, RTM_NEWROUTE, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq, 0);
if (err < 0) {
kfree_skb(skb);
goto errout;
}
err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
errout:
return err;
}<sep>@@
expression e1,e2,e3,e4,e5;
@@
nlmsg_parse(e1,e2,e3,e4,e5
+ , NULL
)
<|end_of_text|> | 9,323 |
--- initial
+++ final
@@ -1,12 +1,12 @@
bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv, unsigned int pin) {
unsigned int size;
- if (IS_BROXTON(dev_priv))
+ if (IS_GEN9_LP(dev_priv))
size = ARRAY_SIZE(gmbus_pins_bxt);
else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
size = ARRAY_SIZE(gmbus_pins_skl);
else if (IS_BROADWELL(dev_priv))
size = ARRAY_SIZE(gmbus_pins_bdw);
else
size = ARRAY_SIZE(gmbus_pins);
return pin < size && i915_mmio_reg_valid(get_gmbus_pin(dev_priv, pin)->reg);
}<sep>@@
expression e;
@@
- IS_BROXTON
+ IS_GEN9_LP
(e)
<|end_of_text|> | 8,748 |
--- initial
+++ final
@@ -1,5 +1,5 @@
static bool skl_needs_memory_bw_wa(struct intel_atomic_state *state) {
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv) || IS_KABYLAKE(dev_priv)) return true;
+ if (IS_SKYLAKE(dev_priv) || IS_GEN9_LP(dev_priv) || IS_KABYLAKE(dev_priv)) return true;
return false;
}<sep>@@
expression e;
@@
- IS_BROXTON
+ IS_GEN9_LP
(e)
<|end_of_text|> | 8,754 |
--- initial
+++ final
@@ -1,34 +1,34 @@
static void gen9_sseu_device_status(struct drm_i915_private *dev_priv, struct sseu_dev_info *sseu) {
int s_max = 3, ss_max = 4;
int s, ss;
u32 s_reg[s_max], eu_reg[2 * s_max], eu_mask[2];
/* BXT has a single slice and at most 3 subslices. */
- if (IS_BROXTON(dev_priv)) {
+ if (IS_GEN9_LP(dev_priv)) {
s_max = 1;
ss_max = 3;
}
for (s = 0; s < s_max; s++) {
s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
eu_reg[2 * s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
eu_reg[2 * s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
}
eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK | GEN9_PGCTL_SSA_EU19_ACK | GEN9_PGCTL_SSA_EU210_ACK | GEN9_PGCTL_SSA_EU311_ACK;
eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK | GEN9_PGCTL_SSB_EU19_ACK | GEN9_PGCTL_SSB_EU210_ACK | GEN9_PGCTL_SSB_EU311_ACK;
for (s = 0; s < s_max; s++) {
if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0) /* skip disabled slice */
continue;
sseu->slice_mask |= BIT(s);
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) sseu->subslice_mask = INTEL_INFO(dev_priv)->sseu.subslice_mask;
for (ss = 0; ss < ss_max; ss++) {
unsigned int eu_cnt;
- if (IS_BROXTON(dev_priv)) {
+ if (IS_GEN9_LP(dev_priv)) {
if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss)))) /* skip disabled subslice */
continue;
sseu->subslice_mask |= BIT(ss);
}
eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] & eu_mask[ss % 2]);
sseu->eu_total += eu_cnt;
sseu->eu_per_subslice = max_t(unsigned int, sseu->eu_per_subslice, eu_cnt);
}
}
}<sep>@@
expression e;
@@
- IS_BROXTON
+ IS_GEN9_LP
(e)
<|end_of_text|> | 8,696 |
--- initial
+++ final
@@ -1,6 +1,6 @@
u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp, struct intel_crtc_state *config) {
- if (IS_BROXTON(to_i915(encoder->base.dev)))
+ if (IS_GEN9_LP(to_i915(encoder->base.dev)))
return bxt_dsi_get_pclk(encoder, pipe_bpp, config);
else
return vlv_dsi_get_pclk(encoder, pipe_bpp, config);
}<sep>@@
expression e;
@@
- IS_BROXTON
+ IS_GEN9_LP
(e)
<|end_of_text|> | 8,740 |
--- initial
+++ final
@@ -1,15 +1,15 @@
static void gtt_write_workarounds(struct drm_i915_private *dev_priv) {
/* This function is for gtt related workarounds. This function is
* called on driver load and after a GPU reset, so you can place
* workarounds here even if they get overwritten by GPU reset.
*/
/* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt */
if (IS_BROADWELL(dev_priv))
I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
else if (IS_CHERRYVIEW(dev_priv))
I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
else if (IS_SKYLAKE(dev_priv))
I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
- else if (IS_BROXTON(dev_priv))
+ else if (IS_GEN9_LP(dev_priv))
I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
}<sep>@@
expression e;
@@
- IS_BROXTON
+ IS_GEN9_LP
(e)
<|end_of_text|> | 8,701 |
--- initial
+++ final
@@ -1,10 +1,10 @@
static void intel_dsi_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) {
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 pclk;
DRM_DEBUG_KMS("\n");
- if (IS_BROXTON(dev_priv)) bxt_dsi_get_pipe_config(encoder, pipe_config);
+ if (IS_GEN9_LP(dev_priv)) bxt_dsi_get_pipe_config(encoder, pipe_config);
pclk = intel_dsi_get_pclk(encoder, pipe_config->pipe_bpp, pipe_config);
if (!pclk) return;
pipe_config->base.adjusted_mode.crtc_clock = pclk;
pipe_config->port_clock = pclk;
}<sep>@@
expression e;
@@
- IS_BROXTON
+ IS_GEN9_LP
(e)
<|end_of_text|> | 8,732 |
--- initial
+++ final
@@ -1,45 +1,45 @@
bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe) {
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum port port = intel_ddi_get_encoder_port(encoder);
enum intel_display_power_domain power_domain;
u32 tmp;
int i;
bool ret;
power_domain = intel_display_port_power_domain(encoder);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) return false;
ret = false;
tmp = I915_READ(DDI_BUF_CTL(port));
if (!(tmp & DDI_BUF_CTL_ENABLE)) goto out;
if (port == PORT_A) {
tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
case TRANS_DDI_EDP_INPUT_A_ON:
case TRANS_DDI_EDP_INPUT_A_ONOFF: *pipe = PIPE_A; break;
case TRANS_DDI_EDP_INPUT_B_ONOFF: *pipe = PIPE_B; break;
case TRANS_DDI_EDP_INPUT_C_ONOFF: *pipe = PIPE_C; break;
}
ret = true;
goto out;
}
for (i = TRANSCODER_A; i <= TRANSCODER_C; i++) {
tmp = I915_READ(TRANS_DDI_FUNC_CTL(i));
if ((tmp & TRANS_DDI_PORT_MASK) == TRANS_DDI_SELECT_PORT(port)) {
if ((tmp & TRANS_DDI_MODE_SELECT_MASK) == TRANS_DDI_MODE_SELECT_DP_MST) goto out;
*pipe = i;
ret = true;
goto out;
}
}
DRM_DEBUG_KMS("No pipe for ddi port %c found\n", port_name(port));
out:
- if (ret && IS_BROXTON(dev_priv)) {
+ if (ret && IS_GEN9_LP(dev_priv)) {
tmp = I915_READ(BXT_PHY_CTL(port));
if ((tmp & (BXT_PHY_LANE_POWERDOWN_ACK | BXT_PHY_LANE_ENABLED)) != BXT_PHY_LANE_ENABLED)
DRM_ERROR("Port %c enabled but PHY powered down? "
"(PHY_CTL %08x)\n",
port_name(port), tmp);
}
intel_display_power_put(dev_priv, power_domain);
return ret;
}<sep>@@
expression e;
@@
- IS_BROXTON
+ IS_GEN9_LP
(e)
<|end_of_text|> | 8,712 |
--- initial
+++ final
@@ -1,15 +1,15 @@
uint32_t ddi_signal_levels(struct intel_dp *intel_dp) {
struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
struct intel_encoder *encoder = &dport->base;
uint8_t train_set = intel_dp->train_set[0];
int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | DP_TRAIN_PRE_EMPHASIS_MASK);
enum port port = dport->port;
uint32_t level;
level = translate_signal_level(signal_levels);
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
skl_ddi_set_iboost(encoder, level);
- else if (IS_BROXTON(dev_priv))
+ else if (IS_GEN9_LP(dev_priv))
bxt_ddi_vswing_sequence(dev_priv, level, port, encoder->type);
return DDI_BUF_TRANS_SELECT(level);
}<sep>@@
expression e;
@@
- IS_BROXTON
+ IS_GEN9_LP
(e)
<|end_of_text|> | 8,708 |
--- initial
+++ final
@@ -1,40 +1,40 @@
static void intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev, struct intel_dp *intel_dp) {
struct drm_i915_private *dev_priv = to_i915(dev);
u32 pp_on, pp_off, pp_div, port_sel = 0;
int div = dev_priv->rawclk_freq / 1000;
struct pps_registers regs;
enum port port = dp_to_dig_port(intel_dp)->port;
const struct edp_power_seq *seq = &intel_dp->pps_delays;
lockdep_assert_held(&dev_priv->pps_mutex);
intel_pps_get_registers(dev_priv, intel_dp, &regs);
pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) | (seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) | (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
/* Compute the divisor for the pp clock, simply match the Bspec
* formula. */
- if (IS_BROXTON(dev_priv)) {
+ if (IS_GEN9_LP(dev_priv)) {
pp_div = I915_READ(regs.pp_ctrl);
pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000) << BXT_POWER_CYCLE_DELAY_SHIFT);
} else {
pp_div = ((100 * div) / 2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000) << PANEL_POWER_CYCLE_DELAY_SHIFT);
}
/* Haswell doesn't have any port selection bits for the panel
* power sequencer any more. */
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
port_sel = PANEL_PORT_SELECT_VLV(port);
} else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
if (port == PORT_A)
port_sel = PANEL_PORT_SELECT_DPA;
else
port_sel = PANEL_PORT_SELECT_DPD;
}
pp_on |= port_sel;
I915_WRITE(regs.pp_on, pp_on);
I915_WRITE(regs.pp_off, pp_off);
- if (IS_BROXTON(dev_priv))
+ if (IS_GEN9_LP(dev_priv))
I915_WRITE(regs.pp_ctrl, pp_div);
else
I915_WRITE(regs.pp_div, pp_div);
- DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n", I915_READ(regs.pp_on), I915_READ(regs.pp_off), IS_BROXTON(dev_priv) ? (I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK) : I915_READ(regs.pp_div));
+ DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n", I915_READ(regs.pp_on), I915_READ(regs.pp_off), IS_GEN9_LP(dev_priv) ? (I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK) : I915_READ(regs.pp_div));
}<sep>@@
expression e;
@@
- IS_BROXTON
+ IS_GEN9_LP
(e)
<|end_of_text|> | 8,721 |
--- initial
+++ final
@@ -1,29 +1,29 @@
int intel_power_domains_init(struct drm_i915_private *dev_priv) {
struct i915_power_domains *power_domains = &dev_priv->power_domains;
i915.disable_power_well = sanitize_disable_power_well_option(dev_priv, i915.disable_power_well);
dev_priv->csr.allowed_dc_mask = get_allowed_dc_mask(dev_priv, i915.enable_dc);
BUILD_BUG_ON(POWER_DOMAIN_NUM > 31);
mutex_init(&power_domains->lock);
/*
* The enabling order will be from lower to higher indexed wells,
* the disabling order is reversed.
*/
if (IS_HASWELL(dev_priv)) {
set_power_wells(power_domains, hsw_power_wells);
} else if (IS_BROADWELL(dev_priv)) {
set_power_wells(power_domains, bdw_power_wells);
} else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
set_power_wells(power_domains, skl_power_wells);
- } else if (IS_BROXTON(dev_priv)) {
+ } else if (IS_GEN9_LP(dev_priv)) {
set_power_wells(power_domains, bxt_power_wells);
} else if (IS_GEMINILAKE(dev_priv)) {
set_power_wells(power_domains, glk_power_wells);
} else if (IS_CHERRYVIEW(dev_priv)) {
set_power_wells(power_domains, chv_power_wells);
} else if (IS_VALLEYVIEW(dev_priv)) {
set_power_wells(power_domains, vlv_power_wells);
} else {
set_power_wells(power_domains, i9xx_always_on_power_well);
}
return 0;
}<sep>@@
expression e;
@@
- IS_BROXTON
+ IS_GEN9_LP
(e)
<|end_of_text|> | 8,757 |
--- initial
+++ final
@@ -1,38 +1,38 @@
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv) {
if (IS_SKYLAKE(dev_priv))
dev_priv->display.init_clock_gating = skylake_init_clock_gating;
else if (IS_KABYLAKE(dev_priv))
dev_priv->display.init_clock_gating = kabylake_init_clock_gating;
- else if (IS_BROXTON(dev_priv))
+ else if (IS_GEN9_LP(dev_priv))
dev_priv->display.init_clock_gating = bxt_init_clock_gating;
else if (IS_BROADWELL(dev_priv))
dev_priv->display.init_clock_gating = broadwell_init_clock_gating;
else if (IS_CHERRYVIEW(dev_priv))
dev_priv->display.init_clock_gating = cherryview_init_clock_gating;
else if (IS_HASWELL(dev_priv))
dev_priv->display.init_clock_gating = haswell_init_clock_gating;
else if (IS_IVYBRIDGE(dev_priv))
dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
else if (IS_VALLEYVIEW(dev_priv))
dev_priv->display.init_clock_gating = valleyview_init_clock_gating;
else if (IS_GEN6(dev_priv))
dev_priv->display.init_clock_gating = gen6_init_clock_gating;
else if (IS_GEN5(dev_priv))
dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
else if (IS_G4X(dev_priv))
dev_priv->display.init_clock_gating = g4x_init_clock_gating;
else if (IS_CRESTLINE(dev_priv))
dev_priv->display.init_clock_gating = crestline_init_clock_gating;
else if (IS_BROADWATER(dev_priv))
dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
else if (IS_GEN3(dev_priv))
dev_priv->display.init_clock_gating = gen3_init_clock_gating;
else if (IS_I85X(dev_priv) || IS_I865G(dev_priv))
dev_priv->display.init_clock_gating = i85x_init_clock_gating;
else if (IS_GEN2(dev_priv))
dev_priv->display.init_clock_gating = i830_init_clock_gating;
else {
MISSING_CASE(INTEL_DEVID(dev_priv));
dev_priv->display.init_clock_gating = nop_init_clock_gating;
}
}<sep>@@
expression e;
@@
- IS_BROXTON
+ IS_GEN9_LP
(e)
<|end_of_text|> | 8,752 |
--- initial
+++ final
@@ -1,27 +1,27 @@
static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) {
uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
uint32_t de_pipe_enables;
u32 de_port_masked = GEN8_AUX_CHANNEL_A;
u32 de_port_enables;
u32 de_misc_masked = GEN8_DE_MISC_GSE;
enum pipe pipe;
if (INTEL_INFO(dev_priv)->gen >= 9) {
de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE | GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | GEN9_AUX_CHANNEL_D;
- if (IS_BROXTON(dev_priv)) de_port_masked |= BXT_DE_PORT_GMBUS;
+ if (IS_GEN9_LP(dev_priv)) de_port_masked |= BXT_DE_PORT_GMBUS;
} else {
de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE | GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
}
de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
de_port_enables = de_port_masked;
- if (IS_BROXTON(dev_priv))
+ if (IS_GEN9_LP(dev_priv))
de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
else if (IS_BROADWELL(dev_priv))
de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
for_each_pipe(dev_priv, pipe) if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE(pipe))) GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, dev_priv->de_irq_mask[pipe], de_pipe_enables);
GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
GEN5_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
}<sep>@@
expression e;
@@
- IS_BROXTON
+ IS_GEN9_LP
(e)
<|end_of_text|> | 8,705 |
--- initial
+++ final
@@ -1,30 +1,30 @@
static void intel_dsi_port_enable(struct intel_encoder *encoder) {
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
u32 temp;
temp = I915_READ(VLV_CHICKEN_3);
temp &= ~PIXEL_OVERLAP_CNT_MASK | intel_dsi->pixel_overlap << PIXEL_OVERLAP_CNT_SHIFT;
I915_WRITE(VLV_CHICKEN_3, temp);
}
for_each_dsi_port(port, intel_dsi->ports) {
- i915_reg_t port_ctrl = IS_BROXTON(dev_priv) ? BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
+ i915_reg_t port_ctrl = IS_GEN9_LP(dev_priv) ? BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
u32 temp;
temp = I915_READ(port_ctrl);
temp &= ~LANE_CONFIGURATION_MASK;
temp &= ~DUAL_LINK_MODE_MASK;
if (intel_dsi->ports == (BIT(PORT_A) | BIT(PORT_C))) {
temp |= (intel_dsi->dual_link - 1) << DUAL_LINK_MODE_SHIFT;
- if (IS_BROXTON(dev_priv))
+ if (IS_GEN9_LP(dev_priv))
temp |= LANE_CONFIGURATION_DUAL_LINK_A;
else
temp |= intel_crtc->pipe ? LANE_CONFIGURATION_DUAL_LINK_B : LANE_CONFIGURATION_DUAL_LINK_A;
}
/* assert ip_tg_enable signal */
I915_WRITE(port_ctrl, temp | DPI_ENABLE);
POSTING_READ(port_ctrl);
}
}<sep>@@
expression e;
@@
- IS_BROXTON
+ IS_GEN9_LP
(e)
<|end_of_text|> | 8,736 |
--- initial
+++ final
@@ -1,9 +1,9 @@
void intel_ddi_clock_get(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) {
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
if (INTEL_GEN(dev_priv) <= 8)
hsw_ddi_clock_get(encoder, pipe_config);
else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
skl_ddi_clock_get(encoder, pipe_config);
- else if (IS_BROXTON(dev_priv))
+ else if (IS_GEN9_LP(dev_priv))
bxt_ddi_clock_get(encoder, pipe_config);
}<sep>@@
expression e;
@@
- IS_BROXTON
+ IS_GEN9_LP
(e)
<|end_of_text|> | 8,709 |
--- initial
+++ final
@@ -1,30 +1,30 @@
static bool intel_dsi_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state) {
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi, base);
struct intel_connector *intel_connector = intel_dsi->attached_connector;
struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
const struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
int ret;
DRM_DEBUG_KMS("\n");
if (fixed_mode) {
intel_fixed_panel_mode(fixed_mode, adjusted_mode);
if (HAS_GMCH_DISPLAY(dev_priv))
intel_gmch_panel_fitting(crtc, pipe_config, intel_connector->panel.fitting_mode);
else
intel_pch_panel_fitting(crtc, pipe_config, intel_connector->panel.fitting_mode);
}
/* DSI uses short packets for sync events, so clear mode flags for DSI */
adjusted_mode->flags = 0;
- if (IS_BROXTON(dev_priv)) {
+ if (IS_GEN9_LP(dev_priv)) {
/* Dual link goes to DSI transcoder A. */
if (intel_dsi->ports == BIT(PORT_C))
pipe_config->cpu_transcoder = TRANSCODER_DSI_C;
else
pipe_config->cpu_transcoder = TRANSCODER_DSI_A;
}
ret = intel_compute_dsi_pll(encoder, pipe_config);
if (ret) return false;
pipe_config->clock_set = true;
return true;
}<sep>@@
expression e;
@@
- IS_BROXTON
+ IS_GEN9_LP
(e)
<|end_of_text|> | 8,730 |
--- initial
+++ final
@@ -1,34 +1,34 @@
static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv, enum port port) {
const struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
u8 ddc_pin;
if (info->alternate_ddc_pin) {
DRM_DEBUG_KMS("Using DDC pin 0x%x for port %c (VBT)\n", info->alternate_ddc_pin, port_name(port));
return info->alternate_ddc_pin;
}
switch (port) {
case PORT_B:
- if (IS_BROXTON(dev_priv))
+ if (IS_GEN9_LP(dev_priv))
ddc_pin = GMBUS_PIN_1_BXT;
else
ddc_pin = GMBUS_PIN_DPB;
break;
case PORT_C:
- if (IS_BROXTON(dev_priv))
+ if (IS_GEN9_LP(dev_priv))
ddc_pin = GMBUS_PIN_2_BXT;
else
ddc_pin = GMBUS_PIN_DPC;
break;
case PORT_D:
if (IS_CHERRYVIEW(dev_priv))
ddc_pin = GMBUS_PIN_DPD_CHV;
else
ddc_pin = GMBUS_PIN_DPD;
break;
default:
MISSING_CASE(port);
ddc_pin = GMBUS_PIN_DPB;
break;
}
DRM_DEBUG_KMS("Using DDC pin 0x%x for port %c (platform default)\n", ddc_pin, port_name(port));
return ddc_pin;
}<sep>@@
expression e;
@@
- IS_BROXTON
+ IS_GEN9_LP
(e)
<|end_of_text|> | 8,746 |
--- initial
+++ final
@@ -1,35 +1,35 @@
static int gen8_gmch_probe(struct i915_ggtt *ggtt) {
struct drm_i915_private *dev_priv = ggtt->base.i915;
struct pci_dev *pdev = dev_priv->drm.pdev;
unsigned int size;
u16 snb_gmch_ctl;
/* TODO: We're not aware of mappable constraints on gen8 yet */
ggtt->mappable_base = pci_resource_start(pdev, 2);
ggtt->mappable_end = pci_resource_len(pdev, 2);
if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(39))) pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39));
pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
if (INTEL_GEN(dev_priv) >= 9) {
ggtt->stolen_size = gen9_get_stolen_size(snb_gmch_ctl);
size = gen8_get_total_gtt_size(snb_gmch_ctl);
} else if (IS_CHERRYVIEW(dev_priv)) {
ggtt->stolen_size = chv_get_stolen_size(snb_gmch_ctl);
size = chv_get_total_gtt_size(snb_gmch_ctl);
} else {
ggtt->stolen_size = gen8_get_stolen_size(snb_gmch_ctl);
size = gen8_get_total_gtt_size(snb_gmch_ctl);
}
ggtt->base.total = (size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
- if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv))
+ if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
chv_setup_private_ppat(dev_priv);
else
bdw_setup_private_ppat(dev_priv);
ggtt->base.cleanup = gen6_gmch_remove;
ggtt->base.bind_vma = ggtt_bind_vma;
ggtt->base.unbind_vma = ggtt_unbind_vma;
ggtt->base.insert_page = gen8_ggtt_insert_page;
ggtt->base.clear_range = nop_clear_range;
if (!USES_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv)) ggtt->base.clear_range = gen8_ggtt_clear_range;
ggtt->base.insert_entries = gen8_ggtt_insert_entries;
if (IS_CHERRYVIEW(dev_priv)) ggtt->base.insert_entries = gen8_ggtt_insert_entries__BKL;
return ggtt_probe_common(ggtt, size);
}<sep>@@
expression e;
@@
- IS_BROXTON
+ IS_GEN9_LP
(e)
<|end_of_text|> | 8,699 |
--- initial
+++ final
@@ -1,32 +1,32 @@
struct intel_shared_dpll *intel_ddi_get_link_dpll(struct intel_dp *intel_dp, int clock) {
struct intel_connector *connector = intel_dp->attached_connector;
struct intel_encoder *encoder = connector->encoder;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct intel_shared_dpll *pll = NULL;
struct intel_shared_dpll_config tmp_pll_config;
enum intel_dpll_id dpll_id;
- if (IS_BROXTON(dev_priv)) {
+ if (IS_GEN9_LP(dev_priv)) {
dpll_id = (enum intel_dpll_id)dig_port->port;
/*
* Select the required PLL. This works for platforms where
* there is no shared DPLL.
*/
pll = &dev_priv->shared_dplls[dpll_id];
if (WARN_ON(pll->active_mask)) {
DRM_ERROR("Shared DPLL in use. active_mask:%x\n", pll->active_mask);
return NULL;
}
tmp_pll_config = pll->config;
if (!bxt_ddi_dp_set_dpll_hw_state(clock, &pll->config.hw_state)) {
DRM_ERROR("Could not setup DPLL\n");
pll->config = tmp_pll_config;
return NULL;
}
} else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
pll = skl_find_link_pll(dev_priv, clock);
} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
pll = hsw_ddi_dp_get_dpll(encoder, clock);
}
return pll;
}<sep>@@
expression e;
@@
- IS_BROXTON
+ IS_GEN9_LP
(e)
<|end_of_text|> | 8,713 |
--- initial
+++ final
@@ -1,10 +1,10 @@
static enum drm_mode_status hdmi_port_clock_valid(struct intel_hdmi *hdmi, int clock, bool respect_downstream_limits) {
struct drm_i915_private *dev_priv = to_i915(intel_hdmi_to_dev(hdmi));
if (clock < 25000) return MODE_CLOCK_LOW;
if (clock > hdmi_port_clock_limit(hdmi, respect_downstream_limits)) return MODE_CLOCK_HIGH;
/* BXT DPLL can't generate 223-240 MHz */
- if (IS_BROXTON(dev_priv) && clock > 223333 && clock < 240000) return MODE_CLOCK_RANGE;
+ if (IS_GEN9_LP(dev_priv) && clock > 223333 && clock < 240000) return MODE_CLOCK_RANGE;
/* CHV DPLL can't generate 216-240 MHz */
if (IS_CHERRYVIEW(dev_priv) && clock > 216000 && clock < 240000) return MODE_CLOCK_RANGE;
return MODE_OK;
}<sep>@@
expression e;
@@
- IS_BROXTON
+ IS_GEN9_LP
(e)
<|end_of_text|> | 8,745 |
--- initial
+++ final
@@ -1,100 +1,100 @@
static irqreturn_t gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) {
irqreturn_t ret = IRQ_NONE;
u32 iir;
enum pipe pipe;
if (master_ctl & GEN8_DE_MISC_IRQ) {
iir = I915_READ(GEN8_DE_MISC_IIR);
if (iir) {
I915_WRITE(GEN8_DE_MISC_IIR, iir);
ret = IRQ_HANDLED;
if (iir & GEN8_DE_MISC_GSE)
intel_opregion_asle_intr(dev_priv);
else
DRM_ERROR("Unexpected DE Misc interrupt\n");
} else
DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
}
if (master_ctl & GEN8_DE_PORT_IRQ) {
iir = I915_READ(GEN8_DE_PORT_IIR);
if (iir) {
u32 tmp_mask;
bool found = false;
I915_WRITE(GEN8_DE_PORT_IIR, iir);
ret = IRQ_HANDLED;
tmp_mask = GEN8_AUX_CHANNEL_A;
if (INTEL_INFO(dev_priv)->gen >= 9) tmp_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | GEN9_AUX_CHANNEL_D;
if (iir & tmp_mask) {
dp_aux_irq_handler(dev_priv);
found = true;
}
- if (IS_BROXTON(dev_priv)) {
+ if (IS_GEN9_LP(dev_priv)) {
tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
if (tmp_mask) {
bxt_hpd_irq_handler(dev_priv, tmp_mask, hpd_bxt);
found = true;
}
} else if (IS_BROADWELL(dev_priv)) {
tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
if (tmp_mask) {
ilk_hpd_irq_handler(dev_priv, tmp_mask, hpd_bdw);
found = true;
}
}
- if (IS_BROXTON(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
+ if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
gmbus_irq_handler(dev_priv);
found = true;
}
if (!found) DRM_ERROR("Unexpected DE Port interrupt\n");
} else
DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
}
for_each_pipe(dev_priv, pipe) {
u32 flip_done, fault_errors;
if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) continue;
iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
if (!iir) {
DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
continue;
}
ret = IRQ_HANDLED;
I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
if (iir & GEN8_PIPE_VBLANK && intel_pipe_handle_vblank(dev_priv, pipe)) intel_check_page_flip(dev_priv, pipe);
flip_done = iir;
if (INTEL_INFO(dev_priv)->gen >= 9)
flip_done &= GEN9_PIPE_PLANE1_FLIP_DONE;
else
flip_done &= GEN8_PIPE_PRIMARY_FLIP_DONE;
if (flip_done) intel_finish_page_flip_cs(dev_priv, pipe);
if (iir & GEN8_PIPE_CDCLK_CRC_DONE) hsw_pipe_crc_irq_handler(dev_priv, pipe);
if (iir & GEN8_PIPE_FIFO_UNDERRUN) intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
fault_errors = iir;
if (INTEL_INFO(dev_priv)->gen >= 9)
fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
else
fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
if (fault_errors) DRM_ERROR("Fault errors on pipe %c: 0x%08x\n", pipe_name(pipe), fault_errors);
}
if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) && master_ctl & GEN8_DE_PCH_IRQ) {
/*
* FIXME(BDW): Assume for now that the new interrupt handling
* scheme also closed the SDE interrupt handling race we've seen
* on older pch-split platforms. But this needs testing.
*/
iir = I915_READ(SDEIIR);
if (iir) {
I915_WRITE(SDEIIR, iir);
ret = IRQ_HANDLED;
if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv))
spt_irq_handler(dev_priv, iir);
else
cpt_irq_handler(dev_priv, iir);
} else {
/*
* Like on previous PCH there seems to be something
* fishy going on with forwarding PCH interrupts.
*/
DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
}
}
return ret;
}<sep>@@
expression e;
@@
- IS_BROXTON
+ IS_GEN9_LP
(e)
<|end_of_text|> | 8,704 |
--- initial
+++ final
@@ -1,41 +1,41 @@
void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv) {
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct drm_i915_gem_object *obj, *on;
i915_check_and_clear_faults(dev_priv);
/* First fill our portion of the GTT with scratch pages */
ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total);
ggtt->base.closed = true; /* skip rewriting PTE on VMA unbind */
/* clflush objects bound into the GGTT and rebind them. */
list_for_each_entry_safe(obj, on, &dev_priv->mm.bound_list, global_link) {
bool ggtt_bound = false;
struct i915_vma *vma;
list_for_each_entry(vma, &obj->vma_list, obj_link) {
if (vma->vm != &ggtt->base) continue;
if (!i915_vma_unbind(vma)) continue;
WARN_ON(i915_vma_bind(vma, obj->cache_level, PIN_UPDATE));
ggtt_bound = true;
}
if (ggtt_bound) WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
}
ggtt->base.closed = false;
if (INTEL_GEN(dev_priv) >= 8) {
- if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv))
+ if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
chv_setup_private_ppat(dev_priv);
else
bdw_setup_private_ppat(dev_priv);
return;
}
if (USES_PPGTT(dev_priv)) {
struct i915_address_space *vm;
list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
/* TODO: Perhaps it shouldn't be gen6 specific */
struct i915_hw_ppgtt *ppgtt;
if (i915_is_ggtt(vm))
ppgtt = dev_priv->mm.aliasing_ppgtt;
else
ppgtt = i915_vm_to_ppgtt(vm);
gen6_write_page_range(dev_priv, &ppgtt->pd, 0, ppgtt->base.total);
}
}
i915_ggtt_flush(dev_priv);
}<sep>@@
expression e;
@@
- IS_BROXTON
+ IS_GEN9_LP
(e)
<|end_of_text|> | 8,702 |
--- initial
+++ final
@@ -1,28 +1,28 @@
static void intel_dsi_clear_device_ready(struct intel_encoder *encoder) {
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
DRM_DEBUG_KMS("\n");
for_each_dsi_port(port, intel_dsi->ports) {
/* Common bit for both MIPI Port A & MIPI Port C on VLV/CHV */
- i915_reg_t port_ctrl = IS_BROXTON(dev_priv) ? BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(PORT_A);
+ i915_reg_t port_ctrl = IS_GEN9_LP(dev_priv) ? BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(PORT_A);
u32 val;
I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY | ULPS_STATE_ENTER);
usleep_range(2000, 2500);
I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY | ULPS_STATE_EXIT);
usleep_range(2000, 2500);
I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY | ULPS_STATE_ENTER);
usleep_range(2000, 2500);
/* Wait till Clock lanes are in LP-00 state for MIPI Port A
* only. MIPI Port C has no similar bit for checking
*/
if (intel_wait_for_register(dev_priv, port_ctrl, AFE_LATCHOUT, 0, 30)) DRM_ERROR("DSI LP not going Low\n");
/* Disable MIPI PHY transparent latch */
val = I915_READ(port_ctrl);
I915_WRITE(port_ctrl, val & ~LP_OUTPUT_HOLD);
usleep_range(1000, 1500);
I915_WRITE(MIPI_DEVICE_READY(port), 0x00);
usleep_range(2000, 2500);
}
intel_disable_dsi_pll(encoder);
}<sep>@@
expression e;
@@
- IS_BROXTON
+ IS_GEN9_LP
(e)
<|end_of_text|> | 8,729 |
--- initial
+++ final
@@ -1,31 +1,31 @@
static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size) {
struct drm_i915_private *dev_priv = ggtt->base.i915;
struct pci_dev *pdev = dev_priv->drm.pdev;
phys_addr_t phys_addr;
int ret;
/* For Modern GENs the PTEs and register space are split in the BAR */
phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2;
/*
* On BXT writes larger than 64 bit to the GTT pagetable range will be
* dropped. For WC mappings in general we have 64 byte burst writes
* when the WC buffer is flushed, so we can't use it, but have to
* resort to an uncached mapping. The WC issue is easily caught by the
* readback check when writing GTT PTE entries.
*/
- if (IS_BROXTON(dev_priv))
+ if (IS_GEN9_LP(dev_priv))
ggtt->gsm = ioremap_nocache(phys_addr, size);
else
ggtt->gsm = ioremap_wc(phys_addr, size);
if (!ggtt->gsm) {
DRM_ERROR("Failed to map the ggtt page table\n");
return -ENOMEM;
}
ret = setup_scratch_page(dev_priv, &ggtt->base.scratch_page, GFP_DMA32);
if (ret) {
DRM_ERROR("Scratch setup failed\n");
/* iounmap will also get called at remove, but meh */
iounmap(ggtt->gsm);
return ret;
}
return 0;
}<sep>@@
expression e;
@@
- IS_BROXTON
+ IS_GEN9_LP
(e)
<|end_of_text|> | 8,700 |
--- initial
+++ final
@@ -1,7 +1,7 @@
void intel_enable_dsi_pll(struct intel_encoder *encoder, const struct intel_crtc_state *config) {
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
vlv_enable_dsi_pll(encoder, config);
- else if (IS_BROXTON(dev_priv))
+ else if (IS_GEN9_LP(dev_priv))
bxt_enable_dsi_pll(encoder, config);
}<sep>@@
expression e;
@@
- IS_BROXTON
+ IS_GEN9_LP
(e)
<|end_of_text|> | 8,743 |
--- initial
+++ final
@@ -1,103 +1,103 @@
void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) {
struct intel_digital_port *intel_dig_port;
struct intel_encoder *intel_encoder;
struct drm_encoder *encoder;
bool init_hdmi, init_dp, init_lspcon = false;
int max_lanes;
if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES) {
switch (port) {
case PORT_A: max_lanes = 4; break;
case PORT_E: max_lanes = 0; break;
default: max_lanes = 4; break;
}
} else {
switch (port) {
case PORT_A: max_lanes = 2; break;
case PORT_E: max_lanes = 2; break;
default: max_lanes = 4; break;
}
}
init_hdmi = (dev_priv->vbt.ddi_port_info[port].supports_dvi || dev_priv->vbt.ddi_port_info[port].supports_hdmi);
init_dp = dev_priv->vbt.ddi_port_info[port].supports_dp;
if (intel_bios_is_lspcon_present(dev_priv, port)) {
/*
* Lspcon device needs to be driven with DP connector
* with special detection sequence. So make sure DP
* is initialized before lspcon.
*/
init_dp = true;
init_lspcon = true;
init_hdmi = false;
DRM_DEBUG_KMS("VBT says port %c has lspcon\n", port_name(port));
}
if (!init_dp && !init_hdmi) {
DRM_DEBUG_KMS("VBT says port %c is not DVI/HDMI/DP compatible, respect it\n", port_name(port));
return;
}
intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
if (!intel_dig_port) return;
intel_encoder = &intel_dig_port->base;
encoder = &intel_encoder->base;
drm_encoder_init(&dev_priv->drm, encoder, &intel_ddi_funcs, DRM_MODE_ENCODER_TMDS, "DDI %c", port_name(port));
intel_encoder->compute_config = intel_ddi_compute_config;
intel_encoder->enable = intel_enable_ddi;
- if (IS_BROXTON(dev_priv)) intel_encoder->pre_pll_enable = bxt_ddi_pre_pll_enable;
+ if (IS_GEN9_LP(dev_priv)) intel_encoder->pre_pll_enable = bxt_ddi_pre_pll_enable;
intel_encoder->pre_enable = intel_ddi_pre_enable;
intel_encoder->disable = intel_disable_ddi;
intel_encoder->post_disable = intel_ddi_post_disable;
intel_encoder->get_hw_state = intel_ddi_get_hw_state;
intel_encoder->get_config = intel_ddi_get_config;
intel_encoder->suspend = intel_dp_encoder_suspend;
intel_dig_port->port = port;
intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) & (DDI_BUF_PORT_REVERSAL | DDI_A_4_LANES);
/*
* Bspec says that DDI_A_4_LANES is the only supported configuration
* for Broxton. Yet some BIOS fail to set this bit on port A if eDP
* wasn't lit up at boot. Force this bit on in our internal
* configuration so that we use the proper lane count for our
* calculations.
*/
- if (IS_BROXTON(dev_priv) && port == PORT_A) {
+ if (IS_GEN9_LP(dev_priv) && port == PORT_A) {
if (!(intel_dig_port->saved_port_bits & DDI_A_4_LANES)) {
DRM_DEBUG_KMS("BXT BIOS forgot to set DDI_A_4_LANES for port A; fixing\n");
intel_dig_port->saved_port_bits |= DDI_A_4_LANES;
max_lanes = 4;
}
}
intel_dig_port->max_lanes = max_lanes;
intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
intel_encoder->port = port;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
intel_encoder->cloneable = 0;
if (init_dp) {
if (!intel_ddi_init_dp_connector(intel_dig_port)) goto err;
intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
/*
* On BXT A0/A1, sw needs to activate DDIA HPD logic and
* interrupts to check the external panel connection.
*/
if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1) && port == PORT_B)
dev_priv->hotplug.irq_port[PORT_A] = intel_dig_port;
else
dev_priv->hotplug.irq_port[port] = intel_dig_port;
}
/* In theory we don't need the encoder->type check, but leave it just in
* case we have some really bad VBTs... */
if (intel_encoder->type != INTEL_OUTPUT_EDP && init_hdmi) {
if (!intel_ddi_init_hdmi_connector(intel_dig_port)) goto err;
}
if (init_lspcon) {
if (lspcon_init(intel_dig_port)) /* TODO: handle hdmi info frame part */
DRM_DEBUG_KMS("LSPCON init success on port %c\n", port_name(port));
else
/*
* LSPCON init failed, but DP init was a success, so
* let's try to drive as DP++ port.
*/
DRM_ERROR("LSPCON init failed on port %c\n", port_name(port));
}
return;
err:
drm_encoder_cleanup(encoder);
kfree(intel_dig_port);
}<sep>@@
expression e;
@@
- IS_BROXTON
+ IS_GEN9_LP
(e)
<|end_of_text|> | 8,715 |
--- initial
+++ final
@@ -1,60 +1,60 @@
static void intel_panel_init_backlight_funcs(struct intel_panel *panel) {
struct intel_connector *connector = container_of(panel, struct intel_connector, panel);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP && intel_dp_aux_init_backlight_funcs(connector) == 0) return;
if (connector->base.connector_type == DRM_MODE_CONNECTOR_DSI && intel_dsi_dcs_init_backlight_funcs(connector) == 0) return;
- if (IS_BROXTON(dev_priv)) {
+ if (IS_GEN9_LP(dev_priv)) {
panel->backlight.setup = bxt_setup_backlight;
panel->backlight.enable = bxt_enable_backlight;
panel->backlight.disable = bxt_disable_backlight;
panel->backlight.set = bxt_set_backlight;
panel->backlight.get = bxt_get_backlight;
panel->backlight.hz_to_pwm = bxt_hz_to_pwm;
} else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv)) {
panel->backlight.setup = lpt_setup_backlight;
panel->backlight.enable = lpt_enable_backlight;
panel->backlight.disable = lpt_disable_backlight;
panel->backlight.set = lpt_set_backlight;
panel->backlight.get = lpt_get_backlight;
if (HAS_PCH_LPT(dev_priv))
panel->backlight.hz_to_pwm = lpt_hz_to_pwm;
else
panel->backlight.hz_to_pwm = spt_hz_to_pwm;
} else if (HAS_PCH_SPLIT(dev_priv)) {
panel->backlight.setup = pch_setup_backlight;
panel->backlight.enable = pch_enable_backlight;
panel->backlight.disable = pch_disable_backlight;
panel->backlight.set = pch_set_backlight;
panel->backlight.get = pch_get_backlight;
panel->backlight.hz_to_pwm = pch_hz_to_pwm;
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
if (connector->base.connector_type == DRM_MODE_CONNECTOR_DSI) {
panel->backlight.setup = pwm_setup_backlight;
panel->backlight.enable = pwm_enable_backlight;
panel->backlight.disable = pwm_disable_backlight;
panel->backlight.set = pwm_set_backlight;
panel->backlight.get = pwm_get_backlight;
} else {
panel->backlight.setup = vlv_setup_backlight;
panel->backlight.enable = vlv_enable_backlight;
panel->backlight.disable = vlv_disable_backlight;
panel->backlight.set = vlv_set_backlight;
panel->backlight.get = vlv_get_backlight;
panel->backlight.hz_to_pwm = vlv_hz_to_pwm;
}
} else if (IS_GEN4(dev_priv)) {
panel->backlight.setup = i965_setup_backlight;
panel->backlight.enable = i965_enable_backlight;
panel->backlight.disable = i965_disable_backlight;
panel->backlight.set = i9xx_set_backlight;
panel->backlight.get = i9xx_get_backlight;
panel->backlight.hz_to_pwm = i965_hz_to_pwm;
} else {
panel->backlight.setup = i9xx_setup_backlight;
panel->backlight.enable = i9xx_enable_backlight;
panel->backlight.disable = i9xx_disable_backlight;
panel->backlight.set = i9xx_set_backlight;
panel->backlight.get = i9xx_get_backlight;
panel->backlight.hz_to_pwm = i9xx_hz_to_pwm;
}
}<sep>@@
expression e;
@@
- IS_BROXTON
+ IS_GEN9_LP
(e)
<|end_of_text|> | 8,750 |
--- initial
+++ final
@@ -1,127 +1,127 @@
static void intel_dsi_prepare(struct intel_encoder *intel_encoder, struct intel_crtc_state *pipe_config) {
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
enum port port;
unsigned int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
u32 val, tmp;
u16 mode_hdisplay;
DRM_DEBUG_KMS("pipe %c\n", pipe_name(intel_crtc->pipe));
mode_hdisplay = adjusted_mode->crtc_hdisplay;
if (intel_dsi->dual_link) {
mode_hdisplay /= 2;
if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) mode_hdisplay += intel_dsi->pixel_overlap;
}
for_each_dsi_port(port, intel_dsi->ports) {
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
/*
* escape clock divider, 20MHz, shared for A and C.
* device ready must be off when doing this! txclkesc?
*/
tmp = I915_READ(MIPI_CTRL(PORT_A));
tmp &= ~ESCAPE_CLOCK_DIVIDER_MASK;
I915_WRITE(MIPI_CTRL(PORT_A), tmp | ESCAPE_CLOCK_DIVIDER_1);
/* read request priority is per pipe */
tmp = I915_READ(MIPI_CTRL(port));
tmp &= ~READ_REQUEST_PRIORITY_MASK;
I915_WRITE(MIPI_CTRL(port), tmp | READ_REQUEST_PRIORITY_HIGH);
- } else if (IS_BROXTON(dev_priv)) {
+ } else if (IS_GEN9_LP(dev_priv)) {
enum pipe pipe = intel_crtc->pipe;
tmp = I915_READ(MIPI_CTRL(port));
tmp &= ~BXT_PIPE_SELECT_MASK;
tmp |= BXT_PIPE_SELECT(pipe);
I915_WRITE(MIPI_CTRL(port), tmp);
}
/* XXX: why here, why like this? handling in irq handler?! */
I915_WRITE(MIPI_INTR_STAT(port), 0xffffffff);
I915_WRITE(MIPI_INTR_EN(port), 0xffffffff);
I915_WRITE(MIPI_DPHY_PARAM(port), intel_dsi->dphy_reg);
I915_WRITE(MIPI_DPI_RESOLUTION(port), adjusted_mode->crtc_vdisplay << VERTICAL_ADDRESS_SHIFT | mode_hdisplay << HORIZONTAL_ADDRESS_SHIFT);
}
set_dsi_timings(encoder, adjusted_mode);
val = intel_dsi->lane_count << DATA_LANES_PRG_REG_SHIFT;
if (is_cmd_mode(intel_dsi)) {
val |= intel_dsi->channel << CMD_MODE_CHANNEL_NUMBER_SHIFT;
val |= CMD_MODE_DATA_WIDTH_8_BIT; /* XXX */
} else {
val |= intel_dsi->channel << VID_MODE_CHANNEL_NUMBER_SHIFT;
val |= pixel_format_to_reg(intel_dsi->pixel_format);
}
tmp = 0;
if (intel_dsi->eotp_pkt == 0) tmp |= EOT_DISABLE;
if (intel_dsi->clock_stop) tmp |= CLOCKSTOP;
- if (IS_BROXTON(dev_priv)) {
+ if (IS_GEN9_LP(dev_priv)) {
tmp |= BXT_DPHY_DEFEATURE_EN;
if (!is_cmd_mode(intel_dsi)) tmp |= BXT_DEFEATURE_DPI_FIFO_CTR;
}
for_each_dsi_port(port, intel_dsi->ports) {
I915_WRITE(MIPI_DSI_FUNC_PRG(port), val);
/* timeouts for recovery. one frame IIUC. if counter expires,
* EOT and stop state. */
/*
* In burst mode, value greater than one DPI line Time in byte
* clock (txbyteclkhs) To timeout this timer 1+ of the above
* said value is recommended.
*
* In non-burst mode, Value greater than one DPI frame time in
* byte clock(txbyteclkhs) To timeout this timer 1+ of the above
* said value is recommended.
*
* In DBI only mode, value greater than one DBI frame time in
* byte clock(txbyteclkhs) To timeout this timer 1+ of the above
* said value is recommended.
*/
if (is_vid_mode(intel_dsi) && intel_dsi->video_mode_format == VIDEO_MODE_BURST) {
I915_WRITE(MIPI_HS_TX_TIMEOUT(port), txbyteclkhs(adjusted_mode->crtc_htotal, bpp, intel_dsi->lane_count, intel_dsi->burst_mode_ratio) + 1);
} else {
I915_WRITE(MIPI_HS_TX_TIMEOUT(port), txbyteclkhs(adjusted_mode->crtc_vtotal * adjusted_mode->crtc_htotal, bpp, intel_dsi->lane_count, intel_dsi->burst_mode_ratio) + 1);
}
I915_WRITE(MIPI_LP_RX_TIMEOUT(port), intel_dsi->lp_rx_timeout);
I915_WRITE(MIPI_TURN_AROUND_TIMEOUT(port), intel_dsi->turn_arnd_val);
I915_WRITE(MIPI_DEVICE_RESET_TIMER(port), intel_dsi->rst_timer_val);
/* dphy stuff */
/* in terms of low power clock */
I915_WRITE(MIPI_INIT_COUNT(port), txclkesc(intel_dsi->escape_clk_div, 100));
- if (IS_BROXTON(dev_priv) && (!intel_dsi->dual_link)) {
+ if (IS_GEN9_LP(dev_priv) && (!intel_dsi->dual_link)) {
/*
* BXT spec says write MIPI_INIT_COUNT for
* both the ports, even if only one is
* getting used. So write the other port
* if not in dual link mode.
*/
I915_WRITE(MIPI_INIT_COUNT(port == PORT_A ? PORT_C : PORT_A), intel_dsi->init_count);
}
/* recovery disables */
I915_WRITE(MIPI_EOT_DISABLE(port), tmp);
/* in terms of low power clock */
I915_WRITE(MIPI_INIT_COUNT(port), intel_dsi->init_count);
/* in terms of txbyteclkhs. actual high to low switch +
* MIPI_STOP_STATE_STALL * MIPI_LP_BYTECLK.
*
* XXX: write MIPI_STOP_STATE_STALL?
*/
I915_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT(port), intel_dsi->hs_to_lp_count);
/* XXX: low power clock equivalence in terms of byte clock.
* the number of byte clocks occupied in one low power clock.
* based on txbyteclkhs and txclkesc.
* txclkesc time / txbyteclk time * (105 + MIPI_STOP_STATE_STALL
* ) / 105.???
*/
I915_WRITE(MIPI_LP_BYTECLK(port), intel_dsi->lp_byte_clk);
/* the bw essential for transmitting 16 long packets containing
* 252 bytes meant for dcs write memory command is programmed in
* this register in terms of byte clocks. based on dsi transfer
* rate and the number of lanes configured the time taken to
* transmit 16 long packets in a dsi stream varies. */
I915_WRITE(MIPI_DBI_BW_CTRL(port), intel_dsi->bw_timer);
I915_WRITE(MIPI_CLK_LANE_SWITCH_TIME_CNT(port), intel_dsi->clk_lp_to_hs_count << LP_HS_SSW_CNT_SHIFT | intel_dsi->clk_hs_to_lp_count << HS_LP_PWR_SW_CNT_SHIFT);
if (is_vid_mode(intel_dsi))
/* Some panels might have a resolution which is not a
* multiple of 64, like 1366 x 768. Enable RANDOM
* resolution support for such panels by default */
I915_WRITE(MIPI_VIDEO_MODE_FORMAT(port), intel_dsi->video_frmt_cfg_bits | intel_dsi->video_mode_format | IP_TG_CONFIG | RANDOM_DPI_DISPLAY_RESOLUTION);
}
}<sep>@@
expression e;
@@
- IS_BROXTON
+ IS_GEN9_LP
(e)
<|end_of_text|> | 8,737 |
--- initial
+++ final
@@ -1,32 +1,32 @@
static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv) {
/* All of these values are in units of 50MHz */
/* static values from HW: RP0 > RP1 > RPn (min_freq) */
- if (IS_BROXTON(dev_priv)) {
+ if (IS_GEN9_LP(dev_priv)) {
u32 rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
dev_priv->rps.rp0_freq = (rp_state_cap >> 16) & 0xff;
dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
dev_priv->rps.min_freq = (rp_state_cap >> 0) & 0xff;
} else {
u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff;
dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
}
/* hw_max = RP0 until we check for overclocking */
dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
u32 ddcc_status = 0;
if (sandybridge_pcode_read(dev_priv, HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL, &ddcc_status) == 0) dev_priv->rps.efficient_freq = clamp_t(u8, ((ddcc_status >> 8) & 0xff), dev_priv->rps.min_freq, dev_priv->rps.max_freq);
}
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
/* Store the frequency values in 16.66 MHZ units, which is
* the natural hardware unit for SKL
*/
dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER;
dev_priv->rps.rp1_freq *= GEN9_FREQ_SCALER;
dev_priv->rps.min_freq *= GEN9_FREQ_SCALER;
dev_priv->rps.max_freq *= GEN9_FREQ_SCALER;
dev_priv->rps.efficient_freq *= GEN9_FREQ_SCALER;
}
}<sep>@@
expression e;
@@
- IS_BROXTON
+ IS_GEN9_LP
(e)
<|end_of_text|> | 8,751 |
--- initial
+++ final
@@ -1,28 +1,28 @@
static void intel_pps_readout_hw_state(struct drm_i915_private *dev_priv, struct intel_dp *intel_dp, struct edp_power_seq *seq) {
u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
struct pps_registers regs;
intel_pps_get_registers(dev_priv, intel_dp, ®s);
/* Workaround: Need to write PP_CONTROL with the unlock key as
* the very first thing. */
pp_ctl = ironlake_get_pp_control(intel_dp);
pp_on = I915_READ(regs.pp_on);
pp_off = I915_READ(regs.pp_off);
- if (!IS_BROXTON(dev_priv)) {
+ if (!IS_GEN9_LP(dev_priv)) {
I915_WRITE(regs.pp_ctrl, pp_ctl);
pp_div = I915_READ(regs.pp_div);
}
/* Pull timing values out of registers */
seq->t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >> PANEL_POWER_UP_DELAY_SHIFT;
seq->t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >> PANEL_LIGHT_ON_DELAY_SHIFT;
seq->t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >> PANEL_LIGHT_OFF_DELAY_SHIFT;
seq->t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >> PANEL_POWER_DOWN_DELAY_SHIFT;
- if (IS_BROXTON(dev_priv)) {
+ if (IS_GEN9_LP(dev_priv)) {
u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >> BXT_POWER_CYCLE_DELAY_SHIFT;
if (tmp > 0)
seq->t11_t12 = (tmp - 1) * 1000;
else
seq->t11_t12 = 0;
} else {
seq->t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >> PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
}
}<sep>@@
expression e;
@@
- IS_BROXTON
+ IS_GEN9_LP
(e)
<|end_of_text|> | 8,727 |
--- initial
+++ final
@@ -1,13 +1,13 @@
static void intel_pps_get_registers(struct drm_i915_private *dev_priv, struct intel_dp *intel_dp, struct pps_registers *regs) {
int pps_idx = 0;
memset(regs, 0, sizeof(*regs));
- if (IS_BROXTON(dev_priv))
+ if (IS_GEN9_LP(dev_priv))
pps_idx = bxt_power_sequencer_idx(intel_dp);
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
pps_idx = vlv_power_sequencer_pipe(intel_dp);
regs->pp_ctrl = PP_CONTROL(pps_idx);
regs->pp_stat = PP_STATUS(pps_idx);
regs->pp_on = PP_ON_DELAYS(pps_idx);
regs->pp_off = PP_OFF_DELAYS(pps_idx);
- if (!IS_BROXTON(dev_priv)) regs->pp_div = PP_DIVISOR(pps_idx);
+ if (!IS_GEN9_LP(dev_priv)) regs->pp_div = PP_DIVISOR(pps_idx);
}<sep>@@
expression e;
@@
- IS_BROXTON
+ IS_GEN9_LP
(e)
<|end_of_text|> | 8,726 |
--- initial
+++ final
@@ -1,53 +1,53 @@
bool rtl_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version, u8 faversion, u8 interface_type, struct wlan_pwr_cfg pwrcfgcmd[]) {
struct wlan_pwr_cfg cfg_cmd = {0};
bool polling_bit = false;
u32 ary_idx = 0;
u8 value = 0;
u32 offset = 0;
u32 polling_count = 0;
u32 max_polling_cnt = 5000;
do {
cfg_cmd = pwrcfgcmd[ary_idx];
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "rtl_hal_pwrseqcmdparsing(): offset(%#x),cut_msk(%#x), famsk(%#x), interface_msk(%#x), base(%#x), cmd(%#x), msk(%#x), value(%#x)\n", GET_PWR_CFG_OFFSET(cfg_cmd), GET_PWR_CFG_CUT_MASK(cfg_cmd), GET_PWR_CFG_FAB_MASK(cfg_cmd), GET_PWR_CFG_INTF_MASK(cfg_cmd), GET_PWR_CFG_BASE(cfg_cmd), GET_PWR_CFG_CMD(cfg_cmd), GET_PWR_CFG_MASK(cfg_cmd), GET_PWR_CFG_VALUE(cfg_cmd));
if ((GET_PWR_CFG_FAB_MASK(cfg_cmd) & faversion) && (GET_PWR_CFG_CUT_MASK(cfg_cmd) & cut_version) && (GET_PWR_CFG_INTF_MASK(cfg_cmd) & interface_type)) {
switch (GET_PWR_CFG_CMD(cfg_cmd)) {
case PWR_CMD_READ: RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "rtl_hal_pwrseqcmdparsing(): PWR_CMD_READ\n"); break;
case PWR_CMD_WRITE:
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "rtl_hal_pwrseqcmdparsing(): PWR_CMD_WRITE\n");
offset = GET_PWR_CFG_OFFSET(cfg_cmd);
/*Read the value from system register*/
value = rtl_read_byte(rtlpriv, offset);
value &= (~(GET_PWR_CFG_MASK(cfg_cmd)));
value |= (GET_PWR_CFG_VALUE(cfg_cmd) & GET_PWR_CFG_MASK(cfg_cmd));
/*Write the value back to system register*/
rtl_write_byte(rtlpriv, offset, value);
break;
case PWR_CMD_POLLING:
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "rtl_hal_pwrseqcmdparsing(): PWR_CMD_POLLING\n");
polling_bit = false;
offset = GET_PWR_CFG_OFFSET(cfg_cmd);
do {
value = rtl_read_byte(rtlpriv, offset);
value &= GET_PWR_CFG_MASK(cfg_cmd);
if (value == (GET_PWR_CFG_VALUE(cfg_cmd) & GET_PWR_CFG_MASK(cfg_cmd)))
polling_bit = true;
else
udelay(10);
if (polling_count++ > max_polling_cnt) return false;
} while (!polling_bit);
break;
case PWR_CMD_DELAY:
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "rtl_hal_pwrseqcmdparsing(): PWR_CMD_DELAY\n");
if (GET_PWR_CFG_VALUE(cfg_cmd) == PWRSEQ_DELAY_US)
udelay(GET_PWR_CFG_OFFSET(cfg_cmd));
else
mdelay(GET_PWR_CFG_OFFSET(cfg_cmd));
break;
case PWR_CMD_END: RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "rtl_hal_pwrseqcmdparsing(): PWR_CMD_END\n"); return true;
- default: RT_ASSERT(false, "rtl_hal_pwrseqcmdparsing(): Unknown CMD!!\n"); break;
+ default: WARN_ONCE(true, "rtl_hal_pwrseqcmdparsing(): Unknown CMD!!\n"); break;
}
}
ary_idx++;
} while (1);
return true;
}<sep>@@
expression list e;
@@
- RT_ASSERT(false,e)
+ WARN_ONCE(true,e)
<|end_of_text|> | 8,760 |
--- initial
+++ final
@@ -1,63 +1,63 @@
void rtl8821ae_add_wowlan_pattern(struct ieee80211_hw *hw, struct rtl_wow_pattern *rtl_pattern, u8 index) {
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 cam = 0;
u8 addr = 0;
u16 rxbuf_addr;
u8 tmp, count = 0;
u16 cam_start;
u16 offset;
/* Count the WFCAM entry start offset. */
/* RX page size = 128 byte */
offset = MAX_RX_DMA_BUFFER_SIZE_8812 / 128;
/* We should start from the boundary */
cam_start = offset * 128;
/* Enable Rx packet buffer access. */
rtl_write_byte(rtlpriv, REG_PKT_BUFF_ACCESS_CTRL, RXPKT_BUF_SELECT);
for (addr = 0; addr < WKFMCAM_ADDR_NUM; addr++) {
/* Set Rx packet buffer offset.
* RxBuffer pointer increases by 1,
* we can access 8 bytes in Rx packet buffer.
* CAM start offset (unit: 1 byte) = index*WKFMCAM_SIZE
* RxBuffer addr = (CAM start offset +
* per entry offset of a WKFM CAM)/8
* * index: The index of the wake up frame mask
* * WKFMCAM_SIZE: the total size of one WKFM CAM
* * per entry offset of a WKFM CAM: Addr*4 bytes
*/
rxbuf_addr = (cam_start + index * WKFMCAM_SIZE + addr * 4) >> 3;
/* Set R/W start offset */
rtl_write_word(rtlpriv, REG_PKTBUF_DBG_CTRL, rxbuf_addr);
if (addr == 0) {
cam = BIT(31) | rtl_pattern->crc;
if (rtl_pattern->type == UNICAST_PATTERN)
cam |= BIT(24);
else if (rtl_pattern->type == MULTICAST_PATTERN)
cam |= BIT(25);
else if (rtl_pattern->type == BROADCAST_PATTERN)
cam |= BIT(26);
rtl_write_dword(rtlpriv, REG_PKTBUF_DBG_DATA_L, cam);
RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE, "WRITE entry[%d] 0x%x: %x\n", addr, REG_PKTBUF_DBG_DATA_L, cam);
/* Write to Rx packet buffer. */
rtl_write_word(rtlpriv, REG_RXPKTBUF_CTRL, 0x0f01);
} else if (addr == 2 || addr == 4) { /* WKFM[127:0] */
cam = rtl_pattern->mask[addr - 2];
rtl_write_dword(rtlpriv, REG_PKTBUF_DBG_DATA_L, cam);
RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE, "WRITE entry[%d] 0x%x: %x\n", addr, REG_PKTBUF_DBG_DATA_L, cam);
rtl_write_word(rtlpriv, REG_RXPKTBUF_CTRL, 0x0f01);
} else if (addr == 3 || addr == 5) { /* WKFM[127:0] */
cam = rtl_pattern->mask[addr - 2];
rtl_write_dword(rtlpriv, REG_PKTBUF_DBG_DATA_H, cam);
RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE, "WRITE entry[%d] 0x%x: %x\n", addr, REG_PKTBUF_DBG_DATA_H, cam);
rtl_write_word(rtlpriv, REG_RXPKTBUF_CTRL, 0xf001);
}
count = 0;
do {
tmp = rtl_read_byte(rtlpriv, REG_RXPKTBUF_CTRL);
udelay(2);
count++;
} while (tmp && count < 100);
- RT_ASSERT((count < 100), "Write wake up frame mask FAIL %d value!\n", tmp);
+ WARN_ONCE((count >= 100), "Write wake up frame mask FAIL %d value!\n", tmp);
}
/* Disable Rx packet buffer access. */
rtl_write_byte(rtlpriv, REG_PKT_BUFF_ACCESS_CTRL, DISABLE_TRXPKT_BUF_ACCESS);
}<sep>@@
expression list e;
expression e1,e2;
@@
- RT_ASSERT((e1 < e2),e)
+ WARN_ONCE((e1 >= e2),e)
<|end_of_text|> | 8,765 |
--- initial
+++ final
@@ -1 +1 @@
-static void _rtl8723e_phy_fw_rf_serial_write(struct ieee80211_hw *hw, enum radio_path rfpath, u32 offset, u32 data) { RT_ASSERT(false, "deprecated!\n"); }
+static void _rtl8723e_phy_fw_rf_serial_write(struct ieee80211_hw *hw, enum radio_path rfpath, u32 offset, u32 data) { WARN_ONCE(true, "deprecated!\n"); }<sep>@@
expression list e;
@@
- RT_ASSERT(false,e)
+ WARN_ONCE(true,e)
<|end_of_text|> | 8,806 |
--- initial
+++ final
@@ -1,23 +1,23 @@
void rtl92de_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx, u8 desc_name, u8 *val) {
if (istx) {
switch (desc_name) {
case HW_DESC_OWN:
wmb();
SET_TX_DESC_OWN(pdesc, 1);
break;
case HW_DESC_TX_NEXTDESC_ADDR: SET_TX_DESC_NEXT_DESC_ADDRESS(pdesc, *(u32 *)val); break;
- default: RT_ASSERT(false, "ERR txdesc :%d not process\n", desc_name); break;
+ default: WARN_ONCE(true, "ERR txdesc :%d not process\n", desc_name); break;
}
} else {
switch (desc_name) {
case HW_DESC_RXOWN:
wmb();
SET_RX_DESC_OWN(pdesc, 1);
break;
case HW_DESC_RXBUFF_ADDR: SET_RX_DESC_BUFF_ADDR(pdesc, *(u32 *)val); break;
case HW_DESC_RXPKT_LEN: SET_RX_DESC_PKT_LEN(pdesc, *(u32 *)val); break;
case HW_DESC_RXERO: SET_RX_DESC_EOR(pdesc, 1); break;
- default: RT_ASSERT(false, "ERR rxdesc :%d not process\n", desc_name); break;
+ default: WARN_ONCE(true, "ERR rxdesc :%d not process\n", desc_name); break;
}
}
}<sep>@@
expression list e;
@@
- RT_ASSERT(false,e)
+ WARN_ONCE(true,e)
<|end_of_text|> | 8,791 |
--- initial
+++ final
@@ -1,20 +1,20 @@
u8 rtl88e_phy_sw_chnl(struct ieee80211_hw *hw) {
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &rtlpriv->phy;
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
if (rtlphy->sw_chnl_inprogress) return 0;
if (rtlphy->set_bwmode_inprogress) return 0;
- RT_ASSERT((rtlphy->current_channel <= 14), "WIRELESS_MODE_G but channel>14");
+ WARN_ONCE((rtlphy->current_channel > 14), "WIRELESS_MODE_G but channel>14");
rtlphy->sw_chnl_inprogress = true;
rtlphy->sw_chnl_stage = 0;
rtlphy->sw_chnl_step = 0;
if (!(is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) {
rtl88e_phy_sw_chnl_callback(hw);
RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD, "sw_chnl_inprogress false schedule workitem current channel %d\n", rtlphy->current_channel);
rtlphy->sw_chnl_inprogress = false;
} else {
RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD, "sw_chnl_inprogress false driver sleep or unload\n");
rtlphy->sw_chnl_inprogress = false;
}
return 1;
}<sep>@@
expression list e;
expression e1,e2;
@@
- RT_ASSERT((e1 <= e2),e)
+ WARN_ONCE((e1 > e2),e)
<|end_of_text|> | 8,773 |
--- initial
+++ final
@@ -1,18 +1,18 @@
u32 rtl8723be_get_desc(u8 *pdesc, bool istx, u8 desc_name) {
u32 ret = 0;
if (istx) {
switch (desc_name) {
case HW_DESC_OWN: ret = GET_TX_DESC_OWN(pdesc); break;
case HW_DESC_TXBUFF_ADDR: ret = GET_TX_DESC_TX_BUFFER_ADDRESS(pdesc); break;
- default: RT_ASSERT(false, "ERR txdesc :%d not process\n", desc_name); break;
+ default: WARN_ONCE(true, "ERR txdesc :%d not process\n", desc_name); break;
}
} else {
switch (desc_name) {
case HW_DESC_OWN: ret = GET_RX_DESC_OWN(pdesc); break;
case HW_DESC_RXPKT_LEN: ret = GET_RX_DESC_PKT_LEN(pdesc); break;
case HW_DESC_RXBUFF_ADDR: ret = GET_RX_DESC_BUFF_ADDR(pdesc); break;
- default: RT_ASSERT(false, "ERR rxdesc :%d not process\n", desc_name); break;
+ default: WARN_ONCE(true, "ERR rxdesc :%d not process\n", desc_name); break;
}
}
return ret;
}<sep>@@
expression list e;
@@
- RT_ASSERT(false,e)
+ WARN_ONCE(true,e)
<|end_of_text|> | 8,815 |
--- initial
+++ final
@@ -1,14 +1,14 @@
static bool _rtl92ee_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable, u32 cmdtableidx, u32 cmdtablesz, enum swchnlcmd_id cmdid, u32 para1, u32 para2, u32 msdelay) {
struct swchnlcmd *pcmd;
if (cmdtable == NULL) {
- RT_ASSERT(false, "cmdtable cannot be NULL.\n");
+ WARN_ONCE(true, "cmdtable cannot be NULL.\n");
return false;
}
if (cmdtableidx >= cmdtablesz) return false;
pcmd = cmdtable + cmdtableidx;
pcmd->cmdid = cmdid;
pcmd->para1 = para1;
pcmd->para2 = para2;
pcmd->msdelay = msdelay;
return true;
}<sep>@@
expression list e;
@@
- RT_ASSERT(false,e)
+ WARN_ONCE(true,e)
<|end_of_text|> | 8,794 |
--- initial
+++ final
@@ -1,18 +1,18 @@
u32 rtl8723e_get_desc(u8 *pdesc, bool istx, u8 desc_name) {
u32 ret = 0;
if (istx == true) {
switch (desc_name) {
case HW_DESC_OWN: ret = GET_TX_DESC_OWN(pdesc); break;
case HW_DESC_TXBUFF_ADDR: ret = GET_TX_DESC_TX_BUFFER_ADDRESS(pdesc); break;
- default: RT_ASSERT(false, "ERR txdesc :%d not process\n", desc_name); break;
+ default: WARN_ONCE(true, "ERR txdesc :%d not process\n", desc_name); break;
}
} else {
switch (desc_name) {
case HW_DESC_OWN: ret = GET_RX_DESC_OWN(pdesc); break;
case HW_DESC_RXPKT_LEN: ret = GET_RX_DESC_PKT_LEN(pdesc); break;
case HW_DESC_RXBUFF_ADDR: ret = GET_RX_DESC_BUFF_ADDR(pdesc); break;
- default: RT_ASSERT(false, "ERR rxdesc :%d not process\n", desc_name); break;
+ default: WARN_ONCE(true, "ERR rxdesc :%d not process\n", desc_name); break;
}
}
return ret;
}<sep>@@
expression list e;
@@
- RT_ASSERT(false,e)
+ WARN_ONCE(true,e)
<|end_of_text|> | 8,809 |
--- initial
+++ final
@@ -1,53 +1,53 @@
static bool _rtl88e_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, u8 channel, u8 *stage, u8 *step, u32 *delay) {
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &rtlpriv->phy;
struct swchnlcmd precommoncmd[MAX_PRECMD_CNT];
u32 precommoncmdcnt;
struct swchnlcmd postcommoncmd[MAX_POSTCMD_CNT];
u32 postcommoncmdcnt;
struct swchnlcmd rfdependcmd[MAX_RFDEPENDCMD_CNT];
u32 rfdependcmdcnt;
struct swchnlcmd *currentcmd = NULL;
u8 rfpath;
u8 num_total_rfpath = rtlphy->num_total_rfpath;
precommoncmdcnt = 0;
_rtl88e_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++, MAX_PRECMD_CNT, CMDID_SET_TXPOWEROWER_LEVEL, 0, 0, 0);
_rtl88e_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++, MAX_PRECMD_CNT, CMDID_END, 0, 0, 0);
postcommoncmdcnt = 0;
_rtl88e_phy_set_sw_chnl_cmdarray(postcommoncmd, postcommoncmdcnt++, MAX_POSTCMD_CNT, CMDID_END, 0, 0, 0);
rfdependcmdcnt = 0;
- RT_ASSERT((channel >= 1 && channel <= 14), "illegal channel for Zebra: %d\n", channel);
+ WARN_ONCE((channel < 1 || channel > 14), "illegal channel for Zebra: %d\n", channel);
_rtl88e_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++, MAX_RFDEPENDCMD_CNT, CMDID_RF_WRITEREG, RF_CHNLBW, channel, 10);
_rtl88e_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++, MAX_RFDEPENDCMD_CNT, CMDID_END, 0, 0, 0);
do {
switch (*stage) {
case 0: currentcmd = &precommoncmd[*step]; break;
case 1: currentcmd = &rfdependcmd[*step]; break;
case 2: currentcmd = &postcommoncmd[*step]; break;
default: RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Invalid 'stage' = %d, Check it!\n", *stage); return true;
}
if (currentcmd->cmdid == CMDID_END) {
if ((*stage) == 2) return true;
(*stage)++;
(*step) = 0;
continue;
}
switch (currentcmd->cmdid) {
case CMDID_SET_TXPOWEROWER_LEVEL: rtl88e_phy_set_txpower_level(hw, channel); break;
case CMDID_WRITEPORT_ULONG: rtl_write_dword(rtlpriv, currentcmd->para1, currentcmd->para2); break;
case CMDID_WRITEPORT_USHORT: rtl_write_word(rtlpriv, currentcmd->para1, (u16)currentcmd->para2); break;
case CMDID_WRITEPORT_UCHAR: rtl_write_byte(rtlpriv, currentcmd->para1, (u8)currentcmd->para2); break;
case CMDID_RF_WRITEREG:
for (rfpath = 0; rfpath < num_total_rfpath; rfpath++) {
rtlphy->rfreg_chnlval[rfpath] = ((rtlphy->rfreg_chnlval[rfpath] & 0xfffffc00) | currentcmd->para2);
rtl_set_rfreg(hw, (enum radio_path)rfpath, currentcmd->para1, RFREG_OFFSET_MASK, rtlphy->rfreg_chnlval[rfpath]);
}
break;
default: RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD, "switch case %#x not processed\n", currentcmd->cmdid); break;
}
break;
} while (true);
(*delay) = currentcmd->msdelay;
(*step)++;
return false;
}<sep>@@
expression list e;
expression e1,e2,e3;
@@
- RT_ASSERT((e1 >= e2 && e1 <= e3),e)
+ WARN_ONCE((e1 < e2 || e1 > e3),e)
<|end_of_text|> | 8,772 |
--- initial
+++ final
@@ -1,14 +1,14 @@
static bool _rtl92s_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable, u32 cmdtableidx, u32 cmdtablesz, enum swchnlcmd_id cmdid, u32 para1, u32 para2, u32 msdelay) {
struct swchnlcmd *pcmd;
if (cmdtable == NULL) {
- RT_ASSERT(false, "cmdtable cannot be NULL\n");
+ WARN_ONCE(true, "cmdtable cannot be NULL\n");
return false;
}
if (cmdtableidx >= cmdtablesz) return false;
pcmd = cmdtable + cmdtableidx;
pcmd->cmdid = cmdid;
pcmd->para1 = para1;
pcmd->para2 = para2;
pcmd->msdelay = msdelay;
return true;
}<sep>@@
expression list e;
@@
- RT_ASSERT(false,e)
+ WARN_ONCE(true,e)
<|end_of_text|> | 8,800 |
--- initial
+++ final
@@ -1,13 +1,13 @@
void rtl92ee_set_qos(struct ieee80211_hw *hw, int aci) {
struct rtl_priv *rtlpriv = rtl_priv(hw);
rtl92ee_dm_init_edca_turbo(hw);
switch (aci) {
case AC1_BK: rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, 0xa44f); break;
case AC0_BE:
/* rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM, u4b_ac_param); */
break;
case AC2_VI: rtl_write_dword(rtlpriv, REG_EDCA_VI_PARAM, 0x5e4322); break;
case AC3_VO: rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, 0x2f3222); break;
- default: RT_ASSERT(false, "invalid aci: %d !\n", aci); break;
+ default: WARN_ONCE(true, "invalid aci: %d !\n", aci); break;
}
}<sep>@@
expression list e;
@@
- RT_ASSERT(false,e)
+ WARN_ONCE(true,e)
<|end_of_text|> | 8,793 |
--- initial
+++ final
@@ -1,17 +1,17 @@
void rtl8821ae_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx, u8 desc_name, u8 *val) {
if (istx) {
switch (desc_name) {
case HW_DESC_OWN: SET_TX_DESC_OWN(pdesc, 1); break;
case HW_DESC_TX_NEXTDESC_ADDR: SET_TX_DESC_NEXT_DESC_ADDRESS(pdesc, *(u32 *)val); break;
- default: RT_ASSERT(false, "ERR txdesc :%d not process\n", desc_name); break;
+ default: WARN_ONCE(true, "ERR txdesc :%d not process\n", desc_name); break;
}
} else {
switch (desc_name) {
case HW_DESC_RXOWN: SET_RX_DESC_OWN(pdesc, 1); break;
case HW_DESC_RXBUFF_ADDR: SET_RX_DESC_BUFF_ADDR(pdesc, *(u32 *)val); break;
case HW_DESC_RXPKT_LEN: SET_RX_DESC_PKT_LEN(pdesc, *(u32 *)val); break;
case HW_DESC_RXERO: SET_RX_DESC_EOR(pdesc, 1); break;
- default: RT_ASSERT(false, "ERR rxdesc :%d not process\n", desc_name); break;
+ default: WARN_ONCE(true, "ERR rxdesc :%d not process\n", desc_name); break;
}
}
}<sep>@@
expression list e;
@@
- RT_ASSERT(false,e)
+ WARN_ONCE(true,e)
<|end_of_text|> | 8,819 |
--- initial
+++ final
@@ -1,4 +1,4 @@
u32 _rtl92c_phy_fw_rf_serial_read(struct ieee80211_hw *hw, enum radio_path rfpath, u32 offset) {
- RT_ASSERT(false, "deprecated!\n");
+ WARN_ONCE(true, "deprecated!\n");
return 0;
}<sep>@@
expression list e;
@@
- RT_ASSERT(false,e)
+ WARN_ONCE(true,e)
<|end_of_text|> | 8,776 |
--- initial
+++ final
@@ -1,16 +1,16 @@
void rtl92c_firmware_selfreset(struct ieee80211_hw *hw) {
u8 u1b_tmp;
u8 delay = 100;
struct rtl_priv *rtlpriv = rtl_priv(hw);
rtl_write_byte(rtlpriv, REG_HMETFR + 3, 0x20);
u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
while (u1b_tmp & BIT(2)) {
delay--;
if (delay == 0) {
- RT_ASSERT(false, "8051 reset fail.\n");
+ WARN_ONCE(true, "8051 reset fail.\n");
break;
}
udelay(50);
u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
}
}<sep>@@
expression list e;
@@
- RT_ASSERT(false,e)
+ WARN_ONCE(true,e)
<|end_of_text|> | 8,763 |
--- initial
+++ final
@@ -1,56 +1,56 @@
static bool _rtl8723be_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, u8 channel, u8 *stage, u8 *step, u32 *delay) {
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &rtlpriv->phy;
struct swchnlcmd precommoncmd[MAX_PRECMD_CNT];
u32 precommoncmdcnt;
struct swchnlcmd postcommoncmd[MAX_POSTCMD_CNT];
u32 postcommoncmdcnt;
struct swchnlcmd rfdependcmd[MAX_RFDEPENDCMD_CNT];
u32 rfdependcmdcnt;
struct swchnlcmd *currentcmd = NULL;
u8 rfpath;
u8 num_total_rfpath = rtlphy->num_total_rfpath;
precommoncmdcnt = 0;
rtl8723_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++, MAX_PRECMD_CNT, CMDID_SET_TXPOWEROWER_LEVEL, 0, 0, 0);
rtl8723_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++, MAX_PRECMD_CNT, CMDID_END, 0, 0, 0);
postcommoncmdcnt = 0;
rtl8723_phy_set_sw_chnl_cmdarray(postcommoncmd, postcommoncmdcnt++, MAX_POSTCMD_CNT, CMDID_END, 0, 0, 0);
rfdependcmdcnt = 0;
- RT_ASSERT((channel >= 1 && channel <= 14), "illegal channel for Zebra: %d\n", channel);
+ WARN_ONCE((channel < 1 || channel > 14), "illegal channel for Zebra: %d\n", channel);
rtl8723_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++, MAX_RFDEPENDCMD_CNT, CMDID_RF_WRITEREG, RF_CHNLBW, channel, 10);
rtl8723_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++, MAX_RFDEPENDCMD_CNT, CMDID_END, 0, 0, 0);
do {
switch (*stage) {
case 0: currentcmd = &precommoncmd[*step]; break;
case 1: currentcmd = &rfdependcmd[*step]; break;
case 2: currentcmd = &postcommoncmd[*step]; break;
default: RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Invalid 'stage' = %d, Check it!\n", *stage); return true;
}
if (currentcmd->cmdid == CMDID_END) {
if ((*stage) == 2) {
return true;
} else {
(*stage)++;
(*step) = 0;
continue;
}
}
switch (currentcmd->cmdid) {
case CMDID_SET_TXPOWEROWER_LEVEL: rtl8723be_phy_set_txpower_level(hw, channel); break;
case CMDID_WRITEPORT_ULONG: rtl_write_dword(rtlpriv, currentcmd->para1, currentcmd->para2); break;
case CMDID_WRITEPORT_USHORT: rtl_write_word(rtlpriv, currentcmd->para1, (u16)currentcmd->para2); break;
case CMDID_WRITEPORT_UCHAR: rtl_write_byte(rtlpriv, currentcmd->para1, (u8)currentcmd->para2); break;
case CMDID_RF_WRITEREG:
for (rfpath = 0; rfpath < num_total_rfpath; rfpath++) {
rtlphy->rfreg_chnlval[rfpath] = ((rtlphy->rfreg_chnlval[rfpath] & 0xfffffc00) | currentcmd->para2);
rtl_set_rfreg(hw, (enum radio_path)rfpath, currentcmd->para1, RFREG_OFFSET_MASK, rtlphy->rfreg_chnlval[rfpath]);
}
break;
default: RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD, "switch case %#x not processed\n", currentcmd->cmdid); break;
}
break;
} while (true);
(*delay) = currentcmd->msdelay;
(*step)++;
return false;
}<sep>@@
expression list e;
expression e1,e2,e3;
@@
- RT_ASSERT((e1 >= e2 && e1 <= e3),e)
+ WARN_ONCE((e1 < e2 || e1 > e3),e)
<|end_of_text|> | 8,813 |
--- initial
+++ final
@@ -1 +1 @@
-void _rtl92c_phy_fw_rf_serial_write(struct ieee80211_hw *hw, enum radio_path rfpath, u32 offset, u32 data) { RT_ASSERT(false, "deprecated!\n"); }
+void _rtl92c_phy_fw_rf_serial_write(struct ieee80211_hw *hw, enum radio_path rfpath, u32 offset, u32 data) { WARN_ONCE(true, "deprecated!\n"); }<sep>@@
expression list e;
@@
- RT_ASSERT(false,e)
+ WARN_ONCE(true,e)
<|end_of_text|> | 8,777 |
--- initial
+++ final
@@ -1,14 +1,14 @@
bool rtl8723_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable, u32 cmdtableidx, u32 cmdtablesz, enum swchnlcmd_id cmdid, u32 para1, u32 para2, u32 msdelay) {
struct swchnlcmd *pcmd;
if (cmdtable == NULL) {
- RT_ASSERT(false, "cmdtable cannot be NULL.\n");
+ WARN_ONCE(true, "cmdtable cannot be NULL.\n");
return false;
}
if (cmdtableidx >= cmdtablesz) return false;
pcmd = cmdtable + cmdtableidx;
pcmd->cmdid = cmdid;
pcmd->para1 = para1;
pcmd->para2 = para2;
pcmd->msdelay = msdelay;
return true;
}<sep>@@
expression list e;
@@
- RT_ASSERT(false,e)
+ WARN_ONCE(true,e)
<|end_of_text|> | 8,768 |
--- initial
+++ final
@@ -1,13 +1,13 @@
void rtl8821ae_set_qos(struct ieee80211_hw *hw, int aci) {
struct rtl_priv *rtlpriv = rtl_priv(hw);
rtl8821ae_dm_init_edca_turbo(hw);
switch (aci) {
case AC1_BK: rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, 0xa44f); break;
case AC0_BE:
/* rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM, u4b_ac_param); */
break;
case AC2_VI: rtl_write_dword(rtlpriv, REG_EDCA_VI_PARAM, 0x5e4322); break;
case AC3_VO: rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, 0x2f3222); break;
- default: RT_ASSERT(false, "invalid aci: %d !\n", aci); break;
+ default: WARN_ONCE(true, "invalid aci: %d !\n", aci); break;
}
}<sep>@@
expression list e;
@@
- RT_ASSERT(false,e)
+ WARN_ONCE(true,e)
<|end_of_text|> | 8,766 |
--- initial
+++ final
@@ -1,56 +1,56 @@
static bool _rtl92s_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, u8 channel, u8 *stage, u8 *step, u32 *delay) {
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &(rtlpriv->phy);
struct swchnlcmd precommoncmd[MAX_PRECMD_CNT];
u32 precommoncmdcnt;
struct swchnlcmd postcommoncmd[MAX_POSTCMD_CNT];
u32 postcommoncmdcnt;
struct swchnlcmd rfdependcmd[MAX_RFDEPENDCMD_CNT];
u32 rfdependcmdcnt;
struct swchnlcmd *currentcmd = NULL;
u8 rfpath;
u8 num_total_rfpath = rtlphy->num_total_rfpath;
precommoncmdcnt = 0;
_rtl92s_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++, MAX_PRECMD_CNT, CMDID_SET_TXPOWEROWER_LEVEL, 0, 0, 0);
_rtl92s_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++, MAX_PRECMD_CNT, CMDID_END, 0, 0, 0);
postcommoncmdcnt = 0;
_rtl92s_phy_set_sw_chnl_cmdarray(postcommoncmd, postcommoncmdcnt++, MAX_POSTCMD_CNT, CMDID_END, 0, 0, 0);
rfdependcmdcnt = 0;
- RT_ASSERT((channel >= 1 && channel <= 14), "invalid channel for Zebra: %d\n", channel);
+ WARN_ONCE((channel < 1 || channel > 14), "invalid channel for Zebra: %d\n", channel);
_rtl92s_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++, MAX_RFDEPENDCMD_CNT, CMDID_RF_WRITEREG, RF_CHNLBW, channel, 10);
_rtl92s_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++, MAX_RFDEPENDCMD_CNT, CMDID_END, 0, 0, 0);
do {
switch (*stage) {
case 0: currentcmd = &precommoncmd[*step]; break;
case 1: currentcmd = &rfdependcmd[*step]; break;
case 2: currentcmd = &postcommoncmd[*step]; break;
default: return true;
}
if (currentcmd->cmdid == CMDID_END) {
if ((*stage) == 2) {
return true;
} else {
(*stage)++;
(*step) = 0;
continue;
}
}
switch (currentcmd->cmdid) {
case CMDID_SET_TXPOWEROWER_LEVEL: rtl92s_phy_set_txpower(hw, channel); break;
case CMDID_WRITEPORT_ULONG: rtl_write_dword(rtlpriv, currentcmd->para1, currentcmd->para2); break;
case CMDID_WRITEPORT_USHORT: rtl_write_word(rtlpriv, currentcmd->para1, (u16)currentcmd->para2); break;
case CMDID_WRITEPORT_UCHAR: rtl_write_byte(rtlpriv, currentcmd->para1, (u8)currentcmd->para2); break;
case CMDID_RF_WRITEREG:
for (rfpath = 0; rfpath < num_total_rfpath; rfpath++) {
rtlphy->rfreg_chnlval[rfpath] = ((rtlphy->rfreg_chnlval[rfpath] & 0xfffffc00) | currentcmd->para2);
rtl_set_rfreg(hw, (enum radio_path)rfpath, currentcmd->para1, RFREG_OFFSET_MASK, rtlphy->rfreg_chnlval[rfpath]);
}
break;
default: RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "switch case %#x not processed\n", currentcmd->cmdid); break;
}
break;
} while (true);
(*delay) = currentcmd->msdelay;
(*step)++;
return false;
}<sep>@@
expression list e;
expression e1,e2,e3;
@@
- RT_ASSERT((e1 >= e2 && e1 <= e3),e)
+ WARN_ONCE((e1 < e2 || e1 > e3),e)
<|end_of_text|> | 8,801 |
--- initial
+++ final
@@ -1,11 +1,11 @@
void rtl8821ae_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id, u32 cmd_len, u8 *cmdbuffer) {
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
u32 tmp_cmdbuf[2];
if (!rtlhal->fw_ready) {
- RT_ASSERT(false, "return H2C cmd because of Fw download fail!!!\n");
+ WARN_ONCE(true, "return H2C cmd because of Fw download fail!!!\n");
return;
}
memset(tmp_cmdbuf, 0, 8);
memcpy(tmp_cmdbuf, cmdbuffer, cmd_len);
_rtl8821ae_fill_h2c_command(hw, element_id, cmd_len, (u8 *)&tmp_cmdbuf);
}<sep>@@
expression list e;
@@
- RT_ASSERT(false,e)
+ WARN_ONCE(true,e)
<|end_of_text|> | 8,764 |
--- initial
+++ final
@@ -1,11 +1,11 @@
void rtl8723e_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id, u32 cmd_len, u8 *cmdbuffer) {
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
u32 tmp_cmdbuf[2];
if (!rtlhal->fw_ready) {
- RT_ASSERT(false, "return H2C cmd because of Fw download fail!!!\n");
+ WARN_ONCE(true, "return H2C cmd because of Fw download fail!!!\n");
return;
}
memset(tmp_cmdbuf, 0, 8);
memcpy(tmp_cmdbuf, cmdbuffer, cmd_len);
_rtl8723e_fill_h2c_command(hw, element_id, cmd_len, (u8 *)&tmp_cmdbuf);
}<sep>@@
expression list e;
@@
- RT_ASSERT(false,e)
+ WARN_ONCE(true,e)
<|end_of_text|> | 8,804 |
--- initial
+++ final
@@ -1,57 +1,57 @@
bool _rtl92c_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, u8 channel, u8 *stage, u8 *step, u32 *delay) {
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &(rtlpriv->phy);
struct swchnlcmd precommoncmd[MAX_PRECMD_CNT];
u32 precommoncmdcnt;
struct swchnlcmd postcommoncmd[MAX_POSTCMD_CNT];
u32 postcommoncmdcnt;
struct swchnlcmd rfdependcmd[MAX_RFDEPENDCMD_CNT];
u32 rfdependcmdcnt;
struct swchnlcmd *currentcmd = NULL;
u8 rfpath;
u8 num_total_rfpath = rtlphy->num_total_rfpath;
precommoncmdcnt = 0;
_rtl92c_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++, MAX_PRECMD_CNT, CMDID_SET_TXPOWEROWER_LEVEL, 0, 0, 0);
_rtl92c_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++, MAX_PRECMD_CNT, CMDID_END, 0, 0, 0);
postcommoncmdcnt = 0;
_rtl92c_phy_set_sw_chnl_cmdarray(postcommoncmd, postcommoncmdcnt++, MAX_POSTCMD_CNT, CMDID_END, 0, 0, 0);
rfdependcmdcnt = 0;
- RT_ASSERT((channel >= 1 && channel <= 14), "illegal channel for Zebra: %d\n", channel);
+ WARN_ONCE((channel < 1 || channel > 14), "illegal channel for Zebra: %d\n", channel);
_rtl92c_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++, MAX_RFDEPENDCMD_CNT, CMDID_RF_WRITEREG, RF_CHNLBW, channel, 10);
_rtl92c_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++, MAX_RFDEPENDCMD_CNT, CMDID_END, 0, 0, 0);
do {
switch (*stage) {
case 0: currentcmd = &precommoncmd[*step]; break;
case 1: currentcmd = &rfdependcmd[*step]; break;
case 2: currentcmd = &postcommoncmd[*step]; break;
default: RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Invalid 'stage' = %d, Check it!\n", *stage); return true;
}
if (currentcmd->cmdid == CMDID_END) {
if ((*stage) == 2) {
return true;
} else {
(*stage)++;
(*step) = 0;
continue;
}
}
switch (currentcmd->cmdid) {
case CMDID_SET_TXPOWEROWER_LEVEL: rtl92c_phy_set_txpower_level(hw, channel); break;
case CMDID_WRITEPORT_ULONG: rtl_write_dword(rtlpriv, currentcmd->para1, currentcmd->para2); break;
case CMDID_WRITEPORT_USHORT: rtl_write_word(rtlpriv, currentcmd->para1, (u16)currentcmd->para2); break;
case CMDID_WRITEPORT_UCHAR: rtl_write_byte(rtlpriv, currentcmd->para1, (u8)currentcmd->para2); break;
case CMDID_RF_WRITEREG:
for (rfpath = 0; rfpath < num_total_rfpath; rfpath++) {
rtlphy->rfreg_chnlval[rfpath] = ((rtlphy->rfreg_chnlval[rfpath] & 0xfffffc00) | currentcmd->para2);
rtl_set_rfreg(hw, (enum radio_path)rfpath, currentcmd->para1, RFREG_OFFSET_MASK, rtlphy->rfreg_chnlval[rfpath]);
}
_rtl92c_phy_sw_rf_seting(hw, channel);
break;
default: RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD, "switch case %#x not processed\n", currentcmd->cmdid); break;
}
break;
} while (true);
(*delay) = currentcmd->msdelay;
(*step)++;
return false;
}<sep>@@
expression list e;
expression e1,e2,e3;
@@
- RT_ASSERT((e1 >= e2 && e1 <= e3),e)
+ WARN_ONCE((e1 < e2 || e1 > e3),e)
<|end_of_text|> | 8,779 |