--- initial
+++ final
@@ -1,26 +1,26 @@
void *ptlrpcd_alloc_work(struct obd_import *imp, int (*cb)(const struct lu_env *, void *), void *cbdata) {
struct ptlrpc_request *req = NULL;
struct ptlrpc_work_async_args *args;
might_sleep();
if (!cb) return ERR_PTR(-EINVAL);
/* copy some code from deprecated fakereq. */
req = ptlrpc_request_cache_alloc(GFP_NOFS);
if (!req) {
CERROR("ptlrpc: run out of memory!\n");
return ERR_PTR(-ENOMEM);
}
ptlrpc_cli_req_init(req);
req->rq_send_state = LUSTRE_IMP_FULL;
req->rq_type = PTL_RPC_MSG_REQUEST;
req->rq_import = class_import_get(imp);
req->rq_interpret_reply = work_interpreter;
/* don't want reply */
req->rq_no_delay = 1;
req->rq_no_resend = 1;
req->rq_pill.rc_fmt = (void *)&worker_format;
- CLASSERT(sizeof(*args) <= sizeof(req->rq_async_args));
+ BUILD_BUG_ON(sizeof(*args) > sizeof(req->rq_async_args));
args = ptlrpc_req_async_args(req);
args->cb = cb;
args->cbdata = cbdata;
return req;
}

@@
expression e1,e2;
@@
- CLASSERT(e1 <= e2);
+ BUILD_BUG_ON(e1 > e2);
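Every record below applies the same mechanical change: Lustre's private CLASSERT(cond) compile-time assertion is replaced by the kernel's generic BUILD_BUG_ON(cond), whose sense is inverted — it breaks the build when its condition is TRUE — so each asserted condition must be logically negated. A minimal sketch of the two idioms follows; the CLASSERT definition is a simplified stand-in for the old Lustre macro, and demo_args/demo_req are invented names echoing the async-args structs seen in these diffs.

#include <linux/bug.h>	/* BUILD_BUG_ON(); <linux/build_bug.h> on newer kernels */

/* Simplified stand-in for the old Lustre macro: when cond is false,
 * 'case (cond):' collides with 'case 0:' and compilation fails.
 */
#define CLASSERT(cond) do { switch (42) { case (cond): case 0: break; } } while (0)

struct demo_args { void *cb; void *cbdata; };
struct demo_req { char rq_async_args[64]; };

static void demo_assert_styles(struct demo_req *req)
{
	/* old style: assert that the condition HOLDS */
	CLASSERT(sizeof(struct demo_args) <= sizeof(req->rq_async_args));
	/* new style: break the build when the condition is VIOLATED */
	BUILD_BUG_ON(sizeof(struct demo_args) > sizeof(req->rq_async_args));
}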
--- initial
+++ final
@@ -1,42 +1,42 @@
struct cfs_hash *cfs_hash_create(char *name, unsigned int cur_bits, unsigned int max_bits, unsigned int bkt_bits, unsigned int extra_bytes, unsigned int min_theta, unsigned int max_theta, struct cfs_hash_ops *ops, unsigned int flags) {
struct cfs_hash *hs;
int len;
- CLASSERT(CFS_HASH_THETA_BITS < 15);
+ BUILD_BUG_ON(CFS_HASH_THETA_BITS >= 15);
LASSERT(name);
LASSERT(ops->hs_key);
LASSERT(ops->hs_hash);
LASSERT(ops->hs_object);
LASSERT(ops->hs_keycmp);
LASSERT(ops->hs_get);
LASSERT(ops->hs_put_locked);
if (flags & CFS_HASH_REHASH) flags |= CFS_HASH_COUNTER; /* must have counter */
LASSERT(cur_bits > 0);
LASSERT(cur_bits >= bkt_bits);
LASSERT(max_bits >= cur_bits && max_bits < 31);
LASSERT(ergo(!(flags & CFS_HASH_REHASH), cur_bits == max_bits));
LASSERT(ergo(flags & CFS_HASH_REHASH, !(flags & CFS_HASH_NO_LOCK)));
LASSERT(ergo(flags & CFS_HASH_REHASH_KEY, ops->hs_keycpy));
len = !(flags & CFS_HASH_BIGNAME) ? CFS_HASH_NAME_LEN : CFS_HASH_BIGNAME_LEN;
LIBCFS_ALLOC(hs, offsetof(struct cfs_hash, hs_name[len]));
if (!hs) return NULL;
strlcpy(hs->hs_name, name, len);
hs->hs_flags = flags;
atomic_set(&hs->hs_refcount, 1);
atomic_set(&hs->hs_count, 0);
cfs_hash_lock_setup(hs);
cfs_hash_hlist_setup(hs);
hs->hs_cur_bits = (u8)cur_bits;
hs->hs_min_bits = (u8)cur_bits;
hs->hs_max_bits = (u8)max_bits;
hs->hs_bkt_bits = (u8)bkt_bits;
hs->hs_ops = ops;
hs->hs_extra_bytes = extra_bytes;
hs->hs_rehash_bits = 0;
cfs_wi_init(&hs->hs_rehash_wi, hs, cfs_hash_rehash_worker);
cfs_hash_depth_wi_init(hs);
if (cfs_hash_with_rehash(hs)) __cfs_hash_set_theta(hs, min_theta, max_theta);
hs->hs_buckets = cfs_hash_buckets_realloc(hs, NULL, 0, CFS_HASH_NBKT(hs));
if (hs->hs_buckets) return hs;
LIBCFS_FREE(hs, offsetof(struct cfs_hash, hs_name[len]));
return NULL;
}

@@
expression e1,e2;
@@
- CLASSERT(e1 < e2);
+ BUILD_BUG_ON(e1 >= e2);
--- initial
+++ final
@@ -1,94 +1,94 @@
int ptlrpc_connect_import(struct obd_import *imp) {
struct obd_device *obd = imp->imp_obd;
int initial_connect = 0;
int set_transno = 0;
__u64 committed_before_reconnect = 0;
struct ptlrpc_request *request;
char *bufs[] = {NULL, obd2cli_tgt(imp->imp_obd), obd->obd_uuid.uuid, (char *)&imp->imp_dlm_handle, (char *)&imp->imp_connect_data};
struct ptlrpc_connect_async_args *aa;
int rc;
spin_lock(&imp->imp_lock);
if (imp->imp_state == LUSTRE_IMP_CLOSED) {
spin_unlock(&imp->imp_lock);
CERROR("can't connect to a closed import\n");
return -EINVAL;
} else if (imp->imp_state == LUSTRE_IMP_FULL) {
spin_unlock(&imp->imp_lock);
CERROR("already connected\n");
return 0;
} else if (imp->imp_state == LUSTRE_IMP_CONNECTING || imp->imp_connected) {
spin_unlock(&imp->imp_lock);
CERROR("already connecting\n");
return -EALREADY;
}
IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_CONNECTING);
imp->imp_conn_cnt++;
imp->imp_resend_replay = 0;
if (!lustre_handle_is_used(&imp->imp_remote_handle))
initial_connect = 1;
else
committed_before_reconnect = imp->imp_peer_committed_transno;
set_transno = ptlrpc_first_transno(imp, &imp->imp_connect_data.ocd_transno);
spin_unlock(&imp->imp_lock);
rc = import_select_connection(imp);
if (rc) goto out;
rc = sptlrpc_import_sec_adapt(imp, NULL, NULL);
if (rc) goto out;
/* Reset connect flags to the originally requested flags, in case
* the server is updated on-the-fly we will get the new features.
*/
imp->imp_connect_data.ocd_connect_flags = imp->imp_connect_flags_orig;
/* Reset ocd_version each time so the server knows the exact versions */
imp->imp_connect_data.ocd_version = LUSTRE_VERSION_CODE;
imp->imp_msghdr_flags &= ~MSGHDR_AT_SUPPORT;
imp->imp_msghdr_flags &= ~MSGHDR_CKSUM_INCOMPAT18;
rc = obd_reconnect(NULL, imp->imp_obd->obd_self_export, obd, &obd->obd_uuid, &imp->imp_connect_data, NULL);
if (rc) goto out;
request = ptlrpc_request_alloc(imp, &RQF_MDS_CONNECT);
if (!request) {
rc = -ENOMEM;
goto out;
}
rc = ptlrpc_request_bufs_pack(request, LUSTRE_OBD_VERSION, imp->imp_connect_op, bufs, NULL);
if (rc) {
ptlrpc_request_free(request);
goto out;
}
/* Report the rpc service time to the server so that it knows how long
* to wait for clients to join recovery
*/
lustre_msg_set_service_time(request->rq_reqmsg, at_timeout2est(request->rq_timeout));
/* The amount of time we give the server to process the connect req.
* import_select_connection will increase the net latency on
* repeated reconnect attempts to cover slow networks.
* We override/ignore the server rpc completion estimate here,
* which may be large if this is a reconnect attempt
*/
request->rq_timeout = INITIAL_CONNECT_TIMEOUT;
lustre_msg_set_timeout(request->rq_reqmsg, request->rq_timeout);
request->rq_no_resend = 1;
request->rq_no_delay = 1;
request->rq_send_state = LUSTRE_IMP_CONNECTING;
/* Allow a slightly larger reply for future growth compatibility */
req_capsule_set_size(&request->rq_pill, &RMF_CONNECT_DATA, RCL_SERVER, sizeof(struct obd_connect_data) + 16 * sizeof(__u64));
ptlrpc_request_set_replen(request);
request->rq_interpret_reply = ptlrpc_connect_interpret;
- CLASSERT(sizeof(*aa) <= sizeof(request->rq_async_args));
+ BUILD_BUG_ON(sizeof(*aa) > sizeof(request->rq_async_args));
aa = ptlrpc_req_async_args(request);
memset(aa, 0, sizeof(*aa));
aa->pcaa_peer_committed = committed_before_reconnect;
aa->pcaa_initial_connect = initial_connect;
if (aa->pcaa_initial_connect) {
spin_lock(&imp->imp_lock);
imp->imp_replayable = 1;
spin_unlock(&imp->imp_lock);
lustre_msg_add_op_flags(request->rq_reqmsg, MSG_CONNECT_INITIAL);
}
if (set_transno) lustre_msg_add_op_flags(request->rq_reqmsg, MSG_CONNECT_TRANSNO);
DEBUG_REQ(D_RPCTRACE, request, "(re)connect request (timeout %d)", request->rq_timeout);
ptlrpcd_add_req(request);
rc = 0;
out:
if (rc != 0) IMPORT_SET_STATE(imp, LUSTRE_IMP_DISCON);
return rc;
}

@@
expression e1,e2;
@@
- CLASSERT(e1 <= e2);
+ BUILD_BUG_ON(e1 > e2);
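The sizeof(*aa) check this rule rewrites guards a recurring ptlrpc pattern: per-request interpreter state is parked inside a fixed scratch buffer embedded in the request (rq_async_args) instead of being allocated separately, and the assert turns any growth of the args struct past that buffer into a compile error. A hedged sketch of the pattern, with invented names (fake_request, parked_args, scratch) standing in for the real ptlrpc types:

#include <linux/bug.h>
#include <linux/types.h>

struct parked_args {	/* stand-in for e.g. ptlrpc_connect_async_args */
	u64 committed;
	int initial;
};

struct fake_request {	/* stand-in for ptlrpc_request */
	char scratch[56];	/* stand-in for rq_async_args */
};

static struct parked_args *fake_req_async_args(struct fake_request *req)
{
	/* reusing the embedded buffer is only safe if the args fit */
	BUILD_BUG_ON(sizeof(struct parked_args) > sizeof(req->scratch));
	return (struct parked_args *)req->scratch;
}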
--- initial
+++ final
@@ -1,67 +1,67 @@
static int replay_one_lock(struct obd_import *imp, struct ldlm_lock *lock) {
struct ptlrpc_request *req;
struct ldlm_async_args *aa;
struct ldlm_request *body;
int flags;
/* Bug 11974: Do not replay a lock which is actively being canceled */
if (ldlm_is_bl_done(lock)) {
LDLM_DEBUG(lock, "Not replaying canceled lock:");
return 0;
}
/* If this is a reply-less callback lock, we cannot replay it, since the
* server might have dropped it long ago, but the notification of that
* event was lost in the network (and the server already granted a
* conflicting lock).
*/
if (ldlm_is_cancel_on_block(lock)) {
LDLM_DEBUG(lock, "Not replaying reply-less lock:");
ldlm_lock_cancel(lock);
return 0;
}
/*
* If granted mode matches the requested mode, this lock is granted.
*
* If they differ, but we have a granted mode, then we were granted
* one mode and now want another: ergo, converting.
*
* If we haven't been granted anything and are on a resource list,
* then we're blocked/waiting.
*
* If we haven't been granted anything and we're NOT on a resource list,
* then we haven't got a reply yet and don't have a known disposition.
* This happens whenever a lock enqueue is the request that triggers
* recovery.
*/
if (lock->l_granted_mode == lock->l_req_mode)
flags = LDLM_FL_REPLAY | LDLM_FL_BLOCK_GRANTED;
else if (lock->l_granted_mode)
flags = LDLM_FL_REPLAY | LDLM_FL_BLOCK_CONV;
else if (!list_empty(&lock->l_res_link))
flags = LDLM_FL_REPLAY | LDLM_FL_BLOCK_WAIT;
else
flags = LDLM_FL_REPLAY;
req = ptlrpc_request_alloc_pack(imp, &RQF_LDLM_ENQUEUE, LUSTRE_DLM_VERSION, LDLM_ENQUEUE);
if (!req) return -ENOMEM;
/* We're part of recovery, so don't wait for it. */
req->rq_send_state = LUSTRE_IMP_REPLAY_LOCKS;
body = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
ldlm_lock2desc(lock, &body->lock_desc);
body->lock_flags = ldlm_flags_to_wire(flags);
ldlm_lock2handle(lock, &body->lock_handle[0]);
if (lock->l_lvb_len > 0) req_capsule_extend(&req->rq_pill, &RQF_LDLM_ENQUEUE_LVB);
req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER, lock->l_lvb_len);
ptlrpc_request_set_replen(req);
/* notify the server we've replayed all requests.
* also, we mark the request to be put on a dedicated
* queue to be processed after all request replays.
* bug 6063
*/
lustre_msg_set_flags(req->rq_reqmsg, MSG_REQ_REPLAY_DONE);
LDLM_DEBUG(lock, "replaying lock:");
atomic_inc(&req->rq_import->imp_replay_inflight);
- CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
+ BUILD_BUG_ON(sizeof(*aa) > sizeof(req->rq_async_args));
aa = ptlrpc_req_async_args(req);
aa->lock_handle = body->lock_handle[0];
req->rq_interpret_reply = (ptlrpc_interpterer_t)replay_lock_interpret;
ptlrpcd_add_req(req);
return 0;
}

@@
expression e1,e2;
@@
- CLASSERT(e1 <= e2);
+ BUILD_BUG_ON(e1 > e2);
--- initial
+++ final
@@ -1,21 +1,21 @@
void ldlm_resource_dump(int level, struct ldlm_resource *res) {
struct ldlm_lock *lock;
unsigned int granted = 0;
- CLASSERT(RES_NAME_SIZE == 4);
+ BUILD_BUG_ON(RES_NAME_SIZE != 4);
if (!((libcfs_debug | D_ERROR) & level)) return;
CDEBUG(level, "--- Resource: " DLDLMRES " (%p) refcount = %d\n", PLDLMRES(res), res, atomic_read(&res->lr_refcount));
if (!list_empty(&res->lr_granted)) {
CDEBUG(level, "Granted locks (in reverse order):\n");
list_for_each_entry_reverse(lock, &res->lr_granted, l_res_link) {
LDLM_DEBUG_LIMIT(level, lock, "###");
if (!(level & D_CANTMASK) && ++granted > ldlm_dump_granted_max) {
CDEBUG(level, "only dump %d granted locks to avoid DDOS.\n", granted);
break;
}
}
}
if (!list_empty(&res->lr_waiting)) {
CDEBUG(level, "Waiting locks:\n");
list_for_each_entry(lock, &res->lr_waiting, l_res_link) LDLM_DEBUG_LIMIT(level, lock, "###");
}
}

@@
expression e1,e2;
@@
- CLASSERT(e1 == e2);
+ BUILD_BUG_ON(e1 != e2);
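Equality asserts invert the same way: CLASSERT(a == b) states that a equals b, so the failing condition handed to BUILD_BUG_ON is a != b. Here the check pins the DLM resource name to exactly four words, presumably because the DLDLMRES/PLDLMRES printing macros assume that count. A tiny sketch with assumed values:

#include <linux/bug.h>
#include <linux/types.h>

#define DEMO_RES_NAME_SIZE 4	/* assumed value, mirroring RES_NAME_SIZE */

struct demo_res_id {
	u64 name[DEMO_RES_NAME_SIZE];
};

static void demo_name_size_check(void)
{
	/* code that formats all four words relies on this staying 4 */
	BUILD_BUG_ON(DEMO_RES_NAME_SIZE != 4);
}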
--- initial
+++ final
@@ -1,47 +1,47 @@
int lnet_ipif_query(char *name, int *up, __u32 *ip, __u32 *mask) {
struct ifreq ifr;
int nob;
int rc;
__u32 val;
nob = strnlen(name, IFNAMSIZ);
if (nob == IFNAMSIZ) {
CERROR("Interface name %s too long\n", name);
return -EINVAL;
}
- CLASSERT(sizeof(ifr.ifr_name) >= IFNAMSIZ);
+ BUILD_BUG_ON(IFNAMSIZ > sizeof(ifr.ifr_name));
if (strlen(name) > sizeof(ifr.ifr_name) - 1) return -E2BIG;
strncpy(ifr.ifr_name, name, sizeof(ifr.ifr_name));
rc = lnet_sock_ioctl(SIOCGIFFLAGS, (unsigned long)&ifr);
if (rc) {
CERROR("Can't get flags for interface %s\n", name);
return rc;
}
if (!(ifr.ifr_flags & IFF_UP)) {
CDEBUG(D_NET, "Interface %s down\n", name);
*up = 0;
*ip = *mask = 0;
return 0;
}
*up = 1;
if (strlen(name) > sizeof(ifr.ifr_name) - 1) return -E2BIG;
strncpy(ifr.ifr_name, name, sizeof(ifr.ifr_name));
ifr.ifr_addr.sa_family = AF_INET;
rc = lnet_sock_ioctl(SIOCGIFADDR, (unsigned long)&ifr);
if (rc) {
CERROR("Can't get IP address for interface %s\n", name);
return rc;
}
val = ((struct sockaddr_in *)&ifr.ifr_addr)->sin_addr.s_addr;
*ip = ntohl(val);
if (strlen(name) > sizeof(ifr.ifr_name) - 1) return -E2BIG;
strncpy(ifr.ifr_name, name, sizeof(ifr.ifr_name));
ifr.ifr_addr.sa_family = AF_INET;
rc = lnet_sock_ioctl(SIOCGIFNETMASK, (unsigned long)&ifr);
if (rc) {
CERROR("Can't get netmask for interface %s\n", name);
return rc;
}
val = ((struct sockaddr_in *)&ifr.ifr_netmask)->sin_addr.s_addr;
*mask = ntohl(val);
return 0;
}

@@
expression e1,e2;
@@
- CLASSERT(e1 <= e2);
+ BUILD_BUG_ON(e1 > e2);
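One detail worth noting in this record: the source line was CLASSERT(sizeof(ifr.ifr_name) >= IFNAMSIZ), with the operands in the opposite order from the e1 <= e2 pattern, yet the rule still fired and emitted BUILD_BUG_ON(IFNAMSIZ > sizeof(ifr.ifr_name)). That is presumably Coccinelle's standard isomorphisms treating a >= b and b <= a as the same pattern. The rewritten form is equivalent, as this sketch shows:

#include <linux/bug.h>
#include <linux/if.h>	/* struct ifreq, IFNAMSIZ */

static void demo_ifname_check(void)
{
	/* !(sizeof(ifr_name) >= IFNAMSIZ) is IFNAMSIZ > sizeof(ifr_name) */
	BUILD_BUG_ON(IFNAMSIZ > sizeof(((struct ifreq *)0)->ifr_name));
}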
--- initial
+++ final
@@ -1,99 +1,99 @@
int lov_getstripe(struct lov_object *obj, struct lov_stripe_md *lsm, struct lov_user_md __user *lump) {
/* we use lov_user_md_v3 because it is larger than lov_user_md_v1 */
struct lov_user_md_v3 lum;
struct lov_mds_md *lmmk;
u32 stripe_count;
ssize_t lmm_size;
size_t lmmk_size;
size_t lum_size;
int rc;
mm_segment_t seg;
if (!lsm) return -ENODATA;
/*
* "Switch to kernel segment" to allow copying from kernel space by
* copy_{to,from}_user().
*/
seg = get_fs();
set_fs(KERNEL_DS);
if (lsm->lsm_magic != LOV_MAGIC_V1 && lsm->lsm_magic != LOV_MAGIC_V3) {
CERROR("bad LSM MAGIC: 0x%08X != 0x%08X nor 0x%08X\n", lsm->lsm_magic, LOV_MAGIC_V1, LOV_MAGIC_V3);
rc = -EIO;
goto out;
}
if (!lsm_is_released(lsm))
stripe_count = lsm->lsm_stripe_count;
else
stripe_count = 0;
/* we only need the header part from user space to get lmm_magic and
* lmm_stripe_count, (the header part is common to v1 and v3)
*/
lum_size = sizeof(struct lov_user_md_v1);
if (copy_from_user(&lum, lump, lum_size)) {
rc = -EFAULT;
goto out;
}
if (lum.lmm_magic != LOV_USER_MAGIC_V1 && lum.lmm_magic != LOV_USER_MAGIC_V3 && lum.lmm_magic != LOV_USER_MAGIC_SPECIFIC) {
rc = -EINVAL;
goto out;
}
if (lum.lmm_stripe_count && (lum.lmm_stripe_count < lsm->lsm_stripe_count)) {
/* Return the right stripe count to the user */
lum.lmm_stripe_count = stripe_count;
rc = copy_to_user(lump, &lum, lum_size);
rc = -EOVERFLOW;
goto out;
}
lmmk_size = lov_mds_md_size(stripe_count, lsm->lsm_magic);
lmmk = libcfs_kvzalloc(lmmk_size, GFP_NOFS);
if (!lmmk) {
rc = -ENOMEM;
goto out;
}
lmm_size = lov_lsm_pack(lsm, lmmk, lmmk_size);
if (lmm_size < 0) {
rc = lmm_size;
goto out_free;
}
/* FIXME: Bug 1185 - copy fields properly when structs change */
/* struct lov_user_md_v3 and struct lov_mds_md_v3 must be the same */
- CLASSERT(sizeof(lum) == sizeof(struct lov_mds_md_v3));
- CLASSERT(sizeof(lum.lmm_objects[0]) == sizeof(lmmk->lmm_objects[0]));
+ BUILD_BUG_ON(sizeof(lum) != sizeof(struct lov_mds_md_v3));
+ BUILD_BUG_ON(sizeof(lum.lmm_objects[0]) != sizeof(lmmk->lmm_objects[0]));
if (cpu_to_le32(LOV_MAGIC) != LOV_MAGIC && (lmmk->lmm_magic == cpu_to_le32(LOV_MAGIC_V1) || lmmk->lmm_magic == cpu_to_le32(LOV_MAGIC_V3))) {
lustre_swab_lov_mds_md(lmmk);
lustre_swab_lov_user_md_objects((struct lov_user_ost_data *)lmmk->lmm_objects, lmmk->lmm_stripe_count);
}
if (lum.lmm_magic == LOV_USER_MAGIC) {
/* User requested v1, so we need to skip lmm_pool_name */
if (lmmk->lmm_magic == LOV_MAGIC_V3) {
memmove(((struct lov_mds_md_v1 *)lmmk)->lmm_objects, ((struct lov_mds_md_v3 *)lmmk)->lmm_objects, lmmk->lmm_stripe_count * sizeof(struct lov_ost_data_v1));
lmm_size -= LOV_MAXPOOLNAME;
}
} else {
/* if v3 we just have to update the lum_size */
lum_size = sizeof(struct lov_user_md_v3);
}
/* User wasn't expecting this many OST entries */
if (lum.lmm_stripe_count == 0) {
lmm_size = lum_size;
} else if (lum.lmm_stripe_count < lmmk->lmm_stripe_count) {
rc = -EOVERFLOW;
goto out_free;
}
/*
* There is a difference between lov_mds_md and lov_user_md, so we
* have to re-order the data before copying it to user space.
*/
lum.lmm_stripe_count = lmmk->lmm_stripe_count;
lum.lmm_layout_gen = lmmk->lmm_layout_gen;
((struct lov_user_md *)lmmk)->lmm_layout_gen = lum.lmm_layout_gen;
((struct lov_user_md *)lmmk)->lmm_stripe_count = lum.lmm_stripe_count;
if (copy_to_user(lump, lmmk, lmm_size))
rc = -EFAULT;
else
rc = 0;
out_free:
kvfree(lmmk);
out:
set_fs(seg);
return rc;
}

@@
expression e1,e2;
@@
- CLASSERT(e1 == e2);
+ BUILD_BUG_ON(e1 != e2);
--- initial
+++ final
@@ -1,34 +1,34 @@
ssize_t lov_lsm_pack(const struct lov_stripe_md *lsm, void *buf, size_t buf_size) {
struct lov_ost_data_v1 *lmm_objects;
struct lov_mds_md_v1 *lmmv1 = buf;
struct lov_mds_md_v3 *lmmv3 = buf;
size_t lmm_size;
unsigned int i;
lmm_size = lov_mds_md_size(lsm->lsm_stripe_count, lsm->lsm_magic);
if (!buf_size) return lmm_size;
if (buf_size < lmm_size) return -ERANGE;
/*
* lmmv1 and lmmv3 point to the same struct and have the
* same first fields
*/
lmmv1->lmm_magic = cpu_to_le32(lsm->lsm_magic);
lmm_oi_cpu_to_le(&lmmv1->lmm_oi, &lsm->lsm_oi);
lmmv1->lmm_stripe_size = cpu_to_le32(lsm->lsm_stripe_size);
lmmv1->lmm_stripe_count = cpu_to_le16(lsm->lsm_stripe_count);
lmmv1->lmm_pattern = cpu_to_le32(lsm->lsm_pattern);
lmmv1->lmm_layout_gen = cpu_to_le16(lsm->lsm_layout_gen);
if (lsm->lsm_magic == LOV_MAGIC_V3) {
- CLASSERT(sizeof(lsm->lsm_pool_name) == sizeof(lmmv3->lmm_pool_name));
+ BUILD_BUG_ON(sizeof(lsm->lsm_pool_name) != sizeof(lmmv3->lmm_pool_name));
strlcpy(lmmv3->lmm_pool_name, lsm->lsm_pool_name, sizeof(lmmv3->lmm_pool_name));
lmm_objects = lmmv3->lmm_objects;
} else {
lmm_objects = lmmv1->lmm_objects;
}
for (i = 0; i < lsm->lsm_stripe_count; i++) {
struct lov_oinfo *loi = lsm->lsm_oinfo[i];
ostid_cpu_to_le(&loi->loi_oi, &lmm_objects[i].l_ost_oi);
lmm_objects[i].l_ost_gen = cpu_to_le32(loi->loi_ost_gen);
lmm_objects[i].l_ost_idx = cpu_to_le32(loi->loi_ost_idx);
}
return lmm_size;
}

@@
expression e1,e2;
@@
- CLASSERT(e1 == e2);
+ BUILD_BUG_ON(e1 != e2);
--- initial
+++ final
@@ -1,28 +1,28 @@
void mdc_create_pack(struct ptlrpc_request *req, struct md_op_data *op_data, const void *data, size_t datalen, umode_t mode, uid_t uid, gid_t gid, cfs_cap_t cap_effective, __u64 rdev) {
struct mdt_rec_create *rec;
char *tmp;
__u64 flags;
- CLASSERT(sizeof(struct mdt_rec_reint) == sizeof(struct mdt_rec_create));
+ BUILD_BUG_ON(sizeof(struct mdt_rec_reint) != sizeof(struct mdt_rec_create));
rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT);
rec->cr_opcode = REINT_CREATE;
rec->cr_fsuid = uid;
rec->cr_fsgid = gid;
rec->cr_cap = cap_effective;
rec->cr_fid1 = op_data->op_fid1;
rec->cr_fid2 = op_data->op_fid2;
rec->cr_mode = mode;
rec->cr_rdev = rdev;
rec->cr_time = op_data->op_mod_time;
rec->cr_suppgid1 = op_data->op_suppgids[0];
rec->cr_suppgid2 = op_data->op_suppgids[1];
flags = 0;
if (op_data->op_bias & MDS_CREATE_VOLATILE) flags |= MDS_OPEN_VOLATILE;
set_mrc_cr_flags(rec, flags);
rec->cr_bias = op_data->op_bias;
rec->cr_umask = current_umask();
mdc_pack_name(req, &RMF_NAME, op_data->op_name, op_data->op_namelen);
if (data) {
tmp = req_capsule_client_get(&req->rq_pill, &RMF_EADATA);
memcpy(tmp, data, datalen);
}
}

@@
expression e1,e2;
@@
- CLASSERT(e1 == e2);
+ BUILD_BUG_ON(e1 != e2);
--- initial
+++ final
@@ -1,16 +1,16 @@
void mdc_link_pack(struct ptlrpc_request *req, struct md_op_data *op_data) {
struct mdt_rec_link *rec;
- CLASSERT(sizeof(struct mdt_rec_reint) == sizeof(struct mdt_rec_link));
+ BUILD_BUG_ON(sizeof(struct mdt_rec_reint) != sizeof(struct mdt_rec_link));
rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT);
rec->lk_opcode = REINT_LINK;
rec->lk_fsuid = op_data->op_fsuid; /* current->fsuid; */
rec->lk_fsgid = op_data->op_fsgid; /* current->fsgid; */
rec->lk_cap = op_data->op_cap; /* current->cap_effective; */
rec->lk_suppgid1 = op_data->op_suppgids[0];
rec->lk_suppgid2 = op_data->op_suppgids[1];
rec->lk_fid1 = op_data->op_fid1;
rec->lk_fid2 = op_data->op_fid2;
rec->lk_time = op_data->op_mod_time;
rec->lk_bias = op_data->op_bias;
mdc_pack_name(req, &RMF_NAME, op_data->op_name, op_data->op_namelen);
}

@@
expression e1,e2;
@@
- CLASSERT(e1 == e2);
+ BUILD_BUG_ON(e1 != e2);
--- initial
+++ final
@@ -1,33 +1,33 @@
void mdc_open_pack(struct ptlrpc_request *req, struct md_op_data *op_data, umode_t mode, __u64 rdev, __u64 flags, const void *lmm, size_t lmmlen) {
struct mdt_rec_create *rec;
char *tmp;
__u64 cr_flags;
- CLASSERT(sizeof(struct mdt_rec_reint) == sizeof(struct mdt_rec_create));
+ BUILD_BUG_ON(sizeof(struct mdt_rec_reint) != sizeof(struct mdt_rec_create));
rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT);
/* XXX do something about time, uid, gid */
rec->cr_opcode = REINT_OPEN;
rec->cr_fsuid = from_kuid(&init_user_ns, current_fsuid());
rec->cr_fsgid = from_kgid(&init_user_ns, current_fsgid());
rec->cr_cap = cfs_curproc_cap_pack();
rec->cr_fid1 = op_data->op_fid1;
rec->cr_fid2 = op_data->op_fid2;
rec->cr_mode = mode;
cr_flags = mds_pack_open_flags(flags);
rec->cr_rdev = rdev;
rec->cr_time = op_data->op_mod_time;
rec->cr_suppgid1 = op_data->op_suppgids[0];
rec->cr_suppgid2 = op_data->op_suppgids[1];
rec->cr_bias = op_data->op_bias;
rec->cr_umask = current_umask();
rec->cr_old_handle = op_data->op_handle;
if (op_data->op_name) {
mdc_pack_name(req, &RMF_NAME, op_data->op_name, op_data->op_namelen);
if (op_data->op_bias & MDS_CREATE_VOLATILE) cr_flags |= MDS_OPEN_VOLATILE;
}
if (lmm) {
cr_flags |= MDS_OPEN_HAS_EA;
tmp = req_capsule_client_get(&req->rq_pill, &RMF_EADATA);
memcpy(tmp, lmm, lmmlen);
}
set_mrc_cr_flags(rec, cr_flags);
}

@@
expression e1,e2;
@@
- CLASSERT(e1 == e2);
+ BUILD_BUG_ON(e1 != e2);
--- initial
+++ final
@@ -1,26 +1,26 @@
void mdc_rename_pack(struct ptlrpc_request *req, struct md_op_data *op_data, const char *old, size_t oldlen, const char *new, size_t newlen) {
struct mdt_rec_rename *rec;
- CLASSERT(sizeof(struct mdt_rec_reint) == sizeof(struct mdt_rec_rename));
+ BUILD_BUG_ON(sizeof(struct mdt_rec_reint) != sizeof(struct mdt_rec_rename));
rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT);
/* XXX do something about time, uid, gid */
rec->rn_opcode = op_data->op_cli_flags & CLI_MIGRATE ? REINT_MIGRATE : REINT_RENAME;
rec->rn_opcode = REINT_RENAME;
rec->rn_fsuid = op_data->op_fsuid;
rec->rn_fsgid = op_data->op_fsgid;
rec->rn_cap = op_data->op_cap;
rec->rn_suppgid1 = op_data->op_suppgids[0];
rec->rn_suppgid2 = op_data->op_suppgids[1];
rec->rn_fid1 = op_data->op_fid1;
rec->rn_fid2 = op_data->op_fid2;
rec->rn_time = op_data->op_mod_time;
rec->rn_mode = op_data->op_mode;
rec->rn_bias = op_data->op_bias;
mdc_pack_name(req, &RMF_NAME, old, oldlen);
if (new) mdc_pack_name(req, &RMF_SYMTGT, new, newlen);
if (op_data->op_cli_flags & CLI_MIGRATE && op_data->op_bias & MDS_RENAME_MIGRATE) {
struct mdt_ioepoch *epoch;
mdc_intent_close_pack(req, op_data);
epoch = req_capsule_client_get(&req->rq_pill, &RMF_MDT_EPOCH);
mdc_ioepoch_pack(epoch, op_data);
}
}

@@
expression e1,e2;
@@
- CLASSERT(e1 == e2);
+ BUILD_BUG_ON(e1 != e2);
--- initial
+++ final
@@ -1,17 +1,17 @@
void mdc_setattr_pack(struct ptlrpc_request *req, struct md_op_data *op_data, void *ea, size_t ealen) {
struct mdt_rec_setattr *rec;
struct lov_user_md *lum = NULL;
- CLASSERT(sizeof(struct mdt_rec_reint) == sizeof(struct mdt_rec_setattr));
+ BUILD_BUG_ON(sizeof(struct mdt_rec_reint) != sizeof(struct mdt_rec_setattr));
rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT);
mdc_setattr_pack_rec(rec, op_data);
if (ealen == 0) return;
lum = req_capsule_client_get(&req->rq_pill, &RMF_EADATA);
if (!ea) { /* Remove LOV EA */
lum->lmm_magic = cpu_to_le32(LOV_USER_MAGIC_V1);
lum->lmm_stripe_size = 0;
lum->lmm_stripe_count = 0;
lum->lmm_stripe_offset = (typeof(lum->lmm_stripe_offset))(-1);
} else {
memcpy(lum, ea, ealen);
}
}

@@
expression e1,e2;
@@
- CLASSERT(e1 == e2);
+ BUILD_BUG_ON(e1 != e2);
--- initial
+++ final
@@ -1,17 +1,17 @@
void mdc_unlink_pack(struct ptlrpc_request *req, struct md_op_data *op_data) {
struct mdt_rec_unlink *rec;
- CLASSERT(sizeof(struct mdt_rec_reint) == sizeof(struct mdt_rec_unlink));
+ BUILD_BUG_ON(sizeof(struct mdt_rec_reint) != sizeof(struct mdt_rec_unlink));
rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT);
rec->ul_opcode = op_data->op_cli_flags & CLI_RM_ENTRY ? REINT_RMENTRY : REINT_UNLINK;
rec->ul_fsuid = op_data->op_fsuid;
rec->ul_fsgid = op_data->op_fsgid;
rec->ul_cap = op_data->op_cap;
rec->ul_mode = op_data->op_mode;
rec->ul_suppgid1 = op_data->op_suppgids[0];
rec->ul_suppgid2 = -1;
rec->ul_fid1 = op_data->op_fid1;
rec->ul_fid2 = op_data->op_fid2;
rec->ul_time = op_data->op_mod_time;
rec->ul_bias = op_data->op_bias;
mdc_pack_name(req, &RMF_NAME, op_data->op_name, op_data->op_namelen);
}

@@
expression e1,e2;
@@
- CLASSERT(e1 == e2);
+ BUILD_BUG_ON(e1 != e2);
--- initial
+++ final
@@ -1,38 +1,38 @@
int mdc_intent_getattr_async(struct obd_export *exp, struct md_enqueue_info *minfo, struct ldlm_enqueue_info *einfo) {
struct md_op_data *op_data = &minfo->mi_data;
struct lookup_intent *it = &minfo->mi_it;
struct ptlrpc_request *req;
struct mdc_getattr_args *ga;
struct obd_device *obddev = class_exp2obd(exp);
struct ldlm_res_id res_id;
/* XXX: Both MDS_INODELOCK_LOOKUP and MDS_INODELOCK_UPDATE are needed
* for statahead currently. When CMD is considered in the future, these
* two bits may be managed by different MDSs and should be adjusted then.
*/
union ldlm_policy_data policy = {.l_inodebits = {MDS_INODELOCK_LOOKUP | MDS_INODELOCK_UPDATE}};
int rc = 0;
__u64 flags = LDLM_FL_HAS_INTENT;
CDEBUG(D_DLMTRACE, "name: %.*s in inode " DFID ", intent: %s flags %#Lo\n", (int)op_data->op_namelen, op_data->op_name, PFID(&op_data->op_fid1), ldlm_it2str(it->it_op), it->it_flags);
fid_build_reg_res_name(&op_data->op_fid1, &res_id);
req = mdc_intent_getattr_pack(exp, it, op_data);
if (IS_ERR(req)) return PTR_ERR(req);
rc = obd_get_request_slot(&obddev->u.cli);
if (rc != 0) {
ptlrpc_req_finished(req);
return rc;
}
rc = ldlm_cli_enqueue(exp, &req, einfo, &res_id, &policy, &flags, NULL, 0, LVB_T_NONE, &minfo->mi_lockh, 1);
if (rc < 0) {
obd_put_request_slot(&obddev->u.cli);
ptlrpc_req_finished(req);
return rc;
}
- CLASSERT(sizeof(*ga) <= sizeof(req->rq_async_args));
+ BUILD_BUG_ON(sizeof(*ga) > sizeof(req->rq_async_args));
ga = ptlrpc_req_async_args(req);
ga->ga_exp = exp;
ga->ga_minfo = minfo;
ga->ga_einfo = einfo;
req->rq_interpret_reply = mdc_intent_getattr_async_interpret;
ptlrpcd_add_req(req);
return 0;
}

@@
expression e1,e2;
@@
- CLASSERT(e1 <= e2);
+ BUILD_BUG_ON(e1 > e2);
--- initial
+++ final
@@ -1,73 +1,73 @@
static int mdc_xattr_common(struct obd_export *exp, const struct req_format *fmt, const struct lu_fid *fid, int opcode, u64 valid, const char *xattr_name, const char *input, int input_size, int output_size, int flags, __u32 suppgid, struct ptlrpc_request **request) {
struct ptlrpc_request *req;
int xattr_namelen = 0;
char *tmp;
int rc;
*request = NULL;
req = ptlrpc_request_alloc(class_exp2cliimp(exp), fmt);
if (!req) return -ENOMEM;
if (xattr_name) {
xattr_namelen = strlen(xattr_name) + 1;
req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT, xattr_namelen);
}
if (input_size) {
LASSERT(input);
req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_CLIENT, input_size);
}
/* Flush local XATTR locks to get rid of a possible cancel RPC */
if (opcode == MDS_REINT && fid_is_sane(fid) && exp->exp_connect_data.ocd_ibits_known & MDS_INODELOCK_XATTR) {
LIST_HEAD(cancels);
int count;
/* Without this, packing would fail */
if (input_size == 0) req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_CLIENT, 0);
count = mdc_resource_get_unused(exp, fid, &cancels, LCK_EX, MDS_INODELOCK_XATTR);
rc = mdc_prep_elc_req(exp, req, MDS_REINT, &cancels, count);
if (rc) {
ptlrpc_request_free(req);
return rc;
}
} else {
rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, opcode);
if (rc) {
ptlrpc_request_free(req);
return rc;
}
}
if (opcode == MDS_REINT) {
struct mdt_rec_setxattr *rec;
- CLASSERT(sizeof(struct mdt_rec_setxattr) == sizeof(struct mdt_rec_reint));
+ BUILD_BUG_ON(sizeof(struct mdt_rec_setxattr) != sizeof(struct mdt_rec_reint));
rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT);
rec->sx_opcode = REINT_SETXATTR;
rec->sx_fsuid = from_kuid(&init_user_ns, current_fsuid());
rec->sx_fsgid = from_kgid(&init_user_ns, current_fsgid());
rec->sx_cap = cfs_curproc_cap_pack();
rec->sx_suppgid1 = suppgid;
rec->sx_suppgid2 = -1;
rec->sx_fid = *fid;
rec->sx_valid = valid | OBD_MD_FLCTIME;
rec->sx_time = ktime_get_real_seconds();
rec->sx_size = output_size;
rec->sx_flags = flags;
} else {
mdc_pack_body(req, fid, valid, output_size, suppgid, flags);
}
if (xattr_name) {
tmp = req_capsule_client_get(&req->rq_pill, &RMF_NAME);
memcpy(tmp, xattr_name, xattr_namelen);
}
if (input_size) {
tmp = req_capsule_client_get(&req->rq_pill, &RMF_EADATA);
memcpy(tmp, input, input_size);
}
if (req_capsule_has_field(&req->rq_pill, &RMF_EADATA, RCL_SERVER)) req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_SERVER, output_size);
ptlrpc_request_set_replen(req);
/* make rpc */
if (opcode == MDS_REINT) mdc_get_mod_rpc_slot(req, NULL);
rc = ptlrpc_queue_wait(req);
if (opcode == MDS_REINT) mdc_put_mod_rpc_slot(req, NULL);
if (rc)
ptlrpc_req_finished(req);
else
*request = req;
return rc;
}

@@
expression e1,e2;
@@
- CLASSERT(e1 == e2);
+ BUILD_BUG_ON(e1 != e2);
--- initial
+++ final
@@ -1,12 +1,12 @@
int lnet_fault_init(void) {
- CLASSERT(LNET_PUT_BIT == 1 << LNET_MSG_PUT);
- CLASSERT(LNET_ACK_BIT == 1 << LNET_MSG_ACK);
- CLASSERT(LNET_GET_BIT == 1 << LNET_MSG_GET);
- CLASSERT(LNET_REPLY_BIT == 1 << LNET_MSG_REPLY);
+ BUILD_BUG_ON(LNET_PUT_BIT != 1 << LNET_MSG_PUT);
+ BUILD_BUG_ON(LNET_ACK_BIT != 1 << LNET_MSG_ACK);
+ BUILD_BUG_ON(LNET_GET_BIT != 1 << LNET_MSG_GET);
+ BUILD_BUG_ON(LNET_REPLY_BIT != 1 << LNET_MSG_REPLY);
mutex_init(&delay_dd.dd_mutex);
spin_lock_init(&delay_dd.dd_lock);
init_waitqueue_head(&delay_dd.dd_waitq);
init_waitqueue_head(&delay_dd.dd_ctl_waitq);
INIT_LIST_HEAD(&delay_dd.dd_sched_rules);
return 0;
}

@@
expression e1,e2;
@@
- CLASSERT(e1 == e2);
+ BUILD_BUG_ON(e1 != e2);
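These four asserts tie the LNET_*_BIT mask macros to the message-type enum: each mask must be exactly 1 << type, so code can map a message type to its mask bit with a plain shift instead of a lookup table. A small sketch of the idiom with hypothetical names:

#include <linux/bug.h>

enum demo_msg_type { DEMO_MSG_PUT, DEMO_MSG_ACK, DEMO_MSG_GET };

#define DEMO_PUT_BIT (1 << 0)
#define DEMO_ACK_BIT (1 << 1)
#define DEMO_GET_BIT (1 << 2)

static int demo_fault_init(void)
{
	/* fail the build if the masks and the enum ever drift apart */
	BUILD_BUG_ON(DEMO_PUT_BIT != 1 << DEMO_MSG_PUT);
	BUILD_BUG_ON(DEMO_ACK_BIT != 1 << DEMO_MSG_ACK);
	BUILD_BUG_ON(DEMO_GET_BIT != 1 << DEMO_MSG_GET);
	return 0;
}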
--- initial
+++ final
@@ -1,34 +1,34 @@
static void kiblnd_map_tx_pool(struct kib_tx_pool *tpo) {
struct kib_pages *txpgs = tpo->tpo_tx_pages;
struct kib_pool *pool = &tpo->tpo_pool;
struct kib_net *net = pool->po_owner->ps_net;
struct kib_dev *dev;
struct page *page;
struct kib_tx *tx;
int page_offset;
int ipage;
int i;
LASSERT(net);
dev = net->ibn_dev;
/* pre-mapped messages are not bigger than 1 page */
- CLASSERT(IBLND_MSG_SIZE <= PAGE_SIZE);
+ BUILD_BUG_ON(IBLND_MSG_SIZE > PAGE_SIZE);
/* No fancy arithmetic when we do the buffer calculations */
- CLASSERT(!(PAGE_SIZE % IBLND_MSG_SIZE));
+ BUILD_BUG_ON(PAGE_SIZE % IBLND_MSG_SIZE);
tpo->tpo_hdev = kiblnd_current_hdev(dev);
for (ipage = page_offset = i = 0; i < pool->po_size; i++) {
page = txpgs->ibp_pages[ipage];
tx = &tpo->tpo_tx_descs[i];
tx->tx_msg = (struct kib_msg *)(((char *)page_address(page)) + page_offset);
tx->tx_msgaddr = kiblnd_dma_map_single(tpo->tpo_hdev->ibh_ibdev, tx->tx_msg, IBLND_MSG_SIZE, DMA_TO_DEVICE);
LASSERT(!kiblnd_dma_mapping_error(tpo->tpo_hdev->ibh_ibdev, tx->tx_msgaddr));
KIBLND_UNMAP_ADDR_SET(tx, tx_msgunmap, tx->tx_msgaddr);
list_add(&tx->tx_list, &pool->po_free_list);
page_offset += IBLND_MSG_SIZE;
LASSERT(page_offset <= PAGE_SIZE);
if (page_offset == PAGE_SIZE) {
page_offset = 0;
ipage++;
LASSERT(ipage <= txpgs->ibp_npages);
}
}
}

@@
identifier e1,e2;
@@
- CLASSERT(e1 <= e2);
+ BUILD_BUG_ON(e1 > e2);
@@
expression e;
@@
- CLASSERT(!(e));
+ BUILD_BUG_ON(e);
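The second rule in this record handles a negated assert: CLASSERT(!(e)) claims e is zero, and inverting it for BUILD_BUG_ON simply drops the negation. Together the two checks let the mapping loop above do whole-page arithmetic, roughly as below (DEMO_MSG_SIZE is an invented stand-in for IBLND_MSG_SIZE, assumed to divide the page size):

#include <linux/bug.h>
#include <linux/mm.h>	/* PAGE_SIZE */

#define DEMO_MSG_SIZE 4096	/* stand-in for IBLND_MSG_SIZE */

static void demo_page_math_checks(void)
{
	/* a pre-mapped message may not span pages ... */
	BUILD_BUG_ON(DEMO_MSG_SIZE > PAGE_SIZE);
	/* ... and messages must tile a page exactly, so that
	 * "page_offset == PAGE_SIZE" is the only page-advance case
	 */
	BUILD_BUG_ON(PAGE_SIZE % DEMO_MSG_SIZE);
}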
--- initial
+++ final
@@ -1,62 +1,62 @@
static int kiblnd_startup(lnet_ni_t *ni) {
char *ifname;
struct kib_dev *ibdev = NULL;
struct kib_net *net;
struct timespec64 tv;
unsigned long flags;
int rc;
int newdev;
LASSERT(ni->ni_lnd == &the_o2iblnd);
if (kiblnd_data.kib_init == IBLND_INIT_NOTHING) {
rc = kiblnd_base_startup();
if (rc) return rc;
}
LIBCFS_ALLOC(net, sizeof(*net));
ni->ni_data = net;
if (!net) goto net_failed;
ktime_get_real_ts64(&tv);
net->ibn_incarnation = tv.tv_sec * USEC_PER_SEC + tv.tv_nsec / NSEC_PER_USEC;
rc = kiblnd_tunables_setup(ni);
if (rc) goto net_failed;
if (ni->ni_interfaces[0]) {
/* Use the IPoIB interface specified in 'networks=' */
- CLASSERT(LNET_MAX_INTERFACES > 1);
+ BUILD_BUG_ON(LNET_MAX_INTERFACES <= 1);
if (ni->ni_interfaces[1]) {
CERROR("Multiple interfaces not supported\n");
goto failed;
}
ifname = ni->ni_interfaces[0];
} else {
ifname = *kiblnd_tunables.kib_default_ipif;
}
if (strlen(ifname) >= sizeof(ibdev->ibd_ifname)) {
CERROR("IPoIB interface name too long: %s\n", ifname);
goto failed;
}
ibdev = kiblnd_dev_search(ifname);
newdev = !ibdev;
/* hmm...create kib_dev even for alias */
if (!ibdev || strcmp(&ibdev->ibd_ifname[0], ifname)) ibdev = kiblnd_create_dev(ifname);
if (!ibdev) goto failed;
net->ibn_dev = ibdev;
ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), ibdev->ibd_ifip);
rc = kiblnd_dev_start_threads(ibdev, newdev, ni->ni_cpts, ni->ni_ncpts);
if (rc) goto failed;
rc = kiblnd_net_init_pools(net, ni, ni->ni_cpts, ni->ni_ncpts);
if (rc) {
CERROR("Failed to initialize NI pools: %d\n", rc);
goto failed;
}
write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
ibdev->ibd_nnets++;
list_add_tail(&net->ibn_list, &ibdev->ibd_nets);
write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
net->ibn_init = IBLND_INIT_ALL;
return 0;
failed:
if (!net->ibn_dev && ibdev) kiblnd_destroy_dev(ibdev);
net_failed:
kiblnd_shutdown(ni);
CDEBUG(D_NET, "kiblnd_startup failed\n");
return -ENETDOWN;
}

@@
identifier i;
expression e1;
@@
- CLASSERT(e1 < i);
+ BUILD_BUG_ON(i <= e1);
--- initial
+++ final
@@ -1,88 +1,88 @@
int kiblnd_unpack_msg(struct kib_msg *msg, int nob) {
const int hdr_size = offsetof(struct kib_msg, ibm_u);
__u32 msg_cksum;
__u16 version;
int msg_nob;
int flip;
/* 6 bytes are enough to have received magic + version */
if (nob < 6) {
CERROR("Short message: %d\n", nob);
return -EPROTO;
}
if (msg->ibm_magic == IBLND_MSG_MAGIC) {
flip = 0;
} else if (msg->ibm_magic == __swab32(IBLND_MSG_MAGIC)) {
flip = 1;
} else {
CERROR("Bad magic: %08x\n", msg->ibm_magic);
return -EPROTO;
}
version = flip ? __swab16(msg->ibm_version) : msg->ibm_version;
if (version != IBLND_MSG_VERSION && version != IBLND_MSG_VERSION_1) {
CERROR("Bad version: %x\n", version);
return -EPROTO;
}
if (nob < hdr_size) {
CERROR("Short message: %d\n", nob);
return -EPROTO;
}
msg_nob = flip ? __swab32(msg->ibm_nob) : msg->ibm_nob;
if (msg_nob > nob) {
CERROR("Short message: got %d, wanted %d\n", nob, msg_nob);
return -EPROTO;
}
/*
* checksum must be computed with ibm_cksum zero and BEFORE anything
* gets flipped
*/
msg_cksum = flip ? __swab32(msg->ibm_cksum) : msg->ibm_cksum;
msg->ibm_cksum = 0;
if (msg_cksum && msg_cksum != kiblnd_cksum(msg, msg_nob)) {
CERROR("Bad checksum\n");
return -EPROTO;
}
msg->ibm_cksum = msg_cksum;
if (flip) {
/* leave magic unflipped as a clue to peer endianness */
msg->ibm_version = version;
- CLASSERT(sizeof(msg->ibm_type) == 1);
- CLASSERT(sizeof(msg->ibm_credits) == 1);
+ BUILD_BUG_ON(sizeof(msg->ibm_type) != 1);
+ BUILD_BUG_ON(sizeof(msg->ibm_credits) != 1);
msg->ibm_nob = msg_nob;
__swab64s(&msg->ibm_srcnid);
__swab64s(&msg->ibm_srcstamp);
__swab64s(&msg->ibm_dstnid);
__swab64s(&msg->ibm_dststamp);
}
if (msg->ibm_srcnid == LNET_NID_ANY) {
CERROR("Bad src nid: %s\n", libcfs_nid2str(msg->ibm_srcnid));
return -EPROTO;
}
if (msg_nob < kiblnd_msgtype2size(msg->ibm_type)) {
CERROR("Short %s: %d(%d)\n", kiblnd_msgtype2str(msg->ibm_type), msg_nob, kiblnd_msgtype2size(msg->ibm_type));
return -EPROTO;
}
switch (msg->ibm_type) {
default: CERROR("Unknown message type %x\n", msg->ibm_type); return -EPROTO;
case IBLND_MSG_NOOP:
case IBLND_MSG_IMMEDIATE:
case IBLND_MSG_PUT_REQ: break;
case IBLND_MSG_PUT_ACK:
case IBLND_MSG_GET_REQ:
if (kiblnd_unpack_rd(msg, flip)) return -EPROTO;
break;
case IBLND_MSG_PUT_NAK:
case IBLND_MSG_PUT_DONE:
case IBLND_MSG_GET_DONE:
if (flip) __swab32s(&msg->ibm_u.completion.ibcm_status);
break;
case IBLND_MSG_CONNREQ:
case IBLND_MSG_CONNACK:
if (flip) {
__swab16s(&msg->ibm_u.connparams.ibcp_queue_depth);
__swab16s(&msg->ibm_u.connparams.ibcp_max_frags);
__swab32s(&msg->ibm_u.connparams.ibcp_max_msg_size);
}
break;
}
return 0;
}

@@
expression e1,e2;
@@
- CLASSERT(e1 == e2);
+ BUILD_BUG_ON(e1 != e2);
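The two sizeof asserts in the byte-flipping branch above document why ibm_type and ibm_credits are left alone: single-byte fields have no byte order. The converted form keeps that invariant compiler-checked; a sketch with invented field names:

#include <linux/bug.h>
#include <linux/swab.h>
#include <linux/types.h>

struct demo_wire_msg {
	u8 type;	/* one byte: endian-neutral, never swabbed */
	u8 credits;
	u32 nob;	/* multi-byte: must be swapped when flipping */
};

static void demo_flip(struct demo_wire_msg *msg)
{
	/* if these fields ever grow past one byte, swab calls must be added */
	BUILD_BUG_ON(sizeof(msg->type) != 1);
	BUILD_BUG_ON(sizeof(msg->credits) != 1);
	__swab32s(&msg->nob);
}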
--- initial
+++ final
@@ -1,8 +1,8 @@
static int __init ko2iblnd_init(void) {
- CLASSERT(sizeof(struct kib_msg) <= IBLND_MSG_SIZE);
- CLASSERT(offsetof(struct kib_msg, ibm_u.get.ibgm_rd.rd_frags[IBLND_MAX_RDMA_FRAGS]) <= IBLND_MSG_SIZE);
- CLASSERT(offsetof(struct kib_msg, ibm_u.putack.ibpam_rd.rd_frags[IBLND_MAX_RDMA_FRAGS]) <= IBLND_MSG_SIZE);
+ BUILD_BUG_ON(IBLND_MSG_SIZE < sizeof(struct kib_msg));
+ BUILD_BUG_ON(IBLND_MSG_SIZE < offsetof(struct kib_msg, ibm_u.get.ibgm_rd.rd_frags[IBLND_MAX_RDMA_FRAGS]));
+ BUILD_BUG_ON(IBLND_MSG_SIZE < offsetof(struct kib_msg, ibm_u.putack.ibpam_rd.rd_frags[IBLND_MAX_RDMA_FRAGS]));
kiblnd_tunables_init();
lnet_register_lnd(&the_o2iblnd);
return 0;
}

@@
identifier i;
expression e1;
@@
- CLASSERT(e1 <= i);
+ BUILD_BUG_ON(i < e1);
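These last two o2iblnd rules differ from the plain expression rules in two ways: one operand is an identifier metavariable (matching a macro name like IBLND_MSG_SIZE rather than an arbitrary expression), and the replacement writes the negation with the operands reversed — !(e1 <= i) is spelled i < e1, and in the previous record !(e1 < i) is spelled i <= e1. Both spellings are the same predicate; the reversed form reads naturally as a lower bound on the buffer macro. A sketch of the ko2iblnd-style wire-size checks with invented names and sizes:

#include <linux/bug.h>
#include <linux/stddef.h>	/* offsetof */
#include <linux/types.h>

#define DEMO_MSG_SIZE 4096	/* stand-in for IBLND_MSG_SIZE */
#define DEMO_MAX_FRAGS 128	/* stand-in for IBLND_MAX_RDMA_FRAGS */

struct demo_frag { u64 addr; u32 nob; };
struct demo_msg {
	u64 hdr;
	struct demo_frag frags[DEMO_MAX_FRAGS];
};

static void demo_wire_size_checks(void)
{
	/* equivalent spellings: "assert a <= b" vs "build-break on b < a" */
	BUILD_BUG_ON(sizeof(struct demo_msg) > DEMO_MSG_SIZE);
	BUILD_BUG_ON(DEMO_MSG_SIZE < sizeof(struct demo_msg));
	/* one-past-the-end offsetof bounds the fragment array, as in ko2iblnd */
	BUILD_BUG_ON(DEMO_MSG_SIZE < offsetof(struct demo_msg, frags[DEMO_MAX_FRAGS]));
}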
--- initial
+++ final
@@ -1,38 +1,38 @@
static int osc_io_data_version_start(const struct lu_env *env, const struct cl_io_slice *slice) {
struct cl_data_version_io *dv = &slice->cis_io->u.ci_data_version;
struct osc_io *oio = cl2osc_io(env, slice);
struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
struct osc_object *obj = cl2osc(slice->cis_obj);
struct obd_export *exp = osc_export(obj);
struct lov_oinfo *loi = obj->oo_oinfo;
struct osc_data_version_args *dva;
struct obdo *oa = &oio->oi_oa;
struct ptlrpc_request *req;
struct ost_body *body;
int rc;
memset(oa, 0, sizeof(*oa));
oa->o_oi = loi->loi_oi;
oa->o_valid = OBD_MD_FLID | OBD_MD_FLGROUP;
if (dv->dv_flags & (LL_DV_RD_FLUSH | LL_DV_WR_FLUSH)) {
oa->o_valid |= OBD_MD_FLFLAGS;
oa->o_flags |= OBD_FL_SRVLOCK;
if (dv->dv_flags & LL_DV_WR_FLUSH) oa->o_flags |= OBD_FL_FLUSH;
}
init_completion(&cbargs->opc_sync);
req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
if (!req) return -ENOMEM;
rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
if (rc < 0) {
ptlrpc_request_free(req);
return rc;
}
body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);
ptlrpc_request_set_replen(req);
req->rq_interpret_reply = osc_data_version_interpret;
- CLASSERT(sizeof(*dva) <= sizeof(req->rq_async_args));
+ BUILD_BUG_ON(sizeof(*dva) > sizeof(req->rq_async_args));
dva = ptlrpc_req_async_args(req);
dva->dva_oio = oio;
ptlrpcd_add_req(req);
return 0;
}

@@
expression e1,e2;
@@
- CLASSERT(e1 <= e2);
+ BUILD_BUG_ON(e1 > e2);
--- initial
+++ final
@@ -1,132 +1,132 @@
int osc_build_rpc(const struct lu_env *env, struct client_obd *cli, struct list_head *ext_list, int cmd) {
struct ptlrpc_request *req = NULL;
struct osc_extent *ext;
struct brw_page **pga = NULL;
struct osc_brw_async_args *aa = NULL;
struct obdo *oa = NULL;
struct osc_async_page *oap;
struct osc_object *obj = NULL;
struct cl_req_attr *crattr = NULL;
u64 starting_offset = OBD_OBJECT_EOF;
u64 ending_offset = 0;
int mpflag = 0;
int mem_tight = 0;
int page_count = 0;
bool soft_sync = false;
bool interrupted = false;
int i;
int rc;
struct ost_body *body;
LIST_HEAD(rpc_list);
LASSERT(!list_empty(ext_list));
/* add pages into rpc_list to build BRW rpc */
list_for_each_entry(ext, ext_list, oe_link) {
LASSERT(ext->oe_state == OES_RPC);
mem_tight |= ext->oe_memalloc;
page_count += ext->oe_nr_pages;
if (!obj) obj = ext->oe_obj;
}
soft_sync = osc_over_unstable_soft_limit(cli);
if (mem_tight) mpflag = cfs_memory_pressure_get_and_set();
pga = kcalloc(page_count, sizeof(*pga), GFP_NOFS);
if (!pga) {
rc = -ENOMEM;
goto out;
}
oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS);
if (!oa) {
rc = -ENOMEM;
goto out;
}
i = 0;
list_for_each_entry(ext, ext_list, oe_link) {
list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
if (mem_tight) oap->oap_brw_flags |= OBD_BRW_MEMALLOC;
if (soft_sync) oap->oap_brw_flags |= OBD_BRW_SOFT_SYNC;
pga[i] = &oap->oap_brw_page;
pga[i]->off = oap->oap_obj_off + oap->oap_page_off;
i++;
list_add_tail(&oap->oap_rpc_item, &rpc_list);
if (starting_offset == OBD_OBJECT_EOF || starting_offset > oap->oap_obj_off)
starting_offset = oap->oap_obj_off;
else
LASSERT(!oap->oap_page_off);
if (ending_offset < oap->oap_obj_off + oap->oap_count)
ending_offset = oap->oap_obj_off + oap->oap_count;
else
LASSERT(oap->oap_page_off + oap->oap_count == PAGE_SIZE);
if (oap->oap_interrupted) interrupted = true;
}
}
/* first page in the list */
oap = list_entry(rpc_list.next, typeof(*oap), oap_rpc_item);
crattr = &osc_env_info(env)->oti_req_attr;
memset(crattr, 0, sizeof(*crattr));
crattr->cra_type = (cmd & OBD_BRW_WRITE) ? CRT_WRITE : CRT_READ;
crattr->cra_flags = ~0ULL;
crattr->cra_page = oap2cl_page(oap);
crattr->cra_oa = oa;
cl_req_attr_set(env, osc2cl(obj), crattr);
sort_brw_pages(pga, page_count);
rc = osc_brw_prep_request(cmd, cli, oa, page_count, pga, &req, 1, 0);
if (rc != 0) {
CERROR("prep_req failed: %d\n", rc);
goto out;
}
req->rq_commit_cb = brw_commit;
req->rq_interpret_reply = brw_interpret;
req->rq_memalloc = mem_tight != 0;
oap->oap_request = ptlrpc_request_addref(req);
if (interrupted && !req->rq_intr) ptlrpc_mark_interrupted(req);
/* Need to update the timestamps after the request is built in case
* we race with setattr (locally or in queue at OST). If OST gets
* later setattr before earlier BRW (as determined by the request xid),
* the OST will not use BRW timestamps. Sadly, there is no obvious
* way to do this in a single call. bug 10150
*/
body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
crattr->cra_oa = &body->oa;
crattr->cra_flags = OBD_MD_FLMTIME | OBD_MD_FLCTIME | OBD_MD_FLATIME;
cl_req_attr_set(env, osc2cl(obj), crattr);
lustre_msg_set_jobid(req->rq_reqmsg, crattr->cra_jobid);
- CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
+ BUILD_BUG_ON(sizeof(*aa) > sizeof(req->rq_async_args));
aa = ptlrpc_req_async_args(req);
INIT_LIST_HEAD(&aa->aa_oaps);
list_splice_init(&rpc_list, &aa->aa_oaps);
INIT_LIST_HEAD(&aa->aa_exts);
list_splice_init(ext_list, &aa->aa_exts);
spin_lock(&cli->cl_loi_list_lock);
starting_offset >>= PAGE_SHIFT;
if (cmd == OBD_BRW_READ) {
cli->cl_r_in_flight++;
lprocfs_oh_tally_log2(&cli->cl_read_page_hist, page_count);
lprocfs_oh_tally(&cli->cl_read_rpc_hist, cli->cl_r_in_flight);
lprocfs_oh_tally_log2(&cli->cl_read_offset_hist, starting_offset + 1);
} else {
cli->cl_w_in_flight++;
lprocfs_oh_tally_log2(&cli->cl_write_page_hist, page_count);
lprocfs_oh_tally(&cli->cl_write_rpc_hist, cli->cl_w_in_flight);
lprocfs_oh_tally_log2(&cli->cl_write_offset_hist, starting_offset + 1);
}
spin_unlock(&cli->cl_loi_list_lock);
DEBUG_REQ(D_INODE, req, "%d pages, aa %p. now %ur/%dw in flight", page_count, aa, cli->cl_r_in_flight, cli->cl_w_in_flight);
OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_DELAY_IO, cfs_fail_val);
ptlrpcd_add_req(req);
rc = 0;
out:
if (mem_tight != 0) cfs_memory_pressure_restore(mpflag);
if (rc != 0) {
LASSERT(!req);
if (oa) kmem_cache_free(obdo_cachep, oa);
kfree(pga);
/* this should happen rarely and is pretty bad, it makes the
* pending list not follow the dirty order
*/
while (!list_empty(ext_list)) {
ext = list_entry(ext_list->next, struct osc_extent, oe_link);
list_del_init(&ext->oe_link);
osc_extent_finish(env, ext, 0, rc);
}
}
return rc;
}

@@
expression e1,e2;
@@
- CLASSERT(e1 <= e2);
+ BUILD_BUG_ON(e1 > e2);
--- initial
+++ final
@@ -1,115 +1,115 @@
int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id, __u64 *flags, union ldlm_policy_data *policy, struct ost_lvb *lvb, int kms_valid, osc_enqueue_upcall_f upcall, void *cookie, struct ldlm_enqueue_info *einfo, struct ptlrpc_request_set *rqset, int async, int agl) {
struct obd_device *obd = exp->exp_obd;
struct lustre_handle lockh = {0};
struct ptlrpc_request *req = NULL;
int intent = *flags & LDLM_FL_HAS_INTENT;
__u64 match_flags = *flags;
enum ldlm_mode mode;
int rc;
/* Filesystem lock extents are extended to page boundaries so that
* dealing with the page cache is a little smoother.
*/
policy->l_extent.start -= policy->l_extent.start & ~PAGE_MASK;
policy->l_extent.end |= ~PAGE_MASK;
/*
* kms is not valid when either object is completely fresh (so that no
* locks are cached), or object was evicted. In the latter case cached
* lock cannot be used, because it would prime inode state with
* potentially stale LVB.
*/
if (!kms_valid) goto no_match;
/* Next, search for already existing extent locks that will cover us */
/* If we're trying to read, we also search for an existing PW lock. The
* VFS and page cache already protect us locally, so lots of readers/
* writers can share a single PW lock.
*
* There are problems with conversion deadlocks, so instead of
* converting a read lock to a write lock, we'll just enqueue a new
* one.
*
* At some point we should cancel the read lock instead of making them
* send us a blocking callback, but there are problems with canceling
* locks out from other users right now, too.
*/
mode = einfo->ei_mode;
if (einfo->ei_mode == LCK_PR) mode |= LCK_PW;
if (agl == 0) match_flags |= LDLM_FL_LVB_READY;
if (intent != 0) match_flags |= LDLM_FL_BLOCK_GRANTED;
mode = ldlm_lock_match(obd->obd_namespace, match_flags, res_id, einfo->ei_type, policy, mode, &lockh, 0);
if (mode) {
struct ldlm_lock *matched;
if (*flags & LDLM_FL_TEST_LOCK) return ELDLM_OK;
matched = ldlm_handle2lock(&lockh);
if (agl) {
/* AGL enqueues DLM locks speculatively. Therefore if
* a DLM lock already exists, it will just inform the
* caller to cancel the AGL process for this stripe.
*/
ldlm_lock_decref(&lockh, mode);
LDLM_LOCK_PUT(matched);
return -ECANCELED;
} else if (osc_set_lock_data(matched, einfo->ei_cbdata)) {
*flags |= LDLM_FL_LVB_READY;
/* We already have a lock, and it's referenced. */
(*upcall)(cookie, &lockh, ELDLM_LOCK_MATCHED);
ldlm_lock_decref(&lockh, mode);
LDLM_LOCK_PUT(matched);
return ELDLM_OK;
} else {
ldlm_lock_decref(&lockh, mode);
LDLM_LOCK_PUT(matched);
}
}
no_match:
if (*flags & LDLM_FL_TEST_LOCK) return -ENOLCK;
if (intent) {
req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LDLM_ENQUEUE_LVB);
if (!req) return -ENOMEM;
rc = ldlm_prep_enqueue_req(exp, req, NULL, 0);
if (rc) {
ptlrpc_request_free(req);
return rc;
}
req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER, sizeof(*lvb));
ptlrpc_request_set_replen(req);
}
/* users of osc_enqueue() can pass this flag for ldlm_lock_match() */
*flags &= ~LDLM_FL_BLOCK_GRANTED;
rc = ldlm_cli_enqueue(exp, &req, einfo, res_id, policy, flags, lvb, sizeof(*lvb), LVB_T_OST, &lockh, async);
if (async) {
if (!rc) {
struct osc_enqueue_args *aa;
- CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
+ BUILD_BUG_ON(sizeof(*aa) > sizeof(req->rq_async_args));
aa = ptlrpc_req_async_args(req);
aa->oa_exp = exp;
aa->oa_mode = einfo->ei_mode;
aa->oa_type = einfo->ei_type;
lustre_handle_copy(&aa->oa_lockh, &lockh);
aa->oa_upcall = upcall;
aa->oa_cookie = cookie;
aa->oa_agl = !!agl;
if (!agl) {
aa->oa_flags = flags;
aa->oa_lvb = lvb;
} else {
/* AGL is essentially to enqueue an DLM lock
* in advance, so we don't care about the
* result of AGL enqueue.
*/
aa->oa_lvb = NULL;
aa->oa_flags = NULL;
}
req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_enqueue_interpret;
if (rqset == PTLRPCD_SET)
ptlrpcd_add_req(req);
else
ptlrpc_set_add_req(rqset, req);
} else if (intent) {
ptlrpc_req_finished(req);
}
return rc;
}
rc = osc_enqueue_fini(req, upcall, cookie, &lockh, einfo->ei_mode, flags, agl, rc);
if (intent) ptlrpc_req_finished(req);
return rc;
}

@@
expression e1,e2;
@@
- CLASSERT(e1 <= e2);
+ BUILD_BUG_ON(e1 > e2);
--- initial
+++ final
@@ -1,30 +1,30 @@
int osc_punch_base(struct obd_export *exp, struct obdo *oa, obd_enqueue_update_f upcall, void *cookie, struct ptlrpc_request_set *rqset) {
struct ptlrpc_request *req;
struct osc_setattr_args *sa;
struct ost_body *body;
int rc;
req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_PUNCH);
if (!req) return -ENOMEM;
rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_PUNCH);
if (rc) {
ptlrpc_request_free(req);
return rc;
}
req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
ptlrpc_at_set_req_timeout(req);
body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
LASSERT(body);
lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);
ptlrpc_request_set_replen(req);
req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_setattr_interpret;
- CLASSERT(sizeof(*sa) <= sizeof(req->rq_async_args));
+ BUILD_BUG_ON(sizeof(*sa) > sizeof(req->rq_async_args));
sa = ptlrpc_req_async_args(req);
sa->sa_oa = oa;
sa->sa_upcall = upcall;
sa->sa_cookie = cookie;
if (rqset == PTLRPCD_SET)
ptlrpcd_add_req(req);
else
ptlrpc_set_add_req(rqset, req);
return 0;
}

@@
expression e1,e2;
@@
- CLASSERT(e1 <= e2);
+ BUILD_BUG_ON(e1 > e2);
--- initial
+++ final
@@ -1,86 +1,86 @@
static int osc_set_info_async(const struct lu_env *env, struct obd_export *exp, u32 keylen, void *key, u32 vallen, void *val, struct ptlrpc_request_set *set) {
struct ptlrpc_request *req;
struct obd_device *obd = exp->exp_obd;
struct obd_import *imp = class_exp2cliimp(exp);
char *tmp;
int rc;
OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_SHUTDOWN, 10);
if (KEY_IS(KEY_CHECKSUM)) {
if (vallen != sizeof(int)) return -EINVAL;
exp->exp_obd->u.cli.cl_checksum = (*(int *)val) ? 1 : 0;
return 0;
}
if (KEY_IS(KEY_SPTLRPC_CONF)) {
sptlrpc_conf_client_adapt(obd);
return 0;
}
if (KEY_IS(KEY_FLUSH_CTX)) {
sptlrpc_import_flush_my_ctx(imp);
return 0;
}
if (KEY_IS(KEY_CACHE_SET)) {
struct client_obd *cli = &obd->u.cli;
LASSERT(!cli->cl_cache); /* only once */
cli->cl_cache = val;
cl_cache_incref(cli->cl_cache);
cli->cl_lru_left = &cli->cl_cache->ccc_lru_left;
/* add this osc into entity list */
LASSERT(list_empty(&cli->cl_lru_osc));
spin_lock(&cli->cl_cache->ccc_lru_lock);
list_add(&cli->cl_lru_osc, &cli->cl_cache->ccc_lru);
spin_unlock(&cli->cl_cache->ccc_lru_lock);
return 0;
}
if (KEY_IS(KEY_CACHE_LRU_SHRINK)) {
struct client_obd *cli = &obd->u.cli;
long nr = atomic_long_read(&cli->cl_lru_in_list) >> 1;
long target = *(long *)val;
nr = osc_lru_shrink(env, cli, min(nr, target), true);
*(long *)val -= nr;
return 0;
}
if (!set && !KEY_IS(KEY_GRANT_SHRINK)) return -EINVAL;
/* We pass all other commands directly to OST. Since nobody calls osc
* methods directly and everybody is supposed to go through LOV, we
* assume lov checked invalid values for us.
* The only recognised values so far are evict_by_nid and mds_conn.
* Even if something bad goes through, we'd get a -EINVAL from OST
* anyway.
*/
req = ptlrpc_request_alloc(imp, KEY_IS(KEY_GRANT_SHRINK) ? &RQF_OST_SET_GRANT_INFO : &RQF_OBD_SET_INFO);
if (!req) return -ENOMEM;
req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY, RCL_CLIENT, keylen);
if (!KEY_IS(KEY_GRANT_SHRINK)) req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_VAL, RCL_CLIENT, vallen);
rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SET_INFO);
if (rc) {
ptlrpc_request_free(req);
return rc;
}
tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
memcpy(tmp, key, keylen);
tmp = req_capsule_client_get(&req->rq_pill, KEY_IS(KEY_GRANT_SHRINK) ? &RMF_OST_BODY : &RMF_SETINFO_VAL);
memcpy(tmp, val, vallen);
if (KEY_IS(KEY_GRANT_SHRINK)) {
struct osc_brw_async_args *aa;
struct obdo *oa;
- CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
+ BUILD_BUG_ON(sizeof(*aa) > sizeof(req->rq_async_args));
aa = ptlrpc_req_async_args(req);
oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS);
if (!oa) {
ptlrpc_req_finished(req);
return -ENOMEM;
}
*oa = ((struct ost_body *)val)->oa;
aa->aa_oa = oa;
req->rq_interpret_reply = osc_shrink_grant_interpret;
}
ptlrpc_request_set_replen(req);
if (!KEY_IS(KEY_GRANT_SHRINK)) {
LASSERT(set);
ptlrpc_set_add_req(set, req);
ptlrpc_check_set(NULL, set);
} else {
ptlrpcd_add_req(req);
}
return 0;
}

@@
expression e1,e2;
@@
- CLASSERT(e1 <= e2);
+ BUILD_BUG_ON(e1 > e2);
--- initial
+++ final
@@ -1,31 +1,31 @@
int osc_setattr_async(struct obd_export *exp, struct obdo *oa, obd_enqueue_update_f upcall, void *cookie, struct ptlrpc_request_set *rqset) {
struct ptlrpc_request *req;
struct osc_setattr_args *sa;
int rc;
req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
if (!req) return -ENOMEM;
rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR);
if (rc) {
ptlrpc_request_free(req);
return rc;
}
osc_pack_req_body(req, oa);
ptlrpc_request_set_replen(req);
/* do mds to ost setattr asynchronously */
if (!rqset) {
/* Do not wait for response. */
ptlrpcd_add_req(req);
} else {
req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_setattr_interpret;
- CLASSERT(sizeof(*sa) <= sizeof(req->rq_async_args));
+ BUILD_BUG_ON(sizeof(*sa) > sizeof(req->rq_async_args));
sa = ptlrpc_req_async_args(req);
sa->sa_oa = oa;
sa->sa_upcall = upcall;
sa->sa_cookie = cookie;
if (rqset == PTLRPCD_SET)
ptlrpcd_add_req(req);
else
ptlrpc_set_add_req(rqset, req);
}
return 0;
}

@@
expression e1,e2;
@@
- CLASSERT(e1 <= e2);
+ BUILD_BUG_ON(e1 > e2);
--- initial
+++ final
@@ -1,34 +1,34 @@
static int osc_statfs_async(struct obd_export *exp, struct obd_info *oinfo, __u64 max_age, struct ptlrpc_request_set *rqset) {
struct obd_device *obd = class_exp2obd(exp);
struct ptlrpc_request *req;
struct osc_async_args *aa;
int rc;
/* We could possibly pass max_age in the request (as an absolute
* timestamp or a "seconds.usec ago") so the target can avoid doing
* extra calls into the filesystem if that isn't necessary (e.g.
* during mount that would help a bit). Having relative timestamps
* is not so great if request processing is slow, while absolute
* timestamps are not ideal because they need time synchronization.
*/
req = ptlrpc_request_alloc(obd->u.cli.cl_import, &RQF_OST_STATFS);
if (!req) return -ENOMEM;
rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
if (rc) {
ptlrpc_request_free(req);
return rc;
}
ptlrpc_request_set_replen(req);
req->rq_request_portal = OST_CREATE_PORTAL;
ptlrpc_at_set_req_timeout(req);
if (oinfo->oi_flags & OBD_STATFS_NODELAY) {
/* procfs requests should not wait on statfs, to avoid deadlock */
req->rq_no_resend = 1;
req->rq_no_delay = 1;
}
req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_statfs_interpret;
- CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
+ BUILD_BUG_ON(sizeof(*aa) > sizeof(req->rq_async_args));
aa = ptlrpc_req_async_args(req);
aa->aa_oi = oinfo;
ptlrpc_set_add_req(rqset, req);
return 0;
}

@@
expression e1,e2;
@@
- CLASSERT(e1 <= e2);
+ BUILD_BUG_ON(e1 > e2);
--- initial
+++ final
@@ -1,31 +1,31 @@
int osc_sync_base(struct osc_object *obj, struct obdo *oa, obd_enqueue_update_f upcall, void *cookie, struct ptlrpc_request_set *rqset) {
struct obd_export *exp = osc_export(obj);
struct ptlrpc_request *req;
struct ost_body *body;
struct osc_fsync_args *fa;
int rc;
req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SYNC);
if (!req) return -ENOMEM;
rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SYNC);
if (rc) {
ptlrpc_request_free(req);
return rc;
}
/* overload the size and blocks fields in the oa with start/end */
body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
LASSERT(body);
lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);
ptlrpc_request_set_replen(req);
req->rq_interpret_reply = osc_sync_interpret;
- CLASSERT(sizeof(*fa) <= sizeof(req->rq_async_args));
+ BUILD_BUG_ON(sizeof(*fa) > sizeof(req->rq_async_args));
fa = ptlrpc_req_async_args(req);
fa->fa_obj = obj;
fa->fa_oa = oa;
fa->fa_upcall = upcall;
fa->fa_cookie = cookie;
if (rqset == PTLRPCD_SET)
ptlrpcd_add_req(req);
else
ptlrpc_set_add_req(rqset, req);
return 0;
}

@@
expression e1,e2;
@@
- CLASSERT(e1 <= e2);
+ BUILD_BUG_ON(e1 > e2);
--- initial
+++ final
@@ -1,41 +1,41 @@
void lustre_swab_connect(struct obd_connect_data *ocd) {
__swab64s(&ocd->ocd_connect_flags);
__swab32s(&ocd->ocd_version);
__swab32s(&ocd->ocd_grant);
__swab64s(&ocd->ocd_ibits_known);
__swab32s(&ocd->ocd_index);
__swab32s(&ocd->ocd_brw_size);
/* ocd_blocksize and ocd_inodespace don't need to be swabbed because
* they are 8-byte values
*/
__swab16s(&ocd->ocd_grant_extent);
__swab32s(&ocd->ocd_unused);
__swab64s(&ocd->ocd_transno);
__swab32s(&ocd->ocd_group);
__swab32s(&ocd->ocd_cksum_types);
__swab32s(&ocd->ocd_instance);
/* Fields after ocd_cksum_types are only accessible by the receiver
* if the corresponding flag in ocd_connect_flags is set. Accessing
* any field after ocd_maxbytes on the receiver without a valid flag
* may result in out-of-bound memory access and kernel oops.
*/
if (ocd->ocd_connect_flags & OBD_CONNECT_MAX_EASIZE) __swab32s(&ocd->ocd_max_easize);
if (ocd->ocd_connect_flags & OBD_CONNECT_MAXBYTES) __swab64s(&ocd->ocd_maxbytes);
if (ocd->ocd_connect_flags & OBD_CONNECT_MULTIMODRPCS) __swab16s(&ocd->ocd_maxmodrpcs);
- CLASSERT(offsetof(typeof(*ocd), padding0));
- CLASSERT(offsetof(typeof(*ocd), padding1) != 0);
+ BUILD_BUG_ON(!offsetof(typeof(*ocd), padding0));
+ BUILD_BUG_ON(offsetof(typeof(*ocd), padding1) == 0);
if (ocd->ocd_connect_flags & OBD_CONNECT_FLAGS2) __swab64s(&ocd->ocd_connect_flags2);
- CLASSERT(offsetof(typeof(*ocd), padding3) != 0);
- CLASSERT(offsetof(typeof(*ocd), padding4) != 0);
- CLASSERT(offsetof(typeof(*ocd), padding5) != 0);
- CLASSERT(offsetof(typeof(*ocd), padding6) != 0);
- CLASSERT(offsetof(typeof(*ocd), padding7) != 0);
- CLASSERT(offsetof(typeof(*ocd), padding8) != 0);
- CLASSERT(offsetof(typeof(*ocd), padding9) != 0);
- CLASSERT(offsetof(typeof(*ocd), paddingA) != 0);
- CLASSERT(offsetof(typeof(*ocd), paddingB) != 0);
- CLASSERT(offsetof(typeof(*ocd), paddingC) != 0);
- CLASSERT(offsetof(typeof(*ocd), paddingD) != 0);
- CLASSERT(offsetof(typeof(*ocd), paddingE) != 0);
- CLASSERT(offsetof(typeof(*ocd), paddingF) != 0);
+ BUILD_BUG_ON(offsetof(typeof(*ocd), padding3) == 0);
+ BUILD_BUG_ON(offsetof(typeof(*ocd), padding4) == 0);
+ BUILD_BUG_ON(offsetof(typeof(*ocd), padding5) == 0);
+ BUILD_BUG_ON(offsetof(typeof(*ocd), padding6) == 0);
+ BUILD_BUG_ON(offsetof(typeof(*ocd), padding7) == 0);
+ BUILD_BUG_ON(offsetof(typeof(*ocd), padding8) == 0);
+ BUILD_BUG_ON(offsetof(typeof(*ocd), padding9) == 0);
+ BUILD_BUG_ON(offsetof(typeof(*ocd), paddingA) == 0);
+ BUILD_BUG_ON(offsetof(typeof(*ocd), paddingB) == 0);
+ BUILD_BUG_ON(offsetof(typeof(*ocd), paddingC) == 0);
+ BUILD_BUG_ON(offsetof(typeof(*ocd), paddingD) == 0);
+ BUILD_BUG_ON(offsetof(typeof(*ocd), paddingE) == 0);
+ BUILD_BUG_ON(offsetof(typeof(*ocd), paddingF) == 0);
}<sep>@@
expression e1,e2;
@@
- CLASSERT(e1 != e2);
+ BUILD_BUG_ON(e1 == e2);
@@
expression e;
@@
- CLASSERT(e);
+ BUILD_BUG_ON(!e);
<|end_of_text|> | 8,445 |
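
The connect-data entry above also shows the other two shapes of the same rule: CLASSERT(e) becomes BUILD_BUG_ON(!e), and CLASSERT(e1 != e2) becomes BUILD_BUG_ON(e1 == e2). The offsetof() checks on the padding fields exist mainly so the build fails if a reserved wire field is ever removed or renamed, since offsetof() on a missing member does not compile; none of these fields can sit at offset zero, so the comparison itself is trivially satisfied. A sketch with a hypothetical struct:

#include <linux/bug.h>
#include <linux/stddef.h>
#include <linux/types.h>

/* Hypothetical stand-in for a wire struct such as obd_connect_data. */
struct wire_hdr {
	__u64 whdr_flags;	/* swabbed on the wire */
	__u64 whdr_padding3;	/* reserved, deliberately not swabbed */
};

static inline void wire_hdr_layout_check(void)
{
	/* Deleting or renaming whdr_padding3 stops offsetof() compiling;
	 * the == 0 comparison can never trip here, because the field can
	 * never be first in the struct. */
	BUILD_BUG_ON(offsetof(struct wire_hdr, whdr_padding3) == 0);
}
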
--- initial
+++ final
@@ -1,9 +1,9 @@
void lustre_swab_gl_desc(union ldlm_gl_desc *desc) {
lustre_swab_lu_fid(&desc->lquota_desc.gl_id.qid_fid);
__swab64s(&desc->lquota_desc.gl_flags);
__swab64s(&desc->lquota_desc.gl_ver);
__swab64s(&desc->lquota_desc.gl_hardlimit);
__swab64s(&desc->lquota_desc.gl_softlimit);
__swab64s(&desc->lquota_desc.gl_time);
- CLASSERT(offsetof(typeof(desc->lquota_desc), gl_pad2) != 0);
+ BUILD_BUG_ON(offsetof(typeof(desc->lquota_desc), gl_pad2) == 0);
}<sep>@@
expression e1,e2;
@@
- CLASSERT(e1 != e2);
+ BUILD_BUG_ON(e1 == e2);
<|end_of_text|> | 8,446 |
--- initial
+++ final
@@ -1,8 +1,8 @@
void lustre_swab_ldlm_reply(struct ldlm_reply *r) {
__swab32s(&r->lock_flags);
- CLASSERT(offsetof(typeof(*r), lock_padding) != 0);
+ BUILD_BUG_ON(offsetof(typeof(*r), lock_padding) == 0);
lustre_swab_ldlm_lock_desc(&r->lock_desc);
/* lock_handle opaque */
__swab64s(&r->lock_policy_res1);
__swab64s(&r->lock_policy_res2);
}<sep>@@
expression e1,e2;
@@
- CLASSERT(e1 != e2);
+ BUILD_BUG_ON(e1 == e2);
<|end_of_text|> | 8,447 |
--- initial
+++ final
@@ -1,5 +1,5 @@
static void lustre_swab_ldlm_resource_desc(struct ldlm_resource_desc *r) {
__swab32s(&r->lr_type);
- CLASSERT(offsetof(typeof(*r), lr_padding) != 0);
+ BUILD_BUG_ON(offsetof(typeof(*r), lr_padding) == 0);
lustre_swab_ldlm_res_id(&r->lr_name);
}<sep>@@
expression e1,e2;
@@
- CLASSERT(e1 != e2);
+ BUILD_BUG_ON(e1 == e2);
<|end_of_text|> | 8,448 |
--- initial
+++ final
@@ -1,8 +1,8 @@
void lustre_swab_lmv_user_md(struct lmv_user_md *lum) {
__swab32s(&lum->lum_magic);
__swab32s(&lum->lum_stripe_count);
__swab32s(&lum->lum_stripe_offset);
__swab32s(&lum->lum_hash_type);
__swab32s(&lum->lum_type);
- CLASSERT(offsetof(typeof(*lum), lum_padding1));
+ BUILD_BUG_ON(!offsetof(typeof(*lum), lum_padding1));
}<sep>@@
expression e;
@@
- CLASSERT(e);
+ BUILD_BUG_ON(!e);
<|end_of_text|> | 8,449 |
--- initial
+++ final
@@ -1,31 +1,31 @@
void lustre_swab_mdt_body(struct mdt_body *b) {
lustre_swab_lu_fid(&b->mbo_fid1);
lustre_swab_lu_fid(&b->mbo_fid2);
/* handle is opaque */
__swab64s(&b->mbo_valid);
__swab64s(&b->mbo_size);
__swab64s(&b->mbo_mtime);
__swab64s(&b->mbo_atime);
__swab64s(&b->mbo_ctime);
__swab64s(&b->mbo_blocks);
__swab64s(&b->mbo_ioepoch);
__swab64s(&b->mbo_t_state);
__swab32s(&b->mbo_fsuid);
__swab32s(&b->mbo_fsgid);
__swab32s(&b->mbo_capability);
__swab32s(&b->mbo_mode);
__swab32s(&b->mbo_uid);
__swab32s(&b->mbo_gid);
__swab32s(&b->mbo_flags);
__swab32s(&b->mbo_rdev);
__swab32s(&b->mbo_nlink);
- CLASSERT(offsetof(typeof(*b), mbo_unused2) != 0);
+ BUILD_BUG_ON(offsetof(typeof(*b), mbo_unused2) == 0);
__swab32s(&b->mbo_suppgid);
__swab32s(&b->mbo_eadatasize);
__swab32s(&b->mbo_aclsize);
__swab32s(&b->mbo_max_mdsize);
- CLASSERT(offsetof(typeof(*b), mbo_unused3));
+ BUILD_BUG_ON(!offsetof(typeof(*b), mbo_unused3));
__swab32s(&b->mbo_uid_h);
__swab32s(&b->mbo_gid_h);
- CLASSERT(offsetof(typeof(*b), mbo_padding_5) != 0);
+ BUILD_BUG_ON(offsetof(typeof(*b), mbo_padding_5) == 0);
}<sep>@@
expression e1,e2;
@@
- CLASSERT(e1 != e2);
+ BUILD_BUG_ON(e1 == e2);
@@
expression e;
@@
- CLASSERT(e);
+ BUILD_BUG_ON(!e);
<|end_of_text|> | 8,450 |
--- initial
+++ final
@@ -1,7 +1,7 @@
void lustre_swab_mdt_ioepoch(struct mdt_ioepoch *b) {
/* handle is opaque */
/* mio_handle is opaque */
- CLASSERT(offsetof(typeof(*b), mio_unused1));
- CLASSERT(offsetof(typeof(*b), mio_unused2));
- CLASSERT(offsetof(typeof(*b), mio_padding));
+ BUILD_BUG_ON(!offsetof(typeof(*b), mio_unused1));
+ BUILD_BUG_ON(!offsetof(typeof(*b), mio_unused2));
+ BUILD_BUG_ON(!offsetof(typeof(*b), mio_padding));
}<sep>@@
expression e;
@@
- CLASSERT(e);
+ BUILD_BUG_ON(!e);
<|end_of_text|> | 8,451 |
--- initial
+++ final
@@ -1,25 +1,25 @@
void lustre_swab_mdt_rec_reint(struct mdt_rec_reint *rr) {
__swab32s(&rr->rr_opcode);
__swab32s(&rr->rr_cap);
__swab32s(&rr->rr_fsuid);
/* rr_fsuid_h is unused */
__swab32s(&rr->rr_fsgid);
/* rr_fsgid_h is unused */
__swab32s(&rr->rr_suppgid1);
/* rr_suppgid1_h is unused */
__swab32s(&rr->rr_suppgid2);
/* rr_suppgid2_h is unused */
lustre_swab_lu_fid(&rr->rr_fid1);
lustre_swab_lu_fid(&rr->rr_fid2);
__swab64s(&rr->rr_mtime);
__swab64s(&rr->rr_atime);
__swab64s(&rr->rr_ctime);
__swab64s(&rr->rr_size);
__swab64s(&rr->rr_blocks);
__swab32s(&rr->rr_bias);
__swab32s(&rr->rr_mode);
__swab32s(&rr->rr_flags);
__swab32s(&rr->rr_flags_h);
__swab32s(&rr->rr_umask);
- CLASSERT(offsetof(typeof(*rr), rr_padding_4) != 0);
+ BUILD_BUG_ON(offsetof(typeof(*rr), rr_padding_4) == 0);
}<sep>@@
expression e1,e2;
@@
- CLASSERT(e1 != e2);
+ BUILD_BUG_ON(e1 == e2);
<|end_of_text|> | 8,452 |
--- initial
+++ final
@@ -1,17 +1,17 @@
void lustre_swab_mgs_nidtbl_entry(struct mgs_nidtbl_entry *entry) {
__u8 i;
__swab64s(&entry->mne_version);
__swab32s(&entry->mne_instance);
__swab32s(&entry->mne_index);
__swab32s(&entry->mne_length);
/* mne_nid_(count|type) must be a single byte in size because we're
* going to access them without byte-swapping. */
- CLASSERT(sizeof(entry->mne_nid_count) == sizeof(__u8));
- CLASSERT(sizeof(entry->mne_nid_type) == sizeof(__u8));
+ BUILD_BUG_ON(sizeof(entry->mne_nid_count) != sizeof(__u8));
+ BUILD_BUG_ON(sizeof(entry->mne_nid_type) != sizeof(__u8));
/* remove this assertion if ipv6 is supported. */
LASSERT(entry->mne_nid_type == 0);
for (i = 0; i < entry->mne_nid_count; i++) {
- CLASSERT(sizeof(lnet_nid_t) == sizeof(__u64));
+ BUILD_BUG_ON(sizeof(lnet_nid_t) != sizeof(__u64));
__swab64s(&entry->u.nids[i]);
}
}<sep>@@
expression e1,e2;
@@
- CLASSERT(e1 == e2);
+ BUILD_BUG_ON(e1 != e2);
<|end_of_text|> | 8,453 |
--- initial
+++ final
@@ -1,12 +1,12 @@
void lustre_swab_mgs_target_info(struct mgs_target_info *mti) {
int i;
__swab32s(&mti->mti_lustre_ver);
__swab32s(&mti->mti_stripe_index);
__swab32s(&mti->mti_config_ver);
__swab32s(&mti->mti_flags);
__swab32s(&mti->mti_instance);
__swab32s(&mti->mti_nid_count);
- CLASSERT(sizeof(lnet_nid_t) == sizeof(__u64));
+ BUILD_BUG_ON(sizeof(lnet_nid_t) != sizeof(__u64));
for (i = 0; i < MTI_NIDS_MAX; i++)
__swab64s(&mti->mti_nids[i]);
}<sep>@@
expression e1,e2;
@@
- CLASSERT(e1 == e2);
+ BUILD_BUG_ON(e1 != e2);
<|end_of_text|> | 8,454 |
--- initial
+++ final
@@ -1,12 +1,12 @@
static void lustre_swab_obd_dqblk(struct obd_dqblk *b) {
__swab64s(&b->dqb_ihardlimit);
__swab64s(&b->dqb_isoftlimit);
__swab64s(&b->dqb_curinodes);
__swab64s(&b->dqb_bhardlimit);
__swab64s(&b->dqb_bsoftlimit);
__swab64s(&b->dqb_curspace);
__swab64s(&b->dqb_btime);
__swab64s(&b->dqb_itime);
__swab32s(&b->dqb_valid);
- CLASSERT(offsetof(typeof(*b), dqb_padding) != 0);
+ BUILD_BUG_ON(offsetof(typeof(*b), dqb_padding) == 0);
}<sep>@@
expression e1,e2;
@@
- CLASSERT(e1 != e2);
+ BUILD_BUG_ON(e1 == e2);
<|end_of_text|> | 8,455 |
--- initial
+++ final
@@ -1,22 +1,22 @@
void lustre_swab_obd_statfs(struct obd_statfs *os) {
__swab64s(&os->os_type);
__swab64s(&os->os_blocks);
__swab64s(&os->os_bfree);
__swab64s(&os->os_bavail);
__swab64s(&os->os_files);
__swab64s(&os->os_ffree);
/* no need to swab os_fsid */
__swab32s(&os->os_bsize);
__swab32s(&os->os_namelen);
__swab64s(&os->os_maxbytes);
__swab32s(&os->os_state);
- CLASSERT(offsetof(typeof(*os), os_fprecreated) != 0);
- CLASSERT(offsetof(typeof(*os), os_spare2) != 0);
- CLASSERT(offsetof(typeof(*os), os_spare3) != 0);
- CLASSERT(offsetof(typeof(*os), os_spare4) != 0);
- CLASSERT(offsetof(typeof(*os), os_spare5) != 0);
- CLASSERT(offsetof(typeof(*os), os_spare6) != 0);
- CLASSERT(offsetof(typeof(*os), os_spare7) != 0);
- CLASSERT(offsetof(typeof(*os), os_spare8) != 0);
- CLASSERT(offsetof(typeof(*os), os_spare9) != 0);
+ BUILD_BUG_ON(offsetof(typeof(*os), os_fprecreated) == 0);
+ BUILD_BUG_ON(offsetof(typeof(*os), os_spare2) == 0);
+ BUILD_BUG_ON(offsetof(typeof(*os), os_spare3) == 0);
+ BUILD_BUG_ON(offsetof(typeof(*os), os_spare4) == 0);
+ BUILD_BUG_ON(offsetof(typeof(*os), os_spare5) == 0);
+ BUILD_BUG_ON(offsetof(typeof(*os), os_spare6) == 0);
+ BUILD_BUG_ON(offsetof(typeof(*os), os_spare7) == 0);
+ BUILD_BUG_ON(offsetof(typeof(*os), os_spare8) == 0);
+ BUILD_BUG_ON(offsetof(typeof(*os), os_spare9) == 0);
}<sep>@@
expression e1,e2;
@@
- CLASSERT(e1 != e2);
+ BUILD_BUG_ON(e1 == e2);
<|end_of_text|> | 8,456 |
--- initial
+++ final
@@ -1,30 +1,30 @@
static void lustre_swab_obdo(struct obdo *o) {
__swab64s(&o->o_valid);
lustre_swab_ost_id(&o->o_oi);
__swab64s(&o->o_parent_seq);
__swab64s(&o->o_size);
__swab64s(&o->o_mtime);
__swab64s(&o->o_atime);
__swab64s(&o->o_ctime);
__swab64s(&o->o_blocks);
__swab64s(&o->o_grant);
__swab32s(&o->o_blksize);
__swab32s(&o->o_mode);
__swab32s(&o->o_uid);
__swab32s(&o->o_gid);
__swab32s(&o->o_flags);
__swab32s(&o->o_nlink);
__swab32s(&o->o_parent_oid);
__swab32s(&o->o_misc);
__swab64s(&o->o_ioepoch);
__swab32s(&o->o_stripe_idx);
__swab32s(&o->o_parent_ver);
/* o_handle is opaque */
/* o_lcookie is swabbed elsewhere */
__swab32s(&o->o_uid_h);
__swab32s(&o->o_gid_h);
__swab64s(&o->o_data_version);
- CLASSERT(offsetof(typeof(*o), o_padding_4) != 0);
- CLASSERT(offsetof(typeof(*o), o_padding_5) != 0);
- CLASSERT(offsetof(typeof(*o), o_padding_6) != 0);
+ BUILD_BUG_ON(offsetof(typeof(*o), o_padding_4) == 0);
+ BUILD_BUG_ON(offsetof(typeof(*o), o_padding_5) == 0);
+ BUILD_BUG_ON(offsetof(typeof(*o), o_padding_6) == 0);
}<sep>@@
expression e1,e2;
@@
- CLASSERT(e1 != e2);
+ BUILD_BUG_ON(e1 == e2);
<|end_of_text|> | 8,457 |
--- initial
+++ final
@@ -1,34 +1,34 @@
void lustre_swab_ptlrpc_body(struct ptlrpc_body *b) {
__swab32s(&b->pb_type);
__swab32s(&b->pb_version);
__swab32s(&b->pb_opc);
__swab32s(&b->pb_status);
__swab64s(&b->pb_last_xid);
__swab16s(&b->pb_tag);
__swab64s(&b->pb_last_committed);
__swab64s(&b->pb_transno);
__swab32s(&b->pb_flags);
__swab32s(&b->pb_op_flags);
__swab32s(&b->pb_conn_cnt);
__swab32s(&b->pb_timeout);
__swab32s(&b->pb_service_time);
__swab32s(&b->pb_limit);
__swab64s(&b->pb_slv);
__swab64s(&b->pb_pre_versions[0]);
__swab64s(&b->pb_pre_versions[1]);
__swab64s(&b->pb_pre_versions[2]);
__swab64s(&b->pb_pre_versions[3]);
__swab64s(&b->pb_mbits);
- CLASSERT(offsetof(typeof(*b), pb_padding0) != 0);
- CLASSERT(offsetof(typeof(*b), pb_padding1) != 0);
- CLASSERT(offsetof(typeof(*b), pb_padding64_0) != 0);
- CLASSERT(offsetof(typeof(*b), pb_padding64_1) != 0);
- CLASSERT(offsetof(typeof(*b), pb_padding64_2) != 0);
+ BUILD_BUG_ON(offsetof(typeof(*b), pb_padding0) == 0);
+ BUILD_BUG_ON(offsetof(typeof(*b), pb_padding1) == 0);
+ BUILD_BUG_ON(offsetof(typeof(*b), pb_padding64_0) == 0);
+ BUILD_BUG_ON(offsetof(typeof(*b), pb_padding64_1) == 0);
+ BUILD_BUG_ON(offsetof(typeof(*b), pb_padding64_2) == 0);
/* While we need to maintain compatibility between
* clients and servers without ptlrpc_body_v2 (< 2.3),
* do not swab any fields beyond pb_jobid, as we are
* using this swab function for both ptlrpc_body
* and ptlrpc_body_v2.
*/
- CLASSERT(offsetof(typeof(*b), pb_jobid) != 0);
+ BUILD_BUG_ON(offsetof(typeof(*b), pb_jobid) == 0);
}<sep>@@
expression e1,e2;
@@
- CLASSERT(e1 != e2);
+ BUILD_BUG_ON(e1 == e2);
<|end_of_text|> | 8,458 |
--- initial
+++ final
@@ -1,39 +1,39 @@
static int lustre_unpack_msg_v2(struct lustre_msg_v2 *m, int len) {
int swabbed, required_len, i;
/* Now we know the sender speaks my language. */
required_len = lustre_msg_hdr_size_v2(0);
if (len < required_len) {
/* can't even look inside the message */
CERROR("message length %d too small for lustre_msg\n", len);
return -EINVAL;
}
swabbed = (m->lm_magic == LUSTRE_MSG_MAGIC_V2_SWABBED);
if (swabbed) {
__swab32s(&m->lm_magic);
__swab32s(&m->lm_bufcount);
__swab32s(&m->lm_secflvr);
__swab32s(&m->lm_repsize);
__swab32s(&m->lm_cksum);
__swab32s(&m->lm_flags);
- CLASSERT(offsetof(typeof(*m), lm_padding_2) != 0);
- CLASSERT(offsetof(typeof(*m), lm_padding_3) != 0);
+ BUILD_BUG_ON(offsetof(typeof(*m), lm_padding_2) == 0);
+ BUILD_BUG_ON(offsetof(typeof(*m), lm_padding_3) == 0);
}
required_len = lustre_msg_hdr_size_v2(m->lm_bufcount);
if (len < required_len) {
/* didn't receive all the buffer lengths */
CERROR("message length %d too small for %d buflens\n", len, m->lm_bufcount);
return -EINVAL;
}
for (i = 0; i < m->lm_bufcount; i++) {
if (swabbed) __swab32s(&m->lm_buflens[i]);
required_len += cfs_size_round(m->lm_buflens[i]);
}
if (len < required_len) {
CERROR("len: %d, required_len %d\n", len, required_len);
CERROR("bufcount: %d\n", m->lm_bufcount);
for (i = 0; i < m->lm_bufcount; i++)
CERROR("buffer %d length %d\n", i, m->lm_buflens[i]);
return -EINVAL;
}
return swabbed;
}<sep>@@
expression e1,e2;
@@
- CLASSERT(e1 != e2);
+ BUILD_BUG_ON(e1 == e2);
<|end_of_text|> | 8,459 |
--- initial
+++ final
@@ -1,22 +1,22 @@
void ptlrpc_fill_bulk_md(lnet_md_t *md, struct ptlrpc_bulk_desc *desc, int mdidx) {
int offset = mdidx * LNET_MAX_IOV;
- CLASSERT(PTLRPC_MAX_BRW_PAGES < LI_POISON);
+ BUILD_BUG_ON(PTLRPC_MAX_BRW_PAGES >= LI_POISON);
LASSERT(mdidx < desc->bd_md_max_brw);
LASSERT(desc->bd_iov_count <= PTLRPC_MAX_BRW_PAGES);
LASSERT(!(md->options & (LNET_MD_IOVEC | LNET_MD_KIOV | LNET_MD_PHYS)));
md->length = max(0, desc->bd_iov_count - mdidx * LNET_MAX_IOV);
md->length = min_t(unsigned int, LNET_MAX_IOV, md->length);
if (ptlrpc_is_bulk_desc_kiov(desc->bd_type)) {
md->options |= LNET_MD_KIOV;
if (GET_ENC_KIOV(desc))
md->start = &BD_GET_ENC_KIOV(desc, offset);
else
md->start = &BD_GET_KIOV(desc, offset);
} else {
md->options |= LNET_MD_IOVEC;
if (GET_ENC_KVEC(desc))
md->start = &BD_GET_ENC_KVEC(desc, offset);
else
md->start = &BD_GET_KVEC(desc, offset);
}
}<sep>@@
identifier e1,e2;
@@
- CLASSERT(e1 < e2);
+ BUILD_BUG_ON(e1 >= e2);
<|end_of_text|> | 8,460 |
--- initial
+++ final
@@ -1,117 +1,117 @@
static int proc_lnet_peers(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) {
const int tmpsiz = 256;
struct lnet_peer_table *ptable;
char *tmpstr;
char *s;
int cpt = LNET_PROC_CPT_GET(*ppos);
int ver = LNET_PROC_VER_GET(*ppos);
int hash = LNET_PROC_HASH_GET(*ppos);
int hoff = LNET_PROC_HOFF_GET(*ppos);
int rc = 0;
int len;
- CLASSERT(LNET_PROC_HASH_BITS >= LNET_PEER_HASH_BITS);
+ BUILD_BUG_ON(LNET_PEER_HASH_BITS > LNET_PROC_HASH_BITS);
LASSERT(!write);
if (!*lenp) return 0;
if (cpt >= LNET_CPT_NUMBER) {
*lenp = 0;
return 0;
}
LIBCFS_ALLOC(tmpstr, tmpsiz);
if (!tmpstr) return -ENOMEM;
s = tmpstr; /* points to current position in tmpstr[] */
if (!*ppos) {
s += snprintf(s, tmpstr + tmpsiz - s, "%-24s %4s %5s %5s %5s %5s %5s %5s %5s %s\n", "nid", "refs", "state", "last", "max", "rtr", "min", "tx", "min", "queue");
LASSERT(tmpstr + tmpsiz - s > 0);
hoff++;
} else {
struct lnet_peer *peer;
struct list_head *p;
int skip;
again:
p = NULL;
peer = NULL;
skip = hoff - 1;
lnet_net_lock(cpt);
ptable = the_lnet.ln_peer_tables[cpt];
if (hoff == 1) ver = LNET_PROC_VERSION(ptable->pt_version);
if (ver != LNET_PROC_VERSION(ptable->pt_version)) {
lnet_net_unlock(cpt);
LIBCFS_FREE(tmpstr, tmpsiz);
return -ESTALE;
}
while (hash < LNET_PEER_HASH_SIZE) {
if (!p) p = ptable->pt_hash[hash].next;
while (p != &ptable->pt_hash[hash]) {
lnet_peer_t *lp = list_entry(p, lnet_peer_t, lp_hashlist);
if (!skip) {
peer = lp;
/*
* minor optimization: start from idx+1
* on next iteration if we've just
* drained lp_hashlist
*/
if (lp->lp_hashlist.next == &ptable->pt_hash[hash]) {
hoff = 1;
hash++;
} else {
hoff++;
}
break;
}
skip--;
p = lp->lp_hashlist.next;
}
if (peer) break;
p = NULL;
hoff = 1;
hash++;
}
if (peer) {
lnet_nid_t nid = peer->lp_nid;
int nrefs = peer->lp_refcount;
int lastalive = -1;
char *aliveness = "NA";
int maxcr = peer->lp_ni->ni_peertxcredits;
int txcr = peer->lp_txcredits;
int mintxcr = peer->lp_mintxcredits;
int rtrcr = peer->lp_rtrcredits;
int minrtrcr = peer->lp_minrtrcredits;
int txqnob = peer->lp_txqnob;
if (lnet_isrouter(peer) || lnet_peer_aliveness_enabled(peer)) aliveness = peer->lp_alive ? "up" : "down";
if (lnet_peer_aliveness_enabled(peer)) {
unsigned long now = cfs_time_current();
long delta;
delta = cfs_time_sub(now, peer->lp_last_alive);
lastalive = cfs_duration_sec(delta);
/* No need to mess up the peers listing with
* arbitrarily long integers - it suffices to
* know that lastalive is more than 10000s old
*/
if (lastalive >= 10000) lastalive = 9999;
}
lnet_net_unlock(cpt);
s += snprintf(s, tmpstr + tmpsiz - s, "%-24s %4d %5s %5d %5d %5d %5d %5d %5d %d\n", libcfs_nid2str(nid), nrefs, aliveness, lastalive, maxcr, rtrcr, minrtrcr, txcr, mintxcr, txqnob);
LASSERT(tmpstr + tmpsiz - s > 0);
} else { /* peer is NULL */
lnet_net_unlock(cpt);
}
if (hash == LNET_PEER_HASH_SIZE) {
cpt++;
hash = 0;
hoff = 1;
if (!peer && cpt < LNET_CPT_NUMBER) goto again;
}
}
len = s - tmpstr; /* how many bytes were written */
if (len > *lenp) { /* linux-supplied buffer is too small */
rc = -EINVAL;
} else if (len > 0) { /* wrote something */
if (copy_to_user(buffer, tmpstr, len))
rc = -EFAULT;
else
*ppos = LNET_PROC_POS_MAKE(cpt, ver, hash, hoff);
}
LIBCFS_FREE(tmpstr, tmpsiz);
if (!rc) *lenp = len;
return rc;
}<sep>@@
identifier e1,e2;
@@
- CLASSERT(e1 <= e2);
+ BUILD_BUG_ON(e1 > e2);
<|end_of_text|> | 8,461 |
--- initial
+++ final
@@ -1,83 +1,83 @@
static int proc_lnet_routes(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) {
const int tmpsiz = 256;
char *tmpstr;
char *s;
int rc = 0;
int len;
int ver;
int off;
- CLASSERT(sizeof(loff_t) >= 4);
+ BUILD_BUG_ON(4 > sizeof(loff_t));
off = LNET_PROC_HOFF_GET(*ppos);
ver = LNET_PROC_VER_GET(*ppos);
LASSERT(!write);
if (!*lenp) return 0;
LIBCFS_ALLOC(tmpstr, tmpsiz);
if (!tmpstr) return -ENOMEM;
s = tmpstr; /* points to current position in tmpstr[] */
if (!*ppos) {
s += snprintf(s, tmpstr + tmpsiz - s, "Routing %s\n", the_lnet.ln_routing ? "enabled" : "disabled");
LASSERT(tmpstr + tmpsiz - s > 0);
s += snprintf(s, tmpstr + tmpsiz - s, "%-8s %4s %8s %7s %s\n", "net", "hops", "priority", "state", "router");
LASSERT(tmpstr + tmpsiz - s > 0);
lnet_net_lock(0);
ver = (unsigned int)the_lnet.ln_remote_nets_version;
lnet_net_unlock(0);
*ppos = LNET_PROC_POS_MAKE(0, ver, 0, off);
} else {
struct list_head *n;
struct list_head *r;
lnet_route_t *route = NULL;
lnet_remotenet_t *rnet = NULL;
int skip = off - 1;
struct list_head *rn_list;
int i;
lnet_net_lock(0);
if (ver != LNET_PROC_VERSION(the_lnet.ln_remote_nets_version)) {
lnet_net_unlock(0);
LIBCFS_FREE(tmpstr, tmpsiz);
return -ESTALE;
}
for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE && !route; i++) {
rn_list = &the_lnet.ln_remote_nets_hash[i];
n = rn_list->next;
while (n != rn_list && !route) {
rnet = list_entry(n, lnet_remotenet_t, lrn_list);
r = rnet->lrn_routes.next;
while (r != &rnet->lrn_routes) {
lnet_route_t *re = list_entry(r, lnet_route_t, lr_list);
if (!skip) {
route = re;
break;
}
skip--;
r = r->next;
}
n = n->next;
}
}
if (route) {
__u32 net = rnet->lrn_net;
__u32 hops = route->lr_hops;
unsigned int priority = route->lr_priority;
lnet_nid_t nid = route->lr_gateway->lp_nid;
int alive = lnet_is_route_alive(route);
s += snprintf(s, tmpstr + tmpsiz - s, "%-8s %4u %8u %7s %s\n", libcfs_net2str(net), hops, priority, alive ? "up" : "down", libcfs_nid2str(nid));
LASSERT(tmpstr + tmpsiz - s > 0);
}
lnet_net_unlock(0);
}
len = s - tmpstr; /* how many bytes were written */
if (len > *lenp) { /* linux-supplied buffer is too small */
rc = -EINVAL;
} else if (len > 0) { /* wrote something */
if (copy_to_user(buffer, tmpstr, len)) {
rc = -EFAULT;
} else {
off += 1;
*ppos = LNET_PROC_POS_MAKE(0, ver, 0, off);
}
}
LIBCFS_FREE(tmpstr, tmpsiz);
if (!rc) *lenp = len;
return rc;
}<sep>@@
expression e1,e2;
@@
- CLASSERT(e1 <= e2);
+ BUILD_BUG_ON(e1 > e2);
<|end_of_text|> | 8,462 |
--- initial
+++ final
@@ -1,22 +1,22 @@
static struct ksock_proto *ksocknal_parse_proto_version(ksock_hello_msg_t *hello) {
__u32 version = 0;
if (hello->kshm_magic == LNET_PROTO_MAGIC)
version = hello->kshm_version;
else if (hello->kshm_magic == __swab32(LNET_PROTO_MAGIC))
version = __swab32(hello->kshm_version);
if (version) {
#if SOCKNAL_VERSION_DEBUG
if (*ksocknal_tunables.ksnd_protocol == 1) return NULL;
if (*ksocknal_tunables.ksnd_protocol == 2 && version == KSOCK_PROTO_V3) return NULL;
#endif
if (version == KSOCK_PROTO_V2) return &ksocknal_protocol_v2x;
if (version == KSOCK_PROTO_V3) return &ksocknal_protocol_v3x;
return NULL;
}
if (hello->kshm_magic == le32_to_cpu(LNET_PROTO_TCP_MAGIC)) {
struct lnet_magicversion *hmv = (struct lnet_magicversion *)hello;
- CLASSERT(sizeof(struct lnet_magicversion) == offsetof(ksock_hello_msg_t, kshm_src_nid));
+ BUILD_BUG_ON(sizeof(struct lnet_magicversion) != offsetof(ksock_hello_msg_t, kshm_src_nid));
if (hmv->version_major == cpu_to_le16(KSOCK_PROTO_V1_MAJOR) && hmv->version_minor == cpu_to_le16(KSOCK_PROTO_V1_MINOR)) return &ksocknal_protocol_v1x;
}
return NULL;
}<sep>@@
expression e1,e2;
@@
- CLASSERT(e1 == e2);
+ BUILD_BUG_ON(e1 != e2);
<|end_of_text|> | 8,463 |
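
The parser above uses a common wire-protocol idiom: a fixed magic constant doubles as an endianness probe, because the byte-swapped magic is just as recognizable as the native one. A hedged sketch of the idiom with a made-up magic value:

#include <linux/types.h>
#include <linux/swab.h>

#define MY_MAGIC 0x0be91e57U	/* made-up constant, for illustration */

/* Return the peer's header version in host byte order, or 0 when the
 * magic matches neither byte order (not our protocol at all). */
static inline __u32 parse_version(__u32 magic, __u32 version)
{
	if (magic == MY_MAGIC)			/* peer shares our endianness */
		return version;
	if (magic == __swab32(MY_MAGIC))	/* peer is byte-swapped */
		return __swab32(version);
	return 0;
}
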
--- initial
+++ final
@@ -1,20 +1,20 @@
static int __init ksocklnd_init(void) {
int rc;
/* check that the ksnr_connected/connecting fields are large enough */
- CLASSERT(SOCKLND_CONN_NTYPES <= 4);
- CLASSERT(SOCKLND_CONN_ACK == SOCKLND_CONN_BULK_IN);
+ BUILD_BUG_ON(SOCKLND_CONN_NTYPES > 4);
+ BUILD_BUG_ON(SOCKLND_CONN_ACK != SOCKLND_CONN_BULK_IN);
/* initialize the_ksocklnd */
the_ksocklnd.lnd_type = SOCKLND;
the_ksocklnd.lnd_startup = ksocknal_startup;
the_ksocklnd.lnd_shutdown = ksocknal_shutdown;
the_ksocklnd.lnd_ctl = ksocknal_ctl;
the_ksocklnd.lnd_send = ksocknal_send;
the_ksocklnd.lnd_recv = ksocknal_recv;
the_ksocklnd.lnd_notify = ksocknal_notify;
the_ksocklnd.lnd_query = ksocknal_query;
the_ksocklnd.lnd_accept = ksocknal_accept;
rc = ksocknal_tunables_init();
if (rc) return rc;
lnet_register_lnd(&the_ksocklnd);
return 0;
}<sep>@@
expression e1,e2;
@@
- CLASSERT(e1 == e2);
+ BUILD_BUG_ON(e1 != e2);
@@
expression e1,e2;
@@
- CLASSERT(e1 <= e2);
+ BUILD_BUG_ON(e1 > e2);
<|end_of_text|> | 8,464 |
--- initial
+++ final
@@ -1,53 +1,53 @@
static int ksocknal_send_hello_v1(struct ksock_conn *conn, ksock_hello_msg_t *hello) {
struct socket *sock = conn->ksnc_sock;
struct lnet_hdr *hdr;
struct lnet_magicversion *hmv;
int rc;
int i;
- CLASSERT(sizeof(struct lnet_magicversion) == offsetof(struct lnet_hdr, src_nid));
+ BUILD_BUG_ON(sizeof(struct lnet_magicversion) != offsetof(struct lnet_hdr, src_nid));
LIBCFS_ALLOC(hdr, sizeof(*hdr));
if (!hdr) {
CERROR("Can't allocate struct lnet_hdr\n");
return -ENOMEM;
}
hmv = (struct lnet_magicversion *)&hdr->dest_nid;
/*
* Reorganize the V2.x message header into a V1.x (struct lnet_hdr)
* header and send it out
*/
hmv->magic = cpu_to_le32(LNET_PROTO_TCP_MAGIC);
hmv->version_major = cpu_to_le16(KSOCK_PROTO_V1_MAJOR);
hmv->version_minor = cpu_to_le16(KSOCK_PROTO_V1_MINOR);
if (the_lnet.ln_testprotocompat) {
/* single-shot proto check */
LNET_LOCK();
if (the_lnet.ln_testprotocompat & 1) {
hmv->version_major++; /* just different! */
the_lnet.ln_testprotocompat &= ~1;
}
if (the_lnet.ln_testprotocompat & 2) {
hmv->magic = LNET_PROTO_MAGIC;
the_lnet.ln_testprotocompat &= ~2;
}
LNET_UNLOCK();
}
hdr->src_nid = cpu_to_le64(hello->kshm_src_nid);
hdr->src_pid = cpu_to_le32(hello->kshm_src_pid);
hdr->type = cpu_to_le32(LNET_MSG_HELLO);
hdr->payload_length = cpu_to_le32(hello->kshm_nips * sizeof(__u32));
hdr->msg.hello.type = cpu_to_le32(hello->kshm_ctype);
hdr->msg.hello.incarnation = cpu_to_le64(hello->kshm_src_incarnation);
rc = lnet_sock_write(sock, hdr, sizeof(*hdr), lnet_acceptor_timeout());
if (rc) {
CNETERR("Error %d sending HELLO hdr to %pI4h/%d\n", rc, &conn->ksnc_ipaddr, conn->ksnc_port);
goto out;
}
if (!hello->kshm_nips) goto out;
for (i = 0; i < (int)hello->kshm_nips; i++)
hello->kshm_ips[i] = __cpu_to_le32(hello->kshm_ips[i]);
rc = lnet_sock_write(sock, hello->kshm_ips, hello->kshm_nips * sizeof(__u32), lnet_acceptor_timeout());
if (rc) { CNETERR("Error %d sending HELLO payload (%d) to %pI4h/%d\n", rc, hello->kshm_nips, &conn->ksnc_ipaddr, conn->ksnc_port); }
out:
LIBCFS_FREE(hdr, sizeof(*hdr));
return rc;
}<sep>@@
expression e1,e2;
@@
- CLASSERT(e1 == e2);
+ BUILD_BUG_ON(e1 != e2);
<|end_of_text|> | 8,465 |
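
The assertion in this entry guards a pointer cast: a struct lnet_magicversion is written over the head of struct lnet_hdr via &hdr->dest_nid, so the build must fail if the smaller struct ever stops fitting exactly in front of src_nid. A sketch of the overlay guard, with hypothetical struct names:

#include <linux/bug.h>
#include <linux/stddef.h>
#include <linux/types.h>

/* Hypothetical structs standing in for lnet_magicversion/lnet_hdr. */
struct proto_magic {
	__u32 magic;
	__u16 version_major;
	__u16 version_minor;
};

struct proto_hdr {
	__u64 dest_nid;		/* proto_magic is written over this */
	__u64 src_nid;
	/* ... */
};

static inline void overlay_check(void)
{
	/* The cast (struct proto_magic *)&hdr->dest_nid is only safe if
	 * the overlay ends exactly where src_nid begins. */
	BUILD_BUG_ON(sizeof(struct proto_magic) !=
		     offsetof(struct proto_hdr, src_nid));
}
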
--- initial
+++ final
@@ -1,63 +1,63 @@
static int __init lustre_init(void) {
lnet_process_id_t lnet_id;
struct timespec64 ts;
int i, rc, seed[2];
- CLASSERT(sizeof(LUSTRE_VOLATILE_HDR) == LUSTRE_VOLATILE_HDR_LEN + 1);
+ BUILD_BUG_ON(sizeof(LUSTRE_VOLATILE_HDR) != LUSTRE_VOLATILE_HDR_LEN + 1);
/* print an address of _any_ initialized kernel symbol from this
* module, to allow debugging with gdb that doesn't support data
* symbols from modules.
*/
CDEBUG(D_INFO, "Lustre client module (%p).\n", &lustre_super_operations);
rc = -ENOMEM;
ll_inode_cachep = kmem_cache_create("lustre_inode_cache", sizeof(struct ll_inode_info), 0, SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT, NULL);
if (!ll_inode_cachep) goto out_cache;
ll_file_data_slab = kmem_cache_create("ll_file_data", sizeof(struct ll_file_data), 0, SLAB_HWCACHE_ALIGN, NULL);
if (!ll_file_data_slab) goto out_cache;
llite_root = debugfs_create_dir("llite", debugfs_lustre_root);
if (IS_ERR_OR_NULL(llite_root)) {
rc = llite_root ? PTR_ERR(llite_root) : -ENOMEM;
llite_root = NULL;
goto out_cache;
}
llite_kset = kset_create_and_add("llite", NULL, lustre_kobj);
if (!llite_kset) {
rc = -ENOMEM;
goto out_debugfs;
}
cfs_get_random_bytes(seed, sizeof(seed));
/* Nodes with small feet have little entropy. The NID for this
* node gives the most entropy in the low bits
*/
for (i = 0;; i++) {
if (LNetGetId(i, &lnet_id) == -ENOENT) break;
if (LNET_NETTYP(LNET_NIDNET(lnet_id.nid)) != LOLND) seed[0] ^= LNET_NIDADDR(lnet_id.nid);
}
ktime_get_ts64(&ts);
cfs_srand(ts.tv_sec ^ seed[0], ts.tv_nsec ^ seed[1]);
rc = vvp_global_init();
if (rc != 0) goto out_sysfs;
cl_inode_fini_env = cl_env_alloc(&cl_inode_fini_refcheck, LCT_REMEMBER | LCT_NOREF);
if (IS_ERR(cl_inode_fini_env)) {
rc = PTR_ERR(cl_inode_fini_env);
goto out_vvp;
}
cl_inode_fini_env->le_ctx.lc_cookie = 0x4;
rc = ll_xattr_init();
if (rc != 0) goto out_inode_fini_env;
lustre_register_client_fill_super(ll_fill_super);
lustre_register_kill_super_cb(ll_kill_super);
lustre_register_client_process_config(ll_process_config);
return 0;
out_inode_fini_env:
cl_env_put(cl_inode_fini_env, &cl_inode_fini_refcheck);
out_vvp:
vvp_global_fini();
out_sysfs:
kset_unregister(llite_kset);
out_debugfs:
debugfs_remove(llite_root);
out_cache:
kmem_cache_destroy(ll_inode_cachep);
kmem_cache_destroy(ll_file_data_slab);
return rc;
}<sep>@@
expression e1,e2;
@@
- CLASSERT(e1 == e2);
+ BUILD_BUG_ON(e1 != e2);
<|end_of_text|> | 8,466 |
--- initial
+++ final
@@ -1,6 +1,6 @@
static void vvp_pgcache_id_unpack(loff_t pos, struct vvp_pgcache_id *id) {
- CLASSERT(sizeof(pos) == sizeof(__u64));
+ BUILD_BUG_ON(sizeof(pos) != sizeof(__u64));
id->vpi_index = pos & 0xffffffff;
id->vpi_depth = (pos >> PGC_DEPTH_SHIFT) & 0xf;
id->vpi_bucket = (unsigned long long)pos >> PGC_OBJ_SHIFT;
}<sep>@@
expression e1,e2;
@@
- CLASSERT(e1 == e2);
+ BUILD_BUG_ON(e1 != e2);
<|end_of_text|> | 8,467 |
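
The unpack above splits a 64-bit seq-file position into an index (low 32 bits), a 4-bit depth, and a bucket, and the assertion pins the assumption that loff_t is 64 bits wide. Assuming the layout implied by those shifts, the inverse packing would look like this sketch:

/* Sketch of the inverse of vvp_pgcache_id_unpack(), assuming the field
 * layout implied by the shifts above; vpi_depth must fit in 4 bits to
 * round-trip through the 0xf mask. */
static loff_t vvp_pgcache_id_pack_sketch(const struct vvp_pgcache_id *id)
{
	return ((__u64)id->vpi_index) |
	       ((__u64)id->vpi_depth << PGC_DEPTH_SHIFT) |
	       ((__u64)id->vpi_bucket << PGC_OBJ_SHIFT);
}
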
--- initial
+++ final
@@ -1,30 +1,30 @@
static int amd_iommu_add_device(struct device *dev) {
struct iommu_dev_data *dev_data;
struct iommu_domain *domain;
struct amd_iommu *iommu;
int ret, devid;
if (!check_device(dev) || get_dev_data(dev)) return 0;
devid = get_device_id(dev);
if (devid < 0) return devid;
iommu = amd_iommu_rlookup_table[devid];
ret = iommu_init_device(dev);
if (ret) {
if (ret != -ENOTSUPP) pr_err("Failed to initialize device %s - trying to proceed anyway\n", dev_name(dev));
iommu_ignore_device(dev);
- dev->archdata.dma_ops = &nommu_dma_ops;
+ dev->dma_ops = &nommu_dma_ops;
goto out;
}
init_iommu_group(dev);
dev_data = get_dev_data(dev);
BUG_ON(!dev_data);
if (iommu_pass_through || dev_data->iommu_v2) iommu_request_dm_for_dev(dev);
/* Domains are initialized for this device - have a look at what we ended up with */
domain = iommu_get_domain_for_dev(dev);
if (domain->type == IOMMU_DOMAIN_IDENTITY)
dev_data->passthrough = true;
else
- dev->archdata.dma_ops = &amd_iommu_dma_ops;
+ dev->dma_ops = &amd_iommu_dma_ops;
out:
iommu_completion_wait(iommu);
return 0;
}<sep>@@
struct device *dev;
@@
- dev->archdata.dma_ops
+ dev->dma_ops
<|end_of_text|> | 8,622 |
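
From this entry onward the series switches transformations: the dma_ops pointer moves out of the per-architecture dev->archdata and into struct device itself, so generic code can reach it without arch-specific accessors. The usual consumer-side helper, checking the per-device pointer before falling back to the architecture default, might look like this sketch (the real get_dma_ops() lives in the DMA-mapping headers and varies by kernel version):

#include <linux/device.h>

/* Sketch only; not the exact in-tree helper. */
static inline const struct dma_map_ops *my_get_dma_ops(struct device *dev)
{
	if (dev && dev->dma_ops)
		return dev->dma_ops;		/* per-device override */
	return get_arch_dma_ops(dev ? dev->bus : NULL);	/* arch default */
}
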
--- initial
+++ final
@@ -1,17 +1,17 @@
static void iommu_uninit_device(struct device *dev) {
int devid;
struct iommu_dev_data *dev_data;
devid = get_device_id(dev);
if (devid < 0) return;
dev_data = search_dev_data(devid);
if (!dev_data) return;
if (dev_data->domain) detach_device(dev);
iommu_device_unlink(amd_iommu_rlookup_table[dev_data->devid]->iommu_dev, dev);
iommu_group_remove_device(dev);
/* Remove dma-ops */
- dev->archdata.dma_ops = NULL;
+ dev->dma_ops = NULL;
/*
* We keep dev_data around for unplugged devices and reuse it when the
* device is re-plugged - not doing so would introduce a ton of races.
*/
}<sep>@@
struct device *dev;
@@
- dev->archdata.dma_ops
+ dev->dma_ops
<|end_of_text|> | 8,623 |
--- initial
+++ final
@@ -1,9 +1,9 @@
static int cell_of_bus_notify(struct notifier_block *nb, unsigned long action, void *data) {
struct device *dev = data;
/* We are only interested in device addition */
if (action != BUS_NOTIFY_ADD_DEVICE) return 0;
/* We use the PCI DMA ops */
- dev->archdata.dma_ops = get_pci_dma_ops();
+ dev->dma_ops = get_pci_dma_ops();
cell_dma_dev_setup(dev);
return 0;
}<sep>@@
struct device *dev;
@@
- dev->archdata.dma_ops
+ dev->dma_ops
<|end_of_text|> | 8,624 |
--- initial
+++ final
@@ -1,5 +1,5 @@
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, const struct iommu_ops *iommu, bool coherent) {
- if (!dev->archdata.dma_ops) dev->archdata.dma_ops = &swiotlb_dma_ops;
+ if (!dev->dma_ops) dev->dma_ops = &swiotlb_dma_ops;
dev->archdata.dma_coherent = coherent;
__iommu_setup_dma_ops(dev, dma_base, size, iommu);
}<sep>@@
struct device *dev;
@@
- dev->archdata.dma_ops
+ dev->dma_ops
<|end_of_text|> | 8,625 |
--- initial
+++ final
@@ -1 +1 @@
-void arch_teardown_dma_ops(struct device *dev) { dev->archdata.dma_ops = NULL; }
+void arch_teardown_dma_ops(struct device *dev) { dev->dma_ops = NULL; }<sep>@@
struct device *dev;
@@
- dev->archdata.dma_ops
+ dev->dma_ops
<|end_of_text|> | 8,626 |
--- initial
+++ final
@@ -1,14 +1,14 @@
static bool do_iommu_attach(struct device *dev, const struct iommu_ops *ops, u64 dma_base, u64 size) {
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
/*
* If the IOMMU driver has the DMA domain support that we require,
* then the IOMMU core will have already configured a group for this
* device, and allocated the default domain for that group.
*/
if (!domain || iommu_dma_init_domain(domain, dma_base, size, dev)) {
pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n", dev_name(dev));
return false;
}
- dev->archdata.dma_ops = &iommu_dma_ops;
+ dev->dma_ops = &iommu_dma_ops;
return true;
}<sep>@@
struct device *dev;
@@
- dev->archdata.dma_ops
+ dev->dma_ops
<|end_of_text|> | 8,627 |
--- initial
+++ final
@@ -1,11 +1,11 @@
static int ibmebus_create_device(struct device_node *dn) {
struct platform_device *dev;
int ret;
dev = of_device_alloc(dn, NULL, &ibmebus_bus_device);
if (!dev) return -ENOMEM;
dev->dev.bus = &ibmebus_bus_type;
- dev->dev.archdata.dma_ops = &ibmebus_dma_ops;
+ dev->dev.dma_ops = &ibmebus_dma_ops;
ret = of_device_add(dev);
if (ret) platform_device_put(dev);
return ret;
}<sep>@@
expression dev;
identifier f;
@@
- dev->f.archdata.dma_ops
+ dev->f.dma_ops
<|end_of_text|> | 8,628 |
--- initial
+++ final
@@ -1,19 +1,19 @@
static void pci_dma_dev_setup_pasemi(struct pci_dev *dev) {
pr_debug("pci_dma_dev_setup, dev %p (%s)\n", dev, pci_name(dev));
#if !defined(CONFIG_PPC_PASEMI_IOMMU_DMA_FORCE)
/* For non-LPAR environment, don't translate anything for the DMA
* engine. The exception to this is if the user has enabled
* CONFIG_PPC_PASEMI_IOMMU_DMA_FORCE at build time.
*/
if (dev->vendor == 0x1959 && dev->device == 0xa007 && !firmware_has_feature(FW_FEATURE_LPAR)) {
- dev->dev.archdata.dma_ops = &dma_direct_ops;
+ dev->dev.dma_ops = &dma_direct_ops;
/*
* Set the coherent DMA mask to prevent the iommu
* being used unnecessarily
*/
dev->dev.coherent_dma_mask = DMA_BIT_MASK(44);
return;
}
#endif
set_iommu_table_base(&dev->dev, &iommu_table_iobmap);
}<sep>@@
expression dev;
identifier f;
@@
- dev->f.archdata.dma_ops
+ dev->f.dma_ops
<|end_of_text|> | 8,629 |
--- initial
+++ final
@@ -1,28 +1,28 @@
struct mbus_device *mbus_register_device(struct device *pdev, int id, const struct dma_map_ops *dma_ops, struct mbus_hw_ops *hw_ops, int index, void __iomem *mmio_va) {
int ret;
struct mbus_device *mbdev;
mbdev = kzalloc(sizeof(*mbdev), GFP_KERNEL);
if (!mbdev) return ERR_PTR(-ENOMEM);
mbdev->mmio_va = mmio_va;
mbdev->dev.parent = pdev;
mbdev->id.device = id;
mbdev->id.vendor = MBUS_DEV_ANY_ID;
- mbdev->dev.archdata.dma_ops = dma_ops;
+ mbdev->dev.dma_ops = dma_ops;
mbdev->dev.dma_mask = &mbdev->dev.coherent_dma_mask;
dma_set_mask(&mbdev->dev, DMA_BIT_MASK(64));
mbdev->dev.release = mbus_release_dev;
mbdev->hw_ops = hw_ops;
mbdev->dev.bus = &mic_bus;
mbdev->index = index;
dev_set_name(&mbdev->dev, "mbus-dev%u", mbdev->index);
/*
* device_register() causes the bus infrastructure to look for a
* matching driver.
*/
ret = device_register(&mbdev->dev);
if (ret) goto free_mbdev;
return mbdev;
free_mbdev:
put_device(&mbdev->dev);
return ERR_PTR(ret);
}<sep>@@
expression dev;
identifier f;
@@
- dev->f.archdata.dma_ops
+ dev->f.dma_ops
<|end_of_text|> | 8,630 |
--- initial
+++ final
@@ -1,46 +1,46 @@
static int __init calgary_init(void) {
int ret;
struct pci_dev *dev = NULL;
struct calgary_bus_info *info;
ret = calgary_locate_bbars();
if (ret) return ret;
/* Purely for kdump kernel case */
if (is_kdump_kernel()) get_tce_space_from_tar();
do {
dev = pci_get_device(PCI_VENDOR_ID_IBM, PCI_ANY_ID, dev);
if (!dev) break;
if (!is_cal_pci_dev(dev->device)) continue;
info = &bus_info[dev->bus->number];
if (info->translation_disabled) {
calgary_init_one_nontraslated(dev);
continue;
}
if (!info->tce_space && !translate_empty_slots) continue;
ret = calgary_init_one(dev);
if (ret) goto error;
} while (1);
dev = NULL;
for_each_pci_dev(dev) {
struct iommu_table *tbl;
tbl = find_iommu_table(&dev->dev);
- if (translation_enabled(tbl)) dev->dev.archdata.dma_ops = &calgary_dma_ops;
+ if (translation_enabled(tbl)) dev->dev.dma_ops = &calgary_dma_ops;
}
return ret;
error:
do {
dev = pci_get_device(PCI_VENDOR_ID_IBM, PCI_ANY_ID, dev);
if (!dev) break;
if (!is_cal_pci_dev(dev->device)) continue;
info = &bus_info[dev->bus->number];
if (info->translation_disabled) {
pci_dev_put(dev);
continue;
}
if (!info->tce_space && !translate_empty_slots) continue;
calgary_disable_translation(dev);
calgary_free_bus(dev);
pci_dev_put(dev); /* Undo calgary_init_one()'s pci_dev_get() */
- dev->dev.archdata.dma_ops = NULL;
+ dev->dev.dma_ops = NULL;
} while (1);
return ret;
}<sep>@@
expression dev;
identifier f;
@@
- dev->f.archdata.dma_ops
+ dev->f.dma_ops
<|end_of_text|> | 8,631 |
--- initial
+++ final
@@ -1,71 +1,71 @@
int pcibios_plat_dev_init(struct pci_dev *dev) {
uint16_t config;
uint32_t dconfig;
int pos;
/*
* Force the Cache line setting to 64 bytes. The standard
* Linux bus scan doesn't seem to set it. Octeon really has
* 128 byte lines, but Intel bridges get really upset if you
* try and set values above 64 bytes. Value is specified in
* 32bit words.
*/
pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, 64 / 4);
/* Set latency timers for all devices */
pci_write_config_byte(dev, PCI_LATENCY_TIMER, 64);
/* Enable reporting System errors and parity errors on all devices */
/* Enable parity checking and error reporting */
pci_read_config_word(dev, PCI_COMMAND, &config);
config |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
pci_write_config_word(dev, PCI_COMMAND, config);
if (dev->subordinate) {
/* Set latency timers on sub bridges */
pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER, 64);
/* More bridge error detection */
pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &config);
config |= PCI_BRIDGE_CTL_PARITY | PCI_BRIDGE_CTL_SERR;
pci_write_config_word(dev, PCI_BRIDGE_CONTROL, config);
}
/* Enable the PCIe normal error reporting */
config = PCI_EXP_DEVCTL_CERE; /* Correctable Error Reporting */
config |= PCI_EXP_DEVCTL_NFERE; /* Non-Fatal Error Reporting */
config |= PCI_EXP_DEVCTL_FERE; /* Fatal Error Reporting */
config |= PCI_EXP_DEVCTL_URRE; /* Unsupported Request */
pcie_capability_set_word(dev, PCI_EXP_DEVCTL, config);
/* Find the Advanced Error Reporting capability */
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
if (pos) {
/* Clear Uncorrectable Error Status */
pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &dconfig);
pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, dconfig);
/* Enable reporting of all uncorrectable errors */
/* Uncorrectable Error Mask - turned on bits disable errors */
pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, 0);
/*
* Leave severity at HW default. This only controls if
* errors are reported as uncorrectable or
* correctable, not if the error is reported.
*/
/* PCI_ERR_UNCOR_SEVER - Uncorrectable Error Severity */
/* Clear Correctable Error Status */
pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &dconfig);
pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS, dconfig);
/* Enable reporting of all correctable errors */
/* Correctable Error Mask - turned on bits disable errors */
pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, 0);
/* Advanced Error Capabilities */
pci_read_config_dword(dev, pos + PCI_ERR_CAP, &dconfig);
/* ECRC Generation Enable */
if (config & PCI_ERR_CAP_ECRC_GENC) config |= PCI_ERR_CAP_ECRC_GENE;
/* ECRC Check Enable */
if (config & PCI_ERR_CAP_ECRC_CHKC) config |= PCI_ERR_CAP_ECRC_CHKE;
pci_write_config_dword(dev, pos + PCI_ERR_CAP, dconfig);
/* PCI_ERR_HEADER_LOG - Header Log Register (16 bytes) */
/* Report all errors to the root complex */
pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, PCI_ERR_ROOT_CMD_COR_EN | PCI_ERR_ROOT_CMD_NONFATAL_EN | PCI_ERR_ROOT_CMD_FATAL_EN);
/* Clear the Root status register */
pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &dconfig);
pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, dconfig);
}
- dev->dev.archdata.dma_ops = octeon_pci_dma_map_ops;
+ dev->dev.dma_ops = octeon_pci_dma_map_ops;
return 0;
}<sep>@@
expression dev;
identifier f;
@@
- dev->f.archdata.dma_ops
+ dev->f.dma_ops
<|end_of_text|> | 8,632 |
--- initial
+++ final
@@ -1,13 +1,13 @@
int pcibios_add_device(struct pci_dev *pdev) {
struct resource *res;
int i;
pdev->dev.groups = zpci_attr_groups;
- pdev->dev.archdata.dma_ops = &s390_pci_dma_ops;
+ pdev->dev.dma_ops = &s390_pci_dma_ops;
zpci_map_resources(pdev);
for (i = 0; i < PCI_BAR_COUNT; i++) {
res = &pdev->resource[i];
if (res->parent || !res->flags) continue;
pci_claim_resource(pdev, i);
}
return 0;
}<sep>@@
expression dev;
identifier f;
@@
- dev->f.archdata.dma_ops
+ dev->f.dma_ops
<|end_of_text|> | 8,633 |
--- initial
+++ final
@@ -1,36 +1,36 @@
struct scif_hw_dev *scif_register_device(struct device *pdev, int id, const struct dma_map_ops *dma_ops, struct scif_hw_ops *hw_ops, u8 dnode, u8 snode, struct mic_mw *mmio, struct mic_mw *aper, void *dp, void __iomem *rdp, struct dma_chan **chan, int num_chan, bool card_rel_da) {
int ret;
struct scif_hw_dev *sdev;
sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
if (!sdev) return ERR_PTR(-ENOMEM);
sdev->dev.parent = pdev;
sdev->id.device = id;
sdev->id.vendor = SCIF_DEV_ANY_ID;
- sdev->dev.archdata.dma_ops = dma_ops;
+ sdev->dev.dma_ops = dma_ops;
sdev->dev.release = scif_release_dev;
sdev->hw_ops = hw_ops;
sdev->dnode = dnode;
sdev->snode = snode;
dev_set_drvdata(&sdev->dev, sdev);
sdev->dev.bus = &scif_bus;
sdev->mmio = mmio;
sdev->aper = aper;
sdev->dp = dp;
sdev->rdp = rdp;
sdev->dev.dma_mask = &sdev->dev.coherent_dma_mask;
dma_set_mask(&sdev->dev, DMA_BIT_MASK(64));
sdev->dma_ch = chan;
sdev->num_dma_ch = num_chan;
sdev->card_rel_da = card_rel_da;
dev_set_name(&sdev->dev, "scif-dev%u", sdev->dnode);
/*
* device_register() causes the bus infrastructure to look for a
* matching driver.
*/
ret = device_register(&sdev->dev);
if (ret) goto free_sdev;
return sdev;
free_sdev:
put_device(&sdev->dev);
return ERR_PTR(ret);
}<sep>@@
expression dev;
identifier f;
@@
- dev->f.archdata.dma_ops
+ dev->f.dma_ops
<|end_of_text|> | 8,634 |
--- initial
+++ final
@@ -1,16 +1,16 @@
static int pcmcia_notify(struct notifier_block *nb, unsigned long action, void *data) {
struct device *dev = data;
struct device *parent;
struct pcmcia_device *pdev = to_pcmcia_dev(dev);
/* We are only interested in device addition */
if (action != BUS_NOTIFY_ADD_DEVICE) return 0;
parent = pdev->socket->dev.parent;
/* We know electra_cf devices will always have of_node set, since
* electra_cf is an of_platform driver.
*/
if (!parent->of_node) return 0;
if (!of_device_is_compatible(parent->of_node, "electra-cf")) return 0;
/* We use the direct ops for localbus */
- dev->archdata.dma_ops = &dma_direct_ops;
+ dev->dma_ops = &dma_direct_ops;
return 0;
}<sep>@@
struct device *dev;
@@
- dev->archdata.dma_ops
+ dev->dma_ops
<|end_of_text|> | 8,635 |
--- initial
+++ final
@@ -1,11 +1,11 @@
bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) {
struct sta2x11_mapping *map;
- if (dev->archdata.dma_ops != &sta2x11_dma_ops) {
+ if (dev->dma_ops != &sta2x11_dma_ops) {
if (!dev->dma_mask) return false;
return addr + size - 1 <= *dev->dma_mask;
}
map = sta2x11_pdev_to_mapping(to_pci_dev(dev));
if (!map || (addr < map->amba_base)) return false;
if (addr + size >= map->amba_base + STA2X11_AMBA_SIZE) { return false; }
return true;
}<sep>@@
struct device *dev;
@@
- dev->archdata.dma_ops
+ dev->dma_ops
<|end_of_text|> | 8,636 |
--- initial
+++ final
@@ -1,4 +1,4 @@
phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) {
- if (dev->archdata.dma_ops != &sta2x11_dma_ops) return daddr;
+ if (dev->dma_ops != &sta2x11_dma_ops) return daddr;
return a2p(daddr, to_pci_dev(dev));
}<sep>@@
struct device *dev;
@@
- dev->archdata.dma_ops
+ dev->dma_ops
<|end_of_text|> | 8,637 |
--- initial
+++ final
@@ -1,4 +1,4 @@
dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) {
- if (dev->archdata.dma_ops != &sta2x11_dma_ops) return paddr;
+ if (dev->dma_ops != &sta2x11_dma_ops) return paddr;
return p2a(paddr, to_pci_dev(dev));
}<sep>@@
struct device *dev;
@@
- dev->archdata.dma_ops
+ dev->dma_ops
<|end_of_text|> | 8,638 |
--- initial
+++ final
@@ -1,10 +1,10 @@
static void sta2x11_setup_pdev(struct pci_dev *pdev) {
struct sta2x11_instance *instance = sta2x11_pdev_to_instance(pdev);
if (!instance) /* either a sta2x11 bridge or another ST device */
return;
pci_set_consistent_dma_mask(pdev, STA2X11_AMBA_SIZE - 1);
pci_set_dma_mask(pdev, STA2X11_AMBA_SIZE - 1);
- pdev->dev.archdata.dma_ops = &sta2x11_dma_ops;
+ pdev->dev.dma_ops = &sta2x11_dma_ops;
/* We must enable all devices as master, for audio DMA to work */
pci_set_master(pdev);
}<sep>@@
expression dev;
identifier f;
@@
- dev->f.archdata.dma_ops
+ dev->f.dma_ops
<|end_of_text|> | 8,639 |
--- initial
+++ final
@@ -1,28 +1,28 @@
int ps3_system_bus_device_register(struct ps3_system_bus_device *dev) {
int result;
static unsigned int dev_ioc0_count;
static unsigned int dev_sb_count;
static unsigned int dev_vuart_count;
static unsigned int dev_lpm_count;
if (!dev->core.parent) dev->core.parent = &ps3_system_bus;
dev->core.bus = &ps3_system_bus_type;
dev->core.release = ps3_system_bus_release_device;
switch (dev->dev_type) {
case PS3_DEVICE_TYPE_IOC0:
- dev->core.archdata.dma_ops = &ps3_ioc0_dma_ops;
+ dev->core.dma_ops = &ps3_ioc0_dma_ops;
dev_set_name(&dev->core, "ioc0_%02x", ++dev_ioc0_count);
break;
case PS3_DEVICE_TYPE_SB:
- dev->core.archdata.dma_ops = &ps3_sb_dma_ops;
+ dev->core.dma_ops = &ps3_sb_dma_ops;
dev_set_name(&dev->core, "sb_%02x", ++dev_sb_count);
break;
case PS3_DEVICE_TYPE_VUART: dev_set_name(&dev->core, "vuart_%02x", ++dev_vuart_count); break;
case PS3_DEVICE_TYPE_LPM: dev_set_name(&dev->core, "lpm_%02x", ++dev_lpm_count); break;
default: BUG();
}
dev->core.of_node = NULL;
set_dev_node(&dev->core, 0);
pr_debug("%s:%d add %s\n", __func__, __LINE__, dev_name(&dev->core));
result = device_register(&dev->core);
return result;
}<sep>@@
expression dev;
identifier f;
@@
- dev->f.archdata.dma_ops
+ dev->f.dma_ops
<|end_of_text|> | 8,640 |
--- initial
+++ final
@@ -1,30 +1,30 @@
struct vop_device *vop_register_device(struct device *pdev, int id, const struct dma_map_ops *dma_ops, struct vop_hw_ops *hw_ops, u8 dnode, struct mic_mw *aper, struct dma_chan *chan) {
int ret;
struct vop_device *vdev;
vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
if (!vdev) return ERR_PTR(-ENOMEM);
vdev->dev.parent = pdev;
vdev->id.device = id;
vdev->id.vendor = VOP_DEV_ANY_ID;
- vdev->dev.archdata.dma_ops = dma_ops;
+ vdev->dev.dma_ops = dma_ops;
vdev->dev.dma_mask = &vdev->dev.coherent_dma_mask;
dma_set_mask(&vdev->dev, DMA_BIT_MASK(64));
vdev->dev.release = vop_release_dev;
vdev->hw_ops = hw_ops;
vdev->dev.bus = &vop_bus;
vdev->dnode = dnode;
vdev->aper = aper;
vdev->dma_ch = chan;
vdev->index = dnode - 1;
dev_set_name(&vdev->dev, "vop-dev%u", vdev->index);
/*
* device_register() causes the bus infrastructure to look for a
* matching driver.
*/
ret = device_register(&vdev->dev);
if (ret) goto free_vdev;
return vdev;
free_vdev:
kfree(vdev);
return ERR_PTR(ret);
}<sep>@@
expression dev;
identifier f;
@@
- dev->f.archdata.dma_ops
+ dev->f.dma_ops
<|end_of_text|> | 8,641 |
--- initial
+++ final
@@ -1,44 +1,44 @@
static void psbfb_copyarea_accel(struct fb_info *info, const struct fb_copyarea *a) {
struct psb_fbdev *fbdev = info->par;
struct psb_framebuffer *psbfb = &fbdev->pfb;
struct drm_device *dev = psbfb->base.dev;
struct drm_framebuffer *fb = fbdev->psb_fb_helper.fb;
struct drm_psb_private *dev_priv = dev->dev_private;
uint32_t offset;
uint32_t stride;
uint32_t src_format;
uint32_t dst_format;
if (!fb) return;
offset = psbfb->gtt->offset;
stride = fb->pitches[0];
- switch (fb->depth) {
+ switch (fb->format->depth) {
case 8:
src_format = PSB_2D_SRC_332RGB;
dst_format = PSB_2D_DST_332RGB;
break;
case 15:
src_format = PSB_2D_SRC_555RGB;
dst_format = PSB_2D_DST_555RGB;
break;
case 16:
src_format = PSB_2D_SRC_565RGB;
dst_format = PSB_2D_DST_565RGB;
break;
case 24:
case 32:
/* this is wrong, but since we don't do blending it's okay */
src_format = PSB_2D_SRC_8888ARGB;
dst_format = PSB_2D_DST_8888ARGB;
break;
default:
/* software fallback */
drm_fb_helper_cfb_copyarea(info, a);
return;
}
if (!gma_power_begin(dev, false)) {
drm_fb_helper_cfb_copyarea(info, a);
return;
}
psb_accel_2d_copy(dev_priv, offset, stride, src_format, offset, stride, dst_format, a->sx, a->sy, a->dx, a->dy, a->width, a->height);
gma_power_end(dev);
}<sep>@@
struct drm_framebuffer *fb;
@@
- fb->depth
+ fb->format->depth
@@
struct drm_framebuffer *fb;
@@
- (fb->format->depth)
+ fb->format->depth
<|end_of_text|> | 8,642 |
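
The remaining entries track a DRM refactor: struct drm_framebuffer no longer stores depth (or bits_per_pixel) directly; fb->format points at a const struct drm_format_info describing the pixel format, and depth is one of its fields. Derived layout math goes through the same structure, as in this sketch (field names follow drm_format_info; treat it as illustrative):

#include <drm/drm_framebuffer.h>

/* Sketch: layout data now comes from fb->format rather than from
 * fields on drm_framebuffer itself; cpp[0] is bytes per pixel of
 * plane 0 for simple single-plane formats. */
static inline unsigned int fb_row_bytes(const struct drm_framebuffer *fb,
					unsigned int width)
{
	return width * fb->format->cpp[0];
}
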
--- initial
+++ final
@@ -1,74 +1,74 @@
static int amdgpufb_create(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes) {
struct amdgpu_fbdev *rfbdev = (struct amdgpu_fbdev *)helper;
struct amdgpu_device *adev = rfbdev->adev;
struct fb_info *info;
struct drm_framebuffer *fb = NULL;
struct drm_mode_fb_cmd2 mode_cmd;
struct drm_gem_object *gobj = NULL;
struct amdgpu_bo *abo = NULL;
int ret;
unsigned long tmp;
mode_cmd.width = sizes->surface_width;
mode_cmd.height = sizes->surface_height;
if (sizes->surface_bpp == 24) sizes->surface_bpp = 32;
mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth);
ret = amdgpufb_create_pinned_object(rfbdev, &mode_cmd, &gobj);
if (ret) {
DRM_ERROR("failed to create fbcon object %d\n", ret);
return ret;
}
abo = gem_to_amdgpu_bo(gobj);
/* okay we have an object now allocate the framebuffer */
info = drm_fb_helper_alloc_fbi(helper);
if (IS_ERR(info)) {
ret = PTR_ERR(info);
goto out_unref;
}
info->par = rfbdev;
info->skip_vt_switch = true;
ret = amdgpu_framebuffer_init(adev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
if (ret) {
DRM_ERROR("failed to initialize framebuffer %d\n", ret);
goto out_destroy_fbi;
}
fb = &rfbdev->rfb.base;
/* setup helper */
rfbdev->helper.fb = fb;
memset_io(abo->kptr, 0x0, amdgpu_bo_size(abo));
strcpy(info->fix.id, "amdgpudrmfb");
- drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
info->fbops = &amdgpufb_ops;
tmp = amdgpu_bo_gpu_offset(abo) - adev->mc.vram_start;
info->fix.smem_start = adev->mc.aper_base + tmp;
info->fix.smem_len = amdgpu_bo_size(abo);
info->screen_base = abo->kptr;
info->screen_size = amdgpu_bo_size(abo);
drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height);
/* setup aperture base/size for vesafb takeover */
info->apertures->ranges[0].base = adev->ddev->mode_config.fb_base;
info->apertures->ranges[0].size = adev->mc.aper_size;
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
if (info->screen_base == NULL) {
ret = -ENOSPC;
goto out_destroy_fbi;
}
DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
DRM_INFO("vram apper at 0x%lX\n", (unsigned long)adev->mc.aper_base);
DRM_INFO("size %lu\n", (unsigned long)amdgpu_bo_size(abo));
- DRM_INFO("fb depth is %d\n", fb->depth);
+ DRM_INFO("fb depth is %d\n", fb->format->depth);
DRM_INFO(" pitch is %d\n", fb->pitches[0]);
vga_switcheroo_client_fb_set(adev->ddev->pdev, info);
return 0;
out_destroy_fbi:
drm_fb_helper_release_fbi(helper);
out_unref:
if (abo) {}
if (fb && ret) {
drm_gem_object_unreference_unlocked(gobj);
drm_framebuffer_unregister_private(fb);
drm_framebuffer_cleanup(fb);
kfree(fb);
}
return ret;
}<sep>@@
struct drm_framebuffer *fb;
@@
- fb->depth
+ fb->format->depth
@@
struct drm_framebuffer *fb;
@@
- (fb->format->depth)
+ fb->format->depth
<|end_of_text|> | 8,643 |
--- initial
+++ final
@@ -1,53 +1,53 @@
static int astfb_create(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes) {
struct ast_fbdev *afbdev = container_of(helper, struct ast_fbdev, helper);
struct drm_device *dev = afbdev->helper.dev;
struct drm_mode_fb_cmd2 mode_cmd;
struct drm_framebuffer *fb;
struct fb_info *info;
int size, ret;
void *sysram;
struct drm_gem_object *gobj = NULL;
struct ast_bo *bo = NULL;
mode_cmd.width = sizes->surface_width;
mode_cmd.height = sizes->surface_height;
mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);
mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth);
size = mode_cmd.pitches[0] * mode_cmd.height;
ret = astfb_create_object(afbdev, &mode_cmd, &gobj);
if (ret) {
DRM_ERROR("failed to create fbcon backing object %d\n", ret);
return ret;
}
bo = gem_to_ast_bo(gobj);
sysram = vmalloc(size);
if (!sysram) return -ENOMEM;
info = drm_fb_helper_alloc_fbi(helper);
if (IS_ERR(info)) {
ret = PTR_ERR(info);
goto err_free_vram;
}
info->par = afbdev;
ret = ast_framebuffer_init(dev, &afbdev->afb, &mode_cmd, gobj);
if (ret) goto err_release_fbi;
afbdev->sysram = sysram;
afbdev->size = size;
fb = &afbdev->afb.base;
afbdev->helper.fb = fb;
strcpy(info->fix.id, "astdrmfb");
info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
info->fbops = &astfb_ops;
info->apertures->ranges[0].base = pci_resource_start(dev->pdev, 0);
info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0);
- drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
drm_fb_helper_fill_var(info, &afbdev->helper, sizes->fb_width, sizes->fb_height);
info->screen_base = sysram;
info->screen_size = size;
info->pixmap.flags = FB_PIXMAP_SYSTEM;
DRM_DEBUG_KMS("allocated %dx%d\n", fb->width, fb->height);
return 0;
err_release_fbi:
drm_fb_helper_release_fbi(helper);
err_free_vram:
vfree(sysram);
return ret;
}<sep>@@
struct drm_framebuffer *fb;
@@
- fb->depth
+ fb->format->depth
@@
struct drm_framebuffer *fb;
@@
- (fb->format->depth)
+ fb->format->depth
<|end_of_text|> | 8,644 |
--- initial
+++ final
@@ -1,61 +1,61 @@
static int bochsfb_create(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes) {
struct bochs_device *bochs = container_of(helper, struct bochs_device, fb.helper);
struct fb_info *info;
struct drm_framebuffer *fb;
struct drm_mode_fb_cmd2 mode_cmd;
struct drm_gem_object *gobj = NULL;
struct bochs_bo *bo = NULL;
int size, ret;
if (sizes->surface_bpp != 32) return -EINVAL;
mode_cmd.width = sizes->surface_width;
mode_cmd.height = sizes->surface_height;
mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);
mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth);
size = mode_cmd.pitches[0] * mode_cmd.height;
/* alloc, pin & map bo */
ret = bochsfb_create_object(bochs, &mode_cmd, &gobj);
if (ret) {
DRM_ERROR("failed to create fbcon backing object %d\n", ret);
return ret;
}
bo = gem_to_bochs_bo(gobj);
ret = ttm_bo_reserve(&bo->bo, true, false, NULL);
if (ret) return ret;
ret = bochs_bo_pin(bo, TTM_PL_FLAG_VRAM, NULL);
if (ret) {
DRM_ERROR("failed to pin fbcon\n");
ttm_bo_unreserve(&bo->bo);
return ret;
}
ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
if (ret) {
DRM_ERROR("failed to kmap fbcon\n");
ttm_bo_unreserve(&bo->bo);
return ret;
}
ttm_bo_unreserve(&bo->bo);
/* init fb device */
info = drm_fb_helper_alloc_fbi(helper);
if (IS_ERR(info)) return PTR_ERR(info);
info->par = &bochs->fb.helper;
ret = bochs_framebuffer_init(bochs->dev, &bochs->fb.gfb, &mode_cmd, gobj);
if (ret) {
drm_fb_helper_release_fbi(helper);
return ret;
}
bochs->fb.size = size;
/* setup helper */
fb = &bochs->fb.gfb.base;
bochs->fb.helper.fb = fb;
strcpy(info->fix.id, "bochsdrmfb");
info->flags = FBINFO_DEFAULT;
info->fbops = &bochsfb_ops;
- drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
drm_fb_helper_fill_var(info, &bochs->fb.helper, sizes->fb_width, sizes->fb_height);
info->screen_base = bo->kmap.virtual;
info->screen_size = size;
drm_vma_offset_remove(&bo->bo.bdev->vma_manager, &bo->bo.vma_node);
info->fix.smem_start = 0;
info->fix.smem_len = size;
return 0;
}<sep>@@
struct drm_framebuffer *fb;
@@
- fb->depth
+ fb->format->depth
@@
struct drm_framebuffer *fb;
@@
- (fb->format->depth)
+ fb->format->depth
<|end_of_text|> | 8,645 |
--- initial
+++ final
@@ -1,58 +1,58 @@
static int cirrusfb_create(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes) {
struct cirrus_fbdev *gfbdev = container_of(helper, struct cirrus_fbdev, helper);
struct cirrus_device *cdev = gfbdev->helper.dev->dev_private;
struct fb_info *info;
struct drm_framebuffer *fb;
struct drm_mode_fb_cmd2 mode_cmd;
void *sysram;
struct drm_gem_object *gobj = NULL;
struct cirrus_bo *bo = NULL;
int size, ret;
mode_cmd.width = sizes->surface_width;
mode_cmd.height = sizes->surface_height;
mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);
mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth);
size = mode_cmd.pitches[0] * mode_cmd.height;
ret = cirrusfb_create_object(gfbdev, &mode_cmd, &gobj);
if (ret) {
DRM_ERROR("failed to create fbcon backing object %d\n", ret);
return ret;
}
bo = gem_to_cirrus_bo(gobj);
sysram = vmalloc(size);
if (!sysram) return -ENOMEM;
info = drm_fb_helper_alloc_fbi(helper);
if (IS_ERR(info)) return PTR_ERR(info);
info->par = gfbdev;
ret = cirrus_framebuffer_init(cdev->dev, &gfbdev->gfb, &mode_cmd, gobj);
if (ret) return ret;
gfbdev->sysram = sysram;
gfbdev->size = size;
fb = &gfbdev->gfb.base;
if (!fb) {
DRM_INFO("fb is NULL\n");
return -EINVAL;
}
/* setup helper */
gfbdev->helper.fb = fb;
strcpy(info->fix.id, "cirrusdrmfb");
info->flags = FBINFO_DEFAULT;
info->fbops = &cirrusfb_ops;
- drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
drm_fb_helper_fill_var(info, &gfbdev->helper, sizes->fb_width, sizes->fb_height);
/* setup aperture base/size for vesafb takeover */
info->apertures->ranges[0].base = cdev->dev->mode_config.fb_base;
info->apertures->ranges[0].size = cdev->mc.vram_size;
info->fix.smem_start = cdev->dev->mode_config.fb_base;
info->fix.smem_len = cdev->mc.vram_size;
info->screen_base = sysram;
info->screen_size = size;
info->fix.mmio_start = 0;
info->fix.mmio_len = 0;
DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
DRM_INFO("vram aper at 0x%lX\n", (unsigned long)info->fix.smem_start);
DRM_INFO("size %lu\n", (unsigned long)info->fix.smem_len);
- DRM_INFO("fb depth is %d\n", fb->depth);
+ DRM_INFO("fb depth is %d\n", fb->format->depth);
DRM_INFO(" pitch is %d\n", fb->pitches[0]);
return 0;
}<sep>@@
struct drm_framebuffer *fb;
@@
- fb->depth
+ fb->format->depth
@@
struct drm_framebuffer *fb;
@@
- (fb->format->depth)
+ fb->format->depth
<|end_of_text|> | 8,646 |
--- initial
+++ final
@@ -1,58 +1,58 @@
static int nv04_crtc_do_mode_set_base(struct drm_crtc *crtc, struct drm_framebuffer *passed_fb, int x, int y, bool atomic) {
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index];
struct drm_framebuffer *drm_fb;
struct nouveau_framebuffer *fb;
int arb_burst, arb_lwm;
NV_DEBUG(drm, "index %d\n", nv_crtc->index);
/* no fb bound */
if (!atomic && !crtc->primary->fb) {
NV_DEBUG(drm, "No FB bound\n");
return 0;
}
/* If atomic, we want to switch to the fb we were passed, so
* now we update pointers to do that.
*/
if (atomic) {
drm_fb = passed_fb;
fb = nouveau_framebuffer(passed_fb);
} else {
drm_fb = crtc->primary->fb;
fb = nouveau_framebuffer(crtc->primary->fb);
}
nv_crtc->fb.offset = fb->nvbo->bo.offset;
- if (nv_crtc->lut.depth != drm_fb->depth) {
- nv_crtc->lut.depth = drm_fb->depth;
+ if (nv_crtc->lut.depth != drm_fb->format->depth) {
+ nv_crtc->lut.depth = drm_fb->format->depth;
nv_crtc_gamma_load(crtc);
}
/* Update the framebuffer format. */
regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] &= ~3;
- regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] |= (drm_fb->depth + 1) / 8;
+ regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] |= (drm_fb->format->depth + 1) / 8;
regp->ramdac_gen_ctrl &= ~NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL;
- if (drm_fb->depth == 16) regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL;
+ if (drm_fb->format->depth == 16) regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL;
crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_PIXEL_INDEX);
NVWriteRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_GENERAL_CONTROL, regp->ramdac_gen_ctrl);
regp->CRTC[NV_CIO_CR_OFFSET_INDEX] = drm_fb->pitches[0] >> 3;
regp->CRTC[NV_CIO_CRE_RPC0_INDEX] = XLATE(drm_fb->pitches[0] >> 3, 8, NV_CIO_CRE_RPC0_OFFSET_10_8);
regp->CRTC[NV_CIO_CRE_42] = XLATE(drm_fb->pitches[0] / 8, 11, NV_CIO_CRE_42_OFFSET_11);
crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_RPC0_INDEX);
crtc_wr_cio_state(crtc, regp, NV_CIO_CR_OFFSET_INDEX);
crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_42);
/* Update the framebuffer location. */
regp->fb_start = nv_crtc->fb.offset & ~3;
regp->fb_start += (y * drm_fb->pitches[0]) + (x * drm_fb->bits_per_pixel / 8);
nv_set_crtc_base(dev, nv_crtc->index, regp->fb_start);
/* Update the arbitration parameters. */
nouveau_calc_arb(dev, crtc->mode.clock, drm_fb->bits_per_pixel, &arb_burst, &arb_lwm);
regp->CRTC[NV_CIO_CRE_FF_INDEX] = arb_burst;
regp->CRTC[NV_CIO_CRE_FFLWM__INDEX] = arb_lwm & 0xff;
crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FF_INDEX);
crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FFLWM__INDEX);
if (drm->device.info.family >= NV_DEVICE_INFO_V0_KELVIN) {
regp->CRTC[NV_CIO_CRE_47] = arb_lwm >> 8;
crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_47);
}
return 0;
}<sep>@@
struct drm_framebuffer *fb;
@@
- fb->depth
+ fb->format->depth
@@
struct drm_framebuffer *fb;
@@
- (fb->format->depth)
+ fb->format->depth
<|end_of_text|> | 8,647 |
--- initial
+++ final
@@ -1,97 +1,97 @@
static void nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode *mode) {
struct drm_device *dev = crtc->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index];
struct nv04_crtc_reg *savep = &nv04_display(dev)->saved_reg.crtc_reg[nv_crtc->index];
const struct drm_framebuffer *fb = crtc->primary->fb;
struct drm_encoder *encoder;
bool lvds_output = false, tmds_output = false, tv_output = false, off_chip_digital = false;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
bool digital = false;
if (encoder->crtc != crtc) continue;
if (nv_encoder->dcb->type == DCB_OUTPUT_LVDS) digital = lvds_output = true;
if (nv_encoder->dcb->type == DCB_OUTPUT_TV) tv_output = true;
if (nv_encoder->dcb->type == DCB_OUTPUT_TMDS) digital = tmds_output = true;
if (nv_encoder->dcb->location != DCB_LOC_ON_CHIP && digital) off_chip_digital = true;
}
/* Registers not directly related to the (s)vga mode */
/* What is the meaning of this register? */
/* A few popular values are 0x18, 0x1c, 0x38, 0x3c */
regp->CRTC[NV_CIO_CRE_ENH_INDEX] = savep->CRTC[NV_CIO_CRE_ENH_INDEX] & ~(1 << 5);
regp->crtc_eng_ctrl = 0;
/* Except for rare conditions I2C is enabled on the primary crtc */
if (nv_crtc->index == 0) regp->crtc_eng_ctrl |= NV_CRTC_FSEL_I2C;
#if 0
/* Set overlay to desired crtc. */
if (dev->overlayAdaptor) {
NVPortPrivPtr pPriv = GET_OVERLAY_PRIVATE(dev);
if (pPriv->overlayCRTC == nv_crtc->index)
regp->crtc_eng_ctrl |= NV_CRTC_FSEL_OVERLAY;
}
#endif
/* ADDRESS_SPACE_PNVM is the same as setting HCUR_ASI */
regp->cursor_cfg = NV_PCRTC_CURSOR_CONFIG_CUR_LINES_64 | NV_PCRTC_CURSOR_CONFIG_CUR_PIXELS_64 | NV_PCRTC_CURSOR_CONFIG_ADDRESS_SPACE_PNVM;
if (drm->device.info.chipset >= 0x11) regp->cursor_cfg |= NV_PCRTC_CURSOR_CONFIG_CUR_BPP_32;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN) regp->cursor_cfg |= NV_PCRTC_CURSOR_CONFIG_DOUBLE_SCAN_ENABLE;
/* Unblock some timings */
regp->CRTC[NV_CIO_CRE_53] = 0;
regp->CRTC[NV_CIO_CRE_54] = 0;
/* 0x00 is disabled, 0x11 is lvds, 0x22 crt and 0x88 tmds */
if (lvds_output)
regp->CRTC[NV_CIO_CRE_SCRATCH3__INDEX] = 0x11;
else if (tmds_output)
regp->CRTC[NV_CIO_CRE_SCRATCH3__INDEX] = 0x88;
else
regp->CRTC[NV_CIO_CRE_SCRATCH3__INDEX] = 0x22;
/* These values seem to vary */
/* This register seems to be used by the bios to make certain decisions on some G70 cards? */
regp->CRTC[NV_CIO_CRE_SCRATCH4__INDEX] = savep->CRTC[NV_CIO_CRE_SCRATCH4__INDEX];
nv_crtc_set_digital_vibrance(crtc, nv_crtc->saturation);
/* probably a scratch reg, but kept for cargo-cult purposes:
* bit0: crtc0?, head A
* bit6: lvds, head A
* bit7: (only in X), head A
*/
if (nv_crtc->index == 0) regp->CRTC[NV_CIO_CRE_4B] = savep->CRTC[NV_CIO_CRE_4B] | 0x80;
/* The blob seems to take the current value from crtc 0, add 4 to that
* and reuse the old value for crtc 1 */
regp->CRTC[NV_CIO_CRE_TVOUT_LATENCY] = nv04_display(dev)->saved_reg.crtc_reg[0].CRTC[NV_CIO_CRE_TVOUT_LATENCY];
if (!nv_crtc->index) regp->CRTC[NV_CIO_CRE_TVOUT_LATENCY] += 4;
/* the blob sometimes sets |= 0x10 (which is the same as setting |=
* 1 << 30 on 0x60.830), for no apparent reason */
regp->CRTC[NV_CIO_CRE_59] = off_chip_digital;
if (drm->device.info.family >= NV_DEVICE_INFO_V0_RANKINE) regp->CRTC[0x9f] = off_chip_digital ? 0x11 : 0x1;
regp->crtc_830 = mode->crtc_vdisplay - 3;
regp->crtc_834 = mode->crtc_vdisplay - 1;
if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) /* This is what the blob does */
regp->crtc_850 = NVReadCRTC(dev, 0, NV_PCRTC_850);
if (drm->device.info.family >= NV_DEVICE_INFO_V0_RANKINE) regp->gpio_ext = NVReadCRTC(dev, 0, NV_PCRTC_GPIO_EXT);
if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS)
regp->crtc_cfg = NV10_PCRTC_CONFIG_START_ADDRESS_HSYNC;
else
regp->crtc_cfg = NV04_PCRTC_CONFIG_START_ADDRESS_HSYNC;
/* Some misc regs */
if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) {
regp->CRTC[NV_CIO_CRE_85] = 0xFF;
regp->CRTC[NV_CIO_CRE_86] = 0x1;
}
- regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] = (fb->depth + 1) / 8;
+ regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] = (fb->format->depth + 1) / 8;
/* Enable slaved mode (called MODE_TV in nv4ref.h) */
if (lvds_output || tmds_output || tv_output) regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] |= (1 << 7);
/* Generic PRAMDAC regs */
if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) /* Only bit that bios and blob set. */
regp->nv10_cursync = (1 << 25);
regp->ramdac_gen_ctrl = NV_PRAMDAC_GENERAL_CONTROL_BPC_8BITS | NV_PRAMDAC_GENERAL_CONTROL_VGA_STATE_SEL | NV_PRAMDAC_GENERAL_CONTROL_PIXMIX_ON;
- if (fb->depth == 16) regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL;
+ if (fb->format->depth == 16) regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL;
if (drm->device.info.chipset >= 0x11) regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_PIPE_LONG;
regp->ramdac_630 = 0; /* turn off green mode (tv test pattern?) */
regp->tv_setup = 0;
nv_crtc_set_image_sharpening(crtc, nv_crtc->sharpness);
/* Some values the blob sets */
regp->ramdac_8c0 = 0x100;
regp->ramdac_a20 = 0x0;
regp->ramdac_a24 = 0xfffff;
regp->ramdac_a34 = 0x1;
}<sep>@@
struct drm_framebuffer *fb;
@@
- fb->depth
+ fb->format->depth
@@
struct drm_framebuffer *fb;
@@
- (fb->format->depth)
+ fb->format->depth
<|end_of_text|> | 8,648 |
--- initial
+++ final
@@ -1,59 +1,59 @@
int drm_fbdev_cma_create_with_funcs(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes, const struct drm_framebuffer_funcs *funcs) {
struct drm_fbdev_cma *fbdev_cma = to_fbdev_cma(helper);
struct drm_mode_fb_cmd2 mode_cmd = {0};
struct drm_device *dev = helper->dev;
struct drm_gem_cma_object *obj;
struct drm_framebuffer *fb;
unsigned int bytes_per_pixel;
unsigned long offset;
struct fb_info *fbi;
size_t size;
int ret;
DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d)\n", sizes->surface_width, sizes->surface_height, sizes->surface_bpp);
bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8);
mode_cmd.width = sizes->surface_width;
mode_cmd.height = sizes->surface_height;
mode_cmd.pitches[0] = sizes->surface_width * bytes_per_pixel;
mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth);
size = mode_cmd.pitches[0] * mode_cmd.height;
obj = drm_gem_cma_create(dev, size);
if (IS_ERR(obj)) return -ENOMEM;
fbi = drm_fb_helper_alloc_fbi(helper);
if (IS_ERR(fbi)) {
ret = PTR_ERR(fbi);
goto err_gem_free_object;
}
fbdev_cma->fb = drm_fb_cma_alloc(dev, &mode_cmd, &obj, 1, funcs);
if (IS_ERR(fbdev_cma->fb)) {
dev_err(dev->dev, "Failed to allocate DRM framebuffer.\n");
ret = PTR_ERR(fbdev_cma->fb);
goto err_fb_info_destroy;
}
fb = &fbdev_cma->fb->fb;
helper->fb = fb;
fbi->par = helper;
fbi->flags = FBINFO_FLAG_DEFAULT;
fbi->fbops = &drm_fbdev_cma_ops;
- drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->format->depth);
drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
offset = fbi->var.xoffset * bytes_per_pixel;
offset += fbi->var.yoffset * fb->pitches[0];
dev->mode_config.fb_base = (resource_size_t)obj->paddr;
fbi->screen_base = obj->vaddr + offset;
fbi->fix.smem_start = (unsigned long)(obj->paddr + offset);
fbi->screen_size = size;
fbi->fix.smem_len = size;
if (funcs->dirty) {
ret = drm_fbdev_cma_defio_init(fbi, obj);
if (ret) goto err_cma_destroy;
}
return 0;
err_cma_destroy:
drm_framebuffer_unregister_private(&fbdev_cma->fb->fb);
drm_fb_cma_destroy(&fbdev_cma->fb->fb);
err_fb_info_destroy:
drm_fb_helper_release_fbi(helper);
err_gem_free_object:
drm_gem_object_unreference_unlocked(&obj->base);
return ret;
}<sep>@@
struct drm_framebuffer *fb;
@@
- fb->depth
+ fb->format->depth
@@
struct drm_framebuffer *fb;
@@
- (fb->format->depth)
+ fb->format->depth
<|end_of_text|> | 8,650 |
--- initial
+++ final
@@ -1,67 +1,67 @@
void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper, uint32_t fb_width, uint32_t fb_height) {
struct drm_framebuffer *fb = fb_helper->fb;
info->pseudo_palette = fb_helper->pseudo_palette;
info->var.xres_virtual = fb->width;
info->var.yres_virtual = fb->height;
info->var.bits_per_pixel = fb->bits_per_pixel;
info->var.accel_flags = FB_ACCELF_TEXT;
info->var.xoffset = 0;
info->var.yoffset = 0;
info->var.activate = FB_ACTIVATE_NOW;
info->var.height = -1;
info->var.width = -1;
- switch (fb->depth) {
+ switch (fb->format->depth) {
case 8:
info->var.red.offset = 0;
info->var.green.offset = 0;
info->var.blue.offset = 0;
info->var.red.length = 8; /* 8bit DAC */
info->var.green.length = 8;
info->var.blue.length = 8;
info->var.transp.offset = 0;
info->var.transp.length = 0;
break;
case 15:
info->var.red.offset = 10;
info->var.green.offset = 5;
info->var.blue.offset = 0;
info->var.red.length = 5;
info->var.green.length = 5;
info->var.blue.length = 5;
info->var.transp.offset = 15;
info->var.transp.length = 1;
break;
case 16:
info->var.red.offset = 11;
info->var.green.offset = 5;
info->var.blue.offset = 0;
info->var.red.length = 5;
info->var.green.length = 6;
info->var.blue.length = 5;
info->var.transp.offset = 0;
break;
case 24:
info->var.red.offset = 16;
info->var.green.offset = 8;
info->var.blue.offset = 0;
info->var.red.length = 8;
info->var.green.length = 8;
info->var.blue.length = 8;
info->var.transp.offset = 0;
info->var.transp.length = 0;
break;
case 32:
info->var.red.offset = 16;
info->var.green.offset = 8;
info->var.blue.offset = 0;
info->var.red.length = 8;
info->var.green.length = 8;
info->var.blue.length = 8;
info->var.transp.offset = 24;
info->var.transp.length = 8;
break;
default: break;
}
info->var.xres = fb_width;
info->var.yres = fb_height;
}<sep>@@
struct drm_framebuffer *fb;
@@
- fb->depth
+ fb->format->depth
@@
struct drm_framebuffer *fb;
@@
- (fb->format->depth)
+ fb->format->depth
<|end_of_text|> | 8,651 |
--- initial
+++ final
@@ -1,30 +1,30 @@
int drm_mode_getfb(struct drm_device *dev, void *data, struct drm_file *file_priv) {
struct drm_mode_fb_cmd *r = data;
struct drm_framebuffer *fb;
int ret;
if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL;
fb = drm_framebuffer_lookup(dev, r->fb_id);
if (!fb) return -ENOENT;
r->height = fb->height;
r->width = fb->width;
- r->depth = fb->depth;
+ r->depth = fb->format->depth;
r->bpp = fb->bits_per_pixel;
r->pitch = fb->pitches[0];
if (fb->funcs->create_handle) {
if (drm_is_current_master(file_priv) || capable(CAP_SYS_ADMIN) || drm_is_control_client(file_priv)) {
ret = fb->funcs->create_handle(fb, file_priv, &r->handle);
} else {
/* GET_FB() is an unprivileged ioctl so we must not
* return a buffer-handle to non-master processes! For
* backwards-compatibility reasons, we cannot make
* GET_FB() privileged, so just return an invalid handle
* for non-masters. */
r->handle = 0;
ret = 0;
}
} else {
ret = -ENODEV;
}
drm_framebuffer_unreference(fb);
return ret;
}<sep>@@
struct drm_framebuffer *fb;
@@
- fb->depth
+ fb->format->depth
@@
struct drm_framebuffer *fb;
@@
- (fb->format->depth)
+ fb->format->depth
<|end_of_text|> | 8,652 |
--- initial
+++ final
@@ -1,25 +1,23 @@
void drm_helper_mode_fill_fb_struct(struct drm_device *dev, struct drm_framebuffer *fb, const struct drm_mode_fb_cmd2 *mode_cmd) {
const struct drm_format_info *info;
int i;
info = drm_format_info(mode_cmd->pixel_format);
if (!info || !info->depth) {
struct drm_format_name_buf format_name;
DRM_DEBUG_KMS("non-RGB pixel format %s\n", drm_get_format_name(mode_cmd->pixel_format, &format_name));
- fb->depth = 0;
fb->bits_per_pixel = 0;
} else {
- fb->depth = info->depth;
fb->bits_per_pixel = info->cpp[0] * 8;
}
fb->dev = dev;
fb->format = info;
fb->width = mode_cmd->width;
fb->height = mode_cmd->height;
for (i = 0; i < 4; i++) {
fb->pitches[i] = mode_cmd->pitches[i];
fb->offsets[i] = mode_cmd->offsets[i];
}
fb->modifier = mode_cmd->modifier[0];
fb->pixel_format = mode_cmd->pixel_format;
fb->flags = mode_cmd->flags;
}<sep>@@
struct drm_framebuffer *fb;
expression E;
@@
drm_helper_mode_fill_fb_struct(...) {
...
- fb->depth = E;
...
}
<|end_of_text|> | 8,653 |
--- initial
+++ final
@@ -1,9 +1,9 @@
static int tegra_debugfs_framebuffers(struct seq_file *s, void *data) {
struct drm_info_node *node = (struct drm_info_node *)s->private;
struct drm_device *drm = node->minor->dev;
struct drm_framebuffer *fb;
mutex_lock(&drm->mode_config.fb_lock);
- list_for_each_entry(fb, &drm->mode_config.fb_list, head) { seq_printf(s, "%3d: user size: %d x %d, depth %d, %d bpp, refcount %d\n", fb->base.id, fb->width, fb->height, fb->depth, fb->bits_per_pixel, drm_framebuffer_read_refcount(fb)); }
+ list_for_each_entry(fb, &drm->mode_config.fb_list, head) { seq_printf(s, "%3d: user size: %d x %d, depth %d, %d bpp, refcount %d\n", fb->base.id, fb->width, fb->height, fb->format->depth, fb->bits_per_pixel, drm_framebuffer_read_refcount(fb)); }
mutex_unlock(&drm->mode_config.fb_lock);
return 0;
}<sep>@@
struct drm_framebuffer *fb;
@@
- fb->depth
+ fb->format->depth
@@
struct drm_framebuffer *fb;
@@
- (fb->format->depth)
+ fb->format->depth
<|end_of_text|> | 8,654 |
--- initial
+++ final
@@ -1,30 +1,30 @@
static int exynos_drm_fbdev_update(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes, struct exynos_drm_gem *exynos_gem) {
struct fb_info *fbi;
struct drm_framebuffer *fb = helper->fb;
unsigned int size = fb->width * fb->height * (fb->bits_per_pixel >> 3);
unsigned int nr_pages;
unsigned long offset;
fbi = drm_fb_helper_alloc_fbi(helper);
if (IS_ERR(fbi)) {
DRM_ERROR("failed to allocate fb info.\n");
return PTR_ERR(fbi);
}
fbi->par = helper;
fbi->flags = FBINFO_FLAG_DEFAULT;
fbi->fbops = &exynos_drm_fb_ops;
- drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->format->depth);
drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
nr_pages = exynos_gem->size >> PAGE_SHIFT;
exynos_gem->kvaddr = (void __iomem *)vmap(exynos_gem->pages, nr_pages, VM_MAP, pgprot_writecombine(PAGE_KERNEL));
if (!exynos_gem->kvaddr) {
DRM_ERROR("failed to map pages to kernel space.\n");
drm_fb_helper_release_fbi(helper);
return -EIO;
}
offset = fbi->var.xoffset * (fb->bits_per_pixel >> 3);
offset += fbi->var.yoffset * fb->pitches[0];
fbi->screen_base = exynos_gem->kvaddr + offset;
fbi->screen_size = size;
fbi->fix.smem_len = size;
return 0;
}<sep>@@
struct drm_framebuffer *fb;
@@
- fb->depth
+ fb->format->depth
@@
struct drm_framebuffer *fb;
@@
- (fb->format->depth)
+ fb->format->depth
<|end_of_text|> | 8,655 |
--- initial
+++ final
@@ -1,63 +1,63 @@
static int tegra_fbdev_probe(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes) {
struct tegra_fbdev *fbdev = to_tegra_fbdev(helper);
struct tegra_drm *tegra = helper->dev->dev_private;
struct drm_device *drm = helper->dev;
struct drm_mode_fb_cmd2 cmd = {0};
unsigned int bytes_per_pixel;
struct drm_framebuffer *fb;
unsigned long offset;
struct fb_info *info;
struct tegra_bo *bo;
size_t size;
int err;
bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8);
cmd.width = sizes->surface_width;
cmd.height = sizes->surface_height;
cmd.pitches[0] = round_up(sizes->surface_width * bytes_per_pixel, tegra->pitch_align);
cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth);
size = cmd.pitches[0] * cmd.height;
bo = tegra_bo_create(drm, size, 0);
if (IS_ERR(bo)) return PTR_ERR(bo);
info = drm_fb_helper_alloc_fbi(helper);
if (IS_ERR(info)) {
dev_err(drm->dev, "failed to allocate framebuffer info\n");
drm_gem_object_unreference_unlocked(&bo->gem);
return PTR_ERR(info);
}
fbdev->fb = tegra_fb_alloc(drm, &cmd, &bo, 1);
if (IS_ERR(fbdev->fb)) {
err = PTR_ERR(fbdev->fb);
dev_err(drm->dev, "failed to allocate DRM framebuffer: %d\n", err);
drm_gem_object_unreference_unlocked(&bo->gem);
goto release;
}
fb = &fbdev->fb->base;
helper->fb = fb;
helper->fbdev = info;
info->par = helper;
info->flags = FBINFO_FLAG_DEFAULT;
info->fbops = &tegra_fb_ops;
- drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
drm_fb_helper_fill_var(info, helper, fb->width, fb->height);
offset = info->var.xoffset * bytes_per_pixel + info->var.yoffset * fb->pitches[0];
if (bo->pages) {
bo->vaddr = vmap(bo->pages, bo->num_pages, VM_MAP, pgprot_writecombine(PAGE_KERNEL));
if (!bo->vaddr) {
dev_err(drm->dev, "failed to vmap() framebuffer\n");
err = -ENOMEM;
goto destroy;
}
}
drm->mode_config.fb_base = (resource_size_t)bo->paddr;
info->screen_base = (void __iomem *)bo->vaddr + offset;
info->screen_size = size;
info->fix.smem_start = (unsigned long)(bo->paddr + offset);
info->fix.smem_len = size;
return 0;
destroy:
drm_framebuffer_unregister_private(fb);
tegra_fb_destroy(fb);
release:
drm_fb_helper_release_fbi(helper);
return err;
}<sep>@@
struct drm_framebuffer *fb;
@@
- fb->depth
+ fb->format->depth
@@
struct drm_framebuffer *fb;
@@
- (fb->format->depth)
+ fb->format->depth
<|end_of_text|> | 8,656 |
--- initial
+++ final
@@ -1,99 +1,99 @@
static int psbfb_create(struct psb_fbdev *fbdev, struct drm_fb_helper_surface_size *sizes) {
struct drm_device *dev = fbdev->psb_fb_helper.dev;
struct drm_psb_private *dev_priv = dev->dev_private;
struct fb_info *info;
struct drm_framebuffer *fb;
struct psb_framebuffer *psbfb = &fbdev->pfb;
struct drm_mode_fb_cmd2 mode_cmd;
int size;
int ret;
struct gtt_range *backing;
u32 bpp, depth;
int gtt_roll = 0;
int pitch_lines = 0;
mode_cmd.width = sizes->surface_width;
mode_cmd.height = sizes->surface_height;
bpp = sizes->surface_bpp;
depth = sizes->surface_depth;
/* No 24bit packed */
if (bpp == 24) bpp = 32;
do {
/*
* Acceleration via the GTT requires pitch to be
* power of two aligned. Preferably page but less
* is ok with some fonts
*/
mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((bpp + 7) / 8), 4096 >> pitch_lines);
size = mode_cmd.pitches[0] * mode_cmd.height;
size = ALIGN(size, PAGE_SIZE);
/* Allocate the fb in the GTT with stolen page backing */
backing = psbfb_alloc(dev, size);
if (pitch_lines)
pitch_lines *= 2;
else
pitch_lines = 1;
gtt_roll++;
} while (backing == NULL && pitch_lines <= 16);
/* The final pitch we accepted if we succeeded */
pitch_lines /= 2;
if (backing == NULL) {
/*
* We couldn't get the space we wanted, fall back to the
* display engine requirement instead. The HW requires
* the pitch to be 64 byte aligned
*/
gtt_roll = 0; /* Don't use GTT accelerated scrolling */
pitch_lines = 64;
mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((bpp + 7) / 8), 64);
size = mode_cmd.pitches[0] * mode_cmd.height;
size = ALIGN(size, PAGE_SIZE);
/* Allocate the framebuffer in the GTT with stolen page backing */
backing = psbfb_alloc(dev, size);
if (backing == NULL) return -ENOMEM;
}
memset(dev_priv->vram_addr + backing->offset, 0, size);
info = drm_fb_helper_alloc_fbi(&fbdev->psb_fb_helper);
if (IS_ERR(info)) {
ret = PTR_ERR(info);
goto err_free_range;
}
info->par = fbdev;
mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
ret = psb_framebuffer_init(dev, psbfb, &mode_cmd, backing);
if (ret) goto err_release;
fb = &psbfb->base;
psbfb->fbdev = info;
fbdev->psb_fb_helper.fb = fb;
- drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
strcpy(info->fix.id, "psbdrmfb");
info->flags = FBINFO_DEFAULT;
if (dev_priv->ops->accel_2d && pitch_lines > 8) /* 2D engine */
info->fbops = &psbfb_ops;
else if (gtt_roll) { /* GTT rolling seems best */
info->fbops = &psbfb_roll_ops;
info->flags |= FBINFO_HWACCEL_YPAN;
} else /* Software */
info->fbops = &psbfb_unaccel_ops;
info->fix.smem_start = dev->mode_config.fb_base;
info->fix.smem_len = size;
info->fix.ywrapstep = gtt_roll;
info->fix.ypanstep = 0;
/* Accessed stolen memory directly */
info->screen_base = dev_priv->vram_addr + backing->offset;
info->screen_size = size;
if (dev_priv->gtt.stolen_size) {
info->apertures->ranges[0].base = dev->mode_config.fb_base;
info->apertures->ranges[0].size = dev_priv->gtt.stolen_size;
}
drm_fb_helper_fill_var(info, &fbdev->psb_fb_helper, sizes->fb_width, sizes->fb_height);
info->fix.mmio_start = pci_resource_start(dev->pdev, 0);
info->fix.mmio_len = pci_resource_len(dev->pdev, 0);
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
dev_dbg(dev->dev, "allocated %dx%d fb\n", psbfb->base.width, psbfb->base.height);
return 0;
err_release:
drm_fb_helper_release_fbi(&fbdev->psb_fb_helper);
err_free_range:
psb_gtt_free_range(dev, backing);
return ret;
}<sep>@@
struct drm_framebuffer *fb;
@@
- fb->depth
+ fb->format->depth
@@
struct drm_framebuffer *fb;
@@
- (fb->format->depth)
+ fb->format->depth
<|end_of_text|> | 8,657 |
--- initial
+++ final
@@ -1,62 +1,62 @@
int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb) {
struct drm_device *dev = crtc->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
struct drm_framebuffer *fb = crtc->primary->fb;
struct psb_framebuffer *psbfb = to_psb_fb(fb);
int pipe = gma_crtc->pipe;
const struct psb_offset *map = &dev_priv->regmap[pipe];
unsigned long start, offset;
u32 dspcntr;
int ret = 0;
if (!gma_power_begin(dev, true)) return 0;
/* no fb bound */
if (!fb) {
dev_err(dev->dev, "No FB bound\n");
goto gma_pipe_cleaner;
}
/* We are displaying this buffer, make sure it is actually loaded
into the GTT */
ret = psb_gtt_pin(psbfb->gtt);
if (ret < 0) goto gma_pipe_set_base_exit;
start = psbfb->gtt->offset;
offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
REG_WRITE(map->stride, fb->pitches[0]);
dspcntr = REG_READ(map->cntr);
dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
switch (fb->bits_per_pixel) {
case 8: dspcntr |= DISPPLANE_8BPP; break;
case 16:
- if (fb->depth == 15)
+ if (fb->format->depth == 15)
dspcntr |= DISPPLANE_15_16BPP;
else
dspcntr |= DISPPLANE_16BPP;
break;
case 24:
case 32: dspcntr |= DISPPLANE_32BPP_NO_ALPHA; break;
default:
dev_err(dev->dev, "Unknown color depth\n");
ret = -EINVAL;
goto gma_pipe_set_base_exit;
}
REG_WRITE(map->cntr, dspcntr);
dev_dbg(dev->dev, "Writing base %08lX %08lX %d %d\n", start, offset, x, y);
/* FIXME: Investigate whether this really is the base for psb and why
the linear offset is named base for the other chips. map->surf
should be the base and map->linoff the offset for all chips */
if (IS_PSB(dev)) {
REG_WRITE(map->base, offset + start);
REG_READ(map->base);
} else {
REG_WRITE(map->base, offset);
REG_READ(map->base);
REG_WRITE(map->surf, start);
REG_READ(map->surf);
}
gma_pipe_cleaner:
/* If there was a previous display we can now unpin it */
if (old_fb) psb_gtt_unpin(to_psb_fb(old_fb)->gtt);
gma_pipe_set_base_exit:
gma_power_end(dev);
return ret;
}<sep>@@
struct drm_framebuffer *fb;
@@
- fb->depth
+ fb->format->depth
@@
struct drm_framebuffer *fb;
@@
- (fb->format->depth)
+ fb->format->depth
<|end_of_text|> | 8,658 |
--- initial
+++ final
@@ -1,89 +1,89 @@
static int intelfb_create(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes) {
struct intel_fbdev *ifbdev = container_of(helper, struct intel_fbdev, helper);
struct intel_framebuffer *intel_fb = ifbdev->fb;
struct drm_device *dev = helper->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pdev = dev_priv->drm.pdev;
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct fb_info *info;
struct drm_framebuffer *fb;
struct i915_vma *vma;
bool prealloc = false;
void __iomem *vaddr;
int ret;
if (intel_fb && (sizes->fb_width > intel_fb->base.width || sizes->fb_height > intel_fb->base.height)) {
DRM_DEBUG_KMS("BIOS fb too small (%dx%d), we require (%dx%d),"
" releasing it\n",
intel_fb->base.width, intel_fb->base.height, sizes->fb_width, sizes->fb_height);
drm_framebuffer_unreference(&intel_fb->base);
intel_fb = ifbdev->fb = NULL;
}
if (!intel_fb || WARN_ON(!intel_fb->obj)) {
DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n");
ret = intelfb_alloc(helper, sizes);
if (ret) return ret;
intel_fb = ifbdev->fb;
} else {
DRM_DEBUG_KMS("re-using BIOS fb\n");
prealloc = true;
sizes->fb_width = intel_fb->base.width;
sizes->fb_height = intel_fb->base.height;
}
mutex_lock(&dev->struct_mutex);
/* Pin the GGTT vma for our access via info->screen_base.
* This also validates that any existing fb inherited from the
* BIOS is suitable for own access.
*/
vma = intel_pin_and_fence_fb_obj(&ifbdev->fb->base, DRM_ROTATE_0);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto out_unlock;
}
info = drm_fb_helper_alloc_fbi(helper);
if (IS_ERR(info)) {
DRM_ERROR("Failed to allocate fb_info\n");
ret = PTR_ERR(info);
goto out_unpin;
}
info->par = helper;
fb = &ifbdev->fb->base;
ifbdev->helper.fb = fb;
strcpy(info->fix.id, "inteldrmfb");
info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
info->fbops = &intelfb_ops;
/* setup aperture base/size for vesafb takeover */
info->apertures->ranges[0].base = dev->mode_config.fb_base;
info->apertures->ranges[0].size = ggtt->mappable_end;
info->fix.smem_start = dev->mode_config.fb_base + i915_ggtt_offset(vma);
info->fix.smem_len = vma->node.size;
vaddr = i915_vma_pin_iomap(vma);
if (IS_ERR(vaddr)) {
DRM_ERROR("Failed to remap framebuffer into virtual memory\n");
ret = PTR_ERR(vaddr);
goto out_destroy_fbi;
}
info->screen_base = vaddr;
info->screen_size = vma->node.size;
/* This driver doesn't need a VT switch to restore the mode on resume */
info->skip_vt_switch = true;
- drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);
/* If the object is shmemfs backed, it will have given us zeroed pages.
* If the object is stolen however, it will be full of whatever
* garbage was left in there.
*/
if (intel_fb->obj->stolen && !prealloc) memset_io(info->screen_base, 0, info->screen_size);
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x\n", fb->width, fb->height, i915_ggtt_offset(vma));
ifbdev->vma = vma;
mutex_unlock(&dev->struct_mutex);
vga_switcheroo_client_fb_set(pdev, info);
return 0;
out_destroy_fbi:
drm_fb_helper_release_fbi(helper);
out_unpin:
intel_unpin_fb_obj(&ifbdev->fb->base, DRM_ROTATE_0);
out_unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
}<sep>@@
struct drm_framebuffer *fb;
@@
- fb->depth
+ fb->format->depth
@@
struct drm_framebuffer *fb;
@@
- (fb->format->depth)
+ fb->format->depth
<|end_of_text|> | 8,659 |
--- initial
+++ final
@@ -1,50 +1,50 @@
static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb) {
struct drm_device *dev = crtc->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->primary->fb;
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
struct psb_framebuffer *psbfb = to_psb_fb(fb);
int pipe = gma_crtc->pipe;
const struct psb_offset *map = &dev_priv->regmap[pipe];
unsigned long start, offset;
u32 dspcntr;
int ret;
memcpy(&globle_dev, dev, sizeof(struct drm_device));
dev_dbg(dev->dev, "pipe = 0x%x.\n", pipe);
/* no fb bound */
if (!fb) {
dev_dbg(dev->dev, "No FB bound\n");
return 0;
}
ret = check_fb(fb);
if (ret) return ret;
if (pipe > 2) {
DRM_ERROR("Illegal Pipe Number.\n");
return -EINVAL;
}
if (!gma_power_begin(dev, true)) return 0;
start = psbfb->gtt->offset;
offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
REG_WRITE(map->stride, fb->pitches[0]);
dspcntr = REG_READ(map->cntr);
dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
switch (fb->bits_per_pixel) {
case 8: dspcntr |= DISPPLANE_8BPP; break;
case 16:
- if (fb->depth == 15)
+ if (fb->format->depth == 15)
dspcntr |= DISPPLANE_15_16BPP;
else
dspcntr |= DISPPLANE_16BPP;
break;
case 24:
case 32: dspcntr |= DISPPLANE_32BPP_NO_ALPHA; break;
}
REG_WRITE(map->cntr, dspcntr);
dev_dbg(dev->dev, "Writing base %08lX %08lX %d %d\n", start, offset, x, y);
REG_WRITE(map->linoff, offset);
REG_READ(map->linoff);
REG_WRITE(map->surf, start);
REG_READ(map->surf);
gma_power_end(dev);
return 0;
}<sep>@@
struct drm_framebuffer *fb;
@@
- fb->depth
+ fb->format->depth
@@
struct drm_framebuffer *fb;
@@
- (fb->format->depth)
+ fb->format->depth
<|end_of_text|> | 8,660 |
--- initial
+++ final
@@ -1,60 +1,60 @@
static int mgag200fb_create(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes) {
struct mga_fbdev *mfbdev = container_of(helper, struct mga_fbdev, helper);
struct drm_device *dev = mfbdev->helper.dev;
struct drm_mode_fb_cmd2 mode_cmd;
struct mga_device *mdev = dev->dev_private;
struct fb_info *info;
struct drm_framebuffer *fb;
struct drm_gem_object *gobj = NULL;
int ret;
void *sysram;
int size;
mode_cmd.width = sizes->surface_width;
mode_cmd.height = sizes->surface_height;
mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);
mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth);
size = mode_cmd.pitches[0] * mode_cmd.height;
ret = mgag200fb_create_object(mfbdev, &mode_cmd, &gobj);
if (ret) {
DRM_ERROR("failed to create fbcon backing object %d\n", ret);
return ret;
}
sysram = vmalloc(size);
if (!sysram) {
ret = -ENOMEM;
goto err_sysram;
}
info = drm_fb_helper_alloc_fbi(helper);
if (IS_ERR(info)) {
ret = PTR_ERR(info);
goto err_alloc_fbi;
}
info->par = mfbdev;
ret = mgag200_framebuffer_init(dev, &mfbdev->mfb, &mode_cmd, gobj);
if (ret) goto err_framebuffer_init;
mfbdev->sysram = sysram;
mfbdev->size = size;
fb = &mfbdev->mfb.base;
/* setup helper */
mfbdev->helper.fb = fb;
strcpy(info->fix.id, "mgadrmfb");
info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
info->fbops = &mgag200fb_ops;
/* setup aperture base/size for vesafb takeover */
info->apertures->ranges[0].base = mdev->dev->mode_config.fb_base;
info->apertures->ranges[0].size = mdev->mc.vram_size;
- drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
drm_fb_helper_fill_var(info, &mfbdev->helper, sizes->fb_width, sizes->fb_height);
info->screen_base = sysram;
info->screen_size = size;
info->pixmap.flags = FB_PIXMAP_SYSTEM;
DRM_DEBUG_KMS("allocated %dx%d\n", fb->width, fb->height);
return 0;
err_framebuffer_init:
drm_fb_helper_release_fbi(helper);
err_alloc_fbi:
vfree(sysram);
err_sysram:
drm_gem_object_unreference_unlocked(gobj);
return ret;
}<sep>@@
struct drm_framebuffer *fb;
@@
- fb->depth
+ fb->format->depth
@@
struct drm_framebuffer *fb;
@@
- (fb->format->depth)
+ fb->format->depth
<|end_of_text|> | 8,661 |
--- initial
+++ final
@@ -1,37 +1,37 @@
static void mga_crtc_load_lut(struct drm_crtc *crtc) {
struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct mga_device *mdev = dev->dev_private;
struct drm_framebuffer *fb = crtc->primary->fb;
int i;
if (!crtc->enabled) return;
WREG8(DAC_INDEX + MGA1064_INDEX, 0);
if (fb && fb->bits_per_pixel == 16) {
- int inc = (fb->depth == 15) ? 8 : 4;
+ int inc = (fb->format->depth == 15) ? 8 : 4;
u8 r, b;
for (i = 0; i < MGAG200_LUT_SIZE; i += inc) {
- if (fb->depth == 16) {
+ if (fb->format->depth == 16) {
if (i > (MGAG200_LUT_SIZE >> 1)) {
r = b = 0;
} else {
r = mga_crtc->lut_r[i << 1];
b = mga_crtc->lut_b[i << 1];
}
} else {
r = mga_crtc->lut_r[i];
b = mga_crtc->lut_b[i];
}
/* VGA registers */
WREG8(DAC_INDEX + MGA1064_COL_PAL, r);
WREG8(DAC_INDEX + MGA1064_COL_PAL, mga_crtc->lut_g[i]);
WREG8(DAC_INDEX + MGA1064_COL_PAL, b);
}
return;
}
for (i = 0; i < MGAG200_LUT_SIZE; i++) {
/* VGA registers */
WREG8(DAC_INDEX + MGA1064_COL_PAL, mga_crtc->lut_r[i]);
WREG8(DAC_INDEX + MGA1064_COL_PAL, mga_crtc->lut_g[i]);
WREG8(DAC_INDEX + MGA1064_COL_PAL, mga_crtc->lut_b[i]);
}
}<sep>@@
struct drm_framebuffer *fb;
@@
- fb->depth
+ fb->format->depth
@@
struct drm_framebuffer *fb;
@@
- (fb->format->depth)
+ fb->format->depth
<|end_of_text|> | 8,662 |
--- initial
+++ final
@@ -1,85 +1,85 @@
static int msm_fbdev_create(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes) {
struct msm_fbdev *fbdev = to_msm_fbdev(helper);
struct drm_device *dev = helper->dev;
struct drm_framebuffer *fb = NULL;
struct fb_info *fbi = NULL;
struct drm_mode_fb_cmd2 mode_cmd = {0};
uint64_t paddr;
int ret, size;
DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width, sizes->surface_height, sizes->surface_bpp, sizes->fb_width, sizes->fb_height);
mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth);
mode_cmd.width = sizes->surface_width;
mode_cmd.height = sizes->surface_height;
mode_cmd.pitches[0] = align_pitch(mode_cmd.width, sizes->surface_bpp);
/* allocate backing bo */
size = mode_cmd.pitches[0] * mode_cmd.height;
DBG("allocating %d bytes for fb %d", size, dev->primary->index);
mutex_lock(&dev->struct_mutex);
fbdev->bo = msm_gem_new(dev, size, MSM_BO_SCANOUT | MSM_BO_WC | MSM_BO_STOLEN);
mutex_unlock(&dev->struct_mutex);
if (IS_ERR(fbdev->bo)) {
ret = PTR_ERR(fbdev->bo);
fbdev->bo = NULL;
dev_err(dev->dev, "failed to allocate buffer object: %d\n", ret);
goto fail;
}
fb = msm_framebuffer_init(dev, &mode_cmd, &fbdev->bo);
if (IS_ERR(fb)) {
dev_err(dev->dev, "failed to allocate fb\n");
/* note: if fb creation failed, we can't rely on fb destroy
* to unref the bo:
*/
drm_gem_object_unreference_unlocked(fbdev->bo);
ret = PTR_ERR(fb);
goto fail;
}
mutex_lock(&dev->struct_mutex);
/*
* NOTE: if we can be guaranteed to be able to map buffer
* in panic (ie. lock-safe, etc) we could avoid pinning the
* buffer now:
*/
ret = msm_gem_get_iova_locked(fbdev->bo, 0, &paddr);
if (ret) {
dev_err(dev->dev, "failed to get buffer obj iova: %d\n", ret);
goto fail_unlock;
}
fbi = drm_fb_helper_alloc_fbi(helper);
if (IS_ERR(fbi)) {
dev_err(dev->dev, "failed to allocate fb info\n");
ret = PTR_ERR(fbi);
goto fail_unlock;
}
DBG("fbi=%p, dev=%p", fbi, dev);
fbdev->fb = fb;
helper->fb = fb;
fbi->par = helper;
fbi->flags = FBINFO_DEFAULT;
fbi->fbops = &msm_fb_ops;
strcpy(fbi->fix.id, "msm");
- drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->format->depth);
drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
dev->mode_config.fb_base = paddr;
fbi->screen_base = msm_gem_get_vaddr_locked(fbdev->bo);
if (IS_ERR(fbi->screen_base)) {
ret = PTR_ERR(fbi->screen_base);
goto fail_unlock;
}
fbi->screen_size = fbdev->bo->size;
fbi->fix.smem_start = paddr;
fbi->fix.smem_len = fbdev->bo->size;
DBG("par=%p, %dx%d", fbi->par, fbi->var.xres, fbi->var.yres);
DBG("allocated %dx%d fb", fbdev->fb->width, fbdev->fb->height);
mutex_unlock(&dev->struct_mutex);
return 0;
fail_unlock:
mutex_unlock(&dev->struct_mutex);
fail:
if (ret) {
if (fb) {
drm_framebuffer_unregister_private(fb);
drm_framebuffer_remove(fb);
}
}
return ret;
}<sep>@@
struct drm_framebuffer *fb;
@@
- fb->depth
+ fb->format->depth
@@
struct drm_framebuffer *fb;
@@
- (fb->format->depth)
+ fb->format->depth
<|end_of_text|> | 8,664 |
--- initial
+++ final
@@ -1,80 +1,80 @@
static int nouveau_fbcon_create(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes) {
struct nouveau_fbdev *fbcon = container_of(helper, struct nouveau_fbdev, helper);
struct drm_device *dev = fbcon->helper.dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvif_device *device = &drm->device;
struct fb_info *info;
struct nouveau_framebuffer *fb;
struct nouveau_channel *chan;
struct nouveau_bo *nvbo;
struct drm_mode_fb_cmd2 mode_cmd;
int ret;
mode_cmd.width = sizes->surface_width;
mode_cmd.height = sizes->surface_height;
mode_cmd.pitches[0] = mode_cmd.width * (sizes->surface_bpp >> 3);
mode_cmd.pitches[0] = roundup(mode_cmd.pitches[0], 256);
mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth);
ret = nouveau_gem_new(dev, mode_cmd.pitches[0] * mode_cmd.height, 0, NOUVEAU_GEM_DOMAIN_VRAM, 0, 0x0000, &nvbo);
if (ret) {
NV_ERROR(drm, "failed to allocate framebuffer\n");
goto out;
}
ret = nouveau_framebuffer_new(dev, &mode_cmd, nvbo, &fb);
if (ret) goto out_unref;
ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, false);
if (ret) {
NV_ERROR(drm, "failed to pin fb: %d\n", ret);
goto out_unref;
}
ret = nouveau_bo_map(nvbo);
if (ret) {
NV_ERROR(drm, "failed to map fb: %d\n", ret);
goto out_unpin;
}
chan = nouveau_nofbaccel ? NULL : drm->channel;
if (chan && device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
ret = nouveau_bo_vma_add(nvbo, drm->client.vm, &fb->vma);
if (ret) {
NV_ERROR(drm, "failed to map fb into chan: %d\n", ret);
chan = NULL;
}
}
info = drm_fb_helper_alloc_fbi(helper);
if (IS_ERR(info)) {
ret = PTR_ERR(info);
goto out_unlock;
}
info->skip_vt_switch = 1;
info->par = fbcon;
/* setup helper */
fbcon->helper.fb = &fb->base;
strcpy(info->fix.id, "nouveaufb");
if (!chan)
info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_DISABLED;
else
info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_IMAGEBLIT;
info->flags |= FBINFO_CAN_FORCE_OUTPUT;
info->fbops = &nouveau_fbcon_sw_ops;
info->fix.smem_start = fb->nvbo->bo.mem.bus.base + fb->nvbo->bo.mem.bus.offset;
info->fix.smem_len = fb->nvbo->bo.mem.num_pages << PAGE_SHIFT;
info->screen_base = nvbo_kmap_obj_iovirtual(fb->nvbo);
info->screen_size = fb->nvbo->bo.mem.num_pages << PAGE_SHIFT;
- drm_fb_helper_fill_fix(info, fb->base.pitches[0], fb->base.depth);
+ drm_fb_helper_fill_fix(info, fb->base.pitches[0], fb->base.format->depth);
drm_fb_helper_fill_var(info, &fbcon->helper, sizes->fb_width, sizes->fb_height);
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
if (chan) nouveau_fbcon_accel_init(dev);
nouveau_fbcon_zfill(dev, fbcon);
/* To allow resizeing without swapping buffers */
NV_INFO(drm, "allocated %dx%d fb: 0x%llx, bo %p\n", fb->base.width, fb->base.height, fb->nvbo->bo.offset, nvbo);
vga_switcheroo_client_fb_set(dev->pdev, info);
return 0;
out_unlock:
if (chan) nouveau_bo_vma_del(fb->nvbo, &fb->vma);
nouveau_bo_unmap(fb->nvbo);
out_unpin:
nouveau_bo_unpin(fb->nvbo);
out_unref:
nouveau_bo_ref(NULL, &fb->nvbo);
out:
return ret;
}<sep>@@
struct nouveau_framebuffer *fb;
@@
- fb->base.depth
+ fb->base.format->depth
<|end_of_text|> | 8,665 |
--- initial
+++ final
@@ -1,46 +1,46 @@
static int oaktrail_pipe_set_base(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb) {
struct drm_device *dev = crtc->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
struct drm_framebuffer *fb = crtc->primary->fb;
struct psb_framebuffer *psbfb = to_psb_fb(fb);
int pipe = gma_crtc->pipe;
const struct psb_offset *map = &dev_priv->regmap[pipe];
unsigned long start, offset;
u32 dspcntr;
int ret = 0;
/* no fb bound */
if (!fb) {
dev_dbg(dev->dev, "No FB bound\n");
return 0;
}
if (!gma_power_begin(dev, true)) return 0;
start = psbfb->gtt->offset;
offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
REG_WRITE(map->stride, fb->pitches[0]);
dspcntr = REG_READ(map->cntr);
dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
switch (fb->bits_per_pixel) {
case 8: dspcntr |= DISPPLANE_8BPP; break;
case 16:
- if (fb->depth == 15)
+ if (fb->format->depth == 15)
dspcntr |= DISPPLANE_15_16BPP;
else
dspcntr |= DISPPLANE_16BPP;
break;
case 24:
case 32: dspcntr |= DISPPLANE_32BPP_NO_ALPHA; break;
default:
dev_err(dev->dev, "Unknown color depth\n");
ret = -EINVAL;
goto pipe_set_base_exit;
}
REG_WRITE(map->cntr, dspcntr);
REG_WRITE(map->base, offset);
REG_READ(map->base);
REG_WRITE(map->surf, start);
REG_READ(map->surf);
pipe_set_base_exit:
gma_power_end(dev);
return ret;
}<sep>@@
struct drm_framebuffer *fb;
@@
- fb->depth
+ fb->format->depth
@@
struct drm_framebuffer *fb;
@@
- (fb->format->depth)
+ fb->format->depth
<|end_of_text|> | 8,666 |
--- initial
+++ final
@@ -1,102 +1,102 @@
static int omap_fbdev_create(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes) {
struct omap_fbdev *fbdev = to_omap_fbdev(helper);
struct drm_device *dev = helper->dev;
struct omap_drm_private *priv = dev->dev_private;
struct drm_framebuffer *fb = NULL;
union omap_gem_size gsize;
struct fb_info *fbi = NULL;
struct drm_mode_fb_cmd2 mode_cmd = {0};
dma_addr_t paddr;
int ret;
sizes->surface_bpp = 32;
sizes->surface_depth = 24;
DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width, sizes->surface_height, sizes->surface_bpp, sizes->fb_width, sizes->fb_height);
mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth);
mode_cmd.width = sizes->surface_width;
mode_cmd.height = sizes->surface_height;
mode_cmd.pitches[0] = DIV_ROUND_UP(mode_cmd.width * sizes->surface_bpp, 8);
fbdev->ywrap_enabled = priv->has_dmm && ywrap_enabled;
if (fbdev->ywrap_enabled) {
/* need to align pitch to page size if using DMM scrolling */
mode_cmd.pitches[0] = PAGE_ALIGN(mode_cmd.pitches[0]);
}
/* allocate backing bo */
gsize = (union omap_gem_size){
.bytes = PAGE_ALIGN(mode_cmd.pitches[0] * mode_cmd.height),
};
DBG("allocating %d bytes for fb %d", gsize.bytes, dev->primary->index);
fbdev->bo = omap_gem_new(dev, gsize, OMAP_BO_SCANOUT | OMAP_BO_WC);
if (!fbdev->bo) {
dev_err(dev->dev, "failed to allocate buffer object\n");
ret = -ENOMEM;
goto fail;
}
fb = omap_framebuffer_init(dev, &mode_cmd, &fbdev->bo);
if (IS_ERR(fb)) {
dev_err(dev->dev, "failed to allocate fb\n");
/* note: if fb creation failed, we can't rely on fb destroy
* to unref the bo:
*/
drm_gem_object_unreference_unlocked(fbdev->bo);
ret = PTR_ERR(fb);
goto fail;
}
/* note: this keeps the bo pinned.. which is perhaps not ideal,
* but is needed as long as we use fb_mmap() to mmap to userspace
* (since this happens using fix.smem_start). Possibly we could
* implement our own mmap using GEM mmap support to avoid this
* (non-tiled buffer doesn't need to be pinned for fbcon to write
* to it). Then we just need to be sure that we are able to re-
* pin it in case of an opps.
*/
ret = omap_gem_get_paddr(fbdev->bo, &paddr, true);
if (ret) {
dev_err(dev->dev, "could not map (paddr)! Skipping framebuffer alloc\n");
ret = -ENOMEM;
goto fail;
}
mutex_lock(&dev->struct_mutex);
fbi = drm_fb_helper_alloc_fbi(helper);
if (IS_ERR(fbi)) {
dev_err(dev->dev, "failed to allocate fb info\n");
ret = PTR_ERR(fbi);
goto fail_unlock;
}
DBG("fbi=%p, dev=%p", fbi, dev);
fbdev->fb = fb;
helper->fb = fb;
fbi->par = helper;
fbi->flags = FBINFO_DEFAULT;
fbi->fbops = &omap_fb_ops;
strcpy(fbi->fix.id, MODULE_NAME);
- drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->format->depth);
drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
dev->mode_config.fb_base = paddr;
fbi->screen_base = omap_gem_vaddr(fbdev->bo);
fbi->screen_size = fbdev->bo->size;
fbi->fix.smem_start = paddr;
fbi->fix.smem_len = fbdev->bo->size;
/* if we have DMM, then we can use it for scrolling by just
* shuffling pages around in DMM rather than doing sw blit.
*/
if (fbdev->ywrap_enabled) {
DRM_INFO("Enabling DMM ywrap scrolling\n");
fbi->flags |= FBINFO_HWACCEL_YWRAP | FBINFO_READS_FAST;
fbi->fix.ywrapstep = 1;
}
DBG("par=%p, %dx%d", fbi->par, fbi->var.xres, fbi->var.yres);
DBG("allocated %dx%d fb", fbdev->fb->width, fbdev->fb->height);
mutex_unlock(&dev->struct_mutex);
return 0;
fail_unlock:
mutex_unlock(&dev->struct_mutex);
fail:
if (ret) {
drm_fb_helper_release_fbi(helper);
if (fb) {
drm_framebuffer_unregister_private(fb);
drm_framebuffer_remove(fb);
}
}
return ret;
}<sep>@@
struct drm_framebuffer *fb;
@@
- fb->depth
+ fb->format->depth
@@
struct drm_framebuffer *fb;
@@
- (fb->format->depth)
+ fb->format->depth
<|end_of_text|> | 8,667 |
--- initial
+++ final
@@ -1,86 +1,86 @@
static int qxlfb_create(struct qxl_fbdev *qfbdev, struct drm_fb_helper_surface_size *sizes) {
struct qxl_device *qdev = qfbdev->qdev;
struct fb_info *info;
struct drm_framebuffer *fb = NULL;
struct drm_mode_fb_cmd2 mode_cmd;
struct drm_gem_object *gobj = NULL;
struct qxl_bo *qbo = NULL;
int ret;
int size;
int bpp = sizes->surface_bpp;
int depth = sizes->surface_depth;
void *shadow;
mode_cmd.width = sizes->surface_width;
mode_cmd.height = sizes->surface_height;
mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((bpp + 1) / 8), 64);
mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
ret = qxlfb_create_pinned_object(qfbdev, &mode_cmd, &gobj);
if (ret < 0) return ret;
qbo = gem_to_qxl_bo(gobj);
QXL_INFO(qdev, "%s: %dx%d %d\n", __func__, mode_cmd.width, mode_cmd.height, mode_cmd.pitches[0]);
shadow = vmalloc(mode_cmd.pitches[0] * mode_cmd.height);
/* TODO: what's the usual response to memory allocation errors? */
BUG_ON(!shadow);
QXL_INFO(qdev, "surface0 at gpu offset %lld, mmap_offset %lld (virt %p, shadow %p)\n", qxl_bo_gpu_offset(qbo), qxl_bo_mmap_offset(qbo), qbo->kptr, shadow);
size = mode_cmd.pitches[0] * mode_cmd.height;
info = drm_fb_helper_alloc_fbi(&qfbdev->helper);
if (IS_ERR(info)) {
ret = PTR_ERR(info);
goto out_unref;
}
info->par = qfbdev;
qxl_framebuffer_init(qdev->ddev, &qfbdev->qfb, &mode_cmd, gobj, &qxlfb_fb_funcs);
fb = &qfbdev->qfb.base;
/* setup helper with fb data */
qfbdev->helper.fb = fb;
qfbdev->shadow = shadow;
strcpy(info->fix.id, "qxldrmfb");
- drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT;
info->fbops = &qxlfb_ops;
/*
* TODO: using gobj->size in various places in this function. Not sure
* what the difference between the different sizes is.
*/
info->fix.smem_start = qdev->vram_base; /* TODO - correct? */
info->fix.smem_len = gobj->size;
info->screen_base = qfbdev->shadow;
info->screen_size = gobj->size;
drm_fb_helper_fill_var(info, &qfbdev->helper, sizes->fb_width, sizes->fb_height);
/* setup aperture base/size for vesafb takeover */
info->apertures->ranges[0].base = qdev->ddev->mode_config.fb_base;
info->apertures->ranges[0].size = qdev->vram_size;
info->fix.mmio_start = 0;
info->fix.mmio_len = 0;
if (info->screen_base == NULL) {
ret = -ENOSPC;
goto out_destroy_fbi;
}
#ifdef CONFIG_DRM_FBDEV_EMULATION
info->fbdefio = &qxl_defio;
fb_deferred_io_init(info);
#endif
qdev->fbdev_info = info;
qdev->fbdev_qfb = &qfbdev->qfb;
DRM_INFO("fb mappable at 0x%lX, size %lu\n", info->fix.smem_start, (unsigned long)info->screen_size);
- DRM_INFO("fb: depth %d, pitch %d, width %d, height %d\n", fb->depth, fb->pitches[0], fb->width, fb->height);
+ DRM_INFO("fb: depth %d, pitch %d, width %d, height %d\n", fb->format->depth, fb->pitches[0], fb->width, fb->height);
return 0;
out_destroy_fbi:
drm_fb_helper_release_fbi(&qfbdev->helper);
out_unref:
if (qbo) {
ret = qxl_bo_reserve(qbo, false);
if (likely(ret == 0)) {
qxl_bo_kunmap(qbo);
qxl_bo_unpin(qbo);
qxl_bo_unreserve(qbo);
}
}
if (fb && ret) {
drm_gem_object_unreference_unlocked(gobj);
drm_framebuffer_cleanup(fb);
kfree(fb);
}
drm_gem_object_unreference_unlocked(gobj);
return ret;
}<sep>@@
struct drm_framebuffer *fb;
@@
- fb->depth
+ fb->format->depth
@@
struct drm_framebuffer *fb;
@@
- (fb->format->depth)
+ fb->format->depth
<|end_of_text|> | 8,668 |
--- initial
+++ final
@@ -1,75 +1,75 @@
static int radeonfb_create(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes) {
struct radeon_fbdev *rfbdev = container_of(helper, struct radeon_fbdev, helper);
struct radeon_device *rdev = rfbdev->rdev;
struct fb_info *info;
struct drm_framebuffer *fb = NULL;
struct drm_mode_fb_cmd2 mode_cmd;
struct drm_gem_object *gobj = NULL;
struct radeon_bo *rbo = NULL;
int ret;
unsigned long tmp;
mode_cmd.width = sizes->surface_width;
mode_cmd.height = sizes->surface_height;
/* avivo can't scanout real 24bpp */
if ((sizes->surface_bpp == 24) && ASIC_IS_AVIVO(rdev)) sizes->surface_bpp = 32;
mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth);
ret = radeonfb_create_pinned_object(rfbdev, &mode_cmd, &gobj);
if (ret) {
DRM_ERROR("failed to create fbcon object %d\n", ret);
return ret;
}
rbo = gem_to_radeon_bo(gobj);
/* okay we have an object now allocate the framebuffer */
info = drm_fb_helper_alloc_fbi(helper);
if (IS_ERR(info)) {
ret = PTR_ERR(info);
goto out_unref;
}
info->par = rfbdev;
info->skip_vt_switch = true;
ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
if (ret) {
DRM_ERROR("failed to initialize framebuffer %d\n", ret);
goto out_destroy_fbi;
}
fb = &rfbdev->rfb.base;
/* setup helper */
rfbdev->helper.fb = fb;
memset_io(rbo->kptr, 0x0, radeon_bo_size(rbo));
strcpy(info->fix.id, "radeondrmfb");
- drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
info->fbops = &radeonfb_ops;
tmp = radeon_bo_gpu_offset(rbo) - rdev->mc.vram_start;
info->fix.smem_start = rdev->mc.aper_base + tmp;
info->fix.smem_len = radeon_bo_size(rbo);
info->screen_base = rbo->kptr;
info->screen_size = radeon_bo_size(rbo);
drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height);
/* setup aperture base/size for vesafb takeover */
info->apertures->ranges[0].base = rdev->ddev->mode_config.fb_base;
info->apertures->ranges[0].size = rdev->mc.aper_size;
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
if (info->screen_base == NULL) {
ret = -ENOSPC;
goto out_destroy_fbi;
}
DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
DRM_INFO("vram apper at 0x%lX\n", (unsigned long)rdev->mc.aper_base);
DRM_INFO("size %lu\n", (unsigned long)radeon_bo_size(rbo));
- DRM_INFO("fb depth is %d\n", fb->depth);
+ DRM_INFO("fb depth is %d\n", fb->format->depth);
DRM_INFO(" pitch is %d\n", fb->pitches[0]);
vga_switcheroo_client_fb_set(rdev->ddev->pdev, info);
return 0;
out_destroy_fbi:
drm_fb_helper_release_fbi(helper);
out_unref:
if (rbo) {}
if (fb && ret) {
drm_gem_object_unreference_unlocked(gobj);
drm_framebuffer_unregister_private(fb);
drm_framebuffer_cleanup(fb);
kfree(fb);
}
return ret;
}<sep>@@
struct drm_framebuffer *fb;
@@
- fb->depth
+ fb->format->depth
@@
struct drm_framebuffer *fb;
@@
- (fb->format->depth)
+ fb->format->depth
<|end_of_text|> | 8,669 |
--- initial
+++ final
@@ -1,54 +1,54 @@
static int rockchip_drm_fbdev_create(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes) {
struct rockchip_drm_private *private = to_drm_private(helper);
struct drm_mode_fb_cmd2 mode_cmd = {0};
struct drm_device *dev = helper->dev;
struct rockchip_gem_object *rk_obj;
struct drm_framebuffer *fb;
unsigned int bytes_per_pixel;
unsigned long offset;
struct fb_info *fbi;
size_t size;
int ret;
bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8);
mode_cmd.width = sizes->surface_width;
mode_cmd.height = sizes->surface_height;
mode_cmd.pitches[0] = sizes->surface_width * bytes_per_pixel;
mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth);
size = mode_cmd.pitches[0] * mode_cmd.height;
rk_obj = rockchip_gem_create_object(dev, size, true);
if (IS_ERR(rk_obj)) return -ENOMEM;
private->fbdev_bo = &rk_obj->base;
fbi = drm_fb_helper_alloc_fbi(helper);
if (IS_ERR(fbi)) {
dev_err(dev->dev, "Failed to create framebuffer info.\n");
ret = PTR_ERR(fbi);
goto err_rockchip_gem_free_object;
}
helper->fb = rockchip_drm_framebuffer_init(dev, &mode_cmd, private->fbdev_bo);
if (IS_ERR(helper->fb)) {
dev_err(dev->dev, "Failed to allocate DRM framebuffer.\n");
ret = PTR_ERR(helper->fb);
goto err_release_fbi;
}
fbi->par = helper;
fbi->flags = FBINFO_FLAG_DEFAULT;
fbi->fbops = &rockchip_drm_fbdev_ops;
fb = helper->fb;
- drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->format->depth);
drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
offset = fbi->var.xoffset * bytes_per_pixel;
offset += fbi->var.yoffset * fb->pitches[0];
dev->mode_config.fb_base = 0;
fbi->screen_base = rk_obj->kvaddr + offset;
fbi->screen_size = rk_obj->base.size;
fbi->fix.smem_len = rk_obj->base.size;
- DRM_DEBUG_KMS("FB [%dx%d]-%d kvaddr=%p offset=%ld size=%zu\n", fb->width, fb->height, fb->depth, rk_obj->kvaddr, offset, size);
+ DRM_DEBUG_KMS("FB [%dx%d]-%d kvaddr=%p offset=%ld size=%zu\n", fb->width, fb->height, fb->format->depth, rk_obj->kvaddr, offset, size);
fbi->skip_vt_switch = true;
return 0;
err_release_fbi:
drm_fb_helper_release_fbi(helper);
err_rockchip_gem_free_object:
rockchip_gem_free_object(&rk_obj->base);
return ret;
}<sep>@@
struct drm_framebuffer *fb;
@@
- fb->depth
+ fb->format->depth
@@
struct drm_framebuffer *fb;
@@
- (fb->format->depth)
+ fb->format->depth
<|end_of_text|> | 8,670 |
--- initial
+++ final
@@ -1,50 +1,50 @@
static int udlfb_create(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes) {
struct udl_fbdev *ufbdev = container_of(helper, struct udl_fbdev, helper);
struct drm_device *dev = ufbdev->helper.dev;
struct fb_info *info;
struct drm_framebuffer *fb;
struct drm_mode_fb_cmd2 mode_cmd;
struct udl_gem_object *obj;
uint32_t size;
int ret = 0;
if (sizes->surface_bpp == 24) sizes->surface_bpp = 32;
mode_cmd.width = sizes->surface_width;
mode_cmd.height = sizes->surface_height;
mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);
mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth);
size = mode_cmd.pitches[0] * mode_cmd.height;
size = ALIGN(size, PAGE_SIZE);
obj = udl_gem_alloc_object(dev, size);
if (!obj) goto out;
ret = udl_gem_vmap(obj);
if (ret) {
DRM_ERROR("failed to vmap fb\n");
goto out_gfree;
}
info = drm_fb_helper_alloc_fbi(helper);
if (IS_ERR(info)) {
ret = PTR_ERR(info);
goto out_gfree;
}
info->par = ufbdev;
ret = udl_framebuffer_init(dev, &ufbdev->ufb, &mode_cmd, obj);
if (ret) goto out_destroy_fbi;
fb = &ufbdev->ufb.base;
ufbdev->helper.fb = fb;
strcpy(info->fix.id, "udldrmfb");
info->screen_base = ufbdev->ufb.obj->vmapping;
info->fix.smem_len = size;
info->fix.smem_start = (unsigned long)ufbdev->ufb.obj->vmapping;
info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
info->fbops = &udlfb_ops;
- drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
drm_fb_helper_fill_var(info, &ufbdev->helper, sizes->fb_width, sizes->fb_height);
DRM_DEBUG_KMS("allocated %dx%d vmal %p\n", fb->width, fb->height, ufbdev->ufb.obj->vmapping);
return ret;
out_destroy_fbi:
drm_fb_helper_release_fbi(helper);
out_gfree:
drm_gem_object_unreference_unlocked(&ufbdev->ufb.obj->base);
out:
return ret;
}<sep>@@
struct drm_framebuffer *fb;
@@
- fb->depth
+ fb->format->depth
@@
struct drm_framebuffer *fb;
@@
- (fb->format->depth)
+ fb->format->depth
<|end_of_text|> | 8,671 |