text (string column; example lengths range from 478 to 227k characters)
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: llc_print(netdissect_options *ndo, const u_char *p, u_int length, u_int caplen, const struct lladdr_info *src, const struct lladdr_info *dst) { uint8_t dsap_field, dsap, ssap_field, ssap; uint16_t control; int hdrlen; int is_u; if (caplen < 3) { ND_PRINT((ndo, "[|llc]")); ND_DEFAULTPRINT((const u_char *)p, caplen); return (caplen); } if (length < 3) { ND_PRINT((ndo, "[|llc]")); ND_DEFAULTPRINT((const u_char *)p, caplen); return (length); } dsap_field = *p; ssap_field = *(p + 1); /* * OK, what type of LLC frame is this? The length * of the control field depends on that - I frames * have a two-byte control field, and U frames have * a one-byte control field. */ control = *(p + 2); if ((control & LLC_U_FMT) == LLC_U_FMT) { /* * U frame. */ is_u = 1; hdrlen = 3; /* DSAP, SSAP, 1-byte control field */ } else { /* * The control field in I and S frames is * 2 bytes... */ if (caplen < 4) { ND_PRINT((ndo, "[|llc]")); ND_DEFAULTPRINT((const u_char *)p, caplen); return (caplen); } if (length < 4) { ND_PRINT((ndo, "[|llc]")); ND_DEFAULTPRINT((const u_char *)p, caplen); return (length); } /* * ...and is little-endian. */ control = EXTRACT_LE_16BITS(p + 2); is_u = 0; hdrlen = 4; /* DSAP, SSAP, 2-byte control field */ } if (ssap_field == LLCSAP_GLOBAL && dsap_field == LLCSAP_GLOBAL) { /* * This is an Ethernet_802.3 IPX frame; it has an * 802.3 header (i.e., an Ethernet header where the * type/length field is <= ETHERMTU, i.e. it's a length * field, not a type field), but has no 802.2 header - * the IPX packet starts right after the Ethernet header, * with a signature of two bytes of 0xFF (which is * LLCSAP_GLOBAL). * * (It might also have been an Ethernet_802.3 IPX at * one time, but got bridged onto another network, * such as an 802.11 network; this has appeared in at * least one capture file.) */ if (ndo->ndo_eflag) ND_PRINT((ndo, "IPX 802.3: ")); ipx_print(ndo, p, length); return (0); /* no LLC header */ } dsap = dsap_field & ~LLC_IG; ssap = ssap_field & ~LLC_GSAP; if (ndo->ndo_eflag) { ND_PRINT((ndo, "LLC, dsap %s (0x%02x) %s, ssap %s (0x%02x) %s", tok2str(llc_values, "Unknown", dsap), dsap, tok2str(llc_ig_flag_values, "Unknown", dsap_field & LLC_IG), tok2str(llc_values, "Unknown", ssap), ssap, tok2str(llc_flag_values, "Unknown", ssap_field & LLC_GSAP))); if (is_u) { ND_PRINT((ndo, ", ctrl 0x%02x: ", control)); } else { ND_PRINT((ndo, ", ctrl 0x%04x: ", control)); } } /* * Skip LLC header. */ p += hdrlen; length -= hdrlen; caplen -= hdrlen; if (ssap == LLCSAP_SNAP && dsap == LLCSAP_SNAP && control == LLC_UI) { /* * XXX - what *is* the right bridge pad value here? * Does anybody ever bridge one form of LAN traffic * over a networking type that uses 802.2 LLC? */ if (!snap_print(ndo, p, length, caplen, src, dst, 2)) { /* * Unknown packet type; tell our caller, by * returning a negative value, so they * can print the raw packet. */ return (-(hdrlen + 5)); /* include LLC and SNAP header */ } else return (hdrlen + 5); /* include LLC and SNAP header */ } if (ssap == LLCSAP_8021D && dsap == LLCSAP_8021D && control == LLC_UI) { stp_print(ndo, p, length); return (hdrlen); } if (ssap == LLCSAP_IP && dsap == LLCSAP_IP && control == LLC_UI) { /* * This is an RFC 948-style IP packet, with * an 802.3 header and an 802.2 LLC header * with the source and destination SAPs being * the IP SAP. 
*/ ip_print(ndo, p, length); return (hdrlen); } if (ssap == LLCSAP_IPX && dsap == LLCSAP_IPX && control == LLC_UI) { /* * This is an Ethernet_802.2 IPX frame, with an 802.3 * header and an 802.2 LLC header with the source and * destination SAPs being the IPX SAP. */ if (ndo->ndo_eflag) ND_PRINT((ndo, "IPX 802.2: ")); ipx_print(ndo, p, length); return (hdrlen); } #ifdef ENABLE_SMB if (ssap == LLCSAP_NETBEUI && dsap == LLCSAP_NETBEUI && (!(control & LLC_S_FMT) || control == LLC_U_FMT)) { /* * we don't actually have a full netbeui parser yet, but the * smb parser can handle many smb-in-netbeui packets, which * is very useful, so we call that * * We don't call it for S frames, however, just I frames * (which are frames that don't have the low-order bit, * LLC_S_FMT, set in the first byte of the control field) * and UI frames (whose control field is just 3, LLC_U_FMT). */ netbeui_print(ndo, control, p, length); return (hdrlen); } #endif if (ssap == LLCSAP_ISONS && dsap == LLCSAP_ISONS && control == LLC_UI) { isoclns_print(ndo, p, length, caplen); return (hdrlen); } if (!ndo->ndo_eflag) { if (ssap == dsap) { if (src == NULL || dst == NULL) ND_PRINT((ndo, "%s ", tok2str(llc_values, "Unknown DSAP 0x%02x", dsap))); else ND_PRINT((ndo, "%s > %s %s ", (src->addr_string)(ndo, src->addr), (dst->addr_string)(ndo, dst->addr), tok2str(llc_values, "Unknown DSAP 0x%02x", dsap))); } else { if (src == NULL || dst == NULL) ND_PRINT((ndo, "%s > %s ", tok2str(llc_values, "Unknown SSAP 0x%02x", ssap), tok2str(llc_values, "Unknown DSAP 0x%02x", dsap))); else ND_PRINT((ndo, "%s %s > %s %s ", (src->addr_string)(ndo, src->addr), tok2str(llc_values, "Unknown SSAP 0x%02x", ssap), (dst->addr_string)(ndo, dst->addr), tok2str(llc_values, "Unknown DSAP 0x%02x", dsap))); } } if (is_u) { ND_PRINT((ndo, "Unnumbered, %s, Flags [%s], length %u", tok2str(llc_cmd_values, "%02x", LLC_U_CMD(control)), tok2str(llc_flag_values,"?",(ssap_field & LLC_GSAP) | (control & LLC_U_POLL)), length + hdrlen)); if ((control & ~LLC_U_POLL) == LLC_XID) { if (length == 0) { /* * XID with no payload. * This could, for example, be an SNA * "short form" XID. */ return (hdrlen); } if (caplen < 1) { ND_PRINT((ndo, "[|llc]")); if (caplen > 0) ND_DEFAULTPRINT((const u_char *)p, caplen); return (hdrlen); } if (*p == LLC_XID_FI) { if (caplen < 3 || length < 3) { ND_PRINT((ndo, "[|llc]")); if (caplen > 0) ND_DEFAULTPRINT((const u_char *)p, caplen); } else ND_PRINT((ndo, ": %02x %02x", p[1], p[2])); return (hdrlen); } } } else { if ((control & LLC_S_FMT) == LLC_S_FMT) { ND_PRINT((ndo, "Supervisory, %s, rcv seq %u, Flags [%s], length %u", tok2str(llc_supervisory_values,"?",LLC_S_CMD(control)), LLC_IS_NR(control), tok2str(llc_flag_values,"?",(ssap_field & LLC_GSAP) | (control & LLC_IS_POLL)), length + hdrlen)); return (hdrlen); /* no payload to print */ } else { ND_PRINT((ndo, "Information, send seq %u, rcv seq %u, Flags [%s], length %u", LLC_I_NS(control), LLC_IS_NR(control), tok2str(llc_flag_values,"?",(ssap_field & LLC_GSAP) | (control & LLC_IS_POLL)), length + hdrlen)); } } return (-hdrlen); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-125', 'CWE-787'], 'message': 'CVE-2017-12897/ISO CLNS: Use ND_TTEST() for the bounds checks in isoclns_print(). This fixes a buffer over-read discovered by Kamil Frankowicz. Don't pass the remaining caplen - that's too hard to get right, and we were getting it wrong in at least one case; just use ND_TTEST(). 
Add a test using the capture file supplied by the reporter(s).'</s>
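The fix message above is about adding bounds checks before reading packet bytes instead of trusting a caller-supplied remaining length. Below is a minimal generic C sketch of that test-before-read pattern; it is not tcpdump's ND_TTEST macro or the actual isoclns_print() patch, and all names are illustrative.

#include <stddef.h>
#include <stdint.h>

/* Return nonzero only if [p, p+len) lies entirely inside the captured
 * buffer [start, end). This mirrors the "prove it is there before you
 * read it" idea behind the bounds-check macros mentioned above. */
static int bytes_available(const uint8_t *p, size_t len,
                           const uint8_t *start, const uint8_t *end)
{
    if (p < start || p > end)
        return 0;
    return len <= (size_t)(end - p);
}

/* Read a 16-bit big-endian field only after the check succeeds. */
static int read_be16_checked(const uint8_t *p, const uint8_t *start,
                             const uint8_t *end, uint16_t *out)
{
    if (!bytes_available(p, 2, start, end))
        return -1;              /* truncated capture: refuse to over-read */
    *out = (uint16_t)((p[0] << 8) | p[1]);
    return 0;
}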
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: void SFS_CompoundExpression(ScriptParser *parser) { if (parser->codec->LastError) return; SFS_Expression(parser); if (! gf_bs_read_int(parser->bs, 1)) return; SFS_AddString(parser, ","); SFS_CompoundExpression(parser); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-476'], 'message': 'fixed #2238'</s>
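The referenced fix ("fixed #2238") is not shown here, so the sketch below is only one way to harden the shape of the parser above: SFS_CompoundExpression() calls itself once per ',' bit read from the bitstream, so crafted input can drive unbounded recursion; rewriting it as a loop, plus a defensive null check in line with the listed CWE-476, removes that growth. Types and callees come from the snippet; this is not GPAC's actual patch.

/* Iterative rewrite of the recursive parser above; a long run of ','
 * separators in attacker-controlled input no longer grows the call stack. */
void SFS_CompoundExpression_iterative(ScriptParser *parser)
{
    if (!parser || !parser->codec || parser->codec->LastError)
        return;                                  /* defensive null checks */
    for (;;) {
        SFS_Expression(parser);
        if (parser->codec->LastError)
            return;                              /* stop on parse error */
        if (!gf_bs_read_int(parser->bs, 1))
            return;                              /* no further expression */
        SFS_AddString(parser, ",");
    }
}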
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: void Compute(OpKernelContext* ctx) override { Buffer* buf = nullptr; OP_REQUIRES_OK(ctx, GetBuffer(ctx, def(), &buf)); core::ScopedUnref scope(buf); Buffer::Tuple tuple; std::size_t index = ctx->input(0).scalar<int>()(); OP_REQUIRES_OK(ctx, buf->Peek(index, &tuple)); OP_REQUIRES( ctx, tuple.size() == (size_t)ctx->num_outputs(), errors::InvalidArgument("Mismatch stage/unstage: ", tuple.size(), " vs. ", ctx->num_outputs())); for (size_t i = 0; i < tuple.size(); ++i) { ctx->set_output(i, tuple[i]); } } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-20', 'CWE-703'], 'message': 'Fix tf.raw_ops.StagePeek vulnerability with invalid `index`. Check that input is actually a scalar before treating it as such. PiperOrigin-RevId: 445524908'</s>
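The fix message for this row is about checking that the index input really is a scalar, and that its value is usable, before treating it as one. The following generic C sketch shows that validate-before-use pattern with hypothetical types; it is not the TensorFlow OpKernel API.

#include <stddef.h>

/* Hypothetical tensor-like descriptor used only for this sketch. */
struct buf_view { const int *data; size_t rank; size_t num_elements; };

/* Accept the index only after proving the input is a genuine scalar and
 * the value lies inside the container it will be used to peek into. */
static int peek_index_checked(const struct buf_view *in, size_t limit,
                              size_t *out_index)
{
    if (in == NULL || in->data == NULL)
        return -1;                  /* missing input */
    if (in->rank != 0 || in->num_elements != 1)
        return -1;                  /* not a scalar: reject, don't assume */
    if (in->data[0] < 0 || (size_t)in->data[0] >= limit)
        return -1;                  /* index outside the staged buffer */
    *out_index = (size_t)in->data[0];
    return 0;
}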
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: int inode_change_ok(const struct inode *inode, struct iattr *attr) { unsigned int ia_valid = attr->ia_valid; /* * First check size constraints. These can't be overriden using * ATTR_FORCE. */ if (ia_valid & ATTR_SIZE) { int error = inode_newsize_ok(inode, attr->ia_size); if (error) return error; } /* If force is set do it anyway. */ if (ia_valid & ATTR_FORCE) return 0; /* Make sure a caller can chown. */ if ((ia_valid & ATTR_UID) && (!uid_eq(current_fsuid(), inode->i_uid) || !uid_eq(attr->ia_uid, inode->i_uid)) && !inode_capable(inode, CAP_CHOWN)) return -EPERM; /* Make sure caller can chgrp. */ if ((ia_valid & ATTR_GID) && (!uid_eq(current_fsuid(), inode->i_uid) || (!in_group_p(attr->ia_gid) && !gid_eq(attr->ia_gid, inode->i_gid))) && !inode_capable(inode, CAP_CHOWN)) return -EPERM; /* Make sure a caller can chmod. */ if (ia_valid & ATTR_MODE) { if (!inode_owner_or_capable(inode)) return -EPERM; /* Also check the setgid bit! */ if (!in_group_p((ia_valid & ATTR_GID) ? attr->ia_gid : inode->i_gid) && !inode_capable(inode, CAP_FSETID)) attr->ia_mode &= ~S_ISGID; } /* Check for setting the inode time. */ if (ia_valid & (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET)) { if (!inode_owner_or_capable(inode)) return -EPERM; } return 0; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-284', 'CWE-264'], 'message': 'fs,userns: Change inode_capable to capable_wrt_inode_uidgid The kernel has no concept of capabilities with respect to inodes; inodes exist independently of namespaces. For example, inode_capable(inode, CAP_LINUX_IMMUTABLE) would be nonsense. This patch changes inode_capable to check for uid and gid mappings and renames it to capable_wrt_inode_uidgid, which should make it more obvious what it does. Fixes CVE-2014-4014. Cc: Theodore Ts'o <tytso@mit.edu> Cc: Serge Hallyn <serge.hallyn@ubuntu.com> Cc: "Eric W. Biederman" <ebiederm@xmission.com> Cc: Dave Chinner <david@fromorbit.com> Cc: stable@vger.kernel.org Signed-off-by: Andy Lutomirski <luto@amacapital.net> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: is_link_trusted (NautilusFile *file, gboolean is_launcher) { GFile *location; gboolean res; if (!is_launcher) { return TRUE; } if (nautilus_file_can_execute (file)) { return TRUE; } res = FALSE; if (nautilus_file_is_local (file)) { location = nautilus_file_get_location (file); res = nautilus_is_in_system_dir (location); g_object_unref (location); } return res; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-20'], 'message': 'mime-actions: use file metadata for trusting desktop files Currently we only trust desktop files that have the executable bit set, and don't replace the displayed icon or the displayed name until it's trusted, which prevents for running random programs by a malicious desktop file. However, the executable permission is preserved if the desktop file comes from a compressed file. To prevent this, add a metadata::trusted metadata to the file once the user acknowledges the file as trusted. This adds metadata to the file, which cannot be added unless it has access to the computer. Also remove the SHEBANG "trusted" content we were putting inside the desktop file, since that doesn't add more security since it can come with the file itself. https://bugzilla.gnome.org/show_bug.cgi?id=777991'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: xfs_attr_shortform_list(xfs_attr_list_context_t *context) { attrlist_cursor_kern_t *cursor; xfs_attr_sf_sort_t *sbuf, *sbp; xfs_attr_shortform_t *sf; xfs_attr_sf_entry_t *sfe; xfs_inode_t *dp; int sbsize, nsbuf, count, i; int error; ASSERT(context != NULL); dp = context->dp; ASSERT(dp != NULL); ASSERT(dp->i_afp != NULL); sf = (xfs_attr_shortform_t *)dp->i_afp->if_u1.if_data; ASSERT(sf != NULL); if (!sf->hdr.count) return 0; cursor = context->cursor; ASSERT(cursor != NULL); trace_xfs_attr_list_sf(context); /* * If the buffer is large enough and the cursor is at the start, * do not bother with sorting since we will return everything in * one buffer and another call using the cursor won't need to be * made. * Note the generous fudge factor of 16 overhead bytes per entry. * If bufsize is zero then put_listent must be a search function * and can just scan through what we have. */ if (context->bufsize == 0 || (XFS_ISRESET_CURSOR(cursor) && (dp->i_afp->if_bytes + sf->hdr.count * 16) < context->bufsize)) { for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) { error = context->put_listent(context, sfe->flags, sfe->nameval, (int)sfe->namelen, (int)sfe->valuelen, &sfe->nameval[sfe->namelen]); /* * Either search callback finished early or * didn't fit it all in the buffer after all. */ if (context->seen_enough) break; if (error) return error; sfe = XFS_ATTR_SF_NEXTENTRY(sfe); } trace_xfs_attr_list_sf_all(context); return 0; } /* do no more for a search callback */ if (context->bufsize == 0) return 0; /* * It didn't all fit, so we have to sort everything on hashval. */ sbsize = sf->hdr.count * sizeof(*sbuf); sbp = sbuf = kmem_alloc(sbsize, KM_SLEEP | KM_NOFS); /* * Scan the attribute list for the rest of the entries, storing * the relevant info from only those that match into a buffer. */ nsbuf = 0; for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) { if (unlikely( ((char *)sfe < (char *)sf) || ((char *)sfe >= ((char *)sf + dp->i_afp->if_bytes)))) { XFS_CORRUPTION_ERROR("xfs_attr_shortform_list", XFS_ERRLEVEL_LOW, context->dp->i_mount, sfe); kmem_free(sbuf); return -EFSCORRUPTED; } sbp->entno = i; sbp->hash = xfs_da_hashname(sfe->nameval, sfe->namelen); sbp->name = sfe->nameval; sbp->namelen = sfe->namelen; /* These are bytes, and both on-disk, don't endian-flip */ sbp->valuelen = sfe->valuelen; sbp->flags = sfe->flags; sfe = XFS_ATTR_SF_NEXTENTRY(sfe); sbp++; nsbuf++; } /* * Sort the entries on hash then entno. */ xfs_sort(sbuf, nsbuf, sizeof(*sbuf), xfs_attr_shortform_compare); /* * Re-find our place IN THE SORTED LIST. */ count = 0; cursor->initted = 1; cursor->blkno = 0; for (sbp = sbuf, i = 0; i < nsbuf; i++, sbp++) { if (sbp->hash == cursor->hashval) { if (cursor->offset == count) { break; } count++; } else if (sbp->hash > cursor->hashval) { break; } } if (i == nsbuf) { kmem_free(sbuf); return 0; } /* * Loop putting entries into the user buffer. 
*/ for ( ; i < nsbuf; i++, sbp++) { if (cursor->hashval != sbp->hash) { cursor->hashval = sbp->hash; cursor->offset = 0; } error = context->put_listent(context, sbp->flags, sbp->name, sbp->namelen, sbp->valuelen, &sbp->name[sbp->namelen]); if (error) return error; if (context->seen_enough) break; cursor->offset++; } kmem_free(sbuf); return 0; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-400', 'CWE-703'], 'message': 'xfs: fix two memory leaks in xfs_attr_list.c error paths This plugs 2 trivial leaks in xfs_attr_shortform_list and xfs_attr3_leaf_list_int. Signed-off-by: Mateusz Guzik <mguzik@redhat.com> Cc: <stable@vger.kernel.org> Reviewed-by: Eric Sandeen <sandeen@redhat.com> Signed-off-by: Dave Chinner <david@fromorbit.com>'</s>
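The leak the fix message describes comes from an early "return error" inside the final loop, which skips the kmem_free(sbuf) at the end. Below is a generic C sketch of the single-exit cleanup shape that avoids this class of leak; it is not the actual xfs patch, and process_entry() is a hypothetical stand-in for the per-entry callback.

#include <stdlib.h>

/* Every path leaves through "out", so the scratch buffer is freed
 * exactly once whether or not an entry fails mid-loop. */
int list_entries(int (*process_entry)(size_t), size_t count)
{
    int *scratch;
    size_t i;
    int error = 0;

    scratch = malloc(count * sizeof(*scratch));
    if (!scratch)
        return -1;

    for (i = 0; i < count; i++) {
        error = process_entry(i);
        if (error)
            goto out;       /* not "return error": that would leak scratch */
    }
out:
    free(scratch);
    return error;
}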
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: SSLNetVConnection::sslStartHandShake(int event, int &err) { if (sslHandshakeBeginTime == 0) { sslHandshakeBeginTime = Thread::get_hrtime(); // net_activity will not be triggered until after the handshake set_inactivity_timeout(HRTIME_SECONDS(SSLConfigParams::ssl_handshake_timeout_in)); } SSLConfig::scoped_config params; switch (event) { case SSL_EVENT_SERVER: if (this->ssl == nullptr) { SSLCertificateConfig::scoped_config lookup; IpEndpoint dst; int namelen = sizeof(dst); if (0 != safe_getsockname(this->get_socket(), &dst.sa, &namelen)) { Debug("ssl", "Failed to get dest ip, errno = [%d]", errno); return EVENT_ERROR; } SSLCertContext *cc = lookup->find(dst); if (is_debug_tag_set("ssl")) { IpEndpoint src; ip_port_text_buffer ipb1, ipb2; int ip_len = sizeof(src); if (0 != safe_getpeername(this->get_socket(), &src.sa, &ip_len)) { Debug("ssl", "Failed to get src ip, errno = [%d]", errno); return EVENT_ERROR; } ats_ip_nptop(&dst, ipb1, sizeof(ipb1)); ats_ip_nptop(&src, ipb2, sizeof(ipb2)); Debug("ssl", "IP context is %p for [%s] -> [%s], default context %p", cc, ipb2, ipb1, lookup->defaultContext()); } // Escape if this is marked to be a tunnel. // No data has been read at this point, so we can go // directly into blind tunnel mode if (cc && SSLCertContext::OPT_TUNNEL == cc->opt) { if (this->is_transparent) { this->attributes = HttpProxyPort::TRANSPORT_BLIND_TUNNEL; sslHandShakeComplete = true; SSL_free(this->ssl); this->ssl = nullptr; return EVENT_DONE; } else { SSLConfig::scoped_config params; this->SNIMapping = params->sni_map_enable; hookOpRequested = SSL_HOOK_OP_TUNNEL; } } // Attach the default SSL_CTX to this SSL session. The default context is never going to be able // to negotiate a SSL session, but it's enough to trampoline us into the SNI callback where we // can select the right server certificate. this->ssl = make_ssl_connection(lookup->defaultContext(), this); } if (this->ssl == nullptr) { SSLErrorVC(this, "failed to create SSL server session"); return EVENT_ERROR; } return sslServerHandShakeEvent(err); case SSL_EVENT_CLIENT: char buff[INET6_ADDRSTRLEN]; if (this->ssl == nullptr) { // Making the check here instead of later, so we only // do this setting immediately after we create the SSL object SNIConfig::scoped_config sniParam; int8_t clientVerify = 0; cchar *serverKey = this->options.sni_servername; if (!serverKey) { ats_ip_ntop(this->get_remote_addr(), buff, INET6_ADDRSTRLEN); serverKey = buff; } auto nps = sniParam->getPropertyConfig(serverKey); SSL_CTX *clientCTX = nullptr; if (nps) { clientCTX = nps->ctx; clientVerify = nps->verifyLevel; } else { clientCTX = params->client_ctx; // Keeping backwards compatability on the proxy.config.ssl.client.verify.server setting clientVerify = params->clientVerify ? (params->clientVerify == 1 ? 
2 : 1) : 0; } if (!clientCTX) { SSLErrorVC(this, "failed to create SSL client session"); return EVENT_ERROR; } this->ssl = make_ssl_connection(clientCTX, this); if (this->ssl == nullptr) { SSLErrorVC(this, "failed to create SSL client session"); return EVENT_ERROR; } int verify_op; if (clientVerify) { verify_op = SSL_VERIFY_PEER; SSL_set_verify(this->ssl, verify_op, verify_callback); } else { // Don't bother to set the verify callback if no verification is required verify_op = SSL_VERIFY_NONE; SSL_set_verify(this->ssl, verify_op, nullptr); } if (this->options.sni_servername) { if (SSL_set_tlsext_host_name(this->ssl, this->options.sni_servername)) { Debug("ssl", "using SNI name '%s' for client handshake", this->options.sni_servername.get()); } else { Debug("ssl.error", "failed to set SNI name '%s' for client handshake", this->options.sni_servername.get()); SSL_INCREMENT_DYN_STAT(ssl_sni_name_set_failure); } } } return sslClientHandShakeEvent(err); default: ink_assert(0); return EVENT_ERROR; } } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-284'], 'message': 'Bug fix in origin connection handling (#8731) Co-authored-by: Takuya Kitano <tkitano@yahoo-corp.jp>'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: Status NestedStackRaggedTensors( const std::vector<RaggedTensorVariant>& ragged_components, const std::vector<int>& nested_dim_sizes, const int input_ragged_rank, const int output_ragged_rank, RaggedTensorVariant* output_ragged) { output_ragged->mutable_nested_splits()->reserve(output_ragged_rank); const int dims = nested_dim_sizes.size(); // Populate first `dims - 1` splits. for (int i = 0; i < dims - 1; i++) { int dims_splits_size = nested_dim_sizes[i] + 1; output_ragged->append_splits(Tensor(DataTypeToEnum<SPLIT_TYPE>::value, TensorShape({dims_splits_size}))); auto splits_vec = output_ragged->mutable_splits(i)->vec<SPLIT_TYPE>(); int split_diff = nested_dim_sizes[i + 1]; for (int j = 0; j < dims_splits_size; j++) { splits_vec(j) = j * split_diff; } } // Populate `dims`-th split. int splits_size = ragged_components.size() + 1; output_ragged->append_splits( Tensor(DataTypeToEnum<SPLIT_TYPE>::value, TensorShape({splits_size}))); auto dims_splits_vec = output_ragged->mutable_splits(dims - 1)->vec<SPLIT_TYPE>(); dims_splits_vec(0) = 0; for (int i = 0; i < ragged_components.size(); i++) { int split_val = ragged_components[i].values().shape().dim_size(0); if (input_ragged_rank != 0 && ragged_components[i].ragged_rank() > 0) { split_val = ragged_components[i].splits(0).NumElements() - 1; } dims_splits_vec(i + 1) = dims_splits_vec(i) + split_val; } // Populate last `input_ragged_rank` splits. for (int i = 0; i < input_ragged_rank; i++) { int split_index = dims + i; int split_size = 1; for (int j = 0; j < ragged_components.size(); j++) { if (!ragged_components[j].nested_splits().empty()) { split_size += ragged_components[j].splits(i).NumElements() - 1; } } output_ragged->append_splits( Tensor(DataTypeToEnum<SPLIT_TYPE>::value, TensorShape({split_size}))); auto splits_vec = output_ragged->mutable_splits(split_index)->vec<SPLIT_TYPE>(); splits_vec(0) = 0; SPLIT_TYPE last_split_value = 0; int index = 1; for (int j = 0; j < ragged_components.size(); j++) { if (ragged_components[j].nested_splits().empty()) { // Corner case: empty row. e.g [ [[x], [x]], [] ] continue; } auto component_splits_vec = ragged_components[j].splits(i).vec<SPLIT_TYPE>(); for (int k = 1; k < component_splits_vec.size(); k++, index++) { splits_vec(index) = component_splits_vec(k) + last_split_value; } last_split_value = splits_vec(index - 1); } } // If the variant tensor input is empty, then we have no way to determine // the correct shape for the dense_values. (It must have rank>=1, and its // outer dimension must be 0, but we don't know its shape beyond that.) // For now, we just use a shape of `[0]` in this case. // TODO(edloper): Update this op with an attribute containing information // about dense_values shape. If it's `None`, then we'll probably still have // to use shape=[0] here, but if we have more info, then we can use it. // E.g., in map_fn, we may have shape info from the RaggedTensorSpec. TensorShape component_values_shape; if (ragged_components.empty()) { component_values_shape = TensorShape({0}); } else { component_values_shape = ragged_components[0].values().shape(); } // Populate values. 
int values_size = component_values_shape.dim_size(0); for (int i = 1; i < ragged_components.size(); i++) { if (ragged_components[i].values().dims() != component_values_shape.dims()) { return errors::InvalidArgument( "Rank of values must match for all " "components; values shape at index 0: ", component_values_shape.DebugString(), ", values shape at index ", i, ": ", ragged_components[i].values().shape().DebugString()); } values_size += ragged_components[i].values().shape().dim_size(0); } component_values_shape.set_dim(0, values_size); output_ragged->set_values( Tensor(DataTypeToEnum<VALUE_TYPE>::value, component_values_shape)); auto output_values_flat = output_ragged->mutable_values()->flat_outer_dims<VALUE_TYPE, 2>(); int values_index = 0; for (int i = 0; i < ragged_components.size(); i++) { auto component_values_flat = ragged_components[i].values().flat_outer_dims<VALUE_TYPE, 2>(); int num_inner_elements = ragged_components[i].values().NumElements(); if (ragged_components[i].values().dim_size(0) > 0) { num_inner_elements /= ragged_components[i].values().dim_size(0); } for (int j = 0; j < ragged_components[i].values().dim_size(0); j++, values_index++) { for (int k = 0; k < num_inner_elements; k++) { output_values_flat(values_index, k) = component_values_flat(j, k); } } } return Status::OK(); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-703', 'CWE-681'], 'message': 'Fix bug that could cause map_fn to produce incorrect results (rather than an error) when mapping over a ragged tensor with an inappropriate fn_output_signature. (Note: there are cases where the default value for fn_output_signature is not appropriate, so the user needs to explicitly specify the correct output signature.) PiperOrigin-RevId: 387606546 Change-Id: Ib4ea27b9634e6ab413f211cfe809a69a90f0e2cd'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: static Status ValidateSavedTensors(const GraphDef& graph_def) { for (const auto& node : graph_def.node()) { const auto node_iterator = node.attr().find("value"); if (node_iterator != node.attr().end()) { AttrValue node_value = node_iterator->second; if (node_value.has_tensor()) { const PartialTensorShape node_shape(node_value.tensor().tensor_shape()); if (node_shape.num_elements() < 0) { return errors::FailedPrecondition( "Saved model contains node \"", node.name(), "\" (op \"", node.op(), "\") which initializes from a tensor with ", node_shape.num_elements(), " elements"); } } } else if (node.op() == "Const") { return errors::FailedPrecondition( "Saved model contains node \"", node.name(), "\" which is a constant tensor but no value has been provided"); } } return Status::OK(); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-20', 'CWE-703'], 'message': 'Validate `NodeDef`s from `FunctionDefLibrary` of a `GraphDef`. We already validated `NodeDef`s from a `GraphDef` but missed validating those from the `FunctionDefLibrary`. Thus, some maliciously crafted models could evade detection and cause denial of service due to a `CHECK`-fail. PiperOrigin-RevId: 332536309 Change-Id: I052efe919ff1fe2f90815e286a1aa4c54c7b94ff'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: void Compute(OpKernelContext* context) override { const Tensor& tensor_in = context->input(0); const Tensor& grad_in = context->input(1); const Tensor& argmax = context->input(2); PoolParameters params{context, ksize_, stride_, padding_, /*explicit_paddings=*/{}, FORMAT_NHWC, tensor_in.shape()}; if (!context->status().ok()) { return; } TensorShape out_shape({params.tensor_in_batch, params.tensor_in_rows, params.tensor_in_cols, params.depth}); Tensor* grad_out = nullptr; OP_REQUIRES_OK(context, context->forward_input_or_allocate_output( {0}, 0, out_shape, &grad_out)); LaunchMaxPoolingGradWithArgmax<Device, T>::launch( context, params, grad_in, argmax, grad_out, include_batch_in_index_); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-369'], 'message': 'Don't do any work if output tensor is null (prevent div by 0) PiperOrigin-RevId: 372208700 Change-Id: Iea6b6293e887ade8538facfdb50fb931e17f511e'</s>
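The fix message here is "don't do any work if the output tensor is null (prevent div by 0)". The sketch below shows that early-out guard in plain C with hypothetical names; it is not the TensorFlow kernel itself.

#include <stddef.h>

/* Bail out before touching the output when it was never allocated, and
 * refuse to proceed when a later divisor would be zero. */
static int pool_grad_launch(float *grad_out, size_t out_elems, size_t window)
{
    if (grad_out == NULL || out_elems == 0)
        return 0;                   /* nothing to do; avoids a null deref */
    if (window == 0)
        return -1;                  /* would divide by zero below */
    for (size_t i = 0; i < out_elems; i++)
        grad_out[i] /= (float)window;   /* safe: window proven nonzero */
    return 0;
}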
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: void Compute(OpKernelContext* context) override { // Here's the basic idea: // Batch and depth dimension are independent from row and col dimension. And // because FractionalAvgPool currently only support pooling along row and // col, we can basically think of this 4D tensor backpropagation as // operation of a series of 2D planes. // // For each element of a 'slice' (2D plane) of output_backprop, we need to // figure out its contributors when doing FractionalAvgPool operation. This // can be done based on row_pooling_sequence, col_pooling_seq and // overlapping. // Once we figure out the original contributors, we just need to evenly // divide the value of this element among these contributors. // // Internally, we divide the out_backprop tensor and store it in a temporary // tensor of double type. And cast it to the corresponding type. typedef Eigen::Map<const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>> ConstEigenMatrixMap; typedef Eigen::Map<Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic>> EigenDoubleMatrixMap; // Grab the inputs. const Tensor& orig_input_tensor_shape = context->input(0); OP_REQUIRES(context, orig_input_tensor_shape.dims() == 1 && orig_input_tensor_shape.NumElements() == 4, errors::InvalidArgument("original input tensor shape must be" "1-dimensional and 4 elements")); const Tensor& out_backprop = context->input(1); const Tensor& row_seq_tensor = context->input(2); const Tensor& col_seq_tensor = context->input(3); const int64 out_batch = out_backprop.dim_size(0); const int64 out_rows = out_backprop.dim_size(1); const int64 out_cols = out_backprop.dim_size(2); const int64 out_depth = out_backprop.dim_size(3); auto row_seq_tensor_flat = row_seq_tensor.flat<int64>(); auto col_seq_tensor_flat = col_seq_tensor.flat<int64>(); auto orig_input_tensor_shape_flat = orig_input_tensor_shape.flat<int64>(); const int64 in_batch = orig_input_tensor_shape_flat(0); const int64 in_rows = orig_input_tensor_shape_flat(1); const int64 in_cols = orig_input_tensor_shape_flat(2); const int64 in_depth = orig_input_tensor_shape_flat(3); constexpr int tensor_in_and_out_dims = 4; // Transform orig_input_tensor_shape into TensorShape TensorShape in_shape; for (auto i = 0; i < tensor_in_and_out_dims; ++i) { in_shape.AddDim(orig_input_tensor_shape_flat(i)); } // Create intermediate in_backprop. Tensor in_backprop_tensor_temp; OP_REQUIRES_OK(context, context->forward_input_or_allocate_temp( {0}, DataTypeToEnum<double>::v(), in_shape, &in_backprop_tensor_temp)); in_backprop_tensor_temp.flat<double>().setZero(); // Transform 4D tensor to 2D matrix. EigenDoubleMatrixMap in_backprop_tensor_temp_mat( in_backprop_tensor_temp.flat<double>().data(), in_depth, in_cols * in_rows * in_batch); ConstEigenMatrixMap out_backprop_mat(out_backprop.flat<T>().data(), out_depth, out_cols * out_rows * out_batch); // Loop through each element of out_backprop and evenly distribute the // element to the corresponding pooling cell. const int64 in_max_row_index = in_rows - 1; const int64 in_max_col_index = in_cols - 1; for (int64 b = 0; b < out_batch; ++b) { for (int64 r = 0; r < out_rows; ++r) { const int64 in_row_start = row_seq_tensor_flat(r); int64 in_row_end = overlapping_ ? 
row_seq_tensor_flat(r + 1) : row_seq_tensor_flat(r + 1) - 1; in_row_end = std::min(in_row_end, in_max_row_index); for (int64 c = 0; c < out_cols; ++c) { const int64 in_col_start = col_seq_tensor_flat(c); int64 in_col_end = overlapping_ ? col_seq_tensor_flat(c + 1) : col_seq_tensor_flat(c + 1) - 1; in_col_end = std::min(in_col_end, in_max_col_index); const int64 num_elements_in_pooling_cell = (in_row_end - in_row_start + 1) * (in_col_end - in_col_start + 1); const int64 out_index = (b * out_rows + r) * out_cols + c; // Now we can evenly distribute out_backprop(b, h, w, *) to // in_backprop(b, hs:he, ws:we, *). for (int64 in_r = in_row_start; in_r <= in_row_end; ++in_r) { for (int64 in_c = in_col_start; in_c <= in_col_end; ++in_c) { const int64 in_index = (b * in_rows + in_r) * in_cols + in_c; // Walk through each channel (depth). for (int64 d = 0; d < out_depth; ++d) { const double out_backprop_element = static_cast<double>( out_backprop_mat.coeffRef(d, out_index)); double& in_backprop_ref = in_backprop_tensor_temp_mat.coeffRef(d, in_index); in_backprop_ref += out_backprop_element / num_elements_in_pooling_cell; } } } } } } // Depending on the type, cast double to type T. Tensor* in_backprop_tensor = nullptr; OP_REQUIRES_OK(context, context->forward_input_or_allocate_output( {0}, 0, in_shape, &in_backprop_tensor)); auto in_backprop_tensor_flat = in_backprop_tensor->flat<T>(); auto in_backprop_tensor_temp_flat = in_backprop_tensor_temp.flat<double>(); for (int64 i = 0; i < in_backprop_tensor_flat.size(); ++i) { in_backprop_tensor_flat(i) = static_cast<T>(in_backprop_tensor_temp_flat(i)); } } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-369', 'CWE-787'], 'message': 'Validate inputs of `FractionalAvgPoolGrad`. PiperOrigin-RevId: 372420640 Change-Id: Icc583928e6cdc3062e12498e4d2337a8fe3da016'</s>
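The fix message for this row says the inputs of FractionalAvgPoolGrad must be validated; in the code above the row/col pooling sequences are used directly as indices into the input plane. The generic C sketch below shows the kind of check implied: sequence values must stay within the input extent and must not decrease. Names are hypothetical; this is not the TensorFlow patch.

#include <stdint.h>
#include <stddef.h>

/* Reject a caller-supplied pooling sequence whose entries would run past
 * the input dimension or step backwards. */
static int validate_pooling_seq(const int64_t *seq, size_t len, int64_t in_size)
{
    if (seq == NULL || len == 0)
        return -1;
    for (size_t i = 0; i < len; i++) {
        if (seq[i] < 0 || seq[i] > in_size)
            return -1;               /* index would run out of bounds */
        if (i > 0 && seq[i] < seq[i - 1])
            return -1;               /* sequence must be nondecreasing */
    }
    return 0;
}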
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: static int msg_parse_fetch(struct ImapHeader *h, char *s) { char tmp[SHORT_STRING]; char *ptmp = NULL; if (!s) return -1; while (*s) { SKIPWS(s); if (mutt_str_strncasecmp("FLAGS", s, 5) == 0) { s = msg_parse_flags(h, s); if (!s) return -1; } else if (mutt_str_strncasecmp("UID", s, 3) == 0) { s += 3; SKIPWS(s); if (mutt_str_atoui(s, &h->data->uid) < 0) return -1; s = imap_next_word(s); } else if (mutt_str_strncasecmp("INTERNALDATE", s, 12) == 0) { s += 12; SKIPWS(s); if (*s != '\"') { mutt_debug(1, "bogus INTERNALDATE entry: %s\n", s); return -1; } s++; ptmp = tmp; while (*s && *s != '\"') *ptmp++ = *s++; if (*s != '\"') return -1; s++; /* skip past the trailing " */ *ptmp = '\0'; h->received = mutt_date_parse_imap(tmp); } else if (mutt_str_strncasecmp("RFC822.SIZE", s, 11) == 0) { s += 11; SKIPWS(s); ptmp = tmp; while (isdigit((unsigned char) *s)) *ptmp++ = *s++; *ptmp = '\0'; if (mutt_str_atol(tmp, &h->content_length) < 0) return -1; } else if ((mutt_str_strncasecmp("BODY", s, 4) == 0) || (mutt_str_strncasecmp("RFC822.HEADER", s, 13) == 0)) { /* handle above, in msg_fetch_header */ return -2; } else if (*s == ')') s++; /* end of request */ else if (*s) { /* got something i don't understand */ imap_error("msg_parse_fetch", s); return -1; } } return 0; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-119', 'CWE-787'], 'message': 'Don't overflow stack buffer in msg_parse_fetch'</s>
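The overflow the fix message refers to is the unbounded copy loop (while (*s && *s != '"') *ptmp++ = *s++;) into the fixed tmp[SHORT_STRING] buffer. A minimal C sketch of the bounded-copy fix follows; it is illustrative, not Mutt's actual patch.

#include <stddef.h>

/* Copy a quoted token into a fixed buffer only while room remains,
 * failing instead of writing past the end. Returns the position after
 * the copied bytes, or NULL if the token would not fit. */
static const char *copy_quoted(const char *s, char *dst, size_t dstlen)
{
    size_t n = 0;

    while (*s && *s != '"') {
        if (n + 1 >= dstlen)        /* keep space for the terminating NUL */
            return NULL;            /* token too long: fail, don't overflow */
        dst[n++] = *s++;
    }
    dst[n] = '\0';
    return s;                       /* caller still checks the closing quote */
}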
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: bool extract_sockaddr(struct pool *pool, char *url) { char *url_begin, *url_end, *ipv6_begin, *ipv6_end, *port_start = NULL; char url_address[256], port[6]; int url_len, port_len = 0; url_begin = strstr(url, "//"); if (!url_begin) url_begin = url; else url_begin += 2; /* Look for numeric ipv6 entries */ ipv6_begin = strstr(url_begin, "["); ipv6_end = strstr(url_begin, "]"); if (ipv6_begin && ipv6_end && ipv6_end > ipv6_begin) url_end = strstr(ipv6_end, ":"); else url_end = strstr(url_begin, ":"); if (url_end) { url_len = url_end - url_begin; port_len = strlen(url_begin) - url_len - 1; if (port_len < 1) return false; port_start = url_end + 1; } else url_len = strlen(url_begin); if (url_len < 1) return false; sprintf(url_address, "%.*s", url_len, url_begin); if (port_len) snprintf(port, 6, "%.*s", port_len, port_start); else strcpy(port, "80"); free(pool->stratum_port); pool->stratum_port = strdup(port); free(pool->sockaddr_url); pool->sockaddr_url = strdup(url_address); return true; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-119', 'CWE-787'], 'message': 'Stratum: extract_sockaddr: Truncate overlong addresses rather than stack overflow Thanks to Mick Ayzenberg <mick@dejavusecurity.com> for finding this!'</s>
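Here the overflow is the sprintf of a caller-controlled URL into url_address[256]; the fix message says overlong addresses should be truncated rather than allowed to smash the stack. A small C sketch of that idea, assuming the buffer and length variables from the snippet:

#include <stdio.h>
#include <stddef.h>

/* Bound the copy to the destination size; snprintf NUL-terminates and
 * silently truncates an overlong host part instead of overflowing. */
static void copy_host(char *dst, size_t dstlen, const char *begin, size_t len)
{
    snprintf(dst, dstlen, "%.*s", (int)len, begin);
}

/* usage sketch:
 *   char url_address[256];
 *   copy_host(url_address, sizeof(url_address), url_begin, (size_t)url_len);
 */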
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: struct tcp_sock_t *tcp_open(uint16_t port) { struct tcp_sock_t *this = calloc(1, sizeof *this); if (this == NULL) { ERR("callocing this failed"); goto error; } // Open [S]ocket [D]escriptor this->sd = -1; this->sd = socket(AF_INET6, SOCK_STREAM, 0); if (this->sd < 0) { ERR("sockect open failed"); goto error; } // Configure socket params struct sockaddr_in6 addr; memset(&addr, 0, sizeof addr); addr.sin6_family = AF_INET6; addr.sin6_port = htons(port); addr.sin6_addr = in6addr_any; // Bind to localhost if (bind(this->sd, (struct sockaddr *)&addr, sizeof addr) < 0) { if (g_options.only_desired_port == 1) ERR("Bind on port failed. " "Requested port may be taken or require root permissions."); goto error; } // Let kernel over-accept max number of connections if (listen(this->sd, HTTP_MAX_PENDING_CONNS) < 0) { ERR("listen failed on socket"); goto error; } return this; error: if (this != NULL) { if (this->sd != -1) { close(this->sd); } free(this); } return NULL; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-284', 'CWE-264'], 'message': 'SECURITY FIX: Actually restrict the access to the printer to localhost Before, any machine in any network connected by any of the interfaces (as listed by "ifconfig") could access to an IPP-over-USB printer on the assigned port, allowing users on remote machines to print and to access the web configuration interface of a IPP-over-USB printer in contrary to conventional USB printers which are only accessible locally.'</s>
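The fix message says the printer port must actually be restricted to localhost; the code above binds to in6addr_any, which listens on every interface. The sketch below shows the loopback-only address setup that restriction implies; it is a hedged illustration, not the ippusbxd patch.

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdint.h>
#include <string.h>

/* Prepare a bind address limited to the IPv6 loopback (::1), so only
 * local clients can reach the listening socket. */
static void make_loopback_addr(struct sockaddr_in6 *addr, uint16_t port)
{
    memset(addr, 0, sizeof(*addr));
    addr->sin6_family = AF_INET6;
    addr->sin6_port   = htons(port);
    addr->sin6_addr   = in6addr_loopback;   /* ::1 only, not every interface */
}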
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: void Compute(OpKernelContext* ctx) override { const Tensor& in0 = ctx->input(0); const Tensor& in1 = ctx->input(1); auto in0_flat = in0.flat<Tin>(); auto in1_flat = in1.flat<Tin>(); const Device& eigen_device = ctx->eigen_device<Device>(); Tensor* out = nullptr; if (std::is_same<Tin, Tout>::value) { OP_REQUIRES_OK(ctx, ctx->forward_input_or_allocate_output( {0, 1}, 0, in0.shape(), &out)); } else { OP_REQUIRES_OK(ctx, ctx->allocate_output(0, in0.shape(), &out)); } auto out_flat = out->flat<Tout>(); functor::SimpleBinaryFunctor<Device, Functor>()(eigen_device, out_flat, in0_flat, in1_flat); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-476', 'CWE-787'], 'message': 'Fix nullptr deref and heap OOB access in binary cwise ops. PiperOrigin-RevId: 387936777 Change-Id: I608b8074cec36a982cca622b7144cb2c43e6e19f'</s>
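The fix message for this row is about a null dereference and heap out-of-bounds access in binary element-wise ops. The generic C sketch below shows the precondition such a kernel needs before indexing two operands in lockstep: both present and the same length. Names are hypothetical; this is not the TensorFlow code.

#include <stddef.h>

/* An element-wise binary kernel must verify its operands before the loop,
 * otherwise a missing or shorter input leads straight to OOB reads. */
static int elementwise_add(const float *a, size_t a_len,
                           const float *b, size_t b_len,
                           float *out, size_t out_len)
{
    if (a == NULL || b == NULL || out == NULL)
        return -1;                  /* missing operand: refuse to run */
    if (a_len != b_len || out_len != a_len)
        return -1;                  /* size mismatch would read/write OOB */
    for (size_t i = 0; i < a_len; i++)
        out[i] = a[i] + b[i];
    return 0;
}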
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: juniper_parse_header(netdissect_options *ndo, const u_char *p, const struct pcap_pkthdr *h, struct juniper_l2info_t *l2info) { const struct juniper_cookie_table_t *lp = juniper_cookie_table; u_int idx, jnx_ext_len, jnx_header_len = 0; uint8_t tlv_type,tlv_len; uint32_t control_word; int tlv_value; const u_char *tptr; l2info->header_len = 0; l2info->cookie_len = 0; l2info->proto = 0; l2info->length = h->len; l2info->caplen = h->caplen; ND_TCHECK2(p[0], 4); l2info->flags = p[3]; l2info->direction = p[3]&JUNIPER_BPF_PKT_IN; if (EXTRACT_24BITS(p) != JUNIPER_MGC_NUMBER) { /* magic number found ? */ ND_PRINT((ndo, "no magic-number found!")); return 0; } if (ndo->ndo_eflag) /* print direction */ ND_PRINT((ndo, "%3s ", tok2str(juniper_direction_values, "---", l2info->direction))); /* magic number + flags */ jnx_header_len = 4; if (ndo->ndo_vflag > 1) ND_PRINT((ndo, "\n\tJuniper PCAP Flags [%s]", bittok2str(jnx_flag_values, "none", l2info->flags))); /* extensions present ? - calculate how much bytes to skip */ if ((l2info->flags & JUNIPER_BPF_EXT ) == JUNIPER_BPF_EXT ) { tptr = p+jnx_header_len; /* ok to read extension length ? */ ND_TCHECK2(tptr[0], 2); jnx_ext_len = EXTRACT_16BITS(tptr); jnx_header_len += 2; tptr +=2; /* nail up the total length - * just in case something goes wrong * with TLV parsing */ jnx_header_len += jnx_ext_len; if (ndo->ndo_vflag > 1) ND_PRINT((ndo, ", PCAP Extension(s) total length %u", jnx_ext_len)); ND_TCHECK2(tptr[0], jnx_ext_len); while (jnx_ext_len > JUNIPER_EXT_TLV_OVERHEAD) { tlv_type = *(tptr++); tlv_len = *(tptr++); tlv_value = 0; /* sanity checks */ if (tlv_type == 0 || tlv_len == 0) break; if (tlv_len+JUNIPER_EXT_TLV_OVERHEAD > jnx_ext_len) goto trunc; if (ndo->ndo_vflag > 1) ND_PRINT((ndo, "\n\t %s Extension TLV #%u, length %u, value ", tok2str(jnx_ext_tlv_values,"Unknown",tlv_type), tlv_type, tlv_len)); tlv_value = juniper_read_tlv_value(tptr, tlv_type, tlv_len); switch (tlv_type) { case JUNIPER_EXT_TLV_IFD_NAME: /* FIXME */ break; case JUNIPER_EXT_TLV_IFD_MEDIATYPE: case JUNIPER_EXT_TLV_TTP_IFD_MEDIATYPE: if (tlv_value != -1) { if (ndo->ndo_vflag > 1) ND_PRINT((ndo, "%s (%u)", tok2str(juniper_ifmt_values, "Unknown", tlv_value), tlv_value)); } break; case JUNIPER_EXT_TLV_IFL_ENCAPS: case JUNIPER_EXT_TLV_TTP_IFL_ENCAPS: if (tlv_value != -1) { if (ndo->ndo_vflag > 1) ND_PRINT((ndo, "%s (%u)", tok2str(juniper_ifle_values, "Unknown", tlv_value), tlv_value)); } break; case JUNIPER_EXT_TLV_IFL_IDX: /* fall through */ case JUNIPER_EXT_TLV_IFL_UNIT: case JUNIPER_EXT_TLV_IFD_IDX: default: if (tlv_value != -1) { if (ndo->ndo_vflag > 1) ND_PRINT((ndo, "%u", tlv_value)); } break; } tptr+=tlv_len; jnx_ext_len -= tlv_len+JUNIPER_EXT_TLV_OVERHEAD; } if (ndo->ndo_vflag > 1) ND_PRINT((ndo, "\n\t-----original packet-----\n\t")); } if ((l2info->flags & JUNIPER_BPF_NO_L2 ) == JUNIPER_BPF_NO_L2 ) { if (ndo->ndo_eflag) ND_PRINT((ndo, "no-L2-hdr, ")); /* there is no link-layer present - * perform the v4/v6 heuristics * to figure out what it is */ ND_TCHECK2(p[jnx_header_len + 4], 1); if (ip_heuristic_guess(ndo, p + jnx_header_len + 4, l2info->length - (jnx_header_len + 4)) == 0) ND_PRINT((ndo, "no IP-hdr found!")); l2info->header_len=jnx_header_len+4; return 0; /* stop parsing the output further */ } l2info->header_len = jnx_header_len; 
p+=l2info->header_len; l2info->length -= l2info->header_len; l2info->caplen -= l2info->header_len; /* search through the cookie table and copy values matching for our PIC type */ ND_TCHECK(p[0]); while (lp->s != NULL) { if (lp->pictype == l2info->pictype) { l2info->cookie_len += lp->cookie_len; switch (p[0]) { case LS_COOKIE_ID: l2info->cookie_type = LS_COOKIE_ID; l2info->cookie_len += 2; break; case AS_COOKIE_ID: l2info->cookie_type = AS_COOKIE_ID; l2info->cookie_len = 8; break; default: l2info->bundle = l2info->cookie[0]; break; } #ifdef DLT_JUNIPER_MFR /* MFR child links don't carry cookies */ if (l2info->pictype == DLT_JUNIPER_MFR && (p[0] & MFR_BE_MASK) == MFR_BE_MASK) { l2info->cookie_len = 0; } #endif l2info->header_len += l2info->cookie_len; l2info->length -= l2info->cookie_len; l2info->caplen -= l2info->cookie_len; if (ndo->ndo_eflag) ND_PRINT((ndo, "%s-PIC, cookie-len %u", lp->s, l2info->cookie_len)); if (l2info->cookie_len > 0) { ND_TCHECK2(p[0], l2info->cookie_len); if (ndo->ndo_eflag) ND_PRINT((ndo, ", cookie 0x")); for (idx = 0; idx < l2info->cookie_len; idx++) { l2info->cookie[idx] = p[idx]; /* copy cookie data */ if (ndo->ndo_eflag) ND_PRINT((ndo, "%02x", p[idx])); } } if (ndo->ndo_eflag) ND_PRINT((ndo, ": ")); /* print demarc b/w L2/L3*/ l2info->proto = EXTRACT_16BITS(p+l2info->cookie_len); break; } ++lp; } p+=l2info->cookie_len; /* DLT_ specific parsing */ switch(l2info->pictype) { #ifdef DLT_JUNIPER_MLPPP case DLT_JUNIPER_MLPPP: switch (l2info->cookie_type) { case LS_COOKIE_ID: l2info->bundle = l2info->cookie[1]; break; case AS_COOKIE_ID: l2info->bundle = (EXTRACT_16BITS(&l2info->cookie[6])>>3)&0xfff; l2info->proto = (l2info->cookie[5])&JUNIPER_LSQ_L3_PROTO_MASK; break; default: l2info->bundle = l2info->cookie[0]; break; } break; #endif #ifdef DLT_JUNIPER_MLFR case DLT_JUNIPER_MLFR: switch (l2info->cookie_type) { case LS_COOKIE_ID: ND_TCHECK2(p[0], 2); l2info->bundle = l2info->cookie[1]; l2info->proto = EXTRACT_16BITS(p); l2info->header_len += 2; l2info->length -= 2; l2info->caplen -= 2; break; case AS_COOKIE_ID: l2info->bundle = (EXTRACT_16BITS(&l2info->cookie[6])>>3)&0xfff; l2info->proto = (l2info->cookie[5])&JUNIPER_LSQ_L3_PROTO_MASK; break; default: l2info->bundle = l2info->cookie[0]; l2info->header_len += 2; l2info->length -= 2; l2info->caplen -= 2; break; } break; #endif #ifdef DLT_JUNIPER_MFR case DLT_JUNIPER_MFR: switch (l2info->cookie_type) { case LS_COOKIE_ID: ND_TCHECK2(p[0], 2); l2info->bundle = l2info->cookie[1]; l2info->proto = EXTRACT_16BITS(p); l2info->header_len += 2; l2info->length -= 2; l2info->caplen -= 2; break; case AS_COOKIE_ID: l2info->bundle = (EXTRACT_16BITS(&l2info->cookie[6])>>3)&0xfff; l2info->proto = (l2info->cookie[5])&JUNIPER_LSQ_L3_PROTO_MASK; break; default: l2info->bundle = l2info->cookie[0]; break; } break; #endif #ifdef DLT_JUNIPER_ATM2 case DLT_JUNIPER_ATM2: ND_TCHECK2(p[0], 4); /* ATM cell relay control word present ? 
*/ if (l2info->cookie[7] & ATM2_PKT_TYPE_MASK) { control_word = EXTRACT_32BITS(p); /* some control word heuristics */ switch(control_word) { case 0: /* zero control word */ case 0x08000000: /* < JUNOS 7.4 control-word */ case 0x08380000: /* cntl word plus cell length (56) >= JUNOS 7.4*/ l2info->header_len += 4; break; default: break; } if (ndo->ndo_eflag) ND_PRINT((ndo, "control-word 0x%08x ", control_word)); } break; #endif #ifdef DLT_JUNIPER_GGSN case DLT_JUNIPER_GGSN: break; #endif #ifdef DLT_JUNIPER_ATM1 case DLT_JUNIPER_ATM1: break; #endif #ifdef DLT_JUNIPER_PPP case DLT_JUNIPER_PPP: break; #endif #ifdef DLT_JUNIPER_CHDLC case DLT_JUNIPER_CHDLC: break; #endif #ifdef DLT_JUNIPER_ETHER case DLT_JUNIPER_ETHER: break; #endif #ifdef DLT_JUNIPER_FRELAY case DLT_JUNIPER_FRELAY: break; #endif default: ND_PRINT((ndo, "Unknown Juniper DLT_ type %u: ", l2info->pictype)); break; } if (ndo->ndo_eflag > 1) ND_PRINT((ndo, "hlen %u, proto 0x%04x, ", l2info->header_len, l2info->proto)); return 1; /* everything went ok so far. continue parsing */ trunc: ND_PRINT((ndo, "[|juniper_hdr], length %u", h->len)); return 0; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-125'], 'message': 'CVE-2017-13004/Juniper: Add a bounds check. This fixes a buffer over-read discovered by Forcepoint's security researchers Otto Airamo & Antti Levomäki. Add tests using the capture files supplied by the reporter(s).'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *task_setup_data) { memset(task, 0, sizeof(*task)); atomic_set(&task->tk_count, 1); task->tk_flags = task_setup_data->flags; task->tk_ops = task_setup_data->callback_ops; task->tk_calldata = task_setup_data->callback_data; INIT_LIST_HEAD(&task->tk_task); /* Initialize retry counters */ task->tk_garb_retry = 2; task->tk_cred_retry = 2; task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW; task->tk_owner = current->tgid; /* Initialize workqueue for async tasks */ task->tk_workqueue = task_setup_data->workqueue; if (task->tk_ops->rpc_call_prepare != NULL) task->tk_action = rpc_prepare_task; /* starting timestamp */ task->tk_start = ktime_get(); dprintk("RPC: new task initialized, procpid %u\n", task_pid_nr(current)); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-400', 'CWE-399', 'CWE-703'], 'message': 'NLM: Don't hang forever on NLM unlock requests If the NLM daemon is killed on the NFS server, we can currently end up hanging forever on an 'unlock' request, instead of aborting. Basically, if the rpcbind request fails, or the server keeps returning garbage, we really want to quit instead of retrying. Tested-by: Vasily Averin <vvs@sw.ru> Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com> Cc: stable@kernel.org'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) { struct bpf_reg_state *regs = cur_regs(env); u8 opcode = BPF_OP(insn->code); int err; if (opcode == BPF_END || opcode == BPF_NEG) { if (opcode == BPF_NEG) { if (BPF_SRC(insn->code) != 0 || insn->src_reg != BPF_REG_0 || insn->off != 0 || insn->imm != 0) { verbose(env, "BPF_NEG uses reserved fields\n"); return -EINVAL; } } else { if (insn->src_reg != BPF_REG_0 || insn->off != 0 || (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) || BPF_CLASS(insn->code) == BPF_ALU64) { verbose(env, "BPF_END uses reserved fields\n"); return -EINVAL; } } /* check src operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; if (is_pointer_value(env, insn->dst_reg)) { verbose(env, "R%d pointer arithmetic prohibited\n", insn->dst_reg); return -EACCES; } /* check dest operand */ err = check_reg_arg(env, insn->dst_reg, DST_OP); if (err) return err; } else if (opcode == BPF_MOV) { if (BPF_SRC(insn->code) == BPF_X) { if (insn->imm != 0 || insn->off != 0) { verbose(env, "BPF_MOV uses reserved fields\n"); return -EINVAL; } /* check src operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; } else { if (insn->src_reg != BPF_REG_0 || insn->off != 0) { verbose(env, "BPF_MOV uses reserved fields\n"); return -EINVAL; } } /* check dest operand */ err = check_reg_arg(env, insn->dst_reg, DST_OP); if (err) return err; if (BPF_SRC(insn->code) == BPF_X) { if (BPF_CLASS(insn->code) == BPF_ALU64) { /* case: R1 = R2 * copy register state to dest reg */ regs[insn->dst_reg] = regs[insn->src_reg]; regs[insn->dst_reg].live |= REG_LIVE_WRITTEN; } else { /* R1 = (u32) R2 */ if (is_pointer_value(env, insn->src_reg)) { verbose(env, "R%d partial copy of pointer\n", insn->src_reg); return -EACCES; } mark_reg_unknown(env, regs, insn->dst_reg); /* high 32 bits are known zero. */ regs[insn->dst_reg].var_off = tnum_cast( regs[insn->dst_reg].var_off, 4); __update_reg_bounds(&regs[insn->dst_reg]); } } else { /* case: R = imm * remember the value we stored into this reg */ regs[insn->dst_reg].type = SCALAR_VALUE; if (BPF_CLASS(insn->code) == BPF_ALU64) { __mark_reg_known(regs + insn->dst_reg, insn->imm); } else { __mark_reg_known(regs + insn->dst_reg, (u32)insn->imm); } } } else if (opcode > BPF_END) { verbose(env, "invalid BPF_ALU opcode %x\n", opcode); return -EINVAL; } else { /* all other ALU ops: and, sub, xor, add, ... */ if (BPF_SRC(insn->code) == BPF_X) { if (insn->imm != 0 || insn->off != 0) { verbose(env, "BPF_ALU uses reserved fields\n"); return -EINVAL; } /* check src1 operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; } else { if (insn->src_reg != BPF_REG_0 || insn->off != 0) { verbose(env, "BPF_ALU uses reserved fields\n"); return -EINVAL; } } /* check src2 operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; if ((opcode == BPF_MOD || opcode == BPF_DIV) && BPF_SRC(insn->code) == BPF_K && insn->imm == 0) { verbose(env, "div by zero\n"); return -EINVAL; } if ((opcode == BPF_LSH || opcode == BPF_RSH || opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) { int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 
64 : 32; if (insn->imm < 0 || insn->imm >= size) { verbose(env, "invalid shift %d\n", insn->imm); return -EINVAL; } } /* check dest operand */ err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); if (err) return err; return adjust_reg_min_max_vals(env, insn); } return 0; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-119', 'CWE-787'], 'message': 'bpf: fix incorrect tracking of register size truncation Properly handle register truncation to a smaller size. The old code first mirrors the clearing of the high 32 bits in the bitwise tristate representation, which is correct. But then, it computes the new arithmetic bounds as the intersection between the old arithmetic bounds and the bounds resulting from the bitwise tristate representation. Therefore, when coerce_reg_to_32() is called on a number with bounds [0xffff'fff8, 0x1'0000'0007], the verifier computes [0xffff'fff8, 0xffff'ffff] as bounds of the truncated number. This is incorrect: The truncated number could also be in the range [0, 7], and no meaningful arithmetic bounds can be computed in that case apart from the obvious [0, 0xffff'ffff]. Starting with v4.14, this is exploitable by unprivileged users as long as the unprivileged_bpf_disabled sysctl isn't set. Debian assigned CVE-2017-16996 for this issue. v2: - flip the mask during arithmetic bounds calculation (Ben Hutchings) v3: - add CVE number (Ben Hutchings) Fixes: b03c9f9fdc37 ("bpf/verifier: track signed and unsigned min/max values") Signed-off-by: Jann Horn <jannh@google.com> Acked-by: Edward Cree <ecree@solarflare.com> Signed-off-by: Alexei Starovoitov <ast@kernel.org> Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: isakmp_rfc3948_print(netdissect_options *ndo, const u_char *bp, u_int length, const u_char *bp2) { if(length == 1 && bp[0]==0xff) { ND_PRINT((ndo, "isakmp-nat-keep-alive")); return; } if(length < 4) { goto trunc; } /* * see if this is an IKE packet */ if(bp[0]==0 && bp[1]==0 && bp[2]==0 && bp[3]==0) { ND_PRINT((ndo, "NONESP-encap: ")); isakmp_print(ndo, bp+4, length-4, bp2); return; } /* must be an ESP packet */ { int nh, enh, padlen; int advance; ND_PRINT((ndo, "UDP-encap: ")); advance = esp_print(ndo, bp, length, bp2, &enh, &padlen); if(advance <= 0) return; bp += advance; length -= advance + padlen; nh = enh & 0xff; ip_print_inner(ndo, bp, length, nh, bp2); return; } trunc: ND_PRINT((ndo,"[|isakmp]")); return; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-125'], 'message': 'CVE-2017-12896/ISAKMP: Do bounds checks in isakmp_rfc3948_print(). This fixes a buffer over-read discovered by Kamil Frankowicz. Add a test using the capture file supplied by the reporter(s).'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: static int decode_nal_unit(HEVCContext *s, const H2645NAL *nal) { HEVCLocalContext *lc = s->HEVClc; GetBitContext *gb = &lc->gb; int ctb_addr_ts, ret; *gb = nal->gb; s->nal_unit_type = nal->type; s->temporal_id = nal->temporal_id; switch (s->nal_unit_type) { case HEVC_NAL_VPS: ret = ff_hevc_decode_nal_vps(gb, s->avctx, &s->ps); if (ret < 0) goto fail; break; case HEVC_NAL_SPS: ret = ff_hevc_decode_nal_sps(gb, s->avctx, &s->ps, s->apply_defdispwin); if (ret < 0) goto fail; break; case HEVC_NAL_PPS: ret = ff_hevc_decode_nal_pps(gb, s->avctx, &s->ps); if (ret < 0) goto fail; break; case HEVC_NAL_SEI_PREFIX: case HEVC_NAL_SEI_SUFFIX: ret = ff_hevc_decode_nal_sei(gb, s->avctx, &s->sei, &s->ps, s->nal_unit_type); if (ret < 0) goto fail; break; case HEVC_NAL_TRAIL_R: case HEVC_NAL_TRAIL_N: case HEVC_NAL_TSA_N: case HEVC_NAL_TSA_R: case HEVC_NAL_STSA_N: case HEVC_NAL_STSA_R: case HEVC_NAL_BLA_W_LP: case HEVC_NAL_BLA_W_RADL: case HEVC_NAL_BLA_N_LP: case HEVC_NAL_IDR_W_RADL: case HEVC_NAL_IDR_N_LP: case HEVC_NAL_CRA_NUT: case HEVC_NAL_RADL_N: case HEVC_NAL_RADL_R: case HEVC_NAL_RASL_N: case HEVC_NAL_RASL_R: ret = hls_slice_header(s); if (ret < 0) return ret; if (s->sh.first_slice_in_pic_flag) { if (s->ref) { av_log(s->avctx, AV_LOG_ERROR, "Two slices reporting being the first in the same frame.\n"); goto fail; } if (s->max_ra == INT_MAX) { if (s->nal_unit_type == HEVC_NAL_CRA_NUT || IS_BLA(s)) { s->max_ra = s->poc; } else { if (IS_IDR(s)) s->max_ra = INT_MIN; } } if ((s->nal_unit_type == HEVC_NAL_RASL_R || s->nal_unit_type == HEVC_NAL_RASL_N) && s->poc <= s->max_ra) { s->is_decoded = 0; break; } else { if (s->nal_unit_type == HEVC_NAL_RASL_R && s->poc > s->max_ra) s->max_ra = INT_MIN; } ret = hevc_frame_start(s); if (ret < 0) return ret; } else if (!s->ref) { av_log(s->avctx, AV_LOG_ERROR, "First slice in a frame missing.\n"); goto fail; } if (s->nal_unit_type != s->first_nal_type) { av_log(s->avctx, AV_LOG_ERROR, "Non-matching NAL types of the VCL NALUs: %d %d\n", s->first_nal_type, s->nal_unit_type); return AVERROR_INVALIDDATA; } if (!s->sh.dependent_slice_segment_flag && s->sh.slice_type != HEVC_SLICE_I) { ret = ff_hevc_slice_rpl(s); if (ret < 0) { av_log(s->avctx, AV_LOG_WARNING, "Error constructing the reference lists for the current slice.\n"); goto fail; } } if (s->sh.first_slice_in_pic_flag && s->avctx->hwaccel) { ret = s->avctx->hwaccel->start_frame(s->avctx, NULL, 0); if (ret < 0) goto fail; } if (s->avctx->hwaccel) { ret = s->avctx->hwaccel->decode_slice(s->avctx, nal->raw_data, nal->raw_size); if (ret < 0) goto fail; } else { if (s->threads_number > 1 && s->sh.num_entry_point_offsets > 0) ctb_addr_ts = hls_slice_data_wpp(s, nal); else ctb_addr_ts = hls_slice_data(s); if (ctb_addr_ts >= (s->ps.sps->ctb_width * s->ps.sps->ctb_height)) { s->is_decoded = 1; } if (ctb_addr_ts < 0) { ret = ctb_addr_ts; goto fail; } } break; case HEVC_NAL_EOS_NUT: case HEVC_NAL_EOB_NUT: s->seq_decode = (s->seq_decode + 1) & 0xff; s->max_ra = INT_MAX; break; case HEVC_NAL_AUD: case HEVC_NAL_FD_NUT: break; default: av_log(s->avctx, AV_LOG_INFO, "Skipping NAL unit %d\n", s->nal_unit_type); } return 0; fail: if (s->avctx->err_recognition & AV_EF_EXPLODE) return ret; return 0; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': 
['CWE-476'], 'message': 'avcodec/hevcdec: Avoid only partly skiping duplicate first slices Fixes: NULL pointer dereference and out of array access Fixes: 13871/clusterfuzz-testcase-minimized-ffmpeg_AV_CODEC_ID_HEVC_fuzzer-5746167087890432 Fixes: 13845/clusterfuzz-testcase-minimized-ffmpeg_AV_CODEC_ID_HEVC_fuzzer-5650370728034304 This also fixes the return code for explode mode Found-by: continuous fuzzing process https://github.com/google/oss-fuzz/tree/master/projects/ffmpeg Reviewed-by: James Almer <jamrial@gmail.com> Signed-off-by: Michael Niedermayer <michael@niedermayer.cc> (cherry picked from commit 54655623a82632e7624714d7b2a3e039dc5faa7e) Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>'</s>
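The CWE-476 message above describes rejecting a duplicate "first slice in picture" outright instead of partially skipping it and continuing with half-initialized state. A small sketch of that control-flow shape, with hypothetical types and no relation to FFmpeg's actual structures:

#include <stdio.h>

/* Illustrative sketch: fail the whole unit on a duplicate first slice so
 * later code never runs against inconsistent decoder state. */
struct dec { int have_first_slice; };

static int handle_slice(struct dec *d, int first_slice_flag)
{
    if (first_slice_flag) {
        if (d->have_first_slice)
            return -1;            /* duplicate: reject the NAL outright */
        d->have_first_slice = 1;
    } else if (!d->have_first_slice) {
        return -1;                /* slice without a frame: also reject */
    }
    /* ... decode slice data ... */
    return 0;
}

int main(void)
{
    struct dec d = {0};
    int r1 = handle_slice(&d, 1);
    int r2 = handle_slice(&d, 1);
    printf("%d %d\n", r1, r2);    /* 0 -1: duplicate rejected */
    return 0;
}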
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: static inline int copy_regset_to_user(struct task_struct *target, const struct user_regset_view *view, unsigned int setno, unsigned int offset, unsigned int size, void __user *data) { const struct user_regset *regset = &view->regsets[setno]; if (!access_ok(VERIFY_WRITE, data, size)) return -EIO; return regset->get(target, regset, offset, size, NULL, data); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-476'], 'message': 'regset: Prevent null pointer reference on readonly regsets The regset common infrastructure assumed that regsets would always have .get and .set methods, but not necessarily .active methods. Unfortunately people have since written regsets without .set methods. Rather than putting in stub functions everywhere, handle regsets with null .get or .set methods explicitly. Signed-off-by: H. Peter Anvin <hpa@zytor.com> Reviewed-by: Oleg Nesterov <oleg@redhat.com> Acked-by: Roland McGrath <roland@hack.frob.com> Cc: <stable@vger.kernel.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>'</s>
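The regset fix amounts to checking the method pointer before calling through it. A userspace-style sketch of that guard (hypothetical struct, not the kernel's regset API):

#include <errno.h>
#include <stdio.h>

/* Illustrative sketch: refuse the operation when a regset has no .get
 * method instead of calling through a NULL function pointer. */
struct regset {
    int (*get)(void *dst, unsigned int size);
};

static int regset_read(const struct regset *rs, void *dst, unsigned int size)
{
    if (!rs->get)
        return -EOPNOTSUPP;       /* write-only regset: no NULL deref */
    return rs->get(dst, size);
}

int main(void)
{
    struct regset write_only = { .get = NULL };
    char buf[16];
    printf("%d\n", regset_read(&write_only, buf, sizeof buf)); /* -EOPNOTSUPP */
    return 0;
}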
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len) { struct sock *sk = sock->sk; struct rds_sock *rs = rds_sk_to_rs(sk); DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name); __be32 daddr; __be16 dport; struct rds_message *rm = NULL; struct rds_connection *conn; int ret = 0; int queued = 0, allocated_mr = 0; int nonblock = msg->msg_flags & MSG_DONTWAIT; long timeo = sock_sndtimeo(sk, nonblock); /* Mirror Linux UDP mirror of BSD error message compatibility */ /* XXX: Perhaps MSG_MORE someday */ if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_CMSG_COMPAT)) { ret = -EOPNOTSUPP; goto out; } if (msg->msg_namelen) { /* XXX fail non-unicast destination IPs? */ if (msg->msg_namelen < sizeof(*usin) || usin->sin_family != AF_INET) { ret = -EINVAL; goto out; } daddr = usin->sin_addr.s_addr; dport = usin->sin_port; } else { /* We only care about consistency with ->connect() */ lock_sock(sk); daddr = rs->rs_conn_addr; dport = rs->rs_conn_port; release_sock(sk); } /* racing with another thread binding seems ok here */ if (daddr == 0 || rs->rs_bound_addr == 0) { ret = -ENOTCONN; /* XXX not a great errno */ goto out; } if (payload_len > rds_sk_sndbuf(rs)) { ret = -EMSGSIZE; goto out; } /* size of rm including all sgs */ ret = rds_rm_size(msg, payload_len); if (ret < 0) goto out; rm = rds_message_alloc(ret, GFP_KERNEL); if (!rm) { ret = -ENOMEM; goto out; } /* Attach data to the rm */ if (payload_len) { rm->data.op_sg = rds_message_alloc_sgs(rm, ceil(payload_len, PAGE_SIZE)); if (!rm->data.op_sg) { ret = -ENOMEM; goto out; } ret = rds_message_copy_from_user(rm, &msg->msg_iter); if (ret) goto out; } rm->data.op_active = 1; rm->m_daddr = daddr; /* rds_conn_create has a spinlock that runs with IRQ off. * Caching the conn in the socket helps a lot. */ if (rs->rs_conn && rs->rs_conn->c_faddr == daddr) conn = rs->rs_conn; else { conn = rds_conn_create_outgoing(sock_net(sock->sk), rs->rs_bound_addr, daddr, rs->rs_transport, sock->sk->sk_allocation); if (IS_ERR(conn)) { ret = PTR_ERR(conn); goto out; } rs->rs_conn = conn; } /* Parse any control messages the user may have included. */ ret = rds_cmsg_send(rs, rm, msg, &allocated_mr); if (ret) goto out; if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) { printk_ratelimited(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n", &rm->rdma, conn->c_trans->xmit_rdma); ret = -EOPNOTSUPP; goto out; } if (rm->atomic.op_active && !conn->c_trans->xmit_atomic) { printk_ratelimited(KERN_NOTICE "atomic_op %p conn xmit_atomic %p\n", &rm->atomic, conn->c_trans->xmit_atomic); ret = -EOPNOTSUPP; goto out; } rds_conn_connect_if_down(conn); ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs); if (ret) { rs->rs_seen_congestion = 1; goto out; } while (!rds_send_queue_rm(rs, conn, rm, rs->rs_bound_port, dport, &queued)) { rds_stats_inc(s_send_queue_full); if (nonblock) { ret = -EAGAIN; goto out; } timeo = wait_event_interruptible_timeout(*sk_sleep(sk), rds_send_queue_rm(rs, conn, rm, rs->rs_bound_port, dport, &queued), timeo); rdsdebug("sendmsg woke queued %d timeo %ld\n", queued, timeo); if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT) continue; ret = timeo; if (ret == 0) ret = -ETIMEDOUT; goto out; } /* * By now we've committed to the send. We reuse rds_send_worker() * to retry sends in the rds thread if the transport asks us to. 
*/ rds_stats_inc(s_send_queued); ret = rds_send_xmit(conn); if (ret == -ENOMEM || ret == -EAGAIN) queue_delayed_work(rds_wq, &conn->c_send_w, 1); rds_message_put(rm); return payload_len; out: /* If the user included a RDMA_MAP cmsg, we allocated a MR on the fly. * If the sendmsg goes through, we keep the MR. If it fails with EAGAIN * or in any other way, we need to destroy the MR again */ if (allocated_mr) rds_rdma_unuse(rs, rds_rdma_cookie_key(rm->m_rdma_cookie), 1); if (rm) rds_message_put(rm); return ret; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-362'], 'message': 'RDS: fix race condition when sending a message on unbound socket Sasha's found a NULL pointer dereference in the RDS connection code when sending a message to an apparently unbound socket. The problem is caused by the code checking if the socket is bound in rds_sendmsg(), which checks the rs_bound_addr field without taking a lock on the socket. This opens a race where rs_bound_addr is temporarily set but where the transport is not in rds_bind(), leading to a NULL pointer dereference when trying to dereference 'trans' in __rds_conn_create(). Vegard wrote a reproducer for this issue, so kindly ask him to share if you're interested. I cannot reproduce the NULL pointer dereference using Vegard's reproducer with this patch, whereas I could without. Complete earlier incomplete fix to CVE-2015-6937: 74e98eb08588 ("RDS: verify the underlying transport exists before creating a connection") Cc: David S. Miller <davem@davemloft.net> Cc: stable@vger.kernel.org Reviewed-by: Vegard Nossum <vegard.nossum@oracle.com> Reviewed-by: Sasha Levin <sasha.levin@oracle.com> Acked-by: Santosh Shilimkar <santosh.shilimkar@oracle.com> Signed-off-by: Quentin Casasnovas <quentin.casasnovas@oracle.com> Signed-off-by: David S. Miller <davem@davemloft.net>'</s>
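The CWE-362 message above is about reading the socket's bound address without the lock that the bind path holds. A generic sketch of snapshotting that shared state under a lock; the structure and field names are made up for illustration:

#include <pthread.h>
#include <stdio.h>

/* Illustrative sketch: observe "bound" and its companion field under the
 * same lock the bind path takes, instead of peeking at them unlocked. */
struct sockstate {
    pthread_mutex_t lock;
    int bound;                 /* set by the bind path under lock */
    unsigned int bound_addr;
};

static int snapshot_bound_addr(struct sockstate *s, unsigned int *addr)
{
    int ok;
    pthread_mutex_lock(&s->lock);
    ok = s->bound;             /* both fields seen consistently w.r.t. bind */
    if (ok)
        *addr = s->bound_addr;
    pthread_mutex_unlock(&s->lock);
    return ok ? 0 : -1;        /* -1 plays the role of -ENOTCONN */
}

int main(void)
{
    struct sockstate s = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };
    unsigned int a;
    printf("%d\n", snapshot_bound_addr(&s, &a)); /* -1: not bound yet */
    return 0;
}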
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: void do_write_static() { is_writing = true; asio::write(adaptor_.socket(), buffers_); if (res.file_info.statResult == 0) { std::ifstream is(res.file_info.path.c_str(), std::ios::in | std::ios::binary); char buf[16384]; while (is.read(buf, sizeof(buf)).gcount() > 0) { std::vector<asio::const_buffer> buffers; buffers.push_back(asio::buffer(buf)); do_write_sync(buffers); } } is_writing = false; if (close_connection_) { adaptor_.shutdown_readwrite(); adaptor_.close(); CROW_LOG_DEBUG << this << " from write (static)"; check_destroy(); } res.end(); res.clear(); buffers_.clear(); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-416'], 'message': 'Prevent HTTP pipelining which Crow doesn't support.'</s>
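The Crow fix above simply refuses pipelined requests rather than reusing per-request state that may already be torn down (the CWE-416). A rough sketch of that policy in C, with an invented parser boundary; it is not Crow's code:

#include <stdio.h>
#include <string.h>

/* Illustrative sketch: if bytes remain after the first parsed request
 * (i.e. the client is pipelining), close the connection instead of
 * processing them against possibly-released state. */
static int handle_buffer(const char *buf, size_t len, size_t first_req_len)
{
    (void)buf;
    if (first_req_len < len)
        return -1;     /* pipelined data present: caller should close */
    /* ... handle exactly one request ... */
    return 0;
}

int main(void)
{
    const char *two = "GET / HTTP/1.1\r\n\r\nGET /a HTTP/1.1\r\n\r\n";
    printf("%d\n", handle_buffer(two, strlen(two), 18)); /* -1: refuse pipelining */
    return 0;
}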
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: GF_Err gf_isom_oinf_read_entry(void *entry, GF_BitStream *bs) { GF_OperatingPointsInformation* ptr = (GF_OperatingPointsInformation *)entry; u32 i, j, count; if (!ptr) return GF_BAD_PARAM; ptr->scalability_mask = gf_bs_read_u16(bs); gf_bs_read_int(bs, 2);//reserved count = gf_bs_read_int(bs, 6); for (i = 0; i < count; i++) { LHEVC_ProfileTierLevel *ptl; GF_SAFEALLOC(ptl, LHEVC_ProfileTierLevel); if (!ptl) return GF_OUT_OF_MEM; ptl->general_profile_space = gf_bs_read_int(bs, 2); ptl->general_tier_flag= gf_bs_read_int(bs, 1); ptl->general_profile_idc = gf_bs_read_int(bs, 5); ptl->general_profile_compatibility_flags = gf_bs_read_u32(bs); ptl->general_constraint_indicator_flags = gf_bs_read_long_int(bs, 48); ptl->general_level_idc = gf_bs_read_u8(bs); gf_list_add(ptr->profile_tier_levels, ptl); } count = gf_bs_read_u16(bs); for (i = 0; i < count; i++) { LHEVC_OperatingPoint *op; GF_SAFEALLOC(op, LHEVC_OperatingPoint); if (!op) return GF_OUT_OF_MEM; op->output_layer_set_idx = gf_bs_read_u16(bs); op->max_temporal_id = gf_bs_read_u8(bs); op->layer_count = gf_bs_read_u8(bs); for (j = 0; j < op->layer_count; j++) { op->layers_info[j].ptl_idx = gf_bs_read_u8(bs); op->layers_info[j].layer_id = gf_bs_read_int(bs, 6); op->layers_info[j].is_outputlayer = gf_bs_read_int(bs, 1) ? GF_TRUE : GF_FALSE; op->layers_info[j].is_alternate_outputlayer = gf_bs_read_int(bs, 1) ? GF_TRUE : GF_FALSE; } op->minPicWidth = gf_bs_read_u16(bs); op->minPicHeight = gf_bs_read_u16(bs); op->maxPicWidth = gf_bs_read_u16(bs); op->maxPicHeight = gf_bs_read_u16(bs); op->maxChromaFormat = gf_bs_read_int(bs, 2); op->maxBitDepth = gf_bs_read_int(bs, 3) + 8; gf_bs_read_int(bs, 1);//reserved op->frame_rate_info_flag = gf_bs_read_int(bs, 1) ? GF_TRUE : GF_FALSE; op->bit_rate_info_flag = gf_bs_read_int(bs, 1) ? GF_TRUE : GF_FALSE; if (op->frame_rate_info_flag) { op->avgFrameRate = gf_bs_read_u16(bs); gf_bs_read_int(bs, 6); //reserved op->constantFrameRate = gf_bs_read_int(bs, 2); } if (op->bit_rate_info_flag) { op->maxBitRate = gf_bs_read_u32(bs); op->avgBitRate = gf_bs_read_u32(bs); } gf_list_add(ptr->operating_points, op); } count = gf_bs_read_u8(bs); for (i = 0; i < count; i++) { LHEVC_DependentLayer *dep; GF_SAFEALLOC(dep, LHEVC_DependentLayer); if (!dep) return GF_OUT_OF_MEM; dep->dependent_layerID = gf_bs_read_u8(bs); dep->num_layers_dependent_on = gf_bs_read_u8(bs); for (j = 0; j < dep->num_layers_dependent_on; j++) dep->dependent_on_layerID[j] = gf_bs_read_u8(bs); for (j = 0; j < 16; j++) { if (ptr->scalability_mask & (1 << j)) dep->dimension_identifier[j] = gf_bs_read_u8(bs); } gf_list_add(ptr->dependency_layers, dep); } return GF_OK; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-119', 'CWE-787'], 'message': 'fix some exploitable overflows (#994, #997)'</s>
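The CWE-119/787 here comes from loop counts read straight from the bitstream and used to index fixed-size arrays (layers_info, dependent_on_layerID). A minimal sketch of the clamp the fix implies; the 64-entry capacity is an assumption for illustration, not GPAC's actual limit:

#include <stdio.h>
#include <stdint.h>

#define MAX_LAYERS 64   /* assumed capacity of the fixed-size destination */

/* Illustrative sketch: a count read from the file must be validated against
 * the destination array before the copy loop runs. */
static int read_layers(uint8_t *dst, unsigned dst_cap, unsigned count_from_file)
{
    if (count_from_file > dst_cap)
        return -1;                       /* corrupt/hostile input: refuse */
    for (unsigned i = 0; i < count_from_file; i++)
        dst[i] = (uint8_t)i;             /* stand-in for gf_bs_read_u8() */
    return 0;
}

int main(void)
{
    uint8_t layers[MAX_LAYERS];
    printf("%d\n", read_layers(layers, MAX_LAYERS, 255)); /* -1, no overflow */
    return 0;
}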
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: grub_ext2_read_block (grub_fshelp_node_t node, grub_disk_addr_t fileblock) { struct grub_ext2_data *data = node->data; struct grub_ext2_inode *inode = &node->inode; int blknr = -1; unsigned int blksz = EXT2_BLOCK_SIZE (data); int log2_blksz = LOG2_EXT2_BLOCK_SIZE (data); if (grub_le_to_cpu32(inode->flags) & EXT4_EXTENTS_FLAG) { #ifndef _MSC_VER char buf[EXT2_BLOCK_SIZE (data)]; #else char * buf = grub_malloc (EXT2_BLOCK_SIZE(data)); #endif struct grub_ext4_extent_header *leaf; struct grub_ext4_extent *ext; int i; leaf = grub_ext4_find_leaf (data, buf, (struct grub_ext4_extent_header *) inode->blocks.dir_blocks, fileblock); if (! leaf) { grub_error (GRUB_ERR_BAD_FS, "invalid extent"); return -1; } ext = (struct grub_ext4_extent *) (leaf + 1); for (i = 0; i < grub_le_to_cpu16 (leaf->entries); i++) { if (fileblock < grub_le_to_cpu32 (ext[i].block)) break; } if (--i >= 0) { fileblock -= grub_le_to_cpu32 (ext[i].block); if (fileblock >= grub_le_to_cpu16 (ext[i].len)) return 0; else { grub_disk_addr_t start; start = grub_le_to_cpu16 (ext[i].start_hi); start = (start << 32) + grub_le_to_cpu32 (ext[i].start); return fileblock + start; } } else { grub_error (GRUB_ERR_BAD_FS, "something wrong with extent"); return -1; } } /* Direct blocks. */ if (fileblock < INDIRECT_BLOCKS) blknr = grub_le_to_cpu32 (inode->blocks.dir_blocks[fileblock]); /* Indirect. */ else if (fileblock < INDIRECT_BLOCKS + blksz / 4) { grub_uint32_t *indir; indir = grub_malloc (blksz); if (! indir) return grub_errno; if (grub_disk_read (data->disk, ((grub_disk_addr_t) grub_le_to_cpu32 (inode->blocks.indir_block)) << log2_blksz, 0, blksz, indir)) return grub_errno; blknr = grub_le_to_cpu32 (indir[fileblock - INDIRECT_BLOCKS]); grub_free (indir); } /* Double indirect. */ else if (fileblock < (grub_disk_addr_t)(INDIRECT_BLOCKS + blksz / 4) \ * (grub_disk_addr_t)(blksz / 4 + 1)) { unsigned int perblock = blksz / 4; unsigned int rblock = fileblock - (INDIRECT_BLOCKS + blksz / 4); grub_uint32_t *indir; indir = grub_malloc (blksz); if (! indir) return grub_errno; if (grub_disk_read (data->disk, ((grub_disk_addr_t) grub_le_to_cpu32 (inode->blocks.double_indir_block)) << log2_blksz, 0, blksz, indir)) return grub_errno; if (grub_disk_read (data->disk, ((grub_disk_addr_t) grub_le_to_cpu32 (indir[rblock / perblock])) << log2_blksz, 0, blksz, indir)) return grub_errno; blknr = grub_le_to_cpu32 (indir[rblock % perblock]); grub_free (indir); } /* triple indirect. */ else { grub_error (GRUB_ERR_NOT_IMPLEMENTED_YET, "ext2fs doesn't support triple indirect blocks"); } return blknr; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-703', 'CWE-787'], 'message': 'Fix ext2 buffer overflow in r2_sbu_grub_memmove'</s>
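The GRUB ext2 message points at an unchecked memmove of on-disk data. A generic sketch of bounding an untrusted length against the destination before copying (names invented, not the actual r2_sbu_grub_memmove patch):

#include <stdio.h>
#include <string.h>

/* Illustrative sketch: validate a length taken from on-disk metadata
 * against the destination buffer before memmove/memcpy. */
static int safe_copy(char *dst, size_t dst_size, const char *src, size_t src_len)
{
    if (src_len > dst_size)
        return -1;                 /* untrusted length exceeds buffer: bail */
    memmove(dst, src, src_len);
    return 0;
}

int main(void)
{
    char name[8];
    const char disk_name[] = "way-too-long-filesystem-label";
    printf("%d\n", safe_copy(name, sizeof name, disk_name, sizeof disk_name)); /* -1 */
    return 0;
}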
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: ut64 MACH0_(get_main)(struct MACH0_(obj_t)* bin) { ut64 addr = 0LL; struct symbol_t *symbols; int i; if (!(symbols = MACH0_(get_symbols) (bin))) { return 0; } for (i = 0; !symbols[i].last; i++) { if (!strcmp (symbols[i].name, "_main")) { addr = symbols[i].addr; break; } } free (symbols); if (!addr && bin->main_cmd.cmd == LC_MAIN) { addr = bin->entry + bin->baddr; } if (!addr) { ut8 b[128]; ut64 entry = addr_to_offset(bin, bin->entry); // XXX: X86 only and hacky! if (entry > bin->size || entry + sizeof (b) > bin->size) return 0; i = r_buf_read_at (bin->b, entry, b, sizeof (b)); if (i < 1) { return 0; } for (i = 0; i < 64; i++) { if (b[i] == 0xe8 && !b[i+3] && !b[i+4]) { int delta = b[i+1] | (b[i+2] << 8) | (b[i+3] << 16) | (b[i+4] << 24); return bin->entry + i + 5 + delta; } } } return addr; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-416'], 'message': 'Fix null deref and uaf in mach0 parser'</s>
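The radare2 message names two hazards: a NULL dereference and a use-after-free in the Mach-O symbol walk. A standalone sketch of both guards, with hypothetical types that only mimic the shape of the code above:

#include <stdio.h>
#include <string.h>
#include <stdlib.h>

/* Illustrative sketch: (1) check a possibly-NULL name before strcmp,
 * (2) read what is needed out of the array before freeing it, never after. */
struct sym { const char *name; unsigned long addr; int last; };

static unsigned long find_main(struct sym *symbols)
{
    unsigned long addr = 0;
    if (!symbols)
        return 0;
    for (int i = 0; !symbols[i].last; i++) {
        if (symbols[i].name && !strcmp(symbols[i].name, "_main")) {
            addr = symbols[i].addr;      /* captured before free() */
            break;
        }
    }
    free(symbols);
    return addr;
}

int main(void)
{
    struct sym *syms = calloc(2, sizeof *syms);
    if (!syms)
        return 1;
    syms[0].name = NULL;                 /* a parser may leave names NULL */
    syms[1].last = 1;
    printf("%lu\n", find_main(syms));    /* 0, no crash */
    return 0;
}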
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: bool FontData::Bound(int32_t offset, int32_t length) { if (offset + length > Size() || offset < 0 || length < 0) return false; bound_offset_ += offset; bound_length_ = length; return true; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-703', 'CWE-189'], 'message': 'Check for integer overflow in sfntly::FontData::Bound(). Also delete dead code and cleanup some nits. This is cl/96914065.'</s>
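The weakness above is the classic "offset + length > size" check, where the addition itself can overflow. A C sketch of an overflow-free rewrite (the real sfntly fix is in C++; this just demonstrates the arithmetic):

#include <stdio.h>
#include <stdint.h>

/* Illustrative sketch: compare each operand against the remaining room so
 * no signed addition can wrap before the comparison. */
static int bound_ok(int32_t offset, int32_t length, int32_t size)
{
    if (offset < 0 || length < 0)
        return 0;
    if (offset > size || length > size - offset)   /* no overflow possible */
        return 0;
    return 1;
}

int main(void)
{
    printf("%d\n", bound_ok(INT32_MAX, 16, 1024));  /* 0: rejected, no wrap */
    printf("%d\n", bound_ok(8, 16, 1024));          /* 1: fits */
    return 0;
}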
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: static void opj_get_encoding_parameters(const opj_image_t *p_image, const opj_cp_t *p_cp, OPJ_UINT32 p_tileno, OPJ_INT32 * p_tx0, OPJ_INT32 * p_tx1, OPJ_INT32 * p_ty0, OPJ_INT32 * p_ty1, OPJ_UINT32 * p_dx_min, OPJ_UINT32 * p_dy_min, OPJ_UINT32 * p_max_prec, OPJ_UINT32 * p_max_res) { /* loop */ OPJ_UINT32 compno, resno; /* pointers */ const opj_tcp_t *l_tcp = 00; const opj_tccp_t * l_tccp = 00; const opj_image_comp_t * l_img_comp = 00; /* position in x and y of tile */ OPJ_UINT32 p, q; /* preconditions */ assert(p_cp != 00); assert(p_image != 00); assert(p_tileno < p_cp->tw * p_cp->th); /* initializations */ l_tcp = &p_cp->tcps [p_tileno]; l_img_comp = p_image->comps; l_tccp = l_tcp->tccps; /* here calculation of tx0, tx1, ty0, ty1, maxprec, dx and dy */ p = p_tileno % p_cp->tw; q = p_tileno / p_cp->tw; /* find extent of tile */ *p_tx0 = opj_int_max((OPJ_INT32)(p_cp->tx0 + p * p_cp->tdx), (OPJ_INT32)p_image->x0); *p_tx1 = opj_int_min((OPJ_INT32)(p_cp->tx0 + (p + 1) * p_cp->tdx), (OPJ_INT32)p_image->x1); *p_ty0 = opj_int_max((OPJ_INT32)(p_cp->ty0 + q * p_cp->tdy), (OPJ_INT32)p_image->y0); *p_ty1 = opj_int_min((OPJ_INT32)(p_cp->ty0 + (q + 1) * p_cp->tdy), (OPJ_INT32)p_image->y1); /* max precision is 0 (can only grow) */ *p_max_prec = 0; *p_max_res = 0; /* take the largest value for dx_min and dy_min */ *p_dx_min = 0x7fffffff; *p_dy_min = 0x7fffffff; for (compno = 0; compno < p_image->numcomps; ++compno) { /* arithmetic variables to calculate */ OPJ_UINT32 l_level_no; OPJ_INT32 l_rx0, l_ry0, l_rx1, l_ry1; OPJ_INT32 l_px0, l_py0, l_px1, py1; OPJ_UINT32 l_pdx, l_pdy; OPJ_UINT32 l_pw, l_ph; OPJ_UINT32 l_product; OPJ_INT32 l_tcx0, l_tcy0, l_tcx1, l_tcy1; l_tcx0 = opj_int_ceildiv(*p_tx0, (OPJ_INT32)l_img_comp->dx); l_tcy0 = opj_int_ceildiv(*p_ty0, (OPJ_INT32)l_img_comp->dy); l_tcx1 = opj_int_ceildiv(*p_tx1, (OPJ_INT32)l_img_comp->dx); l_tcy1 = opj_int_ceildiv(*p_ty1, (OPJ_INT32)l_img_comp->dy); if (l_tccp->numresolutions > *p_max_res) { *p_max_res = l_tccp->numresolutions; } /* use custom size for precincts */ for (resno = 0; resno < l_tccp->numresolutions; ++resno) { OPJ_UINT32 l_dx, l_dy; /* precinct width and height */ l_pdx = l_tccp->prcw[resno]; l_pdy = l_tccp->prch[resno]; l_dx = l_img_comp->dx * (1u << (l_pdx + l_tccp->numresolutions - 1 - resno)); l_dy = l_img_comp->dy * (1u << (l_pdy + l_tccp->numresolutions - 1 - resno)); /* take the minimum size for dx for each comp and resolution */ *p_dx_min = opj_uint_min(*p_dx_min, l_dx); *p_dy_min = opj_uint_min(*p_dy_min, l_dy); /* various calculations of extents */ l_level_no = l_tccp->numresolutions - 1 - resno; l_rx0 = opj_int_ceildivpow2(l_tcx0, (OPJ_INT32)l_level_no); l_ry0 = opj_int_ceildivpow2(l_tcy0, (OPJ_INT32)l_level_no); l_rx1 = opj_int_ceildivpow2(l_tcx1, (OPJ_INT32)l_level_no); l_ry1 = opj_int_ceildivpow2(l_tcy1, (OPJ_INT32)l_level_no); l_px0 = opj_int_floordivpow2(l_rx0, (OPJ_INT32)l_pdx) << l_pdx; l_py0 = opj_int_floordivpow2(l_ry0, (OPJ_INT32)l_pdy) << l_pdy; l_px1 = opj_int_ceildivpow2(l_rx1, (OPJ_INT32)l_pdx) << l_pdx; py1 = opj_int_ceildivpow2(l_ry1, (OPJ_INT32)l_pdy) << l_pdy; l_pw = (l_rx0 == l_rx1) ? 0 : (OPJ_UINT32)((l_px1 - l_px0) >> l_pdx); l_ph = (l_ry0 == l_ry1) ? 
0 : (OPJ_UINT32)((py1 - l_py0) >> l_pdy); l_product = l_pw * l_ph; /* update precision */ if (l_product > *p_max_prec) { *p_max_prec = l_product; } } ++l_img_comp; ++l_tccp; } } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-190'], 'message': '[OPENJP2] change the way to compute *p_tx0, *p_tx1, *p_ty0, *p_ty1 in function opj_get_encoding_parameters Signed-off-by: Young_X <YangX92@hotmail.com>'</s>
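The CWE-190 above concerns tile-extent arithmetic such as tx0 + p * tdx overflowing a 32-bit signed value. A sketch of doing the computation in a wider unsigned type and clamping; it only shows the idea, not OpenJPEG's revised formulas:

#include <stdio.h>
#include <stdint.h>

/* Illustrative sketch: 64-bit arithmetic plus clamping so the tile-grid
 * computation cannot wrap a 32-bit integer. */
static uint32_t tile_x0(uint32_t grid_x0, uint32_t p, uint32_t tdx, uint32_t img_x0)
{
    uint64_t v = (uint64_t)grid_x0 + (uint64_t)p * tdx;  /* cannot overflow */
    if (v < img_x0)
        v = img_x0;
    if (v > UINT32_MAX)
        v = UINT32_MAX;
    return (uint32_t)v;
}

int main(void)
{
    printf("%u\n", tile_x0(4000000000u, 10u, 500000000u, 0u)); /* clamped, no wrap */
    return 0;
}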
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: static ssize_t _epoll_readv( oe_fd_t* desc, const struct oe_iovec* iov, int iovcnt) { ssize_t ret = -1; epoll_t* file = _cast_epoll(desc); void* buf = NULL; size_t buf_size = 0; if (!file || (iovcnt && !iov) || iovcnt < 0 || iovcnt > OE_IOV_MAX) OE_RAISE_ERRNO(OE_EINVAL); /* Flatten the IO vector into contiguous heap memory. */ if (oe_iov_pack(iov, iovcnt, &buf, &buf_size) != 0) OE_RAISE_ERRNO(OE_ENOMEM); /* Call the host. */ if (oe_syscall_readv_ocall(&ret, file->host_fd, buf, iovcnt, buf_size) != OE_OK) { OE_RAISE_ERRNO(OE_EINVAL); } /* Synchronize data read with IO vector. */ if (oe_iov_sync(iov, iovcnt, buf, buf_size) != 0) OE_RAISE_ERRNO(OE_EINVAL); done: if (buf) oe_free(buf); return ret; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-200', 'CWE-552'], 'message': 'Merge pull request from GHSA-525h-wxcc-f66m Signed-off-by: Ming-Wei Shih <mishih@microsoft.com>'</s>
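This Open Enclave advisory (and the recvfrom and hostfs readv entries that follow it) share one pattern: sizes and lengths coming back from the untrusted host must be range-checked before the enclave copies data based on them. A generic sketch with invented names, not Open Enclave's API:

#include <stdio.h>
#include <string.h>

/* Illustrative sketch: a byte count (or address length) returned by the
 * untrusted host is validated against the enclave-side buffer before any
 * copy is performed. */
static long consume_host_result(char *dst, size_t dst_size,
                                const char *host_buf, long host_ret)
{
    if (host_ret < 0)
        return host_ret;                    /* host reported an error */
    if ((size_t)host_ret > dst_size)
        return -1;                          /* host lied about the size: reject */
    memcpy(dst, host_buf, (size_t)host_ret);
    return host_ret;
}

int main(void)
{
    char in[64], host[256] = {0};
    printf("%ld\n", consume_host_result(in, sizeof in, host, 4096)); /* -1 */
    return 0;
}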
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: static ssize_t _hostsock_recvfrom( oe_fd_t* sock_, void* buf, size_t count, int flags, const struct oe_sockaddr* src_addr, oe_socklen_t* addrlen) { ssize_t ret = -1; sock_t* sock = _cast_sock(sock_); oe_socklen_t addrlen_in = 0; oe_errno = 0; if (!sock || (count && !buf)) OE_RAISE_ERRNO(OE_EINVAL); if (addrlen) addrlen_in = *addrlen; if (oe_syscall_recvfrom_ocall( &ret, sock->host_fd, buf, count, flags, (struct oe_sockaddr*)src_addr, addrlen_in, addrlen) != OE_OK) { OE_RAISE_ERRNO(OE_EINVAL); } done: return ret; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-200', 'CWE-552'], 'message': 'Merge pull request from GHSA-525h-wxcc-f66m Signed-off-by: Ming-Wei Shih <mishih@microsoft.com>'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: static ssize_t _hostfs_readv( oe_fd_t* desc, const struct oe_iovec* iov, int iovcnt) { ssize_t ret = -1; file_t* file = _cast_file(desc); void* buf = NULL; size_t buf_size = 0; if (!file || (!iov && iovcnt) || iovcnt < 0 || iovcnt > OE_IOV_MAX) OE_RAISE_ERRNO(OE_EINVAL); /* Flatten the IO vector into contiguous heap memory. */ if (oe_iov_pack(iov, iovcnt, &buf, &buf_size) != 0) OE_RAISE_ERRNO(OE_ENOMEM); /* Call the host. */ if (oe_syscall_readv_ocall(&ret, file->host_fd, buf, iovcnt, buf_size) != OE_OK) { OE_RAISE_ERRNO(OE_EINVAL); } /* Synchronize data read with IO vector. */ if (ret > 0) { if (oe_iov_sync(iov, iovcnt, buf, buf_size) != 0) OE_RAISE_ERRNO(OE_EINVAL); } done: if (buf) oe_free(buf); return ret; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-200', 'CWE-552'], 'message': 'Merge pull request from GHSA-525h-wxcc-f66m Signed-off-by: Ming-Wei Shih <mishih@microsoft.com>'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: void Compute(OpKernelContext* context) override { // Get inputs const Tensor& input_tensor = context->input(0); const auto input_tensor_flat = input_tensor.flat<int32>(); const Tensor& input_splits = context->input(1); const auto input_splits_flat = input_splits.flat<SPLITS_TYPE>(); // Since we limit to a 2-D input (flat_values of rank 1 and a single splits // tensor), our output dimension will be 1 with it's size equal to the // number of splits (outer dimension or ragged tensor). TensorShape output_shape({input_splits.dim_size(0) - 1}); Tensor* output_tensor; OP_REQUIRES_OK(context, context->allocate_output("output", output_shape, &output_tensor)); auto output_tensor_flat = output_tensor->flat<tstring>(); // Use a single index over the flattened input values tensor. int idx = 0; // Loop through our split dimension to create a new string at each split. for (int i = 1; i < input_splits_flat.size(); ++i) { icu::UnicodeString unicode_string; icu::UnicodeStringAppendable appendable_unicode_string(unicode_string); for (; idx < input_splits_flat(i); ++idx) { int32 code_point = input_tensor_flat(idx); // Check for invalid code point if (!U_IS_UNICODE_CHAR(code_point)) { if (error_options_.error_on_malformatting) { context->CtxFailure(errors::InvalidArgument( "Code point is out of range for Unicode, or a noncharacter.")); return; } else if (!error_options_.elide_replacement) { code_point = error_options_.subst; } } appendable_unicode_string.appendCodePoint(code_point); } // Encode our string and save in the output. tstring result; Encode(encoding_, unicode_string, &result); output_tensor_flat(i - 1) = std::move(result); } } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-125', 'CWE-787'], 'message': 'Fix heap buffer overflow in tf.raw_ops.UnicodeEncode. PiperOrigin-RevId: 371717714 Change-Id: If33443b28f158e58078f1268f6b92f2728d219e0'</s>
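The UnicodeEncode overflow comes from trusting the ragged splits tensor as indices into the values buffer. A C sketch of the validation such a fix needs: splits start at 0, end at the number of values, and never decrease (illustrative, not TensorFlow's checker):

#include <stdio.h>

/* Illustrative sketch: validate a "splits" array before using its entries
 * to index the flat values buffer. */
static int splits_valid(const long *splits, int n_splits, long num_values)
{
    if (n_splits < 1 || splits[0] != 0 || splits[n_splits - 1] != num_values)
        return 0;
    for (int i = 1; i < n_splits; i++)
        if (splits[i] < splits[i - 1] || splits[i] > num_values)
            return 0;
    return 1;
}

int main(void)
{
    long bad[]  = {0, 10, 5};         /* middle split points past the end */
    long good[] = {0, 2, 5};
    printf("%d %d\n", splits_valid(bad, 3, 5), splits_valid(good, 3, 5)); /* 0 1 */
    return 0;
}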
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe, struct file *out, loff_t *ppos, size_t len, unsigned int flags) { unsigned nbuf; unsigned idx; struct pipe_buffer *bufs; struct fuse_copy_state cs; struct fuse_dev *fud; size_t rem; ssize_t ret; fud = fuse_get_dev(out); if (!fud) return -EPERM; pipe_lock(pipe); bufs = kvmalloc_array(pipe->nrbufs, sizeof(struct pipe_buffer), GFP_KERNEL); if (!bufs) { pipe_unlock(pipe); return -ENOMEM; } nbuf = 0; rem = 0; for (idx = 0; idx < pipe->nrbufs && rem < len; idx++) rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 1)].len; ret = -EINVAL; if (rem < len) { pipe_unlock(pipe); goto out; } rem = len; while (rem) { struct pipe_buffer *ibuf; struct pipe_buffer *obuf; BUG_ON(nbuf >= pipe->buffers); BUG_ON(!pipe->nrbufs); ibuf = &pipe->bufs[pipe->curbuf]; obuf = &bufs[nbuf]; if (rem >= ibuf->len) { *obuf = *ibuf; ibuf->ops = NULL; pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1); pipe->nrbufs--; } else { pipe_buf_get(pipe, ibuf); *obuf = *ibuf; obuf->flags &= ~PIPE_BUF_FLAG_GIFT; obuf->len = rem; ibuf->offset += obuf->len; ibuf->len -= obuf->len; } nbuf++; rem -= obuf->len; } pipe_unlock(pipe); fuse_copy_init(&cs, 0, NULL); cs.pipebufs = bufs; cs.nr_segs = nbuf; cs.pipe = pipe; if (flags & SPLICE_F_MOVE) cs.move_pages = 1; ret = fuse_dev_do_write(fud, &cs, len); pipe_lock(pipe); for (idx = 0; idx < nbuf; idx++) pipe_buf_release(pipe, &bufs[idx]); pipe_unlock(pipe); out: kvfree(bufs); return ret; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-416'], 'message': 'fs: prevent page refcount overflow in pipe_buf_get Change pipe_buf_get() to return a bool indicating whether it succeeded in raising the refcount of the page (if the thing in the pipe is a page). This removes another mechanism for overflowing the page refcount. All callers converted to handle a failure. Reported-by: Jann Horn <jannh@google.com> Signed-off-by: Matthew Wilcox <willy@infradead.org> Cc: stable@kernel.org Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>'</s>
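The fuse/pipe fix changes the "take a reference" helper to report failure instead of silently overflowing the page refcount, and makes callers bail out. A tiny sketch of that shape (hypothetical counter type, not the kernel's page refcounting):

#include <stdio.h>
#include <limits.h>
#include <stdbool.h>

/* Illustrative sketch: the reference-taking helper returns success/failure
 * and the caller must unwind on failure rather than proceed. */
struct page_ref { unsigned int count; };

static bool ref_get(struct page_ref *r)
{
    if (r->count >= UINT_MAX - 1)
        return false;              /* would overflow: refuse the reference */
    r->count++;
    return true;
}

int main(void)
{
    struct page_ref r = { UINT_MAX - 1 };
    if (!ref_get(&r))
        puts("reference refused, caller unwinds");  /* printed */
    return 0;
}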
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: Status ShapeRefiner::InferShapesForFunctionSubNode( const Node* node, InferenceContext* outer_context) { TF_RETURN_IF_ERROR(AddNodeInternal(node, outer_context)); InferenceContext* node_context = CHECK_NOTNULL(GetContext(node)); if (StringPiece(node->type_string()) == kArgOp) { // Handle special node: function input. // Shapes for these nodes are provided in the outer inference // context. int index; TF_RETURN_IF_ERROR(GetNodeAttr(AttrSlice(node->def()), "index", &index)); if (index < 0 || outer_context->num_inputs() <= index) { return errors::Internal( "Function instantiation included invalid input index: ", index, " not in [0, ", outer_context->num_inputs(), ")."); } // TODO(b/134547156): TEMPORARY WORKAROUND. If input shape handle is not set // in outer context, set _Arg node output shape to unknown. if (outer_context->input(index).SameHandle(ShapeHandle())) { VLOG(1) << "Function instantiation has undefined input shape at " << "index: " << index << " in the outer inference context."; node_context->set_output(0, node_context->UnknownShape()); } else { node_context->set_output(0, outer_context->input(index)); } auto* resource = outer_context->input_handle_shapes_and_types(index); if (resource) { node_context->set_output_handle_shapes_and_types(0, *resource); } } else if (StringPiece(node->type_string()) == kRetvalOp) { // Handle special node: function output. // Shapes inferred for these nodes go into the outer inference // context. int index; TF_RETURN_IF_ERROR(GetNodeAttr(AttrSlice(node->def()), "index", &index)); if (index < 0 || outer_context->num_outputs() <= index) { return errors::Internal( "Function instantiation included invalid output index: ", index, " not in [0, ", outer_context->num_outputs(), ")."); } // outer_context outlives node_context, therefore we need to create // a new shape handle owned by outer_context instead. ShapeHandle handle; TensorShapeProto proto; node_context->ShapeHandleToProto(node_context->input(0), &proto); TF_RETURN_IF_ERROR(outer_context->MakeShapeFromShapeProto(proto, &handle)); outer_context->set_output(index, handle); auto* resource = node_context->input_handle_shapes_and_types(0); if (resource) { outer_context->set_output_handle_shapes_and_types(index, *resource); } } return Status::OK(); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-416', 'CWE-369'], 'message': 'Fix segmentation fault in shape inference logic. When running shape functions, some functions (such as `MutableHashTableShape`) produce extra output information in the form of a `ShapeAndType` struct. The shapes embedded in this struct are owned by an inference context that is cleaned up almost immediately; if the upstream code attempts to access this shape information, it can trigger a segfault. `ShapeRefiner` is mitigating this for normal output shapes by cloning them (and thus putting the newly created shape under ownership of an inference context that will not die), but we were not doing the same for shapes and types. This commit fixes that by doing similar logic on output shapes and types. PiperOrigin-RevId: 384761124 Change-Id: I07c0c42d29dfbb55bfa13ec1f09ef825fb0a1a1d'</s>
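The shape-inference segfault is an ownership bug: handles pointing into an inference context that is destroyed almost immediately. A C sketch of the fix's idea, cloning data into the longer-lived owner instead of aliasing it (structures invented for illustration):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative sketch: the outer context gets its own copy of the data
 * instead of a pointer into an arena that is about to be torn down. */
struct ctx { char *shape; };

static char *clone_str(const char *s)
{
    char *p = malloc(strlen(s) + 1);
    if (p)
        strcpy(p, s);
    return p;
}

static void export_shape(struct ctx *outer, const char *inner_shape)
{
    outer->shape = clone_str(inner_shape);   /* clone, do not alias */
}

int main(void)
{
    struct ctx outer = {0};
    char *inner = clone_str("[2,3,4]");
    if (!inner)
        return 1;
    export_shape(&outer, inner);
    free(inner);                       /* inner context torn down */
    puts(outer.shape ? outer.shape : "(alloc failed)");  /* still valid */
    free(outer.shape);
    return 0;
}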
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: ext4_xattr_release_block(handle_t *handle, struct inode *inode, struct buffer_head *bh) { struct mb_cache_entry *ce = NULL; int error = 0; struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode); ce = mb_cache_entry_get(ext4_mb_cache, bh->b_bdev, bh->b_blocknr); BUFFER_TRACE(bh, "get_write_access"); error = ext4_journal_get_write_access(handle, bh); if (error) goto out; lock_buffer(bh); if (BHDR(bh)->h_refcount == cpu_to_le32(1)) { ea_bdebug(bh, "refcount now=0; freeing"); if (ce) mb_cache_entry_free(ce); get_bh(bh); unlock_buffer(bh); ext4_free_blocks(handle, inode, bh, 0, 1, EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET); } else { le32_add_cpu(&BHDR(bh)->h_refcount, -1); if (ce) mb_cache_entry_release(ce); /* * Beware of this ugliness: Releasing of xattr block references * from different inodes can race and so we have to protect * from a race where someone else frees the block (and releases * its journal_head) before we are done dirtying the buffer. In * nojournal mode this race is harmless and we actually cannot * call ext4_handle_dirty_xattr_block() with locked buffer as * that function can call sync_dirty_buffer() so for that case * we handle the dirtying after unlocking the buffer. */ if (ext4_handle_valid(handle)) error = ext4_handle_dirty_xattr_block(handle, inode, bh); unlock_buffer(bh); if (!ext4_handle_valid(handle)) error = ext4_handle_dirty_xattr_block(handle, inode, bh); if (IS_SYNC(inode)) ext4_handle_sync(handle); dquot_free_block(inode, EXT4_C2B(EXT4_SB(inode->i_sb), 1)); ea_bdebug(bh, "refcount now=%d; releasing", le32_to_cpu(BHDR(bh)->h_refcount)); } out: ext4_std_error(inode->i_sb, error); return; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-241', 'CWE-19'], 'message': 'ext4: convert to mbcache2 The conversion is generally straightforward. The only tricky part is that xattr block corresponding to found mbcache entry can get freed before we get buffer lock for that block. So we have to check whether the entry is still valid after getting buffer lock. Signed-off-by: Jan Kara <jack@suse.cz> Signed-off-by: Theodore Ts'o <tytso@mit.edu>'</s>
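The tricky part called out in the ext4/mbcache message is that a cache hit found without the buffer lock can refer to a block that is freed before the lock is taken. A generic re-validate-under-lock sketch, with invented fields standing in for the real buffer/cache state:

#include <pthread.h>
#include <stdio.h>

/* Illustrative sketch: after taking the lock, confirm the cached entry
 * still refers to the same object before modifying it. */
struct cached_block { pthread_mutex_t lock; int generation; };

static int use_block(struct cached_block *b, int seen_generation)
{
    pthread_mutex_lock(&b->lock);
    if (b->generation != seen_generation) {   /* block was recycled: bail */
        pthread_mutex_unlock(&b->lock);
        return -1;
    }
    /* ... safe to dirty/modify the block here ... */
    pthread_mutex_unlock(&b->lock);
    return 0;
}

int main(void)
{
    struct cached_block b = { PTHREAD_MUTEX_INITIALIZER, 2 };
    printf("%d\n", use_block(&b, 1));   /* -1: stale lookup rejected */
    return 0;
}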
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: void isor_reader_get_sample(ISOMChannel *ch) { GF_Err e; u32 sample_desc_index; if (ch->sample) return; if (ch->next_track) { ch->track = ch->next_track; ch->next_track = 0; } if (ch->to_init) { init_reader(ch); sample_desc_index = ch->last_sample_desc_index; } else if (ch->speed < 0) { if (ch->last_state == GF_EOS) { ch->sample = NULL; return; } if (ch->static_sample->IsRAP) { ch->last_rap_sample_time = ch->sample_time; } e = gf_isom_get_sample_for_movie_time(ch->owner->mov, ch->track, ch->sample_time + 1, &sample_desc_index, GF_ISOM_SEARCH_FORWARD, &ch->static_sample, &ch->sample_num, NULL); if ((e==GF_EOS) || (ch->static_sample->IsRAP)) { if (!ch->last_rap_sample_time) { e = GF_EOS; } else { e = gf_isom_get_sample_for_movie_time(ch->owner->mov, ch->track, ch->last_rap_sample_time - 1, &sample_desc_index, GF_ISOM_SEARCH_SYNC_BACKWARD, &ch->static_sample, &ch->sample_num, NULL); } } if (e) { if ((e==GF_EOS) && !ch->owner->frag_type) { ch->last_state = GF_EOS; } ch->sample = NULL; return; } ch->sample = ch->static_sample; if (ch->sample->DTS == ch->sample_time) { if (!ch->owner->frag_type) { ch->last_state = GF_EOS; } } if (ch->sample) { ch->sample_time = ch->sample->DTS; } } else if (ch->has_edit_list) { u32 prev_sample = ch->sample_num; e = gf_isom_get_sample_for_movie_time(ch->owner->mov, ch->track, ch->sample_time + 1, &sample_desc_index, GF_ISOM_SEARCH_FORWARD, &ch->static_sample, &ch->sample_num, &ch->sample_data_offset); if (e == GF_OK) { ch->sample = ch->static_sample; /*we are in forced seek mode: fetch all samples before the one matching the sample time*/ if (ch->edit_sync_frame) { ch->edit_sync_frame++; if (ch->edit_sync_frame < ch->sample_num) { ch->sample = gf_isom_get_sample_ex(ch->owner->mov, ch->track, ch->edit_sync_frame, &sample_desc_index, ch->static_sample, &ch->sample_data_offset); ch->sample->DTS = ch->sample_time; ch->sample->CTS_Offset = 0; } else { ch->edit_sync_frame = 0; if (ch->sample) ch->sample_time = ch->sample->DTS; } } else { /*if we get the same sample, figure out next interesting time (current sample + DTS gap to next sample should be a good bet)*/ if (prev_sample == ch->sample_num) { if (ch->owner->frag_type && (ch->sample_num==gf_isom_get_sample_count(ch->owner->mov, ch->track))) { ch->sample = NULL; } else { u32 sample_num = ch->sample_num ? 
ch->sample_num : 1; if (sample_num >= gf_isom_get_sample_count(ch->owner->mov, ch->track) ) { //e = GF_EOS; } else { u32 time_diff = gf_isom_get_sample_duration(ch->owner->mov, ch->track, sample_num); e = gf_isom_get_sample_for_movie_time(ch->owner->mov, ch->track, ch->sample_time + time_diff, &sample_desc_index, GF_ISOM_SEARCH_FORWARD, &ch->static_sample, &ch->sample_num, &ch->sample_data_offset); if (e==GF_OK) { if (ch->sample_num == prev_sample) { ch->sample_time += time_diff; ch->sample = NULL; return; } else { ch->sample = ch->static_sample; } } } } } /*we jumped to another segment - if RAP is needed look for closest rap in decoding order and force seek mode*/ if (ch->sample && !ch->sample->IsRAP && ch->has_rap && (ch->sample_num != prev_sample+1)) { GF_ISOSample *found = ch->static_sample; u32 samp_num = ch->sample_num; ch->sample = NULL; e = gf_isom_get_sample_for_movie_time(ch->owner->mov, ch->track, ch->sample_time + 1, &sample_desc_index, GF_ISOM_SEARCH_SYNC_BACKWARD, &ch->static_sample, &ch->sample_num, &ch->sample_data_offset); if (e == GF_OK) ch->sample = ch->static_sample; /*if no sync point in the past, use the first non-sync for the given time*/ if (!ch->sample || !ch->sample->data) { ch->sample = ch->static_sample = found; ch->sample_time = ch->sample->DTS; ch->sample_num = samp_num; } else { ch->sample = ch->static_sample; ch->edit_sync_frame = ch->sample_num; ch->sample->DTS = ch->sample_time; ch->sample->CTS_Offset = 0; } } else { if (ch->sample) ch->sample_time = ch->sample->DTS; } } } } else { Bool do_fetch = GF_TRUE; ch->sample_num++; if (ch->sap_only) { Bool is_rap = gf_isom_get_sample_sync(ch->owner->mov, ch->track, ch->sample_num); if (!is_rap) { GF_ISOSampleRollType roll_type; gf_isom_get_sample_rap_roll_info(ch->owner->mov, ch->track, ch->sample_num, &is_rap, &roll_type, NULL); if (roll_type) is_rap = GF_TRUE; } if (!is_rap) { do_fetch = GF_FALSE; } else if (ch->sap_only==2) { ch->sap_only = 0; } } if (do_fetch) { if (ch->owner->nodata) { ch->sample = gf_isom_get_sample_info_ex(ch->owner->mov, ch->track, ch->sample_num, &sample_desc_index, &ch->sample_data_offset, ch->static_sample); } else { ch->sample = gf_isom_get_sample_ex(ch->owner->mov, ch->track, ch->sample_num, &sample_desc_index, ch->static_sample, &ch->sample_data_offset); } /*if sync shadow / carousel RAP skip*/ if (ch->sample && (ch->sample->IsRAP==RAP_REDUNDANT)) { ch->sample = NULL; ch->sample_num++; isor_reader_get_sample(ch); return; } } } //check scalable track change if (ch->sample && ch->sample->IsRAP && ch->next_track) { ch->track = ch->next_track; ch->next_track = 0; ch->sample = NULL; isor_reader_get_sample(ch); return; } if (!ch->sample) { u32 sample_count = gf_isom_get_sample_count(ch->owner->mov, ch->track); ch->sample_data_offset = 0; /*incomplete file - check if we're still downloading or not*/ if (gf_isom_get_missing_bytes(ch->owner->mov, ch->track)) { ch->last_state = GF_ISOM_INCOMPLETE_FILE; if (ch->owner->mem_load_mode==2) ch->owner->force_fetch = GF_TRUE; if (!ch->owner->input_loaded) { ch->last_state = GF_OK; if (!ch->has_edit_list && ch->sample_num) ch->sample_num--; } else { if (ch->sample_num >= gf_isom_get_sample_count(ch->owner->mov, ch->track)) { ch->last_state = GF_EOS; } } } else if (!ch->sample_num || ((ch->speed >= 0) && (ch->sample_num >= sample_count)) || ((ch->speed < 0) && (ch->sample_time == gf_isom_get_current_tfdt(ch->owner->mov, ch->track) )) ) { if (ch->owner->frag_type==1) { /*if sample cannot be found and file is fragmented, rewind sample*/ if 
(ch->sample_num) ch->sample_num--; ch->last_state = GF_EOS; } else if (ch->last_state != GF_EOS) { GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[IsoMedia] Track #%d end of stream reached\n", ch->track)); ch->last_state = GF_EOS; if (ch->sample_num>sample_count) ch->sample_num = sample_count; } else { if (ch->sample_num>sample_count) ch->sample_num = sample_count; } } else { e = gf_isom_last_error(ch->owner->mov); GF_LOG((e==GF_ISOM_INCOMPLETE_FILE) ? GF_LOG_DEBUG : GF_LOG_WARNING, GF_LOG_DASH, ("[IsoMedia] Track #%d fail to fetch sample %d / %d: %s\n", ch->track, ch->sample_num, gf_isom_get_sample_count(ch->owner->mov, ch->track), gf_error_to_string(e) )); } return; } if (sample_desc_index != ch->last_sample_desc_index) { if (!ch->owner->stsd) { //we used sample entry 1 by default to setup, if no active prev sample (edit list might trigger this) //and new sample desc is 1, do not reconfigure if (!ch->last_sample_desc_index && (sample_desc_index==1)) { } else { ch->needs_pid_reconfig = GF_TRUE; } } ch->last_sample_desc_index = sample_desc_index; } ch->last_state = GF_OK; ch->au_duration = gf_isom_get_sample_duration(ch->owner->mov, ch->track, ch->sample_num); ch->sap_3 = GF_FALSE; ch->sap_4_type = 0; ch->roll = 0; ch->set_disc = ch->owner->clock_discontinuity ? 2 : 0; ch->owner->clock_discontinuity = 0; if (ch->sample) { gf_isom_get_sample_rap_roll_info(ch->owner->mov, ch->track, ch->sample_num, &ch->sap_3, &ch->sap_4_type, &ch->roll); /*still seeking or not ? 1- when speed is negative, the RAP found is "after" the seek point in playback order since we used backward RAP search: nothing to do 2- otherwise set DTS+CTS to start value */ if ((ch->speed < 0) || (ch->start <= ch->sample->DTS + ch->sample->CTS_Offset)) { ch->dts = ch->sample->DTS; ch->cts = ch->sample->DTS + ch->sample->CTS_Offset; ch->seek_flag = 0; } else { ch->cts = ch->start; ch->seek_flag = 1; ch->dts = ch->start; } if (ch->end && (ch->end < ch->sample->DTS + ch->sample->CTS_Offset + ch->au_duration)) { GF_LOG(GF_LOG_DEBUG, GF_LOG_DASH, ("[IsoMedia] End of Channel "LLD" (CTS "LLD")\n", ch->end, ch->sample->DTS + ch->sample->CTS_Offset)); ch->sample = NULL; ch->last_state = GF_EOS; ch->playing = 2; return; } } if (ch->owner->last_sender_ntp && ch->cts==ch->owner->cts_for_last_sender_ntp) { ch->sender_ntp = ch->owner->last_sender_ntp; ch->ntp_at_server_ntp = ch->owner->ntp_at_last_sender_ntp; } else if (ch->owner->last_sender_ntp && ch->dts==ch->owner->cts_for_last_sender_ntp) { ch->sender_ntp = ch->owner->last_sender_ntp; ch->ntp_at_server_ntp = ch->owner->ntp_at_last_sender_ntp; } else { ch->sender_ntp = ch->ntp_at_server_ntp = 0; } if (!ch->sample_num) return; gf_isom_get_sample_flags(ch->owner->mov, ch->track, ch->sample_num, &ch->isLeading, &ch->dependsOn, &ch->dependedOn, &ch->redundant); if (ch->is_encrypted) { /*in case of CENC: we write sample auxiliary information to slh->sai; its size is in saiz*/ if (gf_isom_is_cenc_media(ch->owner->mov, ch->track, ch->last_sample_desc_index)) { isor_update_cenc_info(ch, GF_FALSE); } else if (gf_isom_is_media_encrypted(ch->owner->mov, ch->track, ch->last_sample_desc_index)) { ch->pck_encrypted = GF_TRUE; } else { ch->pck_encrypted = GF_FALSE; } } if (ch->sample && ch->sample->nb_pack) ch->sample_num += ch->sample->nb_pack-1; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-703', 'CWE-835'], 'message': 'fixed #1876'</s>
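The CWE-835 above (GPAC #1876) is a reader that can keep skipping redundant samples forever on a malformed file. A small sketch of replacing open-ended "skip and recurse" with a loop bounded by the sample count; the reader structure is hypothetical:

#include <stdio.h>

/* Illustrative sketch: the skip loop is bounded by the sample count, so a
 * crafted file can only reach EOS, never an infinite loop. */
struct reader { unsigned sample_num, sample_count; };

static int fetch_next_non_redundant(struct reader *r, int (*is_redundant)(unsigned))
{
    while (r->sample_num < r->sample_count) {
        r->sample_num++;
        if (!is_redundant(r->sample_num))
            return 0;                 /* made progress, got a usable sample */
    }
    return -1;                        /* ran off the end: EOS, not a hang */
}

static int always_redundant(unsigned n) { (void)n; return 1; }

int main(void)
{
    struct reader r = { 0, 100 };
    printf("%d\n", fetch_next_non_redundant(&r, always_redundant)); /* -1 */
    return 0;
}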
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: mrb_vm_exec(mrb_state *mrb, const struct RProc *proc, const mrb_code *pc) { /* mrb_assert(MRB_PROC_CFUNC_P(proc)) */ const mrb_irep *irep = proc->body.irep; const mrb_pool_value *pool = irep->pool; const mrb_sym *syms = irep->syms; mrb_code insn; int ai = mrb_gc_arena_save(mrb); struct mrb_jmpbuf *prev_jmp = mrb->jmp; struct mrb_jmpbuf c_jmp; uint32_t a; uint16_t b; uint16_t c; mrb_sym mid; const struct mrb_irep_catch_handler *ch; #ifdef DIRECT_THREADED static const void * const optable[] = { #define OPCODE(x,_) &&L_OP_ ## x, #include "mruby/ops.h" #undef OPCODE }; #endif mrb_bool exc_catched = FALSE; RETRY_TRY_BLOCK: MRB_TRY(&c_jmp) { if (exc_catched) { exc_catched = FALSE; mrb_gc_arena_restore(mrb, ai); if (mrb->exc && mrb->exc->tt == MRB_TT_BREAK) goto L_BREAK; goto L_RAISE; } mrb->jmp = &c_jmp; mrb_vm_ci_proc_set(mrb->c->ci, proc); #define regs (mrb->c->ci->stack) INIT_DISPATCH { CASE(OP_NOP, Z) { /* do nothing */ NEXT; } CASE(OP_MOVE, BB) { regs[a] = regs[b]; NEXT; } CASE(OP_LOADL, BB) { switch (pool[b].tt) { /* number */ case IREP_TT_INT32: regs[a] = mrb_int_value(mrb, (mrb_int)pool[b].u.i32); break; case IREP_TT_INT64: #if defined(MRB_INT64) regs[a] = mrb_int_value(mrb, (mrb_int)pool[b].u.i64); break; #else #if defined(MRB_64BIT) if (INT32_MIN <= pool[b].u.i64 && pool[b].u.i64 <= INT32_MAX) { regs[a] = mrb_int_value(mrb, (mrb_int)pool[b].u.i64); break; } #endif goto L_INT_OVERFLOW; #endif case IREP_TT_BIGINT: #ifdef MRB_USE_BIGINT { const char *s = pool[b].u.str; regs[a] = mrb_bint_new_str(mrb, s+2, (mrb_int)s[0], (mrb_int)s[1]); } break; #else goto L_INT_OVERFLOW; #endif #ifndef MRB_NO_FLOAT case IREP_TT_FLOAT: regs[a] = mrb_float_value(mrb, pool[b].u.f); break; #endif default: /* should not happen (tt:string) */ regs[a] = mrb_nil_value(); break; } NEXT; } CASE(OP_LOADI, BB) { SET_FIXNUM_VALUE(regs[a], b); NEXT; } CASE(OP_LOADINEG, BB) { SET_FIXNUM_VALUE(regs[a], -b); NEXT; } CASE(OP_LOADI__1,B) goto L_LOADI; CASE(OP_LOADI_0,B) goto L_LOADI; CASE(OP_LOADI_1,B) goto L_LOADI; CASE(OP_LOADI_2,B) goto L_LOADI; CASE(OP_LOADI_3,B) goto L_LOADI; CASE(OP_LOADI_4,B) goto L_LOADI; CASE(OP_LOADI_5,B) goto L_LOADI; CASE(OP_LOADI_6,B) goto L_LOADI; CASE(OP_LOADI_7, B) { L_LOADI: SET_FIXNUM_VALUE(regs[a], (mrb_int)insn - (mrb_int)OP_LOADI_0); NEXT; } CASE(OP_LOADI16, BS) { SET_FIXNUM_VALUE(regs[a], (mrb_int)(int16_t)b); NEXT; } CASE(OP_LOADI32, BSS) { SET_INT_VALUE(mrb, regs[a], (int32_t)(((uint32_t)b<<16)+c)); NEXT; } CASE(OP_LOADSYM, BB) { SET_SYM_VALUE(regs[a], syms[b]); NEXT; } CASE(OP_LOADNIL, B) { SET_NIL_VALUE(regs[a]); NEXT; } CASE(OP_LOADSELF, B) { regs[a] = regs[0]; NEXT; } CASE(OP_LOADT, B) { SET_TRUE_VALUE(regs[a]); NEXT; } CASE(OP_LOADF, B) { SET_FALSE_VALUE(regs[a]); NEXT; } CASE(OP_GETGV, BB) { mrb_value val = mrb_gv_get(mrb, syms[b]); regs[a] = val; NEXT; } CASE(OP_SETGV, BB) { mrb_gv_set(mrb, syms[b], regs[a]); NEXT; } CASE(OP_GETSV, BB) { mrb_value val = mrb_vm_special_get(mrb, syms[b]); regs[a] = val; NEXT; } CASE(OP_SETSV, BB) { mrb_vm_special_set(mrb, syms[b], regs[a]); NEXT; } CASE(OP_GETIV, BB) { regs[a] = mrb_iv_get(mrb, regs[0], syms[b]); NEXT; } CASE(OP_SETIV, BB) { mrb_iv_set(mrb, regs[0], syms[b], regs[a]); NEXT; } CASE(OP_GETCV, BB) { mrb_value val; val = mrb_vm_cv_get(mrb, syms[b]); regs[a] = val; NEXT; } CASE(OP_SETCV, BB) { 
mrb_vm_cv_set(mrb, syms[b], regs[a]); NEXT; } CASE(OP_GETIDX, B) { mrb_value va = regs[a], vb = regs[a+1]; switch (mrb_type(va)) { case MRB_TT_ARRAY: if (!mrb_integer_p(vb)) goto getidx_fallback; regs[a] = mrb_ary_entry(va, mrb_integer(vb)); break; case MRB_TT_HASH: va = mrb_hash_get(mrb, va, vb); regs[a] = va; break; case MRB_TT_STRING: switch (mrb_type(vb)) { case MRB_TT_INTEGER: case MRB_TT_STRING: case MRB_TT_RANGE: va = mrb_str_aref(mrb, va, vb, mrb_undef_value()); regs[a] = va; break; default: goto getidx_fallback; } break; default: getidx_fallback: mid = MRB_OPSYM(aref); goto L_SEND_SYM; } NEXT; } CASE(OP_SETIDX, B) { c = 2; mid = MRB_OPSYM(aset); SET_NIL_VALUE(regs[a+3]); goto L_SENDB_SYM; } CASE(OP_GETCONST, BB) { mrb_value v = mrb_vm_const_get(mrb, syms[b]); regs[a] = v; NEXT; } CASE(OP_SETCONST, BB) { mrb_vm_const_set(mrb, syms[b], regs[a]); NEXT; } CASE(OP_GETMCNST, BB) { mrb_value v = mrb_const_get(mrb, regs[a], syms[b]); regs[a] = v; NEXT; } CASE(OP_SETMCNST, BB) { mrb_const_set(mrb, regs[a+1], syms[b], regs[a]); NEXT; } CASE(OP_GETUPVAR, BBB) { mrb_value *regs_a = regs + a; struct REnv *e = uvenv(mrb, c); if (e && b < MRB_ENV_LEN(e)) { *regs_a = e->stack[b]; } else { *regs_a = mrb_nil_value(); } NEXT; } CASE(OP_SETUPVAR, BBB) { struct REnv *e = uvenv(mrb, c); if (e) { mrb_value *regs_a = regs + a; if (b < MRB_ENV_LEN(e)) { e->stack[b] = *regs_a; mrb_write_barrier(mrb, (struct RBasic*)e); } } NEXT; } CASE(OP_JMP, S) { pc += (int16_t)a; JUMP; } CASE(OP_JMPIF, BS) { if (mrb_test(regs[a])) { pc += (int16_t)b; JUMP; } NEXT; } CASE(OP_JMPNOT, BS) { if (!mrb_test(regs[a])) { pc += (int16_t)b; JUMP; } NEXT; } CASE(OP_JMPNIL, BS) { if (mrb_nil_p(regs[a])) { pc += (int16_t)b; JUMP; } NEXT; } CASE(OP_JMPUW, S) { a = (uint32_t)((pc - irep->iseq) + (int16_t)a); CHECKPOINT_RESTORE(RBREAK_TAG_JUMP) { struct RBreak *brk = (struct RBreak*)mrb->exc; mrb_value target = mrb_break_value_get(brk); mrb_assert(mrb_integer_p(target)); a = (uint32_t)mrb_integer(target); mrb_assert(a >= 0 && a < irep->ilen); } CHECKPOINT_MAIN(RBREAK_TAG_JUMP) { ch = catch_handler_find(mrb, mrb->c->ci, pc, MRB_CATCH_FILTER_ENSURE); if (ch) { /* avoiding a jump from a catch handler into the same handler */ if (a < mrb_irep_catch_handler_unpack(ch->begin) || a >= mrb_irep_catch_handler_unpack(ch->end)) { THROW_TAGGED_BREAK(mrb, RBREAK_TAG_JUMP, proc, mrb_fixnum_value(a)); } } } CHECKPOINT_END(RBREAK_TAG_JUMP); mrb->exc = NULL; /* clear break object */ pc = irep->iseq + a; JUMP; } CASE(OP_EXCEPT, B) { mrb_value exc; if (mrb->exc == NULL) { exc = mrb_nil_value(); } else { switch (mrb->exc->tt) { case MRB_TT_BREAK: case MRB_TT_EXCEPTION: exc = mrb_obj_value(mrb->exc); break; default: mrb_assert(!"bad mrb_type"); exc = mrb_nil_value(); break; } mrb->exc = NULL; } regs[a] = exc; NEXT; } CASE(OP_RESCUE, BB) { mrb_value exc = regs[a]; /* exc on stack */ mrb_value e = regs[b]; struct RClass *ec; switch (mrb_type(e)) { case MRB_TT_CLASS: case MRB_TT_MODULE: break; default: { mrb_value exc; exc = mrb_exc_new_lit(mrb, E_TYPE_ERROR, "class or module required for rescue clause"); mrb_exc_set(mrb, exc); goto L_RAISE; } } ec = mrb_class_ptr(e); regs[b] = mrb_bool_value(mrb_obj_is_kind_of(mrb, exc, ec)); NEXT; } CASE(OP_RAISEIF, B) { mrb_value exc = regs[a]; if (mrb_break_p(exc)) { mrb->exc = mrb_obj_ptr(exc); goto L_BREAK; } mrb_exc_set(mrb, exc); if (mrb->exc) { goto L_RAISE; } NEXT; } CASE(OP_SSEND, BBB) { regs[a] = regs[0]; insn = OP_SEND; } goto L_SENDB; CASE(OP_SSENDB, BBB) { regs[a] = regs[0]; } goto L_SENDB; CASE(OP_SEND, BBB) 
goto L_SENDB; L_SEND_SYM: c = 1; /* push nil after arguments */ SET_NIL_VALUE(regs[a+2]); goto L_SENDB_SYM; CASE(OP_SENDB, BBB) L_SENDB: mid = syms[b]; L_SENDB_SYM: { mrb_callinfo *ci = mrb->c->ci; mrb_method_t m; struct RClass *cls; mrb_value recv, blk; ARGUMENT_NORMALIZE(a, &c, insn); recv = regs[a]; cls = mrb_class(mrb, recv); m = mrb_method_search_vm(mrb, &cls, mid); if (MRB_METHOD_UNDEF_P(m)) { m = prepare_missing(mrb, recv, mid, &cls, a, &c, blk, 0); mid = MRB_SYM(method_missing); } /* push callinfo */ ci = cipush(mrb, a, 0, cls, NULL, mid, c); if (MRB_METHOD_CFUNC_P(m)) { if (MRB_METHOD_PROC_P(m)) { struct RProc *p = MRB_METHOD_PROC(m); mrb_vm_ci_proc_set(ci, p); recv = p->body.func(mrb, recv); } else { if (MRB_METHOD_NOARG_P(m)) { check_method_noarg(mrb, ci); } recv = MRB_METHOD_FUNC(m)(mrb, recv); } mrb_gc_arena_shrink(mrb, ai); if (mrb->exc) goto L_RAISE; ci = mrb->c->ci; if (mrb_proc_p(blk)) { struct RProc *p = mrb_proc_ptr(blk); if (p && !MRB_PROC_STRICT_P(p) && MRB_PROC_ENV(p) == mrb_vm_ci_env(&ci[-1])) { p->flags |= MRB_PROC_ORPHAN; } } if (!ci->u.target_class) { /* return from context modifying method (resume/yield) */ if (ci->cci == CINFO_RESUMED) { mrb->jmp = prev_jmp; return recv; } else { mrb_assert(!MRB_PROC_CFUNC_P(ci[-1].proc)); proc = ci[-1].proc; irep = proc->body.irep; pool = irep->pool; syms = irep->syms; } } ci->stack[0] = recv; /* pop stackpos */ ci = cipop(mrb); pc = ci->pc; } else { /* setup environment for calling method */ mrb_vm_ci_proc_set(ci, (proc = MRB_METHOD_PROC(m))); irep = proc->body.irep; pool = irep->pool; syms = irep->syms; mrb_stack_extend(mrb, (irep->nregs < 4) ? 4 : irep->nregs); pc = irep->iseq; } } JUMP; CASE(OP_CALL, Z) { mrb_callinfo *ci = mrb->c->ci; mrb_value recv = ci->stack[0]; struct RProc *m = mrb_proc_ptr(recv); /* replace callinfo */ ci->u.target_class = MRB_PROC_TARGET_CLASS(m); mrb_vm_ci_proc_set(ci, m); if (MRB_PROC_ENV_P(m)) { ci->mid = MRB_PROC_ENV(m)->mid; } /* prepare stack */ if (MRB_PROC_CFUNC_P(m)) { recv = MRB_PROC_CFUNC(m)(mrb, recv); mrb_gc_arena_shrink(mrb, ai); if (mrb->exc) goto L_RAISE; /* pop stackpos */ ci = cipop(mrb); pc = ci->pc; ci[1].stack[0] = recv; irep = mrb->c->ci->proc->body.irep; } else { /* setup environment for calling method */ proc = m; irep = m->body.irep; if (!irep) { mrb->c->ci->stack[0] = mrb_nil_value(); a = 0; c = OP_R_NORMAL; goto L_OP_RETURN_BODY; } mrb_int nargs = mrb_ci_bidx(ci)+1; if (nargs < irep->nregs) { mrb_stack_extend(mrb, irep->nregs); stack_clear(regs+nargs, irep->nregs-nargs); } if (MRB_PROC_ENV_P(m)) { regs[0] = MRB_PROC_ENV(m)->stack[0]; } pc = irep->iseq; } pool = irep->pool; syms = irep->syms; JUMP; } CASE(OP_SUPER, BB) { mrb_method_t m; struct RClass *cls; mrb_callinfo *ci = mrb->c->ci; mrb_value recv, blk; const struct RProc *p = ci->proc; mrb_sym mid = ci->mid; struct RClass* target_class = mrb_vm_ci_target_class(ci); if (MRB_PROC_ENV_P(p) && p->e.env->mid && p->e.env->mid != mid) { /* alias support */ mid = p->e.env->mid; /* restore old mid */ } if (mid == 0 || !target_class) { mrb_value exc = mrb_exc_new_lit(mrb, E_NOMETHOD_ERROR, "super called outside of method"); mrb_exc_set(mrb, exc); goto L_RAISE; } if ((target_class->flags & MRB_FL_CLASS_IS_PREPENDED) || target_class->tt == MRB_TT_MODULE) { goto super_typeerror; } recv = regs[0]; if (!mrb_obj_is_kind_of(mrb, recv, target_class)) { super_typeerror: ; mrb_value exc = mrb_exc_new_lit(mrb, E_TYPE_ERROR, "self has wrong type to call super in this context"); mrb_exc_set(mrb, exc); goto L_RAISE; } ARGUMENT_NORMALIZE(a, 
&b, OP_SUPER); cls = target_class->super; m = mrb_method_search_vm(mrb, &cls, mid); if (MRB_METHOD_UNDEF_P(m)) { m = prepare_missing(mrb, recv, mid, &cls, a, &b, blk, 1); mid = MRB_SYM(method_missing); } /* push callinfo */ ci = cipush(mrb, a, 0, cls, NULL, mid, b); /* prepare stack */ ci->stack[0] = recv; if (MRB_METHOD_CFUNC_P(m)) { mrb_value v; if (MRB_METHOD_PROC_P(m)) { mrb_vm_ci_proc_set(ci, MRB_METHOD_PROC(m)); } v = MRB_METHOD_CFUNC(m)(mrb, recv); mrb_gc_arena_restore(mrb, ai); if (mrb->exc) goto L_RAISE; ci = mrb->c->ci; mrb_assert(!mrb_break_p(v)); if (!mrb_vm_ci_target_class(ci)) { /* return from context modifying method (resume/yield) */ if (ci->cci == CINFO_RESUMED) { mrb->jmp = prev_jmp; return v; } else { mrb_assert(!MRB_PROC_CFUNC_P(ci[-1].proc)); proc = ci[-1].proc; irep = proc->body.irep; pool = irep->pool; syms = irep->syms; } } mrb->c->ci->stack[0] = v; ci = cipop(mrb); pc = ci->pc; } else { /* setup environment for calling method */ mrb_vm_ci_proc_set(ci, (proc = MRB_METHOD_PROC(m))); irep = proc->body.irep; pool = irep->pool; syms = irep->syms; mrb_stack_extend(mrb, (irep->nregs < 4) ? 4 : irep->nregs); pc = irep->iseq; } JUMP; } CASE(OP_ARGARY, BS) { mrb_int m1 = (b>>11)&0x3f; mrb_int r = (b>>10)&0x1; mrb_int m2 = (b>>5)&0x1f; mrb_int kd = (b>>4)&0x1; mrb_int lv = (b>>0)&0xf; mrb_value *stack; if (mrb->c->ci->mid == 0 || mrb_vm_ci_target_class(mrb->c->ci) == NULL) { mrb_value exc; L_NOSUPER: exc = mrb_exc_new_lit(mrb, E_NOMETHOD_ERROR, "super called outside of method"); mrb_exc_set(mrb, exc); goto L_RAISE; } if (lv == 0) stack = regs + 1; else { struct REnv *e = uvenv(mrb, lv-1); if (!e) goto L_NOSUPER; if (MRB_ENV_LEN(e) <= m1+r+m2+1) goto L_NOSUPER; stack = e->stack + 1; } if (r == 0) { regs[a] = mrb_ary_new_from_values(mrb, m1+m2, stack); } else { mrb_value *pp = NULL; struct RArray *rest; mrb_int len = 0; if (mrb_array_p(stack[m1])) { struct RArray *ary = mrb_ary_ptr(stack[m1]); pp = ARY_PTR(ary); len = ARY_LEN(ary); } regs[a] = mrb_ary_new_capa(mrb, m1+len+m2); rest = mrb_ary_ptr(regs[a]); if (m1 > 0) { stack_copy(ARY_PTR(rest), stack, m1); } if (len > 0) { stack_copy(ARY_PTR(rest)+m1, pp, len); } if (m2 > 0) { stack_copy(ARY_PTR(rest)+m1+len, stack+m1+1, m2); } ARY_SET_LEN(rest, m1+len+m2); } if (kd) { regs[a+1] = stack[m1+r+m2]; regs[a+2] = stack[m1+r+m2+1]; } else { regs[a+1] = stack[m1+r+m2]; } mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_ENTER, W) { mrb_int m1 = MRB_ASPEC_REQ(a); mrb_int o = MRB_ASPEC_OPT(a); mrb_int r = MRB_ASPEC_REST(a); mrb_int m2 = MRB_ASPEC_POST(a); mrb_int kd = (MRB_ASPEC_KEY(a) > 0 || MRB_ASPEC_KDICT(a))? 
1 : 0; /* unused int b = MRB_ASPEC_BLOCK(a); */ mrb_int const len = m1 + o + r + m2; mrb_callinfo *ci = mrb->c->ci; mrb_int argc = ci->n; mrb_value *argv = regs+1; mrb_value * const argv0 = argv; mrb_int const kw_pos = len + kd; /* where kwhash should be */ mrb_int const blk_pos = kw_pos + 1; /* where block should be */ mrb_value blk = regs[mrb_ci_bidx(ci)]; mrb_value kdict = mrb_nil_value(); /* keyword arguments */ if (ci->nk > 0) { mrb_int kidx = mrb_ci_kidx(ci); kdict = regs[kidx]; if (!mrb_hash_p(kdict) || mrb_hash_size(mrb, kdict) == 0) { kdict = mrb_nil_value(); ci->nk = 0; } } if (!kd && !mrb_nil_p(kdict)) { if (argc < 14) { ci->n++; argc++; /* include kdict in normal arguments */ } else if (argc == 14) { /* pack arguments and kdict */ regs[1] = mrb_ary_new_from_values(mrb, argc+1, &regs[1]); argc = ci->n = 15; } else {/* argc == 15 */ /* push kdict to packed arguments */ mrb_ary_push(mrb, regs[1], regs[2]); } ci->nk = 0; } if (kd && MRB_ASPEC_KEY(a) > 0 && mrb_hash_p(kdict)) { kdict = mrb_hash_dup(mrb, kdict); } /* arguments is passed with Array */ if (argc == 15) { struct RArray *ary = mrb_ary_ptr(regs[1]); argv = ARY_PTR(ary); argc = (int)ARY_LEN(ary); mrb_gc_protect(mrb, regs[1]); } /* strict argument check */ if (ci->proc && MRB_PROC_STRICT_P(ci->proc)) { if (argc < m1 + m2 || (r == 0 && argc > len)) { argnum_error(mrb, m1+m2); goto L_RAISE; } } /* extract first argument array to arguments */ else if (len > 1 && argc == 1 && mrb_array_p(argv[0])) { mrb_gc_protect(mrb, argv[0]); argc = (int)RARRAY_LEN(argv[0]); argv = RARRAY_PTR(argv[0]); } /* rest arguments */ mrb_value rest = mrb_nil_value(); if (argc < len) { mrb_int mlen = m2; if (argc < m1+m2) { mlen = m1 < argc ? argc - m1 : 0; } /* copy mandatory and optional arguments */ if (argv0 != argv && argv) { value_move(&regs[1], argv, argc-mlen); /* m1 + o */ } if (argc < m1) { stack_clear(&regs[argc+1], m1-argc); } /* copy post mandatory arguments */ if (mlen) { value_move(&regs[len-m2+1], &argv[argc-mlen], mlen); } if (mlen < m2) { stack_clear(&regs[len-m2+mlen+1], m2-mlen); } /* initialize rest arguments with empty Array */ if (r) { rest = mrb_ary_new_capa(mrb, 0); regs[m1+o+1] = rest; } /* skip initializer of passed arguments */ if (o > 0 && argc > m1+m2) pc += (argc - m1 - m2)*3; } else { mrb_int rnum = 0; if (argv0 != argv) { value_move(&regs[1], argv, m1+o); } if (r) { rnum = argc-m1-o-m2; rest = mrb_ary_new_from_values(mrb, rnum, argv+m1+o); regs[m1+o+1] = rest; } if (m2 > 0 && argc-m2 > m1) { value_move(&regs[m1+o+r+1], &argv[m1+o+rnum], m2); } pc += o*3; } /* need to be update blk first to protect blk from GC */ regs[blk_pos] = blk; /* move block */ if (kd) { if (mrb_nil_p(kdict)) kdict = mrb_hash_new_capa(mrb, 0); regs[kw_pos] = kdict; /* set kwhash */ } /* format arguments for generated code */ mrb->c->ci->n = (uint8_t)len; /* clear local (but non-argument) variables */ if (irep->nlocals-blk_pos-1 > 0) { stack_clear(&regs[blk_pos+1], irep->nlocals-blk_pos-1); } JUMP; } CASE(OP_KARG, BB) { mrb_value k = mrb_symbol_value(syms[b]); mrb_int kidx = mrb_ci_kidx(mrb->c->ci); mrb_value kdict, v; if (kidx < 0 || !mrb_hash_p(kdict=regs[kidx]) || !mrb_hash_key_p(mrb, kdict, k)) { mrb_value str = mrb_format(mrb, "missing keyword: %v", k); mrb_exc_set(mrb, mrb_exc_new_str(mrb, E_ARGUMENT_ERROR, str)); goto L_RAISE; } v = mrb_hash_get(mrb, kdict, k); regs[a] = v; mrb_hash_delete_key(mrb, kdict, k); NEXT; } CASE(OP_KEY_P, BB) { mrb_value k = mrb_symbol_value(syms[b]); mrb_int kidx = mrb_ci_kidx(mrb->c->ci); mrb_value kdict; mrb_bool 
key_p = FALSE; if (kidx >= 0 && mrb_hash_p(kdict=regs[kidx])) { key_p = mrb_hash_key_p(mrb, kdict, k); } regs[a] = mrb_bool_value(key_p); NEXT; } CASE(OP_KEYEND, Z) { mrb_int kidx = mrb_ci_kidx(mrb->c->ci); mrb_value kdict; if (kidx >= 0 && mrb_hash_p(kdict=regs[kidx]) && !mrb_hash_empty_p(mrb, kdict)) { mrb_value keys = mrb_hash_keys(mrb, kdict); mrb_value key1 = RARRAY_PTR(keys)[0]; mrb_value str = mrb_format(mrb, "unknown keyword: %v", key1); mrb_exc_set(mrb, mrb_exc_new_str(mrb, E_ARGUMENT_ERROR, str)); goto L_RAISE; } NEXT; } CASE(OP_BREAK, B) { c = OP_R_BREAK; goto L_RETURN; } CASE(OP_RETURN_BLK, B) { c = OP_R_RETURN; goto L_RETURN; } CASE(OP_RETURN, B) c = OP_R_NORMAL; L_RETURN: { mrb_callinfo *ci; ci = mrb->c->ci; if (ci->mid) { mrb_value blk = regs[mrb_ci_bidx(ci)]; if (mrb_proc_p(blk)) { struct RProc *p = mrb_proc_ptr(blk); if (!MRB_PROC_STRICT_P(p) && ci > mrb->c->cibase && MRB_PROC_ENV(p) == mrb_vm_ci_env(&ci[-1])) { p->flags |= MRB_PROC_ORPHAN; } } } if (mrb->exc) { L_RAISE: ci = mrb->c->ci; if (ci == mrb->c->cibase) { ch = catch_handler_find(mrb, ci, pc, MRB_CATCH_FILTER_ALL); if (ch == NULL) goto L_FTOP; goto L_CATCH; } while ((ch = catch_handler_find(mrb, ci, pc, MRB_CATCH_FILTER_ALL)) == NULL) { ci = cipop(mrb); if (ci[1].cci == CINFO_SKIP && prev_jmp) { mrb->jmp = prev_jmp; MRB_THROW(prev_jmp); } pc = ci[0].pc; if (ci == mrb->c->cibase) { ch = catch_handler_find(mrb, ci, pc, MRB_CATCH_FILTER_ALL); if (ch == NULL) { L_FTOP: /* fiber top */ if (mrb->c == mrb->root_c) { mrb->c->ci->stack = mrb->c->stbase; goto L_STOP; } else { struct mrb_context *c = mrb->c; c->status = MRB_FIBER_TERMINATED; mrb->c = c->prev; c->prev = NULL; goto L_RAISE; } } break; } } L_CATCH: if (ch == NULL) goto L_STOP; if (FALSE) { L_CATCH_TAGGED_BREAK: /* from THROW_TAGGED_BREAK() or UNWIND_ENSURE() */ ci = mrb->c->ci; } proc = ci->proc; irep = proc->body.irep; pool = irep->pool; syms = irep->syms; mrb_stack_extend(mrb, irep->nregs); pc = irep->iseq + mrb_irep_catch_handler_unpack(ch->target); } else { mrb_int acc; mrb_value v; ci = mrb->c->ci; v = regs[a]; mrb_gc_protect(mrb, v); switch (c) { case OP_R_RETURN: /* Fall through to OP_R_NORMAL otherwise */ if (ci->cci == CINFO_NONE && MRB_PROC_ENV_P(proc) && !MRB_PROC_STRICT_P(proc)) { const struct RProc *dst; mrb_callinfo *cibase; cibase = mrb->c->cibase; dst = top_proc(mrb, proc); if (MRB_PROC_ENV_P(dst)) { struct REnv *e = MRB_PROC_ENV(dst); if (!MRB_ENV_ONSTACK_P(e) || (e->cxt && e->cxt != mrb->c)) { localjump_error(mrb, LOCALJUMP_ERROR_RETURN); goto L_RAISE; } } /* check jump destination */ while (cibase <= ci && ci->proc != dst) { if (ci->cci > CINFO_NONE) { /* jump cross C boundary */ localjump_error(mrb, LOCALJUMP_ERROR_RETURN); goto L_RAISE; } ci--; } if (ci <= cibase) { /* no jump destination */ localjump_error(mrb, LOCALJUMP_ERROR_RETURN); goto L_RAISE; } ci = mrb->c->ci; while (cibase <= ci && ci->proc != dst) { CHECKPOINT_RESTORE(RBREAK_TAG_RETURN_BLOCK) { cibase = mrb->c->cibase; dst = top_proc(mrb, proc); } CHECKPOINT_MAIN(RBREAK_TAG_RETURN_BLOCK) { UNWIND_ENSURE(mrb, ci, pc, RBREAK_TAG_RETURN_BLOCK, proc, v); } CHECKPOINT_END(RBREAK_TAG_RETURN_BLOCK); ci = cipop(mrb); pc = ci->pc; } proc = ci->proc; mrb->exc = NULL; /* clear break object */ break; } /* fallthrough */ case OP_R_NORMAL: NORMAL_RETURN: if (ci == mrb->c->cibase) { struct mrb_context *c; c = mrb->c; if (!c->prev) { /* toplevel return */ regs[irep->nlocals] = v; goto CHECKPOINT_LABEL_MAKE(RBREAK_TAG_STOP); } if (!c->vmexec && c->prev->ci == c->prev->cibase) { mrb_value exc = 
mrb_exc_new_lit(mrb, E_FIBER_ERROR, "double resume"); mrb_exc_set(mrb, exc); goto L_RAISE; } CHECKPOINT_RESTORE(RBREAK_TAG_RETURN_TOPLEVEL) { c = mrb->c; } CHECKPOINT_MAIN(RBREAK_TAG_RETURN_TOPLEVEL) { UNWIND_ENSURE(mrb, ci, pc, RBREAK_TAG_RETURN_TOPLEVEL, proc, v); } CHECKPOINT_END(RBREAK_TAG_RETURN_TOPLEVEL); /* automatic yield at the end */ c->status = MRB_FIBER_TERMINATED; mrb->c = c->prev; mrb->c->status = MRB_FIBER_RUNNING; c->prev = NULL; if (c->vmexec) { mrb_gc_arena_restore(mrb, ai); c->vmexec = FALSE; mrb->jmp = prev_jmp; return v; } ci = mrb->c->ci; } CHECKPOINT_RESTORE(RBREAK_TAG_RETURN) { /* do nothing */ } CHECKPOINT_MAIN(RBREAK_TAG_RETURN) { UNWIND_ENSURE(mrb, ci, pc, RBREAK_TAG_RETURN, proc, v); } CHECKPOINT_END(RBREAK_TAG_RETURN); mrb->exc = NULL; /* clear break object */ break; case OP_R_BREAK: if (MRB_PROC_STRICT_P(proc)) goto NORMAL_RETURN; if (MRB_PROC_ORPHAN_P(proc)) { mrb_value exc; L_BREAK_ERROR: exc = mrb_exc_new_lit(mrb, E_LOCALJUMP_ERROR, "break from proc-closure"); mrb_exc_set(mrb, exc); goto L_RAISE; } if (!MRB_PROC_ENV_P(proc) || !MRB_ENV_ONSTACK_P(MRB_PROC_ENV(proc))) { goto L_BREAK_ERROR; } else { struct REnv *e = MRB_PROC_ENV(proc); if (e->cxt != mrb->c) { goto L_BREAK_ERROR; } } CHECKPOINT_RESTORE(RBREAK_TAG_BREAK) { /* do nothing */ } CHECKPOINT_MAIN(RBREAK_TAG_BREAK) { UNWIND_ENSURE(mrb, ci, pc, RBREAK_TAG_BREAK, proc, v); } CHECKPOINT_END(RBREAK_TAG_BREAK); /* break from fiber block */ if (ci == mrb->c->cibase && ci->pc) { struct mrb_context *c = mrb->c; mrb->c = c->prev; c->prev = NULL; ci = mrb->c->ci; } if (ci->cci > CINFO_NONE) { ci = cipop(mrb); mrb->exc = (struct RObject*)break_new(mrb, RBREAK_TAG_BREAK, proc, v); mrb_gc_arena_restore(mrb, ai); mrb->c->vmexec = FALSE; mrb->jmp = prev_jmp; MRB_THROW(prev_jmp); } if (FALSE) { struct RBreak *brk; L_BREAK: brk = (struct RBreak*)mrb->exc; proc = mrb_break_proc_get(brk); v = mrb_break_value_get(brk); ci = mrb->c->ci; switch (mrb_break_tag_get(brk)) { #define DISPATCH_CHECKPOINTS(n, i) case n: goto CHECKPOINT_LABEL_MAKE(n); RBREAK_TAG_FOREACH(DISPATCH_CHECKPOINTS) #undef DISPATCH_CHECKPOINTS default: mrb_assert(!"wrong break tag"); } } while (mrb->c->cibase < ci && ci[-1].proc != proc->upper) { if (ci[-1].cci == CINFO_SKIP) { goto L_BREAK_ERROR; } CHECKPOINT_RESTORE(RBREAK_TAG_BREAK_UPPER) { /* do nothing */ } CHECKPOINT_MAIN(RBREAK_TAG_BREAK_UPPER) { UNWIND_ENSURE(mrb, ci, pc, RBREAK_TAG_BREAK_UPPER, proc, v); } CHECKPOINT_END(RBREAK_TAG_BREAK_UPPER); ci = cipop(mrb); pc = ci->pc; } CHECKPOINT_RESTORE(RBREAK_TAG_BREAK_INTARGET) { /* do nothing */ } CHECKPOINT_MAIN(RBREAK_TAG_BREAK_INTARGET) { UNWIND_ENSURE(mrb, ci, pc, RBREAK_TAG_BREAK_INTARGET, proc, v); } CHECKPOINT_END(RBREAK_TAG_BREAK_INTARGET); if (ci == mrb->c->cibase) { goto L_BREAK_ERROR; } mrb->exc = NULL; /* clear break object */ break; default: /* cannot happen */ break; } mrb_assert(ci == mrb->c->ci); mrb_assert(mrb->exc == NULL); if (mrb->c->vmexec && !mrb_vm_ci_target_class(ci)) { mrb_gc_arena_restore(mrb, ai); mrb->c->vmexec = FALSE; mrb->jmp = prev_jmp; return v; } acc = ci->cci; ci = cipop(mrb); if (acc == CINFO_SKIP || acc == CINFO_DIRECT) { mrb_gc_arena_restore(mrb, ai); mrb->jmp = prev_jmp; return v; } pc = ci->pc; DEBUG(fprintf(stderr, "from :%s\n", mrb_sym_name(mrb, ci->mid))); proc = ci->proc; irep = proc->body.irep; pool = irep->pool; syms = irep->syms; ci[1].stack[0] = v; mrb_gc_arena_restore(mrb, ai); } JUMP; } CASE(OP_BLKPUSH, BS) { int m1 = (b>>11)&0x3f; int r = (b>>10)&0x1; int m2 = (b>>5)&0x1f; int kd = (b>>4)&0x1; int lv = 
(b>>0)&0xf; mrb_value *stack; if (lv == 0) stack = regs + 1; else { struct REnv *e = uvenv(mrb, lv-1); if (!e || (!MRB_ENV_ONSTACK_P(e) && e->mid == 0) || MRB_ENV_LEN(e) <= m1+r+m2+1) { localjump_error(mrb, LOCALJUMP_ERROR_YIELD); goto L_RAISE; } stack = e->stack + 1; } if (mrb_nil_p(stack[m1+r+m2+kd])) { localjump_error(mrb, LOCALJUMP_ERROR_YIELD); goto L_RAISE; } regs[a] = stack[m1+r+m2+kd]; NEXT; } #if !defined(MRB_USE_BIGINT) || defined(MRB_INT32) L_INT_OVERFLOW: { mrb_value exc = mrb_exc_new_lit(mrb, E_RANGE_ERROR, "integer overflow"); mrb_exc_set(mrb, exc); } goto L_RAISE; #endif #define TYPES2(a,b) ((((uint16_t)(a))<<8)|(((uint16_t)(b))&0xff)) #define OP_MATH(op_name) \ /* need to check if op is overridden */ \ switch (TYPES2(mrb_type(regs[a]),mrb_type(regs[a+1]))) { \ OP_MATH_CASE_INTEGER(op_name); \ OP_MATH_CASE_FLOAT(op_name, integer, float); \ OP_MATH_CASE_FLOAT(op_name, float, integer); \ OP_MATH_CASE_FLOAT(op_name, float, float); \ OP_MATH_CASE_STRING_##op_name(); \ default: \ mid = MRB_OPSYM(op_name); \ goto L_SEND_SYM; \ } \ NEXT; #define OP_MATH_CASE_INTEGER(op_name) \ case TYPES2(MRB_TT_INTEGER, MRB_TT_INTEGER): \ { \ mrb_int x = mrb_integer(regs[a]), y = mrb_integer(regs[a+1]), z; \ if (mrb_int_##op_name##_overflow(x, y, &z)) { \ OP_MATH_OVERFLOW_INT(op_name,x,y); \ } \ else \ SET_INT_VALUE(mrb,regs[a], z); \ } \ break #ifdef MRB_NO_FLOAT #define OP_MATH_CASE_FLOAT(op_name, t1, t2) (void)0 #else #define OP_MATH_CASE_FLOAT(op_name, t1, t2) \ case TYPES2(OP_MATH_TT_##t1, OP_MATH_TT_##t2): \ { \ mrb_float z = mrb_##t1(regs[a]) OP_MATH_OP_##op_name mrb_##t2(regs[a+1]); \ SET_FLOAT_VALUE(mrb, regs[a], z); \ } \ break #endif #ifdef MRB_USE_BIGINT #define OP_MATH_OVERFLOW_INT(op,x,y) regs[a] = mrb_bint_##op##_ii(mrb,x,y) #else #define OP_MATH_OVERFLOW_INT(op,x,y) goto L_INT_OVERFLOW #endif #define OP_MATH_CASE_STRING_add() \ case TYPES2(MRB_TT_STRING, MRB_TT_STRING): \ regs[a] = mrb_str_plus(mrb, regs[a], regs[a+1]); \ mrb_gc_arena_restore(mrb, ai); \ break #define OP_MATH_CASE_STRING_sub() (void)0 #define OP_MATH_CASE_STRING_mul() (void)0 #define OP_MATH_OP_add + #define OP_MATH_OP_sub - #define OP_MATH_OP_mul * #define OP_MATH_TT_integer MRB_TT_INTEGER #define OP_MATH_TT_float MRB_TT_FLOAT CASE(OP_ADD, B) { OP_MATH(add); } CASE(OP_SUB, B) { OP_MATH(sub); } CASE(OP_MUL, B) { OP_MATH(mul); } CASE(OP_DIV, B) { #ifndef MRB_NO_FLOAT mrb_float x, y, f; #endif /* need to check if op is overridden */ switch (TYPES2(mrb_type(regs[a]),mrb_type(regs[a+1]))) { case TYPES2(MRB_TT_INTEGER,MRB_TT_INTEGER): { mrb_int x = mrb_integer(regs[a]); mrb_int y = mrb_integer(regs[a+1]); mrb_int div = mrb_div_int(mrb, x, y); SET_INT_VALUE(mrb, regs[a], div); } NEXT; #ifndef MRB_NO_FLOAT case TYPES2(MRB_TT_INTEGER,MRB_TT_FLOAT): x = (mrb_float)mrb_integer(regs[a]); y = mrb_float(regs[a+1]); break; case TYPES2(MRB_TT_FLOAT,MRB_TT_INTEGER): x = mrb_float(regs[a]); y = (mrb_float)mrb_integer(regs[a+1]); break; case TYPES2(MRB_TT_FLOAT,MRB_TT_FLOAT): x = mrb_float(regs[a]); y = mrb_float(regs[a+1]); break; #endif default: mid = MRB_OPSYM(div); goto L_SEND_SYM; } #ifndef MRB_NO_FLOAT f = mrb_div_float(x, y); SET_FLOAT_VALUE(mrb, regs[a], f); #endif NEXT; } #define OP_MATHI(op_name) \ /* need to check if op is overridden */ \ switch (mrb_type(regs[a])) { \ OP_MATHI_CASE_INTEGER(op_name); \ OP_MATHI_CASE_FLOAT(op_name); \ default: \ SET_INT_VALUE(mrb,regs[a+1], b); \ mid = MRB_OPSYM(op_name); \ goto L_SEND_SYM; \ } \ NEXT; #define OP_MATHI_CASE_INTEGER(op_name) \ case MRB_TT_INTEGER: \ { \ mrb_int x = 
mrb_integer(regs[a]), y = (mrb_int)b, z; \ if (mrb_int_##op_name##_overflow(x, y, &z)) { \ OP_MATH_OVERFLOW_INT(op_name,x,y); \ } \ else \ SET_INT_VALUE(mrb,regs[a], z); \ } \ break #ifdef MRB_NO_FLOAT #define OP_MATHI_CASE_FLOAT(op_name) (void)0 #else #define OP_MATHI_CASE_FLOAT(op_name) \ case MRB_TT_FLOAT: \ { \ mrb_float z = mrb_float(regs[a]) OP_MATH_OP_##op_name b; \ SET_FLOAT_VALUE(mrb, regs[a], z); \ } \ break #endif CASE(OP_ADDI, BB) { OP_MATHI(add); } CASE(OP_SUBI, BB) { OP_MATHI(sub); } #define OP_CMP_BODY(op,v1,v2) (v1(regs[a]) op v2(regs[a+1])) #ifdef MRB_NO_FLOAT #define OP_CMP(op,sym) do {\ int result;\ /* need to check if - is overridden */\ switch (TYPES2(mrb_type(regs[a]),mrb_type(regs[a+1]))) {\ case TYPES2(MRB_TT_INTEGER,MRB_TT_INTEGER):\ result = OP_CMP_BODY(op,mrb_fixnum,mrb_fixnum);\ break;\ default:\ mid = MRB_OPSYM(sym);\ goto L_SEND_SYM;\ }\ if (result) {\ SET_TRUE_VALUE(regs[a]);\ }\ else {\ SET_FALSE_VALUE(regs[a]);\ }\ } while(0) #else #define OP_CMP(op, sym) do {\ int result;\ /* need to check if - is overridden */\ switch (TYPES2(mrb_type(regs[a]),mrb_type(regs[a+1]))) {\ case TYPES2(MRB_TT_INTEGER,MRB_TT_INTEGER):\ result = OP_CMP_BODY(op,mrb_fixnum,mrb_fixnum);\ break;\ case TYPES2(MRB_TT_INTEGER,MRB_TT_FLOAT):\ result = OP_CMP_BODY(op,mrb_fixnum,mrb_float);\ break;\ case TYPES2(MRB_TT_FLOAT,MRB_TT_INTEGER):\ result = OP_CMP_BODY(op,mrb_float,mrb_fixnum);\ break;\ case TYPES2(MRB_TT_FLOAT,MRB_TT_FLOAT):\ result = OP_CMP_BODY(op,mrb_float,mrb_float);\ break;\ default:\ mid = MRB_OPSYM(sym);\ goto L_SEND_SYM;\ }\ if (result) {\ SET_TRUE_VALUE(regs[a]);\ }\ else {\ SET_FALSE_VALUE(regs[a]);\ }\ } while(0) #endif CASE(OP_EQ, B) { if (mrb_obj_eq(mrb, regs[a], regs[a+1])) { SET_TRUE_VALUE(regs[a]); } else { OP_CMP(==,eq); } NEXT; } CASE(OP_LT, B) { OP_CMP(<,lt); NEXT; } CASE(OP_LE, B) { OP_CMP(<=,le); NEXT; } CASE(OP_GT, B) { OP_CMP(>,gt); NEXT; } CASE(OP_GE, B) { OP_CMP(>=,ge); NEXT; } CASE(OP_ARRAY, BB) { regs[a] = mrb_ary_new_from_values(mrb, b, &regs[a]); mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_ARRAY2, BBB) { regs[a] = mrb_ary_new_from_values(mrb, c, &regs[b]); mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_ARYCAT, B) { mrb_value splat = mrb_ary_splat(mrb, regs[a+1]); if (mrb_nil_p(regs[a])) { regs[a] = splat; } else { mrb_assert(mrb_array_p(regs[a])); mrb_ary_concat(mrb, regs[a], splat); } mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_ARYPUSH, BB) { mrb_assert(mrb_array_p(regs[a])); for (mrb_int i=0; i<b; i++) { mrb_ary_push(mrb, regs[a], regs[a+i+1]); } NEXT; } CASE(OP_ARYDUP, B) { mrb_value ary = regs[a]; if (mrb_array_p(ary)) { ary = mrb_ary_new_from_values(mrb, RARRAY_LEN(ary), RARRAY_PTR(ary)); } else { ary = mrb_ary_new_from_values(mrb, 1, &ary); } regs[a] = ary; NEXT; } CASE(OP_AREF, BBB) { mrb_value v = regs[b]; if (!mrb_array_p(v)) { if (c == 0) { regs[a] = v; } else { SET_NIL_VALUE(regs[a]); } } else { v = mrb_ary_ref(mrb, v, c); regs[a] = v; } NEXT; } CASE(OP_ASET, BBB) { mrb_assert(mrb_array_p(regs[a])); mrb_ary_set(mrb, regs[b], c, regs[a]); NEXT; } CASE(OP_APOST, BBB) { mrb_value v = regs[a]; int pre = b; int post = c; struct RArray *ary; int len, idx; if (!mrb_array_p(v)) { v = mrb_ary_new_from_values(mrb, 1, &regs[a]); } ary = mrb_ary_ptr(v); len = (int)ARY_LEN(ary); if (len > pre + post) { v = mrb_ary_new_from_values(mrb, len - pre - post, ARY_PTR(ary)+pre); regs[a++] = v; while (post--) { regs[a++] = ARY_PTR(ary)[len-post-1]; } } else { v = mrb_ary_new_capa(mrb, 0); regs[a++] = v; for (idx=0; idx+pre<len; idx++) { regs[a+idx] = 
ARY_PTR(ary)[pre+idx]; } while (idx < post) { SET_NIL_VALUE(regs[a+idx]); idx++; } } mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_INTERN, B) { mrb_assert(mrb_string_p(regs[a])); mrb_sym sym = mrb_intern_str(mrb, regs[a]); regs[a] = mrb_symbol_value(sym); NEXT; } CASE(OP_SYMBOL, BB) { size_t len; mrb_sym sym; mrb_assert((pool[b].tt&IREP_TT_NFLAG)==0); len = pool[b].tt >> 2; if (pool[b].tt & IREP_TT_SFLAG) { sym = mrb_intern_static(mrb, pool[b].u.str, len); } else { sym = mrb_intern(mrb, pool[b].u.str, len); } regs[a] = mrb_symbol_value(sym); NEXT; } CASE(OP_STRING, BB) { mrb_int len; mrb_assert((pool[b].tt&IREP_TT_NFLAG)==0); len = pool[b].tt >> 2; if (pool[b].tt & IREP_TT_SFLAG) { regs[a] = mrb_str_new_static(mrb, pool[b].u.str, len); } else { regs[a] = mrb_str_new(mrb, pool[b].u.str, len); } mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_STRCAT, B) { mrb_assert(mrb_string_p(regs[a])); mrb_str_concat(mrb, regs[a], regs[a+1]); NEXT; } CASE(OP_HASH, BB) { mrb_value hash = mrb_hash_new_capa(mrb, b); int i; int lim = a+b*2; for (i=a; i<lim; i+=2) { mrb_hash_set(mrb, hash, regs[i], regs[i+1]); } regs[a] = hash; mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_HASHADD, BB) { mrb_value hash; int i; int lim = a+b*2+1; hash = regs[a]; mrb_ensure_hash_type(mrb, hash); for (i=a+1; i<lim; i+=2) { mrb_hash_set(mrb, hash, regs[i], regs[i+1]); } mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_HASHCAT, B) { mrb_value hash = regs[a]; mrb_assert(mrb_hash_p(hash)); mrb_hash_merge(mrb, hash, regs[a+1]); mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_LAMBDA, BB) c = OP_L_LAMBDA; L_MAKE_LAMBDA: { struct RProc *p; const mrb_irep *nirep = irep->reps[b]; if (c & OP_L_CAPTURE) { p = mrb_closure_new(mrb, nirep); } else { p = mrb_proc_new(mrb, nirep); p->flags |= MRB_PROC_SCOPE; } if (c & OP_L_STRICT) p->flags |= MRB_PROC_STRICT; regs[a] = mrb_obj_value(p); mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_BLOCK, BB) { c = OP_L_BLOCK; goto L_MAKE_LAMBDA; } CASE(OP_METHOD, BB) { c = OP_L_METHOD; goto L_MAKE_LAMBDA; } CASE(OP_RANGE_INC, B) { mrb_value v = mrb_range_new(mrb, regs[a], regs[a+1], FALSE); regs[a] = v; mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_RANGE_EXC, B) { mrb_value v = mrb_range_new(mrb, regs[a], regs[a+1], TRUE); regs[a] = v; mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_OCLASS, B) { regs[a] = mrb_obj_value(mrb->object_class); NEXT; } CASE(OP_CLASS, BB) { struct RClass *c = 0, *baseclass; mrb_value base, super; mrb_sym id = syms[b]; base = regs[a]; super = regs[a+1]; if (mrb_nil_p(base)) { baseclass = MRB_PROC_TARGET_CLASS(mrb->c->ci->proc); if (!baseclass) baseclass = mrb->object_class; base = mrb_obj_value(baseclass); } c = mrb_vm_define_class(mrb, base, super, id); regs[a] = mrb_obj_value(c); mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_MODULE, BB) { struct RClass *cls = 0, *baseclass; mrb_value base; mrb_sym id = syms[b]; base = regs[a]; if (mrb_nil_p(base)) { baseclass = MRB_PROC_TARGET_CLASS(mrb->c->ci->proc); if (!baseclass) baseclass = mrb->object_class; base = mrb_obj_value(baseclass); } cls = mrb_vm_define_module(mrb, base, id); regs[a] = mrb_obj_value(cls); mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_EXEC, BB) { mrb_value recv = regs[a]; struct RProc *p; const mrb_irep *nirep = irep->reps[b]; /* prepare closure */ p = mrb_proc_new(mrb, nirep); p->c = NULL; mrb_field_write_barrier(mrb, (struct RBasic*)p, (struct RBasic*)proc); MRB_PROC_SET_TARGET_CLASS(p, mrb_class_ptr(recv)); p->flags |= MRB_PROC_SCOPE; /* prepare call stack */ cipush(mrb, a, 0, mrb_class_ptr(recv), p, 0, 0); irep = 
p->body.irep; pool = irep->pool; syms = irep->syms; mrb_stack_extend(mrb, irep->nregs); stack_clear(regs+1, irep->nregs-1); pc = irep->iseq; JUMP; } CASE(OP_DEF, BB) { struct RClass *target = mrb_class_ptr(regs[a]); struct RProc *p = mrb_proc_ptr(regs[a+1]); mrb_method_t m; mrb_sym mid = syms[b]; MRB_METHOD_FROM_PROC(m, p); mrb_define_method_raw(mrb, target, mid, m); mrb_method_added(mrb, target, mid); mrb_gc_arena_restore(mrb, ai); regs[a] = mrb_symbol_value(mid); NEXT; } CASE(OP_SCLASS, B) { regs[a] = mrb_singleton_class(mrb, regs[a]); mrb_gc_arena_restore(mrb, ai); NEXT; } CASE(OP_TCLASS, B) { struct RClass *target = check_target_class(mrb); if (!target) goto L_RAISE; regs[a] = mrb_obj_value(target); NEXT; } CASE(OP_ALIAS, BB) { struct RClass *target = check_target_class(mrb); if (!target) goto L_RAISE; mrb_alias_method(mrb, target, syms[a], syms[b]); mrb_method_added(mrb, target, syms[a]); NEXT; } CASE(OP_UNDEF, B) { struct RClass *target = check_target_class(mrb); if (!target) goto L_RAISE; mrb_undef_method_id(mrb, target, syms[a]); NEXT; } CASE(OP_DEBUG, Z) { FETCH_BBB(); #ifdef MRB_USE_DEBUG_HOOK mrb->debug_op_hook(mrb, irep, pc, regs); #else #ifndef MRB_NO_STDIO printf("OP_DEBUG %d %d %d\n", a, b, c); #else abort(); #endif #endif NEXT; } CASE(OP_ERR, B) { size_t len = pool[a].tt >> 2; mrb_value exc; mrb_assert((pool[a].tt&IREP_TT_NFLAG)==0); exc = mrb_exc_new(mrb, E_LOCALJUMP_ERROR, pool[a].u.str, len); mrb_exc_set(mrb, exc); goto L_RAISE; } CASE(OP_EXT1, Z) { insn = READ_B(); switch (insn) { #define OPCODE(insn,ops) case OP_ ## insn: FETCH_ ## ops ## _1(); mrb->c->ci->pc = pc; goto L_OP_ ## insn ## _BODY; #include "mruby/ops.h" #undef OPCODE } pc--; NEXT; } CASE(OP_EXT2, Z) { insn = READ_B(); switch (insn) { #define OPCODE(insn,ops) case OP_ ## insn: FETCH_ ## ops ## _2(); mrb->c->ci->pc = pc; goto L_OP_ ## insn ## _BODY; #include "mruby/ops.h" #undef OPCODE } pc--; NEXT; } CASE(OP_EXT3, Z) { uint8_t insn = READ_B(); switch (insn) { #define OPCODE(insn,ops) case OP_ ## insn: FETCH_ ## ops ## _3(); mrb->c->ci->pc = pc; goto L_OP_ ## insn ## _BODY; #include "mruby/ops.h" #undef OPCODE } pc--; NEXT; } CASE(OP_STOP, Z) { /* stop VM */ CHECKPOINT_RESTORE(RBREAK_TAG_STOP) { /* do nothing */ } CHECKPOINT_MAIN(RBREAK_TAG_STOP) { UNWIND_ENSURE(mrb, mrb->c->ci, pc, RBREAK_TAG_STOP, proc, mrb_nil_value()); } CHECKPOINT_END(RBREAK_TAG_STOP); L_STOP: mrb->jmp = prev_jmp; if (mrb->exc) { mrb_assert(mrb->exc->tt == MRB_TT_EXCEPTION); return mrb_obj_value(mrb->exc); } return regs[irep->nlocals]; } } END_DISPATCH; #undef regs } MRB_CATCH(&c_jmp) { mrb_callinfo *ci = mrb->c->ci; while (ci > mrb->c->cibase && ci->cci == CINFO_DIRECT) { ci = cipop(mrb); } exc_catched = TRUE; pc = ci->pc; goto RETRY_TRY_BLOCK; } MRB_END_EXC(&c_jmp); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-416'], 'message': 'vm.c (hash_new_from_regs): stack may be reallocated.'</s>
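The fix message above ("stack may be reallocated") points at a classic VM pitfall: raw pointers into a growable value stack go stale whenever the stack is extended. The sketch below is not mruby code; vm_t, vm_stack_extend and the push helpers are illustrative names, and it only contrasts the stale-pointer pattern with the index-based alternative.

    #include <stddef.h>
    #include <stdlib.h>

    /* Illustrative only: a VM whose value stack may be realloc'ed when grown
     * (error handling omitted for brevity). */
    typedef struct { double *stack; size_t len, cap; } vm_t;

    static void vm_stack_extend(vm_t *vm, size_t need)
    {
        if (need > vm->cap) {                 /* growing may move the block */
            vm->cap = need * 2;
            vm->stack = realloc(vm->stack, vm->cap * sizeof(double));
        }
    }

    /* BAD: 'slot' is taken before the extend and may dangle afterwards. */
    static void push_copy_bad(vm_t *vm, size_t src)
    {
        double *slot = &vm->stack[src];       /* raw pointer into the stack */
        vm_stack_extend(vm, vm->len + 1);     /* ...whose block may now be freed */
        vm->stack[vm->len++] = *slot;         /* use-after-free when realloc moved it */
    }

    /* FIXED: keep the index across the call and re-derive the address. */
    static void push_copy_ok(vm_t *vm, size_t src)
    {
        vm_stack_extend(vm, vm->len + 1);
        vm->stack[vm->len++] = vm->stack[src];
    }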
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: bool initiate_stratum(struct pool *pool) { bool ret = false, recvd = false, noresume = false, sockd = false; char s[RBUFSIZE], *sret = NULL, *nonce1, *sessionid; json_t *val = NULL, *res_val, *err_val; json_error_t err; int n2size; resend: if (!setup_stratum_socket(pool)) { /* FIXME: change to LOG_DEBUG when issue #88 resolved */ applog(LOG_INFO, "setup_stratum_socket() on %s failed", get_pool_name(pool)); sockd = false; goto out; } sockd = true; if (recvd) { /* Get rid of any crap lying around if we're resending */ clear_sock(pool); sprintf(s, "{\"id\": %d, \"method\": \"mining.subscribe\", \"params\": []}", swork_id++); } else { if (pool->sessionid) sprintf(s, "{\"id\": %d, \"method\": \"mining.subscribe\", \"params\": [\""PACKAGE"/"VERSION"\", \"%s\"]}", swork_id++, pool->sessionid); else sprintf(s, "{\"id\": %d, \"method\": \"mining.subscribe\", \"params\": [\""PACKAGE"/"VERSION"\"]}", swork_id++); } if (__stratum_send(pool, s, strlen(s)) != SEND_OK) { applog(LOG_DEBUG, "Failed to send s in initiate_stratum"); goto out; } if (!socket_full(pool, DEFAULT_SOCKWAIT)) { applog(LOG_DEBUG, "Timed out waiting for response in initiate_stratum"); goto out; } sret = recv_line(pool); if (!sret) goto out; recvd = true; val = JSON_LOADS(sret, &err); free(sret); if (!val) { applog(LOG_INFO, "JSON decode failed(%d): %s", err.line, err.text); goto out; } res_val = json_object_get(val, "result"); err_val = json_object_get(val, "error"); if (!res_val || json_is_null(res_val) || (err_val && !json_is_null(err_val))) { char *ss; if (err_val) ss = json_dumps(err_val, JSON_INDENT(3)); else ss = strdup("(unknown reason)"); applog(LOG_INFO, "JSON-RPC decode failed: %s", ss); free(ss); goto out; } sessionid = get_sessionid(res_val); if (!sessionid) applog(LOG_DEBUG, "Failed to get sessionid in initiate_stratum"); nonce1 = json_array_string(res_val, 1); if (!nonce1) { applog(LOG_INFO, "Failed to get nonce1 in initiate_stratum"); free(sessionid); goto out; } n2size = json_integer_value(json_array_get(res_val, 2)); if (!n2size) { applog(LOG_INFO, "Failed to get n2size in initiate_stratum"); free(sessionid); free(nonce1); goto out; } cg_wlock(&pool->data_lock); pool->sessionid = sessionid; pool->nonce1 = nonce1; pool->n1_len = strlen(nonce1) / 2; free(pool->nonce1bin); pool->nonce1bin = (unsigned char *)calloc(pool->n1_len, 1); if (unlikely(!pool->nonce1bin)) quithere(1, "Failed to calloc pool->nonce1bin"); hex2bin(pool->nonce1bin, pool->nonce1, pool->n1_len); pool->n2size = n2size; cg_wunlock(&pool->data_lock); if (sessionid) applog(LOG_DEBUG, "%s stratum session id: %s", get_pool_name(pool), pool->sessionid); ret = true; out: if (ret) { if (!pool->stratum_url) pool->stratum_url = pool->sockaddr_url; pool->stratum_active = true; pool->swork.diff = 1; if (opt_protocol) { applog(LOG_DEBUG, "%s confirmed mining.subscribe with extranonce1 %s extran2size %d", get_pool_name(pool), pool->nonce1, pool->n2size); } } else { if (recvd && !noresume) { /* Reset the sessionid used for stratum resuming in case the pool * does not support it, or does not know how to respond to the * presence of the sessionid parameter. 
*/ cg_wlock(&pool->data_lock); free(pool->sessionid); free(pool->nonce1); pool->sessionid = pool->nonce1 = NULL; cg_wunlock(&pool->data_lock); applog(LOG_DEBUG, "Failed to resume stratum, trying afresh"); noresume = true; json_decref(val); goto resend; } applog(LOG_DEBUG, "Initiating stratum failed on %s", get_pool_name(pool)); if (sockd) { applog(LOG_DEBUG, "Suspending stratum on %s", get_pool_name(pool)); suspend_stratum(pool); } } json_decref(val); return ret; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-119', 'CWE-787'], 'message': 'Bugfix: initiate_stratum: Ensure extranonce2 size is not negative (which could lead to exploits later as too little memory gets allocated) Thanks to Mick Ayzenberg <mick@dejavusecurity.com> for finding this!'</s>
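The message above says the defect is that a network-supplied extranonce2 size is never checked for being negative before it drives later allocations. A minimal sketch of that kind of range check follows; validate_n2size and the upper bound of 16 are illustrative choices, not the project's actual patch.

    #include <stdio.h>

    /* A size received from the pool must be validated before it is used to
     * size later allocations; negative, zero and absurd values are rejected. */
    static int validate_n2size(long long value)
    {
        if (value < 1 || value > 16) {
            fprintf(stderr, "invalid extranonce2 size %lld\n", value);
            return -1;
        }
        return (int)value;
    }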
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: juniper_atm2_print(netdissect_options *ndo, const struct pcap_pkthdr *h, register const u_char *p) { int llc_hdrlen; struct juniper_l2info_t l2info; l2info.pictype = DLT_JUNIPER_ATM2; if (juniper_parse_header(ndo, p, h, &l2info) == 0) return l2info.header_len; p+=l2info.header_len; if (l2info.cookie[7] & ATM2_PKT_TYPE_MASK) { /* OAM cell ? */ oam_print(ndo, p, l2info.length, ATM_OAM_NOHEC); return l2info.header_len; } if (EXTRACT_24BITS(p) == 0xfefe03 || /* NLPID encaps ? */ EXTRACT_24BITS(p) == 0xaaaa03) { /* SNAP encaps ? */ llc_hdrlen = llc_print(ndo, p, l2info.length, l2info.caplen, NULL, NULL); if (llc_hdrlen > 0) return l2info.header_len; } if (l2info.direction != JUNIPER_BPF_PKT_IN && /* ether-over-1483 encaps ? */ (EXTRACT_32BITS(l2info.cookie) & ATM2_GAP_COUNT_MASK)) { ether_print(ndo, p, l2info.length, l2info.caplen, NULL, NULL); return l2info.header_len; } if (p[0] == 0x03) { /* Cisco style NLPID encaps ? */ isoclns_print(ndo, p + 1, l2info.length - 1); /* FIXME check if frame was recognized */ return l2info.header_len; } if(juniper_ppp_heuristic_guess(ndo, p, l2info.length) != 0) /* PPPoA vcmux encaps ? */ return l2info.header_len; if (ip_heuristic_guess(ndo, p, l2info.length) != 0) /* last try - vcmux encaps ? */ return l2info.header_len; return l2info.header_len; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-125', 'CWE-787'], 'message': 'CVE-2017-12993/Juniper: Add more bounds checks. This fixes a buffer over-read discovered by Kamil Frankowicz. Add tests using the capture files supplied by the reporter(s).'</s>
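The fix above "adds more bounds checks", i.e. it refuses to read multi-byte fields that extend past the captured data. The sketch below shows the generic form of such a check; read_be24 is a hypothetical helper, not tcpdump's actual macros.

    #include <stdint.h>
    #include <stddef.h>

    /* Refuse to decode a 3-byte field unless 3 captured bytes actually remain. */
    static int read_be24(const uint8_t *p, size_t caplen, uint32_t *out)
    {
        if (caplen < 3)
            return -1;                         /* would read past the capture buffer */
        *out = ((uint32_t)p[0] << 16) | ((uint32_t)p[1] << 8) | p[2];
        return 0;
    }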
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: GF_Err BD_DecMFFieldVec(GF_BifsDecoder * codec, GF_BitStream *bs, GF_Node *node, GF_FieldInfo *field, Bool is_mem_com) { GF_Err e; u32 NbBits, nbFields; u32 i; GF_ChildNodeItem *last; u8 qp_local, qp_on, initial_qp; GF_FieldInfo sffield; memset(&sffield, 0, sizeof(GF_FieldInfo)); sffield.fieldIndex = field->fieldIndex; sffield.fieldType = gf_sg_vrml_get_sf_type(field->fieldType); sffield.NDTtype = field->NDTtype; sffield.name = field->name; initial_qp = qp_local = qp_on = 0; //vector description - alloc the MF size before NbBits = gf_bs_read_int(bs, 5); nbFields = gf_bs_read_int(bs, NbBits); if (codec->ActiveQP) { initial_qp = 1; /*this is for QP 14*/ gf_bifs_dec_qp14_set_length(codec, nbFields); } if (field->fieldType != GF_SG_VRML_MFNODE) { e = gf_sg_vrml_mf_alloc(field->far_ptr, field->fieldType, nbFields); if (e) return e; for (i=0; i<nbFields; i++) { e = gf_sg_vrml_mf_get_item(field->far_ptr, field->fieldType, & sffield.far_ptr, i); if (e) return e; e = gf_bifs_dec_sf_field(codec, bs, node, &sffield, GF_FALSE); if (e) return e; } } else { last = NULL; for (i=0; i<nbFields; i++) { GF_Node *new_node = gf_bifs_dec_node(codec, bs, field->NDTtype); if (new_node) { e = gf_node_register(new_node, is_mem_com ? NULL : node); if (e) return e; if (node) { /*special case for QP, register as the current QP*/ if (gf_node_get_tag(new_node) == TAG_MPEG4_QuantizationParameter) { qp_local = ((M_QuantizationParameter *)new_node)->isLocal; /*we have a QP in the same scope, remove previous NB: we assume this is the right behavior, the spec doesn't say whether QP is cumulative or not*/ if (qp_on) gf_bifs_dec_qp_remove(codec, GF_FALSE); e = gf_bifs_dec_qp_set(codec, new_node); if (e) return e; qp_on = 1; if (qp_local) qp_local = 2; if (codec->force_keep_qp) { e = gf_node_list_add_child_last(field->far_ptr, new_node, &last); if (e) return e; } else { gf_node_register(new_node, NULL); gf_node_unregister(new_node, node); } } else { e = gf_node_list_add_child_last(field->far_ptr, new_node, &last); if (e) return e; } } /*proto coding*/ else if (codec->pCurrentProto) { /*TO DO: what happens if this is a QP node on the interface ?*/ e = gf_node_list_add_child_last( (GF_ChildNodeItem **)field->far_ptr, new_node, &last); if (e) return e; } } else { return codec->LastError ? codec->LastError : GF_NON_COMPLIANT_BITSTREAM; } } /*according to the spec, the QP applies to the current node itself, not just children. If IsLocal is TRUE remove the node*/ if (qp_on && qp_local) { if (qp_local == 2) { // qp_local = 1; } else { //ask to get rid of QP and reactivate if we had a QP when entering the node gf_bifs_dec_qp_remove(codec, initial_qp); // qp_local = 0; } } } /*finally delete the QP if any (local or not) as we get out of this node*/ if (qp_on) gf_bifs_dec_qp_remove(codec, GF_TRUE); return GF_OK; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-416'], 'message': 'fixed #2212'</s>
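The commit message ("fixed #2212") does not describe the change, so the sketch below only illustrates the general use-after-free hazard visible in the snippet: a node pointer is stored (here as an "active QP") while the node itself can be unregistered and freed. The refcounting scheme and every name in it are assumptions for illustration, not GPAC's API.

    #include <stdlib.h>

    typedef struct node { int refs; } node_t;

    static node_t *node_ref(node_t *n)   { n->refs++; return n; }
    static void    node_unref(node_t *n) { if (--n->refs == 0) free(n); }

    typedef struct { node_t *active_qp; } decoder_t;

    /* Whoever stores a long-lived pointer to a node must own a reference to it,
     * so the stored pointer can never outlive the allocation. */
    static void set_active_qp(decoder_t *dec, node_t *n)
    {
        if (dec->active_qp) node_unref(dec->active_qp);
        dec->active_qp = node_ref(n);
    }

    static void clear_active_qp(decoder_t *dec)
    {
        if (dec->active_qp) { node_unref(dec->active_qp); dec->active_qp = NULL; }
    }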
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: void HTTPSession::onCertificateRequest(uint16_t requestId, std::unique_ptr<IOBuf> authRequest) { DestructorGuard dg(this); VLOG(4) << "CERTIFICATE_REQUEST on" << *this << ", requestId=" << requestId; std::pair<uint16_t, std::unique_ptr<folly::IOBuf>> authenticator; auto fizzBase = getTransport()->getUnderlyingTransport<AsyncFizzBase>(); if (fizzBase) { if (isUpstream()) { authenticator = secondAuthManager_->getAuthenticator(*fizzBase, TransportDirection::UPSTREAM, requestId, std::move(authRequest)); } else { authenticator = secondAuthManager_->getAuthenticator(*fizzBase, TransportDirection::DOWNSTREAM, requestId, std::move(authRequest)); } } else { VLOG(4) << "Underlying transport does not support secondary " "authentication."; return; } if (codec_->generateCertificate(writeBuf_, authenticator.first, std::move(authenticator.second)) > 0) { scheduleWrite(); } } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-20'], 'message': 'Check that a secondary auth manager is set before dereferencing. Summary: CVE-2018-6343 Reviewed By: mingtaoy Differential Revision: D12994423 fbshipit-source-id: 9229ec11da8085f1fa153595e8e5353e19d06fb7'</s>
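Here the fix message is explicit: the secondary authentication manager may be unset, so it must be checked before it is dereferenced. A minimal C sketch of that guard follows; the struct and function names are invented for the example.

    #include <stddef.h>
    #include <stdio.h>

    struct auth_manager { int (*get_authenticator)(int request_id); };
    struct session      { struct auth_manager *second_auth_manager; };

    static int on_certificate_request(struct session *s, int request_id)
    {
        if (s->second_auth_manager == NULL) {  /* the missing guard */
            fprintf(stderr, "no secondary auth manager configured\n");
            return -1;
        }
        return s->second_auth_manager->get_authenticator(request_id);
    }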
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: static block_t *Encode( encoder_t *p_enc, picture_t *p_pic ) { encoder_sys_t *p_sys = p_enc->p_sys; block_t *p_block, *p_output_chain = NULL; SchroFrame *p_frame; bool b_go = true; if( !p_pic ) { if( !p_sys->started || p_sys->b_eos_pulled ) return NULL; if( !p_sys->b_eos_signalled ) { p_sys->b_eos_signalled = 1; schro_encoder_end_of_stream( p_sys->p_schro ); } } else { /* we only know if the sequence is interlaced when the first * picture arrives, so final setup is done here */ /* XXX todo, detect change of interlace */ p_sys->p_format->interlaced = !p_pic->b_progressive; p_sys->p_format->top_field_first = p_pic->b_top_field_first; if( p_sys->b_auto_field_coding ) schro_encoder_setting_set_double( p_sys->p_schro, "interlaced_coding", !p_pic->b_progressive ); } if( !p_sys->started ) { date_t date; if( p_pic->format.i_chroma != p_enc->fmt_in.i_codec ) { char chroma_in[5], chroma_out[5]; vlc_fourcc_to_char( p_pic->format.i_chroma, chroma_in ); chroma_in[4] = '\0'; chroma_out[4] = '\0'; vlc_fourcc_to_char( p_enc->fmt_in.i_codec, chroma_out ); msg_Warn( p_enc, "Resetting chroma from %s to %s", chroma_out, chroma_in ); if( !SetEncChromaFormat( p_enc, p_pic->format.i_chroma ) ) { msg_Err( p_enc, "Could not reset chroma format to %s", chroma_in ); return NULL; } } date_Init( &date, p_enc->fmt_in.video.i_frame_rate, p_enc->fmt_in.video.i_frame_rate_base ); /* FIXME - Unlike dirac-research codec Schro doesn't have a function that returns the delay in pics yet. * Use a default of 1 */ date_Increment( &date, 1 ); p_sys->i_pts_offset = date_Get( &date ); if( schro_encoder_setting_get_double( p_sys->p_schro, "interlaced_coding" ) > 0.0 ) { date_Set( &date, 0 ); date_Increment( &date, 1); p_sys->i_field_time = date_Get( &date ) / 2; } schro_video_format_set_std_signal_range( p_sys->p_format, SCHRO_SIGNAL_RANGE_8BIT_VIDEO ); schro_encoder_set_video_format( p_sys->p_schro, p_sys->p_format ); schro_encoder_start( p_sys->p_schro ); p_sys->started = 1; } if( !p_sys->b_eos_signalled ) { /* create a schro frame from the input pic and load */ /* Increase ref count by 1 so that the picture is not freed until Schro finishes with it */ picture_Hold( p_pic ); p_frame = CreateSchroFrameFromInputPic( p_enc, p_pic ); if( !p_frame ) return NULL; schro_encoder_push_frame( p_sys->p_schro, p_frame ); /* store pts in a lookaside buffer, so that the same pts may * be used for the picture in coded order */ StorePicturePTS( p_enc, p_sys->i_input_picnum, p_pic->date ); p_sys->i_input_picnum++; /* store dts in a queue, so that they appear in order in * coded order */ p_block = block_Alloc( 1 ); if( !p_block ) return NULL; p_block->i_dts = p_pic->date - p_sys->i_pts_offset; block_FifoPut( p_sys->p_dts_fifo, p_block ); p_block = NULL; /* for field coding mode, insert an extra value into both the * pts lookaside buffer and dts queue, offset to correspond * to a one field delay. 
*/ if( schro_encoder_setting_get_double( p_sys->p_schro, "interlaced_coding" ) > 0.0 ) { StorePicturePTS( p_enc, p_sys->i_input_picnum, p_pic->date + p_sys->i_field_time ); p_sys->i_input_picnum++; p_block = block_Alloc( 1 ); if( !p_block ) return NULL; p_block->i_dts = p_pic->date - p_sys->i_pts_offset + p_sys->i_field_time; block_FifoPut( p_sys->p_dts_fifo, p_block ); p_block = NULL; } } do { SchroStateEnum state; state = schro_encoder_wait( p_sys->p_schro ); switch( state ) { case SCHRO_STATE_NEED_FRAME: b_go = false; break; case SCHRO_STATE_AGAIN: break; case SCHRO_STATE_END_OF_STREAM: p_sys->b_eos_pulled = 1; b_go = false; break; case SCHRO_STATE_HAVE_BUFFER: { SchroBuffer *p_enc_buf; uint32_t u_pic_num; int i_presentation_frame; p_enc_buf = schro_encoder_pull( p_sys->p_schro, &i_presentation_frame ); p_block = block_Alloc( p_enc_buf->length ); if( !p_block ) return NULL; memcpy( p_block->p_buffer, p_enc_buf->data, p_enc_buf->length ); schro_buffer_unref( p_enc_buf ); /* Presence of a Sequence header indicates a seek point */ if( 0 == p_block->p_buffer[4] ) { p_block->i_flags |= BLOCK_FLAG_TYPE_I; if( !p_enc->fmt_out.p_extra ) { const uint8_t eos[] = { 'B','B','C','D',0x10,0,0,0,13,0,0,0,0 }; uint32_t len = GetDWBE( p_block->p_buffer + 5 ); /* if it hasn't been done so far, stash a copy of the * sequence header for muxers such as ogg */ /* The OggDirac spec advises that a Dirac EOS DataUnit * is appended to the sequence header to allow guard * against poor streaming servers */ /* XXX, should this be done using the packetizer ? */ p_enc->fmt_out.p_extra = malloc( len + sizeof( eos ) ); if( !p_enc->fmt_out.p_extra ) return NULL; memcpy( p_enc->fmt_out.p_extra, p_block->p_buffer, len ); memcpy( (uint8_t*)p_enc->fmt_out.p_extra + len, eos, sizeof( eos ) ); SetDWBE( (uint8_t*)p_enc->fmt_out.p_extra + len + sizeof(eos) - 4, len ); p_enc->fmt_out.i_extra = len + sizeof( eos ); } } if( ReadDiracPictureNumber( &u_pic_num, p_block ) ) { block_t *p_dts_block = block_FifoGet( p_sys->p_dts_fifo ); p_block->i_dts = p_dts_block->i_dts; p_block->i_pts = GetPicturePTS( p_enc, u_pic_num ); block_Release( p_dts_block ); block_ChainAppend( &p_output_chain, p_block ); } else { /* End of sequence */ block_ChainAppend( &p_output_chain, p_block ); } break; } default: break; } } while( b_go ); return p_output_chain; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-120', 'CWE-787'], 'message': 'codec: schroedinger: fix potential buffer overflow. The variable len is a raw 32 bit value read using GetDWBE. If this value is larger than UINT32_MAX - sizeof(eos), this will cause an integer overflow in the subsequent call to malloc, and finally a buffer overflow when calling memcpy. We fix this by checking len accordingly. Signed-off-by: Jean-Baptiste Kempf <jb@videolan.org>'</s>
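The fix message spells out the defect: len is a raw 32-bit value from the stream, so len + sizeof(eos) can wrap before malloc, and the following memcpy then overflows the buffer. The sketch below shows the guard the message describes; copy_header_with_eos is an illustrative stand-in, not the actual VLC patch.

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    /* 'len' comes straight from the bitstream; guard the addition before
     * allocating and copying. */
    static uint8_t *copy_header_with_eos(const uint8_t *hdr, uint32_t len,
                                         const uint8_t *eos, size_t eos_len)
    {
        uint8_t *buf;

        if (len > UINT32_MAX - eos_len)        /* allocation size would wrap */
            return NULL;
        buf = malloc((size_t)len + eos_len);
        if (buf == NULL)
            return NULL;
        memcpy(buf, hdr, len);
        memcpy(buf + len, eos, eos_len);
        return buf;
    }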
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id, u8 *cmd_buf, u32 cmd_len, u8 *rsp_buf, u32 rsp_len, u32 timeout) { struct ath_hw *ah = wmi->drv_priv->ah; struct ath_common *common = ath9k_hw_common(ah); u16 headroom = sizeof(struct htc_frame_hdr) + sizeof(struct wmi_cmd_hdr); struct sk_buff *skb; unsigned long time_left; int ret = 0; if (ah->ah_flags & AH_UNPLUGGED) return 0; skb = alloc_skb(headroom + cmd_len, GFP_ATOMIC); if (!skb) return -ENOMEM; skb_reserve(skb, headroom); if (cmd_len != 0 && cmd_buf != NULL) { skb_put_data(skb, cmd_buf, cmd_len); } mutex_lock(&wmi->op_mutex); /* check if wmi stopped flag is set */ if (unlikely(wmi->stopped)) { ret = -EPROTO; goto out; } /* record the rsp buffer and length */ wmi->cmd_rsp_buf = rsp_buf; wmi->cmd_rsp_len = rsp_len; ret = ath9k_wmi_cmd_issue(wmi, skb, cmd_id, cmd_len); if (ret) goto out; time_left = wait_for_completion_timeout(&wmi->cmd_wait, timeout); if (!time_left) { ath_dbg(common, WMI, "Timeout waiting for WMI command: %s\n", wmi_cmd_to_name(cmd_id)); mutex_unlock(&wmi->op_mutex); kfree_skb(skb); return -ETIMEDOUT; } mutex_unlock(&wmi->op_mutex); return 0; out: ath_dbg(common, WMI, "WMI failure for: %s\n", wmi_cmd_to_name(cmd_id)); mutex_unlock(&wmi->op_mutex); kfree_skb(skb); return ret; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-416'], 'message': 'ath9k: Fix use-after-free Read in htc_connect_service The skb is consumed by htc_send_epid, so it needn't release again. The case reported by syzbot: https://lore.kernel.org/linux-usb/000000000000590f6b05a1c05d15@google.com usb 1-1: ath9k_htc: Firmware ath9k_htc/htc_9271-1.4.0.fw requested usb 1-1: ath9k_htc: Transferred FW: ath9k_htc/htc_9271-1.4.0.fw, size: 51008 usb 1-1: Service connection timeout for: 256 ================================================================== BUG: KASAN: use-after-free in atomic_read include/asm-generic/atomic-instrumented.h:26 [inline] BUG: KASAN: use-after-free in refcount_read include/linux/refcount.h:134 [inline] BUG: KASAN: use-after-free in skb_unref include/linux/skbuff.h:1042 [inline] BUG: KASAN: use-after-free in kfree_skb+0x32/0x3d0 net/core/skbuff.c:692 Read of size 4 at addr ffff8881d0957994 by task kworker/1:2/83 Call Trace: kfree_skb+0x32/0x3d0 net/core/skbuff.c:692 htc_connect_service.cold+0xa9/0x109 drivers/net/wireless/ath/ath9k/htc_hst.c:282 ath9k_wmi_connect+0xd2/0x1a0 drivers/net/wireless/ath/ath9k/wmi.c:265 ath9k_init_htc_services.constprop.0+0xb4/0x650 drivers/net/wireless/ath/ath9k/htc_drv_init.c:146 ath9k_htc_probe_device+0x25a/0x1d80 drivers/net/wireless/ath/ath9k/htc_drv_init.c:959 ath9k_htc_hw_init+0x31/0x60 drivers/net/wireless/ath/ath9k/htc_hst.c:501 ath9k_hif_usb_firmware_cb+0x26b/0x500 drivers/net/wireless/ath/ath9k/hif_usb.c:1187 request_firmware_work_func+0x126/0x242 drivers/base/firmware_loader/main.c:976 process_one_work+0x94b/0x1620 kernel/workqueue.c:2264 worker_thread+0x96/0xe20 kernel/workqueue.c:2410 kthread+0x318/0x420 kernel/kthread.c:255 ret_from_fork+0x24/0x30 arch/x86/entry/entry_64.S:352 Allocated by task 83: kmem_cache_alloc_node+0xdc/0x330 mm/slub.c:2814 __alloc_skb+0xba/0x5a0 net/core/skbuff.c:198 alloc_skb include/linux/skbuff.h:1081 [inline] htc_connect_service+0x2cc/0x840 
drivers/net/wireless/ath/ath9k/htc_hst.c:257 ath9k_wmi_connect+0xd2/0x1a0 drivers/net/wireless/ath/ath9k/wmi.c:265 ath9k_init_htc_services.constprop.0+0xb4/0x650 drivers/net/wireless/ath/ath9k/htc_drv_init.c:146 ath9k_htc_probe_device+0x25a/0x1d80 drivers/net/wireless/ath/ath9k/htc_drv_init.c:959 ath9k_htc_hw_init+0x31/0x60 drivers/net/wireless/ath/ath9k/htc_hst.c:501 ath9k_hif_usb_firmware_cb+0x26b/0x500 drivers/net/wireless/ath/ath9k/hif_usb.c:1187 request_firmware_work_func+0x126/0x242 drivers/base/firmware_loader/main.c:976 process_one_work+0x94b/0x1620 kernel/workqueue.c:2264 worker_thread+0x96/0xe20 kernel/workqueue.c:2410 kthread+0x318/0x420 kernel/kthread.c:255 ret_from_fork+0x24/0x30 arch/x86/entry/entry_64.S:352 Freed by task 0: kfree_skb+0x102/0x3d0 net/core/skbuff.c:690 ath9k_htc_txcompletion_cb+0x1f8/0x2b0 drivers/net/wireless/ath/ath9k/htc_hst.c:356 hif_usb_regout_cb+0x10b/0x1b0 drivers/net/wireless/ath/ath9k/hif_usb.c:90 __usb_hcd_giveback_urb+0x29a/0x550 drivers/usb/core/hcd.c:1650 usb_hcd_giveback_urb+0x368/0x420 drivers/usb/core/hcd.c:1716 dummy_timer+0x1258/0x32ae drivers/usb/gadget/udc/dummy_hcd.c:1966 call_timer_fn+0x195/0x6f0 kernel/time/timer.c:1404 expire_timers kernel/time/timer.c:1449 [inline] __run_timers kernel/time/timer.c:1773 [inline] __run_timers kernel/time/timer.c:1740 [inline] run_timer_softirq+0x5f9/0x1500 kernel/time/timer.c:1786 __do_softirq+0x21e/0x950 kernel/softirq.c:292 Reported-and-tested-by: syzbot+9505af1ae303dabdc646@syzkaller.appspotmail.com Signed-off-by: Qiujun Huang <hqjagain@gmail.com> Signed-off-by: Kalle Valo <kvalo@codeaurora.org> Link: https://lore.kernel.org/r/20200404041838.10426-2-hqjagain@gmail.com'</s>
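Per the fix message, the skb is consumed by htc_send_epid, so freeing it again on the timeout path is what produces the use-after-free. The sketch below reduces that to an ownership rule in plain C; struct buf, send_consumes and issue_cmd are hypothetical names.

    #include <stdlib.h>

    struct buf { void *data; };

    /* Consumes 'b' on every path, success or failure. */
    static int send_consumes(struct buf *b) { free(b->data); free(b); return 0; }

    static int issue_cmd(struct buf *b, int timed_out)
    {
        int ret = send_consumes(b);   /* ownership of 'b' passes to the callee */
        if (ret != 0)
            return ret;               /* must NOT free 'b' here...                */
        if (timed_out)
            return -1;                /* ...nor here: that extra free was the bug */
        return 0;
    }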
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: ReturnCode_t DataReaderImpl::enable() { assert(reader_ == nullptr); fastrtps::rtps::ReaderAttributes att; att.endpoint.durabilityKind = qos_.durability().durabilityKind(); att.endpoint.endpointKind = READER; att.endpoint.multicastLocatorList = qos_.endpoint().multicast_locator_list; att.endpoint.reliabilityKind = qos_.reliability().kind == RELIABLE_RELIABILITY_QOS ? RELIABLE : BEST_EFFORT; att.endpoint.topicKind = type_->m_isGetKeyDefined ? WITH_KEY : NO_KEY; att.endpoint.unicastLocatorList = qos_.endpoint().unicast_locator_list; att.endpoint.remoteLocatorList = qos_.endpoint().remote_locator_list; att.endpoint.properties = qos_.properties(); if (qos_.endpoint().entity_id > 0) { att.endpoint.setEntityID(static_cast<uint8_t>(qos_.endpoint().entity_id)); } if (qos_.endpoint().user_defined_id > 0) { att.endpoint.setUserDefinedID(static_cast<uint8_t>(qos_.endpoint().user_defined_id)); } att.times = qos_.reliable_reader_qos().times; att.liveliness_lease_duration = qos_.liveliness().lease_duration; att.liveliness_kind_ = qos_.liveliness().kind; att.matched_writers_allocation = qos_.reader_resource_limits().matched_publisher_allocation; att.expectsInlineQos = qos_.expects_inline_qos(); att.disable_positive_acks = qos_.reliable_reader_qos().disable_positive_ACKs.enabled; // TODO(Ricardo) Remove in future // Insert topic_name and partitions Property property; property.name("topic_name"); property.value(topic_->get_name().c_str()); att.endpoint.properties.properties().push_back(std::move(property)); if (subscriber_->get_qos().partition().names().size() > 0) { property.name("partitions"); std::string partitions; for (auto partition : subscriber_->get_qos().partition().names()) { partitions += partition + ";"; } property.value(std::move(partitions)); att.endpoint.properties.properties().push_back(std::move(property)); } RTPSReader* reader = RTPSDomain::createRTPSReader( subscriber_->rtps_participant(), att, static_cast<ReaderHistory*>(&history_), static_cast<ReaderListener*>(&reader_listener_)); if (reader == nullptr) { logError(DATA_READER, "Problem creating associated Reader"); return ReturnCode_t::RETCODE_ERROR; } reader_ = reader; deadline_timer_ = new TimedEvent(subscriber_->get_participant()->get_resource_event(), [&]() -> bool { return deadline_missed(); }, qos_.deadline().period.to_ns() * 1e-6); lifespan_timer_ = new TimedEvent(subscriber_->get_participant()->get_resource_event(), [&]() -> bool { return lifespan_expired(); }, qos_.lifespan().duration.to_ns() * 1e-6); // Register the reader ReaderQos rqos = qos_.get_readerqos(subscriber_->get_qos()); subscriber_->rtps_participant()->registerReader(reader_, topic_attributes(), rqos); return ReturnCode_t::RETCODE_OK; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-284'], 'message': 'check remote permissions (#1387) * Refs 5346. Blackbox test Signed-off-by: Iker Luengo <ikerluengo@eprosima.com> * Refs 5346. one-way string compare Signed-off-by: Iker Luengo <ikerluengo@eprosima.com> * Refs 5346. Do not add partition separator on last partition Signed-off-by: Iker Luengo <ikerluengo@eprosima.com> * Refs 5346. Uncrustify Signed-off-by: Iker Luengo <ikerluengo@eprosima.com> * Refs 5346. 
Uncrustify Signed-off-by: Iker Luengo <ikerluengo@eprosima.com> * Refs 3680. Access control unit testing It only covers Partition and Topic permissions Signed-off-by: Iker Luengo <ikerluengo@eprosima.com> * Refs #3680. Fix partition check on Permissions plugin. Signed-off-by: Iker Luengo <ikerluengo@eprosima.com> * Refs 3680. Uncrustify Signed-off-by: Iker Luengo <ikerluengo@eprosima.com> * Refs 3680. Fix tests on mac Signed-off-by: Iker Luengo <ikerluengo@eprosima.com> * Refs 3680. Fix windows tests Signed-off-by: Iker Luengo <ikerluengo@eprosima.com> * Refs 3680. Avoid memory leak on test Signed-off-by: Iker Luengo <ikerluengo@eprosima.com> * Refs 3680. Proxy data mocks should not return temporary objects Signed-off-by: Iker Luengo <ikerluengo@eprosima.com> * refs 3680. uncrustify Signed-off-by: Iker Luengo <ikerluengo@eprosima.com> Co-authored-by: Miguel Company <MiguelCompany@eprosima.com>'</s>
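One detail called out in the commit above is "do not add partition separator on last partition"; the snippet's loop appends ';' after every name, including the last. A small sketch of separator-between-items joining follows, using a hypothetical join_partitions helper rather than Fast DDS code.

    #include <string.h>

    /* Join names with ';' between entries, not after every entry. */
    static void join_partitions(char *out, size_t out_len,
                                const char *const *names, size_t count)
    {
        size_t i;

        out[0] = '\0';
        for (i = 0; i < count; i++) {
            if (i > 0)
                strncat(out, ";", out_len - strlen(out) - 1);
            strncat(out, names[i], out_len - strlen(out) - 1);
        }
    }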
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: bool StringMatching::matchString(const char* str1, const char* str2) { if(PathMatchSpec(str1,str2)) return true; if(PathMatchSpec(str2,str1)) return true; return false; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-284'], 'message': 'check remote permissions (#1387) * Refs 5346. Blackbox test Signed-off-by: Iker Luengo <ikerluengo@eprosima.com> * Refs 5346. one-way string compare Signed-off-by: Iker Luengo <ikerluengo@eprosima.com> * Refs 5346. Do not add partition separator on last partition Signed-off-by: Iker Luengo <ikerluengo@eprosima.com> * Refs 5346. Uncrustify Signed-off-by: Iker Luengo <ikerluengo@eprosima.com> * Refs 5346. Uncrustify Signed-off-by: Iker Luengo <ikerluengo@eprosima.com> * Refs 3680. Access control unit testing It only covers Partition and Topic permissions Signed-off-by: Iker Luengo <ikerluengo@eprosima.com> * Refs #3680. Fix partition check on Permissions plugin. Signed-off-by: Iker Luengo <ikerluengo@eprosima.com> * Refs 3680. Uncrustify Signed-off-by: Iker Luengo <ikerluengo@eprosima.com> * Refs 3680. Fix tests on mac Signed-off-by: Iker Luengo <ikerluengo@eprosima.com> * Refs 3680. Fix windows tests Signed-off-by: Iker Luengo <ikerluengo@eprosima.com> * Refs 3680. Avoid memory leak on test Signed-off-by: Iker Luengo <ikerluengo@eprosima.com> * Refs 3680. Proxy data mocks should not return temporary objects Signed-off-by: Iker Luengo <ikerluengo@eprosima.com> * refs 3680. uncrustify Signed-off-by: Iker Luengo <ikerluengo@eprosima.com> Co-authored-by: Miguel Company <MiguelCompany@eprosima.com>'</s>
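The same commit mentions a "one-way string compare". matchString above treats both sides as patterns, so a remote peer could present a wildcard that matches anything. The sketch below assumes the intended direction is local pattern against remote literal; glob_match is a toy stand-in for PathMatchSpec.

    #include <stdbool.h>

    /* Toy '*'-only matcher standing in for PathMatchSpec(); illustration only. */
    static bool glob_match(const char *pat, const char *str)
    {
        if (*pat == '\0') return *str == '\0';
        if (*pat == '*')  return glob_match(pat + 1, str) ||
                                 (*str != '\0' && glob_match(pat, str + 1));
        return *pat == *str && glob_match(pat + 1, str + 1);
    }

    /* One-way check: only the locally configured partition acts as a pattern;
     * the remote value is matched as a literal, so a remote "*" is not special. */
    static bool partitions_match(const char *local_pattern, const char *remote_name)
    {
        return glob_match(local_pattern, remote_name);
    }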
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: int yr_re_ast_create( RE_AST** re_ast) { *re_ast = (RE_AST*) yr_malloc(sizeof(RE_AST)); if (*re_ast == NULL) return ERROR_INSUFFICIENT_MEMORY; (*re_ast)->flags = 0; (*re_ast)->root_node = NULL; return ERROR_SUCCESS; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-674', 'CWE-787'], 'message': 'Fix issue #674. Move regexp limits to limits.h.'</s>
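CWE-674 here is uncontrolled recursion: a deeply nested regexp can exhaust the parser's stack, and the fix moves the nesting limits into limits.h. The sketch below shows a depth-limited recursive parse in the abstract; RE_MAX_NESTING's value and parse_group are illustrative, not YARA's real limit or grammar.

    #define RE_MAX_NESTING 1024   /* illustrative; the real limits live in limits.h */

    /* Every recursive call carries and checks a nesting counter so a crafted
     * regexp cannot drive unbounded recursion. */
    static int parse_group(const char **p, int depth)
    {
        if (depth > RE_MAX_NESTING)
            return -1;                       /* reject instead of blowing the stack */
        while (**p != '\0' && **p != ')') {
            if (**p == '(') {
                (*p)++;
                if (parse_group(p, depth + 1) != 0)
                    return -1;
                if (**p != ')')
                    return -1;               /* unbalanced group */
            }
            (*p)++;
        }
        return 0;
    }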
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: dwg_decode_add_object (Dwg_Data *restrict dwg, Bit_Chain *dat, Bit_Chain *hdl_dat, long unsigned int address) { long unsigned int objpos, restartpos; Bit_Chain abs_dat = { NULL }; unsigned char previous_bit; Dwg_Object *restrict obj; BITCODE_BL num = dwg->num_objects; int error = 0; int realloced = 0; /* Keep the previous full chain */ abs_dat = *dat; /* Use the indicated address for the object */ dat->byte = address; dat->bit = 0; // DEBUG_HERE; /* * Reserve memory space for objects. A realloc violates all internal * pointers. */ realloced = dwg_add_object (dwg); if (realloced > 0) { *dat = abs_dat; return realloced; // i.e. DWG_ERR_OUTOFMEM } obj = &dwg->object[num]; LOG_INFO ("==========================================\n" "Object number: %lu/%lX", (unsigned long)num, (unsigned long)num) obj->size = bit_read_MS (dat); LOG_INFO (", Size: %d [MS]", obj->size) SINCE (R_2010) { /* This is not counted in the object size */ obj->handlestream_size = bit_read_UMC (dat); LOG_INFO (", Hdlsize: " FORMAT_UMC " [UMC] ", obj->handlestream_size); obj->bitsize = obj->size * 8 - obj->handlestream_size; // TODO boundscheck } objpos = bit_position (dat); // absolute obj->address = dat->byte; /* Until here dat is absolute. now restrict it */ bit_reset_chain (dat); if (obj->size > dat->size) { LOG_ERROR ("\nInvalid object size. Would overflow"); *dat = abs_dat; return DWG_ERR_VALUEOUTOFBOUNDS; } dat->size = obj->size; SINCE (R_2010) { obj->type = bit_read_BOT (dat); } else { obj->type = bit_read_BS (dat); } LOG_INFO (", Type: %d [%s]\n", obj->type, dat->version >= R_2010 ? 
"BOT" : "BS"); restartpos = bit_position (dat); // relative /* Check the type of the object */ switch (obj->type) { case DWG_TYPE_TEXT: error = dwg_decode_TEXT (dat, obj); break; case DWG_TYPE_ATTRIB: error = dwg_decode_ATTRIB (dat, obj); break; case DWG_TYPE_ATTDEF: error = dwg_decode_ATTDEF (dat, obj); break; case DWG_TYPE_BLOCK: error = dwg_decode_BLOCK (dat, obj); break; case DWG_TYPE_ENDBLK: error = dwg_decode_ENDBLK (dat, obj); break; case DWG_TYPE_SEQEND: error = dwg_decode_SEQEND (dat, obj); if (dat->version >= R_13 && obj->tio.entity->ownerhandle) { Dwg_Object *restrict owner = dwg_resolve_handle ( dwg, obj->tio.entity->ownerhandle->absolute_ref); if (!owner) { LOG_WARN ("no SEQEND.ownerhandle") } else if (owner->fixedtype == DWG_TYPE_INSERT || owner->fixedtype == DWG_TYPE_MINSERT) { /* SEQEND handle for the owner needed in validate_INSERT */ hash_set (dwg->object_map, obj->handle.value, (uint32_t)num); (void)dwg_validate_INSERT (owner); } else if (owner->fixedtype == DWG_TYPE_POLYLINE_2D || owner->fixedtype == DWG_TYPE_POLYLINE_3D || owner->fixedtype == DWG_TYPE_POLYLINE_PFACE || owner->fixedtype == DWG_TYPE_POLYLINE_MESH) { Dwg_Entity_POLYLINE_2D *restrict _obj = owner->tio.entity->tio.POLYLINE_2D; if (!_obj->seqend) /* SEQEND handle for the owner needed in validate_POLYLINE */ hash_set (dwg->object_map, obj->handle.value, (uint32_t)num); (void)dwg_validate_POLYLINE (owner); } } break; case DWG_TYPE_INSERT: error = dwg_decode_INSERT (dat, obj); break; case DWG_TYPE_MINSERT: error = dwg_decode_MINSERT (dat, obj); break; case DWG_TYPE_VERTEX_2D: error = dwg_decode_VERTEX_2D (dat, obj); break; case DWG_TYPE_VERTEX_3D: error = dwg_decode_VERTEX_3D (dat, obj); break; case DWG_TYPE_VERTEX_MESH: error = dwg_decode_VERTEX_MESH (dat, obj); break; case DWG_TYPE_VERTEX_PFACE: error = dwg_decode_VERTEX_PFACE (dat, obj); break; case DWG_TYPE_VERTEX_PFACE_FACE: error = dwg_decode_VERTEX_PFACE_FACE (dat, obj); break; case DWG_TYPE_POLYLINE_2D: error = dwg_decode_POLYLINE_2D (dat, obj); if (dat->version >= R_2010) check_POLYLINE_handles (obj); break; case DWG_TYPE_POLYLINE_3D: error = dwg_decode_POLYLINE_3D (dat, obj); if (dat->version >= R_2010) check_POLYLINE_handles (obj); break; case DWG_TYPE_ARC: error = dwg_decode_ARC (dat, obj); break; case DWG_TYPE_CIRCLE: error = dwg_decode_CIRCLE (dat, obj); break; case DWG_TYPE_LINE: error = dwg_decode_LINE (dat, obj); break; case DWG_TYPE_DIMENSION_ORDINATE: error = dwg_decode_DIMENSION_ORDINATE (dat, obj); break; case DWG_TYPE_DIMENSION_LINEAR: error = dwg_decode_DIMENSION_LINEAR (dat, obj); break; case DWG_TYPE_DIMENSION_ALIGNED: error = dwg_decode_DIMENSION_ALIGNED (dat, obj); break; case DWG_TYPE_DIMENSION_ANG3PT: error = dwg_decode_DIMENSION_ANG3PT (dat, obj); break; case DWG_TYPE_DIMENSION_ANG2LN: error = dwg_decode_DIMENSION_ANG2LN (dat, obj); break; case DWG_TYPE_DIMENSION_RADIUS: error = dwg_decode_DIMENSION_RADIUS (dat, obj); break; case DWG_TYPE_DIMENSION_DIAMETER: error = dwg_decode_DIMENSION_DIAMETER (dat, obj); break; case DWG_TYPE_POINT: error = dwg_decode_POINT (dat, obj); break; case DWG_TYPE__3DFACE: error = dwg_decode__3DFACE (dat, obj); break; case DWG_TYPE_POLYLINE_PFACE: error = dwg_decode_POLYLINE_PFACE (dat, obj); if (dat->version >= R_2010) check_POLYLINE_handles (obj); break; case DWG_TYPE_POLYLINE_MESH: error = dwg_decode_POLYLINE_MESH (dat, obj); if (dat->version >= R_2010) check_POLYLINE_handles (obj); break; case DWG_TYPE_SOLID: error = dwg_decode_SOLID (dat, obj); break; case DWG_TYPE_TRACE: error = dwg_decode_TRACE 
(dat, obj); break; case DWG_TYPE_SHAPE: error = dwg_decode_SHAPE (dat, obj); break; case DWG_TYPE_VIEWPORT: error = dwg_decode_VIEWPORT (dat, obj); break; case DWG_TYPE_ELLIPSE: error = dwg_decode_ELLIPSE (dat, obj); break; case DWG_TYPE_SPLINE: error = dwg_decode_SPLINE (dat, obj); break; case DWG_TYPE_REGION: error = dwg_decode_REGION (dat, obj); break; case DWG_TYPE__3DSOLID: error = dwg_decode__3DSOLID (dat, obj); break; case DWG_TYPE_BODY: error = dwg_decode_BODY (dat, obj); break; case DWG_TYPE_RAY: error = dwg_decode_RAY (dat, obj); break; case DWG_TYPE_XLINE: error = dwg_decode_XLINE (dat, obj); break; case DWG_TYPE_DICTIONARY: error = dwg_decode_DICTIONARY (dat, obj); break; case DWG_TYPE_MTEXT: error = dwg_decode_MTEXT (dat, obj); break; case DWG_TYPE_LEADER: error = dwg_decode_LEADER (dat, obj); break; case DWG_TYPE_TOLERANCE: error = dwg_decode_TOLERANCE (dat, obj); break; case DWG_TYPE_MLINE: error = dwg_decode_MLINE (dat, obj); break; case DWG_TYPE_BLOCK_CONTROL: error = dwg_decode_BLOCK_CONTROL (dat, obj); if (!error && obj->tio.object->tio.BLOCK_CONTROL) { obj->tio.object->tio.BLOCK_CONTROL->objid = num; if (!dwg->block_control.parent) // only once dwg->block_control = *obj->tio.object->tio.BLOCK_CONTROL; else LOG_WARN ("Second BLOCK_CONTROL object ignored"); } break; case DWG_TYPE_BLOCK_HEADER: error = dwg_decode_BLOCK_HEADER (dat, obj); /* * We cannot cache dwg->*space_block here as dwg->objects might get * realloc'ed. See dwg_model_space_object() and dwg_paper_space_object() * instead. */ break; case DWG_TYPE_LAYER_CONTROL: error = dwg_decode_LAYER_CONTROL (dat, obj); if (!error && obj->tio.object->tio.LAYER_CONTROL) { obj->tio.object->tio.LAYER_CONTROL->objid = num; dwg->layer_control = *obj->tio.object->tio.LAYER_CONTROL; } break; case DWG_TYPE_LAYER: error = dwg_decode_LAYER (dat, obj); break; case DWG_TYPE_STYLE_CONTROL: error = dwg_decode_STYLE_CONTROL (dat, obj); if (!error && obj->tio.object->tio.STYLE_CONTROL) { obj->tio.object->tio.STYLE_CONTROL->objid = num; dwg->style_control = *obj->tio.object->tio.STYLE_CONTROL; } break; case DWG_TYPE_STYLE: error = dwg_decode_STYLE (dat, obj); break; case DWG_TYPE_LTYPE_CONTROL: error = dwg_decode_LTYPE_CONTROL (dat, obj); if (!error && obj->tio.object->tio.LTYPE_CONTROL) { obj->tio.object->tio.LTYPE_CONTROL->objid = num; dwg->ltype_control = *obj->tio.object->tio.LTYPE_CONTROL; } break; case DWG_TYPE_LTYPE: error = dwg_decode_LTYPE (dat, obj); break; case DWG_TYPE_VIEW_CONTROL: error = dwg_decode_VIEW_CONTROL (dat, obj); if (!error && obj->tio.object->tio.VIEW_CONTROL) { obj->tio.object->tio.VIEW_CONTROL->objid = num; dwg->view_control = *obj->tio.object->tio.VIEW_CONTROL; } break; case DWG_TYPE_VIEW: error = dwg_decode_VIEW (dat, obj); break; case DWG_TYPE_UCS_CONTROL: error = dwg_decode_UCS_CONTROL (dat, obj); if (!error && obj->tio.object->tio.UCS_CONTROL) { obj->tio.object->tio.UCS_CONTROL->objid = num; dwg->ucs_control = *obj->tio.object->tio.UCS_CONTROL; } break; case DWG_TYPE_UCS: error = dwg_decode_UCS (dat, obj); break; case DWG_TYPE_VPORT_CONTROL: error = dwg_decode_VPORT_CONTROL (dat, obj); if (!error && obj->tio.object->tio.VPORT_CONTROL) { obj->tio.object->tio.VPORT_CONTROL->objid = num; dwg->vport_control = *obj->tio.object->tio.VPORT_CONTROL; } break; case DWG_TYPE_VPORT: error = dwg_decode_VPORT (dat, obj); break; case DWG_TYPE_APPID_CONTROL: error = dwg_decode_APPID_CONTROL (dat, obj); if (!error && obj->tio.object->tio.APPID_CONTROL) { obj->tio.object->tio.APPID_CONTROL->objid = num; dwg->appid_control = 
*obj->tio.object->tio.APPID_CONTROL; } break; case DWG_TYPE_APPID: error = dwg_decode_APPID (dat, obj); break; case DWG_TYPE_DIMSTYLE_CONTROL: error = dwg_decode_DIMSTYLE_CONTROL (dat, obj); if (!error && obj->tio.object->tio.DIMSTYLE_CONTROL) { obj->tio.object->tio.DIMSTYLE_CONTROL->objid = num; dwg->dimstyle_control = *obj->tio.object->tio.DIMSTYLE_CONTROL; } break; case DWG_TYPE_DIMSTYLE: error = dwg_decode_DIMSTYLE (dat, obj); break; case DWG_TYPE_VPORT_ENTITY_CONTROL: error = dwg_decode_VPORT_ENTITY_CONTROL (dat, obj); if (!error && obj->tio.object->tio.VPORT_ENTITY_CONTROL) { obj->tio.object->tio.VPORT_ENTITY_CONTROL->objid = num; dwg->vport_entity_control = *obj->tio.object->tio.VPORT_ENTITY_CONTROL; } break; case DWG_TYPE_VPORT_ENTITY_HEADER: error = dwg_decode_VPORT_ENTITY_HEADER (dat, obj); break; case DWG_TYPE_GROUP: error = dwg_decode_GROUP (dat, obj); break; case DWG_TYPE_MLINESTYLE: error = dwg_decode_MLINESTYLE (dat, obj); break; case DWG_TYPE_OLE2FRAME: error = dwg_decode_OLE2FRAME (dat, obj); break; case DWG_TYPE_DUMMY: error = dwg_decode_DUMMY (dat, obj); break; case DWG_TYPE_LONG_TRANSACTION: error = dwg_decode_LONG_TRANSACTION (dat, obj); break; case DWG_TYPE_LWPOLYLINE: error = dwg_decode_LWPOLYLINE (dat, obj); break; case DWG_TYPE_HATCH: error = dwg_decode_HATCH (dat, obj); break; case DWG_TYPE_XRECORD: error = dwg_decode_XRECORD (dat, obj); break; case DWG_TYPE_PLACEHOLDER: error = dwg_decode_PLACEHOLDER (dat, obj); break; case DWG_TYPE_OLEFRAME: error = dwg_decode_OLEFRAME (dat, obj); break; case DWG_TYPE_VBA_PROJECT: LOG_ERROR ("Unhandled Object VBA_PROJECT. Has its own section"); // dwg_decode_VBA_PROJECT(dat, obj); error = DWG_ERR_UNHANDLEDCLASS; break; case DWG_TYPE_LAYOUT: error = dwg_decode_LAYOUT (dat, obj); break; case DWG_TYPE_PROXY_ENTITY: error = dwg_decode_PROXY_ENTITY (dat, obj); break; case DWG_TYPE_PROXY_OBJECT: error = dwg_decode_PROXY_OBJECT (dat, obj); break; default: if (obj->type == dwg->layout_type) error = dwg_decode_LAYOUT (dat, obj); /* > 500 */ else if ((error = dwg_decode_variable_type (dwg, dat, hdl_dat, obj)) & DWG_ERR_UNHANDLEDCLASS) { int is_entity = 0; int i = obj->type - 500; Dwg_Class *klass = NULL; /* restart and read into the UNKNOWN_OBJ object */ /* the relative offset from type after common_entity_data */ // obj->common_size = bit_position(dat) - restartpos; // LOG_HANDLE("common_size: %lu\n", obj->common_size); // needed for // unknown bit_set_position (dat, restartpos); // obj->unknown_off = obj->unknown_pos - restartpos; // LOG_TRACE("Unknown pos %lu, offset %lu\n", obj->unknown_pos, // obj->unknown_off); if (i >= 0 && i < (int)dwg->num_classes) { klass = &dwg->dwg_class[i]; is_entity = dwg_class_is_entity (klass); } else { if (i < 0) { LOG_ERROR ("Invalid class index %d <0", i); } else { LOG_ERROR ("Invalid class index %d >%d", i, (int)dwg->num_classes); } obj->supertype = DWG_SUPERTYPE_UNKNOWN; obj->type = 0; *dat = abs_dat; return error | DWG_ERR_VALUEOUTOFBOUNDS; } // properly dwg_decode_object/_entity for eed, reactors, xdic if (klass && !is_entity) { int err = dwg_decode_UNKNOWN_OBJ (dat, obj); error |= err; obj->supertype = DWG_SUPERTYPE_UNKNOWN; if (!dat) return error; if (err >= DWG_ERR_CRITICAL) *dat = abs_dat; } else if (klass) // is_entity { int err; #if 0 && !defined(IS_RELEASE) if (strEQc(klass->dxfname, "MULTILEADER")) { //debug CED char *mleader = bit_read_TF(dat, obj->size); LOG_INSANE_TF(mleader, (int)obj->size) bit_set_position(dat, restartpos); free (mleader); } #endif err = dwg_decode_UNKNOWN_ENT (dat, 
obj); error |= err; obj->supertype = DWG_SUPERTYPE_UNKNOWN; if (!dat) return error; if (err >= DWG_ERR_CRITICAL) *dat = abs_dat; } else // not a class { LOG_WARN ("Unknown object, skipping eed/reactors/xdic"); SINCE (R_2000) { obj->bitsize = bit_read_RL (dat); LOG_TRACE ("bitsize: " FORMAT_RL " [RL] @%lu.%u\n", obj->bitsize, dat->byte-2, dat->bit); if (obj->bitsize > obj->size * 8) { LOG_ERROR ("Invalid bitsize " FORMAT_RL " => " FORMAT_RL, obj->bitsize, obj->size * 8); obj->bitsize = obj->size * 8; error |= DWG_ERR_VALUEOUTOFBOUNDS; } } if (!bit_read_H (dat, &obj->handle)) { LOG_TRACE ("handle: " FORMAT_H " [H 5]\n", ARGS_H (obj->handle)); } restartpos = dat->byte; obj->supertype = DWG_SUPERTYPE_UNKNOWN; obj->tio.unknown = bit_read_TF (dat, obj->size); dat->byte = restartpos; } } } if (obj->handle.value) { // empty only with UNKNOWN LOG_HANDLE (" object_map{%lX} = %lu\n", obj->handle.value, (unsigned long)num); hash_set (dwg->object_map, obj->handle.value, (uint32_t)num); } if (dat->byte > 8 * dat->size) { LOG_ERROR ("Invalid object address (overflow): %lu > %lu", dat->byte, 8 * dat->size); *dat = abs_dat; return error | DWG_ERR_INVALIDDWG; } /* Restore the old absolute chain. CRC needs to be calculated from address, which is before our 0 position. */ restartpos = bit_position (dat); *dat = abs_dat; bit_set_position (dat, objpos + restartpos); /* Now 1 padding bits until next byte, and then a RS CRC */ if (dat->bit) { unsigned char r = 8 - dat->bit; LOG_HANDLE (" padding: %X/%X (%d bits)\n", dat->chain[dat->byte], dat->chain[dat->byte] & ((1 << r) - 1), r); bit_advance_position (dat, r); } bit_set_position (dat, (obj->address + obj->size) * 8 - 2); if (!bit_check_CRC (dat, address, 0xC0C1)) error |= DWG_ERR_WRONGCRC; /* Reset to previous addresses for return */ *dat = abs_dat; return realloced ? -1 : error; // re-alloced or not } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-703', 'CWE-835'], 'message': 'cleanup tio.unknown not needed anymore, we only have UNKNOWN_OBJ or UNKNOWN_ENT with full common entity_data. Fixes GH #178 heap_overflow2'</s>
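The record above decodes untrusted DWG objects and, in the unknown-object branch, validates a bit-size value read from the file before using it for further bit reads. The sketch below restates that check with simplified names (they are illustrative, not the actual libredwg API): a file-supplied bit count is clamped to the object's declared byte extent so later reads stay inside the allocated buffer.

```c
#include <stdint.h>

#define ERR_VALUEOUTOFBOUNDS 0x10        /* illustrative error flag, not the real constant */

/* Clamp a file-supplied bit count to the object's byte extent. */
static int check_bitsize(uint32_t *bitsize, uint32_t size_in_bytes)
{
    if (*bitsize > size_in_bytes * 8u) {
        *bitsize = size_in_bytes * 8u;   /* keep subsequent bit reads in bounds */
        return ERR_VALUEOUTOFBOUNDS;     /* record that the input was malformed */
    }
    return 0;
}
```

The design point is that the decoder keeps going with a clamped value rather than trusting the header, which is what prevents the heap overflow the commit message refers to.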
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: static int ssl_verify_cert(struct tunnel *tunnel) { int ret = -1; int cert_valid = 0; unsigned char digest[SHA256LEN]; unsigned int len; struct x509_digest *elem; char digest_str[SHA256STRLEN], *subject, *issuer; char *line; int i; X509_NAME *subj; char common_name[FIELD_SIZE + 1]; SSL_set_verify(tunnel->ssl_handle, SSL_VERIFY_PEER, NULL); X509 *cert = SSL_get_peer_certificate(tunnel->ssl_handle); if (cert == NULL) { log_error("Unable to get gateway certificate.\n"); return 1; } subj = X509_get_subject_name(cert); #ifdef HAVE_X509_CHECK_HOST // Use OpenSSL native host validation if v >= 1.0.2. // correctly check return value of X509_check_host if (X509_check_host(cert, common_name, FIELD_SIZE, 0, NULL) == 1) cert_valid = 1; #else // Use explicit Common Name check if native validation not available. // Note: this will ignore Subject Alternative Name fields. if (subj && X509_NAME_get_text_by_NID(subj, NID_commonName, common_name, FIELD_SIZE) > 0 && strncasecmp(common_name, tunnel->config->gateway_host, FIELD_SIZE) == 0) cert_valid = 1; #endif // Try to validate certificate using local PKI if (cert_valid && SSL_get_verify_result(tunnel->ssl_handle) == X509_V_OK) { log_debug("Gateway certificate validation succeeded.\n"); ret = 0; goto free_cert; } log_debug("Gateway certificate validation failed.\n"); // If validation failed, check if cert is in the white list if (X509_digest(cert, EVP_sha256(), digest, &len) <= 0 || len != SHA256LEN) { log_error("Could not compute certificate sha256 digest.\n"); goto free_cert; } // Encode digest in base16 for (i = 0; i < SHA256LEN; i++) sprintf(&digest_str[2 * i], "%02x", digest[i]); digest_str[SHA256STRLEN - 1] = '\0'; // Is it in whitelist? for (elem = tunnel->config->cert_whitelist; elem != NULL; elem = elem->next) if (memcmp(digest_str, elem->data, SHA256STRLEN - 1) == 0) break; if (elem != NULL) { // break before end of loop log_debug("Gateway certificate digest found in white list.\n"); ret = 0; goto free_cert; } subject = X509_NAME_oneline(subj, NULL, 0); issuer = X509_NAME_oneline(X509_get_issuer_name(cert), NULL, 0); log_error("Gateway certificate validation failed, and the certificate digest in not in the local whitelist. If you trust it, rerun with:\n"); log_error(" --trusted-cert %s\n", digest_str); log_error("or add this line to your config file:\n"); log_error(" trusted-cert = %s\n", digest_str); log_error("Gateway certificate:\n"); log_error(" subject:\n"); for (line = strtok(subject, "/"); line != NULL; line = strtok(NULL, "/")) log_error(" %s\n", line); log_error(" issuer:\n"); for (line = strtok(issuer, "/"); line != NULL; line = strtok(NULL, "/")) log_error(" %s\n", line); log_error(" sha256 digest:\n"); log_error(" %s\n", digest_str); free_cert: X509_free(cert); return ret; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-295', 'CWE-908'], 'message': 'supply proper input buffer to X509_check_host CVE-2020-7042 use of uninitialized memory in X509_check_host is fixed with this commit the uninitialized buffer common_name was passed as argument to X509_check_host which prevented proper host name validation when openssl >= 1.0.2 was in use. This came in with #282 which went into openfortivpn 1.7.1. 
Unfortunately, this problem has stayed unnoticed because the return value was not properly checked either (which is a separate issue, with CVE-2020-7041, and which has been fixed by the previous commit)'</s>
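The commit message in the entry above describes two intertwined problems: an uninitialized buffer handed to X509_check_host() and an unchecked return value. The sketch below is a minimal illustration, not the exact openfortivpn patch; it assumes the intended host name is the configured gateway host (the field name is taken from the surrounding code) and treats only a return value of exactly 1 as a match.

```c
/* Minimal sketch: pass a known, initialized host string to
 * X509_check_host() and accept only an exact return value of 1. */
#include <openssl/x509v3.h>

static int cert_matches_host(X509 *cert, const char *gateway_host)
{
    /* A length of 0 tells OpenSSL to use strlen(gateway_host). */
    int rc = X509_check_host(cert, gateway_host, 0, 0, NULL);

    /* 1 = match, 0 = no match, negative = internal error. */
    return rc == 1;
}
```

Checking `rc == 1` rather than `rc != 0` matters because X509_check_host() can also return -1 on error, which a truthiness check would misread as success.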
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: NativeModule::NativeModule(const std::string& filename) : init(nullptr) { if (uv_dlopen(filename.c_str(), &lib) != 0) { throw RuntimeGenericError("Failed to load module"); } if (uv_dlsym(&lib, "InitForContext", reinterpret_cast<void**>(&init)) != 0 || init == nullptr) { uv_dlclose(&lib); throw RuntimeGenericError("Module is not isolated-vm compatible"); } } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-703', 'CWE-913'], 'message': 'Disallow NativeModule creation unless main isolate'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: TfLiteStatus GatherNd(const TfLiteEvalTensor* params, const TfLiteEvalTensor* indices, TfLiteEvalTensor* output) { const int indices_dims = indices->dims->size; const int indices_nd = indices->dims->data[indices_dims - 1]; const int params_dims = params->dims->size; const IndicesT* index_data = tflite::micro::GetTensorData<IndicesT>(indices); const ParamsT* param_data = tflite::micro::GetTensorData<ParamsT>(params); ParamsT* output_data = tflite::micro::GetTensorData<ParamsT>(output); int n_slices = 1; for (int i = 0; i < indices_dims - 1; ++i) { n_slices *= indices->dims->data[i]; } // If indices[-1] == params.rank, fetch single elements. // If indices[-1] < params.rank, fetch slices. int slice_size = 1; for (int i = indices_nd; i < params_dims; ++i) { slice_size *= params->dims->data[i]; } int remain_flat_size = ElementCount(*params->dims); // Number of elements per dimension int dims_to_count[MAX_INDICES_ND]; for (int i = 0; i < indices_nd; ++i) { dims_to_count[i] = remain_flat_size / params->dims->data[i]; remain_flat_size = dims_to_count[i]; } for (int i = 0; i < n_slices; ++i) { int from_pos = 0; for (int j = 0; j < indices_nd; ++j) { int offset = i * indices_nd + j; IndicesT index = index_data[offset]; from_pos += index * dims_to_count[j]; } std::memcpy(output_data + i * slice_size, param_data + from_pos, sizeof(ParamsT) * slice_size); } return kTfLiteOk; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-703'], 'message': 'GatherNd verifies that an index is valid before reading. (#1286)'</s>
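The GatherNd entry above copies a slice for every user-supplied index without verifying that the index is inside the corresponding params dimension, so a hostile index can push the memcpy source outside the tensor. The sketch below shows the bounds-check pattern the fix message calls for, written as plain C with illustrative parameter names rather than the actual TFLite Micro types.

```c
#include <string.h>

/* Range-check every gather index before it contributes to the source
 * offset that feeds memcpy(). Returns -1 on an out-of-range index. */
int gather_nd_checked(const int *index_data, const int *param_dims,
                      int n_slices, int indices_nd,
                      const float *param_data, float *output_data,
                      int slice_size, const int *dims_to_count)
{
    for (int i = 0; i < n_slices; ++i) {
        int from_pos = 0;
        for (int j = 0; j < indices_nd; ++j) {
            int index = index_data[i * indices_nd + j];
            if (index < 0 || index >= param_dims[j])
                return -1;               /* reject before computing the offset */
            from_pos += index * dims_to_count[j];
        }
        memcpy(output_data + (size_t)i * slice_size,
               param_data + from_pos,
               sizeof(float) * (size_t)slice_size);
    }
    return 0;
}
```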
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: static int do_i2c_loop(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[]) { uint chip; int alen; uint addr; uint length; u_char bytes[16]; int delay; int ret; #if CONFIG_IS_ENABLED(DM_I2C) struct udevice *dev; #endif if (argc < 3) return CMD_RET_USAGE; /* * Chip is always specified. */ chip = hextoul(argv[1], NULL); /* * Address is always specified. */ addr = hextoul(argv[2], NULL); alen = get_alen(argv[2], DEFAULT_ADDR_LEN); if (alen > 3) return CMD_RET_USAGE; #if CONFIG_IS_ENABLED(DM_I2C) ret = i2c_get_cur_bus_chip(chip, &dev); if (!ret && alen != -1) ret = i2c_set_chip_offset_len(dev, alen); if (ret) return i2c_report_err(ret, I2C_ERR_WRITE); #endif /* * Length is the number of objects, not number of bytes. */ length = 1; length = hextoul(argv[3], NULL); if (length > sizeof(bytes)) length = sizeof(bytes); /* * The delay time (uSec) is optional. */ delay = 1000; if (argc > 3) delay = dectoul(argv[4], NULL); /* * Run the loop... */ while (1) { #if CONFIG_IS_ENABLED(DM_I2C) ret = dm_i2c_read(dev, addr, bytes, length); #else ret = i2c_read(chip, addr, alen, bytes, length); #endif if (ret) i2c_report_err(ret, I2C_ERR_READ); udelay(delay); } /* NOTREACHED */ return 0; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-787'], 'message': 'i2c: fix stack buffer overflow vulnerability in i2c md command When running "i2c md 0 0 80000100", the function do_i2c_md parses the length into an unsigned int variable named length. The value is then moved to a signed variable: int nbytes = length; #define DISP_LINE_LEN 16 int linebytes = (nbytes > DISP_LINE_LEN) ? DISP_LINE_LEN : nbytes; ret = dm_i2c_read(dev, addr, linebuf, linebytes); On systems where integers are 32 bits wide, 0x80000100 is a negative value to "nbytes > DISP_LINE_LEN" is false and linebytes gets assigned 0x80000100 instead of 16. The consequence is that the function which reads from the i2c device (dm_i2c_read or i2c_read) is called with a 16-byte stack buffer to fill but with a size parameter which is too large. In some cases, this could trigger a crash. But with some i2c drivers, such as drivers/i2c/nx_i2c.c (used with "nexell,s5pxx18-i2c" bus), the size is actually truncated to a 16-bit integer. This is because function i2c_transfer expects an unsigned short length. In such a case, an attacker who can control the response of an i2c device can overwrite the return address of a function and execute arbitrary code through Return-Oriented Programming. Fix this issue by using unsigned integers types in do_i2c_md. While at it, make also alen unsigned, as signed sizes can cause vulnerabilities when people forgot to check that they can be negative. Signed-off-by: Nicolas Iooss <nicolas.iooss+uboot@ledger.fr> Reviewed-by: Heiko Schocher <hs@denx.de>'</s>
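The commit message above pins the U-Boot bug on moving the parsed length into a signed int, which makes a value such as 0x80000100 negative and defeats the clamp to the 16-byte display buffer. The sketch below is illustrative, not the actual U-Boot patch; it shows why keeping the length unsigned makes the clamp effective.

```c
#include <stdio.h>

#define DISP_LINE_LEN 16

/* With an unsigned length the comparison cannot be defeated by values
 * that would wrap negative in a signed int. */
static unsigned int clamp_linebytes(unsigned long length)
{
    unsigned int nbytes = (unsigned int)length;
    return (nbytes > DISP_LINE_LEN) ? DISP_LINE_LEN : nbytes;
}

int main(void)
{
    /* 0x80000100 now clamps to 16 instead of slipping past the check. */
    printf("%u\n", clamp_linebytes(0x80000100UL));
    return 0;
}
```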
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: static inline int crypto_rng_generate(struct crypto_rng *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int dlen) { return tfm->generate(tfm, src, slen, dst, dlen); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-476', 'CWE-703'], 'message': 'crypto: rng - Remove old low-level rng interface Now that all rng implementations have switched over to the new interface, we can remove the old low-level interface. Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: static int cmd_mount(void *data, const char *_input) { ut64 off = 0; char *input, *oinput, *ptr, *ptr2; RList *list; RListIter *iter; RFSFile *file; RFSRoot *root; RFSPlugin *plug; RFSPartition *part; RCore *core = (RCore *)data; if (!strncmp ("kdir", _input, 4)) { return cmd_mkdir (data, _input); } if (!strncmp ("v", _input, 1)) { return cmd_mv (data, _input); } input = oinput = strdup (_input); switch (*input) { case ' ': input++; if (input[0]==' ') input++; ptr = strchr (input, ' '); if (ptr) { *ptr = 0; ptr++; ptr2 = strchr (ptr, ' '); if (ptr2) { *ptr2 = 0; off = r_num_math (core->num, ptr2+1); } if (!r_fs_mount (core->fs, ptr, input, off)) eprintf ("Cannot mount %s\n", input); } else { if (!(ptr = r_fs_name (core->fs, core->offset))) eprintf ("Unknown filesystem type\n"); else if (!r_fs_mount (core->fs, ptr, input, core->offset)) eprintf ("Cannot mount %s\n", input); free (ptr); } break; case '-': r_fs_umount (core->fs, input+1); break; case '*': eprintf ("List commands in radare format\n"); r_list_foreach (core->fs->roots, iter, root) { r_cons_printf ("m %s %s 0x%"PFMT64x"\n", root-> path, root->p->name, root->delta); } break; case '\0': r_list_foreach (core->fs->roots, iter, root) { r_cons_printf ("%s\t0x%"PFMT64x"\t%s\n", root->p->name, root->delta, root->path); } break; case 'l': // list of plugins r_list_foreach (core->fs->plugins, iter, plug) { r_cons_printf ("%10s %s\n", plug->name, plug->desc); } break; case 'd': input++; if (input[0]==' ') input++; list = r_fs_dir (core->fs, input); if (list) { r_list_foreach (list, iter, file) { r_cons_printf ("%c %s\n", file->type, file->name); } r_list_free (list); } else eprintf ("Cannot open '%s' directory\n", input); break; case 'p': input++; if (*input == ' ') input++; ptr = strchr (input, ' '); if (ptr) { *ptr = 0; off = r_num_math (core->num, ptr+1); } list = r_fs_partitions (core->fs, input, off); if (list) { r_list_foreach (list, iter, part) { r_cons_printf ("%d %02x 0x%010"PFMT64x" 0x%010"PFMT64x"\n", part->number, part->type, part->start, part->start+part->length); } r_list_free (list); } else eprintf ("Cannot read partition\n"); break; case 'o': input++; if (input[0]==' ') input++; file = r_fs_open (core->fs, input); if (file) { // XXX: dump to file or just pipe? 
r_fs_read (core->fs, file, 0, file->size); r_cons_printf ("f file %d 0x%08"PFMT64x"\n", file->size, file->off); r_fs_close (core->fs, file); } else eprintf ("Cannot open file\n"); break; case 'g': input++; if (*input == ' ') input++; ptr = strchr (input, ' '); if (ptr) *ptr++ = 0; else ptr = "./"; file = r_fs_open (core->fs, input); if (file) { r_fs_read (core->fs, file, 0, file->size); write (1, file->data, file->size); r_fs_close (core->fs, file); write (1, "\n", 1); } else if (!r_fs_dir_dump (core->fs, input, ptr)) eprintf ("Cannot open file\n"); break; case 'f': input++; switch (*input) { case '?': r_cons_printf ( "Usage: mf[no] [...]\n" " mfn /foo *.c ; search files by name in /foo path\n" " mfo /foo 0x5e91 ; search files by offset in /foo path\n" ); break; case 'n': input++; if (*input == ' ') input++; ptr = strchr (input, ' '); if (ptr) { *ptr++ = 0; list = r_fs_find_name (core->fs, input, ptr); r_list_foreach (list, iter, ptr) { r_str_chop_path (ptr); printf ("%s\n", ptr); } //XXX: r_list_purge (list); } else eprintf ("Unknown store path\n"); break; case 'o': input++; if (*input == ' ') input++; ptr = strchr (input, ' '); if (ptr) { *ptr++ = 0; ut64 off = r_num_math (core->num, ptr); list = r_fs_find_off (core->fs, input, off); r_list_foreach (list, iter, ptr) { r_str_chop_path (ptr); printf ("%s\n", ptr); } //XXX: r_list_purge (list); } else eprintf ("Unknown store path\n"); break; } break; case 's': if (core->http_up) { free (oinput); return false; } input++; if (input[0]==' ') input++; r_fs_prompt (core->fs, input); break; case 'y': eprintf ("TODO\n"); break; case '?': { const char* help_msg[] = { "Usage:", "m[-?*dgy] [...] ", "Mountpoints management", "m", "", "List all mountpoints in human readable format", "m*", "", "Same as above, but in r2 commands", "ml", "", "List filesystem plugins", "m", " /mnt", "Mount fs at /mnt with autodetect fs and current offset", "m", " /mnt ext2 0", "Mount ext2 fs at /mnt with delta 0 on IO", "m-/", "", "Umount given path (/)", "my", "", "Yank contents of file into clipboard", "mo", " /foo", "Get offset and size of given file", "mg", " /foo", "Get contents of file/dir dumped to disk (XXX?)", "mf", "[?] [o|n]", "Search files for given filename or for offset", "md", " /", "List directory contents for path", "mp", "", "List all supported partition types", "mp", " msdos 0", "Show partitions in msdos format at offset 0", "ms", " /mnt", "Open filesystem prompt at /mnt", //"TODO: support multiple mountpoints and RFile IO's (need io+core refactorn", NULL}; r_core_cmd_help (core, help_msg); } break; } free (oinput); return 0; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-119', 'CWE-787'], 'message': 'Fix #7723 - crash in ext2 GRUB code because of variable size array in stack'</s>
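The radare2 entry above is fixed, per its message, by removing a variable-length array whose size comes from untrusted filesystem data. The sketch below shows the generic mitigation with illustrative names and an assumed size cap, not the actual GRUB/radare2 code: reject attacker-chosen sizes, then allocate on the heap instead of the stack.

```c
#include <stdlib.h>
#include <string.h>

#define MAX_REASONABLE_LEN 4096          /* illustrative upper bound */

/* Copy an untrusted, length-prefixed name into a checked heap buffer. */
char *copy_name_checked(const char *src, size_t len)
{
    char *buf;

    if (len == 0 || len > MAX_REASONABLE_LEN)
        return NULL;                     /* refuse attacker-chosen sizes */

    buf = malloc(len + 1);               /* heap allocation, not a stack VLA */
    if (!buf)
        return NULL;

    memcpy(buf, src, len);
    buf[len] = '\0';
    return buf;
}
```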
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer) { struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr; struct sock *sk = sock->sk; struct l2cap_chan *chan = l2cap_pi(sk)->chan; BT_DBG("sock %p, sk %p", sock, sk); addr->sa_family = AF_BLUETOOTH; *len = sizeof(struct sockaddr_l2); if (peer) { la->l2_psm = chan->psm; bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst); la->l2_cid = cpu_to_le16(chan->dcid); } else { la->l2_psm = chan->sport; bacpy(&la->l2_bdaddr, &bt_sk(sk)->src); la->l2_cid = cpu_to_le16(chan->scid); } return 0; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-200'], 'message': 'Bluetooth: L2CAP - Fix info leak via getsockname() The L2CAP code fails to initialize the l2_bdaddr_type member of struct sockaddr_l2 and the padding byte added for alignment. It that for leaks two bytes kernel stack via the getsockname() syscall. Add an explicit memset(0) before filling the structure to avoid the info leak. Signed-off-by: Mathias Krause <minipli@googlemail.com> Cc: Marcel Holtmann <marcel@holtmann.org> Cc: Gustavo Padovan <gustavo@padovan.org> Cc: Johan Hedberg <johan.hedberg@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>'</s>
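The getsockname() entry above leaks kernel stack bytes through members and padding that are never written. The sketch below shows the standard remedy in simplified form, using an example struct rather than the real kernel sockaddr_l2: zero the whole structure before filling the fields you mean to expose.

```c
#include <string.h>

/* Simplified stand-in for the kernel's sockaddr_l2. */
struct sockaddr_l2_example {
    unsigned short l2_family;
    unsigned short l2_psm;
    unsigned char  l2_bdaddr[6];
    unsigned short l2_cid;
    unsigned char  l2_bdaddr_type;       /* the member left uninitialized in the bug */
};

void fill_l2_name(struct sockaddr_l2_example *la)
{
    memset(la, 0, sizeof(*la));          /* clears unset members and padding bytes */
    la->l2_family = 31;                  /* AF_BLUETOOTH */
    /* ...copy psm, bdaddr and cid from the channel as before... */
}
```

The memset is what guarantees that compiler-inserted padding, which cannot be assigned field by field, never carries stale stack contents to userspace.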
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags) { struct sock *sk = sock->sk; struct sockaddr_l2 la; int len, err = 0; BT_DBG("sk %p", sk); if (!addr || addr->sa_family != AF_BLUETOOTH) return -EINVAL; memset(&la, 0, sizeof(la)); len = min_t(unsigned int, sizeof(la), alen); memcpy(&la, addr, len); if (la.l2_cid) return -EINVAL; lock_sock(sk); if (sk->sk_type == SOCK_SEQPACKET && !la.l2_psm) { err = -EINVAL; goto done; } switch (l2cap_pi(sk)->mode) { case L2CAP_MODE_BASIC: break; case L2CAP_MODE_ERTM: if (enable_ertm) break; /* fall through */ default: err = -ENOTSUPP; goto done; } switch (sk->sk_state) { case BT_CONNECT: case BT_CONNECT2: case BT_CONFIG: /* Already connecting */ goto wait; case BT_CONNECTED: /* Already connected */ goto done; case BT_OPEN: case BT_BOUND: /* Can connect */ break; default: err = -EBADFD; goto done; } /* Set destination address and psm */ bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr); l2cap_pi(sk)->psm = la.l2_psm; err = l2cap_do_connect(sk); if (err) goto done; wait: err = bt_sock_wait_state(sk, BT_CONNECTED, sock_sndtimeo(sk, flags & O_NONBLOCK)); done: release_sock(sk); return err; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-200', 'CWE-119', 'CWE-787'], 'message': 'Bluetooth: Add configuration support for ERTM and Streaming mode Add support to config_req and config_rsp to configure ERTM and Streaming mode. If the remote device specifies ERTM or Streaming mode, then the same mode is proposed. Otherwise ERTM or Basic mode is used. And in case of a state 2 device, the remote device should propose the same mode. If not, then the channel gets disconnected. Signed-off-by: Gustavo F. Padovan <gustavo@las.ic.unicamp.br> Signed-off-by: Marcel Holtmann <marcel@holtmann.org>'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: static void nfs4_xdr_enc_getacl(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs_getaclargs *args) { struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; uint32_t replen; encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_putfh(xdr, args->fh, &hdr); replen = hdr.replen + op_decode_hdr_maxsz + nfs4_fattr_bitmap_maxsz + 1; encode_getattr_two(xdr, FATTR4_WORD0_ACL, 0, &hdr); xdr_inline_pages(&req->rq_rcv_buf, replen << 2, args->acl_pages, args->acl_pgbase, args->acl_len); encode_nops(&hdr); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-703', 'CWE-189'], 'message': 'NFSv4: include bitmap in nfsv4 get acl data The NFSv4 bitmap size is unbounded: a server can return an arbitrary sized bitmap in an FATTR4_WORD0_ACL request. Replace using the nfs4_fattr_bitmap_maxsz as a guess to the maximum bitmask returned by a server with the inclusion of the bitmap (xdr length plus bitmasks) and the acl data xdr length to the (cached) acl page data. This is a general solution to commit e5012d1f "NFSv4.1: update nfs4_fattr_bitmap_maxsz" and fixes hitting a BUG_ON in xdr_shrink_bufhead when getting ACLs. Fix a bug in decode_getacl that returned -EINVAL on ACLs > page when getxattr was called with a NULL buffer, preventing ACL > PAGE_SIZE from being retrieved. Cc: stable@kernel.org Signed-off-by: Andy Adamson <andros@netapp.com> Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: MYSOFA_EXPORT struct MYSOFA_HRTF* mysofa_load(const char *filename, int *err) { struct READER reader; struct MYSOFA_HRTF *hrtf = NULL; if (filename == NULL) filename = CMAKE_INSTALL_PREFIX "/share/libmysofa/default.sofa"; if (strcmp(filename, "-")) reader.fhd = fopen(filename, "rb"); else reader.fhd = stdin; if (!reader.fhd) { log("cannot open file %s\n", filename); *err = errno; return NULL; } reader.gcol = NULL; reader.all = NULL; *err = superblockRead(&reader, &reader.superblock); if (!*err) { hrtf = getHrtf(&reader, err); } superblockFree(&reader, &reader.superblock); gcolFree(reader.gcol); if (strcmp(filename, "-")) fclose(reader.fhd); return hrtf; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-476', 'CWE-787'], 'message': 'Fixed recursive function calls'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: static const uint8_t *get_signature(const uint8_t *asn1_sig, int *len) { int offset = 0; const uint8_t *ptr = NULL; if (asn1_next_obj(asn1_sig, &offset, ASN1_SEQUENCE) < 0 || asn1_skip_obj(asn1_sig, &offset, ASN1_SEQUENCE)) goto end_get_sig; if (asn1_sig[offset++] != ASN1_OCTET_STRING) goto end_get_sig; *len = get_asn1_length(asn1_sig, &offset); ptr = &asn1_sig[offset]; /* all ok */ end_get_sig: return ptr; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-347'], 'message': 'Apply CVE fixes for X509 parsing Apply patches developed by Sze Yiu which correct a vulnerability in X509 parsing. See CVE-2018-16150 and CVE-2018-16149 for more info.'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: static void clear_rulist(rzip_control *control) { while (control->ruhead) { struct runzip_node *node = control->ruhead; struct stream_info *sinfo = node->sinfo; dealloc(sinfo->ucthreads); dealloc(node->pthreads); dealloc(sinfo->s); dealloc(sinfo); control->ruhead = node->prev; dealloc(node); } } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-703'], 'message': 'Fix possible race condition between zpaq_decompress_buf() and clear_rulist() function as reported by wcventure.'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: static void l2cap_sock_destruct(struct sock *sk) { BT_DBG("sk %p", sk); if (l2cap_pi(sk)->chan) l2cap_chan_put(l2cap_pi(sk)->chan); if (l2cap_pi(sk)->rx_busy_skb) { kfree_skb(l2cap_pi(sk)->rx_busy_skb); l2cap_pi(sk)->rx_busy_skb = NULL; } skb_queue_purge(&sk->sk_receive_queue); skb_queue_purge(&sk->sk_write_queue); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-787'], 'message': 'Bluetooth: fix use-after-free error in lock_sock_nested() use-after-free error in lock_sock_nested is reported: [ 179.140137][ T3731] ===================================================== [ 179.142675][ T3731] BUG: KMSAN: use-after-free in lock_sock_nested+0x280/0x2c0 [ 179.145494][ T3731] CPU: 4 PID: 3731 Comm: kworker/4:2 Not tainted 5.12.0-rc6+ #54 [ 179.148432][ T3731] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.13.0-1ubuntu1.1 04/01/2014 [ 179.151806][ T3731] Workqueue: events l2cap_chan_timeout [ 179.152730][ T3731] Call Trace: [ 179.153301][ T3731] dump_stack+0x24c/0x2e0 [ 179.154063][ T3731] kmsan_report+0xfb/0x1e0 [ 179.154855][ T3731] __msan_warning+0x5c/0xa0 [ 179.155579][ T3731] lock_sock_nested+0x280/0x2c0 [ 179.156436][ T3731] ? kmsan_get_metadata+0x116/0x180 [ 179.157257][ T3731] l2cap_sock_teardown_cb+0xb8/0x890 [ 179.158154][ T3731] ? __msan_metadata_ptr_for_load_8+0x10/0x20 [ 179.159141][ T3731] ? kmsan_get_metadata+0x116/0x180 [ 179.159994][ T3731] ? kmsan_get_shadow_origin_ptr+0x84/0xb0 [ 179.160959][ T3731] ? l2cap_sock_recv_cb+0x420/0x420 [ 179.161834][ T3731] l2cap_chan_del+0x3e1/0x1d50 [ 179.162608][ T3731] ? kmsan_get_metadata+0x116/0x180 [ 179.163435][ T3731] ? kmsan_get_shadow_origin_ptr+0x84/0xb0 [ 179.164406][ T3731] l2cap_chan_close+0xeea/0x1050 [ 179.165189][ T3731] ? kmsan_internal_unpoison_shadow+0x42/0x70 [ 179.166180][ T3731] l2cap_chan_timeout+0x1da/0x590 [ 179.167066][ T3731] ? __msan_metadata_ptr_for_load_8+0x10/0x20 [ 179.168023][ T3731] ? l2cap_chan_create+0x560/0x560 [ 179.168818][ T3731] process_one_work+0x121d/0x1ff0 [ 179.169598][ T3731] worker_thread+0x121b/0x2370 [ 179.170346][ T3731] kthread+0x4ef/0x610 [ 179.171010][ T3731] ? process_one_work+0x1ff0/0x1ff0 [ 179.171828][ T3731] ? 
kthread_blkcg+0x110/0x110 [ 179.172587][ T3731] ret_from_fork+0x1f/0x30 [ 179.173348][ T3731] [ 179.173752][ T3731] Uninit was created at: [ 179.174409][ T3731] kmsan_internal_poison_shadow+0x5c/0xf0 [ 179.175373][ T3731] kmsan_slab_free+0x76/0xc0 [ 179.176060][ T3731] kfree+0x3a5/0x1180 [ 179.176664][ T3731] __sk_destruct+0x8af/0xb80 [ 179.177375][ T3731] __sk_free+0x812/0x8c0 [ 179.178032][ T3731] sk_free+0x97/0x130 [ 179.178686][ T3731] l2cap_sock_release+0x3d5/0x4d0 [ 179.179457][ T3731] sock_close+0x150/0x450 [ 179.180117][ T3731] __fput+0x6bd/0xf00 [ 179.180787][ T3731] ____fput+0x37/0x40 [ 179.181481][ T3731] task_work_run+0x140/0x280 [ 179.182219][ T3731] do_exit+0xe51/0x3e60 [ 179.182930][ T3731] do_group_exit+0x20e/0x450 [ 179.183656][ T3731] get_signal+0x2dfb/0x38f0 [ 179.184344][ T3731] arch_do_signal_or_restart+0xaa/0xe10 [ 179.185266][ T3731] exit_to_user_mode_prepare+0x2d2/0x560 [ 179.186136][ T3731] syscall_exit_to_user_mode+0x35/0x60 [ 179.186984][ T3731] do_syscall_64+0xc5/0x140 [ 179.187681][ T3731] entry_SYSCALL_64_after_hwframe+0x44/0xae [ 179.188604][ T3731] ===================================================== In our case, there are two Thread A and B: Context: Thread A: Context: Thread B: l2cap_chan_timeout() __se_sys_shutdown() l2cap_chan_close() l2cap_sock_shutdown() l2cap_chan_del() l2cap_chan_close() l2cap_sock_teardown_cb() l2cap_sock_teardown_cb() Once l2cap_sock_teardown_cb() excuted, this sock will be marked as SOCK_ZAPPED, and can be treated as killable in l2cap_sock_kill() if sock_orphan() has excuted, at this time we close sock through sock_close() which end to call l2cap_sock_kill() like Thread C: Context: Thread C: sock_close() l2cap_sock_release() sock_orphan() l2cap_sock_kill() #free sock if refcnt is 1 If C completed, Once A or B reaches l2cap_sock_teardown_cb() again, use-after-free happened. We should set chan->data to NULL if sock is destructed, for telling teardown operation is not allowed in l2cap_sock_teardown_cb(), and also we should avoid killing an already killed socket in l2cap_sock_close_cb(). Signed-off-by: Wang ShaoBo <bobo.shaobowang@huawei.com> Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com> Signed-off-by: Marcel Holtmann <marcel@holtmann.org>'</s>
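The use-after-free report above is resolved, per the commit message, by clearing the channel's back-pointer when the socket is destructed and having the teardown callback bail out when that pointer is gone. The sketch below restates that guard with simplified types; it is not the exact kernel patch.

```c
#include <stddef.h>

struct chan_example {
    void *data;                          /* back-pointer to the owning socket */
};

/* Called from the socket destructor: the socket memory is going away. */
void sock_destructed(struct chan_example *chan)
{
    chan->data = NULL;
}

/* Called from the channel teardown path, possibly on another thread. */
void teardown_cb(struct chan_example *chan)
{
    void *sk = chan->data;

    if (sk == NULL)
        return;                          /* socket already destructed, nothing to do */

    /* ...safe to lock and tear down the socket here... */
}
```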
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: resp_get_length(netdissect_options *ndo, register const u_char *bp, int len, const u_char **endp) { int result; u_char c; int saw_digit; int neg; int too_large; if (len == 0) goto trunc; ND_TCHECK(*bp); too_large = 0; neg = 0; if (*bp == '-') { neg = 1; bp++; len--; } result = 0; saw_digit = 0; for (;;) { if (len == 0) goto trunc; ND_TCHECK(*bp); c = *bp; if (!(c >= '0' && c <= '9')) { if (!saw_digit) goto invalid; break; } c -= '0'; if (result > (INT_MAX / 10)) { /* This will overflow an int when we multiply it by 10. */ too_large = 1; } else { result *= 10; if (result == INT_MAX && c > (INT_MAX % 10)) { /* This will overflow an int when we add c */ too_large = 1; } else result += c; } bp++; len--; saw_digit = 1; } if (!saw_digit) goto invalid; /* * OK, the next thing should be \r\n. */ if (len == 0) goto trunc; ND_TCHECK(*bp); if (*bp != '\r') goto invalid; bp++; len--; if (len == 0) goto trunc; ND_TCHECK(*bp); if (*bp != '\n') goto invalid; bp++; len--; *endp = bp; if (neg) { /* -1 means "null", anything else is invalid */ if (too_large || result != 1) return (-4); result = -1; } return (too_large ? -3 : result); trunc: return (-2); invalid: return (-5); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-399', 'CWE-835'], 'message': 'CVE-2017-12989/RESP: Make sure resp_get_length() advances the pointer for invalid lengths. Make sure that it always sends *endp before returning and that, for invalid lengths where we don't like a character in the length string, what it sets *endp to is past the character in question, so we don't run the risk of infinitely looping (or doing something else random) if a character in the length is invalid. This fixes an infinite loop discovered by Forcepoint's security researchers Otto Airamo & Antti Levomäki. Add a test using the capture file supplied by the reporter(s).'</s>
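The tcpdump entry above hinges on a parsing discipline: whenever resp_get_length() gives up on an invalid length, it must still move the caller's cursor past the byte it just examined, or the caller can loop forever on the same input. The sketch below is a simplified digit parser, not the real tcpdump code, that follows that rule on every exit path.

```c
/* Parse decimal digits until '\r'. On every return, *endp is advanced
 * past the last byte examined so the caller always makes progress. */
static int parse_digits(const unsigned char *bp, int len,
                        const unsigned char **endp)
{
    int result = 0;

    while (len > 0) {
        unsigned char c = *bp++;
        len--;
        if (c == '\r') {                 /* terminator reached */
            *endp = bp;
            return result;
        }
        if (c < '0' || c > '9') {
            *endp = bp;                  /* advance even on error */
            return -1;
        }
        result = result * 10 + (c - '0');
    }
    *endp = bp;                          /* truncated input */
    return -2;
}
```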
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: check_entry_size_and_hooks(struct ip6t_entry *e, struct xt_table_info *newinfo, const unsigned char *base, const unsigned char *limit, const unsigned int *hook_entries, const unsigned int *underflows, unsigned int valid_hooks) { unsigned int h; int err; if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 || (unsigned char *)e + sizeof(struct ip6t_entry) >= limit || (unsigned char *)e + e->next_offset > limit) { duprintf("Bad offset %p\n", e); return -EINVAL; } if (e->next_offset < sizeof(struct ip6t_entry) + sizeof(struct xt_entry_target)) { duprintf("checking: element %p size %u\n", e, e->next_offset); return -EINVAL; } err = check_entry(e); if (err) return err; /* Check hooks & underflows */ for (h = 0; h < NF_INET_NUMHOOKS; h++) { if (!(valid_hooks & (1 << h))) continue; if ((unsigned char *)e - base == hook_entries[h]) newinfo->hook_entry[h] = hook_entries[h]; if ((unsigned char *)e - base == underflows[h]) { if (!check_underflow(e)) { pr_err("Underflows must be unconditional and " "use the STANDARD target with " "ACCEPT/DROP\n"); return -EINVAL; } newinfo->underflow[h] = underflows[h]; } } /* Clear counters and comefrom */ e->counters = ((struct xt_counters) { 0, 0 }); e->comefrom = 0; return 0; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-119', 'CWE-787'], 'message': 'netfilter: x_tables: fix unconditional helper Ben Hawkes says: In the mark_source_chains function (net/ipv4/netfilter/ip_tables.c) it is possible for a user-supplied ipt_entry structure to have a large next_offset field. This field is not bounds checked prior to writing a counter value at the supplied offset. Problem is that mark_source_chains should not have been called -- the rule doesn't have a next entry, so its supposed to return an absolute verdict of either ACCEPT or DROP. However, the function conditional() doesn't work as the name implies. It only checks that the rule is using wildcard address matching. However, an unconditional rule must also not be using any matches (no -m args). The underflow validator only checked the addresses, therefore passing the 'unconditional absolute verdict' test, while mark_source_chains also tested for presence of matches, and thus proceeeded to the next (not-existent) rule. Unify this so that all the callers have same idea of 'unconditional rule'. Reported-by: Ben Hawkes <hawkes@google.com> Signed-off-by: Florian Westphal <fw@strlen.de> Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: static inline bool unconditional(const struct arpt_arp *arp) { static const struct arpt_arp uncond; return memcmp(arp, &uncond, sizeof(uncond)) == 0; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-119', 'CWE-787'], 'message': 'netfilter: x_tables: fix unconditional helper Ben Hawkes says: In the mark_source_chains function (net/ipv4/netfilter/ip_tables.c) it is possible for a user-supplied ipt_entry structure to have a large next_offset field. This field is not bounds checked prior to writing a counter value at the supplied offset. Problem is that mark_source_chains should not have been called -- the rule doesn't have a next entry, so its supposed to return an absolute verdict of either ACCEPT or DROP. However, the function conditional() doesn't work as the name implies. It only checks that the rule is using wildcard address matching. However, an unconditional rule must also not be using any matches (no -m args). The underflow validator only checked the addresses, therefore passing the 'unconditional absolute verdict' test, while mark_source_chains also tested for presence of matches, and thus proceeeded to the next (not-existent) rule. Unify this so that all the callers have same idea of 'unconditional rule'. Reported-by: Ben Hawkes <hawkes@google.com> Signed-off-by: Florian Westphal <fw@strlen.de> Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>'</s>
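The two netfilter entries above share one root cause: "unconditional" was judged only from wildcard addresses, while a truly unconditional rule must also carry no match entries. The sketch below illustrates the unified check with a simplified entry struct (not the real kernel layout): the target must begin immediately after the fixed-size header, meaning nothing sits between them.

```c
#include <string.h>
#include <stdbool.h>

/* Simplified stand-in for an iptables/arptables entry header. */
struct entry_example {
    struct { unsigned int src, dst; } ip;   /* address match; all-zero means wildcard */
    unsigned short target_offset;           /* offset where the target record begins */
    /* match entries, if any, live between the header and target_offset */
};

static bool unconditional(const struct entry_example *e)
{
    static const struct entry_example uncond; /* zero-initialized reference */

    return e->target_offset == sizeof(struct entry_example) &&
           memcmp(&e->ip, &uncond.ip, sizeof(uncond.ip)) == 0;
}
```

Tying the decision to target_offset is what encodes "no -m matches present", so a rule with matches can no longer masquerade as an unconditional verdict.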
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, const struct iovec *iv, unsigned long total_len, size_t count, int noblock) { struct sk_buff *skb; struct macvlan_dev *vlan; unsigned long len = total_len; int err; struct virtio_net_hdr vnet_hdr = { 0 }; int vnet_hdr_len = 0; int copylen; bool zerocopy = false; if (q->flags & IFF_VNET_HDR) { vnet_hdr_len = q->vnet_hdr_sz; err = -EINVAL; if (len < vnet_hdr_len) goto err; len -= vnet_hdr_len; err = memcpy_fromiovecend((void *)&vnet_hdr, iv, 0, sizeof(vnet_hdr)); if (err < 0) goto err; if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && vnet_hdr.csum_start + vnet_hdr.csum_offset + 2 > vnet_hdr.hdr_len) vnet_hdr.hdr_len = vnet_hdr.csum_start + vnet_hdr.csum_offset + 2; err = -EINVAL; if (vnet_hdr.hdr_len > len) goto err; } err = -EINVAL; if (unlikely(len < ETH_HLEN)) goto err; if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) zerocopy = true; if (zerocopy) { /* There are 256 bytes to be copied in skb, so there is enough * room for skb expand head in case it is used. * The rest buffer is mapped from userspace. */ copylen = vnet_hdr.hdr_len; if (!copylen) copylen = GOODCOPY_LEN; } else copylen = len; skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, copylen, vnet_hdr.hdr_len, noblock, &err); if (!skb) goto err; if (zerocopy) err = zerocopy_sg_from_iovec(skb, iv, vnet_hdr_len, count); else err = skb_copy_datagram_from_iovec(skb, 0, iv, vnet_hdr_len, len); if (err) goto err_kfree; skb_set_network_header(skb, ETH_HLEN); skb_reset_mac_header(skb); skb->protocol = eth_hdr(skb)->h_proto; if (vnet_hdr_len) { err = macvtap_skb_from_vnet_hdr(skb, &vnet_hdr); if (err) goto err_kfree; } rcu_read_lock_bh(); vlan = rcu_dereference_bh(q->vlan); /* copy skb_ubuf_info for callback when skb has no error */ if (zerocopy) { skb_shinfo(skb)->destructor_arg = m->msg_control; skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY; } if (vlan) macvlan_start_xmit(skb, vlan->dev); else kfree_skb(skb); rcu_read_unlock_bh(); return total_len; err_kfree: kfree_skb(skb); err: rcu_read_lock_bh(); vlan = rcu_dereference_bh(q->vlan); if (vlan) vlan->dev->stats.tx_dropped++; rcu_read_unlock_bh(); return err; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-119', 'CWE-787'], 'message': 'macvtap: zerocopy: validate vectors before building skb There're several reasons that the vectors need to be validated: - Return error when caller provides vectors whose num is greater than UIO_MAXIOV. - Linearize part of skb when userspace provides vectors grater than MAX_SKB_FRAGS. - Return error when userspace provides vectors whose total length may exceed - MAX_SKB_FRAGS * PAGE_SIZE. Signed-off-by: Jason Wang <jasowang@redhat.com> Signed-off-by: Michael S. Tsirkin <mst@redhat.com>'</s>
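The macvtap entry above builds an skb from user-controlled iovecs without bounding the segment count or total length. The sketch below is a deliberately simplified policy with illustrative constants, not the actual patch (which partially linearizes the skb rather than only disabling zerocopy): oversized vectors are rejected, and layouts that cannot fit the frag limit fall back to copying.

```c
#define UIO_MAXIOV_EXAMPLE     1024      /* illustrative limits, not kernel constants */
#define MAX_SKB_FRAGS_EXAMPLE  17
#define PAGE_SIZE_EXAMPLE      4096UL

/* Validate user-supplied iovec geometry before committing to zerocopy. */
static int validate_zerocopy(unsigned long count, unsigned long total_len,
                             int *use_zerocopy)
{
    if (count > UIO_MAXIOV_EXAMPLE)
        return -1;                       /* too many segments: reject outright */

    if (count > MAX_SKB_FRAGS_EXAMPLE ||
        total_len > MAX_SKB_FRAGS_EXAMPLE * PAGE_SIZE_EXAMPLE)
        *use_zerocopy = 0;               /* cannot map safely: copy instead */

    return 0;
}
```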
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: TfLiteStatus EvalImpl(TfLiteContext* context, const TfLiteTensor* input, TfLiteNode* node) { // Map from value, to index in the unique elements vector. // Note that we prefer to use map than unordered_map as it showed less // increase in the binary size. std::map<T, int> unique_values; TfLiteTensor* output_indexes = GetOutput(context, node, 1); std::vector<T> output_values; I* indexes = GetTensorData<I>(output_indexes); const T* data = GetTensorData<T>(input); const int num_elements = NumElements(input); for (int i = 0; i < num_elements; ++i) { const auto element_it = unique_values.find(data[i]); if (element_it != unique_values.end()) { indexes[i] = element_it->second; } else { const int unique_index = unique_values.size(); unique_values[data[i]] = unique_index; indexes[i] = unique_index; output_values.push_back(data[i]); } } // Allocate output tensor. TfLiteTensor* unique_output = GetOutput(context, node, 0); std::unique_ptr<TfLiteIntArray, void (*)(TfLiteIntArray*)> shape( TfLiteIntArrayCreate(NumDimensions(input)), TfLiteIntArrayFree); shape->data[0] = unique_values.size(); TF_LITE_ENSURE_STATUS( context->ResizeTensor(context, unique_output, shape.release())); // Set the values in the output tensor. T* output_unique_values = GetTensorData<T>(unique_output); for (int i = 0; i < output_values.size(); ++i) { output_unique_values[i] = output_values[i]; } return kTfLiteOk; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-125', 'CWE-787'], 'message': '[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); // TODO(b/137042749): TFLite infrastructure (converter, delegate) doesn't // fully support 0-output ops yet. Currently it works if we manually crfat // a TFLite graph that contains variable ops. Note: // * The TFLite Converter need to be changed to be able to produce an op // with 0 output. // * The delegation code need to be changed to handle 0 output ops. However // everything still works fine when variable ops aren't used. TF_LITE_ENSURE_EQ(context, NumOutputs(node), 0); const TfLiteTensor* input_resource_id_tensor = GetInput(context, node, kInputVariableId); TF_LITE_ENSURE_EQ(context, input_resource_id_tensor->type, kTfLiteInt32); TF_LITE_ENSURE_EQ(context, NumElements(input_resource_id_tensor), 1); return kTfLiteOk; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-125', 'CWE-787'], 'message': '[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output_index_tensor = GetOutput(context, node, 1); TF_LITE_ENSURE_EQ(context, NumElements(output_index_tensor), NumElements(input)); switch (input->type) { case kTfLiteInt8: TF_LITE_ENSURE_STATUS(EvalImpl<int8_t>(context, input, node)); break; case kTfLiteInt16: TF_LITE_ENSURE_STATUS(EvalImpl<int16_t>(context, input, node)); break; case kTfLiteInt32: TF_LITE_ENSURE_STATUS(EvalImpl<int32_t>(context, input, node)); break; case kTfLiteInt64: TF_LITE_ENSURE_STATUS(EvalImpl<int64_t>(context, input, node)); break; case kTfLiteFloat32: TF_LITE_ENSURE_STATUS(EvalImpl<float>(context, input, node)); break; case kTfLiteUInt8: TF_LITE_ENSURE_STATUS(EvalImpl<uint8_t>(context, input, node)); break; default: context->ReportError(context, "Currently Unique doesn't support type: %s", TfLiteTypeGetName(input->type)); return kTfLiteError; } return kTfLiteOk; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-125', 'CWE-787'], 'message': '[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: void EvalHybrid(TfLiteContext* context, TfLiteNode* node, TfLiteConvParams* params, OpData* data, const TfLiteTensor* input, const TfLiteTensor* filter, const TfLiteTensor* bias, TfLiteTensor* im2col, TfLiteTensor* accum_scratch, TfLiteTensor* output) { float output_activation_min, output_activation_max; CalculateActivationRange(params->activation, &output_activation_min, &output_activation_max); const int input_size = NumElements(input) / SizeOfDimension(input, 0); const int batch_size = SizeOfDimension(input, 0); const float* input_ptr = GetTensorData<float>(input); int8_t* quantized_input_ptr_batch = GetTensorData<int8_t>( GetTemporary(context, node, data->input_quantized_index)); float* scaling_factors_ptr = GetTensorData<float>( GetTemporary(context, node, data->scaling_factors_index)); // Per-batch input quantization for higher accuracy. { ruy::profiler::ScopeLabel label("ConvHybridQuantizeInputs"); for (int b = 0; b < batch_size; ++b) { float unused_min, unused_max; const int offset = b * input_size; tensor_utils::SymmetricQuantizeFloats( input_ptr + offset, input_size, quantized_input_ptr_batch + offset, &unused_min, &unused_max, &scaling_factors_ptr[b]); scaling_factors_ptr[b] *= filter->params.scale; } } switch (kernel_type) { case kReference: case kGenericOptimized: case kMultithreadOptimized: case kCblasOptimized: { // There is only one implementation for hybrid kernel. ConvParams op_params; op_params.padding_type = PaddingType::kSame; op_params.padding_values.width = data->padding.width; op_params.padding_values.height = data->padding.height; op_params.stride_width = params->stride_width; op_params.stride_height = params->stride_height; op_params.dilation_width_factor = 1; op_params.dilation_height_factor = 1; op_params.float_activation_min = output_activation_min; op_params.float_activation_max = output_activation_max; optimized_ops::HybridConv( op_params, scaling_factors_ptr, GetTensorShape(input), quantized_input_ptr_batch, GetTensorShape(filter), GetTensorData<int8_t>(filter), GetTensorShape(bias), GetTensorData<float>(bias), GetTensorShape(accum_scratch), GetTensorData<int32_t>(accum_scratch), GetTensorShape(output), GetTensorData<float>(output), GetTensorShape(im2col), GetTensorData<int8_t>(im2col), CpuBackendContext::GetFromContext(context)); break; } } } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-125', 'CWE-787'], 'message': '[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, 0); switch (input->type) { case kTfLiteFloat32: return EvalImpl<kernel_type, kTfLiteFloat32>(context, node); case kTfLiteUInt8: return EvalImpl<kernel_type, kTfLiteUInt8>(context, node); case kTfLiteInt8: return EvalImpl<kernel_type, kTfLiteInt8>(context, node); case kTfLiteInt16: return EvalImpl<kernel_type, kTfLiteInt16>(context, node); default: TF_LITE_KERNEL_LOG(context, "Type %s not currently supported.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-125', 'CWE-787'], 'message': '[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* data = GetInput(context, node, kInputDataTensor); const TfLiteTensor* segment_ids = GetInput(context, node, kInputSegmentIdsTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); if (IsDynamicTensor(output)) { TF_LITE_ENSURE_OK(context, ResizeOutputTensor(context, data, segment_ids, output)); } #define TF_LITE_SEGMENT_SUM(dtype) \ reference_ops::SegmentSum<dtype>( \ GetTensorShape(data), GetTensorData<dtype>(data), \ GetTensorShape(segment_ids), GetTensorData<int32_t>(segment_ids), \ GetTensorShape(output), GetTensorData<dtype>(output)); switch (data->type) { case kTfLiteInt32: TF_LITE_SEGMENT_SUM(int32_t); break; case kTfLiteFloat32: TF_LITE_SEGMENT_SUM(float); break; default: context->ReportError(context, "Currently SegmentSum doesn't support type: %s", TfLiteTypeGetName(data->type)); return kTfLiteError; } #undef TF_LITE_SEGMENT_SUM return kTfLiteOk; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-125', 'CWE-787'], 'message': '[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56'</s>
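The TFLite entries in this section all receive the same hardening: after the refactor, tensor accessors may return a null pointer, so every result must be checked before it is dereferenced. The sketch below shows that pattern generically in plain C with a hypothetical accessor and tensor struct; it is not the actual TFLite API.

```c
#include <stddef.h>

struct tensor { int type; };             /* hypothetical stand-in */

/* Hypothetical accessor that, like the refactored helpers, may return
 * NULL when the requested tensor index is invalid. */
static struct tensor *get_output(struct tensor *tensors, int count, int index)
{
    return (index >= 0 && index < count) ? &tensors[index] : NULL;
}

static int eval_safe(struct tensor *tensors, int count)
{
    struct tensor *output = get_output(tensors, count, 0);

    if (output == NULL)
        return -1;                       /* propagate an error instead of crashing */

    output->type = 0;                    /* safe to dereference from here on */
    return 0;
}
```

The point of checking at every call site is that the accessor's contract changed underneath existing kernels; any path that still assumes a non-null result becomes a null dereference or out-of-bounds access.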
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteDepthwiseConvParams*>(node->builtin_data); OpData* data = reinterpret_cast<OpData*>(node->user_data); // TODO(ahentz): use could use GetOptionalInputTensor() here, but we need to // decide whether we are OK with optional tensors being completely absent, as // opposed to having -1 as their index. bool hasBias = NumInputs(node) == 3; TF_LITE_ENSURE(context, hasBias || NumInputs(node) == 2); const TfLiteTensor* input = GetInput(context, node, kInputTensor); const TfLiteTensor* filter = GetInput(context, node, kFilterTensor); const TfLiteTensor* bias = nullptr; TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); TF_LITE_ENSURE_EQ(context, NumDimensions(input), 4); TF_LITE_ENSURE_EQ(context, NumDimensions(filter), 4); const TfLiteType data_type = input->type; const TfLiteType filter_type = filter->type; const bool is_hybrid = data_type == kTfLiteFloat32 && filter_type == kTfLiteInt8; TF_LITE_ENSURE(context, data_type == kTfLiteFloat32 || data_type == kTfLiteUInt8 || data_type == kTfLiteInt8 || data_type == kTfLiteInt16); TF_LITE_ENSURE_TYPES_EQ(context, output->type, data_type); if (!is_hybrid) { TF_LITE_ENSURE(context, filter->type == data_type || data_type == kTfLiteInt16); } // Filter in DepthwiseConv is expected to be [1, H, W, O]. TF_LITE_ENSURE_EQ(context, SizeOfDimension(filter, 0), 1); if (hasBias) { bias = GetInput(context, node, kBiasTensor); if (data_type == kTfLiteUInt8 || data_type == kTfLiteInt8) { TF_LITE_ENSURE_TYPES_EQ(context, bias->type, kTfLiteInt32); TF_LITE_ENSURE_EQ(context, bias->params.zero_point, 0); } else if (data_type == kTfLiteInt16) { TF_LITE_ENSURE_TYPES_EQ(context, bias->type, kTfLiteInt64); TF_LITE_ENSURE_EQ(context, bias->params.zero_point, 0); TF_LITE_ENSURE_EQ(context, input->params.zero_point, 0); TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); } else { TF_LITE_ENSURE_TYPES_EQ(context, bias->type, data_type); } TF_LITE_ENSURE_EQ(context, NumDimensions(bias), 1); TF_LITE_ENSURE_EQ(context, SizeOfDimension(filter, 3), SizeOfDimension(bias, 0)); } int channels_out = SizeOfDimension(filter, 3); int width = SizeOfDimension(input, 2); int height = SizeOfDimension(input, 1); int filter_width = SizeOfDimension(filter, 2); int filter_height = SizeOfDimension(filter, 1); int batches = SizeOfDimension(input, 0); // Matching GetWindowedOutputSize in TensorFlow. auto padding = params->padding; int out_width, out_height; data->padding = ComputePaddingHeightWidth( params->stride_height, params->stride_width, params->dilation_height_factor, params->dilation_width_factor, height, width, filter_height, filter_width, padding, &out_height, &out_width); // Note that quantized inference requires that all tensors have their // parameters set. This is usually done during quantized training or // calibration. 
if (data_type != kTfLiteFloat32) { TF_LITE_ENSURE_EQ(context, filter->quantization.type, kTfLiteAffineQuantization); const auto* affine_quantization = reinterpret_cast<TfLiteAffineQuantization*>( filter->quantization.params); TF_LITE_ENSURE(context, affine_quantization); TF_LITE_ENSURE(context, affine_quantization->scale); TF_LITE_ENSURE(context, (affine_quantization->scale->size == 1 || affine_quantization->scale->size == channels_out)); data->per_channel_output_multiplier.resize(channels_out); data->per_channel_output_shift.resize(channels_out); TF_LITE_ENSURE_STATUS(tflite::PopulateConvolutionQuantizationParams( context, input, filter, bias, output, params->activation, &data->output_multiplier, &data->output_shift, &data->output_activation_min, &data->output_activation_max, data->per_channel_output_multiplier.data(), data->per_channel_output_shift.data(), channels_out)); } if (is_hybrid) { const auto* affine_quantization = reinterpret_cast<TfLiteAffineQuantization*>( filter->quantization.params); TF_LITE_ENSURE(context, affine_quantization); TF_LITE_ENSURE(context, affine_quantization->scale); TF_LITE_ENSURE_EQ( context, affine_quantization->scale->size, filter->dims->data[affine_quantization->quantized_dimension]); int temporaries_count = 0; data->input_quantized_index = temporaries_count; if (data->input_quantized_id == kTensorNotAllocated) { TF_LITE_ENSURE_OK( context, context->AddTensors(context, 1, &data->input_quantized_id)); } ++temporaries_count; data->scaling_factors_index = temporaries_count; if (data->scaling_factors_id == kTensorNotAllocated) { TF_LITE_ENSURE_OK( context, context->AddTensors(context, 1, &data->scaling_factors_id)); } ++temporaries_count; data->input_offset_index = temporaries_count; if (data->input_offset_id == kTensorNotAllocated) { TF_LITE_ENSURE_OK( context, context->AddTensors(context, 1, &data->input_offset_id)); } ++temporaries_count; TfLiteIntArrayFree(node->temporaries); node->temporaries = TfLiteIntArrayCreate(temporaries_count); node->temporaries->data[data->input_quantized_index] = data->input_quantized_id; TfLiteTensor* input_quantized = GetTemporary(context, node, data->input_quantized_index); input_quantized->type = kTfLiteInt8; input_quantized->allocation_type = kTfLiteArenaRw; if (!TfLiteIntArrayEqual(input_quantized->dims, input->dims)) { TfLiteIntArray* input_quantized_size = TfLiteIntArrayCopy(input->dims); TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, input_quantized, input_quantized_size)); } node->temporaries->data[data->scaling_factors_index] = data->scaling_factors_id; TfLiteTensor* scaling_factors = GetTemporary(context, node, data->scaling_factors_index); scaling_factors->type = kTfLiteFloat32; scaling_factors->allocation_type = kTfLiteArenaRw; const int batch_size = SizeOfDimension(input, 0); int scaling_dims[1] = {batch_size}; if (!TfLiteIntArrayEqualsArray(scaling_factors->dims, 1, scaling_dims)) { TfLiteIntArray* scaling_factors_size = TfLiteIntArrayCreate(1); scaling_factors_size->data[0] = batch_size; TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scaling_factors, scaling_factors_size)); } node->temporaries->data[data->input_offset_index] = data->input_offset_id; TfLiteTensor* input_offsets = GetTemporary(context, node, data->input_offset_index); input_offsets->type = kTfLiteInt32; input_offsets->allocation_type = kTfLiteArenaRw; if (!TfLiteIntArrayEqualsArray(input_offsets->dims, 1, scaling_dims)) { TfLiteIntArray* input_offsets_size = TfLiteIntArrayCreate(1); input_offsets_size->data[0] = batch_size; 
TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, input_offsets, input_offsets_size)); } } TfLiteIntArray* outputSize = TfLiteIntArrayCreate(4); outputSize->data[0] = batches; outputSize->data[1] = out_height; outputSize->data[2] = out_width; outputSize->data[3] = channels_out; return context->ResizeTensor(context, output, outputSize); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-125', 'CWE-787'], 'message': '[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56'</s>
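The Prepare function above also dereferences the results of GetTemporary and of the optional bias input without a guard, which is exactly the GetTemporary/GetOptionalInputTensor case called out in the commit message. A small helper sketch of those extra checks follows; it is a hedged illustration, and the index argument is whatever temporary slot the kernel allocated.

// Sketch: guard temporaries (and optional inputs) before dereferencing them.
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/kernel_util.h"

namespace tflite {

// Returns the temporary tensor through `out`, failing the node if it is null.
TfLiteStatus EnsureTemporary(TfLiteContext* context, TfLiteNode* node,
                             int index, TfLiteTensor** out) {
  *out = GetTemporary(context, node, index);
  TF_LITE_ENSURE(context, *out != nullptr);
  return kTfLiteOk;
}

// Usage inside a Prepare similar to the one above (fragment; identifiers such
// as data->input_quantized_index and kBiasTensor come from that function):
//   TfLiteTensor* input_quantized = nullptr;
//   TF_LITE_ENSURE_OK(context, EnsureTemporary(context, node,
//                                              data->input_quantized_index,
//                                              &input_quantized));
//   input_quantized->type = kTfLiteInt8;
//   const TfLiteTensor* bias = GetOptionalInputTensor(context, node, kBiasTensor);
//   if (bias != nullptr) { /* validate bias->type as before */ }

}  // namespace tflite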
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: TfLiteStatus GenericPrepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); if (!is_supported_type(input->type)) { TF_LITE_UNSUPPORTED_TYPE(context, input->type, op_name); } return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); } ; Sample JSON format the expected output should conform to: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-125', 'CWE-787'], 'message': '[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: TfLiteStatus TanhEval(TfLiteContext* context, TfLiteNode* node) { OpData* data = reinterpret_cast<OpData*>(node->user_data); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); switch (input->type) { case kTfLiteFloat32: { if (kernel_type == kReference) { reference_ops::Tanh(GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } else { optimized_ops::Tanh(GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } return kTfLiteOk; } break; case kTfLiteInt16: { TanhParams params; params.input_left_shift = data->input_left_shift; if (kernel_type == kReference || (data->input_multiplier > 0)) { reference_integer_ops::Tanh( data->input_multiplier, data->input_left_shift, GetTensorShape(input), GetTensorData<int16_t>(input), GetTensorShape(output), GetTensorData<int16_t>(output)); } else { optimized_ops::Tanh( params, GetTensorShape(input), GetTensorData<int16_t>(input), GetTensorShape(output), GetTensorData<int16_t>(output)); } return kTfLiteOk; } break; case kTfLiteUInt8: { if (kernel_type == kFixedPointOptimized) { TanhParams params; params.input_zero_point = input->params.zero_point; params.input_range_radius = data->input_range_radius; params.input_multiplier = data->input_multiplier; params.input_left_shift = data->input_left_shift; optimized_ops::Tanh16bitPrecision( params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); } else { EvalUsingLookupTable(data, input, output); } return kTfLiteOk; } break; case kTfLiteInt8: { if (kernel_type == kFixedPointOptimized) { TanhParams params; params.input_zero_point = input->params.zero_point; params.input_range_radius = data->input_range_radius; params.input_multiplier = data->input_multiplier; params.input_left_shift = data->input_left_shift; optimized_ops::Tanh16bitPrecision( params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(output), GetTensorData<int8_t>(output)); } else { EvalUsingLookupTable(data, input, output); } return kTfLiteOk; } break; default: TF_LITE_KERNEL_LOG(context, "Only float32, uint8, int16 and int8 are supported " "currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-125', 'CWE-787'], 'message': '[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* params = GetInput(context, node, kParams); const TfLiteTensor* indices = GetInput(context, node, kIndices); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); switch (params->type) { case kTfLiteFloat32: case kTfLiteUInt8: case kTfLiteInt8: case kTfLiteInt64: case kTfLiteInt32: case kTfLiteString: break; default: context->ReportError( context, "Params of type '%s' are not supported by gather_nd.", TfLiteTypeGetName(params->type)); return kTfLiteError; } switch (indices->type) { case kTfLiteInt64: case kTfLiteInt32: break; default: context->ReportError( context, "Indices of type '%s' are not supported by gather_nd.", TfLiteTypeGetName(indices->type)); return kTfLiteError; } const int params_rank = NumDimensions(params); const int indices_rank = NumDimensions(indices); const int indices_nd = SizeOfDimension(indices, indices_rank - 1); if (params_rank < 1) { context->ReportError(context, "Params must be at least a vector."); return kTfLiteError; } if (indices_rank < 1) { context->ReportError(context, "Indices must be at least a vector."); return kTfLiteError; } if (indices_nd > params_rank) { context->ReportError( context, "Index innermost dimension length must be <= params rank."); return kTfLiteError; } // Assign to output the input type. output->type = params->type; // The result shape is // indices.shape[:-1] + params.shape[indices.shape[-1]:] const int output_rank = indices_rank + params_rank - indices_nd - 1; TfLiteIntArray* output_shape = TfLiteIntArrayCreate(output_rank); int output_index = 0; for (int i = 0; i < indices_rank - 1; ++i) { output_shape->data[output_index++] = indices->dims->data[i]; } for (int i = indices_nd; i < params_rank; ++i) { output_shape->data[output_index++] = params->dims->data[i]; } return context->ResizeTensor(context, output, output_shape); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-125', 'CWE-787'], 'message': '[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: TfLiteStatus ResizeOutputTensors(TfLiteContext* context, TfLiteNode* node, const TfLiteTensor* axis, const TfLiteTensor* input, int num_splits) { int axis_value = GetTensorData<int>(axis)[0]; if (axis_value < 0) { axis_value += NumDimensions(input); } TF_LITE_ENSURE(context, axis_value >= 0); TF_LITE_ENSURE(context, axis_value < NumDimensions(input)); const int input_size = SizeOfDimension(input, axis_value); TF_LITE_ENSURE_MSG(context, input_size % num_splits == 0, "Not an even split"); const int slice_size = input_size / num_splits; for (int i = 0; i < NumOutputs(node); ++i) { TfLiteIntArray* output_dims = TfLiteIntArrayCopy(input->dims); output_dims->data[axis_value] = slice_size; TfLiteTensor* output = GetOutput(context, node, i); TF_LITE_ENSURE_STATUS(context->ResizeTensor(context, output, output_dims)); } return kTfLiteOk; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-125', 'CWE-787'], 'message': '[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: TfLiteStatus L2Eval(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLitePoolParams*>(node->builtin_data); OpData* data = reinterpret_cast<OpData*>(node->user_data); TfLiteTensor* output = GetOutput(context, node, 0); const TfLiteTensor* input = GetInput(context, node, 0); switch (input->type) { // Already know in/out types are same. case kTfLiteFloat32: L2EvalFloat<kernel_type>(context, node, params, data, input, output); break; case kTfLiteUInt8: // We don't have a quantized implementation, so just fall through to the // 'default' case. default: context->ReportError(context, "Type %d not currently supported.", input->type); return kTfLiteError; } return kTfLiteOk; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-125', 'CWE-787'], 'message': '[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { TfLiteTensor* output_values = GetOutput(context, node, kOutputValues); TfLiteTensor* output_indexes = GetOutput(context, node, kOutputIndexes); if (IsDynamicTensor(output_values)) { TF_LITE_ENSURE_OK(context, ResizeOutput(context, node)); } const TfLiteTensor* top_k = GetInput(context, node, kInputTopK); const int32 k = top_k->data.i32[0]; // The tensor can have more than 2 dimensions or even be a vector, the code // anyway calls the internal dimension as row; const TfLiteTensor* input = GetInput(context, node, kInputTensor); const int32 row_size = input->dims->data[input->dims->size - 1]; int32 num_rows = 1; for (int i = 0; i < input->dims->size - 1; ++i) { num_rows *= input->dims->data[i]; } switch (output_values->type) { case kTfLiteFloat32: TopK(row_size, num_rows, GetTensorData<float>(input), k, output_indexes->data.i32, GetTensorData<float>(output_values)); break; case kTfLiteUInt8: TopK(row_size, num_rows, input->data.uint8, k, output_indexes->data.i32, output_values->data.uint8); break; case kTfLiteInt8: TopK(row_size, num_rows, input->data.int8, k, output_indexes->data.i32, output_values->data.int8); break; case kTfLiteInt32: TopK(row_size, num_rows, input->data.i32, k, output_indexes->data.i32, output_values->data.i32); break; case kTfLiteInt64: TopK(row_size, num_rows, input->data.i64, k, output_indexes->data.i32, output_values->data.i64); break; default: TF_LITE_KERNEL_LOG(context, "Type %s is currently not supported by TopK.", TfLiteTypeGetName(output_values->type)); return kTfLiteError; } return kTfLiteOk; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-125', 'CWE-787'], 'message': '[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* output = GetOutput(context, node, kOutputTensor); if (output->type == kTfLiteFloat32) { EvalAddN<float>(context, node); } else if (output->type == kTfLiteInt32) { EvalAddN<int32_t>(context, node); } else { context->ReportError(context, "AddN only supports FLOAT32|INT32 now, got %s.", TfLiteTypeGetName(output->type)); return kTfLiteError; } return kTfLiteOk; } ; Sample JSON format the expected output should conform to: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-125', 'CWE-787'], 'message': '[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: TfLiteStatus ResizeOutput(TfLiteContext* context, TfLiteNode* node) { TfLiteIntArray* output_shape = GetOutputShape(context, node); std::unique_ptr<TfLiteIntArray, void (*)(TfLiteIntArray*)> scoped_output_shape(output_shape, TfLiteIntArrayFree); const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); // Tensorflow's Reshape allows one of the shape components to have the // special -1 value, meaning it will be calculated automatically based on the // input. Here we calculate what that dimension should be so that the number // of output elements in the same as the number of input elements. int num_input_elements = NumElements(input); int num_output_elements = 1; int stretch_dim = -1; for (int i = 0; i < output_shape->size; ++i) { int value = output_shape->data[i]; if (value == -1) { TF_LITE_ENSURE_EQ(context, stretch_dim, -1); stretch_dim = i; } else { num_output_elements *= value; } } if (stretch_dim != -1) { output_shape->data[stretch_dim] = num_input_elements / num_output_elements; num_output_elements *= output_shape->data[stretch_dim]; } TF_LITE_ENSURE_EQ(context, num_input_elements, num_output_elements); return context->ResizeTensor(context, output, scoped_output_shape.release()); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-125', 'CWE-787'], 'message': '[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: TfLiteStatus PrepareAny(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); const TfLiteTensor* input = GetInput(context, node, 0); TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteBool); return PrepareSimple(context, node); } ; Sample JSON format the expected output should conform to: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-125', 'CWE-787'], 'message': '[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: TfLiteStatus ResizeOutputandTemporaryTensors(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, kInputTensor); const int num_dims = NumDimensions(input); TF_LITE_ENSURE(context, num_dims >= 2); const TfLiteTensor* fft_length = GetInput(context, node, kFftLengthTensor); const int32_t* fft_length_data = GetTensorData<int32_t>(fft_length); // The lib, fft2d, can only handle fft_lengths of power of 2. TF_LITE_ENSURE(context, IsPowerOfTwo(fft_length_data[0])); TF_LITE_ENSURE(context, IsPowerOfTwo(fft_length_data[1])); int fft_height, fft_width; fft_height = fft_length_data[0]; fft_width = fft_length_data[1]; int fft_working_length = std::max(fft_height, fft_width / 2); int half_fft_working_length = fft_working_length / 2; // Resize output tensor. TfLiteTensor* output = GetOutput(context, node, kOutputTensor); TfLiteIntArray* output_shape = TfLiteIntArrayCopy(input->dims); output_shape->data[num_dims - 2] = fft_length_data[0]; output_shape->data[num_dims - 1] = fft_length_data[1] / 2 + 1; TF_LITE_ENSURE_STATUS(context->ResizeTensor(context, output, output_shape)); // Resize temporary tensors, fft_integer_working_area. TfLiteTensor* fft_integer_working_area = GetTemporary(context, node, kFftIntegerWorkingAreaTensor); TfLiteIntArray* fft_integer_working_area_shape = TfLiteIntArrayCreate(1); fft_integer_working_area_shape->data[0] = 2 + static_cast<int>(sqrt(fft_working_length)); TF_LITE_ENSURE_STATUS(context->ResizeTensor(context, fft_integer_working_area, fft_integer_working_area_shape)); // Resize temporary tensors, fft_double_working_area. TfLiteTensor* fft_double_working_area = GetTemporary(context, node, kFftDoubleWorkingAreaTensor); TfLiteIntArray* fft_double_working_area_shape = TfLiteIntArrayCreate(1); fft_double_working_area_shape->data[0] = half_fft_working_length + fft_width / 4; TF_LITE_ENSURE_STATUS(context->ResizeTensor(context, fft_double_working_area, fft_double_working_area_shape)); return kTfLiteOk; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-125', 'CWE-787'], 'message': '[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 3); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, kInputTensor); const TfLiteTensor* begin = GetInput(context, node, kBeginTensor); const TfLiteTensor* size = GetInput(context, node, kSizeTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); // Ensure validity of input tensor and its dimension. TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); TF_LITE_ENSURE(context, begin->type == kTfLiteInt32 || begin->type == kTfLiteInt64); TF_LITE_ENSURE(context, size->type == kTfLiteInt32 || size->type == kTfLiteInt64); TF_LITE_ENSURE_EQ(context, NumDimensions(begin), 1); TF_LITE_ENSURE_EQ(context, NumDimensions(size), 1); TF_LITE_ENSURE_EQ(context, NumElements(begin), NumElements(size)); TF_LITE_ENSURE_MSG(context, NumDimensions(input) <= kMaxDim, "Slice op only supports 1D-4D input arrays."); // Postpone allocation of output if any of the indexing tensors is not // constant if (!(IsConstantTensor(begin) && IsConstantTensor(size))) { SetTensorToDynamic(output); return kTfLiteOk; } return ResizeOutputShape(context, input, begin, size, output); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-125', 'CWE-787'], 'message': '[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* params = GetInput(context, node, kParams); const TfLiteTensor* indices = GetInput(context, node, kIndices); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); switch (indices->type) { case kTfLiteInt32: return EvalGatherNd<int32_t>(context, params, indices, output); case kTfLiteInt64: return EvalGatherNd<int64_t>(context, params, indices, output); default: context->ReportError( context, "Indices of type '%s' are not supported by gather_nd.", TfLiteTypeGetName(indices->type)); return kTfLiteError; } } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-125', 'CWE-787'], 'message': '[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, kInputTensor); const TfLiteTensor* size = GetInput(context, node, kSizeTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); // TODO(ahentz): Our current implementations rely on the inputs being 4D. TF_LITE_ENSURE_EQ(context, NumDimensions(input), 4); TF_LITE_ENSURE_EQ(context, NumDimensions(size), 1); TF_LITE_ENSURE_EQ(context, size->type, kTfLiteInt32); // ResizeBilinear creates a float tensor even when the input is made of // integers. output->type = input->type; if (!IsConstantTensor(size)) { SetTensorToDynamic(output); return kTfLiteOk; } // Ensure params are valid. auto* params = reinterpret_cast<TfLiteResizeBilinearParams*>(node->builtin_data); if (params->half_pixel_centers && params->align_corners) { context->ReportError( context, "If half_pixel_centers is True, align_corners must be False."); return kTfLiteError; } return ResizeOutputTensor(context, input, size, output); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-125', 'CWE-787'], 'message': '[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); const TfLiteTensor* multipliers = GetInput(context, node, kInputMultipliers); if (IsDynamicTensor(output)) { TF_LITE_ENSURE_OK(context, ResizeOutput(context, node)); } switch (output->type) { case kTfLiteFloat32: Tile<float>(*(input->dims), input, multipliers, output); break; case kTfLiteUInt8: Tile<uint8_t>(*(input->dims), input, multipliers, output); break; case kTfLiteInt32: Tile<int32_t>(*(input->dims), input, multipliers, output); break; case kTfLiteInt64: Tile<int64_t>(*(input->dims), input, multipliers, output); break; case kTfLiteString: { DynamicBuffer buffer; TileString(*(input->dims), input, multipliers, &buffer, output); buffer.WriteToTensor(output, /*new_shape=*/nullptr); break; } case kTfLiteBool: Tile<bool>(*(input->dims), input, multipliers, output); break; default: context->ReportError(context, "Type '%s' is not supported by tile.", TfLiteTypeGetName(output->type)); return kTfLiteError; } return kTfLiteOk; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-125', 'CWE-787'], 'message': '[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, kInputTensor); const TfLiteTensor* seq_lengths = GetInput(context, node, kSeqLengthsTensor); TF_LITE_ENSURE_EQ(context, NumDimensions(seq_lengths), 1); if (input->type != kTfLiteInt32 && input->type != kTfLiteFloat32 && input->type != kTfLiteUInt8 && input->type != kTfLiteInt16 && input->type != kTfLiteInt64) { context->ReportError(context, "Type '%s' is not supported by reverse_sequence.", TfLiteTypeGetName(input->type)); return kTfLiteError; } if (seq_lengths->type != kTfLiteInt32 && seq_lengths->type != kTfLiteInt64) { context->ReportError( context, "Seq_lengths type '%s' is not supported by reverse_sequence.", TfLiteTypeGetName(seq_lengths->type)); return kTfLiteError; } TfLiteTensor* output = GetOutput(context, node, kOutputTensor); TfLiteIntArray* output_shape = TfLiteIntArrayCopy(input->dims); TF_LITE_ENSURE_TYPES_EQ(context, output->type, input->type); return context->ResizeTensor(context, output, output_shape); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-125', 'CWE-787'], 'message': '[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: TfLiteStatus Relu6Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); ReluOpData* data = reinterpret_cast<ReluOpData*>(node->user_data); switch (input->type) { case kTfLiteFloat32: { size_t elements = input->bytes / sizeof(float); const float* in = GetTensorData<float>(input); const float* in_end = in + elements; float* out = GetTensorData<float>(output); for (; in < in_end; in++, out++) *out = std::min(std::max(0.f, *in), 6.f); return kTfLiteOk; } break; case kTfLiteUInt8: QuantizedReluX<uint8_t>(0.0f, 6.0f, input, output, data); return kTfLiteOk; case kTfLiteInt8: { QuantizedReluX<int8_t>(0.0f, 6.0f, input, output, data); return kTfLiteOk; } break; default: TF_LITE_KERNEL_LOG( context, "Only float32, uint8 and int8 are supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-125', 'CWE-787'], 'message': '[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56'</s>
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: PHP_FUNCTION(linkinfo) { char *link; size_t link_len; zend_stat_t sb; int ret; if (zend_parse_parameters(ZEND_NUM_ARGS(), "p", &link, &link_len) == FAILURE) { return; } ret = VCWD_STAT(link, &sb); if (ret == -1) { php_error_docref(NULL, E_WARNING, "%s", strerror(errno)); RETURN_LONG(Z_L(-1)); } RETURN_LONG((zend_long) sb.st_dev); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-200'], 'message': 'Fixed bug #76459 windows linkinfo lacks openbasedir check'</s>
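Unlike the TensorFlow Lite rows, the label here points at a missing open_basedir restriction (CWE-200) rather than a missing null check. The sketch below is a reconstruction of the kind of guard the fix for bug #76459 describes, assuming PHP's php_check_open_basedir() helper; it is not the literal upstream patch.

/* Sketch: reject paths outside open_basedir before stat()ing them. */
PHP_FUNCTION(linkinfo)
{
	char *link;
	size_t link_len;
	zend_stat_t sb;
	int ret;

	if (zend_parse_parameters(ZEND_NUM_ARGS(), "p", &link, &link_len) == FAILURE) {
		return;
	}

	/* Assumed guard: fail early when the path violates open_basedir,
	   so no information about it leaks through stat(). */
	if (php_check_open_basedir(link)) {
		RETURN_FALSE;
	}

	ret = VCWD_STAT(link, &sb);
	if (ret == -1) {
		php_error_docref(NULL, E_WARNING, "%s", strerror(errno));
		RETURN_LONG(Z_L(-1));
	}

	RETURN_LONG((zend_long) sb.st_dev);
}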
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: TEST(CudnnRNNOpsTest, ForwardV2Lstm_ShapeFn) { int seq_length = 2; int batch_size = 3; int num_units = 4; int num_layers = 5; int dir_count = 1; std::vector<int> input_shape = {seq_length, batch_size, num_units}; std::vector<int> input_h_shape = {num_layers * dir_count, batch_size, num_units}; std::vector<int> output_shape = {seq_length, batch_size, num_units * dir_count}; auto shape_to_str = [](const std::vector<int>& v) { return strings::StrCat("[", absl::StrJoin(v, ","), "]"); }; string input_shapes_desc = strings::StrCat( shape_to_str(input_shape), ";", shape_to_str(input_h_shape), ";", shape_to_str(input_h_shape), ";", "[?]"); string output_shapes_desc = "[d0_0,d0_1,d1_2];in1;in1;?;?"; ShapeInferenceTestOp op("CudnnRNNV2"); TF_ASSERT_OK(NodeDefBuilder("test", "CudnnRNNV2") .Input({"input", 0, DT_FLOAT}) .Input({"input_h", 0, DT_FLOAT}) .Input({"input_c", 0, DT_FLOAT}) .Input({"params", 0, DT_FLOAT}) .Attr("rnn_mode", "lstm") .Attr("input_mode", "auto_select") .Attr("direction", "unidirectional") .Finalize(&op.node_def)); INFER_OK(op, input_shapes_desc, output_shapes_desc); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-416', 'CWE-787'], 'message': 'Fix access to undefined memory during shape inference of Cudnn*. PiperOrigin-RevId: 400324259 Change-Id: Ie3b7859d19ae24ee9ac2adf413bdc1e851bbc604'</s>
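The CudnnRNN row is labelled with a use-of-undefined-memory bug during shape inference. The exact upstream check is not reproduced here; as a hedged illustration of the general defence, a shape function can constrain each input's rank before reading its dimensions, so an under-specified shape fails cleanly instead of being indexed.

// Sketch: validate input ranks in a shape function before using them
// (illustrative only; not the actual CudnnRNN fix).
#include "tensorflow/core/framework/shape_inference.h"

namespace example {

using ::tensorflow::shape_inference::InferenceContext;
using ::tensorflow::shape_inference::ShapeHandle;

tensorflow::Status RnnLikeShapeFn(InferenceContext* c) {
  ShapeHandle input, input_h;
  // WithRank rejects shapes that do not have the expected rank, so later
  // Dim()/Value() calls never read undefined dimension data.
  TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 3, &input));
  TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 3, &input_h));
  // Output shapes are derived only from the validated handles.
  c->set_output(0, input);
  return tensorflow::Status::OK();
}

}  // namespace example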
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: static GF_Err gf_isom_parse_movie_boxes_internal(GF_ISOFile *mov, u32 *boxType, u64 *bytesMissing, Bool progressive_mode) { GF_Box *a; u64 totSize, mdat_end=0; GF_Err e = GF_OK; #ifndef GPAC_DISABLE_ISOM_FRAGMENTS if (mov->single_moof_mode && mov->single_moof_state == 2) { return e; } /*restart from where we stopped last*/ totSize = mov->current_top_box_start; if (mov->bytes_removed) { assert(totSize >= mov->bytes_removed); totSize -= mov->bytes_removed; } gf_bs_seek(mov->movieFileMap->bs, totSize); #endif /*while we have some data, parse our boxes*/ while (gf_bs_available(mov->movieFileMap->bs)) { *bytesMissing = 0; #ifndef GPAC_DISABLE_ISOM_FRAGMENTS mov->current_top_box_start = gf_bs_get_position(mov->movieFileMap->bs) + mov->bytes_removed; GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[iso file] Parsing a top-level box at position %d\n", mov->current_top_box_start)); #endif e = gf_isom_parse_root_box(&a, mov->movieFileMap->bs, boxType, bytesMissing, progressive_mode); if (e >= 0) { } else if (e == GF_ISOM_INCOMPLETE_FILE) { /*our mdat is uncomplete, only valid for READ ONLY files...*/ if (mov->openMode != GF_ISOM_OPEN_READ) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Incomplete MDAT while file is not read-only\n")); return GF_ISOM_INVALID_FILE; } if ((mov->openMode == GF_ISOM_OPEN_READ) && !progressive_mode) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Incomplete file while reading for dump - aborting parsing\n")); break; } return e; } else { return e; } switch (a->type) { /*MOOV box*/ case GF_ISOM_BOX_TYPE_MOOV: if (mov->moov) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Duplicate MOOV detected!\n")); gf_isom_box_del(a); return GF_ISOM_INVALID_FILE; } mov->moov = (GF_MovieBox *)a; mov->original_moov_offset = mov->current_top_box_start; /*set our pointer to the movie*/ mov->moov->mov = mov; #ifndef GPAC_DISABLE_ISOM_FRAGMENTS if (mov->moov->mvex) mov->moov->mvex->mov = mov; #ifdef GF_ENABLE_CTRN if (! 
(mov->FragmentsFlags & GF_ISOM_FRAG_READ_DEBUG)) { gf_isom_setup_traf_inheritance(mov); } #endif #endif e = gf_list_add(mov->TopBoxes, a); if (e) return e; totSize += a->size; if (!mov->moov->mvhd) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Missing MovieHeaderBox\n")); return GF_ISOM_INVALID_FILE; } if (mov->meta) { gf_isom_meta_restore_items_ref(mov, mov->meta); } //dump senc info in dump mode if (mov->FragmentsFlags & GF_ISOM_FRAG_READ_DEBUG) { u32 k; for (k=0; k<gf_list_count(mov->moov->trackList); k++) { GF_TrackBox *trak = (GF_TrackBox *)gf_list_get(mov->moov->trackList, k); if (trak->sample_encryption) { e = senc_Parse(mov->movieFileMap->bs, trak, NULL, trak->sample_encryption); if (e) return e; } } } else { u32 k; for (k=0; k<gf_list_count(mov->moov->trackList); k++) { GF_TrackBox *trak = (GF_TrackBox *)gf_list_get(mov->moov->trackList, k); if (trak->Media->information->sampleTable->sampleGroups) { convert_compact_sample_groups(trak->Media->information->sampleTable->child_boxes, trak->Media->information->sampleTable->sampleGroups); } } } if (mdat_end && mov->signal_frag_bounds && !(mov->FragmentsFlags & GF_ISOM_FRAG_READ_DEBUG) ) { gf_isom_push_mdat_end(mov, mdat_end); mdat_end=0; } break; /*META box*/ case GF_ISOM_BOX_TYPE_META: if (mov->meta) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Duplicate META detected!\n")); gf_isom_box_del(a); return GF_ISOM_INVALID_FILE; } mov->meta = (GF_MetaBox *)a; mov->original_meta_offset = mov->current_top_box_start; e = gf_list_add(mov->TopBoxes, a); if (e) { return e; } totSize += a->size; gf_isom_meta_restore_items_ref(mov, mov->meta); break; /*we only keep the MDAT in READ for dump purposes*/ case GF_ISOM_BOX_TYPE_MDAT: if (!mov->first_data_toplevel_offset) { mov->first_data_toplevel_offset = mov->current_top_box_start; mov->first_data_toplevel_size = a->size; } totSize += a->size; #ifndef GPAC_DISABLE_ISOM_FRAGMENTS if (mov->emsgs) { gf_isom_box_array_del(mov->emsgs); mov->emsgs = NULL; } #endif if (mov->openMode == GF_ISOM_OPEN_READ) { if (!mov->mdat) { mov->mdat = (GF_MediaDataBox *) a; e = gf_list_add(mov->TopBoxes, mov->mdat); if (e) { return e; } } #ifndef GPAC_DISABLE_ISOM_FRAGMENTS else if (mov->FragmentsFlags & GF_ISOM_FRAG_READ_DEBUG) gf_list_add(mov->TopBoxes, a); #endif else gf_isom_box_del(a); //in other modes we don't care if (mov->signal_frag_bounds && !(mov->FragmentsFlags & GF_ISOM_FRAG_READ_DEBUG) ) { mdat_end = gf_bs_get_position(mov->movieFileMap->bs); if (mov->moov) { gf_isom_push_mdat_end(mov, mdat_end); mdat_end=0; } } } /*if we don't have any MDAT yet, create one (edit-write mode) We only work with one mdat, but we're puting it at the place of the first mdat found when opening a file for editing*/ else if (!mov->mdat && (mov->openMode != GF_ISOM_OPEN_READ) && (mov->openMode != GF_ISOM_OPEN_KEEP_FRAGMENTS)) { gf_isom_box_del(a); mov->mdat = (GF_MediaDataBox *) gf_isom_box_new(GF_ISOM_BOX_TYPE_MDAT); if (!mov->mdat) return GF_OUT_OF_MEM; e = gf_list_add(mov->TopBoxes, mov->mdat); if (e) { return e; } } else { gf_isom_box_del(a); } break; case GF_ISOM_BOX_TYPE_FTYP: /*ONE AND ONLY ONE FTYP*/ if (mov->brand) { gf_isom_box_del(a); GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Duplicate 'ftyp' detected!\n")); return GF_ISOM_INVALID_FILE; } mov->brand = (GF_FileTypeBox *)a; totSize += a->size; e = gf_list_add(mov->TopBoxes, a); if (e) return e; break; case GF_ISOM_BOX_TYPE_OTYP: /*ONE AND ONLY ONE FTYP*/ if (mov->otyp) { gf_isom_box_del(a); GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Duplicate 
'otyp' detected!\n")); return GF_ISOM_INVALID_FILE; } if (mov->FragmentsFlags & GF_ISOM_FRAG_READ_DEBUG) { mov->otyp = (GF_Box *)a; totSize += a->size; e = gf_list_add(mov->TopBoxes, a); if (e) return e; } else { GF_FileTypeBox *brand = (GF_FileTypeBox *) gf_isom_box_find_child(a->child_boxes, GF_ISOM_BOX_TYPE_FTYP); if (brand) { s32 pos; gf_list_del_item(a->child_boxes, brand); pos = gf_list_del_item(mov->TopBoxes, mov->brand); gf_isom_box_del((GF_Box *) mov->brand); mov->brand = brand; if (pos<0) pos=0; gf_list_insert(mov->TopBoxes, brand, pos); } gf_isom_box_del(a); } break; case GF_ISOM_BOX_TYPE_PDIN: /*ONE AND ONLY ONE PDIN*/ if (mov->pdin) { gf_isom_box_del(a); GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Duplicate 'pdin'' detected!\n")); return GF_ISOM_INVALID_FILE; } mov->pdin = (GF_ProgressiveDownloadBox *) a; totSize += a->size; e = gf_list_add(mov->TopBoxes, a); if (e) return e; break; #ifndef GPAC_DISABLE_ISOM_FRAGMENTS case GF_ISOM_BOX_TYPE_STYP: { u32 brand = ((GF_FileTypeBox *)a)->majorBrand; switch (brand) { case GF_ISOM_BRAND_SISX: case GF_ISOM_BRAND_RISX: case GF_ISOM_BRAND_SSSS: mov->is_index_segment = GF_TRUE; break; default: break; } } /*fall-through*/ case GF_ISOM_BOX_TYPE_SIDX: case GF_ISOM_BOX_TYPE_SSIX: if (mov->moov && !mov->first_data_toplevel_offset) { mov->first_data_toplevel_offset = mov->current_top_box_start; mov->first_data_toplevel_size = a->size; } totSize += a->size; if (mov->FragmentsFlags & GF_ISOM_FRAG_READ_DEBUG) { e = gf_list_add(mov->TopBoxes, a); if (e) return e; } else if (mov->signal_frag_bounds && !(mov->FragmentsFlags & GF_ISOM_FRAG_READ_DEBUG) && (mov->openMode!=GF_ISOM_OPEN_KEEP_FRAGMENTS) ) { if (a->type==GF_ISOM_BOX_TYPE_SIDX) { if (mov->root_sidx) gf_isom_box_del( (GF_Box *) mov->root_sidx); mov->root_sidx = (GF_SegmentIndexBox *) a; mov->sidx_start_offset = mov->current_top_box_start; mov->sidx_end_offset = gf_bs_get_position(mov->movieFileMap->bs); } else if (a->type==GF_ISOM_BOX_TYPE_STYP) { mov->styp_start_offset = mov->current_top_box_start; if (mov->seg_styp) gf_isom_box_del(mov->seg_styp); mov->seg_styp = a; } else if (a->type==GF_ISOM_BOX_TYPE_SSIX) { if (mov->seg_ssix) gf_isom_box_del(mov->seg_ssix); mov->seg_ssix = a; } else { gf_isom_box_del(a); } gf_isom_push_mdat_end(mov, mov->current_top_box_start); } else if (!mov->NextMoofNumber && (a->type==GF_ISOM_BOX_TYPE_SIDX)) { if (mov->main_sidx) gf_isom_box_del( (GF_Box *) mov->main_sidx); mov->main_sidx = (GF_SegmentIndexBox *) a; mov->main_sidx_end_pos = mov->current_top_box_start + a->size; } else { gf_isom_box_del(a); } break; case GF_ISOM_BOX_TYPE_MOOF: //no support for inplace rewrite for fragmented files gf_isom_disable_inplace_rewrite(mov); if (!mov->moov) { GF_LOG(mov->moof ? GF_LOG_DEBUG : GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] Movie fragment but no moov (yet) - possibly broken parsing!\n")); } if (mov->single_moof_mode) { mov->single_moof_state++; if (mov->single_moof_state > 1) { gf_isom_box_del(a); return GF_OK; } } ((GF_MovieFragmentBox *)a)->mov = mov; totSize += a->size; mov->moof = (GF_MovieFragmentBox *) a; /*some smooth streaming streams contain a SDTP under the TRAF: this is incorrect, convert it*/ FixTrackID(mov); if (! 
(mov->FragmentsFlags & GF_ISOM_FRAG_READ_DEBUG)) { FixSDTPInTRAF(mov->moof); } else { u32 k; for (k=0; k<gf_list_count(mov->moof->TrackList); k++) { GF_TrackFragmentBox *traf = (GF_TrackFragmentBox *)gf_list_get(mov->moof->TrackList, k); if (traf->sampleGroups) { convert_compact_sample_groups(traf->child_boxes, traf->sampleGroups); } } } /*read & debug: store at root level*/ if (mov->FragmentsFlags & GF_ISOM_FRAG_READ_DEBUG) { u32 k; gf_list_add(mov->TopBoxes, a); /*also update pointers to trex for debug*/ if (mov->moov) { for (k=0; k<gf_list_count(mov->moof->TrackList); k++) { GF_TrackFragmentBox *traf = gf_list_get(mov->moof->TrackList, k); if (traf->tfhd && mov->moov->mvex && mov->moov->mvex->TrackExList) { GF_TrackBox *trak = gf_isom_get_track_from_id(mov->moov, traf->tfhd->trackID); u32 j=0; while ((traf->trex = (GF_TrackExtendsBox*)gf_list_enum(mov->moov->mvex->TrackExList, &j))) { if (traf->trex->trackID == traf->tfhd->trackID) { if (!traf->trex->track) traf->trex->track = trak; break; } traf->trex = NULL; } } //we should only parse senc/psec when no saiz/saio is present, otherwise we fetch the info directly if (traf->trex && traf->tfhd && traf->trex->track && traf->sample_encryption) { GF_TrackBox *trak = GetTrackbyID(mov->moov, traf->tfhd->trackID); if (trak) { trak->current_traf_stsd_idx = traf->tfhd->sample_desc_index ? traf->tfhd->sample_desc_index : traf->trex->def_sample_desc_index; e = senc_Parse(mov->movieFileMap->bs, trak, traf, traf->sample_encryption); if (e) return e; trak->current_traf_stsd_idx = 0; } } } } else { for (k=0; k<gf_list_count(mov->moof->TrackList); k++) { GF_TrackFragmentBox *traf = gf_list_get(mov->moof->TrackList, k); if (traf->sample_encryption) { e = senc_Parse(mov->movieFileMap->bs, NULL, traf, traf->sample_encryption); if (e) return e; } } } } else if (mov->openMode==GF_ISOM_OPEN_KEEP_FRAGMENTS) { mov->NextMoofNumber = mov->moof->mfhd->sequence_number+1; mov->moof = NULL; gf_isom_box_del(a); } else { /*merge all info*/ e = MergeFragment((GF_MovieFragmentBox *)a, mov); gf_isom_box_del(a); if (e) return e; } //done with moov if (mov->root_sidx) { gf_isom_box_del((GF_Box *) mov->root_sidx); mov->root_sidx = NULL; } if (mov->root_ssix) { gf_isom_box_del(mov->seg_ssix); mov->root_ssix = NULL; } if (mov->seg_styp) { gf_isom_box_del(mov->seg_styp); mov->seg_styp = NULL; } mov->sidx_start_offset = 0; mov->sidx_end_offset = 0; mov->styp_start_offset = 0; break; #endif case GF_ISOM_BOX_TYPE_UNKNOWN: { GF_UnknownBox *box = (GF_UnknownBox*)a; if (box->original_4cc == GF_ISOM_BOX_TYPE_JP) { u8 *c = (u8 *) box->data; if ((box->dataSize==4) && (GF_4CC(c[0],c[1],c[2],c[3])==(u32)0x0D0A870A)) mov->is_jp2 = 1; gf_isom_box_del(a); } else { e = gf_list_add(mov->TopBoxes, a); if (e) return e; } } break; case GF_ISOM_BOX_TYPE_PRFT: #ifndef GPAC_DISABLE_ISOM_FRAGMENTS if (!(mov->FragmentsFlags & GF_ISOM_FRAG_READ_DEBUG)) { //keep the last one read if (mov->last_producer_ref_time) gf_isom_box_del(a); else mov->last_producer_ref_time = (GF_ProducerReferenceTimeBox *)a; break; } #endif //fallthrough case GF_ISOM_BOX_TYPE_EMSG: #ifndef GPAC_DISABLE_ISOM_FRAGMENTS if (! (mov->FragmentsFlags & GF_ISOM_FRAG_READ_DEBUG)) { if (!mov->emsgs) mov->emsgs = gf_list_new(); gf_list_add(mov->emsgs, a); break; } #endif case GF_ISOM_BOX_TYPE_MFRA: case GF_ISOM_BOX_TYPE_MFRO: //only keep for dump mode, otherwise we ignore these boxes and we don't want to carry them over in non-fragmented file if (! 
(mov->FragmentsFlags & GF_ISOM_FRAG_READ_DEBUG)) { totSize += a->size; gf_isom_box_del(a); break; } default: totSize += a->size; e = gf_list_add(mov->TopBoxes, a); if (e) return e; break; } #ifndef GPAC_DISABLE_ISOM_FRAGMENTS /*remember where we left, in case we append an entire number of movie fragments*/ mov->current_top_box_start = gf_bs_get_position(mov->movieFileMap->bs) + mov->bytes_removed; #endif } /*we need at least moov or meta*/ if (!mov->moov && !mov->meta #ifndef GPAC_DISABLE_ISOM_FRAGMENTS && !mov->moof && !mov->is_index_segment #endif ) { return GF_ISOM_INCOMPLETE_FILE; } /*we MUST have movie header*/ if (!gf_opts_get_bool("core", "no-check")) { if (mov->moov && !mov->moov->mvhd) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Missing MVHD in MOOV!\n")); return GF_ISOM_INVALID_FILE; } /*we MUST have meta handler*/ if (mov->meta && !mov->meta->handler) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Missing handler in META!\n")); return GF_ISOM_INVALID_FILE; } } #ifndef GPAC_DISABLE_ISOM_WRITE if (mov->moov) { /*set the default interleaving time*/ mov->interleavingTime = mov->moov->mvhd->timeScale; #ifndef GPAC_DISABLE_ISOM_FRAGMENTS /*in edit mode with successfully loaded fragments, delete all fragment signaling since file is no longer fragmented*/ if ((mov->openMode > GF_ISOM_OPEN_READ) && (mov->openMode != GF_ISOM_OPEN_KEEP_FRAGMENTS) && mov->moov->mvex) { gf_isom_box_del_parent(&mov->moov->child_boxes, (GF_Box *)mov->moov->mvex); mov->moov->mvex = NULL; } #endif } //create a default mdat if none was found if (!mov->mdat && (mov->openMode != GF_ISOM_OPEN_READ) && (mov->openMode != GF_ISOM_OPEN_KEEP_FRAGMENTS)) { mov->mdat = (GF_MediaDataBox *) gf_isom_box_new(GF_ISOM_BOX_TYPE_MDAT); if (!mov->mdat) return GF_OUT_OF_MEM; e = gf_list_add(mov->TopBoxes, mov->mdat); if (e) return e; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ return GF_OK; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-476'], 'message': 'fixed #2163'</s>
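The CWE-476 label on this record (GPAC issue #2163) points at a reachable NULL dereference while finalising a parsed file: in the snippet, `mov->moov->mvhd->timeScale` is read whenever a `moov` exists, but the "missing MVHD" rejection is skipped when the "no-check" option is set. That reading is an inference from the snippet, not a quote of the patch. A minimal C sketch of the guarded pattern, using hypothetical simplified structs rather than GPAC's real types:

    /* Hypothetical, simplified stand-ins for the boxes in the record above. */
    struct mvhd_box { unsigned int timeScale; };
    struct moov_box { struct mvhd_box *mvhd; };
    struct movie    { struct moov_box *moov; unsigned int interleavingTime; };

    /* Dereference moov->mvhd only after it has been checked on every path,
     * not only when an optional "no-check" flag happens to be unset.
     * Returns 0 on success, -1 if a mandatory child box is missing. */
    static int set_default_interleaving(struct movie *mov)
    {
        if (!mov || !mov->moov || !mov->moov->mvhd)
            return -1;   /* treat the file as invalid instead of crashing */
        mov->interleavingTime = mov->moov->mvhd->timeScale;
        return 0;
    }

The design point is simply that validity checks which guard a later unconditional dereference cannot themselves be optional.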
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: GF_Err gf_isom_oinf_read_entry(void *entry, GF_BitStream *bs) { GF_OperatingPointsInformation* ptr = (GF_OperatingPointsInformation *)entry; u32 i, j, count; if (!ptr) return GF_BAD_PARAM; ptr->scalability_mask = gf_bs_read_u16(bs); gf_bs_read_int(bs, 2);//reserved count = gf_bs_read_int(bs, 6); for (i = 0; i < count; i++) { LHEVC_ProfileTierLevel *ptl; GF_SAFEALLOC(ptl, LHEVC_ProfileTierLevel); if (!ptl) return GF_OUT_OF_MEM; ptl->general_profile_space = gf_bs_read_int(bs, 2); ptl->general_tier_flag= gf_bs_read_int(bs, 1); ptl->general_profile_idc = gf_bs_read_int(bs, 5); ptl->general_profile_compatibility_flags = gf_bs_read_u32(bs); ptl->general_constraint_indicator_flags = gf_bs_read_long_int(bs, 48); ptl->general_level_idc = gf_bs_read_u8(bs); gf_list_add(ptr->profile_tier_levels, ptl); } count = gf_bs_read_u16(bs); for (i = 0; i < count; i++) { LHEVC_OperatingPoint *op; GF_SAFEALLOC(op, LHEVC_OperatingPoint); if (!op) return GF_OUT_OF_MEM; op->output_layer_set_idx = gf_bs_read_u16(bs); op->max_temporal_id = gf_bs_read_u8(bs); op->layer_count = gf_bs_read_u8(bs); if (op->layer_count > GF_ARRAY_LENGTH(op->layers_info)) { gf_free(op); return GF_NON_COMPLIANT_BITSTREAM; } for (j = 0; j < op->layer_count; j++) { op->layers_info[j].ptl_idx = gf_bs_read_u8(bs); op->layers_info[j].layer_id = gf_bs_read_int(bs, 6); op->layers_info[j].is_outputlayer = gf_bs_read_int(bs, 1) ? GF_TRUE : GF_FALSE; op->layers_info[j].is_alternate_outputlayer = gf_bs_read_int(bs, 1) ? GF_TRUE : GF_FALSE; } op->minPicWidth = gf_bs_read_u16(bs); op->minPicHeight = gf_bs_read_u16(bs); op->maxPicWidth = gf_bs_read_u16(bs); op->maxPicHeight = gf_bs_read_u16(bs); op->maxChromaFormat = gf_bs_read_int(bs, 2); op->maxBitDepth = gf_bs_read_int(bs, 3) + 8; gf_bs_read_int(bs, 1);//reserved op->frame_rate_info_flag = gf_bs_read_int(bs, 1) ? GF_TRUE : GF_FALSE; op->bit_rate_info_flag = gf_bs_read_int(bs, 1) ? GF_TRUE : GF_FALSE; if (op->frame_rate_info_flag) { op->avgFrameRate = gf_bs_read_u16(bs); gf_bs_read_int(bs, 6); //reserved op->constantFrameRate = gf_bs_read_int(bs, 2); } if (op->bit_rate_info_flag) { op->maxBitRate = gf_bs_read_u32(bs); op->avgBitRate = gf_bs_read_u32(bs); } gf_list_add(ptr->operating_points, op); } count = gf_bs_read_u8(bs); for (i = 0; i < count; i++) { LHEVC_DependentLayer *dep; GF_SAFEALLOC(dep, LHEVC_DependentLayer); if (!dep) return GF_OUT_OF_MEM; dep->dependent_layerID = gf_bs_read_u8(bs); dep->num_layers_dependent_on = gf_bs_read_u8(bs); if (dep->num_layers_dependent_on > GF_ARRAY_LENGTH(dep->dependent_on_layerID)) { gf_free(dep); return GF_NON_COMPLIANT_BITSTREAM; } for (j = 0; j < dep->num_layers_dependent_on; j++) dep->dependent_on_layerID[j] = gf_bs_read_u8(bs); for (j = 0; j < 16; j++) { if (ptr->scalability_mask & (1 << j)) dep->dimension_identifier[j] = gf_bs_read_u8(bs); } gf_list_add(ptr->dependency_layers, dep); } return GF_OK; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-617', 'CWE-703'], 'message': 'fixed #2165'</s>
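The CWE-617/703 labels on this record (GPAC issue #2165) concern a reachable assertion / unchecked error condition while parsing 'oinf' operating-point data. The exact upstream patch is not reproduced here; the sketch below only illustrates the general defensive pattern the labels call for when a count is read from an untrusted bitstream: validate it against the fixed-size destination before using it as a loop bound, and fail with an error code rather than an assertion a fuzzed file can reach. All names (MAX_LAYERS, read_u8_fn, read_op_info) are hypothetical.

    #include <stdint.h>
    #include <string.h>

    #define MAX_LAYERS 64   /* hypothetical capacity of the destination array */

    struct op_info {
        uint8_t layer_count;
        uint8_t layer_id[MAX_LAYERS];
    };

    /* read_u8 stands in for a bitstream reader such as gf_bs_read_u8(). */
    typedef uint8_t (*read_u8_fn)(void *bs);

    static int read_op_info(struct op_info *op, void *bs, read_u8_fn read_u8)
    {
        memset(op, 0, sizeof(*op));
        op->layer_count = read_u8(bs);
        if (op->layer_count > MAX_LAYERS)
            return -1;                      /* non-compliant stream, not a crash */
        for (uint8_t j = 0; j < op->layer_count; j++)
            op->layer_id[j] = read_u8(bs);
        return 0;
    }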
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: int main(int argc, char *argv[]) { QCoreApplication *a; if (isTUIMode(argc, argv)) { Global::isTUIMode = true; a = new QCoreApplication(argc, argv); } #ifdef ENABLE_GUI else { ConsoleAppender *consoleAppender = new ConsoleAppender; consoleAppender->setFormat(logFormat); RollingFileAppender *rollingFileAppender = new RollingFileAppender("/tmp/.deepin-clone.log"); rollingFileAppender->setFormat(logFormat); rollingFileAppender->setLogFilesLimit(5); rollingFileAppender->setDatePattern(RollingFileAppender::DailyRollover); logger->registerAppender(consoleAppender); logger->registerAppender(rollingFileAppender); if (qEnvironmentVariableIsSet("PKEXEC_UID")) { const quint32 pkexec_uid = qgetenv("PKEXEC_UID").toUInt(); const QDir user_home(getpwuid(pkexec_uid)->pw_dir); QFile pam_file(user_home.absoluteFilePath(".pam_environment")); if (pam_file.open(QIODevice::ReadOnly)) { while (!pam_file.atEnd()) { const QByteArray &line = pam_file.readLine().simplified(); if (line.startsWith("QT_SCALE_FACTOR")) { const QByteArrayList &list = line.split('='); if (list.count() == 2) { qputenv("QT_SCALE_FACTOR", list.last()); break; } } } pam_file.close(); } } DApplication::loadDXcbPlugin(); DApplication *app = new DApplication(argc, argv); app->setAttribute(Qt::AA_UseHighDpiPixmaps); if (!qApp->setSingleInstance("_deepin_clone_")) { qCritical() << "As well as the process is running"; return -1; } if (!app->loadTranslator()) { dError("Load translator failed"); } app->setApplicationDisplayName(QObject::tr("Deepin Clone")); app->setApplicationDescription(QObject::tr("Deepin Clone is a backup and restore tool in deepin. 
" "It supports disk or partition clone, backup and restore, and other functions.")); app->setApplicationAcknowledgementPage("https://www.deepin.org/acknowledgments/deepin-clone/"); app->setTheme("light"); a = app; } #endif a->setApplicationName("deepin-clone"); #ifdef ENABLE_GUI a->setApplicationVersion(DApplication::buildVersion("1.0.0.1")); #else a->setApplicationVersion("1.0.0.1"); #endif a->setOrganizationName("deepin"); CommandLineParser parser; QFile arguments_file("/lib/live/mount/medium/.tmp/deepin-clone.arguments"); QStringList arguments; bool load_arg_from_file = arguments_file.exists() && !Global::isTUIMode && !a->arguments().contains("--tui"); if (load_arg_from_file) { arguments.append(a->arguments().first()); if (!arguments_file.open(QIODevice::ReadOnly)) { qCritical() << "Open \"/lib/live/mount/medium/.tmp/deepin-clone.arguments\" failed, error:" << arguments_file.errorString(); } else { while (!arguments_file.atEnd()) { const QString &arg = QString::fromUtf8(arguments_file.readLine().trimmed()); if (!arg.isEmpty()) arguments.append(arg); } arguments_file.close(); arguments_file.remove(); } qDebug() << arguments; } else { arguments = a->arguments(); } parser.process(arguments); ConsoleAppender *consoleAppender = new ConsoleAppender; consoleAppender->setFormat(logFormat); RollingFileAppender *rollingFileAppender = new RollingFileAppender(parser.logFile()); rollingFileAppender->setFormat(logFormat); rollingFileAppender->setLogFilesLimit(5); rollingFileAppender->setDatePattern(RollingFileAppender::DailyRollover); logger->registerCategoryAppender("deepin.ghost", consoleAppender); logger->registerCategoryAppender("deepin.ghost", rollingFileAppender); parser.parse(); if (load_arg_from_file) { dCDebug("Load arguments from \"%s\"", qPrintable(arguments_file.fileName())); } dCInfo("Application command line: %s", qPrintable(arguments.join(' '))); if (Global::debugLevel == 0) { QLoggingCategory::setFilterRules("deepin.ghost.debug=false"); } if (Global::isTUIMode) { if (!parser.target().isEmpty()) { CloneJob *job = new CloneJob; QObject::connect(job, &QThread::finished, a, &QCoreApplication::quit); job->start(parser.source(), parser.target()); } } #ifdef ENABLE_GUI else { if (!parser.isSetOverride()) Global::isOverride = true; if (!parser.isSetDebug()) Global::debugLevel = 2; MainWindow *window = new MainWindow; window->setFixedSize(860, 660); window->setStyleSheet(DThemeManager::instance()->getQssForWidget("main", window)); window->setWindowIcon(QIcon::fromTheme("deepin-clone")); window->setWindowFlags(Qt::WindowCloseButtonHint | Qt::WindowMinimizeButtonHint | Qt::WindowSystemMenuHint); window->titlebar()->setIcon(window->windowIcon()); window->titlebar()->setTitle(QString()); #if DTK_VERSION > DTK_VERSION_CHECK(2, 0, 6, 0) window->titlebar()->setBackgroundTransparent(true); #endif window->show(); qApp->setProductIcon(window->windowIcon()); if (!parser.source().isEmpty()) { window->startWithFile(parser.source(), parser.target()); } QObject::connect(a, &QCoreApplication::aboutToQuit, window, &MainWindow::deleteLater); QDesktopServices::setUrlHandler("https", window, "openUrl"); } #endif int exitCode = Global::isTUIMode ? 
a->exec() : qApp->exec(); QString log_backup_file = parser.logBackupFile(); if (log_backup_file.startsWith("serial://")) { log_backup_file = Helper::parseSerialUrl(log_backup_file); } if (log_backup_file.isEmpty()) { return exitCode; } if (!QFile::copy(parser.logFile(), log_backup_file)) { dCWarning("failed to copy log file to \"%s\"", qPrintable(log_backup_file)); } return exitCode; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-59', 'CWE-61'], 'message': 'fix: Do not use the "/tmp" directory https://github.com/linuxdeepin/deepin-clone/issues/16 https://bugzilla.opensuse.org/show_bug.cgi?id=1130388'</s>
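The CWE-59/61 finding on this record is about the fixed, world-writable paths under /tmp (the log file and the arguments file): an attacker can pre-create such a path as a symlink and have the privileged process write through it. The project's stated fix was simply to stop using /tmp. The C sketch below shows one conventional mitigation — an unpredictable name opened with mkstemp(), which creates the file exclusively and never follows an existing link. The /var/log location and function name are assumptions for illustration, not what deepin-clone actually does.

    #include <stdio.h>
    #include <stdlib.h>

    /* Open a private log file with an unpredictable name instead of a fixed,
     * attacker-plantable path such as "/tmp/.deepin-clone.log". */
    static int open_private_log(char *path_out, size_t path_len)
    {
        char tmpl[] = "/var/log/deepin-clone.XXXXXX";  /* assumed location */
        int fd = mkstemp(tmpl);          /* O_CREAT|O_EXCL: refuses symlinks */
        if (fd < 0)
            return -1;
        snprintf(path_out, path_len, "%s", tmpl);
        return fd;                       /* caller owns the descriptor */
    }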
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: void fx_DataView(txMachine* the) { txSlot* slot; txBoolean flag = 0; txInteger offset, size; txSlot* info; txSlot* instance; txSlot* view; txSlot* buffer; if (mxIsUndefined(mxTarget)) mxTypeError("call: DataView"); if ((mxArgc > 0) && (mxArgv(0)->kind == XS_REFERENCE_KIND)) { slot = mxArgv(0)->value.reference->next; if (slot && ((slot->kind == XS_ARRAY_BUFFER_KIND) || (slot->kind == XS_HOST_KIND))) { flag = 1; } } if (!flag) mxTypeError("buffer is no ArrayBuffer instance"); offset = fxArgToByteLength(the, 1, 0); info = fxGetBufferInfo(the, mxArgv(0)); if (info->value.bufferInfo.length < offset) mxRangeError("out of range byteOffset %ld", offset); size = fxArgToByteLength(the, 2, -1); if (size >= 0) { if (info->value.bufferInfo.length < (offset + size)) mxRangeError("out of range byteLength %ld", size); } else { if (info->value.bufferInfo.maxLength < 0) size = info->value.bufferInfo.length - offset; } mxPushSlot(mxTarget); fxGetPrototypeFromConstructor(the, &mxDataViewPrototype); instance = fxNewDataViewInstance(the); mxPullSlot(mxResult); view = instance->next; buffer = view->next; buffer->kind = XS_REFERENCE_KIND; buffer->value.reference = mxArgv(0)->value.reference; info = fxGetBufferInfo(the, buffer); if (info->value.bufferInfo.maxLength >= 0) { if (info->value.bufferInfo.length < offset) mxRangeError("out of range byteOffset %ld", offset); else if (size >= 0) { if (info->value.bufferInfo.length < (offset + size)) mxRangeError("out of range byteLength %ld", size); } } view->value.dataView.offset = offset; view->value.dataView.size = size; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-125'], 'message': 'XS: #896'</s>
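The CWE-125 label on this record (XS issue #896) concerns DataView bounds handling, where `offset` and `size` come from script and are combined with signed arithmetic such as `offset + size`. Without claiming this is the actual Moddable patch, the sketch below shows the overflow-safe form of the range check that this class of bug generally needs: compare the size against the remaining space rather than adding two untrusted values together.

    #include <stdint.h>

    /* Returns 1 if a view of `size` bytes starting at `offset` lies entirely
     * inside a buffer of `buffer_len` bytes.  Writing the test as
     * `size > buffer_len - offset` (after checking offset <= buffer_len)
     * avoids the wrap-around that `offset + size > buffer_len` can hit with
     * large untrusted values. */
    static int view_in_bounds(uint64_t buffer_len, uint64_t offset, uint64_t size)
    {
        if (offset > buffer_len)
            return 0;
        if (size > buffer_len - offset)
            return 0;
        return 1;
    }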
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: int wvlan_set_station_nickname(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct wl_private *lp = wl_priv(dev); unsigned long flags; int ret = 0; /*------------------------------------------------------------------------*/ DBG_FUNC("wvlan_set_station_nickname"); DBG_ENTER(DbgInfo); wl_lock(lp, &flags); memset(lp->StationName, 0, sizeof(lp->StationName)); memcpy(lp->StationName, extra, wrqu->data.length); /* Commit the adapter parameters */ wl_apply(lp); wl_unlock(lp, &flags); DBG_LEAVE(DbgInfo); return ret; } /* wvlan_set_station_nickname */ ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-119', 'CWE-787'], 'message': 'staging: wlags49_h2: buffer overflow setting station name We need to check the length parameter before doing the memcpy(). I've actually changed it to strlcpy() as well so that it's NUL terminated. You need CAP_NET_ADMIN to trigger these so it's not the end of the world. Reported-by: Nico Golde <nico@ngolde.de> Reported-by: Fabian Yamaguchi <fabs@goesec.de> Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com> Cc: stable@kernel.org Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>'</s>
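Here the commit message itself describes the fix for the CWE-119/787 finding: check the user-supplied length before the memcpy() into the fixed-size StationName buffer, and guarantee NUL termination (the kernel patch used strlcpy()). A standalone C sketch of that shape; STATION_NAME_LEN is an assumed size, and a plain bounded memcpy is used so the sketch builds outside the kernel tree.

    #include <string.h>
    #include <stddef.h>

    #define STATION_NAME_LEN 32   /* assumed size of lp->StationName */

    /* Reject oversized input before copying; the zeroed buffer plus the
     * length check guarantee the result is NUL-terminated. */
    static int set_station_name(char dst[STATION_NAME_LEN],
                                const char *src, size_t src_len)
    {
        if (src_len >= STATION_NAME_LEN)
            return -1;                    /* the driver would return -EINVAL */
        memset(dst, 0, STATION_NAME_LEN);
        memcpy(dst, src, src_len);        /* src_len < STATION_NAME_LEN */
        return 0;
    }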
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: static void vapic_exit(struct kvm_vcpu *vcpu) { struct kvm_lapic *apic = vcpu->arch.apic; int idx; if (!apic || !apic->vapic_addr) return; idx = srcu_read_lock(&vcpu->kvm->srcu); kvm_release_page_dirty(apic->vapic_page); mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); srcu_read_unlock(&vcpu->kvm->srcu, idx); } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-20'], 'message': 'KVM: x86: Convert vapic synchronization to _cached functions (CVE-2013-6368) In kvm_lapic_sync_from_vapic and kvm_lapic_sync_to_vapic there is the potential to corrupt kernel memory if userspace provides an address that is at the end of a page. This patches concerts those functions to use kvm_write_guest_cached and kvm_read_guest_cached. It also checks the vapic_address specified by userspace during ioctl processing and returns an error to userspace if the address is not a valid GPA. This is generally not guest triggerable, because the required write is done by firmware that runs before the guest. Also, it only affects AMD processors and oldish Intel that do not have the FlexPriority feature (unless you disable FlexPriority, of course; then newer processors are also affected). Fixes: b93463aa59d6 ('KVM: Accelerated apic support') Reported-by: Andrew Honig <ahonig@google.com> Cc: stable@vger.kernel.org Signed-off-by: Andrew Honig <ahonig@google.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>'</s>
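The commit message for this CWE-20 record explains the underlying problem: userspace can supply a vapic address at the very end of a page, and later accesses through a single-page mapping then run past it; the fix validates the address when it is configured and switches to cached guest accessors. The sketch below is not the KVM patch — it only models the validation step in plain C, with PAGE_SIZE and the function name as assumptions.

    #include <stdint.h>

    #define PAGE_SIZE 4096u

    /* Validate at configuration time that an object of `obj_size` bytes at a
     * user-supplied address does not straddle the end of its page, instead of
     * trusting the value when the mapping is touched later. */
    static int addr_object_fits_in_page(uint64_t addr, uint32_t obj_size)
    {
        uint64_t offset_in_page = addr & (PAGE_SIZE - 1);
        return offset_in_page + obj_size <= PAGE_SIZE;
    }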
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: int rdma_listen(struct rdma_cm_id *id, int backlog) { struct rdma_id_private *id_priv = container_of(id, struct rdma_id_private, id); int ret; if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN)) { /* For a well behaved ULP state will be RDMA_CM_IDLE */ id->route.addr.src_addr.ss_family = AF_INET; ret = rdma_bind_addr(id, cma_src_addr(id_priv)); if (ret) return ret; if (WARN_ON(!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN))) return -EINVAL; } /* * Once the ID reaches RDMA_CM_LISTEN it is not allowed to be reusable * any more, and has to be unique in the bind list. */ if (id_priv->reuseaddr) { mutex_lock(&lock); ret = cma_check_port(id_priv->bind_list, id_priv, 0); if (!ret) id_priv->reuseaddr = 0; mutex_unlock(&lock); if (ret) goto err; } id_priv->backlog = backlog; if (id_priv->cma_dev) { if (rdma_cap_ib_cm(id->device, 1)) { ret = cma_ib_listen(id_priv); if (ret) goto err; } else if (rdma_cap_iw_cm(id->device, 1)) { ret = cma_iw_listen(id_priv, backlog); if (ret) goto err; } else { ret = -ENOSYS; goto err; } } else { ret = cma_listen_on_all(id_priv); if (ret) goto err; } return 0; err: id_priv->backlog = 0; /* * All the failure paths that lead here will not allow the req_handler's * to have run. */ cma_comp_exch(id_priv, RDMA_CM_LISTEN, RDMA_CM_ADDR_BOUND); return ret; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-416'], 'message': 'RDMA/cma: Do not change route.addr.src_addr.ss_family If the state is not idle then rdma_bind_addr() will immediately fail and no change to global state should happen. For instance if the state is already RDMA_CM_LISTEN then this will corrupt the src_addr and would cause the test in cma_cancel_operation(): if (cma_any_addr(cma_src_addr(id_priv)) && !id_priv->cma_dev) To view a mangled src_addr, eg with a IPv6 loopback address but an IPv4 family, failing the test. This would manifest as this trace from syzkaller: BUG: KASAN: use-after-free in __list_add_valid+0x93/0xa0 lib/list_debug.c:26 Read of size 8 at addr ffff8881546491e0 by task syz-executor.1/32204 CPU: 1 PID: 32204 Comm: syz-executor.1 Not tainted 5.12.0-rc8-syzkaller #0 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 Call Trace: __dump_stack lib/dump_stack.c:79 [inline] dump_stack+0x141/0x1d7 lib/dump_stack.c:120 print_address_description.constprop.0.cold+0x5b/0x2f8 mm/kasan/report.c:232 __kasan_report mm/kasan/report.c:399 [inline] kasan_report.cold+0x7c/0xd8 mm/kasan/report.c:416 __list_add_valid+0x93/0xa0 lib/list_debug.c:26 __list_add include/linux/list.h:67 [inline] list_add_tail include/linux/list.h:100 [inline] cma_listen_on_all drivers/infiniband/core/cma.c:2557 [inline] rdma_listen+0x787/0xe00 drivers/infiniband/core/cma.c:3751 ucma_listen+0x16a/0x210 drivers/infiniband/core/ucma.c:1102 ucma_write+0x259/0x350 drivers/infiniband/core/ucma.c:1732 vfs_write+0x28e/0xa30 fs/read_write.c:603 ksys_write+0x1ee/0x250 fs/read_write.c:658 do_syscall_64+0x2d/0x70 arch/x86/entry/common.c:46 entry_SYSCALL_64_after_hwframe+0x44/0xae Which is indicating that an rdma_id_private was destroyed without doing cma_cancel_listens(). 
Instead of trying to re-use the src_addr memory to indirectly create an any address build one explicitly on the stack and bind to that as any other normal flow would do. Link: https://lore.kernel.org/r/0-v1-9fbb33f5e201+2a-cma_listen_jgg@nvidia.com Cc: stable@vger.kernel.org Fixes: 732d41c545bb ("RDMA/cma: Make the locking for automatic state transition more clear") Reported-by: syzbot+6bb0528b13611047209c@syzkaller.appspotmail.com Tested-by: Hao Sun <sunhao.th@gmail.com> Reviewed-by: Leon Romanovsky <leonro@nvidia.com> Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>'</s>
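The commit message above states the repair for this CWE-416 record directly: build the "any address" to bind to in a stack temporary instead of rewriting the long-lived `id->route.addr.src_addr` before an operation that may fail and leave the object half-modified for concurrent users. The following C sketch shows only that temporary-construction pattern with ordinary socket types; it is not the rdma_cm code.

    #include <string.h>
    #include <sys/socket.h>
    #include <netinet/in.h>

    /* Build a wildcard ("any") address in a caller-provided stack temporary.
     * The shared object is only updated after the operation using this
     * temporary has succeeded. */
    static void make_any_addr(struct sockaddr_storage *tmp, int family)
    {
        memset(tmp, 0, sizeof(*tmp));
        if (family == AF_INET6) {
            struct sockaddr_in6 *a6 = (struct sockaddr_in6 *)tmp;
            a6->sin6_family = AF_INET6;
            a6->sin6_addr = in6addr_any;
        } else {
            struct sockaddr_in *a4 = (struct sockaddr_in *)tmp;
            a4->sin_family = AF_INET;
            a4->sin_addr.s_addr = INADDR_ANY;
        }
    }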
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: static int perf_trace_event_perm(struct ftrace_event_call *tp_event, struct perf_event *p_event) { /* The ftrace function trace is allowed only for root. */ if (ftrace_event_is_function(tp_event) && perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN)) return -EPERM; /* No tracing, just counting, so no obvious leak */ if (!(p_event->attr.sample_type & PERF_SAMPLE_RAW)) return 0; /* Some events are ok to be traced by non-root users... */ if (p_event->attach_state == PERF_ATTACH_TASK) { if (tp_event->flags & TRACE_EVENT_FL_CAP_ANY) return 0; } /* * ...otherwise raw tracepoint data can be a severe data leak, * only allow root to have these. */ if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN)) return -EPERM; return 0; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-284', 'CWE-264'], 'message': 'perf/ftrace: Fix paranoid level for enabling function tracer The current default perf paranoid level is "1" which has "perf_paranoid_kernel()" return false, and giving any operations that use it, access to normal users. Unfortunately, this includes function tracing and normal users should not be allowed to enable function tracing by default. The proper level is defined at "-1" (full perf access), which "perf_paranoid_tracepoint_raw()" will only give access to. Use that check instead for enabling function tracing. Reported-by: Dave Jones <davej@redhat.com> Reported-by: Vince Weaver <vincent.weaver@maine.edu> Tested-by: Vince Weaver <vincent.weaver@maine.edu> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Ingo Molnar <mingo@kernel.org> Cc: Jiri Olsa <jolsa@redhat.com> Cc: Frederic Weisbecker <fweisbec@gmail.com> Cc: stable@vger.kernel.org # 3.4+ CVE: CVE-2013-2930 Fixes: ced39002f5ea ("ftrace, perf: Add support to use function tracepoint in perf") Signed-off-by: Steven Rostedt <rostedt@goodmis.org>'</s>
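For this CWE-284/264 record the commit message spells out the access-control mistake: function tracing was gated on the "kernel" paranoia threshold (default level 1 passes) when it should require the strictest setting, the one perf_paranoid_tracepoint_raw() checks. A simplified C model of that gate follows; the sysctl variable and function name here are stand-ins, not the kernel's.

    #include <stdbool.h>

    /* Simplified model of the sysctl described above:
     * -1 = unrestricted, 0/1/2 = progressively stricter. */
    static int perf_event_paranoid = 2;   /* stand-in for the real sysctl */

    static bool allow_function_trace(bool has_cap_sys_admin)
    {
        /* Equivalent of the stricter perf_paranoid_tracepoint_raw() gate:
         * anything above -1 requires CAP_SYS_ADMIN. */
        if (perf_event_paranoid > -1 && !has_cap_sys_admin)
            return false;
        return true;
    }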
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: CodingReturnValue VP8ComponentDecoder::decode_chunk(UncompressedComponents * const colldata) { mux_splicer.init(spin_workers_); /* cmpc is a global variable with the component count */ /* construct 4x4 VP8 blocks to hold 8x8 JPEG blocks */ if ( thread_state_[0] == nullptr || thread_state_[0]->context_[0].isNil() ) { /* first call */ BlockBasedImagePerChannel<false> framebuffer; framebuffer.memset(0); for (size_t i = 0; i < framebuffer.size() && int( i ) < colldata->get_num_components(); ++i) { framebuffer[i] = &colldata->full_component_write((BlockType)i); } Sirikata::Array1d<BlockBasedImagePerChannel<false>, MAX_NUM_THREADS> all_framebuffers; for (size_t i = 0; i < all_framebuffers.size(); ++i) { all_framebuffers[i] = framebuffer; } size_t num_threads_needed = initialize_decoder_state(colldata, all_framebuffers).size(); for (size_t i = 0;i < num_threads_needed; ++i) { map_logical_thread_to_physical_thread(i, i); } for (size_t i = 0;i < num_threads_needed; ++i) { initialize_thread_id(i, i, framebuffer); if (!do_threading_) { break; } } if (num_threads_needed > NUM_THREADS || num_threads_needed == 0) { return CODING_ERROR; } } if (do_threading_) { for (unsigned int thread_id = 0; thread_id < NUM_THREADS; ++thread_id) { unsigned int cur_spin_worker = thread_id; spin_workers_[cur_spin_worker].work = std::bind(worker_thread, thread_state_[thread_id], thread_id, colldata, mux_splicer.thread_target, getWorker(cur_spin_worker), &send_to_actual_thread_state); spin_workers_[cur_spin_worker].activate_work(); } flush(); for (unsigned int thread_id = 0; thread_id < NUM_THREADS; ++thread_id) { unsigned int cur_spin_worker = thread_id; TimingHarness::timing[thread_id][TimingHarness::TS_THREAD_WAIT_STARTED] = TimingHarness::get_time_us(); spin_workers_[cur_spin_worker].main_wait_for_done(); TimingHarness::timing[thread_id][TimingHarness::TS_THREAD_WAIT_FINISHED] = TimingHarness::get_time_us(); } // join on all threads } else { if (virtual_thread_id_ != -1) { TimingHarness::timing[0][TimingHarness::TS_ARITH_STARTED] = TimingHarness::get_time_us(); CodingReturnValue ret = thread_state_[0]->vp8_decode_thread(0, colldata); if (ret == CODING_PARTIAL) { return ret; } TimingHarness::timing[0][TimingHarness::TS_ARITH_FINISHED] = TimingHarness::get_time_us(); } // wait for "threads" virtual_thread_id_ += 1; // first time's a charm for (unsigned int thread_id = virtual_thread_id_; thread_id < NUM_THREADS; ++thread_id, ++virtual_thread_id_) { BlockBasedImagePerChannel<false> framebuffer; framebuffer.memset(0); for (size_t i = 0; i < framebuffer.size() && int( i ) < colldata->get_num_components(); ++i) { framebuffer[i] = &colldata->full_component_write((BlockType)i); } initialize_thread_id(thread_id, 0, framebuffer); thread_state_[0]->bool_decoder_.init(new VirtualThreadPacketReader(thread_id, &mux_reader_, &mux_splicer)); TimingHarness::timing[thread_id][TimingHarness::TS_ARITH_STARTED] = TimingHarness::get_time_us(); CodingReturnValue ret; if ((ret = thread_state_[0]->vp8_decode_thread(0, colldata)) == CODING_PARTIAL) { return ret; } TimingHarness::timing[thread_id][TimingHarness::TS_ARITH_FINISHED] = TimingHarness::get_time_us(); } } TimingHarness::timing[0][TimingHarness::TS_JPEG_RECODE_STARTED] = TimingHarness::get_time_us(); for (int component = 0; component < 
colldata->get_num_components(); ++component) { colldata->worker_mark_cmp_finished((BlockType)component); } colldata->worker_update_coefficient_position_progress( 64 ); colldata->worker_update_bit_progress( 16 ); write_byte_bill(Billing::DELIMITERS, true, mux_reader_.getOverhead()); return CODING_DONE; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-1187'], 'message': 'fix #87 : always check that threads_required set up the appropriate number of threads---fire off nop functions on unused threads for consistency'</s>
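The CWE-1187 label and the "always check that threads_required set up the appropriate number of threads" message for this record point at initialization ordering: in the snippet, `num_threads_needed` is used to index and initialise per-thread state before it is range-checked. Without claiming to mirror the lepton patch, the C sketch below shows the ordering the fix message implies — validate the count first, then initialise.

    #include <stddef.h>

    #define NUM_THREADS 8   /* assumed capacity of the per-thread state array */

    /* Reject an out-of-range thread count before it is used to index
     * per-thread state, not afterwards. */
    static int setup_threads(size_t num_threads_needed,
                             int (*init_thread)(size_t id))
    {
        if (num_threads_needed == 0 || num_threads_needed > NUM_THREADS)
            return -1;                   /* the decoder returns CODING_ERROR */
        for (size_t i = 0; i < num_threads_needed; i++)
            if (init_thread(i) != 0)
                return -1;
        return 0;
    }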
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: static int packet_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags) { struct sock *sk = sock->sk; struct sk_buff *skb; int copied, err; struct sockaddr_ll *sll; int vnet_hdr_len = 0; err = -EINVAL; if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE)) goto out; #if 0 /* What error should we return now? EUNATTACH? */ if (pkt_sk(sk)->ifindex < 0) return -ENODEV; #endif if (flags & MSG_ERRQUEUE) { err = packet_recv_error(sk, msg, len); goto out; } /* * Call the generic datagram receiver. This handles all sorts * of horrible races and re-entrancy so we can forget about it * in the protocol layers. * * Now it will return ENETDOWN, if device have just gone down, * but then it will block. */ skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err); /* * An error occurred so return it. Because skb_recv_datagram() * handles the blocking we don't see and worry about blocking * retries. */ if (skb == NULL) goto out; if (pkt_sk(sk)->has_vnet_hdr) { struct virtio_net_hdr vnet_hdr = { 0 }; err = -EINVAL; vnet_hdr_len = sizeof(vnet_hdr); if (len < vnet_hdr_len) goto out_free; len -= vnet_hdr_len; if (skb_is_gso(skb)) { struct skb_shared_info *sinfo = skb_shinfo(skb); /* This is a hint as to how much should be linear. */ vnet_hdr.hdr_len = skb_headlen(skb); vnet_hdr.gso_size = sinfo->gso_size; if (sinfo->gso_type & SKB_GSO_TCPV4) vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4; else if (sinfo->gso_type & SKB_GSO_TCPV6) vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6; else if (sinfo->gso_type & SKB_GSO_UDP) vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP; else if (sinfo->gso_type & SKB_GSO_FCOE) goto out_free; else BUG(); if (sinfo->gso_type & SKB_GSO_TCP_ECN) vnet_hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN; } else vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE; if (skb->ip_summed == CHECKSUM_PARTIAL) { vnet_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; vnet_hdr.csum_start = skb_checksum_start_offset(skb); vnet_hdr.csum_offset = skb->csum_offset; } /* else everything is zero */ err = memcpy_toiovec(msg->msg_iov, (void *)&vnet_hdr, vnet_hdr_len); if (err < 0) goto out_free; } /* * If the address length field is there to be filled in, we fill * it in now. */ sll = &PACKET_SKB_CB(skb)->sa.ll; if (sock->type == SOCK_PACKET) msg->msg_namelen = sizeof(struct sockaddr_pkt); else msg->msg_namelen = sll->sll_halen + offsetof(struct sockaddr_ll, sll_addr); /* * You lose any data beyond the buffer you gave. If it worries a * user program they can ask the device for its MTU anyway. 
*/ copied = skb->len; if (copied > len) { copied = len; msg->msg_flags |= MSG_TRUNC; } err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); if (err) goto out_free; sock_recv_ts_and_drops(msg, sk, skb); if (msg->msg_name) memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, msg->msg_namelen); if (pkt_sk(sk)->auxdata) { struct tpacket_auxdata aux; aux.tp_status = TP_STATUS_USER; if (skb->ip_summed == CHECKSUM_PARTIAL) aux.tp_status |= TP_STATUS_CSUMNOTREADY; aux.tp_len = PACKET_SKB_CB(skb)->origlen; aux.tp_snaplen = skb->len; aux.tp_mac = 0; aux.tp_net = skb_network_offset(skb); if (vlan_tx_tag_present(skb)) { aux.tp_vlan_tci = vlan_tx_tag_get(skb); aux.tp_status |= TP_STATUS_VLAN_VALID; } else { aux.tp_vlan_tci = 0; } put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux); } /* * Free or return the buffer as appropriate. Again this * hides all the races and re-entrancy issues from us. */ err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied); out_free: skb_free_datagram(sk, skb); out: return err; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-200', 'CWE-264'], 'message': 'af_packet: prevent information leak In 2.6.27, commit 393e52e33c6c2 (packet: deliver VLAN TCI to userspace) added a small information leak. Add padding field and make sure its zeroed before copy to user. Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com> CC: Patrick McHardy <kaber@trash.net> Signed-off-by: David S. Miller <davem@davemloft.net>'</s>
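The commit message for this CWE-200 record describes the fix exactly: add an explicit padding field to the auxdata structure and zero the whole object before it is copied to userspace, so no kernel stack bytes leak through padding or branches that leave members unset. A self-contained C sketch of that pattern, with a simplified stand-in struct:

    #include <string.h>
    #include <stdint.h>

    struct aux_data {            /* simplified stand-in for tpacket_auxdata */
        uint32_t tp_status;
        uint32_t tp_len;
        uint16_t tp_vlan_tci;
        uint16_t tp_padding;     /* explicit padding, mirroring the fix */
    };

    /* Zero-initialise the whole object before filling it, so whatever is
     * copied out later contains no leftover stack contents regardless of
     * which branches run. */
    static void fill_aux(struct aux_data *aux, uint32_t len, int has_vlan,
                         uint16_t vlan_tci)
    {
        memset(aux, 0, sizeof(*aux));
        aux->tp_status = 1;          /* e.g. TP_STATUS_USER */
        aux->tp_len = len;
        if (has_vlan)
            aux->tp_vlan_tci = vlan_tci;
    }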
<s>[INST] Given the following code containing errors, provide Common Weakness Enumeration (CWE) identifiers and corresponding messages in JSON format. Each identified weakness should have a unique entry in the JSON array. Code: explicit DataFormatVecPermuteOp(OpKernelConstruction* context) : OpKernel(context) { string src_format; OP_REQUIRES_OK(context, context->GetAttr("src_format", &src_format)); string dst_format; OP_REQUIRES_OK(context, context->GetAttr("dst_format", &dst_format)); src_format_ = src_format; dst_format_ = dst_format; } ; Sample JSON Format Confermily Expected output: "examples" => "cwe": "xxx"; "message": "xxx" [/INST] 'cwe': ['CWE-125'], 'message': 'Validate that `DataFormat*` attributes form a permutation. The `src_format` and `dst_format` attributes for the `DataFormatDimMap` and `DataFormatVecPermute` raw ops are supposed to determine a permutation. However, this was not validated and could result in unitialized memory accesses as well as writes outside of bounds and potential crashes. While here, we also test that the format attributes have the needed length, add tests for all validation failure cases, remove unnecessary calls to `strings::StrCat`, and fix a few grammar errors. This will be cherry-picked on the supported release branches. PiperOrigin-RevId: 346135579 Change-Id: I1c76392382c89ad8f072d5bc93d70669851eb404'</s>
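The final record's commit message again names the missing validation behind the CWE-125 label: the `src_format`/`dst_format` attributes must have the required length and form a permutation before they are used to build index maps. TensorFlow's real check is C++ inside the op; the C sketch below only illustrates the permutation test for the 4D case, with the reference layout "NHWC" and the function name as assumptions.

    #include <string.h>
    #include <stdbool.h>

    /* Accept a format string only if it is exactly a permutation of the
     * reference layout "NHWC"; strings of the wrong length, with missing
     * axes, or with repeated axes would otherwise drive out-of-bounds
     * indexing when the permutation is applied. */
    static bool is_permutation_of_nhwc(const char *fmt)
    {
        const char ref[] = "NHWC";
        if (strlen(fmt) != 4)
            return false;
        for (int i = 0; i < 4; i++) {
            if (strchr(fmt, ref[i]) == NULL)       /* every axis present */
                return false;
            const char *first = strchr(fmt, fmt[i]);
            if (strchr(first + 1, fmt[i]) != NULL) /* no axis repeated */
                return false;
        }
        return true;
    }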