Dataset Viewer (auto-converted to Parquet)

Columns:
  code: string, length 28 to 4.99k characters
  cwe: string, 8 classes
  vulnerable: int64, values 0 or 1
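Each row below pairs a C/C++ function (code) with a CWE class label (cwe) and a binary vulnerability flag (vulnerable). Below is a minimal sketch of how the Parquet export could be loaded and inspected with pandas; the file name "train.parquet" is a placeholder for whatever shard the auto-conversion actually produces, not part of the dataset itself.

import pandas as pd

# Load the auto-converted Parquet shard (placeholder file name).
df = pd.read_parquet("train.parquet")  # columns: code, cwe, vulnerable

# Distribution of CWE classes among the vulnerable samples.
print(df.loc[df["vulnerable"] == 1, "cwe"].value_counts())

# Example: collect the CWE-119 (buffer error) samples for closer inspection.
cwe119 = df[(df["cwe"] == "CWE-119") & (df["vulnerable"] == 1)]
print(len(cwe119), "CWE-119 samples")
print(cwe119.iloc[0]["code"][:200])  # first 200 characters of one sample

This assumes pandas with a Parquet engine (pyarrow or fastparquet) is installed; nothing in the sketch is specific to this dataset beyond the three column names listed above.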
Mac_Read_POST_Resource( FT_Library library, FT_Stream stream, FT_Long *offsets, FT_Long resource_cnt, FT_Long face_index, FT_Face *aface ) { FT_Error error = FT_Err_Cannot_Open_Resource; FT_Memory memory = library->memory; FT_Byte* pfb_data; int i, type, flags; FT_Long len; FT_Long pfb_len, pfb_pos, pfb_lenpos; FT_Long rlen, temp; if ( face_index == -1 ) face_index = 0; if ( face_index != 0 ) return error; /* Find the length of all the POST resources, concatenated. Assume */ /* worst case (each resource in its own section). */ pfb_len = 0; for ( i = 0; i < resource_cnt; ++i ) { error = FT_Stream_Seek( stream, offsets[i] ); if ( error ) goto Exit; if ( FT_READ_LONG( temp ) ) goto Exit; pfb_len += temp + 6; } if ( FT_ALLOC( pfb_data, (FT_Long)pfb_len + 2 ) ) goto Exit; pfb_data[0] = 0x80; pfb_data[1] = 1; /* Ascii section */ pfb_data[2] = 0; /* 4-byte length, fill in later */ pfb_data[3] = 0; pfb_data[4] = 0; pfb_data[5] = 0; pfb_pos = 6; pfb_lenpos = 2; len = 0; type = 1; for ( i = 0; i < resource_cnt; ++i ) { error = FT_Stream_Seek( stream, offsets[i] ); if ( error ) goto Exit2; if ( FT_READ_LONG( rlen ) ) goto Exit; if ( FT_READ_USHORT( flags ) ) goto Exit; rlen -= 2; /* the flags are part of the resource */ if ( ( flags >> 8 ) == type ) len += rlen; else { pfb_data[pfb_lenpos ] = (FT_Byte)( len ); pfb_data[pfb_lenpos + 1] = (FT_Byte)( len >> 8 ); pfb_data[pfb_lenpos + 2] = (FT_Byte)( len >> 16 ); pfb_data[pfb_lenpos + 3] = (FT_Byte)( len >> 24 ); if ( ( flags >> 8 ) == 5 ) /* End of font mark */ break; pfb_data[pfb_pos++] = 0x80; type = flags >> 8; len = rlen; pfb_data[pfb_pos++] = (FT_Byte)type; pfb_lenpos = pfb_pos; pfb_data[pfb_pos++] = 0; /* 4-byte length, fill in later */ pfb_data[pfb_pos++] = 0; pfb_data[pfb_pos++] = 0; pfb_data[pfb_pos++] = 0; } error = FT_Stream_Read( stream, (FT_Byte *)pfb_data + pfb_pos, rlen ); if ( error ) goto Exit2; pfb_pos += rlen; } pfb_data[pfb_lenpos ] = (FT_Byte)( len ); pfb_data[pfb_lenpos + 1] = (FT_Byte)( len >> 8 ); pfb_data[pfb_lenpos + 2] = (FT_Byte)( len >> 16 ); pfb_data[pfb_lenpos + 3] = (FT_Byte)( len >> 24 ); return open_face_from_buffer( library, pfb_data, pfb_pos, face_index, "type1", aface ); Exit2: FT_FREE( pfb_data ); Exit: return error; }
CWE-119
1
int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc) { struct scsi_device *SDev; struct scsi_sense_hdr sshdr; int result, err = 0, retries = 0; unsigned char sense_buffer[SCSI_SENSE_BUFFERSIZE], *senseptr = NULL; SDev = cd->device; if (cgc->sense) senseptr = sense_buffer; retry: if (!scsi_block_when_processing_errors(SDev)) { err = -ENODEV; goto out; } result = scsi_execute(SDev, cgc->cmd, cgc->data_direction, cgc->buffer, cgc->buflen, senseptr, &sshdr, cgc->timeout, IOCTL_RETRIES, 0, 0, NULL); if (cgc->sense) memcpy(cgc->sense, sense_buffer, sizeof(*cgc->sense)); /* Minimal error checking. Ignore cases we know about, and report the rest. */ if (driver_byte(result) != 0) { switch (sshdr.sense_key) { case UNIT_ATTENTION: SDev->changed = 1; if (!cgc->quiet) sr_printk(KERN_INFO, cd, "disc change detected.\n"); if (retries++ < 10) goto retry; err = -ENOMEDIUM; break; case NOT_READY: /* This happens if there is no disc in drive */ if (sshdr.asc == 0x04 && sshdr.ascq == 0x01) { /* sense: Logical unit is in process of becoming ready */ if (!cgc->quiet) sr_printk(KERN_INFO, cd, "CDROM not ready yet.\n"); if (retries++ < 10) { /* sleep 2 sec and try again */ ssleep(2); goto retry; } else { /* 20 secs are enough? */ err = -ENOMEDIUM; break; } } if (!cgc->quiet) sr_printk(KERN_INFO, cd, "CDROM not ready. Make sure there " "is a disc in the drive.\n"); err = -ENOMEDIUM; break; case ILLEGAL_REQUEST: err = -EIO; if (sshdr.asc == 0x20 && sshdr.ascq == 0x00) /* sense: Invalid command operation code */ err = -EDRIVE_CANT_DO_THIS; break; default: err = -EIO; } } /* Wake up a process waiting for device */ out: cgc->stat = err; return err; }
CWE-119
1
static void recalculate_apic_map(struct kvm *kvm) { struct kvm_apic_map *new, *old = NULL; struct kvm_vcpu *vcpu; int i; new = kzalloc(sizeof(struct kvm_apic_map), GFP_KERNEL); mutex_lock(&kvm->arch.apic_map_lock); if (!new) goto out; new->ldr_bits = 8; /* flat mode is default */ new->cid_shift = 8; new->cid_mask = 0; new->lid_mask = 0xff; kvm_for_each_vcpu(i, vcpu, kvm) { struct kvm_lapic *apic = vcpu->arch.apic; u16 cid, lid; u32 ldr; if (!kvm_apic_present(vcpu)) continue; /* * All APICs have to be configured in the same mode by an OS. * We take advatage of this while building logical id loockup * table. After reset APICs are in xapic/flat mode, so if we * find apic with different setting we assume this is the mode * OS wants all apics to be in; build lookup table accordingly. */ if (apic_x2apic_mode(apic)) { new->ldr_bits = 32; new->cid_shift = 16; new->cid_mask = (1 << KVM_X2APIC_CID_BITS) - 1; new->lid_mask = 0xffff; } else if (kvm_apic_sw_enabled(apic) && !new->cid_mask /* flat mode */ && kvm_apic_get_reg(apic, APIC_DFR) == APIC_DFR_CLUSTER) { new->cid_shift = 4; new->cid_mask = 0xf; new->lid_mask = 0xf; } new->phys_map[kvm_apic_id(apic)] = apic; ldr = kvm_apic_get_reg(apic, APIC_LDR); cid = apic_cluster_id(new, ldr); lid = apic_logical_id(new, ldr); if (lid) new->logical_map[cid][ffs(lid) - 1] = apic; } out: old = rcu_dereference_protected(kvm->arch.apic_map, lockdep_is_held(&kvm->arch.apic_map_lock)); rcu_assign_pointer(kvm->arch.apic_map, new); mutex_unlock(&kvm->arch.apic_map_lock); if (old) kfree_rcu(old, rcu); kvm_vcpu_request_scan_ioapic(kvm); }
CWE-189
1
static int mailimf_group_parse(const char * message, size_t length, size_t * indx, struct mailimf_group ** result) { size_t cur_token; char * display_name; struct mailimf_mailbox_list * mailbox_list; struct mailimf_group * group; int r; int res; clist * list; cur_token = * indx; mailbox_list = NULL; r = mailimf_display_name_parse(message, length, &cur_token, &display_name); if (r != MAILIMF_NO_ERROR) { res = r; goto err; } r = mailimf_colon_parse(message, length, &cur_token); if (r != MAILIMF_NO_ERROR) { res = r; goto free_display_name; } r = mailimf_mailbox_list_parse(message, length, &cur_token, &mailbox_list); switch (r) { case MAILIMF_NO_ERROR: break; case MAILIMF_ERROR_PARSE: r = mailimf_cfws_parse(message, length, &cur_token); if ((r != MAILIMF_NO_ERROR) && (r != MAILIMF_ERROR_PARSE)) { res = r; goto free_display_name; } list = clist_new(); if (list == NULL) { res = MAILIMF_ERROR_MEMORY; goto free_display_name; } mailbox_list = mailimf_mailbox_list_new(list); if (mailbox_list == NULL) { res = MAILIMF_ERROR_MEMORY; clist_free(list); goto free_display_name; } break; default: res = r; goto free_display_name; } r = mailimf_semi_colon_parse(message, length, &cur_token); if (r != MAILIMF_NO_ERROR) { res = r; goto free_mailbox_list; } group = mailimf_group_new(display_name, mailbox_list); if (group == NULL) { res = MAILIMF_ERROR_MEMORY; goto free_mailbox_list; } * indx = cur_token; * result = group; return MAILIMF_NO_ERROR; free_mailbox_list: if (mailbox_list != NULL) { mailimf_mailbox_list_free(mailbox_list); } free_display_name: mailimf_display_name_free(display_name); err: return res; }
CWE-476
1
void TabSpecificContentSettings::OnContentBlocked( ContentSettingsType type, const std::string& resource_identifier) { DCHECK(type != CONTENT_SETTINGS_TYPE_GEOLOCATION) << "Geolocation settings handled by OnGeolocationPermissionSet"; if (type < 0 || type >= CONTENT_SETTINGS_NUM_TYPES) return; content_accessed_[type] = true; std::string identifier; if (CommandLine::ForCurrentProcess()->HasSwitch( switches::kEnableResourceContentSettings)) { identifier = resource_identifier; } if (!identifier.empty()) AddBlockedResource(type, identifier); #if defined (OS_ANDROID) if (type == CONTENT_SETTINGS_TYPE_POPUPS) { content_blocked_[type] = false; content_blockage_indicated_to_user_[type] = false; } #endif if (!content_blocked_[type]) { content_blocked_[type] = true; content::NotificationService::current()->Notify( chrome::NOTIFICATION_WEB_CONTENT_SETTINGS_CHANGED, content::Source<WebContents>(web_contents()), content::NotificationService::NoDetails()); } }
CWE-20
1
void WebPageSerializerImpl::openTagToString(Element* element, SerializeDomParam* param) { bool needSkip; StringBuilder result; result.append(preActionBeforeSerializeOpenTag(element, param, &needSkip)); if (needSkip) return; result.append('<'); result.append(element->nodeName().lower()); AttributeCollection attributes = element->attributes(); AttributeCollection::iterator end = attributes.end(); for (AttributeCollection::iterator it = attributes.begin(); it != end; ++it) { result.append(' '); result.append(it->name().toString()); result.appendLiteral("=\""); if (!it->value().isEmpty()) { const String& attrValue = it->value(); const QualifiedName& attrName = it->name(); if (element->hasLegalLinkAttribute(attrName)) { if (attrValue.startsWith("javascript:", TextCaseInsensitive)) { result.append(m_htmlEntities.convertEntitiesInString(attrValue)); } else { WebLocalFrameImpl* subFrame = WebLocalFrameImpl::fromFrameOwnerElement(element); String completeURL = subFrame ? subFrame->frame()->document()->url() : param->document->completeURL(attrValue); if (m_localLinks.contains(completeURL)) { if (!param->directoryName.isEmpty()) { result.appendLiteral("./"); result.append(param->directoryName); result.append('/'); } result.append(m_htmlEntities.convertEntitiesInString(m_localLinks.get(completeURL))); } else { result.append(m_htmlEntities.convertEntitiesInString(completeURL)); } } } else { if (param->isHTMLDocument) result.append(m_htmlEntities.convertEntitiesInString(attrValue)); else result.append(m_xmlEntities.convertEntitiesInString(attrValue)); } } result.append('\"'); } String addedContents = postActionAfterSerializeOpenTag(element, param); if (element->hasChildren() || param->haveAddedContentsBeforeEnd) result.append('>'); result.append(addedContents); saveHTMLContentToBuffer(result.toString(), param); }
CWE-20
1
static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer) { struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr; struct sock *sk = sock->sk; struct l2cap_chan *chan = l2cap_pi(sk)->chan; BT_DBG("sock %p, sk %p", sock, sk); memset(la, 0, sizeof(struct sockaddr_l2)); addr->sa_family = AF_BLUETOOTH; *len = sizeof(struct sockaddr_l2); if (peer) { la->l2_psm = chan->psm; bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst); la->l2_cid = cpu_to_le16(chan->dcid); } else { la->l2_psm = chan->sport; bacpy(&la->l2_bdaddr, &bt_sk(sk)->src); la->l2_cid = cpu_to_le16(chan->scid); } return 0; }
CWE-200
1
static int unimac_mdio_probe(struct platform_device *pdev) { struct unimac_mdio_pdata *pdata = pdev->dev.platform_data; struct unimac_mdio_priv *priv; struct device_node *np; struct mii_bus *bus; struct resource *r; int ret; np = pdev->dev.of_node; priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; r = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!r) return -EINVAL; /* Just ioremap, as this MDIO block is usually integrated into an * Ethernet MAC controller register range */ priv->base = devm_ioremap(&pdev->dev, r->start, resource_size(r)); if (!priv->base) { dev_err(&pdev->dev, "failed to remap register\n"); return -ENOMEM; } priv->mii_bus = mdiobus_alloc(); if (!priv->mii_bus) return -ENOMEM; bus = priv->mii_bus; bus->priv = priv; if (pdata) { bus->name = pdata->bus_name; priv->wait_func = pdata->wait_func; priv->wait_func_data = pdata->wait_func_data; bus->phy_mask = ~pdata->phy_mask; } else { bus->name = "unimac MII bus"; priv->wait_func_data = priv; priv->wait_func = unimac_mdio_poll; } bus->parent = &pdev->dev; bus->read = unimac_mdio_read; bus->write = unimac_mdio_write; bus->reset = unimac_mdio_reset; snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d", pdev->name, pdev->id); ret = of_mdiobus_register(bus, np); if (ret) { dev_err(&pdev->dev, "MDIO bus registration failed\n"); goto out_mdio_free; } platform_set_drvdata(pdev, priv); dev_info(&pdev->dev, "Broadcom UniMAC MDIO bus at 0x%p\n", priv->base); return 0; out_mdio_free: mdiobus_free(bus); return ret; }
CWE-476
1
SYSCALL_DEFINE2(kill, pid_t, pid, int, sig) { struct siginfo info; info.si_signo = sig; info.si_errno = 0; info.si_code = SI_USER; info.si_pid = task_tgid_vnr(current); info.si_uid = from_kuid_munged(current_user_ns(), current_uid()); return kill_something_info(sig, &info, pid); }
CWE-399
1
void wifi_cleanup(wifi_handle handle, wifi_cleaned_up_handler handler) { hal_info *info = getHalInfo(handle); char buf[64]; info->cleaned_up_handler = handler; if (write(info->cleanup_socks[0], "Exit", 4) < 1) { ALOGE("could not write to the cleanup socket"); } else { memset(buf, 0, sizeof(buf)); int result = read(info->cleanup_socks[0], buf, sizeof(buf)); ALOGE("%s: Read after POLL returned %d, error no = %d", __FUNCTION__, result, errno); if (strncmp(buf, "Done", 4) == 0) { ALOGE("Event processing terminated"); } else { ALOGD("Rx'ed %s", buf); } } info->clean_up = true; pthread_mutex_lock(&info->cb_lock); int bad_commands = 0; for (int i = 0; i < info->num_event_cb; i++) { cb_info *cbi = &(info->event_cb[i]); WifiCommand *cmd = (WifiCommand *)cbi->cb_arg; ALOGI("Command left in event_cb %p:%s", cmd, (cmd ? cmd->getType(): "")); } while (info->num_cmd > bad_commands) { int num_cmd = info->num_cmd; cmd_info *cmdi = &(info->cmd[bad_commands]); WifiCommand *cmd = cmdi->cmd; if (cmd != NULL) { ALOGI("Cancelling command %p:%s", cmd, cmd->getType()); pthread_mutex_unlock(&info->cb_lock); cmd->cancel(); pthread_mutex_lock(&info->cb_lock); if (num_cmd == info->num_cmd) { ALOGI("Cancelling command %p:%s did not work", cmd, (cmd ? cmd->getType(): "")); bad_commands++; } /* release reference added when command is saved */ cmd->releaseRef(); } } for (int i = 0; i < info->num_event_cb; i++) { cb_info *cbi = &(info->event_cb[i]); WifiCommand *cmd = (WifiCommand *)cbi->cb_arg; ALOGE("Leaked command %p", cmd); } pthread_mutex_unlock(&info->cb_lock); internal_cleaned_up_handler(handle); }
CWE-264
1
bool SetExtendedFileAttribute(const char* path, const char* name, const char* value, size_t value_size, int flags) { //// On Chrome OS, there is no component that can validate these extended //// attributes so there is no need to set them. #if !defined(OS_CHROMEOS) base::ScopedBlockingCall scoped_blocking_call(base::BlockingType::MAY_BLOCK); int result = setxattr(path, name, value, value_size, flags); if (result) { DPLOG(ERROR) << "Could not set extended attribute " << name << " on file " << path; return false; } #endif // !defined(OS_CHROMEOS) return true; }
CWE-200
1
get_uncompressed_data(struct archive_read *a, const void **buff, size_t size, size_t minimum) { struct _7zip *zip = (struct _7zip *)a->format->data; ssize_t bytes_avail; if (zip->codec == _7Z_COPY && zip->codec2 == (unsigned long)-1) { /* Copy mode. */ *buff = __archive_read_ahead(a, minimum, &bytes_avail); if (bytes_avail <= 0) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Truncated 7-Zip file data"); return (ARCHIVE_FATAL); } if ((size_t)bytes_avail > zip->uncompressed_buffer_bytes_remaining) bytes_avail = (ssize_t) zip->uncompressed_buffer_bytes_remaining; if ((size_t)bytes_avail > size) bytes_avail = (ssize_t)size; zip->pack_stream_bytes_unconsumed = bytes_avail; } else if (zip->uncompressed_buffer_pointer == NULL) { /* Decompression has failed. */ archive_set_error(&(a->archive), ARCHIVE_ERRNO_MISC, "Damaged 7-Zip archive"); return (ARCHIVE_FATAL); } else { /* Packed mode. */ if (minimum > zip->uncompressed_buffer_bytes_remaining) { /* * If remaining uncompressed data size is less than * the minimum size, fill the buffer up to the * minimum size. */ if (extract_pack_stream(a, minimum) < 0) return (ARCHIVE_FATAL); } if (size > zip->uncompressed_buffer_bytes_remaining) bytes_avail = (ssize_t) zip->uncompressed_buffer_bytes_remaining; else bytes_avail = (ssize_t)size; *buff = zip->uncompressed_buffer_pointer; zip->uncompressed_buffer_pointer += bytes_avail; } zip->uncompressed_buffer_bytes_remaining -= bytes_avail; return (bytes_avail); }
CWE-125
1
static int netlbl_cipsov4_add_common(struct genl_info *info, struct cipso_v4_doi *doi_def) { struct nlattr *nla; int nla_rem; u32 iter = 0; doi_def->doi = nla_get_u32(info->attrs[NLBL_CIPSOV4_A_DOI]); if (nla_validate_nested(info->attrs[NLBL_CIPSOV4_A_TAGLST], NLBL_CIPSOV4_A_MAX, netlbl_cipsov4_genl_policy) != 0) return -EINVAL; nla_for_each_nested(nla, info->attrs[NLBL_CIPSOV4_A_TAGLST], nla_rem) if (nla->nla_type == NLBL_CIPSOV4_A_TAG) { if (iter >= CIPSO_V4_TAG_MAXCNT) return -EINVAL; doi_def->tags[iter++] = nla_get_u8(nla); } while (iter < CIPSO_V4_TAG_MAXCNT) doi_def->tags[iter++] = CIPSO_V4_TAG_INVALID; return 0; }
CWE-119
1
static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct gs_usb *dev; int rc = -ENOMEM; unsigned int icount, i; struct gs_host_config *hconf; struct gs_device_config *dconf; hconf = kmalloc(sizeof(*hconf), GFP_KERNEL); if (!hconf) return -ENOMEM; hconf->byte_order = 0x0000beef; /* send host config */ rc = usb_control_msg(interface_to_usbdev(intf), usb_sndctrlpipe(interface_to_usbdev(intf), 0), GS_USB_BREQ_HOST_FORMAT, USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE, 1, intf->altsetting[0].desc.bInterfaceNumber, hconf, sizeof(*hconf), 1000); kfree(hconf); if (rc < 0) { dev_err(&intf->dev, "Couldn't send data format (err=%d)\n", rc); return rc; } dconf = kmalloc(sizeof(*dconf), GFP_KERNEL); if (!dconf) return -ENOMEM; /* read device config */ rc = usb_control_msg(interface_to_usbdev(intf), usb_rcvctrlpipe(interface_to_usbdev(intf), 0), GS_USB_BREQ_DEVICE_CONFIG, USB_DIR_IN|USB_TYPE_VENDOR|USB_RECIP_INTERFACE, 1, intf->altsetting[0].desc.bInterfaceNumber, dconf, sizeof(*dconf), 1000); if (rc < 0) { dev_err(&intf->dev, "Couldn't get device config: (err=%d)\n", rc); kfree(dconf); return rc; } icount = dconf->icount + 1; dev_info(&intf->dev, "Configuring for %d interfaces\n", icount); if (icount > GS_MAX_INTF) { dev_err(&intf->dev, "Driver cannot handle more that %d CAN interfaces\n", GS_MAX_INTF); kfree(dconf); return -EINVAL; } dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) { kfree(dconf); return -ENOMEM; } init_usb_anchor(&dev->rx_submitted); atomic_set(&dev->active_channels, 0); usb_set_intfdata(intf, dev); dev->udev = interface_to_usbdev(intf); for (i = 0; i < icount; i++) { dev->canch[i] = gs_make_candev(i, intf, dconf); if (IS_ERR_OR_NULL(dev->canch[i])) { /* save error code to return later */ rc = PTR_ERR(dev->canch[i]); /* on failure destroy previously created candevs */ icount = i; for (i = 0; i < icount; i++) gs_destroy_candev(dev->canch[i]); usb_kill_anchored_urbs(&dev->rx_submitted); kfree(dconf); kfree(dev); return rc; } dev->canch[i]->parent = dev; } kfree(dconf); return 0; }
CWE-119
1
tt_cmap14_validate( FT_Byte* table, FT_Validator valid ) { FT_Byte* p; FT_ULong length; FT_ULong num_selectors; if ( table + 2 + 4 + 4 > valid->limit ) FT_INVALID_TOO_SHORT; p = table + 2; length = TT_NEXT_ULONG( p ); num_selectors = TT_NEXT_ULONG( p ); if ( length > (FT_ULong)( valid->limit - table ) || /* length < 10 + 11 * num_selectors ? */ length < 10 || ( length - 10 ) / 11 < num_selectors ) FT_INVALID_TOO_SHORT; /* check selectors, they must be in increasing order */ { /* we start lastVarSel at 1 because a variant selector value of 0 * isn't valid. */ FT_ULong n, lastVarSel = 1; for ( n = 0; n < num_selectors; n++ ) { FT_ULong varSel = TT_NEXT_UINT24( p ); FT_ULong defOff = TT_NEXT_ULONG( p ); FT_ULong nondefOff = TT_NEXT_ULONG( p ); if ( defOff >= length || nondefOff >= length ) FT_INVALID_TOO_SHORT; if ( varSel < lastVarSel ) FT_INVALID_DATA; lastVarSel = varSel + 1; /* check the default table (these glyphs should be reached */ /* through the normal Unicode cmap, no GIDs, just check order) */ if ( defOff != 0 ) { FT_Byte* defp = table + defOff; FT_ULong numRanges; FT_ULong i; FT_ULong lastBase = 0; if ( defp + 4 > valid->limit ) FT_INVALID_TOO_SHORT; numRanges = TT_NEXT_ULONG( defp ); /* defp + numRanges * 4 > valid->limit ? */ if ( numRanges > (FT_ULong)( valid->limit - defp ) / 4 ) FT_INVALID_TOO_SHORT; if ( base + cnt >= 0x110000UL ) /* end of Unicode */ FT_INVALID_DATA; if ( base < lastBase ) FT_INVALID_DATA; lastBase = base + cnt + 1U; } } /* and the non-default table (these glyphs are specified here) */ if ( nondefOff != 0 ) { FT_Byte* ndp = table + nondefOff; FT_ULong numMappings = TT_NEXT_ULONG( ndp ); /* and the non-default table (these glyphs are specified here) */ if ( nondefOff != 0 ) { FT_Byte* ndp = table + nondefOff; FT_ULong numMappings; FT_ULong i, lastUni = 0; if ( ndp + 4 > valid->limit ) FT_INVALID_TOO_SHORT; numMappings = TT_NEXT_ULONG( ndp ); /* numMappings * 5 > (FT_ULong)( valid->limit - ndp ) ? */ if ( numMappings > ( (FT_ULong)( valid->limit - ndp ) ) / 5 ) FT_INVALID_TOO_SHORT; for ( i = 0; i < numMappings; ++i ) lastUni = uni + 1U; if ( valid->level >= FT_VALIDATE_TIGHT && gid >= TT_VALID_GLYPH_COUNT( valid ) ) FT_INVALID_GLYPH_ID; } } } }
CWE-125
1
cifs_iovec_write(struct file *file, const struct iovec *iov, unsigned long nr_segs, loff_t *poffset) { unsigned long nr_pages, i; size_t bytes, copied, len, cur_len; ssize_t total_written = 0; loff_t offset; struct iov_iter it; struct cifsFileInfo *open_file; struct cifs_tcon *tcon; struct cifs_sb_info *cifs_sb; struct cifs_writedata *wdata, *tmp; struct list_head wdata_list; int rc; pid_t pid; len = iov_length(iov, nr_segs); if (!len) return 0; rc = generic_write_checks(file, poffset, &len, 0); if (rc) return rc; INIT_LIST_HEAD(&wdata_list); cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); open_file = file->private_data; tcon = tlink_tcon(open_file->tlink); if (!tcon->ses->server->ops->async_writev) return -ENOSYS; offset = *poffset; if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD) pid = open_file->pid; else pid = current->tgid; iov_iter_init(&it, iov, nr_segs, len, 0); do { size_t save_len; nr_pages = get_numpages(cifs_sb->wsize, len, &cur_len); wdata = cifs_writedata_alloc(nr_pages, cifs_uncached_writev_complete); if (!wdata) { rc = -ENOMEM; break; } rc = cifs_write_allocate_pages(wdata->pages, nr_pages); if (rc) { kfree(wdata); break; } save_len = cur_len; for (i = 0; i < nr_pages; i++) { bytes = min_t(const size_t, cur_len, PAGE_SIZE); copied = iov_iter_copy_from_user(wdata->pages[i], &it, 0, bytes); cur_len -= copied; iov_iter_advance(&it, copied); /* * If we didn't copy as much as we expected, then that * may mean we trod into an unmapped area. Stop copying * at that point. On the next pass through the big * loop, we'll likely end up getting a zero-length * write and bailing out of it. */ if (copied < bytes) break; } cur_len = save_len - cur_len; /* * If we have no data to send, then that probably means that * the copy above failed altogether. That's most likely because * the address in the iovec was bogus. Set the rc to -EFAULT, * free anything we allocated and bail out. */ if (!cur_len) { for (i = 0; i < nr_pages; i++) put_page(wdata->pages[i]); kfree(wdata); rc = -EFAULT; break; } /* * i + 1 now represents the number of pages we actually used in * the copy phase above. Bring nr_pages down to that, and free * any pages that we didn't use. */ for ( ; nr_pages > i + 1; nr_pages--) put_page(wdata->pages[nr_pages - 1]); wdata->sync_mode = WB_SYNC_ALL; wdata->nr_pages = nr_pages; wdata->offset = (__u64)offset; wdata->cfile = cifsFileInfo_get(open_file); wdata->pid = pid; wdata->bytes = cur_len; wdata->pagesz = PAGE_SIZE; wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE); rc = cifs_uncached_retry_writev(wdata); if (rc) { kref_put(&wdata->refcount, cifs_uncached_writedata_release); break; } list_add_tail(&wdata->list, &wdata_list); offset += cur_len; len -= cur_len; } while (len > 0); /* * If at least one write was successfully sent, then discard any rc * value from the later writes. If the other write succeeds, then * we'll end up returning whatever was written. If it fails, then * we'll get a new rc value from that. */ if (!list_empty(&wdata_list)) rc = 0; /* * Wait for and collect replies for any successful sends in order of * increasing offset. Once an error is hit or we get a fatal signal * while waiting, then return without waiting for any more replies. */ restart_loop: list_for_each_entry_safe(wdata, tmp, &wdata_list, list) { if (!rc) { /* FIXME: freezable too? 
*/ rc = wait_for_completion_killable(&wdata->done); if (rc) rc = -EINTR; else if (wdata->result) rc = wdata->result; else total_written += wdata->bytes; /* resend call if it's a retryable error */ if (rc == -EAGAIN) { rc = cifs_uncached_retry_writev(wdata); goto restart_loop; } } list_del_init(&wdata->list); kref_put(&wdata->refcount, cifs_uncached_writedata_release); } if (total_written > 0) *poffset += total_written; cifs_stats_bytes_written(tcon, total_written); return total_written ? total_written : (ssize_t)rc; }
CWE-119
1
bool WebRtcAudioRenderer::Initialize(WebRtcAudioRendererSource* source) { base::AutoLock auto_lock(lock_); DCHECK_EQ(state_, UNINITIALIZED); DCHECK(source); DCHECK(!sink_); DCHECK(!source_); sink_ = AudioDeviceFactory::NewOutputDevice(); DCHECK(sink_); int sample_rate = GetAudioOutputSampleRate(); DVLOG(1) << "Audio output hardware sample rate: " << sample_rate; UMA_HISTOGRAM_ENUMERATION("WebRTC.AudioOutputSampleRate", sample_rate, media::kUnexpectedAudioSampleRate); if (std::find(&kValidOutputRates[0], &kValidOutputRates[0] + arraysize(kValidOutputRates), sample_rate) == &kValidOutputRates[arraysize(kValidOutputRates)]) { DLOG(ERROR) << sample_rate << " is not a supported output rate."; return false; } media::ChannelLayout channel_layout = media::CHANNEL_LAYOUT_STEREO; int buffer_size = 0; #if defined(OS_WIN) channel_layout = media::CHANNEL_LAYOUT_STEREO; if (sample_rate == 96000 || sample_rate == 48000) { buffer_size = (sample_rate / 100); } else { buffer_size = 2 * 440; } if (base::win::GetVersion() < base::win::VERSION_VISTA) { buffer_size = 3 * buffer_size; DLOG(WARNING) << "Extending the output buffer size by a factor of three " << "since Windows XP has been detected."; } #elif defined(OS_MACOSX) channel_layout = media::CHANNEL_LAYOUT_MONO; // frame size to use for 96kHz, 48kHz and 44.1kHz. if (sample_rate == 96000 || sample_rate == 48000) { buffer_size = (sample_rate / 100); } else { buffer_size = 440; } #elif defined(OS_LINUX) || defined(OS_OPENBSD) channel_layout = media::CHANNEL_LAYOUT_MONO; buffer_size = 480; #else DLOG(ERROR) << "Unsupported platform"; return false; #endif params_.Reset(media::AudioParameters::AUDIO_PCM_LOW_LATENCY, channel_layout, sample_rate, 16, buffer_size); buffer_.reset(new int16[params_.frames_per_buffer() * params_.channels()]); source_ = source; source->SetRenderFormat(params_); sink_->Initialize(params_, this); sink_->SetSourceRenderView(source_render_view_id_); sink_->Start(); state_ = PAUSED; UMA_HISTOGRAM_ENUMERATION("WebRTC.AudioOutputChannelLayout", channel_layout, media::CHANNEL_LAYOUT_MAX); UMA_HISTOGRAM_ENUMERATION("WebRTC.AudioOutputFramesPerBuffer", buffer_size, kUnexpectedAudioBufferSize); AddHistogramFramesPerBuffer(buffer_size); return true; }
CWE-119
1
static inline int hpel_motion(MpegEncContext *s, uint8_t *dest, uint8_t *src, int src_x, int src_y, op_pixels_func *pix_op, int motion_x, int motion_y) { int dxy = 0; int emu = 0; src_x += motion_x >> 1; src_y += motion_y >> 1; /* WARNING: do no forget half pels */ src_x = av_clip(src_x, -16, s->width); // FIXME unneeded for emu? if (src_x != s->width) dxy |= motion_x & 1; src_y = av_clip(src_y, -16, s->height); if (src_y != s->height) dxy |= (motion_y & 1) << 1; src += src_y * s->linesize + src_x; if ((unsigned)src_x > FFMAX(s->h_edge_pos - (motion_x & 1) - 8, 0) || (unsigned)src_y > FFMAX(s->v_edge_pos - (motion_y & 1) - 8, 0)) { s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, src, s->linesize, s->linesize, 9, 9, src_x, src_y, s->h_edge_pos, s->v_edge_pos); src = s->sc.edge_emu_buffer; emu = 1; } pix_op[dxy](dest, src, s->linesize, 8); return emu; }
CWE-476
1
PerformanceNavigationTiming::PerformanceNavigationTiming( LocalFrame* frame, ResourceTimingInfo* info, TimeTicks time_origin, const WebVector<WebServerTimingInfo>& server_timing) : PerformanceResourceTiming( info ? info->FinalResponse().Url().GetString() : "", "navigation", time_origin, server_timing), ContextClient(frame), resource_timing_info_(info) { DCHECK(frame); DCHECK(info); }
CWE-200
1
file_check_mem(struct magic_set *ms, unsigned int level) { size_t len; if (level >= ms->c.len) { len = (ms->c.len = 20 + level) * sizeof(*ms->c.li); ms->c.li = CAST(struct level_info *, (ms->c.li == NULL) ? malloc(len) : realloc(ms->c.li, len)); if (ms->c.li == NULL) { file_oomem(ms, len); return -1; } } ms->c.li[level].got_match = 0; #ifdef ENABLE_CONDITIONALS ms->c.li[level].last_match = 0; ms->c.li[level].last_cond = COND_NONE; #endif /* ENABLE_CONDITIONALS */ return 0; }
CWE-119
1
void gdImageFillToBorder (gdImagePtr im, int x, int y, int border, int color) { int lastBorder; /* Seek left */ int leftLimit = -1, rightLimit; int i, restoreAlphaBlending = 0; if (border < 0 || color < 0) { /* Refuse to fill to a non-solid border */ return; } if (!im->trueColor) { if ((color > (im->colorsTotal - 1)) || (border > (im->colorsTotal - 1)) || (color < 0)) { return; } } restoreAlphaBlending = im->alphaBlendingFlag; im->alphaBlendingFlag = 0; if (x >= im->sx) { x = im->sx - 1; } else if (x < 0) { x = 0; } if (y >= im->sy) { y = im->sy - 1; } else if (y < 0) { y = 0; } for (i = x; i >= 0; i--) { if (gdImageGetPixel(im, i, y) == border) { break; } gdImageSetPixel(im, i, y, color); leftLimit = i; } if (leftLimit == -1) { im->alphaBlendingFlag = restoreAlphaBlending; return; } /* Seek right */ rightLimit = x; for (i = (x + 1); i < im->sx; i++) { if (gdImageGetPixel(im, i, y) == border) { break; } gdImageSetPixel(im, i, y, color); rightLimit = i; } /* Look at lines above and below and start paints */ /* Above */ if (y > 0) { lastBorder = 1; for (i = leftLimit; i <= rightLimit; i++) { int c = gdImageGetPixel(im, i, y - 1); if (lastBorder) { if ((c != border) && (c != color)) { gdImageFillToBorder(im, i, y - 1, border, color); lastBorder = 0; } } else if ((c == border) || (c == color)) { lastBorder = 1; } } } /* Below */ if (y < ((im->sy) - 1)) { lastBorder = 1; for (i = leftLimit; i <= rightLimit; i++) { int c = gdImageGetPixel(im, i, y + 1); if (lastBorder) { if ((c != border) && (c != color)) { gdImageFillToBorder(im, i, y + 1, border, color); lastBorder = 0; } } else if ((c == border) || (c == color)) { lastBorder = 1; } } } im->alphaBlendingFlag = restoreAlphaBlending; }
CWE-119
1
static long ioctl_file_dedupe_range(struct file *file, void __user *arg) { struct file_dedupe_range __user *argp = arg; struct file_dedupe_range *same = NULL; int ret; unsigned long size; u16 count; if (get_user(count, &argp->dest_count)) { ret = -EFAULT; goto out; } size = offsetof(struct file_dedupe_range __user, info[count]); same = memdup_user(argp, size); if (IS_ERR(same)) { ret = PTR_ERR(same); same = NULL; goto out; } same->dest_count = count; ret = vfs_dedupe_file_range(file, same); if (ret) goto out; ret = copy_to_user(argp, same, size); if (ret) ret = -EFAULT; out: kfree(same); return ret; }
CWE-119
1
static int decode_dds1(GetByteContext *gb, uint8_t *frame, int width, int height) { const uint8_t *frame_start = frame; const uint8_t *frame_end = frame + width * height; int mask = 0x10000, bitbuf = 0; int i, v, offset, count, segments; segments = bytestream2_get_le16(gb); while (segments--) { if (bytestream2_get_bytes_left(gb) < 2) return AVERROR_INVALIDDATA; if (mask == 0x10000) { bitbuf = bytestream2_get_le16u(gb); mask = 1; } if (bitbuf & mask) { v = bytestream2_get_le16(gb); offset = (v & 0x1FFF) << 2; count = ((v >> 13) + 2) << 1; if (frame - frame_start < offset || frame_end - frame < count*2 + width) return AVERROR_INVALIDDATA; for (i = 0; i < count; i++) { frame[0] = frame[1] = frame[width] = frame[width + 1] = frame[-offset]; frame += 2; } } else if (bitbuf & (mask << 1)) { v = bytestream2_get_le16(gb)*2; if (frame - frame_end < v) return AVERROR_INVALIDDATA; frame += v; } else { if (frame_end - frame < width + 4) return AVERROR_INVALIDDATA; frame[0] = frame[1] = frame[width] = frame[width + 1] = bytestream2_get_byte(gb); frame += 2; frame[0] = frame[1] = frame[width] = frame[width + 1] = bytestream2_get_byte(gb); frame += 2; } mask <<= 2; } return 0; }
CWE-119
1
static jobject Bitmap_createFromParcel(JNIEnv* env, jobject, jobject parcel) { if (parcel == NULL) { SkDebugf("-------- unparcel parcel is NULL\n"); return NULL; } android::Parcel* p = android::parcelForJavaObject(env, parcel); const bool isMutable = p->readInt32() != 0; const SkColorType colorType = (SkColorType)p->readInt32(); const SkAlphaType alphaType = (SkAlphaType)p->readInt32(); const int width = p->readInt32(); const int height = p->readInt32(); const int rowBytes = p->readInt32(); const int density = p->readInt32(); if (kN32_SkColorType != colorType && kRGB_565_SkColorType != colorType && kARGB_4444_SkColorType != colorType && kIndex_8_SkColorType != colorType && kAlpha_8_SkColorType != colorType) { SkDebugf("Bitmap_createFromParcel unknown colortype: %d\n", colorType); return NULL; } SkAutoTDelete<SkBitmap> bitmap(new SkBitmap); if (!bitmap->setInfo(SkImageInfo::Make(width, height, colorType, alphaType), rowBytes)) { return NULL; } SkColorTable* ctable = NULL; if (colorType == kIndex_8_SkColorType) { int count = p->readInt32(); if (count < 0 || count > 256) { // The data is corrupt, since SkColorTable enforces a value between 0 and 256, // inclusive. return NULL; } if (count > 0) { size_t size = count * sizeof(SkPMColor); const SkPMColor* src = (const SkPMColor*)p->readInplace(size); if (src == NULL) { return NULL; } ctable = new SkColorTable(src, count); } } jbyteArray buffer = GraphicsJNI::allocateJavaPixelRef(env, bitmap.get(), ctable); if (NULL == buffer) { SkSafeUnref(ctable); return NULL; } SkSafeUnref(ctable); size_t size = bitmap->getSize(); android::Parcel::ReadableBlob blob; android::status_t status = p->readBlob(size, &blob); if (status) { doThrowRE(env, "Could not read bitmap from parcel blob."); return NULL; } bitmap->lockPixels(); memcpy(bitmap->getPixels(), blob.data(), size); bitmap->unlockPixels(); blob.release(); return GraphicsJNI::createBitmap(env, bitmap.detach(), buffer, getPremulBitmapCreateFlags(isMutable), NULL, NULL, density); }
CWE-189
1
parse_fond( char* fond_data, short* have_sfnt, ResID* sfnt_id, Str255 lwfn_file_name, short face_index ) { AsscEntry* assoc; AsscEntry* base_assoc; FamRec* fond; *sfnt_id = 0; *have_sfnt = 0; lwfn_file_name[0] = 0; fond = (FamRec*)fond_data; assoc = (AsscEntry*)( fond_data + sizeof ( FamRec ) + 2 ); base_assoc = assoc; /* the maximum faces in a FOND is 48, size of StyleTable.indexes[] */ if ( 47 < face_index ) return; /* Let's do a little range checking before we get too excited here */ if ( face_index < count_faces_sfnt( fond_data ) ) { assoc += face_index; /* add on the face_index! */ /* if the face at this index is not scalable, fall back to the first one (old behavior) */ if ( EndianS16_BtoN( assoc->fontSize ) == 0 ) { *have_sfnt = 1; *sfnt_id = EndianS16_BtoN( assoc->fontID ); } else if ( base_assoc->fontSize == 0 ) { *have_sfnt = 1; *sfnt_id = EndianS16_BtoN( base_assoc->fontID ); } } if ( EndianS32_BtoN( fond->ffStylOff ) ) { unsigned char* p = (unsigned char*)fond_data; StyleTable* style; unsigned short string_count; char ps_name[256]; unsigned char* names[64]; int i; p += EndianS32_BtoN( fond->ffStylOff ); style = (StyleTable*)p; p += sizeof ( StyleTable ); string_count = EndianS16_BtoN( *(short*)(p) ); string_count = FT_MIN( 64, string_count ); p += sizeof ( short ); for ( i = 0; i < string_count; i++ ) { names[i] = p; p += names[i][0]; } { size_t ps_name_len = (size_t)names[0][0]; if ( ps_name_len != 0 ) { ft_memcpy(ps_name, names[0] + 1, ps_name_len); ps_name[ps_name_len] = 0; ps_name[ps_name_len] = 0; } if ( style->indexes[face_index] > 1 && style->indexes[face_index] <= string_count ) { unsigned char* suffixes = names[style->indexes[face_index] - 1]; for ( i = 1; i <= suffixes[0]; i++ ) { unsigned char* s; size_t j = suffixes[i] - 1; if ( j < string_count && ( s = names[j] ) != NULL ) { size_t s_len = (size_t)s[0]; if ( s_len != 0 && ps_name_len + s_len < sizeof ( ps_name ) ) { ft_memcpy( ps_name + ps_name_len, s + 1, s_len ); ps_name_len += s_len; ps_name[ps_name_len] = 0; } } } } } create_lwfn_name( ps_name, lwfn_file_name ); } }
CWE-119
1
WebGLObject::WebGLObject(WebGLRenderingContext* context) WebGLObject::WebGLObject(WebGLRenderingContext*) : m_object(0) , m_attachmentCount(0) , m_deleted(false) { }
CWE-119
1
SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages, const void __user * __user *, pages, const int __user *, nodes, int __user *, status, int, flags) { struct task_struct *task; struct mm_struct *mm; int err; nodemask_t task_nodes; /* Check flags */ if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL)) return -EINVAL; if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE)) return -EPERM; /* Find the mm_struct */ rcu_read_lock(); task = pid ? find_task_by_vpid(pid) : current; if (!task) { rcu_read_unlock(); return -ESRCH; } get_task_struct(task); /* * Check if this process has the right to modify the specified * process. Use the regular "ptrace_may_access()" checks. */ if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) { rcu_read_unlock(); err = -EPERM; goto out; } rcu_read_unlock(); err = security_task_movememory(task); if (err) goto out; task_nodes = cpuset_mems_allowed(task); mm = get_task_mm(task); put_task_struct(task); if (!mm) return -EINVAL; if (nodes) err = do_pages_move(mm, task_nodes, nr_pages, pages, nodes, status, flags); else err = do_pages_stat(mm, nr_pages, pages, status); mmput(mm); return err; out: put_task_struct(task); return err; }
CWE-200
1
sd2_parse_rsrc_fork (SF_PRIVATE *psf) { SD2_RSRC rsrc ; int k, marker, error = 0 ; psf_use_rsrc (psf, SF_TRUE) ; memset (&rsrc, 0, sizeof (rsrc)) ; rsrc.rsrc_len = psf_get_filelen (psf) ; psf_log_printf (psf, "Resource length : %d (0x%04X)\n", rsrc.rsrc_len, rsrc.rsrc_len) ; if (rsrc.rsrc_len > SIGNED_SIZEOF (psf->header)) { rsrc.rsrc_data = calloc (1, rsrc.rsrc_len) ; rsrc.need_to_free_rsrc_data = SF_TRUE ; } else { rsrc.rsrc_data = psf->header ; rsrc.need_to_free_rsrc_data = SF_FALSE ; } ; /* Read in the whole lot. */ psf_fread (rsrc.rsrc_data, rsrc.rsrc_len, 1, psf) ; /* Reset the header storage because we have changed to the rsrcdes. */ psf->headindex = psf->headend = rsrc.rsrc_len ; rsrc.data_offset = read_rsrc_int (&rsrc, 0) ; rsrc.map_offset = read_rsrc_int (&rsrc, 4) ; rsrc.data_length = read_rsrc_int (&rsrc, 8) ; rsrc.map_length = read_rsrc_int (&rsrc, 12) ; if (rsrc.data_offset == 0x51607 && rsrc.map_offset == 0x20000) { psf_log_printf (psf, "Trying offset of 0x52 bytes.\n") ; rsrc.data_offset = read_rsrc_int (&rsrc, 0x52 + 0) + 0x52 ; rsrc.map_offset = read_rsrc_int (&rsrc, 0x52 + 4) + 0x52 ; rsrc.data_length = read_rsrc_int (&rsrc, 0x52 + 8) ; rsrc.map_length = read_rsrc_int (&rsrc, 0x52 + 12) ; } ; psf_log_printf (psf, " data offset : 0x%04X\n map offset : 0x%04X\n" " data length : 0x%04X\n map length : 0x%04X\n", rsrc.data_offset, rsrc.map_offset, rsrc.data_length, rsrc.map_length) ; if (rsrc.data_offset > rsrc.rsrc_len) { psf_log_printf (psf, "Error : rsrc.data_offset (%d, 0x%x) > len\n", rsrc.data_offset, rsrc.data_offset) ; error = SFE_SD2_BAD_DATA_OFFSET ; goto parse_rsrc_fork_cleanup ; } ; if (rsrc.map_offset > rsrc.rsrc_len) { psf_log_printf (psf, "Error : rsrc.map_offset > len\n") ; error = SFE_SD2_BAD_MAP_OFFSET ; goto parse_rsrc_fork_cleanup ; } ; if (rsrc.data_length > rsrc.rsrc_len) { psf_log_printf (psf, "Error : rsrc.data_length > len\n") ; error = SFE_SD2_BAD_DATA_LENGTH ; goto parse_rsrc_fork_cleanup ; } ; if (rsrc.map_length > rsrc.rsrc_len) { psf_log_printf (psf, "Error : rsrc.map_length > len\n") ; error = SFE_SD2_BAD_MAP_LENGTH ; goto parse_rsrc_fork_cleanup ; } ; if (rsrc.data_offset + rsrc.data_length != rsrc.map_offset || rsrc.map_offset + rsrc.map_length != rsrc.rsrc_len) { psf_log_printf (psf, "Error : This does not look like a MacOSX resource fork.\n") ; error = SFE_SD2_BAD_RSRC ; goto parse_rsrc_fork_cleanup ; } ; if (rsrc.map_offset + 28 >= rsrc.rsrc_len) { psf_log_printf (psf, "Bad map offset (%d + 28 > %d).\n", rsrc.map_offset, rsrc.rsrc_len) ; error = SFE_SD2_BAD_RSRC ; goto parse_rsrc_fork_cleanup ; } ; rsrc.string_offset = rsrc.map_offset + read_rsrc_short (&rsrc, rsrc.map_offset + 26) ; if (rsrc.string_offset > rsrc.rsrc_len) { psf_log_printf (psf, "Bad string offset (%d).\n", rsrc.string_offset) ; error = SFE_SD2_BAD_RSRC ; goto parse_rsrc_fork_cleanup ; } ; rsrc.type_offset = rsrc.map_offset + 30 ; if (rsrc.map_offset + 28 > rsrc.rsrc_len) { psf_log_printf (psf, "Bad map offset.\n") ; goto parse_rsrc_fork_cleanup ; } ; rsrc.type_count = read_rsrc_short (&rsrc, rsrc.map_offset + 28) + 1 ; if (rsrc.type_count < 1) { psf_log_printf (psf, "Bad type count.\n") ; error = SFE_SD2_BAD_RSRC ; goto parse_rsrc_fork_cleanup ; } ; rsrc.item_offset = rsrc.type_offset + rsrc.type_count * 8 ; if (rsrc.item_offset < 0 || rsrc.item_offset > rsrc.rsrc_len) { psf_log_printf (psf, "Bad item offset (%d).\n", rsrc.item_offset) ; error = SFE_SD2_BAD_RSRC ; goto parse_rsrc_fork_cleanup ; } ; rsrc.str_index = -1 ; for (k = 0 ; k < rsrc.type_count ; k ++) { if 
(rsrc.type_offset + k * 8 > rsrc.rsrc_len) { psf_log_printf (psf, "Bad rsrc marker.\n") ; goto parse_rsrc_fork_cleanup ; } ; marker = read_rsrc_marker (&rsrc, rsrc.type_offset + k * 8) ; if (marker == STR_MARKER) { rsrc.str_index = k ; rsrc.str_count = read_rsrc_short (&rsrc, rsrc.type_offset + k * 8 + 4) + 1 ; error = parse_str_rsrc (psf, &rsrc) ; goto parse_rsrc_fork_cleanup ; } ; } ; psf_log_printf (psf, "No 'STR ' resource.\n") ; error = SFE_SD2_BAD_RSRC ; parse_rsrc_fork_cleanup : psf_use_rsrc (psf, SF_FALSE) ; if (rsrc.need_to_free_rsrc_data) free (rsrc.rsrc_data) ; return error ; } /* sd2_parse_rsrc_fork */
CWE-119
1
mobility_opt_print(netdissect_options *ndo, const u_char *bp, const unsigned len) { unsigned i, optlen; for (i = 0; i < len; i += optlen) { ND_TCHECK(bp[i]); if (bp[i] == IP6MOPT_PAD1) optlen = 1; else { if (i + 1 < len) { ND_TCHECK(bp[i + 1]); optlen = bp[i + 1] + 2; } else goto trunc; } if (i + optlen > len) goto trunc; ND_TCHECK(bp[i + optlen]); switch (bp[i]) { case IP6MOPT_PAD1: ND_PRINT((ndo, "(pad1)")); break; case IP6MOPT_PADN: if (len - i < IP6MOPT_MINLEN) { ND_PRINT((ndo, "(padn: trunc)")); goto trunc; } ND_PRINT((ndo, "(padn)")); break; case IP6MOPT_REFRESH: if (len - i < IP6MOPT_REFRESH_MINLEN) { ND_PRINT((ndo, "(refresh: trunc)")); goto trunc; } /* units of 4 secs */ ND_TCHECK_16BITS(&bp[i+2]); ND_PRINT((ndo, "(refresh: %u)", EXTRACT_16BITS(&bp[i+2]) << 2)); break; case IP6MOPT_ALTCOA: if (len - i < IP6MOPT_ALTCOA_MINLEN) { ND_PRINT((ndo, "(altcoa: trunc)")); goto trunc; } ND_TCHECK_128BITS(&bp[i+2]); ND_PRINT((ndo, "(alt-CoA: %s)", ip6addr_string(ndo, &bp[i+2]))); break; case IP6MOPT_NONCEID: if (len - i < IP6MOPT_NONCEID_MINLEN) { ND_PRINT((ndo, "(ni: trunc)")); goto trunc; } ND_TCHECK_16BITS(&bp[i+2]); ND_TCHECK_16BITS(&bp[i+4]); ND_PRINT((ndo, "(ni: ho=0x%04x co=0x%04x)", EXTRACT_16BITS(&bp[i+2]), EXTRACT_16BITS(&bp[i+4]))); break; case IP6MOPT_AUTH: if (len - i < IP6MOPT_AUTH_MINLEN) { ND_PRINT((ndo, "(auth: trunc)")); goto trunc; } ND_PRINT((ndo, "(auth)")); break; default: if (len - i < IP6MOPT_MINLEN) { ND_PRINT((ndo, "(sopt_type %u: trunc)", bp[i])); goto trunc; } ND_PRINT((ndo, "(type-0x%02x: len=%u)", bp[i], bp[i + 1])); break; } } return 0; trunc: return 1; }
CWE-125
1
int read_image_tga( gdIOCtx *ctx, oTga *tga ) { int pixel_block_size = (tga->bits / 8); int image_block_size = (tga->width * tga->height) * pixel_block_size; int* decompression_buffer = NULL; unsigned char* conversion_buffer = NULL; int buffer_caret = 0; int bitmap_caret = 0; int i = 0; int encoded_pixels; int rle_size; if(overflow2(tga->width, tga->height)) { return -1; } if(overflow2(tga->width * tga->height, pixel_block_size)) { return -1; } if(overflow2(image_block_size, sizeof(int))) { return -1; } /*! \todo Add more image type support. */ if (tga->imagetype != TGA_TYPE_RGB && tga->imagetype != TGA_TYPE_RGB_RLE) return -1; /*! \brief Allocate memmory for image block * Allocate a chunk of memory for the image block to be passed into. */ tga->bitmap = (int *) gdMalloc(image_block_size * sizeof(int)); if (tga->bitmap == NULL) return -1; switch (tga->imagetype) { case TGA_TYPE_RGB: /*! \brief Read in uncompressed RGB TGA * Chunk load the pixel data from an uncompressed RGB type TGA. */ conversion_buffer = (unsigned char *) gdMalloc(image_block_size * sizeof(unsigned char)); if (conversion_buffer == NULL) { return -1; } if (gdGetBuf(conversion_buffer, image_block_size, ctx) != image_block_size) { gd_error("gd-tga: premature end of image data\n"); gdFree(conversion_buffer); return -1; } while (buffer_caret < image_block_size) { tga->bitmap[buffer_caret] = (int) conversion_buffer[buffer_caret]; buffer_caret++; } gdFree(conversion_buffer); break; case TGA_TYPE_RGB_RLE: /*! \brief Read in RLE compressed RGB TGA * Chunk load the pixel data from an RLE compressed RGB type TGA. */ decompression_buffer = (int*) gdMalloc(image_block_size * sizeof(int)); if (decompression_buffer == NULL) { return -1; } conversion_buffer = (unsigned char *) gdMalloc(image_block_size * sizeof(unsigned char)); if (conversion_buffer == NULL) { gd_error("gd-tga: premature end of image data\n"); gdFree( decompression_buffer ); return -1; } rle_size = gdGetBuf(conversion_buffer, image_block_size, ctx); if (rle_size <= 0) { gdFree(conversion_buffer); gdFree(decompression_buffer); return -1; } buffer_caret = 0; while( buffer_caret < rle_size) { decompression_buffer[buffer_caret] = (int)conversion_buffer[buffer_caret]; buffer_caret++; } buffer_caret = 0; while( bitmap_caret < image_block_size ) { if (buffer_caret + pixel_block_size > rle_size) { gdFree( decompression_buffer ); gdFree( conversion_buffer ); return -1; } if ((decompression_buffer[buffer_caret] & TGA_RLE_FLAG) == TGA_RLE_FLAG) { encoded_pixels = ( ( decompression_buffer[ buffer_caret ] & ~TGA_RLE_FLAG ) + 1 ); buffer_caret++; if ((bitmap_caret + (encoded_pixels * pixel_block_size)) > image_block_size || buffer_caret + pixel_block_size > rle_size) { gdFree( decompression_buffer ); gdFree( conversion_buffer ); return -1; } for (i = 0; i < encoded_pixels; i++) { memcpy(tga->bitmap + bitmap_caret, decompression_buffer + buffer_caret, pixel_block_size * sizeof(int)); bitmap_caret += pixel_block_size; } buffer_caret += pixel_block_size; } else { encoded_pixels = decompression_buffer[ buffer_caret ] + 1; buffer_caret++; if ((bitmap_caret + (encoded_pixels * pixel_block_size)) > image_block_size || buffer_caret + (encoded_pixels * pixel_block_size) > rle_size) { gdFree( decompression_buffer ); gdFree( conversion_buffer ); return -1; } memcpy(tga->bitmap + bitmap_caret, decompression_buffer + buffer_caret, encoded_pixels * pixel_block_size * sizeof(int)); bitmap_caret += (encoded_pixels * pixel_block_size); buffer_caret += (encoded_pixels * pixel_block_size); } } gdFree( 
decompression_buffer ); gdFree( conversion_buffer ); break; } return 1; }
CWE-125
1
get_endpoints(struct usbtest_dev *dev, struct usb_interface *intf) { int tmp; struct usb_host_interface *alt; struct usb_host_endpoint *in, *out; struct usb_host_endpoint *iso_in, *iso_out; struct usb_host_endpoint *int_in, *int_out; struct usb_device *udev; for (tmp = 0; tmp < intf->num_altsetting; tmp++) { unsigned ep; in = out = NULL; iso_in = iso_out = NULL; int_in = int_out = NULL; alt = intf->altsetting + tmp; if (override_alt >= 0 && override_alt != alt->desc.bAlternateSetting) continue; /* take the first altsetting with in-bulk + out-bulk; * ignore other endpoints and altsettings. */ for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) { struct usb_host_endpoint *e; int edi; e = alt->endpoint + ep; edi = usb_endpoint_dir_in(&e->desc); switch (usb_endpoint_type(&e->desc)) { case USB_ENDPOINT_XFER_BULK: endpoint_update(edi, &in, &out, e); continue; case USB_ENDPOINT_XFER_INT: if (dev->info->intr) endpoint_update(edi, &int_in, &int_out, e); continue; case USB_ENDPOINT_XFER_ISOC: if (dev->info->iso) endpoint_update(edi, &iso_in, &iso_out, e); /* FALLTHROUGH */ default: continue; } } if ((in && out) || iso_in || iso_out || int_in || int_out) goto found; } return -EINVAL; found: udev = testdev_to_usbdev(dev); dev->info->alt = alt->desc.bAlternateSetting; if (alt->desc.bAlternateSetting != 0) { tmp = usb_set_interface(udev, alt->desc.bInterfaceNumber, alt->desc.bAlternateSetting); if (tmp < 0) return tmp; } if (in) dev->in_pipe = usb_rcvbulkpipe(udev, in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); if (out) dev->out_pipe = usb_sndbulkpipe(udev, out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); if (iso_in) { dev->iso_in = &iso_in->desc; dev->in_iso_pipe = usb_rcvisocpipe(udev, iso_in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); } if (iso_out) { dev->iso_out = &iso_out->desc; dev->out_iso_pipe = usb_sndisocpipe(udev, iso_out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); } if (int_in) { dev->int_in = &int_in->desc; dev->in_int_pipe = usb_rcvintpipe(udev, int_in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); } if (int_out) { dev->int_out = &int_out->desc; dev->out_int_pipe = usb_sndintpipe(udev, int_out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); } return 0; }
CWE-476
1
status_t AudioFlinger::EffectModule::command(uint32_t cmdCode, uint32_t cmdSize, void *pCmdData, uint32_t *replySize, void *pReplyData) { Mutex::Autolock _l(mLock); ALOGVV("command(), cmdCode: %d, mEffectInterface: %p", cmdCode, mEffectInterface); if (mState == DESTROYED || mEffectInterface == NULL) { return NO_INIT; } if (mStatus != NO_ERROR) { return mStatus; } if (cmdCode == EFFECT_CMD_GET_PARAM && (*replySize < sizeof(effect_param_t) || ((effect_param_t *)pCmdData)->psize > *replySize - sizeof(effect_param_t))) { android_errorWriteLog(0x534e4554, "29251553"); return -EINVAL; } if ((cmdCode == EFFECT_CMD_SET_PARAM || cmdCode == EFFECT_CMD_SET_PARAM_DEFERRED) && // DEFERRED not generally used (sizeof(effect_param_t) > cmdSize || ((effect_param_t *)pCmdData)->psize > cmdSize - sizeof(effect_param_t) || ((effect_param_t *)pCmdData)->vsize > cmdSize - sizeof(effect_param_t) - ((effect_param_t *)pCmdData)->psize || roundUpDelta(((effect_param_t *)pCmdData)->psize, (uint32_t)sizeof(int)) > cmdSize - sizeof(effect_param_t) - ((effect_param_t *)pCmdData)->psize - ((effect_param_t *)pCmdData)->vsize)) { android_errorWriteLog(0x534e4554, "30204301"); return -EINVAL; } status_t status = (*mEffectInterface)->command(mEffectInterface, cmdCode, cmdSize, pCmdData, replySize, pReplyData); if (cmdCode != EFFECT_CMD_GET_PARAM && status == NO_ERROR) { uint32_t size = (replySize == NULL) ? 0 : *replySize; for (size_t i = 1; i < mHandles.size(); i++) { EffectHandle *h = mHandles[i]; if (h != NULL && !h->destroyed_l()) { h->commandExecuted(cmdCode, cmdSize, pCmdData, size, pReplyData); } } } return status; }
CWE-200
1
static int load_script(struct linux_binprm *bprm) { const char *i_arg, *i_name; char *cp; struct file *file; char interp[BINPRM_BUF_SIZE]; int retval; if ((bprm->buf[0] != '#') || (bprm->buf[1] != '!')) return -ENOEXEC; /* * This section does the #! interpretation. * Sorta complicated, but hopefully it will work. -TYT */ allow_write_access(bprm->file); fput(bprm->file); bprm->file = NULL; bprm->buf[BINPRM_BUF_SIZE - 1] = '\0'; if ((cp = strchr(bprm->buf, '\n')) == NULL) cp = bprm->buf+BINPRM_BUF_SIZE-1; *cp = '\0'; while (cp > bprm->buf) { cp--; if ((*cp == ' ') || (*cp == '\t')) *cp = '\0'; else break; } for (cp = bprm->buf+2; (*cp == ' ') || (*cp == '\t'); cp++); if (*cp == '\0') return -ENOEXEC; /* No interpreter name found */ i_name = cp; i_arg = NULL; for ( ; *cp && (*cp != ' ') && (*cp != '\t'); cp++) /* nothing */ ; while ((*cp == ' ') || (*cp == '\t')) *cp++ = '\0'; if (*cp) i_arg = cp; strcpy (interp, i_name); /* * OK, we've parsed out the interpreter name and * (optional) argument. * Splice in (1) the interpreter's name for argv[0] * (2) (optional) argument to interpreter * (3) filename of shell script (replace argv[0]) * * This is done in reverse order, because of how the * user environment and arguments are stored. */ retval = remove_arg_zero(bprm); if (retval) return retval; retval = copy_strings_kernel(1, &bprm->interp, bprm); if (retval < 0) return retval; bprm->argc++; if (i_arg) { retval = copy_strings_kernel(1, &i_arg, bprm); if (retval < 0) return retval; bprm->argc++; } retval = copy_strings_kernel(1, &i_name, bprm); if (retval) return retval; bprm->argc++; retval = bprm_change_interp(interp, bprm); if (retval < 0) return retval; /* * OK, now restart the process with the interpreter's dentry. */ file = open_exec(interp); if (IS_ERR(file)) return PTR_ERR(file); bprm->file = file; retval = prepare_binprm(bprm); if (retval < 0) return retval; return search_binary_handler(bprm); }
CWE-200
1
do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) { unsigned char arg[128]; int ret = 0; unsigned int copylen; struct net *net = sock_net(sk); struct netns_ipvs *ipvs = net_ipvs(net); BUG_ON(!net); if (!capable(CAP_NET_ADMIN)) return -EPERM; if (cmd < IP_VS_BASE_CTL || cmd > IP_VS_SO_GET_MAX) return -EINVAL; if (*len < get_arglen[GET_CMDID(cmd)]) { pr_err("get_ctl: len %u < %u\n", *len, get_arglen[GET_CMDID(cmd)]); return -EINVAL; } copylen = get_arglen[GET_CMDID(cmd)]; if (copylen > 128) return -EINVAL; if (copy_from_user(arg, user, copylen) != 0) return -EFAULT; /* * Handle daemons first since it has its own locking */ if (cmd == IP_VS_SO_GET_DAEMON) { struct ip_vs_daemon_user d[2]; memset(&d, 0, sizeof(d)); if (mutex_lock_interruptible(&ipvs->sync_mutex)) return -ERESTARTSYS; if (ipvs->sync_state & IP_VS_STATE_MASTER) { d[0].state = IP_VS_STATE_MASTER; strlcpy(d[0].mcast_ifn, ipvs->master_mcast_ifn, sizeof(d[0].mcast_ifn)); d[0].syncid = ipvs->master_syncid; } if (ipvs->sync_state & IP_VS_STATE_BACKUP) { d[1].state = IP_VS_STATE_BACKUP; strlcpy(d[1].mcast_ifn, ipvs->backup_mcast_ifn, sizeof(d[1].mcast_ifn)); d[1].syncid = ipvs->backup_syncid; } if (copy_to_user(user, &d, sizeof(d)) != 0) ret = -EFAULT; mutex_unlock(&ipvs->sync_mutex); return ret; } if (mutex_lock_interruptible(&__ip_vs_mutex)) return -ERESTARTSYS; switch (cmd) { case IP_VS_SO_GET_VERSION: { char buf[64]; sprintf(buf, "IP Virtual Server version %d.%d.%d (size=%d)", NVERSION(IP_VS_VERSION_CODE), ip_vs_conn_tab_size); if (copy_to_user(user, buf, strlen(buf)+1) != 0) { ret = -EFAULT; goto out; } *len = strlen(buf)+1; } break; case IP_VS_SO_GET_INFO: { struct ip_vs_getinfo info; info.version = IP_VS_VERSION_CODE; info.size = ip_vs_conn_tab_size; info.num_services = ipvs->num_services; if (copy_to_user(user, &info, sizeof(info)) != 0) ret = -EFAULT; } break; case IP_VS_SO_GET_SERVICES: { struct ip_vs_get_services *get; int size; get = (struct ip_vs_get_services *)arg; size = sizeof(*get) + sizeof(struct ip_vs_service_entry) * get->num_services; if (*len != size) { pr_err("length: %u != %u\n", *len, size); ret = -EINVAL; goto out; } ret = __ip_vs_get_service_entries(net, get, user); } break; case IP_VS_SO_GET_SERVICE: { struct ip_vs_service_entry *entry; struct ip_vs_service *svc; union nf_inet_addr addr; entry = (struct ip_vs_service_entry *)arg; addr.ip = entry->addr; if (entry->fwmark) svc = __ip_vs_svc_fwm_find(net, AF_INET, entry->fwmark); else svc = __ip_vs_service_find(net, AF_INET, entry->protocol, &addr, entry->port); if (svc) { ip_vs_copy_service(entry, svc); if (copy_to_user(user, entry, sizeof(*entry)) != 0) ret = -EFAULT; } else ret = -ESRCH; } break; case IP_VS_SO_GET_DESTS: { struct ip_vs_get_dests *get; int size; get = (struct ip_vs_get_dests *)arg; size = sizeof(*get) + sizeof(struct ip_vs_dest_entry) * get->num_dests; if (*len != size) { pr_err("length: %u != %u\n", *len, size); ret = -EINVAL; goto out; } ret = __ip_vs_get_dest_entries(net, get, user); } break; case IP_VS_SO_GET_TIMEOUT: { struct ip_vs_timeout_user t; memset(&t, 0, sizeof(t)); __ip_vs_get_timeouts(net, &t); if (copy_to_user(user, &t, sizeof(t)) != 0) ret = -EFAULT; } break; default: ret = -EINVAL; } out: mutex_unlock(&__ip_vs_mutex); return ret; }
CWE-200
1
lldp_mgmt_addr_tlv_print(netdissect_options *ndo, const u_char *pptr, u_int len) { uint8_t mgmt_addr_len, intf_num_subtype, oid_len; const u_char *tptr; u_int tlen; char *mgmt_addr; tlen = len; tptr = pptr; if (tlen < 1) { return 0; } mgmt_addr_len = *tptr++; tlen--; if (tlen < mgmt_addr_len) { return 0; } mgmt_addr = lldp_network_addr_print(ndo, tptr, mgmt_addr_len); if (mgmt_addr == NULL) { return 0; } ND_PRINT((ndo, "\n\t Management Address length %u, %s", mgmt_addr_len, mgmt_addr)); tptr += mgmt_addr_len; tlen -= mgmt_addr_len; if (tlen < LLDP_INTF_NUM_LEN) { return 0; } intf_num_subtype = *tptr; ND_PRINT((ndo, "\n\t %s Interface Numbering (%u): %u", tok2str(lldp_intf_numb_subtype_values, "Unknown", intf_num_subtype), intf_num_subtype, EXTRACT_32BITS(tptr + 1))); tptr += LLDP_INTF_NUM_LEN; tlen -= LLDP_INTF_NUM_LEN; /* * The OID is optional. */ if (tlen) { oid_len = *tptr; if (tlen < 1U + oid_len) { return 0; } if (oid_len) { ND_PRINT((ndo, "\n\t OID length %u", oid_len)); safeputs(ndo, tptr + 1, oid_len); } } return 1; }
CWE-125
1
static MagickBooleanType ReadPSDChannelPixels(Image *image, const size_t channels,const size_t row,const ssize_t type, const unsigned char *pixels,ExceptionInfo *exception) { Quantum pixel; register const unsigned char *p; register Quantum *q; register ssize_t x; size_t packet_size; unsigned short nibble; p=pixels; q=GetAuthenticPixels(image,0,row,image->columns,1,exception); if (q == (Quantum *) NULL) return MagickFalse; packet_size=GetPSDPacketSize(image); for (x=0; x < (ssize_t) image->columns; x++) { if (packet_size == 1) pixel=ScaleCharToQuantum(*p++); else { p=PushShortPixel(MSBEndian,p,&nibble); pixel=ScaleShortToQuantum(nibble); } if (image->depth > 1) { SetPSDPixel(image,channels,type,packet_size,pixel,q,exception); q+=GetPixelChannels(image); } else { ssize_t bit, number_bits; number_bits=image->columns-x; if (number_bits > 8) number_bits=8; for (bit = 0; bit < number_bits; bit++) { SetPSDPixel(image,channels,type,packet_size,(((unsigned char) pixel) & (0x01 << (7-bit))) != 0 ? 0 : 255,q,exception); q+=GetPixelChannels(image); x++; } if (x != image->columns) x--; continue; } } return(SyncAuthenticPixels(image,exception)); }
CWE-125
1
bool CSPSourceList::matches(const KURL& url, ContentSecurityPolicy::RedirectStatus redirectStatus) const
{
    // The CSP spec specifically states that data:, blob:, and filesystem URLs
    // should not be captured by a '*" source
    // (http://www.w3.org/TR/CSP2/#source-list-guid-matching). Thus, in the
    // case of a full wildcard, data:, blob:, and filesystem: URLs are
    // explicitly checked for in the source list before allowing them through.
    if (m_allowStar) {
        if (url.protocolIs("blob") || url.protocolIs("data") || url.protocolIs("filesystem"))
            return hasSourceMatchInList(url, redirectStatus);
        return true;
    }

    KURL effectiveURL = m_policy->selfMatchesInnerURL() && SecurityOrigin::shouldUseInnerURL(url)
        ? SecurityOrigin::extractInnerURL(url)
        : url;

    if (m_allowSelf && m_policy->urlMatchesSelf(effectiveURL))
        return true;

    return hasSourceMatchInList(effectiveURL, redirectStatus);
}
CWE-264
1
static int __sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
    int err;
    struct sock_diag_req *req = nlmsg_data(nlh);
    const struct sock_diag_handler *hndl;

    if (nlmsg_len(nlh) < sizeof(*req))
        return -EINVAL;

    if (req->sdiag_family >= AF_MAX)
        return -EINVAL;

    hndl = sock_diag_lock_handler(req->sdiag_family);
    if (hndl == NULL)
        err = -ENOENT;
    else
        err = hndl->dump(skb, nlh);
    sock_diag_unlock_handler(hndl);

    return err;
}
CWE-20
1
ProcEstablishConnection(ClientPtr client)
{
    const char *reason;
    char *auth_proto, *auth_string;
    xConnClientPrefix *prefix;

    REQUEST(xReq);
    prefix = (xConnClientPrefix *) ((char *) stuff + sz_xReq);
    auth_proto = (char *) prefix + sz_xConnClientPrefix;
    auth_string = auth_proto + pad_to_int32(prefix->nbytesAuthProto);
    if ((client->req_len << 2) != sz_xReq + sz_xConnClientPrefix +
        pad_to_int32(prefix->nbytesAuthProto) +
        pad_to_int32(prefix->nbytesAuthString))
        reason = "Bad length";
    else if ((prefix->majorVersion != X_PROTOCOL) ||
             (prefix->minorVersion != X_PROTOCOL_REVISION))
        reason = "Protocol version mismatch";
    else
        return (SendConnSetup(client, reason));
}
CWE-20
1
void OnSuggestionModelAdded(UiScene* scene, UiBrowserInterface* browser, Model* model, SuggestionBinding* element_binding) { auto icon = base::MakeUnique<VectorIcon>(100); icon->SetDrawPhase(kPhaseForeground); icon->SetType(kTypeOmniboxSuggestionIcon); icon->set_hit_testable(false); icon->SetSize(kSuggestionIconSizeDMM, kSuggestionIconSizeDMM); BindColor(model, icon.get(), &ColorScheme::omnibox_icon, &VectorIcon::SetColor); VectorIcon* p_icon = icon.get(); auto icon_box = base::MakeUnique<UiElement>(); icon_box->SetDrawPhase(kPhaseNone); icon_box->SetType(kTypeOmniboxSuggestionIconField); icon_box->SetSize(kSuggestionIconFieldWidthDMM, kSuggestionHeightDMM); icon_box->AddChild(std::move(icon)); auto content_text = base::MakeUnique<Text>(kSuggestionContentTextHeightDMM); content_text->SetDrawPhase(kPhaseForeground); content_text->SetType(kTypeOmniboxSuggestionContentText); content_text->set_hit_testable(false); content_text->SetTextLayoutMode(TextLayoutMode::kSingleLineFixedWidth); content_text->SetSize(kSuggestionTextFieldWidthDMM, 0); content_text->SetTextAlignment(UiTexture::kTextAlignmentLeft); BindColor(model, content_text.get(), &ColorScheme::omnibox_suggestion_content, &Text::SetColor); Text* p_content_text = content_text.get(); auto description_text = base::MakeUnique<Text>(kSuggestionDescriptionTextHeightDMM); description_text->SetDrawPhase(kPhaseForeground); description_text->SetType(kTypeOmniboxSuggestionDescriptionText); description_text->set_hit_testable(false); description_text->SetTextLayoutMode(TextLayoutMode::kSingleLineFixedWidth); description_text->SetSize(kSuggestionTextFieldWidthDMM, 0); description_text->SetTextAlignment(UiTexture::kTextAlignmentLeft); BindColor(model, description_text.get(), &ColorScheme::omnibox_suggestion_description, &Text::SetColor); Text* p_description_text = description_text.get(); auto text_layout = base::MakeUnique<LinearLayout>(LinearLayout::kDown); text_layout->SetType(kTypeOmniboxSuggestionTextLayout); text_layout->set_hit_testable(false); text_layout->set_margin(kSuggestionLineGapDMM); text_layout->AddChild(std::move(content_text)); text_layout->AddChild(std::move(description_text)); auto right_margin = base::MakeUnique<UiElement>(); right_margin->SetDrawPhase(kPhaseNone); right_margin->SetSize(kSuggestionRightMarginDMM, kSuggestionHeightDMM); auto suggestion_layout = base::MakeUnique<LinearLayout>(LinearLayout::kRight); suggestion_layout->SetType(kTypeOmniboxSuggestionLayout); suggestion_layout->set_hit_testable(false); suggestion_layout->AddChild(std::move(icon_box)); suggestion_layout->AddChild(std::move(text_layout)); suggestion_layout->AddChild(std::move(right_margin)); auto background = Create<Button>( kNone, kPhaseForeground, base::BindRepeating( [](UiBrowserInterface* b, Model* m, SuggestionBinding* e) { b->Navigate(e->model()->destination); m->omnibox_input_active = false; }, base::Unretained(browser), base::Unretained(model), base::Unretained(element_binding))); background->SetType(kTypeOmniboxSuggestionBackground); background->set_hit_testable(true); background->set_bubble_events(true); background->set_bounds_contain_children(true); background->set_hover_offset(0.0); BindButtonColors(model, background.get(), &ColorScheme::suggestion_button_colors, &Button::SetButtonColors); background->AddChild(std::move(suggestion_layout)); element_binding->bindings().push_back( VR_BIND_FUNC(base::string16, SuggestionBinding, element_binding, model()->content, Text, p_content_text, SetText)); element_binding->bindings().push_back( 
base::MakeUnique<Binding<base::string16>>( base::BindRepeating( [](SuggestionBinding* m) { return m->model()->description; }, base::Unretained(element_binding)), base::BindRepeating( [](Text* v, const base::string16& text) { v->SetVisibleImmediately(!text.empty()); v->set_requires_layout(!text.empty()); if (!text.empty()) { v->SetText(text); } }, base::Unretained(p_description_text)))); element_binding->bindings().push_back( VR_BIND(AutocompleteMatch::Type, SuggestionBinding, element_binding, model()->type, VectorIcon, p_icon, SetIcon(AutocompleteMatch::TypeToVectorIcon(value)))); element_binding->set_view(background.get()); scene->AddUiElement(kOmniboxSuggestions, std::move(background)); }
CWE-200
1
void RemoveActionCallback(const ActionCallback& callback) {
  DCHECK(g_task_runner.Get());
  DCHECK(g_task_runner.Get()->BelongsToCurrentThread());
  std::vector<ActionCallback>* callbacks = g_callbacks.Pointer();
  for (size_t i = 0; i < callbacks->size(); ++i) {
    if ((*callbacks)[i] == callback) {
      callbacks->erase(callbacks->begin() + i);
      return;
    }
  }
}
CWE-20
1
u32 h264bsdInitDpb( dpbStorage_t *dpb, u32 picSizeInMbs, u32 dpbSize, u32 maxRefFrames, u32 maxFrameNum, u32 noReordering) { /* Variables */ u32 i; /* Code */ ASSERT(picSizeInMbs); ASSERT(maxRefFrames <= MAX_NUM_REF_PICS); ASSERT(maxRefFrames <= dpbSize); ASSERT(maxFrameNum); ASSERT(dpbSize); // see comment in loop below about size calculation if (picSizeInMbs > (UINT32_MAX - 32 - 15) / 384) { ALOGE("b/28533562"); android_errorWriteLog(0x534e4554, "28533562"); return(MEMORY_ALLOCATION_ERROR); } dpb->maxLongTermFrameIdx = NO_LONG_TERM_FRAME_INDICES; dpb->maxRefFrames = MAX(maxRefFrames, 1); if (noReordering) dpb->dpbSize = dpb->maxRefFrames; else dpb->dpbSize = dpbSize; dpb->maxFrameNum = maxFrameNum; dpb->noReordering = noReordering; dpb->fullness = 0; dpb->numRefFrames = 0; dpb->prevRefFrameNum = 0; ALLOCATE(dpb->buffer, MAX_NUM_REF_IDX_L0_ACTIVE + 1, dpbPicture_t); if (dpb->buffer == NULL) return(MEMORY_ALLOCATION_ERROR); H264SwDecMemset(dpb->buffer, 0, (MAX_NUM_REF_IDX_L0_ACTIVE + 1)*sizeof(dpbPicture_t)); for (i = 0; i < dpb->dpbSize + 1; i++) { /* Allocate needed amount of memory, which is: * image size + 32 + 15, where 32 cames from the fact that in ARM OpenMax * DL implementation Functions may read beyond the end of an array, * by a maximum of 32 bytes. And +15 cames for the need to align memory * to 16-byte boundary */ ALLOCATE(dpb->buffer[i].pAllocatedData, (picSizeInMbs*384 + 32+15), u8); if (dpb->buffer[i].pAllocatedData == NULL) return(MEMORY_ALLOCATION_ERROR); dpb->buffer[i].data = ALIGN(dpb->buffer[i].pAllocatedData, 16); } ALLOCATE(dpb->list, MAX_NUM_REF_IDX_L0_ACTIVE + 1, dpbPicture_t*); ALLOCATE(dpb->outBuf, dpb->dpbSize+1, dpbOutPicture_t); if (dpb->list == NULL || dpb->outBuf == NULL) return(MEMORY_ALLOCATION_ERROR); H264SwDecMemset(dpb->list, 0, ((MAX_NUM_REF_IDX_L0_ACTIVE + 1) * sizeof(dpbPicture_t*)) ); dpb->numOut = dpb->outIndex = 0; return(HANTRO_OK); }
CWE-119
1
static int ima_lsm_rule_init(struct ima_measure_rule_entry *entry,
                             char *args, int lsm_rule, int audit_type)
{
    int result;

    if (entry->lsm[lsm_rule].rule)
        return -EINVAL;

    entry->lsm[lsm_rule].type = audit_type;
    result = security_filter_rule_init(entry->lsm[lsm_rule].type,
                                       Audit_equal, args,
                                       &entry->lsm[lsm_rule].rule);
    if (!entry->lsm[lsm_rule].rule)
        return -EINVAL;

    return result;
}
CWE-264
1
size_t jsvGetString(const JsVar *v, char *str, size_t len) { assert(len>0); const char *s = jsvGetConstString(v); if (s) { /* don't use strncpy here because we don't * want to pad the entire buffer with zeros */ len--; int l = 0; while (s[l] && l<len) { str[l] = s[l]; l++; } str[l] = 0; return l; } else if (jsvIsInt(v)) { itostr(v->varData.integer, str, 10); return strlen(str); } else if (jsvIsFloat(v)) { ftoa_bounded(v->varData.floating, str, len); return strlen(str); } else if (jsvHasCharacterData(v)) { assert(!jsvIsStringExt(v)); size_t l = len; JsvStringIterator it; jsvStringIteratorNewConst(&it, v, 0); while (jsvStringIteratorHasChar(&it)) { if (l--<=1) { *str = 0; jsvStringIteratorFree(&it); return len; } *(str++) = jsvStringIteratorGetChar(&it); jsvStringIteratorNext(&it); } jsvStringIteratorFree(&it); *str = 0; return len-l; } else { JsVar *stringVar = jsvAsString((JsVar*)v, false); // we know we're casting to non-const here if (stringVar) { size_t l = jsvGetString(stringVar, str, len); // call again - but this time with converted var jsvUnLock(stringVar); return l; } else { str[0] = 0; jsExceptionHere(JSET_INTERNALERROR, "Variable type cannot be converted to string"); return 0; } } }
CWE-119
1
INLINE void impeg2d_bit_stream_flush(void* pv_ctxt, UWORD32 u4_no_of_bits)
{
    stream_t *ps_stream = (stream_t *)pv_ctxt;

    if ((ps_stream->u4_offset + 64) < ps_stream->u4_max_offset)
    {
        FLUSH_BITS(ps_stream->u4_offset,ps_stream->u4_buf,ps_stream->u4_buf_nxt,u4_no_of_bits,ps_stream->pu4_buf_aligned)
    }
    else
    {
        UWORD32 u4_temp;

        if (((ps_stream->u4_offset & 0x1f) + u4_no_of_bits) >= 32)
        {
            ps_stream->u4_buf = ps_stream->u4_buf_nxt;
            ps_stream->u4_buf_nxt = 0;
        }
        ps_stream->u4_offset += u4_no_of_bits;
    }
    return;
}
CWE-200
1
de_dotdot( char* file ) { char* cp; char* cp2; int l; /* Collapse any multiple / sequences. */ while ( ( cp = strstr( file, "//") ) != (char*) 0 ) { for ( cp2 = cp + 2; *cp2 == '/'; ++cp2 ) continue; (void) strcpy( cp + 1, cp2 ); } /* Remove leading ./ and any /./ sequences. */ while ( strncmp( file, "./", 2 ) == 0 ) (void) memmove( file, file + 2, strlen( file ) - 1 ); while ( ( cp = strstr( file, "/./") ) != (char*) 0 ) (void) memmove( cp, cp + 2, strlen( cp ) - 1 ); /* Alternate between removing leading ../ and removing xxx/../ */ for (;;) { while ( strncmp( file, "../", 3 ) == 0 ) (void) memmove( file, file + 3, strlen( file ) - 2 ); cp = strstr( file, "/../" ); if ( cp == (char*) 0 ) break; for ( cp2 = cp - 1; cp2 >= file && *cp2 != '/'; --cp2 ) continue; (void) strcpy( cp2 + 1, cp + 4 ); } /* Also elide any xxx/.. at the end. */ while ( ( l = strlen( file ) ) > 3 && strcmp( ( cp = file + l - 3 ), "/.." ) == 0 ) { for ( cp2 = cp - 1; cp2 >= file && *cp2 != '/'; --cp2 ) continue; if ( cp2 < file ) break; *cp2 = '\0'; } }
CWE-119
1
static int sco_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
                            struct msghdr *msg, size_t len, int flags)
{
    struct sock *sk = sock->sk;
    struct sco_pinfo *pi = sco_pi(sk);

    lock_sock(sk);

    if (sk->sk_state == BT_CONNECT2 &&
        test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
        hci_conn_accept(pi->conn->hcon, 0);
        sk->sk_state = BT_CONFIG;
        msg->msg_namelen = 0;

        release_sock(sk);
        return 0;
    }

    release_sock(sk);

    return bt_sock_recvmsg(iocb, sock, msg, len, flags);
}
CWE-200
1
static bool get_build_id( Backtrace* backtrace, uintptr_t base_addr, uint8_t* e_ident, std::string* build_id) { HdrType hdr; memcpy(&hdr.e_ident[0], e_ident, EI_NIDENT); if (backtrace->Read(base_addr + EI_NIDENT, reinterpret_cast<uint8_t*>(&hdr) + EI_NIDENT, sizeof(HdrType) - EI_NIDENT) != sizeof(HdrType) - EI_NIDENT) { return false; } for (size_t i = 0; i < hdr.e_phnum; i++) { PhdrType phdr; if (backtrace->Read(base_addr + hdr.e_phoff + i * hdr.e_phentsize, reinterpret_cast<uint8_t*>(&phdr), sizeof(phdr)) != sizeof(phdr)) { return false; } if (phdr.p_type == PT_NOTE) { size_t hdr_size = phdr.p_filesz; uintptr_t addr = base_addr + phdr.p_offset; while (hdr_size >= sizeof(NhdrType)) { NhdrType nhdr; if (backtrace->Read(addr, reinterpret_cast<uint8_t*>(&nhdr), sizeof(nhdr)) != sizeof(nhdr)) { return false; } addr += sizeof(nhdr); if (nhdr.n_type == NT_GNU_BUILD_ID) { addr += NOTE_ALIGN(nhdr.n_namesz); uint8_t build_id_data[160]; if (nhdr.n_descsz > sizeof(build_id_data)) { ALOGE("Possible corrupted note, desc size value is too large: %u", nhdr.n_descsz); return false; } if (backtrace->Read(addr, build_id_data, nhdr.n_descsz) != nhdr.n_descsz) { return false; } build_id->clear(); for (size_t bytes = 0; bytes < nhdr.n_descsz; bytes++) { *build_id += android::base::StringPrintf("%02x", build_id_data[bytes]); } return true; } else { hdr_size -= sizeof(nhdr); size_t skip_bytes = NOTE_ALIGN(nhdr.n_namesz) + NOTE_ALIGN(nhdr.n_descsz); addr += skip_bytes; if (hdr_size < skip_bytes) { break; } hdr_size -= skip_bytes; } } } } return false; }
CWE-264
1
static jboolean cancelDiscoveryNative(JNIEnv* env, jobject obj) {
    ALOGV("%s:",__FUNCTION__);
    jboolean result = JNI_FALSE;
    if (!sBluetoothInterface) return result;

    int ret = sBluetoothInterface->cancel_discovery();
    result = (ret == BT_STATUS_SUCCESS) ? JNI_TRUE : JNI_FALSE;
    return result;
}
CWE-20
1
_dbus_header_byteswap (DBusHeader *header, int new_order)
{
  unsigned char byte_order;

  if (header->byte_order == new_order)
    return;

  byte_order = _dbus_string_get_byte (&header->data, BYTE_ORDER_OFFSET);
  _dbus_assert (header->byte_order == byte_order);

  _dbus_marshal_byteswap (&_dbus_header_signature_str, 0,
                          header->byte_order,
                          new_order,
                          &header->data, 0);

  _dbus_string_set_byte (&header->data, BYTE_ORDER_OFFSET, new_order);

  header->byte_order = new_order;
}
CWE-20
1
static int dv_extract_audio_info(DVDemuxContext* c, uint8_t* frame) { const uint8_t* as_pack; int freq, stype, smpls, quant, i, ach; as_pack = dv_extract_pack(frame, dv_audio_source); if (!as_pack || !c->sys) { /* No audio ? */ c->ach = 0; return 0; } smpls = as_pack[1] & 0x3f; /* samples in this frame - min. samples */ freq = (as_pack[4] >> 3) & 0x07; /* 0 - 48kHz, 1 - 44,1kHz, 2 - 32kHz */ stype = (as_pack[3] & 0x1f); /* 0 - 2CH, 2 - 4CH, 3 - 8CH */ quant = as_pack[4] & 0x07; /* 0 - 16bit linear, 1 - 12bit nonlinear */ if (stype > 3) { av_log(c->fctx, AV_LOG_ERROR, "stype %d is invalid\n", stype); c->ach = 0; return 0; } /* note: ach counts PAIRS of channels (i.e. stereo channels) */ ach = ((int[4]){ 1, 0, 2, 4})[stype]; if (ach == 1 && quant && freq == 2) if (!c->ast[i]) break; avpriv_set_pts_info(c->ast[i], 64, 1, 30000); c->ast[i]->codec->codec_type = AVMEDIA_TYPE_AUDIO; c->ast[i]->codec->codec_id = CODEC_ID_PCM_S16LE; av_init_packet(&c->audio_pkt[i]); c->audio_pkt[i].size = 0; c->audio_pkt[i].data = c->audio_buf[i]; c->audio_pkt[i].stream_index = c->ast[i]->index; c->audio_pkt[i].flags |= AV_PKT_FLAG_KEY; }
CWE-20
1
static gboolean prplcb_xfer_new_send_cb(gpointer data, gint fd, b_input_condition cond)
{
    PurpleXfer *xfer = data;
    struct im_connection *ic = purple_ic_by_pa(xfer->account);
    struct prpl_xfer_data *px = xfer->ui_data;
    PurpleBuddy *buddy;
    const char *who;

    buddy = purple_find_buddy(xfer->account, xfer->who);
    who = buddy ? purple_buddy_get_name(buddy) : xfer->who;

    /* TODO(wilmer): After spreading some more const goodness in BitlBee,
       remove the evil cast below. */
    px->ft = imcb_file_send_start(ic, (char *) who, xfer->filename, xfer->size);
    if (!px->ft) {
        return FALSE;
    }
    px->ft->data = px;

    px->ft->accept = prpl_xfer_accept;
    px->ft->canceled = prpl_xfer_canceled;
    px->ft->free = prpl_xfer_free;
    px->ft->write_request = prpl_xfer_write_request;

    return FALSE;
}
CWE-476
1
static int ssl_parse_server_psk_hint( mbedtls_ssl_context *ssl, unsigned char **p, unsigned char *end ) { int ret = MBEDTLS_ERR_SSL_FEATURE_UNAVAILABLE; size_t len; ((void) ssl); /* * PSK parameters: * * opaque psk_identity_hint<0..2^16-1>; */ if( (*p) > end - 2 ) { MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad server key exchange message " "(psk_identity_hint length)" ) ); return( MBEDTLS_ERR_SSL_BAD_HS_SERVER_KEY_EXCHANGE ); } len = (*p)[0] << 8 | (*p)[1]; *p += 2; if( (*p) > end - len ) { MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad server key exchange message " "(psk_identity_hint length)" ) ); return( MBEDTLS_ERR_SSL_BAD_HS_SERVER_KEY_EXCHANGE ); } /* * Note: we currently ignore the PKS identity hint, as we only allow one * PSK to be provisionned on the client. This could be changed later if * someone needs that feature. */ *p += len; ret = 0; return( ret ); }
CWE-125
1
static void voutf(struct GlobalConfig *config, const char *prefix, const char *fmt, va_list ap) { size_t width = (79 - strlen(prefix)); if(!config->mute) { size_t len; char *ptr; char *print_buffer; print_buffer = curlx_mvaprintf(fmt, ap); if(!print_buffer) return; len = strlen(print_buffer); ptr = print_buffer; while(len > 0) { fputs(prefix, config->errors); if(len > width) { size_t cut = width-1; while(!ISSPACE(ptr[cut]) && cut) { cut--; } if(0 == cut) /* not a single cutting position was found, just cut it at the max text width then! */ cut = width-1; (void)fwrite(ptr, cut + 1, 1, config->errors); fputs("\n", config->errors); ptr += cut + 1; /* skip the space too */ len -= cut + 1; } else { fputs(ptr, config->errors); len = 0; } } curl_free(print_buffer); } }
CWE-125
1
static void coroutine_fn v9fs_xattrcreate(void *opaque) { int flags; int32_t fid; int64_t size; ssize_t err = 0; V9fsString name; size_t offset = 7; V9fsFidState *file_fidp; V9fsFidState *xattr_fidp; V9fsPDU *pdu = opaque; v9fs_string_init(&name); err = pdu_unmarshal(pdu, offset, "dsqd", &fid, &name, &size, &flags); if (err < 0) { goto out_nofid; } trace_v9fs_xattrcreate(pdu->tag, pdu->id, fid, name.data, size, flags); file_fidp = get_fid(pdu, fid); if (file_fidp == NULL) { err = -EINVAL; goto out_nofid; } /* Make the file fid point to xattr */ xattr_fidp = file_fidp; xattr_fidp->fid_type = P9_FID_XATTR; xattr_fidp->fs.xattr.copied_len = 0; xattr_fidp->fs.xattr.len = size; xattr_fidp->fs.xattr.flags = flags; v9fs_string_init(&xattr_fidp->fs.xattr.name); v9fs_string_copy(&xattr_fidp->fs.xattr.name, &name); xattr_fidp->fs.xattr.value = g_malloc0(size); err = offset; put_fid(pdu, file_fidp); out_nofid: pdu_complete(pdu, err); v9fs_string_free(&name); }
CWE-119
1
pvscsi_ring_pop_req_descr(PVSCSIRingInfo *mgr)
{
    uint32_t ready_ptr = RS_GET_FIELD(mgr, reqProdIdx);
    uint32_t ring_size = PVSCSI_MAX_NUM_PAGES_REQ_RING
                         * PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;

    if (ready_ptr != mgr->consumed_ptr
        && ready_ptr - mgr->consumed_ptr < ring_size) {
        uint32_t next_ready_ptr =
            mgr->consumed_ptr++ & mgr->txr_len_mask;
        uint32_t next_ready_page =
        return mgr->req_ring_pages_pa[next_ready_page]
               + inpage_idx * sizeof(PVSCSIRingReqDesc);
    } else {
        return 0;
    }
}
CWE-399
1
void BufferQueueConsumer::dump(String8& result, const char* prefix) const {
    const IPCThreadState* ipc = IPCThreadState::self();
    const pid_t pid = ipc->getCallingPid();
    const uid_t uid = ipc->getCallingUid();
    if ((uid != AID_SHELL) &&
        !PermissionCache::checkPermission(String16(
        "android.permission.DUMP"), pid, uid)) {
        result.appendFormat("Permission Denial: can't dump BufferQueueConsumer "
                "from pid=%d, uid=%d\n", pid, uid);
        android_errorWriteWithInfoLog(0x534e4554, "27046057", uid, NULL, 0);
    } else {
        mCore->dump(result, prefix);
    }
}
CWE-264
1
void IDNSpoofChecker::SetAllowedUnicodeSet(UErrorCode* status) { if (U_FAILURE(*status)) return; const icu::UnicodeSet* recommended_set = uspoof_getRecommendedUnicodeSet(status); icu::UnicodeSet allowed_set; allowed_set.addAll(*recommended_set); const icu::UnicodeSet* inclusion_set = uspoof_getInclusionUnicodeSet(status); allowed_set.addAll(*inclusion_set); allowed_set.remove(0x338u); allowed_set.remove(0x58au); // Armenian Hyphen allowed_set.remove(0x2010u); allowed_set.remove(0x2019u); // Right Single Quotation Mark allowed_set.remove(0x2027u); allowed_set.remove(0x30a0u); // Katakana-Hiragana Double Hyphen allowed_set.remove(0x2bbu); // Modifier Letter Turned Comma allowed_set.remove(0x2bcu); // Modifier Letter Apostrophe // Block modifier letter voicing. allowed_set.remove(0x2ecu); #if defined(OS_MACOSX) allowed_set.remove(0x0620u); allowed_set.remove(0x0F8Cu); allowed_set.remove(0x0F8Du); allowed_set.remove(0x0F8Eu); allowed_set.remove(0x0F8Fu); #endif allowed_set.remove(0x01CDu, 0x01DCu); // Latin Ext B; Pinyin allowed_set.remove(0x1C80u, 0x1C8Fu); // Cyrillic Extended-C allowed_set.remove(0x1E00u, 0x1E9Bu); // Latin Extended Additional allowed_set.remove(0x1F00u, 0x1FFFu); // Greek Extended allowed_set.remove(0xA640u, 0xA69Fu); // Cyrillic Extended-B allowed_set.remove(0xA720u, 0xA7FFu); // Latin Extended-D uspoof_setAllowedUnicodeSet(checker_, &allowed_set, status); }
CWE-20
1
int TS_OBJ_print_bio(BIO *bio, const ASN1_OBJECT *obj)
{
    char obj_txt[128];

    OBJ_obj2txt(obj_txt, sizeof(obj_txt), obj, 0);
    BIO_printf(bio, "%s\n", obj_txt);

    return 1;
}
CWE-125
1
static int futex_wait(u32 __user *uaddr, int fshared, u32 val, ktime_t *abs_time, u32 bitset, int clockrt) { struct hrtimer_sleeper timeout, *to = NULL; struct restart_block *restart; struct futex_hash_bucket *hb; struct futex_q q; int ret; if (!bitset) return -EINVAL; q.pi_state = NULL; q.bitset = bitset; q.rt_waiter = NULL; q.requeue_pi_key = NULL; if (abs_time) { to = &timeout; hrtimer_init_on_stack(&to->timer, clockrt ? CLOCK_REALTIME : CLOCK_MONOTONIC, HRTIMER_MODE_ABS); hrtimer_init_sleeper(to, current); hrtimer_set_expires_range_ns(&to->timer, *abs_time, current->timer_slack_ns); } retry: /* * Prepare to wait on uaddr. On success, holds hb lock and increments * q.key refs. */ ret = futex_wait_setup(uaddr, val, fshared, &q, &hb); if (ret) goto out; /* queue_me and wait for wakeup, timeout, or a signal. */ futex_wait_queue_me(hb, &q, to); /* If we were woken (and unqueued), we succeeded, whatever. */ ret = 0; /* unqueue_me() drops q.key ref */ if (!unqueue_me(&q)) goto out; ret = -ETIMEDOUT; if (to && !to->task) goto out; /* * We expect signal_pending(current), but we might be the * victim of a spurious wakeup as well. */ if (!signal_pending(current)) goto retry; ret = -ERESTARTSYS; if (!abs_time) goto out; restart = &current_thread_info()->restart_block; restart->fn = futex_wait_restart; restart->futex.uaddr = (u32 *)uaddr; restart->futex.val = val; restart->futex.time = abs_time->tv64; restart->futex.bitset = bitset; restart->futex.flags = FLAGS_HAS_TIMEOUT; if (fshared) restart->futex.flags |= FLAGS_SHARED; if (clockrt) restart->futex.flags |= FLAGS_CLOCKRT; ret = -ERESTART_RESTARTBLOCK; out: if (to) { hrtimer_cancel(&to->timer); destroy_hrtimer_on_stack(&to->timer); } return ret; }
CWE-119
1
long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg, unsigned int flags) { struct fuse_file *ff = file->private_data; struct fuse_conn *fc = ff->fc; struct fuse_ioctl_in inarg = { .fh = ff->fh, .cmd = cmd, .arg = arg, .flags = flags }; struct fuse_ioctl_out outarg; struct fuse_req *req = NULL; struct page **pages = NULL; struct page *iov_page = NULL; struct iovec *in_iov = NULL, *out_iov = NULL; unsigned int in_iovs = 0, out_iovs = 0, num_pages = 0, max_pages; size_t in_size, out_size, transferred; int err; /* assume all the iovs returned by client always fits in a page */ BUILD_BUG_ON(sizeof(struct iovec) * FUSE_IOCTL_MAX_IOV > PAGE_SIZE); err = -ENOMEM; pages = kzalloc(sizeof(pages[0]) * FUSE_MAX_PAGES_PER_REQ, GFP_KERNEL); iov_page = alloc_page(GFP_KERNEL); if (!pages || !iov_page) goto out; /* * If restricted, initialize IO parameters as encoded in @cmd. * RETRY from server is not allowed. */ if (!(flags & FUSE_IOCTL_UNRESTRICTED)) { struct iovec *iov = page_address(iov_page); iov->iov_base = (void __user *)arg; iov->iov_len = _IOC_SIZE(cmd); if (_IOC_DIR(cmd) & _IOC_WRITE) { in_iov = iov; in_iovs = 1; } if (_IOC_DIR(cmd) & _IOC_READ) { out_iov = iov; out_iovs = 1; } } retry: inarg.in_size = in_size = iov_length(in_iov, in_iovs); inarg.out_size = out_size = iov_length(out_iov, out_iovs); /* * Out data can be used either for actual out data or iovs, * make sure there always is at least one page. */ out_size = max_t(size_t, out_size, PAGE_SIZE); max_pages = DIV_ROUND_UP(max(in_size, out_size), PAGE_SIZE); /* make sure there are enough buffer pages and init request with them */ err = -ENOMEM; if (max_pages > FUSE_MAX_PAGES_PER_REQ) goto out; while (num_pages < max_pages) { pages[num_pages] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM); if (!pages[num_pages]) goto out; num_pages++; } req = fuse_get_req(fc); if (IS_ERR(req)) { err = PTR_ERR(req); req = NULL; goto out; } memcpy(req->pages, pages, sizeof(req->pages[0]) * num_pages); req->num_pages = num_pages; /* okay, let's send it to the client */ req->in.h.opcode = FUSE_IOCTL; req->in.h.nodeid = ff->nodeid; req->in.numargs = 1; req->in.args[0].size = sizeof(inarg); req->in.args[0].value = &inarg; if (in_size) { req->in.numargs++; req->in.args[1].size = in_size; req->in.argpages = 1; err = fuse_ioctl_copy_user(pages, in_iov, in_iovs, in_size, false); if (err) goto out; } req->out.numargs = 2; req->out.args[0].size = sizeof(outarg); req->out.args[0].value = &outarg; req->out.args[1].size = out_size; req->out.argpages = 1; req->out.argvar = 1; fuse_request_send(fc, req); err = req->out.h.error; transferred = req->out.args[1].size; fuse_put_request(fc, req); req = NULL; if (err) goto out; /* did it ask for retry? */ if (outarg.flags & FUSE_IOCTL_RETRY) { char *vaddr; /* no retry if in restricted mode */ err = -EIO; if (!(flags & FUSE_IOCTL_UNRESTRICTED)) goto out; in_iovs = outarg.in_iovs; out_iovs = outarg.out_iovs; /* * Make sure things are in boundary, separate checks * are to protect against overflow. 
*/ err = -ENOMEM; if (in_iovs > FUSE_IOCTL_MAX_IOV || out_iovs > FUSE_IOCTL_MAX_IOV || in_iovs + out_iovs > FUSE_IOCTL_MAX_IOV) goto out; vaddr = kmap_atomic(pages[0], KM_USER0); err = fuse_copy_ioctl_iovec(page_address(iov_page), vaddr, transferred, in_iovs + out_iovs, (flags & FUSE_IOCTL_COMPAT) != 0); kunmap_atomic(vaddr, KM_USER0); if (err) goto out; in_iov = page_address(iov_page); out_iov = in_iov + in_iovs; err = fuse_verify_ioctl_iov(in_iov, in_iovs); if (err) goto out; err = fuse_verify_ioctl_iov(out_iov, out_iovs); if (err) goto out; goto retry; } err = -EIO; if (transferred > inarg.out_size) goto out; err = fuse_ioctl_copy_user(pages, out_iov, out_iovs, transferred, true); out: if (req) fuse_put_request(fc, req); if (iov_page) __free_page(iov_page); while (num_pages) __free_page(pages[--num_pages]); kfree(pages); return err ? err : outarg.result; }
CWE-119
1
void RendererSchedulerImpl::OnShutdownTaskQueue(
    const scoped_refptr<MainThreadTaskQueue>& task_queue) {
  if (main_thread_only().was_shutdown)
    return;
  if (task_queue_throttler_)
    task_queue_throttler_->ShutdownTaskQueue(task_queue.get());
  if (task_runners_.erase(task_queue)) {
    switch (task_queue->queue_class()) {
      case MainThreadTaskQueue::QueueClass::kTimer:
        task_queue->RemoveTaskObserver(
            &main_thread_only().timer_task_cost_estimator);
        break;
      case MainThreadTaskQueue::QueueClass::kLoading:
        task_queue->RemoveTaskObserver(
            &main_thread_only().loading_task_cost_estimator);
        break;
      default:
        break;
    }
  }
}
CWE-119
1
static int readContigStripsIntoBuffer (TIFF* in, uint8* buf) { uint8* bufp = buf; int32 bytes_read = 0; uint32 strip, nstrips = TIFFNumberOfStrips(in); uint32 stripsize = TIFFStripSize(in); uint32 rows = 0; uint32 rps = TIFFGetFieldDefaulted(in, TIFFTAG_ROWSPERSTRIP, &rps); tsize_t scanline_size = TIFFScanlineSize(in); if (scanline_size == 0) { TIFFError("", "TIFF scanline size is zero!"); return 0; } for (strip = 0; strip < nstrips; strip++) { bytes_read = TIFFReadEncodedStrip (in, strip, bufp, -1); rows = bytes_read / scanline_size; if ((strip < (nstrips - 1)) && (bytes_read != (int32)stripsize)) TIFFError("", "Strip %d: read %lu bytes, strip size %lu", (int)strip + 1, (unsigned long) bytes_read, (unsigned long)stripsize); if (bytes_read < 0 && !ignore) { TIFFError("", "Error reading strip %lu after %lu rows", (unsigned long) strip, (unsigned long)rows); return 0; } bufp += stripsize; } return 1; } /* end readContigStripsIntoBuffer */
CWE-119
1
get_matching_model_microcode(int cpu, unsigned long start, void *data, size_t size, struct mc_saved_data *mc_saved_data, unsigned long *mc_saved_in_initrd, struct ucode_cpu_info *uci) { u8 *ucode_ptr = data; unsigned int leftover = size; enum ucode_state state = UCODE_OK; unsigned int mc_size; struct microcode_header_intel *mc_header; struct microcode_intel *mc_saved_tmp[MAX_UCODE_COUNT]; unsigned int mc_saved_count = mc_saved_data->mc_saved_count; int i; while (leftover && mc_saved_count < ARRAY_SIZE(mc_saved_tmp)) { mc_header = (struct microcode_header_intel *)ucode_ptr; mc_size = get_totalsize(mc_header); if (!mc_size || mc_size > leftover || microcode_sanity_check(ucode_ptr, 0) < 0) break; leftover -= mc_size; /* * Since APs with same family and model as the BSP may boot in * the platform, we need to find and save microcode patches * with the same family and model as the BSP. */ if (matching_model_microcode(mc_header, uci->cpu_sig.sig) != UCODE_OK) { ucode_ptr += mc_size; continue; } _save_mc(mc_saved_tmp, ucode_ptr, &mc_saved_count); ucode_ptr += mc_size; } if (leftover) { state = UCODE_ERROR; goto out; } if (mc_saved_count == 0) { state = UCODE_NFOUND; goto out; } for (i = 0; i < mc_saved_count; i++) mc_saved_in_initrd[i] = (unsigned long)mc_saved_tmp[i] - start; mc_saved_data->mc_saved_count = mc_saved_count; out: return state; }
CWE-119
1
int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option) { unsigned char *opt = *option; unsigned char *tag; unsigned char opt_iter; unsigned char err_offset = 0; u8 opt_len; u8 tag_len; struct cipso_v4_doi *doi_def = NULL; u32 tag_iter; /* caller already checks for length values that are too large */ opt_len = opt[1]; if (opt_len < 8) { err_offset = 1; goto validate_return; } rcu_read_lock(); doi_def = cipso_v4_doi_search(get_unaligned_be32(&opt[2])); if (doi_def == NULL) { err_offset = 2; goto validate_return_locked; } opt_iter = CIPSO_V4_HDR_LEN; tag = opt + opt_iter; while (opt_iter < opt_len) { for (tag_iter = 0; doi_def->tags[tag_iter] != tag[0];) if (doi_def->tags[tag_iter] == CIPSO_V4_TAG_INVALID || ++tag_iter == CIPSO_V4_TAG_MAXCNT) { err_offset = opt_iter; goto validate_return_locked; } tag_len = tag[1]; if (tag_len > (opt_len - opt_iter)) { err_offset = opt_iter + 1; goto validate_return_locked; } switch (tag[0]) { case CIPSO_V4_TAG_RBITMAP: if (tag_len < CIPSO_V4_TAG_RBM_BLEN) { err_offset = opt_iter + 1; goto validate_return_locked; } /* We are already going to do all the verification * necessary at the socket layer so from our point of * view it is safe to turn these checks off (and less * work), however, the CIPSO draft says we should do * all the CIPSO validations here but it doesn't * really specify _exactly_ what we need to validate * ... so, just make it a sysctl tunable. */ if (cipso_v4_rbm_strictvalid) { if (cipso_v4_map_lvl_valid(doi_def, tag[3]) < 0) { err_offset = opt_iter + 3; goto validate_return_locked; } if (tag_len > CIPSO_V4_TAG_RBM_BLEN && cipso_v4_map_cat_rbm_valid(doi_def, &tag[4], tag_len - 4) < 0) { err_offset = opt_iter + 4; goto validate_return_locked; } } break; case CIPSO_V4_TAG_ENUM: if (tag_len < CIPSO_V4_TAG_ENUM_BLEN) { err_offset = opt_iter + 1; goto validate_return_locked; } if (cipso_v4_map_lvl_valid(doi_def, tag[3]) < 0) { err_offset = opt_iter + 3; goto validate_return_locked; } if (tag_len > CIPSO_V4_TAG_ENUM_BLEN && cipso_v4_map_cat_enum_valid(doi_def, &tag[4], tag_len - 4) < 0) { err_offset = opt_iter + 4; goto validate_return_locked; } break; case CIPSO_V4_TAG_RANGE: if (tag_len < CIPSO_V4_TAG_RNG_BLEN) { err_offset = opt_iter + 1; goto validate_return_locked; } if (cipso_v4_map_lvl_valid(doi_def, tag[3]) < 0) { err_offset = opt_iter + 3; goto validate_return_locked; } if (tag_len > CIPSO_V4_TAG_RNG_BLEN && cipso_v4_map_cat_rng_valid(doi_def, &tag[4], tag_len - 4) < 0) { err_offset = opt_iter + 4; goto validate_return_locked; } break; case CIPSO_V4_TAG_LOCAL: /* This is a non-standard tag that we only allow for * local connections, so if the incoming interface is * not the loopback device drop the packet. Further, * there is no legitimate reason for setting this from * userspace so reject it if skb is NULL. */ if (skb == NULL || !(skb->dev->flags & IFF_LOOPBACK)) { err_offset = opt_iter; goto validate_return_locked; } if (tag_len != CIPSO_V4_TAG_LOC_BLEN) { err_offset = opt_iter + 1; goto validate_return_locked; } break; default: err_offset = opt_iter; goto validate_return_locked; } tag += tag_len; opt_iter += tag_len; } validate_return_locked: rcu_read_unlock(); validate_return: *option = opt + err_offset; return err_offset; }
CWE-119
1
static int xfrm6_tunnel_rcv(struct sk_buff *skb)
{
    struct ipv6hdr *iph = ipv6_hdr(skb);
    __be32 spi;

    spi = xfrm6_tunnel_spi_lookup((xfrm_address_t *)&iph->saddr);
    return xfrm6_rcv_spi(skb, spi) > 0 ? : 0;
}
CWE-399
1
static int logi_dj_raw_event(struct hid_device *hdev, struct hid_report *report, u8 *data, int size) { struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev); struct dj_report *dj_report = (struct dj_report *) data; unsigned long flags; bool report_processed = false; dbg_hid("%s, size:%d\n", __func__, size); /* Here we receive all data coming from iface 2, there are 4 cases: * * 1) Data should continue its normal processing i.e. data does not * come from the DJ collection, in which case we do nothing and * return 0, so hid-core can continue normal processing (will forward * to associated hidraw device) * * 2) Data is from DJ collection, and is intended for this driver i. e. * data contains arrival, departure, etc notifications, in which case * we queue them for delayed processing by the work queue. We return 1 * to hid-core as no further processing is required from it. * * 3) Data is from DJ collection, and informs a connection change, * if the change means rf link loss, then we must send a null report * to the upper layer to discard potentially pressed keys that may be * repeated forever by the input layer. Return 1 to hid-core as no * further processing is required. * * 4) Data is from DJ collection and is an actual input event from * a paired DJ device in which case we forward it to the correct hid * device (via hid_input_report() ) and return 1 so hid-core does not do * anything else with it. */ if ((dj_report->device_index < DJ_DEVICE_INDEX_MIN) || (dj_report->device_index > DJ_DEVICE_INDEX_MAX)) { dev_err(&hdev->dev, "%s: invalid device index:%d\n", __func__, dj_report->device_index); return false; } spin_lock_irqsave(&djrcv_dev->lock, flags); if (dj_report->report_id == REPORT_ID_DJ_SHORT) { switch (dj_report->report_type) { case REPORT_TYPE_NOTIF_DEVICE_PAIRED: case REPORT_TYPE_NOTIF_DEVICE_UNPAIRED: logi_dj_recv_queue_notification(djrcv_dev, dj_report); break; case REPORT_TYPE_NOTIF_CONNECTION_STATUS: if (dj_report->report_params[CONNECTION_STATUS_PARAM_STATUS] == STATUS_LINKLOSS) { logi_dj_recv_forward_null_report(djrcv_dev, dj_report); } break; default: logi_dj_recv_forward_report(djrcv_dev, dj_report); } report_processed = true; } spin_unlock_irqrestore(&djrcv_dev->lock, flags); return report_processed; }
CWE-119
1
handle_mlppp(netdissect_options *ndo,
             const u_char *p, int length)
{
    if (!ndo->ndo_eflag)
        ND_PRINT((ndo, "MLPPP, "));

    if (length < 2) {
        ND_PRINT((ndo, "[|mlppp]"));
        return;
    }
    if (!ND_TTEST_16BITS(p)) {
        ND_PRINT((ndo, "[|mlppp]"));
        return;
    }

    ND_PRINT((ndo, "seq 0x%03x, Flags [%s], length %u",
           (EXTRACT_16BITS(p))&0x0fff, /* only support 12-Bit sequence space for now */
           bittok2str(ppp_ml_flag_values, "none", *p & 0xc0),
           length));
}
CWE-125
1
ProcSendEvent(ClientPtr client) { WindowPtr pWin; WindowPtr effectiveFocus = NullWindow; /* only set if dest==InputFocus */ DeviceIntPtr dev = PickPointer(client); DeviceIntPtr keybd = GetMaster(dev, MASTER_KEYBOARD); SpritePtr pSprite = dev->spriteInfo->sprite; REQUEST(xSendEventReq); REQUEST_SIZE_MATCH(xSendEventReq); /* libXext and other extension libraries may set the bit indicating * that this event came from a SendEvent request so remove it * since otherwise the event type may fail the range checks * and cause an invalid BadValue error to be returned. * * This is safe to do since we later add the SendEvent bit (0x80) * back in once we send the event to the client */ stuff->event.u.u.type &= ~(SEND_EVENT_BIT); /* The client's event type must be a core event type or one defined by an extension. */ if (!((stuff->event.u.u.type > X_Reply && stuff->event.u.u.type < LASTEvent) || (stuff->event.u.u.type >= EXTENSION_EVENT_BASE && stuff->event.u.u.type < (unsigned) lastEvent))) { client->errorValue = stuff->event.u.u.type; return BadValue; } /* Generic events can have variable size, but SendEvent request holds exactly 32B of event data. */ if (stuff->event.u.u.type == GenericEvent) { client->errorValue = stuff->event.u.u.type; return BadValue; } if (stuff->event.u.u.type == ClientMessage && stuff->event.u.u.detail != 8 && stuff->event.u.u.detail != 16 && stuff->event.u.u.detail != 32) { } if (stuff->destination == PointerWindow) pWin = pSprite->win; else if (stuff->destination == InputFocus) { WindowPtr inputFocus = (keybd) ? keybd->focus->win : NoneWin; if (inputFocus == NoneWin) return Success; /* If the input focus is PointerRootWin, send the event to where the pointer is if possible, then perhaps propogate up to root. */ if (inputFocus == PointerRootWin) inputFocus = GetCurrentRootWindow(dev); if (IsParent(inputFocus, pSprite->win)) { effectiveFocus = inputFocus; pWin = pSprite->win; } else effectiveFocus = pWin = inputFocus; } else dixLookupWindow(&pWin, stuff->destination, client, DixSendAccess); if (!pWin) return BadWindow; if ((stuff->propagate != xFalse) && (stuff->propagate != xTrue)) { client->errorValue = stuff->propagate; return BadValue; } stuff->event.u.u.type |= SEND_EVENT_BIT; if (stuff->propagate) { for (; pWin; pWin = pWin->parent) { if (XaceHook(XACE_SEND_ACCESS, client, NULL, pWin, &stuff->event, 1)) return Success; if (DeliverEventsToWindow(dev, pWin, &stuff->event, 1, stuff->eventMask, NullGrab)) return Success; if (pWin == effectiveFocus) return Success; stuff->eventMask &= ~wDontPropagateMask(pWin); if (!stuff->eventMask) break; } } else if (!XaceHook(XACE_SEND_ACCESS, client, NULL, pWin, &stuff->event, 1)) DeliverEventsToWindow(dev, pWin, &stuff->event, 1, stuff->eventMask, NullGrab); return Success; }
CWE-119
1
static int dispatch_discard_io(struct xen_blkif *blkif, struct blkif_request *req) { int err = 0; int status = BLKIF_RSP_OKAY; struct block_device *bdev = blkif->vbd.bdev; unsigned long secure; struct phys_req preq; preq.sector_number = req->u.discard.sector_number; preq.nr_sects = req->u.discard.nr_sectors; err = xen_vbd_translate(&preq, blkif, WRITE); if (err) { pr_warn(DRV_PFX "access denied: DISCARD [%llu->%llu] on dev=%04x\n", preq.sector_number, preq.sector_number + preq.nr_sects, blkif->vbd.pdevice); goto fail_response; } blkif->st_ds_req++; xen_blkif_get(blkif); secure = (blkif->vbd.discard_secure && (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ? BLKDEV_DISCARD_SECURE : 0; err = blkdev_issue_discard(bdev, req->u.discard.sector_number, req->u.discard.nr_sectors, GFP_KERNEL, secure); fail_response: if (err == -EOPNOTSUPP) { pr_debug(DRV_PFX "discard op failed, not supported\n"); status = BLKIF_RSP_EOPNOTSUPP; } else if (err) status = BLKIF_RSP_ERROR; make_response(blkif, req->u.discard.id, req->operation, status); xen_blkif_put(blkif); return err; }
CWE-20
1
static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { struct net *net = sock_net(in_skb->sk); struct rtmsg *rtm; struct nlattr *tb[RTA_MAX+1]; struct fib_result res = {}; struct rtable *rt = NULL; struct flowi4 fl4; __be32 dst = 0; __be32 src = 0; u32 iif; int err; int mark; struct sk_buff *skb; u32 table_id = RT_TABLE_MAIN; kuid_t uid; err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy, extack); if (err < 0) goto errout; rtm = nlmsg_data(nlh); skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); if (!skb) { err = -ENOBUFS; goto errout; } /* Reserve room for dummy headers, this skb can pass through good chunk of routing engine. */ skb_reset_mac_header(skb); skb_reset_network_header(skb); src = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0; dst = tb[RTA_DST] ? nla_get_in_addr(tb[RTA_DST]) : 0; iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0; mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0; if (tb[RTA_UID]) uid = make_kuid(current_user_ns(), nla_get_u32(tb[RTA_UID])); else uid = (iif ? INVALID_UID : current_uid()); /* Bugfix: need to give ip_route_input enough of an IP header to * not gag. */ ip_hdr(skb)->protocol = IPPROTO_UDP; ip_hdr(skb)->saddr = src; ip_hdr(skb)->daddr = dst; skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr)); memset(&fl4, 0, sizeof(fl4)); fl4.daddr = dst; fl4.saddr = src; fl4.flowi4_tos = rtm->rtm_tos; fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0; fl4.flowi4_mark = mark; fl4.flowi4_uid = uid; rcu_read_lock(); if (iif) { struct net_device *dev; dev = dev_get_by_index_rcu(net, iif); if (!dev) { err = -ENODEV; goto errout_free; } skb->protocol = htons(ETH_P_IP); skb->dev = dev; skb->mark = mark; err = ip_route_input_rcu(skb, dst, src, rtm->rtm_tos, dev, &res); rt = skb_rtable(skb); if (err == 0 && rt->dst.error) err = -rt->dst.error; } else { rt = ip_route_output_key_hash_rcu(net, &fl4, &res, skb); err = 0; if (IS_ERR(rt)) err = PTR_ERR(rt); else skb_dst_set(skb, &rt->dst); } if (err) goto errout_free; if (rtm->rtm_flags & RTM_F_NOTIFY) rt->rt_flags |= RTCF_NOTIFY; if (rtm->rtm_flags & RTM_F_LOOKUP_TABLE) table_id = rt->rt_table_id; if (rtm->rtm_flags & RTM_F_FIB_MATCH) { if (!res.fi) { err = fib_props[res.type].error; if (!err) err = -EHOSTUNREACH; goto errout_free; } err = fib_dump_info(skb, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq, RTM_NEWROUTE, table_id, rt->rt_type, res.prefix, res.prefixlen, fl4.flowi4_tos, res.fi, 0); } else { err = rt_fill_info(net, dst, src, table_id, &fl4, skb, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq); } if (err < 0) goto errout_free; rcu_read_unlock(); err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid); errout: return err; errout_free: rcu_read_unlock(); kfree_skb(skb); goto errout; }
CWE-476
1
int WavpackVerifySingleBlock (unsigned char *buffer, int verify_checksum) { WavpackHeader *wphdr = (WavpackHeader *) buffer; uint32_t checksum_passed = 0, bcount, meta_bc; unsigned char *dp, meta_id, c1, c2; if (strncmp (wphdr->ckID, "wvpk", 4) || wphdr->ckSize + 8 < sizeof (WavpackHeader)) return FALSE; bcount = wphdr->ckSize - sizeof (WavpackHeader) + 8; dp = (unsigned char *)(wphdr + 1); while (bcount >= 2) { meta_id = *dp++; c1 = *dp++; meta_bc = c1 << 1; bcount -= 2; if (meta_id & ID_LARGE) { if (bcount < 2) return FALSE; c1 = *dp++; c2 = *dp++; meta_bc += ((uint32_t) c1 << 9) + ((uint32_t) c2 << 17); bcount -= 2; } if (bcount < meta_bc) return FALSE; if (verify_checksum && (meta_id & ID_UNIQUE) == ID_BLOCK_CHECKSUM) { #ifdef BITSTREAM_SHORTS uint16_t *csptr = (uint16_t*) buffer; #else unsigned char *csptr = buffer; #endif int wcount = (int)(dp - 2 - buffer) >> 1; uint32_t csum = (uint32_t) -1; if ((meta_id & ID_ODD_SIZE) || meta_bc < 2 || meta_bc > 4) return FALSE; #ifdef BITSTREAM_SHORTS while (wcount--) csum = (csum * 3) + *csptr++; #else WavpackNativeToLittleEndian ((WavpackHeader *) buffer, WavpackHeaderFormat); while (wcount--) { csum = (csum * 3) + csptr [0] + (csptr [1] << 8); csptr += 2; } WavpackLittleEndianToNative ((WavpackHeader *) buffer, WavpackHeaderFormat); #endif if (meta_bc == 4) { if (*dp != (csum & 0xff) || dp[1] != ((csum >> 8) & 0xff) || dp[2] != ((csum >> 16) & 0xff) || dp[3] != ((csum >> 24) & 0xff)) return FALSE; } else { csum ^= csum >> 16; if (*dp != (csum & 0xff) || dp[1] != ((csum >> 8) & 0xff)) return FALSE; } checksum_passed++; } bcount -= meta_bc; dp += meta_bc; } return (bcount == 0) && (!verify_checksum || !(wphdr->flags & HAS_CHECKSUM) || checksum_passed); }
CWE-125
1
TEE_Result tee_mmu_check_access_rights(const struct user_ta_ctx *utc, uint32_t flags, uaddr_t uaddr, size_t len) { uaddr_t a; uaddr_t end_addr = 0; size_t addr_incr = MIN(CORE_MMU_USER_CODE_SIZE, CORE_MMU_USER_PARAM_SIZE); if (ADD_OVERFLOW(uaddr, len, &end_addr)) return TEE_ERROR_ACCESS_DENIED; if ((flags & TEE_MEMORY_ACCESS_NONSECURE) && (flags & TEE_MEMORY_ACCESS_SECURE)) return TEE_ERROR_ACCESS_DENIED; /* * Rely on TA private memory test to check if address range is private * to TA or not. */ if (!(flags & TEE_MEMORY_ACCESS_ANY_OWNER) && !tee_mmu_is_vbuf_inside_ta_private(utc, (void *)uaddr, len)) return TEE_ERROR_ACCESS_DENIED; for (a = ROUNDDOWN(uaddr, addr_incr); a < end_addr; a += addr_incr) { uint32_t attr; TEE_Result res; res = tee_mmu_user_va2pa_attr(utc, (void *)a, NULL, &attr); if (res != TEE_SUCCESS) return res; if ((flags & TEE_MEMORY_ACCESS_NONSECURE) && (attr & TEE_MATTR_SECURE)) return TEE_ERROR_ACCESS_DENIED; if ((flags & TEE_MEMORY_ACCESS_SECURE) && !(attr & TEE_MATTR_SECURE)) return TEE_ERROR_ACCESS_DENIED; if ((flags & TEE_MEMORY_ACCESS_WRITE) && !(attr & TEE_MATTR_UW)) return TEE_ERROR_ACCESS_DENIED; if ((flags & TEE_MEMORY_ACCESS_READ) && !(attr & TEE_MATTR_UR)) return TEE_ERROR_ACCESS_DENIED; } return TEE_SUCCESS; }
CWE-20
1
void BindSkiaToInProcessGL() { static bool host_StubGL_installed = false; if (!host_StubGL_installed) { GrGLBinding binding; switch (gfx::GetGLImplementation()) { case gfx::kGLImplementationNone: NOTREACHED(); return; case gfx::kGLImplementationDesktopGL: binding = kDesktop_GrGLBinding; break; case gfx::kGLImplementationOSMesaGL: binding = kDesktop_GrGLBinding; break; case gfx::kGLImplementationEGLGLES2: binding = kES2_GrGLBinding; break; case gfx::kGLImplementationMockGL: NOTREACHED(); return; default: NOTREACHED(); return; } static GrGLInterface host_gl_interface = { binding, kProbe_GrGLCapability, // NPOTRenderTargetSupport kProbe_GrGLCapability, // MinRenderTargetHeight kProbe_GrGLCapability, // MinRenderTargetWidth StubGLActiveTexture, StubGLAttachShader, StubGLBindAttribLocation, StubGLBindBuffer, StubGLBindTexture, StubGLBlendColor, StubGLBlendFunc, StubGLBufferData, StubGLBufferSubData, StubGLClear, StubGLClearColor, StubGLClearStencil, NULL, // glClientActiveTexture NULL, // glColor4ub StubGLColorMask, NULL, // glColorPointer StubGLCompileShader, StubGLCompressedTexImage2D, StubGLCreateProgram, StubGLCreateShader, StubGLCullFace, StubGLDeleteBuffers, StubGLDeleteProgram, StubGLDeleteShader, StubGLDeleteTextures, StubGLDepthMask, StubGLDisable, NULL, // glDisableClientState StubGLDisableVertexAttribArray, StubGLDrawArrays, StubGLDrawElements, StubGLEnable, NULL, // glEnableClientState StubGLEnableVertexAttribArray, StubGLFrontFace, StubGLGenBuffers, StubGLGenTextures, StubGLGetBufferParameteriv, StubGLGetError, StubGLGetIntegerv, StubGLGetProgramInfoLog, StubGLGetProgramiv, StubGLGetShaderInfoLog, StubGLGetShaderiv, StubGLGetString, StubGLGetUniformLocation, StubGLLineWidth, StubGLLinkProgram, NULL, // glLoadMatrixf NULL, // glMatrixMode StubGLPixelStorei, NULL, // glPointSize StubGLReadPixels, StubGLScissor, NULL, // glShadeModel StubGLShaderSource, StubGLStencilFunc, StubGLStencilFuncSeparate, StubGLStencilMask, StubGLStencilMaskSeparate, StubGLStencilOp, StubGLStencilOpSeparate, NULL, // glTexCoordPointer NULL, // glTexEnvi StubGLTexImage2D, StubGLTexParameteri, StubGLTexSubImage2D, StubGLUniform1f, StubGLUniform1i, StubGLUniform1fv, StubGLUniform1iv, StubGLUniform2f, StubGLUniform2i, StubGLUniform2fv, StubGLUniform2iv, StubGLUniform3f, StubGLUniform3i, StubGLUniform3fv, StubGLUniform3iv, StubGLUniform4f, StubGLUniform4i, StubGLUniform4fv, StubGLUniform4iv, StubGLUniformMatrix2fv, StubGLUniformMatrix3fv, StubGLUniformMatrix4fv, StubGLUseProgram, StubGLVertexAttrib4fv, StubGLVertexAttribPointer, NULL, // glVertexPointer StubGLViewport, StubGLBindFramebuffer, StubGLBindRenderbuffer, StubGLCheckFramebufferStatus, StubGLDeleteFramebuffers, StubGLDeleteRenderbuffers, StubGLFramebufferRenderbuffer, StubGLFramebufferTexture2D, StubGLGenFramebuffers, StubGLGenRenderbuffers, StubGLRenderBufferStorage, StubGLRenderbufferStorageMultisample, StubGLBlitFramebuffer, NULL, // glResolveMultisampleFramebuffer StubGLMapBuffer, StubGLUnmapBuffer, NULL, // glBindFragDataLocationIndexed GrGLInterface::kStaticInitEndGuard, }; GrGLSetGLInterface(&host_gl_interface); host_StubGL_installed = true; } }
CWE-189
1
virtual void SetUpCommandLine(CommandLine* command_line) {
    GpuFeatureTest::SetUpCommandLine(command_line);
    command_line->AppendSwitch(switches::kDisableAcceleratedCompositing);
}
CWE-399
1
static int snd_timer_user_tselect(struct file *file, struct snd_timer_select __user *_tselect) { struct snd_timer_user *tu; struct snd_timer_select tselect; char str[32]; int err = 0; tu = file->private_data; if (tu->timeri) { snd_timer_close(tu->timeri); tu->timeri = NULL; } if (copy_from_user(&tselect, _tselect, sizeof(tselect))) { err = -EFAULT; goto __err; } sprintf(str, "application %i", current->pid); if (tselect.id.dev_class != SNDRV_TIMER_CLASS_SLAVE) tselect.id.dev_sclass = SNDRV_TIMER_SCLASS_APPLICATION; err = snd_timer_open(&tu->timeri, str, &tselect.id, current->pid); if (err < 0) goto __err; tu->qhead = tu->qtail = tu->qused = 0; kfree(tu->queue); tu->queue = NULL; kfree(tu->tqueue); tu->tqueue = NULL; if (tu->tread) { tu->tqueue = kmalloc(tu->queue_size * sizeof(struct snd_timer_tread), GFP_KERNEL); if (tu->tqueue == NULL) err = -ENOMEM; } else { tu->queue = kmalloc(tu->queue_size * sizeof(struct snd_timer_read), GFP_KERNEL); if (tu->queue == NULL) err = -ENOMEM; } if (err < 0) { snd_timer_close(tu->timeri); tu->timeri = NULL; } else { tu->timeri->flags |= SNDRV_TIMER_IFLG_FAST; tu->timeri->callback = tu->tread ? snd_timer_user_tinterrupt : snd_timer_user_interrupt; tu->timeri->ccallback = snd_timer_user_ccallback; tu->timeri->callback_data = (void *)tu; tu->timeri->disconnect = snd_timer_user_disconnect; } __err: return err; }
CWE-200
1
void EVP_EncodeUpdate(EVP_ENCODE_CTX *ctx, unsigned char *out, int *outl, const unsigned char *in, int inl) { int i, j; unsigned int total = 0; *outl = 0; if (inl <= 0) return; OPENSSL_assert(ctx->length <= (int)sizeof(ctx->enc_data)); if (ctx->length - ctx->num > inl) { memcpy(&(ctx->enc_data[ctx->num]), in, inl); ctx->num += inl; return; } if (ctx->num != 0) { i = ctx->length - ctx->num; memcpy(&(ctx->enc_data[ctx->num]), in, i); in += i; inl -= i; j = EVP_EncodeBlock(out, ctx->enc_data, ctx->length); ctx->num = 0; out += j; *(out++) = '\n'; *out = '\0'; total = j + 1; } while (inl >= ctx->length) { j = EVP_EncodeBlock(out, in, ctx->length); in += ctx->length; inl -= ctx->length; out += j; *(out++) = '\n'; *out = '\0'; total += j + 1; } if (inl != 0) memcpy(&(ctx->enc_data[0]), in, inl); ctx->num = inl; *outl = total; }
CWE-189
1
static int ipddp_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { struct ipddp_route __user *rt = ifr->ifr_data; struct ipddp_route rcp, rcp2, *rp; if(!capable(CAP_NET_ADMIN)) return -EPERM; if(copy_from_user(&rcp, rt, sizeof(rcp))) return -EFAULT; switch(cmd) { case SIOCADDIPDDPRT: return ipddp_create(&rcp); case SIOCFINDIPDDPRT: spin_lock_bh(&ipddp_route_lock); rp = __ipddp_find_route(&rcp); if (rp) { memset(&rcp2, 0, sizeof(rcp2)); rcp2.ip = rp->ip; rcp2.at = rp->at; rcp2.flags = rp->flags; } spin_unlock_bh(&ipddp_route_lock); if (rp) { if (copy_to_user(rt, &rcp2, sizeof(struct ipddp_route))) return -EFAULT; return 0; } else return -ENOENT; case SIOCDELIPDDPRT: return ipddp_delete(&rcp); default: return -EINVAL; } }
CWE-200
1
static void tg3_read_vpd(struct tg3 *tp) { u8 *vpd_data; unsigned int block_end, rosize, len; u32 vpdlen; int j, i = 0; vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen); if (!vpd_data) goto out_no_vpd; i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA); if (i < 0) goto out_not_found; rosize = pci_vpd_lrdt_size(&vpd_data[i]); block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize; i += PCI_VPD_LRDT_TAG_SIZE; if (block_end > vpdlen) goto out_not_found; j = pci_vpd_find_info_keyword(vpd_data, i, rosize, PCI_VPD_RO_KEYWORD_MFR_ID); if (j > 0) { len = pci_vpd_info_field_size(&vpd_data[j]); j += PCI_VPD_INFO_FLD_HDR_SIZE; if (j + len > block_end || len != 4 || memcmp(&vpd_data[j], "1028", 4)) goto partno; j = pci_vpd_find_info_keyword(vpd_data, i, rosize, PCI_VPD_RO_KEYWORD_VENDOR0); if (j < 0) goto partno; len = pci_vpd_info_field_size(&vpd_data[j]); j += PCI_VPD_INFO_FLD_HDR_SIZE; if (j + len > block_end) goto partno; if (len >= sizeof(tp->fw_ver)) len = sizeof(tp->fw_ver) - 1; memset(tp->fw_ver, 0, sizeof(tp->fw_ver)); snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len, &vpd_data[j]); } partno: i = pci_vpd_find_info_keyword(vpd_data, i, rosize, PCI_VPD_RO_KEYWORD_PARTNO); if (i < 0) goto out_not_found; len = pci_vpd_info_field_size(&vpd_data[i]); i += PCI_VPD_INFO_FLD_HDR_SIZE; if (len > TG3_BPN_SIZE || (len + i) > vpdlen) goto out_not_found; memcpy(tp->board_part_number, &vpd_data[i], len); out_not_found: kfree(vpd_data); if (tp->board_part_number[0]) return; out_no_vpd: if (tg3_asic_rev(tp) == ASIC_REV_5717) { if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 || tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C) strcpy(tp->board_part_number, "BCM5717"); else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718) strcpy(tp->board_part_number, "BCM5718"); else goto nomatch; } else if (tg3_asic_rev(tp) == ASIC_REV_57780) { if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780) strcpy(tp->board_part_number, "BCM57780"); else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760) strcpy(tp->board_part_number, "BCM57760"); else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790) strcpy(tp->board_part_number, "BCM57790"); else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788) strcpy(tp->board_part_number, "BCM57788"); else goto nomatch; } else if (tg3_asic_rev(tp) == ASIC_REV_57765) { if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761) strcpy(tp->board_part_number, "BCM57761"); else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765) strcpy(tp->board_part_number, "BCM57765"); else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781) strcpy(tp->board_part_number, "BCM57781"); else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785) strcpy(tp->board_part_number, "BCM57785"); else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791) strcpy(tp->board_part_number, "BCM57791"); else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795) strcpy(tp->board_part_number, "BCM57795"); else goto nomatch; } else if (tg3_asic_rev(tp) == ASIC_REV_57766) { if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762) strcpy(tp->board_part_number, "BCM57762"); else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766) strcpy(tp->board_part_number, "BCM57766"); else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782) strcpy(tp->board_part_number, "BCM57782"); else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786) strcpy(tp->board_part_number, "BCM57786"); else goto nomatch; } else if (tg3_asic_rev(tp) == ASIC_REV_5906) { strcpy(tp->board_part_number, "BCM95906"); } else { nomatch: strcpy(tp->board_part_number, "none"); } }
CWE-119
1
static int msg_parse_fetch(struct ImapHeader *h, char *s) { char tmp[SHORT_STRING]; char *ptmp = NULL; if (!s) return -1; while (*s) { SKIPWS(s); if (mutt_str_strncasecmp("FLAGS", s, 5) == 0) { s = msg_parse_flags(h, s); if (!s) return -1; } else if (mutt_str_strncasecmp("UID", s, 3) == 0) { s += 3; SKIPWS(s); if (mutt_str_atoui(s, &h->data->uid) < 0) return -1; s = imap_next_word(s); } else if (mutt_str_strncasecmp("INTERNALDATE", s, 12) == 0) { s += 12; SKIPWS(s); if (*s != '\"') { mutt_debug(1, "bogus INTERNALDATE entry: %s\n", s); return -1; } s++; ptmp = tmp; while (*s && (*s != '\"') && (ptmp != (tmp + sizeof(tmp) - 1))) *ptmp++ = *s++; if (*s != '\"') return -1; s++; /* skip past the trailing " */ *ptmp = '\0'; h->received = mutt_date_parse_imap(tmp); } else if (mutt_str_strncasecmp("RFC822.SIZE", s, 11) == 0) { s += 11; SKIPWS(s); ptmp = tmp; while (isdigit((unsigned char) *s) && (ptmp != (tmp + sizeof(tmp) - 1))) *ptmp++ = *s++; *ptmp = '\0'; if (mutt_str_atol(tmp, &h->content_length) < 0) return -1; } else if ((mutt_str_strncasecmp("BODY", s, 4) == 0) || (mutt_str_strncasecmp("RFC822.HEADER", s, 13) == 0)) { /* handle above, in msg_fetch_header */ return -2; } else if (*s == ')') s++; /* end of request */ else if (*s) { /* got something i don't understand */ imap_error("msg_parse_fetch", s); return -1; } } return 0; }
CWE-119
1
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) { flush_fp_to_thread(src); flush_altivec_to_thread(src); flush_vsx_to_thread(src); flush_spe_to_thread(src); /* * Flush TM state out so we can copy it. __switch_to_tm() does this * flush but it removes the checkpointed state from the current CPU and * transitions the CPU out of TM mode. Hence we need to call * tm_recheckpoint_new_task() (on the same task) to restore the * checkpointed state back and the TM mode. */ __switch_to_tm(src); tm_recheckpoint_new_task(src); *dst = *src; clear_task_ebb(dst); return 0; }
CWE-20
1
static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd) { struct socket *sock, *oldsock; struct vhost_virtqueue *vq; struct vhost_net_virtqueue *nvq; struct vhost_net_ubuf_ref *ubufs, *oldubufs = NULL; int r; mutex_lock(&n->dev.mutex); r = vhost_dev_check_owner(&n->dev); if (r) goto err; if (index >= VHOST_NET_VQ_MAX) { r = -ENOBUFS; goto err; } vq = &n->vqs[index].vq; nvq = &n->vqs[index]; mutex_lock(&vq->mutex); /* Verify that ring has been setup correctly. */ if (!vhost_vq_access_ok(vq)) { r = -EFAULT; goto err_vq; } sock = get_socket(fd); if (IS_ERR(sock)) { r = PTR_ERR(sock); goto err_vq; } /* start polling new socket */ oldsock = rcu_dereference_protected(vq->private_data, lockdep_is_held(&vq->mutex)); if (sock != oldsock) { ubufs = vhost_net_ubuf_alloc(vq, sock && vhost_sock_zcopy(sock)); if (IS_ERR(ubufs)) { r = PTR_ERR(ubufs); goto err_ubufs; } vhost_net_disable_vq(n, vq); rcu_assign_pointer(vq->private_data, sock); r = vhost_init_used(vq); if (r) goto err_used; r = vhost_net_enable_vq(n, vq); if (r) goto err_used; oldubufs = nvq->ubufs; nvq->ubufs = ubufs; n->tx_packets = 0; n->tx_zcopy_err = 0; n->tx_flush = false; } mutex_unlock(&vq->mutex); if (oldubufs) { vhost_net_ubuf_put_wait_and_free(oldubufs); mutex_lock(&vq->mutex); vhost_zerocopy_signal_used(n, vq); mutex_unlock(&vq->mutex); } if (oldsock) { vhost_net_flush_vq(n, index); fput(oldsock->file); } mutex_unlock(&n->dev.mutex); return 0; err_used: rcu_assign_pointer(vq->private_data, oldsock); vhost_net_enable_vq(n, vq); if (ubufs) vhost_net_ubuf_put_wait_and_free(ubufs); err_ubufs: fput(sock->file); err_vq: mutex_unlock(&vq->mutex); err: mutex_unlock(&n->dev.mutex); return r; }
CWE-399
1
static int host_start(struct ci13xxx *ci) { struct usb_hcd *hcd; struct ehci_hcd *ehci; int ret; if (usb_disabled()) return -ENODEV; hcd = usb_create_hcd(&ci_ehci_hc_driver, ci->dev, dev_name(ci->dev)); if (!hcd) return -ENOMEM; dev_set_drvdata(ci->dev, ci); hcd->rsrc_start = ci->hw_bank.phys; hcd->rsrc_len = ci->hw_bank.size; hcd->regs = ci->hw_bank.abs; hcd->has_tt = 1; hcd->power_budget = ci->platdata->power_budget; hcd->phy = ci->transceiver; ehci = hcd_to_ehci(hcd); ehci->caps = ci->hw_bank.cap; ehci->has_hostpc = ci->hw_bank.lpm; ret = usb_add_hcd(hcd, 0, 0); if (ret) usb_put_hcd(hcd); else ci->hcd = hcd; if (ci->platdata->flags & CI13XXX_DISABLE_STREAMING) hw_write(ci, OP_USBMODE, USBMODE_CI_SDIS, USBMODE_CI_SDIS); return ret; }
CWE-119
1
void MediaStreamDispatcherHost::DoOpenDevice( int32_t page_request_id, const std::string& device_id, blink::MediaStreamType type, OpenDeviceCallback callback, MediaDeviceSaltAndOrigin salt_and_origin) { DCHECK_CURRENTLY_ON(BrowserThread::IO); if (!MediaStreamManager::IsOriginAllowed(render_process_id_, salt_and_origin.origin)) { std::move(callback).Run(false /* success */, std::string(), blink::MediaStreamDevice()); return; } media_stream_manager_->OpenDevice( render_process_id_, render_frame_id_, requester_id_, page_request_id, device_id, type, std::move(salt_and_origin), std::move(callback), base::BindRepeating(&MediaStreamDispatcherHost::OnDeviceStopped, weak_factory_.GetWeakPtr())); }
CWE-119
1
static void Ins_JMPR( INS_ARG ) { if ( BOUNDS(CUR.IP + args[0], CUR.codeSize ) ) { CUR.error = TT_Err_Invalid_Reference; return; } CUR.IP += (Int)(args[0]); CUR.step_ins = FALSE; /* ... allow for simple cases here by just checking the preceding byte. * Fonts with this problem are not uncommon. */ CUR.IP -= 1; }
CWE-125
1
static Sdb *store_versioninfo_gnu_verdef(ELFOBJ *bin, Elf_(Shdr) *shdr, int sz) { const char *section_name = ""; const char *link_section_name = ""; char *end = NULL; Elf_(Shdr) *link_shdr = NULL; ut8 dfs[sizeof (Elf_(Verdef))] = {0}; Sdb *sdb; int cnt, i; if (shdr->sh_link > bin->ehdr.e_shnum) { return false; } link_shdr = &bin->shdr[shdr->sh_link]; if ((int)shdr->sh_size < 1) { return false; } Elf_(Verdef) *defs = calloc (shdr->sh_size, sizeof (char)); if (!defs) { return false; } if (bin->shstrtab && shdr->sh_name < bin->shstrtab_size) { section_name = &bin->shstrtab[shdr->sh_name]; } if (link_shdr && bin->shstrtab && link_shdr->sh_name < bin->shstrtab_size) { link_section_name = &bin->shstrtab[link_shdr->sh_name]; } if (!defs) { bprintf ("Warning: Cannot allocate memory (Check Elf_(Verdef))\n"); return NULL; } sdb = sdb_new0 (); end = (char *)defs + shdr->sh_size; sdb_set (sdb, "section_name", section_name, 0); sdb_num_set (sdb, "entries", shdr->sh_info, 0); sdb_num_set (sdb, "addr", shdr->sh_addr, 0); sdb_num_set (sdb, "offset", shdr->sh_offset, 0); sdb_num_set (sdb, "link", shdr->sh_link, 0); sdb_set (sdb, "link_section_name", link_section_name, 0); for (cnt = 0, i = 0; i >= 0 && cnt < shdr->sh_info && ((char *)defs + i < end); ++cnt) { Sdb *sdb_verdef = sdb_new0 (); char *vstart = ((char*)defs) + i; size_t vstart_off = i; char key[32] = {0}; Elf_(Verdef) *verdef = (Elf_(Verdef)*)vstart; Elf_(Verdaux) aux = {0}; int j = 0; int isum = 0; r_buf_read_at (bin->b, shdr->sh_offset + i, dfs, sizeof (Elf_(Verdef))); verdef->vd_version = READ16 (dfs, j) verdef->vd_flags = READ16 (dfs, j) verdef->vd_ndx = READ16 (dfs, j) verdef->vd_cnt = READ16 (dfs, j) verdef->vd_hash = READ32 (dfs, j) verdef->vd_aux = READ32 (dfs, j) verdef->vd_next = READ32 (dfs, j) int vdaux = verdef->vd_aux; if (vdaux < 1 || shdr->sh_size - vstart_off < vdaux) { sdb_free (sdb_verdef); goto out_error; } vstart += vdaux; vstart_off += vdaux; if (vstart > end || vstart + sizeof (Elf_(Verdaux)) > end) { sdb_free (sdb_verdef); goto out_error; } j = 0; aux.vda_name = READ32 (vstart, j) aux.vda_next = READ32 (vstart, j) isum = i + verdef->vd_aux; if (aux.vda_name > bin->dynstr_size) { sdb_free (sdb_verdef); goto out_error; } sdb_num_set (sdb_verdef, "idx", i, 0); sdb_num_set (sdb_verdef, "vd_version", verdef->vd_version, 0); sdb_num_set (sdb_verdef, "vd_ndx", verdef->vd_ndx, 0); sdb_num_set (sdb_verdef, "vd_cnt", verdef->vd_cnt, 0); sdb_set (sdb_verdef, "vda_name", &bin->dynstr[aux.vda_name], 0); sdb_set (sdb_verdef, "flags", get_ver_flags (verdef->vd_flags), 0); for (j = 1; j < verdef->vd_cnt; ++j) { int k; Sdb *sdb_parent = sdb_new0 (); isum += aux.vda_next; vstart += aux.vda_next; vstart_off += aux.vda_next; if (vstart > end || vstart + sizeof (Elf_(Verdaux)) > end) { sdb_free (sdb_verdef); sdb_free (sdb_parent); goto out_error; } k = 0; aux.vda_name = READ32 (vstart, k) aux.vda_next = READ32 (vstart, k) if (aux.vda_name > bin->dynstr_size) { sdb_free (sdb_verdef); sdb_free (sdb_parent); goto out_error; } sdb_num_set (sdb_parent, "idx", isum, 0); sdb_num_set (sdb_parent, "parent", j, 0); sdb_set (sdb_parent, "vda_name", &bin->dynstr[aux.vda_name], 0); snprintf (key, sizeof (key), "parent%d", j - 1); sdb_ns_set (sdb_verdef, key, sdb_parent); } snprintf (key, sizeof (key), "verdef%d", cnt); sdb_ns_set (sdb, key, sdb_verdef); if (!verdef->vd_next) { sdb_free (sdb_verdef); goto out_error; } if ((st32)verdef->vd_next < 1) { eprintf ("Warning: Invalid vd_next in the ELF version\n"); break; } i += verdef->vd_next; } free (defs); return sdb; out_error: free (defs); sdb_free (sdb); return NULL; }
CWE-476
1
static void chase_port(struct edgeport_port *port, unsigned long timeout, int flush) { int baud_rate; struct tty_struct *tty = tty_port_tty_get(&port->port->port); struct usb_serial *serial = port->port->serial; wait_queue_t wait; unsigned long flags; if (!tty) return; if (!timeout) timeout = (HZ * EDGE_CLOSING_WAIT)/100; /* wait for data to drain from the buffer */ spin_lock_irqsave(&port->ep_lock, flags); init_waitqueue_entry(&wait, current); add_wait_queue(&tty->write_wait, &wait); for (;;) { set_current_state(TASK_INTERRUPTIBLE); if (kfifo_len(&port->write_fifo) == 0 || timeout == 0 || signal_pending(current) || serial->disconnected) /* disconnect */ break; spin_unlock_irqrestore(&port->ep_lock, flags); timeout = schedule_timeout(timeout); spin_lock_irqsave(&port->ep_lock, flags); } set_current_state(TASK_RUNNING); remove_wait_queue(&tty->write_wait, &wait); if (flush) kfifo_reset_out(&port->write_fifo); spin_unlock_irqrestore(&port->ep_lock, flags); tty_kref_put(tty); /* wait for data to drain from the device */ timeout += jiffies; while ((long)(jiffies - timeout) < 0 && !signal_pending(current) && !serial->disconnected) { /* not disconnected */ if (!tx_active(port)) break; msleep(10); } /* disconnected */ if (serial->disconnected) return; /* wait one more character time, based on baud rate */ /* (tx_active doesn't seem to wait for the last byte) */ baud_rate = port->baud_rate; if (baud_rate == 0) baud_rate = 50; msleep(max(1, DIV_ROUND_UP(10000, baud_rate))); }
CWE-264
1
local unsigned long crc32_big(crc, buf, len) unsigned long crc; const unsigned char FAR *buf; unsigned len; { register z_crc_t c; register const z_crc_t FAR *buf4; c = ZSWAP32((z_crc_t)crc); c = ~c; while (len && ((ptrdiff_t)buf & 3)) { c = crc_table[4][(c >> 24) ^ *buf++] ^ (c << 8); len--; } buf4 = (const z_crc_t FAR *)(const void FAR *)buf; while (len >= 32) { DOBIG32; len -= 32; } while (len >= 4) { DOBIG4; len -= 4; } buf = (const unsigned char FAR *)buf4; if (len) do { c = crc_table[4][(c >> 24) ^ *buf++] ^ (c << 8); } while (--len); c = ~c; return (unsigned long)(ZSWAP32(c)); }
CWE-189
1
inline bool loadModule(const char* filename, IR::Module& outModule) { // Read the specified file into an array. std::vector<U8> fileBytes; if(!loadFile(filename, fileBytes)) { return false; } // If the file starts with the WASM binary magic number, load it as a binary irModule. if(fileBytes.size() >= 4 && *(U32*)fileBytes.data() == 0x6d736100) { return loadBinaryModule(fileBytes.data(), fileBytes.size(), outModule); } else { // Make sure the WAST file is null terminated. fileBytes.push_back(0); // Load it as a text irModule. std::vector<WAST::Error> parseErrors; if(!WAST::parseModule( (const char*)fileBytes.data(), fileBytes.size(), outModule, parseErrors)) { Log::printf(Log::error, "Error parsing WebAssembly text file:\n"); reportParseErrors(filename, parseErrors); return false; } return true; } }
CWE-125
1
static bool handle_client_startup(PgSocket *client, PktHdr *pkt) { const char *passwd; const uint8_t *key; bool ok; SBuf *sbuf = &client->sbuf; /* don't tolerate partial packets */ if (incomplete_pkt(pkt)) { disconnect_client(client, true, "client sent partial pkt in startup phase"); return false; } if (client->wait_for_welcome) { if (finish_client_login(client)) { /* the packet was already parsed */ sbuf_prepare_skip(sbuf, pkt->len); return true; } else return false; } switch (pkt->type) { case PKT_SSLREQ: slog_noise(client, "C: req SSL"); slog_noise(client, "P: nak"); /* reject SSL attempt */ if (!sbuf_answer(&client->sbuf, "N", 1)) { disconnect_client(client, false, "failed to nak SSL"); return false; } break; case PKT_STARTUP_V2: disconnect_client(client, true, "Old V2 protocol not supported"); return false; case PKT_STARTUP: if (client->pool) { disconnect_client(client, true, "client re-sent startup pkt"); return false; } if (!decide_startup_pool(client, pkt)) return false; if (client->pool->db->admin) { if (!admin_pre_login(client)) return false; } if (cf_auth_type <= AUTH_TRUST || client->own_user) { if (!finish_client_login(client)) return false; } else { if (!send_client_authreq(client)) { disconnect_client(client, false, "failed to send auth req"); return false; } } break; case 'p': /* PasswordMessage */ /* too early */ if (!client->auth_user) { disconnect_client(client, true, "client password pkt before startup packet"); return false; } /* haven't requested it */ if (cf_auth_type <= AUTH_TRUST) { disconnect_client(client, true, "unrequested passwd pkt"); return false; } ok = mbuf_get_string(&pkt->data, &passwd); if (ok && check_client_passwd(client, passwd)) { if (!finish_client_login(client)) return false; } else { disconnect_client(client, true, "Auth failed"); return false; } break; case PKT_CANCEL: if (mbuf_avail_for_read(&pkt->data) == BACKENDKEY_LEN && mbuf_get_bytes(&pkt->data, BACKENDKEY_LEN, &key)) { memcpy(client->cancel_key, key, BACKENDKEY_LEN); accept_cancel_request(client); } else disconnect_client(client, false, "bad cancel request"); return false; default: disconnect_client(client, false, "bad packet"); return false; } sbuf_prepare_skip(sbuf, pkt->len); client->request_time = get_cached_time(); return true; }
CWE-476
1
static void mptsas_fetch_request(MPTSASState *s) { PCIDevice *pci = (PCIDevice *) s; char req[MPTSAS_MAX_REQUEST_SIZE]; MPIRequestHeader *hdr = (MPIRequestHeader *)req; hwaddr addr; int size; /* Read the message header from the guest first. */ addr = s->host_mfa_high_addr | MPTSAS_FIFO_GET(s, request_post); pci_dma_read(pci, addr, req, sizeof(hdr)); }
CWE-20
1
recv_and_process_client_pkt(void /*int fd*/) { ssize_t size; len_and_sockaddr *to; struct sockaddr *from; msg_t msg; uint8_t query_status; l_fixedpt_t query_xmttime; to = get_sock_lsa(G_listen_fd); from = xzalloc(to->len); size = recv_from_to(G_listen_fd, &msg, sizeof(msg), MSG_DONTWAIT, from, &to->u.sa, to->len); if (size != NTP_MSGSIZE_NOAUTH && size != NTP_MSGSIZE) { char *addr; if (size < 0) { if (errno == EAGAIN) goto bail; bb_perror_msg_and_die("recv"); } addr = xmalloc_sockaddr2dotted_noport(from); bb_error_msg("malformed packet received from %s: size %u", addr, (int)size); free(addr); goto bail; } /* Respond only to client and symmetric active packets */ if ((msg.m_status & MODE_MASK) != MODE_CLIENT && (msg.m_status & MODE_MASK) != MODE_SYM_ACT ) { goto bail; } query_status = msg.m_status; query_xmttime = msg.m_xmttime; msg.m_ppoll = G.poll_exp; msg.m_precision_exp = G_precision_exp; /* this time was obtained between poll() and recv() */ msg.m_rectime = d_to_lfp(G.cur_time); msg.m_xmttime = d_to_lfp(gettime1900d()); /* this instant */ if (G.peer_cnt == 0) { /* we have no peers: "stratum 1 server" mode. reftime = our own time */ G.reftime = G.cur_time; } msg.m_reftime = d_to_lfp(G.reftime); msg.m_orgtime = query_xmttime; msg.m_rootdelay = d_to_sfp(G.rootdelay); msg.m_rootdisp = d_to_sfp(G.rootdisp); msg.m_refid = G.refid; // (version > (3 << VERSION_SHIFT)) ? G.refid : G.refid3; /* We reply from the local address packet was sent to, * this makes to/from look swapped here: */ do_sendto(G_listen_fd, /*from:*/ &to->u.sa, /*to:*/ from, /*addrlen:*/ to->len, &msg, size); bail: free(to); free(from); }
CWE-399
1
char *ldb_dn_escape_value(TALLOC_CTX *mem_ctx, struct ldb_val value) { char *dst; size_t len; if (!value.length) return NULL; /* allocate destination string, it will be at most 3 times the source */ dst = talloc_array(mem_ctx, char, value.length * 3 + 1); if ( ! dst) { talloc_free(dst); return NULL; } len = ldb_dn_escape_internal(dst, (const char *)value.data, value.length); dst = talloc_realloc(mem_ctx, dst, char, len + 1); if ( ! dst) { talloc_free(dst); return NULL; } dst[len] = '\0'; return dst; }
CWE-200
1
BlockedPluginInfoBarDelegate::BlockedPluginInfoBarDelegate( TabContents* tab_contents, const string16& utf16_name) : PluginInfoBarDelegate(tab_contents, utf16_name) { UserMetrics::RecordAction(UserMetricsAction("BlockedPluginInfobar.Shown")); std::string name = UTF16ToUTF8(utf16_name); if (name == webkit::npapi::PluginGroup::kJavaGroupName) UserMetrics::RecordAction( UserMetricsAction("BlockedPluginInfobar.Shown.Java")); else if (name == webkit::npapi::PluginGroup::kQuickTimeGroupName) UserMetrics::RecordAction( UserMetricsAction("BlockedPluginInfobar.Shown.QuickTime")); else if (name == webkit::npapi::PluginGroup::kShockwaveGroupName) UserMetrics::RecordAction( UserMetricsAction("BlockedPluginInfobar.Shown.Shockwave")); else if (name == webkit::npapi::PluginGroup::kRealPlayerGroupName) UserMetrics::RecordAction( UserMetricsAction("BlockedPluginInfobar.Shown.RealPlayer")); else if (name == webkit::npapi::PluginGroup::kWindowsMediaPlayerGroupName) UserMetrics::RecordAction( UserMetricsAction("BlockedPluginInfobar.Shown.WindowsMediaPlayer")); }
CWE-264
1
static void oz_usb_handle_ep_data(struct oz_usb_ctx *usb_ctx, struct oz_usb_hdr *usb_hdr, int len) { struct oz_data *data_hdr = (struct oz_data *)usb_hdr; switch (data_hdr->format) { case OZ_DATA_F_MULTIPLE_FIXED: { struct oz_multiple_fixed *body = (struct oz_multiple_fixed *)data_hdr; u8 *data = body->data; unsigned int n; if (!body->unit_size || len < sizeof(struct oz_multiple_fixed) - 1) break; n = (len - (sizeof(struct oz_multiple_fixed) - 1)) / body->unit_size; while (n--) { oz_hcd_data_ind(usb_ctx->hport, body->endpoint, data, body->unit_size); data += body->unit_size; } } break; case OZ_DATA_F_ISOC_FIXED: { struct oz_isoc_fixed *body = (struct oz_isoc_fixed *)data_hdr; int data_len = len-sizeof(struct oz_isoc_fixed)+1; int unit_size = body->unit_size; u8 *data = body->data; int count; int i; if (!unit_size) break; count = data_len/unit_size; for (i = 0; i < count; i++) { oz_hcd_data_ind(usb_ctx->hport, body->endpoint, data, unit_size); data += unit_size; } } break; } }
CWE-119
1
void InputConnectionImpl::CommitText(const base::string16& text, int new_cursor_pos) { StartStateUpdateTimer(); std::string error; if (!ime_engine_->ClearComposition(input_context_id_, &error)) LOG(ERROR) << "ClearComposition failed: error=\"" << error << "\""; if (IsControlChar(text)) { SendControlKeyEvent(text); return; } if (!ime_engine_->CommitText(input_context_id_, base::UTF16ToUTF8(text).c_str(), &error)) LOG(ERROR) << "CommitText failed: error=\"" << error << "\""; composing_text_.clear(); }
CWE-119
1
bool TabsCaptureVisibleTabFunction::RunImpl() { PrefService* service = profile()->GetPrefs(); if (service->GetBoolean(prefs::kDisableScreenshots)) { error_ = keys::kScreenshotsDisabled; return false; } WebContents* web_contents = NULL; if (!GetTabToCapture(&web_contents)) return false; image_format_ = FORMAT_JPEG; // Default format is JPEG. image_quality_ = kDefaultQuality; // Default quality setting. if (HasOptionalArgument(1)) { DictionaryValue* options = NULL; EXTENSION_FUNCTION_VALIDATE(args_->GetDictionary(1, &options)); if (options->HasKey(keys::kFormatKey)) { std::string format; EXTENSION_FUNCTION_VALIDATE( options->GetString(keys::kFormatKey, &format)); if (format == keys::kFormatValueJpeg) { image_format_ = FORMAT_JPEG; } else if (format == keys::kFormatValuePng) { image_format_ = FORMAT_PNG; } else { EXTENSION_FUNCTION_VALIDATE(0); } } if (options->HasKey(keys::kQualityKey)) { EXTENSION_FUNCTION_VALIDATE( options->GetInteger(keys::kQualityKey, &image_quality_)); } } // Use the last committed URL rather than the active URL for permissions // checking, since the visible page won't be updated until it has been // committed. A canonical example of this is interstitials, which show the // URL of the new/loading page (active) but would capture the content of the // old page (last committed). // // TODO(creis): Use WebContents::GetLastCommittedURL instead. // http://crbug.com/237908. NavigationEntry* last_committed_entry = web_contents->GetController().GetLastCommittedEntry(); GURL last_committed_url = last_committed_entry ? last_committed_entry->GetURL() : GURL(); if (!GetExtension()->CanCaptureVisiblePage(last_committed_url, SessionID::IdForTab(web_contents), &error_)) { return false; } RenderViewHost* render_view_host = web_contents->GetRenderViewHost(); content::RenderWidgetHostView* view = render_view_host->GetView(); if (!view) { error_ = keys::kInternalVisibleTabCaptureError; return false; } render_view_host->CopyFromBackingStore( gfx::Rect(), view->GetViewBounds().size(), base::Bind(&TabsCaptureVisibleTabFunction::CopyFromBackingStoreComplete, this)); return true; }
CWE-264
1
PassRefPtr<Attr> Element::setAttributeNode(Attr* attrNode, ExceptionCode& ec) { if (!attrNode) { ec = TYPE_MISMATCH_ERR; return 0; } RefPtr<Attr> oldAttrNode = attrIfExists(attrNode->qualifiedName()); if (oldAttrNode.get() == attrNode) return attrNode; // This Attr is already attached to the element. if (attrNode->ownerElement()) { ec = INUSE_ATTRIBUTE_ERR; return 0; } synchronizeAllAttributes(); UniqueElementData* elementData = ensureUniqueElementData(); size_t index = elementData->getAttributeItemIndex(attrNode->qualifiedName()); if (index != notFound) { if (oldAttrNode) detachAttrNodeFromElementWithValue(oldAttrNode.get(), elementData->attributeItem(index)->value()); else oldAttrNode = Attr::create(document(), attrNode->qualifiedName(), elementData->attributeItem(index)->value()); } setAttributeInternal(index, attrNode->qualifiedName(), attrNode->value(), NotInSynchronizationOfLazyAttribute); attrNode->attachToElement(this); treeScope()->adoptIfNeeded(attrNode); ensureAttrNodeListForElement(this)->append(attrNode); return oldAttrNode.release(); }
CWE-399
1
ecryptfs_decode_from_filename(unsigned char *dst, size_t *dst_size, const unsigned char *src, size_t src_size) { u8 current_bit_offset = 0; size_t src_byte_offset = 0; size_t dst_byte_offset = 0; if (dst == NULL) { (*dst_size) = ecryptfs_max_decoded_size(src_size); goto out; } while (src_byte_offset < src_size) { unsigned char src_byte = filename_rev_map[(int)src[src_byte_offset]]; switch (current_bit_offset) { case 0: dst[dst_byte_offset] = (src_byte << 2); current_bit_offset = 6; break; case 6: dst[dst_byte_offset++] |= (src_byte >> 4); dst[dst_byte_offset] = ((src_byte & 0xF) << 4); current_bit_offset = 4; break; case 4: dst[dst_byte_offset++] |= (src_byte >> 2); dst[dst_byte_offset] = (src_byte << 6); current_bit_offset = 2; break; case 2: dst[dst_byte_offset++] |= (src_byte); current_bit_offset = 0; break; } src_byte_offset++; } (*dst_size) = dst_byte_offset; out: return; }
CWE-189
1
char *suhosin_encrypt_single_cookie(char *name, int name_len, char *value, int value_len, char *key TSRMLS_DC) { char *buf, *buf2, *d, *d_url; int l; buf = estrndup(name, name_len); name_len = php_url_decode(buf, name_len); normalize_varname(buf); name_len = strlen(buf); if (SUHOSIN_G(cookie_plainlist)) { if (zend_hash_exists(SUHOSIN_G(cookie_plainlist), buf, name_len+1)) { encrypt_return_plain: efree(buf); return estrndup(value, value_len); } } else if (SUHOSIN_G(cookie_cryptlist)) { if (!zend_hash_exists(SUHOSIN_G(cookie_cryptlist), buf, name_len+1)) { goto encrypt_return_plain; } } buf2 = estrndup(value, value_len); value_len = php_url_decode(buf2, value_len); d = suhosin_encrypt_string(buf2, value_len, buf, name_len, key TSRMLS_CC); d_url = php_url_encode(d, strlen(d), &l); efree(d); efree(buf); efree(buf2); return d_url; }
CWE-119
1