Dataset preview — one record per (code, label_name, label) triple, listed in that order below:

code: string, 12 to 2.05k characters; the source of a single C/C++ function.
label_name: string, 6 to 8 characters; the CWE identifier of the labeled weakness (e.g. CWE-787).
label: int64, 0 to 95; the integer class id corresponding to that CWE.
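The column summaries above ("stringlengths", "int64" with value ranges) are the form a Hugging Face dataset viewer produces, so the records below can plausibly be consumed through the `datasets` library. A minimal sketch under that assumption follows; the dataset path `user/cwe-labeled-functions` is a hypothetical placeholder, not a real identifier.

```python
# Sketch of iterating a CWE-labeled function dataset.
# Assumptions: the data is published as a Hugging Face dataset with the three
# columns shown above; "user/cwe-labeled-functions" is a placeholder path.
from collections import Counter

from datasets import load_dataset

ds = load_dataset("user/cwe-labeled-functions", split="train")

# Each record pairs one function body with its CWE class.
first = ds[0]
print(first["label_name"], first["label"])  # e.g. "CWE-787", 24
print(first["code"][:80])                   # first characters of the function source

# Column access returns a plain list, so class balance is easy to inspect.
print(Counter(ds["label_name"]).most_common(5))
```

In the rows below, each label_name consistently pairs with the same integer label (CWE-787 with 24, CWE-125 with 47, CWE-190 with 19, and so on), so the two columns appear to encode the same class and either can serve as the target. The function snippets themselves are reproduced verbatim, one per line.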
selaComputeCompositeParameters(const char *fileout) { char *str, *nameh1, *nameh2, *namev1, *namev2; char buf[L_BUF_SIZE]; l_int32 size, size1, size2, len; SARRAY *sa; SELA *selabasic, *selacomb; selabasic = selaAddBasic(NULL); selacomb = selaAddDwaCombs(NULL); sa = sarrayCreate(64); for (size = 2; size < 64; size++) { selectComposableSizes(size, &size1, &size2); nameh1 = selaGetBrickName(selabasic, size1, 1); namev1 = selaGetBrickName(selabasic, 1, size1); if (size2 > 1) { nameh2 = selaGetCombName(selacomb, size1 * size2, L_HORIZ); namev2 = selaGetCombName(selacomb, size1 * size2, L_VERT); } else { nameh2 = stringNew(""); namev2 = stringNew(""); } snprintf(buf, L_BUF_SIZE, " { %d, %d, %d, \"%s\", \"%s\", \"%s\", \"%s\" },", size, size1, size2, nameh1, nameh2, namev1, namev2); sarrayAddString(sa, buf, L_COPY); LEPT_FREE(nameh1); LEPT_FREE(nameh2); LEPT_FREE(namev1); LEPT_FREE(namev2); } str = sarrayToString(sa, 1); len = strlen(str); l_binaryWrite(fileout, "w", str, len + 1); LEPT_FREE(str); sarrayDestroy(&sa); selaDestroy(&selabasic); selaDestroy(&selacomb); return; }
CWE-787
24
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 3); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 2); const TfLiteTensor* lookup = GetInput(context, node, 0); TF_LITE_ENSURE_EQ(context, NumDimensions(lookup), 1); TF_LITE_ENSURE_EQ(context, lookup->type, kTfLiteInt32); const TfLiteTensor* key = GetInput(context, node, 1); TF_LITE_ENSURE_EQ(context, NumDimensions(key), 1); TF_LITE_ENSURE_EQ(context, key->type, kTfLiteInt32); const TfLiteTensor* value = GetInput(context, node, 2); TF_LITE_ENSURE(context, NumDimensions(value) >= 1); TF_LITE_ENSURE_EQ(context, SizeOfDimension(key, 0), SizeOfDimension(value, 0)); if (value->type == kTfLiteString) { TF_LITE_ENSURE_EQ(context, NumDimensions(value), 1); } TfLiteTensor* hits = GetOutput(context, node, 1); TF_LITE_ENSURE_EQ(context, hits->type, kTfLiteUInt8); TfLiteIntArray* hitSize = TfLiteIntArrayCreate(1); hitSize->data[0] = SizeOfDimension(lookup, 0); TfLiteTensor* output = GetOutput(context, node, 0); TF_LITE_ENSURE_EQ(context, value->type, output->type); TfLiteStatus status = kTfLiteOk; if (output->type != kTfLiteString) { TfLiteIntArray* outputSize = TfLiteIntArrayCreate(NumDimensions(value)); outputSize->data[0] = SizeOfDimension(lookup, 0); for (int i = 1; i < NumDimensions(value); i++) { outputSize->data[i] = SizeOfDimension(value, i); } status = context->ResizeTensor(context, output, outputSize); } if (context->ResizeTensor(context, hits, hitSize) != kTfLiteOk) { status = kTfLiteError; } return status; }
CWE-787
24
Status OpLevelCostEstimator::PredictFusedBatchNorm( const OpContext& op_context, NodeCosts* node_costs) const { bool found_unknown_shapes = false; const auto& op_info = op_context.op_info; // x: op_info.inputs(0) // scale: op_info.inputs(1) // offset: op_info.inputs(2) // mean: op_info.inputs(3) --> only for inference // variance: op_info.inputs(4) --> only for inference ConvolutionDimensions dims = OpDimensionsFromInputs( op_info.inputs(0).shape(), op_info, &found_unknown_shapes); const bool is_training = IsTraining(op_info); int64_t ops = 0; const auto rsqrt_cost = Eigen::internal::functor_traits< Eigen::internal::scalar_rsqrt_op<float>>::Cost; if (is_training) { ops = dims.iz * (dims.batch * dims.ix * dims.iy * 4 + 6 + rsqrt_cost); } else { ops = dims.batch * dims.ix * dims.iy * dims.iz * 2; } node_costs->num_compute_ops = ops; const int64_t size_nhwc = CalculateTensorSize(op_info.inputs(0), &found_unknown_shapes); const int64_t size_c = CalculateTensorSize(op_info.inputs(1), &found_unknown_shapes); if (is_training) { node_costs->num_input_bytes_accessed = {size_nhwc, size_c, size_c}; node_costs->num_output_bytes_accessed = {size_nhwc, size_c, size_c, size_c, size_c}; // FusedBatchNorm in training mode internally re-reads the input tensor: // one for mean/variance, and the 2nd internal read forthe actual scaling. // Assume small intermediate data such as mean / variance (size_c) can be // cached on-chip. node_costs->internal_read_bytes = size_nhwc; } else { node_costs->num_input_bytes_accessed = {size_nhwc, size_c, size_c, size_c, size_c}; node_costs->num_output_bytes_accessed = {size_nhwc}; } node_costs->max_memory = node_costs->num_total_output_bytes(); if (found_unknown_shapes) { node_costs->inaccurate = true; node_costs->num_nodes_with_unknown_shapes = 1; } return Status::OK(); }
CWE-369
60
unsigned int bounded_iostream::write_no_buffer(const void *from, size_t bytes_to_write) { //return iostream::write(from,tpsize,dtsize); std::pair<unsigned int, Sirikata::JpegError> retval; if (byte_bound != 0 && byte_position + bytes_to_write > byte_bound) { size_t real_bytes_to_write = byte_bound - byte_position; byte_position += real_bytes_to_write; retval = parent->Write(reinterpret_cast<const unsigned char*>(from), real_bytes_to_write); if (retval.first < real_bytes_to_write) { err = retval.second; return retval.first; } return bytes_to_write; // pretend we wrote it all } size_t total = bytes_to_write; retval = parent->Write(reinterpret_cast<const unsigned char*>(from), total); unsigned int written = retval.first; byte_position += written; if (written < total ) { err = retval.second; return written; } return bytes_to_write; }
CWE-1187
90
int length() const { return m_str ? m_str->size() : 0; }
CWE-125
47
inline TfLiteTensor* GetMutableInput(const TfLiteContext* context, const TfLiteNode* node, int index) { if (context->tensors != nullptr) { return &context->tensors[node->inputs->data[index]]; } else { return context->GetTensor(context, node->inputs->data[index]); } }
CWE-787
24
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); // Reinterprete the opaque data provided by user. OpData* data = reinterpret_cast<OpData*>(node->user_data); const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type); const TfLiteType type = input1->type; if (type != kTfLiteInt32 && type != kTfLiteFloat32 && type != kTfLiteInt64) { context->ReportError(context, "Type '%s' is not supported by floor_mod.", TfLiteTypeGetName(type)); return kTfLiteError; } output->type = type; data->requires_broadcast = !HaveSameShapes(input1, input2); TfLiteIntArray* output_size = nullptr; if (data->requires_broadcast) { TF_LITE_ENSURE_OK(context, CalculateShapeForBroadcast( context, input1, input2, &output_size)); } else { output_size = TfLiteIntArrayCopy(input1->dims); } return context->ResizeTensor(context, output, output_size); }
CWE-787
24
static Jsi_RC jsi_BitfieldToValue(Jsi_Interp *interp, Jsi_OptionSpec* spec, Jsi_Value **outValue, Jsi_DString *outStr, void *record, Jsi_Wide flags) { Jsi_csgset *bsget = spec->init.OPT_BITS; Jsi_Interp *d = interp; int idx = spec->idx; uchar *data = (uchar*)record; int64_t inum; Jsi_OptionSpec* enumSpec = (typeof(enumSpec))spec->data; if (!d || !bsget || idx<0) return Jsi_LogBug("invalid bitfield"); Jsi_RC rc = (*bsget)(interp, data, &inum, spec, idx, 0); if (rc != JSI_OK) return JSI_ERROR; if (enumSpec) { struct numStruct { int64_t numVal; } nval = { inum }; Jsi_OptionSpec eSpec[] = { JSI_OPT(CUSTOM, struct numStruct, numVal, .help=spec->help, .flags=JSI_OPT_ENUM_SPEC, .custom=Jsi_Opt_SwitchEnum, .data=(void*)enumSpec, .info=0, .tname=spec->tname, .value=0, .bits=0, .boffset=8*sizeof(int64_t) ), //TODO: extra JSI_OPT_END(struct numStruct) }; if (JSI_OK != jsi_EnumToValue(interp, eSpec, outValue, outStr, (void*)&nval, flags)) return JSI_ERROR; } else if (outStr) { char obuf[100]; snprintf(obuf, sizeof(obuf), "%" PRId64, inum); Jsi_DSAppend(outStr, obuf, NULL); } else { Jsi_Number num = (Jsi_Number)inum; Jsi_ValueMakeNumber(interp, outValue, num); } return JSI_OK; }
CWE-120
44
const TfLiteTensor* GetOptionalInputTensor(const TfLiteContext* context, const TfLiteNode* node, int index) { const bool use_tensor = index < node->inputs->size && node->inputs->data[index] != kTfLiteOptionalTensor; if (use_tensor) { return GetMutableInput(context, node, index); } return nullptr; }
CWE-125
47
inline int StringData::size() const { return m_len; }
CWE-125
47
void FormatConverter<T>::Populate(const T* src_data, std::vector<int> indices, int level, int prev_idx, int* src_data_ptr, T* dest_data) { if (level == indices.size()) { int orig_rank = dense_shape_.size(); std::vector<int> orig_idx; orig_idx.resize(orig_rank); int i = 0; for (; i < orig_idx.size(); i++) { int orig_dim = traversal_order_[i]; orig_idx[orig_dim] = indices[i]; } for (; i < indices.size(); i++) { const int block_idx = traversal_order_[i] - orig_rank; const int orig_dim = block_map_[block_idx]; orig_idx[orig_dim] = orig_idx[orig_dim] * block_size_[block_idx] + indices[i]; } dest_data[GetFlattenedIndex(orig_idx, dense_shape_)] = src_data[*src_data_ptr]; *src_data_ptr = *src_data_ptr + 1; return; } const int metadata_idx = 2 * level; const int shape_of_level = dim_metadata_[metadata_idx][0]; if (format_[level] == kTfLiteDimDense) { for (int i = 0; i < shape_of_level; i++) { indices[level] = i; Populate(src_data, indices, level + 1, prev_idx * shape_of_level + i, src_data_ptr, dest_data); } } else { const auto& array_segments = dim_metadata_[metadata_idx]; const auto& array_indices = dim_metadata_[metadata_idx + 1]; for (int i = array_segments[prev_idx]; i < array_segments[prev_idx + 1]; i++) { indices[level] = array_indices[i]; Populate(src_data, indices, level + 1, i, src_data_ptr, dest_data); } } }
CWE-787
24
void carray2Hex(const unsigned char *d, uint64_t _len, char *_hexArray, uint64_t _hexArrayLen) { CHECK_STATE(d); CHECK_STATE(_hexArray); char hexval[16] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'}; CHECK_STATE(_hexArrayLen > 2 * _len); for (int j = 0; j < _len; j++) { _hexArray[j * 2] = hexval[((d[j] >> 4) & 0xF)]; _hexArray[j * 2 + 1] = hexval[(d[j]) & 0x0F]; } _hexArray[_len * 2] = 0; }
CWE-787
24
String StringUtil::Implode(const Variant& items, const String& delim, const bool checkIsContainer /* = true */) { if (checkIsContainer && !isContainer(items)) { throw_param_is_not_container(); } int size = getContainerSize(items); if (size == 0) return empty_string(); req::vector<String> sitems; sitems.reserve(size); int len = 0; int lenDelim = delim.size(); for (ArrayIter iter(items); iter; ++iter) { sitems.emplace_back(iter.second().toString()); len += sitems.back().size() + lenDelim; } len -= lenDelim; // always one delimiter less than count of items assert(sitems.size() == size); String s = String(len, ReserveString); char *buffer = s.mutableData(); const char *sdelim = delim.data(); char *p = buffer; String &init_str = sitems[0]; int init_len = init_str.size(); memcpy(p, init_str.data(), init_len); p += init_len; for (int i = 1; i < size; i++) { String &item = sitems[i]; memcpy(p, sdelim, lenDelim); p += lenDelim; int lenItem = item.size(); memcpy(p, item.data(), lenItem); p += lenItem; } assert(p - buffer == len); s.setSize(len); return s; }
CWE-190
19
const String& setSize(int len) { assertx(m_str); m_str->setSize(len); return *this; }
CWE-190
19
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteLocalResponseNormParams*>(node->builtin_data); const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); if (output->type == kTfLiteFloat32) { #define TF_LITE_LOCAL_RESPONSE_NORM(type) \ tflite::LocalResponseNormalizationParams op_params; \ op_params.range = params->radius; \ op_params.bias = params->bias; \ op_params.alpha = params->alpha; \ op_params.beta = params->beta; \ type::LocalResponseNormalization( \ op_params, GetTensorShape(input), GetTensorData<float>(input), \ GetTensorShape(output), GetTensorData<float>(output)) if (kernel_type == kReference) { TF_LITE_LOCAL_RESPONSE_NORM(reference_ops); } if (kernel_type == kGenericOptimized) { TF_LITE_LOCAL_RESPONSE_NORM(optimized_ops); } #undef TF_LITE_LOCAL_RESPONSE_NORM } else { context->ReportError(context, "Output type is %d, requires float.", output->type); return kTfLiteError; } return kTfLiteOk; }
CWE-125
47
int size() const { return m_str ? m_str->size() : 0; }
CWE-125
47
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteIntArray* input_dims = input->dims; int input_dims_size = input_dims->size; TF_LITE_ENSURE(context, input_dims_size >= 1); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); // Resize the output tensor. TfLiteIntArray* output_shape = TfLiteIntArrayCreate(input_dims_size + 1); for (int i = 0; i < input_dims_size; i++) { output_shape->data[i] = input_dims->data[i]; } // Last dimension in the output is the same as the last dimension in the // input. output_shape->data[input_dims_size] = input_dims->data[input_dims_size - 1]; output->type = input->type; TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, output, output_shape)); return kTfLiteOk; }
CWE-787
24
void RunOneAveragePoolTest(const PoolParams& params, const RuntimeShape& input_shape, const int8* input_data, const RuntimeShape& output_shape) { const int buffer_size = output_shape.FlatSize(); std::vector<int8> optimized_averagePool_output(buffer_size); std::vector<int8> reference_averagePool_output(buffer_size); reference_integer_ops::AveragePool(params, input_shape, input_data, output_shape, reference_averagePool_output.data()); optimized_integer_ops::AveragePool(params, input_shape, input_data, output_shape, optimized_averagePool_output.data()); for (int i = 0; i < buffer_size; i++) { EXPECT_TRUE(reference_averagePool_output[i] == optimized_averagePool_output[i]); } }
CWE-835
42
jas_matrix_t *jas_seq2d_input(FILE *in) { jas_matrix_t *matrix; int i; int j; long x; int numrows; int numcols; int xoff; int yoff; if (fscanf(in, "%d %d", &xoff, &yoff) != 2) return 0; if (fscanf(in, "%d %d", &numcols, &numrows) != 2) return 0; if (!(matrix = jas_seq2d_create(xoff, yoff, xoff + numcols, yoff + numrows))) return 0; if (jas_matrix_numrows(matrix) != numrows || jas_matrix_numcols(matrix) != numcols) { abort(); } /* Get matrix data. */ for (i = 0; i < jas_matrix_numrows(matrix); i++) { for (j = 0; j < jas_matrix_numcols(matrix); j++) { if (fscanf(in, "%ld", &x) != 1) { jas_matrix_destroy(matrix); return 0; } jas_matrix_set(matrix, i, j, JAS_CAST(jas_seqent_t, x)); } } return matrix; }
CWE-190
19
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { OpData* data = static_cast<OpData*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); // TODO(b/128934713): Add support for fixed-point per-channel quantization. // Currently this only support affine per-layer quantization. TF_LITE_ENSURE_EQ(context, output->quantization.type, kTfLiteAffineQuantization); const auto* affine_quantization = static_cast<TfLiteAffineQuantization*>(output->quantization.params); TF_LITE_ENSURE(context, affine_quantization); TF_LITE_ENSURE(context, affine_quantization->scale); TF_LITE_ENSURE(context, affine_quantization->scale->size == 1); if (input->type == kTfLiteFloat32) { // Quantize use case. TF_LITE_ENSURE(context, output->type == kTfLiteUInt8 || output->type == kTfLiteInt8 || output->type == kTfLiteInt16); } else { // Requantize use case. if (input->type == kTfLiteInt16) { TF_LITE_ENSURE( context, output->type == kTfLiteInt8 || output->type == kTfLiteInt16); } else { TF_LITE_ENSURE(context, input->type == kTfLiteInt8 || input->type == kTfLiteUInt8); TF_LITE_ENSURE( context, output->type == kTfLiteUInt8 || output->type == kTfLiteInt8); } const double effective_output_scale = static_cast<double>(input->params.scale) / static_cast<double>(output->params.scale); QuantizeMultiplier(effective_output_scale, &data->output_multiplier, &data->output_shift); } return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); }
CWE-125
47
TEST(DefaultCertValidatorTest, TestMatchSubjectAltNameWildcardDNSMatched) { bssl::UniquePtr<X509> cert = readCertFromFile(TestEnvironment::substitute( "{{ test_rundir " "}}/test/extensions/transport_sockets/tls/test_data/san_multiple_dns_cert.pem")); envoy::type::matcher::v3::StringMatcher matcher; matcher.set_exact("api.example.com"); std::vector<Matchers::StringMatcherImpl<envoy::type::matcher::v3::StringMatcher>> subject_alt_name_matchers; subject_alt_name_matchers.push_back(Matchers::StringMatcherImpl(matcher)); EXPECT_TRUE(DefaultCertValidator::matchSubjectAltName(cert.get(), subject_alt_name_matchers)); }
CWE-295
52
void CreateNgrams(const tstring* data, tstring* output, int num_ngrams, int ngram_width) const { for (int ngram_index = 0; ngram_index < num_ngrams; ++ngram_index) { int pad_width = get_pad_width(ngram_width); int left_padding = std::max(0, pad_width - ngram_index); int right_padding = std::max(0, pad_width - (num_ngrams - (ngram_index + 1))); int num_tokens = ngram_width - (left_padding + right_padding); int data_start_index = left_padding > 0 ? 0 : ngram_index - pad_width; // Calculate the total expected size of the ngram so we can reserve the // correct amount of space in the string. int ngram_size = 0; // Size of the left padding. ngram_size += left_padding * left_pad_.length(); // Size of the tokens. for (int n = 0; n < num_tokens; ++n) { ngram_size += data[data_start_index + n].length(); } // Size of the right padding. ngram_size += right_padding * right_pad_.length(); // Size of the separators. int num_separators = left_padding + right_padding + num_tokens - 1; ngram_size += num_separators * separator_.length(); // Build the ngram. tstring* ngram = &output[ngram_index]; ngram->reserve(ngram_size); for (int n = 0; n < left_padding; ++n) { ngram->append(left_pad_); ngram->append(separator_); } for (int n = 0; n < num_tokens - 1; ++n) { ngram->append(data[data_start_index + n]); ngram->append(separator_); } ngram->append(data[data_start_index + num_tokens - 1]); for (int n = 0; n < right_padding; ++n) { ngram->append(separator_); ngram->append(right_pad_); } // In debug mode only: validate that we've reserved enough space for the // ngram. DCHECK_EQ(ngram_size, ngram->size()); } }
CWE-787
24
static int putint(jas_stream_t *out, int sgnd, int prec, long val) { int n; int c; bool s; ulong tmp; assert((!sgnd && prec >= 1) || (sgnd && prec >= 2)); if (sgnd) { val = encode_twos_comp(val, prec); } assert(val >= 0); val &= (1 << prec) - 1; n = (prec + 7) / 8; while (--n >= 0) { c = (val >> (n * 8)) & 0xff; if (jas_stream_putc(out, c) != c) return -1; } return 0; }
CWE-190
19
void Compute(OpKernelContext* context) override { // Get the input Tensors. OpInputList params_nested_splits_in; OP_REQUIRES_OK(context, context->input_list("params_nested_splits", &params_nested_splits_in)); const Tensor& params_dense_values_in = context->input(params_nested_splits_in.size()); const Tensor& indices_in = context->input(params_nested_splits_in.size() + 1); DCHECK_GT(params_nested_splits_in.size(), 0); // Enforced by REGISTER_OP. SPLITS_TYPE num_params = params_nested_splits_in[0].dim_size(0) - 1; OP_REQUIRES_OK(context, ValidateIndices(indices_in, num_params)); OP_REQUIRES(context, params_dense_values_in.dims() > 0, errors::InvalidArgument("params.rank must be nonzero")); SPLITS_TYPE num_params_dense_values = params_dense_values_in.dim_size(0); // Calculate the `splits`, and store the value slices that we need to // copy in `value_slices`. std::vector<std::pair<SPLITS_TYPE, SPLITS_TYPE>> value_slices; SPLITS_TYPE num_values = 0; std::vector<std::vector<SPLITS_TYPE>> out_splits; OP_REQUIRES_OK(context, MakeSplits(indices_in, params_nested_splits_in, num_params_dense_values, &out_splits, &value_slices, &num_values)); // Write the output tensors. OP_REQUIRES_OK(context, WriteSplits(out_splits, context)); OP_REQUIRES_OK(context, WriteValues(params_dense_values_in, value_slices, out_splits.size(), num_values, context)); }
CWE-125
47
static int base64decode_block(unsigned char *target, const char *data, size_t data_size) { int w1,w2,w3,w4; int i; size_t n; if (!data || (data_size <= 0)) { return 0; } n = 0; i = 0; while (n < data_size-3) { w1 = base64_table[(int)data[n]]; w2 = base64_table[(int)data[n+1]]; w3 = base64_table[(int)data[n+2]]; w4 = base64_table[(int)data[n+3]]; if (w2 >= 0) { target[i++] = (char)((w1*4 + (w2 >> 4)) & 255); } if (w3 >= 0) { target[i++] = (char)((w2*16 + (w3 >> 2)) & 255); } if (w4 >= 0) { target[i++] = (char)((w3*64 + w4) & 255); } n+=4; } return i; }
CWE-125
47
void setSanMatchers(std::vector<envoy::type::matcher::v3::StringMatcher> san_matchers) { san_matchers_ = san_matchers; };
CWE-295
52
static BOOL ntlm_av_pair_check(NTLM_AV_PAIR* pAvPair, size_t cbAvPair) { if (!pAvPair || cbAvPair < sizeof(NTLM_AV_PAIR)) return FALSE; return cbAvPair >= ntlm_av_pair_get_next_offset(pAvPair); }
CWE-125
47
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { TfLiteTensor* output = GetOutput(context, node, kOutputTensor); const TfLiteTensor* input = GetInput(context, node, kInputTensor); FillDiagHelper(input, output); return kTfLiteOk; }
CWE-787
24
TEST_P(SslSPIFFECertValidatorIntegrationTest, ServerRsaSPIFFEValidatorSANMatch) { auto typed_conf = new envoy::config::core::v3::TypedExtensionConfig(); TestUtility::loadFromYaml(TestEnvironment::substitute(R"EOF( name: envoy.tls.cert_validator.spiffe typed_config: "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.SPIFFECertValidatorConfig trust_domains: - name: lyft.com trust_bundle: filename: "{{ test_rundir }}/test/config/integration/certs/cacert.pem" )EOF"), *typed_conf); custom_validator_config_ = typed_conf; envoy::type::matcher::v3::StringMatcher matcher; matcher.set_prefix("spiffe://lyft.com/"); san_matchers_ = {matcher}; ConnectionCreationFunction creator = [&]() -> Network::ClientConnectionPtr { return makeSslClientConnection({}); }; testRouterRequestAndResponseWithBody(1024, 512, false, false, &creator); checkVerifyErrorCouter(0); }
CWE-295
52
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { auto* data = reinterpret_cast<TfLiteAudioMicrofrontendParams*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); TF_LITE_ENSURE_EQ(context, NumDimensions(input), 1); TF_LITE_ENSURE_EQ(context, input->type, kTfLiteInt16); output->type = kTfLiteInt32; if (data->out_float) { output->type = kTfLiteFloat32; } TfLiteIntArray* output_size = TfLiteIntArrayCreate(2); int num_frames = 0; if (input->dims->data[0] >= data->state->window.size) { num_frames = (input->dims->data[0] - data->state->window.size) / data->state->window.step / data->frame_stride + 1; } output_size->data[0] = num_frames; output_size->data[1] = data->state->filterbank.num_channels * (1 + data->left_context + data->right_context); return context->ResizeTensor(context, output, output_size); }
CWE-787
24
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteUnpackParams* data = reinterpret_cast<TfLiteUnpackParams*>(node->builtin_data); const TfLiteTensor* input = GetInput(context, node, kInputTensor); switch (input->type) { case kTfLiteFloat32: { UnpackImpl<float>(context, node, input, data->num, data->axis); break; } case kTfLiteInt32: { UnpackImpl<int32_t>(context, node, input, data->num, data->axis); break; } case kTfLiteUInt8: { UnpackImpl<uint8_t>(context, node, input, data->num, data->axis); break; } case kTfLiteInt8: { UnpackImpl<int8_t>(context, node, input, data->num, data->axis); break; } case kTfLiteBool: { UnpackImpl<bool>(context, node, input, data->num, data->axis); break; } case kTfLiteInt16: { UnpackImpl<int16_t>(context, node, input, data->num, data->axis); break; } default: { context->ReportError(context, "Type '%s' is not supported by unpack.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } return kTfLiteOk; }
CWE-125
47
jas_matrix_t *jas_matrix_copy(jas_matrix_t *x) { jas_matrix_t *y; int i; int j; y = jas_matrix_create(x->numrows_, x->numcols_); for (i = 0; i < x->numrows_; ++i) { for (j = 0; j < x->numcols_; ++j) { *jas_matrix_getref(y, i, j) = jas_matrix_get(x, i, j); } } return y; }
CWE-190
19
CbrDetectorRemote::Result CbrDetectorRemote::Decrypt(cricket::MediaType media_type, const std::vector<uint32_t>& csrcs, rtc::ArrayView<const uint8_t> additional_data, rtc::ArrayView<const uint8_t> encrypted_frame, rtc::ArrayView<uint8_t> frame) { const uint8_t *src = encrypted_frame.data(); uint8_t *dst = frame.data(); uint32_t data_len = encrypted_frame.size(); if (media_type == cricket::MEDIA_TYPE_AUDIO) { if (data_len == frame_size && frame_size >= 40) { frame_count++; if (frame_count > 200 && !detected) { info("CBR detector: remote cbr detected\n"); detected = true; } } else { frame_count = 0; frame_size = data_len; if (detected) { info("CBR detector: remote cbr detected disabled\n"); detected = false; } } } memcpy(dst, src, data_len); out: return CbrDetectorRemote::Result(CbrDetectorRemote::Status::kOk, data_len); }
CWE-134
54
CxFile(void) { };
CWE-770
37
void Context::onDelete() { if (wasm_->onDelete_) { wasm_->onDelete_(this, id_); } }
CWE-476
46
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, kInputTensor); const TfLiteTensor* seq_lengths = GetInput(context, node, kSeqLengthsTensor); TF_LITE_ENSURE_EQ(context, NumDimensions(seq_lengths), 1); if (input->type != kTfLiteInt32 && input->type != kTfLiteFloat32 && input->type != kTfLiteUInt8 && input->type != kTfLiteInt16 && input->type != kTfLiteInt64) { context->ReportError(context, "Type '%s' is not supported by reverse_sequence.", TfLiteTypeGetName(input->type)); return kTfLiteError; } if (seq_lengths->type != kTfLiteInt32 && seq_lengths->type != kTfLiteInt64) { context->ReportError( context, "Seq_lengths type '%s' is not supported by reverse_sequence.", TfLiteTypeGetName(seq_lengths->type)); return kTfLiteError; } TfLiteTensor* output = GetOutput(context, node, kOutputTensor); TfLiteIntArray* output_shape = TfLiteIntArrayCopy(input->dims); TF_LITE_ENSURE_TYPES_EQ(context, output->type, input->type); return context->ResizeTensor(context, output, output_shape); }
CWE-787
24
TfLiteStatus AverageEval(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLitePoolParams*>(node->builtin_data); OpData* data = reinterpret_cast<OpData*>(node->user_data); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); switch (input->type) { // Already know in/out types are same. case kTfLiteFloat32: AverageEvalFloat<kernel_type>(context, node, params, data, input, output); break; case kTfLiteUInt8: AverageEvalQuantizedUint8<kernel_type>(context, node, params, data, input, output); break; case kTfLiteInt8: AverageEvalQuantizedInt8<kernel_type>(context, node, params, data, input, output); break; case kTfLiteInt16: AverageEvalQuantizedInt16<kernel_type>(context, node, params, data, input, output); break; default: TF_LITE_KERNEL_LOG(context, "Type %s not currently supported.", TfLiteTypeGetName(input->type)); return kTfLiteError; } return kTfLiteOk; }
CWE-835
42
TfLiteStatus PrepareMeanOrSum(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_OK(context, PrepareSimple(context, node)); OpData* data = reinterpret_cast<OpData*>(node->user_data); // reduce_mean requires a buffer to store intermediate sum result. OpContext op_context(context, node); if (op_context.input->type == kTfLiteInt8 || op_context.input->type == kTfLiteUInt8 || op_context.input->type == kTfLiteInt16) { const double real_multiplier = static_cast<double>(op_context.input->params.scale) / static_cast<double>(op_context.output->params.scale); int exponent; QuantizeMultiplier(real_multiplier, &data->multiplier, &exponent); data->shift = exponent; } TfLiteTensor* temp_sum = GetTemporary(context, node, /*index=*/2); if (!IsConstantTensor(op_context.axis)) { SetTensorToDynamic(temp_sum); return kTfLiteOk; } temp_sum->allocation_type = kTfLiteArenaRw; return ResizeTempSum(context, &op_context, temp_sum); }
CWE-787
24
AsyncSocket::WriteResult AsyncSSLSocket::interpretSSLError(int rc, int error) { if (error == SSL_ERROR_WANT_READ) { // Even though we are attempting to write data, SSL_write() may // need to read data if renegotiation is being performed. We currently // don't support this and just fail the write. LOG(ERROR) << "AsyncSSLSocket(fd=" << fd_ << ", state=" << int(state_) << ", sslState=" << sslState_ << ", events=" << eventFlags_ << "): " << "unsupported SSL renegotiation during write"; return WriteResult( WRITE_ERROR, std::make_unique<SSLException>(SSLError::INVALID_RENEGOTIATION)); } else { if (zero_return(error, rc, errno)) { return WriteResult(0); } auto errError = ERR_get_error(); VLOG(3) << "ERROR: AsyncSSLSocket(fd=" << fd_ << ", state=" << int(state_) << ", sslState=" << sslState_ << ", events=" << eventFlags_ << "): " << "SSL error: " << error << ", errno: " << errno << ", func: " << ERR_func_error_string(errError) << ", reason: " << ERR_reason_error_string(errError); return WriteResult( WRITE_ERROR, std::make_unique<SSLException>(error, errError, rc, errno)); } }
CWE-125
47
static BOOL ntlm_av_pair_add_copy(NTLM_AV_PAIR* pAvPairList, size_t cbAvPairList, NTLM_AV_PAIR* pAvPair, size_t cbAvPair) { if (!ntlm_av_pair_check(pAvPair, cbAvPair)) return FALSE; return ntlm_av_pair_add(pAvPairList, cbAvPairList, ntlm_av_pair_get_id(pAvPair), ntlm_av_pair_get_value_pointer(pAvPair), ntlm_av_pair_get_len(pAvPair)); }
CWE-125
47
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, kInputTensor); const TfLiteTensor* axis_tensor = GetInput(context, node, kAxisTensor); int axis = GetTensorData<int32_t>(axis_tensor)[0]; const int rank = NumDimensions(input); if (axis < 0) { axis += rank; } TF_LITE_ENSURE(context, axis >= 0 && axis < rank); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); switch (output->type) { case kTfLiteFloat32: { reference_ops::Reverse<float>( axis, GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); break; } case kTfLiteUInt8: { reference_ops::Reverse<uint8_t>( axis, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); break; } case kTfLiteInt16: { reference_ops::Reverse<int16_t>( axis, GetTensorShape(input), GetTensorData<int16_t>(input), GetTensorShape(output), GetTensorData<int16_t>(output)); break; } case kTfLiteInt32: { reference_ops::Reverse<int32_t>( axis, GetTensorShape(input), GetTensorData<int32_t>(input), GetTensorShape(output), GetTensorData<int32_t>(output)); break; } case kTfLiteInt64: { reference_ops::Reverse<int64_t>( axis, GetTensorShape(input), GetTensorData<int64_t>(input), GetTensorShape(output), GetTensorData<int64_t>(output)); break; } case kTfLiteBool: { reference_ops::Reverse<bool>( axis, GetTensorShape(input), GetTensorData<bool>(input), GetTensorShape(output), GetTensorData<bool>(output)); break; } default: { context->ReportError(context, "Type '%s' is not supported by reverse.", TfLiteTypeGetName(output->type)); return kTfLiteError; } } return kTfLiteOk; }
CWE-787
24
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); optimized_ops::Round(GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); return kTfLiteOk; }
CWE-125
47
void pointZZ_pAdd(PointZZ_p * rop, const PointZZ_p * op1, const PointZZ_p * op2, const CurveZZ_p * curve) { mpz_t xdiff, ydiff, lambda; mpz_inits(xdiff, ydiff, lambda, NULL); // calculate lambda mpz_sub(ydiff, op2->y, op1->y); mpz_sub(xdiff, op2->x, op1->x); mpz_invert(xdiff, xdiff, curve->p); // TODO check status mpz_mul(lambda, ydiff, xdiff); mpz_mod(lambda, lambda, curve->p); // calculate resulting x coord mpz_mul(rop->x, lambda, lambda); mpz_sub(rop->x, rop->x, op1->x); mpz_sub(rop->x, rop->x, op2->x); mpz_mod(rop->x, rop->x, curve->p); //calculate resulting y coord mpz_sub(rop->y, op1->x, rop->x); mpz_mul(rop->y, lambda, rop->y); mpz_sub(rop->y, rop->y, op1->y); mpz_mod(rop->y, rop->y, curve->p); mpz_clears(xdiff, ydiff, lambda, NULL); }
CWE-347
25
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { auto* data = reinterpret_cast<TfLiteAudioMicrofrontendParams*>(node->user_data); FrontendReset(data->state); const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); if (data->out_float) { GenerateFeatures<float>(data, input, output); } else { GenerateFeatures<int32>(data, input, output); } return kTfLiteOk; }
CWE-125
47
void preprocessNodes(std::vector<Proxy> &nodes, extra_settings &ext) { std::for_each(nodes.begin(), nodes.end(), [&ext](Proxy &x) { if(ext.remove_emoji) x.Remark = trim(removeEmoji(x.Remark)); nodeRename(x, ext.rename_array, ext); if(ext.add_emoji) x.Remark = addEmoji(x, ext.emoji_array, ext); }); if(ext.sort_flag) { bool failed = true; if(ext.sort_script.size()) { std::string script = ext.sort_script; if(startsWith(script, "path:")) script = fileGet(script.substr(5), false); script_safe_runner(ext.js_runtime, ext.js_context, [&](qjs::Context &ctx) { try { ctx.eval(script); auto compare = (std::function<int(const Proxy&, const Proxy&)>) ctx.eval("compare"); auto comparer = [&](const Proxy &a, const Proxy &b) { if(a.Type == ProxyType::Unknow) return 1; if(b.Type == ProxyType::Unknow) return 0; return compare(a, b); }; std::stable_sort(nodes.begin(), nodes.end(), comparer); failed = false; } catch(qjs::exception) { script_print_stack(ctx); } }, global.scriptCleanContext); } if(failed) std::stable_sort(nodes.begin(), nodes.end(), [](const Proxy &a, const Proxy &b) { return a.Remark < b.Remark; }); } }
CWE-434
5
void CreateNgrams(const tstring* data, tstring* output, int num_ngrams, int ngram_width) const { for (int ngram_index = 0; ngram_index < num_ngrams; ++ngram_index) { int pad_width = get_pad_width(ngram_width); int left_padding = std::max(0, pad_width - ngram_index); int right_padding = std::max(0, pad_width - (num_ngrams - (ngram_index + 1))); int num_tokens = ngram_width - (left_padding + right_padding); int data_start_index = left_padding > 0 ? 0 : ngram_index - pad_width; // Calculate the total expected size of the ngram so we can reserve the // correct amount of space in the string. int ngram_size = 0; // Size of the left padding. ngram_size += left_padding * left_pad_.length(); // Size of the tokens. for (int n = 0; n < num_tokens; ++n) { ngram_size += data[data_start_index + n].length(); } // Size of the right padding. ngram_size += right_padding * right_pad_.length(); // Size of the separators. int num_separators = left_padding + right_padding + num_tokens - 1; ngram_size += num_separators * separator_.length(); // Build the ngram. tstring* ngram = &output[ngram_index]; ngram->reserve(ngram_size); for (int n = 0; n < left_padding; ++n) { ngram->append(left_pad_); ngram->append(separator_); } for (int n = 0; n < num_tokens - 1; ++n) { ngram->append(data[data_start_index + n]); ngram->append(separator_); } ngram->append(data[data_start_index + num_tokens - 1]); for (int n = 0; n < right_padding; ++n) { ngram->append(separator_); ngram->append(right_pad_); } // In debug mode only: validate that we've reserved enough space for the // ngram. DCHECK_EQ(ngram_size, ngram->size()); } }
CWE-476
46
int CommandData::IsProcessFile(FileHeader &FileHead,bool *ExactMatch,int MatchType, wchar *MatchedArg,uint MatchedArgSize) { if (MatchedArg!=NULL && MatchedArgSize>0) *MatchedArg=0; if (wcslen(FileHead.FileName)>=NM) return 0; bool Dir=FileHead.Dir; if (ExclCheck(FileHead.FileName,Dir,false,true)) return 0; #ifndef SFX_MODULE if (TimeCheck(FileHead.mtime)) return 0; if ((FileHead.FileAttr & ExclFileAttr)!=0 || InclAttrSet && (FileHead.FileAttr & InclFileAttr)==0) return 0; if (!Dir && SizeCheck(FileHead.UnpSize)) return 0; #endif wchar *ArgName; FileArgs.Rewind(); for (int StringCount=1;(ArgName=FileArgs.GetString())!=NULL;StringCount++) if (CmpName(ArgName,FileHead.FileName,MatchType)) { if (ExactMatch!=NULL) *ExactMatch=wcsicompc(ArgName,FileHead.FileName)==0; if (MatchedArg!=NULL) wcsncpyz(MatchedArg,ArgName,MatchedArgSize); return StringCount; } return 0; }
CWE-787
24
UnicodeString::doAppend(const UChar *srcChars, int32_t srcStart, int32_t srcLength) { if(!isWritable() || srcLength == 0 || srcChars == NULL) { return *this; } // Perform all remaining operations relative to srcChars + srcStart. // From this point forward, do not use srcStart. srcChars += srcStart; if(srcLength < 0) { // get the srcLength if necessary if((srcLength = u_strlen(srcChars)) == 0) { return *this; } } int32_t oldLength = length(); int32_t newLength = oldLength + srcLength; // Check for append onto ourself const UChar* oldArray = getArrayStart(); if (isBufferWritable() && oldArray < srcChars + srcLength && srcChars < oldArray + oldLength) { // Copy into a new UnicodeString and start over UnicodeString copy(srcChars, srcLength); if (copy.isBogus()) { setToBogus(); return *this; } return doAppend(copy.getArrayStart(), 0, srcLength); } // optimize append() onto a large-enough, owned string if((newLength <= getCapacity() && isBufferWritable()) || cloneArrayIfNeeded(newLength, getGrowCapacity(newLength))) { UChar *newArray = getArrayStart(); // Do not copy characters when // UChar *buffer=str.getAppendBuffer(...); // is followed by // str.append(buffer, length); // or // str.appendString(buffer, length) // or similar. if(srcChars != newArray + oldLength) { us_arrayCopy(srcChars, 0, newArray, oldLength, srcLength); } setLength(newLength); } return *this; }
CWE-787
24
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { TfLiteTensor* output = GetOutput(context, node, kOutputTensor); switch (output->type) { case kTfLiteFloat32: { return ReverseSequenceHelper<float>(context, node); } case kTfLiteUInt8: { return ReverseSequenceHelper<uint8_t>(context, node); } case kTfLiteInt16: { return ReverseSequenceHelper<int16_t>(context, node); } case kTfLiteInt32: { return ReverseSequenceHelper<int32_t>(context, node); } case kTfLiteInt64: { return ReverseSequenceHelper<int64_t>(context, node); } default: { context->ReportError(context, "Type '%s' is not supported by reverse_sequence.", TfLiteTypeGetName(output->type)); return kTfLiteError; } } return kTfLiteOk; } // namespace
CWE-787
24
static inline int min(int a, int b) {return a<b ? a : b;}
CWE-787
24
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteLocalResponseNormParams*>(node->builtin_data); const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); if (output->type == kTfLiteFloat32) { #define TF_LITE_LOCAL_RESPONSE_NORM(type) \ tflite::LocalResponseNormalizationParams op_params; \ op_params.range = params->radius; \ op_params.bias = params->bias; \ op_params.alpha = params->alpha; \ op_params.beta = params->beta; \ type::LocalResponseNormalization( \ op_params, GetTensorShape(input), GetTensorData<float>(input), \ GetTensorShape(output), GetTensorData<float>(output)) if (kernel_type == kReference) { TF_LITE_LOCAL_RESPONSE_NORM(reference_ops); } if (kernel_type == kGenericOptimized) { TF_LITE_LOCAL_RESPONSE_NORM(optimized_ops); } #undef TF_LITE_LOCAL_RESPONSE_NORM } else { context->ReportError(context, "Output type is %d, requires float.", output->type); return kTfLiteError; } return kTfLiteOk; }
CWE-787
24
bool read(ReadonlyBytes buffer) { auto fields_size = sizeof(LocalFileHeader) - (sizeof(u8*) * 3); if (buffer.size() < fields_size) return false; if (memcmp(buffer.data(), local_file_header_signature, sizeof(local_file_header_signature)) != 0) return false; memcpy(reinterpret_cast<void*>(&minimum_version), buffer.data() + sizeof(local_file_header_signature), fields_size); name = buffer.data() + sizeof(local_file_header_signature) + fields_size; extra_data = name + name_length; compressed_data = extra_data + extra_data_length; return true; }
CWE-120
44
AP4_AvccAtom::InspectFields(AP4_AtomInspector& inspector) { inspector.AddField("Configuration Version", m_ConfigurationVersion); const char* profile_name = GetProfileName(m_Profile); if (profile_name) { inspector.AddField("Profile", profile_name); } else { inspector.AddField("Profile", m_Profile); } inspector.AddField("Profile Compatibility", m_ProfileCompatibility, AP4_AtomInspector::HINT_HEX); inspector.AddField("Level", m_Level); inspector.AddField("NALU Length Size", m_NaluLengthSize); for (unsigned int i=0; i<m_SequenceParameters.ItemCount(); i++) { inspector.AddField("Sequence Parameter", m_SequenceParameters[i].GetData(), m_SequenceParameters[i].GetDataSize()); } for (unsigned int i=0; i<m_SequenceParameters.ItemCount(); i++) { inspector.AddField("Picture Parameter", m_PictureParameters[i].GetData(), m_PictureParameters[i].GetDataSize()); } return AP4_SUCCESS; }
CWE-476
46
Status OpLevelCostEstimator::PredictAvgPoolGrad(const OpContext& op_context, NodeCosts* node_costs) const { bool found_unknown_shapes = false; const auto& op_info = op_context.op_info; // x's shape: op_info.inputs(0) // y_grad: op_info.inputs(1) // Extract x_shape from op_info.inputs(0).value() or op_info.outputs(0). bool shape_found = false; TensorShapeProto x_shape; if (op_info.inputs_size() >= 1 && op_info.inputs(0).has_value()) { const TensorProto& value = op_info.inputs(0).value(); shape_found = GetTensorShapeProtoFromTensorProto(value, &x_shape); } if (!shape_found && op_info.outputs_size() > 0) { x_shape = op_info.outputs(0).shape(); shape_found = true; } if (!shape_found) { // Set the minimum shape that's feasible. x_shape.Clear(); for (int i = 0; i < 4; ++i) { x_shape.add_dim()->set_size(1); } found_unknown_shapes = true; } ConvolutionDimensions dims = OpDimensionsFromInputs(x_shape, op_info, &found_unknown_shapes); int64_t ops = 0; if (dims.kx <= dims.sx && dims.ky <= dims.sy) { // Non-overlapping window. ops = dims.batch * dims.iz * (dims.ix * dims.iy + dims.ox * dims.oy); } else { // Overlapping window. ops = dims.batch * dims.iz * (dims.ix * dims.iy + dims.ox * dims.oy * (dims.kx * dims.ky + 1)); } auto s = PredictDefaultNodeCosts(ops, op_context, &found_unknown_shapes, node_costs); node_costs->max_memory = node_costs->num_total_output_bytes(); return s; }
CWE-369
60
explicit ThreadPoolHandleOp(OpKernelConstruction* ctx) : OpKernel(ctx) { OP_REQUIRES_OK(ctx, ctx->GetAttr("display_name", &display_name_)); OP_REQUIRES_OK(ctx, ctx->GetAttr("num_threads", &num_threads_)); OP_REQUIRES_OK(ctx, ctx->GetAttr("max_intra_op_parallelism", &max_intra_op_parallelism_)); OP_REQUIRES( ctx, num_threads_ > 0, errors::InvalidArgument("`num_threads` must be greater than zero.")); }
CWE-770
37
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteDivParams*>(node->builtin_data); OpData* data = reinterpret_cast<OpData*>(node->user_data); const TfLiteTensor* input1; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor1, &input1)); const TfLiteTensor* input2; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor2, &input2)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); if (output->type == kTfLiteFloat32 || output->type == kTfLiteInt32) { EvalDiv<kernel_type>(context, node, params, data, input1, input2, output); } else if (output->type == kTfLiteUInt8) { TF_LITE_ENSURE_OK( context, EvalQuantized<kernel_type>(context, node, params, data, input1, input2, output)); } else { context->ReportError( context, "Div only supports FLOAT32, INT32 and quantized UINT8 now, got %d.", output->type); return kTfLiteError; } return kTfLiteOk; }
CWE-369
60
TEST(DefaultCertValidatorTest, TestMatchSubjectAltNameURIMatched) { bssl::UniquePtr<X509> cert = readCertFromFile(TestEnvironment::substitute( "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/san_uri_cert.pem")); envoy::type::matcher::v3::StringMatcher matcher; matcher.MergeFrom(TestUtility::createRegexMatcher("spiffe://lyft.com/.*-team")); std::vector<Matchers::StringMatcherImpl<envoy::type::matcher::v3::StringMatcher>> subject_alt_name_matchers; subject_alt_name_matchers.push_back(Matchers::StringMatcherImpl(matcher)); EXPECT_TRUE(DefaultCertValidator::matchSubjectAltName(cert.get(), subject_alt_name_matchers)); }
CWE-295
52
char * unescape(char * dest, const char * src) { while (*src) { if (*src == '\\') { ++src; switch (*src) { case 'n': *dest = '\n'; break; case 'r': *dest = '\r'; break; case 't': *dest = '\t'; break; case 'f': *dest = '\f'; break; case 'v': *dest = '\v'; break; default: *dest = *src; } } else { *dest = *src; } ++src; ++dest; } *dest = '\0'; return dest; }
CWE-125
47
StatusOr<FullTypeDef> SpecializeType(const AttrSlice& attrs, const OpDef& op_def) { FullTypeDef ft; ft.set_type_id(TFT_PRODUCT); for (int i = 0; i < op_def.output_arg_size(); i++) { auto* t = ft.add_args(); *t = op_def.output_arg(i).experimental_full_type(); // Resolve dependent types. The convention for op registrations is to use // attributes as type variables. // See https://www.tensorflow.org/guide/create_op#type_polymorphism. // Once the op signature can be defined entirely in FullType, this // convention can be deprecated. // // Note: While this code performs some basic verifications, it generally // assumes consistent op defs and attributes. If more complete // verifications are needed, they should be done by separately, and in a // way that can be reused for type inference. for (int j = 0; j < t->args_size(); j++) { auto* arg = t->mutable_args(i); if (arg->type_id() == TFT_VAR) { const auto* attr = attrs.Find(arg->s()); DCHECK(attr != nullptr); if (attr->value_case() == AttrValue::kList) { const auto& attr_list = attr->list(); arg->set_type_id(TFT_PRODUCT); for (int i = 0; i < attr_list.type_size(); i++) { map_dtype_to_tensor(attr_list.type(i), arg->add_args()); } } else if (attr->value_case() == AttrValue::kType) { map_dtype_to_tensor(attr->type(), arg); } else { return Status(error::UNIMPLEMENTED, absl::StrCat("unknown attribute type", attrs.DebugString(), " key=", arg->s())); } arg->clear_s(); } } } return ft; }
CWE-617
51
UrlQuery::UrlQuery(const std::string& encoded_str) { if (!encoded_str.empty()) { // Split into key value pairs separated by '&'. for (std::size_t i = 0; i != std::string::npos;) { std::size_t j = encoded_str.find_first_of('&', i); std::string kv; if (j == std::string::npos) { kv = encoded_str.substr(i); i = std::string::npos; } else { kv = encoded_str.substr(i, j - i); i = j + 1; } string_view key; string_view value; if (SplitKV(kv, '=', false, &key, &value)) { parameters_.push_back({ DecodeUnsafe(key), DecodeUnsafe(value) }); } } } }
CWE-22
2
PROCESS_THREAD(snmp_process, ev, data) { PROCESS_BEGIN(); /* new connection with remote host */ snmp_udp_conn = udp_new(NULL, 0, NULL); udp_bind(snmp_udp_conn, SNMP_SERVER_PORT); LOG_DBG("Listening on port %u\n", uip_ntohs(snmp_udp_conn->lport)); while(1) { PROCESS_YIELD(); if(ev == tcpip_event) { if(uip_newdata()) { snmp_process_data(); } } } /* while (1) */ PROCESS_END(); }
CWE-125
47
void AveragePool(const float* input_data, const Dims<4>& input_dims, int stride_width, int stride_height, int pad_width, int pad_height, int kwidth, int kheight, float* output_data, const Dims<4>& output_dims) { float output_activation_min, output_activation_max; GetActivationMinMax(Ac, &output_activation_min, &output_activation_max); AveragePool(input_data, input_dims, stride_width, stride_height, pad_width, pad_height, kwidth, kheight, output_activation_min, output_activation_max, output_data, output_dims); }
CWE-835
42
int main(int argc, char * argv[]) { gr_face * face = 0; try { if (argc != 2) throw std::length_error("not enough arguments: need a backing font"); dummyFace = face_handle(argv[1]); testFeatTable<FeatTableTestA>(testDataA, "A\n"); testFeatTable<FeatTableTestB>(testDataB, "B\n"); testFeatTable<FeatTableTestB>(testDataBunsorted, "Bu\n"); testFeatTable<FeatTableTestC>(testDataCunsorted, "C\n"); testFeatTable<FeatTableTestD>(testDataDunsorted, "D\n"); testFeatTable<FeatTableTestE>(testDataE, "E\n"); // test a bad settings offset stradling the end of the table FeatureMap testFeatureMap; dummyFace.replace_table(TtfUtil::Tag::Feat, &testBadOffset, sizeof testBadOffset); face = gr_make_face_with_ops(&dummyFace, &face_handle::ops, gr_face_dumbRendering); bool readStatus = testFeatureMap.readFeats(*face); testAssert("fail gracefully on bad table", !readStatus); } catch (std::exception & e) { fprintf(stderr, "%s: %s\n", argv[0], e.what()); gr_face_destroy(face); return 1; } gr_face_destroy(face); return 0; }
CWE-476
46
TEST_CASE_METHOD(TestFixture, "DKG AES gen test", "[dkg-aes-gen]") { vector <uint8_t> encryptedDKGSecret(BUF_LEN, 0); vector<char> errMsg(BUF_LEN, 0); int errStatus = 0; uint32_t encLen = 0; PRINT_SRC_LINE auto status = trustedGenDkgSecretAES(eid, &errStatus, errMsg.data(), encryptedDKGSecret.data(), &encLen, 32); REQUIRE(status == SGX_SUCCESS); REQUIRE(errStatus == SGX_SUCCESS); vector<char> secret(BUF_LEN, 0); vector<char> errMsg1(BUF_LEN, 0); status = trustedDecryptDkgSecretAES(eid, &errStatus, errMsg1.data(), encryptedDKGSecret.data(), encLen, (uint8_t *) secret.data()); REQUIRE(status == SGX_SUCCESS); REQUIRE(errStatus == SGX_SUCCESS); }
CWE-787
24
unsigned int GetU32BE (int nPos, bool *pbSuccess) { //*pbSuccess = true; if ( nPos < 0 || nPos + 3 >= m_nLen ) { *pbSuccess = false; return 0; } unsigned int nRes = m_sFile[nPos]; nRes = (nRes << 8) + m_sFile[nPos + 1]; nRes = (nRes << 8) + m_sFile[nPos + 2]; nRes = (nRes << 8) + m_sFile[nPos + 3]; return nRes; }
CWE-787
24
static jas_image_cmpt_t *jas_image_cmpt_create(int_fast32_t tlx, int_fast32_t tly, int_fast32_t hstep, int_fast32_t vstep, int_fast32_t width, int_fast32_t height, uint_fast16_t depth, bool sgnd, uint_fast32_t inmem) { jas_image_cmpt_t *cmpt; size_t size; cmpt = 0; if (width < 0 || height < 0 || hstep <= 0 || vstep <= 0) { goto error; } if (!jas_safe_intfast32_add(tlx, width, 0) || !jas_safe_intfast32_add(tly, height, 0)) { goto error; } if (!(cmpt = jas_malloc(sizeof(jas_image_cmpt_t)))) { goto error; } cmpt->type_ = JAS_IMAGE_CT_UNKNOWN; cmpt->tlx_ = tlx; cmpt->tly_ = tly; cmpt->hstep_ = hstep; cmpt->vstep_ = vstep; cmpt->width_ = width; cmpt->height_ = height; cmpt->prec_ = depth; cmpt->sgnd_ = sgnd; cmpt->stream_ = 0; cmpt->cps_ = (depth + 7) / 8; // Compute the number of samples in the image component, while protecting // against overflow. // size = cmpt->width_ * cmpt->height_ * cmpt->cps_; if (!jas_safe_size_mul(cmpt->width_, cmpt->height_, &size) || !jas_safe_size_mul(size, cmpt->cps_, &size)) { goto error; } cmpt->stream_ = (inmem) ? jas_stream_memopen(0, size) : jas_stream_tmpfile(); if (!cmpt->stream_) { goto error; } /* Zero the component data. This isn't necessary, but it is convenient for debugging purposes. */ /* Note: conversion of size - 1 to long can overflow */ if (jas_stream_seek(cmpt->stream_, size - 1, SEEK_SET) < 0 || jas_stream_putc(cmpt->stream_, 0) == EOF || jas_stream_seek(cmpt->stream_, 0, SEEK_SET) < 0) { goto error; } return cmpt; error: if (cmpt) { jas_image_cmpt_destroy(cmpt); } return 0; }
CWE-190
19
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteIntArray* input_dims = input->dims; int input_dims_size = input_dims->size; TF_LITE_ENSURE(context, input_dims_size >= 2); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); TfLiteIntArray* output_shape = TfLiteIntArrayCreate(input_dims_size); for (int i = 0; i < input_dims_size; i++) { output_shape->data[i] = input_dims->data[i]; } // Resize the output tensor to the same size as the input tensor. output->type = input->type; TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, output, output_shape)); return kTfLiteOk; }
CWE-787
24
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { Subgraph* subgraph = reinterpret_cast<Subgraph*>(context->impl_); const TfLiteTensor* input_resource_id_tensor = GetInput(context, node, kInputVariableId); const TfLiteTensor* input_value_tensor = GetInput(context, node, kInputValue); int resource_id = input_resource_id_tensor->data.i32[0]; auto& resources = subgraph->resources(); resource::CreateResourceVariableIfNotAvailable(&resources, resource_id); auto* variable = resource::GetResourceVariable(&resources, resource_id); TF_LITE_ENSURE(context, variable != nullptr); variable->AssignFrom(input_value_tensor); return kTfLiteOk; }
CWE-125
47
void Context::onUpstreamConnectionClose(PeerType peer_type) { if (wasm_->onUpstreamConnectionClose_) { wasm_->onUpstreamConnectionClose_(this, id_, static_cast<uint32_t>(peer_type)); } }
CWE-476
46
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); // Reinterprete the opaque data provided by user. OpData* data = reinterpret_cast<OpData*>(node->user_data); const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type); const TfLiteType type = input1->type; switch (type) { case kTfLiteFloat32: case kTfLiteInt32: break; default: context->ReportError(context, "Type '%s' is not supported by floor_div.", TfLiteTypeGetName(type)); return kTfLiteError; } output->type = type; data->requires_broadcast = !HaveSameShapes(input1, input2); TfLiteIntArray* output_size = nullptr; if (data->requires_broadcast) { TF_LITE_ENSURE_OK(context, CalculateShapeForBroadcast( context, input1, input2, &output_size)); } else { output_size = TfLiteIntArrayCopy(input1->dims); } return context->ResizeTensor(context, output, output_size); }
CWE-787
24
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); const TfLiteTensor* multipliers = GetInput(context, node, kInputMultipliers); if (IsDynamicTensor(output)) { TF_LITE_ENSURE_OK(context, ResizeOutput(context, node)); } switch (output->type) { case kTfLiteFloat32: Tile<float>(*(input->dims), input, multipliers, output); break; case kTfLiteUInt8: Tile<uint8_t>(*(input->dims), input, multipliers, output); break; case kTfLiteInt32: Tile<int32_t>(*(input->dims), input, multipliers, output); break; case kTfLiteInt64: Tile<int64_t>(*(input->dims), input, multipliers, output); break; case kTfLiteString: { DynamicBuffer buffer; TileString(*(input->dims), input, multipliers, &buffer, output); buffer.WriteToTensor(output, /*new_shape=*/nullptr); break; } case kTfLiteBool: Tile<bool>(*(input->dims), input, multipliers, output); break; default: context->ReportError(context, "Type '%s' is not supported by tile.", TfLiteTypeGetName(output->type)); return kTfLiteError; } return kTfLiteOk; }
CWE-787
24
void jas_matrix_divpow2(jas_matrix_t *matrix, int n) { int i; int j; jas_seqent_t *rowstart; int rowstep; jas_seqent_t *data; if (jas_matrix_numrows(matrix) > 0 && jas_matrix_numcols(matrix) > 0) { assert(matrix->rows_); rowstep = jas_matrix_rowstep(matrix); for (i = matrix->numrows_, rowstart = matrix->rows_[0]; i > 0; --i, rowstart += rowstep) { for (j = matrix->numcols_, data = rowstart; j > 0; --j, ++data) { *data = (*data >= 0) ? ((*data) >> n) : (-((-(*data)) >> n)); } } } }
CWE-190
19
void PCRECache::dump(const std::string& filename) { std::ofstream out(filename.c_str()); switch (m_kind) { case CacheKind::Static: for (auto& it : *m_staticCache) { out << it.first->data() << "\n"; } break; case CacheKind::Lru: case CacheKind::Scalable: { std::vector<LRUCacheKey> keys; if (m_kind == CacheKind::Lru) { m_lruCache->snapshotKeys(keys); } else { m_scalableCache->snapshotKeys(keys); } for (auto& key: keys) { out << key.c_str() << "\n"; } } break; } out.close(); }
CWE-787
24
QString Helper::temporaryMountDevice(const QString &device, const QString &name, bool readonly)
{
    QString mount_point = mountPoint(device);

    if (!mount_point.isEmpty())
        return mount_point;

    mount_point = "%1/.%2/mount/%3";

    const QStringList &tmp_paths = QStandardPaths::standardLocations(QStandardPaths::TempLocation);

    mount_point = mount_point.arg(tmp_paths.isEmpty() ? "/tmp" : tmp_paths.first()).arg(qApp->applicationName()).arg(name);

    if (!QDir::current().mkpath(mount_point)) {
        dCError("mkpath \"%s\" failed", qPrintable(mount_point));

        return QString();
    }

    if (!mountDevice(device, mount_point, readonly)) {
        dCError("Mount the device \"%s\" to \"%s\" failed", qPrintable(device), qPrintable(mount_point));

        return QString();
    }

    return mount_point;
}
CWE-59
36
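The link-following label fits the mount point being a predictable path under a world-writable temp directory: a local attacker can pre-create it, or plant a symlink there, and redirect the mount. A hedged sketch using Qt's QTemporaryDir, which creates a fresh unpredictable directory or fails; the template string is illustrative:

// Sketch only: unpredictable, freshly created mount directory.
QTemporaryDir dir(QDir::tempPath() + "/." + qApp->applicationName() + "-XXXXXX");
if (!dir.isValid())
    return QString();
dir.setAutoRemove(false);          // keep the directory alive for the mount
const QString mount_point = dir.path();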
static int lookup1_values(int entries, int dim)
{
   int r = (int) floor(exp((float) log((float) entries) / dim));
   if ((int) floor(pow((float) r+1, dim)) <= entries)   // (int) cast for MinGW warning;
      ++r;                                              // floor() to avoid _ftol() when non-CRT
   assert(pow((float) r+1, dim) > entries);
   assert((int) floor(pow((float) r, dim)) <= entries); // (int),floor() as above
   return r;
}
CWE-787
24
bool Unpack::ProcessDecoded(UnpackThreadData &D)
{
  UnpackDecodedItem *Item=D.Decoded,*Border=D.Decoded+D.DecodedSize;
  while (Item<Border)
  {
    UnpPtr&=MaxWinMask;

    if (((WriteBorder-UnpPtr) & MaxWinMask)<MAX_LZ_MATCH+3 && WriteBorder!=UnpPtr)
    {
      UnpWriteBuf();
      if (WrittenFileSize>DestUnpSize)
        return false;
    }

    if (Item->Type==UNPDT_LITERAL)
    {
#if defined(LITTLE_ENDIAN) && defined(ALLOW_MISALIGNED)
      if (Item->Length==3 && UnpPtr<MaxWinSize-4)
      {
        *(uint32 *)(Window+UnpPtr)=*(uint32 *)Item->Literal;
        UnpPtr+=4;
      }
      else
#endif
        for (uint I=0;I<=Item->Length;I++)
          Window[UnpPtr++ & MaxWinMask]=Item->Literal[I];
    }
    else
      if (Item->Type==UNPDT_MATCH)
      {
        InsertOldDist(Item->Distance);
        LastLength=Item->Length;
        CopyString(Item->Length,Item->Distance);
      }
      else
        if (Item->Type==UNPDT_REP)
        {
          uint Distance=OldDist[Item->Distance];
          for (uint I=Item->Distance;I>0;I--)
            OldDist[I]=OldDist[I-1];
          OldDist[0]=Distance;
          LastLength=Item->Length;
          CopyString(Item->Length,Distance);
        }
        else
          if (Item->Type==UNPDT_FULLREP)
          {
            if (LastLength!=0)
              CopyString(LastLength,OldDist[0]);
          }
          else
            if (Item->Type==UNPDT_FILTER)
            {
              UnpackFilter Filter;
              Filter.Type=(byte)Item->Length;
              Filter.BlockStart=Item->Distance;
              Item++;
              Filter.Channels=(byte)Item->Length;
              Filter.BlockLength=Item->Distance;
              AddFilter(Filter);
            }
    Item++;
  }
  return true;
}
CWE-787
24
int MSADPCM::decodeBlock(const uint8_t *encoded, int16_t *decoded)
{
  ms_adpcm_state decoderState[2];
  ms_adpcm_state *state[2];

  int channelCount = m_track->f.channelCount;

  // Calculate the number of bytes needed for decoded data.
  int outputLength = m_framesPerPacket * sizeof (int16_t) * channelCount;

  state[0] = &decoderState[0];
  if (channelCount == 2)
    state[1] = &decoderState[1];
  else
    state[1] = &decoderState[0];

  // Initialize block predictor.
  for (int i=0; i<channelCount; i++)
  {
    state[i]->predictorIndex = *encoded++;
    assert(state[i]->predictorIndex < m_numCoefficients);
  }

  // Initialize delta.
  for (int i=0; i<channelCount; i++)
  {
    state[i]->delta = (encoded[1]<<8) | encoded[0];
    encoded += sizeof (uint16_t);
  }

  // Initialize first two samples.
  for (int i=0; i<channelCount; i++)
  {
    state[i]->sample1 = (encoded[1]<<8) | encoded[0];
    encoded += sizeof (uint16_t);
  }

  for (int i=0; i<channelCount; i++)
  {
    state[i]->sample2 = (encoded[1]<<8) | encoded[0];
    encoded += sizeof (uint16_t);
  }

  const int16_t *coefficient[2] =
  {
    m_coefficients[state[0]->predictorIndex],
    m_coefficients[state[1]->predictorIndex]
  };

  for (int i=0; i<channelCount; i++)
    *decoded++ = state[i]->sample2;
  for (int i=0; i<channelCount; i++)
    *decoded++ = state[i]->sample1;

  /* The first two samples have already been 'decoded' in the block header. */
  int samplesRemaining = (m_framesPerPacket - 2) * m_track->f.channelCount;

  while (samplesRemaining > 0)
  {
    uint8_t code;
    int16_t newSample;

    code = *encoded >> 4;
    newSample = decodeSample(*state[0], code, coefficient[0]);
    *decoded++ = newSample;

    code = *encoded & 0x0f;
    newSample = decodeSample(*state[1], code, coefficient[1]);
    *decoded++ = newSample;

    encoded++;
    samplesRemaining -= 2;
  }

  return outputLength;
}
CWE-190
19
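The predictor index is validated only by `assert`, which vanishes in NDEBUG builds; an out-of-range index makes `m_coefficients[...]` read out of bounds and feeds garbage into the later sample arithmetic. A hedged sketch of a runtime check in place of the assert:

// Sketch only: reject malformed blocks instead of assert().
for (int i=0; i<channelCount; i++)
{
  state[i]->predictorIndex = *encoded++;
  if (state[i]->predictorIndex >= m_numCoefficients)
    return 0;  // malformed block; decode nothing
}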
RawTile OpenJPEGImage::getRegion( int ha, int va, unsigned int res, int layers, int x, int y, unsigned int w, unsigned int h ){

  // Scale up our output bit depth to the nearest factor of 8
  unsigned int obpc = bpc;
  if( bpc <= 16 && bpc > 8 ) obpc = 16;
  else if( bpc <= 8 ) obpc = 8;

#ifdef DEBUG
  Timer timer;
  timer.start();
#endif

  RawTile rawtile( 0, res, ha, va, w, h, channels, obpc );

  if( obpc == 16 ) rawtile.data = new unsigned short[w * h * channels];
  else if( obpc == 8 ) rawtile.data = new unsigned char[w * h * channels];
  else throw file_error( "OpenJPEG :: Unsupported number of bits" );

  rawtile.dataLength = w*h*channels*(obpc/8);
  rawtile.filename = getImagePath();
  rawtile.timestamp = timestamp;

  process( res, layers, x, y, w, h, rawtile.data );

#ifdef DEBUG
  logfile << "OpenJPEG :: getRegion() :: " << timer.getTime() << " microseconds" << endl;
#endif

  return rawtile;
}
CWE-190
19
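`w * h * channels` is evaluated in unsigned int, so a large region wraps around, under-allocates the buffer, and lets process() write past it. A hedged sketch of 64-bit checked sizing (SIZE_MAX is from <cstdint>):

// Sketch only: compute the element count in 64 bits and reject wrap-around.
const uint64_t pixels = (uint64_t) w * h * channels;
if( pixels > SIZE_MAX / sizeof(unsigned short) )
  throw file_error( "OpenJPEG :: requested region too large" );
if( obpc == 16 ) rawtile.data = new unsigned short[pixels];
else if( obpc == 8 ) rawtile.data = new unsigned char[pixels];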
TfLiteStatus GenericPrepare(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
  const TfLiteTensor* input = GetInput(context, node, 0);
  TfLiteTensor* output = GetOutput(context, node, 0);
  TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
  return context->ResizeTensor(context, output,
                               TfLiteIntArrayCopy(input->dims));
}
CWE-125
47
FdInStream::FdInStream(int fd_, FdInStreamBlockCallback* blockCallback_,
                       int bufSize_)
  : fd(fd_), timeoutms(0), blockCallback(blockCallback_),
    timing(false), timeWaitedIn100us(5), timedKbits(0),
    bufSize(bufSize_ ? bufSize_ : DEFAULT_BUF_SIZE), offset(0)
{
  ptr = end = start = new U8[bufSize];
}
CWE-787
24
static bool TryParse(const char* inp, int length,
                     TypedValue* buf, Variant& out,
                     JSONContainerType container_type, bool is_tsimplejson) {
  SimpleParser parser(inp, length, buf, container_type, is_tsimplejson);
  bool ok = parser.parseValue();
  parser.skipSpace();
  if (!ok || parser.p != inp + length) {
    // Unsupported, malformed, or trailing garbage. Release entire stack.
    tvDecRefRange(buf, parser.top);
    return false;
  }
  out = Variant::attach(*--parser.top);
  return true;
}
CWE-125
47
void XfccIntegrationTest::initialize() {
  config_helper_.addConfigModifier(
      [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager&
              hcm) -> void {
        hcm.set_forward_client_cert_details(fcc_);
        hcm.mutable_set_current_client_cert_details()->CopyFrom(sccd_);
      });

  config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void {
    auto transport_socket =
        bootstrap.mutable_static_resources()->mutable_clusters(0)->mutable_transport_socket();
    envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext context;
    auto* validation_context = context.mutable_common_tls_context()->mutable_validation_context();
    validation_context->mutable_trusted_ca()->set_filename(
        TestEnvironment::runfilesPath("test/config/integration/certs/upstreamcacert.pem"));
    validation_context->add_match_subject_alt_names()->set_suffix("lyft.com");
    transport_socket->set_name("envoy.transport_sockets.tls");
    transport_socket->mutable_typed_config()->PackFrom(context);
  });

  if (tls_) {
    config_helper_.addSslConfig();
  }

  context_manager_ =
      std::make_unique<Extensions::TransportSockets::Tls::ContextManagerImpl>(timeSystem());
  client_tls_ssl_ctx_ = createClientSslContext(false);
  client_mtls_ssl_ctx_ = createClientSslContext(true);
  HttpIntegrationTest::initialize();
}
CWE-295
52
TfLiteStatus EvalHashtableSize(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input_resource_id_tensor =
      GetInput(context, node, kInputResourceIdTensor);
  int resource_id = input_resource_id_tensor->data.i32[0];

  TfLiteTensor* output_tensor = GetOutput(context, node, kOutputTensor);
  auto* output_data = GetTensorData<std::int64_t>(output_tensor);

  Subgraph* subgraph = reinterpret_cast<Subgraph*>(context->impl_);
  auto& resources = subgraph->resources();
  auto* lookup = resource::GetHashtableResource(&resources, resource_id);
  TF_LITE_ENSURE(context, lookup != nullptr);

  output_data[0] = lookup->Size();

  return kTfLiteOk;
}
CWE-787
24
int64_t BZ2File::readImpl(char * buf, int64_t length) {
  if (length == 0) {
    return 0;
  }
  assertx(m_bzFile);
  int len = BZ2_bzread(m_bzFile, buf, length);
  /* Sometimes libbz2 will return fewer bytes than requested, and set bzerror
   * to BZ_STREAM_END, but it's not actually EOF, and you can keep reading from
   * the file - so, only set EOF after a failed read. This matches PHP5. */
  if (len <= 0) {
    setEof(true);
    if (len < 0) {
      return -1;
    }
  }
  return len;
}
CWE-125
47
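`BZ2_bzread` takes an int, so the int64 `length` is silently truncated and can even arrive negative. A hedged sketch that clamps the request before the call (uses <algorithm> and <limits>):

// Sketch only: keep the request within what BZ2_bzread can express.
if (length < 0) return -1;
int want = static_cast<int>(
    std::min<int64_t>(length, std::numeric_limits<int>::max()));
int len = BZ2_bzread(m_bzFile, buf, want);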
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
  const int num_elements = NumElements(input);
  TF_LITE_ENSURE_EQ(context, num_elements, NumElements(output));
  switch (input->type) {
    case kTfLiteInt64:
      return copyToTensor(context, input->data.i64, output, num_elements);
    case kTfLiteInt32:
      return copyToTensor(context, input->data.i32, output, num_elements);
    case kTfLiteUInt8:
      return copyToTensor(context, input->data.uint8, output, num_elements);
    case kTfLiteFloat32:
      return copyToTensor(context, GetTensorData<float>(input), output,
                          num_elements);
    case kTfLiteBool:
      return copyToTensor(context, input->data.b, output, num_elements);
    case kTfLiteComplex64:
      return copyToTensor(
          context, reinterpret_cast<std::complex<float>*>(input->data.c64),
          output, num_elements);
    default:
      // Unsupported type.
      TF_LITE_UNSUPPORTED_TYPE(context, input->type, "Cast");
  }
  return kTfLiteOk;
}
CWE-125
47
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  // Just copy input to output.
  const TfLiteTensor* input = GetInput(context, node, kInput);
  TfLiteTensor* output = GetOutput(context, node, 0);
  const TfLiteTensor* axis = GetInput(context, node, kAxis);
  if (IsDynamicTensor(output)) {
    int axis_value;
    TF_LITE_ENSURE_OK(context,
                      GetAxisValueFromTensor(context, *axis, &axis_value));
    TF_LITE_ENSURE_OK(context,
                      ExpandTensorDim(context, *input, axis_value, output));
  }
  if (output->type == kTfLiteString) {
    TfLiteTensorRealloc(input->bytes, output);
  }
  memcpy(output->data.raw, input->data.raw, input->bytes);
  return kTfLiteOk;
}
CWE-787
24
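For the out-of-bounds-write label: the final memcpy trusts that the output buffer exists and is at least `input->bytes` long. A hedged sketch of the missing guards:

// Sketch only: verify the destination before copying.
TF_LITE_ENSURE(context, output->data.raw != nullptr);
TF_LITE_ENSURE(context, output->bytes >= input->bytes);
memcpy(output->data.raw, input->data.raw, input->bytes);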
void reposition(int pos) { ptr = start + pos; }
CWE-787
24
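Nothing constrains `pos`, so `ptr` can land before the buffer or past its end, setting up the labeled out-of-bounds write on the next access. A hedged sketch, assuming the class also keeps an `end` pointer (as the stream classes above do); the exception type is illustrative:

// Sketch only: validate the target position before moving ptr.
void reposition(int pos) {
  if (pos < 0 || start + pos > end)
    throw std::out_of_range("reposition: position outside buffer");
  ptr = start + pos;
}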
jas_image_t *jas_image_create0()
{
  jas_image_t *image;

  if (!(image = jas_malloc(sizeof(jas_image_t)))) {
    return 0;
  }

  image->tlx_ = 0;
  image->tly_ = 0;
  image->brx_ = 0;
  image->bry_ = 0;
  image->clrspc_ = JAS_CLRSPC_UNKNOWN;
  image->numcmpts_ = 0;
  image->maxcmpts_ = 0;
  image->cmpts_ = 0;
  image->inmem_ = true;
  image->cmprof_ = 0;

  return image;
}
CWE-190
19
TEST_P(SslSocketTest, FailedClientAuthSanVerification) {
  const std::string client_ctx_yaml = R"EOF(
  common_tls_context:
    tls_certificates:
      certificate_chain:
        filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/no_san_cert.pem"
      private_key:
        filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/no_san_key.pem"
)EOF";

  const std::string server_ctx_yaml = R"EOF(
  common_tls_context:
    tls_certificates:
      certificate_chain:
        filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/unittest_cert.pem"
      private_key:
        filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/unittest_key.pem"
    validation_context:
      trusted_ca:
        filename: "{{ test_rundir }}/test/extensions/transport_sockets/tls/test_data/ca_cert.pem"
      match_subject_alt_names:
        exact: "example.com"
)EOF";

  TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, false, GetParam());
  testUtil(test_options.setExpectedServerStats("ssl.fail_verify_san"));
}
CWE-295
52
TfLiteStatus ResizeOutputTensor(TfLiteContext* context,
                                const TfLiteTensor* data,
                                const TfLiteTensor* segment_ids,
                                TfLiteTensor* output) {
  int max_index = -1;
  const int segment_id_size = segment_ids->dims->data[0];
  if (segment_id_size > 0) {
    max_index = segment_ids->data.i32[segment_id_size - 1];
  }
  const int data_rank = NumDimensions(data);
  TfLiteIntArray* output_shape = TfLiteIntArrayCreate(NumDimensions(data));
  output_shape->data[0] = max_index + 1;
  for (int i = 1; i < data_rank; ++i) {
    output_shape->data[i] = data->dims->data[i];
  }
  return context->ResizeTensor(context, output, output_shape);
}
CWE-787
24
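`max_index` is taken from the last segment id, which is only the maximum if the ids are sorted and non-negative; otherwise the output is sized too small and later writes overflow it. A hedged sketch of the missing validation:

// Sketch only: enforce the sortedness/non-negativity the sizing relies on.
for (int i = 0; i < segment_id_size; ++i) {
  const int id = segment_ids->data.i32[i];
  TF_LITE_ENSURE(context, id >= 0);
  TF_LITE_ENSURE(context, i == 0 || id >= segment_ids->data.i32[i - 1]);
}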
static x86newTokenType getToken(const char *str, size_t *begin, size_t *end) {
  // Skip whitespace
  while (begin && isspace ((ut8)str[*begin])) {
    ++(*begin);
  }

  if (!str[*begin]) { // null byte
    *end = *begin;
    return TT_EOF;
  } else if (isalpha ((ut8)str[*begin])) { // word token
    *end = *begin;
    while (end && isalnum ((ut8)str[*end])) {
      ++(*end);
    }
    return TT_WORD;
  } else if (isdigit ((ut8)str[*begin])) { // number token
    *end = *begin;
    while (end && isalnum ((ut8)str[*end])) { // accept alphanumeric characters, because hex.
      ++(*end);
    }
    return TT_NUMBER;
  } else { // special character: [, ], +, *, ...
    *end = *begin + 1;
    return TT_SPECIAL;
  }
}
CWE-125
47
Status OpLevelCostEstimator::PredictFusedBatchNormGrad(
    const OpContext& op_context, NodeCosts* node_costs) const {
  bool found_unknown_shapes = false;
  const auto& op_info = op_context.op_info;
  // y_backprop: op_info.inputs(0)
  // x: op_info.inputs(1)
  // scale: op_info.inputs(2)
  // mean: op_info.inputs(3)
  // variance or inverse of variance: op_info.inputs(4)
  ConvolutionDimensions dims = OpDimensionsFromInputs(
      op_info.inputs(1).shape(), op_info, &found_unknown_shapes);

  int64_t ops = 0;
  const auto rsqrt_cost = Eigen::internal::functor_traits<
      Eigen::internal::scalar_rsqrt_op<float>>::Cost;
  ops = dims.iz * (dims.batch * dims.ix * dims.iy * 11 + 5 + rsqrt_cost);
  node_costs->num_compute_ops = ops;

  const int64_t size_nhwc =
      CalculateTensorSize(op_info.inputs(1), &found_unknown_shapes);
  const int64_t size_c =
      CalculateTensorSize(op_info.inputs(2), &found_unknown_shapes);
  // TODO(dyoon): fix missing memory cost for variance input (size_c) and
  // yet another read of y_backprop (size_nhwc) internally.
  node_costs->num_input_bytes_accessed = {size_nhwc, size_nhwc, size_c, size_c};
  node_costs->num_output_bytes_accessed = {size_nhwc, size_c, size_c};

  // FusedBatchNormGrad has to read y_backprop internally.
  node_costs->internal_read_bytes = size_nhwc;

  node_costs->max_memory = node_costs->num_total_output_bytes();

  if (found_unknown_shapes) {
    node_costs->inaccurate = true;
    node_costs->num_nodes_with_unknown_shapes = 1;
  }
  return Status::OK();
}
CWE-369
60
TfLiteStatus Resize(TfLiteContext* context, TfLiteNode* node) {
  auto* params =
      reinterpret_cast<TfLiteLSHProjectionParams*>(node->builtin_data);
  TF_LITE_ENSURE(context, NumInputs(node) == 2 || NumInputs(node) == 3);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);

  const TfLiteTensor* hash = GetInput(context, node, 0);
  TF_LITE_ENSURE_EQ(context, NumDimensions(hash), 2);
  // Support up to 32 bits.
  TF_LITE_ENSURE(context, SizeOfDimension(hash, 1) <= 32);

  const TfLiteTensor* input = GetInput(context, node, 1);
  TF_LITE_ENSURE(context, NumDimensions(input) >= 1);

  if (NumInputs(node) == 3) {
    const TfLiteTensor* weight = GetInput(context, node, 2);
    TF_LITE_ENSURE_EQ(context, NumDimensions(weight), 1);
    TF_LITE_ENSURE_EQ(context, SizeOfDimension(weight, 0),
                      SizeOfDimension(input, 0));
  }

  TfLiteTensor* output = GetOutput(context, node, 0);
  TfLiteIntArray* outputSize = TfLiteIntArrayCreate(1);
  switch (params->type) {
    case kTfLiteLshProjectionSparse:
      outputSize->data[0] = SizeOfDimension(hash, 0);
      break;
    case kTfLiteLshProjectionDense:
      outputSize->data[0] = SizeOfDimension(hash, 0) * SizeOfDimension(hash, 1);
      break;
    default:
      return kTfLiteError;
  }
  return context->ResizeTensor(context, output, outputSize);
}
CWE-787
24
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);

  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
  const TfLiteTensor* axis = GetInput(context, node, kAxis);
  // Make sure the axis is only 1 dimension.
  TF_LITE_ENSURE_EQ(context, NumElements(axis), 1);
  // Make sure the axis is only either int32 or int64.
  TF_LITE_ENSURE(context,
                 axis->type == kTfLiteInt32 || axis->type == kTfLiteInt64);

  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
  auto* params = reinterpret_cast<TfLiteArgMaxParams*>(node->builtin_data);
  switch (params->output_type) {
    case kTfLiteInt32:
      output->type = kTfLiteInt32;
      break;
    case kTfLiteInt64:
      output->type = kTfLiteInt64;
      break;
    default:
      context->ReportError(context, "Unknown index output data type: %d",
                           params->output_type);
      return kTfLiteError;
  }

  // Check conditions for different types.
  switch (input->type) {
    case kTfLiteFloat32:
    case kTfLiteUInt8:
    case kTfLiteInt8:
    case kTfLiteInt32:
      break;
    default:
      context->ReportError(
          context,
          "Unknown input type: %d, only float32 and int types are supported",
          input->type);
      return kTfLiteError;
  }

  TF_LITE_ENSURE(context, NumDimensions(input) >= 1);

  if (IsConstantTensor(axis)) {
    TF_LITE_ENSURE_STATUS(ResizeOutput(context, input, axis, output));
  } else {
    SetTensorToDynamic(output);
  }

  return kTfLiteOk;
}
CWE-787
24
TfLiteStatus SimpleStatefulOp::Invoke(TfLiteContext* context,
                                      TfLiteNode* node) {
  OpData* data = reinterpret_cast<OpData*>(node->user_data);
  *data->invoke_count += 1;

  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
  const uint8_t* input_data = GetTensorData<uint8_t>(input);
  int size = NumElements(input->dims);

  uint8_t* sorting_buffer = reinterpret_cast<uint8_t*>(
      context->GetScratchBuffer(context, data->sorting_buffer));
  // Copy inputs data to the sorting buffer. We don't want to mutate the input
  // tensor as it might be used by another node.
  for (int i = 0; i < size; i++) {
    sorting_buffer[i] = input_data[i];
  }

  // In place insertion sort on `sorting_buffer`.
  for (int i = 1; i < size; i++) {
    for (int j = i; j > 0 && sorting_buffer[j] < sorting_buffer[j - 1]; j--) {
      std::swap(sorting_buffer[j], sorting_buffer[j - 1]);
    }
  }

  TfLiteTensor* median = GetOutput(context, node, kMedianTensor);
  uint8_t* median_data = GetTensorData<uint8_t>(median);
  TfLiteTensor* invoke_count = GetOutput(context, node, kInvokeCount);
  int32_t* invoke_count_data = GetTensorData<int32_t>(invoke_count);

  median_data[0] = sorting_buffer[size / 2];
  invoke_count_data[0] = *data->invoke_count;
  return kTfLiteOk;
}
CWE-125
47
std::string decodeBase64(const std::string& encoded) {
  if (encoded.size() == 0) {
    // special case, to prevent an integer overflow down below.
    return "";
  }

  using namespace boost::archive::iterators;
  using b64it =
      transform_width<binary_from_base64<std::string::const_iterator>, 8, 6>;

  std::string decoded = std::string(b64it(std::begin(encoded)),
                                    b64it(std::end(encoded)));

  uint32_t numPadding = std::count(encoded.begin(), encoded.end(), '=');
  decoded.erase(decoded.end() - numPadding, decoded.end());

  return decoded;
}
CWE-787
24
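If the input contains more '=' characters than the output has bytes (e.g. "===="), `decoded.end() - numPadding` walks before begin() and erase() corrupts the string. A hedged sketch that clamps the pad count:

// Sketch only: never erase more bytes than were actually decoded.
size_t numPadding = std::count(encoded.begin(), encoded.end(), '=');
numPadding = std::min(numPadding, decoded.size());
decoded.erase(decoded.end() - numPadding, decoded.end());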
inline bool loadModule(const char* filename, IR::Module& outModule)
{
  // Read the specified file into an array.
  std::vector<U8> fileBytes;
  if(!loadFile(filename, fileBytes)) { return false; }

  // If the file starts with the WASM binary magic number, load it as a binary irModule.
  if(*(U32*)fileBytes.data() == 0x6d736100)
  { return loadBinaryModule(fileBytes.data(), fileBytes.size(), outModule); }
  else
  {
    // Make sure the WAST file is null terminated.
    fileBytes.push_back(0);

    // Load it as a text irModule.
    std::vector<WAST::Error> parseErrors;
    if(!WAST::parseModule(
         (const char*)fileBytes.data(), fileBytes.size(), outModule, parseErrors))
    {
      Log::printf(Log::error, "Error parsing WebAssembly text file:\n");
      reportParseErrors(filename, parseErrors);
      return false;
    }

    return true;
  }
}
CWE-125
47
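`*(U32*)fileBytes.data()` reads four bytes even from a shorter file (and is also an alignment/strict-aliasing hazard), which fits the out-of-bounds-read label. A hedged sketch of a bounds- and alignment-safe magic check:

// Sketch only: size-check first, then read the magic with memcpy.
U32 magic = 0;
if(fileBytes.size() >= sizeof(magic)) { memcpy(&magic, fileBytes.data(), sizeof(magic)); }
if(magic == 0x6d736100)
{ return loadBinaryModule(fileBytes.data(), fileBytes.size(), outModule); }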
template <typename T>
inline TfLiteStatus EvalImpl(TfLiteContext* context, TfLiteNode* node,
                             std::function<T(T)> func,
                             TfLiteType expected_type) {
  const TfLiteTensor* input = GetInput(context, node, 0);
  TfLiteTensor* output = GetOutput(context, node, 0);
  TF_LITE_ENSURE_TYPES_EQ(context, input->type, expected_type);
  const int64_t num_elements = NumElements(input);
  const T* in_data = GetTensorData<T>(input);
  T* out_data = GetTensorData<T>(output);
  for (int64_t i = 0; i < num_elements; ++i) {
    out_data[i] = func(in_data[i]);
  }
  return kTfLiteOk;
}
CWE-787
24
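The loop writes NumElements(input) values through `out_data` without confirming the output was resized to match, which is the likely out-of-bounds write. A hedged one-line guard before the loop:

// Sketch only: input and output must agree before the elementwise loop.
TF_LITE_ENSURE_EQ(context, NumElements(input), NumElements(output));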
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
  TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteFloat32);
  output->type = input->type;
  TfLiteIntArray* output_size = TfLiteIntArrayCopy(input->dims);
  return context->ResizeTensor(context, output, output_size);
}
CWE-787
24
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);

  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
  TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);

  const TfLiteTensor* multipliers = GetInput(context, node, kInputMultipliers);
  // Only int32 and int64 multiplier types are supported.
  if (multipliers->type != kTfLiteInt32 && multipliers->type != kTfLiteInt64) {
    context->ReportError(context,
                         "Multipliers of type '%s' are not supported by tile.",
                         TfLiteTypeGetName(multipliers->type));
    return kTfLiteError;
  }

  if (IsConstantTensor(multipliers)) {
    TF_LITE_ENSURE_OK(context, ResizeOutput(context, node));
  } else {
    SetTensorToDynamic(output);
  }
  return kTfLiteOk;
}
CWE-787
24