Columns:
target: int64, values 0 to 1
func: string, lengths 0 to 484k characters
idx: int64, values 1 to 378k
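As a reading aid, here is a minimal sketch of how rows with this schema could be consumed, assuming the dump comes from a Hugging Face-style dataset; the dataset path "user/defect-detection" below is a hypothetical placeholder, not the actual name of this dataset:

from datasets import load_dataset

# Hypothetical dataset path; substitute the real one for this dump.
ds = load_dataset("user/defect-detection", split="train")
for row in ds:
    label = row["target"]   # binary label, 0 or 1
    code = row["func"]      # one C function as a single string
    row_id = row["idx"]     # integer row identifier
    print(row_id, label, len(code))
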
target: 1
static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *), struct sk_buff_head *hitlist) { struct sk_buff *skb; struct sk_buff *next; spin_lock(&x->sk_receive_queue.lock); receive_queue_for_each_skb(x, next, skb) { /* * Do we have file descriptors ? */ if (UNIXCB(skb).fp) { bool hit = false; /* * Process the descriptors of this socket */ int nfd = UNIXCB(skb).fp->count; struct file **fp = UNIXCB(skb).fp->fp; while (nfd--) { /* * Get the socket the fd matches * if it indeed does so */ struct sock *sk = unix_get_socket(*fp++); if (sk) { hit = true; func(unix_sk(sk)); } } if (hit && hitlist != NULL) { __skb_unlink(skb, &x->sk_receive_queue); __skb_queue_tail(hitlist, skb); } } } spin_unlock(&x->sk_receive_queue.lock); }
idx: 995

target: 1
void unix_gc(void) { static bool gc_in_progress = false; struct unix_sock *u; struct unix_sock *next; struct sk_buff_head hitlist; struct list_head cursor; spin_lock(&unix_gc_lock); /* Avoid a recursive GC. */ if (gc_in_progress) goto out; gc_in_progress = true; /* * First, select candidates for garbage collection. Only * in-flight sockets are considered, and from those only ones * which don't have any external reference. * * Holding unix_gc_lock will protect these candidates from * being detached, and hence from gaining an external * reference. This also means, that since there are no * possible receivers, the receive queues of these sockets are * static during the GC, even though the dequeue is done * before the detach without atomicity guarantees. */ list_for_each_entry_safe(u, next, &gc_inflight_list, link) { long total_refs; long inflight_refs; total_refs = file_count(u->sk.sk_socket->file); inflight_refs = atomic_long_read(&u->inflight); BUG_ON(inflight_refs < 1); BUG_ON(total_refs < inflight_refs); if (total_refs == inflight_refs) { list_move_tail(&u->link, &gc_candidates); u->gc_candidate = 1; } } /* * Now remove all internal in-flight reference to children of * the candidates. */ list_for_each_entry(u, &gc_candidates, link) scan_children(&u->sk, dec_inflight, NULL); /* * Restore the references for children of all candidates, * which have remaining references. Do this recursively, so * only those remain, which form cyclic references. * * Use a "cursor" link, to make the list traversal safe, even * though elements might be moved about. */ list_add(&cursor, &gc_candidates); while (cursor.next != &gc_candidates) { u = list_entry(cursor.next, struct unix_sock, link); /* Move cursor to after the current position. */ list_move(&cursor, &u->link); if (atomic_long_read(&u->inflight) > 0) { list_move_tail(&u->link, &gc_inflight_list); u->gc_candidate = 0; scan_children(&u->sk, inc_inflight_move_tail, NULL); } } list_del(&cursor); /* * Now gc_candidates contains only garbage. Restore original * inflight counters for these as well, and remove the skbuffs * which are creating the cycle(s). */ skb_queue_head_init(&hitlist); list_for_each_entry(u, &gc_candidates, link) scan_children(&u->sk, inc_inflight, &hitlist); spin_unlock(&unix_gc_lock); /* Here we are. Hitlist is filled. Die. */ __skb_queue_purge(&hitlist); spin_lock(&unix_gc_lock); /* All candidates should have been detached by now. */ BUG_ON(!list_empty(&gc_candidates)); gc_in_progress = false; out: spin_unlock(&unix_gc_lock); }
idx: 996

target: 0
static UChar GetUniFromLMBCSUni(char const **ppLMBCSin) { uint8_t HighCh = *(*ppLMBCSin)++; uint8_t LowCh = *(*ppLMBCSin)++; if (HighCh == ULMBCS_UNICOMPATZERO) { HighCh = LowCh; LowCh = 0; } return (UChar)((HighCh << 8) | LowCh); }
idx: 998

target: 0
void cpu_physical_memory_write_rom(target_phys_addr_t addr, const uint8_t *buf, int len) { AddressSpaceDispatch *d = address_space_memory.dispatch; int l; uint8_t *ptr; target_phys_addr_t page; MemoryRegionSection *section; while (len > 0) { page = addr & TARGET_PAGE_MASK; l = (page + TARGET_PAGE_SIZE) - addr; if (l > len) l = len; section = phys_page_find(d, page >> TARGET_PAGE_BITS); if (!(memory_region_is_ram(section->mr) || memory_region_is_romd(section->mr))) { /* do nothing */ } else { unsigned long addr1; addr1 = memory_region_get_ram_addr(section->mr) + memory_region_section_addr(section, addr); /* ROM/RAM case */ ptr = qemu_get_ram_ptr(addr1); memcpy(ptr, buf, l); invalidate_and_set_dirty(addr1, l); qemu_put_ram_ptr(ptr); } len -= l; buf += l; addr += l; } }
idx: 999

target: 1
av_cold int ff_lpc_init(LPCContext *s, int blocksize, int max_order, enum FFLPCType lpc_type) { s->blocksize = blocksize; s->max_order = max_order; s->lpc_type = lpc_type; if (lpc_type == FF_LPC_TYPE_LEVINSON) { s->windowed_samples = av_mallocz((blocksize + max_order + 2) * sizeof(*s->windowed_samples)); if (!s->windowed_samples) return AVERROR(ENOMEM); } else { s->windowed_samples = NULL; } s->lpc_apply_welch_window = lpc_apply_welch_window_c; s->lpc_compute_autocorr = lpc_compute_autocorr_c; if (HAVE_MMX) ff_lpc_init_x86(s); return 0; }
idx: 1000

target: 1
static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock, struct msghdr *msg, size_t len) { struct sock_iocb *siocb = kiocb_to_siocb(kiocb); struct sock *sk = sock->sk; struct net *net = sock_net(sk); struct unix_sock *u = unix_sk(sk); struct sockaddr_un *sunaddr=msg->msg_name; struct sock *other = NULL; int namelen = 0; /* fake GCC */ int err; unsigned hash; struct sk_buff *skb; long timeo; struct scm_cookie tmp_scm; if (NULL == siocb->scm) siocb->scm = &tmp_scm; err = scm_send(sock, msg, siocb->scm); if (err < 0) return err; err = -EOPNOTSUPP; if (msg->msg_flags&MSG_OOB) goto out; if (msg->msg_namelen) { err = unix_mkname(sunaddr, msg->msg_namelen, &hash); if (err < 0) goto out; namelen = err; } else { sunaddr = NULL; err = -ENOTCONN; other = unix_peer_get(sk); if (!other) goto out; } if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr && (err = unix_autobind(sock)) != 0) goto out; err = -EMSGSIZE; if (len > sk->sk_sndbuf - 32) goto out; skb = sock_alloc_send_skb(sk, len, msg->msg_flags&MSG_DONTWAIT, &err); if (skb==NULL) goto out; memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred)); if (siocb->scm->fp) unix_attach_fds(siocb->scm, skb); unix_get_secdata(siocb->scm, skb); skb_reset_transport_header(skb); err = memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len); if (err) goto out_free; timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); restart: if (!other) { err = -ECONNRESET; if (sunaddr == NULL) goto out_free; other = unix_find_other(net, sunaddr, namelen, sk->sk_type, hash, &err); if (other==NULL) goto out_free; } unix_state_lock(other); err = -EPERM; if (!unix_may_send(sk, other)) goto out_unlock; if (sock_flag(other, SOCK_DEAD)) { /* * Check with 1003.1g - what should * datagram error */ unix_state_unlock(other); sock_put(other); err = 0; unix_state_lock(sk); if (unix_peer(sk) == other) { unix_peer(sk)=NULL; unix_state_unlock(sk); unix_dgram_disconnected(sk, other); sock_put(other); err = -ECONNREFUSED; } else { unix_state_unlock(sk); } other = NULL; if (err) goto out_free; goto restart; } err = -EPIPE; if (other->sk_shutdown & RCV_SHUTDOWN) goto out_unlock; if (sk->sk_type != SOCK_SEQPACKET) { err = security_unix_may_send(sk->sk_socket, other->sk_socket); if (err) goto out_unlock; } if (unix_peer(other) != sk && unix_recvq_full(other)) { if (!timeo) { err = -EAGAIN; goto out_unlock; } timeo = unix_wait_for_peer(other, timeo); err = sock_intr_errno(timeo); if (signal_pending(current)) goto out_free; goto restart; } skb_queue_tail(&other->sk_receive_queue, skb); unix_state_unlock(other); other->sk_data_ready(other, len); sock_put(other); scm_destroy(siocb->scm); return len; out_unlock: unix_state_unlock(other); out_free: kfree_skb(skb); out: if (other) sock_put(other); scm_destroy(siocb->scm); return err; }
idx: 1001

target: 1
BOOL nego_read_request(rdpNego* nego, wStream* s) { BYTE li; BYTE type; UINT16 length; if (!tpkt_read_header(s, &length)) return FALSE; if (!tpdu_read_connection_request(s, &li, length)) return FALSE; if (li != Stream_GetRemainingLength(s) + 6) { WLog_ERR(TAG, "Incorrect TPDU length indicator."); return FALSE; } if (!nego_read_request_token_or_cookie(nego, s)) { WLog_ERR(TAG, "Failed to parse routing token or cookie."); return FALSE; } if (Stream_GetRemainingLength(s) >= 8) { /* rdpNegData (optional) */ Stream_Read_UINT8(s, type); /* Type */ if (type != TYPE_RDP_NEG_REQ) { WLog_ERR(TAG, "Incorrect negotiation request type %" PRIu8 "", type); return FALSE; } nego_process_negotiation_request(nego, s); } return tpkt_ensure_stream_consumed(s, length); }
idx: 1002

target: 1
char *get_arg(char *line, my_bool get_next_arg) { char *ptr, *start; my_bool quoted = 0, valid_arg = 0; char qtype = 0; ptr = line; if (get_next_arg) { for (; *ptr; ptr++); if (*(ptr + 1)) ptr++; } else { while (my_isspace(charset_info, *ptr)) ptr++; if (*ptr == '\\') ptr += 2; else while (*ptr && !my_isspace(charset_info, *ptr)) ptr++; } if (!*ptr) return NullS; while (my_isspace(charset_info, *ptr)) ptr++; if (*ptr == '\'' || *ptr == '\"' || *ptr == '`') { qtype = *ptr; quoted = 1; ptr++; } for (start = ptr; *ptr; ptr++) { if (*ptr == '\\' && ptr[1]) { strmov_overlapp(ptr, ptr + 1); } else if ((!quoted && *ptr == ' ') || (quoted && *ptr == qtype)) { *ptr = 0; break; } } valid_arg = ptr != start; return valid_arg ? start : NullS; }
idx: 1003

target: 0
gss_verify_mic (minor_status, context_handle, message_buffer, token_buffer, qop_state) OM_uint32 * minor_status; gss_ctx_id_t context_handle; gss_buffer_t message_buffer; gss_buffer_t token_buffer; gss_qop_t * qop_state; { OM_uint32 status; gss_union_ctx_id_t ctx; gss_mechanism mech; if (minor_status == NULL) return (GSS_S_CALL_INACCESSIBLE_WRITE); *minor_status = 0; if (context_handle == GSS_C_NO_CONTEXT) return (GSS_S_CALL_INACCESSIBLE_READ | GSS_S_NO_CONTEXT); if ((message_buffer == GSS_C_NO_BUFFER) || GSS_EMPTY_BUFFER(token_buffer)) return (GSS_S_CALL_INACCESSIBLE_READ); /* * select the approprate underlying mechanism routine and * call it. */ ctx = (gss_union_ctx_id_t) context_handle; if (ctx->internal_ctx_id == GSS_C_NO_CONTEXT) return (GSS_S_NO_CONTEXT); mech = gssint_get_mechanism (ctx->mech_type); if (mech) { if (mech->gss_verify_mic) { status = mech->gss_verify_mic( minor_status, ctx->internal_ctx_id, message_buffer, token_buffer, qop_state); if (status != GSS_S_COMPLETE) map_error(minor_status, mech); } else status = GSS_S_UNAVAILABLE; return(status); } return (GSS_S_BAD_MECH); }
idx: 1004

target: 0
static int dissect_pcp_message_error(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset) { gint32 error_num; pcp_conv_info_t *pcp_conv_info; col_append_str(pinfo->cinfo, COL_INFO, "[ERROR] "); proto_tree_add_item(tree, hf_pcp_pdu_error, tvb, offset, 4, ENC_BIG_ENDIAN); error_num = tvb_get_ntohl(tvb, offset); col_append_fstr(pinfo->cinfo, COL_INFO, "error=%s ", val_to_str(error_num, packettypenames_errors, "Unknown Error:%i")); offset += 4; if (error_num == PM_ERR_NAME) { pcp_conv_info = get_pcp_conversation_info(pinfo); pcp_conv_info->pmid_name_candidates = wmem_array_new(wmem_file_scope(), sizeof(guint8 *)); } return offset; }
idx: 1005

target: 0
gss_wrap_aead (minor_status, context_handle, conf_req_flag, qop_req, input_assoc_buffer, input_payload_buffer, conf_state, output_message_buffer) OM_uint32 * minor_status; gss_ctx_id_t context_handle; int conf_req_flag; gss_qop_t qop_req; gss_buffer_t input_assoc_buffer; gss_buffer_t input_payload_buffer; int * conf_state; gss_buffer_t output_message_buffer; { OM_uint32 status; gss_mechanism mech; gss_union_ctx_id_t ctx; status = val_wrap_aead_args(minor_status, context_handle, conf_req_flag, qop_req, input_assoc_buffer, input_payload_buffer, conf_state, output_message_buffer); if (status != GSS_S_COMPLETE) return (status); /* * select the approprate underlying mechanism routine and * call it. */ ctx = (gss_union_ctx_id_t)context_handle; if (ctx->internal_ctx_id == GSS_C_NO_CONTEXT) return (GSS_S_NO_CONTEXT); mech = gssint_get_mechanism (ctx->mech_type); if (!mech) return (GSS_S_BAD_MECH); return gssint_wrap_aead(mech, minor_status, ctx, conf_req_flag, qop_req, input_assoc_buffer, input_payload_buffer, conf_state, output_message_buffer); }
idx: 1006

target: 1
static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock, struct msghdr *msg, size_t len) { struct sock_iocb *siocb = kiocb_to_siocb(kiocb); struct sock *sk = sock->sk; struct sock *other = NULL; struct sockaddr_un *sunaddr=msg->msg_name; int err,size; struct sk_buff *skb; int sent=0; struct scm_cookie tmp_scm; if (NULL == siocb->scm) siocb->scm = &tmp_scm; err = scm_send(sock, msg, siocb->scm); if (err < 0) return err; err = -EOPNOTSUPP; if (msg->msg_flags&MSG_OOB) goto out_err; if (msg->msg_namelen) { err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP; goto out_err; } else { sunaddr = NULL; err = -ENOTCONN; other = unix_peer(sk); if (!other) goto out_err; } if (sk->sk_shutdown & SEND_SHUTDOWN) goto pipe_err; while(sent < len) { /* * Optimisation for the fact that under 0.01% of X * messages typically need breaking up. */ size = len-sent; /* Keep two messages in the pipe so it schedules better */ if (size > ((sk->sk_sndbuf >> 1) - 64)) size = (sk->sk_sndbuf >> 1) - 64; if (size > SKB_MAX_ALLOC) size = SKB_MAX_ALLOC; /* * Grab a buffer */ skb=sock_alloc_send_skb(sk,size,msg->msg_flags&MSG_DONTWAIT, &err); if (skb==NULL) goto out_err; /* * If you pass two values to the sock_alloc_send_skb * it tries to grab the large buffer with GFP_NOFS * (which can fail easily), and if it fails grab the * fallback size buffer which is under a page and will * succeed. [Alan] */ size = min_t(int, size, skb_tailroom(skb)); memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred)); if (siocb->scm->fp) unix_attach_fds(siocb->scm, skb); if ((err = memcpy_fromiovec(skb_put(skb,size), msg->msg_iov, size)) != 0) { kfree_skb(skb); goto out_err; } unix_state_lock(other); if (sock_flag(other, SOCK_DEAD) || (other->sk_shutdown & RCV_SHUTDOWN)) goto pipe_err_free; skb_queue_tail(&other->sk_receive_queue, skb); unix_state_unlock(other); other->sk_data_ready(other, size); sent+=size; } scm_destroy(siocb->scm); siocb->scm = NULL; return sent; pipe_err_free: unix_state_unlock(other); kfree_skb(skb); pipe_err: if (sent==0 && !(msg->msg_flags&MSG_NOSIGNAL)) send_sig(SIGPIPE,current,0); err = -EPIPE; out_err: scm_destroy(siocb->scm); siocb->scm = NULL; return sent ? : err; }
idx: 1008

target: 1
int nego_recv(rdpTransport* transport, wStream* s, void* extra) { BYTE li; BYTE type; UINT16 length; rdpNego* nego = (rdpNego*)extra; if (!tpkt_read_header(s, &length)) return -1; if (!tpdu_read_connection_confirm(s, &li, length)) return -1; if (li > 6) { /* rdpNegData (optional) */ Stream_Read_UINT8(s, type); /* Type */ switch (type) { case TYPE_RDP_NEG_RSP: nego_process_negotiation_response(nego, s); WLog_DBG(TAG, "selected_protocol: %" PRIu32 "", nego->SelectedProtocol); /* enhanced security selected ? */ if (nego->SelectedProtocol) { if ((nego->SelectedProtocol == PROTOCOL_HYBRID) && (!nego->EnabledProtocols[PROTOCOL_HYBRID])) { nego->state = NEGO_STATE_FAIL; } if ((nego->SelectedProtocol == PROTOCOL_SSL) && (!nego->EnabledProtocols[PROTOCOL_SSL])) { nego->state = NEGO_STATE_FAIL; } } else if (!nego->EnabledProtocols[PROTOCOL_RDP]) { nego->state = NEGO_STATE_FAIL; } break; case TYPE_RDP_NEG_FAILURE: nego_process_negotiation_failure(nego, s); break; } } else if (li == 6) { WLog_DBG(TAG, "no rdpNegData"); if (!nego->EnabledProtocols[PROTOCOL_RDP]) nego->state = NEGO_STATE_FAIL; else nego->state = NEGO_STATE_FINAL; } else { WLog_ERR(TAG, "invalid negotiation response"); nego->state = NEGO_STATE_FAIL; } if (!tpkt_ensure_stream_consumed(s, length)) return -1; return 0; }
idx: 1009

target: 0
static int claimintf(struct usb_dev_state *ps, unsigned int ifnum) { struct usb_device *dev = ps->dev; struct usb_interface *intf; int err; if (ifnum >= 8 * sizeof(ps->ifclaimed)) return -EINVAL; if (test_bit(ifnum, &ps->ifclaimed)) return 0; if (ps->privileges_dropped && !test_bit(ifnum, &ps->interface_allowed_mask)) return -EACCES; intf = usb_ifnum_to_if(dev, ifnum); if (!intf) err = -ENOENT; else err = usb_driver_claim_interface(&usbfs_driver, intf, ps); if (err == 0) set_bit(ifnum, &ps->ifclaimed); return err; }
idx: 1010

target: 0
gss_get_mic_iov(OM_uint32 *minor_status, gss_ctx_id_t context_handle, gss_qop_t qop_req, gss_iov_buffer_desc *iov, int iov_count) { OM_uint32 status; gss_union_ctx_id_t ctx; gss_mechanism mech; status = val_wrap_iov_args(minor_status, context_handle, 0, qop_req, NULL, iov, iov_count); if (status != GSS_S_COMPLETE) return status; /* Select the approprate underlying mechanism routine and call it. */ ctx = (gss_union_ctx_id_t)context_handle; if (ctx->internal_ctx_id == GSS_C_NO_CONTEXT) return GSS_S_NO_CONTEXT; mech = gssint_get_mechanism(ctx->mech_type); if (mech == NULL) return GSS_S_BAD_MECH; if (mech->gss_get_mic_iov == NULL) return GSS_S_UNAVAILABLE; status = mech->gss_get_mic_iov(minor_status, ctx->internal_ctx_id, qop_req, iov, iov_count); if (status != GSS_S_COMPLETE) map_error(minor_status, mech); return status; }
idx: 1012

target: 0
void YM3812UpdateOne(FM_OPL *OPL, INT16 *buffer, int length) { int i; int data; OPLSAMPLE *buf = buffer; UINT32 amsCnt = OPL->amsCnt; UINT32 vibCnt = OPL->vibCnt; UINT8 rythm = OPL->rythm&0x20; OPL_CH *CH,*R_CH; if( (void *)OPL != cur_chip ){ cur_chip = (void *)OPL; /* channel pointers */ S_CH = OPL->P_CH; E_CH = &S_CH[9]; /* rythm slot */ SLOT7_1 = &S_CH[7].SLOT[SLOT1]; SLOT7_2 = &S_CH[7].SLOT[SLOT2]; SLOT8_1 = &S_CH[8].SLOT[SLOT1]; SLOT8_2 = &S_CH[8].SLOT[SLOT2]; /* LFO state */ amsIncr = OPL->amsIncr; vibIncr = OPL->vibIncr; ams_table = OPL->ams_table; vib_table = OPL->vib_table; } R_CH = rythm ? &S_CH[6] : E_CH; for( i=0; i < length ; i++ ) { /* channel A channel B channel C */ /* LFO */ ams = ams_table[(amsCnt+=amsIncr)>>AMS_SHIFT]; vib = vib_table[(vibCnt+=vibIncr)>>VIB_SHIFT]; outd[0] = 0; /* FM part */ for(CH=S_CH ; CH < R_CH ; CH++) OPL_CALC_CH(CH); /* Rythn part */ if(rythm) OPL_CALC_RH(S_CH); /* limit check */ data = Limit( outd[0] , OPL_MAXOUT, OPL_MINOUT ); /* store to sound buffer */ buf[i] = data >> OPL_OUTSB; } OPL->amsCnt = amsCnt; OPL->vibCnt = vibCnt; #ifdef OPL_OUTPUT_LOG if(opl_dbg_fp) { for(opl_dbg_chip=0;opl_dbg_chip<opl_dbg_maxchip;opl_dbg_chip++) if( opl_dbg_opl[opl_dbg_chip] == OPL) break; fprintf(opl_dbg_fp,"%c%c%c",0x20+opl_dbg_chip,length&0xff,length/256); } #endif }
idx: 1013

target: 0
gss_get_mic_iov_length(OM_uint32 *minor_status, gss_ctx_id_t context_handle, gss_qop_t qop_req, gss_iov_buffer_desc *iov, int iov_count) { OM_uint32 status; gss_union_ctx_id_t ctx; gss_mechanism mech; status = val_wrap_iov_args(minor_status, context_handle, 0, qop_req, NULL, iov, iov_count); if (status != GSS_S_COMPLETE) return status; /* Select the approprate underlying mechanism routine and call it. */ ctx = (gss_union_ctx_id_t)context_handle; if (ctx->internal_ctx_id == GSS_C_NO_CONTEXT) return GSS_S_NO_CONTEXT; mech = gssint_get_mechanism(ctx->mech_type); if (mech == NULL) return GSS_S_BAD_MECH; if (mech->gss_get_mic_iov_length == NULL) return GSS_S_UNAVAILABLE; status = mech->gss_get_mic_iov_length(minor_status, ctx->internal_ctx_id, qop_req, iov, iov_count); if (status != GSS_S_COMPLETE) map_error(minor_status, mech); return status; }
idx: 1014

target: 1
static inline int empty_stack(void) { return gc_current == GC_HEAD; }
idx: 1015

target: 1
int dtls1_get_record(SSL *s) { int ssl_major, ssl_minor; int i, n; SSL3_RECORD *rr; unsigned char *p = NULL; unsigned short version; DTLS1_BITMAP *bitmap; unsigned int is_next_epoch; rr = RECORD_LAYER_get_rrec(&s->rlayer); again: if (dtls1_process_buffered_records(s) < 0) return -1; if (dtls1_get_processed_record(s)) return 1; if ((RECORD_LAYER_get_rstate(&s->rlayer) != SSL_ST_READ_BODY) || (RECORD_LAYER_get_packet_length(&s->rlayer) < DTLS1_RT_HEADER_LENGTH)) { n = ssl3_read_n(s, DTLS1_RT_HEADER_LENGTH, SSL3_BUFFER_get_len(&s->rlayer.rbuf), 0, 1); if (n <= 0) return (n); if (RECORD_LAYER_get_packet_length(&s->rlayer) != DTLS1_RT_HEADER_LENGTH) { RECORD_LAYER_reset_packet_length(&s->rlayer); goto again; } RECORD_LAYER_set_rstate(&s->rlayer, SSL_ST_READ_BODY); p = RECORD_LAYER_get_packet(&s->rlayer); if (s->msg_callback) s->msg_callback(0, 0, SSL3_RT_HEADER, p, DTLS1_RT_HEADER_LENGTH, s, s->msg_callback_arg); rr->type = *(p++); ssl_major = *(p++); ssl_minor = *(p++); version = (ssl_major << 8) | ssl_minor; n2s(p, rr->epoch); memcpy(&(RECORD_LAYER_get_read_sequence(&s->rlayer)[2]), p, 6); p += 6; n2s(p, rr->length); if (!s->first_packet) { if (version != s->version) { rr->length = 0; RECORD_LAYER_reset_packet_length(&s->rlayer); goto again; } } if ((version & 0xff00) != (s->version & 0xff00)) { rr->length = 0; RECORD_LAYER_reset_packet_length(&s->rlayer); goto again; } if (rr->length > SSL3_RT_MAX_ENCRYPTED_LENGTH) { rr->length = 0; RECORD_LAYER_reset_packet_length(&s->rlayer); goto again; } } if (rr->length > RECORD_LAYER_get_packet_length(&s->rlayer) - DTLS1_RT_HEADER_LENGTH) { i = rr->length; n = ssl3_read_n(s, i, i, 1, 1); if (n != i) { rr->length = 0; RECORD_LAYER_reset_packet_length(&s->rlayer); goto again; } } RECORD_LAYER_set_rstate(&s->rlayer, SSL_ST_READ_HEADER); bitmap = dtls1_get_bitmap(s, rr, &is_next_epoch); if (bitmap == NULL) { rr->length = 0; RECORD_LAYER_reset_packet_length(&s->rlayer); goto again; } #ifndef OPENSSL_NO_SCTP if (!BIO_dgram_is_sctp(SSL_get_rbio(s))) { #endif if (!dtls1_record_replay_check(s, bitmap)) { rr->length = 0; RECORD_LAYER_reset_packet_length(&s->rlayer); goto again; } #ifndef OPENSSL_NO_SCTP } #endif if (rr->length == 0) goto again; if (is_next_epoch) { if ((SSL_in_init(s) || ossl_statem_get_in_handshake(s))) { if (dtls1_buffer_record(s, &(DTLS_RECORD_LAYER_get_unprocessed_rcds(&s->rlayer)), rr->seq_num) < 0) return -1; dtls1_record_bitmap_update(s, bitmap); } rr->length = 0; RECORD_LAYER_reset_packet_length(&s->rlayer); goto again; } if (!dtls1_process_record(s)) { rr->length = 0; RECORD_LAYER_reset_packet_length(&s->rlayer); goto again; } dtls1_record_bitmap_update(s, bitmap); return (1); }
idx: 1016

target: 0
static void omap_tipb_bridge_write(void *opaque, target_phys_addr_t addr, uint64_t value, unsigned size) { struct omap_tipb_bridge_s *s = (struct omap_tipb_bridge_s *) opaque; if (size < 2) { return omap_badwidth_write16(opaque, addr, value); } switch (addr) { case 0x00: /* TIPB_CNTL */ s->control = value & 0xffff; break; case 0x04: /* TIPB_BUS_ALLOC */ s->alloc = value & 0x003f; break; case 0x08: /* MPU_TIPB_CNTL */ s->buffer = value & 0x0003; break; case 0x0c: /* ENHANCED_TIPB_CNTL */ s->width_intr = !(value & 2); s->enh_control = value & 0x000f; break; case 0x10: /* ADDRESS_DBG */ case 0x14: /* DATA_DEBUG_LOW */ case 0x18: /* DATA_DEBUG_HIGH */ case 0x1c: /* DEBUG_CNTR_SIG */ OMAP_RO_REG(addr); break; default: OMAP_BAD_REG(addr); } }
idx: 1017

target: 1
static void maybe_unmark_and_push(struct sock *x) { struct unix_sock *u = unix_sk(x); if (u->gc_tree != GC_ORPHAN) return; sock_hold(x); u->gc_tree = gc_current; gc_current = x; }
idx: 1019

target: 0
gss_wrap_iov (minor_status, context_handle, conf_req_flag, qop_req, conf_state, iov, iov_count) OM_uint32 * minor_status; gss_ctx_id_t context_handle; int conf_req_flag; gss_qop_t qop_req; int * conf_state; gss_iov_buffer_desc * iov; int iov_count; { /* EXPORT DELETE START */ OM_uint32 status; gss_union_ctx_id_t ctx; gss_mechanism mech; status = val_wrap_iov_args(minor_status, context_handle, conf_req_flag, qop_req, conf_state, iov, iov_count); if (status != GSS_S_COMPLETE) return (status); /* * select the approprate underlying mechanism routine and * call it. */ ctx = (gss_union_ctx_id_t) context_handle; if (ctx->internal_ctx_id == GSS_C_NO_CONTEXT) return (GSS_S_NO_CONTEXT); mech = gssint_get_mechanism (ctx->mech_type); if (mech) { if (mech->gss_wrap_iov) { status = mech->gss_wrap_iov( minor_status, ctx->internal_ctx_id, conf_req_flag, qop_req, conf_state, iov, iov_count); if (status != GSS_S_COMPLETE) map_error(minor_status, mech); } else status = GSS_S_UNAVAILABLE; return(status); } /* EXPORT DELETE END */ return (GSS_S_BAD_MECH); }
idx: 1021

target: 0
gss_wrap_iov_length (minor_status, context_handle, conf_req_flag, qop_req, conf_state, iov, iov_count) OM_uint32 * minor_status; gss_ctx_id_t context_handle; int conf_req_flag; gss_qop_t qop_req; int * conf_state; gss_iov_buffer_desc * iov; int iov_count; { /* EXPORT DELETE START */ OM_uint32 status; gss_union_ctx_id_t ctx; gss_mechanism mech; status = val_wrap_iov_args(minor_status, context_handle, conf_req_flag, qop_req, conf_state, iov, iov_count); if (status != GSS_S_COMPLETE) return (status); /* * select the approprate underlying mechanism routine and * call it. */ ctx = (gss_union_ctx_id_t) context_handle; if (ctx->internal_ctx_id == GSS_C_NO_CONTEXT) return (GSS_S_NO_CONTEXT); mech = gssint_get_mechanism (ctx->mech_type); if (mech) { if (mech->gss_wrap_iov_length) { status = mech->gss_wrap_iov_length( minor_status, ctx->internal_ctx_id, conf_req_flag, qop_req, conf_state, iov, iov_count); if (status != GSS_S_COMPLETE) map_error(minor_status, mech); } else status = GSS_S_UNAVAILABLE; return(status); } /* EXPORT DELETE END */ return (GSS_S_BAD_MECH); }
idx: 1022

target: 1
void unix_gc(void) { static DEFINE_MUTEX(unix_gc_sem); int i; struct sock *s; struct sk_buff_head hitlist; struct sk_buff *skb; /* * Avoid a recursive GC. */ if (!mutex_trylock(&unix_gc_sem)) return; spin_lock(&unix_table_lock); forall_unix_sockets(i, s) { unix_sk(s)->gc_tree = GC_ORPHAN; } /* * Everything is now marked */ /* Invariant to be maintained: - everything unmarked is either: -- (a) on the stack, or -- (b) has all of its children unmarked - everything on the stack is always unmarked - nothing is ever pushed onto the stack twice, because: -- nothing previously unmarked is ever pushed on the stack */ /* * Push root set */ forall_unix_sockets(i, s) { int open_count = 0; /* * If all instances of the descriptor are not * in flight we are in use. * * Special case: when socket s is embrion, it may be * hashed but still not in queue of listening socket. * In this case (see unix_create1()) we set artificial * negative inflight counter to close race window. * It is trick of course and dirty one. */ if (s->sk_socket && s->sk_socket->file) open_count = file_count(s->sk_socket->file); if (open_count > atomic_read(&unix_sk(s)->inflight)) maybe_unmark_and_push(s); } /* * Mark phase */ while (!empty_stack()) { struct sock *x = pop_stack(); struct sock *sk; spin_lock(&x->sk_receive_queue.lock); skb = skb_peek(&x->sk_receive_queue); /* * Loop through all but first born */ while (skb && skb != (struct sk_buff *)&x->sk_receive_queue) { /* * Do we have file descriptors ? */ if(UNIXCB(skb).fp) { /* * Process the descriptors of this socket */ int nfd=UNIXCB(skb).fp->count; struct file **fp = UNIXCB(skb).fp->fp; while(nfd--) { /* * Get the socket the fd matches if * it indeed does so */ if((sk=unix_get_socket(*fp++))!=NULL) { maybe_unmark_and_push(sk); } } } /* We have to scan not-yet-accepted ones too */ if (x->sk_state == TCP_LISTEN) maybe_unmark_and_push(skb->sk); skb=skb->next; } spin_unlock(&x->sk_receive_queue.lock); sock_put(x); } skb_queue_head_init(&hitlist); forall_unix_sockets(i, s) { struct unix_sock *u = unix_sk(s); if (u->gc_tree == GC_ORPHAN) { struct sk_buff *nextsk; spin_lock(&s->sk_receive_queue.lock); skb = skb_peek(&s->sk_receive_queue); while (skb && skb != (struct sk_buff *)&s->sk_receive_queue) { nextsk = skb->next; /* * Do we have file descriptors ? */ if (UNIXCB(skb).fp) { __skb_unlink(skb, &s->sk_receive_queue); __skb_queue_tail(&hitlist, skb); } skb = nextsk; } spin_unlock(&s->sk_receive_queue.lock); } u->gc_tree = GC_ORPHAN; } spin_unlock(&unix_table_lock); /* * Here we are. Hitlist is filled. Die. */ __skb_queue_purge(&hitlist); mutex_unlock(&unix_gc_sem); }
idx: 1024

target: 0
static void icount_warp_rt(void) { unsigned seq; int64_t warp_start; /* The icount_warp_timer is rescheduled soon after vm_clock_warp_start * changes from -1 to another value, so the race here is okay. */ do { seq = seqlock_read_begin(&timers_state.vm_clock_seqlock); warp_start = vm_clock_warp_start; } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, seq)); if (warp_start == -1) { return; } seqlock_write_begin(&timers_state.vm_clock_seqlock); if (runstate_is_running()) { int64_t clock = REPLAY_CLOCK(REPLAY_CLOCK_VIRTUAL_RT, cpu_get_clock_locked()); int64_t warp_delta; warp_delta = clock - vm_clock_warp_start; if (use_icount == 2) { /* * In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too * far ahead of real time. */ int64_t cur_icount = cpu_get_icount_locked(); int64_t delta = clock - cur_icount; warp_delta = MIN(warp_delta, delta); } timers_state.qemu_icount_bias += warp_delta; } vm_clock_warp_start = -1; seqlock_write_end(&timers_state.vm_clock_seqlock); if (qemu_clock_expired(QEMU_CLOCK_VIRTUAL)) { qemu_clock_notify(QEMU_CLOCK_VIRTUAL); } }
idx: 1028

target: 1
static BOOL ntlm_av_pair_check(NTLM_AV_PAIR* pAvPair, size_t cbAvPair) { if (!pAvPair || cbAvPair < sizeof(NTLM_AV_PAIR)) return FALSE; return cbAvPair >= ntlm_av_pair_get_next_offset(pAvPair); }
idx: 1029

target: 1
X509_NAME_oneline_ex(X509_NAME * a, char *buf, unsigned int *size, unsigned long flag) { BIO *out = NULL; out = BIO_new(BIO_s_mem ()); if (X509_NAME_print_ex(out, a, 0, flag) > 0) { if (buf != NULL && (*size) > (unsigned int) BIO_number_written(out)) { memset(buf, 0, *size); BIO_read(out, buf, (int) BIO_number_written(out)); } else { *size = BIO_number_written(out); } } BIO_free(out); return (buf); }
idx: 1030

target: 1
void unix_inflight(struct file *fp) { struct sock *s = unix_get_socket(fp); if(s) { atomic_inc(&unix_sk(s)->inflight); atomic_inc(&unix_tot_inflight); } }
idx: 1031

target: 0
get_matching_data(krb5_context context, pkinit_plg_crypto_context plg_cryptoctx, pkinit_req_crypto_context req_cryptoctx, X509 *cert, pkinit_cert_matching_data **md_out) { krb5_error_code ret = ENOMEM; pkinit_cert_matching_data *md = NULL; krb5_principal *pkinit_sans = NULL, *upn_sans = NULL; size_t i, j; *md_out = NULL; md = calloc(1, sizeof(*md)); if (md == NULL) goto cleanup; ret = rfc2253_name(X509_get_subject_name(cert), &md->subject_dn); if (ret) goto cleanup; ret = rfc2253_name(X509_get_issuer_name(cert), &md->issuer_dn); if (ret) goto cleanup; /* Get the SAN data. */ ret = crypto_retrieve_X509_sans(context, plg_cryptoctx, req_cryptoctx, cert, &pkinit_sans, &upn_sans, NULL); if (ret) goto cleanup; j = 0; if (pkinit_sans != NULL) { for (i = 0; pkinit_sans[i] != NULL; i++) j++; } if (upn_sans != NULL) { for (i = 0; upn_sans[i] != NULL; i++) j++; } if (j != 0) { md->sans = calloc((size_t)j+1, sizeof(*md->sans)); if (md->sans == NULL) { ret = ENOMEM; goto cleanup; } j = 0; if (pkinit_sans != NULL) { for (i = 0; pkinit_sans[i] != NULL; i++) md->sans[j++] = pkinit_sans[i]; free(pkinit_sans); } if (upn_sans != NULL) { for (i = 0; upn_sans[i] != NULL; i++) md->sans[j++] = upn_sans[i]; free(upn_sans); } md->sans[j] = NULL; } else md->sans = NULL; /* Get the KU and EKU data. */ ret = crypto_retrieve_X509_key_usage(context, plg_cryptoctx, req_cryptoctx, cert, &md->ku_bits, &md->eku_bits); if (ret) goto cleanup; *md_out = md; md = NULL; cleanup: crypto_cert_free_matching_data(context, md); return ret; }
idx: 1032

target: 0
void e1000e_core_pre_save(E1000ECore *core) { int i; NetClientState *nc = qemu_get_queue(core->owner_nic); if (nc->link_down && e1000e_have_autoneg(core)) { core->phy[0][PHY_STATUS] |= MII_SR_AUTONEG_COMPLETE; e1000e_update_flowctl_status(core); } for (i = 0; i < ARRAY_SIZE(core->tx); i++) { if (net_tx_pkt_has_fragments(core->tx[i].tx_pkt)) { core->tx[i].skip_cp = true; } } }
idx: 1033

target: 1
static inline struct sock *pop_stack(void) { struct sock *p = gc_current; gc_current = unix_sk(p)->gc_tree; return p; }
idx: 1034

target: 0
void bdrv_aio_cancel(BlockAIOCB *acb) { qemu_aio_ref(acb); bdrv_aio_cancel_async(acb); while (acb->refcnt > 1) { if (acb->aiocb_info->get_aio_context) { aio_poll(acb->aiocb_info->get_aio_context(acb), true); } else if (acb->bs) { aio_poll(bdrv_get_aio_context(acb->bs), true); } else { abort(); } } qemu_aio_unref(acb); }
idx: 1035

target: 0
static int dissect_h225_TBCD_STRING_SIZE_1_4(tvbuff_t *tvb _U_, int offset _U_, asn1_ctx_t *actx _U_, proto_tree *tree _U_, int hf_index _U_) { offset = dissect_per_size_constrained_type(tvb, offset, actx, tree, hf_index, dissect_h225_TBCD_STRING, "TBCD_STRING", 1, 4, FALSE); return offset; }
idx: 1038

target: 1
get_matching_data(krb5_context context, pkinit_plg_crypto_context plg_cryptoctx, pkinit_req_crypto_context req_cryptoctx, X509 *cert, pkinit_cert_matching_data **md_out) { krb5_error_code ret = ENOMEM; pkinit_cert_matching_data *md = NULL; krb5_principal *pkinit_sans = NULL, *upn_sans = NULL; size_t i, j; char buf[DN_BUF_LEN]; unsigned int bufsize = sizeof(buf); *md_out = NULL; md = calloc(1, sizeof(*md)); if (md == NULL) goto cleanup; /* Get the subject name (in rfc2253 format). */ X509_NAME_oneline_ex(X509_get_subject_name(cert), buf, &bufsize, XN_FLAG_SEP_COMMA_PLUS); md->subject_dn = strdup(buf); if (md->subject_dn == NULL) { ret = ENOMEM; goto cleanup; } /* Get the issuer name (in rfc2253 format). */ X509_NAME_oneline_ex(X509_get_issuer_name(cert), buf, &bufsize, XN_FLAG_SEP_COMMA_PLUS); md->issuer_dn = strdup(buf); if (md->issuer_dn == NULL) { ret = ENOMEM; goto cleanup; } /* Get the SAN data. */ ret = crypto_retrieve_X509_sans(context, plg_cryptoctx, req_cryptoctx, cert, &pkinit_sans, &upn_sans, NULL); if (ret) goto cleanup; j = 0; if (pkinit_sans != NULL) { for (i = 0; pkinit_sans[i] != NULL; i++) j++; } if (upn_sans != NULL) { for (i = 0; upn_sans[i] != NULL; i++) j++; } if (j != 0) { md->sans = calloc((size_t)j+1, sizeof(*md->sans)); if (md->sans == NULL) { ret = ENOMEM; goto cleanup; } j = 0; if (pkinit_sans != NULL) { for (i = 0; pkinit_sans[i] != NULL; i++) md->sans[j++] = pkinit_sans[i]; free(pkinit_sans); } if (upn_sans != NULL) { for (i = 0; upn_sans[i] != NULL; i++) md->sans[j++] = upn_sans[i]; free(upn_sans); } md->sans[j] = NULL; } else md->sans = NULL; /* Get the KU and EKU data. */ ret = crypto_retrieve_X509_key_usage(context, plg_cryptoctx, req_cryptoctx, cert, &md->ku_bits, &md->eku_bits); if (ret) goto cleanup; *md_out = md; md = NULL; cleanup: crypto_cert_free_matching_data(context, md); return ret; }
idx: 1039

target: 0
rfc2253_name(X509_NAME *name, char **str_out) { BIO *b = NULL; char *str; *str_out = NULL; b = BIO_new(BIO_s_mem()); if (b == NULL) return ENOMEM; if (X509_NAME_print_ex(b, name, 0, XN_FLAG_SEP_COMMA_PLUS) < 0) goto error; str = calloc(BIO_number_written(b) + 1, 1); if (str == NULL) goto error; BIO_read(b, str, BIO_number_written(b)); BIO_free(b); *str_out = str; return 0; error: BIO_free(b); return ENOMEM; }
idx: 1040

target: 0
static void gen_sraq(DisasContext *ctx) { int l1 = gen_new_label(); int l2 = gen_new_label(); TCGv t0 = tcg_temp_new(); TCGv t1 = tcg_temp_local_new(); TCGv t2 = tcg_temp_local_new(); tcg_gen_andi_tl(t2, cpu_gpr[rB(ctx->opcode)], 0x1F); tcg_gen_shr_tl(t0, cpu_gpr[rS(ctx->opcode)], t2); tcg_gen_sar_tl(t1, cpu_gpr[rS(ctx->opcode)], t2); tcg_gen_subfi_tl(t2, 32, t2); tcg_gen_shl_tl(t2, cpu_gpr[rS(ctx->opcode)], t2); tcg_gen_or_tl(t0, t0, t2); gen_store_spr(SPR_MQ, t0); tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x20); tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, l1); tcg_gen_mov_tl(t2, cpu_gpr[rS(ctx->opcode)]); tcg_gen_sari_tl(t1, cpu_gpr[rS(ctx->opcode)], 31); gen_set_label(l1); tcg_temp_free(t0); tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t1); tcg_gen_movi_tl(cpu_ca, 0); tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l2); tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, l2); tcg_gen_movi_tl(cpu_ca, 1); gen_set_label(l2); tcg_temp_free(t1); tcg_temp_free(t2); if (unlikely(Rc(ctx->opcode) != 0)) gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); }
idx: 1041

target: 1
void unix_notinflight(struct file *fp) { struct sock *s = unix_get_socket(fp); if(s) { atomic_dec(&unix_sk(s)->inflight); atomic_dec(&unix_tot_inflight); } }
idx: 1042

target: 0
static BLOCK_SIZE get_rd_var_based_fixed_partition(VP9_COMP *cpi, int mi_row, int mi_col) { unsigned int var = get_sby_perpixel_diff_variance(cpi, &cpi->mb.plane[0].src, mi_row, mi_col, BLOCK_64X64); if (var < 8) return BLOCK_64X64; else if (var < 128) return BLOCK_32X32; else if (var < 2048) return BLOCK_16X16; else return BLOCK_8X8; }
idx: 1043

target: 0
kadm5_create_principal_3(void *server_handle, kadm5_principal_ent_t entry, long mask, int n_ks_tuple, krb5_key_salt_tuple *ks_tuple, char *password) { krb5_db_entry *kdb; osa_princ_ent_rec adb; kadm5_policy_ent_rec polent; krb5_boolean have_polent = FALSE; krb5_timestamp now; krb5_tl_data *tl_data_tail; unsigned int ret; kadm5_server_handle_t handle = server_handle; krb5_keyblock *act_mkey; krb5_kvno act_kvno; int new_n_ks_tuple = 0; krb5_key_salt_tuple *new_ks_tuple = NULL; CHECK_HANDLE(server_handle); krb5_clear_error_message(handle->context); check_1_6_dummy(entry, mask, n_ks_tuple, ks_tuple, &password); /* * Argument sanity checking, and opening up the DB */ if (entry == NULL) return EINVAL; if(!(mask & KADM5_PRINCIPAL) || (mask & KADM5_MOD_NAME) || (mask & KADM5_MOD_TIME) || (mask & KADM5_LAST_PWD_CHANGE) || (mask & KADM5_MKVNO) || (mask & KADM5_AUX_ATTRIBUTES) || (mask & KADM5_LAST_SUCCESS) || (mask & KADM5_LAST_FAILED) || (mask & KADM5_FAIL_AUTH_COUNT)) return KADM5_BAD_MASK; if ((mask & KADM5_KEY_DATA) && entry->n_key_data != 0) return KADM5_BAD_MASK; if((mask & KADM5_POLICY) && entry->policy == NULL) return KADM5_BAD_MASK; if((mask & KADM5_POLICY) && (mask & KADM5_POLICY_CLR)) return KADM5_BAD_MASK; if((mask & ~ALL_PRINC_MASK)) return KADM5_BAD_MASK; if (mask & KADM5_TL_DATA) { for (tl_data_tail = entry->tl_data; tl_data_tail != NULL; tl_data_tail = tl_data_tail->tl_data_next) { if (tl_data_tail->tl_data_type < 256) return KADM5_BAD_TL_TYPE; } } /* * Check to see if the principal exists */ ret = kdb_get_entry(handle, entry->principal, &kdb, &adb); switch(ret) { case KADM5_UNK_PRINC: break; case 0: kdb_free_entry(handle, kdb, &adb); return KADM5_DUP; default: return ret; } kdb = calloc(1, sizeof(*kdb)); if (kdb == NULL) return ENOMEM; memset(&adb, 0, sizeof(osa_princ_ent_rec)); /* * If a policy was specified, load it. * If we can not find the one specified return an error */ if ((mask & KADM5_POLICY)) { ret = get_policy(handle, entry->policy, &polent, &have_polent); if (ret) goto cleanup; } if (password) { ret = passwd_check(handle, password, have_polent ? &polent : NULL, entry->principal); if (ret) goto cleanup; } /* * Start populating the various DB fields, using the * "defaults" for fields that were not specified by the * mask. */ if ((ret = krb5_timeofday(handle->context, &now))) goto cleanup; kdb->magic = KRB5_KDB_MAGIC_NUMBER; kdb->len = KRB5_KDB_V1_BASE_LENGTH; /* gag me with a chainsaw */ if ((mask & KADM5_ATTRIBUTES)) kdb->attributes = entry->attributes; else kdb->attributes = handle->params.flags; if ((mask & KADM5_MAX_LIFE)) kdb->max_life = entry->max_life; else kdb->max_life = handle->params.max_life; if (mask & KADM5_MAX_RLIFE) kdb->max_renewable_life = entry->max_renewable_life; else kdb->max_renewable_life = handle->params.max_rlife; if ((mask & KADM5_PRINC_EXPIRE_TIME)) kdb->expiration = entry->princ_expire_time; else kdb->expiration = handle->params.expiration; kdb->pw_expiration = 0; if (have_polent) { if(polent.pw_max_life) kdb->pw_expiration = ts_incr(now, polent.pw_max_life); else kdb->pw_expiration = 0; } if ((mask & KADM5_PW_EXPIRATION)) kdb->pw_expiration = entry->pw_expiration; kdb->last_success = 0; kdb->last_failed = 0; kdb->fail_auth_count = 0; /* this is kind of gross, but in order to free the tl data, I need to free the entire kdb entry, and that will try to free the principal. */ ret = krb5_copy_principal(handle->context, entry->principal, &kdb->princ); if (ret) goto cleanup; if ((ret = krb5_dbe_update_last_pwd_change(handle->context, kdb, now))) goto cleanup; if (mask & KADM5_TL_DATA) { /* splice entry->tl_data onto the front of kdb->tl_data */ for (tl_data_tail = entry->tl_data; tl_data_tail; tl_data_tail = tl_data_tail->tl_data_next) { ret = krb5_dbe_update_tl_data(handle->context, kdb, tl_data_tail); if( ret ) goto cleanup; } } /* * We need to have setup the TL data, so we have strings, so we can * check enctype policy, which is why we check/initialize ks_tuple * this late. */ ret = apply_keysalt_policy(handle, entry->policy, n_ks_tuple, ks_tuple, &new_n_ks_tuple, &new_ks_tuple); if (ret) goto cleanup; /* initialize the keys */ ret = kdb_get_active_mkey(handle, &act_kvno, &act_mkey); if (ret) goto cleanup; if (mask & KADM5_KEY_DATA) { /* The client requested no keys for this principal. */ assert(entry->n_key_data == 0); } else if (password) { ret = krb5_dbe_cpw(handle->context, act_mkey, new_ks_tuple, new_n_ks_tuple, password, (mask & KADM5_KVNO)?entry->kvno:1, FALSE, kdb); } else { /* Null password means create with random key (new in 1.8). */ ret = krb5_dbe_crk(handle->context, &master_keyblock, new_ks_tuple, new_n_ks_tuple, FALSE, kdb); } if (ret) goto cleanup; /* Record the master key VNO used to encrypt this entry's keys */ ret = krb5_dbe_update_mkvno(handle->context, kdb, act_kvno); if (ret) goto cleanup; ret = k5_kadm5_hook_create(handle->context, handle->hook_handles, KADM5_HOOK_STAGE_PRECOMMIT, entry, mask, new_n_ks_tuple, new_ks_tuple, password); if (ret) goto cleanup; /* populate the admin-server-specific fields. In the OV server, this used to be in a separate database. Since there's already marshalling code for the admin fields, to keep things simple, I'm going to keep it, and make all the admin stuff occupy a single tl_data record, */ adb.admin_history_kvno = INITIAL_HIST_KVNO; if (mask & KADM5_POLICY) { adb.aux_attributes = KADM5_POLICY; /* this does *not* need to be strdup'ed, because adb is xdr */ /* encoded in osa_adb_create_princ, and not ever freed */ adb.policy = entry->policy; } /* In all cases key and the principal data is set, let the database provider know */ kdb->mask = mask | KADM5_KEY_DATA | KADM5_PRINCIPAL ; /* store the new db entry */ ret = kdb_put_entry(handle, kdb, &adb); (void) k5_kadm5_hook_create(handle->context, handle->hook_handles, KADM5_HOOK_STAGE_POSTCOMMIT, entry, mask, new_n_ks_tuple, new_ks_tuple, password); cleanup: free(new_ks_tuple); krb5_db_free_principal(handle->context, kdb); if (have_polent) (void) kadm5_free_policy_ent(handle->lhandle, &polent); return ret; }
idx: 1044

target: 1
static int uvc_parse_format(struct uvc_device *dev, struct uvc_streaming *streaming, struct uvc_format *format, __u32 **intervals, unsigned char *buffer, int buflen) { struct usb_interface *intf = streaming->intf; struct usb_host_interface *alts = intf->cur_altsetting; struct uvc_format_desc *fmtdesc; struct uvc_frame *frame; const unsigned char *start = buffer; unsigned int interval; unsigned int i, n; __u8 ftype; format->type = buffer[2]; format->index = buffer[3]; switch (buffer[2]) { case VS_FORMAT_UNCOMPRESSED: case VS_FORMAT_FRAME_BASED: if (buflen < 27) { uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming" "interface %d FORMAT error\n", dev->udev->devnum, alts->desc.bInterfaceNumber); return -EINVAL; } /* Find the format descriptor from its GUID. */ fmtdesc = uvc_format_by_guid(&buffer[5]); if (fmtdesc != NULL) { strncpy(format->name, fmtdesc->name, sizeof format->name); format->fcc = fmtdesc->fcc; } else { uvc_printk(KERN_INFO, "Unknown video format " UVC_GUID_FORMAT "\n", UVC_GUID_ARGS(&buffer[5])); snprintf(format->name, sizeof format->name, UVC_GUID_FORMAT, UVC_GUID_ARGS(&buffer[5])); format->fcc = 0; } format->bpp = buffer[21]; if (buffer[2] == VS_FORMAT_UNCOMPRESSED) { ftype = VS_FRAME_UNCOMPRESSED; } else { ftype = VS_FRAME_FRAME_BASED; if (buffer[27]) format->flags = UVC_FMT_FLAG_COMPRESSED; } break; case VS_FORMAT_MJPEG: if (buflen < 11) { uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming" "interface %d FORMAT error\n", dev->udev->devnum, alts->desc.bInterfaceNumber); return -EINVAL; } strncpy(format->name, "MJPEG", sizeof format->name); format->fcc = V4L2_PIX_FMT_MJPEG; format->flags = UVC_FMT_FLAG_COMPRESSED; format->bpp = 0; ftype = VS_FRAME_MJPEG; break; case VS_FORMAT_DV: if (buflen < 9) { uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming" "interface %d FORMAT error\n", dev->udev->devnum, alts->desc.bInterfaceNumber); return -EINVAL; } switch (buffer[8] & 0x7f) { case 0: strncpy(format->name, "SD-DV", sizeof format->name); break; case 1: strncpy(format->name, "SDL-DV", sizeof format->name); break; case 2: strncpy(format->name, "HD-DV", sizeof format->name); break; default: uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming" "interface %d: unknown DV format %u\n", dev->udev->devnum, alts->desc.bInterfaceNumber, buffer[8]); return -EINVAL; } strncat(format->name, buffer[8] & (1 << 7) ? " 60Hz" : " 50Hz", sizeof format->name); format->fcc = V4L2_PIX_FMT_DV; format->flags = UVC_FMT_FLAG_COMPRESSED | UVC_FMT_FLAG_STREAM; format->bpp = 0; ftype = 0; /* Create a dummy frame descriptor. */ frame = &format->frame[0]; memset(&format->frame[0], 0, sizeof format->frame[0]); frame->bFrameIntervalType = 1; frame->dwDefaultFrameInterval = 1; frame->dwFrameInterval = *intervals; *(*intervals)++ = 1; format->nframes = 1; break; case VS_FORMAT_MPEG2TS: case VS_FORMAT_STREAM_BASED: /* Not supported yet. */ default: uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming" "interface %d unsupported format %u\n", dev->udev->devnum, alts->desc.bInterfaceNumber, buffer[2]); return -EINVAL; } uvc_trace(UVC_TRACE_DESCR, "Found format %s.\n", format->name); buflen -= buffer[0]; buffer += buffer[0]; /* Parse the frame descriptors. Only uncompressed, MJPEG and frame * based formats have frame descriptors. */ while (buflen > 2 && buffer[2] == ftype) { frame = &format->frame[format->nframes]; if (ftype != VS_FRAME_FRAME_BASED) n = buflen > 25 ? buffer[25] : 0; else n = buflen > 21 ? buffer[21] : 0; n = n ? n : 3; if (buflen < 26 + 4*n) { uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming" "interface %d FRAME error\n", dev->udev->devnum, alts->desc.bInterfaceNumber); return -EINVAL; } frame->bFrameIndex = buffer[3]; frame->bmCapabilities = buffer[4]; frame->wWidth = le16_to_cpup((__le16 *)&buffer[5]); frame->wHeight = le16_to_cpup((__le16 *)&buffer[7]); frame->dwMinBitRate = le32_to_cpup((__le32 *)&buffer[9]); frame->dwMaxBitRate = le32_to_cpup((__le32 *)&buffer[13]); if (ftype != VS_FRAME_FRAME_BASED) { frame->dwMaxVideoFrameBufferSize = le32_to_cpup((__le32 *)&buffer[17]); frame->dwDefaultFrameInterval = le32_to_cpup((__le32 *)&buffer[21]); frame->bFrameIntervalType = buffer[25]; } else { frame->dwMaxVideoFrameBufferSize = 0; frame->dwDefaultFrameInterval = le32_to_cpup((__le32 *)&buffer[17]); frame->bFrameIntervalType = buffer[21]; } frame->dwFrameInterval = *intervals; /* Several UVC chipsets screw up dwMaxVideoFrameBufferSize * completely. Observed behaviours range from setting the * value to 1.1x the actual frame size of hardwiring the * 16 low bits to 0. This results in a higher than necessary * memory usage as well as a wrong image size information. For * uncompressed formats this can be fixed by computing the * value from the frame size. */ if (!(format->flags & UVC_FMT_FLAG_COMPRESSED)) frame->dwMaxVideoFrameBufferSize = format->bpp * frame->wWidth * frame->wHeight / 8; /* Some bogus devices report dwMinFrameInterval equal to * dwMaxFrameInterval and have dwFrameIntervalStep set to * zero. Setting all null intervals to 1 fixes the problem and * some other divisions by zero which could happen. */ for (i = 0; i < n; ++i) { interval = le32_to_cpup((__le32 *)&buffer[26+4*i]); *(*intervals)++ = interval ? interval : 1; } /* Make sure that the default frame interval stays between * the boundaries. */ n -= frame->bFrameIntervalType ? 1 : 2; frame->dwDefaultFrameInterval = min(frame->dwFrameInterval[n], max(frame->dwFrameInterval[0], frame->dwDefaultFrameInterval)); uvc_trace(UVC_TRACE_DESCR, "- %ux%u (%u.%u fps)\n", frame->wWidth, frame->wHeight, 10000000/frame->dwDefaultFrameInterval, (100000000/frame->dwDefaultFrameInterval)%10); format->nframes++; buflen -= buffer[0]; buffer += buffer[0]; } if (buflen > 2 && buffer[2] == VS_STILL_IMAGE_FRAME) { buflen -= buffer[0]; buffer += buffer[0]; } if (buflen > 2 && buffer[2] == VS_COLORFORMAT) { if (buflen < 6) { uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming" "interface %d COLORFORMAT error\n", dev->udev->devnum, alts->desc.bInterfaceNumber); return -EINVAL; } format->colorspace = uvc_colorspace(buffer[3]); buflen -= buffer[0]; buffer += buffer[0]; } return buffer - start; }
idx: 1046

target: 0
#define sadMxNx4D(m, n) void vp9_sad##m##x##n##x4d_c(const uint8_t *src, int src_stride, const uint8_t *const refs[], int ref_stride, unsigned int *sads) { int i; for (i = 0; i < 4; ++i) sads[i] = vp9_sad##m##x##n##_c(src, src_stride, refs[i], ref_stride); } sadMxN(64, 64) sadMxNxK(64, 64, 3) sadMxNxK(64, 64, 8) sadMxNx4D(64, 64) sadMxN(64, 32) sadMxNx4D(64, 32) sadMxN(32, 64) sadMxNx4D(32, 64) sadMxN(32, 32) sadMxNxK(32, 32, 3) sadMxNxK(32, 32, 8) sadMxNx4D(32, 32) sadMxN(32, 16) sadMxNx4D(32, 16) sadMxN(16, 32) sadMxNx4D(16, 32) sadMxN(16, 16) sadMxNxK(16, 16, 3) sadMxNxK(16, 16, 8) sadMxNx4D(16, 16)
idx: 1047

target: 1
NTLM_AV_PAIR* ntlm_av_pair_get(NTLM_AV_PAIR* pAvPairList, size_t cbAvPairList, NTLM_AV_ID AvId, size_t* pcbAvPairListRemaining) { size_t cbAvPair = cbAvPairList; NTLM_AV_PAIR* pAvPair = pAvPairList; if (!ntlm_av_pair_check(pAvPair, cbAvPair)) pAvPair = NULL; while (pAvPair) { UINT16 id = ntlm_av_pair_get_id(pAvPair); if (id == AvId) break; if (id == MsvAvEOL) { pAvPair = NULL; break; } pAvPair = ntlm_av_pair_next(pAvPair, &cbAvPair); } if (!pAvPair) cbAvPair = 0; if (pcbAvPairListRemaining) *pcbAvPairListRemaining = cbAvPair; return pAvPair; }
idx: 1048

target: 1
static int get_file_caps(struct linux_binprm *bprm) { struct dentry *dentry; int rc = 0; struct vfs_cap_data vcaps; struct inode *inode; if (bprm->file->f_vfsmnt->mnt_flags & MNT_NOSUID) { bprm_clear_caps(bprm); return 0; } dentry = dget(bprm->file->f_dentry); inode = dentry->d_inode; if (!inode->i_op || !inode->i_op->getxattr) goto out; rc = inode->i_op->getxattr(dentry, XATTR_NAME_CAPS, &vcaps, XATTR_CAPS_SZ); if (rc == -ENODATA || rc == -EOPNOTSUPP) { /* no data, that's ok */ rc = 0; goto out; } if (rc < 0) goto out; rc = cap_from_disk(&vcaps, bprm, rc); if (rc == -EINVAL) printk(KERN_NOTICE "%s: cap_from_disk returned %d for %s\n", __func__, rc, bprm->filename); out: dput(dentry); if (rc) bprm_clear_caps(bprm); return rc; }
idx: 1049

target: 0
check_dn_exists(krb5_context context, krb5_ldap_server_handle *ldap_server_handle, const char *dn, krb5_boolean nonkrb_only) { krb5_error_code st = 0, tempst; krb5_ldap_context *ldap_context = context->dal_handle->db_context; LDAP *ld = ldap_server_handle->ldap_handle; LDAPMessage *result = NULL, *ent; char *attrs[] = { "krbticketpolicyreference", "krbprincipalname", NULL }; char **values; LDAP_SEARCH_1(dn, LDAP_SCOPE_BASE, 0, attrs, IGNORE_STATUS); if (st != LDAP_SUCCESS) return set_ldap_error(context, st, OP_SEARCH); ent = ldap_first_entry(ld, result); CHECK_NULL(ent); values = ldap_get_values(ld, ent, "krbticketpolicyreference"); if (values != NULL) ldap_value_free(values); values = ldap_get_values(ld, ent, "krbprincipalname"); if (values != NULL) { ldap_value_free(values); if (nonkrb_only) { st = EINVAL; k5_setmsg(context, st, _("ldap object is already kerberized")); goto cleanup; } } cleanup: ldap_msgfree(result); return st; }
idx: 1050

target: 0
static void gtkui_connection_data_split(void) { GtkWidget *vbox, *scrolled, *label, *child; GtkWidget *hbox_big, *hbox_small, *button; GtkTextIter iter; char tmp[MAX_ASCII_ADDR_LEN]; char title[MAX_ASCII_ADDR_LEN + 6]; static gint scroll_split = 1; DEBUG_MSG("gtk_connection_data_split"); conntrack_hook_conn_del(curr_conn, join_print_po); if (data_window) { child = gtk_bin_get_child(GTK_BIN(data_window)); gtk_container_remove(GTK_CONTAINER(data_window), child); textview3 = NULL; joinedbuf = NULL; endmark3 = NULL; } else { data_window = gtkui_page_new("Connection data", &gtkui_destroy_conndata, &gtkui_connection_data_detach); } curr_conn->flags |= CONN_VIEWING; hbox_big = gtkui_box_new(GTK_ORIENTATION_HORIZONTAL, 5, TRUE); gtk_container_add(GTK_CONTAINER(data_window), hbox_big); gtk_widget_show(hbox_big); vbox = gtkui_box_new(GTK_ORIENTATION_VERTICAL, 0, FALSE); gtk_box_pack_start(GTK_BOX(hbox_big), vbox, TRUE, TRUE, 0); gtk_widget_show(vbox); snprintf(title, MAX_ASCII_ADDR_LEN + 6, "%s:%d", ip_addr_ntoa(&curr_conn->L3_addr1, tmp), ntohs(curr_conn->L4_addr1)); label = gtk_label_new(title); gtk_misc_set_alignment(GTK_MISC(label), 0, 0.5); gtk_box_pack_start(GTK_BOX(vbox), label, FALSE, FALSE, 0); gtk_widget_show(label); scrolled = gtk_scrolled_window_new(NULL, NULL); gtk_scrolled_window_set_policy(GTK_SCROLLED_WINDOW(scrolled), GTK_POLICY_AUTOMATIC, GTK_POLICY_AUTOMATIC); gtk_scrolled_window_set_shadow_type(GTK_SCROLLED_WINDOW(scrolled), GTK_SHADOW_IN); gtk_box_pack_start(GTK_BOX(vbox), scrolled, TRUE, TRUE, 0); gtk_widget_show(scrolled); textview1 = gtk_text_view_new(); gtk_text_view_set_wrap_mode(GTK_TEXT_VIEW(textview1), GTK_WRAP_CHAR); gtk_text_view_set_editable(GTK_TEXT_VIEW(textview1), FALSE); gtk_text_view_set_cursor_visible(GTK_TEXT_VIEW(textview1), FALSE); gtk_text_view_set_right_margin(GTK_TEXT_VIEW(textview1), 5); gtk_text_view_set_right_margin(GTK_TEXT_VIEW(textview1), 5); gtk_container_add(GTK_CONTAINER(scrolled), textview1); gtk_widget_show(textview1); splitbuf1 = gtk_text_view_get_buffer(GTK_TEXT_VIEW(textview1)); gtk_text_buffer_create_tag(splitbuf1, "blue_fg", "foreground", "blue", NULL); gtk_text_buffer_create_tag(splitbuf1, "monospace", "family", "monospace", NULL); gtk_text_buffer_get_end_iter(splitbuf1, &iter); endmark1 = gtk_text_buffer_create_mark(splitbuf1, "end", &iter, FALSE); hbox_small = gtkui_box_new(GTK_ORIENTATION_HORIZONTAL, 5, TRUE); gtk_box_pack_start(GTK_BOX(vbox), hbox_small, FALSE, FALSE, 0); gtk_widget_show(hbox_small); button = gtk_button_new_with_mnemonic("_Join Views"); g_signal_connect(G_OBJECT(button), "clicked", G_CALLBACK(gtkui_connection_data_join), NULL); gtk_box_pack_start(GTK_BOX(hbox_small), button, TRUE, TRUE, 0); gtk_widget_show(button); button = gtk_button_new_with_mnemonic("_Inject Data"); g_signal_connect(G_OBJECT(button), "clicked", G_CALLBACK(gtkui_connection_inject), NULL); gtk_box_pack_start(GTK_BOX(hbox_small), button, TRUE, TRUE, 0); gtk_widget_show(button); vbox = gtkui_box_new(GTK_ORIENTATION_VERTICAL, 0, FALSE); gtk_box_pack_start(GTK_BOX(hbox_big), vbox, TRUE, TRUE, 0); gtk_widget_show(vbox); snprintf(title, MAX_ASCII_ADDR_LEN + 6, "%s:%d", ip_addr_ntoa(&curr_conn->L3_addr2, tmp), ntohs(curr_conn->L4_addr2)); label = gtk_label_new(title); gtk_misc_set_alignment(GTK_MISC(label), 0, 0.5); gtk_box_pack_start(GTK_BOX(vbox), label, FALSE, FALSE, 0); gtk_widget_show(label); scrolled = gtk_scrolled_window_new(NULL, NULL); gtk_scrolled_window_set_policy(GTK_SCROLLED_WINDOW(scrolled), GTK_POLICY_AUTOMATIC, GTK_POLICY_AUTOMATIC); gtk_scrolled_window_set_shadow_type(GTK_SCROLLED_WINDOW(scrolled), GTK_SHADOW_IN); gtk_box_pack_start(GTK_BOX(vbox), scrolled, TRUE, TRUE, 0); gtk_widget_show(scrolled); textview2 = gtk_text_view_new(); gtk_text_view_set_wrap_mode(GTK_TEXT_VIEW(textview2), GTK_WRAP_CHAR); gtk_text_view_set_editable(GTK_TEXT_VIEW(textview2), FALSE); gtk_text_view_set_cursor_visible(GTK_TEXT_VIEW(textview2), FALSE); gtk_text_view_set_right_margin(GTK_TEXT_VIEW(textview2), 5); gtk_text_view_set_right_margin(GTK_TEXT_VIEW(textview2), 5); gtk_container_add(GTK_CONTAINER(scrolled), textview2); gtk_widget_show(textview2); splitbuf2 = gtk_text_view_get_buffer(GTK_TEXT_VIEW(textview2)); gtk_text_buffer_create_tag(splitbuf2, "blue_fg", "foreground", "blue", NULL); gtk_text_buffer_create_tag(splitbuf2, "monospace", "family", "monospace", NULL); gtk_text_buffer_get_end_iter(splitbuf2, &iter); endmark2 = gtk_text_buffer_create_mark(splitbuf2, "end", &iter, FALSE); hbox_small = gtkui_box_new(GTK_ORIENTATION_HORIZONTAL, 5, TRUE); gtk_box_pack_start(GTK_BOX(vbox), hbox_small, FALSE, FALSE, 0); gtk_widget_show(hbox_small); button = gtk_button_new_with_mnemonic("Inject _File"); g_signal_connect(G_OBJECT(button), "clicked", G_CALLBACK(gtkui_connection_inject_file), NULL); gtk_box_pack_start(GTK_BOX(hbox_small), button, TRUE, TRUE, 0); gtk_widget_show(button); button = gtk_button_new_with_mnemonic("_Kill Connection"); g_signal_connect(G_OBJECT(button), "clicked", G_CALLBACK(gtkui_connection_kill_curr_conn), NULL); gtk_box_pack_start(GTK_BOX(hbox_small), button, TRUE, TRUE, 0); gtk_widget_show(button); gtk_widget_show(data_window); if (GTK_IS_WINDOW(data_window)) gtk_window_present(GTK_WINDOW(data_window)); else gtkui_page_present(data_window); g_timeout_add(500, gtkui_connections_scroll, &scroll_split); connbuf_print(&curr_conn->data, split_print); conntrack_hook_conn_add(curr_conn, split_print_po); }
1051
0
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc) { void **lp, **p; p = (void **)l1_phys_map; #if TARGET_PHYS_ADDR_SPACE_BITS > 32 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS) #error unsupported TARGET_PHYS_ADDR_SPACE_BITS #endif lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1)); p = *lp; if (!p) { /* allocate if not found */ if (!alloc) return NULL; p = qemu_vmalloc(sizeof(void *) * L1_SIZE); memset(p, 0, sizeof(void *) * L1_SIZE); *lp = p; } #endif lp = p + ((index >> L2_BITS) & (L1_SIZE - 1)); p = *lp; if (!p) { /* allocate if not found */ if (!alloc) return NULL; p = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE); memset(p, 0, sizeof(PhysPageDesc) * L2_SIZE); *lp = p; } return ((PhysPageDesc *)p) + (index & (L2_SIZE - 1)); }
1052
0
int main ( int argc , char * argv [ ] ) { return ntpqmain ( argc , argv ) ; }
1053
0
check_dn_in_container(krb5_context context, const char *dn, char *const *subtrees, unsigned int ntrees) { unsigned int i; size_t dnlen = strlen(dn), stlen; for (i = 0; i < ntrees; i++) { if (subtrees[i] == NULL || *subtrees[i] == '\0') return 0; stlen = strlen(subtrees[i]); if (dnlen >= stlen && strcasecmp(dn + dnlen - stlen, subtrees[i]) == 0 && (dnlen == stlen || dn[dnlen - stlen - 1] == ',')) return 0; } k5_setmsg(context, EINVAL, _("DN is out of the realm subtree")); return EINVAL; }
1054
0
CPUState *cpu_mb_init (const char *cpu_model) { CPUState *env; static int tcg_initialized = 0; int i; env = qemu_mallocz(sizeof(CPUState)); cpu_exec_init(env); cpu_reset(env); env->pvr.regs[0] = PVR0_PVR_FULL_MASK \ | PVR0_USE_BARREL_MASK \ | PVR0_USE_DIV_MASK \ | PVR0_USE_HW_MUL_MASK \ | PVR0_USE_EXC_MASK \ | PVR0_USE_ICACHE_MASK \ | PVR0_USE_DCACHE_MASK \ | PVR0_USE_MMU \ | (0xb << 8); env->pvr.regs[2] = PVR2_D_OPB_MASK \ | PVR2_D_LMB_MASK \ | PVR2_I_OPB_MASK \ | PVR2_I_LMB_MASK \ | PVR2_USE_MSR_INSTR \ | PVR2_USE_PCMP_INSTR \ | PVR2_USE_BARREL_MASK \ | PVR2_USE_DIV_MASK \ | PVR2_USE_HW_MUL_MASK \ | PVR2_USE_MUL64_MASK \ | 0; env->pvr.regs[10] = 0x0c000000; /* Default to spartan 3a dsp family. */ env->pvr.regs[11] = PVR11_USE_MMU | (16 << 17); #if !defined(CONFIG_USER_ONLY) env->mmu.c_mmu = 3; env->mmu.c_mmu_tlb_access = 3; env->mmu.c_mmu_zones = 16; #endif if (tcg_initialized) return env; tcg_initialized = 1; cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env"); env_debug = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, debug), "debug0"); env_iflags = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, iflags), "iflags"); env_imm = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, imm), "imm"); env_btarget = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, btarget), "btarget"); env_btaken = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, btaken), "btaken"); for (i = 0; i < ARRAY_SIZE(cpu_R); i++) { cpu_R[i] = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, regs[i]), regnames[i]); } for (i = 0; i < ARRAY_SIZE(cpu_SR); i++) { cpu_SR[i] = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, sregs[i]), special_regnames[i]); } #define GEN_HELPER 2 #include "helper.h" return env; }
1055
1
static INLINE UINT16 ntlm_av_pair_get_id(const NTLM_AV_PAIR* pAvPair) { UINT16 AvId; Data_Read_UINT16(&pAvPair->AvId, AvId); return AvId; }
1057
0
void kadmin_lock ( int argc , char * argv [ ] ) { kadm5_ret_t retval ; if ( locked ) return ; retval = kadm5_lock ( handle ) ; if ( retval ) { com_err ( "lock" , retval , "" ) ; return ; } locked = 1 ; }
1058
1
static int lbs_process_bss(struct bss_descriptor *bss, uint8_t **pbeaconinfo, int *bytesleft) { struct ieeetypes_fhparamset *pFH; struct ieeetypes_dsparamset *pDS; struct ieeetypes_cfparamset *pCF; struct ieeetypes_ibssparamset *pibss; DECLARE_MAC_BUF(mac); struct ieeetypes_countryinfoset *pcountryinfo; uint8_t *pos, *end, *p; uint8_t n_ex_rates = 0, got_basic_rates = 0, n_basic_rates = 0; uint16_t beaconsize = 0; int ret; lbs_deb_enter(LBS_DEB_SCAN); if (*bytesleft >= sizeof(beaconsize)) { /* Extract & convert beacon size from the command buffer */ beaconsize = get_unaligned_le16(*pbeaconinfo); *bytesleft -= sizeof(beaconsize); *pbeaconinfo += sizeof(beaconsize); } if (beaconsize == 0 || beaconsize > *bytesleft) { *pbeaconinfo += *bytesleft; *bytesleft = 0; ret = -1; goto done; } /* Initialize the current working beacon pointer for this BSS iteration */ pos = *pbeaconinfo; end = pos + beaconsize; /* Advance the return beacon pointer past the current beacon */ *pbeaconinfo += beaconsize; *bytesleft -= beaconsize; memcpy(bss->bssid, pos, ETH_ALEN); lbs_deb_scan("process_bss: BSSID %s\n", print_mac(mac, bss->bssid)); pos += ETH_ALEN; if ((end - pos) < 12) { lbs_deb_scan("process_bss: Not enough bytes left\n"); ret = -1; goto done; } /* * next 4 fields are RSSI, time stamp, beacon interval, * and capability information */ /* RSSI is 1 byte long */ bss->rssi = *pos; lbs_deb_scan("process_bss: RSSI %d\n", *pos); pos++; /* time stamp is 8 bytes long */ pos += 8; /* beacon interval is 2 bytes long */ bss->beaconperiod = get_unaligned_le16(pos); pos += 2; /* capability information is 2 bytes long */ bss->capability = get_unaligned_le16(pos); lbs_deb_scan("process_bss: capabilities 0x%04x\n", bss->capability); pos += 2; if (bss->capability & WLAN_CAPABILITY_PRIVACY) lbs_deb_scan("process_bss: WEP enabled\n"); if (bss->capability & WLAN_CAPABILITY_IBSS) bss->mode = IW_MODE_ADHOC; else bss->mode = IW_MODE_INFRA; /* rest of the current buffer are IE's */ lbs_deb_scan("process_bss: IE len %zd\n", end - pos); lbs_deb_hex(LBS_DEB_SCAN, "process_bss: IE info", pos, end - pos); /* process variable IE */ while (pos <= end - 2) { struct ieee80211_info_element * elem = (void *)pos; if (pos + elem->len > end) { lbs_deb_scan("process_bss: error in processing IE, " "bytes left < IE length\n"); break; } switch (elem->id) { case MFIE_TYPE_SSID: bss->ssid_len = elem->len; memcpy(bss->ssid, elem->data, elem->len); lbs_deb_scan("got SSID IE: '%s', len %u\n", escape_essid(bss->ssid, bss->ssid_len), bss->ssid_len); break; case MFIE_TYPE_RATES: n_basic_rates = min_t(uint8_t, MAX_RATES, elem->len); memcpy(bss->rates, elem->data, n_basic_rates); got_basic_rates = 1; lbs_deb_scan("got RATES IE\n"); break; case MFIE_TYPE_FH_SET: pFH = (struct ieeetypes_fhparamset *) pos; memmove(&bss->phyparamset.fhparamset, pFH, sizeof(struct ieeetypes_fhparamset)); lbs_deb_scan("got FH IE\n"); break; case MFIE_TYPE_DS_SET: pDS = (struct ieeetypes_dsparamset *) pos; bss->channel = pDS->currentchan; memcpy(&bss->phyparamset.dsparamset, pDS, sizeof(struct ieeetypes_dsparamset)); lbs_deb_scan("got DS IE, channel %d\n", bss->channel); break; case MFIE_TYPE_CF_SET: pCF = (struct ieeetypes_cfparamset *) pos; memcpy(&bss->ssparamset.cfparamset, pCF, sizeof(struct ieeetypes_cfparamset)); lbs_deb_scan("got CF IE\n"); break; case MFIE_TYPE_IBSS_SET: pibss = (struct ieeetypes_ibssparamset *) pos; bss->atimwindow = le16_to_cpu(pibss->atimwindow); memmove(&bss->ssparamset.ibssparamset, pibss, sizeof(struct ieeetypes_ibssparamset)); lbs_deb_scan("got IBSS IE\n"); break; case MFIE_TYPE_COUNTRY: pcountryinfo = (struct ieeetypes_countryinfoset *) pos; lbs_deb_scan("got COUNTRY IE\n"); if (pcountryinfo->len < sizeof(pcountryinfo->countrycode) || pcountryinfo->len > 254) { lbs_deb_scan("process_bss: 11D- Err CountryInfo len %d, min %zd, max 254\n", pcountryinfo->len, sizeof(pcountryinfo->countrycode)); ret = -1; goto done; } memcpy(&bss->countryinfo, pcountryinfo, pcountryinfo->len + 2); lbs_deb_hex(LBS_DEB_SCAN, "process_bss: 11d countryinfo", (uint8_t *) pcountryinfo, (int) (pcountryinfo->len + 2)); break; case MFIE_TYPE_RATES_EX: /* only process extended supported rate if data rate is * already found. Data rate IE should come before * extended supported rate IE */ lbs_deb_scan("got RATESEX IE\n"); if (!got_basic_rates) { lbs_deb_scan("... but ignoring it\n"); break; } n_ex_rates = elem->len; if (n_basic_rates + n_ex_rates > MAX_RATES) n_ex_rates = MAX_RATES - n_basic_rates; p = bss->rates + n_basic_rates; memcpy(p, elem->data, n_ex_rates); break; case MFIE_TYPE_GENERIC: if (elem->len >= 4 && elem->data[0] == 0x00 && elem->data[1] == 0x50 && elem->data[2] == 0xf2 && elem->data[3] == 0x01) { bss->wpa_ie_len = min(elem->len + 2, MAX_WPA_IE_LEN); memcpy(bss->wpa_ie, elem, bss->wpa_ie_len); lbs_deb_scan("got WPA IE\n"); lbs_deb_hex(LBS_DEB_SCAN, "WPA IE", bss->wpa_ie, elem->len); } else if (elem->len >= MARVELL_MESH_IE_LENGTH && elem->data[0] == 0x00 && elem->data[1] == 0x50 && elem->data[2] == 0x43 && elem->data[3] == 0x04) { lbs_deb_scan("got mesh IE\n"); bss->mesh = 1; } else { lbs_deb_scan("got generic IE: %02x:%02x:%02x:%02x, len %d\n", elem->data[0], elem->data[1], elem->data[2], elem->data[3], elem->len); } break; case MFIE_TYPE_RSN: lbs_deb_scan("got RSN IE\n"); bss->rsn_ie_len = min(elem->len + 2, MAX_WPA_IE_LEN); memcpy(bss->rsn_ie, elem, bss->rsn_ie_len); lbs_deb_hex(LBS_DEB_SCAN, "process_bss: RSN_IE", bss->rsn_ie, elem->len); break; default: lbs_deb_scan("got IE 0x%04x, len %d\n", elem->id, elem->len); break; } pos += elem->len + 2; } /* Timestamp */ bss->last_scanned = jiffies; lbs_unset_basic_rate_flags(bss->rates, sizeof(bss->rates)); ret = 0; done: lbs_deb_leave_args(LBS_DEB_SCAN, "ret %d", ret); return ret; }
1060
0
int cpu_x86_exec(CPUX86State *env1) { int saved_T0, saved_T1, saved_A0; CPUX86State *saved_env; #ifdef reg_EAX int saved_EAX; #endif #ifdef reg_ECX int saved_ECX; #endif #ifdef reg_EDX int saved_EDX; #endif #ifdef reg_EBX int saved_EBX; #endif #ifdef reg_ESP int saved_ESP; #endif #ifdef reg_EBP int saved_EBP; #endif #ifdef reg_ESI int saved_ESI; #endif #ifdef reg_EDI int saved_EDI; #endif #ifdef __sparc__ int saved_i7, tmp_T0; #endif int code_gen_size, ret; void (*gen_func)(void); TranslationBlock *tb, **ptb; uint8_t *tc_ptr, *cs_base, *pc; unsigned int flags; /* first we save global registers */ saved_T0 = T0; saved_T1 = T1; saved_A0 = A0; saved_env = env; env = env1; #ifdef reg_EAX saved_EAX = EAX; EAX = env->regs[R_EAX]; #endif #ifdef reg_ECX saved_ECX = ECX; ECX = env->regs[R_ECX]; #endif #ifdef reg_EDX saved_EDX = EDX; EDX = env->regs[R_EDX]; #endif #ifdef reg_EBX saved_EBX = EBX; EBX = env->regs[R_EBX]; #endif #ifdef reg_ESP saved_ESP = ESP; ESP = env->regs[R_ESP]; #endif #ifdef reg_EBP saved_EBP = EBP; EBP = env->regs[R_EBP]; #endif #ifdef reg_ESI saved_ESI = ESI; ESI = env->regs[R_ESI]; #endif #ifdef reg_EDI saved_EDI = EDI; EDI = env->regs[R_EDI]; #endif #ifdef __sparc__ /* we also save i7 because longjmp may not restore it */ asm volatile ("mov %%i7, %0" : "=r" (saved_i7)); #endif /* put eflags in CPU temporary format */ CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C); DF = 1 - (2 * ((env->eflags >> 10) & 1)); CC_OP = CC_OP_EFLAGS; env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C); env->interrupt_request = 0; /* prepare setjmp context for exception handling */ if (setjmp(env->jmp_env) == 0) { T0 = 0; /* force lookup of first TB */ for(;;) { #ifdef __sparc__ /* g1 can be modified by some libc? functions */ tmp_T0 = T0; #endif if (env->interrupt_request) { env->exception_index = EXCP_INTERRUPT; cpu_loop_exit(); } #ifdef DEBUG_EXEC if (loglevel) { /* XXX: save all volatile state in cpu state */ /* restore flags in standard format */ env->regs[R_EAX] = EAX; env->regs[R_EBX] = EBX; env->regs[R_ECX] = ECX; env->regs[R_EDX] = EDX; env->regs[R_ESI] = ESI; env->regs[R_EDI] = EDI; env->regs[R_EBP] = EBP; env->regs[R_ESP] = ESP; env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK); cpu_x86_dump_state(env, logfile, 0); env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C); } #endif /* we compute the CPU state. We assume it will not change during the whole generated block. */ flags = env->seg_cache[R_CS].seg_32bit << GEN_FLAG_CODE32_SHIFT; flags |= env->seg_cache[R_SS].seg_32bit << GEN_FLAG_SS32_SHIFT; flags |= (((unsigned long)env->seg_cache[R_DS].base | (unsigned long)env->seg_cache[R_ES].base | (unsigned long)env->seg_cache[R_SS].base) != 0) << GEN_FLAG_ADDSEG_SHIFT; if (!(env->eflags & VM_MASK)) { flags |= (env->segs[R_CS] & 3) << GEN_FLAG_CPL_SHIFT; } else { /* NOTE: a dummy CPL is kept */ flags |= (1 << GEN_FLAG_VM_SHIFT); flags |= (3 << GEN_FLAG_CPL_SHIFT); } flags |= (env->eflags & (IOPL_MASK | TF_MASK)); cs_base = env->seg_cache[R_CS].base; pc = cs_base + env->eip; tb = tb_find(&ptb, (unsigned long)pc, (unsigned long)cs_base, flags); if (!tb) { spin_lock(&tb_lock); /* if no translated code available, then translate it now */ tb = tb_alloc((unsigned long)pc); if (!tb) { /* flush must be done */ tb_flush(); /* cannot fail at this point */ tb = tb_alloc((unsigned long)pc); /* don't forget to invalidate previous TB info */ ptb = &tb_hash[tb_hash_func((unsigned long)pc)]; T0 = 0; } tc_ptr = code_gen_ptr; tb->tc_ptr = tc_ptr; tb->cs_base = (unsigned long)cs_base; tb->flags = flags; ret = cpu_x86_gen_code(tb, CODE_GEN_MAX_SIZE, &code_gen_size); /* if invalid instruction, signal it */ if (ret != 0) { /* NOTE: the tb is allocated but not linked, so we can leave it */ spin_unlock(&tb_lock); raise_exception(EXCP06_ILLOP); } *ptb = tb; tb->hash_next = NULL; tb_link(tb); code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1)); spin_unlock(&tb_lock); } #ifdef DEBUG_EXEC if (loglevel) { fprintf(logfile, "Trace 0x%08lx [0x%08lx] %s\n", (long)tb->tc_ptr, (long)tb->pc, lookup_symbol((void *)tb->pc)); } #endif #ifdef __sparc__ T0 = tmp_T0; #endif /* see if we can patch the calling TB */ if (T0 != 0 && !(env->eflags & TF_MASK)) { spin_lock(&tb_lock); tb_add_jump((TranslationBlock *)(T0 & ~3), T0 & 3, tb); spin_unlock(&tb_lock); } tc_ptr = tb->tc_ptr; /* execute the generated code */ gen_func = (void *)tc_ptr; #if defined(__sparc__) __asm__ __volatile__("call %0\n\t" "mov %%o7,%%i0" : /* no outputs */ : "r" (gen_func) : "i0", "i1", "i2", "i3", "i4", "i5"); #elif defined(__arm__) asm volatile ("mov pc, %0\n\t" ".global exec_loop\n\t" "exec_loop:\n\t" : /* no outputs */ : "r" (gen_func) : "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14"); #else gen_func(); #endif } } ret = env->exception_index; /* restore flags in standard format */ env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK); /* restore global registers */ #ifdef reg_EAX EAX = saved_EAX; #endif #ifdef reg_ECX ECX = saved_ECX; #endif #ifdef reg_EDX EDX = saved_EDX; #endif #ifdef reg_EBX EBX = saved_EBX; #endif #ifdef reg_ESP ESP = saved_ESP; #endif #ifdef reg_EBP EBP = saved_EBP; #endif #ifdef reg_ESI ESI = saved_ESI; #endif #ifdef reg_EDI EDI = saved_EDI; #endif #ifdef __sparc__ asm volatile ("mov %0, %%i7" : : "r" (saved_i7)); #endif T0 = saved_T0; T1 = saved_T1; A0 = saved_A0; env = saved_env; return ret; }
1061
0
int load_initrd ( const char * filename , uint8_t * addr ) { int fd , size ; printf ( "Load initrd\n" ) ; fd = open ( filename , O_RDONLY ) ; if ( fd < 0 ) return - 1 ; size = read ( fd , addr , 16 * 1024 * 1024 ) ; if ( size < 0 ) goto fail ; close ( fd ) ; printf ( "Load initrd: %d\n" , size ) ; return size ; fail : close ( fd ) ; printf ( "Load initrd failed\n" ) ; return - 1 ; }
1062
1
static INLINE SIZE_T ntlm_av_pair_get_len(const NTLM_AV_PAIR* pAvPair) { UINT16 AvLen; Data_Read_UINT16(&pAvPair->AvLen, AvLen); return AvLen; }
1063
1
static int tvaudio_get_ctrl(struct CHIPSTATE *chip, struct v4l2_control *ctrl) { struct CHIPDESC *desc = chip->desc; switch (ctrl->id) { case V4L2_CID_AUDIO_MUTE: ctrl->value=chip->muted; return 0; case V4L2_CID_AUDIO_VOLUME: if (!(desc->flags & CHIP_HAS_VOLUME)) break; ctrl->value = max(chip->left,chip->right); return 0; case V4L2_CID_AUDIO_BALANCE: { int volume; if (!(desc->flags & CHIP_HAS_VOLUME)) break; volume = max(chip->left,chip->right); if (volume) ctrl->value=(32768*min(chip->left,chip->right))/volume; else ctrl->value=32768; return 0; } case V4L2_CID_AUDIO_BASS: if (desc->flags & CHIP_HAS_BASSTREBLE) break; ctrl->value = chip->bass; return 0; case V4L2_CID_AUDIO_TREBLE: if (desc->flags & CHIP_HAS_BASSTREBLE) return -EINVAL; ctrl->value = chip->treble; return 0; } return -EINVAL; }
1064
0
krb5_ldap_put_principal(krb5_context context, krb5_db_entry *entry, char **db_args) { int l=0, kerberos_principal_object_type=0; unsigned int ntrees=0, tre=0; krb5_error_code st=0, tempst=0; LDAP *ld=NULL; LDAPMessage *result=NULL, *ent=NULL; char **subtreelist = NULL; char *user=NULL, *subtree=NULL, *principal_dn=NULL; char *strval[10]={NULL}, errbuf[1024]; char *filtuser=NULL; struct berval **bersecretkey=NULL; LDAPMod **mods=NULL; krb5_boolean create_standalone=FALSE; krb5_boolean establish_links=FALSE; char *standalone_principal_dn=NULL; krb5_tl_data *tl_data=NULL; krb5_key_data **keys=NULL; kdb5_dal_handle *dal_handle=NULL; krb5_ldap_context *ldap_context=NULL; krb5_ldap_server_handle *ldap_server_handle=NULL; osa_princ_ent_rec princ_ent = {0}; xargs_t xargs = {0}; char *polname = NULL; OPERATION optype; krb5_boolean found_entry = FALSE; /* Clear the global error string */ krb5_clear_error_message(context); SETUP_CONTEXT(); if (ldap_context->lrparams == NULL || ldap_context->container_dn == NULL) return EINVAL; /* get ldap handle */ GET_HANDLE(); if (!is_principal_in_realm(ldap_context, entry->princ)) { st = EINVAL; k5_setmsg(context, st, _("Principal does not belong to the default realm")); goto cleanup; } /* get the principal information to act on */ if (((st=krb5_unparse_name(context, entry->princ, &user)) != 0) || ((st=krb5_ldap_unparse_principal_name(user)) != 0)) goto cleanup; filtuser = ldap_filter_correct(user); if (filtuser == NULL) { st = ENOMEM; goto cleanup; } /* Identity the type of operation, it can be * add principal or modify principal. * hack if the entry->mask has KRB_PRINCIPAL flag set * then it is a add operation */ if (entry->mask & KADM5_PRINCIPAL) optype = ADD_PRINCIPAL; else optype = MODIFY_PRINCIPAL; if (((st=krb5_get_princ_type(context, entry, &kerberos_principal_object_type)) != 0) || ((st=krb5_get_userdn(context, entry, &principal_dn)) != 0)) goto cleanup; if ((st=process_db_args(context, db_args, &xargs, optype)) != 0) goto cleanup; if (entry->mask & KADM5_LOAD) { unsigned int tree = 0; int numlentries = 0; char *filter = NULL; /* A load operation is special, will do a mix-in (add krbprinc * attrs to a non-krb object entry) if an object exists with a * matching krbprincipalname attribute so try to find existing * object and set principal_dn. This assumes that the * krbprincipalname attribute is unique (only one object entry has * a particular krbprincipalname attribute). */ if (asprintf(&filter, FILTER"%s))", filtuser) < 0) { filter = NULL; st = ENOMEM; goto cleanup; } /* get the current subtree list */ if ((st = krb5_get_subtree_info(ldap_context, &subtreelist, &ntrees)) != 0) goto cleanup; found_entry = FALSE; /* search for entry with matching krbprincipalname attribute */ for (tree = 0; found_entry == FALSE && tree < ntrees; ++tree) { if (principal_dn == NULL) { LDAP_SEARCH_1(subtreelist[tree], ldap_context->lrparams->search_scope, filter, principal_attributes, IGNORE_STATUS); } else { /* just look for entry with principal_dn */ LDAP_SEARCH_1(principal_dn, LDAP_SCOPE_BASE, filter, principal_attributes, IGNORE_STATUS); } if (st == LDAP_SUCCESS) { numlentries = ldap_count_entries(ld, result); if (numlentries > 1) { free(filter); st = EINVAL; k5_setmsg(context, st, _("operation can not continue, more than one " "entry with principal name \"%s\" found"), user); goto cleanup; } else if (numlentries == 1) { found_entry = TRUE; if (principal_dn == NULL) { ent = ldap_first_entry(ld, result); if (ent != NULL) { /* setting principal_dn will cause that entry to be modified further down */ if ((principal_dn = ldap_get_dn(ld, ent)) == NULL) { ldap_get_option (ld, LDAP_OPT_RESULT_CODE, &st); st = set_ldap_error (context, st, 0); free(filter); goto cleanup; } } } } } else if (st != LDAP_NO_SUCH_OBJECT) { /* could not perform search, return with failure */ st = set_ldap_error (context, st, 0); free(filter); goto cleanup; } ldap_msgfree(result); result = NULL; /* * If it isn't found then assume a standalone princ entry is to * be created. */ } /* end for (tree = 0; principal_dn == ... */ free(filter); if (found_entry == FALSE && principal_dn != NULL) { /* * if principal_dn is null then there is code further down to * deal with setting standalone_principal_dn. Also note that * this will set create_standalone true for * non-mix-in entries which is okay if loading from a dump. */ create_standalone = TRUE; standalone_principal_dn = strdup(principal_dn); CHECK_NULL(standalone_principal_dn); } } /* end if (entry->mask & KADM5_LOAD */ /* time to generate the DN information with the help of * containerdn, principalcontainerreference or * realmcontainerdn information */ if (principal_dn == NULL && xargs.dn == NULL) { /* creation of standalone principal */ /* get the subtree information */ if (entry->princ->length == 2 && entry->princ->data[0].length == strlen("krbtgt") && strncmp(entry->princ->data[0].data, "krbtgt", entry->princ->data[0].length) == 0) { /* if the principal is a inter-realm principal, always created in the realm container */ subtree = strdup(ldap_context->lrparams->realmdn); } else if (xargs.containerdn) { if ((st=checkattributevalue(ld, xargs.containerdn, NULL, NULL, NULL)) != 0) { if (st == KRB5_KDB_NOENTRY || st == KRB5_KDB_CONSTRAINT_VIOLATION) { int ost = st; st = EINVAL; k5_wrapmsg(context, ost, st, _("'%s' not found"), xargs.containerdn); } goto cleanup; } subtree = strdup(xargs.containerdn); } else if (ldap_context->lrparams->containerref && strlen(ldap_context->lrparams->containerref) != 0) { /* * Here the subtree should be changed with * principalcontainerreference attribute value */ subtree = strdup(ldap_context->lrparams->containerref); } else { subtree = strdup(ldap_context->lrparams->realmdn); } CHECK_NULL(subtree); if (asprintf(&standalone_principal_dn, "krbprincipalname=%s,%s", filtuser, subtree) < 0) standalone_principal_dn = NULL; CHECK_NULL(standalone_principal_dn); /* * free subtree when you are done using the subtree * set the boolean create_standalone to TRUE */ create_standalone = TRUE; free(subtree); subtree = NULL; } /* * If the DN information is presented by the user, time to * validate the input to ensure that the DN falls under * any of the subtrees */ if (xargs.dn_from_kbd == TRUE) { /* Get the current subtree list if we haven't already done so. */ if (subtreelist == NULL) { st = krb5_get_subtree_info(ldap_context, &subtreelist, &ntrees); if (st) goto cleanup; } st = validate_xargs(context, ldap_server_handle, &xargs, standalone_principal_dn, subtreelist, ntrees); if (st) goto cleanup; } if (xargs.linkdn != NULL) { /* * link information can be changed using modprinc. * However, link information can be changed only on the * standalone kerberos principal objects. A standalone * kerberos principal object is of type krbprincipal * structural objectclass. * * NOTE: kerberos principals on an ldap object can't be * linked to other ldap objects. */ if (optype == MODIFY_PRINCIPAL && kerberos_principal_object_type != KDB_STANDALONE_PRINCIPAL_OBJECT) { st = EINVAL; snprintf(errbuf, sizeof(errbuf), _("link information can not be set/updated as the " "kerberos principal belongs to an ldap object")); k5_setmsg(context, st, "%s", errbuf); goto cleanup; } /* * Check the link information. If there is already a link * existing then this operation is not allowed. */ { char **linkdns=NULL; int j=0; if ((st=krb5_get_linkdn(context, entry, &linkdns)) != 0) { snprintf(errbuf, sizeof(errbuf), _("Failed getting object references")); k5_setmsg(context, st, "%s", errbuf); goto cleanup; } if (linkdns != NULL) { st = EINVAL; snprintf(errbuf, sizeof(errbuf), _("kerberos principal is already linked to a ldap " "object")); k5_setmsg(context, st, "%s", errbuf); for (j=0; linkdns[j] != NULL; ++j) free (linkdns[j]); free (linkdns); goto cleanup; } } establish_links = TRUE; } if (entry->mask & KADM5_LAST_SUCCESS) { memset(strval, 0, sizeof(strval)); if ((strval[0]=getstringtime(entry->last_success)) == NULL) goto cleanup; if ((st=krb5_add_str_mem_ldap_mod(&mods, "krbLastSuccessfulAuth", LDAP_MOD_REPLACE, strval)) != 0) { free (strval[0]); goto cleanup; } free (strval[0]); } if (entry->mask & KADM5_LAST_FAILED) { memset(strval, 0, sizeof(strval)); if ((strval[0]=getstringtime(entry->last_failed)) == NULL) goto cleanup; if ((st=krb5_add_str_mem_ldap_mod(&mods, "krbLastFailedAuth", LDAP_MOD_REPLACE, strval)) != 0) { free (strval[0]); goto cleanup; } free(strval[0]); } if (entry->mask & KADM5_FAIL_AUTH_COUNT) { krb5_kvno fail_auth_count; fail_auth_count = entry->fail_auth_count; if (entry->mask & KADM5_FAIL_AUTH_COUNT_INCREMENT) fail_auth_count++; st = krb5_add_int_mem_ldap_mod(&mods, "krbLoginFailedCount", LDAP_MOD_REPLACE, fail_auth_count); if (st != 0) goto cleanup; } else if (entry->mask & KADM5_FAIL_AUTH_COUNT_INCREMENT) { int attr_mask = 0; krb5_boolean has_fail_count; /* Check if the krbLoginFailedCount attribute exists. (Through * krb5 1.8.1, it wasn't set in new entries.) */ st = krb5_get_attributes_mask(context, entry, &attr_mask); if (st != 0) goto cleanup; has_fail_count = ((attr_mask & KDB_FAIL_AUTH_COUNT_ATTR) != 0); /* * If the client library and server supports RFC 4525, * then use it to increment by one the value of the * krbLoginFailedCount attribute. Otherwise, assert the * (provided) old value by deleting it before adding. */ #ifdef LDAP_MOD_INCREMENT if (ldap_server_handle->server_info->modify_increment && has_fail_count) { st = krb5_add_int_mem_ldap_mod(&mods, "krbLoginFailedCount", LDAP_MOD_INCREMENT, 1); if (st != 0) goto cleanup; } else { #endif /* LDAP_MOD_INCREMENT */ if (has_fail_count) { st = krb5_add_int_mem_ldap_mod(&mods, "krbLoginFailedCount", LDAP_MOD_DELETE, entry->fail_auth_count); if (st != 0) goto cleanup; } st = krb5_add_int_mem_ldap_mod(&mods, "krbLoginFailedCount", LDAP_MOD_ADD, entry->fail_auth_count + 1); if (st != 0) goto cleanup; #ifdef LDAP_MOD_INCREMENT } #endif } else if (optype == ADD_PRINCIPAL) { /* Initialize krbLoginFailedCount in new entries to help avoid a * race during the first failed login. */ st = krb5_add_int_mem_ldap_mod(&mods, "krbLoginFailedCount", LDAP_MOD_ADD, 0); } if (entry->mask & KADM5_MAX_LIFE) { if ((st=krb5_add_int_mem_ldap_mod(&mods, "krbmaxticketlife", LDAP_MOD_REPLACE, entry->max_life)) != 0) goto cleanup; } if (entry->mask & KADM5_MAX_RLIFE) { if ((st=krb5_add_int_mem_ldap_mod(&mods, "krbmaxrenewableage", LDAP_MOD_REPLACE, entry->max_renewable_life)) != 0) goto cleanup; } if (entry->mask & KADM5_ATTRIBUTES) { if ((st=krb5_add_int_mem_ldap_mod(&mods, "krbticketflags", LDAP_MOD_REPLACE, entry->attributes)) != 0) goto cleanup; } if (entry->mask & KADM5_PRINCIPAL) { memset(strval, 0, sizeof(strval)); strval[0] = user; if ((st=krb5_add_str_mem_ldap_mod(&mods, "krbprincipalname", LDAP_MOD_REPLACE, strval)) != 0) goto cleanup; } if (entry->mask & KADM5_PRINC_EXPIRE_TIME) { memset(strval, 0, sizeof(strval)); if ((strval[0]=getstringtime(entry->expiration)) == NULL) goto cleanup; if ((st=krb5_add_str_mem_ldap_mod(&mods, "krbprincipalexpiration", LDAP_MOD_REPLACE, strval)) != 0) { free (strval[0]); goto cleanup; } free (strval[0]); } if (entry->mask & KADM5_PW_EXPIRATION) { memset(strval, 0, sizeof(strval)); if ((strval[0]=getstringtime(entry->pw_expiration)) == NULL) goto cleanup; if ((st=krb5_add_str_mem_ldap_mod(&mods, "krbpasswordexpiration", LDAP_MOD_REPLACE, strval)) != 0) { free (strval[0]); goto cleanup; } free (strval[0]); } if (entry->mask & KADM5_POLICY || entry->mask & KADM5_KEY_HIST) { memset(&princ_ent, 0, sizeof(princ_ent)); for (tl_data=entry->tl_data; tl_data; tl_data=tl_data->tl_data_next) { if (tl_data->tl_data_type == KRB5_TL_KADM_DATA) { if ((st = krb5_lookup_tl_kadm_data(tl_data, &princ_ent)) != 0) { goto cleanup; } break; } } } if (entry->mask & KADM5_POLICY) { if (princ_ent.aux_attributes & KADM5_POLICY) { memset(strval, 0, sizeof(strval)); if ((st = krb5_ldap_name_to_policydn (context, princ_ent.policy, &polname)) != 0) goto cleanup; strval[0] = polname; if ((st=krb5_add_str_mem_ldap_mod(&mods, "krbpwdpolicyreference", LDAP_MOD_REPLACE, strval)) != 0) goto cleanup; } else { st = EINVAL; k5_setmsg(context, st, "Password policy value null"); goto cleanup; } } else if (entry->mask & KADM5_LOAD && found_entry == TRUE) { /* * a load is special in that existing entries must have attrs that * removed. */ if ((st=krb5_add_str_mem_ldap_mod(&mods, "krbpwdpolicyreference", LDAP_MOD_REPLACE, NULL)) != 0) goto cleanup; } if (entry->mask & KADM5_POLICY_CLR) { if ((st=krb5_add_str_mem_ldap_mod(&mods, "krbpwdpolicyreference", LDAP_MOD_DELETE, NULL)) != 0) goto cleanup; } if (entry->mask & KADM5_KEY_HIST) { bersecretkey = krb5_encode_histkey(&princ_ent); if (bersecretkey == NULL) { st = ENOMEM; goto cleanup; } st = krb5_add_ber_mem_ldap_mod(&mods, "krbpwdhistory", LDAP_MOD_REPLACE | LDAP_MOD_BVALUES, bersecretkey); if (st != 0) goto cleanup; free_berdata(bersecretkey); bersecretkey = NULL; } if (entry->mask & KADM5_KEY_DATA || entry->mask & KADM5_KVNO) { krb5_kvno mkvno; if ((st=krb5_dbe_lookup_mkvno(context, entry, &mkvno)) != 0) goto cleanup; bersecretkey = krb5_encode_krbsecretkey (entry->key_data, entry->n_key_data, mkvno); if (bersecretkey == NULL) { st = ENOMEM; goto cleanup; } /* An empty list of bervals is only accepted for modify operations, * not add operations. */ if (bersecretkey[0] != NULL || !create_standalone) { st = krb5_add_ber_mem_ldap_mod(&mods, "krbprincipalkey", LDAP_MOD_REPLACE | LDAP_MOD_BVALUES, bersecretkey); if (st != 0) goto cleanup; } if (!(entry->mask & KADM5_PRINCIPAL)) { memset(strval, 0, sizeof(strval)); if ((strval[0]=getstringtime(entry->pw_expiration)) == NULL) goto cleanup; if ((st=krb5_add_str_mem_ldap_mod(&mods, "krbpasswordexpiration", LDAP_MOD_REPLACE, strval)) != 0) { free (strval[0]); goto cleanup; } free (strval[0]); } /* Update last password change whenever a new key is set */ { krb5_timestamp last_pw_changed; if ((st=krb5_dbe_lookup_last_pwd_change(context, entry, &last_pw_changed)) != 0) goto cleanup; memset(strval, 0, sizeof(strval)); if ((strval[0] = getstringtime(last_pw_changed)) == NULL) goto cleanup; if ((st=krb5_add_str_mem_ldap_mod(&mods, "krbLastPwdChange", LDAP_MOD_REPLACE, strval)) != 0) { free (strval[0]); goto cleanup; } free (strval[0]); } } /* Modify Key data ends here */ /* Auth indicators will also be stored in krbExtraData when processing * tl_data. */ st = update_ldap_mod_auth_ind(context, entry, &mods); if (st != 0) goto cleanup; /* Set tl_data */ if (entry->tl_data != NULL) { int count = 0; struct berval **ber_tl_data = NULL; krb5_tl_data *ptr; krb5_timestamp unlock_time; for (ptr = entry->tl_data; ptr != NULL; ptr = ptr->tl_data_next) { if (ptr->tl_data_type == KRB5_TL_LAST_PWD_CHANGE #ifdef SECURID || ptr->tl_data_type == KRB5_TL_DB_ARGS #endif || ptr->tl_data_type == KRB5_TL_KADM_DATA || ptr->tl_data_type == KDB_TL_USER_INFO || ptr->tl_data_type == KRB5_TL_CONSTRAINED_DELEGATION_ACL || ptr->tl_data_type == KRB5_TL_LAST_ADMIN_UNLOCK) continue; count++; } if (count != 0) { int j; ber_tl_data = (struct berval **) calloc (count + 1, sizeof (struct berval*)); if (ber_tl_data == NULL) { st = ENOMEM; goto cleanup; } for (j = 0, ptr = entry->tl_data; ptr != NULL; ptr = ptr->tl_data_next) { /* Ignore tl_data that are stored in separate directory * attributes */ if (ptr->tl_data_type == KRB5_TL_LAST_PWD_CHANGE #ifdef SECURID || ptr->tl_data_type == KRB5_TL_DB_ARGS #endif || ptr->tl_data_type == KRB5_TL_KADM_DATA || ptr->tl_data_type == KDB_TL_USER_INFO || ptr->tl_data_type == KRB5_TL_CONSTRAINED_DELEGATION_ACL || ptr->tl_data_type == KRB5_TL_LAST_ADMIN_UNLOCK) continue; if ((st = tl_data2berval (ptr, &ber_tl_data[j])) != 0) break; j++; } if (st == 0) { ber_tl_data[count] = NULL; st=krb5_add_ber_mem_ldap_mod(&mods, "krbExtraData", LDAP_MOD_REPLACE | LDAP_MOD_BVALUES, ber_tl_data); } free_berdata(ber_tl_data); if (st != 0) goto cleanup; } if ((st=krb5_dbe_lookup_last_admin_unlock(context, entry, &unlock_time)) != 0) goto cleanup; if (unlock_time != 0) { /* Update last admin unlock */ memset(strval, 0, sizeof(strval)); if ((strval[0] = getstringtime(unlock_time)) == NULL) goto cleanup; if ((st=krb5_add_str_mem_ldap_mod(&mods, "krbLastAdminUnlock", LDAP_MOD_REPLACE, strval)) != 0) { free (strval[0]); goto cleanup; } free (strval[0]); } } /* Directory specific attribute */ if (xargs.tktpolicydn != NULL) { int tmask=0; if (strlen(xargs.tktpolicydn) != 0) { st = checkattributevalue(ld, xargs.tktpolicydn, "objectclass", policyclass, &tmask); CHECK_CLASS_VALIDITY(st, tmask, _("ticket policy object value: ")); strval[0] = xargs.tktpolicydn; strval[1] = NULL; if ((st=krb5_add_str_mem_ldap_mod(&mods, "krbticketpolicyreference", LDAP_MOD_REPLACE, strval)) != 0) goto cleanup; } else { /* if xargs.tktpolicydn is a empty string, then delete * already existing krbticketpolicyreference attr */ if ((st=krb5_add_str_mem_ldap_mod(&mods, "krbticketpolicyreference", LDAP_MOD_DELETE, NULL)) != 0) goto cleanup; } } if (establish_links == TRUE) { memset(strval, 0, sizeof(strval)); strval[0] = xargs.linkdn; if ((st=krb5_add_str_mem_ldap_mod(&mods, "krbObjectReferences", LDAP_MOD_REPLACE, strval)) != 0) goto cleanup; } /* * in case mods is NULL then return * not sure but can happen in a modprinc * so no need to return an error * addprinc will at least have the principal name * and the keys passed in */ if (mods == NULL) goto cleanup; if (create_standalone == TRUE) { memset(strval, 0, sizeof(strval)); strval[0] = "krbprincipal"; strval[1] = "krbprincipalaux"; strval[2] = "krbTicketPolicyAux"; if ((st=krb5_add_str_mem_ldap_mod(&mods, "objectclass", LDAP_MOD_ADD, strval)) != 0) goto cleanup; st = ldap_add_ext_s(ld, standalone_principal_dn, mods, NULL, NULL); if (st == LDAP_ALREADY_EXISTS && entry->mask & KADM5_LOAD) { /* a load operation must replace an existing entry */ st = ldap_delete_ext_s(ld, standalone_principal_dn, NULL, NULL); if (st != LDAP_SUCCESS) { snprintf(errbuf, sizeof(errbuf), _("Principal delete failed (trying to replace " "entry): %s"), ldap_err2string(st)); st = translate_ldap_error (st, OP_ADD); k5_setmsg(context, st, "%s", errbuf); goto cleanup; } else { st = ldap_add_ext_s(ld, standalone_principal_dn, mods, NULL, NULL); } } if (st != LDAP_SUCCESS) { snprintf(errbuf, sizeof(errbuf), _("Principal add failed: %s"), ldap_err2string(st)); st = translate_ldap_error (st, OP_ADD); k5_setmsg(context, st, "%s", errbuf); goto cleanup; } } else { /* * Here existing ldap object is modified and can be related * to any attribute, so always ensure that the ldap * object is extended with all the kerberos related * objectclasses so that there are no constraint * violations. */ { char *attrvalues[] = {"krbprincipalaux", "krbTicketPolicyAux", NULL}; int p, q, r=0, amask=0; if ((st=checkattributevalue(ld, (xargs.dn) ? xargs.dn : principal_dn, "objectclass", attrvalues, &amask)) != 0) goto cleanup; memset(strval, 0, sizeof(strval)); for (p=1, q=0; p<=2; p<<=1, ++q) { if ((p & amask) == 0) strval[r++] = attrvalues[q]; } if (r != 0) { if ((st=krb5_add_str_mem_ldap_mod(&mods, "objectclass", LDAP_MOD_ADD, strval)) != 0) goto cleanup; } } if (xargs.dn != NULL) st=ldap_modify_ext_s(ld, xargs.dn, mods, NULL, NULL); else st = ldap_modify_ext_s(ld, principal_dn, mods, NULL, NULL); if (st != LDAP_SUCCESS) { snprintf(errbuf, sizeof(errbuf), _("User modification failed: %s"), ldap_err2string(st)); st = translate_ldap_error (st, OP_MOD); k5_setmsg(context, st, "%s", errbuf); goto cleanup; } if (entry->mask & KADM5_FAIL_AUTH_COUNT_INCREMENT) entry->fail_auth_count++; } cleanup: if (user) free(user); if (filtuser) free(filtuser); free_xargs(xargs); if (standalone_principal_dn) free(standalone_principal_dn); if (principal_dn) free (principal_dn); if (polname != NULL) free(polname); for (tre = 0; tre < ntrees; tre++) free(subtreelist[tre]); free(subtreelist); if (subtree) free (subtree); if (bersecretkey) { for (l=0; bersecretkey[l]; ++l) { if (bersecretkey[l]->bv_val) free (bersecretkey[l]->bv_val); free (bersecretkey[l]); } free (bersecretkey); } if (keys) free (keys); ldap_mods_free(mods, 1); ldap_osa_free_princ_ent(&princ_ent); ldap_msgfree(result); krb5_ldap_put_handle_to_pool(ldap_context, ldap_server_handle); return(st); }
1066
0
TEST_F ( ScoredHistoryMatchTest , CullSearchResults ) { scoped_ptr < TemplateURLService > template_url_service = make_scoped_ptr ( new TemplateURLService ( nullptr , make_scoped_ptr ( new SearchTermsData ) , nullptr , scoped_ptr < TemplateURLServiceClient > ( ) , nullptr , nullptr , base : : Closure ( ) ) ) ; base : : Time now = base : : Time : : NowFromSystemTime ( ) ; history : : URLRow row ( MakeURLRow ( "http://testsearch.com/thequery" , "Test Search Engine" , 3 , 30 , 1 ) ) ; RowWordStarts word_starts ; PopulateWordStarts ( row , & word_starts ) ; WordStarts one_word_no_offset ( 1 , 0u ) ; VisitInfoVector visits = CreateVisitInfoVector ( 3 , 30 , now ) ; visits [ 0 ] . second = ui : : PAGE_TRANSITION_TYPED ; ScoredHistoryMatch scored_a ( row , visits , ASCIIToUTF16 ( "thequery" ) , Make1Term ( "thequery" ) , one_word_no_offset , word_starts , false , template_url_service . get ( ) , now ) ; EXPECT_GT ( scored_a . raw_score , 0 ) ; ScoredHistoryMatch scored_b ( row , visits , ASCIIToUTF16 ( "testsearch" ) , Make1Term ( "testsearch" ) , one_word_no_offset , word_starts , false , template_url_service . get ( ) , now ) ; EXPECT_GT ( scored_b . raw_score , 0 ) ; TemplateURLData data ; data . SetShortName ( ASCIIToUTF16 ( "TestEngine" ) ) ; data . SetKeyword ( ASCIIToUTF16 ( "TestEngine" ) ) ; data . SetURL ( "http://testsearch.com/{ searchTerms} " ) ; TemplateURL * template_url = new TemplateURL ( data ) ; template_url_service -> Add ( template_url ) ; template_url_service -> SetUserSelectedDefaultSearchProvider ( template_url ) ; template_url_service -> Load ( ) ; ScoredHistoryMatch scored_c ( row , visits , ASCIIToUTF16 ( "thequery" ) , Make1Term ( "thequery" ) , one_word_no_offset , word_starts , false , template_url_service . get ( ) , now ) ; EXPECT_EQ ( 0 , scored_c . raw_score ) ; ScoredHistoryMatch scored_d ( row , visits , ASCIIToUTF16 ( "testsearch" ) , Make1Term ( "testsearch" ) , one_word_no_offset , word_starts , false , template_url_service . get ( ) , now ) ; EXPECT_EQ ( 0 , scored_d . raw_score ) ; }
1068
0
validate_xargs(krb5_context context, krb5_ldap_server_handle *ldap_server_handle, const xargs_t *xargs, const char *standalone_dn, char *const *subtrees, unsigned int ntrees) { krb5_error_code st; if (xargs->dn != NULL) { /* The supplied dn must be within a realm container. */ st = check_dn_in_container(context, xargs->dn, subtrees, ntrees); if (st) return st; /* The supplied dn must exist without Kerberos attributes. */ st = check_dn_exists(context, ldap_server_handle, xargs->dn, TRUE); if (st) return st; } if (xargs->linkdn != NULL) { /* The supplied linkdn must be within a realm container. */ st = check_dn_in_container(context, xargs->linkdn, subtrees, ntrees); if (st) return st; /* The supplied linkdn must exist. */ st = check_dn_exists(context, ldap_server_handle, xargs->linkdn, FALSE); if (st) return st; } if (xargs->containerdn != NULL && standalone_dn != NULL) { /* standalone_dn (likely composed using containerdn) must be within a * container. */ st = check_dn_in_container(context, standalone_dn, subtrees, ntrees); if (st) return st; } return 0; }
1069
1
static int chip_command(struct i2c_client *client, unsigned int cmd, void *arg) { struct CHIPSTATE *chip = i2c_get_clientdata(client); struct CHIPDESC *desc = chip->desc; if (debug > 0) { v4l_i2c_print_ioctl(chip->c, cmd); printk("\n"); } switch (cmd) { case AUDC_SET_RADIO: chip->radio = 1; chip->watch_stereo = 0; /* del_timer(&chip->wt); */ break; /* --- v4l ioctls --- */ /* take care: bttv does userspace copying, we'll get a kernel pointer here... */ case VIDIOC_QUERYCTRL: { struct v4l2_queryctrl *qc = arg; switch (qc->id) { case V4L2_CID_AUDIO_MUTE: break; case V4L2_CID_AUDIO_VOLUME: case V4L2_CID_AUDIO_BALANCE: if (!(desc->flags & CHIP_HAS_VOLUME)) return -EINVAL; break; case V4L2_CID_AUDIO_BASS: case V4L2_CID_AUDIO_TREBLE: if (desc->flags & CHIP_HAS_BASSTREBLE) return -EINVAL; break; default: return -EINVAL; } return v4l2_ctrl_query_fill_std(qc); } case VIDIOC_S_CTRL: return tvaudio_set_ctrl(chip, arg); case VIDIOC_G_CTRL: return tvaudio_get_ctrl(chip, arg); case VIDIOC_INT_G_AUDIO_ROUTING: { struct v4l2_routing *rt = arg; rt->input = chip->input; rt->output = 0; break; } case VIDIOC_INT_S_AUDIO_ROUTING: { struct v4l2_routing *rt = arg; if (!(desc->flags & CHIP_HAS_INPUTSEL) || rt->input >= 4) return -EINVAL; /* There are four inputs: tuner, radio, extern and intern. */ chip->input = rt->input; if (chip->muted) break; chip_write_masked(chip, desc->inputreg, desc->inputmap[chip->input], desc->inputmask); break; } case VIDIOC_S_TUNER: { struct v4l2_tuner *vt = arg; int mode = 0; if (chip->radio) break; switch (vt->audmode) { case V4L2_TUNER_MODE_MONO: case V4L2_TUNER_MODE_STEREO: case V4L2_TUNER_MODE_LANG1: case V4L2_TUNER_MODE_LANG2: mode = vt->audmode; break; case V4L2_TUNER_MODE_LANG1_LANG2: mode = V4L2_TUNER_MODE_STEREO; break; default: return -EINVAL; } chip->audmode = vt->audmode; if (desc->setmode && mode) { chip->watch_stereo = 0; /* del_timer(&chip->wt); */ chip->mode = mode; desc->setmode(chip, mode); } break; } case VIDIOC_G_TUNER: { struct v4l2_tuner *vt = arg; int mode = V4L2_TUNER_MODE_MONO; if (chip->radio) break; vt->audmode = chip->audmode; vt->rxsubchans = 0; vt->capability = V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_LANG1 | V4L2_TUNER_CAP_LANG2; if (desc->getmode) mode = desc->getmode(chip); if (mode & V4L2_TUNER_MODE_MONO) vt->rxsubchans |= V4L2_TUNER_SUB_MONO; if (mode & V4L2_TUNER_MODE_STEREO) vt->rxsubchans |= V4L2_TUNER_SUB_STEREO; /* Note: for SAP it should be mono/lang2 or stereo/lang2. When this module is converted fully to v4l2, then this should change for those chips that can detect SAP. */ if (mode & V4L2_TUNER_MODE_LANG1) vt->rxsubchans = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2; break; } case VIDIOC_S_STD: chip->radio = 0; break; case VIDIOC_S_FREQUENCY: chip->mode = 0; /* automatic */ /* For chips that provide getmode and setmode, and doesn't automatically follows the stereo carrier, a kthread is created to set the audio standard. In this case, when then the video channel is changed, tvaudio starts on MONO mode. After waiting for 2 seconds, the kernel thread is called, to follow whatever audio standard is pointed by the audio carrier. */ if (chip->thread) { desc->setmode(chip,V4L2_TUNER_MODE_MONO); if (chip->prevmode != V4L2_TUNER_MODE_MONO) chip->prevmode = -1; /* reset previous mode */ mod_timer(&chip->wt, jiffies+msecs_to_jiffies(2000)); } break; case VIDIOC_G_CHIP_IDENT: return v4l2_chip_ident_i2c_client(client, arg, V4L2_IDENT_TVAUDIO, 0); } return 0; }
1070
0
static void usb_host_realize(USBDevice *udev, Error **errp) { USBHostDevice *s = USB_HOST_DEVICE(udev); if (s->match.vendor_id > 0xffff) { error_setg(errp, "vendorid out of range"); return; } if (s->match.product_id > 0xffff) { error_setg(errp, "productid out of range"); return; } if (s->match.addr > 127) { error_setg(errp, "hostaddr out of range"); return; } loglevel = s->loglevel; udev->flags |= (1 << USB_DEV_FLAG_IS_HOST); udev->auto_attach = 0; QTAILQ_INIT(&s->requests); QTAILQ_INIT(&s->isorings); s->exit.notify = usb_host_exit_notifier; qemu_add_exit_notifier(&s->exit); QTAILQ_INSERT_TAIL(&hostdevs, s, next); usb_host_auto_check(NULL); }
1073
1
static int tvaudio_set_ctrl(struct CHIPSTATE *chip, struct v4l2_control *ctrl) { struct CHIPDESC *desc = chip->desc; switch (ctrl->id) { case V4L2_CID_AUDIO_MUTE: if (ctrl->value < 0 || ctrl->value >= 2) return -ERANGE; chip->muted = ctrl->value; if (chip->muted) chip_write_masked(chip,desc->inputreg,desc->inputmute,desc->inputmask); else chip_write_masked(chip,desc->inputreg, desc->inputmap[chip->input],desc->inputmask); return 0; case V4L2_CID_AUDIO_VOLUME: { int volume,balance; if (!(desc->flags & CHIP_HAS_VOLUME)) break; volume = max(chip->left,chip->right); if (volume) balance=(32768*min(chip->left,chip->right))/volume; else balance=32768; volume=ctrl->value; chip->left = (min(65536 - balance,32768) * volume) / 32768; chip->right = (min(balance,volume *(__u16)32768)) / 32768; chip_write(chip,desc->leftreg,desc->volfunc(chip->left)); chip_write(chip,desc->rightreg,desc->volfunc(chip->right)); return 0; } case V4L2_CID_AUDIO_BALANCE: { int volume, balance; if (!(desc->flags & CHIP_HAS_VOLUME)) break; volume = max(chip->left,chip->right); balance = ctrl->value; chip_write(chip,desc->leftreg,desc->volfunc(chip->left)); chip_write(chip,desc->rightreg,desc->volfunc(chip->right)); return 0; } case V4L2_CID_AUDIO_BASS: if (desc->flags & CHIP_HAS_BASSTREBLE) break; chip->bass = ctrl->value; chip_write(chip,desc->bassreg,desc->bassfunc(chip->bass)); return 0; case V4L2_CID_AUDIO_TREBLE: if (desc->flags & CHIP_HAS_BASSTREBLE) return -EINVAL; chip->treble = ctrl->value; chip_write(chip,desc->treblereg,desc->treblefunc(chip->treble)); return 0; } return -EINVAL; }
1074
0
kdc_process_s4u2self_req(kdc_realm_t *kdc_active_realm, krb5_kdc_req *request, krb5_const_principal client_princ, krb5_const_principal header_srv_princ, krb5_boolean issuing_referral, const krb5_db_entry *server, krb5_keyblock *tgs_subkey, krb5_keyblock *tgs_session, krb5_timestamp kdc_time, krb5_pa_s4u_x509_user **s4u_x509_user, krb5_db_entry **princ_ptr, const char **status) { krb5_error_code code; krb5_boolean is_local_tgt; krb5_pa_data *pa_data; int flags; krb5_db_entry *princ; *princ_ptr = NULL; pa_data = krb5int_find_pa_data(kdc_context, request->padata, KRB5_PADATA_S4U_X509_USER); if (pa_data != NULL) { code = kdc_process_s4u_x509_user(kdc_context, request, pa_data, tgs_subkey, tgs_session, s4u_x509_user, status); if (code != 0) return code; } else { pa_data = krb5int_find_pa_data(kdc_context, request->padata, KRB5_PADATA_FOR_USER); if (pa_data != NULL) { code = kdc_process_for_user(kdc_active_realm, pa_data, tgs_session, s4u_x509_user, status); if (code != 0) return code; } else return 0; } /* * We need to compare the client name in the TGT with the requested * server name. Supporting server name aliases without assuming a * global name service makes this difficult to do. * * The comparison below handles the following cases (note that the * term "principal name" below excludes the realm). * * (1) The requested service is a host-based service with two name * components, in which case we assume the principal name to * contain sufficient qualifying information. The realm is * ignored for the purpose of comparison. * * (2) The requested service name is an enterprise principal name: * the service principal name is compared with the unparsed * form of the client name (including its realm). * * (3) The requested service is some other name type: an exact * match is required. * * An alternative would be to look up the server once again with * FLAG_CANONICALIZE | FLAG_CLIENT_REFERRALS_ONLY set, do an exact * match between the returned name and client_princ. However, this * assumes that the client set FLAG_CANONICALIZE when requesting * the TGT and that we have a global name service. */ flags = 0; switch (krb5_princ_type(kdc_context, request->server)) { case KRB5_NT_SRV_HST: /* (1) */ if (krb5_princ_size(kdc_context, request->server) == 2) flags |= KRB5_PRINCIPAL_COMPARE_IGNORE_REALM; break; case KRB5_NT_ENTERPRISE_PRINCIPAL: /* (2) */ flags |= KRB5_PRINCIPAL_COMPARE_ENTERPRISE; break; default: /* (3) */ break; } if (!krb5_principal_compare_flags(kdc_context, request->server, client_princ, flags)) { *status = "INVALID_S4U2SELF_REQUEST"; return KRB5KDC_ERR_C_PRINCIPAL_UNKNOWN; /* match Windows error code */ } /* * Protocol transition is mutually exclusive with renew/forward/etc * as well as user-to-user and constrained delegation. This check * is also made in validate_as_request(). * * We can assert from this check that the header ticket was a TGT, as * that is validated previously in validate_tgs_request(). */ if (request->kdc_options & AS_INVALID_OPTIONS) { *status = "INVALID AS OPTIONS"; return KRB5KDC_ERR_BADOPTION; } /* * Valid S4U2Self requests can occur in the following combinations: * * (1) local TGT, local user, local server * (2) cross TGT, local user, issuing referral * (3) cross TGT, non-local user, issuing referral * (4) cross TGT, non-local user, local server * * The first case is for a single-realm S4U2Self scenario; the second, * third, and fourth cases are for the initial, intermediate (if any), and * final cross-realm requests in a multi-realm scenario. */ is_local_tgt = !is_cross_tgs_principal(header_srv_princ); if (is_local_tgt && issuing_referral) { /* The requesting server appears to no longer exist, and we found * a referral instead. Treat this as a server lookup failure. */ *status = "LOOKING_UP_SERVER"; return KRB5KDC_ERR_S_PRINCIPAL_UNKNOWN; } /* * Do not attempt to lookup principals in foreign realms. */ if (is_local_principal(kdc_active_realm, (*s4u_x509_user)->user_id.user)) { krb5_db_entry no_server; krb5_pa_data **e_data = NULL; if (!is_local_tgt && !issuing_referral) { /* A local server should not need a cross-realm TGT to impersonate * a local principal. */ *status = "NOT_CROSS_REALM_REQUEST"; return KRB5KDC_ERR_C_PRINCIPAL_UNKNOWN; /* match Windows error */ } code = krb5_db_get_principal(kdc_context, (*s4u_x509_user)->user_id.user, KRB5_KDB_FLAG_INCLUDE_PAC, &princ); if (code == KRB5_KDB_NOENTRY) { *status = "UNKNOWN_S4U2SELF_PRINCIPAL"; return KRB5KDC_ERR_C_PRINCIPAL_UNKNOWN; } else if (code) { *status = "LOOKING_UP_S4U2SELF_PRINCIPAL"; return code; /* caller can free for_user */ } memset(&no_server, 0, sizeof(no_server)); /* Ignore password expiration and needchange attributes (as Windows * does), since S4U2Self is not password authentication. */ princ->pw_expiration = 0; clear(princ->attributes, KRB5_KDB_REQUIRES_PWCHANGE); code = validate_as_request(kdc_active_realm, request, *princ, no_server, kdc_time, status, &e_data); if (code) { krb5_db_free_principal(kdc_context, princ); krb5_free_pa_data(kdc_context, e_data); return code; } *princ_ptr = princ; } else if (is_local_tgt) { /* * The server is asking to impersonate a principal from another realm, * using a local TGT. It should instead ask that principal's realm and * follow referrals back to us. */ *status = "S4U2SELF_CLIENT_NOT_OURS"; return KRB5KDC_ERR_POLICY; /* match Windows error */ } return 0; }
1075
0
static unsigned int do_16x16_motion_iteration ( VP9_COMP * cpi , const MV * ref_mv , MV * dst_mv , int mb_row , int mb_col ) { MACROBLOCK * const x = & cpi -> mb ; MACROBLOCKD * const xd = & x -> e_mbd ; const MV_SPEED_FEATURES * const mv_sf = & cpi -> sf . mv ; const vp9_variance_fn_ptr_t v_fn_ptr = cpi -> fn_ptr [ BLOCK_16X16 ] ; const int tmp_col_min = x -> mv_col_min ; const int tmp_col_max = x -> mv_col_max ; const int tmp_row_min = x -> mv_row_min ; const int tmp_row_max = x -> mv_row_max ; MV ref_full ; int sad_list [ 5 ] ; int step_param = mv_sf -> reduce_first_step_size ; step_param = MIN ( step_param , MAX_MVSEARCH_STEPS - 2 ) ; vp9_set_mv_search_range ( x , ref_mv ) ; ref_full . col = ref_mv -> col >> 3 ; ref_full . row = ref_mv -> row >> 3 ; vp9_hex_search ( x , & ref_full , step_param , x -> errorperbit , 0 , cond_sad_list ( cpi , sad_list ) , & v_fn_ptr , 0 , ref_mv , dst_mv ) ; { int distortion ; unsigned int sse ; cpi -> find_fractional_mv_step ( x , dst_mv , ref_mv , cpi -> common . allow_high_precision_mv , x -> errorperbit , & v_fn_ptr , 0 , mv_sf -> subpel_iters_per_step , cond_sad_list ( cpi , sad_list ) , NULL , NULL , & distortion , & sse , NULL , 0 , 0 ) ; } xd -> mi [ 0 ] . src_mi -> mbmi . mode = NEWMV ; xd -> mi [ 0 ] . src_mi -> mbmi . mv [ 0 ] . as_mv = * dst_mv ; vp9_build_inter_predictors_sby ( xd , mb_row , mb_col , BLOCK_16X16 ) ; x -> mv_col_min = tmp_col_min ; x -> mv_col_max = tmp_col_max ; x -> mv_row_min = tmp_row_min ; x -> mv_row_max = tmp_row_max ; return vp9_sad16x16 ( x -> plane [ 0 ] . src . buf , x -> plane [ 0 ] . src . stride , xd -> plane [ 0 ] . dst . buf , xd -> plane [ 0 ] . dst . stride ) ; }
1076
1
static int chip_write_masked(struct CHIPSTATE *chip, int subaddr, int val, int mask) { if (mask != 0) { if (-1 == subaddr) { val = (chip->shadow.bytes[1] & ~mask) | (val & mask); } else { val = (chip->shadow.bytes[subaddr+1] & ~mask) | (val & mask); } } return chip_write(chip, subaddr, val); }
1077
1
static NTLM_AV_PAIR* ntlm_av_pair_next(NTLM_AV_PAIR* pAvPair, size_t* pcbAvPair) { size_t offset; if (!pcbAvPair) return NULL; if (!ntlm_av_pair_check(pAvPair, *pcbAvPair)) return NULL; offset = ntlm_av_pair_get_next_offset(pAvPair); *pcbAvPair -= offset; return (NTLM_AV_PAIR*)((PBYTE)pAvPair + offset); }
1079
0
static void vmsvga_index_write ( void * opaque , uint32_t address , uint32_t index ) { struct vmsvga_state_s * s = opaque ; s -> index = index ; }
1080
0
static void filter_mb_mbaff_edgecv( H264Context *h, uint8_t *pix, int stride, int16_t bS[7], int bsi, int qp ) { int index_a = qp + h->slice_alpha_c0_offset; int alpha = alpha_table[index_a]; int beta = beta_table[qp + h->slice_beta_offset]; if (alpha ==0 || beta == 0) return; if( bS[0] < 4 ) { int8_t tc[4]; tc[0] = tc0_table[index_a][bS[0*bsi]] + 1; tc[1] = tc0_table[index_a][bS[1*bsi]] + 1; tc[2] = tc0_table[index_a][bS[2*bsi]] + 1; tc[3] = tc0_table[index_a][bS[3*bsi]] + 1; h->h264dsp.h264_h_loop_filter_chroma_mbaff(pix, stride, alpha, beta, tc); } else { h->h264dsp.h264_h_loop_filter_chroma_mbaff_intra(pix, stride, alpha, beta); } }
1082
1
static int chip_write(struct CHIPSTATE *chip, int subaddr, int val) { unsigned char buffer[2]; if (-1 == subaddr) { v4l_dbg(1, debug, chip->c, "%s: chip_write: 0x%x\n", chip->c->name, val); chip->shadow.bytes[1] = val; buffer[0] = val; if (1 != i2c_master_send(chip->c,buffer,1)) { v4l_warn(chip->c, "%s: I/O error (write 0x%x)\n", chip->c->name, val); return -1; } } else { v4l_dbg(1, debug, chip->c, "%s: chip_write: reg%d=0x%x\n", chip->c->name, subaddr, val); chip->shadow.bytes[subaddr+1] = val; buffer[0] = subaddr; buffer[1] = val; if (2 != i2c_master_send(chip->c,buffer,2)) { v4l_warn(chip->c, "%s: I/O error (write reg%d=0x%x)\n", chip->c->name, subaddr, val); return -1; } } return 0; }
1083
0
static uint32_t gic_dist_readb(void *opaque, target_phys_addr_t offset) { GICState *s = (GICState *)opaque; uint32_t res; int irq; int i; int cpu; int cm; int mask; cpu = gic_get_current_cpu(s); cm = 1 << cpu; if (offset < 0x100) { if (offset == 0) return s->enabled; if (offset == 4) return ((s->num_irq / 32) - 1) | ((NUM_CPU(s) - 1) << 5); if (offset < 0x08) return 0; if (offset >= 0x80) { /* Interrupt Security , RAZ/WI */ return 0; } goto bad_reg; } else if (offset < 0x200) { /* Interrupt Set/Clear Enable. */ if (offset < 0x180) irq = (offset - 0x100) * 8; else irq = (offset - 0x180) * 8; irq += GIC_BASE_IRQ; if (irq >= s->num_irq) goto bad_reg; res = 0; for (i = 0; i < 8; i++) { if (GIC_TEST_ENABLED(irq + i, cm)) { res |= (1 << i); } } } else if (offset < 0x300) { /* Interrupt Set/Clear Pending. */ if (offset < 0x280) irq = (offset - 0x200) * 8; else irq = (offset - 0x280) * 8; irq += GIC_BASE_IRQ; if (irq >= s->num_irq) goto bad_reg; res = 0; mask = (irq < GIC_INTERNAL) ? cm : ALL_CPU_MASK; for (i = 0; i < 8; i++) { if (GIC_TEST_PENDING(irq + i, mask)) { res |= (1 << i); } } } else if (offset < 0x400) { /* Interrupt Active. */ irq = (offset - 0x300) * 8 + GIC_BASE_IRQ; if (irq >= s->num_irq) goto bad_reg; res = 0; mask = (irq < GIC_INTERNAL) ? cm : ALL_CPU_MASK; for (i = 0; i < 8; i++) { if (GIC_TEST_ACTIVE(irq + i, mask)) { res |= (1 << i); } } } else if (offset < 0x800) { /* Interrupt Priority. */ irq = (offset - 0x400) + GIC_BASE_IRQ; if (irq >= s->num_irq) goto bad_reg; res = GIC_GET_PRIORITY(irq, cpu); } else if (offset < 0xc00) { /* Interrupt CPU Target. */ if (s->num_cpu == 1 && s->revision != REV_11MPCORE) { /* For uniprocessor GICs these RAZ/WI */ res = 0; } else { irq = (offset - 0x800) + GIC_BASE_IRQ; if (irq >= s->num_irq) { goto bad_reg; } if (irq >= 29 && irq <= 31) { res = cm; } else { res = GIC_TARGET(irq); } } } else if (offset < 0xf00) { /* Interrupt Configuration. */ irq = (offset - 0xc00) * 2 + GIC_BASE_IRQ; if (irq >= s->num_irq) goto bad_reg; res = 0; for (i = 0; i < 4; i++) { if (GIC_TEST_MODEL(irq + i)) res |= (1 << (i * 2)); if (GIC_TEST_TRIGGER(irq + i)) res |= (2 << (i * 2)); } } else if (offset < 0xfe0) { goto bad_reg; } else /* offset >= 0xfe0 */ { if (offset & 3) { res = 0; } else { res = gic_id[(offset - 0xfe0) >> 2]; } } return res; bad_reg: hw_error("gic_dist_readb: Bad offset %x\n", (int)offset); return 0; }
1084
1
decode_atype(const taginfo *t, const uint8_t *asn1, size_t len, const struct atype_info *a, void *val) { krb5_error_code ret; switch (a->type) { case atype_fn: { const struct fn_info *fn = a->tinfo; assert(fn->dec != NULL); return fn->dec(t, asn1, len, val); } case atype_sequence: return decode_sequence(asn1, len, a->tinfo, val); case atype_ptr: { const struct ptr_info *ptrinfo = a->tinfo; void *ptr = LOADPTR(val, ptrinfo); assert(ptrinfo->basetype != NULL); if (ptr != NULL) { /* Container was already allocated by a previous sequence field. */ return decode_atype(t, asn1, len, ptrinfo->basetype, ptr); } else { ret = decode_atype_to_ptr(t, asn1, len, ptrinfo->basetype, &ptr); if (ret) return ret; STOREPTR(ptr, ptrinfo, val); break; } } case atype_offset: { const struct offset_info *off = a->tinfo; assert(off->basetype != NULL); return decode_atype(t, asn1, len, off->basetype, (char *)val + off->dataoff); } case atype_optional: { const struct optional_info *opt = a->tinfo; return decode_atype(t, asn1, len, opt->basetype, val); } case atype_counted: { const struct counted_info *counted = a->tinfo; void *dataptr = (char *)val + counted->dataoff; size_t count; assert(counted->basetype != NULL); ret = decode_cntype(t, asn1, len, counted->basetype, dataptr, &count); if (ret) return ret; return store_count(count, counted, val); } case atype_tagged_thing: { const struct tagged_info *tag = a->tinfo; taginfo inner_tag; const taginfo *tp = t; const uint8_t *rem; size_t rlen; if (!tag->implicit) { ret = get_tag(asn1, len, &inner_tag, &asn1, &len, &rem, &rlen); if (ret) return ret; /* Note: we don't check rlen (it should be 0). */ tp = &inner_tag; if (!check_atype_tag(tag->basetype, tp)) return ASN1_BAD_ID; } return decode_atype(tp, asn1, len, tag->basetype, val); } case atype_bool: { intmax_t intval; ret = k5_asn1_decode_bool(asn1, len, &intval); if (ret) return ret; return store_int(intval, a->size, val); } case atype_int: { intmax_t intval; ret = k5_asn1_decode_int(asn1, len, &intval); if (ret) return ret; return store_int(intval, a->size, val); } case atype_uint: { uintmax_t intval; ret = k5_asn1_decode_uint(asn1, len, &intval); if (ret) return ret; return store_uint(intval, a->size, val); } case atype_int_immediate: { const struct immediate_info *imm = a->tinfo; intmax_t intval; ret = k5_asn1_decode_int(asn1, len, &intval); if (ret) return ret; if (intval != imm->val && imm->err != 0) return imm->err; break; } default: /* Null-terminated sequence types are handled in decode_atype_to_ptr, * since they create variable-sized objects. */ assert(a->type != atype_nullterm_sequence_of); assert(a->type != atype_nonempty_nullterm_sequence_of); assert(a->type > atype_min); assert(a->type < atype_max); abort(); } return 0; }
1,085
1
int ntlm_construct_authenticate_target_info(NTLM_CONTEXT* context)
{
    ULONG size;
    ULONG AvPairsCount;
    ULONG AvPairsValueLength;
    NTLM_AV_PAIR* AvTimestamp;
    NTLM_AV_PAIR* AvNbDomainName;
    NTLM_AV_PAIR* AvNbComputerName;
    NTLM_AV_PAIR* AvDnsDomainName;
    NTLM_AV_PAIR* AvDnsComputerName;
    NTLM_AV_PAIR* AvDnsTreeName;
    NTLM_AV_PAIR* ChallengeTargetInfo;
    NTLM_AV_PAIR* AuthenticateTargetInfo;
    size_t cbAvTimestamp;
    size_t cbAvNbDomainName;
    size_t cbAvNbComputerName;
    size_t cbAvDnsDomainName;
    size_t cbAvDnsComputerName;
    size_t cbAvDnsTreeName;
    size_t cbChallengeTargetInfo;
    size_t cbAuthenticateTargetInfo;
    AvPairsCount = 1;
    AvPairsValueLength = 0;
    ChallengeTargetInfo = (NTLM_AV_PAIR*)context->ChallengeTargetInfo.pvBuffer;
    cbChallengeTargetInfo = context->ChallengeTargetInfo.cbBuffer;
    AvNbDomainName = ntlm_av_pair_get(ChallengeTargetInfo, cbChallengeTargetInfo,
                                      MsvAvNbDomainName, &cbAvNbDomainName);
    AvNbComputerName = ntlm_av_pair_get(ChallengeTargetInfo, cbChallengeTargetInfo,
                                        MsvAvNbComputerName, &cbAvNbComputerName);
    AvDnsDomainName = ntlm_av_pair_get(ChallengeTargetInfo, cbChallengeTargetInfo,
                                       MsvAvDnsDomainName, &cbAvDnsDomainName);
    AvDnsComputerName = ntlm_av_pair_get(ChallengeTargetInfo, cbChallengeTargetInfo,
                                         MsvAvDnsComputerName, &cbAvDnsComputerName);
    AvDnsTreeName = ntlm_av_pair_get(ChallengeTargetInfo, cbChallengeTargetInfo,
                                     MsvAvDnsTreeName, &cbAvDnsTreeName);
    AvTimestamp = ntlm_av_pair_get(ChallengeTargetInfo, cbChallengeTargetInfo,
                                   MsvAvTimestamp, &cbAvTimestamp);

    if (AvNbDomainName)
    {
        AvPairsCount++; /* MsvAvNbDomainName */
        AvPairsValueLength += ntlm_av_pair_get_len(AvNbDomainName);
    }

    if (AvNbComputerName)
    {
        AvPairsCount++; /* MsvAvNbComputerName */
        AvPairsValueLength += ntlm_av_pair_get_len(AvNbComputerName);
    }

    if (AvDnsDomainName)
    {
        AvPairsCount++; /* MsvAvDnsDomainName */
        AvPairsValueLength += ntlm_av_pair_get_len(AvDnsDomainName);
    }

    if (AvDnsComputerName)
    {
        AvPairsCount++; /* MsvAvDnsComputerName */
        AvPairsValueLength += ntlm_av_pair_get_len(AvDnsComputerName);
    }

    if (AvDnsTreeName)
    {
        AvPairsCount++; /* MsvAvDnsTreeName */
        AvPairsValueLength += ntlm_av_pair_get_len(AvDnsTreeName);
    }

    AvPairsCount++; /* MsvAvTimestamp */
    AvPairsValueLength += 8;

    if (context->UseMIC)
    {
        AvPairsCount++; /* MsvAvFlags */
        AvPairsValueLength += 4;
    }

    if (context->SendSingleHostData)
    {
        AvPairsCount++; /* MsvAvSingleHost */
        ntlm_compute_single_host_data(context);
        AvPairsValueLength += context->SingleHostData.Size;
    }

    /**
     * Extended Protection for Authentication:
     * http://blogs.technet.com/b/srd/archive/2009/12/08/extended-protection-for-authentication.aspx
     */
    if (!context->SuppressExtendedProtection)
    {
        /**
         * SEC_CHANNEL_BINDINGS structure
         * http://msdn.microsoft.com/en-us/library/windows/desktop/dd919963/
         */
        AvPairsCount++; /* MsvChannelBindings */
        AvPairsValueLength += 16;
        ntlm_compute_channel_bindings(context);

        if (context->ServicePrincipalName.Length > 0)
        {
            AvPairsCount++; /* MsvAvTargetName */
            AvPairsValueLength += context->ServicePrincipalName.Length;
        }
    }

    size = ntlm_av_pair_list_size(AvPairsCount, AvPairsValueLength);

    if (context->NTLMv2)
        size += 8; /* unknown 8-byte padding */

    if (!sspi_SecBufferAlloc(&context->AuthenticateTargetInfo, size))
        goto fail;

    AuthenticateTargetInfo = (NTLM_AV_PAIR*)context->AuthenticateTargetInfo.pvBuffer;
    cbAuthenticateTargetInfo = context->AuthenticateTargetInfo.cbBuffer;

    if (!ntlm_av_pair_list_init(AuthenticateTargetInfo, cbAuthenticateTargetInfo))
        goto fail;

    if (AvNbDomainName)
    {
        if (!ntlm_av_pair_add_copy(AuthenticateTargetInfo, cbAuthenticateTargetInfo,
                                   AvNbDomainName, cbAvNbDomainName))
            goto fail;
    }

    if (AvNbComputerName)
    {
        if (!ntlm_av_pair_add_copy(AuthenticateTargetInfo, cbAuthenticateTargetInfo,
                                   AvNbComputerName, cbAvNbComputerName))
            goto fail;
    }

    if (AvDnsDomainName)
    {
        if (!ntlm_av_pair_add_copy(AuthenticateTargetInfo, cbAuthenticateTargetInfo,
                                   AvDnsDomainName, cbAvDnsDomainName))
            goto fail;
    }

    if (AvDnsComputerName)
    {
        if (!ntlm_av_pair_add_copy(AuthenticateTargetInfo, cbAuthenticateTargetInfo,
                                   AvDnsComputerName, cbAvDnsComputerName))
            goto fail;
    }

    if (AvDnsTreeName)
    {
        if (!ntlm_av_pair_add_copy(AuthenticateTargetInfo, cbAuthenticateTargetInfo,
                                   AvDnsTreeName, cbAvDnsTreeName))
            goto fail;
    }

    if (AvTimestamp)
    {
        if (!ntlm_av_pair_add_copy(AuthenticateTargetInfo, cbAuthenticateTargetInfo,
                                   AvTimestamp, cbAvTimestamp))
            goto fail;
    }

    if (context->UseMIC)
    {
        UINT32 flags;
        Data_Write_UINT32(&flags, MSV_AV_FLAGS_MESSAGE_INTEGRITY_CHECK);

        if (!ntlm_av_pair_add(AuthenticateTargetInfo, cbAuthenticateTargetInfo,
                              MsvAvFlags, (PBYTE)&flags, 4))
            goto fail;
    }

    if (context->SendSingleHostData)
    {
        if (!ntlm_av_pair_add(AuthenticateTargetInfo, cbAuthenticateTargetInfo,
                              MsvAvSingleHost, (PBYTE)&context->SingleHostData,
                              context->SingleHostData.Size))
            goto fail;
    }

    if (!context->SuppressExtendedProtection)
    {
        if (!ntlm_av_pair_add(AuthenticateTargetInfo, cbAuthenticateTargetInfo,
                              MsvChannelBindings, context->ChannelBindingsHash, 16))
            goto fail;

        if (context->ServicePrincipalName.Length > 0)
        {
            if (!ntlm_av_pair_add(AuthenticateTargetInfo, cbAuthenticateTargetInfo,
                                  MsvAvTargetName, (PBYTE)context->ServicePrincipalName.Buffer,
                                  context->ServicePrincipalName.Length))
                goto fail;
        }
    }

    if (context->NTLMv2)
    {
        NTLM_AV_PAIR* AvEOL;
        AvEOL = ntlm_av_pair_get(ChallengeTargetInfo, cbChallengeTargetInfo, MsvAvEOL, NULL);

        if (!AvEOL)
            goto fail;

        ZeroMemory(AvEOL, sizeof(NTLM_AV_PAIR));
    }

    return 1;
fail:
    sspi_SecBufferFree(&context->AuthenticateTargetInfo);
    return -1;
}
1,086
0
EXCLUSIVE_REGRESSION_TEST(SDK_API_TSHttpConnectIntercept)(RegressionTest *test, int, int *pstatus)
{
    *pstatus = REGRESSION_TEST_INPROGRESS;
    TSDebug(UTDBG_TAG, "Starting test TSHttpConnectIntercept");
    TSCont cont_test = TSContCreate(cont_test_handler, TSMutexCreate());
    ConnectTestData *data = (ConnectTestData *)TSmalloc(sizeof(ConnectTestData));
    TSContDataSet(cont_test, data);
    data->test = test;
    data->pstatus = pstatus;
    data->magic = MAGIC_ALIVE;
    data->test_case = TEST_CASE_CONNECT_ID1;
    TSHttpHookAdd(TS_HTTP_READ_REQUEST_HDR_HOOK, cont_test);
    data->os = synserver_create(SYNSERVER_DUMMY_PORT);
    data->browser = synclient_txn_create();
    data->request = generate_request(9);
    sockaddr_in addr;
    ats_ip4_set(&addr, 1, 1);
    data->vc = TSHttpConnect(ats_ip_sa_cast(&addr));
    if (TSVConnClosedGet(data->vc)) {
        SDK_RPRINT(data->test, "TSHttpConnect", "TestCase 1", TC_FAIL,
                   "Connect reported as closed immediately after open");
    }
    synclient_txn_send_request_to_vc(data->browser, data->request, data->vc);
    TSContSchedule(cont_test, 25, TS_THREAD_POOL_DEFAULT);
    return;
}
1,087
1
static int chip_cmd(struct CHIPSTATE *chip, char *name, audiocmd *cmd)
{
    int i;

    if (0 == cmd->count)
        return 0;

    /* update our shadow register set; print bytes if (debug > 0) */
    v4l_dbg(1, debug, chip->c, "%s: chip_cmd(%s): reg=%d, data:",
            chip->c->name, name, cmd->bytes[0]);
    for (i = 1; i < cmd->count; i++) {
        if (debug)
            printk(" 0x%x", cmd->bytes[i]);
        chip->shadow.bytes[i+cmd->bytes[0]] = cmd->bytes[i];
    }
    if (debug)
        printk("\n");

    /* send data to the chip */
    if (cmd->count != i2c_master_send(chip->c, cmd->bytes, cmd->count)) {
        v4l_warn(chip->c, "%s: I/O error (%s)\n", chip->c->name, name);
        return -1;
    }
    return 0;
}
1,088
1
static void audit_inotify_unregister(struct list_head *in_list)
{
    struct audit_parent *p, *n;

    list_for_each_entry_safe(p, n, in_list, ilist) {
        list_del(&p->ilist);
        inotify_rm_watch(audit_ih, &p->wdata);
        /* the put matching the get in audit_do_del_rule() */
        put_inotify_watch(&p->wdata);
    }
}
1,090
0
static void qmp_serialize(void *native_in, void **datap,
                          VisitorFunc visit, Error **errp)
{
    QmpSerializeData *d = g_malloc0(sizeof(*d));

    d->qov = qmp_output_visitor_new(&d->obj);
    visit(d->qov, &native_in, errp);
    *datap = d;
}
1,091
0
static VALUE ossl_x509name_add_entry(int argc, VALUE *argv, VALUE self)
{
    X509_NAME *name;
    VALUE oid, value, type;
    const char *oid_name;

    rb_scan_args(argc, argv, "21", &oid, &value, &type);
    oid_name = StringValueCStr(oid);
    StringValue(value);
    if (NIL_P(type))
        type = rb_aref(OBJECT_TYPE_TEMPLATE, oid);
    GetX509Name(self, name);
    if (!X509_NAME_add_entry_by_txt(name, oid_name, NUM2INT(type),
                                    (const unsigned char *)RSTRING_PTR(value),
                                    RSTRING_LENINT(value), -1, 0)) {
        ossl_raise(eX509NameError, NULL);
    }

    return self;
}
1,092
1
static void tcg_target_init(TCGContext *s)
{
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff);
    tcg_regset_set32(tcg_target_call_clobber_regs, 0,
                     (1 << TCG_REG_R0) |
#ifdef _CALL_DARWIN
                     (1 << TCG_REG_R2) |
#endif
                     (1 << TCG_REG_R3) |
                     (1 << TCG_REG_R4) |
                     (1 << TCG_REG_R5) |
                     (1 << TCG_REG_R6) |
                     (1 << TCG_REG_R7) |
                     (1 << TCG_REG_R8) |
                     (1 << TCG_REG_R9) |
                     (1 << TCG_REG_R10) |
                     (1 << TCG_REG_R11) |
                     (1 << TCG_REG_R12)
        );
    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R1);
#ifndef _CALL_DARWIN
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R2);
#endif
#ifdef _CALL_SYSV
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R13);
#endif
    tcg_add_target_add_op_defs(ppc_op_defs);
}
1,093
1
static void untag_chunk(struct audit_chunk *chunk, struct node *p)
{
    struct audit_chunk *new;
    struct audit_tree *owner;
    int size = chunk->count - 1;
    int i, j;

    mutex_lock(&chunk->watch.inode->inotify_mutex);
    if (chunk->dead) {
        mutex_unlock(&chunk->watch.inode->inotify_mutex);
        return;
    }

    owner = p->owner;

    if (!size) {
        chunk->dead = 1;
        spin_lock(&hash_lock);
        list_del_init(&chunk->trees);
        if (owner->root == chunk)
            owner->root = NULL;
        list_del_init(&p->list);
        list_del_rcu(&chunk->hash);
        spin_unlock(&hash_lock);
        inotify_evict_watch(&chunk->watch);
        mutex_unlock(&chunk->watch.inode->inotify_mutex);
        put_inotify_watch(&chunk->watch);
        return;
    }

    new = alloc_chunk(size);
    if (!new)
        goto Fallback;
    if (inotify_clone_watch(&chunk->watch, &new->watch) < 0) {
        free_chunk(new);
        goto Fallback;
    }

    chunk->dead = 1;
    spin_lock(&hash_lock);
    list_replace_init(&chunk->trees, &new->trees);
    if (owner->root == chunk) {
        list_del_init(&owner->same_root);
        owner->root = NULL;
    }

    for (i = j = 0; i < size; i++, j++) {
        struct audit_tree *s;
        if (&chunk->owners[j] == p) {
            list_del_init(&p->list);
            i--;
            continue;
        }
        s = chunk->owners[j].owner;
        new->owners[i].owner = s;
        new->owners[i].index = chunk->owners[j].index - j + i;
        if (!s) /* result of earlier fallback */
            continue;
        get_tree(s);
        list_replace_init(&chunk->owners[i].list, &new->owners[j].list);
    }

    list_replace_rcu(&chunk->hash, &new->hash);
    list_for_each_entry(owner, &new->trees, same_root)
        owner->root = new;
    spin_unlock(&hash_lock);
    inotify_evict_watch(&chunk->watch);
    mutex_unlock(&chunk->watch.inode->inotify_mutex);
    put_inotify_watch(&chunk->watch);
    return;

Fallback:
    // do the best we can
    spin_lock(&hash_lock);
    if (owner->root == chunk) {
        list_del_init(&owner->same_root);
        owner->root = NULL;
    }
    list_del_init(&p->list);
    p->owner = NULL;
    put_tree(owner);
    spin_unlock(&hash_lock);
    mutex_unlock(&chunk->watch.inode->inotify_mutex);
}
1,094
1
decode_sequence(const uint8_t *asn1, size_t len, const struct seq_info *seq,
                void *val)
{
    krb5_error_code ret;
    const uint8_t *contents;
    size_t i, j, clen;
    taginfo t;

    assert(seq->n_fields > 0);
    for (i = 0; i < seq->n_fields; i++) {
        if (len == 0)
            break;
        ret = get_tag(asn1, len, &t, &contents, &clen, &asn1, &len);
        if (ret)
            goto error;
        /*
         * Find the applicable sequence field.  This logic is a little
         * oversimplified; we could match an element to an optional extensible
         * choice or optional stored-DER type when we ought to match a
         * subsequent non-optional field.  But it's unwise and (hopefully) very
         * rare for ASN.1 modules to require such precision.
         */
        for (; i < seq->n_fields; i++) {
            if (check_atype_tag(seq->fields[i], &t))
                break;
            ret = omit_atype(seq->fields[i], val);
            if (ret)
                goto error;
        }
        /* We currently model all sequences as extensible.  We should consider
         * changing this before making the encoder visible to plugins. */
        if (i == seq->n_fields)
            break;
        ret = decode_atype(&t, contents, clen, seq->fields[i], val);
        if (ret)
            goto error;
    }
    /* Initialize any fields in the C object which were not accounted for in
     * the sequence.  Error out if any of them aren't optional. */
    for (; i < seq->n_fields; i++) {
        ret = omit_atype(seq->fields[i], val);
        if (ret)
            goto error;
    }
    return 0;

error:
    /* Free what we've decoded so far.  Free pointers in a second pass in
     * case multiple fields refer to the same pointer. */
    for (j = 0; j < i; j++)
        free_atype(seq->fields[j], val);
    for (j = 0; j < i; j++)
        free_atype_ptr(seq->fields[j], val);
    return ret;
}
1,095
1
void ntlm_print_av_pair_list(NTLM_AV_PAIR* pAvPairList, size_t cbAvPairList)
{
    size_t cbAvPair = cbAvPairList;
    NTLM_AV_PAIR* pAvPair = pAvPairList;

    if (!ntlm_av_pair_check(pAvPair, cbAvPair))
        return;

    WLog_INFO(TAG, "AV_PAIRs =");

    while (pAvPair && ntlm_av_pair_get_id(pAvPair) != MsvAvEOL)
    {
        WLog_INFO(TAG, "\t%s AvId: %" PRIu16 " AvLen: %" PRIu16 "",
                  AV_PAIR_STRINGS[ntlm_av_pair_get_id(pAvPair)],
                  ntlm_av_pair_get_id(pAvPair), ntlm_av_pair_get_len(pAvPair));
        winpr_HexDump(TAG, WLOG_INFO, ntlm_av_pair_get_value_pointer(pAvPair),
                      ntlm_av_pair_get_len(pAvPair));
        pAvPair = ntlm_av_pair_next(pAvPair, &cbAvPair);
    }
}
1,096
0
static int msmpeg4_decode_dc(MpegEncContext *s, int n, int *dir_ptr)
{
    int level, pred;

    if (s->msmpeg4_version <= 2) {
        if (n < 4) {
            level = get_vlc2(&s->gb, v2_dc_lum_vlc.table, DC_VLC_BITS, 3);
        } else {
            level = get_vlc2(&s->gb, v2_dc_chroma_vlc.table, DC_VLC_BITS, 3);
        }
        if (level < 0)
            return -1;
        level -= 256;
    } else {
        if (n < 4) {
            level = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
        } else {
            level = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
        }
        if (level < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "illegal dc vlc\n");
            return -1;
        }

        if (level == DC_MAX) {
            level = get_bits(&s->gb, 8);
            if (get_bits1(&s->gb))
                level = -level;
        } else if (level != 0) {
            if (get_bits1(&s->gb))
                level = -level;
        }
    }

    if (s->msmpeg4_version == 1) {
        int32_t *dc_val;
        pred = msmpeg4v1_pred_dc(s, n, &dc_val);
        level += pred;
        *dc_val = level;
    } else {
        int16_t *dc_val;
        pred = ff_msmpeg4_pred_dc(s, n, &dc_val, dir_ptr);
        level += pred;
        if (n < 4) {
            *dc_val = level * s->y_dc_scale;
        } else {
            *dc_val = level * s->c_dc_scale;
        }
    }

    return level;
}
1,097
1
static void emulate_spapr_hypercall(PPCVirtualHypervisor *vhyp,
                                    PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;

    if (msr_pr) {
        hcall_dprintf("Hypercall made with MSR[PR]=1\n");
        env->gpr[3] = H_PRIVILEGE;
    } else {
        env->gpr[3] = spapr_hypercall(cpu, env->gpr[3], &env->gpr[4]);
    }
}
1,099
1
decode_sequence_of(const uint8_t *asn1, size_t len,
                   const struct atype_info *elemtype, void **seq_out,
                   size_t *count_out)
{
    krb5_error_code ret;
    void *seq = NULL, *elem, *newseq;
    const uint8_t *contents;
    size_t clen, count = 0;
    taginfo t;

    *seq_out = NULL;
    *count_out = 0;
    while (len > 0) {
        ret = get_tag(asn1, len, &t, &contents, &clen, &asn1, &len);
        if (ret)
            goto error;
        if (!check_atype_tag(elemtype, &t)) {
            ret = ASN1_BAD_ID;
            goto error;
        }
        newseq = realloc(seq, (count + 1) * elemtype->size);
        if (newseq == NULL) {
            ret = ENOMEM;
            goto error;
        }
        seq = newseq;
        elem = (char *)seq + count * elemtype->size;
        memset(elem, 0, elemtype->size);
        ret = decode_atype(&t, contents, clen, elemtype, elem);
        if (ret)
            goto error;
        count++;
    }
    *seq_out = seq;
    *count_out = count;
    return 0;

error:
    free_sequence_of(elemtype, seq, count);
    free(seq);
    return ret;
}
1,100
1
static inline void free_chunk(struct audit_chunk *chunk)
{
    call_rcu(&chunk->head, __free_chunk);
}
1,101
1
test_opts_range_unvisited(void)
{
    intList *list = NULL;
    intList *tail;
    QemuOpts *opts;
    Visitor *v;

    opts = qemu_opts_parse(qemu_find_opts("userdef"), "ilist=0-2", false,
                           &error_abort);
    v = opts_visitor_new(opts);
    visit_start_struct(v, NULL, NULL, 0, &error_abort);
    /* Would be simpler if the visitor genuinely supported virtual walks */
    visit_start_list(v, "ilist", (GenericList **)&list, sizeof(*list),
                     &error_abort);
    tail = list;
    visit_type_int(v, NULL, &tail->value, &error_abort);
    g_assert_cmpint(tail->value, ==, 0);
    tail = (intList *)visit_next_list(v, (GenericList *)tail, sizeof(*list));
    g_assert(tail);
    visit_type_int(v, NULL, &tail->value, &error_abort);
    g_assert_cmpint(tail->value, ==, 1);
    tail = (intList *)visit_next_list(v, (GenericList *)tail, sizeof(*list));
    g_assert(tail);
    visit_check_list(v, &error_abort); /* BUG: unvisited tail not reported */
    visit_end_list(v, (void **)&list);
    visit_check_struct(v, &error_abort);
    visit_end_struct(v, NULL);
    qapi_free_intList(list);
    visit_free(v);
    qemu_opts_del(opts);
}
1,103
1
void inotify_destroy(struct inotify_handle *ih)
{
    /*
     * Destroy all of the watches for this handle. Unfortunately, not very
     * pretty. We cannot do a simple iteration over the list, because we
     * do not know the inode until we iterate to the watch. But we need to
     * hold inode->inotify_mutex before ih->mutex. The following works.
     */
    while (1) {
        struct inotify_watch *watch;
        struct list_head *watches;
        struct inode *inode;

        mutex_lock(&ih->mutex);
        watches = &ih->watches;
        if (list_empty(watches)) {
            mutex_unlock(&ih->mutex);
            break;
        }
        watch = list_first_entry(watches, struct inotify_watch, h_list);
        get_inotify_watch(watch);
        mutex_unlock(&ih->mutex);

        inode = watch->inode;
        mutex_lock(&inode->inotify_mutex);
        mutex_lock(&ih->mutex);

        /* make sure we didn't race with another list removal */
        if (likely(idr_find(&ih->idr, watch->wd))) {
            remove_watch_no_event(watch, ih);
            put_inotify_watch(watch);
        }

        mutex_unlock(&ih->mutex);
        mutex_unlock(&inode->inotify_mutex);
        put_inotify_watch(watch);
    }

    /* free this handle: the put matching the get in inotify_init() */
    put_inotify_handle(ih);
}
1,104
0
static INLINE BOOL zgfx_GetBits(ZGFX_CONTEXT* _zgfx, UINT32 _nbits)
{
    if (!_zgfx)
        return FALSE;

    while (_zgfx->cBitsCurrent < _nbits)
    {
        _zgfx->BitsCurrent <<= 8;

        if (_zgfx->pbInputCurrent < _zgfx->pbInputEnd)
            _zgfx->BitsCurrent += *(_zgfx->pbInputCurrent)++;

        _zgfx->cBitsCurrent += 8;
    }

    _zgfx->cBitsRemaining -= _nbits;
    _zgfx->cBitsCurrent -= _nbits;
    _zgfx->bits = _zgfx->BitsCurrent >> _zgfx->cBitsCurrent;
    _zgfx->BitsCurrent &= ((1 << _zgfx->cBitsCurrent) - 1);
}
1,105
0
TSReturnCode TSHttpHdrUrlGet(TSMBuffer bufp, TSMLoc obj, TSMLoc *locp)
{
    sdk_assert(sdk_sanity_check_mbuffer(bufp) == TS_SUCCESS);
    sdk_assert(sdk_sanity_check_http_hdr_handle(obj) == TS_SUCCESS);

    HTTPHdrImpl *hh = (HTTPHdrImpl *)obj;

    if (hh->m_polarity != HTTP_TYPE_REQUEST) {
        return TS_ERROR;
    }

    *locp = ((TSMLoc)hh->u.req.m_url_impl);
    return TS_SUCCESS;
}
1,106
1
static int decode_subframe(WMAProDecodeCtx *s)
{
    int offset = s->samples_per_frame;
    int subframe_len = s->samples_per_frame;
    int i;
    int total_samples = s->samples_per_frame * s->avctx->channels;
    int transmit_coeffs = 0;
    int cur_subwoofer_cutoff;

    s->subframe_offset = get_bits_count(&s->gb);

    /** reset channel context and find the next block offset and size
        == the next block of the channel with the smallest number of
        decoded samples */
    for (i = 0; i < s->avctx->channels; i++) {
        s->channel[i].grouped = 0;
        if (offset > s->channel[i].decoded_samples) {
            offset = s->channel[i].decoded_samples;
            subframe_len =
                s->channel[i].subframe_len[s->channel[i].cur_subframe];
        }
    }

    av_dlog(s->avctx,
            "processing subframe with offset %i len %i\n", offset, subframe_len);

    /** get a list of all channels that contain the estimated block */
    s->channels_for_cur_subframe = 0;
    for (i = 0; i < s->avctx->channels; i++) {
        const int cur_subframe = s->channel[i].cur_subframe;
        /** subtract already processed samples */
        total_samples -= s->channel[i].decoded_samples;

        /** and count if there are multiple subframes that match our profile */
        if (offset == s->channel[i].decoded_samples &&
            subframe_len == s->channel[i].subframe_len[cur_subframe]) {
            total_samples -= s->channel[i].subframe_len[cur_subframe];
            s->channel[i].decoded_samples +=
                s->channel[i].subframe_len[cur_subframe];
            s->channel_indexes_for_cur_subframe[s->channels_for_cur_subframe] = i;
            ++s->channels_for_cur_subframe;
        }
    }

    /** check if the frame will be complete after processing the
        estimated block */
    if (!total_samples)
        s->parsed_all_subframes = 1;

    av_dlog(s->avctx, "subframe is part of %i channels\n",
            s->channels_for_cur_subframe);

    /** calculate number of scale factor bands and their offsets */
    s->table_idx         = av_log2(s->samples_per_frame/subframe_len);
    s->num_bands         = s->num_sfb[s->table_idx];
    s->cur_sfb_offsets   = s->sfb_offsets[s->table_idx];
    cur_subwoofer_cutoff = s->subwoofer_cutoffs[s->table_idx];

    /** configure the decoder for the current subframe */
    for (i = 0; i < s->channels_for_cur_subframe; i++) {
        int c = s->channel_indexes_for_cur_subframe[i];

        s->channel[c].coeffs = &s->channel[c].out[(s->samples_per_frame >> 1)
                                                  + offset];
    }

    s->subframe_len = subframe_len;
    s->esc_len = av_log2(s->subframe_len - 1) + 1;

    /** skip extended header if any */
    if (get_bits1(&s->gb)) {
        int num_fill_bits;
        if (!(num_fill_bits = get_bits(&s->gb, 2))) {
            int len = get_bits(&s->gb, 4);
            num_fill_bits = get_bits(&s->gb, len) + 1;
        }

        if (num_fill_bits >= 0) {
            if (get_bits_count(&s->gb) + num_fill_bits > s->num_saved_bits) {
                av_log(s->avctx, AV_LOG_ERROR, "invalid number of fill bits\n");
                return AVERROR_INVALIDDATA;
            }

            skip_bits_long(&s->gb, num_fill_bits);
        }
    }

    /** no idea for what the following bit is used */
    if (get_bits1(&s->gb)) {
        avpriv_request_sample(s->avctx, "Reserved bit");
        return AVERROR_PATCHWELCOME;
    }

    if (decode_channel_transform(s) < 0)
        return AVERROR_INVALIDDATA;

    for (i = 0; i < s->channels_for_cur_subframe; i++) {
        int c = s->channel_indexes_for_cur_subframe[i];
        if ((s->channel[c].transmit_coefs = get_bits1(&s->gb)))
            transmit_coeffs = 1;
    }

    if (transmit_coeffs) {
        int step;
        int quant_step = 90 * s->bits_per_sample >> 4;

        /** decode number of vector coded coefficients */
        if ((s->transmit_num_vec_coeffs = get_bits1(&s->gb))) {
            int num_bits = av_log2((s->subframe_len + 3)/4) + 1;
            for (i = 0; i < s->channels_for_cur_subframe; i++) {
                int c = s->channel_indexes_for_cur_subframe[i];
                int num_vec_coeffs = get_bits(&s->gb, num_bits) << 2;
                if (num_vec_coeffs > WMAPRO_BLOCK_MAX_SIZE) {
                    av_log(s->avctx, AV_LOG_ERROR,
                           "num_vec_coeffs %d is too large\n", num_vec_coeffs);
                    return AVERROR_INVALIDDATA;
                }
                s->channel[c].num_vec_coeffs = num_vec_coeffs;
            }
        } else {
            for (i = 0; i < s->channels_for_cur_subframe; i++) {
                int c = s->channel_indexes_for_cur_subframe[i];
                s->channel[c].num_vec_coeffs = s->subframe_len;
            }
        }
        /** decode quantization step */
        step = get_sbits(&s->gb, 6);
        quant_step += step;
        if (step == -32 || step == 31) {
            const int sign = (step == 31) - 1;
            int quant = 0;
            while (get_bits_count(&s->gb) + 5 < s->num_saved_bits &&
                   (step = get_bits(&s->gb, 5)) == 31) {
                quant += 31;
            }
            quant_step += ((quant + step) ^ sign) - sign;
        }
        if (quant_step < 0) {
            av_log(s->avctx, AV_LOG_DEBUG, "negative quant step\n");
        }

        /** decode quantization step modifiers for every channel */
        if (s->channels_for_cur_subframe == 1) {
            s->channel[s->channel_indexes_for_cur_subframe[0]].quant_step = quant_step;
        } else {
            int modifier_len = get_bits(&s->gb, 3);
            for (i = 0; i < s->channels_for_cur_subframe; i++) {
                int c = s->channel_indexes_for_cur_subframe[i];
                s->channel[c].quant_step = quant_step;
                if (get_bits1(&s->gb)) {
                    if (modifier_len) {
                        s->channel[c].quant_step +=
                            get_bits(&s->gb, modifier_len) + 1;
                    } else
                        ++s->channel[c].quant_step;
                }
            }
        }

        /** decode scale factors */
        if (decode_scale_factors(s) < 0)
            return AVERROR_INVALIDDATA;
    }

    av_dlog(s->avctx, "BITSTREAM: subframe header length was %i\n",
            get_bits_count(&s->gb) - s->subframe_offset);

    /** parse coefficients */
    for (i = 0; i < s->channels_for_cur_subframe; i++) {
        int c = s->channel_indexes_for_cur_subframe[i];
        if (s->channel[c].transmit_coefs &&
            get_bits_count(&s->gb) < s->num_saved_bits) {
            decode_coeffs(s, c);
        } else
            memset(s->channel[c].coeffs, 0,
                   sizeof(*s->channel[c].coeffs) * subframe_len);
    }

    av_dlog(s->avctx, "BITSTREAM: subframe length was %i\n",
            get_bits_count(&s->gb) - s->subframe_offset);

    if (transmit_coeffs) {
        FFTContext *mdct = &s->mdct_ctx[av_log2(subframe_len) - WMAPRO_BLOCK_MIN_BITS];
        /** reconstruct the per channel data */
        inverse_channel_transform(s);
        for (i = 0; i < s->channels_for_cur_subframe; i++) {
            int c = s->channel_indexes_for_cur_subframe[i];
            const int* sf = s->channel[c].scale_factors;
            int b;

            if (c == s->lfe_channel)
                memset(&s->tmp[cur_subwoofer_cutoff], 0, sizeof(*s->tmp) *
                       (subframe_len - cur_subwoofer_cutoff));

            /** inverse quantization and rescaling */
            for (b = 0; b < s->num_bands; b++) {
                const int end = FFMIN(s->cur_sfb_offsets[b+1], s->subframe_len);
                const int exp = s->channel[c].quant_step -
                            (s->channel[c].max_scale_factor - *sf++) *
                            s->channel[c].scale_factor_step;
                const float quant = pow(10.0, exp / 20.0);
                int start = s->cur_sfb_offsets[b];
                s->fdsp.vector_fmul_scalar(s->tmp + start,
                                           s->channel[c].coeffs + start,
                                           quant, end - start);
            }

            /** apply imdct (imdct_half == DCTIV with reverse) */
            mdct->imdct_half(mdct, s->channel[c].coeffs, s->tmp);
        }
    }

    /** window and overlapp-add */
    wmapro_window(s);

    /** handled one subframe */
    for (i = 0; i < s->channels_for_cur_subframe; i++) {
        int c = s->channel_indexes_for_cur_subframe[i];
        if (s->channel[c].cur_subframe >= s->channel[c].num_subframes) {
            av_log(s->avctx, AV_LOG_ERROR, "broken subframe\n");
            return AVERROR_INVALIDDATA;
        }
        ++s->channel[c].cur_subframe;
    }

    return 0;
}
1,107
1
struct audit_chunk *audit_tree_lookup(const struct inode *inode)
{
    struct list_head *list = chunk_hash(inode);
    struct audit_chunk *p;

    list_for_each_entry_rcu(p, list, hash) {
        if (p->watch.inode == inode) {
            get_inotify_watch(&p->watch);
            return p;
        }
    }
    return NULL;
}
1,108
0
void name##_free(type *a);
#define DECLARE_ASN1_PRINT_FUNCTION(stname) DECLARE_ASN1_PRINT_FUNCTION_fname(stname, stname)
#define DECLARE_ASN1_PRINT_FUNCTION_fname(stname, fname) int fname##_print_ctx(BIO *out, stname *x, int indent, const ASN1_PCTX *pctx);
#define D2I_OF(type) type *(*)(type **, const unsigned char **, long)
#define I2D_OF(type) int (*)(type *, unsigned char **)
#define I2D_OF_const(type) int (*)(const type *, unsigned char **)
#define CHECKED_D2I_OF(type, d2i) ((d2i_of_void *)(1 ? d2i : ((D2I_OF(type))0)))
#define CHECKED_I2D_OF(type, i2d) ((i2d_of_void *)(1 ? i2d : ((I2D_OF(type))0)))
#define CHECKED_NEW_OF(type, xnew) ((void *(*)(void))(1 ? xnew : ((type *(*)(void))0)))
#define CHECKED_PTR_OF(type, p) ((void *)(1 ? p : (type *)0))
#define CHECKED_PPTR_OF(type, p) ((void **)(1 ? p : (type **)0))
#define TYPEDEF_D2I_OF(type) typedef type *d2i_of_##type(type **, const unsigned char **, long)
#define TYPEDEF_I2D_OF(type) typedef int i2d_of_##type(type *, unsigned char **)
#define TYPEDEF_D2I2D_OF(type) TYPEDEF_D2I_OF(type); TYPEDEF_I2D_OF(type)
TYPEDEF_D2I2D_OF(void);
#ifndef OPENSSL_EXPORT_VAR_AS_FUNCTION
typedef const ASN1_ITEM ASN1_ITEM_EXP;
# define ASN1_ITEM_ptr(iptr) (iptr)
# define ASN1_ITEM_ref(iptr) (&(iptr##_it))
# define ASN1_ITEM_rptr(ref) (&(ref##_it))
# define DECLARE_ASN1_ITEM(name) OPENSSL_EXTERN const ASN1_ITEM name##_it;
#else
typedef const ASN1_ITEM *ASN1_ITEM_EXP (void);
# define ASN1_ITEM_ptr(iptr) (iptr())
# define ASN1_ITEM_ref(iptr) (iptr##_it)
# define ASN1_ITEM_rptr(ref) (ref##_it())
# define DECLARE_ASN1_ITEM(name) const ASN1_ITEM *name##_it(void);
#endif
#define ASN1_STRFLGS_ESC_2253 1
#define ASN1_STRFLGS_ESC_CTRL 2
#define ASN1_STRFLGS_ESC_MSB 4
#define ASN1_STRFLGS_ESC_QUOTE 8
#define CHARTYPE_PRINTABLESTRING 0x10
#define CHARTYPE_FIRST_ESC_2253 0x20
#define CHARTYPE_LAST_ESC_2253 0x40
#define ASN1_STRFLGS_UTF8_CONVERT 0x10
#define ASN1_STRFLGS_IGNORE_TYPE 0x20
#define ASN1_STRFLGS_SHOW_TYPE 0x40
#define ASN1_STRFLGS_DUMP_ALL 0x80
#define ASN1_STRFLGS_DUMP_UNKNOWN 0x100
#define ASN1_STRFLGS_DUMP_DER 0x200
#define ASN1_STRFLGS_ESC_2254 0x400
#define ASN1_STRFLGS_RFC2253 (ASN1_STRFLGS_ESC_2253 | ASN1_STRFLGS_ESC_CTRL | ASN1_STRFLGS_ESC_MSB | ASN1_STRFLGS_UTF8_CONVERT | ASN1_STRFLGS_DUMP_UNKNOWN | ASN1_STRFLGS_DUMP_DER)
DEFINE_STACK_OF(ASN1_INTEGER)
DEFINE_STACK_OF(ASN1_GENERALSTRING)
DEFINE_STACK_OF(ASN1_UTF8STRING)
typedef struct asn1_type_st {
    int type;
    union {
        char *ptr;
        ASN1_BOOLEAN boolean;
        ASN1_STRING *asn1_string;
        ASN1_OBJECT *object;
        ASN1_INTEGER *integer;
        ASN1_ENUMERATED *enumerated;
        ASN1_BIT_STRING *bit_string;
        ASN1_OCTET_STRING *octet_string;
        ASN1_PRINTABLESTRING *printablestring;
        ASN1_T61STRING *t61string;
        ASN1_IA5STRING *ia5string;
        ASN1_GENERALSTRING *generalstring;
        ASN1_BMPSTRING *bmpstring;
        ASN1_UNIVERSALSTRING *universalstring;
        ASN1_UTCTIME *utctime;
        ASN1_GENERALIZEDTIME *generalizedtime;
        ASN1_VISIBLESTRING *visiblestring;
        ASN1_UTF8STRING *utf8string;
        ASN1_STRING *set;
        ASN1_STRING *sequence;
        ASN1_VALUE *asn1_value;
    } value;
} ASN1_TYPE;
DEFINE_STACK_OF(ASN1_TYPE)
typedef STACK_OF(ASN1_TYPE) ASN1_SEQUENCE_ANY;
DECLARE_ASN1_ENCODE_FUNCTIONS_const(ASN1_SEQUENCE_ANY, ASN1_SEQUENCE_ANY)
DECLARE_ASN1_ENCODE_FUNCTIONS_const(ASN1_SEQUENCE_ANY, ASN1_SET_ANY)
typedef struct BIT_STRING_BITNAME_st {
    int bitnum;
    const char *lname;
    const char *sname;
} BIT_STRING_BITNAME;
#define B_ASN1_TIME B_ASN1_UTCTIME | B_ASN1_GENERALIZEDTIME
#define B_ASN1_PRINTABLE B_ASN1_NUMERICSTRING | B_ASN1_PRINTABLESTRING | B_ASN1_T61STRING | B_ASN1_IA5STRING | B_ASN1_BIT_STRING | B_ASN1_UNIVERSALSTRING | B_ASN1_BMPSTRING | B_ASN1_UTF8STRING | B_ASN1_SEQUENCE | B_ASN1_UNKNOWN
#define B_ASN1_DIRECTORYSTRING B_ASN1_PRINTABLESTRING | B_ASN1_TELETEXSTRING | B_ASN1_BMPSTRING | B_ASN1_UNIVERSALSTRING | B_ASN1_UTF8STRING
#define B_ASN1_DISPLAYTEXT B_ASN1_IA5STRING | B_ASN1_VISIBLESTRING | B_ASN1_BMPSTRING | B_ASN1_UTF8STRING
DECLARE_ASN1_FUNCTIONS_fname(ASN1_TYPE, ASN1_ANY, ASN1_TYPE)
int ASN1_TYPE_get(const ASN1_TYPE *a);
void ASN1_TYPE_set(ASN1_TYPE *a, int type, void *value);
int ASN1_TYPE_set1(ASN1_TYPE *a, int type, const void *value);
int ASN1_TYPE_cmp(const ASN1_TYPE *a, const ASN1_TYPE *b);
ASN1_TYPE *ASN1_TYPE_pack_sequence(const ASN1_ITEM *it, void *s, ASN1_TYPE **t);
void *ASN1_TYPE_unpack_sequence(const ASN1_ITEM *it, const ASN1_TYPE *t);
ASN1_OBJECT *ASN1_OBJECT_new(void);
void ASN1_OBJECT_free(ASN1_OBJECT *a);
int i2d_ASN1_OBJECT(const ASN1_OBJECT *a, unsigned char **pp);
ASN1_OBJECT *d2i_ASN1_OBJECT(ASN1_OBJECT **a, const unsigned char **pp, long length);
DECLARE_ASN1_ITEM(ASN1_OBJECT)
DEFINE_STACK_OF(ASN1_OBJECT)
ASN1_STRING *ASN1_STRING_new(void);
void ASN1_STRING_free(ASN1_STRING *a);
void ASN1_STRING_clear_free(ASN1_STRING *a);
int ASN1_STRING_copy(ASN1_STRING *dst, const ASN1_STRING *str);
ASN1_STRING *ASN1_STRING_dup(const ASN1_STRING *a);
ASN1_STRING *ASN1_STRING_type_new(int type);
int ASN1_STRING_cmp(const ASN1_STRING *a, const ASN1_STRING *b);
int ASN1_STRING_set(ASN1_STRING *str, const void *data, int len);
void ASN1_STRING_set0(ASN1_STRING *str, void *data, int len);
int ASN1_STRING_length(const ASN1_STRING *x);
void ASN1_STRING_length_set(ASN1_STRING *x, int n);
int ASN1_STRING_type(const ASN1_STRING *x);
DEPRECATEDIN_1_1_0(unsigned char *ASN1_STRING_data(ASN1_STRING *x))
const unsigned char *ASN1_STRING_get0_data(const ASN1_STRING *x);
DECLARE_ASN1_FUNCTIONS(ASN1_BIT_STRING)
int ASN1_BIT_STRING_set(ASN1_BIT_STRING *a, unsigned char *d, int length);
int ASN1_BIT_STRING_set_bit(ASN1_BIT_STRING *a, int n, int value);
int ASN1_BIT_STRING_get_bit(const ASN1_BIT_STRING *a, int n);
int ASN1_BIT_STRING_check(const ASN1_BIT_STRING *a, const unsigned char *flags, int flags_len);
int ASN1_BIT_STRING_name_print(BIO *out, ASN1_BIT_STRING *bs, BIT_STRING_BITNAME *tbl, int indent);
int ASN1_BIT_STRING_num_asc(const char *name, BIT_STRING_BITNAME *tbl);
int ASN1_BIT_STRING_set_asc(ASN1_BIT_STRING *bs, const char *name, int value, BIT_STRING_BITNAME *tbl);
DECLARE_ASN1_FUNCTIONS(ASN1_INTEGER)
1,112
1
get_tag(const uint8_t *asn1, size_t len, taginfo *tag_out,
        const uint8_t **contents_out, size_t *clen_out,
        const uint8_t **remainder_out, size_t *rlen_out)
{
    krb5_error_code ret;
    uint8_t o;
    const uint8_t *c, *p, *tag_start = asn1;
    size_t clen, llen, i;
    taginfo t;

    *contents_out = *remainder_out = NULL;
    *clen_out = *rlen_out = 0;
    if (len == 0)
        return ASN1_OVERRUN;
    o = *asn1++;
    len--;
    tag_out->asn1class = o & 0xC0;
    tag_out->construction = o & 0x20;
    if ((o & 0x1F) != 0x1F) {
        tag_out->tagnum = o & 0x1F;
    } else {
        tag_out->tagnum = 0;
        do {
            if (len == 0)
                return ASN1_OVERRUN;
            o = *asn1++;
            len--;
            tag_out->tagnum = (tag_out->tagnum << 7) | (o & 0x7F);
        } while (o & 0x80);
    }
    if (len == 0)
        return ASN1_OVERRUN;
    o = *asn1++;
    len--;
    if (o == 0x80) {
        /* Indefinite form (should not be present in DER, but we accept it). */
        if (tag_out->construction != CONSTRUCTED)
            return ASN1_MISMATCH_INDEF;
        p = asn1;
        while (!(len >= 2 && p[0] == 0 && p[1] == 0)) {
            ret = get_tag(p, len, &t, &c, &clen, &p, &len);
            if (ret)
                return ret;
        }
        tag_out->tag_end_len = 2;
        *contents_out = asn1;
        *clen_out = p - asn1;
        *remainder_out = p + 2;
        *rlen_out = len - 2;
    } else if ((o & 0x80) == 0) {
        /* Short form (first octet gives content length). */
        if (o > len)
            return ASN1_OVERRUN;
        tag_out->tag_end_len = 0;
        *contents_out = asn1;
        *clen_out = o;
        *remainder_out = asn1 + *clen_out;
        *rlen_out = len - (*remainder_out - asn1);
    } else {
        /* Long form (first octet gives number of base-256 length octets). */
        llen = o & 0x7F;
        if (llen > len)
            return ASN1_OVERRUN;
        if (llen > sizeof(*clen_out))
            return ASN1_OVERFLOW;
        for (i = 0, clen = 0; i < llen; i++)
            clen = (clen << 8) | asn1[i];
        if (clen > len - llen)
            return ASN1_OVERRUN;
        tag_out->tag_end_len = 0;
        *contents_out = asn1 + llen;
        *clen_out = clen;
        *remainder_out = *contents_out + clen;
        *rlen_out = len - (*remainder_out - asn1);
    }
    tag_out->tag_len = *contents_out - tag_start;
    return 0;
}
1,114
1
static void setup_frame(int sig, struct target_sigaction *ka,
                        target_sigset_t *set, CPUX86State *env)
{
    abi_ulong frame_addr;
    struct sigframe *frame;
    int i, err = 0;

    frame_addr = get_sigframe(ka, env, sizeof(*frame));

    if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
        goto give_sigsegv;

    __put_user(current_exec_domain_sig(sig), &frame->sig);
    if (err)
        goto give_sigsegv;

    setup_sigcontext(&frame->sc, &frame->fpstate, env, set->sig[0],
                     frame_addr + offsetof(struct sigframe, fpstate));
    if (err)
        goto give_sigsegv;

    for (i = 1; i < TARGET_NSIG_WORDS; i++) {
        if (__put_user(set->sig[i], &frame->extramask[i - 1]))
            goto give_sigsegv;
    }

    /* Set up to return from userspace.  If provided, use a stub
       already in userspace.  */
    if (ka->sa_flags & TARGET_SA_RESTORER) {
        __put_user(ka->sa_restorer, &frame->pretcode);
    } else {
        uint16_t val16;
        abi_ulong retcode_addr;
        retcode_addr = frame_addr + offsetof(struct sigframe, retcode);
        __put_user(retcode_addr, &frame->pretcode);
        /* This is popl %eax ; movl $,%eax ; int $0x80 */
        val16 = 0xb858;
        __put_user(val16, (uint16_t *)(frame->retcode+0));
        __put_user(TARGET_NR_sigreturn, (int *)(frame->retcode+2));
        val16 = 0x80cd;
        __put_user(val16, (uint16_t *)(frame->retcode+6));
    }

    if (err)
        goto give_sigsegv;

    /* Set up registers for signal handler */
    env->regs[R_ESP] = frame_addr;
    env->eip = ka->_sa_handler;

    cpu_x86_load_seg(env, R_DS, __USER_DS);
    cpu_x86_load_seg(env, R_ES, __USER_DS);
    cpu_x86_load_seg(env, R_SS, __USER_DS);
    cpu_x86_load_seg(env, R_CS, __USER_CS);
    env->eflags &= ~TF_MASK;

    unlock_user_struct(frame, frame_addr, 1);
    return;

give_sigsegv:
    unlock_user_struct(frame, frame_addr, 1);
    if (sig == TARGET_SIGSEGV)
        ka->_sa_handler = TARGET_SIG_DFL;
    force_sig(TARGET_SIGSEGV /* , current */);
}
1,115
0
static BOOL zgfx_decompress_segment(ZGFX_CONTEXT* zgfx, wStream* stream, size_t segmentSize)
{
    BYTE c;
    BYTE flags;
    UINT32 extra = 0;
    int opIndex;
    int haveBits;
    int inPrefix;
    UINT32 count;
    UINT32 distance;
    BYTE* pbSegment;
    size_t cbSegment = segmentSize - 1;

    if ((Stream_GetRemainingLength(stream) < segmentSize) || (segmentSize < 1))
        return FALSE;

    Stream_Read_UINT8(stream, flags); /* header (1 byte) */
    zgfx->OutputCount = 0;
    pbSegment = Stream_Pointer(stream);
    Stream_Seek(stream, cbSegment);

    if (!(flags & PACKET_COMPRESSED))
    {
        zgfx_history_buffer_ring_write(zgfx, pbSegment, cbSegment);
        CopyMemory(zgfx->OutputBuffer, pbSegment, cbSegment);
        zgfx->OutputCount = cbSegment;
        return TRUE;
    }

    zgfx->pbInputCurrent = pbSegment;
    zgfx->pbInputEnd = &pbSegment[cbSegment - 1];
    /* NumberOfBitsToDecode = ((NumberOfBytesToDecode - 1) * 8) - ValueOfLastByte */
    zgfx->cBitsRemaining = 8 * (cbSegment - 1) - *zgfx->pbInputEnd;
    zgfx->cBitsCurrent = 0;
    zgfx->BitsCurrent = 0;

    while (zgfx->cBitsRemaining)
    {
        haveBits = 0;
        inPrefix = 0;

        for (opIndex = 0; ZGFX_TOKEN_TABLE[opIndex].prefixLength != 0; opIndex++)
        {
            while (haveBits < ZGFX_TOKEN_TABLE[opIndex].prefixLength)
            {
                zgfx_GetBits(zgfx, 1);
                inPrefix = (inPrefix << 1) + zgfx->bits;
                haveBits++;
            }

            if (inPrefix == ZGFX_TOKEN_TABLE[opIndex].prefixCode)
            {
                if (ZGFX_TOKEN_TABLE[opIndex].tokenType == 0)
                {
                    /* Literal */
                    zgfx_GetBits(zgfx, ZGFX_TOKEN_TABLE[opIndex].valueBits);
                    c = (BYTE)(ZGFX_TOKEN_TABLE[opIndex].valueBase + zgfx->bits);
                    zgfx->HistoryBuffer[zgfx->HistoryIndex] = c;

                    if (++zgfx->HistoryIndex == zgfx->HistoryBufferSize)
                        zgfx->HistoryIndex = 0;

                    zgfx->OutputBuffer[zgfx->OutputCount++] = c;
                }
                else
                {
                    zgfx_GetBits(zgfx, ZGFX_TOKEN_TABLE[opIndex].valueBits);
                    distance = ZGFX_TOKEN_TABLE[opIndex].valueBase + zgfx->bits;

                    if (distance != 0)
                    {
                        /* Match */
                        zgfx_GetBits(zgfx, 1);

                        if (zgfx->bits == 0)
                        {
                            count = 3;
                        }
                        else
                        {
                            count = 4;
                            extra = 2;
                            zgfx_GetBits(zgfx, 1);

                            while (zgfx->bits == 1)
                            {
                                count *= 2;
                                extra++;
                                zgfx_GetBits(zgfx, 1);
                            }

                            zgfx_GetBits(zgfx, extra);
                            count += zgfx->bits;
                        }

                        zgfx_history_buffer_ring_read(zgfx, distance, &(zgfx->OutputBuffer[zgfx->OutputCount]), count);
                        zgfx_history_buffer_ring_write(zgfx, &(zgfx->OutputBuffer[zgfx->OutputCount]), count);
                        zgfx->OutputCount += count;
                    }
                    else
                    {
                        /* Unencoded */
                        zgfx_GetBits(zgfx, 15);
                        count = zgfx->bits;
                        zgfx->cBitsRemaining -= zgfx->cBitsCurrent;
                        zgfx->cBitsCurrent = 0;
                        zgfx->BitsCurrent = 0;
                        CopyMemory(&(zgfx->OutputBuffer[zgfx->OutputCount]), zgfx->pbInputCurrent, count);
                        zgfx_history_buffer_ring_write(zgfx, zgfx->pbInputCurrent, count);
                        zgfx->pbInputCurrent += count;
                        zgfx->cBitsRemaining -= (8 * count);
                        zgfx->OutputCount += count;
                    }
                }

                break;
            }
        }
    }

    return TRUE;
}
1,117
0
static void _slurm_rpc_get_priority_factors(slurm_msg_t *msg)
{
    DEF_TIMERS;
    priority_factors_request_msg_t *req_msg =
        (priority_factors_request_msg_t *) msg->data;
    priority_factors_response_msg_t resp_msg;
    slurm_msg_t response_msg;
    uid_t uid = g_slurm_auth_get_uid(msg->auth_cred,
                                     slurmctld_config.auth_info);

    START_TIMER;
    debug2("Processing RPC: REQUEST_PRIORITY_FACTORS from uid=%d", uid);
    resp_msg.priority_factors_list =
        priority_g_get_priority_factors_list(req_msg, uid);
    slurm_msg_t_init(&response_msg);
    response_msg.flags = msg->flags;
    response_msg.protocol_version = msg->protocol_version;
    response_msg.address = msg->address;
    response_msg.conn = msg->conn;
    response_msg.msg_type = RESPONSE_PRIORITY_FACTORS;
    response_msg.data = &resp_msg;
    slurm_send_node_msg(msg->conn_fd, &response_msg);
    FREE_NULL_LIST(resp_msg.priority_factors_list);
    END_TIMER2("_slurm_rpc_get_priority_factors");
    debug2("_slurm_rpc_get_priority_factors %s", TIME_STR);
}
1,118
1
k5_asn1_full_decode(const krb5_data *code, const struct atype_info *a,
                    void **retrep)
{
    krb5_error_code ret;
    const uint8_t *contents, *remainder;
    size_t clen, rlen;
    taginfo t;

    *retrep = NULL;
    ret = get_tag((uint8_t *)code->data, code->length, &t, &contents,
                  &clen, &remainder, &rlen);
    if (ret)
        return ret;
    /* rlen should be 0, but we don't check it (and due to padding in
     * non-length-preserving enctypes, it will sometimes be nonzero). */
    if (!check_atype_tag(a, &t))
        return ASN1_BAD_ID;
    return decode_atype_to_ptr(&t, contents, clen, a, retrep);
}
1,119
1
static void ahci_pci_enable(AHCIQState *ahci)
{
    uint8_t reg;

    start_ahci_device(ahci);

    switch (ahci->fingerprint) {
    case AHCI_INTEL_ICH9:
        /* ICH9 has a register at PCI 0x92 that
         * acts as a master port enabler mask. */
        reg = qpci_config_readb(ahci->dev, 0x92);
        reg |= 0x3F;
        qpci_config_writeb(ahci->dev, 0x92, reg);
        /* 0...0111111b -- bit significant, ports 0-5 enabled. */
        ASSERT_BIT_SET(qpci_config_readb(ahci->dev, 0x92), 0x3F);
        break;
    }
}
1,120
1
static inline int audit_del_rule(struct audit_entry *entry,
                                 struct list_head *list)
{
    struct audit_entry *e;
    struct audit_field *inode_f = entry->rule.inode_f;
    struct audit_watch *watch, *tmp_watch = entry->rule.watch;
    struct audit_tree *tree = entry->rule.tree;
    LIST_HEAD(inotify_list);
    int h, ret = 0;
#ifdef CONFIG_AUDITSYSCALL
    int dont_count = 0;

    /* If either of these, don't count towards total */
    if (entry->rule.listnr == AUDIT_FILTER_USER ||
        entry->rule.listnr == AUDIT_FILTER_TYPE)
        dont_count = 1;
#endif

    if (inode_f) {
        h = audit_hash_ino(inode_f->val);
        list = &audit_inode_hash[h];
    }

    mutex_lock(&audit_filter_mutex);
    e = audit_find_rule(entry, list);
    if (!e) {
        mutex_unlock(&audit_filter_mutex);
        ret = -ENOENT;
        goto out;
    }

    watch = e->rule.watch;
    if (watch) {
        struct audit_parent *parent = watch->parent;

        list_del(&e->rule.rlist);

        if (list_empty(&watch->rules)) {
            audit_remove_watch(watch);

            if (list_empty(&parent->watches)) {
                /* Put parent on the inotify un-registration
                 * list.  Grab a reference before releasing
                 * audit_filter_mutex, to be released in
                 * audit_inotify_unregister(). */
                list_add(&parent->ilist, &inotify_list);
                get_inotify_watch(&parent->wdata);
            }
        }
    }

    if (e->rule.tree)
        audit_remove_tree_rule(&e->rule);

    list_del_rcu(&e->list);
    call_rcu(&e->rcu, audit_free_rule_rcu);

#ifdef CONFIG_AUDITSYSCALL
    if (!dont_count)
        audit_n_rules--;

    if (!audit_match_signal(entry))
        audit_signals--;
#endif
    mutex_unlock(&audit_filter_mutex);

    if (!list_empty(&inotify_list))
        audit_inotify_unregister(&inotify_list);

out:
    if (tmp_watch)
        audit_put_watch(tmp_watch); /* match initial get */
    if (tree)
        audit_put_tree(tree);       /* that's the temporary one */

    return ret;
}
1,121
1
static void destroy_watch(struct inotify_watch *watch)
{
    struct audit_chunk *chunk = container_of(watch, struct audit_chunk, watch);
    free_chunk(chunk);
}
1,122