Dataset schema (4 columns):
target: int64, values 0 to 1
func: string, lengths 7 to 484k
func_no_comments: string, lengths 7 to 484k
idx: int64, values 1 to 368k
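Each record below lists these four fields in order: target (a binary defect label), func (the function source with its comments), func_no_comments (the same source with comments stripped), and idx (the row id). As a minimal sketch of how rows with this schema might be consumed, assuming they were exported as JSON Lines with these exact field names (the file name defects.jsonl is a hypothetical placeholder; the dump does not state a storage format):

import json

# Hypothetical path; the dump itself does not specify how rows are stored.
PATH = "defects.jsonl"

def load_records(path):
    """Yield one dict per row with the four fields from the schema above."""
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            rec = json.loads(line)
            # rec["target"]: 0/1 label, rec["func"]: commented source,
            # rec["func_no_comments"]: comment-stripped source, rec["idx"]: row id.
            yield rec

if __name__ == "__main__":
    rows = list(load_records(PATH))
    flagged = sum(r["target"] for r in rows)
    print(f"{flagged} of {len(rows)} functions carry label 1")
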
target: 1
static int pwc_video_close(struct inode *inode, struct file *file) { struct video_device *vdev = file->private_data; struct pwc_device *pdev; int i; PWC_DEBUG_OPEN(">> video_close called(vdev = 0x%p).\n", vdev); pdev = (struct pwc_device *)vdev->priv; if (pdev->vopen == 0) PWC_DEBUG_MODULE("video_close() called on closed device?\n"); /* Dump statistics, but only if a reasonable amount of frames were processed (to prevent endless log-entries in case of snap-shot programs) */ if (pdev->vframe_count > 20) PWC_DEBUG_MODULE("Closing video device: %d frames received, dumped %d frames, %d frames with errors.\n", pdev->vframe_count, pdev->vframes_dumped, pdev->vframes_error); if (DEVICE_USE_CODEC1(pdev->type)) pwc_dec1_exit(); else pwc_dec23_exit(); pwc_isoc_cleanup(pdev); pwc_free_buffers(pdev); /* Turn off LEDS and power down camera, but only when not unplugged */ if (pdev->error_status != EPIPE) { /* Turn LEDs off */ if (pwc_set_leds(pdev, 0, 0) < 0) PWC_DEBUG_MODULE("Failed to set LED on/off time.\n"); if (power_save) { i = pwc_camera_power(pdev, 0); if (i < 0) PWC_ERROR("Failed to power down camera (%d)\n", i); } } pdev->vopen--; PWC_DEBUG_OPEN("<< video_close() vopen=%d\n", pdev->vopen); return 0; }
static int pwc_video_close(struct inode *inode, struct file *file) { struct video_device *vdev = file->private_data; struct pwc_device *pdev; int i; PWC_DEBUG_OPEN(">> video_close called(vdev = 0x%p).\n", vdev); pdev = (struct pwc_device *)vdev->priv; if (pdev->vopen == 0) PWC_DEBUG_MODULE("video_close() called on closed device?\n"); if (pdev->vframe_count > 20) PWC_DEBUG_MODULE("Closing video device: %d frames received, dumped %d frames, %d frames with errors.\n", pdev->vframe_count, pdev->vframes_dumped, pdev->vframes_error); if (DEVICE_USE_CODEC1(pdev->type)) pwc_dec1_exit(); else pwc_dec23_exit(); pwc_isoc_cleanup(pdev); pwc_free_buffers(pdev); if (pdev->error_status != EPIPE) { if (pwc_set_leds(pdev, 0, 0) < 0) PWC_DEBUG_MODULE("Failed to set LED on/off time.\n"); if (power_save) { i = pwc_camera_power(pdev, 0); if (i < 0) PWC_ERROR("Failed to power down camera (%d)\n", i); } } pdev->vopen--; PWC_DEBUG_OPEN("<< video_close() vopen=%d\n", pdev->vopen); return 0; }
idx: 137

target: 0
static FILE * open_sql_file_for_table ( const char * table , int flags ) { FILE * res ; char filename [ FN_REFLEN ] , tmp_path [ FN_REFLEN ] ; convert_dirname ( tmp_path , path , NullS ) ; res = my_fopen ( fn_format ( filename , table , tmp_path , ".sql" , 4 ) , flags , MYF ( MY_WME ) ) ; return res ; }
static FILE * open_sql_file_for_table ( const char * table , int flags ) { FILE * res ; char filename [ FN_REFLEN ] , tmp_path [ FN_REFLEN ] ; convert_dirname ( tmp_path , path , NullS ) ; res = my_fopen ( fn_format ( filename , table , tmp_path , ".sql" , 4 ) , flags , MYF ( MY_WME ) ) ; return res ; }
idx: 138

target: 0
static void flac_set_bps ( FLACContext * s ) { enum AVSampleFormat req = s -> avctx -> request_sample_fmt ; int need32 = s -> bps > 16 ; int want32 = av_get_bytes_per_sample ( req ) > 2 ; int planar = av_sample_fmt_is_planar ( req ) ; if ( need32 || want32 ) { if ( planar ) s -> avctx -> sample_fmt = AV_SAMPLE_FMT_S32P ; else s -> avctx -> sample_fmt = AV_SAMPLE_FMT_S32 ; s -> sample_shift = 32 - s -> bps ; } else { if ( planar ) s -> avctx -> sample_fmt = AV_SAMPLE_FMT_S16P ; else s -> avctx -> sample_fmt = AV_SAMPLE_FMT_S16 ; s -> sample_shift = 16 - s -> bps ; } }
static void flac_set_bps ( FLACContext * s ) { enum AVSampleFormat req = s -> avctx -> request_sample_fmt ; int need32 = s -> bps > 16 ; int want32 = av_get_bytes_per_sample ( req ) > 2 ; int planar = av_sample_fmt_is_planar ( req ) ; if ( need32 || want32 ) { if ( planar ) s -> avctx -> sample_fmt = AV_SAMPLE_FMT_S32P ; else s -> avctx -> sample_fmt = AV_SAMPLE_FMT_S32 ; s -> sample_shift = 32 - s -> bps ; } else { if ( planar ) s -> avctx -> sample_fmt = AV_SAMPLE_FMT_S16P ; else s -> avctx -> sample_fmt = AV_SAMPLE_FMT_S16 ; s -> sample_shift = 16 - s -> bps ; } }
idx: 140

target: 1
static av_cold int split_init(AVFilterContext *ctx) { SplitContext *s = ctx->priv; int i; for (i = 0; i < s->nb_outputs; i++) { char name[32]; AVFilterPad pad = { 0 }; snprintf(name, sizeof(name), "output%d", i); pad.type = ctx->filter->inputs[0].type; pad.name = av_strdup(name); if (!pad.name) return AVERROR(ENOMEM); ff_insert_outpad(ctx, i, &pad); } return 0; }
static av_cold int split_init(AVFilterContext *ctx) { SplitContext *s = ctx->priv; int i; for (i = 0; i < s->nb_outputs; i++) { char name[32]; AVFilterPad pad = { 0 }; snprintf(name, sizeof(name), "output%d", i); pad.type = ctx->filter->inputs[0].type; pad.name = av_strdup(name); if (!pad.name) return AVERROR(ENOMEM); ff_insert_outpad(ctx, i, &pad); } return 0; }
idx: 141

target: 1
static void usb_pwc_disconnect(struct usb_interface *intf) { struct pwc_device *pdev; int hint; lock_kernel(); pdev = usb_get_intfdata (intf); usb_set_intfdata (intf, NULL); if (pdev == NULL) { PWC_ERROR("pwc_disconnect() Called without private pointer.\n"); goto disconnect_out; } if (pdev->udev == NULL) { PWC_ERROR("pwc_disconnect() already called for %p\n", pdev); goto disconnect_out; } if (pdev->udev != interface_to_usbdev(intf)) { PWC_ERROR("pwc_disconnect() Woops: pointer mismatch udev/pdev.\n"); goto disconnect_out; } /* We got unplugged; this is signalled by an EPIPE error code */ if (pdev->vopen) { PWC_INFO("Disconnected while webcam is in use!\n"); pdev->error_status = EPIPE; } /* Alert waiting processes */ wake_up_interruptible(&pdev->frameq); /* Wait until device is closed */ while (pdev->vopen) schedule(); /* Device is now closed, so we can safely unregister it */ PWC_DEBUG_PROBE("Unregistering video device in disconnect().\n"); pwc_remove_sysfs_files(pdev->vdev); video_unregister_device(pdev->vdev); /* Free memory (don't set pdev to 0 just yet) */ kfree(pdev); disconnect_out: /* search device_hint[] table if we occupy a slot, by any chance */ for (hint = 0; hint < MAX_DEV_HINTS; hint++) if (device_hint[hint].pdev == pdev) device_hint[hint].pdev = NULL; unlock_kernel(); }
static void usb_pwc_disconnect(struct usb_interface *intf) { struct pwc_device *pdev; int hint; lock_kernel(); pdev = usb_get_intfdata (intf); usb_set_intfdata (intf, NULL); if (pdev == NULL) { PWC_ERROR("pwc_disconnect() Called without private pointer.\n"); goto disconnect_out; } if (pdev->udev == NULL) { PWC_ERROR("pwc_disconnect() already called for %p\n", pdev); goto disconnect_out; } if (pdev->udev != interface_to_usbdev(intf)) { PWC_ERROR("pwc_disconnect() Woops: pointer mismatch udev/pdev.\n"); goto disconnect_out; } if (pdev->vopen) { PWC_INFO("Disconnected while webcam is in use!\n"); pdev->error_status = EPIPE; } wake_up_interruptible(&pdev->frameq); while (pdev->vopen) schedule(); PWC_DEBUG_PROBE("Unregistering video device in disconnect().\n"); pwc_remove_sysfs_files(pdev->vdev); video_unregister_device(pdev->vdev); kfree(pdev); disconnect_out: for (hint = 0; hint < MAX_DEV_HINTS; hint++) if (device_hint[hint].pdev == pdev) device_hint[hint].pdev = NULL; unlock_kernel(); }
idx: 142

target: 0
static void ide_set_signature(IDEState *s) { s->select &= 0xf0; /* clear head */ /* put signature */ s->nsector = 1; s->sector = 1; if (s->drive_kind == IDE_CD) { s->lcyl = 0x14; s->hcyl = 0xeb; } else if (s->bs) { s->lcyl = 0; s->hcyl = 0; } else { s->lcyl = 0xff; s->hcyl = 0xff; } }
static void ide_set_signature(IDEState *s) { s->select &= 0xf0; s->nsector = 1; s->sector = 1; if (s->drive_kind == IDE_CD) { s->lcyl = 0x14; s->hcyl = 0xeb; } else if (s->bs) { s->lcyl = 0; s->hcyl = 0; } else { s->lcyl = 0xff; s->hcyl = 0xff; } }
idx: 144

target: 1
static int return_EIO(void) { return -EIO; }
static int return_EIO(void) { return -EIO; }
idx: 145

target: 1
kg_seal(minor_status, context_handle, conf_req_flag, qop_req, input_message_buffer, conf_state, output_message_buffer, toktype) OM_uint32 *minor_status; gss_ctx_id_t context_handle; int conf_req_flag; gss_qop_t qop_req; gss_buffer_t input_message_buffer; int *conf_state; gss_buffer_t output_message_buffer; int toktype; { krb5_gss_ctx_id_rec *ctx; krb5_error_code code; krb5_context context; output_message_buffer->length = 0; output_message_buffer->value = NULL; /* Only default qop or matching established cryptosystem is allowed. There are NO EXTENSIONS to this set for AES and friends! The new spec says "just use 0". The old spec plus extensions would actually allow for certain non-zero values. Fix this to handle them later. */ if (qop_req != 0) { *minor_status = (OM_uint32) G_UNKNOWN_QOP; return GSS_S_FAILURE; } ctx = (krb5_gss_ctx_id_rec *) context_handle; if (! ctx->established) { *minor_status = KG_CTX_INCOMPLETE; return(GSS_S_NO_CONTEXT); } context = ctx->k5_context; switch (ctx->proto) { case 0: code = make_seal_token_v1(context, ctx->enc, ctx->seq, &ctx->seq_send, ctx->initiate, input_message_buffer, output_message_buffer, ctx->signalg, ctx->cksum_size, ctx->sealalg, conf_req_flag, toktype, ctx->mech_used); break; case 1: code = gss_krb5int_make_seal_token_v3(context, ctx, input_message_buffer, output_message_buffer, conf_req_flag, toktype); break; default: code = G_UNKNOWN_QOP; /* XXX */ break; } if (code) { *minor_status = code; save_error_info(*minor_status, context); return(GSS_S_FAILURE); } if (conf_state) *conf_state = conf_req_flag; *minor_status = 0; return(GSS_S_COMPLETE); }
kg_seal(minor_status, context_handle, conf_req_flag, qop_req, input_message_buffer, conf_state, output_message_buffer, toktype) OM_uint32 *minor_status; gss_ctx_id_t context_handle; int conf_req_flag; gss_qop_t qop_req; gss_buffer_t input_message_buffer; int *conf_state; gss_buffer_t output_message_buffer; int toktype; { krb5_gss_ctx_id_rec *ctx; krb5_error_code code; krb5_context context; output_message_buffer->length = 0; output_message_buffer->value = NULL; if (qop_req != 0) { *minor_status = (OM_uint32) G_UNKNOWN_QOP; return GSS_S_FAILURE; } ctx = (krb5_gss_ctx_id_rec *) context_handle; if (! ctx->established) { *minor_status = KG_CTX_INCOMPLETE; return(GSS_S_NO_CONTEXT); } context = ctx->k5_context; switch (ctx->proto) { case 0: code = make_seal_token_v1(context, ctx->enc, ctx->seq, &ctx->seq_send, ctx->initiate, input_message_buffer, output_message_buffer, ctx->signalg, ctx->cksum_size, ctx->sealalg, conf_req_flag, toktype, ctx->mech_used); break; case 1: code = gss_krb5int_make_seal_token_v3(context, ctx, input_message_buffer, output_message_buffer, conf_req_flag, toktype); break; default: code = G_UNKNOWN_QOP; break; } if (code) { *minor_status = code; save_error_info(*minor_status, context); return(GSS_S_FAILURE); } if (conf_state) *conf_state = conf_req_flag; *minor_status = 0; return(GSS_S_COMPLETE); }
idx: 146

target: 0
static int find_allocation(BlockDriverState *bs, off_t start, off_t *data, off_t *hole) { BDRVGlusterState *s = bs->opaque; off_t offs; if (!s->supports_seek_data) { return -ENOTSUP; } /* * SEEK_DATA cases: * D1. offs == start: start is in data * D2. offs > start: start is in a hole, next data at offs * D3. offs < 0, errno = ENXIO: either start is in a trailing hole * or start is beyond EOF * If the latter happens, the file has been truncated behind * our back since we opened it. All bets are off then. * Treating like a trailing hole is simplest. * D4. offs < 0, errno != ENXIO: we learned nothing */ offs = glfs_lseek(s->fd, start, SEEK_DATA); if (offs < 0) { return -errno; /* D3 or D4 */ } assert(offs >= start); if (offs > start) { /* D2: in hole, next data at offs */ *hole = start; *data = offs; return 0; } /* D1: in data, end not yet known */ /* * SEEK_HOLE cases: * H1. offs == start: start is in a hole * If this happens here, a hole has been dug behind our back * since the previous lseek(). * H2. offs > start: either start is in data, next hole at offs, * or start is in trailing hole, EOF at offs * Linux treats trailing holes like any other hole: offs == * start. Solaris seeks to EOF instead: offs > start (blech). * If that happens here, a hole has been dug behind our back * since the previous lseek(). * H3. offs < 0, errno = ENXIO: start is beyond EOF * If this happens, the file has been truncated behind our * back since we opened it. Treat it like a trailing hole. * H4. offs < 0, errno != ENXIO: we learned nothing * Pretend we know nothing at all, i.e. "forget" about D1. */ offs = glfs_lseek(s->fd, start, SEEK_HOLE); if (offs < 0) { return -errno; /* D1 and (H3 or H4) */ } assert(offs >= start); if (offs > start) { /* * D1 and H2: either in data, next hole at offs, or it was in * data but is now in a trailing hole. In the latter case, * all bets are off. Treating it as if it there was data all * the way to EOF is safe, so simply do that. */ *data = start; *hole = offs; return 0; } /* D1 and H1 */ return -EBUSY; }
static int find_allocation(BlockDriverState *bs, off_t start, off_t *data, off_t *hole) { BDRVGlusterState *s = bs->opaque; off_t offs; if (!s->supports_seek_data) { return -ENOTSUP; } offs = glfs_lseek(s->fd, start, SEEK_DATA); if (offs < 0) { return -errno; } assert(offs >= start); if (offs > start) { *hole = start; *data = offs; return 0; } offs = glfs_lseek(s->fd, start, SEEK_HOLE); if (offs < 0) { return -errno; } assert(offs >= start); if (offs > start) { *data = start; *hole = offs; return 0; } return -EBUSY; }
idx: 150

target: 1
kg_seal_iov(OM_uint32 *minor_status, gss_ctx_id_t context_handle, int conf_req_flag, gss_qop_t qop_req, int *conf_state, gss_iov_buffer_desc *iov, int iov_count, int toktype) { krb5_gss_ctx_id_rec *ctx; krb5_error_code code; krb5_context context; if (qop_req != 0) { *minor_status = (OM_uint32)G_UNKNOWN_QOP; return GSS_S_FAILURE; } ctx = (krb5_gss_ctx_id_rec *)context_handle; if (!ctx->established) { *minor_status = KG_CTX_INCOMPLETE; return GSS_S_NO_CONTEXT; } if (conf_req_flag && kg_integ_only_iov(iov, iov_count)) { /* may be more sensible to return an error here */ conf_req_flag = FALSE; } context = ctx->k5_context; switch (ctx->proto) { case 0: code = make_seal_token_v1_iov(context, ctx, conf_req_flag, conf_state, iov, iov_count, toktype); break; case 1: code = gss_krb5int_make_seal_token_v3_iov(context, ctx, conf_req_flag, conf_state, iov, iov_count, toktype); break; default: code = G_UNKNOWN_QOP; break; } if (code != 0) { *minor_status = code; save_error_info(*minor_status, context); return GSS_S_FAILURE; } *minor_status = 0; return GSS_S_COMPLETE; }
kg_seal_iov(OM_uint32 *minor_status, gss_ctx_id_t context_handle, int conf_req_flag, gss_qop_t qop_req, int *conf_state, gss_iov_buffer_desc *iov, int iov_count, int toktype) { krb5_gss_ctx_id_rec *ctx; krb5_error_code code; krb5_context context; if (qop_req != 0) { *minor_status = (OM_uint32)G_UNKNOWN_QOP; return GSS_S_FAILURE; } ctx = (krb5_gss_ctx_id_rec *)context_handle; if (!ctx->established) { *minor_status = KG_CTX_INCOMPLETE; return GSS_S_NO_CONTEXT; } if (conf_req_flag && kg_integ_only_iov(iov, iov_count)) { conf_req_flag = FALSE; } context = ctx->k5_context; switch (ctx->proto) { case 0: code = make_seal_token_v1_iov(context, ctx, conf_req_flag, conf_state, iov, iov_count, toktype); break; case 1: code = gss_krb5int_make_seal_token_v3_iov(context, ctx, conf_req_flag, conf_state, iov, iov_count, toktype); break; default: code = G_UNKNOWN_QOP; break; } if (code != 0) { *minor_status = code; save_error_info(*minor_status, context); return GSS_S_FAILURE; } *minor_status = 0; return GSS_S_COMPLETE; }
idx: 151

target: 0
static void dissect_zcl_ota_imagepagereq ( tvbuff_t * tvb , proto_tree * tree , guint * offset ) { guint8 field_ctrl ; field_ctrl = dissect_zcl_ota_field_ctrl_field ( tvb , tree , offset ) ; proto_tree_add_item ( tree , hf_zbee_zcl_ota_manufacturer_code , tvb , * offset , 2 , ENC_LITTLE_ENDIAN ) ; * offset += 2 ; proto_tree_add_item ( tree , hf_zbee_zcl_ota_image_type , tvb , * offset , 2 , ENC_LITTLE_ENDIAN ) ; * offset += 2 ; dissect_zcl_ota_file_version_field ( tvb , tree , offset ) ; proto_tree_add_item ( tree , hf_zbee_zcl_ota_file_offset , tvb , * offset , 4 , ENC_LITTLE_ENDIAN ) ; * offset += 4 ; proto_tree_add_item ( tree , hf_zbee_zcl_ota_max_data_size , tvb , * offset , 1 , ENC_NA ) ; * offset += 1 ; proto_tree_add_item ( tree , hf_zbee_zcl_ota_page_size , tvb , * offset , 2 , ENC_LITTLE_ENDIAN ) ; * offset += 2 ; proto_tree_add_item ( tree , hf_zbee_zcl_ota_rsp_spacing , tvb , * offset , 2 , ENC_LITTLE_ENDIAN ) ; * offset += 2 ; if ( field_ctrl & ZBEE_ZCL_OTA_FIELD_CTRL_IEEE_ADDR_PRESENT ) { proto_tree_add_item ( tree , hf_zbee_zcl_ota_req_node_addr , tvb , * offset , 8 , ENC_LITTLE_ENDIAN ) ; * offset += 8 ; } }
static void dissect_zcl_ota_imagepagereq ( tvbuff_t * tvb , proto_tree * tree , guint * offset ) { guint8 field_ctrl ; field_ctrl = dissect_zcl_ota_field_ctrl_field ( tvb , tree , offset ) ; proto_tree_add_item ( tree , hf_zbee_zcl_ota_manufacturer_code , tvb , * offset , 2 , ENC_LITTLE_ENDIAN ) ; * offset += 2 ; proto_tree_add_item ( tree , hf_zbee_zcl_ota_image_type , tvb , * offset , 2 , ENC_LITTLE_ENDIAN ) ; * offset += 2 ; dissect_zcl_ota_file_version_field ( tvb , tree , offset ) ; proto_tree_add_item ( tree , hf_zbee_zcl_ota_file_offset , tvb , * offset , 4 , ENC_LITTLE_ENDIAN ) ; * offset += 4 ; proto_tree_add_item ( tree , hf_zbee_zcl_ota_max_data_size , tvb , * offset , 1 , ENC_NA ) ; * offset += 1 ; proto_tree_add_item ( tree , hf_zbee_zcl_ota_page_size , tvb , * offset , 2 , ENC_LITTLE_ENDIAN ) ; * offset += 2 ; proto_tree_add_item ( tree , hf_zbee_zcl_ota_rsp_spacing , tvb , * offset , 2 , ENC_LITTLE_ENDIAN ) ; * offset += 2 ; if ( field_ctrl & ZBEE_ZCL_OTA_FIELD_CTRL_IEEE_ADDR_PRESENT ) { proto_tree_add_item ( tree , hf_zbee_zcl_ota_req_node_addr , tvb , * offset , 8 , ENC_LITTLE_ENDIAN ) ; * offset += 8 ; } }
idx: 152

target: 1
static int em_sysexit(struct x86_emulate_ctxt *ctxt) { const struct x86_emulate_ops *ops = ctxt->ops; struct desc_struct cs, ss; u64 msr_data; int usermode; u16 cs_sel = 0, ss_sel = 0; /* inject #GP if in real mode or Virtual 8086 mode */ if (ctxt->mode == X86EMUL_MODE_REAL || ctxt->mode == X86EMUL_MODE_VM86) return emulate_gp(ctxt, 0); setup_syscalls_segments(ctxt, &cs, &ss); if ((ctxt->rex_prefix & 0x8) != 0x0) usermode = X86EMUL_MODE_PROT64; else usermode = X86EMUL_MODE_PROT32; cs.dpl = 3; ss.dpl = 3; ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data); switch (usermode) { case X86EMUL_MODE_PROT32: cs_sel = (u16)(msr_data + 16); if ((msr_data & 0xfffc) == 0x0) return emulate_gp(ctxt, 0); ss_sel = (u16)(msr_data + 24); break; case X86EMUL_MODE_PROT64: cs_sel = (u16)(msr_data + 32); if (msr_data == 0x0) return emulate_gp(ctxt, 0); ss_sel = cs_sel + 8; cs.d = 0; cs.l = 1; break; } cs_sel |= SELECTOR_RPL_MASK; ss_sel |= SELECTOR_RPL_MASK; ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS); ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); ctxt->_eip = reg_read(ctxt, VCPU_REGS_RDX); *reg_write(ctxt, VCPU_REGS_RSP) = reg_read(ctxt, VCPU_REGS_RCX); return X86EMUL_CONTINUE; }
static int em_sysexit(struct x86_emulate_ctxt *ctxt) { const struct x86_emulate_ops *ops = ctxt->ops; struct desc_struct cs, ss; u64 msr_data; int usermode; u16 cs_sel = 0, ss_sel = 0; if (ctxt->mode == X86EMUL_MODE_REAL || ctxt->mode == X86EMUL_MODE_VM86) return emulate_gp(ctxt, 0); setup_syscalls_segments(ctxt, &cs, &ss); if ((ctxt->rex_prefix & 0x8) != 0x0) usermode = X86EMUL_MODE_PROT64; else usermode = X86EMUL_MODE_PROT32; cs.dpl = 3; ss.dpl = 3; ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data); switch (usermode) { case X86EMUL_MODE_PROT32: cs_sel = (u16)(msr_data + 16); if ((msr_data & 0xfffc) == 0x0) return emulate_gp(ctxt, 0); ss_sel = (u16)(msr_data + 24); break; case X86EMUL_MODE_PROT64: cs_sel = (u16)(msr_data + 32); if (msr_data == 0x0) return emulate_gp(ctxt, 0); ss_sel = cs_sel + 8; cs.d = 0; cs.l = 1; break; } cs_sel |= SELECTOR_RPL_MASK; ss_sel |= SELECTOR_RPL_MASK; ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS); ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); ctxt->_eip = reg_read(ctxt, VCPU_REGS_RDX); *reg_write(ctxt, VCPU_REGS_RSP) = reg_read(ctxt, VCPU_REGS_RCX); return X86EMUL_CONTINUE; }
idx: 154

target: 0
static void test_validate_struct_nested(TestInputVisitorData *data, const void *unused) { UserDefTwo *udp = NULL; Visitor *v; v = validate_test_init(data, "{ 'string0': 'string0', " "'dict1': { 'string1': 'string1', " "'dict2': { 'userdef': { 'integer': 42, " "'string': 'string' }, 'string': 'string2'}}}"); visit_type_UserDefTwo(v, NULL, &udp, &error_abort); qapi_free_UserDefTwo(udp); }
static void test_validate_struct_nested(TestInputVisitorData *data, const void *unused) { UserDefTwo *udp = NULL; Visitor *v; v = validate_test_init(data, "{ 'string0': 'string0', " "'dict1': { 'string1': 'string1', " "'dict2': { 'userdef': { 'integer': 42, " "'string': 'string' }, 'string': 'string2'}}}"); visit_type_UserDefTwo(v, NULL, &udp, &error_abort); qapi_free_UserDefTwo(udp); }
idx: 155

target: 0
static Image * ReadLABELImage ( const ImageInfo * image_info , ExceptionInfo * exception ) { char geometry [ MaxTextExtent ] , * property ; const char * label ; DrawInfo * draw_info ; Image * image ; MagickBooleanType status ; TypeMetric metrics ; size_t height , width ; assert ( image_info != ( const ImageInfo * ) NULL ) ; assert ( image_info -> signature == MagickSignature ) ; if ( image_info -> debug != MagickFalse ) ( void ) LogMagickEvent ( TraceEvent , GetMagickModule ( ) , "%s" , image_info -> filename ) ; assert ( exception != ( ExceptionInfo * ) NULL ) ; assert ( exception -> signature == MagickSignature ) ; image = AcquireImage ( image_info ) ; ( void ) ResetImagePage ( image , "0x0+0+0" ) ; property = InterpretImageProperties ( image_info , image , image_info -> filename ) ; ( void ) SetImageProperty ( image , "label" , property ) ; property = DestroyString ( property ) ; label = GetImageProperty ( image , "label" ) ; draw_info = CloneDrawInfo ( image_info , ( DrawInfo * ) NULL ) ; draw_info -> text = ConstantString ( label ) ; metrics . width = 0 ; metrics . ascent = 0.0 ; status = GetMultilineTypeMetrics ( image , draw_info , & metrics ) ; if ( ( image -> columns == 0 ) && ( image -> rows == 0 ) ) { image -> columns = ( size_t ) floor ( metrics . width + draw_info -> stroke_width + 0.5 ) ; image -> rows = ( size_t ) floor ( metrics . height + draw_info -> stroke_width + 0.5 ) ; } else if ( ( strlen ( label ) > 0 ) && ( ( ( image -> columns == 0 ) || ( image -> rows == 0 ) ) || ( fabs ( image_info -> pointsize ) < MagickEpsilon ) ) ) { double high , low ; for ( ; ; draw_info -> pointsize *= 2.0 ) { ( void ) FormatLocaleString ( geometry , MaxTextExtent , "%+g%+g" , - metrics . bounds . x1 , metrics . ascent ) ; if ( draw_info -> gravity == UndefinedGravity ) ( void ) CloneString ( & draw_info -> geometry , geometry ) ; ( void ) GetMultilineTypeMetrics ( image , draw_info , & metrics ) ; width = ( size_t ) floor ( metrics . width + draw_info -> stroke_width + 0.5 ) ; height = ( size_t ) floor ( metrics . height + draw_info -> stroke_width + 0.5 ) ; if ( ( image -> columns != 0 ) && ( image -> rows != 0 ) ) { if ( ( width >= image -> columns ) && ( height >= image -> rows ) ) break ; } else if ( ( ( image -> columns != 0 ) && ( width >= image -> columns ) ) || ( ( image -> rows != 0 ) && ( height >= image -> rows ) ) ) break ; } high = draw_info -> pointsize ; for ( low = 1.0 ; ( high - low ) > 0.5 ; ) { draw_info -> pointsize = ( low + high ) / 2.0 ; ( void ) FormatLocaleString ( geometry , MaxTextExtent , "%+g%+g" , - metrics . bounds . x1 , metrics . ascent ) ; if ( draw_info -> gravity == UndefinedGravity ) ( void ) CloneString ( & draw_info -> geometry , geometry ) ; ( void ) GetMultilineTypeMetrics ( image , draw_info , & metrics ) ; width = ( size_t ) floor ( metrics . width + draw_info -> stroke_width + 0.5 ) ; height = ( size_t ) floor ( metrics . height + draw_info -> stroke_width + 0.5 ) ; if ( ( image -> columns != 0 ) && ( image -> rows != 0 ) ) { if ( ( width < image -> columns ) && ( height < image -> rows ) ) low = draw_info -> pointsize + 0.5 ; else high = draw_info -> pointsize - 0.5 ; } else if ( ( ( image -> columns != 0 ) && ( width < image -> columns ) ) || ( ( image -> rows != 0 ) && ( height < image -> rows ) ) ) low = draw_info -> pointsize + 0.5 ; else high = draw_info -> pointsize - 0.5 ; } draw_info -> pointsize = ( low + high ) / 2.0 - 0.5 ; } status = GetMultilineTypeMetrics ( image , draw_info , & metrics ) ; if ( status == MagickFalse ) { draw_info = DestroyDrawInfo ( draw_info ) ; InheritException ( exception , & image -> exception ) ; image = DestroyImageList ( image ) ; return ( ( Image * ) NULL ) ; } if ( image -> columns == 0 ) image -> columns = ( size_t ) floor ( metrics . width + draw_info -> stroke_width + 0.5 ) ; if ( image -> columns == 0 ) image -> columns = ( size_t ) floor ( draw_info -> pointsize + draw_info -> stroke_width + 0.5 ) ; if ( image -> rows == 0 ) image -> rows = ( size_t ) floor ( metrics . ascent - metrics . descent + draw_info -> stroke_width + 0.5 ) ; if ( image -> rows == 0 ) image -> rows = ( size_t ) floor ( draw_info -> pointsize + draw_info -> stroke_width + 0.5 ) ; status = SetImageExtent ( image , image -> columns , image -> rows ) ; if ( status == MagickFalse ) { draw_info = DestroyDrawInfo ( draw_info ) ; InheritException ( exception , & image -> exception ) ; return ( DestroyImageList ( image ) ) ; } if ( SetImageBackgroundColor ( image ) == MagickFalse ) { draw_info = DestroyDrawInfo ( draw_info ) ; InheritException ( exception , & image -> exception ) ; image = DestroyImageList ( image ) ; return ( ( Image * ) NULL ) ; } ( void ) FormatLocaleString ( geometry , MaxTextExtent , "%+g%+g" , draw_info -> direction == RightToLeftDirection ? image -> columns - metrics . bounds . x2 : 0.0 , draw_info -> gravity == UndefinedGravity ? metrics . ascent : 0.0 ) ; ( void ) CloneString ( & draw_info -> geometry , geometry ) ; status = AnnotateImage ( image , draw_info ) ; if ( image_info -> pointsize == 0.0 ) { char pointsize [ MaxTextExtent ] ; ( void ) FormatLocaleString ( pointsize , MaxTextExtent , "%.20g" , draw_info -> pointsize ) ; ( void ) SetImageProperty ( image , "label:pointsize" , pointsize ) ; } draw_info = DestroyDrawInfo ( draw_info ) ; if ( status == MagickFalse ) { image = DestroyImageList ( image ) ; return ( ( Image * ) NULL ) ; } return ( GetFirstImageInList ( image ) ) ; }
static Image * ReadLABELImage ( const ImageInfo * image_info , ExceptionInfo * exception ) { char geometry [ MaxTextExtent ] , * property ; const char * label ; DrawInfo * draw_info ; Image * image ; MagickBooleanType status ; TypeMetric metrics ; size_t height , width ; assert ( image_info != ( const ImageInfo * ) NULL ) ; assert ( image_info -> signature == MagickSignature ) ; if ( image_info -> debug != MagickFalse ) ( void ) LogMagickEvent ( TraceEvent , GetMagickModule ( ) , "%s" , image_info -> filename ) ; assert ( exception != ( ExceptionInfo * ) NULL ) ; assert ( exception -> signature == MagickSignature ) ; image = AcquireImage ( image_info ) ; ( void ) ResetImagePage ( image , "0x0+0+0" ) ; property = InterpretImageProperties ( image_info , image , image_info -> filename ) ; ( void ) SetImageProperty ( image , "label" , property ) ; property = DestroyString ( property ) ; label = GetImageProperty ( image , "label" ) ; draw_info = CloneDrawInfo ( image_info , ( DrawInfo * ) NULL ) ; draw_info -> text = ConstantString ( label ) ; metrics . width = 0 ; metrics . ascent = 0.0 ; status = GetMultilineTypeMetrics ( image , draw_info , & metrics ) ; if ( ( image -> columns == 0 ) && ( image -> rows == 0 ) ) { image -> columns = ( size_t ) floor ( metrics . width + draw_info -> stroke_width + 0.5 ) ; image -> rows = ( size_t ) floor ( metrics . height + draw_info -> stroke_width + 0.5 ) ; } else if ( ( strlen ( label ) > 0 ) && ( ( ( image -> columns == 0 ) || ( image -> rows == 0 ) ) || ( fabs ( image_info -> pointsize ) < MagickEpsilon ) ) ) { double high , low ; for ( ; ; draw_info -> pointsize *= 2.0 ) { ( void ) FormatLocaleString ( geometry , MaxTextExtent , "%+g%+g" , - metrics . bounds . x1 , metrics . ascent ) ; if ( draw_info -> gravity == UndefinedGravity ) ( void ) CloneString ( & draw_info -> geometry , geometry ) ; ( void ) GetMultilineTypeMetrics ( image , draw_info , & metrics ) ; width = ( size_t ) floor ( metrics . width + draw_info -> stroke_width + 0.5 ) ; height = ( size_t ) floor ( metrics . height + draw_info -> stroke_width + 0.5 ) ; if ( ( image -> columns != 0 ) && ( image -> rows != 0 ) ) { if ( ( width >= image -> columns ) && ( height >= image -> rows ) ) break ; } else if ( ( ( image -> columns != 0 ) && ( width >= image -> columns ) ) || ( ( image -> rows != 0 ) && ( height >= image -> rows ) ) ) break ; } high = draw_info -> pointsize ; for ( low = 1.0 ; ( high - low ) > 0.5 ; ) { draw_info -> pointsize = ( low + high ) / 2.0 ; ( void ) FormatLocaleString ( geometry , MaxTextExtent , "%+g%+g" , - metrics . bounds . x1 , metrics . ascent ) ; if ( draw_info -> gravity == UndefinedGravity ) ( void ) CloneString ( & draw_info -> geometry , geometry ) ; ( void ) GetMultilineTypeMetrics ( image , draw_info , & metrics ) ; width = ( size_t ) floor ( metrics . width + draw_info -> stroke_width + 0.5 ) ; height = ( size_t ) floor ( metrics . height + draw_info -> stroke_width + 0.5 ) ; if ( ( image -> columns != 0 ) && ( image -> rows != 0 ) ) { if ( ( width < image -> columns ) && ( height < image -> rows ) ) low = draw_info -> pointsize + 0.5 ; else high = draw_info -> pointsize - 0.5 ; } else if ( ( ( image -> columns != 0 ) && ( width < image -> columns ) ) || ( ( image -> rows != 0 ) && ( height < image -> rows ) ) ) low = draw_info -> pointsize + 0.5 ; else high = draw_info -> pointsize - 0.5 ; } draw_info -> pointsize = ( low + high ) / 2.0 - 0.5 ; } status = GetMultilineTypeMetrics ( image , draw_info , & metrics ) ; if ( status == MagickFalse ) { draw_info = DestroyDrawInfo ( draw_info ) ; InheritException ( exception , & image -> exception ) ; image = DestroyImageList ( image ) ; return ( ( Image * ) NULL ) ; } if ( image -> columns == 0 ) image -> columns = ( size_t ) floor ( metrics . width + draw_info -> stroke_width + 0.5 ) ; if ( image -> columns == 0 ) image -> columns = ( size_t ) floor ( draw_info -> pointsize + draw_info -> stroke_width + 0.5 ) ; if ( image -> rows == 0 ) image -> rows = ( size_t ) floor ( metrics . ascent - metrics . descent + draw_info -> stroke_width + 0.5 ) ; if ( image -> rows == 0 ) image -> rows = ( size_t ) floor ( draw_info -> pointsize + draw_info -> stroke_width + 0.5 ) ; status = SetImageExtent ( image , image -> columns , image -> rows ) ; if ( status == MagickFalse ) { draw_info = DestroyDrawInfo ( draw_info ) ; InheritException ( exception , & image -> exception ) ; return ( DestroyImageList ( image ) ) ; } if ( SetImageBackgroundColor ( image ) == MagickFalse ) { draw_info = DestroyDrawInfo ( draw_info ) ; InheritException ( exception , & image -> exception ) ; image = DestroyImageList ( image ) ; return ( ( Image * ) NULL ) ; } ( void ) FormatLocaleString ( geometry , MaxTextExtent , "%+g%+g" , draw_info -> direction == RightToLeftDirection ? image -> columns - metrics . bounds . x2 : 0.0 , draw_info -> gravity == UndefinedGravity ? metrics . ascent : 0.0 ) ; ( void ) CloneString ( & draw_info -> geometry , geometry ) ; status = AnnotateImage ( image , draw_info ) ; if ( image_info -> pointsize == 0.0 ) { char pointsize [ MaxTextExtent ] ; ( void ) FormatLocaleString ( pointsize , MaxTextExtent , "%.20g" , draw_info -> pointsize ) ; ( void ) SetImageProperty ( image , "label:pointsize" , pointsize ) ; } draw_info = DestroyDrawInfo ( draw_info ) ; if ( status == MagickFalse ) { image = DestroyImageList ( image ) ; return ( ( Image * ) NULL ) ; } return ( GetFirstImageInList ( image ) ) ; }
idx: 156

target: 0
void rfbSendBell ( rfbScreenInfoPtr rfbScreen ) { rfbClientIteratorPtr i ; rfbClientPtr cl ; rfbBellMsg b ; i = rfbGetClientIterator ( rfbScreen ) ; while ( ( cl = rfbClientIteratorNext ( i ) ) ) { b . type = rfbBell ; LOCK ( cl -> sendMutex ) ; if ( rfbWriteExact ( cl , ( char * ) & b , sz_rfbBellMsg ) < 0 ) { rfbLogPerror ( "rfbSendBell: write" ) ; rfbCloseClient ( cl ) ; } UNLOCK ( cl -> sendMutex ) ; } rfbStatRecordMessageSent ( cl , rfbBell , sz_rfbBellMsg , sz_rfbBellMsg ) ; rfbReleaseClientIterator ( i ) ; }
void rfbSendBell ( rfbScreenInfoPtr rfbScreen ) { rfbClientIteratorPtr i ; rfbClientPtr cl ; rfbBellMsg b ; i = rfbGetClientIterator ( rfbScreen ) ; while ( ( cl = rfbClientIteratorNext ( i ) ) ) { b . type = rfbBell ; LOCK ( cl -> sendMutex ) ; if ( rfbWriteExact ( cl , ( char * ) & b , sz_rfbBellMsg ) < 0 ) { rfbLogPerror ( "rfbSendBell: write" ) ; rfbCloseClient ( cl ) ; } UNLOCK ( cl -> sendMutex ) ; } rfbStatRecordMessageSent ( cl , rfbBell , sz_rfbBellMsg , sz_rfbBellMsg ) ; rfbReleaseClientIterator ( i ) ; }
idx: 157

target: 1
kg_unseal(minor_status, context_handle, input_token_buffer, message_buffer, conf_state, qop_state, toktype) OM_uint32 *minor_status; gss_ctx_id_t context_handle; gss_buffer_t input_token_buffer; gss_buffer_t message_buffer; int *conf_state; gss_qop_t *qop_state; int toktype; { krb5_gss_ctx_id_rec *ctx; unsigned char *ptr; unsigned int bodysize; int err; int toktype2; int vfyflags = 0; OM_uint32 ret; ctx = (krb5_gss_ctx_id_rec *) context_handle; if (! ctx->established) { *minor_status = KG_CTX_INCOMPLETE; return(GSS_S_NO_CONTEXT); } /* parse the token, leave the data in message_buffer, setting conf_state */ /* verify the header */ ptr = (unsigned char *) input_token_buffer->value; err = g_verify_token_header(ctx->mech_used, &bodysize, &ptr, -1, input_token_buffer->length, vfyflags); if (err) { *minor_status = err; return GSS_S_DEFECTIVE_TOKEN; } if (bodysize < 2) { *minor_status = (OM_uint32)G_BAD_TOK_HEADER; return GSS_S_DEFECTIVE_TOKEN; } toktype2 = load_16_be(ptr); ptr += 2; bodysize -= 2; switch (toktype2) { case KG2_TOK_MIC_MSG: case KG2_TOK_WRAP_MSG: case KG2_TOK_DEL_CTX: ret = gss_krb5int_unseal_token_v3(&ctx->k5_context, minor_status, ctx, ptr, bodysize, message_buffer, conf_state, qop_state, toktype); break; case KG_TOK_MIC_MSG: case KG_TOK_WRAP_MSG: case KG_TOK_DEL_CTX: ret = kg_unseal_v1(ctx->k5_context, minor_status, ctx, ptr, bodysize, message_buffer, conf_state, qop_state, toktype); break; default: *minor_status = (OM_uint32)G_BAD_TOK_HEADER; ret = GSS_S_DEFECTIVE_TOKEN; break; } if (ret != 0) save_error_info (*minor_status, ctx->k5_context); return ret; }
kg_unseal(minor_status, context_handle, input_token_buffer, message_buffer, conf_state, qop_state, toktype) OM_uint32 *minor_status; gss_ctx_id_t context_handle; gss_buffer_t input_token_buffer; gss_buffer_t message_buffer; int *conf_state; gss_qop_t *qop_state; int toktype; { krb5_gss_ctx_id_rec *ctx; unsigned char *ptr; unsigned int bodysize; int err; int toktype2; int vfyflags = 0; OM_uint32 ret; ctx = (krb5_gss_ctx_id_rec *) context_handle; if (! ctx->established) { *minor_status = KG_CTX_INCOMPLETE; return(GSS_S_NO_CONTEXT); } ptr = (unsigned char *) input_token_buffer->value; err = g_verify_token_header(ctx->mech_used, &bodysize, &ptr, -1, input_token_buffer->length, vfyflags); if (err) { *minor_status = err; return GSS_S_DEFECTIVE_TOKEN; } if (bodysize < 2) { *minor_status = (OM_uint32)G_BAD_TOK_HEADER; return GSS_S_DEFECTIVE_TOKEN; } toktype2 = load_16_be(ptr); ptr += 2; bodysize -= 2; switch (toktype2) { case KG2_TOK_MIC_MSG: case KG2_TOK_WRAP_MSG: case KG2_TOK_DEL_CTX: ret = gss_krb5int_unseal_token_v3(&ctx->k5_context, minor_status, ctx, ptr, bodysize, message_buffer, conf_state, qop_state, toktype); break; case KG_TOK_MIC_MSG: case KG_TOK_WRAP_MSG: case KG_TOK_DEL_CTX: ret = kg_unseal_v1(ctx->k5_context, minor_status, ctx, ptr, bodysize, message_buffer, conf_state, qop_state, toktype); break; default: *minor_status = (OM_uint32)G_BAD_TOK_HEADER; ret = GSS_S_DEFECTIVE_TOKEN; break; } if (ret != 0) save_error_info (*minor_status, ctx->k5_context); return ret; }
idx: 158

target: 1
static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel) { assign_eip_near(ctxt, ctxt->_eip + rel); }
static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel) { assign_eip_near(ctxt, ctxt->_eip + rel); }
idx: 160

target: 0
TEST_F ( ProfileInfoCacheTest , CreateSupervisedTestingProfile ) { testing_profile_manager_ . CreateTestingProfile ( "default" ) ; base : : string16 supervised_user_name = ASCIIToUTF16 ( "Supervised User" ) ; testing_profile_manager_ . CreateTestingProfile ( "test1" , scoped_ptr < PrefServiceSyncable > ( ) , supervised_user_name , 0 , "TEST_ID" , TestingProfile : : TestingFactories ( ) ) ; for ( size_t i = 0 ; i < GetCache ( ) -> GetNumberOfProfiles ( ) ; i ++ ) { bool is_supervised = GetCache ( ) -> GetNameOfProfileAtIndex ( i ) == supervised_user_name ; EXPECT_EQ ( is_supervised , GetCache ( ) -> ProfileIsSupervisedAtIndex ( i ) ) ; std : : string supervised_user_id = is_supervised ? "TEST_ID" : "" ; EXPECT_EQ ( supervised_user_id , GetCache ( ) -> GetSupervisedUserIdOfProfileAtIndex ( i ) ) ; } TestingBrowserProcess : : GetGlobal ( ) -> SetProfileManager ( NULL ) ; }
TEST_F ( ProfileInfoCacheTest , CreateSupervisedTestingProfile ) { testing_profile_manager_ . CreateTestingProfile ( "default" ) ; base : : string16 supervised_user_name = ASCIIToUTF16 ( "Supervised User" ) ; testing_profile_manager_ . CreateTestingProfile ( "test1" , scoped_ptr < PrefServiceSyncable > ( ) , supervised_user_name , 0 , "TEST_ID" , TestingProfile : : TestingFactories ( ) ) ; for ( size_t i = 0 ; i < GetCache ( ) -> GetNumberOfProfiles ( ) ; i ++ ) { bool is_supervised = GetCache ( ) -> GetNameOfProfileAtIndex ( i ) == supervised_user_name ; EXPECT_EQ ( is_supervised , GetCache ( ) -> ProfileIsSupervisedAtIndex ( i ) ) ; std : : string supervised_user_id = is_supervised ? "TEST_ID" : "" ; EXPECT_EQ ( supervised_user_id , GetCache ( ) -> GetSupervisedUserIdOfProfileAtIndex ( i ) ) ; } TestingBrowserProcess : : GetGlobal ( ) -> SetProfileManager ( NULL ) ; }
idx: 162

target: 1
int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, struct ieee80211_rx_stats *rx_stats) { struct net_device *dev = ieee->dev; struct ieee80211_hdr_4addr *hdr; size_t hdrlen; u16 fc, type, stype, sc; struct net_device_stats *stats; unsigned int frag; u8 *payload; u16 ethertype; #ifdef NOT_YET struct net_device *wds = NULL; struct sk_buff *skb2 = NULL; struct net_device *wds = NULL; int frame_authorized = 0; int from_assoc_ap = 0; void *sta = NULL; #endif u8 dst[ETH_ALEN]; u8 src[ETH_ALEN]; struct ieee80211_crypt_data *crypt = NULL; int keyidx = 0; int can_be_decrypted = 0; hdr = (struct ieee80211_hdr_4addr *)skb->data; stats = &ieee->stats; if (skb->len < 10) { printk(KERN_INFO "%s: SKB length < 10\n", dev->name); goto rx_dropped; } fc = le16_to_cpu(hdr->frame_ctl); type = WLAN_FC_GET_TYPE(fc); stype = WLAN_FC_GET_STYPE(fc); sc = le16_to_cpu(hdr->seq_ctl); frag = WLAN_GET_SEQ_FRAG(sc); hdrlen = ieee80211_get_hdrlen(fc); /* Put this code here so that we avoid duplicating it in all * Rx paths. - Jean II */ #ifdef CONFIG_WIRELESS_EXT #ifdef IW_WIRELESS_SPY /* defined in iw_handler.h */ /* If spy monitoring on */ if (ieee->spy_data.spy_number > 0) { struct iw_quality wstats; wstats.updated = 0; if (rx_stats->mask & IEEE80211_STATMASK_RSSI) { wstats.level = rx_stats->rssi; wstats.updated |= IW_QUAL_LEVEL_UPDATED; } else wstats.updated |= IW_QUAL_LEVEL_INVALID; if (rx_stats->mask & IEEE80211_STATMASK_NOISE) { wstats.noise = rx_stats->noise; wstats.updated |= IW_QUAL_NOISE_UPDATED; } else wstats.updated |= IW_QUAL_NOISE_INVALID; if (rx_stats->mask & IEEE80211_STATMASK_SIGNAL) { wstats.qual = rx_stats->signal; wstats.updated |= IW_QUAL_QUAL_UPDATED; } else wstats.updated |= IW_QUAL_QUAL_INVALID; /* Update spy records */ wireless_spy_update(ieee->dev, hdr->addr2, &wstats); } #endif /* IW_WIRELESS_SPY */ #endif /* CONFIG_WIRELESS_EXT */ #ifdef NOT_YET hostap_update_rx_stats(local->ap, hdr, rx_stats); #endif if (ieee->iw_mode == IW_MODE_MONITOR) { stats->rx_packets++; stats->rx_bytes += skb->len; ieee80211_monitor_rx(ieee, skb, rx_stats); return 1; } can_be_decrypted = (is_multicast_ether_addr(hdr->addr1) || is_broadcast_ether_addr(hdr->addr2)) ? ieee->host_mc_decrypt : ieee->host_decrypt; if (can_be_decrypted) { if (skb->len >= hdrlen + 3) { /* Top two-bits of byte 3 are the key index */ keyidx = skb->data[hdrlen + 3] >> 6; } /* ieee->crypt[] is WEP_KEY (4) in length. Given that keyidx * is only allowed 2-bits of storage, no value of keyidx can * be provided via above code that would result in keyidx * being out of range */ crypt = ieee->crypt[keyidx]; #ifdef NOT_YET sta = NULL; /* Use station specific key to override default keys if the * receiver address is a unicast address ("individual RA"). If * bcrx_sta_key parameter is set, station specific key is used * even with broad/multicast targets (this is against IEEE * 802.11, but makes it easier to use different keys with * stations that do not support WEP key mapping). */ if (!(hdr->addr1[0] & 0x01) || local->bcrx_sta_key) (void)hostap_handle_sta_crypto(local, hdr, &crypt, &sta); #endif /* allow NULL decrypt to indicate an station specific override * for default encryption */ if (crypt && (crypt->ops == NULL || crypt->ops->decrypt_mpdu == NULL)) crypt = NULL; if (!crypt && (fc & IEEE80211_FCTL_PROTECTED)) { /* This seems to be triggered by some (multicast?) * frames from other than current BSS, so just drop the * frames silently instead of filling system log with * these reports. */ IEEE80211_DEBUG_DROP("Decryption failed (not set)" " (SA=" MAC_FMT ")\n", MAC_ARG(hdr->addr2)); ieee->ieee_stats.rx_discards_undecryptable++; goto rx_dropped; } } #ifdef NOT_YET if (type != WLAN_FC_TYPE_DATA) { if (type == WLAN_FC_TYPE_MGMT && stype == WLAN_FC_STYPE_AUTH && fc & IEEE80211_FCTL_PROTECTED && ieee->host_decrypt && (keyidx = hostap_rx_frame_decrypt(ieee, skb, crypt)) < 0) { printk(KERN_DEBUG "%s: failed to decrypt mgmt::auth " "from " MAC_FMT "\n", dev->name, MAC_ARG(hdr->addr2)); /* TODO: could inform hostapd about this so that it * could send auth failure report */ goto rx_dropped; } if (ieee80211_rx_frame_mgmt(ieee, skb, rx_stats, type, stype)) goto rx_dropped; else goto rx_exit; } #endif /* drop duplicate 802.11 retransmissions (IEEE 802.11 Chap. 9.29) */ if (sc == ieee->prev_seq_ctl) goto rx_dropped; else ieee->prev_seq_ctl = sc; /* Data frame - extract src/dst addresses */ if (skb->len < IEEE80211_3ADDR_LEN) goto rx_dropped; switch (fc & (IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS)) { case IEEE80211_FCTL_FROMDS: memcpy(dst, hdr->addr1, ETH_ALEN); memcpy(src, hdr->addr3, ETH_ALEN); break; case IEEE80211_FCTL_TODS: memcpy(dst, hdr->addr3, ETH_ALEN); memcpy(src, hdr->addr2, ETH_ALEN); break; case IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS: if (skb->len < IEEE80211_4ADDR_LEN) goto rx_dropped; memcpy(dst, hdr->addr3, ETH_ALEN); memcpy(src, hdr->addr4, ETH_ALEN); break; case 0: memcpy(dst, hdr->addr1, ETH_ALEN); memcpy(src, hdr->addr2, ETH_ALEN); break; } #ifdef NOT_YET if (hostap_rx_frame_wds(ieee, hdr, fc, &wds)) goto rx_dropped; if (wds) { skb->dev = dev = wds; stats = hostap_get_stats(dev); } if (ieee->iw_mode == IW_MODE_MASTER && !wds && (fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) == IEEE80211_FCTL_FROMDS && ieee->stadev && !compare_ether_addr(hdr->addr2, ieee->assoc_ap_addr)) { /* Frame from BSSID of the AP for which we are a client */ skb->dev = dev = ieee->stadev; stats = hostap_get_stats(dev); from_assoc_ap = 1; } #endif dev->last_rx = jiffies; #ifdef NOT_YET if ((ieee->iw_mode == IW_MODE_MASTER || ieee->iw_mode == IW_MODE_REPEAT) && !from_assoc_ap) { switch (hostap_handle_sta_rx(ieee, dev, skb, rx_stats, wds != NULL)) { case AP_RX_CONTINUE_NOT_AUTHORIZED: frame_authorized = 0; break; case AP_RX_CONTINUE: frame_authorized = 1; break; case AP_RX_DROP: goto rx_dropped; case AP_RX_EXIT: goto rx_exit; } } #endif /* Nullfunc frames may have PS-bit set, so they must be passed to * hostap_handle_sta_rx() before being dropped here. */ stype &= ~IEEE80211_STYPE_QOS_DATA; if (stype != IEEE80211_STYPE_DATA && stype != IEEE80211_STYPE_DATA_CFACK && stype != IEEE80211_STYPE_DATA_CFPOLL && stype != IEEE80211_STYPE_DATA_CFACKPOLL) { if (stype != IEEE80211_STYPE_NULLFUNC) IEEE80211_DEBUG_DROP("RX: dropped data frame " "with no data (type=0x%02x, " "subtype=0x%02x, len=%d)\n", type, stype, skb->len); goto rx_dropped; } /* skb: hdr + (possibly fragmented, possibly encrypted) payload */ if ((fc & IEEE80211_FCTL_PROTECTED) && can_be_decrypted && (keyidx = ieee80211_rx_frame_decrypt(ieee, skb, crypt)) < 0) goto rx_dropped; hdr = (struct ieee80211_hdr_4addr *)skb->data; /* skb: hdr + (possibly fragmented) plaintext payload */ // PR: FIXME: hostap has additional conditions in the "if" below: // ieee->host_decrypt && (fc & IEEE80211_FCTL_PROTECTED) && if ((frag != 0) || (fc & IEEE80211_FCTL_MOREFRAGS)) { int flen; struct sk_buff *frag_skb = ieee80211_frag_cache_get(ieee, hdr); IEEE80211_DEBUG_FRAG("Rx Fragment received (%u)\n", frag); if (!frag_skb) { IEEE80211_DEBUG(IEEE80211_DL_RX | IEEE80211_DL_FRAG, "Rx cannot get skb from fragment " "cache (morefrag=%d seq=%u frag=%u)\n", (fc & IEEE80211_FCTL_MOREFRAGS) != 0, WLAN_GET_SEQ_SEQ(sc), frag); goto rx_dropped; } flen = skb->len; if (frag != 0) flen -= hdrlen; if (frag_skb->tail + flen > frag_skb->end) { printk(KERN_WARNING "%s: host decrypted and " "reassembled frame did not fit skb\n", dev->name); ieee80211_frag_cache_invalidate(ieee, hdr); goto rx_dropped; } if (frag == 0) { /* copy first fragment (including full headers) into * beginning of the fragment cache skb */ skb_copy_from_linear_data(skb, skb_put(frag_skb, flen), flen); } else { /* append frame payload to the end of the fragment * cache skb */ skb_copy_from_linear_data_offset(skb, hdrlen, skb_put(frag_skb, flen), flen); } dev_kfree_skb_any(skb); skb = NULL; if (fc & IEEE80211_FCTL_MOREFRAGS) { /* more fragments expected - leave the skb in fragment * cache for now; it will be delivered to upper layers * after all fragments have been received */ goto rx_exit; } /* this was the last fragment and the frame will be * delivered, so remove skb from fragment cache */ skb = frag_skb; hdr = (struct ieee80211_hdr_4addr *)skb->data; ieee80211_frag_cache_invalidate(ieee, hdr); } /* skb: hdr + (possible reassembled) full MSDU payload; possibly still * encrypted/authenticated */ if ((fc & IEEE80211_FCTL_PROTECTED) && can_be_decrypted && ieee80211_rx_frame_decrypt_msdu(ieee, skb, keyidx, crypt)) goto rx_dropped; hdr = (struct ieee80211_hdr_4addr *)skb->data; if (crypt && !(fc & IEEE80211_FCTL_PROTECTED) && !ieee->open_wep) { if ( /*ieee->ieee802_1x && */ ieee80211_is_eapol_frame(ieee, skb)) { /* pass unencrypted EAPOL frames even if encryption is * configured */ } else { IEEE80211_DEBUG_DROP("encryption configured, but RX " "frame not encrypted (SA=" MAC_FMT ")\n", MAC_ARG(hdr->addr2)); goto rx_dropped; } } if (crypt && !(fc & IEEE80211_FCTL_PROTECTED) && !ieee->open_wep && !ieee80211_is_eapol_frame(ieee, skb)) { IEEE80211_DEBUG_DROP("dropped unencrypted RX data " "frame from " MAC_FMT " (drop_unencrypted=1)\n", MAC_ARG(hdr->addr2)); goto rx_dropped; } /* If the frame was decrypted in hardware, we may need to strip off * any security data (IV, ICV, etc) that was left behind */ if (!can_be_decrypted && (fc & IEEE80211_FCTL_PROTECTED) && ieee->host_strip_iv_icv) { int trimlen = 0; /* Top two-bits of byte 3 are the key index */ if (skb->len >= hdrlen + 3) keyidx = skb->data[hdrlen + 3] >> 6; /* To strip off any security data which appears before the * payload, we simply increase hdrlen (as the header gets * chopped off immediately below). For the security data which * appears after the payload, we use skb_trim. */ switch (ieee->sec.encode_alg[keyidx]) { case SEC_ALG_WEP: /* 4 byte IV */ hdrlen += 4; /* 4 byte ICV */ trimlen = 4; break; case SEC_ALG_TKIP: /* 4 byte IV, 4 byte ExtIV */ hdrlen += 8; /* 8 byte MIC, 4 byte ICV */ trimlen = 12; break; case SEC_ALG_CCMP: /* 8 byte CCMP header */ hdrlen += 8; /* 8 byte MIC */ trimlen = 8; break; } if (skb->len < trimlen) goto rx_dropped; __skb_trim(skb, skb->len - trimlen); if (skb->len < hdrlen) goto rx_dropped; } /* skb: hdr + (possible reassembled) full plaintext payload */ payload = skb->data + hdrlen; ethertype = (payload[6] << 8) | payload[7]; #ifdef NOT_YET /* If IEEE 802.1X is used, check whether the port is authorized to send * the received frame. */ if (ieee->ieee802_1x && ieee->iw_mode == IW_MODE_MASTER) { if (ethertype == ETH_P_PAE) { printk(KERN_DEBUG "%s: RX: IEEE 802.1X frame\n", dev->name); if (ieee->hostapd && ieee->apdev) { /* Send IEEE 802.1X frames to the user * space daemon for processing */ prism2_rx_80211(ieee->apdev, skb, rx_stats, PRISM2_RX_MGMT); ieee->apdevstats.rx_packets++; ieee->apdevstats.rx_bytes += skb->len; goto rx_exit; } } else if (!frame_authorized) { printk(KERN_DEBUG "%s: dropped frame from " "unauthorized port (IEEE 802.1X): " "ethertype=0x%04x\n", dev->name, ethertype); goto rx_dropped; } } #endif /* convert hdr + possible LLC headers into Ethernet header */ if (skb->len - hdrlen >= 8 && ((memcmp(payload, rfc1042_header, SNAP_SIZE) == 0 && ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) || memcmp(payload, bridge_tunnel_header, SNAP_SIZE) == 0)) { /* remove RFC1042 or Bridge-Tunnel encapsulation and * replace EtherType */ skb_pull(skb, hdrlen + SNAP_SIZE); memcpy(skb_push(skb, ETH_ALEN), src, ETH_ALEN); memcpy(skb_push(skb, ETH_ALEN), dst, ETH_ALEN); } else { u16 len; /* Leave Ethernet header part of hdr and full payload */ skb_pull(skb, hdrlen); len = htons(skb->len); memcpy(skb_push(skb, 2), &len, 2); memcpy(skb_push(skb, ETH_ALEN), src, ETH_ALEN); memcpy(skb_push(skb, ETH_ALEN), dst, ETH_ALEN); } #ifdef NOT_YET if (wds && ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) == IEEE80211_FCTL_TODS) && skb->len >= ETH_HLEN + ETH_ALEN) { /* Non-standard frame: get addr4 from its bogus location after * the payload */ skb_copy_to_linear_data_offset(skb, ETH_ALEN, skb->data + skb->len - ETH_ALEN, ETH_ALEN); skb_trim(skb, skb->len - ETH_ALEN); } #endif stats->rx_packets++; stats->rx_bytes += skb->len; #ifdef NOT_YET if (ieee->iw_mode == IW_MODE_MASTER && !wds && ieee->ap->bridge_packets) { if (dst[0] & 0x01) { /* copy multicast frame both to the higher layers and * to the wireless media */ ieee->ap->bridged_multicast++; skb2 = skb_clone(skb, GFP_ATOMIC); if (skb2 == NULL) printk(KERN_DEBUG "%s: skb_clone failed for " "multicast frame\n", dev->name); } else if (hostap_is_sta_assoc(ieee->ap, dst)) { /* send frame directly to the associated STA using * wireless media and not passing to higher layers */ ieee->ap->bridged_unicast++; skb2 = skb; skb = NULL; } } if (skb2 != NULL) { /* send to wireless media */ skb2->dev = dev; skb2->protocol = __constant_htons(ETH_P_802_3); skb_reset_mac_header(skb2); skb_reset_network_header(skb2); /* skb2->network_header += ETH_HLEN; */ dev_queue_xmit(skb2); } #endif if (skb) { skb->protocol = eth_type_trans(skb, dev); memset(skb->cb, 0, sizeof(skb->cb)); skb->ip_summed = CHECKSUM_NONE; /* 802.11 crc not sufficient */ if (netif_rx(skb) == NET_RX_DROP) { /* netif_rx always succeeds, but it might drop * the packet. If it drops the packet, we log that * in our stats. */ IEEE80211_DEBUG_DROP ("RX: netif_rx dropped the packet\n"); stats->rx_dropped++; } } rx_exit: #ifdef NOT_YET if (sta) hostap_handle_sta_release(sta); #endif return 1; rx_dropped: stats->rx_dropped++; /* Returning 0 indicates to caller that we have not handled the SKB-- * so it is still allocated and can be used again by underlying * hardware as a DMA target */ return 0; }
int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, struct ieee80211_rx_stats *rx_stats) { struct net_device *dev = ieee->dev; struct ieee80211_hdr_4addr *hdr; size_t hdrlen; u16 fc, type, stype, sc; struct net_device_stats *stats; unsigned int frag; u8 *payload; u16 ethertype; #ifdef NOT_YET struct net_device *wds = NULL; struct sk_buff *skb2 = NULL; struct net_device *wds = NULL; int frame_authorized = 0; int from_assoc_ap = 0; void *sta = NULL; #endif u8 dst[ETH_ALEN]; u8 src[ETH_ALEN]; struct ieee80211_crypt_data *crypt = NULL; int keyidx = 0; int can_be_decrypted = 0; hdr = (struct ieee80211_hdr_4addr *)skb->data; stats = &ieee->stats; if (skb->len < 10) { printk(KERN_INFO "%s: SKB length < 10\n", dev->name); goto rx_dropped; } fc = le16_to_cpu(hdr->frame_ctl); type = WLAN_FC_GET_TYPE(fc); stype = WLAN_FC_GET_STYPE(fc); sc = le16_to_cpu(hdr->seq_ctl); frag = WLAN_GET_SEQ_FRAG(sc); hdrlen = ieee80211_get_hdrlen(fc); #ifdef CONFIG_WIRELESS_EXT #ifdef IW_WIRELESS_SPY if (ieee->spy_data.spy_number > 0) { struct iw_quality wstats; wstats.updated = 0; if (rx_stats->mask & IEEE80211_STATMASK_RSSI) { wstats.level = rx_stats->rssi; wstats.updated |= IW_QUAL_LEVEL_UPDATED; } else wstats.updated |= IW_QUAL_LEVEL_INVALID; if (rx_stats->mask & IEEE80211_STATMASK_NOISE) { wstats.noise = rx_stats->noise; wstats.updated |= IW_QUAL_NOISE_UPDATED; } else wstats.updated |= IW_QUAL_NOISE_INVALID; if (rx_stats->mask & IEEE80211_STATMASK_SIGNAL) { wstats.qual = rx_stats->signal; wstats.updated |= IW_QUAL_QUAL_UPDATED; } else wstats.updated |= IW_QUAL_QUAL_INVALID; wireless_spy_update(ieee->dev, hdr->addr2, &wstats); } #endif #endif #ifdef NOT_YET hostap_update_rx_stats(local->ap, hdr, rx_stats); #endif if (ieee->iw_mode == IW_MODE_MONITOR) { stats->rx_packets++; stats->rx_bytes += skb->len; ieee80211_monitor_rx(ieee, skb, rx_stats); return 1; } can_be_decrypted = (is_multicast_ether_addr(hdr->addr1) || is_broadcast_ether_addr(hdr->addr2)) ? ieee->host_mc_decrypt : ieee->host_decrypt; if (can_be_decrypted) { if (skb->len >= hdrlen + 3) { keyidx = skb->data[hdrlen + 3] >> 6; } crypt = ieee->crypt[keyidx]; #ifdef NOT_YET sta = NULL; if (!(hdr->addr1[0] & 0x01) || local->bcrx_sta_key) (void)hostap_handle_sta_crypto(local, hdr, &crypt, &sta); #endif if (crypt && (crypt->ops == NULL || crypt->ops->decrypt_mpdu == NULL)) crypt = NULL; if (!crypt && (fc & IEEE80211_FCTL_PROTECTED)) { IEEE80211_DEBUG_DROP("Decryption failed (not set)" " (SA=" MAC_FMT ")\n", MAC_ARG(hdr->addr2)); ieee->ieee_stats.rx_discards_undecryptable++; goto rx_dropped; } } #ifdef NOT_YET if (type != WLAN_FC_TYPE_DATA) { if (type == WLAN_FC_TYPE_MGMT && stype == WLAN_FC_STYPE_AUTH && fc & IEEE80211_FCTL_PROTECTED && ieee->host_decrypt && (keyidx = hostap_rx_frame_decrypt(ieee, skb, crypt)) < 0) { printk(KERN_DEBUG "%s: failed to decrypt mgmt::auth " "from " MAC_FMT "\n", dev->name, MAC_ARG(hdr->addr2)); goto rx_dropped; } if (ieee80211_rx_frame_mgmt(ieee, skb, rx_stats, type, stype)) goto rx_dropped; else goto rx_exit; } #endif if (sc == ieee->prev_seq_ctl) goto rx_dropped; else ieee->prev_seq_ctl = sc; if (skb->len < IEEE80211_3ADDR_LEN) goto rx_dropped; switch (fc & (IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS)) { case IEEE80211_FCTL_FROMDS: memcpy(dst, hdr->addr1, ETH_ALEN); memcpy(src, hdr->addr3, ETH_ALEN); break; case IEEE80211_FCTL_TODS: memcpy(dst, hdr->addr3, ETH_ALEN); memcpy(src, hdr->addr2, ETH_ALEN); break; case IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS: if (skb->len < IEEE80211_4ADDR_LEN) goto rx_dropped; memcpy(dst, hdr->addr3, ETH_ALEN); memcpy(src, hdr->addr4, ETH_ALEN); break; case 0: memcpy(dst, hdr->addr1, ETH_ALEN); memcpy(src, hdr->addr2, ETH_ALEN); break; } #ifdef NOT_YET if (hostap_rx_frame_wds(ieee, hdr, fc, &wds)) goto rx_dropped; if (wds) { skb->dev = dev = wds; stats = hostap_get_stats(dev); } if (ieee->iw_mode == IW_MODE_MASTER && !wds && (fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) == IEEE80211_FCTL_FROMDS && ieee->stadev && !compare_ether_addr(hdr->addr2, ieee->assoc_ap_addr)) { skb->dev = dev = ieee->stadev; stats = hostap_get_stats(dev); from_assoc_ap = 1; } #endif dev->last_rx = jiffies; #ifdef NOT_YET if ((ieee->iw_mode == IW_MODE_MASTER || ieee->iw_mode == IW_MODE_REPEAT) && !from_assoc_ap) { switch (hostap_handle_sta_rx(ieee, dev, skb, rx_stats, wds != NULL)) { case AP_RX_CONTINUE_NOT_AUTHORIZED: frame_authorized = 0; break; case AP_RX_CONTINUE: frame_authorized = 1; break; case AP_RX_DROP: goto rx_dropped; case AP_RX_EXIT: goto rx_exit; } } #endif stype &= ~IEEE80211_STYPE_QOS_DATA; if (stype != IEEE80211_STYPE_DATA && stype != IEEE80211_STYPE_DATA_CFACK && stype != IEEE80211_STYPE_DATA_CFPOLL && stype != IEEE80211_STYPE_DATA_CFACKPOLL) { if (stype != IEEE80211_STYPE_NULLFUNC) IEEE80211_DEBUG_DROP("RX: dropped data frame " "with no data (type=0x%02x, " "subtype=0x%02x, len=%d)\n", type, stype, skb->len); goto rx_dropped; } if ((fc & IEEE80211_FCTL_PROTECTED) && can_be_decrypted && (keyidx = ieee80211_rx_frame_decrypt(ieee, skb, crypt)) < 0) goto rx_dropped; hdr = (struct ieee80211_hdr_4addr *)skb->data; if ((frag != 0) || (fc & IEEE80211_FCTL_MOREFRAGS)) { int flen; struct sk_buff *frag_skb = ieee80211_frag_cache_get(ieee, hdr); IEEE80211_DEBUG_FRAG("Rx Fragment received (%u)\n", frag); if (!frag_skb) { IEEE80211_DEBUG(IEEE80211_DL_RX | IEEE80211_DL_FRAG, "Rx cannot get skb from fragment " "cache (morefrag=%d seq=%u frag=%u)\n", (fc & IEEE80211_FCTL_MOREFRAGS) != 0, WLAN_GET_SEQ_SEQ(sc), frag); goto rx_dropped; } flen = skb->len; if (frag != 0) flen -= hdrlen; if (frag_skb->tail + flen > frag_skb->end) { printk(KERN_WARNING "%s: host decrypted and " "reassembled frame did not fit skb\n", dev->name); ieee80211_frag_cache_invalidate(ieee, hdr); goto rx_dropped; } if (frag == 0) { skb_copy_from_linear_data(skb, skb_put(frag_skb, flen), flen); } else { skb_copy_from_linear_data_offset(skb, hdrlen, skb_put(frag_skb, flen), flen); } dev_kfree_skb_any(skb); skb = NULL; if (fc & IEEE80211_FCTL_MOREFRAGS) { goto rx_exit; } skb = frag_skb; hdr = (struct ieee80211_hdr_4addr *)skb->data; ieee80211_frag_cache_invalidate(ieee, hdr); } if ((fc & IEEE80211_FCTL_PROTECTED) && can_be_decrypted && ieee80211_rx_frame_decrypt_msdu(ieee, skb, keyidx, crypt)) goto rx_dropped; hdr = (struct ieee80211_hdr_4addr *)skb->data; if (crypt && !(fc & IEEE80211_FCTL_PROTECTED) && !ieee->open_wep) { if ( ieee80211_is_eapol_frame(ieee, skb)) { } else { IEEE80211_DEBUG_DROP("encryption configured, but RX " "frame not encrypted (SA=" MAC_FMT ")\n", MAC_ARG(hdr->addr2)); goto rx_dropped; } } if (crypt && !(fc & IEEE80211_FCTL_PROTECTED) && !ieee->open_wep && !ieee80211_is_eapol_frame(ieee, skb)) { IEEE80211_DEBUG_DROP("dropped unencrypted RX data " "frame from " MAC_FMT " (drop_unencrypted=1)\n", MAC_ARG(hdr->addr2)); goto rx_dropped; } if (!can_be_decrypted && (fc & IEEE80211_FCTL_PROTECTED) && ieee->host_strip_iv_icv) { int trimlen = 0; if (skb->len >= hdrlen + 3) keyidx = skb->data[hdrlen + 3] >> 6; switch (ieee->sec.encode_alg[keyidx]) { case SEC_ALG_WEP: hdrlen += 4; trimlen = 4; break; case SEC_ALG_TKIP: hdrlen += 8; trimlen = 12; break; case SEC_ALG_CCMP: hdrlen += 8; trimlen = 8; break; } if (skb->len < trimlen) goto rx_dropped; __skb_trim(skb, skb->len - trimlen); if (skb->len < hdrlen) goto rx_dropped; } payload = skb->data + hdrlen; ethertype = (payload[6] << 8) | payload[7]; #ifdef NOT_YET if (ieee->ieee802_1x && ieee->iw_mode == IW_MODE_MASTER) { if (ethertype == ETH_P_PAE) { printk(KERN_DEBUG "%s: RX: IEEE 802.1X frame\n", dev->name); if (ieee->hostapd && ieee->apdev) { prism2_rx_80211(ieee->apdev, skb, rx_stats, PRISM2_RX_MGMT); ieee->apdevstats.rx_packets++; ieee->apdevstats.rx_bytes += skb->len; goto rx_exit; } } else if (!frame_authorized) { printk(KERN_DEBUG "%s: dropped frame from " "unauthorized port (IEEE 802.1X): " "ethertype=0x%04x\n", dev->name, ethertype); goto rx_dropped; } } #endif if (skb->len - hdrlen >= 8 && ((memcmp(payload, rfc1042_header, SNAP_SIZE) == 0 && ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) || memcmp(payload, bridge_tunnel_header, SNAP_SIZE) == 0)) { skb_pull(skb, hdrlen + SNAP_SIZE); memcpy(skb_push(skb, ETH_ALEN), src, ETH_ALEN); memcpy(skb_push(skb, ETH_ALEN), dst, ETH_ALEN); } else { u16 len; skb_pull(skb, hdrlen); len = htons(skb->len); memcpy(skb_push(skb, 2), &len, 2); memcpy(skb_push(skb, ETH_ALEN), src, ETH_ALEN); memcpy(skb_push(skb, ETH_ALEN), dst, ETH_ALEN); } #ifdef NOT_YET if (wds && ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) == IEEE80211_FCTL_TODS) && skb->len >= ETH_HLEN + ETH_ALEN) { skb_copy_to_linear_data_offset(skb, ETH_ALEN, skb->data + skb->len - ETH_ALEN, ETH_ALEN); skb_trim(skb, skb->len - ETH_ALEN); } #endif stats->rx_packets++; stats->rx_bytes += skb->len; #ifdef NOT_YET if (ieee->iw_mode == IW_MODE_MASTER && !wds && ieee->ap->bridge_packets) { if (dst[0] & 0x01) { ieee->ap->bridged_multicast++; skb2 = skb_clone(skb, GFP_ATOMIC); if (skb2 == NULL) printk(KERN_DEBUG "%s: skb_clone failed for " "multicast frame\n", dev->name); } else if (hostap_is_sta_assoc(ieee->ap, dst)) { ieee->ap->bridged_unicast++; skb2 = skb; skb = NULL; } } if (skb2 != NULL) { skb2->dev = dev; skb2->protocol = __constant_htons(ETH_P_802_3); skb_reset_mac_header(skb2); skb_reset_network_header(skb2); dev_queue_xmit(skb2); } #endif if (skb) { skb->protocol = eth_type_trans(skb, dev); memset(skb->cb, 0, sizeof(skb->cb)); skb->ip_summed = CHECKSUM_NONE; if (netif_rx(skb) == NET_RX_DROP) { IEEE80211_DEBUG_DROP ("RX: netif_rx dropped the packet\n"); stats->rx_dropped++; } } rx_exit: #ifdef NOT_YET if (sta) hostap_handle_sta_release(sta); #endif return 1; rx_dropped: stats->rx_dropped++; return 0; }
163
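Note (idx 163): in ieee80211_rx above, the SNAP ethertype is read with ethertype = (payload[6] << 8) | payload[7] before the skb->len - hdrlen >= 8 length test, so a short data frame that survives the earlier skb->len < IEEE80211_3ADDR_LEN check can still be over-read. A minimal sketch of the check-before-parse ordering (hypothetical helper, not the kernel API):

    #include <stddef.h>

    /* Sketch: refuse to read the 2-byte SNAP ethertype unless the
     * 8-byte LLC/SNAP header actually fits in the payload. */
    static int parse_snap_ethertype(const unsigned char *payload,
                                    size_t payload_len,
                                    unsigned int *ethertype)
    {
        if (payload_len < 8)          /* 6-byte SNAP + 2-byte type */
            return -1;                /* too short: do not parse   */
        *ethertype = (payload[6] << 8) | payload[7];
        return 0;
    }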
1
static int do_dccp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { struct dccp_sock *dp; int val, len; if (get_user(len, optlen)) return -EFAULT; if (len < sizeof(int)) return -EINVAL; dp = dccp_sk(sk); switch (optname) { case DCCP_SOCKOPT_PACKET_SIZE: DCCP_WARN("sockopt(PACKET_SIZE) is deprecated: fix your app\n"); return 0; case DCCP_SOCKOPT_SERVICE: return dccp_getsockopt_service(sk, len, (__be32 __user *)optval, optlen); case DCCP_SOCKOPT_SEND_CSCOV: val = dp->dccps_pcslen; break; case DCCP_SOCKOPT_RECV_CSCOV: val = dp->dccps_pcrlen; break; case 128 ... 191: return ccid_hc_rx_getsockopt(dp->dccps_hc_rx_ccid, sk, optname, len, (u32 __user *)optval, optlen); case 192 ... 255: return ccid_hc_tx_getsockopt(dp->dccps_hc_tx_ccid, sk, optname, len, (u32 __user *)optval, optlen); default: return -ENOPROTOOPT; } if (put_user(len, optlen) || copy_to_user(optval, &val, len)) return -EFAULT; return 0; }
static int do_dccp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { struct dccp_sock *dp; int val, len; if (get_user(len, optlen)) return -EFAULT; if (len < sizeof(int)) return -EINVAL; dp = dccp_sk(sk); switch (optname) { case DCCP_SOCKOPT_PACKET_SIZE: DCCP_WARN("sockopt(PACKET_SIZE) is deprecated: fix your app\n"); return 0; case DCCP_SOCKOPT_SERVICE: return dccp_getsockopt_service(sk, len, (__be32 __user *)optval, optlen); case DCCP_SOCKOPT_SEND_CSCOV: val = dp->dccps_pcslen; break; case DCCP_SOCKOPT_RECV_CSCOV: val = dp->dccps_pcrlen; break; case 128 ... 191: return ccid_hc_rx_getsockopt(dp->dccps_hc_rx_ccid, sk, optname, len, (u32 __user *)optval, optlen); case 192 ... 255: return ccid_hc_tx_getsockopt(dp->dccps_hc_tx_ccid, sk, optname, len, (u32 __user *)optval, optlen); default: return -ENOPROTOOPT; } if (put_user(len, optlen) || copy_to_user(optval, &val, len)) return -EFAULT; return 0; }
165
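Note (idx 165, target 1): do_dccp_getsockopt only bounds len from below (len < sizeof(int)), yet copy_to_user(optval, &val, len) later copies len bytes out of the 4-byte stack variable val, so an oversized len discloses kernel stack memory. A user-space sketch of the same overread shape and its usual fix, clamping len to the object size (names hypothetical):

    #include <stdio.h>
    #include <string.h>

    /* Sketch: caller-controlled `len` bounded only from below, then
     * used as the copy size for a 4-byte source object. */
    static int get_opt(void *out, size_t len)
    {
        int val = 42;              /* 4-byte source, like `val` above   */
        if (len < sizeof(int))
            return -1;             /* lower bound only -- the flaw      */
        len = sizeof(int);         /* the fix: clamp to the object size */
        memcpy(out, &val, len);
        return 0;
    }

    int main(void)
    {
        char buf[64];
        if (get_opt(buf, sizeof(buf)) == 0)
            printf("copied %zu bytes\n", sizeof(int));
        return 0;
    }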
0
int qemuMonitorJSONSetMigrationSpeed ( qemuMonitorPtr mon , unsigned long bandwidth ) { int ret ; virJSONValuePtr cmd ; virJSONValuePtr reply = NULL ; cmd = qemuMonitorJSONMakeCommand ( "migrate_set_speed" , "U:value" , bandwidth * 1024ULL * 1024ULL , NULL ) ; if ( ! cmd ) return - 1 ; ret = qemuMonitorJSONCommand ( mon , cmd , & reply ) ; if ( ret == 0 ) ret = qemuMonitorJSONCheckError ( cmd , reply ) ; virJSONValueFree ( cmd ) ; virJSONValueFree ( reply ) ; return ret ; }
int qemuMonitorJSONSetMigrationSpeed ( qemuMonitorPtr mon , unsigned long bandwidth ) { int ret ; virJSONValuePtr cmd ; virJSONValuePtr reply = NULL ; cmd = qemuMonitorJSONMakeCommand ( "migrate_set_speed" , "U:value" , bandwidth * 1024ULL * 1024ULL , NULL ) ; if ( ! cmd ) return - 1 ; ret = qemuMonitorJSONCommand ( mon , cmd , & reply ) ; if ( ret == 0 ) ret = qemuMonitorJSONCheckError ( cmd , reply ) ; virJSONValueFree ( cmd ) ; virJSONValueFree ( reply ) ; return ret ; }
166
0
static int get_uint64_as_uint32(QEMUFile *f, void *pv, size_t size, VMStateField *field) { uint64_t *v = pv; *v = qemu_get_be32(f); return 0; }
static int get_uint64_as_uint32(QEMUFile *f, void *pv, size_t size, VMStateField *field) { uint64_t *v = pv; *v = qemu_get_be32(f); return 0; }
167
1
kg_unseal_iov(OM_uint32 *minor_status, gss_ctx_id_t context_handle, int *conf_state, gss_qop_t *qop_state, gss_iov_buffer_desc *iov, int iov_count, int toktype) { krb5_gss_ctx_id_rec *ctx; OM_uint32 code; ctx = (krb5_gss_ctx_id_rec *)context_handle; if (!ctx->established) { *minor_status = KG_CTX_INCOMPLETE; return GSS_S_NO_CONTEXT; } if (kg_locate_iov(iov, iov_count, GSS_IOV_BUFFER_TYPE_STREAM) != NULL) { code = kg_unseal_stream_iov(minor_status, ctx, conf_state, qop_state, iov, iov_count, toktype); } else { code = kg_unseal_iov_token(minor_status, ctx, conf_state, qop_state, iov, iov_count, toktype); } return code; }
kg_unseal_iov(OM_uint32 *minor_status, gss_ctx_id_t context_handle, int *conf_state, gss_qop_t *qop_state, gss_iov_buffer_desc *iov, int iov_count, int toktype) { krb5_gss_ctx_id_rec *ctx; OM_uint32 code; ctx = (krb5_gss_ctx_id_rec *)context_handle; if (!ctx->established) { *minor_status = KG_CTX_INCOMPLETE; return GSS_S_NO_CONTEXT; } if (kg_locate_iov(iov, iov_count, GSS_IOV_BUFFER_TYPE_STREAM) != NULL) { code = kg_unseal_stream_iov(minor_status, ctx, conf_state, qop_state, iov, iov_count, toktype); } else { code = kg_unseal_iov_token(minor_status, ctx, conf_state, qop_state, iov, iov_count, toktype); } return code; }
168
0
int x86_emulate_insn(struct x86_emulate_ctxt *ctxt) { const struct x86_emulate_ops *ops = ctxt->ops; int rc = X86EMUL_CONTINUE; int saved_dst_type = ctxt->dst.type; ctxt->mem_read.pos = 0; /* LOCK prefix is allowed only with some instructions */ if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) { rc = emulate_ud(ctxt); goto done; } if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) { rc = emulate_ud(ctxt); goto done; } if (unlikely(ctxt->d & (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) { if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) || (ctxt->d & Undefined)) { rc = emulate_ud(ctxt); goto done; } if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM))) || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) { rc = emulate_ud(ctxt); goto done; } if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) { rc = emulate_nm(ctxt); goto done; } if (ctxt->d & Mmx) { rc = flush_pending_x87_faults(ctxt); if (rc != X86EMUL_CONTINUE) goto done; /* * Now that we know the fpu is exception safe, we can fetch * operands from it. */ fetch_possible_mmx_operand(ctxt, &ctxt->src); fetch_possible_mmx_operand(ctxt, &ctxt->src2); if (!(ctxt->d & Mov)) fetch_possible_mmx_operand(ctxt, &ctxt->dst); } if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) { rc = emulator_check_intercept(ctxt, ctxt->intercept, X86_ICPT_PRE_EXCEPT); if (rc != X86EMUL_CONTINUE) goto done; } /* Privileged instruction can be executed only in CPL=0 */ if ((ctxt->d & Priv) && ops->cpl(ctxt)) { if (ctxt->d & PrivUD) rc = emulate_ud(ctxt); else rc = emulate_gp(ctxt, 0); goto done; } /* Instruction can only be executed in protected mode */ if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) { rc = emulate_ud(ctxt); goto done; } /* Do instruction specific permission checks */ if (ctxt->d & CheckPerm) { rc = ctxt->check_perm(ctxt); if (rc != X86EMUL_CONTINUE) goto done; } if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) { rc = emulator_check_intercept(ctxt, ctxt->intercept, X86_ICPT_POST_EXCEPT); if (rc != X86EMUL_CONTINUE) goto done; } if (ctxt->rep_prefix && (ctxt->d & String)) { /* All REP prefixes have the same first termination condition */ if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) { ctxt->eip = ctxt->_eip; ctxt->eflags &= ~EFLG_RF; goto done; } } } if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) { rc = segmented_read(ctxt, ctxt->src.addr.mem, ctxt->src.valptr, ctxt->src.bytes); if (rc != X86EMUL_CONTINUE) goto done; ctxt->src.orig_val64 = ctxt->src.val64; } if (ctxt->src2.type == OP_MEM) { rc = segmented_read(ctxt, ctxt->src2.addr.mem, &ctxt->src2.val, ctxt->src2.bytes); if (rc != X86EMUL_CONTINUE) goto done; } if ((ctxt->d & DstMask) == ImplicitOps) goto special_insn; if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) { /* optimisation - avoid slow emulated read if Mov */ rc = segmented_read(ctxt, ctxt->dst.addr.mem, &ctxt->dst.val, ctxt->dst.bytes); if (rc != X86EMUL_CONTINUE) goto done; } ctxt->dst.orig_val = ctxt->dst.val; special_insn: if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) { rc = emulator_check_intercept(ctxt, ctxt->intercept, X86_ICPT_POST_MEMACCESS); if (rc != X86EMUL_CONTINUE) goto done; } if (ctxt->rep_prefix && (ctxt->d & String)) ctxt->eflags |= EFLG_RF; else ctxt->eflags &= ~EFLG_RF; if (ctxt->execute) { if (ctxt->d & Fastop) { void (*fop)(struct fastop *) = (void *)ctxt->execute; rc = fastop(ctxt, fop); if (rc != X86EMUL_CONTINUE) goto done; goto writeback; } 
rc = ctxt->execute(ctxt); if (rc != X86EMUL_CONTINUE) goto done; goto writeback; } if (ctxt->opcode_len == 2) goto twobyte_insn; else if (ctxt->opcode_len == 3) goto threebyte_insn; switch (ctxt->b) { case 0x63: /* movsxd */ if (ctxt->mode != X86EMUL_MODE_PROT64) goto cannot_emulate; ctxt->dst.val = (s32) ctxt->src.val; break; case 0x70 ... 0x7f: /* jcc (short) */ if (test_cc(ctxt->b, ctxt->eflags)) rc = jmp_rel(ctxt, ctxt->src.val); break; case 0x8d: /* lea r16/r32, m */ ctxt->dst.val = ctxt->src.addr.mem.ea; break; case 0x90 ... 0x97: /* nop / xchg reg, rax */ if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX)) ctxt->dst.type = OP_NONE; else rc = em_xchg(ctxt); break; case 0x98: /* cbw/cwde/cdqe */ switch (ctxt->op_bytes) { case 2: ctxt->dst.val = (s8)ctxt->dst.val; break; case 4: ctxt->dst.val = (s16)ctxt->dst.val; break; case 8: ctxt->dst.val = (s32)ctxt->dst.val; break; } break; case 0xcc: /* int3 */ rc = emulate_int(ctxt, 3); break; case 0xcd: /* int n */ rc = emulate_int(ctxt, ctxt->src.val); break; case 0xce: /* into */ if (ctxt->eflags & EFLG_OF) rc = emulate_int(ctxt, 4); break; case 0xe9: /* jmp rel */ case 0xeb: /* jmp rel short */ rc = jmp_rel(ctxt, ctxt->src.val); ctxt->dst.type = OP_NONE; /* Disable writeback. */ break; case 0xf4: /* hlt */ ctxt->ops->halt(ctxt); break; case 0xf5: /* cmc */ /* complement carry flag from eflags reg */ ctxt->eflags ^= EFLG_CF; break; case 0xf8: /* clc */ ctxt->eflags &= ~EFLG_CF; break; case 0xf9: /* stc */ ctxt->eflags |= EFLG_CF; break; case 0xfc: /* cld */ ctxt->eflags &= ~EFLG_DF; break; case 0xfd: /* std */ ctxt->eflags |= EFLG_DF; break; default: goto cannot_emulate; } if (rc != X86EMUL_CONTINUE) goto done; writeback: if (ctxt->d & SrcWrite) { BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR); rc = writeback(ctxt, &ctxt->src); if (rc != X86EMUL_CONTINUE) goto done; } if (!(ctxt->d & NoWrite)) { rc = writeback(ctxt, &ctxt->dst); if (rc != X86EMUL_CONTINUE) goto done; } /* * restore dst type in case the decoding will be reused * (happens for string instruction ) */ ctxt->dst.type = saved_dst_type; if ((ctxt->d & SrcMask) == SrcSI) string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src); if ((ctxt->d & DstMask) == DstDI) string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst); if (ctxt->rep_prefix && (ctxt->d & String)) { unsigned int count; struct read_cache *r = &ctxt->io_read; if ((ctxt->d & SrcMask) == SrcSI) count = ctxt->src.count; else count = ctxt->dst.count; register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX), -count); if (!string_insn_completed(ctxt)) { /* * Re-enter guest when pio read ahead buffer is empty * or, if it is not used, after each 1024 iteration. */ if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) && (r->end == 0 || r->end != r->pos)) { /* * Reset read cache. Usually happens before * decode, but since instruction is restarted * we have to do it here. */ ctxt->mem_read.end = 0; writeback_registers(ctxt); return EMULATION_RESTART; } goto done; /* skip rip writeback */ } ctxt->eflags &= ~EFLG_RF; } ctxt->eip = ctxt->_eip; done: if (rc == X86EMUL_PROPAGATE_FAULT) { WARN_ON(ctxt->exception.vector > 0x1f); ctxt->have_exception = true; } if (rc == X86EMUL_INTERCEPTED) return EMULATION_INTERCEPTED; if (rc == X86EMUL_CONTINUE) writeback_registers(ctxt); return (rc == X86EMUL_UNHANDLEABLE) ? 
EMULATION_FAILED : EMULATION_OK; twobyte_insn: switch (ctxt->b) { case 0x09: /* wbinvd */ (ctxt->ops->wbinvd)(ctxt); break; case 0x08: /* invd */ case 0x0d: /* GrpP (prefetch) */ case 0x18: /* Grp16 (prefetch/nop) */ case 0x1f: /* nop */ break; case 0x20: /* mov cr, reg */ ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg); break; case 0x21: /* mov from dr to reg */ ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val); break; case 0x40 ... 0x4f: /* cmov */ if (test_cc(ctxt->b, ctxt->eflags)) ctxt->dst.val = ctxt->src.val; else if (ctxt->mode != X86EMUL_MODE_PROT64 || ctxt->op_bytes != 4) ctxt->dst.type = OP_NONE; /* no writeback */ break; case 0x80 ... 0x8f: /* jnz rel, etc*/ if (test_cc(ctxt->b, ctxt->eflags)) rc = jmp_rel(ctxt, ctxt->src.val); break; case 0x90 ... 0x9f: /* setcc r/m8 */ ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags); break; case 0xae: /* clflush */ break; case 0xb6 ... 0xb7: /* movzx */ ctxt->dst.bytes = ctxt->op_bytes; ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val : (u16) ctxt->src.val; break; case 0xbe ... 0xbf: /* movsx */ ctxt->dst.bytes = ctxt->op_bytes; ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val : (s16) ctxt->src.val; break; case 0xc3: /* movnti */ ctxt->dst.bytes = ctxt->op_bytes; ctxt->dst.val = (ctxt->op_bytes == 8) ? (u64) ctxt->src.val : (u32) ctxt->src.val; break; default: goto cannot_emulate; } threebyte_insn: if (rc != X86EMUL_CONTINUE) goto done; goto writeback; cannot_emulate: return EMULATION_FAILED; }
int x86_emulate_insn(struct x86_emulate_ctxt *ctxt) { const struct x86_emulate_ops *ops = ctxt->ops; int rc = X86EMUL_CONTINUE; int saved_dst_type = ctxt->dst.type; ctxt->mem_read.pos = 0; if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) { rc = emulate_ud(ctxt); goto done; } if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) { rc = emulate_ud(ctxt); goto done; } if (unlikely(ctxt->d & (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) { if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) || (ctxt->d & Undefined)) { rc = emulate_ud(ctxt); goto done; } if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM))) || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) { rc = emulate_ud(ctxt); goto done; } if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) { rc = emulate_nm(ctxt); goto done; } if (ctxt->d & Mmx) { rc = flush_pending_x87_faults(ctxt); if (rc != X86EMUL_CONTINUE) goto done; fetch_possible_mmx_operand(ctxt, &ctxt->src); fetch_possible_mmx_operand(ctxt, &ctxt->src2); if (!(ctxt->d & Mov)) fetch_possible_mmx_operand(ctxt, &ctxt->dst); } if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) { rc = emulator_check_intercept(ctxt, ctxt->intercept, X86_ICPT_PRE_EXCEPT); if (rc != X86EMUL_CONTINUE) goto done; } if ((ctxt->d & Priv) && ops->cpl(ctxt)) { if (ctxt->d & PrivUD) rc = emulate_ud(ctxt); else rc = emulate_gp(ctxt, 0); goto done; } if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) { rc = emulate_ud(ctxt); goto done; } if (ctxt->d & CheckPerm) { rc = ctxt->check_perm(ctxt); if (rc != X86EMUL_CONTINUE) goto done; } if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) { rc = emulator_check_intercept(ctxt, ctxt->intercept, X86_ICPT_POST_EXCEPT); if (rc != X86EMUL_CONTINUE) goto done; } if (ctxt->rep_prefix && (ctxt->d & String)) { if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) { ctxt->eip = ctxt->_eip; ctxt->eflags &= ~EFLG_RF; goto done; } } } if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) { rc = segmented_read(ctxt, ctxt->src.addr.mem, ctxt->src.valptr, ctxt->src.bytes); if (rc != X86EMUL_CONTINUE) goto done; ctxt->src.orig_val64 = ctxt->src.val64; } if (ctxt->src2.type == OP_MEM) { rc = segmented_read(ctxt, ctxt->src2.addr.mem, &ctxt->src2.val, ctxt->src2.bytes); if (rc != X86EMUL_CONTINUE) goto done; } if ((ctxt->d & DstMask) == ImplicitOps) goto special_insn; if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) { rc = segmented_read(ctxt, ctxt->dst.addr.mem, &ctxt->dst.val, ctxt->dst.bytes); if (rc != X86EMUL_CONTINUE) goto done; } ctxt->dst.orig_val = ctxt->dst.val; special_insn: if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) { rc = emulator_check_intercept(ctxt, ctxt->intercept, X86_ICPT_POST_MEMACCESS); if (rc != X86EMUL_CONTINUE) goto done; } if (ctxt->rep_prefix && (ctxt->d & String)) ctxt->eflags |= EFLG_RF; else ctxt->eflags &= ~EFLG_RF; if (ctxt->execute) { if (ctxt->d & Fastop) { void (*fop)(struct fastop *) = (void *)ctxt->execute; rc = fastop(ctxt, fop); if (rc != X86EMUL_CONTINUE) goto done; goto writeback; } rc = ctxt->execute(ctxt); if (rc != X86EMUL_CONTINUE) goto done; goto writeback; } if (ctxt->opcode_len == 2) goto twobyte_insn; else if (ctxt->opcode_len == 3) goto threebyte_insn; switch (ctxt->b) { case 0x63: if (ctxt->mode != X86EMUL_MODE_PROT64) goto cannot_emulate; ctxt->dst.val = (s32) ctxt->src.val; break; case 0x70 ... 
0x7f: if (test_cc(ctxt->b, ctxt->eflags)) rc = jmp_rel(ctxt, ctxt->src.val); break; case 0x8d: ctxt->dst.val = ctxt->src.addr.mem.ea; break; case 0x90 ... 0x97: if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX)) ctxt->dst.type = OP_NONE; else rc = em_xchg(ctxt); break; case 0x98: switch (ctxt->op_bytes) { case 2: ctxt->dst.val = (s8)ctxt->dst.val; break; case 4: ctxt->dst.val = (s16)ctxt->dst.val; break; case 8: ctxt->dst.val = (s32)ctxt->dst.val; break; } break; case 0xcc: rc = emulate_int(ctxt, 3); break; case 0xcd: rc = emulate_int(ctxt, ctxt->src.val); break; case 0xce: if (ctxt->eflags & EFLG_OF) rc = emulate_int(ctxt, 4); break; case 0xe9: case 0xeb: rc = jmp_rel(ctxt, ctxt->src.val); ctxt->dst.type = OP_NONE; break; case 0xf4: ctxt->ops->halt(ctxt); break; case 0xf5: ctxt->eflags ^= EFLG_CF; break; case 0xf8: ctxt->eflags &= ~EFLG_CF; break; case 0xf9: ctxt->eflags |= EFLG_CF; break; case 0xfc: ctxt->eflags &= ~EFLG_DF; break; case 0xfd: ctxt->eflags |= EFLG_DF; break; default: goto cannot_emulate; } if (rc != X86EMUL_CONTINUE) goto done; writeback: if (ctxt->d & SrcWrite) { BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR); rc = writeback(ctxt, &ctxt->src); if (rc != X86EMUL_CONTINUE) goto done; } if (!(ctxt->d & NoWrite)) { rc = writeback(ctxt, &ctxt->dst); if (rc != X86EMUL_CONTINUE) goto done; } ctxt->dst.type = saved_dst_type; if ((ctxt->d & SrcMask) == SrcSI) string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src); if ((ctxt->d & DstMask) == DstDI) string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst); if (ctxt->rep_prefix && (ctxt->d & String)) { unsigned int count; struct read_cache *r = &ctxt->io_read; if ((ctxt->d & SrcMask) == SrcSI) count = ctxt->src.count; else count = ctxt->dst.count; register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX), -count); if (!string_insn_completed(ctxt)) { if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) && (r->end == 0 || r->end != r->pos)) { ctxt->mem_read.end = 0; writeback_registers(ctxt); return EMULATION_RESTART; } goto done; } ctxt->eflags &= ~EFLG_RF; } ctxt->eip = ctxt->_eip; done: if (rc == X86EMUL_PROPAGATE_FAULT) { WARN_ON(ctxt->exception.vector > 0x1f); ctxt->have_exception = true; } if (rc == X86EMUL_INTERCEPTED) return EMULATION_INTERCEPTED; if (rc == X86EMUL_CONTINUE) writeback_registers(ctxt); return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK; twobyte_insn: switch (ctxt->b) { case 0x09: (ctxt->ops->wbinvd)(ctxt); break; case 0x08: case 0x0d: case 0x18: case 0x1f: break; case 0x20: ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg); break; case 0x21: ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val); break; case 0x40 ... 0x4f: if (test_cc(ctxt->b, ctxt->eflags)) ctxt->dst.val = ctxt->src.val; else if (ctxt->mode != X86EMUL_MODE_PROT64 || ctxt->op_bytes != 4) ctxt->dst.type = OP_NONE; break; case 0x80 ... 0x8f: if (test_cc(ctxt->b, ctxt->eflags)) rc = jmp_rel(ctxt, ctxt->src.val); break; case 0x90 ... 0x9f: ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags); break; case 0xae: break; case 0xb6 ... 0xb7: ctxt->dst.bytes = ctxt->op_bytes; ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val : (u16) ctxt->src.val; break; case 0xbe ... 0xbf: ctxt->dst.bytes = ctxt->op_bytes; ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val : (s16) ctxt->src.val; break; case 0xc3: ctxt->dst.bytes = ctxt->op_bytes; ctxt->dst.val = (ctxt->op_bytes == 8) ? 
(u64) ctxt->src.val : (u32) ctxt->src.val; break; default: goto cannot_emulate; } threebyte_insn: if (rc != X86EMUL_CONTINUE) goto done; goto writeback; cannot_emulate: return EMULATION_FAILED; }
169
0
void acpi_memory_plug_cb(HotplugHandler *hotplug_dev, MemHotplugState *mem_st, DeviceState *dev, Error **errp) { MemStatus *mdev; DeviceClass *dc = DEVICE_GET_CLASS(dev); if (!dc->hotpluggable) { return; } mdev = acpi_memory_slot_status(mem_st, dev, errp); if (!mdev) { return; } mdev->dimm = dev; mdev->is_enabled = true; if (dev->hotplugged) { mdev->is_inserting = true; acpi_send_event(DEVICE(hotplug_dev), ACPI_MEMORY_HOTPLUG_STATUS); } }
void acpi_memory_plug_cb(HotplugHandler *hotplug_dev, MemHotplugState *mem_st, DeviceState *dev, Error **errp) { MemStatus *mdev; DeviceClass *dc = DEVICE_GET_CLASS(dev); if (!dc->hotpluggable) { return; } mdev = acpi_memory_slot_status(mem_st, dev, errp); if (!mdev) { return; } mdev->dimm = dev; mdev->is_enabled = true; if (dev->hotplugged) { mdev->is_inserting = true; acpi_send_event(DEVICE(hotplug_dev), ACPI_MEMORY_HOTPLUG_STATUS); } }
170
0
static int dissect_h245_T_al3_sendBufferSize ( tvbuff_t * tvb _U_ , int offset _U_ , asn1_ctx_t * actx _U_ , proto_tree * tree _U_ , int hf_index _U_ ) { # line 332 "../../asn1/h245/h245.cnf" guint32 value ; offset = dissect_per_constrained_integer ( tvb , offset , actx , tree , hf_index , 0U , 16777215U , & value , FALSE ) ; if ( h223_lc_params_temp && h223_lc_params_temp -> al_params ) ( ( h223_al3_params * ) h223_lc_params_temp -> al_params ) -> send_buffer_size = value & 0xfffff ; return offset ; }
static int dissect_h245_T_al3_sendBufferSize ( tvbuff_t * tvb _U_ , int offset _U_ , asn1_ctx_t * actx _U_ , proto_tree * tree _U_ , int hf_index _U_ ) { # line 332 "../../asn1/h245/h245.cnf" guint32 value ; offset = dissect_per_constrained_integer ( tvb , offset , actx , tree , hf_index , 0U , 16777215U , & value , FALSE ) ; if ( h223_lc_params_temp && h223_lc_params_temp -> al_params ) ( ( h223_al3_params * ) h223_lc_params_temp -> al_params ) -> send_buffer_size = value & 0xfffff ; return offset ; }
171
1
static int jpc_qcx_getcompparms(jpc_qcxcp_t *compparms, jpc_cstate_t *cstate, jas_stream_t *in, uint_fast16_t len) { uint_fast8_t tmp; int n; int i; /* Eliminate compiler warning about unused variables. */ cstate = 0; n = 0; if (jpc_getuint8(in, &tmp)) { return -1; } ++n; compparms->qntsty = tmp & 0x1f; compparms->numguard = (tmp >> 5) & 7; switch (compparms->qntsty) { case JPC_QCX_SIQNT: compparms->numstepsizes = 1; break; case JPC_QCX_NOQNT: compparms->numstepsizes = (len - n); break; case JPC_QCX_SEQNT: /* XXX - this is a hack */ compparms->numstepsizes = (len - n) / 2; break; } if (compparms->numstepsizes > 0) { compparms->stepsizes = jas_alloc2(compparms->numstepsizes, sizeof(uint_fast16_t)); assert(compparms->stepsizes); for (i = 0; i < compparms->numstepsizes; ++i) { if (compparms->qntsty == JPC_QCX_NOQNT) { if (jpc_getuint8(in, &tmp)) { return -1; } compparms->stepsizes[i] = JPC_QCX_EXPN(tmp >> 3); } else { if (jpc_getuint16(in, &compparms->stepsizes[i])) { return -1; } } } } else { compparms->stepsizes = 0; } if (jas_stream_error(in) || jas_stream_eof(in)) { jpc_qcx_destroycompparms(compparms); return -1; } return 0; }
static int jpc_qcx_getcompparms(jpc_qcxcp_t *compparms, jpc_cstate_t *cstate, jas_stream_t *in, uint_fast16_t len) { uint_fast8_t tmp; int n; int i; cstate = 0; n = 0; if (jpc_getuint8(in, &tmp)) { return -1; } ++n; compparms->qntsty = tmp & 0x1f; compparms->numguard = (tmp >> 5) & 7; switch (compparms->qntsty) { case JPC_QCX_SIQNT: compparms->numstepsizes = 1; break; case JPC_QCX_NOQNT: compparms->numstepsizes = (len - n); break; case JPC_QCX_SEQNT: compparms->numstepsizes = (len - n) / 2; break; } if (compparms->numstepsizes > 0) { compparms->stepsizes = jas_alloc2(compparms->numstepsizes, sizeof(uint_fast16_t)); assert(compparms->stepsizes); for (i = 0; i < compparms->numstepsizes; ++i) { if (compparms->qntsty == JPC_QCX_NOQNT) { if (jpc_getuint8(in, &tmp)) { return -1; } compparms->stepsizes[i] = JPC_QCX_EXPN(tmp >> 3); } else { if (jpc_getuint16(in, &compparms->stepsizes[i])) { return -1; } } } } else { compparms->stepsizes = 0; } if (jas_stream_error(in) || jas_stream_eof(in)) { jpc_qcx_destroycompparms(compparms); return -1; } return 0; }
172
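Note (idx 172, target 1): in jpc_qcx_getcompparms, numstepsizes is derived directly from the marker-segment length len with no upper bound, and the jas_alloc2 result is only checked with assert(), which compiles out under NDEBUG. A hedged sketch of a bounded, error-checked allocation (the cap below is illustrative, not JasPer's real limit):

    #include <stdint.h>
    #include <stdlib.h>

    #define MAX_STEPSIZES 1024u    /* illustrative cap, an assumption */

    /* Sketch: validate a length-derived element count, then allocate
     * with an ordinary NULL check instead of assert(). */
    static uint_fast16_t *alloc_stepsizes(size_t numstepsizes)
    {
        if (numstepsizes == 0 || numstepsizes > MAX_STEPSIZES)
            return NULL;           /* reject absurd counts up front */
        return calloc(numstepsizes, sizeof(uint_fast16_t));
    }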
1
isdn_net_setcfg(isdn_net_ioctl_cfg * cfg) { isdn_net_dev *p = isdn_net_findif(cfg->name); ulong features; int i; int drvidx; int chidx; char drvid[25]; if (p) { isdn_net_local *lp = p->local; /* See if any registered driver supports the features we want */ features = ((1 << cfg->l2_proto) << ISDN_FEATURE_L2_SHIFT) | ((1 << cfg->l3_proto) << ISDN_FEATURE_L3_SHIFT); for (i = 0; i < ISDN_MAX_DRIVERS; i++) if (dev->drv[i]) if ((dev->drv[i]->interface->features & features) == features) break; if (i == ISDN_MAX_DRIVERS) { printk(KERN_WARNING "isdn_net: No driver with selected features\n"); return -ENODEV; } if (lp->p_encap != cfg->p_encap){ #ifdef CONFIG_ISDN_X25 struct concap_proto * cprot = p -> cprot; #endif if (isdn_net_device_started(p)) { printk(KERN_WARNING "%s: cannot change encap when if is up\n", p->dev->name); return -EBUSY; } #ifdef CONFIG_ISDN_X25 if( cprot && cprot -> pops ) cprot -> pops -> proto_del ( cprot ); p -> cprot = NULL; lp -> dops = NULL; /* ... , prepare for configuration of new one ... */ switch ( cfg -> p_encap ){ case ISDN_NET_ENCAP_X25IFACE: lp -> dops = &isdn_concap_reliable_dl_dops; } /* ... and allocate new one ... */ p -> cprot = isdn_concap_new( cfg -> p_encap ); /* p -> cprot == NULL now if p_encap is not supported by means of the concap_proto mechanism */ /* the protocol is not configured yet; this will happen later when isdn_net_reset() is called */ #endif } switch ( cfg->p_encap ) { case ISDN_NET_ENCAP_SYNCPPP: #ifndef CONFIG_ISDN_PPP printk(KERN_WARNING "%s: SyncPPP support not configured\n", p->dev->name); return -EINVAL; #else p->dev->type = ARPHRD_PPP; /* change ARP type */ p->dev->addr_len = 0; p->dev->do_ioctl = isdn_ppp_dev_ioctl; #endif break; case ISDN_NET_ENCAP_X25IFACE: #ifndef CONFIG_ISDN_X25 printk(KERN_WARNING "%s: isdn-x25 support not configured\n", p->dev->name); return -EINVAL; #else p->dev->type = ARPHRD_X25; /* change ARP type */ p->dev->addr_len = 0; #endif break; case ISDN_NET_ENCAP_CISCOHDLCK: p->dev->do_ioctl = isdn_ciscohdlck_dev_ioctl; break; default: if( cfg->p_encap >= 0 && cfg->p_encap <= ISDN_NET_ENCAP_MAX_ENCAP ) break; printk(KERN_WARNING "%s: encapsulation protocol %d not supported\n", p->dev->name, cfg->p_encap); return -EINVAL; } if (strlen(cfg->drvid)) { /* A bind has been requested ... */ char *c, *e; drvidx = -1; chidx = -1; strcpy(drvid, cfg->drvid); if ((c = strchr(drvid, ','))) { /* The channel-number is appended to the driver-Id with a comma */ chidx = (int) simple_strtoul(c + 1, &e, 10); if (e == c) chidx = -1; *c = '\0'; } for (i = 0; i < ISDN_MAX_DRIVERS; i++) /* Lookup driver-Id in array */ if (!(strcmp(dev->drvid[i], drvid))) { drvidx = i; break; } if ((drvidx == -1) || (chidx == -1)) /* Either driver-Id or channel-number invalid */ return -ENODEV; } else { /* Parameters are valid, so get them */ drvidx = lp->pre_device; chidx = lp->pre_channel; } if (cfg->exclusive > 0) { unsigned long flags; /* If binding is exclusive, try to grab the channel */ spin_lock_irqsave(&dev->lock, flags); if ((i = isdn_get_free_channel(ISDN_USAGE_NET, lp->l2_proto, lp->l3_proto, drvidx, chidx, lp->msn)) < 0) { /* Grab failed, because desired channel is in use */ lp->exclusive = -1; spin_unlock_irqrestore(&dev->lock, flags); return -EBUSY; } /* All went ok, so update isdninfo */ dev->usage[i] = ISDN_USAGE_EXCLUSIVE; isdn_info_update(); spin_unlock_irqrestore(&dev->lock, flags); lp->exclusive = i; } else { /* Non-exclusive binding or unbind. 
*/ lp->exclusive = -1; if ((lp->pre_device != -1) && (cfg->exclusive == -1)) { isdn_unexclusive_channel(lp->pre_device, lp->pre_channel); isdn_free_channel(lp->pre_device, lp->pre_channel, ISDN_USAGE_NET); drvidx = -1; chidx = -1; } } strcpy(lp->msn, cfg->eaz); lp->pre_device = drvidx; lp->pre_channel = chidx; lp->onhtime = cfg->onhtime; lp->charge = cfg->charge; lp->l2_proto = cfg->l2_proto; lp->l3_proto = cfg->l3_proto; lp->cbdelay = cfg->cbdelay; lp->dialmax = cfg->dialmax; lp->triggercps = cfg->triggercps; lp->slavedelay = cfg->slavedelay * HZ; lp->pppbind = cfg->pppbind; lp->dialtimeout = cfg->dialtimeout >= 0 ? cfg->dialtimeout * HZ : -1; lp->dialwait = cfg->dialwait * HZ; if (cfg->secure) lp->flags |= ISDN_NET_SECURE; else lp->flags &= ~ISDN_NET_SECURE; if (cfg->cbhup) lp->flags |= ISDN_NET_CBHUP; else lp->flags &= ~ISDN_NET_CBHUP; switch (cfg->callback) { case 0: lp->flags &= ~(ISDN_NET_CALLBACK | ISDN_NET_CBOUT); break; case 1: lp->flags |= ISDN_NET_CALLBACK; lp->flags &= ~ISDN_NET_CBOUT; break; case 2: lp->flags |= ISDN_NET_CBOUT; lp->flags &= ~ISDN_NET_CALLBACK; break; } lp->flags &= ~ISDN_NET_DIALMODE_MASK; /* first all bits off */ if (cfg->dialmode && !(cfg->dialmode & ISDN_NET_DIALMODE_MASK)) { /* old isdnctrl version, where only 0 or 1 is given */ printk(KERN_WARNING "Old isdnctrl version detected! Please update.\n"); lp->flags |= ISDN_NET_DM_OFF; /* turn on `off' bit */ } else { lp->flags |= cfg->dialmode; /* turn on selected bits */ } if (cfg->chargehup) lp->hupflags |= ISDN_CHARGEHUP; else lp->hupflags &= ~ISDN_CHARGEHUP; if (cfg->ihup) lp->hupflags |= ISDN_INHUP; else lp->hupflags &= ~ISDN_INHUP; if (cfg->chargeint > 10) { lp->hupflags |= ISDN_CHARGEHUP | ISDN_HAVECHARGE | ISDN_MANCHARGE; lp->chargeint = cfg->chargeint * HZ; } if (cfg->p_encap != lp->p_encap) { if (cfg->p_encap == ISDN_NET_ENCAP_RAWIP) { p->dev->header_ops = NULL; p->dev->flags = IFF_NOARP|IFF_POINTOPOINT; } else { p->dev->header_ops = &isdn_header_ops; if (cfg->p_encap == ISDN_NET_ENCAP_ETHER) p->dev->flags = IFF_BROADCAST | IFF_MULTICAST; else p->dev->flags = IFF_NOARP|IFF_POINTOPOINT; } } lp->p_encap = cfg->p_encap; return 0; } return -ENODEV; }
isdn_net_setcfg(isdn_net_ioctl_cfg * cfg) { isdn_net_dev *p = isdn_net_findif(cfg->name); ulong features; int i; int drvidx; int chidx; char drvid[25]; if (p) { isdn_net_local *lp = p->local; features = ((1 << cfg->l2_proto) << ISDN_FEATURE_L2_SHIFT) | ((1 << cfg->l3_proto) << ISDN_FEATURE_L3_SHIFT); for (i = 0; i < ISDN_MAX_DRIVERS; i++) if (dev->drv[i]) if ((dev->drv[i]->interface->features & features) == features) break; if (i == ISDN_MAX_DRIVERS) { printk(KERN_WARNING "isdn_net: No driver with selected features\n"); return -ENODEV; } if (lp->p_encap != cfg->p_encap){ #ifdef CONFIG_ISDN_X25 struct concap_proto * cprot = p -> cprot; #endif if (isdn_net_device_started(p)) { printk(KERN_WARNING "%s: cannot change encap when if is up\n", p->dev->name); return -EBUSY; } #ifdef CONFIG_ISDN_X25 if( cprot && cprot -> pops ) cprot -> pops -> proto_del ( cprot ); p -> cprot = NULL; lp -> dops = NULL; switch ( cfg -> p_encap ){ case ISDN_NET_ENCAP_X25IFACE: lp -> dops = &isdn_concap_reliable_dl_dops; } p -> cprot = isdn_concap_new( cfg -> p_encap ); #endif } switch ( cfg->p_encap ) { case ISDN_NET_ENCAP_SYNCPPP: #ifndef CONFIG_ISDN_PPP printk(KERN_WARNING "%s: SyncPPP support not configured\n", p->dev->name); return -EINVAL; #else p->dev->type = ARPHRD_PPP; p->dev->addr_len = 0; p->dev->do_ioctl = isdn_ppp_dev_ioctl; #endif break; case ISDN_NET_ENCAP_X25IFACE: #ifndef CONFIG_ISDN_X25 printk(KERN_WARNING "%s: isdn-x25 support not configured\n", p->dev->name); return -EINVAL; #else p->dev->type = ARPHRD_X25; p->dev->addr_len = 0; #endif break; case ISDN_NET_ENCAP_CISCOHDLCK: p->dev->do_ioctl = isdn_ciscohdlck_dev_ioctl; break; default: if( cfg->p_encap >= 0 && cfg->p_encap <= ISDN_NET_ENCAP_MAX_ENCAP ) break; printk(KERN_WARNING "%s: encapsulation protocol %d not supported\n", p->dev->name, cfg->p_encap); return -EINVAL; } if (strlen(cfg->drvid)) { char *c, *e; drvidx = -1; chidx = -1; strcpy(drvid, cfg->drvid); if ((c = strchr(drvid, ','))) { chidx = (int) simple_strtoul(c + 1, &e, 10); if (e == c) chidx = -1; *c = '\0'; } for (i = 0; i < ISDN_MAX_DRIVERS; i++) if (!(strcmp(dev->drvid[i], drvid))) { drvidx = i; break; } if ((drvidx == -1) || (chidx == -1)) return -ENODEV; } else { drvidx = lp->pre_device; chidx = lp->pre_channel; } if (cfg->exclusive > 0) { unsigned long flags; spin_lock_irqsave(&dev->lock, flags); if ((i = isdn_get_free_channel(ISDN_USAGE_NET, lp->l2_proto, lp->l3_proto, drvidx, chidx, lp->msn)) < 0) { lp->exclusive = -1; spin_unlock_irqrestore(&dev->lock, flags); return -EBUSY; } dev->usage[i] = ISDN_USAGE_EXCLUSIVE; isdn_info_update(); spin_unlock_irqrestore(&dev->lock, flags); lp->exclusive = i; } else { lp->exclusive = -1; if ((lp->pre_device != -1) && (cfg->exclusive == -1)) { isdn_unexclusive_channel(lp->pre_device, lp->pre_channel); isdn_free_channel(lp->pre_device, lp->pre_channel, ISDN_USAGE_NET); drvidx = -1; chidx = -1; } } strcpy(lp->msn, cfg->eaz); lp->pre_device = drvidx; lp->pre_channel = chidx; lp->onhtime = cfg->onhtime; lp->charge = cfg->charge; lp->l2_proto = cfg->l2_proto; lp->l3_proto = cfg->l3_proto; lp->cbdelay = cfg->cbdelay; lp->dialmax = cfg->dialmax; lp->triggercps = cfg->triggercps; lp->slavedelay = cfg->slavedelay * HZ; lp->pppbind = cfg->pppbind; lp->dialtimeout = cfg->dialtimeout >= 0 ? 
cfg->dialtimeout * HZ : -1; lp->dialwait = cfg->dialwait * HZ; if (cfg->secure) lp->flags |= ISDN_NET_SECURE; else lp->flags &= ~ISDN_NET_SECURE; if (cfg->cbhup) lp->flags |= ISDN_NET_CBHUP; else lp->flags &= ~ISDN_NET_CBHUP; switch (cfg->callback) { case 0: lp->flags &= ~(ISDN_NET_CALLBACK | ISDN_NET_CBOUT); break; case 1: lp->flags |= ISDN_NET_CALLBACK; lp->flags &= ~ISDN_NET_CBOUT; break; case 2: lp->flags |= ISDN_NET_CBOUT; lp->flags &= ~ISDN_NET_CALLBACK; break; } lp->flags &= ~ISDN_NET_DIALMODE_MASK; if (cfg->dialmode && !(cfg->dialmode & ISDN_NET_DIALMODE_MASK)) { printk(KERN_WARNING "Old isdnctrl version detected! Please update.\n"); lp->flags |= ISDN_NET_DM_OFF; } else { lp->flags |= cfg->dialmode; } if (cfg->chargehup) lp->hupflags |= ISDN_CHARGEHUP; else lp->hupflags &= ~ISDN_CHARGEHUP; if (cfg->ihup) lp->hupflags |= ISDN_INHUP; else lp->hupflags &= ~ISDN_INHUP; if (cfg->chargeint > 10) { lp->hupflags |= ISDN_CHARGEHUP | ISDN_HAVECHARGE | ISDN_MANCHARGE; lp->chargeint = cfg->chargeint * HZ; } if (cfg->p_encap != lp->p_encap) { if (cfg->p_encap == ISDN_NET_ENCAP_RAWIP) { p->dev->header_ops = NULL; p->dev->flags = IFF_NOARP|IFF_POINTOPOINT; } else { p->dev->header_ops = &isdn_header_ops; if (cfg->p_encap == ISDN_NET_ENCAP_ETHER) p->dev->flags = IFF_BROADCAST | IFF_MULTICAST; else p->dev->flags = IFF_NOARP|IFF_POINTOPOINT; } } lp->p_encap = cfg->p_encap; return 0; } return -ENODEV; }
173
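Note (idx 173, target 1): isdn_net_setcfg copies ioctl-supplied strings with unbounded strcpy -- strcpy(drvid, cfg->drvid) into char drvid[25], and strcpy(lp->msn, cfg->eaz) -- so an overlong string overruns the fixed buffer. A minimal bounded-copy sketch (user-space stand-in; strnlen is POSIX, and the kernel analogue would be a bounded copy such as strlcpy):

    #include <string.h>

    /* Sketch: copy at most dstlen-1 bytes and always NUL-terminate,
     * instead of trusting the source string's length.
     * Assumes dstlen > 0. */
    static void copy_bounded(char *dst, size_t dstlen, const char *src)
    {
        size_t n = strnlen(src, dstlen - 1);
        memcpy(dst, src, n);
        dst[n] = '\0';
    }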
0
uint8_t *xen_map_cache(hwaddr phys_addr, hwaddr size, uint8_t lock) { MapCacheEntry *entry, *pentry = NULL; hwaddr address_index; hwaddr address_offset; hwaddr cache_size = size; hwaddr test_bit_size; bool translated = false; tryagain: address_index = phys_addr >> MCACHE_BUCKET_SHIFT; address_offset = phys_addr & (MCACHE_BUCKET_SIZE - 1); trace_xen_map_cache(phys_addr); /* test_bit_size is always a multiple of XC_PAGE_SIZE */ if (size) { test_bit_size = size + (phys_addr & (XC_PAGE_SIZE - 1)); if (test_bit_size % XC_PAGE_SIZE) { test_bit_size += XC_PAGE_SIZE - (test_bit_size % XC_PAGE_SIZE); } } else { test_bit_size = XC_PAGE_SIZE; } if (mapcache->last_entry != NULL && mapcache->last_entry->paddr_index == address_index && !lock && !size && test_bits(address_offset >> XC_PAGE_SHIFT, test_bit_size >> XC_PAGE_SHIFT, mapcache->last_entry->valid_mapping)) { trace_xen_map_cache_return(mapcache->last_entry->vaddr_base + address_offset); return mapcache->last_entry->vaddr_base + address_offset; } /* size is always a multiple of MCACHE_BUCKET_SIZE */ if (size) { cache_size = size + address_offset; if (cache_size % MCACHE_BUCKET_SIZE) { cache_size += MCACHE_BUCKET_SIZE - (cache_size % MCACHE_BUCKET_SIZE); } } else { cache_size = MCACHE_BUCKET_SIZE; } entry = &mapcache->entry[address_index % mapcache->nr_buckets]; while (entry && entry->lock && entry->vaddr_base && (entry->paddr_index != address_index || entry->size != cache_size || !test_bits(address_offset >> XC_PAGE_SHIFT, test_bit_size >> XC_PAGE_SHIFT, entry->valid_mapping))) { pentry = entry; entry = entry->next; } if (!entry) { entry = g_malloc0(sizeof (MapCacheEntry)); pentry->next = entry; xen_remap_bucket(entry, cache_size, address_index); } else if (!entry->lock) { if (!entry->vaddr_base || entry->paddr_index != address_index || entry->size != cache_size || !test_bits(address_offset >> XC_PAGE_SHIFT, test_bit_size >> XC_PAGE_SHIFT, entry->valid_mapping)) { xen_remap_bucket(entry, cache_size, address_index); } } if(!test_bits(address_offset >> XC_PAGE_SHIFT, test_bit_size >> XC_PAGE_SHIFT, entry->valid_mapping)) { mapcache->last_entry = NULL; if (!translated && mapcache->phys_offset_to_gaddr) { phys_addr = mapcache->phys_offset_to_gaddr(phys_addr, size, mapcache->opaque); translated = true; goto tryagain; } trace_xen_map_cache_return(NULL); return NULL; } mapcache->last_entry = entry; if (lock) { MapCacheRev *reventry = g_malloc0(sizeof(MapCacheRev)); entry->lock++; reventry->vaddr_req = mapcache->last_entry->vaddr_base + address_offset; reventry->paddr_index = mapcache->last_entry->paddr_index; reventry->size = entry->size; QTAILQ_INSERT_HEAD(&mapcache->locked_entries, reventry, next); } trace_xen_map_cache_return(mapcache->last_entry->vaddr_base + address_offset); return mapcache->last_entry->vaddr_base + address_offset; }
uint8_t *xen_map_cache(hwaddr phys_addr, hwaddr size, uint8_t lock) { MapCacheEntry *entry, *pentry = NULL; hwaddr address_index; hwaddr address_offset; hwaddr cache_size = size; hwaddr test_bit_size; bool translated = false; tryagain: address_index = phys_addr >> MCACHE_BUCKET_SHIFT; address_offset = phys_addr & (MCACHE_BUCKET_SIZE - 1); trace_xen_map_cache(phys_addr); if (size) { test_bit_size = size + (phys_addr & (XC_PAGE_SIZE - 1)); if (test_bit_size % XC_PAGE_SIZE) { test_bit_size += XC_PAGE_SIZE - (test_bit_size % XC_PAGE_SIZE); } } else { test_bit_size = XC_PAGE_SIZE; } if (mapcache->last_entry != NULL && mapcache->last_entry->paddr_index == address_index && !lock && !size && test_bits(address_offset >> XC_PAGE_SHIFT, test_bit_size >> XC_PAGE_SHIFT, mapcache->last_entry->valid_mapping)) { trace_xen_map_cache_return(mapcache->last_entry->vaddr_base + address_offset); return mapcache->last_entry->vaddr_base + address_offset; } if (size) { cache_size = size + address_offset; if (cache_size % MCACHE_BUCKET_SIZE) { cache_size += MCACHE_BUCKET_SIZE - (cache_size % MCACHE_BUCKET_SIZE); } } else { cache_size = MCACHE_BUCKET_SIZE; } entry = &mapcache->entry[address_index % mapcache->nr_buckets]; while (entry && entry->lock && entry->vaddr_base && (entry->paddr_index != address_index || entry->size != cache_size || !test_bits(address_offset >> XC_PAGE_SHIFT, test_bit_size >> XC_PAGE_SHIFT, entry->valid_mapping))) { pentry = entry; entry = entry->next; } if (!entry) { entry = g_malloc0(sizeof (MapCacheEntry)); pentry->next = entry; xen_remap_bucket(entry, cache_size, address_index); } else if (!entry->lock) { if (!entry->vaddr_base || entry->paddr_index != address_index || entry->size != cache_size || !test_bits(address_offset >> XC_PAGE_SHIFT, test_bit_size >> XC_PAGE_SHIFT, entry->valid_mapping)) { xen_remap_bucket(entry, cache_size, address_index); } } if(!test_bits(address_offset >> XC_PAGE_SHIFT, test_bit_size >> XC_PAGE_SHIFT, entry->valid_mapping)) { mapcache->last_entry = NULL; if (!translated && mapcache->phys_offset_to_gaddr) { phys_addr = mapcache->phys_offset_to_gaddr(phys_addr, size, mapcache->opaque); translated = true; goto tryagain; } trace_xen_map_cache_return(NULL); return NULL; } mapcache->last_entry = entry; if (lock) { MapCacheRev *reventry = g_malloc0(sizeof(MapCacheRev)); entry->lock++; reventry->vaddr_req = mapcache->last_entry->vaddr_base + address_offset; reventry->paddr_index = mapcache->last_entry->paddr_index; reventry->size = entry->size; QTAILQ_INSERT_HEAD(&mapcache->locked_entries, reventry, next); } trace_xen_map_cache_return(mapcache->last_entry->vaddr_base + address_offset); return mapcache->last_entry->vaddr_base + address_offset; }
174
0
static ossl_inline LHASH_OF ( type ) * lh_ ## type ## _new ( unsigned long ( * hfn ) ( const type * ) , int ( * cfn ) ( const type * , const type * ) ) { return ( LHASH_OF ( type ) * ) OPENSSL_LH_new ( ( OPENSSL_LH_HASHFUNC ) hfn , ( OPENSSL_LH_COMPFUNC ) cfn ) ; } static ossl_inline void lh_ ## type ## _free ( LHASH_OF ( type ) * lh ) { OPENSSL_LH_free ( ( OPENSSL_LHASH * ) lh ) ; } static ossl_inline type * lh_ ## type ## _insert ( LHASH_OF ( type ) * lh , type * d ) { return ( type * ) OPENSSL_LH_insert ( ( OPENSSL_LHASH * ) lh , d ) ; } static ossl_inline type * lh_ ## type ## _delete ( LHASH_OF ( type ) * lh , const type * d ) { return ( type * ) OPENSSL_LH_delete ( ( OPENSSL_LHASH * ) lh , d ) ; } static ossl_inline type * lh_ ## type ## _retrieve ( LHASH_OF ( type ) * lh , const type * d ) { return ( type * ) OPENSSL_LH_retrieve ( ( OPENSSL_LHASH * ) lh , d ) ; } static ossl_inline int lh_ ## type ## _error ( LHASH_OF ( type ) * lh ) { return OPENSSL_LH_error ( ( OPENSSL_LHASH * ) lh ) ; } static ossl_inline unsigned long lh_ ## type ## _num_items ( LHASH_OF ( type ) * lh ) { return OPENSSL_LH_num_items ( ( OPENSSL_LHASH * ) lh ) ; } static ossl_inline void lh_ ## type ## _node_stats_bio ( const LHASH_OF ( type ) * lh , BIO * out ) { OPENSSL_LH_node_stats_bio ( ( const OPENSSL_LHASH * ) lh , out ) ; } static ossl_inline void lh_ ## type ## _node_usage_stats_bio ( const LHASH_OF ( type ) * lh , BIO * out ) { OPENSSL_LH_node_usage_stats_bio ( ( const OPENSSL_LHASH * ) lh , out ) ; } static ossl_inline void lh_ ## type ## _stats_bio ( const LHASH_OF ( type ) * lh , BIO * out ) { OPENSSL_LH_stats_bio ( ( const OPENSSL_LHASH * ) lh , out ) ; } static ossl_inline unsigned long lh_ ## type ## _get_down_load ( LHASH_OF ( type ) * lh ) { return OPENSSL_LH_get_down_load ( ( OPENSSL_LHASH * ) lh ) ; } static ossl_inline void lh_ ## type ## _set_down_load ( LHASH_OF ( type ) * lh , unsigned long dl ) { OPENSSL_LH_set_down_load ( ( OPENSSL_LHASH * ) lh , dl ) ; } static ossl_inline void lh_ ## type ## _doall ( LHASH_OF ( type ) * lh , void ( * doall ) ( type * ) ) { OPENSSL_LH_doall ( ( OPENSSL_LHASH * ) lh , ( OPENSSL_LH_DOALL_FUNC ) doall ) ; } LHASH_OF ( type ) # define IMPLEMENT_LHASH_DOALL_ARG_CONST ( type , argtype ) int_implement_lhash_doall ( type , argtype , const type ) # define IMPLEMENT_LHASH_DOALL_ARG ( type , argtype ) int_implement_lhash_doall ( type , argtype , type ) # define int_implement_lhash_doall ( type , argtype , cbargtype ) static ossl_inline void lh_ ## type ## _doall_ ## argtype ( LHASH_OF ( type ) * lh , void ( * fn ) ( cbargtype * , argtype * ) , argtype * arg ) { OPENSSL_LH_doall_arg ( ( OPENSSL_LHASH * ) lh , ( OPENSSL_LH_DOALL_FUNCARG ) fn , ( void * ) arg ) ; } LHASH_OF ( type ) DEFINE_LHASH_OF ( OPENSSL_STRING ) ; # ifdef _MSC_VER # pragma warning ( push ) # pragma warning ( disable : 4090 ) # endif DEFINE_LHASH_OF ( OPENSSL_CSTRING )
static ossl_inline LHASH_OF ( type ) * lh_ ## type ## _new ( unsigned long ( * hfn ) ( const type * ) , int ( * cfn ) ( const type * , const type * ) ) { return ( LHASH_OF ( type ) * ) OPENSSL_LH_new ( ( OPENSSL_LH_HASHFUNC ) hfn , ( OPENSSL_LH_COMPFUNC ) cfn ) ; } static ossl_inline void lh_ ## type ## _free ( LHASH_OF ( type ) * lh ) { OPENSSL_LH_free ( ( OPENSSL_LHASH * ) lh ) ; } static ossl_inline type * lh_ ## type ## _insert ( LHASH_OF ( type ) * lh , type * d ) { return ( type * ) OPENSSL_LH_insert ( ( OPENSSL_LHASH * ) lh , d ) ; } static ossl_inline type * lh_ ## type ## _delete ( LHASH_OF ( type ) * lh , const type * d ) { return ( type * ) OPENSSL_LH_delete ( ( OPENSSL_LHASH * ) lh , d ) ; } static ossl_inline type * lh_ ## type ## _retrieve ( LHASH_OF ( type ) * lh , const type * d ) { return ( type * ) OPENSSL_LH_retrieve ( ( OPENSSL_LHASH * ) lh , d ) ; } static ossl_inline int lh_ ## type ## _error ( LHASH_OF ( type ) * lh ) { return OPENSSL_LH_error ( ( OPENSSL_LHASH * ) lh ) ; } static ossl_inline unsigned long lh_ ## type ## _num_items ( LHASH_OF ( type ) * lh ) { return OPENSSL_LH_num_items ( ( OPENSSL_LHASH * ) lh ) ; } static ossl_inline void lh_ ## type ## _node_stats_bio ( const LHASH_OF ( type ) * lh , BIO * out ) { OPENSSL_LH_node_stats_bio ( ( const OPENSSL_LHASH * ) lh , out ) ; } static ossl_inline void lh_ ## type ## _node_usage_stats_bio ( const LHASH_OF ( type ) * lh , BIO * out ) { OPENSSL_LH_node_usage_stats_bio ( ( const OPENSSL_LHASH * ) lh , out ) ; } static ossl_inline void lh_ ## type ## _stats_bio ( const LHASH_OF ( type ) * lh , BIO * out ) { OPENSSL_LH_stats_bio ( ( const OPENSSL_LHASH * ) lh , out ) ; } static ossl_inline unsigned long lh_ ## type ## _get_down_load ( LHASH_OF ( type ) * lh ) { return OPENSSL_LH_get_down_load ( ( OPENSSL_LHASH * ) lh ) ; } static ossl_inline void lh_ ## type ## _set_down_load ( LHASH_OF ( type ) * lh , unsigned long dl ) { OPENSSL_LH_set_down_load ( ( OPENSSL_LHASH * ) lh , dl ) ; } static ossl_inline void lh_ ## type ## _doall ( LHASH_OF ( type ) * lh , void ( * doall ) ( type * ) ) { OPENSSL_LH_doall ( ( OPENSSL_LHASH * ) lh , ( OPENSSL_LH_DOALL_FUNC ) doall ) ; } LHASH_OF ( type ) # define IMPLEMENT_LHASH_DOALL_ARG_CONST ( type , argtype ) int_implement_lhash_doall ( type , argtype , const type ) # define IMPLEMENT_LHASH_DOALL_ARG ( type , argtype ) int_implement_lhash_doall ( type , argtype , type ) # define int_implement_lhash_doall ( type , argtype , cbargtype ) static ossl_inline void lh_ ## type ## _doall_ ## argtype ( LHASH_OF ( type ) * lh , void ( * fn ) ( cbargtype * , argtype * ) , argtype * arg ) { OPENSSL_LH_doall_arg ( ( OPENSSL_LHASH * ) lh , ( OPENSSL_LH_DOALL_FUNCARG ) fn , ( void * ) arg ) ; } LHASH_OF ( type ) DEFINE_LHASH_OF ( OPENSSL_STRING ) ; # ifdef _MSC_VER # pragma warning ( push ) # pragma warning ( disable : 4090 ) # endif DEFINE_LHASH_OF ( OPENSSL_CSTRING )
175
1
isdn_net_addphone(isdn_net_ioctl_phone * phone) { isdn_net_dev *p = isdn_net_findif(phone->name); isdn_net_phone *n; if (p) { if (!(n = kmalloc(sizeof(isdn_net_phone), GFP_KERNEL))) return -ENOMEM; strcpy(n->num, phone->phone); n->next = p->local->phone[phone->outgoing & 1]; p->local->phone[phone->outgoing & 1] = n; return 0; } return -ENODEV; }
isdn_net_addphone(isdn_net_ioctl_phone * phone) { isdn_net_dev *p = isdn_net_findif(phone->name); isdn_net_phone *n; if (p) { if (!(n = kmalloc(sizeof(isdn_net_phone), GFP_KERNEL))) return -ENOMEM; strcpy(n->num, phone->phone); n->next = p->local->phone[phone->outgoing & 1]; p->local->phone[phone->outgoing & 1] = n; return 0; } return -ENODEV; }
177
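Note (idx 177, target 1): isdn_net_addphone has the same unbounded-copy shape -- strcpy(n->num, phone->phone) writes an ioctl-supplied string into the fixed-size n->num array; the copy_bounded sketch under idx 173 applies unchanged.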
1
static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt, u16 selector, int seg, u8 cpl, bool in_task_switch) { struct desc_struct seg_desc, old_desc; u8 dpl, rpl; unsigned err_vec = GP_VECTOR; u32 err_code = 0; bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */ ulong desc_addr; int ret; u16 dummy; u32 base3 = 0; memset(&seg_desc, 0, sizeof seg_desc); if (ctxt->mode == X86EMUL_MODE_REAL) { /* set real mode segment descriptor (keep limit etc. for * unreal mode) */ ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg); set_desc_base(&seg_desc, selector << 4); goto load; } else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) { /* VM86 needs a clean new segment descriptor */ set_desc_base(&seg_desc, selector << 4); set_desc_limit(&seg_desc, 0xffff); seg_desc.type = 3; seg_desc.p = 1; seg_desc.s = 1; seg_desc.dpl = 3; goto load; } rpl = selector & 3; /* NULL selector is not valid for TR, CS and SS (except for long mode) */ if ((seg == VCPU_SREG_CS || (seg == VCPU_SREG_SS && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)) || seg == VCPU_SREG_TR) && null_selector) goto exception; /* TR should be in GDT only */ if (seg == VCPU_SREG_TR && (selector & (1 << 2))) goto exception; if (null_selector) /* for NULL selector skip all following checks */ goto load; ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr); if (ret != X86EMUL_CONTINUE) return ret; err_code = selector & 0xfffc; err_vec = in_task_switch ? TS_VECTOR : GP_VECTOR; /* can't load system descriptor into segment selector */ if (seg <= VCPU_SREG_GS && !seg_desc.s) goto exception; if (!seg_desc.p) { err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR; goto exception; } dpl = seg_desc.dpl; switch (seg) { case VCPU_SREG_SS: /* * segment is not a writable data segment or segment * selector's RPL != CPL or segment selector's RPL != CPL */ if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl) goto exception; break; case VCPU_SREG_CS: if (!(seg_desc.type & 8)) goto exception; if (seg_desc.type & 4) { /* conforming */ if (dpl > cpl) goto exception; } else { /* nonconforming */ if (rpl > cpl || dpl != cpl) goto exception; } /* in long-mode d/b must be clear if l is set */ if (seg_desc.d && seg_desc.l) { u64 efer = 0; ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); if (efer & EFER_LMA) goto exception; } /* CS(RPL) <- CPL */ selector = (selector & 0xfffc) | cpl; break; case VCPU_SREG_TR: if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9)) goto exception; old_desc = seg_desc; seg_desc.type |= 2; /* busy */ ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc, sizeof(seg_desc), &ctxt->exception); if (ret != X86EMUL_CONTINUE) return ret; break; case VCPU_SREG_LDTR: if (seg_desc.s || seg_desc.type != 2) goto exception; break; default: /* DS, ES, FS, or GS */ /* * segment is not a data or readable code segment or * ((segment is a data or nonconforming code segment) * and (both RPL and CPL > DPL)) */ if ((seg_desc.type & 0xa) == 0x8 || (((seg_desc.type & 0xc) != 0xc) && (rpl > dpl && cpl > dpl))) goto exception; break; } if (seg_desc.s) { /* mark segment as accessed */ seg_desc.type |= 1; ret = write_segment_descriptor(ctxt, selector, &seg_desc); if (ret != X86EMUL_CONTINUE) return ret; } else if (ctxt->mode == X86EMUL_MODE_PROT64) { ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3, sizeof(base3), &ctxt->exception); if (ret != X86EMUL_CONTINUE) return ret; } load: ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg); return X86EMUL_CONTINUE; 
exception: return emulate_exception(ctxt, err_vec, err_code, true); }
static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt, u16 selector, int seg, u8 cpl, bool in_task_switch) { struct desc_struct seg_desc, old_desc; u8 dpl, rpl; unsigned err_vec = GP_VECTOR; u32 err_code = 0; bool null_selector = !(selector & ~0x3); ulong desc_addr; int ret; u16 dummy; u32 base3 = 0; memset(&seg_desc, 0, sizeof seg_desc); if (ctxt->mode == X86EMUL_MODE_REAL) { ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg); set_desc_base(&seg_desc, selector << 4); goto load; } else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) { set_desc_base(&seg_desc, selector << 4); set_desc_limit(&seg_desc, 0xffff); seg_desc.type = 3; seg_desc.p = 1; seg_desc.s = 1; seg_desc.dpl = 3; goto load; } rpl = selector & 3; if ((seg == VCPU_SREG_CS || (seg == VCPU_SREG_SS && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)) || seg == VCPU_SREG_TR) && null_selector) goto exception; if (seg == VCPU_SREG_TR && (selector & (1 << 2))) goto exception; if (null_selector) goto load; ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr); if (ret != X86EMUL_CONTINUE) return ret; err_code = selector & 0xfffc; err_vec = in_task_switch ? TS_VECTOR : GP_VECTOR; if (seg <= VCPU_SREG_GS && !seg_desc.s) goto exception; if (!seg_desc.p) { err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR; goto exception; } dpl = seg_desc.dpl; switch (seg) { case VCPU_SREG_SS: if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl) goto exception; break; case VCPU_SREG_CS: if (!(seg_desc.type & 8)) goto exception; if (seg_desc.type & 4) { if (dpl > cpl) goto exception; } else { if (rpl > cpl || dpl != cpl) goto exception; } if (seg_desc.d && seg_desc.l) { u64 efer = 0; ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); if (efer & EFER_LMA) goto exception; } selector = (selector & 0xfffc) | cpl; break; case VCPU_SREG_TR: if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9)) goto exception; old_desc = seg_desc; seg_desc.type |= 2; ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc, sizeof(seg_desc), &ctxt->exception); if (ret != X86EMUL_CONTINUE) return ret; break; case VCPU_SREG_LDTR: if (seg_desc.s || seg_desc.type != 2) goto exception; break; default: if ((seg_desc.type & 0xa) == 0x8 || (((seg_desc.type & 0xc) != 0xc) && (rpl > dpl && cpl > dpl))) goto exception; break; } if (seg_desc.s) { seg_desc.type |= 1; ret = write_segment_descriptor(ctxt, selector, &seg_desc); if (ret != X86EMUL_CONTINUE) return ret; } else if (ctxt->mode == X86EMUL_MODE_PROT64) { ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3, sizeof(base3), &ctxt->exception); if (ret != X86EMUL_CONTINUE) return ret; } load: ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg); return X86EMUL_CONTINUE; exception: return emulate_exception(ctxt, err_vec, err_code, true); }
178
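Record 178's default case applies the x86 data-segment privilege rule: fault if the descriptor is execute-only code, or if it is a data/nonconforming-code segment and both RPL and CPL exceed DPL. A minimal standalone sketch of that exact test; the function and parameter names are illustrative, not the emulator's real types.

#include <stdbool.h>
#include <stdint.h>

/* Returns true if loading the selector into DS/ES/FS/GS must raise #GP:
 * either the descriptor is execute-only code (bit 3 set, readable bit
 * clear), or it is a data or nonconforming code segment and the caller
 * lacks privilege (both RPL and CPL numerically above DPL). */
static bool data_seg_load_faults(uint8_t type, uint8_t rpl,
                                 uint8_t dpl, uint8_t cpl)
{
    if ((type & 0xa) == 0x8)          /* execute-only code segment */
        return true;
    if ((type & 0xc) != 0xc &&        /* not a conforming code segment */
        rpl > dpl && cpl > dpl)       /* insufficient privilege */
        return true;
    return false;
}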
0
static void qtmd_update_model ( struct qtmd_model * model ) { struct qtmd_modelsym tmp ; int i , j ; if ( -- model -> shiftsleft ) { for ( i = model -> entries - 1 ; i >= 0 ; i -- ) { model -> syms [ i ] . cumfreq >>= 1 ; if ( model -> syms [ i ] . cumfreq <= model -> syms [ i + 1 ] . cumfreq ) { model -> syms [ i ] . cumfreq = model -> syms [ i + 1 ] . cumfreq + 1 ; } } } else { model -> shiftsleft = 50 ; for ( i = 0 ; i < model -> entries ; i ++ ) { model -> syms [ i ] . cumfreq -= model -> syms [ i + 1 ] . cumfreq ; model -> syms [ i ] . cumfreq ++ ; model -> syms [ i ] . cumfreq >>= 1 ; } for ( i = 0 ; i < model -> entries - 1 ; i ++ ) { for ( j = i + 1 ; j < model -> entries ; j ++ ) { if ( model -> syms [ i ] . cumfreq < model -> syms [ j ] . cumfreq ) { tmp = model -> syms [ i ] ; model -> syms [ i ] = model -> syms [ j ] ; model -> syms [ j ] = tmp ; } } } for ( i = model -> entries - 1 ; i >= 0 ; i -- ) { model -> syms [ i ] . cumfreq += model -> syms [ i + 1 ] . cumfreq ; } } }
static void qtmd_update_model ( struct qtmd_model * model ) { struct qtmd_modelsym tmp ; int i , j ; if ( -- model -> shiftsleft ) { for ( i = model -> entries - 1 ; i >= 0 ; i -- ) { model -> syms [ i ] . cumfreq >>= 1 ; if ( model -> syms [ i ] . cumfreq <= model -> syms [ i + 1 ] . cumfreq ) { model -> syms [ i ] . cumfreq = model -> syms [ i + 1 ] . cumfreq + 1 ; } } } else { model -> shiftsleft = 50 ; for ( i = 0 ; i < model -> entries ; i ++ ) { model -> syms [ i ] . cumfreq -= model -> syms [ i + 1 ] . cumfreq ; model -> syms [ i ] . cumfreq ++ ; model -> syms [ i ] . cumfreq >>= 1 ; } for ( i = 0 ; i < model -> entries - 1 ; i ++ ) { for ( j = i + 1 ; j < model -> entries ; j ++ ) { if ( model -> syms [ i ] . cumfreq < model -> syms [ j ] . cumfreq ) { tmp = model -> syms [ i ] ; model -> syms [ i ] = model -> syms [ j ] ; model -> syms [ j ] = tmp ; } } } for ( i = model -> entries - 1 ; i >= 0 ; i -- ) { model -> syms [ i ] . cumfreq += model -> syms [ i + 1 ] . cumfreq ; } } }
179
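The else branch of record 179 is a periodic frequency rescale for the Quantum arithmetic-coder model; the if branch is the cheap per-update variant shown below in isolation. A self-contained sketch assuming the same table layout (entries+1 slots, cumulative frequencies strictly decreasing, a sentinel in the last slot); the struct name is illustrative.

#include <stdint.h>

struct sym { uint16_t cumfreq; };

/* Halve every cumulative frequency, then bump any entry that collided
 * with its successor so the table stays strictly decreasing:
 * syms[i].cumfreq > syms[i+1].cumfreq for all i. */
static void rescale(struct sym *syms, int entries)
{
    for (int i = entries - 1; i >= 0; i--) {
        syms[i].cumfreq >>= 1;
        if (syms[i].cumfreq <= syms[i + 1].cumfreq)
            syms[i].cumfreq = syms[i + 1].cumfreq + 1;
    }
}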
1
krb5_gss_process_context_token(minor_status, context_handle, token_buffer) OM_uint32 *minor_status; gss_ctx_id_t context_handle; gss_buffer_t token_buffer; { krb5_gss_ctx_id_rec *ctx; OM_uint32 majerr; ctx = (krb5_gss_ctx_id_t) context_handle; if (! ctx->established) { *minor_status = KG_CTX_INCOMPLETE; return(GSS_S_NO_CONTEXT); } /* "unseal" the token */ if (GSS_ERROR(majerr = kg_unseal(minor_status, context_handle, token_buffer, GSS_C_NO_BUFFER, NULL, NULL, KG_TOK_DEL_CTX))) return(majerr); /* that's it. delete the context */ return(krb5_gss_delete_sec_context(minor_status, &context_handle, GSS_C_NO_BUFFER)); }
krb5_gss_process_context_token(minor_status, context_handle, token_buffer) OM_uint32 *minor_status; gss_ctx_id_t context_handle; gss_buffer_t token_buffer; { krb5_gss_ctx_id_rec *ctx; OM_uint32 majerr; ctx = (krb5_gss_ctx_id_t) context_handle; if (! ctx->established) { *minor_status = KG_CTX_INCOMPLETE; return(GSS_S_NO_CONTEXT); } if (GSS_ERROR(majerr = kg_unseal(minor_status, context_handle, token_buffer, GSS_C_NO_BUFFER, NULL, NULL, KG_TOK_DEL_CTX))) return(majerr); return(krb5_gss_delete_sec_context(minor_status, &context_handle, GSS_C_NO_BUFFER)); }
181
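Record 181 ends by deleting the security context through a local copy of the caller's handle, so the caller's own copy is left pointing at freed state. A hypothetical user-space analogue of that hazard, under the assumption that the caller keeps using its handle afterwards; all names here are made up for illustration.

#include <stdlib.h>

struct ctx { int established; };

static void delete_ctx(struct ctx **handle)
{
    free(*handle);
    *handle = NULL;      /* nulls only the callee's copy of the pointer */
}

static int process_token(struct ctx *handle)
{
    delete_ctx(&handle); /* the caller's copy of 'handle' is untouched */
    return 0;            /* any later use by the caller is use-after-free */
}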
0
static void get_symbols ( const UDateFormat * fmt , UDateFormatSymbolType type , UChar * array [ ] , int32_t arrayLength , int32_t lowestIndex , int32_t firstIndex , UErrorCode * status ) { int32_t count , i ; if ( U_FAILURE ( * status ) ) { return ; } count = udat_countSymbols ( fmt , type ) ; if ( count != arrayLength + lowestIndex ) { return ; } for ( i = 0 ; i < arrayLength ; i ++ ) { int32_t idx = ( i + firstIndex ) % arrayLength ; int32_t size = 1 + udat_getSymbols ( fmt , type , idx + lowestIndex , NULL , 0 , status ) ; array [ idx ] = ( UChar * ) malloc ( sizeof ( UChar ) * size ) ; * status = U_ZERO_ERROR ; udat_getSymbols ( fmt , type , idx + lowestIndex , array [ idx ] , size , status ) ; } }
static void get_symbols ( const UDateFormat * fmt , UDateFormatSymbolType type , UChar * array [ ] , int32_t arrayLength , int32_t lowestIndex , int32_t firstIndex , UErrorCode * status ) { int32_t count , i ; if ( U_FAILURE ( * status ) ) { return ; } count = udat_countSymbols ( fmt , type ) ; if ( count != arrayLength + lowestIndex ) { return ; } for ( i = 0 ; i < arrayLength ; i ++ ) { int32_t idx = ( i + firstIndex ) % arrayLength ; int32_t size = 1 + udat_getSymbols ( fmt , type , idx + lowestIndex , NULL , 0 , status ) ; array [ idx ] = ( UChar * ) malloc ( sizeof ( UChar ) * size ) ; * status = U_ZERO_ERROR ; udat_getSymbols ( fmt , type , idx + lowestIndex , array [ idx ] , size , status ) ; } }
182
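Record 182 uses the common ICU "preflight" idiom: call udat_getSymbols once with a NULL buffer to learn the required length, allocate size+1, then call again to fill. A minimal C analogue of the same two-pass pattern using snprintf:

#include <stdio.h>
#include <stdlib.h>

static char *format_alloc(int value)
{
    int n = snprintf(NULL, 0, "value=%d", value); /* preflight: length only */
    char *buf = malloc((size_t)n + 1);            /* +1 for the terminator */
    if (buf)
        snprintf(buf, (size_t)n + 1, "value=%d", value); /* second pass fills */
    return buf;
}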
1
static int em_call_far(struct x86_emulate_ctxt *ctxt) { u16 sel, old_cs; ulong old_eip; int rc; old_cs = get_segment_selector(ctxt, VCPU_SREG_CS); old_eip = ctxt->_eip; memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2); if (load_segment_descriptor(ctxt, sel, VCPU_SREG_CS)) return X86EMUL_CONTINUE; ctxt->_eip = 0; memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes); ctxt->src.val = old_cs; rc = em_push(ctxt); if (rc != X86EMUL_CONTINUE) return rc; ctxt->src.val = old_eip; return em_push(ctxt); }
static int em_call_far(struct x86_emulate_ctxt *ctxt) { u16 sel, old_cs; ulong old_eip; int rc; old_cs = get_segment_selector(ctxt, VCPU_SREG_CS); old_eip = ctxt->_eip; memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2); if (load_segment_descriptor(ctxt, sel, VCPU_SREG_CS)) return X86EMUL_CONTINUE; ctxt->_eip = 0; memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes); ctxt->src.val = old_cs; rc = em_push(ctxt); if (rc != X86EMUL_CONTINUE) return rc; ctxt->src.val = old_eip; return em_push(ctxt); }
184
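One pattern worth noting in record 184: the status returned by the CS load is tested but replaced with X86EMUL_CONTINUE, so whatever that nonzero status signalled is not surfaced to the caller. A sketch of the propagating variant, assuming the same zero-means-continue convention as the surrounding records; the names and the stub are illustrative, not KVM's real API.

enum { EMUL_CONTINUE = 0, EMUL_FAULT = 1 };

/* Stub standing in for the descriptor load; the real one may fault. */
static int load_cs(unsigned short sel) { (void)sel; return EMUL_CONTINUE; }

static int call_far_propagating(unsigned short sel)
{
    int rc = load_cs(sel);
    if (rc != EMUL_CONTINUE)
        return rc;   /* propagate the status instead of swallowing it */
    /* ... push old CS:EIP, then load the new EIP ... */
    return EMUL_CONTINUE;
}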
1
static void print_mi_data ( VP9_COMMON * cm , FILE * file , const char * descriptor , size_t member_offset ) { int mi_row , mi_col ; int mi_index = 0 ; MODE_INFO * * mi = cm -> mi_grid_visible ; int rows = cm -> mi_rows ; int cols = cm -> mi_cols ; char prefix = descriptor [ 0 ] ; log_frame_info ( cm , descriptor , file ) ; mi_index = 0 ; for ( mi_row = 0 ; mi_row < rows ; mi_row ++ ) { fprintf ( file , "%c " , prefix ) ; for ( mi_col = 0 ; mi_col < cols ; mi_col ++ ) { fprintf ( file , "%2d " , * ( ( int * ) ( ( char * ) ( & mi [ mi_index ] -> mbmi ) + member_offset ) ) ) ; mi_index ++ ; } fprintf ( file , "\n" ) ; mi_index += 8 ; } fprintf ( file , "\n" ) ; }
static void print_mi_data ( VP9_COMMON * cm , FILE * file , const char * descriptor , size_t member_offset ) { int mi_row , mi_col ; int mi_index = 0 ; MODE_INFO * * mi = cm -> mi_grid_visible ; int rows = cm -> mi_rows ; int cols = cm -> mi_cols ; char prefix = descriptor [ 0 ] ; log_frame_info ( cm , descriptor , file ) ; mi_index = 0 ; for ( mi_row = 0 ; mi_row < rows ; mi_row ++ ) { fprintf ( file , "%c " , prefix ) ; for ( mi_col = 0 ; mi_col < cols ; mi_col ++ ) { fprintf ( file , "%2d " , * ( ( int * ) ( ( char * ) ( & mi [ mi_index ] -> mbmi ) + member_offset ) ) ) ; mi_index ++ ; } fprintf ( file , "\n" ) ; mi_index += 8 ; } fprintf ( file , "\n" ) ; }
185
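Record 185 reads an int member at a runtime byte offset into each mode-info struct via pointer arithmetic. The portable way to obtain such an offset is offsetof(); a runnable sketch with an illustrative struct standing in for the real MB_MODE_INFO:

#include <stddef.h>
#include <stdio.h>

struct mbmi { int mode; int skip; };

static void print_member(const struct mbmi *arr, int n, size_t off)
{
    for (int i = 0; i < n; i++)   /* same cast dance as the record */
        printf("%2d ", *(const int *)((const char *)&arr[i] + off));
    printf("\n");
}

int main(void)
{
    struct mbmi grid[3] = { {1, 0}, {2, 1}, {3, 0} };
    print_member(grid, 3, offsetof(struct mbmi, skip)); /* prints 0 1 0 */
    return 0;
}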
1
static int esp6_input(struct xfrm_state *x, struct sk_buff *skb) { struct ip_esp_hdr *esph; struct esp_data *esp = x->data; struct crypto_aead *aead = esp->aead; struct aead_request *req; struct sk_buff *trailer; int elen = skb->len - sizeof(*esph) - crypto_aead_ivsize(aead); int nfrags; int ret = 0; void *tmp; u8 *iv; struct scatterlist *sg; struct scatterlist *asg; if (!pskb_may_pull(skb, sizeof(*esph))) { ret = -EINVAL; goto out; } if (elen <= 0) { ret = -EINVAL; goto out; } if ((nfrags = skb_cow_data(skb, 0, &trailer)) < 0) { ret = -EINVAL; goto out; } ret = -ENOMEM; tmp = esp_alloc_tmp(aead, nfrags + 1); if (!tmp) goto out; ESP_SKB_CB(skb)->tmp = tmp; iv = esp_tmp_iv(aead, tmp); req = esp_tmp_req(aead, iv); asg = esp_req_sg(aead, req); sg = asg + 1; skb->ip_summed = CHECKSUM_NONE; esph = (struct ip_esp_hdr *)skb->data; /* Get ivec. This can be wrong, check against another impls. */ iv = esph->enc_data; sg_init_table(sg, nfrags); skb_to_sgvec(skb, sg, sizeof(*esph) + crypto_aead_ivsize(aead), elen); sg_init_one(asg, esph, sizeof(*esph)); aead_request_set_callback(req, 0, esp_input_done, skb); aead_request_set_crypt(req, sg, sg, elen, iv); aead_request_set_assoc(req, asg, sizeof(*esph)); ret = crypto_aead_decrypt(req); if (ret == -EINPROGRESS) goto out; ret = esp_input_done2(skb, ret); out: return ret; }
static int esp6_input(struct xfrm_state *x, struct sk_buff *skb) { struct ip_esp_hdr *esph; struct esp_data *esp = x->data; struct crypto_aead *aead = esp->aead; struct aead_request *req; struct sk_buff *trailer; int elen = skb->len - sizeof(*esph) - crypto_aead_ivsize(aead); int nfrags; int ret = 0; void *tmp; u8 *iv; struct scatterlist *sg; struct scatterlist *asg; if (!pskb_may_pull(skb, sizeof(*esph))) { ret = -EINVAL; goto out; } if (elen <= 0) { ret = -EINVAL; goto out; } if ((nfrags = skb_cow_data(skb, 0, &trailer)) < 0) { ret = -EINVAL; goto out; } ret = -ENOMEM; tmp = esp_alloc_tmp(aead, nfrags + 1); if (!tmp) goto out; ESP_SKB_CB(skb)->tmp = tmp; iv = esp_tmp_iv(aead, tmp); req = esp_tmp_req(aead, iv); asg = esp_req_sg(aead, req); sg = asg + 1; skb->ip_summed = CHECKSUM_NONE; esph = (struct ip_esp_hdr *)skb->data; iv = esph->enc_data; sg_init_table(sg, nfrags); skb_to_sgvec(skb, sg, sizeof(*esph) + crypto_aead_ivsize(aead), elen); sg_init_one(asg, esph, sizeof(*esph)); aead_request_set_callback(req, 0, esp_input_done, skb); aead_request_set_crypt(req, sg, sg, elen, iv); aead_request_set_assoc(req, asg, sizeof(*esph)); ret = crypto_aead_decrypt(req); if (ret == -EINPROGRESS) goto out; ret = esp_input_done2(skb, ret); out: return ret; }
186
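Record 186 derives the encrypted-payload length by subtracting the ESP header and IV sizes from the packet length, and must reject non-positive results before touching the buffer. A minimal sketch of that length check; the header constant is the usual SPI-plus-sequence-number size, stated here as an assumption.

#include <stddef.h>

#define ESP_HDR_LEN 8   /* SPI + sequence number (assumed layout) */

/* Returns the payload length, or -1 if the packet is too short to hold
 * header + IV + at least one payload byte. */
static long esp_payload_len(size_t pkt_len, size_t iv_len)
{
    long elen = (long)pkt_len - ESP_HDR_LEN - (long)iv_len;
    return elen > 0 ? elen : -1;
}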
1
static int esp_input(struct xfrm_state *x, struct sk_buff *skb) { struct ip_esp_hdr *esph; struct esp_data *esp = x->data; struct crypto_aead *aead = esp->aead; struct aead_request *req; struct sk_buff *trailer; int elen = skb->len - sizeof(*esph) - crypto_aead_ivsize(aead); int nfrags; void *tmp; u8 *iv; struct scatterlist *sg; struct scatterlist *asg; int err = -EINVAL; if (!pskb_may_pull(skb, sizeof(*esph))) goto out; if (elen <= 0) goto out; if ((err = skb_cow_data(skb, 0, &trailer)) < 0) goto out; nfrags = err; err = -ENOMEM; tmp = esp_alloc_tmp(aead, nfrags + 1); if (!tmp) goto out; ESP_SKB_CB(skb)->tmp = tmp; iv = esp_tmp_iv(aead, tmp); req = esp_tmp_req(aead, iv); asg = esp_req_sg(aead, req); sg = asg + 1; skb->ip_summed = CHECKSUM_NONE; esph = (struct ip_esp_hdr *)skb->data; /* Get ivec. This can be wrong, check against another impls. */ iv = esph->enc_data; sg_init_table(sg, nfrags); skb_to_sgvec(skb, sg, sizeof(*esph) + crypto_aead_ivsize(aead), elen); sg_init_one(asg, esph, sizeof(*esph)); aead_request_set_callback(req, 0, esp_input_done, skb); aead_request_set_crypt(req, sg, sg, elen, iv); aead_request_set_assoc(req, asg, sizeof(*esph)); err = crypto_aead_decrypt(req); if (err == -EINPROGRESS) goto out; err = esp_input_done2(skb, err); out: return err; }
static int esp_input(struct xfrm_state *x, struct sk_buff *skb) { struct ip_esp_hdr *esph; struct esp_data *esp = x->data; struct crypto_aead *aead = esp->aead; struct aead_request *req; struct sk_buff *trailer; int elen = skb->len - sizeof(*esph) - crypto_aead_ivsize(aead); int nfrags; void *tmp; u8 *iv; struct scatterlist *sg; struct scatterlist *asg; int err = -EINVAL; if (!pskb_may_pull(skb, sizeof(*esph))) goto out; if (elen <= 0) goto out; if ((err = skb_cow_data(skb, 0, &trailer)) < 0) goto out; nfrags = err; err = -ENOMEM; tmp = esp_alloc_tmp(aead, nfrags + 1); if (!tmp) goto out; ESP_SKB_CB(skb)->tmp = tmp; iv = esp_tmp_iv(aead, tmp); req = esp_tmp_req(aead, iv); asg = esp_req_sg(aead, req); sg = asg + 1; skb->ip_summed = CHECKSUM_NONE; esph = (struct ip_esp_hdr *)skb->data; iv = esph->enc_data; sg_init_table(sg, nfrags); skb_to_sgvec(skb, sg, sizeof(*esph) + crypto_aead_ivsize(aead), elen); sg_init_one(asg, esph, sizeof(*esph)); aead_request_set_callback(req, 0, esp_input_done, skb); aead_request_set_crypt(req, sg, sg, elen, iv); aead_request_set_assoc(req, asg, sizeof(*esph)); err = crypto_aead_decrypt(req); if (err == -EINPROGRESS) goto out; err = esp_input_done2(skb, err); out: return err; }
188
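Records 186 and 188 both use the kernel idiom of a single int carrying either a negative errno or a non-negative count ("err = skb_cow_data(...); ... nfrags = err;"). A standalone sketch of that convention; count_fragments is a made-up stand-in for the real helper.

#include <errno.h>

static int count_fragments(int valid)   /* illustrative stand-in */
{
    return valid ? 3 : -EINVAL;         /* count on success, -errno on failure */
}

static int use_fragments(int valid)
{
    int err, nfrags;

    if ((err = count_fragments(valid)) < 0)
        return err;   /* negative: an errno, bail out */
    nfrags = err;     /* non-negative: reinterpret as the fragment count */
    return nfrags;
}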
1
krb5_gss_wrap_size_limit(minor_status, context_handle, conf_req_flag, qop_req, req_output_size, max_input_size) OM_uint32 *minor_status; gss_ctx_id_t context_handle; int conf_req_flag; gss_qop_t qop_req; OM_uint32 req_output_size; OM_uint32 *max_input_size; { krb5_gss_ctx_id_rec *ctx; OM_uint32 data_size, conflen; OM_uint32 ohlen; int overhead; /* only default qop is allowed */ if (qop_req != GSS_C_QOP_DEFAULT) { *minor_status = (OM_uint32) G_UNKNOWN_QOP; return(GSS_S_FAILURE); } ctx = (krb5_gss_ctx_id_rec *) context_handle; if (! ctx->established) { *minor_status = KG_CTX_INCOMPLETE; return(GSS_S_NO_CONTEXT); } if (ctx->proto == 1) { /* No pseudo-ASN.1 wrapper overhead, so no sequence length and OID. */ OM_uint32 sz = req_output_size; /* Token header: 16 octets. */ if (conf_req_flag) { krb5_key key; krb5_enctype enctype; key = ctx->have_acceptor_subkey ? ctx->acceptor_subkey : ctx->subkey; enctype = key->keyblock.enctype; while (sz > 0 && krb5_encrypt_size(sz, enctype) + 16 > req_output_size) sz--; /* Allow for encrypted copy of header. */ if (sz > 16) sz -= 16; else sz = 0; #ifdef CFX_EXERCISE /* Allow for EC padding. In the MIT implementation, only added while testing. */ if (sz > 65535) sz -= 65535; else sz = 0; #endif } else { krb5_cksumtype cksumtype; krb5_error_code err; size_t cksumsize; cksumtype = ctx->have_acceptor_subkey ? ctx->acceptor_subkey_cksumtype : ctx->cksumtype; err = krb5_c_checksum_length(ctx->k5_context, cksumtype, &cksumsize); if (err) { *minor_status = err; return GSS_S_FAILURE; } /* Allow for token header and checksum. */ if (sz < 16 + cksumsize) sz = 0; else sz -= (16 + cksumsize); } *max_input_size = sz; *minor_status = 0; return GSS_S_COMPLETE; } /* Calculate the token size and subtract that from the output size */ overhead = 7 + ctx->mech_used->length; data_size = req_output_size; conflen = kg_confounder_size(ctx->k5_context, ctx->enc->keyblock.enctype); data_size = (conflen + data_size + 8) & (~(OM_uint32)7); ohlen = g_token_size(ctx->mech_used, (unsigned int) (data_size + ctx->cksum_size + 14)) - req_output_size; if (ohlen+overhead < req_output_size) /* * Cannot have trailer length that will cause us to pad over our * length. */ *max_input_size = (req_output_size - ohlen - overhead) & (~(OM_uint32)7); else *max_input_size = 0; *minor_status = 0; return(GSS_S_COMPLETE); }
krb5_gss_wrap_size_limit(minor_status, context_handle, conf_req_flag, qop_req, req_output_size, max_input_size) OM_uint32 *minor_status; gss_ctx_id_t context_handle; int conf_req_flag; gss_qop_t qop_req; OM_uint32 req_output_size; OM_uint32 *max_input_size; { krb5_gss_ctx_id_rec *ctx; OM_uint32 data_size, conflen; OM_uint32 ohlen; int overhead; if (qop_req != GSS_C_QOP_DEFAULT) { *minor_status = (OM_uint32) G_UNKNOWN_QOP; return(GSS_S_FAILURE); } ctx = (krb5_gss_ctx_id_rec *) context_handle; if (! ctx->established) { *minor_status = KG_CTX_INCOMPLETE; return(GSS_S_NO_CONTEXT); } if (ctx->proto == 1) { OM_uint32 sz = req_output_size; if (conf_req_flag) { krb5_key key; krb5_enctype enctype; key = ctx->have_acceptor_subkey ? ctx->acceptor_subkey : ctx->subkey; enctype = key->keyblock.enctype; while (sz > 0 && krb5_encrypt_size(sz, enctype) + 16 > req_output_size) sz--; if (sz > 16) sz -= 16; else sz = 0; #ifdef CFX_EXERCISE if (sz > 65535) sz -= 65535; else sz = 0; #endif } else { krb5_cksumtype cksumtype; krb5_error_code err; size_t cksumsize; cksumtype = ctx->have_acceptor_subkey ? ctx->acceptor_subkey_cksumtype : ctx->cksumtype; err = krb5_c_checksum_length(ctx->k5_context, cksumtype, &cksumsize); if (err) { *minor_status = err; return GSS_S_FAILURE; } if (sz < 16 + cksumsize) sz = 0; else sz -= (16 + cksumsize); } *max_input_size = sz; *minor_status = 0; return GSS_S_COMPLETE; } overhead = 7 + ctx->mech_used->length; data_size = req_output_size; conflen = kg_confounder_size(ctx->k5_context, ctx->enc->keyblock.enctype); data_size = (conflen + data_size + 8) & (~(OM_uint32)7); ohlen = g_token_size(ctx->mech_used, (unsigned int) (data_size + ctx->cksum_size + 14)) - req_output_size; if (ohlen+overhead < req_output_size) *max_input_size = (req_output_size - ohlen - overhead) & (~(OM_uint32)7); else *max_input_size = 0; *minor_status = 0; return(GSS_S_COMPLETE); }
189
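The non-CFX branch of record 189 rounds confounder-plus-data up to an 8-byte cipher block with "(len + 8) & ~7". A sketch of just that arithmetic; note the +8 (rather than +7) means at least one pad byte is always added, matching RFC 1964-style padding where the pad length is 1..8.

#include <stdint.h>

static uint32_t round_to_block(uint32_t conflen, uint32_t datalen)
{
    /* Smallest multiple of 8 strictly above an already-aligned input:
     * e.g. 15 -> 16 (pad 1), 16 -> 24 (pad 8), 17 -> 24 (pad 7). */
    return (conflen + datalen + 8) & ~(uint32_t)7;
}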
0
static uchar * check_get_key ( ACL_USER * buff , size_t * length , my_bool not_used __attribute__ ( ( unused ) ) ) { * length = buff -> hostname_length ; return ( uchar * ) buff -> host . hostname ; }
static uchar * check_get_key ( ACL_USER * buff , size_t * length , my_bool not_used __attribute__ ( ( unused ) ) ) { * length = buff -> hostname_length ; return ( uchar * ) buff -> host . hostname ; }
190
0
void aio_context_setup(AioContext *ctx) { }
void aio_context_setup(AioContext *ctx) { }
191
0
static uint64_t timer_read(void *opaque, target_phys_addr_t addr, unsigned size) { LM32TimerState *s = opaque; uint32_t r = 0; addr >>= 2; switch (addr) { case R_SR: case R_CR: case R_PERIOD: r = s->regs[addr]; break; case R_SNAPSHOT: r = (uint32_t)ptimer_get_count(s->ptimer); break; default: error_report("lm32_timer: read access to unknown register 0x" TARGET_FMT_plx, addr << 2); break; } trace_lm32_timer_memory_read(addr << 2, r); return r; }
static uint64_t timer_read(void *opaque, target_phys_addr_t addr, unsigned size) { LM32TimerState *s = opaque; uint32_t r = 0; addr >>= 2; switch (addr) { case R_SR: case R_CR: case R_PERIOD: r = s->regs[addr]; break; case R_SNAPSHOT: r = (uint32_t)ptimer_get_count(s->ptimer); break; default: error_report("lm32_timer: read access to unknown register 0x" TARGET_FMT_plx, addr << 2); break; } trace_lm32_timer_memory_read(addr << 2, r); return r; }
192
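Record 192 converts a byte address into a word-register index with "addr >>= 2" and dispatches on it, a standard MMIO register-file shape. A condensed user-space sketch of the same pattern; the register names mirror the record but the rest is illustrative.

#include <stdint.h>
#include <stdio.h>

enum { R_SR, R_CR, R_PERIOD, R_SNAPSHOT, R_MAX };

static uint32_t regs[R_MAX];

static uint32_t mmio_read(uint64_t addr)
{
    addr >>= 2;                   /* byte offset -> 32-bit register index */
    switch (addr) {
    case R_SR: case R_CR: case R_PERIOD:
        return regs[addr];        /* plain latched registers */
    case R_SNAPSHOT:
        return 0;                 /* would sample a live counter here */
    default:
        fprintf(stderr, "read of unknown register 0x%llx\n",
                (unsigned long long)(addr << 2));
        return 0;
    }
}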
1
static int em_jmp_far(struct x86_emulate_ctxt *ctxt) { int rc; unsigned short sel; memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2); rc = load_segment_descriptor(ctxt, sel, VCPU_SREG_CS); if (rc != X86EMUL_CONTINUE) return rc; ctxt->_eip = 0; memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes); return X86EMUL_CONTINUE; }
static int em_jmp_far(struct x86_emulate_ctxt *ctxt) { int rc; unsigned short sel; memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2); rc = load_segment_descriptor(ctxt, sel, VCPU_SREG_CS); if (rc != X86EMUL_CONTINUE) return rc; ctxt->_eip = 0; memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes); return X86EMUL_CONTINUE; }
193
1
isdn_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg) { uint minor = iminor(inode); isdn_ctrl c; int drvidx; int chidx; int ret; int i; char __user *p; char *s; union iocpar { char name[10]; char bname[22]; isdn_ioctl_struct iocts; isdn_net_ioctl_phone phone; isdn_net_ioctl_cfg cfg; } iocpar; void __user *argp = (void __user *)arg; #define name iocpar.name #define bname iocpar.bname #define iocts iocpar.iocts #define phone iocpar.phone #define cfg iocpar.cfg if (minor == ISDN_MINOR_STATUS) { switch (cmd) { case IIOCGETDVR: return (TTY_DV + (NET_DV << 8) + (INF_DV << 16)); case IIOCGETCPS: if (arg) { ulong __user *p = argp; int i; if (!access_ok(VERIFY_WRITE, p, sizeof(ulong) * ISDN_MAX_CHANNELS * 2)) return -EFAULT; for (i = 0; i < ISDN_MAX_CHANNELS; i++) { put_user(dev->ibytes[i], p++); put_user(dev->obytes[i], p++); } return 0; } else return -EINVAL; break; #ifdef CONFIG_NETDEVICES case IIOCNETGPN: /* Get peer phone number of a connected * isdn network interface */ if (arg) { if (copy_from_user(&phone, argp, sizeof(phone))) return -EFAULT; return isdn_net_getpeer(&phone, argp); } else return -EINVAL; #endif default: return -EINVAL; } } if (!dev->drivers) return -ENODEV; if (minor <= ISDN_MINOR_BMAX) { drvidx = isdn_minor2drv(minor); if (drvidx < 0) return -ENODEV; chidx = isdn_minor2chan(minor); if (!(dev->drv[drvidx]->flags & DRV_FLAG_RUNNING)) return -ENODEV; return 0; } if (minor <= ISDN_MINOR_CTRLMAX) { /* * isdn net devices manage lots of configuration variables as linked lists. * Those lists must only be manipulated from user space. Some of the ioctl's * service routines access user space and are not atomic. Therefor, ioctl's * manipulating the lists and ioctl's sleeping while accessing the lists * are serialized by means of a semaphore. 
*/ switch (cmd) { case IIOCNETDWRSET: printk(KERN_INFO "INFO: ISDN_DW_ABC_EXTENSION not enabled\n"); return(-EINVAL); case IIOCNETLCR: printk(KERN_INFO "INFO: ISDN_ABC_LCR_SUPPORT not enabled\n"); return -ENODEV; #ifdef CONFIG_NETDEVICES case IIOCNETAIF: /* Add a network-interface */ if (arg) { if (copy_from_user(name, argp, sizeof(name))) return -EFAULT; s = name; } else { s = NULL; } ret = mutex_lock_interruptible(&dev->mtx); if( ret ) return ret; if ((s = isdn_net_new(s, NULL))) { if (copy_to_user(argp, s, strlen(s) + 1)){ ret = -EFAULT; } else { ret = 0; } } else ret = -ENODEV; mutex_unlock(&dev->mtx); return ret; case IIOCNETASL: /* Add a slave to a network-interface */ if (arg) { if (copy_from_user(bname, argp, sizeof(bname) - 1)) return -EFAULT; } else return -EINVAL; ret = mutex_lock_interruptible(&dev->mtx); if( ret ) return ret; if ((s = isdn_net_newslave(bname))) { if (copy_to_user(argp, s, strlen(s) + 1)){ ret = -EFAULT; } else { ret = 0; } } else ret = -ENODEV; mutex_unlock(&dev->mtx); return ret; case IIOCNETDIF: /* Delete a network-interface */ if (arg) { if (copy_from_user(name, argp, sizeof(name))) return -EFAULT; ret = mutex_lock_interruptible(&dev->mtx); if( ret ) return ret; ret = isdn_net_rm(name); mutex_unlock(&dev->mtx); return ret; } else return -EINVAL; case IIOCNETSCF: /* Set configurable parameters of a network-interface */ if (arg) { if (copy_from_user(&cfg, argp, sizeof(cfg))) return -EFAULT; return isdn_net_setcfg(&cfg); } else return -EINVAL; case IIOCNETGCF: /* Get configurable parameters of a network-interface */ if (arg) { if (copy_from_user(&cfg, argp, sizeof(cfg))) return -EFAULT; if (!(ret = isdn_net_getcfg(&cfg))) { if (copy_to_user(argp, &cfg, sizeof(cfg))) return -EFAULT; } return ret; } else return -EINVAL; case IIOCNETANM: /* Add a phone-number to a network-interface */ if (arg) { if (copy_from_user(&phone, argp, sizeof(phone))) return -EFAULT; ret = mutex_lock_interruptible(&dev->mtx); if( ret ) return ret; ret = isdn_net_addphone(&phone); mutex_unlock(&dev->mtx); return ret; } else return -EINVAL; case IIOCNETGNM: /* Get list of phone-numbers of a network-interface */ if (arg) { if (copy_from_user(&phone, argp, sizeof(phone))) return -EFAULT; ret = mutex_lock_interruptible(&dev->mtx); if( ret ) return ret; ret = isdn_net_getphones(&phone, argp); mutex_unlock(&dev->mtx); return ret; } else return -EINVAL; case IIOCNETDNM: /* Delete a phone-number of a network-interface */ if (arg) { if (copy_from_user(&phone, argp, sizeof(phone))) return -EFAULT; ret = mutex_lock_interruptible(&dev->mtx); if( ret ) return ret; ret = isdn_net_delphone(&phone); mutex_unlock(&dev->mtx); return ret; } else return -EINVAL; case IIOCNETDIL: /* Force dialing of a network-interface */ if (arg) { if (copy_from_user(name, argp, sizeof(name))) return -EFAULT; return isdn_net_force_dial(name); } else return -EINVAL; #ifdef CONFIG_ISDN_PPP case IIOCNETALN: if (!arg) return -EINVAL; if (copy_from_user(name, argp, sizeof(name))) return -EFAULT; return isdn_ppp_dial_slave(name); case IIOCNETDLN: if (!arg) return -EINVAL; if (copy_from_user(name, argp, sizeof(name))) return -EFAULT; return isdn_ppp_hangup_slave(name); #endif case IIOCNETHUP: /* Force hangup of a network-interface */ if (!arg) return -EINVAL; if (copy_from_user(name, argp, sizeof(name))) return -EFAULT; return isdn_net_force_hangup(name); break; #endif /* CONFIG_NETDEVICES */ case IIOCSETVER: dev->net_verbose = arg; printk(KERN_INFO "isdn: Verbose-Level is %d\n", dev->net_verbose); return 0; case IIOCSETGST: if 
(arg) dev->global_flags |= ISDN_GLOBAL_STOPPED; else dev->global_flags &= ~ISDN_GLOBAL_STOPPED; printk(KERN_INFO "isdn: Global Mode %s\n", (dev->global_flags & ISDN_GLOBAL_STOPPED) ? "stopped" : "running"); return 0; case IIOCSETBRJ: drvidx = -1; if (arg) { int i; char *p; if (copy_from_user(&iocts, argp, sizeof(isdn_ioctl_struct))) return -EFAULT; if (strlen(iocts.drvid)) { if ((p = strchr(iocts.drvid, ','))) *p = 0; drvidx = -1; for (i = 0; i < ISDN_MAX_DRIVERS; i++) if (!(strcmp(dev->drvid[i], iocts.drvid))) { drvidx = i; break; } } } if (drvidx == -1) return -ENODEV; if (iocts.arg) dev->drv[drvidx]->flags |= DRV_FLAG_REJBUS; else dev->drv[drvidx]->flags &= ~DRV_FLAG_REJBUS; return 0; case IIOCSIGPRF: dev->profd = current; return 0; break; case IIOCGETPRF: /* Get all Modem-Profiles */ if (arg) { char __user *p = argp; int i; if (!access_ok(VERIFY_WRITE, argp, (ISDN_MODEM_NUMREG + ISDN_MSNLEN + ISDN_LMSNLEN) * ISDN_MAX_CHANNELS)) return -EFAULT; for (i = 0; i < ISDN_MAX_CHANNELS; i++) { if (copy_to_user(p, dev->mdm.info[i].emu.profile, ISDN_MODEM_NUMREG)) return -EFAULT; p += ISDN_MODEM_NUMREG; if (copy_to_user(p, dev->mdm.info[i].emu.pmsn, ISDN_MSNLEN)) return -EFAULT; p += ISDN_MSNLEN; if (copy_to_user(p, dev->mdm.info[i].emu.plmsn, ISDN_LMSNLEN)) return -EFAULT; p += ISDN_LMSNLEN; } return (ISDN_MODEM_NUMREG + ISDN_MSNLEN + ISDN_LMSNLEN) * ISDN_MAX_CHANNELS; } else return -EINVAL; break; case IIOCSETPRF: /* Set all Modem-Profiles */ if (arg) { char __user *p = argp; int i; if (!access_ok(VERIFY_READ, argp, (ISDN_MODEM_NUMREG + ISDN_MSNLEN + ISDN_LMSNLEN) * ISDN_MAX_CHANNELS)) return -EFAULT; for (i = 0; i < ISDN_MAX_CHANNELS; i++) { if (copy_from_user(dev->mdm.info[i].emu.profile, p, ISDN_MODEM_NUMREG)) return -EFAULT; p += ISDN_MODEM_NUMREG; if (copy_from_user(dev->mdm.info[i].emu.plmsn, p, ISDN_LMSNLEN)) return -EFAULT; p += ISDN_LMSNLEN; if (copy_from_user(dev->mdm.info[i].emu.pmsn, p, ISDN_MSNLEN)) return -EFAULT; p += ISDN_MSNLEN; } return 0; } else return -EINVAL; break; case IIOCSETMAP: case IIOCGETMAP: /* Set/Get MSN->EAZ-Mapping for a driver */ if (arg) { if (copy_from_user(&iocts, argp, sizeof(isdn_ioctl_struct))) return -EFAULT; if (strlen(iocts.drvid)) { drvidx = -1; for (i = 0; i < ISDN_MAX_DRIVERS; i++) if (!(strcmp(dev->drvid[i], iocts.drvid))) { drvidx = i; break; } } else drvidx = 0; if (drvidx == -1) return -ENODEV; if (cmd == IIOCSETMAP) { int loop = 1; p = (char __user *) iocts.arg; i = 0; while (loop) { int j = 0; while (1) { if (!access_ok(VERIFY_READ, p, 1)) return -EFAULT; get_user(bname[j], p++); switch (bname[j]) { case '\0': loop = 0; /* Fall through */ case ',': bname[j] = '\0'; strcpy(dev->drv[drvidx]->msn2eaz[i], bname); j = ISDN_MSNLEN; break; default: j++; } if (j >= ISDN_MSNLEN) break; } if (++i > 9) break; } } else { p = (char __user *) iocts.arg; for (i = 0; i < 10; i++) { sprintf(bname, "%s%s", strlen(dev->drv[drvidx]->msn2eaz[i]) ? dev->drv[drvidx]->msn2eaz[i] : "_", (i < 9) ? 
"," : "\0"); if (copy_to_user(p, bname, strlen(bname) + 1)) return -EFAULT; p += strlen(bname); } } return 0; } else return -EINVAL; case IIOCDBGVAR: if (arg) { if (copy_to_user(argp, &dev, sizeof(ulong))) return -EFAULT; return 0; } else return -EINVAL; break; default: if ((cmd & IIOCDRVCTL) == IIOCDRVCTL) cmd = ((cmd >> _IOC_NRSHIFT) & _IOC_NRMASK) & ISDN_DRVIOCTL_MASK; else return -EINVAL; if (arg) { int i; char *p; if (copy_from_user(&iocts, argp, sizeof(isdn_ioctl_struct))) return -EFAULT; if (strlen(iocts.drvid)) { if ((p = strchr(iocts.drvid, ','))) *p = 0; drvidx = -1; for (i = 0; i < ISDN_MAX_DRIVERS; i++) if (!(strcmp(dev->drvid[i], iocts.drvid))) { drvidx = i; break; } } else drvidx = 0; if (drvidx == -1) return -ENODEV; if (!access_ok(VERIFY_WRITE, argp, sizeof(isdn_ioctl_struct))) return -EFAULT; c.driver = drvidx; c.command = ISDN_CMD_IOCTL; c.arg = cmd; memcpy(c.parm.num, &iocts.arg, sizeof(ulong)); ret = isdn_command(&c); memcpy(&iocts.arg, c.parm.num, sizeof(ulong)); if (copy_to_user(argp, &iocts, sizeof(isdn_ioctl_struct))) return -EFAULT; return ret; } else return -EINVAL; } } #ifdef CONFIG_ISDN_PPP if (minor <= ISDN_MINOR_PPPMAX) return (isdn_ppp_ioctl(minor - ISDN_MINOR_PPP, file, cmd, arg)); #endif return -ENODEV; #undef name #undef bname #undef iocts #undef phone #undef cfg }
isdn_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg) { uint minor = iminor(inode); isdn_ctrl c; int drvidx; int chidx; int ret; int i; char __user *p; char *s; union iocpar { char name[10]; char bname[22]; isdn_ioctl_struct iocts; isdn_net_ioctl_phone phone; isdn_net_ioctl_cfg cfg; } iocpar; void __user *argp = (void __user *)arg; #define name iocpar.name #define bname iocpar.bname #define iocts iocpar.iocts #define phone iocpar.phone #define cfg iocpar.cfg if (minor == ISDN_MINOR_STATUS) { switch (cmd) { case IIOCGETDVR: return (TTY_DV + (NET_DV << 8) + (INF_DV << 16)); case IIOCGETCPS: if (arg) { ulong __user *p = argp; int i; if (!access_ok(VERIFY_WRITE, p, sizeof(ulong) * ISDN_MAX_CHANNELS * 2)) return -EFAULT; for (i = 0; i < ISDN_MAX_CHANNELS; i++) { put_user(dev->ibytes[i], p++); put_user(dev->obytes[i], p++); } return 0; } else return -EINVAL; break; #ifdef CONFIG_NETDEVICES case IIOCNETGPN: if (arg) { if (copy_from_user(&phone, argp, sizeof(phone))) return -EFAULT; return isdn_net_getpeer(&phone, argp); } else return -EINVAL; #endif default: return -EINVAL; } } if (!dev->drivers) return -ENODEV; if (minor <= ISDN_MINOR_BMAX) { drvidx = isdn_minor2drv(minor); if (drvidx < 0) return -ENODEV; chidx = isdn_minor2chan(minor); if (!(dev->drv[drvidx]->flags & DRV_FLAG_RUNNING)) return -ENODEV; return 0; } if (minor <= ISDN_MINOR_CTRLMAX) { switch (cmd) { case IIOCNETDWRSET: printk(KERN_INFO "INFO: ISDN_DW_ABC_EXTENSION not enabled\n"); return(-EINVAL); case IIOCNETLCR: printk(KERN_INFO "INFO: ISDN_ABC_LCR_SUPPORT not enabled\n"); return -ENODEV; #ifdef CONFIG_NETDEVICES case IIOCNETAIF: if (arg) { if (copy_from_user(name, argp, sizeof(name))) return -EFAULT; s = name; } else { s = NULL; } ret = mutex_lock_interruptible(&dev->mtx); if( ret ) return ret; if ((s = isdn_net_new(s, NULL))) { if (copy_to_user(argp, s, strlen(s) + 1)){ ret = -EFAULT; } else { ret = 0; } } else ret = -ENODEV; mutex_unlock(&dev->mtx); return ret; case IIOCNETASL: if (arg) { if (copy_from_user(bname, argp, sizeof(bname) - 1)) return -EFAULT; } else return -EINVAL; ret = mutex_lock_interruptible(&dev->mtx); if( ret ) return ret; if ((s = isdn_net_newslave(bname))) { if (copy_to_user(argp, s, strlen(s) + 1)){ ret = -EFAULT; } else { ret = 0; } } else ret = -ENODEV; mutex_unlock(&dev->mtx); return ret; case IIOCNETDIF: if (arg) { if (copy_from_user(name, argp, sizeof(name))) return -EFAULT; ret = mutex_lock_interruptible(&dev->mtx); if( ret ) return ret; ret = isdn_net_rm(name); mutex_unlock(&dev->mtx); return ret; } else return -EINVAL; case IIOCNETSCF: if (arg) { if (copy_from_user(&cfg, argp, sizeof(cfg))) return -EFAULT; return isdn_net_setcfg(&cfg); } else return -EINVAL; case IIOCNETGCF: if (arg) { if (copy_from_user(&cfg, argp, sizeof(cfg))) return -EFAULT; if (!(ret = isdn_net_getcfg(&cfg))) { if (copy_to_user(argp, &cfg, sizeof(cfg))) return -EFAULT; } return ret; } else return -EINVAL; case IIOCNETANM: if (arg) { if (copy_from_user(&phone, argp, sizeof(phone))) return -EFAULT; ret = mutex_lock_interruptible(&dev->mtx); if( ret ) return ret; ret = isdn_net_addphone(&phone); mutex_unlock(&dev->mtx); return ret; } else return -EINVAL; case IIOCNETGNM: if (arg) { if (copy_from_user(&phone, argp, sizeof(phone))) return -EFAULT; ret = mutex_lock_interruptible(&dev->mtx); if( ret ) return ret; ret = isdn_net_getphones(&phone, argp); mutex_unlock(&dev->mtx); return ret; } else return -EINVAL; case IIOCNETDNM: if (arg) { if (copy_from_user(&phone, argp, sizeof(phone))) return -EFAULT; ret = 
mutex_lock_interruptible(&dev->mtx); if( ret ) return ret; ret = isdn_net_delphone(&phone); mutex_unlock(&dev->mtx); return ret; } else return -EINVAL; case IIOCNETDIL: if (arg) { if (copy_from_user(name, argp, sizeof(name))) return -EFAULT; return isdn_net_force_dial(name); } else return -EINVAL; #ifdef CONFIG_ISDN_PPP case IIOCNETALN: if (!arg) return -EINVAL; if (copy_from_user(name, argp, sizeof(name))) return -EFAULT; return isdn_ppp_dial_slave(name); case IIOCNETDLN: if (!arg) return -EINVAL; if (copy_from_user(name, argp, sizeof(name))) return -EFAULT; return isdn_ppp_hangup_slave(name); #endif case IIOCNETHUP: if (!arg) return -EINVAL; if (copy_from_user(name, argp, sizeof(name))) return -EFAULT; return isdn_net_force_hangup(name); break; #endif case IIOCSETVER: dev->net_verbose = arg; printk(KERN_INFO "isdn: Verbose-Level is %d\n", dev->net_verbose); return 0; case IIOCSETGST: if (arg) dev->global_flags |= ISDN_GLOBAL_STOPPED; else dev->global_flags &= ~ISDN_GLOBAL_STOPPED; printk(KERN_INFO "isdn: Global Mode %s\n", (dev->global_flags & ISDN_GLOBAL_STOPPED) ? "stopped" : "running"); return 0; case IIOCSETBRJ: drvidx = -1; if (arg) { int i; char *p; if (copy_from_user(&iocts, argp, sizeof(isdn_ioctl_struct))) return -EFAULT; if (strlen(iocts.drvid)) { if ((p = strchr(iocts.drvid, ','))) *p = 0; drvidx = -1; for (i = 0; i < ISDN_MAX_DRIVERS; i++) if (!(strcmp(dev->drvid[i], iocts.drvid))) { drvidx = i; break; } } } if (drvidx == -1) return -ENODEV; if (iocts.arg) dev->drv[drvidx]->flags |= DRV_FLAG_REJBUS; else dev->drv[drvidx]->flags &= ~DRV_FLAG_REJBUS; return 0; case IIOCSIGPRF: dev->profd = current; return 0; break; case IIOCGETPRF: if (arg) { char __user *p = argp; int i; if (!access_ok(VERIFY_WRITE, argp, (ISDN_MODEM_NUMREG + ISDN_MSNLEN + ISDN_LMSNLEN) * ISDN_MAX_CHANNELS)) return -EFAULT; for (i = 0; i < ISDN_MAX_CHANNELS; i++) { if (copy_to_user(p, dev->mdm.info[i].emu.profile, ISDN_MODEM_NUMREG)) return -EFAULT; p += ISDN_MODEM_NUMREG; if (copy_to_user(p, dev->mdm.info[i].emu.pmsn, ISDN_MSNLEN)) return -EFAULT; p += ISDN_MSNLEN; if (copy_to_user(p, dev->mdm.info[i].emu.plmsn, ISDN_LMSNLEN)) return -EFAULT; p += ISDN_LMSNLEN; } return (ISDN_MODEM_NUMREG + ISDN_MSNLEN + ISDN_LMSNLEN) * ISDN_MAX_CHANNELS; } else return -EINVAL; break; case IIOCSETPRF: if (arg) { char __user *p = argp; int i; if (!access_ok(VERIFY_READ, argp, (ISDN_MODEM_NUMREG + ISDN_MSNLEN + ISDN_LMSNLEN) * ISDN_MAX_CHANNELS)) return -EFAULT; for (i = 0; i < ISDN_MAX_CHANNELS; i++) { if (copy_from_user(dev->mdm.info[i].emu.profile, p, ISDN_MODEM_NUMREG)) return -EFAULT; p += ISDN_MODEM_NUMREG; if (copy_from_user(dev->mdm.info[i].emu.plmsn, p, ISDN_LMSNLEN)) return -EFAULT; p += ISDN_LMSNLEN; if (copy_from_user(dev->mdm.info[i].emu.pmsn, p, ISDN_MSNLEN)) return -EFAULT; p += ISDN_MSNLEN; } return 0; } else return -EINVAL; break; case IIOCSETMAP: case IIOCGETMAP: if (arg) { if (copy_from_user(&iocts, argp, sizeof(isdn_ioctl_struct))) return -EFAULT; if (strlen(iocts.drvid)) { drvidx = -1; for (i = 0; i < ISDN_MAX_DRIVERS; i++) if (!(strcmp(dev->drvid[i], iocts.drvid))) { drvidx = i; break; } } else drvidx = 0; if (drvidx == -1) return -ENODEV; if (cmd == IIOCSETMAP) { int loop = 1; p = (char __user *) iocts.arg; i = 0; while (loop) { int j = 0; while (1) { if (!access_ok(VERIFY_READ, p, 1)) return -EFAULT; get_user(bname[j], p++); switch (bname[j]) { case '\0': loop = 0; case ',': bname[j] = '\0'; strcpy(dev->drv[drvidx]->msn2eaz[i], bname); j = ISDN_MSNLEN; break; default: j++; } if (j >= ISDN_MSNLEN) break; 
} if (++i > 9) break; } } else { p = (char __user *) iocts.arg; for (i = 0; i < 10; i++) { sprintf(bname, "%s%s", strlen(dev->drv[drvidx]->msn2eaz[i]) ? dev->drv[drvidx]->msn2eaz[i] : "_", (i < 9) ? "," : "\0"); if (copy_to_user(p, bname, strlen(bname) + 1)) return -EFAULT; p += strlen(bname); } } return 0; } else return -EINVAL; case IIOCDBGVAR: if (arg) { if (copy_to_user(argp, &dev, sizeof(ulong))) return -EFAULT; return 0; } else return -EINVAL; break; default: if ((cmd & IIOCDRVCTL) == IIOCDRVCTL) cmd = ((cmd >> _IOC_NRSHIFT) & _IOC_NRMASK) & ISDN_DRVIOCTL_MASK; else return -EINVAL; if (arg) { int i; char *p; if (copy_from_user(&iocts, argp, sizeof(isdn_ioctl_struct))) return -EFAULT; if (strlen(iocts.drvid)) { if ((p = strchr(iocts.drvid, ','))) *p = 0; drvidx = -1; for (i = 0; i < ISDN_MAX_DRIVERS; i++) if (!(strcmp(dev->drvid[i], iocts.drvid))) { drvidx = i; break; } } else drvidx = 0; if (drvidx == -1) return -ENODEV; if (!access_ok(VERIFY_WRITE, argp, sizeof(isdn_ioctl_struct))) return -EFAULT; c.driver = drvidx; c.command = ISDN_CMD_IOCTL; c.arg = cmd; memcpy(c.parm.num, &iocts.arg, sizeof(ulong)); ret = isdn_command(&c); memcpy(&iocts.arg, c.parm.num, sizeof(ulong)); if (copy_to_user(argp, &iocts, sizeof(isdn_ioctl_struct))) return -EFAULT; return ret; } else return -EINVAL; } } #ifdef CONFIG_ISDN_PPP if (minor <= ISDN_MINOR_PPPMAX) return (isdn_ppp_ioctl(minor - ISDN_MINOR_PPP, file, cmd, arg)); #endif return -ENODEV; #undef name #undef bname #undef iocts #undef phone #undef cfg }
195
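Several branches of record 195 copy a user-supplied name into a fixed-size buffer (e.g. sizeof(bname) - 1 bytes) and then treat it as a C string. Whether that is safe depends on what the surrounding code guarantees; the defensive pattern is to cap the copy and terminate explicitly. A user-space sketch of that pattern, with illustrative names:

#include <string.h>

static int copy_name(char *dst, size_t dstsz,
                     const char *src, size_t srclen)
{
    if (dstsz == 0)
        return -1;
    if (srclen >= dstsz)
        srclen = dstsz - 1;   /* leave room for the terminator */
    memcpy(dst, src, srclen);
    dst[srclen] = '\0';       /* guarantee termination before strlen/strcpy */
    return 0;
}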
0
void virtio_scsi_handle_cmd_req_submit(VirtIOSCSI *s, VirtIOSCSIReq *req) { SCSIRequest *sreq = req->sreq; if (scsi_req_enqueue(sreq)) { scsi_req_continue(sreq); } bdrv_io_unplug(sreq->dev->conf.bs); scsi_req_unref(sreq); }
void virtio_scsi_handle_cmd_req_submit(VirtIOSCSI *s, VirtIOSCSIReq *req) { SCSIRequest *sreq = req->sreq; if (scsi_req_enqueue(sreq)) { scsi_req_continue(sreq); } bdrv_io_unplug(sreq->dev->conf.bs); scsi_req_unref(sreq); }
196
0
static guint32 pad_offset ( struct tcpinfo * tcpinfo , guint32 fpdu_total_len , guint8 pad_len ) { if ( ( tcpinfo -> nxtseq - MPA_CRC_LEN - MPA_MARKER_LEN ) % MPA_MARKER_INTERVAL == 0 ) { return fpdu_total_len - MPA_CRC_LEN - MPA_MARKER_LEN - pad_len ; } else { return fpdu_total_len - MPA_CRC_LEN - pad_len ; } }
static guint32 pad_offset ( struct tcpinfo * tcpinfo , guint32 fpdu_total_len , guint8 pad_len ) { if ( ( tcpinfo -> nxtseq - MPA_CRC_LEN - MPA_MARKER_LEN ) % MPA_MARKER_INTERVAL == 0 ) { return fpdu_total_len - MPA_CRC_LEN - MPA_MARKER_LEN - pad_len ; } else { return fpdu_total_len - MPA_CRC_LEN - pad_len ; } }
197
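Record 197 decides whether an MPA marker sits immediately before the trailing CRC by testing the TCP sequence position modulo the marker interval. A standalone sketch of that placement test; the constants (4-byte CRC, 4-byte marker, 512-byte interval) are assumptions stated for illustration.

#include <stdint.h>

#define CRC_LEN         4
#define MARKER_LEN      4
#define MARKER_INTERVAL 512

static uint32_t fpdu_pad_offset(uint32_t nxtseq, uint32_t total, uint8_t pad)
{
    if ((nxtseq - CRC_LEN - MARKER_LEN) % MARKER_INTERVAL == 0)
        return total - CRC_LEN - MARKER_LEN - pad; /* marker before CRC */
    return total - CRC_LEN - pad;                  /* no trailing marker */
}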
1
static int em_ret_far(struct x86_emulate_ctxt *ctxt) { int rc; unsigned long cs; int cpl = ctxt->ops->cpl(ctxt); rc = emulate_pop(ctxt, &ctxt->_eip, ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) return rc; if (ctxt->op_bytes == 4) ctxt->_eip = (u32)ctxt->_eip; rc = emulate_pop(ctxt, &cs, ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) return rc; /* Outer-privilege level return is not implemented */ if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl) return X86EMUL_UNHANDLEABLE; rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS); return rc; }
static int em_ret_far(struct x86_emulate_ctxt *ctxt) { int rc; unsigned long cs; int cpl = ctxt->ops->cpl(ctxt); rc = emulate_pop(ctxt, &ctxt->_eip, ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) return rc; if (ctxt->op_bytes == 4) ctxt->_eip = (u32)ctxt->_eip; rc = emulate_pop(ctxt, &cs, ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) return rc; if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl) return X86EMUL_UNHANDLEABLE; rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS); return rc; }
198
1
shmem_alloc_page(gfp_t gfp,struct shmem_inode_info *info, unsigned long idx) { return alloc_page(gfp | __GFP_ZERO); }
shmem_alloc_page(gfp_t gfp,struct shmem_inode_info *info, unsigned long idx) { return alloc_page(gfp | __GFP_ZERO); }
199
0
static int truespeech_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; TSContext *c = avctx->priv_data; int i, j; short *samples = data; int consumed = 0; int16_t out_buf[240]; int iterations; if (!buf_size) return 0; if (buf_size < 32) { av_log(avctx, AV_LOG_ERROR, "Too small input buffer (%d bytes), need at least 32 bytes\n", buf_size); return -1; } iterations = FFMIN(buf_size / 32, *data_size / 480); for(j = 0; j < iterations; j++) { truespeech_read_frame(c, buf + consumed); consumed += 32; truespeech_correlate_filter(c); truespeech_filters_merge(c); memset(out_buf, 0, 240 * 2); for(i = 0; i < 4; i++) { truespeech_apply_twopoint_filter(c, i); truespeech_place_pulses(c, out_buf + i * 60, i); truespeech_update_filters(c, out_buf + i * 60, i); truespeech_synth(c, out_buf + i * 60, i); } truespeech_save_prevvec(c); /* finally output decoded frame */ for(i = 0; i < 240; i++) *samples++ = out_buf[i]; } *data_size = consumed * 15; return consumed; }
static int truespeech_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; TSContext *c = avctx->priv_data; int i, j; short *samples = data; int consumed = 0; int16_t out_buf[240]; int iterations; if (!buf_size) return 0; if (buf_size < 32) { av_log(avctx, AV_LOG_ERROR, "Too small input buffer (%d bytes), need at least 32 bytes\n", buf_size); return -1; } iterations = FFMIN(buf_size / 32, *data_size / 480); for(j = 0; j < iterations; j++) { truespeech_read_frame(c, buf + consumed); consumed += 32; truespeech_correlate_filter(c); truespeech_filters_merge(c); memset(out_buf, 0, 240 * 2); for(i = 0; i < 4; i++) { truespeech_apply_twopoint_filter(c, i); truespeech_place_pulses(c, out_buf + i * 60, i); truespeech_update_filters(c, out_buf + i * 60, i); truespeech_synth(c, out_buf + i * 60, i); } truespeech_save_prevvec(c); for(i = 0; i < 240; i++) *samples++ = out_buf[i]; } *data_size = consumed * 15; return consumed; }
200
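Record 200 bounds its decode loop by both sides of the buffer: each iteration consumes 32 input bytes and produces 240 16-bit samples (480 output bytes), so iterations = FFMIN(in/32, out/480). A sketch of that capacity calculation with the record's own constants:

#include <stddef.h>

#define IN_FRAME  32
#define OUT_FRAME 480   /* 240 samples * 2 bytes each */

static size_t frames_to_decode(size_t in_bytes, size_t out_bytes)
{
    size_t by_input  = in_bytes / IN_FRAME;
    size_t by_output = out_bytes / OUT_FRAME;
    return by_input < by_output ? by_input : by_output; /* FFMIN */
}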
1
static int shmem_getpage(struct inode *inode, unsigned long idx, struct page **pagep, enum sgp_type sgp, int *type) { struct address_space *mapping = inode->i_mapping; struct shmem_inode_info *info = SHMEM_I(inode); struct shmem_sb_info *sbinfo; struct page *filepage = *pagep; struct page *swappage; swp_entry_t *entry; swp_entry_t swap; int error; if (idx >= SHMEM_MAX_INDEX) return -EFBIG; if (type) *type = 0; /* * Normally, filepage is NULL on entry, and either found * uptodate immediately, or allocated and zeroed, or read * in under swappage, which is then assigned to filepage. * But shmem_readpage and shmem_write_begin pass in a locked * filepage, which may be found not uptodate by other callers * too, and may need to be copied from the swappage read in. */ repeat: if (!filepage) filepage = find_lock_page(mapping, idx); if (filepage && PageUptodate(filepage)) goto done; error = 0; if (sgp == SGP_QUICK) goto failed; spin_lock(&info->lock); shmem_recalc_inode(inode); entry = shmem_swp_alloc(info, idx, sgp); if (IS_ERR(entry)) { spin_unlock(&info->lock); error = PTR_ERR(entry); goto failed; } swap = *entry; if (swap.val) { /* Look it up and read it in.. */ swappage = lookup_swap_cache(swap); if (!swappage) { shmem_swp_unmap(entry); /* here we actually do the io */ if (type && !(*type & VM_FAULT_MAJOR)) { __count_vm_event(PGMAJFAULT); *type |= VM_FAULT_MAJOR; } spin_unlock(&info->lock); swappage = shmem_swapin(info, swap, idx); if (!swappage) { spin_lock(&info->lock); entry = shmem_swp_alloc(info, idx, sgp); if (IS_ERR(entry)) error = PTR_ERR(entry); else { if (entry->val == swap.val) error = -ENOMEM; shmem_swp_unmap(entry); } spin_unlock(&info->lock); if (error) goto failed; goto repeat; } wait_on_page_locked(swappage); page_cache_release(swappage); goto repeat; } /* We have to do this with page locked to prevent races */ if (TestSetPageLocked(swappage)) { shmem_swp_unmap(entry); spin_unlock(&info->lock); wait_on_page_locked(swappage); page_cache_release(swappage); goto repeat; } if (PageWriteback(swappage)) { shmem_swp_unmap(entry); spin_unlock(&info->lock); wait_on_page_writeback(swappage); unlock_page(swappage); page_cache_release(swappage); goto repeat; } if (!PageUptodate(swappage)) { shmem_swp_unmap(entry); spin_unlock(&info->lock); unlock_page(swappage); page_cache_release(swappage); error = -EIO; goto failed; } if (filepage) { shmem_swp_set(info, entry, 0); shmem_swp_unmap(entry); delete_from_swap_cache(swappage); spin_unlock(&info->lock); copy_highpage(filepage, swappage); unlock_page(swappage); page_cache_release(swappage); flush_dcache_page(filepage); SetPageUptodate(filepage); set_page_dirty(filepage); swap_free(swap); } else if (!(error = move_from_swap_cache( swappage, idx, mapping))) { info->flags |= SHMEM_PAGEIN; shmem_swp_set(info, entry, 0); shmem_swp_unmap(entry); spin_unlock(&info->lock); filepage = swappage; swap_free(swap); } else { shmem_swp_unmap(entry); spin_unlock(&info->lock); unlock_page(swappage); page_cache_release(swappage); if (error == -ENOMEM) { /* let kswapd refresh zone for GFP_ATOMICs */ congestion_wait(WRITE, HZ/50); } goto repeat; } } else if (sgp == SGP_READ && !filepage) { shmem_swp_unmap(entry); filepage = find_get_page(mapping, idx); if (filepage && (!PageUptodate(filepage) || TestSetPageLocked(filepage))) { spin_unlock(&info->lock); wait_on_page_locked(filepage); page_cache_release(filepage); filepage = NULL; goto repeat; } spin_unlock(&info->lock); } else { shmem_swp_unmap(entry); sbinfo = SHMEM_SB(inode->i_sb); if (sbinfo->max_blocks) { 
spin_lock(&sbinfo->stat_lock); if (sbinfo->free_blocks == 0 || shmem_acct_block(info->flags)) { spin_unlock(&sbinfo->stat_lock); spin_unlock(&info->lock); error = -ENOSPC; goto failed; } sbinfo->free_blocks--; inode->i_blocks += BLOCKS_PER_PAGE; spin_unlock(&sbinfo->stat_lock); } else if (shmem_acct_block(info->flags)) { spin_unlock(&info->lock); error = -ENOSPC; goto failed; } if (!filepage) { spin_unlock(&info->lock); filepage = shmem_alloc_page(mapping_gfp_mask(mapping), info, idx); if (!filepage) { shmem_unacct_blocks(info->flags, 1); shmem_free_blocks(inode, 1); error = -ENOMEM; goto failed; } spin_lock(&info->lock); entry = shmem_swp_alloc(info, idx, sgp); if (IS_ERR(entry)) error = PTR_ERR(entry); else { swap = *entry; shmem_swp_unmap(entry); } if (error || swap.val || 0 != add_to_page_cache_lru( filepage, mapping, idx, GFP_ATOMIC)) { spin_unlock(&info->lock); page_cache_release(filepage); shmem_unacct_blocks(info->flags, 1); shmem_free_blocks(inode, 1); filepage = NULL; if (error) goto failed; goto repeat; } info->flags |= SHMEM_PAGEIN; } info->alloced++; spin_unlock(&info->lock); flush_dcache_page(filepage); SetPageUptodate(filepage); } done: if (*pagep != filepage) { *pagep = filepage; if (sgp != SGP_FAULT) unlock_page(filepage); } return 0; failed: if (*pagep != filepage) { unlock_page(filepage); page_cache_release(filepage); } return error; }
static int shmem_getpage(struct inode *inode, unsigned long idx, struct page **pagep, enum sgp_type sgp, int *type) { struct address_space *mapping = inode->i_mapping; struct shmem_inode_info *info = SHMEM_I(inode); struct shmem_sb_info *sbinfo; struct page *filepage = *pagep; struct page *swappage; swp_entry_t *entry; swp_entry_t swap; int error; if (idx >= SHMEM_MAX_INDEX) return -EFBIG; if (type) *type = 0; repeat: if (!filepage) filepage = find_lock_page(mapping, idx); if (filepage && PageUptodate(filepage)) goto done; error = 0; if (sgp == SGP_QUICK) goto failed; spin_lock(&info->lock); shmem_recalc_inode(inode); entry = shmem_swp_alloc(info, idx, sgp); if (IS_ERR(entry)) { spin_unlock(&info->lock); error = PTR_ERR(entry); goto failed; } swap = *entry; if (swap.val) { swappage = lookup_swap_cache(swap); if (!swappage) { shmem_swp_unmap(entry); if (type && !(*type & VM_FAULT_MAJOR)) { __count_vm_event(PGMAJFAULT); *type |= VM_FAULT_MAJOR; } spin_unlock(&info->lock); swappage = shmem_swapin(info, swap, idx); if (!swappage) { spin_lock(&info->lock); entry = shmem_swp_alloc(info, idx, sgp); if (IS_ERR(entry)) error = PTR_ERR(entry); else { if (entry->val == swap.val) error = -ENOMEM; shmem_swp_unmap(entry); } spin_unlock(&info->lock); if (error) goto failed; goto repeat; } wait_on_page_locked(swappage); page_cache_release(swappage); goto repeat; } if (TestSetPageLocked(swappage)) { shmem_swp_unmap(entry); spin_unlock(&info->lock); wait_on_page_locked(swappage); page_cache_release(swappage); goto repeat; } if (PageWriteback(swappage)) { shmem_swp_unmap(entry); spin_unlock(&info->lock); wait_on_page_writeback(swappage); unlock_page(swappage); page_cache_release(swappage); goto repeat; } if (!PageUptodate(swappage)) { shmem_swp_unmap(entry); spin_unlock(&info->lock); unlock_page(swappage); page_cache_release(swappage); error = -EIO; goto failed; } if (filepage) { shmem_swp_set(info, entry, 0); shmem_swp_unmap(entry); delete_from_swap_cache(swappage); spin_unlock(&info->lock); copy_highpage(filepage, swappage); unlock_page(swappage); page_cache_release(swappage); flush_dcache_page(filepage); SetPageUptodate(filepage); set_page_dirty(filepage); swap_free(swap); } else if (!(error = move_from_swap_cache( swappage, idx, mapping))) { info->flags |= SHMEM_PAGEIN; shmem_swp_set(info, entry, 0); shmem_swp_unmap(entry); spin_unlock(&info->lock); filepage = swappage; swap_free(swap); } else { shmem_swp_unmap(entry); spin_unlock(&info->lock); unlock_page(swappage); page_cache_release(swappage); if (error == -ENOMEM) { congestion_wait(WRITE, HZ/50); } goto repeat; } } else if (sgp == SGP_READ && !filepage) { shmem_swp_unmap(entry); filepage = find_get_page(mapping, idx); if (filepage && (!PageUptodate(filepage) || TestSetPageLocked(filepage))) { spin_unlock(&info->lock); wait_on_page_locked(filepage); page_cache_release(filepage); filepage = NULL; goto repeat; } spin_unlock(&info->lock); } else { shmem_swp_unmap(entry); sbinfo = SHMEM_SB(inode->i_sb); if (sbinfo->max_blocks) { spin_lock(&sbinfo->stat_lock); if (sbinfo->free_blocks == 0 || shmem_acct_block(info->flags)) { spin_unlock(&sbinfo->stat_lock); spin_unlock(&info->lock); error = -ENOSPC; goto failed; } sbinfo->free_blocks--; inode->i_blocks += BLOCKS_PER_PAGE; spin_unlock(&sbinfo->stat_lock); } else if (shmem_acct_block(info->flags)) { spin_unlock(&info->lock); error = -ENOSPC; goto failed; } if (!filepage) { spin_unlock(&info->lock); filepage = shmem_alloc_page(mapping_gfp_mask(mapping), info, idx); if (!filepage) { 
shmem_unacct_blocks(info->flags, 1); shmem_free_blocks(inode, 1); error = -ENOMEM; goto failed; } spin_lock(&info->lock); entry = shmem_swp_alloc(info, idx, sgp); if (IS_ERR(entry)) error = PTR_ERR(entry); else { swap = *entry; shmem_swp_unmap(entry); } if (error || swap.val || 0 != add_to_page_cache_lru( filepage, mapping, idx, GFP_ATOMIC)) { spin_unlock(&info->lock); page_cache_release(filepage); shmem_unacct_blocks(info->flags, 1); shmem_free_blocks(inode, 1); filepage = NULL; if (error) goto failed; goto repeat; } info->flags |= SHMEM_PAGEIN; } info->alloced++; spin_unlock(&info->lock); flush_dcache_page(filepage); SetPageUptodate(filepage); } done: if (*pagep != filepage) { *pagep = filepage; if (sgp != SGP_FAULT) unlock_page(filepage); } return 0; failed: if (*pagep != filepage) { unlock_page(filepage); page_cache_release(filepage); } return error; }
202
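Record 202 is built around a drop-lock-wait-retry loop: whenever the target page is busy (locked or under writeback), the function releases its own lock, sleeps on the page, and restarts from the "repeat" label so every check is re-validated. A condensed, compilable sketch of just that control shape, with no-op stand-ins for the locking and waiting:

#include <stdbool.h>

struct state { bool busy; int spins; };

static void lock(struct state *s)   { (void)s; }
static void unlock(struct state *s) { (void)s; }
static void wait_until_idle(struct state *s) { s->busy = (--s->spins > 0); }

static int lookup(struct state *s)
{
repeat:
    lock(s);
    if (s->busy) {        /* resource held by someone else */
        unlock(s);        /* never sleep while holding the lock */
        wait_until_idle(s);
        goto repeat;      /* re-validate everything from scratch */
    }
    unlock(s);
    return 0;
}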
1
static int dca_decode_frame ( AVCodecContext * avctx , void * data , int * got_frame_ptr , AVPacket * avpkt ) { AVFrame * frame = data ; const uint8_t * buf = avpkt -> data ; int buf_size = avpkt -> size ; int lfe_samples ; int num_core_channels = 0 ; int i , ret ; float * * samples_flt ; DCAContext * s = avctx -> priv_data ; int channels , full_channels ; int core_ss_end ; s -> xch_present = 0 ; s -> dca_buffer_size = ff_dca_convert_bitstream ( buf , buf_size , s -> dca_buffer , DCA_MAX_FRAME_SIZE + DCA_MAX_EXSS_HEADER_SIZE ) ; if ( s -> dca_buffer_size == AVERROR_INVALIDDATA ) { av_log ( avctx , AV_LOG_ERROR , "Not a valid DCA frame\n" ) ; return AVERROR_INVALIDDATA ; } init_get_bits ( & s -> gb , s -> dca_buffer , s -> dca_buffer_size * 8 ) ; if ( ( ret = dca_parse_frame_header ( s ) ) < 0 ) { return ret ; } avctx -> sample_rate = s -> sample_rate ; avctx -> bit_rate = s -> bit_rate ; s -> profile = FF_PROFILE_DTS ; for ( i = 0 ; i < ( s -> sample_blocks / 8 ) ; i ++ ) { if ( ( ret = dca_decode_block ( s , 0 , i ) ) ) { av_log ( avctx , AV_LOG_ERROR , "error decoding block\n" ) ; return ret ; } } num_core_channels = s -> prim_channels ; if ( s -> ext_coding ) s -> core_ext_mask = dca_ext_audio_descr_mask [ s -> ext_descr ] ; else s -> core_ext_mask = 0 ; core_ss_end = FFMIN ( s -> frame_size , s -> dca_buffer_size ) * 8 ; if ( s -> core_ext_mask < 0 || s -> core_ext_mask & DCA_EXT_XCH ) { s -> core_ext_mask = FFMAX ( s -> core_ext_mask , 0 ) ; skip_bits_long ( & s -> gb , ( - get_bits_count ( & s -> gb ) ) & 31 ) ; while ( core_ss_end - get_bits_count ( & s -> gb ) >= 32 ) { uint32_t bits = get_bits_long ( & s -> gb , 32 ) ; switch ( bits ) { case 0x5a5a5a5a : { int ext_amode , xch_fsize ; s -> xch_base_channel = s -> prim_channels ; xch_fsize = show_bits ( & s -> gb , 10 ) ; if ( ( s -> frame_size != ( get_bits_count ( & s -> gb ) >> 3 ) - 4 + xch_fsize ) && ( s -> frame_size != ( get_bits_count ( & s -> gb ) >> 3 ) - 4 + xch_fsize + 1 ) ) continue ; skip_bits ( & s -> gb , 10 ) ; s -> core_ext_mask |= DCA_EXT_XCH ; if ( ( ext_amode = get_bits ( & s -> gb , 4 ) ) != 1 ) { av_log ( avctx , AV_LOG_ERROR , "XCh extension amode %d not" " supported!\n" , ext_amode ) ; continue ; } dca_parse_audio_coding_header ( s , s -> xch_base_channel ) ; for ( i = 0 ; i < ( s -> sample_blocks / 8 ) ; i ++ ) if ( ( ret = dca_decode_block ( s , s -> xch_base_channel , i ) ) ) { av_log ( avctx , AV_LOG_ERROR , "error decoding XCh extension\n" ) ; continue ; } s -> xch_present = 1 ; break ; } case 0x47004a03 : s -> core_ext_mask |= DCA_EXT_XXCH ; break ; case 0x1d95f262 : { int fsize96 = show_bits ( & s -> gb , 12 ) + 1 ; if ( s -> frame_size != ( get_bits_count ( & s -> gb ) >> 3 ) - 4 + fsize96 ) continue ; av_log ( avctx , AV_LOG_DEBUG , "X96 extension found at %d bits\n" , get_bits_count ( & s -> gb ) ) ; skip_bits ( & s -> gb , 12 ) ; av_log ( avctx , AV_LOG_DEBUG , "FSIZE96 = %d bytes\n" , fsize96 ) ; av_log ( avctx , AV_LOG_DEBUG , "REVNO = %d\n" , get_bits ( & s -> gb , 4 ) ) ; s -> core_ext_mask |= DCA_EXT_X96 ; break ; } } skip_bits_long ( & s -> gb , ( - get_bits_count ( & s -> gb ) ) & 31 ) ; } } else { skip_bits_long ( & s -> gb , core_ss_end - get_bits_count ( & s -> gb ) ) ; } if ( s -> core_ext_mask & DCA_EXT_X96 ) s -> profile = FF_PROFILE_DTS_96_24 ; else if ( s -> core_ext_mask & ( DCA_EXT_XCH | DCA_EXT_XXCH ) ) s -> profile = FF_PROFILE_DTS_ES ; if ( s -> dca_buffer_size - s -> frame_size > 32 && get_bits_long ( & s -> gb , 32 ) == DCA_HD_MARKER ) dca_exss_parse_header ( s ) ; avctx -> 
profile = s -> profile ; full_channels = channels = s -> prim_channels + ! ! s -> lfe ; if ( s -> amode < 16 ) { avctx -> channel_layout = dca_core_channel_layout [ s -> amode ] ; if ( s -> xch_present && ( ! avctx -> request_channels || avctx -> request_channels > num_core_channels + ! ! s -> lfe ) ) { avctx -> channel_layout |= AV_CH_BACK_CENTER ; if ( s -> lfe ) { avctx -> channel_layout |= AV_CH_LOW_FREQUENCY ; s -> channel_order_tab = dca_channel_reorder_lfe_xch [ s -> amode ] ; } else { s -> channel_order_tab = dca_channel_reorder_nolfe_xch [ s -> amode ] ; } } else { channels = num_core_channels + ! ! s -> lfe ; s -> xch_present = 0 ; if ( s -> lfe ) { avctx -> channel_layout |= AV_CH_LOW_FREQUENCY ; s -> channel_order_tab = dca_channel_reorder_lfe [ s -> amode ] ; } else s -> channel_order_tab = dca_channel_reorder_nolfe [ s -> amode ] ; } if ( channels > ! ! s -> lfe && s -> channel_order_tab [ channels - 1 - ! ! s -> lfe ] < 0 ) return AVERROR_INVALIDDATA ; if ( avctx -> request_channels == 2 && s -> prim_channels > 2 ) { channels = 2 ; s -> output = DCA_STEREO ; avctx -> channel_layout = AV_CH_LAYOUT_STEREO ; } } else { av_log ( avctx , AV_LOG_ERROR , "Non standard configuration %d !\n" , s -> amode ) ; return AVERROR_INVALIDDATA ; } avctx -> channels = channels ; frame -> nb_samples = 256 * ( s -> sample_blocks / 8 ) ; if ( ( ret = ff_get_buffer ( avctx , frame ) ) < 0 ) { av_log ( avctx , AV_LOG_ERROR , "get_buffer() failed\n" ) ; return ret ; } samples_flt = ( float * * ) frame -> extended_data ; if ( avctx -> channels < full_channels ) { ret = av_samples_get_buffer_size ( NULL , full_channels - channels , frame -> nb_samples , avctx -> sample_fmt , 0 ) ; if ( ret < 0 ) return ret ; av_fast_malloc ( & s -> extra_channels_buffer , & s -> extra_channels_buffer_size , ret ) ; if ( ! s -> extra_channels_buffer ) return AVERROR ( ENOMEM ) ; ret = av_samples_fill_arrays ( ( uint8_t * * ) s -> extra_channels , NULL , s -> extra_channels_buffer , full_channels - channels , frame -> nb_samples , avctx -> sample_fmt , 0 ) ; if ( ret < 0 ) return ret ; } for ( i = 0 ; i < ( s -> sample_blocks / 8 ) ; i ++ ) { int ch ; for ( ch = 0 ; ch < channels ; ch ++ ) s -> samples_chanptr [ ch ] = samples_flt [ ch ] + i * 256 ; for ( ; ch < full_channels ; ch ++ ) s -> samples_chanptr [ ch ] = s -> extra_channels [ ch - channels ] + i * 256 ; dca_filter_channels ( s , i ) ; if ( ( s -> source_pcm_res & 1 ) && s -> xch_present ) { float * back_chan = s -> samples_chanptr [ s -> channel_order_tab [ s -> xch_base_channel ] ] ; float * lt_chan = s -> samples_chanptr [ s -> channel_order_tab [ s -> xch_base_channel - 2 ] ] ; float * rt_chan = s -> samples_chanptr [ s -> channel_order_tab [ s -> xch_base_channel - 1 ] ] ; s -> fdsp . vector_fmac_scalar ( lt_chan , back_chan , - M_SQRT1_2 , 256 ) ; s -> fdsp . vector_fmac_scalar ( rt_chan , back_chan , - M_SQRT1_2 , 256 ) ; } } lfe_samples = 2 * s -> lfe * ( s -> sample_blocks / 8 ) ; for ( i = 0 ; i < 2 * s -> lfe * 4 ; i ++ ) s -> lfe_data [ i ] = s -> lfe_data [ i + lfe_samples ] ; * got_frame_ptr = 1 ; return buf_size ; }
static int dca_decode_frame ( AVCodecContext * avctx , void * data , int * got_frame_ptr , AVPacket * avpkt ) { AVFrame * frame = data ; const uint8_t * buf = avpkt -> data ; int buf_size = avpkt -> size ; int lfe_samples ; int num_core_channels = 0 ; int i , ret ; float * * samples_flt ; DCAContext * s = avctx -> priv_data ; int channels , full_channels ; int core_ss_end ; s -> xch_present = 0 ; s -> dca_buffer_size = ff_dca_convert_bitstream ( buf , buf_size , s -> dca_buffer , DCA_MAX_FRAME_SIZE + DCA_MAX_EXSS_HEADER_SIZE ) ; if ( s -> dca_buffer_size == AVERROR_INVALIDDATA ) { av_log ( avctx , AV_LOG_ERROR , "Not a valid DCA frame\n" ) ; return AVERROR_INVALIDDATA ; } init_get_bits ( & s -> gb , s -> dca_buffer , s -> dca_buffer_size * 8 ) ; if ( ( ret = dca_parse_frame_header ( s ) ) < 0 ) { return ret ; } avctx -> sample_rate = s -> sample_rate ; avctx -> bit_rate = s -> bit_rate ; s -> profile = FF_PROFILE_DTS ; for ( i = 0 ; i < ( s -> sample_blocks / 8 ) ; i ++ ) { if ( ( ret = dca_decode_block ( s , 0 , i ) ) ) { av_log ( avctx , AV_LOG_ERROR , "error decoding block\n" ) ; return ret ; } } num_core_channels = s -> prim_channels ; if ( s -> ext_coding ) s -> core_ext_mask = dca_ext_audio_descr_mask [ s -> ext_descr ] ; else s -> core_ext_mask = 0 ; core_ss_end = FFMIN ( s -> frame_size , s -> dca_buffer_size ) * 8 ; if ( s -> core_ext_mask < 0 || s -> core_ext_mask & DCA_EXT_XCH ) { s -> core_ext_mask = FFMAX ( s -> core_ext_mask , 0 ) ; skip_bits_long ( & s -> gb , ( - get_bits_count ( & s -> gb ) ) & 31 ) ; while ( core_ss_end - get_bits_count ( & s -> gb ) >= 32 ) { uint32_t bits = get_bits_long ( & s -> gb , 32 ) ; switch ( bits ) { case 0x5a5a5a5a : { int ext_amode , xch_fsize ; s -> xch_base_channel = s -> prim_channels ; xch_fsize = show_bits ( & s -> gb , 10 ) ; if ( ( s -> frame_size != ( get_bits_count ( & s -> gb ) >> 3 ) - 4 + xch_fsize ) && ( s -> frame_size != ( get_bits_count ( & s -> gb ) >> 3 ) - 4 + xch_fsize + 1 ) ) continue ; skip_bits ( & s -> gb , 10 ) ; s -> core_ext_mask |= DCA_EXT_XCH ; if ( ( ext_amode = get_bits ( & s -> gb , 4 ) ) != 1 ) { av_log ( avctx , AV_LOG_ERROR , "XCh extension amode %d not" " supported!\n" , ext_amode ) ; continue ; } dca_parse_audio_coding_header ( s , s -> xch_base_channel ) ; for ( i = 0 ; i < ( s -> sample_blocks / 8 ) ; i ++ ) if ( ( ret = dca_decode_block ( s , s -> xch_base_channel , i ) ) ) { av_log ( avctx , AV_LOG_ERROR , "error decoding XCh extension\n" ) ; continue ; } s -> xch_present = 1 ; break ; } case 0x47004a03 : s -> core_ext_mask |= DCA_EXT_XXCH ; break ; case 0x1d95f262 : { int fsize96 = show_bits ( & s -> gb , 12 ) + 1 ; if ( s -> frame_size != ( get_bits_count ( & s -> gb ) >> 3 ) - 4 + fsize96 ) continue ; av_log ( avctx , AV_LOG_DEBUG , "X96 extension found at %d bits\n" , get_bits_count ( & s -> gb ) ) ; skip_bits ( & s -> gb , 12 ) ; av_log ( avctx , AV_LOG_DEBUG , "FSIZE96 = %d bytes\n" , fsize96 ) ; av_log ( avctx , AV_LOG_DEBUG , "REVNO = %d\n" , get_bits ( & s -> gb , 4 ) ) ; s -> core_ext_mask |= DCA_EXT_X96 ; break ; } } skip_bits_long ( & s -> gb , ( - get_bits_count ( & s -> gb ) ) & 31 ) ; } } else { skip_bits_long ( & s -> gb , core_ss_end - get_bits_count ( & s -> gb ) ) ; } if ( s -> core_ext_mask & DCA_EXT_X96 ) s -> profile = FF_PROFILE_DTS_96_24 ; else if ( s -> core_ext_mask & ( DCA_EXT_XCH | DCA_EXT_XXCH ) ) s -> profile = FF_PROFILE_DTS_ES ; if ( s -> dca_buffer_size - s -> frame_size > 32 && get_bits_long ( & s -> gb , 32 ) == DCA_HD_MARKER ) dca_exss_parse_header ( s ) ; avctx -> profile = s -> profile ; full_channels = channels = s -> prim_channels + ! ! s -> lfe ; if ( s -> amode < 16 ) { avctx -> channel_layout = dca_core_channel_layout [ s -> amode ] ; if ( s -> xch_present && ( ! avctx -> request_channels || avctx -> request_channels > num_core_channels + ! ! s -> lfe ) ) { avctx -> channel_layout |= AV_CH_BACK_CENTER ; if ( s -> lfe ) { avctx -> channel_layout |= AV_CH_LOW_FREQUENCY ; s -> channel_order_tab = dca_channel_reorder_lfe_xch [ s -> amode ] ; } else { s -> channel_order_tab = dca_channel_reorder_nolfe_xch [ s -> amode ] ; } } else { channels = num_core_channels + ! ! s -> lfe ; s -> xch_present = 0 ; if ( s -> lfe ) { avctx -> channel_layout |= AV_CH_LOW_FREQUENCY ; s -> channel_order_tab = dca_channel_reorder_lfe [ s -> amode ] ; } else s -> channel_order_tab = dca_channel_reorder_nolfe [ s -> amode ] ; } if ( channels > ! ! s -> lfe && s -> channel_order_tab [ channels - 1 - ! ! s -> lfe ] < 0 ) return AVERROR_INVALIDDATA ; if ( avctx -> request_channels == 2 && s -> prim_channels > 2 ) { channels = 2 ; s -> output = DCA_STEREO ; avctx -> channel_layout = AV_CH_LAYOUT_STEREO ; } } else { av_log ( avctx , AV_LOG_ERROR , "Non standard configuration %d !\n" , s -> amode ) ; return AVERROR_INVALIDDATA ; } avctx -> channels = channels ; frame -> nb_samples = 256 * ( s -> sample_blocks / 8 ) ; if ( ( ret = ff_get_buffer ( avctx , frame ) ) < 0 ) { av_log ( avctx , AV_LOG_ERROR , "get_buffer() failed\n" ) ; return ret ; } samples_flt = ( float * * ) frame -> extended_data ; if ( avctx -> channels < full_channels ) { ret = av_samples_get_buffer_size ( NULL , full_channels - channels , frame -> nb_samples , avctx -> sample_fmt , 0 ) ; if ( ret < 0 ) return ret ; av_fast_malloc ( & s -> extra_channels_buffer , & s -> extra_channels_buffer_size , ret ) ; if ( ! s -> extra_channels_buffer ) return AVERROR ( ENOMEM ) ; ret = av_samples_fill_arrays ( ( uint8_t * * ) s -> extra_channels , NULL , s -> extra_channels_buffer , full_channels - channels , frame -> nb_samples , avctx -> sample_fmt , 0 ) ; if ( ret < 0 ) return ret ; } for ( i = 0 ; i < ( s -> sample_blocks / 8 ) ; i ++ ) { int ch ; for ( ch = 0 ; ch < channels ; ch ++ ) s -> samples_chanptr [ ch ] = samples_flt [ ch ] + i * 256 ; for ( ; ch < full_channels ; ch ++ ) s -> samples_chanptr [ ch ] = s -> extra_channels [ ch - channels ] + i * 256 ; dca_filter_channels ( s , i ) ; if ( ( s -> source_pcm_res & 1 ) && s -> xch_present ) { float * back_chan = s -> samples_chanptr [ s -> channel_order_tab [ s -> xch_base_channel ] ] ; float * lt_chan = s -> samples_chanptr [ s -> channel_order_tab [ s -> xch_base_channel - 2 ] ] ; float * rt_chan = s -> samples_chanptr [ s -> channel_order_tab [ s -> xch_base_channel - 1 ] ] ; s -> fdsp . vector_fmac_scalar ( lt_chan , back_chan , - M_SQRT1_2 , 256 ) ; s -> fdsp . vector_fmac_scalar ( rt_chan , back_chan , - M_SQRT1_2 , 256 ) ; } } lfe_samples = 2 * s -> lfe * ( s -> sample_blocks / 8 ) ; for ( i = 0 ; i < 2 * s -> lfe * 4 ; i ++ ) s -> lfe_data [ i ] = s -> lfe_data [ i + lfe_samples ] ; * got_frame_ptr = 1 ; return buf_size ; }
205
1
shmem_alloc_page(gfp_t gfp, struct shmem_inode_info *info, unsigned long idx) { struct vm_area_struct pvma; struct page *page; memset(&pvma, 0, sizeof(struct vm_area_struct)); pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx); pvma.vm_pgoff = idx; pvma.vm_end = PAGE_SIZE; page = alloc_page_vma(gfp | __GFP_ZERO, &pvma, 0); mpol_free(pvma.vm_policy); return page; }
shmem_alloc_page(gfp_t gfp, struct shmem_inode_info *info, unsigned long idx) { struct vm_area_struct pvma; struct page *page; memset(&pvma, 0, sizeof(struct vm_area_struct)); pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx); pvma.vm_pgoff = idx; pvma.vm_end = PAGE_SIZE; page = alloc_page_vma(gfp | __GFP_ZERO, &pvma, 0); mpol_free(pvma.vm_policy); return page; }
206
1
bool_t auth_gssapi_unwrap_data( OM_uint32 *major, OM_uint32 *minor, gss_ctx_id_t context, uint32_t seq_num, XDR *in_xdrs, bool_t (*xdr_func)(), caddr_t xdr_ptr) { gss_buffer_desc in_buf, out_buf; XDR temp_xdrs; uint32_t verf_seq_num; int conf, qop; unsigned int length; PRINTF(("gssapi_unwrap_data: starting\n")); *major = GSS_S_COMPLETE; *minor = 0; /* assumption */ in_buf.value = NULL; out_buf.value = NULL; if (! xdr_bytes(in_xdrs, (char **) &in_buf.value, &length, (unsigned int) -1)) { PRINTF(("gssapi_unwrap_data: deserializing encrypted data failed\n")); temp_xdrs.x_op = XDR_FREE; (void)xdr_bytes(&temp_xdrs, (char **) &in_buf.value, &length, (unsigned int) -1); return FALSE; } in_buf.length = length; *major = gss_unseal(minor, context, &in_buf, &out_buf, &conf, &qop); free(in_buf.value); if (*major != GSS_S_COMPLETE) return FALSE; PRINTF(("gssapi_unwrap_data: %llu bytes data, %llu bytes sealed\n", (unsigned long long)out_buf.length, (unsigned long long)in_buf.length)); xdrmem_create(&temp_xdrs, out_buf.value, out_buf.length, XDR_DECODE); /* deserialize the sequence number */ if (! xdr_u_int32(&temp_xdrs, &verf_seq_num)) { PRINTF(("gssapi_unwrap_data: deserializing verf_seq_num failed\n")); gss_release_buffer(minor, &out_buf); XDR_DESTROY(&temp_xdrs); return FALSE; } if (verf_seq_num != seq_num) { PRINTF(("gssapi_unwrap_data: seq %d specified, read %d\n", seq_num, verf_seq_num)); gss_release_buffer(minor, &out_buf); XDR_DESTROY(&temp_xdrs); return FALSE; } PRINTF(("gssapi_unwrap_data: unwrap seq_num %d okay\n", verf_seq_num)); /* deserialize the arguments into xdr_ptr */ if (! (*xdr_func)(&temp_xdrs, xdr_ptr)) { PRINTF(("gssapi_unwrap_data: deserializing arguments failed\n")); gss_release_buffer(minor, &out_buf); xdr_free(xdr_func, xdr_ptr); XDR_DESTROY(&temp_xdrs); return FALSE; } PRINTF(("gssapi_unwrap_data: succeeding\n\n")); gss_release_buffer(minor, &out_buf); XDR_DESTROY(&temp_xdrs); return TRUE; }
bool_t auth_gssapi_unwrap_data( OM_uint32 *major, OM_uint32 *minor, gss_ctx_id_t context, uint32_t seq_num, XDR *in_xdrs, bool_t (*xdr_func)(), caddr_t xdr_ptr) { gss_buffer_desc in_buf, out_buf; XDR temp_xdrs; uint32_t verf_seq_num; int conf, qop; unsigned int length; PRINTF(("gssapi_unwrap_data: starting\n")); *major = GSS_S_COMPLETE; *minor = 0; in_buf.value = NULL; out_buf.value = NULL; if (! xdr_bytes(in_xdrs, (char **) &in_buf.value, &length, (unsigned int) -1)) { PRINTF(("gssapi_unwrap_data: deserializing encrypted data failed\n")); temp_xdrs.x_op = XDR_FREE; (void)xdr_bytes(&temp_xdrs, (char **) &in_buf.value, &length, (unsigned int) -1); return FALSE; } in_buf.length = length; *major = gss_unseal(minor, context, &in_buf, &out_buf, &conf, &qop); free(in_buf.value); if (*major != GSS_S_COMPLETE) return FALSE; PRINTF(("gssapi_unwrap_data: %llu bytes data, %llu bytes sealed\n", (unsigned long long)out_buf.length, (unsigned long long)in_buf.length)); xdrmem_create(&temp_xdrs, out_buf.value, out_buf.length, XDR_DECODE); if (! xdr_u_int32(&temp_xdrs, &verf_seq_num)) { PRINTF(("gssapi_unwrap_data: deserializing verf_seq_num failed\n")); gss_release_buffer(minor, &out_buf); XDR_DESTROY(&temp_xdrs); return FALSE; } if (verf_seq_num != seq_num) { PRINTF(("gssapi_unwrap_data: seq %d specified, read %d\n", seq_num, verf_seq_num)); gss_release_buffer(minor, &out_buf); XDR_DESTROY(&temp_xdrs); return FALSE; } PRINTF(("gssapi_unwrap_data: unwrap seq_num %d okay\n", verf_seq_num)); if (! (*xdr_func)(&temp_xdrs, xdr_ptr)) { PRINTF(("gssapi_unwrap_data: deserializing arguments failed\n")); gss_release_buffer(minor, &out_buf); xdr_free(xdr_func, xdr_ptr); XDR_DESTROY(&temp_xdrs); return FALSE; } PRINTF(("gssapi_unwrap_data: succeeding\n\n")); gss_release_buffer(minor, &out_buf); XDR_DESTROY(&temp_xdrs); return TRUE; }
207
0
static int dca_exss_mask2count ( int mask ) { return av_popcount ( mask ) + av_popcount ( mask & ( DCA_EXSS_CENTER_LEFT_RIGHT | DCA_EXSS_FRONT_LEFT_RIGHT | DCA_EXSS_FRONT_HIGH_LEFT_RIGHT | DCA_EXSS_WIDE_LEFT_RIGHT | DCA_EXSS_SIDE_LEFT_RIGHT | DCA_EXSS_SIDE_HIGH_LEFT_RIGHT | DCA_EXSS_SIDE_REAR_LEFT_RIGHT | DCA_EXSS_REAR_LEFT_RIGHT | DCA_EXSS_REAR_HIGH_LEFT_RIGHT ) ) ; }
static int dca_exss_mask2count ( int mask ) { return av_popcount ( mask ) + av_popcount ( mask & ( DCA_EXSS_CENTER_LEFT_RIGHT | DCA_EXSS_FRONT_LEFT_RIGHT | DCA_EXSS_FRONT_HIGH_LEFT_RIGHT | DCA_EXSS_WIDE_LEFT_RIGHT | DCA_EXSS_SIDE_LEFT_RIGHT | DCA_EXSS_SIDE_HIGH_LEFT_RIGHT | DCA_EXSS_SIDE_REAR_LEFT_RIGHT | DCA_EXSS_REAR_LEFT_RIGHT | DCA_EXSS_REAR_HIGH_LEFT_RIGHT ) ) ; }
208
0
static int qcow2_read_extensions(BlockDriverState *bs, uint64_t start_offset, uint64_t end_offset, void **p_feature_table, int flags, Error **errp) { BDRVQcow2State *s = bs->opaque; QCowExtension ext; uint64_t offset; int ret; #ifdef DEBUG_EXT printf("qcow2_read_extensions: start=%ld end=%ld\n", start_offset, end_offset); #endif offset = start_offset; while (offset < end_offset) { #ifdef DEBUG_EXT /* Sanity check */ if (offset > s->cluster_size) printf("qcow2_read_extension: suspicious offset %lu\n", offset); printf("attempting to read extended header in offset %lu\n", offset); #endif ret = bdrv_pread(bs->file, offset, &ext, sizeof(ext)); if (ret < 0) { error_setg_errno(errp, -ret, "qcow2_read_extension: ERROR: " "pread fail from offset %" PRIu64, offset); return 1; } be32_to_cpus(&ext.magic); be32_to_cpus(&ext.len); offset += sizeof(ext); #ifdef DEBUG_EXT printf("ext.magic = 0x%x\n", ext.magic); #endif if (offset > end_offset || ext.len > end_offset - offset) { error_setg(errp, "Header extension too large"); return -EINVAL; } switch (ext.magic) { case QCOW2_EXT_MAGIC_END: return 0; case QCOW2_EXT_MAGIC_BACKING_FORMAT: if (ext.len >= sizeof(bs->backing_format)) { error_setg(errp, "ERROR: ext_backing_format: len=%" PRIu32 " too large (>=%zu)", ext.len, sizeof(bs->backing_format)); return 2; } ret = bdrv_pread(bs->file, offset, bs->backing_format, ext.len); if (ret < 0) { error_setg_errno(errp, -ret, "ERROR: ext_backing_format: " "Could not read format name"); return 3; } bs->backing_format[ext.len] = '\0'; s->image_backing_format = g_strdup(bs->backing_format); #ifdef DEBUG_EXT printf("Qcow2: Got format extension %s\n", bs->backing_format); #endif break; case QCOW2_EXT_MAGIC_FEATURE_TABLE: if (p_feature_table != NULL) { void* feature_table = g_malloc0(ext.len + 2 * sizeof(Qcow2Feature)); ret = bdrv_pread(bs->file, offset , feature_table, ext.len); if (ret < 0) { error_setg_errno(errp, -ret, "ERROR: ext_feature_table: " "Could not read table"); return ret; } *p_feature_table = feature_table; } break; case QCOW2_EXT_MAGIC_CRYPTO_HEADER: { unsigned int cflags = 0; if (s->crypt_method_header != QCOW_CRYPT_LUKS) { error_setg(errp, "CRYPTO header extension only " "expected with LUKS encryption method"); return -EINVAL; } if (ext.len != sizeof(Qcow2CryptoHeaderExtension)) { error_setg(errp, "CRYPTO header extension size %u, " "but expected size %zu", ext.len, sizeof(Qcow2CryptoHeaderExtension)); return -EINVAL; } ret = bdrv_pread(bs->file, offset, &s->crypto_header, ext.len); if (ret < 0) { error_setg_errno(errp, -ret, "Unable to read CRYPTO header extension"); return ret; } be64_to_cpus(&s->crypto_header.offset); be64_to_cpus(&s->crypto_header.length); if ((s->crypto_header.offset % s->cluster_size) != 0) { error_setg(errp, "Encryption header offset '%" PRIu64 "' is " "not a multiple of cluster size '%u'", s->crypto_header.offset, s->cluster_size); return -EINVAL; } if (flags & BDRV_O_NO_IO) { cflags |= QCRYPTO_BLOCK_OPEN_NO_IO; } s->crypto = qcrypto_block_open(s->crypto_opts, "encrypt.", qcow2_crypto_hdr_read_func, bs, cflags, errp); if (!s->crypto) { return -EINVAL; } } break; default: /* unknown magic - save it in case we need to rewrite the header */ { Qcow2UnknownHeaderExtension *uext; uext = g_malloc0(sizeof(*uext) + ext.len); uext->magic = ext.magic; uext->len = ext.len; QLIST_INSERT_HEAD(&s->unknown_header_ext, uext, next); ret = bdrv_pread(bs->file, offset , uext->data, uext->len); if (ret < 0) { error_setg_errno(errp, -ret, "ERROR: unknown extension: " "Could not read data"); return ret; } } break; } offset += ((ext.len + 7) & ~7); } return 0; }
static int qcow2_read_extensions(BlockDriverState *bs, uint64_t start_offset, uint64_t end_offset, void **p_feature_table, int flags, Error **errp) { BDRVQcow2State *s = bs->opaque; QCowExtension ext; uint64_t offset; int ret; #ifdef DEBUG_EXT printf("qcow2_read_extensions: start=%ld end=%ld\n", start_offset, end_offset); #endif offset = start_offset; while (offset < end_offset) { #ifdef DEBUG_EXT if (offset > s->cluster_size) printf("qcow2_read_extension: suspicious offset %lu\n", offset); printf("attempting to read extended header in offset %lu\n", offset); #endif ret = bdrv_pread(bs->file, offset, &ext, sizeof(ext)); if (ret < 0) { error_setg_errno(errp, -ret, "qcow2_read_extension: ERROR: " "pread fail from offset %" PRIu64, offset); return 1; } be32_to_cpus(&ext.magic); be32_to_cpus(&ext.len); offset += sizeof(ext); #ifdef DEBUG_EXT printf("ext.magic = 0x%x\n", ext.magic); #endif if (offset > end_offset || ext.len > end_offset - offset) { error_setg(errp, "Header extension too large"); return -EINVAL; } switch (ext.magic) { case QCOW2_EXT_MAGIC_END: return 0; case QCOW2_EXT_MAGIC_BACKING_FORMAT: if (ext.len >= sizeof(bs->backing_format)) { error_setg(errp, "ERROR: ext_backing_format: len=%" PRIu32 " too large (>=%zu)", ext.len, sizeof(bs->backing_format)); return 2; } ret = bdrv_pread(bs->file, offset, bs->backing_format, ext.len); if (ret < 0) { error_setg_errno(errp, -ret, "ERROR: ext_backing_format: " "Could not read format name"); return 3; } bs->backing_format[ext.len] = '\0'; s->image_backing_format = g_strdup(bs->backing_format); #ifdef DEBUG_EXT printf("Qcow2: Got format extension %s\n", bs->backing_format); #endif break; case QCOW2_EXT_MAGIC_FEATURE_TABLE: if (p_feature_table != NULL) { void* feature_table = g_malloc0(ext.len + 2 * sizeof(Qcow2Feature)); ret = bdrv_pread(bs->file, offset , feature_table, ext.len); if (ret < 0) { error_setg_errno(errp, -ret, "ERROR: ext_feature_table: " "Could not read table"); return ret; } *p_feature_table = feature_table; } break; case QCOW2_EXT_MAGIC_CRYPTO_HEADER: { unsigned int cflags = 0; if (s->crypt_method_header != QCOW_CRYPT_LUKS) { error_setg(errp, "CRYPTO header extension only " "expected with LUKS encryption method"); return -EINVAL; } if (ext.len != sizeof(Qcow2CryptoHeaderExtension)) { error_setg(errp, "CRYPTO header extension size %u, " "but expected size %zu", ext.len, sizeof(Qcow2CryptoHeaderExtension)); return -EINVAL; } ret = bdrv_pread(bs->file, offset, &s->crypto_header, ext.len); if (ret < 0) { error_setg_errno(errp, -ret, "Unable to read CRYPTO header extension"); return ret; } be64_to_cpus(&s->crypto_header.offset); be64_to_cpus(&s->crypto_header.length); if ((s->crypto_header.offset % s->cluster_size) != 0) { error_setg(errp, "Encryption header offset '%" PRIu64 "' is " "not a multiple of cluster size '%u'", s->crypto_header.offset, s->cluster_size); return -EINVAL; } if (flags & BDRV_O_NO_IO) { cflags |= QCRYPTO_BLOCK_OPEN_NO_IO; } s->crypto = qcrypto_block_open(s->crypto_opts, "encrypt.", qcow2_crypto_hdr_read_func, bs, cflags, errp); if (!s->crypto) { return -EINVAL; } } break; default: { Qcow2UnknownHeaderExtension *uext; uext = g_malloc0(sizeof(*uext) + ext.len); uext->magic = ext.magic; uext->len = ext.len; QLIST_INSERT_HEAD(&s->unknown_header_ext, uext, next); ret = bdrv_pread(bs->file, offset , uext->data, uext->len); if (ret < 0) { error_setg_errno(errp, -ret, "ERROR: unknown extension: " "Could not read data"); return ret; } } break; } offset += ((ext.len + 7) & ~7); } return 0; }
209
0
static void pdf_copy_gstate ( fz_context * ctx , pdf_gstate * gs , pdf_gstate * old ) { pdf_drop_gstate ( ctx , gs ) ; * gs = * old ; pdf_keep_gstate ( ctx , gs ) ; }
static void pdf_copy_gstate ( fz_context * ctx , pdf_gstate * gs , pdf_gstate * old ) { pdf_drop_gstate ( ctx , gs ) ; * gs = * old ; pdf_keep_gstate ( ctx , gs ) ; }
211
1
ProcPanoramiXShmCreatePixmap( register ClientPtr client) { ScreenPtr pScreen = NULL; PixmapPtr pMap = NULL; DrawablePtr pDraw; DepthPtr pDepth; int i, j, result, rc; ShmDescPtr shmdesc; REQUEST(xShmCreatePixmapReq); unsigned int width, height, depth; unsigned long size; PanoramiXRes *newPix; REQUEST_SIZE_MATCH(xShmCreatePixmapReq); client->errorValue = stuff->pid; if (!sharedPixmaps) return BadImplementation; LEGAL_NEW_RESOURCE(stuff->pid, client); rc = dixLookupDrawable(&pDraw, stuff->drawable, client, M_ANY, DixUnknownAccess); if (rc != Success) return rc; VERIFY_SHMPTR(stuff->shmseg, stuff->offset, TRUE, shmdesc, client); width = stuff->width; height = stuff->height; depth = stuff->depth; if (!width || !height || !depth) { client->errorValue = 0; return BadValue; } if (width > 32767 || height > 32767) return BadAlloc; if (stuff->depth != 1) { pDepth = pDraw->pScreen->allowedDepths; for (i=0; i<pDraw->pScreen->numDepths; i++, pDepth++) if (pDepth->depth == stuff->depth) goto CreatePmap; client->errorValue = stuff->depth; return BadValue; } CreatePmap: size = PixmapBytePad(width, depth) * height; if (sizeof(size) == 4 && BitsPerPixel(depth) > 8) { if (size < width * height) return BadAlloc; /* thankfully, offset is unsigned */ if (stuff->offset + size < size) return BadAlloc; } VERIFY_SHMSIZE(shmdesc, stuff->offset, size, client); if(!(newPix = (PanoramiXRes *) xalloc(sizeof(PanoramiXRes)))) return BadAlloc; newPix->type = XRT_PIXMAP; newPix->u.pix.shared = TRUE; newPix->info[0].id = stuff->pid; for(j = 1; j < PanoramiXNumScreens; j++) newPix->info[j].id = FakeClientID(client->index); result = (client->noClientException); FOR_NSCREENS(j) { pScreen = screenInfo.screens[j]; pMap = (*shmFuncs[j]->CreatePixmap)(pScreen, stuff->width, stuff->height, stuff->depth, shmdesc->addr + stuff->offset); if (pMap) { dixSetPrivate(&pMap->devPrivates, shmPixmapPrivate, shmdesc); shmdesc->refcnt++; pMap->drawable.serialNumber = NEXT_SERIAL_NUMBER; pMap->drawable.id = newPix->info[j].id; if (!AddResource(newPix->info[j].id, RT_PIXMAP, (pointer)pMap)) { (*pScreen->DestroyPixmap)(pMap); result = BadAlloc; break; } } else { result = BadAlloc; break; } } if(result == BadAlloc) { while(j--) { (*pScreen->DestroyPixmap)(pMap); FreeResource(newPix->info[j].id, RT_NONE); } xfree(newPix); } else AddResource(stuff->pid, XRT_PIXMAP, newPix); return result; }
ProcPanoramiXShmCreatePixmap( register ClientPtr client) { ScreenPtr pScreen = NULL; PixmapPtr pMap = NULL; DrawablePtr pDraw; DepthPtr pDepth; int i, j, result, rc; ShmDescPtr shmdesc; REQUEST(xShmCreatePixmapReq); unsigned int width, height, depth; unsigned long size; PanoramiXRes *newPix; REQUEST_SIZE_MATCH(xShmCreatePixmapReq); client->errorValue = stuff->pid; if (!sharedPixmaps) return BadImplementation; LEGAL_NEW_RESOURCE(stuff->pid, client); rc = dixLookupDrawable(&pDraw, stuff->drawable, client, M_ANY, DixUnknownAccess); if (rc != Success) return rc; VERIFY_SHMPTR(stuff->shmseg, stuff->offset, TRUE, shmdesc, client); width = stuff->width; height = stuff->height; depth = stuff->depth; if (!width || !height || !depth) { client->errorValue = 0; return BadValue; } if (width > 32767 || height > 32767) return BadAlloc; if (stuff->depth != 1) { pDepth = pDraw->pScreen->allowedDepths; for (i=0; i<pDraw->pScreen->numDepths; i++, pDepth++) if (pDepth->depth == stuff->depth) goto CreatePmap; client->errorValue = stuff->depth; return BadValue; } CreatePmap: size = PixmapBytePad(width, depth) * height; if (sizeof(size) == 4 && BitsPerPixel(depth) > 8) { if (size < width * height) return BadAlloc; if (stuff->offset + size < size) return BadAlloc; } VERIFY_SHMSIZE(shmdesc, stuff->offset, size, client); if(!(newPix = (PanoramiXRes *) xalloc(sizeof(PanoramiXRes)))) return BadAlloc; newPix->type = XRT_PIXMAP; newPix->u.pix.shared = TRUE; newPix->info[0].id = stuff->pid; for(j = 1; j < PanoramiXNumScreens; j++) newPix->info[j].id = FakeClientID(client->index); result = (client->noClientException); FOR_NSCREENS(j) { pScreen = screenInfo.screens[j]; pMap = (*shmFuncs[j]->CreatePixmap)(pScreen, stuff->width, stuff->height, stuff->depth, shmdesc->addr + stuff->offset); if (pMap) { dixSetPrivate(&pMap->devPrivates, shmPixmapPrivate, shmdesc); shmdesc->refcnt++; pMap->drawable.serialNumber = NEXT_SERIAL_NUMBER; pMap->drawable.id = newPix->info[j].id; if (!AddResource(newPix->info[j].id, RT_PIXMAP, (pointer)pMap)) { (*pScreen->DestroyPixmap)(pMap); result = BadAlloc; break; } } else { result = BadAlloc; break; } } if(result == BadAlloc) { while(j--) { (*pScreen->DestroyPixmap)(pMap); FreeResource(newPix->info[j].id, RT_NONE); } xfree(newPix); } else AddResource(stuff->pid, XRT_PIXMAP, newPix); return result; }
212
1
check_rpcsec_auth(struct svc_req *rqstp) { gss_ctx_id_t ctx; krb5_context kctx; OM_uint32 maj_stat, min_stat; gss_name_t name; krb5_principal princ; int ret, success; krb5_data *c1, *c2, *realm; gss_buffer_desc gss_str; kadm5_server_handle_t handle; size_t slen; char *sdots; success = 0; handle = (kadm5_server_handle_t)global_server_handle; if (rqstp->rq_cred.oa_flavor != RPCSEC_GSS) return 0; ctx = rqstp->rq_svccred; maj_stat = gss_inquire_context(&min_stat, ctx, NULL, &name, NULL, NULL, NULL, NULL, NULL); if (maj_stat != GSS_S_COMPLETE) { krb5_klog_syslog(LOG_ERR, _("check_rpcsec_auth: failed " "inquire_context, stat=%u"), maj_stat); log_badauth(maj_stat, min_stat, rqstp->rq_xprt, NULL); goto fail_name; } kctx = handle->context; ret = gss_to_krb5_name_1(rqstp, kctx, name, &princ, &gss_str); if (ret == 0) goto fail_name; slen = gss_str.length; trunc_name(&slen, &sdots); /* * Since we accept with GSS_C_NO_NAME, the client can authenticate * against the entire kdb. Therefore, ensure that the service * name is something reasonable. */ if (krb5_princ_size(kctx, princ) != 2) goto fail_princ; c1 = krb5_princ_component(kctx, princ, 0); c2 = krb5_princ_component(kctx, princ, 1); realm = krb5_princ_realm(kctx, princ); if (strncmp(handle->params.realm, realm->data, realm->length) == 0 && strncmp("kadmin", c1->data, c1->length) == 0) { if (strncmp("history", c2->data, c2->length) == 0) goto fail_princ; else success = 1; } fail_princ: if (!success) { krb5_klog_syslog(LOG_ERR, _("bad service principal %.*s%s"), (int) slen, (char *) gss_str.value, sdots); } gss_release_buffer(&min_stat, &gss_str); krb5_free_principal(kctx, princ); fail_name: gss_release_name(&min_stat, &name); return success; }
check_rpcsec_auth(struct svc_req *rqstp) { gss_ctx_id_t ctx; krb5_context kctx; OM_uint32 maj_stat, min_stat; gss_name_t name; krb5_principal princ; int ret, success; krb5_data *c1, *c2, *realm; gss_buffer_desc gss_str; kadm5_server_handle_t handle; size_t slen; char *sdots; success = 0; handle = (kadm5_server_handle_t)global_server_handle; if (rqstp->rq_cred.oa_flavor != RPCSEC_GSS) return 0; ctx = rqstp->rq_svccred; maj_stat = gss_inquire_context(&min_stat, ctx, NULL, &name, NULL, NULL, NULL, NULL, NULL); if (maj_stat != GSS_S_COMPLETE) { krb5_klog_syslog(LOG_ERR, _("check_rpcsec_auth: failed " "inquire_context, stat=%u"), maj_stat); log_badauth(maj_stat, min_stat, rqstp->rq_xprt, NULL); goto fail_name; } kctx = handle->context; ret = gss_to_krb5_name_1(rqstp, kctx, name, &princ, &gss_str); if (ret == 0) goto fail_name; slen = gss_str.length; trunc_name(&slen, &sdots); if (krb5_princ_size(kctx, princ) != 2) goto fail_princ; c1 = krb5_princ_component(kctx, princ, 0); c2 = krb5_princ_component(kctx, princ, 1); realm = krb5_princ_realm(kctx, princ); if (strncmp(handle->params.realm, realm->data, realm->length) == 0 && strncmp("kadmin", c1->data, c1->length) == 0) { if (strncmp("history", c2->data, c2->length) == 0) goto fail_princ; else success = 1; } fail_princ: if (!success) { krb5_klog_syslog(LOG_ERR, _("bad service principal %.*s%s"), (int) slen, (char *) gss_str.value, sdots); } gss_release_buffer(&min_stat, &gss_str); krb5_free_principal(kctx, princ); fail_name: gss_release_name(&min_stat, &name); return success; }
213
0
static void listener_add_address_space(MemoryListener *listener, AddressSpace *as) { FlatView *view; FlatRange *fr; if (listener->address_space_filter && listener->address_space_filter != as) { return; } if (global_dirty_log) { if (listener->log_global_start) { listener->log_global_start(listener); } } view = as->current_map; FOR_EACH_FLAT_RANGE(fr, view) { MemoryRegionSection section = { .mr = fr->mr, .address_space = as, .offset_within_region = fr->offset_in_region, .size = fr->addr.size, .offset_within_address_space = int128_get64(fr->addr.start), .readonly = fr->readonly, }; if (listener->region_add) { listener->region_add(listener, &section); } } }
static void listener_add_address_space(MemoryListener *listener, AddressSpace *as) { FlatView *view; FlatRange *fr; if (listener->address_space_filter && listener->address_space_filter != as) { return; } if (global_dirty_log) { if (listener->log_global_start) { listener->log_global_start(listener); } } view = as->current_map; FOR_EACH_FLAT_RANGE(fr, view) { MemoryRegionSection section = { .mr = fr->mr, .address_space = as, .offset_within_region = fr->offset_in_region, .size = fr->addr.size, .offset_within_address_space = int128_get64(fr->addr.start), .readonly = fr->readonly, }; if (listener->region_add) { listener->region_add(listener, &section); } } }
214
0
long do_sigreturn(CPUM68KState *env) { struct target_sigframe *frame; abi_ulong frame_addr = env->aregs[7] - 4; target_sigset_t target_set; sigset_t set; int d0, i; if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) goto badframe; /* set blocked signals */ if (__get_user(target_set.sig[0], &frame->sc.sc_mask)) goto badframe; for(i = 1; i < TARGET_NSIG_WORDS; i++) { if (__get_user(target_set.sig[i], &frame->extramask[i - 1])) goto badframe; } target_to_host_sigset_internal(&set, &target_set); sigprocmask(SIG_SETMASK, &set, NULL); /* restore registers */ if (restore_sigcontext(env, &frame->sc, &d0)) goto badframe; unlock_user_struct(frame, frame_addr, 0); return d0; badframe: unlock_user_struct(frame, frame_addr, 0); force_sig(TARGET_SIGSEGV); return 0; }
long do_sigreturn(CPUM68KState *env) { struct target_sigframe *frame; abi_ulong frame_addr = env->aregs[7] - 4; target_sigset_t target_set; sigset_t set; int d0, i; if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) goto badframe; if (__get_user(target_set.sig[0], &frame->sc.sc_mask)) goto badframe; for(i = 1; i < TARGET_NSIG_WORDS; i++) { if (__get_user(target_set.sig[i], &frame->extramask[i - 1])) goto badframe; } target_to_host_sigset_internal(&set, &target_set); sigprocmask(SIG_SETMASK, &set, NULL); if (restore_sigcontext(env, &frame->sc, &d0)) goto badframe; unlock_user_struct(frame, frame_addr, 0); return d0; badframe: unlock_user_struct(frame, frame_addr, 0); force_sig(TARGET_SIGSEGV); return 0; }
216
1
static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt, struct tss_segment_16 *tss) { int ret; u8 cpl; ctxt->_eip = tss->ip; ctxt->eflags = tss->flag | 2; *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax; *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx; *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx; *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx; *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp; *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp; *reg_write(ctxt, VCPU_REGS_RSI) = tss->si; *reg_write(ctxt, VCPU_REGS_RDI) = tss->di; /* * SDM says that segment selectors are loaded before segment * descriptors */ set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR); set_segment_selector(ctxt, tss->es, VCPU_SREG_ES); set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS); set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS); set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS); cpl = tss->cs & 3; /* * Now load segment descriptors. If fault happens at this stage * it is handled in a context of new task */ ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl, true); if (ret != X86EMUL_CONTINUE) return ret; ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl, true); if (ret != X86EMUL_CONTINUE) return ret; ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl, true); if (ret != X86EMUL_CONTINUE) return ret; ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl, true); if (ret != X86EMUL_CONTINUE) return ret; ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl, true); if (ret != X86EMUL_CONTINUE) return ret; return X86EMUL_CONTINUE; }
static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt, struct tss_segment_16 *tss) { int ret; u8 cpl; ctxt->_eip = tss->ip; ctxt->eflags = tss->flag | 2; *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax; *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx; *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx; *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx; *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp; *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp; *reg_write(ctxt, VCPU_REGS_RSI) = tss->si; *reg_write(ctxt, VCPU_REGS_RDI) = tss->di; set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR); set_segment_selector(ctxt, tss->es, VCPU_SREG_ES); set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS); set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS); set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS); cpl = tss->cs & 3; ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl, true); if (ret != X86EMUL_CONTINUE) return ret; ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl, true); if (ret != X86EMUL_CONTINUE) return ret; ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl, true); if (ret != X86EMUL_CONTINUE) return ret; ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl, true); if (ret != X86EMUL_CONTINUE) return ret; ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl, true); if (ret != X86EMUL_CONTINUE) return ret; return X86EMUL_CONTINUE; }
218
1
gssrpc__svcauth_gss(struct svc_req *rqst, struct rpc_msg *msg, bool_t *no_dispatch) { enum auth_stat retstat; XDR xdrs; SVCAUTH *auth; struct svc_rpc_gss_data *gd; struct rpc_gss_cred *gc; struct rpc_gss_init_res gr; int call_stat, offset; OM_uint32 min_stat; log_debug("in svcauth_gss()"); /* Initialize reply. */ rqst->rq_xprt->xp_verf = gssrpc__null_auth; /* Allocate and set up server auth handle. */ if (rqst->rq_xprt->xp_auth == NULL || rqst->rq_xprt->xp_auth == &svc_auth_none) { if ((auth = calloc(sizeof(*auth), 1)) == NULL) { fprintf(stderr, "svcauth_gss: out_of_memory\n"); return (AUTH_FAILED); } if ((gd = calloc(sizeof(*gd), 1)) == NULL) { fprintf(stderr, "svcauth_gss: out_of_memory\n"); return (AUTH_FAILED); } auth->svc_ah_ops = &svc_auth_gss_ops; SVCAUTH_PRIVATE(auth) = gd; rqst->rq_xprt->xp_auth = auth; } else gd = SVCAUTH_PRIVATE(rqst->rq_xprt->xp_auth); log_debug("xp_auth=%p, gd=%p", rqst->rq_xprt->xp_auth, gd); /* Deserialize client credentials. */ if (rqst->rq_cred.oa_length <= 0) return (AUTH_BADCRED); gc = (struct rpc_gss_cred *)rqst->rq_clntcred; memset(gc, 0, sizeof(*gc)); log_debug("calling xdrmem_create()"); log_debug("oa_base=%p, oa_length=%u", rqst->rq_cred.oa_base, rqst->rq_cred.oa_length); xdrmem_create(&xdrs, rqst->rq_cred.oa_base, rqst->rq_cred.oa_length, XDR_DECODE); log_debug("xdrmem_create() returned"); if (!xdr_rpc_gss_cred(&xdrs, gc)) { log_debug("xdr_rpc_gss_cred() failed"); XDR_DESTROY(&xdrs); return (AUTH_BADCRED); } XDR_DESTROY(&xdrs); retstat = AUTH_FAILED; #define ret_freegc(code) do { retstat = code; goto freegc; } while (0) /* Check version. */ if (gc->gc_v != RPCSEC_GSS_VERSION) ret_freegc (AUTH_BADCRED); /* Check RPCSEC_GSS service. */ if (gc->gc_svc != RPCSEC_GSS_SVC_NONE && gc->gc_svc != RPCSEC_GSS_SVC_INTEGRITY && gc->gc_svc != RPCSEC_GSS_SVC_PRIVACY) ret_freegc (AUTH_BADCRED); /* Check sequence number. */ if (gd->established) { if (gc->gc_seq > MAXSEQ) ret_freegc (RPCSEC_GSS_CTXPROBLEM); if ((offset = gd->seqlast - gc->gc_seq) < 0) { gd->seqlast = gc->gc_seq; offset = 0 - offset; gd->seqmask <<= offset; offset = 0; } else if ((u_int)offset >= gd->win || (gd->seqmask & (1 << offset))) { *no_dispatch = 1; ret_freegc (RPCSEC_GSS_CTXPROBLEM); } gd->seq = gc->gc_seq; gd->seqmask |= (1 << offset); } if (gd->established) { rqst->rq_clntname = (char *)gd->client_name; rqst->rq_svccred = (char *)gd->ctx; } /* Handle RPCSEC_GSS control procedure. */ switch (gc->gc_proc) { case RPCSEC_GSS_INIT: case RPCSEC_GSS_CONTINUE_INIT: if (rqst->rq_proc != NULLPROC) ret_freegc (AUTH_FAILED); /* XXX ? */ if (!svcauth_gss_acquire_cred()) ret_freegc (AUTH_FAILED); if (!svcauth_gss_accept_sec_context(rqst, &gr)) ret_freegc (AUTH_REJECTEDCRED); if (!svcauth_gss_nextverf(rqst, htonl(gr.gr_win))) { gss_release_buffer(&min_stat, &gr.gr_token); mem_free(gr.gr_ctx.value, sizeof(gss_union_ctx_id_desc)); ret_freegc (AUTH_FAILED); } *no_dispatch = TRUE; call_stat = svc_sendreply(rqst->rq_xprt, xdr_rpc_gss_init_res, (caddr_t)&gr); gss_release_buffer(&min_stat, &gr.gr_token); gss_release_buffer(&min_stat, &gd->checksum); mem_free(gr.gr_ctx.value, sizeof(gss_union_ctx_id_desc)); if (!call_stat) ret_freegc (AUTH_FAILED); if (gr.gr_major == GSS_S_COMPLETE) gd->established = TRUE; break; case RPCSEC_GSS_DATA: if (!svcauth_gss_validate(rqst, gd, msg)) ret_freegc (RPCSEC_GSS_CREDPROBLEM); if (!svcauth_gss_nextverf(rqst, htonl(gc->gc_seq))) ret_freegc (AUTH_FAILED); break; case RPCSEC_GSS_DESTROY: if (rqst->rq_proc != NULLPROC) ret_freegc (AUTH_FAILED); /* XXX ? */ if (!svcauth_gss_validate(rqst, gd, msg)) ret_freegc (RPCSEC_GSS_CREDPROBLEM); if (!svcauth_gss_nextverf(rqst, htonl(gc->gc_seq))) ret_freegc (AUTH_FAILED); *no_dispatch = TRUE; call_stat = svc_sendreply(rqst->rq_xprt, xdr_void, (caddr_t)NULL); log_debug("sendreply in destroy: %d", call_stat); if (!svcauth_gss_release_cred()) ret_freegc (AUTH_FAILED); SVCAUTH_DESTROY(rqst->rq_xprt->xp_auth); rqst->rq_xprt->xp_auth = &svc_auth_none; break; default: ret_freegc (AUTH_REJECTEDCRED); break; } retstat = AUTH_OK; freegc: xdr_free(xdr_rpc_gss_cred, gc); log_debug("returning %d from svcauth_gss()", retstat); return (retstat); }
gssrpc__svcauth_gss(struct svc_req *rqst, struct rpc_msg *msg, bool_t *no_dispatch) { enum auth_stat retstat; XDR xdrs; SVCAUTH *auth; struct svc_rpc_gss_data *gd; struct rpc_gss_cred *gc; struct rpc_gss_init_res gr; int call_stat, offset; OM_uint32 min_stat; log_debug("in svcauth_gss()"); rqst->rq_xprt->xp_verf = gssrpc__null_auth; if (rqst->rq_xprt->xp_auth == NULL || rqst->rq_xprt->xp_auth == &svc_auth_none) { if ((auth = calloc(sizeof(*auth), 1)) == NULL) { fprintf(stderr, "svcauth_gss: out_of_memory\n"); return (AUTH_FAILED); } if ((gd = calloc(sizeof(*gd), 1)) == NULL) { fprintf(stderr, "svcauth_gss: out_of_memory\n"); return (AUTH_FAILED); } auth->svc_ah_ops = &svc_auth_gss_ops; SVCAUTH_PRIVATE(auth) = gd; rqst->rq_xprt->xp_auth = auth; } else gd = SVCAUTH_PRIVATE(rqst->rq_xprt->xp_auth); log_debug("xp_auth=%p, gd=%p", rqst->rq_xprt->xp_auth, gd); if (rqst->rq_cred.oa_length <= 0) return (AUTH_BADCRED); gc = (struct rpc_gss_cred *)rqst->rq_clntcred; memset(gc, 0, sizeof(*gc)); log_debug("calling xdrmem_create()"); log_debug("oa_base=%p, oa_length=%u", rqst->rq_cred.oa_base, rqst->rq_cred.oa_length); xdrmem_create(&xdrs, rqst->rq_cred.oa_base, rqst->rq_cred.oa_length, XDR_DECODE); log_debug("xdrmem_create() returned"); if (!xdr_rpc_gss_cred(&xdrs, gc)) { log_debug("xdr_rpc_gss_cred() failed"); XDR_DESTROY(&xdrs); return (AUTH_BADCRED); } XDR_DESTROY(&xdrs); retstat = AUTH_FAILED; #define ret_freegc(code) do { retstat = code; goto freegc; } while (0) if (gc->gc_v != RPCSEC_GSS_VERSION) ret_freegc (AUTH_BADCRED); if (gc->gc_svc != RPCSEC_GSS_SVC_NONE && gc->gc_svc != RPCSEC_GSS_SVC_INTEGRITY && gc->gc_svc != RPCSEC_GSS_SVC_PRIVACY) ret_freegc (AUTH_BADCRED); if (gd->established) { if (gc->gc_seq > MAXSEQ) ret_freegc (RPCSEC_GSS_CTXPROBLEM); if ((offset = gd->seqlast - gc->gc_seq) < 0) { gd->seqlast = gc->gc_seq; offset = 0 - offset; gd->seqmask <<= offset; offset = 0; } else if ((u_int)offset >= gd->win || (gd->seqmask & (1 << offset))) { *no_dispatch = 1; ret_freegc (RPCSEC_GSS_CTXPROBLEM); } gd->seq = gc->gc_seq; gd->seqmask |= (1 << offset); } if (gd->established) { rqst->rq_clntname = (char *)gd->client_name; rqst->rq_svccred = (char *)gd->ctx; } switch (gc->gc_proc) { case RPCSEC_GSS_INIT: case RPCSEC_GSS_CONTINUE_INIT: if (rqst->rq_proc != NULLPROC) ret_freegc (AUTH_FAILED); if (!svcauth_gss_acquire_cred()) ret_freegc (AUTH_FAILED); if (!svcauth_gss_accept_sec_context(rqst, &gr)) ret_freegc (AUTH_REJECTEDCRED); if (!svcauth_gss_nextverf(rqst, htonl(gr.gr_win))) { gss_release_buffer(&min_stat, &gr.gr_token); mem_free(gr.gr_ctx.value, sizeof(gss_union_ctx_id_desc)); ret_freegc (AUTH_FAILED); } *no_dispatch = TRUE; call_stat = svc_sendreply(rqst->rq_xprt, xdr_rpc_gss_init_res, (caddr_t)&gr); gss_release_buffer(&min_stat, &gr.gr_token); gss_release_buffer(&min_stat, &gd->checksum); mem_free(gr.gr_ctx.value, sizeof(gss_union_ctx_id_desc)); if (!call_stat) ret_freegc (AUTH_FAILED); if (gr.gr_major == GSS_S_COMPLETE) gd->established = TRUE; break; case RPCSEC_GSS_DATA: if (!svcauth_gss_validate(rqst, gd, msg)) ret_freegc (RPCSEC_GSS_CREDPROBLEM); if (!svcauth_gss_nextverf(rqst, htonl(gc->gc_seq))) ret_freegc (AUTH_FAILED); break; case RPCSEC_GSS_DESTROY: if (rqst->rq_proc != NULLPROC) ret_freegc (AUTH_FAILED); if (!svcauth_gss_validate(rqst, gd, msg)) ret_freegc (RPCSEC_GSS_CREDPROBLEM); if (!svcauth_gss_nextverf(rqst, htonl(gc->gc_seq))) ret_freegc (AUTH_FAILED); *no_dispatch = TRUE; call_stat = svc_sendreply(rqst->rq_xprt, xdr_void, (caddr_t)NULL); log_debug("sendreply in destroy: %d", call_stat); if (!svcauth_gss_release_cred()) ret_freegc (AUTH_FAILED); SVCAUTH_DESTROY(rqst->rq_xprt->xp_auth); rqst->rq_xprt->xp_auth = &svc_auth_none; break; default: ret_freegc (AUTH_REJECTEDCRED); break; } retstat = AUTH_OK; freegc: xdr_free(xdr_rpc_gss_cred, gc); log_debug("returning %d from svcauth_gss()", retstat); return (retstat); }
219
1
static long vmsplice_to_user(struct file *file, const struct iovec __user *iov, unsigned long nr_segs, unsigned int flags) { struct pipe_inode_info *pipe; struct splice_desc sd; ssize_t size; int error; long ret; pipe = pipe_info(file->f_path.dentry->d_inode); if (!pipe) return -EBADF; if (pipe->inode) mutex_lock(&pipe->inode->i_mutex); error = ret = 0; while (nr_segs) { void __user *base; size_t len; /* * Get user address base and length for this iovec. */ error = get_user(base, &iov->iov_base); if (unlikely(error)) break; error = get_user(len, &iov->iov_len); if (unlikely(error)) break; /* * Sanity check this iovec. 0 read succeeds. */ if (unlikely(!len)) break; if (unlikely(!base)) { error = -EFAULT; break; } sd.len = 0; sd.total_len = len; sd.flags = flags; sd.u.userptr = base; sd.pos = 0; size = __splice_from_pipe(pipe, &sd, pipe_to_user); if (size < 0) { if (!ret) ret = size; break; } ret += size; if (size < len) break; nr_segs--; iov++; } if (pipe->inode) mutex_unlock(&pipe->inode->i_mutex); if (!ret) ret = error; return ret; }
static long vmsplice_to_user(struct file *file, const struct iovec __user *iov, unsigned long nr_segs, unsigned int flags) { struct pipe_inode_info *pipe; struct splice_desc sd; ssize_t size; int error; long ret; pipe = pipe_info(file->f_path.dentry->d_inode); if (!pipe) return -EBADF; if (pipe->inode) mutex_lock(&pipe->inode->i_mutex); error = ret = 0; while (nr_segs) { void __user *base; size_t len; error = get_user(base, &iov->iov_base); if (unlikely(error)) break; error = get_user(len, &iov->iov_len); if (unlikely(error)) break; if (unlikely(!len)) break; if (unlikely(!base)) { error = -EFAULT; break; } sd.len = 0; sd.total_len = len; sd.flags = flags; sd.u.userptr = base; sd.pos = 0; size = __splice_from_pipe(pipe, &sd, pipe_to_user); if (size < 0) { if (!ret) ret = size; break; } ret += size; if (size < len) break; nr_segs--; iov++; } if (pipe->inode) mutex_unlock(&pipe->inode->i_mutex); if (!ret) ret = error; return ret; }
220
0
static int tcg_match_cmpi(TCGType type, tcg_target_long val) { if (facilities & FACILITY_EXT_IMM) { /* The COMPARE IMMEDIATE instruction is available. */ if (type == TCG_TYPE_I32) { /* We have a 32-bit immediate and can compare against anything. */ return 1; } else { /* ??? We have no insight here into whether the comparison is signed or unsigned. The COMPARE IMMEDIATE insn uses a 32-bit signed immediate, and the COMPARE LOGICAL IMMEDIATE insn uses a 32-bit unsigned immediate. If we were to use the (semi) obvious "val == (int32_t)val" we would be enabling unsigned comparisons vs very large numbers. The only solution is to take the intersection of the ranges. */ /* ??? Another possible solution is to simply lie and allow all constants here and force the out-of-range values into a temp register in tgen_cmp when we have knowledge of the actual comparison code in use. */ return val >= 0 && val <= 0x7fffffff; } } else { /* Only the LOAD AND TEST instruction is available. */ return val == 0; } }
static int tcg_match_cmpi(TCGType type, tcg_target_long val) { if (facilities & FACILITY_EXT_IMM) { if (type == TCG_TYPE_I32) { return 1; } else { return val >= 0 && val <= 0x7fffffff; } } else { return val == 0; } }
222
1
static int get_iovec_page_array(const struct iovec __user *iov, unsigned int nr_vecs, struct page **pages, struct partial_page *partial, int aligned) { int buffers = 0, error = 0; down_read(&current->mm->mmap_sem); while (nr_vecs) { unsigned long off, npages; struct iovec entry; void __user *base; size_t len; int i; error = -EFAULT; if (copy_from_user_mmap_sem(&entry, iov, sizeof(entry))) break; base = entry.iov_base; len = entry.iov_len; /* * Sanity check this iovec. 0 read succeeds. */ error = 0; if (unlikely(!len)) break; error = -EFAULT; if (unlikely(!base)) break; /* * Get this base offset and number of pages, then map * in the user pages. */ off = (unsigned long) base & ~PAGE_MASK; /* * If asked for alignment, the offset must be zero and the * length a multiple of the PAGE_SIZE. */ error = -EINVAL; if (aligned && (off || len & ~PAGE_MASK)) break; npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT; if (npages > PIPE_BUFFERS - buffers) npages = PIPE_BUFFERS - buffers; error = get_user_pages(current, current->mm, (unsigned long) base, npages, 0, 0, &pages[buffers], NULL); if (unlikely(error <= 0)) break; /* * Fill this contiguous range into the partial page map. */ for (i = 0; i < error; i++) { const int plen = min_t(size_t, len, PAGE_SIZE - off); partial[buffers].offset = off; partial[buffers].len = plen; off = 0; len -= plen; buffers++; } /* * We didn't complete this iov, stop here since it probably * means we have to move some of this into a pipe to * be able to continue. */ if (len) break; /* * Don't continue if we mapped fewer pages than we asked for, * or if we mapped the max number of pages that we have * room for. */ if (error < npages || buffers == PIPE_BUFFERS) break; nr_vecs--; iov++; } up_read(&current->mm->mmap_sem); if (buffers) return buffers; return error; }
static int get_iovec_page_array(const struct iovec __user *iov, unsigned int nr_vecs, struct page **pages, struct partial_page *partial, int aligned) { int buffers = 0, error = 0; down_read(&current->mm->mmap_sem); while (nr_vecs) { unsigned long off, npages; struct iovec entry; void __user *base; size_t len; int i; error = -EFAULT; if (copy_from_user_mmap_sem(&entry, iov, sizeof(entry))) break; base = entry.iov_base; len = entry.iov_len; error = 0; if (unlikely(!len)) break; error = -EFAULT; if (unlikely(!base)) break; off = (unsigned long) base & ~PAGE_MASK; error = -EINVAL; if (aligned && (off || len & ~PAGE_MASK)) break; npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT; if (npages > PIPE_BUFFERS - buffers) npages = PIPE_BUFFERS - buffers; error = get_user_pages(current, current->mm, (unsigned long) base, npages, 0, 0, &pages[buffers], NULL); if (unlikely(error <= 0)) break; for (i = 0; i < error; i++) { const int plen = min_t(size_t, len, PAGE_SIZE - off); partial[buffers].offset = off; partial[buffers].len = plen; off = 0; len -= plen; buffers++; } if (len) break; if (error < npages || buffers == PIPE_BUFFERS) break; nr_vecs--; iov++; } up_read(&current->mm->mmap_sem); if (buffers) return buffers; return error; }
223
1
svcauth_gss_accept_sec_context(struct svc_req *rqst, struct rpc_gss_init_res *gr) { struct svc_rpc_gss_data *gd; struct rpc_gss_cred *gc; gss_buffer_desc recv_tok, seqbuf; gss_OID mech; OM_uint32 maj_stat = 0, min_stat = 0, ret_flags, seq; log_debug("in svcauth_gss_accept_context()"); gd = SVCAUTH_PRIVATE(rqst->rq_xprt->xp_auth); gc = (struct rpc_gss_cred *)rqst->rq_clntcred; memset(gr, 0, sizeof(*gr)); /* Deserialize arguments. */ memset(&recv_tok, 0, sizeof(recv_tok)); if (!svc_getargs(rqst->rq_xprt, xdr_rpc_gss_init_args, (caddr_t)&recv_tok)) return (FALSE); gr->gr_major = gss_accept_sec_context(&gr->gr_minor, &gd->ctx, svcauth_gss_creds, &recv_tok, GSS_C_NO_CHANNEL_BINDINGS, &gd->client_name, &mech, &gr->gr_token, &ret_flags, NULL, NULL); svc_freeargs(rqst->rq_xprt, xdr_rpc_gss_init_args, (caddr_t)&recv_tok); log_status("accept_sec_context", gr->gr_major, gr->gr_minor); if (gr->gr_major != GSS_S_COMPLETE && gr->gr_major != GSS_S_CONTINUE_NEEDED) { badauth(gr->gr_major, gr->gr_minor, rqst->rq_xprt); gd->ctx = GSS_C_NO_CONTEXT; goto errout; } /* * ANDROS: krb5 mechglue returns ctx of size 8 - two pointers, * one to the mechanism oid, one to the internal_ctx_id */ if ((gr->gr_ctx.value = mem_alloc(sizeof(gss_union_ctx_id_desc))) == NULL) { fprintf(stderr, "svcauth_gss_accept_context: out of memory\n"); goto errout; } memcpy(gr->gr_ctx.value, gd->ctx, sizeof(gss_union_ctx_id_desc)); gr->gr_ctx.length = sizeof(gss_union_ctx_id_desc); /* gr->gr_win = 0x00000005; ANDROS: for debugging linux kernel version... */ gr->gr_win = sizeof(gd->seqmask) * 8; /* Save client info. */ gd->sec.mech = mech; gd->sec.qop = GSS_C_QOP_DEFAULT; gd->sec.svc = gc->gc_svc; gd->seq = gc->gc_seq; gd->win = gr->gr_win; if (gr->gr_major == GSS_S_COMPLETE) { #ifdef SPKM /* spkm3: no src_name (anonymous) */ if(!g_OID_equal(gss_mech_spkm3, mech)) { #endif maj_stat = gss_display_name(&min_stat, gd->client_name, &gd->cname, &gd->sec.mech); #ifdef SPKM } #endif if (maj_stat != GSS_S_COMPLETE) { log_status("display_name", maj_stat, min_stat); goto errout; } #ifdef DEBUG #ifdef HAVE_HEIMDAL log_debug("accepted context for %.*s with " "<mech {}, qop %d, svc %d>", gd->cname.length, (char *)gd->cname.value, gd->sec.qop, gd->sec.svc); #else { gss_buffer_desc mechname; gss_oid_to_str(&min_stat, mech, &mechname); log_debug("accepted context for %.*s with " "<mech %.*s, qop %d, svc %d>", gd->cname.length, (char *)gd->cname.value, mechname.length, (char *)mechname.value, gd->sec.qop, gd->sec.svc); gss_release_buffer(&min_stat, &mechname); } #endif #endif /* DEBUG */ seq = htonl(gr->gr_win); seqbuf.value = &seq; seqbuf.length = sizeof(seq); gss_release_buffer(&min_stat, &gd->checksum); maj_stat = gss_sign(&min_stat, gd->ctx, GSS_C_QOP_DEFAULT, &seqbuf, &gd->checksum); if (maj_stat != GSS_S_COMPLETE) { goto errout; } rqst->rq_xprt->xp_verf.oa_flavor = RPCSEC_GSS; rqst->rq_xprt->xp_verf.oa_base = gd->checksum.value; rqst->rq_xprt->xp_verf.oa_length = gd->checksum.length; } return (TRUE); errout: gss_release_buffer(&min_stat, &gr->gr_token); return (FALSE); }
svcauth_gss_accept_sec_context(struct svc_req *rqst, struct rpc_gss_init_res *gr) { struct svc_rpc_gss_data *gd; struct rpc_gss_cred *gc; gss_buffer_desc recv_tok, seqbuf; gss_OID mech; OM_uint32 maj_stat = 0, min_stat = 0, ret_flags, seq; log_debug("in svcauth_gss_accept_context()"); gd = SVCAUTH_PRIVATE(rqst->rq_xprt->xp_auth); gc = (struct rpc_gss_cred *)rqst->rq_clntcred; memset(gr, 0, sizeof(*gr)); memset(&recv_tok, 0, sizeof(recv_tok)); if (!svc_getargs(rqst->rq_xprt, xdr_rpc_gss_init_args, (caddr_t)&recv_tok)) return (FALSE); gr->gr_major = gss_accept_sec_context(&gr->gr_minor, &gd->ctx, svcauth_gss_creds, &recv_tok, GSS_C_NO_CHANNEL_BINDINGS, &gd->client_name, &mech, &gr->gr_token, &ret_flags, NULL, NULL); svc_freeargs(rqst->rq_xprt, xdr_rpc_gss_init_args, (caddr_t)&recv_tok); log_status("accept_sec_context", gr->gr_major, gr->gr_minor); if (gr->gr_major != GSS_S_COMPLETE && gr->gr_major != GSS_S_CONTINUE_NEEDED) { badauth(gr->gr_major, gr->gr_minor, rqst->rq_xprt); gd->ctx = GSS_C_NO_CONTEXT; goto errout; } if ((gr->gr_ctx.value = mem_alloc(sizeof(gss_union_ctx_id_desc))) == NULL) { fprintf(stderr, "svcauth_gss_accept_context: out of memory\n"); goto errout; } memcpy(gr->gr_ctx.value, gd->ctx, sizeof(gss_union_ctx_id_desc)); gr->gr_ctx.length = sizeof(gss_union_ctx_id_desc); gr->gr_win = sizeof(gd->seqmask) * 8; gd->sec.mech = mech; gd->sec.qop = GSS_C_QOP_DEFAULT; gd->sec.svc = gc->gc_svc; gd->seq = gc->gc_seq; gd->win = gr->gr_win; if (gr->gr_major == GSS_S_COMPLETE) { #ifdef SPKM if(!g_OID_equal(gss_mech_spkm3, mech)) { #endif maj_stat = gss_display_name(&min_stat, gd->client_name, &gd->cname, &gd->sec.mech); #ifdef SPKM } #endif if (maj_stat != GSS_S_COMPLETE) { log_status("display_name", maj_stat, min_stat); goto errout; } #ifdef DEBUG #ifdef HAVE_HEIMDAL log_debug("accepted context for %.*s with " "<mech {}, qop %d, svc %d>", gd->cname.length, (char *)gd->cname.value, gd->sec.qop, gd->sec.svc); #else { gss_buffer_desc mechname; gss_oid_to_str(&min_stat, mech, &mechname); log_debug("accepted context for %.*s with " "<mech %.*s, qop %d, svc %d>", gd->cname.length, (char *)gd->cname.value, mechname.length, (char *)mechname.value, gd->sec.qop, gd->sec.svc); gss_release_buffer(&min_stat, &mechname); } #endif #endif seq = htonl(gr->gr_win); seqbuf.value = &seq; seqbuf.length = sizeof(seq); gss_release_buffer(&min_stat, &gd->checksum); maj_stat = gss_sign(&min_stat, gd->ctx, GSS_C_QOP_DEFAULT, &seqbuf, &gd->checksum); if (maj_stat != GSS_S_COMPLETE) { goto errout; } rqst->rq_xprt->xp_verf.oa_flavor = RPCSEC_GSS; rqst->rq_xprt->xp_verf.oa_base = gd->checksum.value; rqst->rq_xprt->xp_verf.oa_length = gd->checksum.length; } return (TRUE); errout: gss_release_buffer(&min_stat, &gr->gr_token); return (FALSE); }
225
1
static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt, struct tss_segment_32 *tss) { int ret; u8 cpl; if (ctxt->ops->set_cr(ctxt, 3, tss->cr3)) return emulate_gp(ctxt, 0); ctxt->_eip = tss->eip; ctxt->eflags = tss->eflags | 2; /* General purpose registers */ *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax; *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx; *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx; *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx; *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp; *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp; *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi; *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi; /* * SDM says that segment selectors are loaded before segment * descriptors. This is important because CPL checks will * use CS.RPL. */ set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR); set_segment_selector(ctxt, tss->es, VCPU_SREG_ES); set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS); set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS); set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS); set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS); set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS); /* * If we're switching between Protected Mode and VM86, we need to make * sure to update the mode before loading the segment descriptors so * that the selectors are interpreted correctly. */ if (ctxt->eflags & X86_EFLAGS_VM) { ctxt->mode = X86EMUL_MODE_VM86; cpl = 3; } else { ctxt->mode = X86EMUL_MODE_PROT32; cpl = tss->cs & 3; } /* * Now load segment descriptors. If fault happens at this stage * it is handled in a context of new task */ ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR, cpl, true); if (ret != X86EMUL_CONTINUE) return ret; ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl, true); if (ret != X86EMUL_CONTINUE) return ret; ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl, true); if (ret != X86EMUL_CONTINUE) return ret; ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl, true); if (ret != X86EMUL_CONTINUE) return ret; ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl, true); if (ret != X86EMUL_CONTINUE) return ret; ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl, true); if (ret != X86EMUL_CONTINUE) return ret; ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl, true); if (ret != X86EMUL_CONTINUE) return ret; return X86EMUL_CONTINUE; }
static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt, struct tss_segment_32 *tss) { int ret; u8 cpl; if (ctxt->ops->set_cr(ctxt, 3, tss->cr3)) return emulate_gp(ctxt, 0); ctxt->_eip = tss->eip; ctxt->eflags = tss->eflags | 2; *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax; *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx; *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx; *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx; *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp; *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp; *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi; *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi; set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR); set_segment_selector(ctxt, tss->es, VCPU_SREG_ES); set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS); set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS); set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS); set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS); set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS); if (ctxt->eflags & X86_EFLAGS_VM) { ctxt->mode = X86EMUL_MODE_VM86; cpl = 3; } else { ctxt->mode = X86EMUL_MODE_PROT32; cpl = tss->cs & 3; } ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR, cpl, true); if (ret != X86EMUL_CONTINUE) return ret; ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl, true); if (ret != X86EMUL_CONTINUE) return ret; ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl, true); if (ret != X86EMUL_CONTINUE) return ret; ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl, true); if (ret != X86EMUL_CONTINUE) return ret; ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl, true); if (ret != X86EMUL_CONTINUE) return ret; ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl, true); if (ret != X86EMUL_CONTINUE) return ret; ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl, true); if (ret != X86EMUL_CONTINUE) return ret; return X86EMUL_CONTINUE; }
226
0
static inline bool vhost_needs_vring_endian(VirtIODevice *vdev) { if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) { return false; } #ifdef TARGET_IS_BIENDIAN #ifdef HOST_WORDS_BIGENDIAN return !virtio_is_big_endian(vdev); #else return virtio_is_big_endian(vdev); #endif #else return false; #endif }
static inline bool vhost_needs_vring_endian(VirtIODevice *vdev) { if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) { return false; } #ifdef TARGET_IS_BIENDIAN #ifdef HOST_WORDS_BIGENDIAN return !virtio_is_big_endian(vdev); #else return virtio_is_big_endian(vdev); #endif #else return false; #endif }
227
0
int qxl_render_cursor(PCIQXLDevice *qxl, QXLCommandExt *ext) { QXLCursorCmd *cmd = qxl_phys2virt(qxl, ext->cmd.data, ext->group_id); QXLCursor *cursor; QEMUCursor *c; if (!cmd) { return 1; } if (!dpy_cursor_define_supported(qxl->vga.con)) { return 0; } if (qxl->debug > 1 && cmd->type != QXL_CURSOR_MOVE) { fprintf(stderr, "%s", __FUNCTION__); qxl_log_cmd_cursor(qxl, cmd, ext->group_id); fprintf(stderr, "\n"); } switch (cmd->type) { case QXL_CURSOR_SET: cursor = qxl_phys2virt(qxl, cmd->u.set.shape, ext->group_id); if (!cursor) { return 1; } c = qxl_cursor(qxl, cursor, ext->group_id); if (c == NULL) { c = cursor_builtin_left_ptr(); } qemu_mutex_lock(&qxl->ssd.lock); if (qxl->ssd.cursor) { cursor_put(qxl->ssd.cursor); } qxl->ssd.cursor = c; qxl->ssd.mouse_x = cmd->u.set.position.x; qxl->ssd.mouse_y = cmd->u.set.position.y; qemu_mutex_unlock(&qxl->ssd.lock); qemu_bh_schedule(qxl->ssd.cursor_bh); break; case QXL_CURSOR_MOVE: qemu_mutex_lock(&qxl->ssd.lock); qxl->ssd.mouse_x = cmd->u.position.x; qxl->ssd.mouse_y = cmd->u.position.y; qemu_mutex_unlock(&qxl->ssd.lock); qemu_bh_schedule(qxl->ssd.cursor_bh); break; } return 0; }
int qxl_render_cursor(PCIQXLDevice *qxl, QXLCommandExt *ext) { QXLCursorCmd *cmd = qxl_phys2virt(qxl, ext->cmd.data, ext->group_id); QXLCursor *cursor; QEMUCursor *c; if (!cmd) { return 1; } if (!dpy_cursor_define_supported(qxl->vga.con)) { return 0; } if (qxl->debug > 1 && cmd->type != QXL_CURSOR_MOVE) { fprintf(stderr, "%s", __FUNCTION__); qxl_log_cmd_cursor(qxl, cmd, ext->group_id); fprintf(stderr, "\n"); } switch (cmd->type) { case QXL_CURSOR_SET: cursor = qxl_phys2virt(qxl, cmd->u.set.shape, ext->group_id); if (!cursor) { return 1; } c = qxl_cursor(qxl, cursor, ext->group_id); if (c == NULL) { c = cursor_builtin_left_ptr(); } qemu_mutex_lock(&qxl->ssd.lock); if (qxl->ssd.cursor) { cursor_put(qxl->ssd.cursor); } qxl->ssd.cursor = c; qxl->ssd.mouse_x = cmd->u.set.position.x; qxl->ssd.mouse_y = cmd->u.set.position.y; qemu_mutex_unlock(&qxl->ssd.lock); qemu_bh_schedule(qxl->ssd.cursor_bh); break; case QXL_CURSOR_MOVE: qemu_mutex_lock(&qxl->ssd.lock); qxl->ssd.mouse_x = cmd->u.position.x; qxl->ssd.mouse_y = cmd->u.position.y; qemu_mutex_unlock(&qxl->ssd.lock); qemu_bh_schedule(qxl->ssd.cursor_bh); break; } return 0; }
228
1
asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim) { struct rlimit new_rlim, *old_rlim; unsigned long it_prof_secs; int retval; if (resource >= RLIM_NLIMITS) return -EINVAL; if (copy_from_user(&new_rlim, rlim, sizeof(*rlim))) return -EFAULT; if (new_rlim.rlim_cur > new_rlim.rlim_max) return -EINVAL; old_rlim = current->signal->rlim + resource; if ((new_rlim.rlim_max > old_rlim->rlim_max) && !capable(CAP_SYS_RESOURCE)) return -EPERM; if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > NR_OPEN) return -EPERM; retval = security_task_setrlimit(resource, &new_rlim); if (retval) return retval; task_lock(current->group_leader); *old_rlim = new_rlim; task_unlock(current->group_leader); if (resource != RLIMIT_CPU) goto out; /* * RLIMIT_CPU handling. Note that the kernel fails to return an error * code if it rejected the user's attempt to set RLIMIT_CPU. This is a * very long-standing error, and fixing it now risks breakage of * applications, so we live with it */ if (new_rlim.rlim_cur == RLIM_INFINITY) goto out; it_prof_secs = cputime_to_secs(current->signal->it_prof_expires); if (it_prof_secs == 0 || new_rlim.rlim_cur <= it_prof_secs) { unsigned long rlim_cur = new_rlim.rlim_cur; cputime_t cputime; if (rlim_cur == 0) { /* * The caller is asking for an immediate RLIMIT_CPU * expiry. But we use the zero value to mean "it was * never set". So let's cheat and make it one second * instead */ rlim_cur = 1; } cputime = secs_to_cputime(rlim_cur); read_lock(&tasklist_lock); spin_lock_irq(&current->sighand->siglock); set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL); spin_unlock_irq(&current->sighand->siglock); read_unlock(&tasklist_lock); } out: return 0; }
asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim) { struct rlimit new_rlim, *old_rlim; unsigned long it_prof_secs; int retval; if (resource >= RLIM_NLIMITS) return -EINVAL; if (copy_from_user(&new_rlim, rlim, sizeof(*rlim))) return -EFAULT; if (new_rlim.rlim_cur > new_rlim.rlim_max) return -EINVAL; old_rlim = current->signal->rlim + resource; if ((new_rlim.rlim_max > old_rlim->rlim_max) && !capable(CAP_SYS_RESOURCE)) return -EPERM; if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > NR_OPEN) return -EPERM; retval = security_task_setrlimit(resource, &new_rlim); if (retval) return retval; task_lock(current->group_leader); *old_rlim = new_rlim; task_unlock(current->group_leader); if (resource != RLIMIT_CPU) goto out; if (new_rlim.rlim_cur == RLIM_INFINITY) goto out; it_prof_secs = cputime_to_secs(current->signal->it_prof_expires); if (it_prof_secs == 0 || new_rlim.rlim_cur <= it_prof_secs) { unsigned long rlim_cur = new_rlim.rlim_cur; cputime_t cputime; if (rlim_cur == 0) { rlim_cur = 1; } cputime = secs_to_cputime(rlim_cur); read_lock(&tasklist_lock); spin_lock_irq(&current->sighand->siglock); set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL); spin_unlock_irq(&current->sighand->siglock); read_unlock(&tasklist_lock); } out: return 0; }
229
0
static void virtio_net_set_status ( struct VirtIODevice * vdev , uint8_t status ) { VirtIONet * n = to_virtio_net ( vdev ) ; virtio_net_vhost_status ( n , status ) ; if ( ! n -> tx_waiting ) { return ; } if ( virtio_net_started ( n , status ) && ! n -> vhost_started ) { if ( n -> tx_timer ) { qemu_mod_timer ( n -> tx_timer , qemu_get_clock_ns ( vm_clock ) + n -> tx_timeout ) ; } else { qemu_bh_schedule ( n -> tx_bh ) ; } } else { if ( n -> tx_timer ) { qemu_del_timer ( n -> tx_timer ) ; } else { qemu_bh_cancel ( n -> tx_bh ) ; } } }
static void virtio_net_set_status ( struct VirtIODevice * vdev , uint8_t status ) { VirtIONet * n = to_virtio_net ( vdev ) ; virtio_net_vhost_status ( n , status ) ; if ( ! n -> tx_waiting ) { return ; } if ( virtio_net_started ( n , status ) && ! n -> vhost_started ) { if ( n -> tx_timer ) { qemu_mod_timer ( n -> tx_timer , qemu_get_clock_ns ( vm_clock ) + n -> tx_timeout ) ; } else { qemu_bh_schedule ( n -> tx_bh ) ; } } else { if ( n -> tx_timer ) { qemu_del_timer ( n -> tx_timer ) ; } else { qemu_bh_cancel ( n -> tx_bh ) ; } } }
230
0
static int decode_tile(Jpeg2000DecoderContext *s, Jpeg2000Tile *tile) { int compno, reslevelno, bandno; int x, y, *src[4]; uint8_t *line; Jpeg2000T1Context t1; /* Loop on tile components */ for (compno = 0; compno < s->ncomponents; compno++) { Jpeg2000Component *comp = tile->comp + compno; Jpeg2000CodingStyle *codsty = tile->codsty + compno; /* Loop on resolution levels */ for (reslevelno = 0; reslevelno < codsty->nreslevels2decode; reslevelno++) { Jpeg2000ResLevel *rlevel = comp->reslevel + reslevelno; /* Loop on bands */ for (bandno = 0; bandno < rlevel->nbands; bandno++) { int nb_precincts, precno; Jpeg2000Band *band = rlevel->band + bandno; int cblkx, cblky, cblkno=0, bandpos; bandpos = bandno + (reslevelno > 0); if (band->coord[0][0] == band->coord[0][1] || band->coord[1][0] == band->coord[1][1]) continue; nb_precincts = rlevel->num_precincts_x * rlevel->num_precincts_y; /* Loop on precincts */ for (precno = 0; precno < nb_precincts; precno++) { Jpeg2000Prec *prec = band->prec + precno; /* Loop on codeblocks */ for (cblkno = 0; cblkno < prec->nb_codeblocks_width * prec->nb_codeblocks_height; cblkno++) { int x, y; int i, j; Jpeg2000Cblk *cblk = prec->cblk + cblkno; decode_cblk(s, codsty, &t1, cblk, cblk->coord[0][1] - cblk->coord[0][0], cblk->coord[1][1] - cblk->coord[1][0], bandpos); /* Manage band offsets */ x = cblk->coord[0][0]; y = cblk->coord[1][0]; dequantization_int(x, y, cblk, comp, &t1, band); } /* end cblk */ } /*end prec */ } /* end band */ } /* end reslevel */ ff_dwt_decode(&comp->dwt, comp->data); src[compno] = comp->data; } /*end comp */ /* inverse MCT transformation */ if (tile->codsty[0].mct) mct_decode(s, tile); if (s->precision <= 8) { for (compno = 0; compno < s->ncomponents; compno++) { y = tile->comp[compno].coord[1][0] - s->image_offset_y; line = s->picture->data[0] + y * s->picture->linesize[0]; for (; y < tile->comp[compno].coord[1][1] - s->image_offset_y; y += s->cdy[compno]) { uint8_t *dst; x = tile->comp[compno].coord[0][0] - s->image_offset_x; dst = line + x * s->ncomponents + compno; for (; x < tile->comp[compno].coord[0][1] - s->image_offset_x; x += s->cdx[compno]) { int val = *src[compno]++ << (8 - s->cbps[compno]); val += 1 << 7; val = av_clip(val, 0, (1 << 8) - 1); *dst = val; dst += s->ncomponents; } line += s->picture->linesize[0]; } } } else { for (compno = 0; compno < s->ncomponents; compno++) { y = tile->comp[compno].coord[1][0] - s->image_offset_y; line = s->picture->data[0] + y * s->picture->linesize[0]; for (; y < tile->comp[compno].coord[1][1] - s->image_offset_y; y += s->cdy[compno]) { uint16_t *dst; x = tile->comp[compno].coord[0][0] - s->image_offset_x; dst = (uint16_t *)(line + (x * s->ncomponents + compno) * 2); for (; x < tile->comp[compno].coord[0][1] - s->image_offset_x; x += s-> cdx[compno]) { int32_t val; val = *src[compno]++ << (16 - s->cbps[compno]); val += 1 << 15; val = av_clip(val, 0, (1 << 16) - 1); *dst = val; dst += s->ncomponents; } line += s->picture->linesize[0]; } } } return 0; }
static int decode_tile(Jpeg2000DecoderContext *s, Jpeg2000Tile *tile) { int compno, reslevelno, bandno; int x, y, *src[4]; uint8_t *line; Jpeg2000T1Context t1; for (compno = 0; compno < s->ncomponents; compno++) { Jpeg2000Component *comp = tile->comp + compno; Jpeg2000CodingStyle *codsty = tile->codsty + compno; for (reslevelno = 0; reslevelno < codsty->nreslevels2decode; reslevelno++) { Jpeg2000ResLevel *rlevel = comp->reslevel + reslevelno; for (bandno = 0; bandno < rlevel->nbands; bandno++) { int nb_precincts, precno; Jpeg2000Band *band = rlevel->band + bandno; int cblkx, cblky, cblkno=0, bandpos; bandpos = bandno + (reslevelno > 0); if (band->coord[0][0] == band->coord[0][1] || band->coord[1][0] == band->coord[1][1]) continue; nb_precincts = rlevel->num_precincts_x * rlevel->num_precincts_y; for (precno = 0; precno < nb_precincts; precno++) { Jpeg2000Prec *prec = band->prec + precno; for (cblkno = 0; cblkno < prec->nb_codeblocks_width * prec->nb_codeblocks_height; cblkno++) { int x, y; int i, j; Jpeg2000Cblk *cblk = prec->cblk + cblkno; decode_cblk(s, codsty, &t1, cblk, cblk->coord[0][1] - cblk->coord[0][0], cblk->coord[1][1] - cblk->coord[1][0], bandpos); x = cblk->coord[0][0]; y = cblk->coord[1][0]; dequantization_int(x, y, cblk, comp, &t1, band); } } } } ff_dwt_decode(&comp->dwt, comp->data); src[compno] = comp->data; } if (tile->codsty[0].mct) mct_decode(s, tile); if (s->precision <= 8) { for (compno = 0; compno < s->ncomponents; compno++) { y = tile->comp[compno].coord[1][0] - s->image_offset_y; line = s->picture->data[0] + y * s->picture->linesize[0]; for (; y < tile->comp[compno].coord[1][1] - s->image_offset_y; y += s->cdy[compno]) { uint8_t *dst; x = tile->comp[compno].coord[0][0] - s->image_offset_x; dst = line + x * s->ncomponents + compno; for (; x < tile->comp[compno].coord[0][1] - s->image_offset_x; x += s->cdx[compno]) { int val = *src[compno]++ << (8 - s->cbps[compno]); val += 1 << 7; val = av_clip(val, 0, (1 << 8) - 1); *dst = val; dst += s->ncomponents; } line += s->picture->linesize[0]; } } } else { for (compno = 0; compno < s->ncomponents; compno++) { y = tile->comp[compno].coord[1][0] - s->image_offset_y; line = s->picture->data[0] + y * s->picture->linesize[0]; for (; y < tile->comp[compno].coord[1][1] - s->image_offset_y; y += s->cdy[compno]) { uint16_t *dst; x = tile->comp[compno].coord[0][0] - s->image_offset_x; dst = (uint16_t *)(line + (x * s->ncomponents + compno) * 2); for (; x < tile->comp[compno].coord[0][1] - s->image_offset_x; x += s-> cdx[compno]) { int32_t val; val = *src[compno]++ << (16 - s->cbps[compno]); val += 1 << 15; val = av_clip(val, 0, (1 << 16) - 1); *dst = val; dst += s->ncomponents; } line += s->picture->linesize[0]; } } } return 0; }
231
0
static void ctl_putint ( const char * tag , long ival ) { register char * cp ; register const char * cq ; char buffer [ 200 ] ; cp = buffer ; cq = tag ; while ( * cq != '\0' ) * cp ++ = * cq ++ ; * cp ++ = '=' ; INSIST ( ( cp - buffer ) < ( int ) sizeof ( buffer ) ) ; snprintf ( cp , sizeof ( buffer ) - ( cp - buffer ) , "%ld" , ival ) ; cp += strlen ( cp ) ; ctl_putdata ( buffer , ( unsigned ) ( cp - buffer ) , 0 ) ; }
static void ctl_putint ( const char * tag , long ival ) { register char * cp ; register const char * cq ; char buffer [ 200 ] ; cp = buffer ; cq = tag ; while ( * cq != '\0' ) * cp ++ = * cq ++ ; * cp ++ = '=' ; INSIST ( ( cp - buffer ) < ( int ) sizeof ( buffer ) ) ; snprintf ( cp , sizeof ( buffer ) - ( cp - buffer ) , "%ld" , ival ) ; cp += strlen ( cp ) ; ctl_putdata ( buffer , ( unsigned ) ( cp - buffer ) , 0 ) ; }
232
1
int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len) { int rc = X86EMUL_CONTINUE; int mode = ctxt->mode; int def_op_bytes, def_ad_bytes, goffset, simd_prefix; bool op_prefix = false; bool has_seg_override = false; struct opcode opcode; ctxt->memop.type = OP_NONE; ctxt->memopp = NULL; ctxt->_eip = ctxt->eip; ctxt->fetch.ptr = ctxt->fetch.data; ctxt->fetch.end = ctxt->fetch.data + insn_len; ctxt->opcode_len = 1; if (insn_len > 0) memcpy(ctxt->fetch.data, insn, insn_len); else { rc = __do_insn_fetch_bytes(ctxt, 1); if (rc != X86EMUL_CONTINUE) return rc; } switch (mode) { case X86EMUL_MODE_REAL: case X86EMUL_MODE_VM86: case X86EMUL_MODE_PROT16: def_op_bytes = def_ad_bytes = 2; break; case X86EMUL_MODE_PROT32: def_op_bytes = def_ad_bytes = 4; break; #ifdef CONFIG_X86_64 case X86EMUL_MODE_PROT64: def_op_bytes = 4; def_ad_bytes = 8; break; #endif default: return EMULATION_FAILED; } ctxt->op_bytes = def_op_bytes; ctxt->ad_bytes = def_ad_bytes; /* Legacy prefixes. */ for (;;) { switch (ctxt->b = insn_fetch(u8, ctxt)) { case 0x66: /* operand-size override */ op_prefix = true; /* switch between 2/4 bytes */ ctxt->op_bytes = def_op_bytes ^ 6; break; case 0x67: /* address-size override */ if (mode == X86EMUL_MODE_PROT64) /* switch between 4/8 bytes */ ctxt->ad_bytes = def_ad_bytes ^ 12; else /* switch between 2/4 bytes */ ctxt->ad_bytes = def_ad_bytes ^ 6; break; case 0x26: /* ES override */ case 0x2e: /* CS override */ case 0x36: /* SS override */ case 0x3e: /* DS override */ has_seg_override = true; ctxt->seg_override = (ctxt->b >> 3) & 3; break; case 0x64: /* FS override */ case 0x65: /* GS override */ has_seg_override = true; ctxt->seg_override = ctxt->b & 7; break; case 0x40 ... 0x4f: /* REX */ if (mode != X86EMUL_MODE_PROT64) goto done_prefixes; ctxt->rex_prefix = ctxt->b; continue; case 0xf0: /* LOCK */ ctxt->lock_prefix = 1; break; case 0xf2: /* REPNE/REPNZ */ case 0xf3: /* REP/REPE/REPZ */ ctxt->rep_prefix = ctxt->b; break; default: goto done_prefixes; } /* Any legacy prefix after a REX prefix nullifies its effect. */ ctxt->rex_prefix = 0; } done_prefixes: /* REX prefix. */ if (ctxt->rex_prefix & 8) ctxt->op_bytes = 8; /* REX.W */ /* Opcode byte(s). */ opcode = opcode_table[ctxt->b]; /* Two-byte opcode? */ if (ctxt->b == 0x0f) { ctxt->opcode_len = 2; ctxt->b = insn_fetch(u8, ctxt); opcode = twobyte_table[ctxt->b]; /* 0F_38 opcode map */ if (ctxt->b == 0x38) { ctxt->opcode_len = 3; ctxt->b = insn_fetch(u8, ctxt); opcode = opcode_map_0f_38[ctxt->b]; } } ctxt->d = opcode.flags; if (ctxt->d & ModRM) ctxt->modrm = insn_fetch(u8, ctxt); /* vex-prefix instructions are not implemented */ if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) && (mode == X86EMUL_MODE_PROT64 || (mode >= X86EMUL_MODE_PROT16 && (ctxt->modrm & 0x80)))) { ctxt->d = NotImpl; } while (ctxt->d & GroupMask) { switch (ctxt->d & GroupMask) { case Group: goffset = (ctxt->modrm >> 3) & 7; opcode = opcode.u.group[goffset]; break; case GroupDual: goffset = (ctxt->modrm >> 3) & 7; if ((ctxt->modrm >> 6) == 3) opcode = opcode.u.gdual->mod3[goffset]; else opcode = opcode.u.gdual->mod012[goffset]; break; case RMExt: goffset = ctxt->modrm & 7; opcode = opcode.u.group[goffset]; break; case Prefix: if (ctxt->rep_prefix && op_prefix) return EMULATION_FAILED; simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix; switch (simd_prefix) { case 0x00: opcode = opcode.u.gprefix->pfx_no; break; case 0x66: opcode = opcode.u.gprefix->pfx_66; break; case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break; case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break; } break; case Escape: if (ctxt->modrm > 0xbf) opcode = opcode.u.esc->high[ctxt->modrm - 0xc0]; else opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7]; break; default: return EMULATION_FAILED; } ctxt->d &= ~(u64)GroupMask; ctxt->d |= opcode.flags; } /* Unrecognised? */ if (ctxt->d == 0) return EMULATION_FAILED; ctxt->execute = opcode.u.execute; if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD))) return EMULATION_FAILED; if (unlikely(ctxt->d & (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm))) { /* * These are copied unconditionally here, and checked unconditionally * in x86_emulate_insn. */ ctxt->check_perm = opcode.check_perm; ctxt->intercept = opcode.intercept; if (ctxt->d & NotImpl) return EMULATION_FAILED; if (mode == X86EMUL_MODE_PROT64 && (ctxt->d & Stack)) ctxt->op_bytes = 8; if (ctxt->d & Op3264) { if (mode == X86EMUL_MODE_PROT64) ctxt->op_bytes = 8; else ctxt->op_bytes = 4; } if (ctxt->d & Sse) ctxt->op_bytes = 16; else if (ctxt->d & Mmx) ctxt->op_bytes = 8; } /* ModRM and SIB bytes. */ if (ctxt->d & ModRM) { rc = decode_modrm(ctxt, &ctxt->memop); if (!has_seg_override) { has_seg_override = true; ctxt->seg_override = ctxt->modrm_seg; } } else if (ctxt->d & MemAbs) rc = decode_abs(ctxt, &ctxt->memop); if (rc != X86EMUL_CONTINUE) goto done; if (!has_seg_override) ctxt->seg_override = VCPU_SREG_DS; ctxt->memop.addr.mem.seg = ctxt->seg_override; /* * Decode and fetch the source operand: register, memory * or immediate. */ rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask); if (rc != X86EMUL_CONTINUE) goto done; /* * Decode and fetch the second source operand: register, memory * or immediate. */ rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask); if (rc != X86EMUL_CONTINUE) goto done; /* Decode and fetch the destination operand: register or memory. */ rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask); done: if (ctxt->rip_relative) ctxt->memopp->addr.mem.ea += ctxt->_eip; return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK; }
int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len) { int rc = X86EMUL_CONTINUE; int mode = ctxt->mode; int def_op_bytes, def_ad_bytes, goffset, simd_prefix; bool op_prefix = false; bool has_seg_override = false; struct opcode opcode; ctxt->memop.type = OP_NONE; ctxt->memopp = NULL; ctxt->_eip = ctxt->eip; ctxt->fetch.ptr = ctxt->fetch.data; ctxt->fetch.end = ctxt->fetch.data + insn_len; ctxt->opcode_len = 1; if (insn_len > 0) memcpy(ctxt->fetch.data, insn, insn_len); else { rc = __do_insn_fetch_bytes(ctxt, 1); if (rc != X86EMUL_CONTINUE) return rc; } switch (mode) { case X86EMUL_MODE_REAL: case X86EMUL_MODE_VM86: case X86EMUL_MODE_PROT16: def_op_bytes = def_ad_bytes = 2; break; case X86EMUL_MODE_PROT32: def_op_bytes = def_ad_bytes = 4; break; #ifdef CONFIG_X86_64 case X86EMUL_MODE_PROT64: def_op_bytes = 4; def_ad_bytes = 8; break; #endif default: return EMULATION_FAILED; } ctxt->op_bytes = def_op_bytes; ctxt->ad_bytes = def_ad_bytes; for (;;) { switch (ctxt->b = insn_fetch(u8, ctxt)) { case 0x66: op_prefix = true; ctxt->op_bytes = def_op_bytes ^ 6; break; case 0x67: if (mode == X86EMUL_MODE_PROT64) ctxt->ad_bytes = def_ad_bytes ^ 12; else ctxt->ad_bytes = def_ad_bytes ^ 6; break; case 0x26: case 0x2e: case 0x36: case 0x3e: has_seg_override = true; ctxt->seg_override = (ctxt->b >> 3) & 3; break; case 0x64: case 0x65: has_seg_override = true; ctxt->seg_override = ctxt->b & 7; break; case 0x40 ... 0x4f: if (mode != X86EMUL_MODE_PROT64) goto done_prefixes; ctxt->rex_prefix = ctxt->b; continue; case 0xf0: ctxt->lock_prefix = 1; break; case 0xf2: case 0xf3: ctxt->rep_prefix = ctxt->b; break; default: goto done_prefixes; } ctxt->rex_prefix = 0; } done_prefixes: if (ctxt->rex_prefix & 8) ctxt->op_bytes = 8; opcode = opcode_table[ctxt->b]; if (ctxt->b == 0x0f) { ctxt->opcode_len = 2; ctxt->b = insn_fetch(u8, ctxt); opcode = twobyte_table[ctxt->b]; if (ctxt->b == 0x38) { ctxt->opcode_len = 3; ctxt->b = insn_fetch(u8, ctxt); opcode = opcode_map_0f_38[ctxt->b]; } } ctxt->d = opcode.flags; if (ctxt->d & ModRM) ctxt->modrm = insn_fetch(u8, ctxt); if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) && (mode == X86EMUL_MODE_PROT64 || (mode >= X86EMUL_MODE_PROT16 && (ctxt->modrm & 0x80)))) { ctxt->d = NotImpl; } while (ctxt->d & GroupMask) { switch (ctxt->d & GroupMask) { case Group: goffset = (ctxt->modrm >> 3) & 7; opcode = opcode.u.group[goffset]; break; case GroupDual: goffset = (ctxt->modrm >> 3) & 7; if ((ctxt->modrm >> 6) == 3) opcode = opcode.u.gdual->mod3[goffset]; else opcode = opcode.u.gdual->mod012[goffset]; break; case RMExt: goffset = ctxt->modrm & 7; opcode = opcode.u.group[goffset]; break; case Prefix: if (ctxt->rep_prefix && op_prefix) return EMULATION_FAILED; simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix; switch (simd_prefix) { case 0x00: opcode = opcode.u.gprefix->pfx_no; break; case 0x66: opcode = opcode.u.gprefix->pfx_66; break; case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break; case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break; } break; case Escape: if (ctxt->modrm > 0xbf) opcode = opcode.u.esc->high[ctxt->modrm - 0xc0]; else opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7]; break; default: return EMULATION_FAILED; } ctxt->d &= ~(u64)GroupMask; ctxt->d |= opcode.flags; } if (ctxt->d == 0) return EMULATION_FAILED; ctxt->execute = opcode.u.execute; if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD))) return EMULATION_FAILED; if (unlikely(ctxt->d & (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm))) { ctxt->check_perm = opcode.check_perm; ctxt->intercept = opcode.intercept; if (ctxt->d & NotImpl) return EMULATION_FAILED; if (mode == X86EMUL_MODE_PROT64 && (ctxt->d & Stack)) ctxt->op_bytes = 8; if (ctxt->d & Op3264) { if (mode == X86EMUL_MODE_PROT64) ctxt->op_bytes = 8; else ctxt->op_bytes = 4; } if (ctxt->d & Sse) ctxt->op_bytes = 16; else if (ctxt->d & Mmx) ctxt->op_bytes = 8; } if (ctxt->d & ModRM) { rc = decode_modrm(ctxt, &ctxt->memop); if (!has_seg_override) { has_seg_override = true; ctxt->seg_override = ctxt->modrm_seg; } } else if (ctxt->d & MemAbs) rc = decode_abs(ctxt, &ctxt->memop); if (rc != X86EMUL_CONTINUE) goto done; if (!has_seg_override) ctxt->seg_override = VCPU_SREG_DS; ctxt->memop.addr.mem.seg = ctxt->seg_override; rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask); if (rc != X86EMUL_CONTINUE) goto done; rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask); if (rc != X86EMUL_CONTINUE) goto done; rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask); done: if (ctxt->rip_relative) ctxt->memopp->addr.mem.ea += ctxt->_eip; return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK; }
233
1
static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set, struct pt_regs * regs) { void __user *restorer; struct rt_sigframe __user *frame; int err = 0; int usig; frame = get_sigframe(ka, regs, sizeof(*frame)); if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) goto give_sigsegv; usig = current_thread_info()->exec_domain && current_thread_info()->exec_domain->signal_invmap && sig < 32 ? current_thread_info()->exec_domain->signal_invmap[sig] : sig; err |= __put_user(usig, &frame->sig); err |= __put_user(&frame->info, &frame->pinfo); err |= __put_user(&frame->uc, &frame->puc); err |= copy_siginfo_to_user(&frame->info, info); if (err) goto give_sigsegv; /* Create the ucontext. */ err |= __put_user(0, &frame->uc.uc_flags); err |= __put_user(0, &frame->uc.uc_link); err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp); err |= __put_user(sas_ss_flags(regs->sp), &frame->uc.uc_stack.ss_flags); err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); err |= setup_sigcontext(&frame->uc.uc_mcontext, &frame->fpstate, regs, set->sig[0]); err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); if (err) goto give_sigsegv; /* Set up to return from userspace. */ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn); if (ka->sa.sa_flags & SA_RESTORER) restorer = ka->sa.sa_restorer; err |= __put_user(restorer, &frame->pretcode); /* * This is movl $,%ax ; int $0x80 * * WE DO NOT USE IT ANY MORE! It's only left here for historical * reasons and because gdb uses it as a signature to notice * signal handler stack frames. */ err |= __put_user(0xb8, (char __user *)(frame->retcode+0)); err |= __put_user(__NR_rt_sigreturn, (int __user *)(frame->retcode+1)); err |= __put_user(0x80cd, (short __user *)(frame->retcode+5)); if (err) goto give_sigsegv; /* Set up registers for signal handler */ regs->sp = (unsigned long) frame; regs->ip = (unsigned long) ka->sa.sa_handler; regs->ax = (unsigned long) usig; regs->dx = (unsigned long) &frame->info; regs->cx = (unsigned long) &frame->uc; regs->ds = __USER_DS; regs->es = __USER_DS; regs->ss = __USER_DS; regs->cs = __USER_CS; /* * Clear TF when entering the signal handler, but * notify any tracer that was single-stepping it. * The tracer may want to single-step inside the * handler too. */ regs->flags &= ~TF_MASK; if (test_thread_flag(TIF_SINGLESTEP)) ptrace_notify(SIGTRAP); #if DEBUG_SIG printk("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n", current->comm, current->pid, frame, regs->ip, frame->pretcode); #endif return 0; give_sigsegv: force_sigsegv(sig, current); return -EFAULT; }
static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set, struct pt_regs * regs) { void __user *restorer; struct rt_sigframe __user *frame; int err = 0; int usig; frame = get_sigframe(ka, regs, sizeof(*frame)); if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) goto give_sigsegv; usig = current_thread_info()->exec_domain && current_thread_info()->exec_domain->signal_invmap && sig < 32 ? current_thread_info()->exec_domain->signal_invmap[sig] : sig; err |= __put_user(usig, &frame->sig); err |= __put_user(&frame->info, &frame->pinfo); err |= __put_user(&frame->uc, &frame->puc); err |= copy_siginfo_to_user(&frame->info, info); if (err) goto give_sigsegv; err |= __put_user(0, &frame->uc.uc_flags); err |= __put_user(0, &frame->uc.uc_link); err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp); err |= __put_user(sas_ss_flags(regs->sp), &frame->uc.uc_stack.ss_flags); err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); err |= setup_sigcontext(&frame->uc.uc_mcontext, &frame->fpstate, regs, set->sig[0]); err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); if (err) goto give_sigsegv; restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn); if (ka->sa.sa_flags & SA_RESTORER) restorer = ka->sa.sa_restorer; err |= __put_user(restorer, &frame->pretcode); err |= __put_user(0xb8, (char __user *)(frame->retcode+0)); err |= __put_user(__NR_rt_sigreturn, (int __user *)(frame->retcode+1)); err |= __put_user(0x80cd, (short __user *)(frame->retcode+5)); if (err) goto give_sigsegv; regs->sp = (unsigned long) frame; regs->ip = (unsigned long) ka->sa.sa_handler; regs->ax = (unsigned long) usig; regs->dx = (unsigned long) &frame->info; regs->cx = (unsigned long) &frame->uc; regs->ds = __USER_DS; regs->es = __USER_DS; regs->ss = __USER_DS; regs->cs = __USER_CS; regs->flags &= ~TF_MASK; if (test_thread_flag(TIF_SINGLESTEP)) ptrace_notify(SIGTRAP); #if DEBUG_SIG printk("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n", current->comm, current->pid, frame, regs->ip, frame->pretcode); #endif return 0; give_sigsegv: force_sigsegv(sig, current); return -EFAULT; }
234
1
int main(argc, argv) int argc; char *argv[]; { krb5_data pname_data, tkt_data; int sock = 0; socklen_t l; int retval; struct sockaddr_in l_inaddr, f_inaddr; /* local, foreign address */ krb5_creds creds, *new_creds; krb5_ccache cc; krb5_data msgtext, msg; krb5_context context; krb5_auth_context auth_context = NULL; #ifndef DEBUG freopen("/tmp/uu-server.log", "w", stderr); #endif retval = krb5_init_context(&context); if (retval) { com_err(argv[0], retval, "while initializing krb5"); exit(1); } #ifdef DEBUG { int one = 1; int acc; struct servent *sp; socklen_t namelen = sizeof(f_inaddr); if ((sock = socket(PF_INET, SOCK_STREAM, 0)) < 0) { com_err("uu-server", errno, "creating socket"); exit(3); } l_inaddr.sin_family = AF_INET; l_inaddr.sin_addr.s_addr = 0; if (argc == 2) { l_inaddr.sin_port = htons(atoi(argv[1])); } else { if (!(sp = getservbyname("uu-sample", "tcp"))) { com_err("uu-server", 0, "can't find uu-sample/tcp service"); exit(3); } l_inaddr.sin_port = sp->s_port; } (void) setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, (char *)&one, sizeof (one)); if (bind(sock, (struct sockaddr *)&l_inaddr, sizeof(l_inaddr))) { com_err("uu-server", errno, "binding socket"); exit(3); } if (listen(sock, 1) == -1) { com_err("uu-server", errno, "listening"); exit(3); } printf("Server started\n"); fflush(stdout); if ((acc = accept(sock, (struct sockaddr *)&f_inaddr, &namelen)) == -1) { com_err("uu-server", errno, "accepting"); exit(3); } dup2(acc, 0); close(sock); sock = 0; } #endif retval = krb5_read_message(context, (krb5_pointer) &sock, &pname_data); if (retval) { com_err ("uu-server", retval, "reading pname"); return 2; } retval = krb5_read_message(context, (krb5_pointer) &sock, &tkt_data); if (retval) { com_err ("uu-server", retval, "reading ticket data"); return 2; } retval = krb5_cc_default(context, &cc); if (retval) { com_err("uu-server", retval, "getting credentials cache"); return 4; } memset (&creds, 0, sizeof(creds)); retval = krb5_cc_get_principal(context, cc, &creds.client); if (retval) { com_err("uu-client", retval, "getting principal name"); return 6; } /* client sends it already null-terminated. */ printf ("uu-server: client principal is \"%s\".\n", pname_data.data); retval = krb5_parse_name(context, pname_data.data, &creds.server); if (retval) { com_err("uu-server", retval, "parsing client name"); return 3; } creds.second_ticket = tkt_data; printf ("uu-server: client ticket is %d bytes.\n", creds.second_ticket.length); retval = krb5_get_credentials(context, KRB5_GC_USER_USER, cc, &creds, &new_creds); if (retval) { com_err("uu-server", retval, "getting user-user ticket"); return 5; } #ifndef DEBUG l = sizeof(f_inaddr); if (getpeername(0, (struct sockaddr *)&f_inaddr, &l) == -1) { com_err("uu-server", errno, "getting client address"); return 6; } #endif l = sizeof(l_inaddr); if (getsockname(0, (struct sockaddr *)&l_inaddr, &l) == -1) { com_err("uu-server", errno, "getting local address"); return 6; } /* send a ticket/authenticator to the other side, so it can get the key we're using for the krb_safe below. */ retval = krb5_auth_con_init(context, &auth_context); if (retval) { com_err("uu-server", retval, "making auth_context"); return 8; } retval = krb5_auth_con_setflags(context, auth_context, KRB5_AUTH_CONTEXT_DO_SEQUENCE); if (retval) { com_err("uu-server", retval, "initializing the auth_context flags"); return 8; } retval = krb5_auth_con_genaddrs(context, auth_context, sock, KRB5_AUTH_CONTEXT_GENERATE_LOCAL_FULL_ADDR | KRB5_AUTH_CONTEXT_GENERATE_REMOTE_FULL_ADDR); if (retval) { com_err("uu-server", retval, "generating addrs for auth_context"); return 9; } #if 1 retval = krb5_mk_req_extended(context, &auth_context, AP_OPTS_USE_SESSION_KEY, NULL, new_creds, &msg); if (retval) { com_err("uu-server", retval, "making AP_REQ"); return 8; } retval = krb5_write_message(context, (krb5_pointer) &sock, &msg); #else retval = krb5_sendauth(context, &auth_context, (krb5_pointer)&sock, "???", 0, 0, AP_OPTS_MUTUAL_REQUIRED | AP_OPTS_USE_SESSION_KEY, NULL, &creds, cc, NULL, NULL, NULL); #endif if (retval) goto cl_short_wrt; free(msg.data); msgtext.length = 32; msgtext.data = "Hello, other end of connection."; retval = krb5_mk_safe(context, auth_context, &msgtext, &msg, NULL); if (retval) { com_err("uu-server", retval, "encoding message to client"); return 6; } retval = krb5_write_message(context, (krb5_pointer) &sock, &msg); if (retval) { cl_short_wrt: com_err("uu-server", retval, "writing message to client"); return 7; } krb5_free_data_contents(context, &msg); krb5_free_data_contents(context, &pname_data); /* tkt_data freed with creds */ krb5_free_cred_contents(context, &creds); krb5_free_creds(context, new_creds); krb5_cc_close(context, cc); krb5_auth_con_free(context, auth_context); krb5_free_context(context); return 0; }
int main(argc, argv) int argc; char *argv[]; { krb5_data pname_data, tkt_data; int sock = 0; socklen_t l; int retval; struct sockaddr_in l_inaddr, f_inaddr; krb5_creds creds, *new_creds; krb5_ccache cc; krb5_data msgtext, msg; krb5_context context; krb5_auth_context auth_context = NULL; #ifndef DEBUG freopen("/tmp/uu-server.log", "w", stderr); #endif retval = krb5_init_context(&context); if (retval) { com_err(argv[0], retval, "while initializing krb5"); exit(1); } #ifdef DEBUG { int one = 1; int acc; struct servent *sp; socklen_t namelen = sizeof(f_inaddr); if ((sock = socket(PF_INET, SOCK_STREAM, 0)) < 0) { com_err("uu-server", errno, "creating socket"); exit(3); } l_inaddr.sin_family = AF_INET; l_inaddr.sin_addr.s_addr = 0; if (argc == 2) { l_inaddr.sin_port = htons(atoi(argv[1])); } else { if (!(sp = getservbyname("uu-sample", "tcp"))) { com_err("uu-server", 0, "can't find uu-sample/tcp service"); exit(3); } l_inaddr.sin_port = sp->s_port; } (void) setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, (char *)&one, sizeof (one)); if (bind(sock, (struct sockaddr *)&l_inaddr, sizeof(l_inaddr))) { com_err("uu-server", errno, "binding socket"); exit(3); } if (listen(sock, 1) == -1) { com_err("uu-server", errno, "listening"); exit(3); } printf("Server started\n"); fflush(stdout); if ((acc = accept(sock, (struct sockaddr *)&f_inaddr, &namelen)) == -1) { com_err("uu-server", errno, "accepting"); exit(3); } dup2(acc, 0); close(sock); sock = 0; } #endif retval = krb5_read_message(context, (krb5_pointer) &sock, &pname_data); if (retval) { com_err ("uu-server", retval, "reading pname"); return 2; } retval = krb5_read_message(context, (krb5_pointer) &sock, &tkt_data); if (retval) { com_err ("uu-server", retval, "reading ticket data"); return 2; } retval = krb5_cc_default(context, &cc); if (retval) { com_err("uu-server", retval, "getting credentials cache"); return 4; } memset (&creds, 0, sizeof(creds)); retval = krb5_cc_get_principal(context, cc, &creds.client); if (retval) { com_err("uu-client", retval, "getting principal name"); return 6; } printf ("uu-server: client principal is \"%s\".\n", pname_data.data); retval = krb5_parse_name(context, pname_data.data, &creds.server); if (retval) { com_err("uu-server", retval, "parsing client name"); return 3; } creds.second_ticket = tkt_data; printf ("uu-server: client ticket is %d bytes.\n", creds.second_ticket.length); retval = krb5_get_credentials(context, KRB5_GC_USER_USER, cc, &creds, &new_creds); if (retval) { com_err("uu-server", retval, "getting user-user ticket"); return 5; } #ifndef DEBUG l = sizeof(f_inaddr); if (getpeername(0, (struct sockaddr *)&f_inaddr, &l) == -1) { com_err("uu-server", errno, "getting client address"); return 6; } #endif l = sizeof(l_inaddr); if (getsockname(0, (struct sockaddr *)&l_inaddr, &l) == -1) { com_err("uu-server", errno, "getting local address"); return 6; } retval = krb5_auth_con_init(context, &auth_context); if (retval) { com_err("uu-server", retval, "making auth_context"); return 8; } retval = krb5_auth_con_setflags(context, auth_context, KRB5_AUTH_CONTEXT_DO_SEQUENCE); if (retval) { com_err("uu-server", retval, "initializing the auth_context flags"); return 8; } retval = krb5_auth_con_genaddrs(context, auth_context, sock, KRB5_AUTH_CONTEXT_GENERATE_LOCAL_FULL_ADDR | KRB5_AUTH_CONTEXT_GENERATE_REMOTE_FULL_ADDR); if (retval) { com_err("uu-server", retval, "generating addrs for auth_context"); return 9; } #if 1 retval = krb5_mk_req_extended(context, &auth_context, AP_OPTS_USE_SESSION_KEY, NULL, new_creds, &msg); if (retval) { com_err("uu-server", retval, "making AP_REQ"); return 8; } retval = krb5_write_message(context, (krb5_pointer) &sock, &msg); #else retval = krb5_sendauth(context, &auth_context, (krb5_pointer)&sock, "???", 0, 0, AP_OPTS_MUTUAL_REQUIRED | AP_OPTS_USE_SESSION_KEY, NULL, &creds, cc, NULL, NULL, NULL); #endif if (retval) goto cl_short_wrt; free(msg.data); msgtext.length = 32; msgtext.data = "Hello, other end of connection."; retval = krb5_mk_safe(context, auth_context, &msgtext, &msg, NULL); if (retval) { com_err("uu-server", retval, "encoding message to client"); return 6; } retval = krb5_write_message(context, (krb5_pointer) &sock, &msg); if (retval) { cl_short_wrt: com_err("uu-server", retval, "writing message to client"); return 7; } krb5_free_data_contents(context, &msg); krb5_free_data_contents(context, &pname_data); krb5_free_cred_contents(context, &creds); krb5_free_creds(context, new_creds); krb5_cc_close(context, cc); krb5_auth_con_free(context, auth_context); krb5_free_context(context); return 0; }
235
0
static guint64 lbmpdm_fetch_uint64_encoded ( tvbuff_t * tvb , int offset , int encoding ) { guint64 value = 0 ; if ( encoding == ENC_BIG_ENDIAN ) { value = tvb_get_ntoh64 ( tvb , offset ) ; } else { value = tvb_get_letoh64 ( tvb , offset ) ; } return ( value ) ; }
static guint64 lbmpdm_fetch_uint64_encoded ( tvbuff_t * tvb , int offset , int encoding ) { guint64 value = 0 ; if ( encoding == ENC_BIG_ENDIAN ) { value = tvb_get_ntoh64 ( tvb , offset ) ; } else { value = tvb_get_letoh64 ( tvb , offset ) ; } return ( value ) ; }
236
0
static void omap_pwl_init(target_phys_addr_t base, struct omap_mpu_state_s *s, omap_clk clk) { int iomemtype; s->pwl.base = base; omap_pwl_reset(s); iomemtype = cpu_register_io_memory(0, omap_pwl_readfn, omap_pwl_writefn, s); cpu_register_physical_memory(s->pwl.base, 0x800, iomemtype); omap_clk_adduser(clk, qemu_allocate_irqs(omap_pwl_clk_update, s, 1)[0]); }
static void omap_pwl_init(target_phys_addr_t base, struct omap_mpu_state_s *s, omap_clk clk) { int iomemtype; s->pwl.base = base; omap_pwl_reset(s); iomemtype = cpu_register_io_memory(0, omap_pwl_readfn, omap_pwl_writefn, s); cpu_register_physical_memory(s->pwl.base, 0x800, iomemtype); omap_clk_adduser(clk, qemu_allocate_irqs(omap_pwl_clk_update, s, 1)[0]); }
237
1
int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, compat_sigset_t *set, struct pt_regs *regs) { struct rt_sigframe __user *frame; struct exec_domain *ed = current_thread_info()->exec_domain; void __user *restorer; int err = 0; /* __copy_to_user optimizes that into a single 8 byte store */ static const struct { u8 movl; u32 val; u16 int80; u16 pad; u8 pad2; } __attribute__((packed)) code = { 0xb8, __NR_ia32_rt_sigreturn, 0x80cd, 0, }; frame = get_sigframe(ka, regs, sizeof(*frame)); if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) goto give_sigsegv; err |= __put_user((ed && ed->signal_invmap && sig < 32 ? ed->signal_invmap[sig] : sig), &frame->sig); err |= __put_user(ptr_to_compat(&frame->info), &frame->pinfo); err |= __put_user(ptr_to_compat(&frame->uc), &frame->puc); err |= copy_siginfo_to_user32(&frame->info, info); if (err) goto give_sigsegv; /* Create the ucontext. */ err |= __put_user(0, &frame->uc.uc_flags); err |= __put_user(0, &frame->uc.uc_link); err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp); err |= __put_user(sas_ss_flags(regs->sp), &frame->uc.uc_stack.ss_flags); err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); err |= ia32_setup_sigcontext(&frame->uc.uc_mcontext, &frame->fpstate, regs, set->sig[0]); err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); if (err) goto give_sigsegv; if (ka->sa.sa_flags & SA_RESTORER) restorer = ka->sa.sa_restorer; else restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn); err |= __put_user(ptr_to_compat(restorer), &frame->pretcode); /* * Not actually used anymore, but left because some gdb * versions need it. */ err |= __copy_to_user(frame->retcode, &code, 8); if (err) goto give_sigsegv; /* Set up registers for signal handler */ regs->sp = (unsigned long) frame; regs->ip = (unsigned long) ka->sa.sa_handler; /* Make -mregparm=3 work */ regs->ax = sig; regs->dx = (unsigned long) &frame->info; regs->cx = (unsigned long) &frame->uc; /* Make -mregparm=3 work */ regs->ax = sig; regs->dx = (unsigned long) &frame->info; regs->cx = (unsigned long) &frame->uc; asm volatile("movl %0,%%ds" :: "r" (__USER32_DS)); asm volatile("movl %0,%%es" :: "r" (__USER32_DS)); regs->cs = __USER32_CS; regs->ss = __USER32_DS; set_fs(USER_DS); regs->flags &= ~X86_EFLAGS_TF; if (test_thread_flag(TIF_SINGLESTEP)) ptrace_notify(SIGTRAP); #if DEBUG_SIG printk(KERN_DEBUG "SIG deliver (%s:%d): sp=%p pc=%lx ra=%u\n", current->comm, current->pid, frame, regs->ip, frame->pretcode); #endif return 0; give_sigsegv: force_sigsegv(sig, current); return -EFAULT; }
int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, compat_sigset_t *set, struct pt_regs *regs) { struct rt_sigframe __user *frame; struct exec_domain *ed = current_thread_info()->exec_domain; void __user *restorer; int err = 0; static const struct { u8 movl; u32 val; u16 int80; u16 pad; u8 pad2; } __attribute__((packed)) code = { 0xb8, __NR_ia32_rt_sigreturn, 0x80cd, 0, }; frame = get_sigframe(ka, regs, sizeof(*frame)); if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) goto give_sigsegv; err |= __put_user((ed && ed->signal_invmap && sig < 32 ? ed->signal_invmap[sig] : sig), &frame->sig); err |= __put_user(ptr_to_compat(&frame->info), &frame->pinfo); err |= __put_user(ptr_to_compat(&frame->uc), &frame->puc); err |= copy_siginfo_to_user32(&frame->info, info); if (err) goto give_sigsegv; err |= __put_user(0, &frame->uc.uc_flags); err |= __put_user(0, &frame->uc.uc_link); err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp); err |= __put_user(sas_ss_flags(regs->sp), &frame->uc.uc_stack.ss_flags); err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); err |= ia32_setup_sigcontext(&frame->uc.uc_mcontext, &frame->fpstate, regs, set->sig[0]); err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); if (err) goto give_sigsegv; if (ka->sa.sa_flags & SA_RESTORER) restorer = ka->sa.sa_restorer; else restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn); err |= __put_user(ptr_to_compat(restorer), &frame->pretcode); err |= __copy_to_user(frame->retcode, &code, 8); if (err) goto give_sigsegv; regs->sp = (unsigned long) frame; regs->ip = (unsigned long) ka->sa.sa_handler; regs->ax = sig; regs->dx = (unsigned long) &frame->info; regs->cx = (unsigned long) &frame->uc; regs->ax = sig; regs->dx = (unsigned long) &frame->info; regs->cx = (unsigned long) &frame->uc; asm volatile("movl %0,%%ds" :: "r" (__USER32_DS)); asm volatile("movl %0,%%es" :: "r" (__USER32_DS)); regs->cs = __USER32_CS; regs->ss = __USER32_DS; set_fs(USER_DS); regs->flags &= ~X86_EFLAGS_TF; if (test_thread_flag(TIF_SINGLESTEP)) ptrace_notify(SIGTRAP); #if DEBUG_SIG printk(KERN_DEBUG "SIG deliver (%s:%d): sp=%p pc=%lx ra=%u\n", current->comm, current->pid, frame, regs->ip, frame->pretcode); #endif return 0; give_sigsegv: force_sigsegv(sig, current); return -EFAULT; }
238
1
static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set, struct pt_regs * regs) { struct rt_sigframe __user *frame; struct _fpstate __user *fp = NULL; int err = 0; struct task_struct *me = current; if (used_math()) { fp = get_stack(ka, regs, sizeof(struct _fpstate)); frame = (void __user *)round_down( (unsigned long)fp - sizeof(struct rt_sigframe), 16) - 8; if (!access_ok(VERIFY_WRITE, fp, sizeof(struct _fpstate))) goto give_sigsegv; if (save_i387(fp) < 0) err |= -1; } else frame = get_stack(ka, regs, sizeof(struct rt_sigframe)) - 8; if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) goto give_sigsegv; if (ka->sa.sa_flags & SA_SIGINFO) { err |= copy_siginfo_to_user(&frame->info, info); if (err) goto give_sigsegv; } /* Create the ucontext. */ err |= __put_user(0, &frame->uc.uc_flags); err |= __put_user(0, &frame->uc.uc_link); err |= __put_user(me->sas_ss_sp, &frame->uc.uc_stack.ss_sp); err |= __put_user(sas_ss_flags(regs->sp), &frame->uc.uc_stack.ss_flags); err |= __put_user(me->sas_ss_size, &frame->uc.uc_stack.ss_size); err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0], me); err |= __put_user(fp, &frame->uc.uc_mcontext.fpstate); if (sizeof(*set) == 16) { __put_user(set->sig[0], &frame->uc.uc_sigmask.sig[0]); __put_user(set->sig[1], &frame->uc.uc_sigmask.sig[1]); } else err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); /* Set up to return from userspace. If provided, use a stub already in userspace. */ /* x86-64 should always use SA_RESTORER. */ if (ka->sa.sa_flags & SA_RESTORER) { err |= __put_user(ka->sa.sa_restorer, &frame->pretcode); } else { /* could use a vstub here */ goto give_sigsegv; } if (err) goto give_sigsegv; #ifdef DEBUG_SIG printk("%d old ip %lx old sp %lx old ax %lx\n", current->pid,regs->ip,regs->sp,regs->ax); #endif /* Set up registers for signal handler */ regs->di = sig; /* In case the signal handler was declared without prototypes */ regs->ax = 0; /* This also works for non SA_SIGINFO handlers because they expect the next argument after the signal number on the stack. */ regs->si = (unsigned long)&frame->info; regs->dx = (unsigned long)&frame->uc; regs->ip = (unsigned long) ka->sa.sa_handler; regs->sp = (unsigned long)frame; /* Set up the CS register to run signal handlers in 64-bit mode, even if the handler happens to be interrupting 32-bit code. */ regs->cs = __USER_CS; /* This, by contrast, has nothing to do with segment registers - see include/asm-x86_64/uaccess.h for details. */ set_fs(USER_DS); regs->flags &= ~X86_EFLAGS_TF; if (test_thread_flag(TIF_SINGLESTEP)) ptrace_notify(SIGTRAP); #ifdef DEBUG_SIG printk("SIG deliver (%s:%d): sp=%p pc=%lx ra=%p\n", current->comm, current->pid, frame, regs->ip, frame->pretcode); #endif return 0; give_sigsegv: force_sigsegv(sig, current); return -EFAULT; }
static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set, struct pt_regs * regs) { struct rt_sigframe __user *frame; struct _fpstate __user *fp = NULL; int err = 0; struct task_struct *me = current; if (used_math()) { fp = get_stack(ka, regs, sizeof(struct _fpstate)); frame = (void __user *)round_down( (unsigned long)fp - sizeof(struct rt_sigframe), 16) - 8; if (!access_ok(VERIFY_WRITE, fp, sizeof(struct _fpstate))) goto give_sigsegv; if (save_i387(fp) < 0) err |= -1; } else frame = get_stack(ka, regs, sizeof(struct rt_sigframe)) - 8; if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) goto give_sigsegv; if (ka->sa.sa_flags & SA_SIGINFO) { err |= copy_siginfo_to_user(&frame->info, info); if (err) goto give_sigsegv; } err |= __put_user(0, &frame->uc.uc_flags); err |= __put_user(0, &frame->uc.uc_link); err |= __put_user(me->sas_ss_sp, &frame->uc.uc_stack.ss_sp); err |= __put_user(sas_ss_flags(regs->sp), &frame->uc.uc_stack.ss_flags); err |= __put_user(me->sas_ss_size, &frame->uc.uc_stack.ss_size); err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0], me); err |= __put_user(fp, &frame->uc.uc_mcontext.fpstate); if (sizeof(*set) == 16) { __put_user(set->sig[0], &frame->uc.uc_sigmask.sig[0]); __put_user(set->sig[1], &frame->uc.uc_sigmask.sig[1]); } else err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); if (ka->sa.sa_flags & SA_RESTORER) { err |= __put_user(ka->sa.sa_restorer, &frame->pretcode); } else { goto give_sigsegv; } if (err) goto give_sigsegv; #ifdef DEBUG_SIG printk("%d old ip %lx old sp %lx old ax %lx\n", current->pid,regs->ip,regs->sp,regs->ax); #endif regs->di = sig; regs->ax = 0; regs->si = (unsigned long)&frame->info; regs->dx = (unsigned long)&frame->uc; regs->ip = (unsigned long) ka->sa.sa_handler; regs->sp = (unsigned long)frame; regs->cs = __USER_CS; set_fs(USER_DS); regs->flags &= ~X86_EFLAGS_TF; if (test_thread_flag(TIF_SINGLESTEP)) ptrace_notify(SIGTRAP); #ifdef DEBUG_SIG printk("SIG deliver (%s:%d): sp=%p pc=%lx ra=%p\n", current->comm, current->pid, frame, regs->ip, frame->pretcode); #endif return 0; give_sigsegv: force_sigsegv(sig, current); return -EFAULT; }
239
1
static int em_sysenter(struct x86_emulate_ctxt *ctxt) { const struct x86_emulate_ops *ops = ctxt->ops; struct desc_struct cs, ss; u64 msr_data; u16 cs_sel, ss_sel; u64 efer = 0; ops->get_msr(ctxt, MSR_EFER, &efer); /* inject #GP if in real mode */ if (ctxt->mode == X86EMUL_MODE_REAL) return emulate_gp(ctxt, 0); /* * Not recognized on AMD in compat mode (but is recognized in legacy * mode). */ if ((ctxt->mode == X86EMUL_MODE_PROT32) && (efer & EFER_LMA) && !vendor_intel(ctxt)) return emulate_ud(ctxt); /* sysenter/sysexit have not been tested in 64bit mode. */ if (ctxt->mode == X86EMUL_MODE_PROT64) return X86EMUL_UNHANDLEABLE; setup_syscalls_segments(ctxt, &cs, &ss); ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data); switch (ctxt->mode) { case X86EMUL_MODE_PROT32: if ((msr_data & 0xfffc) == 0x0) return emulate_gp(ctxt, 0); break; case X86EMUL_MODE_PROT64: if (msr_data == 0x0) return emulate_gp(ctxt, 0); break; default: break; } ctxt->eflags &= ~(EFLG_VM | EFLG_IF); cs_sel = (u16)msr_data; cs_sel &= ~SELECTOR_RPL_MASK; ss_sel = cs_sel + 8; ss_sel &= ~SELECTOR_RPL_MASK; if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) { cs.d = 0; cs.l = 1; } ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS); ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data); ctxt->_eip = msr_data; ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data); *reg_write(ctxt, VCPU_REGS_RSP) = msr_data; return X86EMUL_CONTINUE; }
static int em_sysenter(struct x86_emulate_ctxt *ctxt) { const struct x86_emulate_ops *ops = ctxt->ops; struct desc_struct cs, ss; u64 msr_data; u16 cs_sel, ss_sel; u64 efer = 0; ops->get_msr(ctxt, MSR_EFER, &efer); if (ctxt->mode == X86EMUL_MODE_REAL) return emulate_gp(ctxt, 0); if ((ctxt->mode == X86EMUL_MODE_PROT32) && (efer & EFER_LMA) && !vendor_intel(ctxt)) return emulate_ud(ctxt); if (ctxt->mode == X86EMUL_MODE_PROT64) return X86EMUL_UNHANDLEABLE; setup_syscalls_segments(ctxt, &cs, &ss); ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data); switch (ctxt->mode) { case X86EMUL_MODE_PROT32: if ((msr_data & 0xfffc) == 0x0) return emulate_gp(ctxt, 0); break; case X86EMUL_MODE_PROT64: if (msr_data == 0x0) return emulate_gp(ctxt, 0); break; default: break; } ctxt->eflags &= ~(EFLG_VM | EFLG_IF); cs_sel = (u16)msr_data; cs_sel &= ~SELECTOR_RPL_MASK; ss_sel = cs_sel + 8; ss_sel &= ~SELECTOR_RPL_MASK; if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) { cs.d = 0; cs.l = 1; } ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS); ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data); ctxt->_eip = msr_data; ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data); *reg_write(ctxt, VCPU_REGS_RSP) = msr_data; return X86EMUL_CONTINUE; }
241
0
static void steamdiscover_dissect_body_proofresponse ( tvbuff_t * tvb , packet_info * pinfo , proto_tree * tree , gint offset , gint bytes_left ) { gint len ; gint64 value ; protobuf_desc_t pb = { tvb , offset , bytes_left } ; protobuf_tag_t tag = { 0 , 0 , 0 } ; while ( protobuf_iter_next ( & pb , & tag ) ) { switch ( tag . field_number ) { case STEAMDISCOVER_FN_PROOFRESPONSE_RESPONSE : STEAMDISCOVER_ENSURE_WIRETYPE ( PROTOBUF_WIRETYPE_LENGTHDELIMITED ) ; value = get_varint64 ( pb . tvb , pb . offset , pb . bytes_left , & len ) ; proto_tree_add_item ( tree , hf_steam_ihs_discovery_body_proofresponse_response , pb . tvb , pb . offset + len , ( gint ) value , ENC_NA ) ; len += ( gint ) value ; break ; default : len = protobuf_dissect_unknown_field ( & pb , & tag , pinfo , tree , NULL ) ; break ; } protobuf_seek_forward ( & pb , len ) ; } }
static void steamdiscover_dissect_body_proofresponse ( tvbuff_t * tvb , packet_info * pinfo , proto_tree * tree , gint offset , gint bytes_left ) { gint len ; gint64 value ; protobuf_desc_t pb = { tvb , offset , bytes_left } ; protobuf_tag_t tag = { 0 , 0 , 0 } ; while ( protobuf_iter_next ( & pb , & tag ) ) { switch ( tag . field_number ) { case STEAMDISCOVER_FN_PROOFRESPONSE_RESPONSE : STEAMDISCOVER_ENSURE_WIRETYPE ( PROTOBUF_WIRETYPE_LENGTHDELIMITED ) ; value = get_varint64 ( pb . tvb , pb . offset , pb . bytes_left , & len ) ; proto_tree_add_item ( tree , hf_steam_ihs_discovery_body_proofresponse_response , pb . tvb , pb . offset + len , ( gint ) value , ENC_NA ) ; len += ( gint ) value ; break ; default : len = protobuf_dissect_unknown_field ( & pb , & tag , pinfo , tree , NULL ) ; break ; } protobuf_seek_forward ( & pb , len ) ; } }
242
0
static void virt_acpi_build_update(void *build_opaque) { AcpiBuildState *build_state = build_opaque; AcpiBuildTables tables; /* No state to update or already patched? Nothing to do. */ if (!build_state || build_state->patched) { return; } build_state->patched = true; acpi_build_tables_init(&tables); virt_acpi_build(build_state->guest_info, &tables); acpi_ram_update(build_state->table_mr, tables.table_data); acpi_ram_update(build_state->rsdp_mr, tables.rsdp); acpi_ram_update(build_state->linker_mr, tables.linker); acpi_build_tables_cleanup(&tables, true); }
static void virt_acpi_build_update(void *build_opaque) { AcpiBuildState *build_state = build_opaque; AcpiBuildTables tables; if (!build_state || build_state->patched) { return; } build_state->patched = true; acpi_build_tables_init(&tables); virt_acpi_build(build_state->guest_info, &tables); acpi_ram_update(build_state->table_mr, tables.table_data); acpi_ram_update(build_state->rsdp_mr, tables.rsdp); acpi_ram_update(build_state->linker_mr, tables.linker); acpi_build_tables_cleanup(&tables, true); }
243
1
__mptctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { mpt_ioctl_header __user *uhdr = (void __user *) arg; mpt_ioctl_header khdr; int iocnum; unsigned iocnumX; int nonblock = (file->f_flags & O_NONBLOCK); int ret; MPT_ADAPTER *iocp = NULL; if (copy_from_user(&khdr, uhdr, sizeof(khdr))) { printk(KERN_ERR MYNAM "%s::mptctl_ioctl() @%d - " "Unable to copy mpt_ioctl_header data @ %p\n", __FILE__, __LINE__, uhdr); return -EFAULT; } ret = -ENXIO; /* (-6) No such device or address */ /* Verify intended MPT adapter - set iocnum and the adapter * pointer (iocp) */ iocnumX = khdr.iocnum & 0xFF; if (((iocnum = mpt_verify_adapter(iocnumX, &iocp)) < 0) || (iocp == NULL)) return -ENODEV; if (!iocp->active) { printk(KERN_DEBUG MYNAM "%s::mptctl_ioctl() @%d - Controller disabled.\n", __FILE__, __LINE__); return -EFAULT; } /* Handle those commands that are just returning * information stored in the driver. * These commands should never time out and are unaffected * by TM and FW reloads. */ if ((cmd & ~IOCSIZE_MASK) == (MPTIOCINFO & ~IOCSIZE_MASK)) { return mptctl_getiocinfo(arg, _IOC_SIZE(cmd)); } else if (cmd == MPTTARGETINFO) { return mptctl_gettargetinfo(arg); } else if (cmd == MPTTEST) { return mptctl_readtest(arg); } else if (cmd == MPTEVENTQUERY) { return mptctl_eventquery(arg); } else if (cmd == MPTEVENTENABLE) { return mptctl_eventenable(arg); } else if (cmd == MPTEVENTREPORT) { return mptctl_eventreport(arg); } else if (cmd == MPTFWREPLACE) { return mptctl_replace_fw(arg); } /* All of these commands require an interrupt or * are unknown/illegal. */ if ((ret = mptctl_syscall_down(iocp, nonblock)) != 0) return ret; if (cmd == MPTFWDOWNLOAD) ret = mptctl_fw_download(arg); else if (cmd == MPTCOMMAND) ret = mptctl_mpt_command(arg); else if (cmd == MPTHARDRESET) ret = mptctl_do_reset(arg); else if ((cmd & ~IOCSIZE_MASK) == (HP_GETHOSTINFO & ~IOCSIZE_MASK)) ret = mptctl_hp_hostinfo(arg, _IOC_SIZE(cmd)); else if (cmd == HP_GETTARGETINFO) ret = mptctl_hp_targetinfo(arg); else ret = -EINVAL; mutex_unlock(&iocp->ioctl_cmds.mutex); return ret; }
__mptctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { mpt_ioctl_header __user *uhdr = (void __user *) arg; mpt_ioctl_header khdr; int iocnum; unsigned iocnumX; int nonblock = (file->f_flags & O_NONBLOCK); int ret; MPT_ADAPTER *iocp = NULL; if (copy_from_user(&khdr, uhdr, sizeof(khdr))) { printk(KERN_ERR MYNAM "%s::mptctl_ioctl() @%d - " "Unable to copy mpt_ioctl_header data @ %p\n", __FILE__, __LINE__, uhdr); return -EFAULT; } ret = -ENXIO; iocnumX = khdr.iocnum & 0xFF; if (((iocnum = mpt_verify_adapter(iocnumX, &iocp)) < 0) || (iocp == NULL)) return -ENODEV; if (!iocp->active) { printk(KERN_DEBUG MYNAM "%s::mptctl_ioctl() @%d - Controller disabled.\n", __FILE__, __LINE__); return -EFAULT; } if ((cmd & ~IOCSIZE_MASK) == (MPTIOCINFO & ~IOCSIZE_MASK)) { return mptctl_getiocinfo(arg, _IOC_SIZE(cmd)); } else if (cmd == MPTTARGETINFO) { return mptctl_gettargetinfo(arg); } else if (cmd == MPTTEST) { return mptctl_readtest(arg); } else if (cmd == MPTEVENTQUERY) { return mptctl_eventquery(arg); } else if (cmd == MPTEVENTENABLE) { return mptctl_eventenable(arg); } else if (cmd == MPTEVENTREPORT) { return mptctl_eventreport(arg); } else if (cmd == MPTFWREPLACE) { return mptctl_replace_fw(arg); } if ((ret = mptctl_syscall_down(iocp, nonblock)) != 0) return ret; if (cmd == MPTFWDOWNLOAD) ret = mptctl_fw_download(arg); else if (cmd == MPTCOMMAND) ret = mptctl_mpt_command(arg); else if (cmd == MPTHARDRESET) ret = mptctl_do_reset(arg); else if ((cmd & ~IOCSIZE_MASK) == (HP_GETHOSTINFO & ~IOCSIZE_MASK)) ret = mptctl_hp_hostinfo(arg, _IOC_SIZE(cmd)); else if (cmd == HP_GETTARGETINFO) ret = mptctl_hp_targetinfo(arg); else ret = -EINVAL; mutex_unlock(&iocp->ioctl_cmds.mutex); return ret; }
244
0
static void uart_rx_reset(UartState *s) { /* Drop any buffered data and let the char backend send more */ s->rx_wpos = 0; s->rx_count = 0; qemu_chr_accept_input(s->chr); /* the RX FIFO is now empty, so it cannot be full */ s->r[R_SR] |= UART_SR_INTR_REMPTY; s->r[R_SR] &= ~UART_SR_INTR_RFUL; }
static void uart_rx_reset(UartState *s) { s->rx_wpos = 0; s->rx_count = 0; qemu_chr_accept_input(s->chr); s->r[R_SR] |= UART_SR_INTR_REMPTY; s->r[R_SR] &= ~UART_SR_INTR_RFUL; }
245
1
static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set, struct pt_regs * regs) { void __user *restorer; struct sigframe __user *frame; int err = 0; int usig; frame = get_sigframe(ka, regs, sizeof(*frame)); if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) goto give_sigsegv; usig = current_thread_info()->exec_domain && current_thread_info()->exec_domain->signal_invmap && sig < 32 ? current_thread_info()->exec_domain->signal_invmap[sig] : sig; err = __put_user(usig, &frame->sig); if (err) goto give_sigsegv; err = setup_sigcontext(&frame->sc, &frame->fpstate, regs, set->sig[0]); if (err) goto give_sigsegv; if (_NSIG_WORDS > 1) { err = __copy_to_user(&frame->extramask, &set->sig[1], sizeof(frame->extramask)); if (err) goto give_sigsegv; } if (current->binfmt->hasvdso) restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn); else restorer = &frame->retcode; if (ka->sa.sa_flags & SA_RESTORER) restorer = ka->sa.sa_restorer; /* Set up to return from userspace. */ err |= __put_user(restorer, &frame->pretcode); /* * This is popl %eax ; movl $,%eax ; int $0x80 * * WE DO NOT USE IT ANY MORE! It's only left here for historical * reasons and because gdb uses it as a signature to notice * signal handler stack frames. */ err |= __put_user(0xb858, (short __user *)(frame->retcode+0)); err |= __put_user(__NR_sigreturn, (int __user *)(frame->retcode+2)); err |= __put_user(0x80cd, (short __user *)(frame->retcode+6)); if (err) goto give_sigsegv; /* Set up registers for signal handler */ regs->sp = (unsigned long) frame; regs->ip = (unsigned long) ka->sa.sa_handler; regs->ax = (unsigned long) sig; regs->dx = (unsigned long) 0; regs->cx = (unsigned long) 0; regs->ds = __USER_DS; regs->es = __USER_DS; regs->ss = __USER_DS; regs->cs = __USER_CS; /* * Clear TF when entering the signal handler, but * notify any tracer that was single-stepping it. * The tracer may want to single-step inside the * handler too. */ regs->flags &= ~TF_MASK; if (test_thread_flag(TIF_SINGLESTEP)) ptrace_notify(SIGTRAP); #if DEBUG_SIG printk("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n", current->comm, current->pid, frame, regs->ip, frame->pretcode); #endif return 0; give_sigsegv: force_sigsegv(sig, current); return -EFAULT; }
static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set, struct pt_regs * regs) { void __user *restorer; struct sigframe __user *frame; int err = 0; int usig; frame = get_sigframe(ka, regs, sizeof(*frame)); if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) goto give_sigsegv; usig = current_thread_info()->exec_domain && current_thread_info()->exec_domain->signal_invmap && sig < 32 ? current_thread_info()->exec_domain->signal_invmap[sig] : sig; err = __put_user(usig, &frame->sig); if (err) goto give_sigsegv; err = setup_sigcontext(&frame->sc, &frame->fpstate, regs, set->sig[0]); if (err) goto give_sigsegv; if (_NSIG_WORDS > 1) { err = __copy_to_user(&frame->extramask, &set->sig[1], sizeof(frame->extramask)); if (err) goto give_sigsegv; } if (current->binfmt->hasvdso) restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn); else restorer = &frame->retcode; if (ka->sa.sa_flags & SA_RESTORER) restorer = ka->sa.sa_restorer; err |= __put_user(restorer, &frame->pretcode); err |= __put_user(0xb858, (short __user *)(frame->retcode+0)); err |= __put_user(__NR_sigreturn, (int __user *)(frame->retcode+2)); err |= __put_user(0x80cd, (short __user *)(frame->retcode+6)); if (err) goto give_sigsegv; regs->sp = (unsigned long) frame; regs->ip = (unsigned long) ka->sa.sa_handler; regs->ax = (unsigned long) sig; regs->dx = (unsigned long) 0; regs->cx = (unsigned long) 0; regs->ds = __USER_DS; regs->es = __USER_DS; regs->ss = __USER_DS; regs->cs = __USER_CS; regs->flags &= ~TF_MASK; if (test_thread_flag(TIF_SINGLESTEP)) ptrace_notify(SIGTRAP); #if DEBUG_SIG printk("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n", current->comm, current->pid, frame, regs->ip, frame->pretcode); #endif return 0; give_sigsegv: force_sigsegv(sig, current); return -EFAULT; }
246
0
static void kq_sighandler(int sig) { }
static void kq_sighandler(int sig) { }
247
1
int ia32_setup_frame(int sig, struct k_sigaction *ka, compat_sigset_t *set, struct pt_regs *regs) { struct sigframe __user *frame; void __user *restorer; int err = 0; /* copy_to_user optimizes that into a single 8 byte store */ static const struct { u16 poplmovl; u32 val; u16 int80; u16 pad; } __attribute__((packed)) code = { 0xb858, /* popl %eax ; movl $...,%eax */ __NR_ia32_sigreturn, 0x80cd, /* int $0x80 */ 0, }; frame = get_sigframe(ka, regs, sizeof(*frame)); if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) goto give_sigsegv; err |= __put_user(sig, &frame->sig); if (err) goto give_sigsegv; err |= ia32_setup_sigcontext(&frame->sc, &frame->fpstate, regs, set->sig[0]); if (err) goto give_sigsegv; if (_COMPAT_NSIG_WORDS > 1) { err |= __copy_to_user(frame->extramask, &set->sig[1], sizeof(frame->extramask)); if (err) goto give_sigsegv; } if (ka->sa.sa_flags & SA_RESTORER) { restorer = ka->sa.sa_restorer; } else { /* Return stub is in 32bit vsyscall page */ if (current->binfmt->hasvdso) restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn); else restorer = &frame->retcode; } err |= __put_user(ptr_to_compat(restorer), &frame->pretcode); /* * These are actually not used anymore, but left because some * gdb versions depend on them as a marker. */ err |= __copy_to_user(frame->retcode, &code, 8); if (err) goto give_sigsegv; /* Set up registers for signal handler */ regs->sp = (unsigned long) frame; regs->ip = (unsigned long) ka->sa.sa_handler; /* Make -mregparm=3 work */ regs->ax = sig; regs->dx = 0; regs->cx = 0; asm volatile("movl %0,%%ds" :: "r" (__USER32_DS)); asm volatile("movl %0,%%es" :: "r" (__USER32_DS)); regs->cs = __USER32_CS; regs->ss = __USER32_DS; set_fs(USER_DS); regs->flags &= ~X86_EFLAGS_TF; if (test_thread_flag(TIF_SINGLESTEP)) ptrace_notify(SIGTRAP); #if DEBUG_SIG printk(KERN_DEBUG "SIG deliver (%s:%d): sp=%p pc=%lx ra=%u\n", current->comm, current->pid, frame, regs->ip, frame->pretcode); #endif return 0; give_sigsegv: force_sigsegv(sig, current); return -EFAULT; }
int ia32_setup_frame(int sig, struct k_sigaction *ka, compat_sigset_t *set, struct pt_regs *regs) { struct sigframe __user *frame; void __user *restorer; int err = 0; static const struct { u16 poplmovl; u32 val; u16 int80; u16 pad; } __attribute__((packed)) code = { 0xb858, __NR_ia32_sigreturn, 0x80cd, 0, }; frame = get_sigframe(ka, regs, sizeof(*frame)); if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) goto give_sigsegv; err |= __put_user(sig, &frame->sig); if (err) goto give_sigsegv; err |= ia32_setup_sigcontext(&frame->sc, &frame->fpstate, regs, set->sig[0]); if (err) goto give_sigsegv; if (_COMPAT_NSIG_WORDS > 1) { err |= __copy_to_user(frame->extramask, &set->sig[1], sizeof(frame->extramask)); if (err) goto give_sigsegv; } if (ka->sa.sa_flags & SA_RESTORER) { restorer = ka->sa.sa_restorer; } else { if (current->binfmt->hasvdso) restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn); else restorer = &frame->retcode; } err |= __put_user(ptr_to_compat(restorer), &frame->pretcode); err |= __copy_to_user(frame->retcode, &code, 8); if (err) goto give_sigsegv; regs->sp = (unsigned long) frame; regs->ip = (unsigned long) ka->sa.sa_handler; regs->ax = sig; regs->dx = 0; regs->cx = 0; asm volatile("movl %0,%%ds" :: "r" (__USER32_DS)); asm volatile("movl %0,%%es" :: "r" (__USER32_DS)); regs->cs = __USER32_CS; regs->ss = __USER32_DS; set_fs(USER_DS); regs->flags &= ~X86_EFLAGS_TF; if (test_thread_flag(TIF_SINGLESTEP)) ptrace_notify(SIGTRAP); #if DEBUG_SIG printk(KERN_DEBUG "SIG deliver (%s:%d): sp=%p pc=%lx ra=%u\n", current->comm, current->pid, frame, regs->ip, frame->pretcode); #endif return 0; give_sigsegv: force_sigsegv(sig, current); return -EFAULT; }
249
1
static int rndis_set_response(USBNetState *s, rndis_set_msg_type *buf, unsigned int length) { rndis_set_cmplt_type *resp = rndis_queue_response(s, sizeof(rndis_set_cmplt_type)); uint32_t bufoffs, buflen; int ret; if (!resp) return USB_RET_STALL; bufoffs = le32_to_cpu(buf->InformationBufferOffset) + 8; buflen = le32_to_cpu(buf->InformationBufferLength); if (bufoffs + buflen > length) return USB_RET_STALL; ret = ndis_set(s, le32_to_cpu(buf->OID), bufoffs + (uint8_t *) buf, buflen); resp->MessageType = cpu_to_le32(RNDIS_SET_CMPLT); resp->RequestID = buf->RequestID; /* Still LE in msg buffer */ resp->MessageLength = cpu_to_le32(sizeof(rndis_set_cmplt_type)); if (ret < 0) { /* OID not supported */ resp->Status = cpu_to_le32(RNDIS_STATUS_NOT_SUPPORTED); return 0; } resp->Status = cpu_to_le32(RNDIS_STATUS_SUCCESS); return 0; }
static int rndis_set_response(USBNetState *s, rndis_set_msg_type *buf, unsigned int length) { rndis_set_cmplt_type *resp = rndis_queue_response(s, sizeof(rndis_set_cmplt_type)); uint32_t bufoffs, buflen; int ret; if (!resp) return USB_RET_STALL; bufoffs = le32_to_cpu(buf->InformationBufferOffset) + 8; buflen = le32_to_cpu(buf->InformationBufferLength); if (bufoffs + buflen > length) return USB_RET_STALL; ret = ndis_set(s, le32_to_cpu(buf->OID), bufoffs + (uint8_t *) buf, buflen); resp->MessageType = cpu_to_le32(RNDIS_SET_CMPLT); resp->RequestID = buf->RequestID; resp->MessageLength = cpu_to_le32(sizeof(rndis_set_cmplt_type)); if (ret < 0) { resp->Status = cpu_to_le32(RNDIS_STATUS_NOT_SUPPORTED); return 0; } resp->Status = cpu_to_le32(RNDIS_STATUS_SUCCESS); return 0; }
250
0
void hb_set_add_range(hb_set_t *set, hb_codepoint_t first, hb_codepoint_t last) { set->add_range(first, last); }
void hb_set_add_range(hb_set_t *set, hb_codepoint_t first, hb_codepoint_t last) { set->add_range(first, last); }
251
0
compat_mpt_command(struct file *filp, unsigned int cmd, unsigned long arg) { struct mpt_ioctl_command32 karg32; struct mpt_ioctl_command32 __user *uarg = (struct mpt_ioctl_command32 __user *) arg; struct mpt_ioctl_command karg; MPT_ADAPTER *iocp = NULL; int iocnum, iocnumX; int nonblock = (filp->f_flags & O_NONBLOCK); int ret; if (copy_from_user(&karg32, (char __user *)arg, sizeof(karg32))) return -EFAULT; /* Verify intended MPT adapter */ iocnumX = karg32.hdr.iocnum & 0xFF; if (((iocnum = mpt_verify_adapter(iocnumX, &iocp)) < 0) || (iocp == NULL)) { printk(KERN_DEBUG MYNAM "::compat_mpt_command @%d - ioc%d not found!\n", __LINE__, iocnumX); return -ENODEV; } if ((ret = mptctl_syscall_down(iocp, nonblock)) != 0) return ret; dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "compat_mpt_command() called\n", iocp->name)); /* Copy data to karg */ karg.hdr.iocnum = karg32.hdr.iocnum; karg.hdr.port = karg32.hdr.port; karg.timeout = karg32.timeout; karg.maxReplyBytes = karg32.maxReplyBytes; karg.dataInSize = karg32.dataInSize; karg.dataOutSize = karg32.dataOutSize; karg.maxSenseBytes = karg32.maxSenseBytes; karg.dataSgeOffset = karg32.dataSgeOffset; karg.replyFrameBufPtr = (char __user *)(unsigned long)karg32.replyFrameBufPtr; karg.dataInBufPtr = (char __user *)(unsigned long)karg32.dataInBufPtr; karg.dataOutBufPtr = (char __user *)(unsigned long)karg32.dataOutBufPtr; karg.senseDataPtr = (char __user *)(unsigned long)karg32.senseDataPtr; /* Pass new structure to do_mpt_command */ ret = mptctl_do_mpt_command (iocp, karg, &uarg->MF); mutex_unlock(&iocp->ioctl_cmds.mutex); return ret; }
compat_mpt_command(struct file *filp, unsigned int cmd, unsigned long arg) { struct mpt_ioctl_command32 karg32; struct mpt_ioctl_command32 __user *uarg = (struct mpt_ioctl_command32 __user *) arg; struct mpt_ioctl_command karg; MPT_ADAPTER *iocp = NULL; int iocnum, iocnumX; int nonblock = (filp->f_flags & O_NONBLOCK); int ret; if (copy_from_user(&karg32, (char __user *)arg, sizeof(karg32))) return -EFAULT; iocnumX = karg32.hdr.iocnum & 0xFF; if (((iocnum = mpt_verify_adapter(iocnumX, &iocp)) < 0) || (iocp == NULL)) { printk(KERN_DEBUG MYNAM "::compat_mpt_command @%d - ioc%d not found!\n", __LINE__, iocnumX); return -ENODEV; } if ((ret = mptctl_syscall_down(iocp, nonblock)) != 0) return ret; dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "compat_mpt_command() called\n", iocp->name)); karg.hdr.iocnum = karg32.hdr.iocnum; karg.hdr.port = karg32.hdr.port; karg.timeout = karg32.timeout; karg.maxReplyBytes = karg32.maxReplyBytes; karg.dataInSize = karg32.dataInSize; karg.dataOutSize = karg32.dataOutSize; karg.maxSenseBytes = karg32.maxSenseBytes; karg.dataSgeOffset = karg32.dataSgeOffset; karg.replyFrameBufPtr = (char __user *)(unsigned long)karg32.replyFrameBufPtr; karg.dataInBufPtr = (char __user *)(unsigned long)karg32.dataInBufPtr; karg.dataOutBufPtr = (char __user *)(unsigned long)karg32.dataOutBufPtr; karg.senseDataPtr = (char __user *)(unsigned long)karg32.senseDataPtr; ret = mptctl_do_mpt_command (iocp, karg, &uarg->MF); mutex_unlock(&iocp->ioctl_cmds.mutex); return ret; }
252
1
static unsigned long __peek_user(struct task_struct *child, addr_t addr) { struct user *dummy = NULL; addr_t offset, tmp; if (addr < (addr_t) &dummy->regs.acrs) { /* * psw and gprs are stored on the stack */ tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr); if (addr == (addr_t) &dummy->regs.psw.mask) /* Remove per bit from user psw. */ tmp &= ~PSW_MASK_PER; } else if (addr < (addr_t) &dummy->regs.orig_gpr2) { /* * access registers are stored in the thread structure */ offset = addr - (addr_t) &dummy->regs.acrs; #ifdef CONFIG_64BIT /* * Very special case: old & broken 64 bit gdb reading * from acrs[15]. Result is a 64 bit value. Read the * 32 bit acrs[15] value and shift it by 32. Sick... */ if (addr == (addr_t) &dummy->regs.acrs[15]) tmp = ((unsigned long) child->thread.acrs[15]) << 32; else #endif tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset); } else if (addr == (addr_t) &dummy->regs.orig_gpr2) { /* * orig_gpr2 is stored on the kernel stack */ tmp = (addr_t) task_pt_regs(child)->orig_gpr2; } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) { /* * floating point regs. are stored in the thread structure */ offset = addr - (addr_t) &dummy->regs.fp_regs; tmp = *(addr_t *)((addr_t) &child->thread.fp_regs + offset); if (addr == (addr_t) &dummy->regs.fp_regs.fpc) tmp &= (unsigned long) FPC_VALID_MASK << (BITS_PER_LONG - 32); } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) { /* * per_info is found in the thread structure */ offset = addr - (addr_t) &dummy->regs.per_info; tmp = *(addr_t *)((addr_t) &child->thread.per_info + offset); } else tmp = 0; return tmp; }
static unsigned long __peek_user(struct task_struct *child, addr_t addr) { struct user *dummy = NULL; addr_t offset, tmp; if (addr < (addr_t) &dummy->regs.acrs) { tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr); if (addr == (addr_t) &dummy->regs.psw.mask) tmp &= ~PSW_MASK_PER; } else if (addr < (addr_t) &dummy->regs.orig_gpr2) { offset = addr - (addr_t) &dummy->regs.acrs; #ifdef CONFIG_64BIT if (addr == (addr_t) &dummy->regs.acrs[15]) tmp = ((unsigned long) child->thread.acrs[15]) << 32; else #endif tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset); } else if (addr == (addr_t) &dummy->regs.orig_gpr2) { tmp = (addr_t) task_pt_regs(child)->orig_gpr2; } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) { offset = addr - (addr_t) &dummy->regs.fp_regs; tmp = *(addr_t *)((addr_t) &child->thread.fp_regs + offset); if (addr == (addr_t) &dummy->regs.fp_regs.fpc) tmp &= (unsigned long) FPC_VALID_MASK << (BITS_PER_LONG - 32); } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) { offset = addr - (addr_t) &dummy->regs.per_info; tmp = *(addr_t *)((addr_t) &child->thread.per_info + offset); } else tmp = 0; return tmp; }
254
1
SwsContext *sws_alloc_context(void) { SwsContext *c = av_mallocz(sizeof(SwsContext)); c->av_class = &sws_context_class; av_opt_set_defaults(c); return c; }
SwsContext *sws_alloc_context(void) { SwsContext *c = av_mallocz(sizeof(SwsContext)); c->av_class = &sws_context_class; av_opt_set_defaults(c); return c; }
255
1
compat_mpt_command(struct file *filp, unsigned int cmd, unsigned long arg) { struct mpt_ioctl_command32 karg32; struct mpt_ioctl_command32 __user *uarg = (struct mpt_ioctl_command32 __user *) arg; struct mpt_ioctl_command karg; MPT_ADAPTER *iocp = NULL; int iocnum, iocnumX; int nonblock = (filp->f_flags & O_NONBLOCK); int ret; if (copy_from_user(&karg32, (char __user *)arg, sizeof(karg32))) return -EFAULT; /* Verify intended MPT adapter */ iocnumX = karg32.hdr.iocnum & 0xFF; if (((iocnum = mpt_verify_adapter(iocnumX, &iocp)) < 0) || (iocp == NULL)) { printk(KERN_DEBUG MYNAM "::compat_mpt_command @%d - ioc%d not found!\n", __LINE__, iocnumX); return -ENODEV; } if ((ret = mptctl_syscall_down(iocp, nonblock)) != 0) return ret; dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "compat_mpt_command() called\n", iocp->name)); /* Copy data to karg */ karg.hdr.iocnum = karg32.hdr.iocnum; karg.hdr.port = karg32.hdr.port; karg.timeout = karg32.timeout; karg.maxReplyBytes = karg32.maxReplyBytes; karg.dataInSize = karg32.dataInSize; karg.dataOutSize = karg32.dataOutSize; karg.maxSenseBytes = karg32.maxSenseBytes; karg.dataSgeOffset = karg32.dataSgeOffset; karg.replyFrameBufPtr = (char __user *)(unsigned long)karg32.replyFrameBufPtr; karg.dataInBufPtr = (char __user *)(unsigned long)karg32.dataInBufPtr; karg.dataOutBufPtr = (char __user *)(unsigned long)karg32.dataOutBufPtr; karg.senseDataPtr = (char __user *)(unsigned long)karg32.senseDataPtr; /* Pass new structure to do_mpt_command */ ret = mptctl_do_mpt_command (karg, &uarg->MF); mutex_unlock(&iocp->ioctl_cmds.mutex); return ret; }
compat_mpt_command(struct file *filp, unsigned int cmd, unsigned long arg) { struct mpt_ioctl_command32 karg32; struct mpt_ioctl_command32 __user *uarg = (struct mpt_ioctl_command32 __user *) arg; struct mpt_ioctl_command karg; MPT_ADAPTER *iocp = NULL; int iocnum, iocnumX; int nonblock = (filp->f_flags & O_NONBLOCK); int ret; if (copy_from_user(&karg32, (char __user *)arg, sizeof(karg32))) return -EFAULT; iocnumX = karg32.hdr.iocnum & 0xFF; if (((iocnum = mpt_verify_adapter(iocnumX, &iocp)) < 0) || (iocp == NULL)) { printk(KERN_DEBUG MYNAM "::compat_mpt_command @%d - ioc%d not found!\n", __LINE__, iocnumX); return -ENODEV; } if ((ret = mptctl_syscall_down(iocp, nonblock)) != 0) return ret; dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "compat_mpt_command() called\n", iocp->name)); karg.hdr.iocnum = karg32.hdr.iocnum; karg.hdr.port = karg32.hdr.port; karg.timeout = karg32.timeout; karg.maxReplyBytes = karg32.maxReplyBytes; karg.dataInSize = karg32.dataInSize; karg.dataOutSize = karg32.dataOutSize; karg.maxSenseBytes = karg32.maxSenseBytes; karg.dataSgeOffset = karg32.dataSgeOffset; karg.replyFrameBufPtr = (char __user *)(unsigned long)karg32.replyFrameBufPtr; karg.dataInBufPtr = (char __user *)(unsigned long)karg32.dataInBufPtr; karg.dataOutBufPtr = (char __user *)(unsigned long)karg32.dataOutBufPtr; karg.senseDataPtr = (char __user *)(unsigned long)karg32.senseDataPtr; ret = mptctl_do_mpt_command (karg, &uarg->MF); mutex_unlock(&iocp->ioctl_cmds.mutex); return ret; }
256
0
void flush_thread_cache() { mysql_mutex_lock(&LOCK_thread_count); kill_cached_threads++; /* Tell cached threads to die, then wake them until the cache is empty */ while (cached_thread_count) { mysql_cond_broadcast(&COND_thread_cache); mysql_cond_wait(&COND_flush_thread_cache, &LOCK_thread_count); } kill_cached_threads--; mysql_mutex_unlock(&LOCK_thread_count); }
void flush_thread_cache() { mysql_mutex_lock(&LOCK_thread_count); kill_cached_threads++; while (cached_thread_count) { mysql_cond_broadcast(&COND_thread_cache); mysql_cond_wait(&COND_flush_thread_cache, &LOCK_thread_count); } kill_cached_threads--; mysql_mutex_unlock(&LOCK_thread_count); }
257
1
static i2c_interface *musicpal_audio_init(qemu_irq irq) { AudioState *audio; musicpal_audio_state *s; i2c_interface *i2c; int iomemtype; audio = AUD_init(); if (!audio) { AUD_log(audio_name, "No audio state\n"); return NULL; } s = qemu_mallocz(sizeof(musicpal_audio_state)); s->irq = irq; /* Attach the WM8750 codec to a fresh I2C bus */ i2c = qemu_mallocz(sizeof(i2c_interface)); i2c->bus = i2c_init_bus(); i2c->current_addr = -1; s->wm = wm8750_init(i2c->bus, audio); if (!s->wm) return NULL; i2c_set_slave_address(s->wm, MP_WM_ADDR); wm8750_data_req_set(s->wm, audio_callback, s); /* Map the audio registers and hook up device reset */ iomemtype = cpu_register_io_memory(0, musicpal_audio_readfn, musicpal_audio_writefn, s); cpu_register_physical_memory(MP_AUDIO_BASE, MP_AUDIO_SIZE, iomemtype); qemu_register_reset(musicpal_audio_reset, s); return i2c; }
static i2c_interface *musicpal_audio_init(qemu_irq irq) { AudioState *audio; musicpal_audio_state *s; i2c_interface *i2c; int iomemtype; audio = AUD_init(); if (!audio) { AUD_log(audio_name, "No audio state\n"); return NULL; } s = qemu_mallocz(sizeof(musicpal_audio_state)); s->irq = irq; i2c = qemu_mallocz(sizeof(i2c_interface)); i2c->bus = i2c_init_bus(); i2c->current_addr = -1; s->wm = wm8750_init(i2c->bus, audio); if (!s->wm) return NULL; i2c_set_slave_address(s->wm, MP_WM_ADDR); wm8750_data_req_set(s->wm, audio_callback, s); iomemtype = cpu_register_io_memory(0, musicpal_audio_readfn, musicpal_audio_writefn, s); cpu_register_physical_memory(MP_AUDIO_BASE, MP_AUDIO_SIZE, iomemtype); qemu_register_reset(musicpal_audio_reset, s); return i2c; }
259
1
static int __poke_user(struct task_struct *child, addr_t addr, addr_t data) { struct user *dummy = NULL; addr_t offset; if (addr < (addr_t) &dummy->regs.acrs) { /* * psw and gprs are stored on the stack */ if (addr == (addr_t) &dummy->regs.psw.mask && #ifdef CONFIG_COMPAT data != PSW_MASK_MERGE(psw_user32_bits, data) && #endif data != PSW_MASK_MERGE(psw_user_bits, data)) /* Invalid psw mask. */ return -EINVAL; #ifndef CONFIG_64BIT if (addr == (addr_t) &dummy->regs.psw.addr) /* I'd like to reject addresses without the high order bit but older gdb's rely on it */ data |= PSW_ADDR_AMODE; #endif *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data; } else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) { /* * access registers are stored in the thread structure */ offset = addr - (addr_t) &dummy->regs.acrs; #ifdef CONFIG_64BIT /* * Very special case: old & broken 64 bit gdb writing * to acrs[15] with a 64 bit value. Ignore the lower * half of the value and write the upper 32 bit to * acrs[15]. Sick... */ if (addr == (addr_t) &dummy->regs.acrs[15]) child->thread.acrs[15] = (unsigned int) (data >> 32); else #endif *(addr_t *)((addr_t) &child->thread.acrs + offset) = data; } else if (addr == (addr_t) &dummy->regs.orig_gpr2) { /* * orig_gpr2 is stored on the kernel stack */ task_pt_regs(child)->orig_gpr2 = data; } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) { /* * floating point regs. are stored in the thread structure */ if (addr == (addr_t) &dummy->regs.fp_regs.fpc && (data & ~((unsigned long) FPC_VALID_MASK << (BITS_PER_LONG - 32))) != 0) return -EINVAL; offset = addr - (addr_t) &dummy->regs.fp_regs; *(addr_t *)((addr_t) &child->thread.fp_regs + offset) = data; } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) { /* * per_info is found in the thread structure */ offset = addr - (addr_t) &dummy->regs.per_info; *(addr_t *)((addr_t) &child->thread.per_info + offset) = data; } FixPerRegisters(child); return 0; }
static int __poke_user(struct task_struct *child, addr_t addr, addr_t data) { struct user *dummy = NULL; addr_t offset; if (addr < (addr_t) &dummy->regs.acrs) { if (addr == (addr_t) &dummy->regs.psw.mask && #ifdef CONFIG_COMPAT data != PSW_MASK_MERGE(psw_user32_bits, data) && #endif data != PSW_MASK_MERGE(psw_user_bits, data)) return -EINVAL; #ifndef CONFIG_64BIT if (addr == (addr_t) &dummy->regs.psw.addr) data |= PSW_ADDR_AMODE; #endif *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data; } else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) { offset = addr - (addr_t) &dummy->regs.acrs; #ifdef CONFIG_64BIT if (addr == (addr_t) &dummy->regs.acrs[15]) child->thread.acrs[15] = (unsigned int) (data >> 32); else #endif *(addr_t *)((addr_t) &child->thread.acrs + offset) = data; } else if (addr == (addr_t) &dummy->regs.orig_gpr2) { task_pt_regs(child)->orig_gpr2 = data; } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) { if (addr == (addr_t) &dummy->regs.fp_regs.fpc && (data & ~((unsigned long) FPC_VALID_MASK << (BITS_PER_LONG - 32))) != 0) return -EINVAL; offset = addr - (addr_t) &dummy->regs.fp_regs; *(addr_t *)((addr_t) &child->thread.fp_regs + offset) = data; } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) { offset = addr - (addr_t) &dummy->regs.per_info; *(addr_t *)((addr_t) &child->thread.per_info + offset) = data; } FixPerRegisters(child); return 0; }
261
0
int vp8_denoiser_allocate(VP8_DENOISER *denoiser, int width, int height, int num_mb_rows, int num_mb_cols, int mode) { int i; assert(denoiser); denoiser->num_mb_cols = num_mb_cols; /* Allocate and zero one running-average buffer per reference frame */ for (i = 0; i < MAX_REF_FRAMES; i++) { denoiser->yv12_running_avg[i].flags = 0; if (vp8_yv12_alloc_frame_buffer(&(denoiser->yv12_running_avg[i]), width, height, VP8BORDERINPIXELS) < 0) { vp8_denoiser_free(denoiser); return 1; } vpx_memset(denoiser->yv12_running_avg[i].buffer_alloc, 0, denoiser->yv12_running_avg[i].frame_size); } denoiser->yv12_mc_running_avg.flags = 0; if (vp8_yv12_alloc_frame_buffer(&(denoiser->yv12_mc_running_avg), width, height, VP8BORDERINPIXELS) < 0) { vp8_denoiser_free(denoiser); return 1; } vpx_memset(denoiser->yv12_mc_running_avg.buffer_alloc, 0, denoiser->yv12_mc_running_avg.frame_size); if (vp8_yv12_alloc_frame_buffer(&denoiser->yv12_last_source, width, height, VP8BORDERINPIXELS) < 0) { vp8_denoiser_free(denoiser); return 1; } vpx_memset(denoiser->yv12_last_source.buffer_alloc, 0, denoiser->yv12_last_source.frame_size); /* Per-macroblock denoiser state, zero-initialized */ denoiser->denoise_state = vpx_calloc((num_mb_rows * num_mb_cols), 1); vpx_memset(denoiser->denoise_state, 0, (num_mb_rows * num_mb_cols)); vp8_denoiser_set_parameters(denoiser, mode); denoiser->nmse_source_diff = 0; denoiser->nmse_source_diff_count = 0; denoiser->qp_avg = 0; denoiser->qp_threshold_up = 80; denoiser->qp_threshold_down = 128; /* Default thresholds; raised for larger resolutions */ denoiser->bitrate_threshold = 200000; denoiser->threshold_aggressive_mode = 35; if (width * height > 640 * 480) { denoiser->bitrate_threshold = 500000; denoiser->threshold_aggressive_mode = 100; } else if (width * height > 960 * 540) { denoiser->bitrate_threshold = 800000; denoiser->threshold_aggressive_mode = 150; } else if (width * height > 1280 * 720) { denoiser->bitrate_threshold = 2000000; denoiser->threshold_aggressive_mode = 1400; } return 0; }
int vp8_denoiser_allocate(VP8_DENOISER *denoiser, int width, int height, int num_mb_rows, int num_mb_cols, int mode) { int i; assert(denoiser); denoiser->num_mb_cols = num_mb_cols; for (i = 0; i < MAX_REF_FRAMES; i++) { denoiser->yv12_running_avg[i].flags = 0; if (vp8_yv12_alloc_frame_buffer(&(denoiser->yv12_running_avg[i]), width, height, VP8BORDERINPIXELS) < 0) { vp8_denoiser_free(denoiser); return 1; } vpx_memset(denoiser->yv12_running_avg[i].buffer_alloc, 0, denoiser->yv12_running_avg[i].frame_size); } denoiser->yv12_mc_running_avg.flags = 0; if (vp8_yv12_alloc_frame_buffer(&(denoiser->yv12_mc_running_avg), width, height, VP8BORDERINPIXELS) < 0) { vp8_denoiser_free(denoiser); return 1; } vpx_memset(denoiser->yv12_mc_running_avg.buffer_alloc, 0, denoiser->yv12_mc_running_avg.frame_size); if (vp8_yv12_alloc_frame_buffer(&denoiser->yv12_last_source, width, height, VP8BORDERINPIXELS) < 0) { vp8_denoiser_free(denoiser); return 1; } vpx_memset(denoiser->yv12_last_source.buffer_alloc, 0, denoiser->yv12_last_source.frame_size); denoiser->denoise_state = vpx_calloc((num_mb_rows * num_mb_cols), 1); vpx_memset(denoiser->denoise_state, 0, (num_mb_rows * num_mb_cols)); vp8_denoiser_set_parameters(denoiser, mode); denoiser->nmse_source_diff = 0; denoiser->nmse_source_diff_count = 0; denoiser->qp_avg = 0; denoiser->qp_threshold_up = 80; denoiser->qp_threshold_down = 128; denoiser->bitrate_threshold = 200000; denoiser->threshold_aggressive_mode = 35; if (width * height > 640 * 480) { denoiser->bitrate_threshold = 500000; denoiser->threshold_aggressive_mode = 100; } else if (width * height > 960 * 540) { denoiser->bitrate_threshold = 800000; denoiser->threshold_aggressive_mode = 150; } else if (width * height > 1280 * 720) { denoiser->bitrate_threshold = 2000000; denoiser->threshold_aggressive_mode = 1400; } return 0; }
262
0
compat_mptfwxfer_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct mpt_fw_xfer32 kfw32; struct mpt_fw_xfer kfw; MPT_ADAPTER *iocp = NULL; int iocnum, iocnumX; int nonblock = (filp->f_flags & O_NONBLOCK); int ret; if (copy_from_user(&kfw32, (char __user *)arg, sizeof(kfw32))) return -EFAULT; /* Verify intended MPT adapter */ iocnumX = kfw32.iocnum & 0xFF; if (((iocnum = mpt_verify_adapter(iocnumX, &iocp)) < 0) || (iocp == NULL)) { printk(KERN_DEBUG MYNAM "::compat_mptfwxfer_ioctl @%d - ioc%d not found!\n", __LINE__, iocnumX); return -ENODEV; } if ((ret = mptctl_syscall_down(iocp, nonblock)) != 0) return ret; dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "compat_mptfwxfer_ioctl() called\n", iocp->name)); kfw.iocnum = iocnum; kfw.fwlen = kfw32.fwlen; kfw.bufp = compat_ptr(kfw32.bufp); ret = mptctl_do_fw_download(iocp, kfw.bufp, kfw.fwlen); mutex_unlock(&iocp->ioctl_cmds.mutex); return ret; }
compat_mptfwxfer_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct mpt_fw_xfer32 kfw32; struct mpt_fw_xfer kfw; MPT_ADAPTER *iocp = NULL; int iocnum, iocnumX; int nonblock = (filp->f_flags & O_NONBLOCK); int ret; if (copy_from_user(&kfw32, (char __user *)arg, sizeof(kfw32))) return -EFAULT; iocnumX = kfw32.iocnum & 0xFF; if (((iocnum = mpt_verify_adapter(iocnumX, &iocp)) < 0) || (iocp == NULL)) { printk(KERN_DEBUG MYNAM "::compat_mptfwxfer_ioctl @%d - ioc%d not found!\n", __LINE__, iocnumX); return -ENODEV; } if ((ret = mptctl_syscall_down(iocp, nonblock)) != 0) return ret; dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "compat_mptfwxfer_ioctl() called\n", iocp->name)); kfw.iocnum = iocnum; kfw.fwlen = kfw32.fwlen; kfw.bufp = compat_ptr(kfw32.bufp); ret = mptctl_do_fw_download(iocp, kfw.bufp, kfw.fwlen); mutex_unlock(&iocp->ioctl_cmds.mutex); return ret; }
264