Columns (each row below is flattened in this order: target, func, idx):
target: int64, values 0–1
func: string, lengths 0–484k
idx: int64, values 1–378k
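Each row pairs a 0/1 label (target) with one flattened C function (func) and a numeric row id (idx). A minimal sketch of consuming a table with this schema, assuming it is serialized as JSON lines with exactly these three column names; the file name rows.jsonl is hypothetical:

import pandas as pd

# Hypothetical input: any source that yields the three columns
# (target, func, idx) from the schema above works the same way.
df = pd.read_json("rows.jsonl", lines=True)

# target is the 0/1 label, func holds the flattened C source, idx is the row id.
flagged = df[df["target"] == 1]
print(len(flagged), "of", len(df), "rows carry label 1")

# Look up a single function by its idx value (247 is the first row below).
row = df.loc[df["idx"] == 247].iloc[0]
print(row["target"], row["func"][:60])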
target: 1
static int em_sysenter(struct x86_emulate_ctxt *ctxt) { const struct x86_emulate_ops *ops = ctxt->ops; struct desc_struct cs, ss; u64 msr_data; u16 cs_sel, ss_sel; u64 efer = 0; ops->get_msr(ctxt, MSR_EFER, &efer); /* inject #GP if in real mode */ if (ctxt->mode == X86EMUL_MODE_REAL) return emulate_gp(ctxt, 0); /* * Not recognized on AMD in compat mode (but is recognized in legacy * mode). */ if ((ctxt->mode == X86EMUL_MODE_PROT32) && (efer & EFER_LMA) && !vendor_intel(ctxt)) return emulate_ud(ctxt); /* sysenter/sysexit have not been tested in 64bit mode. */ if (ctxt->mode == X86EMUL_MODE_PROT64) return X86EMUL_UNHANDLEABLE; setup_syscalls_segments(ctxt, &cs, &ss); ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data); switch (ctxt->mode) { case X86EMUL_MODE_PROT32: if ((msr_data & 0xfffc) == 0x0) return emulate_gp(ctxt, 0); break; case X86EMUL_MODE_PROT64: if (msr_data == 0x0) return emulate_gp(ctxt, 0); break; default: break; } ctxt->eflags &= ~(EFLG_VM | EFLG_IF); cs_sel = (u16)msr_data; cs_sel &= ~SELECTOR_RPL_MASK; ss_sel = cs_sel + 8; ss_sel &= ~SELECTOR_RPL_MASK; if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) { cs.d = 0; cs.l = 1; } ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS); ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data); ctxt->_eip = msr_data; ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data); *reg_write(ctxt, VCPU_REGS_RSP) = msr_data; return X86EMUL_CONTINUE; }
idx: 247
target: 1
__mptctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { mpt_ioctl_header __user *uhdr = (void __user *) arg; mpt_ioctl_header khdr; int iocnum; unsigned iocnumX; int nonblock = (file->f_flags & O_NONBLOCK); int ret; MPT_ADAPTER *iocp = NULL; if (copy_from_user(&khdr, uhdr, sizeof(khdr))) { printk(KERN_ERR MYNAM "%s::mptctl_ioctl() @%d - " "Unable to copy mpt_ioctl_header data @ %p\n", __FILE__, __LINE__, uhdr); return -EFAULT; } ret = -ENXIO; /* (-6) No such device or address */ /* Verify intended MPT adapter - set iocnum and the adapter * pointer (iocp) */ iocnumX = khdr.iocnum & 0xFF; if (((iocnum = mpt_verify_adapter(iocnumX, &iocp)) < 0) || (iocp == NULL)) return -ENODEV; if (!iocp->active) { printk(KERN_DEBUG MYNAM "%s::mptctl_ioctl() @%d - Controller disabled.\n", __FILE__, __LINE__); return -EFAULT; } /* Handle those commands that are just returning * information stored in the driver. * These commands should never time out and are unaffected * by TM and FW reloads. */ if ((cmd & ~IOCSIZE_MASK) == (MPTIOCINFO & ~IOCSIZE_MASK)) { return mptctl_getiocinfo(arg, _IOC_SIZE(cmd)); } else if (cmd == MPTTARGETINFO) { return mptctl_gettargetinfo(arg); } else if (cmd == MPTTEST) { return mptctl_readtest(arg); } else if (cmd == MPTEVENTQUERY) { return mptctl_eventquery(arg); } else if (cmd == MPTEVENTENABLE) { return mptctl_eventenable(arg); } else if (cmd == MPTEVENTREPORT) { return mptctl_eventreport(arg); } else if (cmd == MPTFWREPLACE) { return mptctl_replace_fw(arg); } /* All of these commands require an interrupt or * are unknown/illegal. */ if ((ret = mptctl_syscall_down(iocp, nonblock)) != 0) return ret; if (cmd == MPTFWDOWNLOAD) ret = mptctl_fw_download(arg); else if (cmd == MPTCOMMAND) ret = mptctl_mpt_command(arg); else if (cmd == MPTHARDRESET) ret = mptctl_do_reset(arg); else if ((cmd & ~IOCSIZE_MASK) == (HP_GETHOSTINFO & ~IOCSIZE_MASK)) ret = mptctl_hp_hostinfo(arg, _IOC_SIZE(cmd)); else if (cmd == HP_GETTARGETINFO) ret = mptctl_hp_targetinfo(arg); else ret = -EINVAL; mutex_unlock(&iocp->ioctl_cmds.mutex); return ret; }
idx: 250
target: 0
static void kq_sighandler(int sig) { }
idx: 251
target: 0
__mptctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { mpt_ioctl_header __user *uhdr = (void __user *) arg; mpt_ioctl_header khdr; int iocnum; unsigned iocnumX; int nonblock = (file->f_flags & O_NONBLOCK); int ret; MPT_ADAPTER *iocp = NULL; if (copy_from_user(&khdr, uhdr, sizeof(khdr))) { printk(KERN_ERR MYNAM "%s::mptctl_ioctl() @%d - " "Unable to copy mpt_ioctl_header data @ %p\n", __FILE__, __LINE__, uhdr); return -EFAULT; } ret = -ENXIO; /* (-6) No such device or address */ /* Verify intended MPT adapter - set iocnum and the adapter * pointer (iocp) */ iocnumX = khdr.iocnum & 0xFF; if (((iocnum = mpt_verify_adapter(iocnumX, &iocp)) < 0) || (iocp == NULL)) return -ENODEV; if (!iocp->active) { printk(KERN_DEBUG MYNAM "%s::mptctl_ioctl() @%d - Controller disabled.\n", __FILE__, __LINE__); return -EFAULT; } /* Handle those commands that are just returning * information stored in the driver. * These commands should never time out and are unaffected * by TM and FW reloads. */ if ((cmd & ~IOCSIZE_MASK) == (MPTIOCINFO & ~IOCSIZE_MASK)) { return mptctl_getiocinfo(iocp, arg, _IOC_SIZE(cmd)); } else if (cmd == MPTTARGETINFO) { return mptctl_gettargetinfo(iocp, arg); } else if (cmd == MPTTEST) { return mptctl_readtest(iocp, arg); } else if (cmd == MPTEVENTQUERY) { return mptctl_eventquery(iocp, arg); } else if (cmd == MPTEVENTENABLE) { return mptctl_eventenable(iocp, arg); } else if (cmd == MPTEVENTREPORT) { return mptctl_eventreport(iocp, arg); } else if (cmd == MPTFWREPLACE) { return mptctl_replace_fw(iocp, arg); } /* All of these commands require an interrupt or * are unknown/illegal. */ if ((ret = mptctl_syscall_down(iocp, nonblock)) != 0) return ret; if (cmd == MPTFWDOWNLOAD) ret = mptctl_fw_download(iocp, arg); else if (cmd == MPTCOMMAND) ret = mptctl_mpt_command(iocp, arg); else if (cmd == MPTHARDRESET) ret = mptctl_do_reset(iocp, arg); else if ((cmd & ~IOCSIZE_MASK) == (HP_GETHOSTINFO & ~IOCSIZE_MASK)) ret = mptctl_hp_hostinfo(iocp, arg, _IOC_SIZE(cmd)); else if (cmd == HP_GETTARGETINFO) ret = mptctl_hp_targetinfo(iocp, arg); else ret = -EINVAL; mutex_unlock(&iocp->ioctl_cmds.mutex); return ret; }
idx: 254
target: 1
int ia32_setup_frame(int sig, struct k_sigaction *ka, compat_sigset_t *set, struct pt_regs *regs) { struct sigframe __user *frame; void __user *restorer; int err = 0; /* copy_to_user optimizes that into a single 8 byte store */ static const struct { u16 poplmovl; u32 val; u16 int80; u16 pad; } __attribute__((packed)) code = { 0xb858, /* popl %eax ; movl $...,%eax */ __NR_ia32_sigreturn, 0x80cd, /* int $0x80 */ 0, }; frame = get_sigframe(ka, regs, sizeof(*frame)); if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) goto give_sigsegv; err |= __put_user(sig, &frame->sig); if (err) goto give_sigsegv; err |= ia32_setup_sigcontext(&frame->sc, &frame->fpstate, regs, set->sig[0]); if (err) goto give_sigsegv; if (_COMPAT_NSIG_WORDS > 1) { err |= __copy_to_user(frame->extramask, &set->sig[1], sizeof(frame->extramask)); if (err) goto give_sigsegv; } if (ka->sa.sa_flags & SA_RESTORER) { restorer = ka->sa.sa_restorer; } else { /* Return stub is in 32bit vsyscall page */ if (current->binfmt->hasvdso) restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn); else restorer = &frame->retcode; } err |= __put_user(ptr_to_compat(restorer), &frame->pretcode); /* * These are actually not used anymore, but left because some * gdb versions depend on them as a marker. */ err |= __copy_to_user(frame->retcode, &code, 8); if (err) goto give_sigsegv; /* Set up registers for signal handler */ regs->sp = (unsigned long) frame; regs->ip = (unsigned long) ka->sa.sa_handler; /* Make -mregparm=3 work */ regs->ax = sig; regs->dx = 0; regs->cx = 0; asm volatile("movl %0,%%ds" :: "r" (__USER32_DS)); asm volatile("movl %0,%%es" :: "r" (__USER32_DS)); regs->cs = __USER32_CS; regs->ss = __USER32_DS; set_fs(USER_DS); regs->flags &= ~X86_EFLAGS_TF; if (test_thread_flag(TIF_SINGLESTEP)) ptrace_notify(SIGTRAP); #if DEBUG_SIG printk(KERN_DEBUG "SIG deliver (%s:%d): sp=%p pc=%lx ra=%u\n", current->comm, current->pid, frame, regs->ip, frame->pretcode); #endif return 0; give_sigsegv: force_sigsegv(sig, current); return -EFAULT; }
idx: 257
target: 0
unsigned int vp9_variance##W##x##H##_c(const uint8_t *a, int a_stride, const uint8_t *b, int b_stride, unsigned int *sse) { int sum; variance(a, a_stride, b, b_stride, W, H, sse, &sum); return *sse - (((int64_t)sum * sum) / (W * H)); } #define SUBPIX_VAR(W, H) unsigned int vp9_sub_pixel_variance##W##x##H##_c(const uint8_t *src, int src_stride, int xoffset, int yoffset, const uint8_t *dst, int dst_stride, unsigned int *sse) { uint16_t fdata3[(H + 1) * W]; uint8_t temp2[H * W]; var_filter_block2d_bil_first_pass(src, fdata3, src_stride, 1, H + 1, W, BILINEAR_FILTERS_2TAP(xoffset)); var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, BILINEAR_FILTERS_2TAP(yoffset)); return vp9_variance##W##x##H##_c(temp2, W, dst, dst_stride, sse); } #define SUBPIX_AVG_VAR(W, H) unsigned int vp9_sub_pixel_avg_variance##W##x##H##_c(const uint8_t *src, int src_stride, int xoffset, int yoffset, const uint8_t *dst, int dst_stride, unsigned int *sse, const uint8_t *second_pred) { uint16_t fdata3[(H + 1) * W]; uint8_t temp2[H * W]; DECLARE_ALIGNED_ARRAY(16, uint8_t, temp3, H * W); var_filter_block2d_bil_first_pass(src, fdata3, src_stride, 1, H + 1, W, BILINEAR_FILTERS_2TAP(xoffset)); var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, BILINEAR_FILTERS_2TAP(yoffset)); vp9_comp_avg_pred(temp3, second_pred, W, H, temp2, W); return vp9_variance##W##x##H##_c(temp3, W, dst, dst_stride, sse); } void vp9_get16x16var_c(const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum) { variance(src_ptr, source_stride, ref_ptr, ref_stride, 16, 16, sse, sum); } void vp9_get8x8var_c(const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum) { variance(src_ptr, source_stride, ref_ptr, ref_stride, 8, 8, sse, sum); } unsigned int vp9_mse16x16_c(const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, unsigned int *sse) { int sum; variance(src, src_stride, ref, ref_stride, 16, 16, sse, &sum); return *sse; } unsigned int vp9_mse16x8_c(const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, unsigned int *sse) { int sum; variance(src, src_stride, ref, ref_stride, 16, 8, sse, &sum); return *sse; } unsigned int vp9_mse8x16_c(const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, unsigned int *sse) { int sum; variance(src, src_stride, ref, ref_stride, 8, 16, sse, &sum); return *sse; } unsigned int vp9_mse8x8_c(const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, unsigned int *sse) { int sum; variance(src, src_stride, ref, ref_stride, 8, 8, sse, &sum); return *sse; } VAR(4, 4) SUBPIX_VAR(4, 4) SUBPIX_AVG_VAR(4, 4) VAR(4, 8) SUBPIX_VAR(4, 8) SUBPIX_AVG_VAR(4, 8) VAR(8, 4) SUBPIX_VAR(8, 4) SUBPIX_AVG_VAR(8, 4) VAR(8, 8) SUBPIX_VAR(8, 8) SUBPIX_AVG_VAR(8, 8) VAR(8, 16) SUBPIX_VAR(8, 16) SUBPIX_AVG_VAR(8, 16) VAR(16, 8)
idx: 259
target: 0
compat_mpt_command(struct file *filp, unsigned int cmd, unsigned long arg) { struct mpt_ioctl_command32 karg32; struct mpt_ioctl_command32 __user *uarg = (struct mpt_ioctl_command32 __user *) arg; struct mpt_ioctl_command karg; MPT_ADAPTER *iocp = NULL; int iocnum, iocnumX; int nonblock = (filp->f_flags & O_NONBLOCK); int ret; if (copy_from_user(&karg32, (char __user *)arg, sizeof(karg32))) return -EFAULT; /* Verify intended MPT adapter */ iocnumX = karg32.hdr.iocnum & 0xFF; if (((iocnum = mpt_verify_adapter(iocnumX, &iocp)) < 0) || (iocp == NULL)) { printk(KERN_DEBUG MYNAM "::compat_mpt_command @%d - ioc%d not found!\n", __LINE__, iocnumX); return -ENODEV; } if ((ret = mptctl_syscall_down(iocp, nonblock)) != 0) return ret; dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "compat_mpt_command() called\n", iocp->name)); /* Copy data to karg */ karg.hdr.iocnum = karg32.hdr.iocnum; karg.hdr.port = karg32.hdr.port; karg.timeout = karg32.timeout; karg.maxReplyBytes = karg32.maxReplyBytes; karg.dataInSize = karg32.dataInSize; karg.dataOutSize = karg32.dataOutSize; karg.maxSenseBytes = karg32.maxSenseBytes; karg.dataSgeOffset = karg32.dataSgeOffset; karg.replyFrameBufPtr = (char __user *)(unsigned long)karg32.replyFrameBufPtr; karg.dataInBufPtr = (char __user *)(unsigned long)karg32.dataInBufPtr; karg.dataOutBufPtr = (char __user *)(unsigned long)karg32.dataOutBufPtr; karg.senseDataPtr = (char __user *)(unsigned long)karg32.senseDataPtr; /* Pass new structure to do_mpt_command */ ret = mptctl_do_mpt_command (iocp, karg, &uarg->MF); mutex_unlock(&iocp->ioctl_cmds.mutex); return ret; }
idx: 260
target: 1
static unsigned long __peek_user(struct task_struct *child, addr_t addr) { struct user *dummy = NULL; addr_t offset, tmp; if (addr < (addr_t) &dummy->regs.acrs) { /* * psw and gprs are stored on the stack */ tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr); if (addr == (addr_t) &dummy->regs.psw.mask) /* Remove per bit from user psw. */ tmp &= ~PSW_MASK_PER; } else if (addr < (addr_t) &dummy->regs.orig_gpr2) { /* * access registers are stored in the thread structure */ offset = addr - (addr_t) &dummy->regs.acrs; #ifdef CONFIG_64BIT /* * Very special case: old & broken 64 bit gdb reading * from acrs[15]. Result is a 64 bit value. Read the * 32 bit acrs[15] value and shift it by 32. Sick... */ if (addr == (addr_t) &dummy->regs.acrs[15]) tmp = ((unsigned long) child->thread.acrs[15]) << 32; else #endif tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset); } else if (addr == (addr_t) &dummy->regs.orig_gpr2) { /* * orig_gpr2 is stored on the kernel stack */ tmp = (addr_t) task_pt_regs(child)->orig_gpr2; } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) { /* * floating point regs. are stored in the thread structure */ offset = addr - (addr_t) &dummy->regs.fp_regs; tmp = *(addr_t *)((addr_t) &child->thread.fp_regs + offset); if (addr == (addr_t) &dummy->regs.fp_regs.fpc) tmp &= (unsigned long) FPC_VALID_MASK << (BITS_PER_LONG - 32); } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) { /* * per_info is found in the thread structure */ offset = addr - (addr_t) &dummy->regs.per_info; tmp = *(addr_t *)((addr_t) &child->thread.per_info + offset); } else tmp = 0; return tmp; }
idx: 261
target: 1
static int __poke_user_compat(struct task_struct *child, addr_t addr, addr_t data) { struct user32 *dummy32 = NULL; per_struct32 *dummy_per32 = NULL; __u32 tmp = (__u32) data; addr_t offset; if (addr < (addr_t) &dummy32->regs.acrs) { /* * psw, gprs, acrs and orig_gpr2 are stored on the stack */ if (addr == (addr_t) &dummy32->regs.psw.mask) { /* Build a 64 bit psw mask from 31 bit mask. */ if (tmp != PSW32_MASK_MERGE(psw32_user_bits, tmp)) /* Invalid psw mask. */ return -EINVAL; task_pt_regs(child)->psw.mask = PSW_MASK_MERGE(psw_user32_bits, (__u64) tmp << 32); } else if (addr == (addr_t) &dummy32->regs.psw.addr) { /* Build a 64 bit psw address from 31 bit address. */ task_pt_regs(child)->psw.addr = (__u64) tmp & PSW32_ADDR_INSN; } else { /* gpr 0-15 */ *(__u32*)((addr_t) &task_pt_regs(child)->psw + addr*2 + 4) = tmp; } } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) { /* * access registers are stored in the thread structure */ offset = addr - (addr_t) &dummy32->regs.acrs; *(__u32*)((addr_t) &child->thread.acrs + offset) = tmp; } else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) { /* * orig_gpr2 is stored on the kernel stack */ *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4) = tmp; } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) { /* * floating point regs. are stored in the thread structure */ if (addr == (addr_t) &dummy32->regs.fp_regs.fpc && (tmp & ~FPC_VALID_MASK) != 0) /* Invalid floating point control. */ return -EINVAL; offset = addr - (addr_t) &dummy32->regs.fp_regs; *(__u32 *)((addr_t) &child->thread.fp_regs + offset) = tmp; } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) { /* * per_info is found in the thread structure. */ offset = addr - (addr_t) &dummy32->regs.per_info; /* * This is magic. See per_struct and per_struct32. * By incident the offsets in per_struct are exactly * twice the offsets in per_struct32 for all fields. * The 8 byte fields need special handling though, * because the second half (bytes 4-7) is needed and * not the first half. */ if ((offset >= (addr_t) &dummy_per32->control_regs && offset < (addr_t) (&dummy_per32->control_regs + 1)) || (offset >= (addr_t) &dummy_per32->starting_addr && offset <= (addr_t) &dummy_per32->ending_addr) || offset == (addr_t) &dummy_per32->lowcore.words.address) offset = offset*2 + 4; else offset = offset*2; *(__u32 *)((addr_t) &child->thread.per_info + offset) = tmp; } FixPerRegisters(child); return 0; }
idx: 262
target: 1
otp_verify(krb5_context context, krb5_data *req_pkt, krb5_kdc_req *request, krb5_enc_tkt_part *enc_tkt_reply, krb5_pa_data *pa, krb5_kdcpreauth_callbacks cb, krb5_kdcpreauth_rock rock, krb5_kdcpreauth_moddata moddata, krb5_kdcpreauth_verify_respond_fn respond, void *arg) { krb5_keyblock *armor_key = NULL; krb5_pa_otp_req *req = NULL; struct request_state *rs; krb5_error_code retval; krb5_data d, plaintext; char *config; enc_tkt_reply->flags |= TKT_FLG_PRE_AUTH; /* Get the FAST armor key. */ armor_key = cb->fast_armor(context, rock); if (armor_key == NULL) { retval = KRB5KDC_ERR_PREAUTH_FAILED; com_err("otp", retval, "No armor key found when verifying padata"); goto error; } /* Decode the request. */ d = make_data(pa->contents, pa->length); retval = decode_krb5_pa_otp_req(&d, &req); if (retval != 0) { com_err("otp", retval, "Unable to decode OTP request"); goto error; } /* Decrypt the nonce from the request. */ retval = decrypt_encdata(context, armor_key, req, &plaintext); if (retval != 0) { com_err("otp", retval, "Unable to decrypt nonce"); goto error; } /* Verify the nonce or timestamp. */ retval = nonce_verify(context, armor_key, &plaintext); if (retval != 0) retval = timestamp_verify(context, &plaintext); krb5_free_data_contents(context, &plaintext); if (retval != 0) { com_err("otp", retval, "Unable to verify nonce or timestamp"); goto error; } /* Create the request state. */ rs = k5alloc(sizeof(struct request_state), &retval); if (rs == NULL) goto error; rs->arg = arg; rs->respond = respond; /* Get the principal's OTP configuration string. */ retval = cb->get_string(context, rock, "otp", &config); if (retval == 0 && config == NULL) retval = KRB5_PREAUTH_FAILED; if (retval != 0) { free(rs); goto error; } /* Send the request. */ otp_state_verify((otp_state *)moddata, cb->event_context(context, rock), request->client, config, req, on_response, rs); cb->free_string(context, rock, config); k5_free_pa_otp_req(context, req); return; error: k5_free_pa_otp_req(context, req); (*respond)(arg, retval, NULL, NULL, NULL); }
idx: 263
target: 1
static i2c_interface *musicpal_audio_init(qemu_irq irq) { AudioState *audio; musicpal_audio_state *s; i2c_interface *i2c; int iomemtype; audio = AUD_init(); if (!audio) { AUD_log(audio_name, "No audio state\n"); return NULL; } s = qemu_mallocz(sizeof(musicpal_audio_state)); s->irq = irq; i2c = qemu_mallocz(sizeof(i2c_interface)); i2c->bus = i2c_init_bus(); i2c->current_addr = -1; s->wm = wm8750_init(i2c->bus, audio); if (!s->wm) return NULL; i2c_set_slave_address(s->wm, MP_WM_ADDR); wm8750_data_req_set(s->wm, audio_callback, s); iomemtype = cpu_register_io_memory(0, musicpal_audio_readfn, musicpal_audio_writefn, s); cpu_register_physical_memory(MP_AUDIO_BASE, MP_AUDIO_SIZE, iomemtype); qemu_register_reset(musicpal_audio_reset, s); return i2c; }
idx: 264
target: 0
void flush_thread_cache() { mysql_mutex_lock(&LOCK_thread_count); kill_cached_threads++; while (cached_thread_count) { mysql_cond_broadcast(&COND_thread_cache); mysql_cond_wait(&COND_flush_thread_cache, &LOCK_thread_count); } kill_cached_threads--; mysql_mutex_unlock(&LOCK_thread_count); }
idx: 265
target: 1
compat_mpt_command(struct file *filp, unsigned int cmd, unsigned long arg) { struct mpt_ioctl_command32 karg32; struct mpt_ioctl_command32 __user *uarg = (struct mpt_ioctl_command32 __user *) arg; struct mpt_ioctl_command karg; MPT_ADAPTER *iocp = NULL; int iocnum, iocnumX; int nonblock = (filp->f_flags & O_NONBLOCK); int ret; if (copy_from_user(&karg32, (char __user *)arg, sizeof(karg32))) return -EFAULT; /* Verify intended MPT adapter */ iocnumX = karg32.hdr.iocnum & 0xFF; if (((iocnum = mpt_verify_adapter(iocnumX, &iocp)) < 0) || (iocp == NULL)) { printk(KERN_DEBUG MYNAM "::compat_mpt_command @%d - ioc%d not found!\n", __LINE__, iocnumX); return -ENODEV; } if ((ret = mptctl_syscall_down(iocp, nonblock)) != 0) return ret; dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "compat_mpt_command() called\n", iocp->name)); /* Copy data to karg */ karg.hdr.iocnum = karg32.hdr.iocnum; karg.hdr.port = karg32.hdr.port; karg.timeout = karg32.timeout; karg.maxReplyBytes = karg32.maxReplyBytes; karg.dataInSize = karg32.dataInSize; karg.dataOutSize = karg32.dataOutSize; karg.maxSenseBytes = karg32.maxSenseBytes; karg.dataSgeOffset = karg32.dataSgeOffset; karg.replyFrameBufPtr = (char __user *)(unsigned long)karg32.replyFrameBufPtr; karg.dataInBufPtr = (char __user *)(unsigned long)karg32.dataInBufPtr; karg.dataOutBufPtr = (char __user *)(unsigned long)karg32.dataOutBufPtr; karg.senseDataPtr = (char __user *)(unsigned long)karg32.senseDataPtr; /* Pass new structure to do_mpt_command */ ret = mptctl_do_mpt_command (karg, &uarg->MF); mutex_unlock(&iocp->ioctl_cmds.mutex); return ret; }
idx: 266
target: 1
void bdrv_error_action(BlockDriverState *bs, BlockErrorAction action, bool is_read, int error) { assert(error >= 0); bdrv_emit_qmp_error_event(bs, QEVENT_BLOCK_IO_ERROR, action, is_read); if (action == BDRV_ACTION_STOP) { vm_stop(RUN_STATE_IO_ERROR); bdrv_iostatus_set_err(bs, error); } }
idx: 267
target: 0
int vp8_denoiser_allocate(VP8_DENOISER *denoiser, int width, int height, int num_mb_rows, int num_mb_cols, int mode) { int i; assert(denoiser); denoiser->num_mb_cols = num_mb_cols; for (i = 0; i < MAX_REF_FRAMES; i++) { denoiser->yv12_running_avg[i].flags = 0; if (vp8_yv12_alloc_frame_buffer(&(denoiser->yv12_running_avg[i]), width, height, VP8BORDERINPIXELS) < 0) { vp8_denoiser_free(denoiser); return 1; } vpx_memset(denoiser->yv12_running_avg[i].buffer_alloc, 0, denoiser->yv12_running_avg[i].frame_size); } denoiser->yv12_mc_running_avg.flags = 0; if (vp8_yv12_alloc_frame_buffer(&(denoiser->yv12_mc_running_avg), width, height, VP8BORDERINPIXELS) < 0) { vp8_denoiser_free(denoiser); return 1; } vpx_memset(denoiser->yv12_mc_running_avg.buffer_alloc, 0, denoiser->yv12_mc_running_avg.frame_size); if (vp8_yv12_alloc_frame_buffer(&denoiser->yv12_last_source, width, height, VP8BORDERINPIXELS) < 0) { vp8_denoiser_free(denoiser); return 1; } vpx_memset(denoiser->yv12_last_source.buffer_alloc, 0, denoiser->yv12_last_source.frame_size); denoiser->denoise_state = vpx_calloc((num_mb_rows * num_mb_cols), 1); vpx_memset(denoiser->denoise_state, 0, (num_mb_rows * num_mb_cols)); vp8_denoiser_set_parameters(denoiser, mode); denoiser->nmse_source_diff = 0; denoiser->nmse_source_diff_count = 0; denoiser->qp_avg = 0; denoiser->qp_threshold_up = 80; denoiser->qp_threshold_down = 128; denoiser->bitrate_threshold = 200000; denoiser->threshold_aggressive_mode = 35; if (width * height > 640 * 480) { denoiser->bitrate_threshold = 500000; denoiser->threshold_aggressive_mode = 100; } else if (width * height > 960 * 540) { denoiser->bitrate_threshold = 800000; denoiser->threshold_aggressive_mode = 150; } else if (width * height > 1280 * 720) { denoiser->bitrate_threshold = 2000000; denoiser->threshold_aggressive_mode = 1400; } return 0; }
idx: 268
target: 1
pkinit_server_verify_padata(krb5_context context, krb5_data *req_pkt, krb5_kdc_req * request, krb5_enc_tkt_part * enc_tkt_reply, krb5_pa_data * data, krb5_kdcpreauth_callbacks cb, krb5_kdcpreauth_rock rock, krb5_kdcpreauth_moddata moddata, krb5_kdcpreauth_verify_respond_fn respond, void *arg) { krb5_error_code retval = 0; krb5_data authp_data = {0, 0, NULL}, krb5_authz = {0, 0, NULL}; krb5_pa_pk_as_req *reqp = NULL; krb5_pa_pk_as_req_draft9 *reqp9 = NULL; krb5_auth_pack *auth_pack = NULL; krb5_auth_pack_draft9 *auth_pack9 = NULL; pkinit_kdc_context plgctx = NULL; pkinit_kdc_req_context reqctx = NULL; krb5_checksum cksum = {0, 0, 0, NULL}; krb5_data *der_req = NULL; int valid_eku = 0, valid_san = 0; krb5_data k5data; int is_signed = 1; krb5_pa_data **e_data = NULL; krb5_kdcpreauth_modreq modreq = NULL; pkiDebug("pkinit_verify_padata: entered!\n"); if (data == NULL || data->length <= 0 || data->contents == NULL) { (*respond)(arg, 0, NULL, NULL, NULL); return; } if (moddata == NULL) { (*respond)(arg, EINVAL, NULL, NULL, NULL); return; } plgctx = pkinit_find_realm_context(context, moddata, request->server); if (plgctx == NULL) { (*respond)(arg, 0, NULL, NULL, NULL); return; } #ifdef DEBUG_ASN1 print_buffer_bin(data->contents, data->length, "/tmp/kdc_as_req"); #endif /* create a per-request context */ retval = pkinit_init_kdc_req_context(context, &reqctx); if (retval) goto cleanup; reqctx->pa_type = data->pa_type; PADATA_TO_KRB5DATA(data, &k5data); switch ((int)data->pa_type) { case KRB5_PADATA_PK_AS_REQ: pkiDebug("processing KRB5_PADATA_PK_AS_REQ\n"); retval = k5int_decode_krb5_pa_pk_as_req(&k5data, &reqp); if (retval) { pkiDebug("decode_krb5_pa_pk_as_req failed\n"); goto cleanup; } #ifdef DEBUG_ASN1 print_buffer_bin(reqp->signedAuthPack.data, reqp->signedAuthPack.length, "/tmp/kdc_signed_data"); #endif retval = cms_signeddata_verify(context, plgctx->cryptoctx, reqctx->cryptoctx, plgctx->idctx, CMS_SIGN_CLIENT, plgctx->opts->require_crl_checking, (unsigned char *) reqp->signedAuthPack.data, reqp->signedAuthPack.length, (unsigned char **)&authp_data.data, &authp_data.length, (unsigned char **)&krb5_authz.data, &krb5_authz.length, &is_signed); break; case KRB5_PADATA_PK_AS_REP_OLD: case KRB5_PADATA_PK_AS_REQ_OLD: pkiDebug("processing KRB5_PADATA_PK_AS_REQ_OLD\n"); retval = k5int_decode_krb5_pa_pk_as_req_draft9(&k5data, &reqp9); if (retval) { pkiDebug("decode_krb5_pa_pk_as_req_draft9 failed\n"); goto cleanup; } #ifdef DEBUG_ASN1 print_buffer_bin(reqp9->signedAuthPack.data, reqp9->signedAuthPack.length, "/tmp/kdc_signed_data_draft9"); #endif retval = cms_signeddata_verify(context, plgctx->cryptoctx, reqctx->cryptoctx, plgctx->idctx, CMS_SIGN_DRAFT9, plgctx->opts->require_crl_checking, (unsigned char *) reqp9->signedAuthPack.data, reqp9->signedAuthPack.length, (unsigned char **)&authp_data.data, &authp_data.length, (unsigned char **)&krb5_authz.data, &krb5_authz.length, NULL); break; default: pkiDebug("unrecognized pa_type = %d\n", data->pa_type); retval = EINVAL; goto cleanup; } if (retval) { pkiDebug("pkcs7_signeddata_verify failed\n"); goto cleanup; } if (is_signed) { retval = verify_client_san(context, plgctx, reqctx, request->client, &valid_san); if (retval) goto cleanup; if (!valid_san) { pkiDebug("%s: did not find an acceptable SAN in user " "certificate\n", __FUNCTION__); retval = KRB5KDC_ERR_CLIENT_NAME_MISMATCH; goto cleanup; } retval = verify_client_eku(context, plgctx, reqctx, &valid_eku); if (retval) goto cleanup; if (!valid_eku) { pkiDebug("%s: did not find an acceptable EKU in user " "certificate\n", __FUNCTION__); retval = KRB5KDC_ERR_INCONSISTENT_KEY_PURPOSE; goto cleanup; } } else { /* !is_signed */ if (!krb5_principal_compare(context, request->client, krb5_anonymous_principal())) { retval = KRB5KDC_ERR_PREAUTH_FAILED; krb5_set_error_message(context, retval, _("Pkinit request not signed, but client " "not anonymous.")); goto cleanup; } } #ifdef DEBUG_ASN1 print_buffer_bin(authp_data.data, authp_data.length, "/tmp/kdc_auth_pack"); #endif OCTETDATA_TO_KRB5DATA(&authp_data, &k5data); switch ((int)data->pa_type) { case KRB5_PADATA_PK_AS_REQ: retval = k5int_decode_krb5_auth_pack(&k5data, &auth_pack); if (retval) { pkiDebug("failed to decode krb5_auth_pack\n"); goto cleanup; } retval = krb5_check_clockskew(context, auth_pack->pkAuthenticator.ctime); if (retval) goto cleanup; /* check dh parameters */ if (auth_pack->clientPublicValue != NULL) { retval = server_check_dh(context, plgctx->cryptoctx, reqctx->cryptoctx, plgctx->idctx, &auth_pack->clientPublicValue->algorithm.parameters, plgctx->opts->dh_min_bits); if (retval) { pkiDebug("bad dh parameters\n"); goto cleanup; } } else if (!is_signed) { /*Anonymous pkinit requires DH*/ retval = KRB5KDC_ERR_PREAUTH_FAILED; krb5_set_error_message(context, retval, _("Anonymous pkinit without DH public " "value not supported.")); goto cleanup; } der_req = cb->request_body(context, rock); retval = krb5_c_make_checksum(context, CKSUMTYPE_NIST_SHA, NULL, 0, der_req, &cksum); if (retval) { pkiDebug("unable to calculate AS REQ checksum\n"); goto cleanup; } if (cksum.length != auth_pack->pkAuthenticator.paChecksum.length || k5_bcmp(cksum.contents, auth_pack->pkAuthenticator.paChecksum.contents, cksum.length) != 0) { pkiDebug("failed to match the checksum\n"); #ifdef DEBUG_CKSUM pkiDebug("calculating checksum on buf size (%d)\n", req_pkt->length); print_buffer(req_pkt->data, req_pkt->length); pkiDebug("received checksum type=%d size=%d ", auth_pack->pkAuthenticator.paChecksum.checksum_type, auth_pack->pkAuthenticator.paChecksum.length); print_buffer(auth_pack->pkAuthenticator.paChecksum.contents, auth_pack->pkAuthenticator.paChecksum.length); pkiDebug("expected checksum type=%d size=%d ", cksum.checksum_type, cksum.length); print_buffer(cksum.contents, cksum.length); #endif retval = KRB5KDC_ERR_PA_CHECKSUM_MUST_BE_INCLUDED; goto cleanup; } /* check if kdcPkId present and match KDC's subjectIdentifier */ if (reqp->kdcPkId.data != NULL) { int valid_kdcPkId = 0; retval = pkinit_check_kdc_pkid(context, plgctx->cryptoctx, reqctx->cryptoctx, plgctx->idctx, (unsigned char *)reqp->kdcPkId.data, reqp->kdcPkId.length, &valid_kdcPkId); if (retval) goto cleanup; if (!valid_kdcPkId) pkiDebug("kdcPkId in AS_REQ does not match KDC's cert" "RFC says to ignore and proceed\n"); } /* remember the decoded auth_pack for verify_padata routine */ reqctx->rcv_auth_pack = auth_pack; auth_pack = NULL; break; case KRB5_PADATA_PK_AS_REP_OLD: case KRB5_PADATA_PK_AS_REQ_OLD: retval = k5int_decode_krb5_auth_pack_draft9(&k5data, &auth_pack9); if (retval) { pkiDebug("failed to decode krb5_auth_pack_draft9\n"); goto cleanup; } if (auth_pack9->clientPublicValue != NULL) { retval = server_check_dh(context, plgctx->cryptoctx, reqctx->cryptoctx, plgctx->idctx, &auth_pack9->clientPublicValue->algorithm.parameters, plgctx->opts->dh_min_bits); if (retval) { pkiDebug("bad dh parameters\n"); goto cleanup; } } /* remember the decoded auth_pack for verify_padata routine */ reqctx->rcv_auth_pack9 = auth_pack9; auth_pack9 = NULL; break; } /* remember to set the PREAUTH flag in the reply */ enc_tkt_reply->flags |= TKT_FLG_PRE_AUTH; modreq = (krb5_kdcpreauth_modreq)reqctx; reqctx = NULL; cleanup: if (retval && data->pa_type == KRB5_PADATA_PK_AS_REQ) { pkiDebug("pkinit_verify_padata failed: creating e-data\n"); if (pkinit_create_edata(context, plgctx->cryptoctx, reqctx->cryptoctx, plgctx->idctx, plgctx->opts, retval, &e_data)) pkiDebug("pkinit_create_edata failed\n"); } switch ((int)data->pa_type) { case KRB5_PADATA_PK_AS_REQ: free_krb5_pa_pk_as_req(&reqp); free(cksum.contents); break; case KRB5_PADATA_PK_AS_REP_OLD: case KRB5_PADATA_PK_AS_REQ_OLD: free_krb5_pa_pk_as_req_draft9(&reqp9); } free(authp_data.data); free(krb5_authz.data); if (reqctx != NULL) pkinit_fini_kdc_req_context(context, reqctx); free_krb5_auth_pack(&auth_pack); free_krb5_auth_pack_draft9(context, &auth_pack9); (*respond)(arg, retval, modreq, e_data, NULL); }
idx: 269
target: 1
static int __poke_user(struct task_struct *child, addr_t addr, addr_t data) { struct user *dummy = NULL; addr_t offset; if (addr < (addr_t) &dummy->regs.acrs) { /* * psw and gprs are stored on the stack */ if (addr == (addr_t) &dummy->regs.psw.mask && #ifdef CONFIG_COMPAT data != PSW_MASK_MERGE(psw_user32_bits, data) && #endif data != PSW_MASK_MERGE(psw_user_bits, data)) /* Invalid psw mask. */ return -EINVAL; #ifndef CONFIG_64BIT if (addr == (addr_t) &dummy->regs.psw.addr) /* I'd like to reject addresses without the high order bit but older gdb's rely on it */ data |= PSW_ADDR_AMODE; #endif *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data; } else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) { /* * access registers are stored in the thread structure */ offset = addr - (addr_t) &dummy->regs.acrs; #ifdef CONFIG_64BIT /* * Very special case: old & broken 64 bit gdb writing * to acrs[15] with a 64 bit value. Ignore the lower * half of the value and write the upper 32 bit to * acrs[15]. Sick... */ if (addr == (addr_t) &dummy->regs.acrs[15]) child->thread.acrs[15] = (unsigned int) (data >> 32); else #endif *(addr_t *)((addr_t) &child->thread.acrs + offset) = data; } else if (addr == (addr_t) &dummy->regs.orig_gpr2) { /* * orig_gpr2 is stored on the kernel stack */ task_pt_regs(child)->orig_gpr2 = data; } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) { /* * floating point regs. are stored in the thread structure */ if (addr == (addr_t) &dummy->regs.fp_regs.fpc && (data & ~((unsigned long) FPC_VALID_MASK << (BITS_PER_LONG - 32))) != 0) return -EINVAL; offset = addr - (addr_t) &dummy->regs.fp_regs; *(addr_t *)((addr_t) &child->thread.fp_regs + offset) = data; } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) { /* * per_info is found in the thread structure */ offset = addr - (addr_t) &dummy->regs.per_info; *(addr_t *)((addr_t) &child->thread.per_info + offset) = data; } FixPerRegisters(child); return 0; }
idx: 270
target: 0
compat_mptfwxfer_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct mpt_fw_xfer32 kfw32; struct mpt_fw_xfer kfw; MPT_ADAPTER *iocp = NULL; int iocnum, iocnumX; int nonblock = (filp->f_flags & O_NONBLOCK); int ret; if (copy_from_user(&kfw32, (char __user *)arg, sizeof(kfw32))) return -EFAULT; /* Verify intended MPT adapter */ iocnumX = kfw32.iocnum & 0xFF; if (((iocnum = mpt_verify_adapter(iocnumX, &iocp)) < 0) || (iocp == NULL)) { printk(KERN_DEBUG MYNAM "::compat_mptfwxfer_ioctl @%d - ioc%d not found!\n", __LINE__, iocnumX); return -ENODEV; } if ((ret = mptctl_syscall_down(iocp, nonblock)) != 0) return ret; dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "compat_mptfwxfer_ioctl() called\n", iocp->name)); kfw.iocnum = iocnum; kfw.fwlen = kfw32.fwlen; kfw.bufp = compat_ptr(kfw32.bufp); ret = mptctl_do_fw_download(iocp, kfw.bufp, kfw.fwlen); mutex_unlock(&iocp->ioctl_cmds.mutex); return ret; }
idx: 271
target: 1
compat_mptfwxfer_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct mpt_fw_xfer32 kfw32; struct mpt_fw_xfer kfw; MPT_ADAPTER *iocp = NULL; int iocnum, iocnumX; int nonblock = (filp->f_flags & O_NONBLOCK); int ret; if (copy_from_user(&kfw32, (char __user *)arg, sizeof(kfw32))) return -EFAULT; /* Verify intended MPT adapter */ iocnumX = kfw32.iocnum & 0xFF; if (((iocnum = mpt_verify_adapter(iocnumX, &iocp)) < 0) || (iocp == NULL)) { printk(KERN_DEBUG MYNAM "::compat_mptfwxfer_ioctl @%d - ioc%d not found!\n", __LINE__, iocnumX); return -ENODEV; } if ((ret = mptctl_syscall_down(iocp, nonblock)) != 0) return ret; dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "compat_mptfwxfer_ioctl() called\n", iocp->name)); kfw.iocnum = iocnum; kfw.fwlen = kfw32.fwlen; kfw.bufp = compat_ptr(kfw32.bufp); ret = mptctl_do_fw_download(kfw.iocnum, kfw.bufp, kfw.fwlen); mutex_unlock(&iocp->ioctl_cmds.mutex); return ret; }
idx: 273
target: 1
static void ram_init(target_phys_addr_t addr, ram_addr_t RAM_size, uint64_t max_mem) { DeviceState *dev; SysBusDevice *s; RamDevice *d; /* allocate RAM */ if ((uint64_t)RAM_size > max_mem) { fprintf(stderr, "qemu: Too much memory for this machine: %d, maximum %d\n", (unsigned int)(RAM_size / (1024 * 1024)), (unsigned int)(max_mem / (1024 * 1024))); exit(1); } dev = qdev_create(NULL, "memory"); s = sysbus_from_qdev(dev); d = FROM_SYSBUS(RamDevice, s); d->size = RAM_size; qdev_init(dev); sysbus_mmio_map(s, 0, addr); }
idx: 274
target: 0
static int ogg_read_header(AVFormatContext *s) { struct ogg *ogg = s->priv_data; int ret, i; ogg->curidx = -1; do { ret = ogg_packet(s, NULL, NULL, NULL, NULL); if (ret < 0) { ogg_read_close(s); return ret; } } while (!ogg->headers); av_log(s, AV_LOG_TRACE, "found headers\n"); for (i = 0; i < ogg->nstreams; i++) { struct ogg_stream *os = ogg->streams + i; if (ogg->streams[i].header < 0) { av_log(s, AV_LOG_ERROR, "Header parsing failed for stream %d\n", i); ogg->streams[i].codec = NULL; av_freep(&ogg->streams[i].private); } else if (os->codec && os->nb_header < os->codec->nb_header) { av_log(s, AV_LOG_WARNING, "Headers mismatch for stream %d: " "expected %d received %d.\n", i, os->codec->nb_header, os->nb_header); if (s->error_recognition & AV_EF_EXPLODE) return AVERROR_INVALIDDATA; } if (os->start_granule != OGG_NOGRANULE_VALUE) os->lastpts = s->streams[i]->start_time = ogg_gptopts(s, i, os->start_granule, NULL); } ret = ogg_get_length(s); if (ret < 0) { ogg_read_close(s); return ret; } return 0; }
idx: 275
target: 1
int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg) { struct dnotify_struct *dn; struct dnotify_struct *odn; struct dnotify_struct **prev; struct inode *inode; fl_owner_t id = current->files; int error = 0; if ((arg & ~DN_MULTISHOT) == 0) { dnotify_flush(filp, id); return 0; } if (!dir_notify_enable) return -EINVAL; inode = filp->f_path.dentry->d_inode; if (!S_ISDIR(inode->i_mode)) return -ENOTDIR; dn = kmem_cache_alloc(dn_cache, GFP_KERNEL); if (dn == NULL) return -ENOMEM; spin_lock(&inode->i_lock); prev = &inode->i_dnotify; while ((odn = *prev) != NULL) { if ((odn->dn_owner == id) && (odn->dn_filp == filp)) { odn->dn_fd = fd; odn->dn_mask |= arg; inode->i_dnotify_mask |= arg & ~DN_MULTISHOT; goto out_free; } prev = &odn->dn_next; } error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0); if (error) goto out_free; dn->dn_mask = arg; dn->dn_fd = fd; dn->dn_filp = filp; dn->dn_owner = id; inode->i_dnotify_mask |= arg & ~DN_MULTISHOT; dn->dn_next = inode->i_dnotify; inode->i_dnotify = dn; spin_unlock(&inode->i_lock); if (filp->f_op && filp->f_op->dir_notify) return filp->f_op->dir_notify(filp, arg); return 0; out_free: spin_unlock(&inode->i_lock); kmem_cache_free(dn_cache, dn); return error; }
idx: 276
target: 1
int xics_alloc_block(XICSState *icp, int src, int num, bool lsi, bool align) { int i, first = -1; ICSState *ics = &icp->ics[src]; assert(src == 0); /* * MSIMesage::data is used for storing VIRQ so * it has to be aligned to num to support multiple * MSI vectors. MSI-X is not affected by this. * The hint is used for the first IRQ, the rest should * be allocated continuously. */ if (align) { assert((num == 1) || (num == 2) || (num == 4) || (num == 8) || (num == 16) || (num == 32)); first = ics_find_free_block(ics, num, num); } else { first = ics_find_free_block(ics, num, 1); } if (first >= 0) { for (i = first; i < first + num; ++i) { ics_set_irq_type(ics, i, lsi); } } first += ics->offset; trace_xics_alloc_block(src, first, num, lsi, align); return first; }
idx: 277
target: 0
static int hextoint(int c) { if (!isascii((int)c)) return (-1); if (isdigit((int)c)) return (c - '0'); if ((c >= 'a') && (c <= 'f')) return (c + 10 - 'a'); if ((c >= 'A') && (c <= 'F')) return (c + 10 - 'A'); return (-1); }
idx: 278
target: 1
acc_ctx_hints(OM_uint32 *minor_status, gss_ctx_id_t *ctx, spnego_gss_cred_id_t spcred, gss_buffer_t *mechListMIC, OM_uint32 *negState, send_token_flag *return_token) { OM_uint32 tmpmin, ret; gss_OID_set supported_mechSet; spnego_gss_ctx_id_t sc = NULL; *mechListMIC = GSS_C_NO_BUFFER; supported_mechSet = GSS_C_NO_OID_SET; *return_token = NO_TOKEN_SEND; *negState = REJECT; *minor_status = 0; /* A hint request must be the first token received. */ if (*ctx != GSS_C_NO_CONTEXT) return GSS_S_DEFECTIVE_TOKEN; ret = get_negotiable_mechs(minor_status, spcred, GSS_C_ACCEPT, &supported_mechSet); if (ret != GSS_S_COMPLETE) goto cleanup; ret = make_NegHints(minor_status, mechListMIC); if (ret != GSS_S_COMPLETE) goto cleanup; sc = create_spnego_ctx(); if (sc == NULL) { ret = GSS_S_FAILURE; goto cleanup; } if (put_mech_set(supported_mechSet, &sc->DER_mechTypes) < 0) { ret = GSS_S_FAILURE; goto cleanup; } sc->internal_mech = GSS_C_NO_OID; *negState = ACCEPT_INCOMPLETE; *return_token = INIT_TOKEN_SEND; sc->firstpass = 1; *ctx = (gss_ctx_id_t)sc; sc = NULL; ret = GSS_S_COMPLETE; cleanup: release_spnego_ctx(&sc); gss_release_oid_set(&tmpmin, &supported_mechSet); return ret; }
idx: 279
target: 0
mptctl_do_fw_download(MPT_ADAPTER *iocp, char __user *ufwbuf, size_t fwlen) { FWDownload_t *dlmsg; MPT_FRAME_HDR *mf; FWDownloadTCSGE_t *ptsge; MptSge_t *sgl, *sgIn; char *sgOut; struct buflist *buflist; struct buflist *bl; dma_addr_t sgl_dma; int ret; int numfrags = 0; int maxfrags; int n = 0; u32 sgdir; u32 nib; int fw_bytes_copied = 0; int i; int sge_offset = 0; u16 iocstat; pFWDownloadReply_t ReplyMsg = NULL; unsigned long timeleft; /* Valid device. Get a message frame and construct the FW download message. */ if ((mf = mpt_get_msg_frame(mptctl_id, iocp)) == NULL) return -EAGAIN; dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "mptctl_do_fwdl called. mptctl_id = %xh.\n", iocp->name, mptctl_id)); dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "DbG: kfwdl.bufp = %p\n", iocp->name, ufwbuf)); dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "DbG: kfwdl.fwlen = %d\n", iocp->name, (int)fwlen)); dlmsg = (FWDownload_t*) mf; ptsge = (FWDownloadTCSGE_t *) &dlmsg->SGL; sgOut = (char *) (ptsge + 1); /* * Construct f/w download request */ dlmsg->ImageType = MPI_FW_DOWNLOAD_ITYPE_FW; dlmsg->Reserved = 0; dlmsg->ChainOffset = 0; dlmsg->Function = MPI_FUNCTION_FW_DOWNLOAD; dlmsg->Reserved1[0] = dlmsg->Reserved1[1] = dlmsg->Reserved1[2] = 0; if (iocp->facts.MsgVersion >= MPI_VERSION_01_05) dlmsg->MsgFlags = MPI_FW_DOWNLOAD_MSGFLGS_LAST_SEGMENT; else dlmsg->MsgFlags = 0; /* Set up the Transaction SGE. */ ptsge->Reserved = 0; ptsge->ContextSize = 0; ptsge->DetailsLength = 12; ptsge->Flags = MPI_SGE_FLAGS_TRANSACTION_ELEMENT; ptsge->Reserved_0100_Checksum = 0; ptsge->ImageOffset = 0; ptsge->ImageSize = cpu_to_le32(fwlen); /* Add the SGL */ /* * Need to kmalloc area(s) for holding firmware image bytes. * But we need to do it piece meal, using a proper * scatter gather list (with 128kB MAX hunks). * * A practical limit here might be # of sg hunks that fit into * a single IOC request frame; 12 or 8 (see below), so: * For FC9xx: 12 x 128kB == 1.5 mB (max) * For C1030: 8 x 128kB == 1 mB (max) * We could support chaining, but things get ugly(ier:) * * Set the sge_offset to the start of the sgl (bytes). */ sgdir = 0x04000000; /* IOC will READ from sys mem */ sge_offset = sizeof(MPIHeader_t) + sizeof(FWDownloadTCSGE_t); if ((sgl = kbuf_alloc_2_sgl(fwlen, sgdir, sge_offset, &numfrags, &buflist, &sgl_dma, iocp)) == NULL) return -ENOMEM; /* * We should only need SGL with 2 simple_32bit entries (up to 256 kB) * for FC9xx f/w image, but calculate max number of sge hunks * we can fit into a request frame, and limit ourselves to that. * (currently no chain support) * maxfrags = (Request Size - FWdownload Size ) / Size of 32 bit SGE * Request maxfrags * 128 12 * 96 8 * 64 4 */ maxfrags = (iocp->req_sz - sizeof(MPIHeader_t) - sizeof(FWDownloadTCSGE_t)) / iocp->SGE_size; if (numfrags > maxfrags) { ret = -EMLINK; goto fwdl_out; } dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "DbG: sgl buffer = %p, sgfrags = %d\n", iocp->name, sgl, numfrags)); /* * Parse SG list, copying sgl itself, * plus f/w image hunks from user space as we go... */ ret = -EFAULT; sgIn = sgl; bl = buflist; for (i=0; i < numfrags; i++) { /* Get the SGE type: 0 - TCSGE, 3 - Chain, 1 - Simple SGE * Skip everything but Simple. If simple, copy from * user space into kernel space. * Note: we should not have anything but Simple as * Chain SGE are illegal. */ nib = (sgIn->FlagsLength & 0x30000000) >> 28; if (nib == 0 || nib == 3) { ; } else if (sgIn->Address) { iocp->add_sge(sgOut, sgIn->FlagsLength, sgIn->Address); n++; if (copy_from_user(bl->kptr, ufwbuf+fw_bytes_copied, bl->len)) { printk(MYIOC_s_ERR_FMT "%s@%d::_ioctl_fwdl - " "Unable to copy f/w buffer hunk#%d @ %p\n", iocp->name, __FILE__, __LINE__, n, ufwbuf); goto fwdl_out; } fw_bytes_copied += bl->len; } sgIn++; bl++; sgOut += iocp->SGE_size; } DBG_DUMP_FW_DOWNLOAD(iocp, (u32 *)mf, numfrags); /* * Finally, perform firmware download. */ ReplyMsg = NULL; SET_MGMT_MSG_CONTEXT(iocp->ioctl_cmds.msg_context, dlmsg->MsgContext); INITIALIZE_MGMT_STATUS(iocp->ioctl_cmds.status) mpt_put_msg_frame(mptctl_id, iocp, mf); /* Now wait for the command to complete */ retry_wait: timeleft = wait_for_completion_timeout(&iocp->ioctl_cmds.done, HZ*60); if (!(iocp->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) { ret = -ETIME; printk(MYIOC_s_WARN_FMT "%s: failed\n", iocp->name, __func__); if (iocp->ioctl_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) { mpt_free_msg_frame(iocp, mf); goto fwdl_out; } if (!timeleft) { printk(MYIOC_s_WARN_FMT "FW download timeout, doorbell=0x%08x\n", iocp->name, mpt_GetIocState(iocp, 0)); mptctl_timeout_expired(iocp, mf); } else goto retry_wait; goto fwdl_out; } if (!(iocp->ioctl_cmds.status & MPT_MGMT_STATUS_RF_VALID)) { printk(MYIOC_s_WARN_FMT "%s: failed\n", iocp->name, __func__); mpt_free_msg_frame(iocp, mf); ret = -ENODATA; goto fwdl_out; } if (sgl) kfree_sgl(sgl, sgl_dma, buflist, iocp); ReplyMsg = (pFWDownloadReply_t)iocp->ioctl_cmds.reply; iocstat = le16_to_cpu(ReplyMsg->IOCStatus) & MPI_IOCSTATUS_MASK; if (iocstat == MPI_IOCSTATUS_SUCCESS) { printk(MYIOC_s_INFO_FMT "F/W update successful!\n", iocp->name); return 0; } else if (iocstat == MPI_IOCSTATUS_INVALID_FUNCTION) { printk(MYIOC_s_WARN_FMT "Hmmm... F/W download not supported!?!\n", iocp->name); printk(MYIOC_s_WARN_FMT "(time to go bang on somebodies door)\n", iocp->name); return -EBADRQC; } else if (iocstat == MPI_IOCSTATUS_BUSY) { printk(MYIOC_s_WARN_FMT "IOC_BUSY!\n", iocp->name); printk(MYIOC_s_WARN_FMT "(try again later?)\n", iocp->name); return -EBUSY; } else { printk(MYIOC_s_WARN_FMT "ioctl_fwdl() returned [bad] status = %04xh\n", iocp->name, iocstat); printk(MYIOC_s_WARN_FMT "(bad VooDoo)\n", iocp->name); return -ENOMSG; } return 0; fwdl_out: CLEAR_MGMT_STATUS(iocp->ioctl_cmds.status); SET_MGMT_MSG_CONTEXT(iocp->ioctl_cmds.msg_context, 0); kfree_sgl(sgl, sgl_dma, buflist, iocp); return ret; }
idx: 280
target: 1
void object_property_get_uint16List(Object *obj, const char *name, uint16List **list, Error **errp) { StringOutputVisitor *ov; StringInputVisitor *iv; ov = string_output_visitor_new(false); object_property_get(obj, string_output_get_visitor(ov), name, errp); iv = string_input_visitor_new(string_output_get_string(ov)); visit_type_uint16List(string_input_get_visitor(iv), list, NULL, errp); string_output_visitor_cleanup(ov); string_input_visitor_cleanup(iv); }
idx: 281
target: 0
static void generate_joint_tables(HYuvContext *s) { uint16_t symbols[1 << VLC_BITS]; uint16_t bits[1 << VLC_BITS]; uint8_t len[1 << VLC_BITS]; if (s->bitstream_bpp < 24) { int p, i, y, u; for (p = 0; p < 3; p++) { for (i = y = 0; y < 256; y++) { int len0 = s->len[0][y]; int limit = VLC_BITS - len0; if (limit <= 0) continue; for (u = 0; u < 256; u++) { int len1 = s->len[p][u]; if (len1 > limit) continue; len[i] = len0 + len1; bits[i] = (s->bits[0][y] << len1) + s->bits[p][u]; symbols[i] = (y << 8) + u; if (symbols[i] != 0xffff) i++; } } ff_free_vlc(&s->vlc[3 + p]); ff_init_vlc_sparse(&s->vlc[3 + p], VLC_BITS, i, len, 1, 1, bits, 2, 2, symbols, 2, 2, 0); } } else { uint8_t (*map)[4] = (uint8_t (*)[4])s->pix_bgr_map; int i, b, g, r, code; int p0 = s->decorrelate; int p1 = !s->decorrelate; for (i = 0, g = -16; g < 16; g++) { int len0 = s->len[p0][g & 255]; int limit0 = VLC_BITS - len0; if (limit0 < 2) continue; for (b = -16; b < 16; b++) { int len1 = s->len[p1][b & 255]; int limit1 = limit0 - len1; if (limit1 < 1) continue; code = (s->bits[p0][g & 255] << len1) + s->bits[p1][b & 255]; for (r = -16; r < 16; r++) { int len2 = s->len[2][r & 255]; if (len2 > limit1) continue; len[i] = len0 + len1 + len2; bits[i] = (code << len2) + s->bits[2][r & 255]; if (s->decorrelate) { map[i][G] = g; map[i][B] = g + b; map[i][R] = g + r; } else { map[i][B] = g; map[i][G] = b; map[i][R] = r; } i++; } } } ff_free_vlc(&s->vlc[3]); init_vlc(&s->vlc[3], VLC_BITS, i, len, 1, 1, bits, 2, 2, 0); } }
idx: 282
target: 1
mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen) { FWDownload_t *dlmsg; MPT_FRAME_HDR *mf; MPT_ADAPTER *iocp; FWDownloadTCSGE_t *ptsge; MptSge_t *sgl, *sgIn; char *sgOut; struct buflist *buflist; struct buflist *bl; dma_addr_t sgl_dma; int ret; int numfrags = 0; int maxfrags; int n = 0; u32 sgdir; u32 nib; int fw_bytes_copied = 0; int i; int sge_offset = 0; u16 iocstat; pFWDownloadReply_t ReplyMsg = NULL; unsigned long timeleft; if (mpt_verify_adapter(ioc, &iocp) < 0) { printk(KERN_DEBUG MYNAM "ioctl_fwdl - ioc%d not found!\n", ioc); return -ENODEV; /* (-6) No such device or address */ } else { /* Valid device. Get a message frame and construct the FW download message. */ if ((mf = mpt_get_msg_frame(mptctl_id, iocp)) == NULL) return -EAGAIN; } dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "mptctl_do_fwdl called. mptctl_id = %xh.\n", iocp->name, mptctl_id)); dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "DbG: kfwdl.bufp = %p\n", iocp->name, ufwbuf)); dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "DbG: kfwdl.fwlen = %d\n", iocp->name, (int)fwlen)); dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "DbG: kfwdl.ioc = %04xh\n", iocp->name, ioc)); dlmsg = (FWDownload_t*) mf; ptsge = (FWDownloadTCSGE_t *) &dlmsg->SGL; sgOut = (char *) (ptsge + 1); /* * Construct f/w download request */ dlmsg->ImageType = MPI_FW_DOWNLOAD_ITYPE_FW; dlmsg->Reserved = 0; dlmsg->ChainOffset = 0; dlmsg->Function = MPI_FUNCTION_FW_DOWNLOAD; dlmsg->Reserved1[0] = dlmsg->Reserved1[1] = dlmsg->Reserved1[2] = 0; if (iocp->facts.MsgVersion >= MPI_VERSION_01_05) dlmsg->MsgFlags = MPI_FW_DOWNLOAD_MSGFLGS_LAST_SEGMENT; else dlmsg->MsgFlags = 0; /* Set up the Transaction SGE. */ ptsge->Reserved = 0; ptsge->ContextSize = 0; ptsge->DetailsLength = 12; ptsge->Flags = MPI_SGE_FLAGS_TRANSACTION_ELEMENT; ptsge->Reserved_0100_Checksum = 0; ptsge->ImageOffset = 0; ptsge->ImageSize = cpu_to_le32(fwlen); /* Add the SGL */ /* * Need to kmalloc area(s) for holding firmware image bytes. * But we need to do it piece meal, using a proper * scatter gather list (with 128kB MAX hunks). * * A practical limit here might be # of sg hunks that fit into * a single IOC request frame; 12 or 8 (see below), so: * For FC9xx: 12 x 128kB == 1.5 mB (max) * For C1030: 8 x 128kB == 1 mB (max) * We could support chaining, but things get ugly(ier:) * * Set the sge_offset to the start of the sgl (bytes). */ sgdir = 0x04000000; /* IOC will READ from sys mem */ sge_offset = sizeof(MPIHeader_t) + sizeof(FWDownloadTCSGE_t); if ((sgl = kbuf_alloc_2_sgl(fwlen, sgdir, sge_offset, &numfrags, &buflist, &sgl_dma, iocp)) == NULL) return -ENOMEM; /* * We should only need SGL with 2 simple_32bit entries (up to 256 kB) * for FC9xx f/w image, but calculate max number of sge hunks * we can fit into a request frame, and limit ourselves to that. * (currently no chain support) * maxfrags = (Request Size - FWdownload Size ) / Size of 32 bit SGE * Request maxfrags * 128 12 * 96 8 * 64 4 */ maxfrags = (iocp->req_sz - sizeof(MPIHeader_t) - sizeof(FWDownloadTCSGE_t)) / iocp->SGE_size; if (numfrags > maxfrags) { ret = -EMLINK; goto fwdl_out; } dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "DbG: sgl buffer = %p, sgfrags = %d\n", iocp->name, sgl, numfrags)); /* * Parse SG list, copying sgl itself, * plus f/w image hunks from user space as we go... */ ret = -EFAULT; sgIn = sgl; bl = buflist; for (i=0; i < numfrags; i++) { /* Get the SGE type: 0 - TCSGE, 3 - Chain, 1 - Simple SGE * Skip everything but Simple. If simple, copy from * user space into kernel space. * Note: we should not have anything but Simple as * Chain SGE are illegal. */ nib = (sgIn->FlagsLength & 0x30000000) >> 28; if (nib == 0 || nib == 3) { ; } else if (sgIn->Address) { iocp->add_sge(sgOut, sgIn->FlagsLength, sgIn->Address); n++; if (copy_from_user(bl->kptr, ufwbuf+fw_bytes_copied, bl->len)) { printk(MYIOC_s_ERR_FMT "%s@%d::_ioctl_fwdl - " "Unable to copy f/w buffer hunk#%d @ %p\n", iocp->name, __FILE__, __LINE__, n, ufwbuf); goto fwdl_out; } fw_bytes_copied += bl->len; } sgIn++; bl++; sgOut += iocp->SGE_size; } DBG_DUMP_FW_DOWNLOAD(iocp, (u32 *)mf, numfrags); /* * Finally, perform firmware download. */ ReplyMsg = NULL; SET_MGMT_MSG_CONTEXT(iocp->ioctl_cmds.msg_context, dlmsg->MsgContext); INITIALIZE_MGMT_STATUS(iocp->ioctl_cmds.status) mpt_put_msg_frame(mptctl_id, iocp, mf); /* Now wait for the command to complete */ retry_wait: timeleft = wait_for_completion_timeout(&iocp->ioctl_cmds.done, HZ*60); if (!(iocp->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) { ret = -ETIME; printk(MYIOC_s_WARN_FMT "%s: failed\n", iocp->name, __func__); if (iocp->ioctl_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) { mpt_free_msg_frame(iocp, mf); goto fwdl_out; } if (!timeleft) { printk(MYIOC_s_WARN_FMT "FW download timeout, doorbell=0x%08x\n", iocp->name, mpt_GetIocState(iocp, 0)); mptctl_timeout_expired(iocp, mf); } else goto retry_wait; goto fwdl_out; } if (!(iocp->ioctl_cmds.status & MPT_MGMT_STATUS_RF_VALID)) { printk(MYIOC_s_WARN_FMT "%s: failed\n", iocp->name, __func__); mpt_free_msg_frame(iocp, mf); ret = -ENODATA; goto fwdl_out; } if (sgl) kfree_sgl(sgl, sgl_dma, buflist, iocp); ReplyMsg = (pFWDownloadReply_t)iocp->ioctl_cmds.reply; iocstat = le16_to_cpu(ReplyMsg->IOCStatus) & MPI_IOCSTATUS_MASK; if (iocstat == MPI_IOCSTATUS_SUCCESS) { printk(MYIOC_s_INFO_FMT "F/W update successful!\n", iocp->name); return 0; } else if (iocstat == MPI_IOCSTATUS_INVALID_FUNCTION) { printk(MYIOC_s_WARN_FMT "Hmmm... F/W download not supported!?!\n", iocp->name); printk(MYIOC_s_WARN_FMT "(time to go bang on somebodies door)\n", iocp->name); return -EBADRQC; } else if (iocstat == MPI_IOCSTATUS_BUSY) { printk(MYIOC_s_WARN_FMT "IOC_BUSY!\n", iocp->name); printk(MYIOC_s_WARN_FMT "(try again later?)\n", iocp->name); return -EBUSY; } else { printk(MYIOC_s_WARN_FMT "ioctl_fwdl() returned [bad] status = %04xh\n", iocp->name, iocstat); printk(MYIOC_s_WARN_FMT "(bad VooDoo)\n", iocp->name); return -ENOMSG; } return 0; fwdl_out: CLEAR_MGMT_STATUS(iocp->ioctl_cmds.status); SET_MGMT_MSG_CONTEXT(iocp->ioctl_cmds.msg_context, 0); kfree_sgl(sgl, sgl_dma, buflist, iocp); return ret; }
idx: 283
target: 1
hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval) { unsigned long orun = 1; ktime_t delta; delta = ktime_sub(now, timer->expires); if (delta.tv64 < 0) return 0; if (interval.tv64 < timer->base->resolution.tv64) interval.tv64 = timer->base->resolution.tv64; if (unlikely(delta.tv64 >= interval.tv64)) { s64 incr = ktime_to_ns(interval); orun = ktime_divns(delta, incr); timer->expires = ktime_add_ns(timer->expires, incr * orun); if (timer->expires.tv64 > now.tv64) return orun; /* * This (and the ktime_add() below) is the * correction for exact: */ orun++; } timer->expires = ktime_add(timer->expires, interval); return orun; }
idx: 284
target: 1
static int raw_create(const char *filename, QEMUOptionParameter *options) { int fd; int64_t total_size = 0; /* Read out options */ while (options && options->name) { if (!strcmp(options->name, BLOCK_OPT_SIZE)) { total_size = options->value.n / 512; } options++; } fd = open(filename, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, 0644); if (fd < 0) return -EIO; ftruncate(fd, total_size * 512); close(fd); return 0; }
idx: 285
target: 0
static inline int vmsvga_fifo_length(struct vmsvga_state_s *s) { int num; if (!s->config || !s->enable) { return 0; } num = CMD(next_cmd) - CMD(stop); if (num < 0) { num += CMD(max) - CMD(min); } return num >> 2; }
idx: 286
target: 1
acc_ctx_new(OM_uint32 *minor_status, gss_buffer_t buf, gss_ctx_id_t *ctx, spnego_gss_cred_id_t spcred, gss_buffer_t *mechToken, gss_buffer_t *mechListMIC, OM_uint32 *negState, send_token_flag *return_token) { OM_uint32 tmpmin, ret, req_flags; gss_OID_set supported_mechSet, mechTypes; gss_buffer_desc der_mechTypes; gss_OID mech_wanted; spnego_gss_ctx_id_t sc = NULL; ret = GSS_S_DEFECTIVE_TOKEN; der_mechTypes.length = 0; der_mechTypes.value = NULL; *mechToken = *mechListMIC = GSS_C_NO_BUFFER; supported_mechSet = mechTypes = GSS_C_NO_OID_SET; *return_token = ERROR_TOKEN_SEND; *negState = REJECT; *minor_status = 0; ret = get_negTokenInit(minor_status, buf, &der_mechTypes, &mechTypes, &req_flags, mechToken, mechListMIC); if (ret != GSS_S_COMPLETE) { goto cleanup; } ret = get_negotiable_mechs(minor_status, spcred, GSS_C_ACCEPT, &supported_mechSet); if (ret != GSS_S_COMPLETE) { *return_token = NO_TOKEN_SEND; goto cleanup; } /* * Select the best match between the list of mechs * that the initiator requested and the list that * the acceptor will support. */ mech_wanted = negotiate_mech(supported_mechSet, mechTypes, negState); if (*negState == REJECT) { ret = GSS_S_BAD_MECH; goto cleanup; } sc = (spnego_gss_ctx_id_t)*ctx; if (sc != NULL) { gss_release_buffer(&tmpmin, &sc->DER_mechTypes); assert(mech_wanted != GSS_C_NO_OID); } else sc = create_spnego_ctx(); if (sc == NULL) { ret = GSS_S_FAILURE; *return_token = NO_TOKEN_SEND; goto cleanup; } sc->mech_set = mechTypes; mechTypes = GSS_C_NO_OID_SET; sc->internal_mech = mech_wanted; sc->DER_mechTypes = der_mechTypes; der_mechTypes.length = 0; der_mechTypes.value = NULL; if (*negState == REQUEST_MIC) sc->mic_reqd = 1; *return_token = INIT_TOKEN_SEND; sc->firstpass = 1; *ctx = (gss_ctx_id_t)sc; ret = GSS_S_COMPLETE; cleanup: gss_release_oid_set(&tmpmin, &mechTypes); gss_release_oid_set(&tmpmin, &supported_mechSet); if (der_mechTypes.length != 0) gss_release_buffer(&tmpmin, &der_mechTypes); return ret; }
287
1
CairoFont *CairoFont::create(GfxFont *gfxFont, XRef *xref, FT_Library lib, GBool useCIDs) { Ref embRef; Object refObj, strObj; GooString *tmpFileName, *fileName,*tmpFileName2; DisplayFontParam *dfp; FILE *tmpFile; int c, i, n; GfxFontType fontType; char **enc; char *name; FoFiTrueType *ff; FoFiType1C *ff1c; Ref ref; static cairo_user_data_key_t cairo_font_face_key; cairo_font_face_t *cairo_font_face; FT_Face face; Gushort *codeToGID; int codeToGIDLen; dfp = NULL; codeToGID = NULL; codeToGIDLen = 0; cairo_font_face = NULL; ref = *gfxFont->getID(); fontType = gfxFont->getType(); tmpFileName = NULL; if (gfxFont->getEmbeddedFontID(&embRef)) { if (!openTempFile(&tmpFileName, &tmpFile, "wb", NULL)) { error(-1, "Couldn't create temporary font file"); goto err2; } refObj.initRef(embRef.num, embRef.gen); refObj.fetch(xref, &strObj); refObj.free(); strObj.streamReset(); while ((c = strObj.streamGetChar()) != EOF) { fputc(c, tmpFile); } strObj.streamClose(); strObj.free(); fclose(tmpFile); fileName = tmpFileName; } else if (!(fileName = gfxFont->getExtFontFile())) { // look for a display font mapping or a substitute font dfp = NULL; if (gfxFont->getName()) { dfp = globalParams->getDisplayFont(gfxFont); } if (!dfp) { error(-1, "Couldn't find a font for '%s'", gfxFont->getName() ? gfxFont->getName()->getCString() : "(unnamed)"); goto err2; } switch (dfp->kind) { case displayFontT1: fileName = dfp->t1.fileName; fontType = gfxFont->isCIDFont() ? fontCIDType0 : fontType1; break; case displayFontTT: fileName = dfp->tt.fileName; fontType = gfxFont->isCIDFont() ? fontCIDType2 : fontTrueType; break; } } switch (fontType) { case fontType1: case fontType1C: if (FT_New_Face(lib, fileName->getCString(), 0, &face)) { error(-1, "could not create type1 face"); goto err2; } enc = ((Gfx8BitFont *)gfxFont)->getEncoding(); codeToGID = (Gushort *)gmallocn(256, sizeof(int)); codeToGIDLen = 256; for (i = 0; i < 256; ++i) { codeToGID[i] = 0; if ((name = enc[i])) { codeToGID[i] = (Gushort)FT_Get_Name_Index(face, name); } } break; case fontCIDType2: codeToGID = NULL; n = 0; if (((GfxCIDFont *)gfxFont)->getCIDToGID()) { n = ((GfxCIDFont *)gfxFont)->getCIDToGIDLen(); if (n) { codeToGID = (Gushort *)gmallocn(n, sizeof(Gushort)); memcpy(codeToGID, ((GfxCIDFont *)gfxFont)->getCIDToGID(), n * sizeof(Gushort)); } } else { ff = FoFiTrueType::load(fileName->getCString()); if (! 
ff) goto err2; codeToGID = ((GfxCIDFont *)gfxFont)->getCodeToGIDMap(ff, &n); delete ff; } codeToGIDLen = n; /* Fall through */ case fontTrueType: if (!(ff = FoFiTrueType::load(fileName->getCString()))) { error(-1, "failed to load truetype font\n"); goto err2; } /* This might be set already for the CIDType2 case */ if (fontType == fontTrueType) { codeToGID = ((Gfx8BitFont *)gfxFont)->getCodeToGIDMap(ff); codeToGIDLen = 256; } if (!openTempFile(&tmpFileName2, &tmpFile, "wb", NULL)) { delete ff; error(-1, "failed to open truetype tempfile\n"); goto err2; } ff->writeTTF(&fileWrite, tmpFile); fclose(tmpFile); delete ff; if (FT_New_Face(lib, tmpFileName2->getCString(), 0, &face)) { error(-1, "could not create truetype face\n"); goto err2; } unlink (tmpFileName2->getCString()); delete tmpFileName2; break; case fontCIDType0: case fontCIDType0C: codeToGID = NULL; codeToGIDLen = 0; if (!useCIDs) { if ((ff1c = FoFiType1C::load(fileName->getCString()))) { codeToGID = ff1c->getCIDToGIDMap(&codeToGIDLen); delete ff1c; } } if (FT_New_Face(lib, fileName->getCString(), 0, &face)) { gfree(codeToGID); codeToGID = NULL; error(-1, "could not create cid face\n"); goto err2; } break; default: printf ("font type not handled\n"); goto err2; break; } // delete the (temporary) font file -- with Unix hard link // semantics, this will remove the last link; otherwise it will // return an error, leaving the file to be deleted later if (fileName == tmpFileName) { unlink (fileName->getCString()); delete tmpFileName; } cairo_font_face = cairo_ft_font_face_create_for_ft_face (face, FT_LOAD_NO_HINTING | FT_LOAD_NO_BITMAP); if (cairo_font_face == NULL) { error(-1, "could not create cairo font\n"); goto err2; /* this doesn't do anything, but it looks like we're * handling the error */ } { CairoFont *ret = new CairoFont(ref, cairo_font_face, face, codeToGID, codeToGIDLen); cairo_font_face_set_user_data (cairo_font_face, &cairo_font_face_key, ret, cairo_font_face_destroy); return ret; } err2: /* hmm? */ printf ("some font thing failed\n"); return NULL; }
288
0
mptctl_do_mpt_command (MPT_ADAPTER *ioc, struct mpt_ioctl_command karg, void __user *mfPtr) { MPT_FRAME_HDR *mf = NULL; MPIHeader_t *hdr; char *psge; struct buflist bufIn; /* data In buffer */ struct buflist bufOut; /* data Out buffer */ dma_addr_t dma_addr_in; dma_addr_t dma_addr_out; int sgSize = 0; /* Num SG elements */ int flagsLength; int sz, rc = 0; int msgContext; u16 req_idx; ulong timeout; unsigned long timeleft; struct scsi_device *sdev; unsigned long flags; u8 function; /* bufIn and bufOut are used for user to kernel space transfers */ bufIn.kptr = bufOut.kptr = NULL; bufIn.len = bufOut.len = 0; spin_lock_irqsave(&ioc->taskmgmt_lock, flags); if (ioc->ioc_reset_in_progress) { spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); printk(KERN_ERR MYNAM "%s@%d::mptctl_do_mpt_command - " "Busy with diagnostic reset\n", __FILE__, __LINE__); return -EBUSY; } spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); /* Basic sanity checks to prevent underflows or integer overflows */ if (karg.maxReplyBytes < 0 || karg.dataInSize < 0 || karg.dataOutSize < 0 || karg.dataSgeOffset < 0 || karg.maxSenseBytes < 0 || karg.dataSgeOffset > ioc->req_sz / 4) return -EINVAL; /* Verify that the final request frame will not be too large. */ sz = karg.dataSgeOffset * 4; if (karg.dataInSize > 0) sz += ioc->SGE_size; if (karg.dataOutSize > 0) sz += ioc->SGE_size; if (sz > ioc->req_sz) { printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " "Request frame too large (%d) maximum (%d)\n", ioc->name, __FILE__, __LINE__, sz, ioc->req_sz); return -EFAULT; } /* Get a free request frame and save the message context. */ if ((mf = mpt_get_msg_frame(mptctl_id, ioc)) == NULL) return -EAGAIN; hdr = (MPIHeader_t *) mf; msgContext = le32_to_cpu(hdr->MsgContext); req_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx); /* Copy the request frame * Reset the saved message context. * Request frame in user space */ if (copy_from_user(mf, mfPtr, karg.dataSgeOffset * 4)) { printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " "Unable to read MF from mpt_ioctl_command struct @ %p\n", ioc->name, __FILE__, __LINE__, mfPtr); function = -1; rc = -EFAULT; goto done_free_mem; } hdr->MsgContext = cpu_to_le32(msgContext); function = hdr->Function; /* Verify that this request is allowed. */ dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sending mpi function (0x%02X), req=%p\n", ioc->name, hdr->Function, mf)); switch (function) { case MPI_FUNCTION_IOC_FACTS: case MPI_FUNCTION_PORT_FACTS: karg.dataOutSize = karg.dataInSize = 0; break; case MPI_FUNCTION_CONFIG: { Config_t *config_frame; config_frame = (Config_t *)mf; dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "\ttype=0x%02x ext_type=0x%02x " "number=0x%02x action=0x%02x\n", ioc->name, config_frame->Header.PageType, config_frame->ExtPageType, config_frame->Header.PageNumber, config_frame->Action)); break; } case MPI_FUNCTION_FC_COMMON_TRANSPORT_SEND: case MPI_FUNCTION_FC_EX_LINK_SRVC_SEND: case MPI_FUNCTION_FW_UPLOAD: case MPI_FUNCTION_SCSI_ENCLOSURE_PROCESSOR: case MPI_FUNCTION_FW_DOWNLOAD: case MPI_FUNCTION_FC_PRIMITIVE_SEND: case MPI_FUNCTION_TOOLBOX: case MPI_FUNCTION_SAS_IO_UNIT_CONTROL: break; case MPI_FUNCTION_SCSI_IO_REQUEST: if (ioc->sh) { SCSIIORequest_t *pScsiReq = (SCSIIORequest_t *) mf; int qtag = MPI_SCSIIO_CONTROL_UNTAGGED; int scsidir = 0; int dataSize; u32 id; id = (ioc->devices_per_bus == 0) ? 256 : ioc->devices_per_bus; if (pScsiReq->TargetID > id) { printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " "Target ID out of bounds. 
\n", ioc->name, __FILE__, __LINE__); rc = -ENODEV; goto done_free_mem; } if (pScsiReq->Bus >= ioc->number_of_buses) { printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " "Target Bus out of bounds. \n", ioc->name, __FILE__, __LINE__); rc = -ENODEV; goto done_free_mem; } pScsiReq->MsgFlags &= ~MPI_SCSIIO_MSGFLGS_SENSE_WIDTH; pScsiReq->MsgFlags |= mpt_msg_flags(ioc); /* verify that app has not requested * more sense data than driver * can provide, if so, reset this parameter * set the sense buffer pointer low address * update the control field to specify Q type */ if (karg.maxSenseBytes > MPT_SENSE_BUFFER_SIZE) pScsiReq->SenseBufferLength = MPT_SENSE_BUFFER_SIZE; else pScsiReq->SenseBufferLength = karg.maxSenseBytes; pScsiReq->SenseBufferLowAddr = cpu_to_le32(ioc->sense_buf_low_dma + (req_idx * MPT_SENSE_BUFFER_ALLOC)); shost_for_each_device(sdev, ioc->sh) { struct scsi_target *starget = scsi_target(sdev); VirtTarget *vtarget = starget->hostdata; if (vtarget == NULL) continue; if ((pScsiReq->TargetID == vtarget->id) && (pScsiReq->Bus == vtarget->channel) && (vtarget->tflags & MPT_TARGET_FLAGS_Q_YES)) qtag = MPI_SCSIIO_CONTROL_SIMPLEQ; } /* Have the IOCTL driver set the direction based * on the dataOutSize (ordering issue with Sparc). */ if (karg.dataOutSize > 0) { scsidir = MPI_SCSIIO_CONTROL_WRITE; dataSize = karg.dataOutSize; } else { scsidir = MPI_SCSIIO_CONTROL_READ; dataSize = karg.dataInSize; } pScsiReq->Control = cpu_to_le32(scsidir | qtag); pScsiReq->DataLength = cpu_to_le32(dataSize); } else { printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " "SCSI driver is not loaded. \n", ioc->name, __FILE__, __LINE__); rc = -EFAULT; goto done_free_mem; } break; case MPI_FUNCTION_SMP_PASSTHROUGH: /* Check mf->PassthruFlags to determine if * transfer is ImmediateMode or not. * Immediate mode returns data in the ReplyFrame. * Else, we are sending request and response data * in two SGLs at the end of the mf. */ break; case MPI_FUNCTION_SATA_PASSTHROUGH: if (!ioc->sh) { printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " "SCSI driver is not loaded. \n", ioc->name, __FILE__, __LINE__); rc = -EFAULT; goto done_free_mem; } break; case MPI_FUNCTION_RAID_ACTION: /* Just add a SGE */ break; case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH: if (ioc->sh) { SCSIIORequest_t *pScsiReq = (SCSIIORequest_t *) mf; int qtag = MPI_SCSIIO_CONTROL_SIMPLEQ; int scsidir = MPI_SCSIIO_CONTROL_READ; int dataSize; pScsiReq->MsgFlags &= ~MPI_SCSIIO_MSGFLGS_SENSE_WIDTH; pScsiReq->MsgFlags |= mpt_msg_flags(ioc); /* verify that app has not requested * more sense data than driver * can provide, if so, reset this parameter * set the sense buffer pointer low address * update the control field to specify Q type */ if (karg.maxSenseBytes > MPT_SENSE_BUFFER_SIZE) pScsiReq->SenseBufferLength = MPT_SENSE_BUFFER_SIZE; else pScsiReq->SenseBufferLength = karg.maxSenseBytes; pScsiReq->SenseBufferLowAddr = cpu_to_le32(ioc->sense_buf_low_dma + (req_idx * MPT_SENSE_BUFFER_ALLOC)); /* All commands to physical devices are tagged */ /* Have the IOCTL driver set the direction based * on the dataOutSize (ordering issue with Sparc). */ if (karg.dataOutSize > 0) { scsidir = MPI_SCSIIO_CONTROL_WRITE; dataSize = karg.dataOutSize; } else { scsidir = MPI_SCSIIO_CONTROL_READ; dataSize = karg.dataInSize; } pScsiReq->Control = cpu_to_le32(scsidir | qtag); pScsiReq->DataLength = cpu_to_le32(dataSize); } else { printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " "SCSI driver is not loaded. 
\n", ioc->name, __FILE__, __LINE__); rc = -EFAULT; goto done_free_mem; } break; case MPI_FUNCTION_SCSI_TASK_MGMT: { SCSITaskMgmt_t *pScsiTm; pScsiTm = (SCSITaskMgmt_t *)mf; dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "\tTaskType=0x%x MsgFlags=0x%x " "TaskMsgContext=0x%x id=%d channel=%d\n", ioc->name, pScsiTm->TaskType, le32_to_cpu (pScsiTm->TaskMsgContext), pScsiTm->MsgFlags, pScsiTm->TargetID, pScsiTm->Bus)); break; } case MPI_FUNCTION_IOC_INIT: { IOCInit_t *pInit = (IOCInit_t *) mf; u32 high_addr, sense_high; /* Verify that all entries in the IOC INIT match * existing setup (and in LE format). */ if (sizeof(dma_addr_t) == sizeof(u64)) { high_addr = cpu_to_le32((u32)((u64)ioc->req_frames_dma >> 32)); sense_high= cpu_to_le32((u32)((u64)ioc->sense_buf_pool_dma >> 32)); } else { high_addr = 0; sense_high= 0; } if ((pInit->Flags != 0) || (pInit->MaxDevices != ioc->facts.MaxDevices) || (pInit->MaxBuses != ioc->facts.MaxBuses) || (pInit->ReplyFrameSize != cpu_to_le16(ioc->reply_sz)) || (pInit->HostMfaHighAddr != high_addr) || (pInit->SenseBufferHighAddr != sense_high)) { printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " "IOC_INIT issued with 1 or more incorrect parameters. Rejected.\n", ioc->name, __FILE__, __LINE__); rc = -EFAULT; goto done_free_mem; } } break; default: /* * MPI_FUNCTION_PORT_ENABLE * MPI_FUNCTION_TARGET_CMD_BUFFER_POST * MPI_FUNCTION_TARGET_ASSIST * MPI_FUNCTION_TARGET_STATUS_SEND * MPI_FUNCTION_TARGET_MODE_ABORT * MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET * MPI_FUNCTION_IO_UNIT_RESET * MPI_FUNCTION_HANDSHAKE * MPI_FUNCTION_REPLY_FRAME_REMOVAL * MPI_FUNCTION_EVENT_NOTIFICATION * (driver handles event notification) * MPI_FUNCTION_EVENT_ACK */ /* What to do with these??? CHECK ME!!! MPI_FUNCTION_FC_LINK_SRVC_BUF_POST MPI_FUNCTION_FC_LINK_SRVC_RSP MPI_FUNCTION_FC_ABORT MPI_FUNCTION_LAN_SEND MPI_FUNCTION_LAN_RECEIVE MPI_FUNCTION_LAN_RESET */ printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " "Illegal request (function 0x%x) \n", ioc->name, __FILE__, __LINE__, hdr->Function); rc = -EFAULT; goto done_free_mem; } /* Add the SGL ( at most one data in SGE and one data out SGE ) * In the case of two SGE's - the data out (write) will always * preceede the data in (read) SGE. psgList is used to free the * allocated memory. */ psge = (char *) (((int *) mf) + karg.dataSgeOffset); flagsLength = 0; if (karg.dataOutSize > 0) sgSize ++; if (karg.dataInSize > 0) sgSize ++; if (sgSize > 0) { /* Set up the dataOut memory allocation */ if (karg.dataOutSize > 0) { if (karg.dataInSize > 0) { flagsLength = ( MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER | MPI_SGE_FLAGS_DIRECTION) << MPI_SGE_FLAGS_SHIFT; } else { flagsLength = MPT_SGE_FLAGS_SSIMPLE_WRITE; } flagsLength |= karg.dataOutSize; bufOut.len = karg.dataOutSize; bufOut.kptr = pci_alloc_consistent( ioc->pcidev, bufOut.len, &dma_addr_out); if (bufOut.kptr == NULL) { rc = -ENOMEM; goto done_free_mem; } else { /* Set up this SGE. * Copy to MF and to sglbuf */ ioc->add_sge(psge, flagsLength, dma_addr_out); psge += ioc->SGE_size; /* Copy user data to kernel space. 
*/ if (copy_from_user(bufOut.kptr, karg.dataOutBufPtr, bufOut.len)) { printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - Unable " "to read user data " "struct @ %p\n", ioc->name, __FILE__, __LINE__,karg.dataOutBufPtr); rc = -EFAULT; goto done_free_mem; } } } if (karg.dataInSize > 0) { flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ; flagsLength |= karg.dataInSize; bufIn.len = karg.dataInSize; bufIn.kptr = pci_alloc_consistent(ioc->pcidev, bufIn.len, &dma_addr_in); if (bufIn.kptr == NULL) { rc = -ENOMEM; goto done_free_mem; } else { /* Set up this SGE * Copy to MF and to sglbuf */ ioc->add_sge(psge, flagsLength, dma_addr_in); } } } else { /* Add a NULL SGE */ ioc->add_sge(psge, flagsLength, (dma_addr_t) -1); } SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context, hdr->MsgContext); INITIALIZE_MGMT_STATUS(ioc->ioctl_cmds.status) if (hdr->Function == MPI_FUNCTION_SCSI_TASK_MGMT) { mutex_lock(&ioc->taskmgmt_cmds.mutex); if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0) { mutex_unlock(&ioc->taskmgmt_cmds.mutex); goto done_free_mem; } DBG_DUMP_TM_REQUEST_FRAME(ioc, (u32 *)mf); if ((ioc->facts.IOCCapabilities & MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q) && (ioc->facts.MsgVersion >= MPI_VERSION_01_05)) mpt_put_msg_frame_hi_pri(mptctl_id, ioc, mf); else { rc =mpt_send_handshake_request(mptctl_id, ioc, sizeof(SCSITaskMgmt_t), (u32*)mf, CAN_SLEEP); if (rc != 0) { dfailprintk(ioc, printk(MYIOC_s_ERR_FMT "send_handshake FAILED! (ioc %p, mf %p)\n", ioc->name, ioc, mf)); mpt_clear_taskmgmt_in_progress_flag(ioc); rc = -ENODATA; mutex_unlock(&ioc->taskmgmt_cmds.mutex); goto done_free_mem; } } } else mpt_put_msg_frame(mptctl_id, ioc, mf); /* Now wait for the command to complete */ timeout = (karg.timeout > 0) ? karg.timeout : MPT_IOCTL_DEFAULT_TIMEOUT; retry_wait: timeleft = wait_for_completion_timeout(&ioc->ioctl_cmds.done, HZ*timeout); if (!(ioc->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) { rc = -ETIME; dfailprintk(ioc, printk(MYIOC_s_ERR_FMT "%s: TIMED OUT!\n", ioc->name, __func__)); if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) { if (function == MPI_FUNCTION_SCSI_TASK_MGMT) mutex_unlock(&ioc->taskmgmt_cmds.mutex); goto done_free_mem; } if (!timeleft) { printk(MYIOC_s_WARN_FMT "mpt cmd timeout, doorbell=0x%08x" " function=0x%x\n", ioc->name, mpt_GetIocState(ioc, 0), function); if (function == MPI_FUNCTION_SCSI_TASK_MGMT) mutex_unlock(&ioc->taskmgmt_cmds.mutex); mptctl_timeout_expired(ioc, mf); mf = NULL; } else goto retry_wait; goto done_free_mem; } if (function == MPI_FUNCTION_SCSI_TASK_MGMT) mutex_unlock(&ioc->taskmgmt_cmds.mutex); mf = NULL; /* If a valid reply frame, copy to the user. * Offset 2: reply length in U32's */ if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_RF_VALID) { if (karg.maxReplyBytes < ioc->reply_sz) { sz = min(karg.maxReplyBytes, 4*ioc->ioctl_cmds.reply[2]); } else { sz = min(ioc->reply_sz, 4*ioc->ioctl_cmds.reply[2]); } if (sz > 0) { if (copy_to_user(karg.replyFrameBufPtr, ioc->ioctl_cmds.reply, sz)){ printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " "Unable to write out reply frame %p\n", ioc->name, __FILE__, __LINE__, karg.replyFrameBufPtr); rc = -ENODATA; goto done_free_mem; } } } /* If valid sense data, copy to user. 
*/ if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_SENSE_VALID) { sz = min(karg.maxSenseBytes, MPT_SENSE_BUFFER_SIZE); if (sz > 0) { if (copy_to_user(karg.senseDataPtr, ioc->ioctl_cmds.sense, sz)) { printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " "Unable to write sense data to user %p\n", ioc->name, __FILE__, __LINE__, karg.senseDataPtr); rc = -ENODATA; goto done_free_mem; } } } /* If the overall status is _GOOD and data in, copy data * to user. */ if ((ioc->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD) && (karg.dataInSize > 0) && (bufIn.kptr)) { if (copy_to_user(karg.dataInBufPtr, bufIn.kptr, karg.dataInSize)) { printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " "Unable to write data to user %p\n", ioc->name, __FILE__, __LINE__, karg.dataInBufPtr); rc = -ENODATA; } } done_free_mem: CLEAR_MGMT_STATUS(ioc->ioctl_cmds.status) SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context, 0); /* Free the allocated memory. */ if (bufOut.kptr != NULL) { pci_free_consistent(ioc->pcidev, bufOut.len, (void *) bufOut.kptr, dma_addr_out); } if (bufIn.kptr != NULL) { pci_free_consistent(ioc->pcidev, bufIn.len, (void *) bufIn.kptr, dma_addr_in); } /* mf is null if command issued successfully * otherwise, failure occurred after mf acquired. */ if (mf) mpt_free_msg_frame(ioc, mf); return rc; }
289
1
size_t compile_tree(struct filter_op **fop)
{
    int i = 1;
    struct filter_op *array = NULL;
    struct unfold_elm *ue;

    BUG_IF(tree_root == NULL);

    fprintf(stdout, " Unfolding the meta-tree ");
    fflush(stdout);

    unfold_blk(&tree_root);

    fprintf(stdout, " done.\n\n");

    labels_to_offsets();

    TAILQ_FOREACH(ue, &unfolded_tree, next) {
        if (ue->label == 0) {
            SAFE_REALLOC(array, i * sizeof(struct filter_op));
            memcpy(&array[i - 1], &ue->fop, sizeof(struct filter_op));
            i++;
        }
    }

    SAFE_REALLOC(array, i * sizeof(struct filter_op));
    array[i - 1].opcode = FOP_EXIT;

    *fop = array;

    return (i);
}
290
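The loop in compile_tree() grows the opcode array one element per realloc and then appends a terminating FOP_EXIT, so the returned count includes the terminator. A simplified standalone sketch of that pattern (ints stand in for struct filter_op, plain realloc for SAFE_REALLOC, error checks omitted for brevity):

#include <stdio.h>
#include <stdlib.h>

enum { FOP_TEST = 1, FOP_EXIT = 2 };

int main(void)
{
    int *array = NULL, i = 1, k;

    for (k = 0; k < 3; k++) {                   /* three real opcodes */
        array = realloc(array, i * sizeof *array);
        array[i - 1] = FOP_TEST;
        i++;
    }
    array = realloc(array, i * sizeof *array);  /* room for the terminator */
    array[i - 1] = FOP_EXIT;

    printf("%d ops (incl. EXIT), last=%d\n", i, array[i - 1]); /* 4, 2 */
    free(array);
    return 0;
}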
1
mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr) { MPT_ADAPTER *ioc; MPT_FRAME_HDR *mf = NULL; MPIHeader_t *hdr; char *psge; struct buflist bufIn; /* data In buffer */ struct buflist bufOut; /* data Out buffer */ dma_addr_t dma_addr_in; dma_addr_t dma_addr_out; int sgSize = 0; /* Num SG elements */ int iocnum, flagsLength; int sz, rc = 0; int msgContext; u16 req_idx; ulong timeout; unsigned long timeleft; struct scsi_device *sdev; unsigned long flags; u8 function; /* bufIn and bufOut are used for user to kernel space transfers */ bufIn.kptr = bufOut.kptr = NULL; bufIn.len = bufOut.len = 0; if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || (ioc == NULL)) { printk(KERN_DEBUG MYNAM "%s::mptctl_do_mpt_command() @%d - ioc%d not found!\n", __FILE__, __LINE__, iocnum); return -ENODEV; } spin_lock_irqsave(&ioc->taskmgmt_lock, flags); if (ioc->ioc_reset_in_progress) { spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); printk(KERN_ERR MYNAM "%s@%d::mptctl_do_mpt_command - " "Busy with diagnostic reset\n", __FILE__, __LINE__); return -EBUSY; } spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); /* Basic sanity checks to prevent underflows or integer overflows */ if (karg.maxReplyBytes < 0 || karg.dataInSize < 0 || karg.dataOutSize < 0 || karg.dataSgeOffset < 0 || karg.maxSenseBytes < 0 || karg.dataSgeOffset > ioc->req_sz / 4) return -EINVAL; /* Verify that the final request frame will not be too large. */ sz = karg.dataSgeOffset * 4; if (karg.dataInSize > 0) sz += ioc->SGE_size; if (karg.dataOutSize > 0) sz += ioc->SGE_size; if (sz > ioc->req_sz) { printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " "Request frame too large (%d) maximum (%d)\n", ioc->name, __FILE__, __LINE__, sz, ioc->req_sz); return -EFAULT; } /* Get a free request frame and save the message context. */ if ((mf = mpt_get_msg_frame(mptctl_id, ioc)) == NULL) return -EAGAIN; hdr = (MPIHeader_t *) mf; msgContext = le32_to_cpu(hdr->MsgContext); req_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx); /* Copy the request frame * Reset the saved message context. * Request frame in user space */ if (copy_from_user(mf, mfPtr, karg.dataSgeOffset * 4)) { printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " "Unable to read MF from mpt_ioctl_command struct @ %p\n", ioc->name, __FILE__, __LINE__, mfPtr); function = -1; rc = -EFAULT; goto done_free_mem; } hdr->MsgContext = cpu_to_le32(msgContext); function = hdr->Function; /* Verify that this request is allowed. 
*/ dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sending mpi function (0x%02X), req=%p\n", ioc->name, hdr->Function, mf)); switch (function) { case MPI_FUNCTION_IOC_FACTS: case MPI_FUNCTION_PORT_FACTS: karg.dataOutSize = karg.dataInSize = 0; break; case MPI_FUNCTION_CONFIG: { Config_t *config_frame; config_frame = (Config_t *)mf; dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "\ttype=0x%02x ext_type=0x%02x " "number=0x%02x action=0x%02x\n", ioc->name, config_frame->Header.PageType, config_frame->ExtPageType, config_frame->Header.PageNumber, config_frame->Action)); break; } case MPI_FUNCTION_FC_COMMON_TRANSPORT_SEND: case MPI_FUNCTION_FC_EX_LINK_SRVC_SEND: case MPI_FUNCTION_FW_UPLOAD: case MPI_FUNCTION_SCSI_ENCLOSURE_PROCESSOR: case MPI_FUNCTION_FW_DOWNLOAD: case MPI_FUNCTION_FC_PRIMITIVE_SEND: case MPI_FUNCTION_TOOLBOX: case MPI_FUNCTION_SAS_IO_UNIT_CONTROL: break; case MPI_FUNCTION_SCSI_IO_REQUEST: if (ioc->sh) { SCSIIORequest_t *pScsiReq = (SCSIIORequest_t *) mf; int qtag = MPI_SCSIIO_CONTROL_UNTAGGED; int scsidir = 0; int dataSize; u32 id; id = (ioc->devices_per_bus == 0) ? 256 : ioc->devices_per_bus; if (pScsiReq->TargetID > id) { printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " "Target ID out of bounds. \n", ioc->name, __FILE__, __LINE__); rc = -ENODEV; goto done_free_mem; } if (pScsiReq->Bus >= ioc->number_of_buses) { printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " "Target Bus out of bounds. \n", ioc->name, __FILE__, __LINE__); rc = -ENODEV; goto done_free_mem; } pScsiReq->MsgFlags &= ~MPI_SCSIIO_MSGFLGS_SENSE_WIDTH; pScsiReq->MsgFlags |= mpt_msg_flags(ioc); /* verify that app has not requested * more sense data than driver * can provide, if so, reset this parameter * set the sense buffer pointer low address * update the control field to specify Q type */ if (karg.maxSenseBytes > MPT_SENSE_BUFFER_SIZE) pScsiReq->SenseBufferLength = MPT_SENSE_BUFFER_SIZE; else pScsiReq->SenseBufferLength = karg.maxSenseBytes; pScsiReq->SenseBufferLowAddr = cpu_to_le32(ioc->sense_buf_low_dma + (req_idx * MPT_SENSE_BUFFER_ALLOC)); shost_for_each_device(sdev, ioc->sh) { struct scsi_target *starget = scsi_target(sdev); VirtTarget *vtarget = starget->hostdata; if (vtarget == NULL) continue; if ((pScsiReq->TargetID == vtarget->id) && (pScsiReq->Bus == vtarget->channel) && (vtarget->tflags & MPT_TARGET_FLAGS_Q_YES)) qtag = MPI_SCSIIO_CONTROL_SIMPLEQ; } /* Have the IOCTL driver set the direction based * on the dataOutSize (ordering issue with Sparc). */ if (karg.dataOutSize > 0) { scsidir = MPI_SCSIIO_CONTROL_WRITE; dataSize = karg.dataOutSize; } else { scsidir = MPI_SCSIIO_CONTROL_READ; dataSize = karg.dataInSize; } pScsiReq->Control = cpu_to_le32(scsidir | qtag); pScsiReq->DataLength = cpu_to_le32(dataSize); } else { printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " "SCSI driver is not loaded. \n", ioc->name, __FILE__, __LINE__); rc = -EFAULT; goto done_free_mem; } break; case MPI_FUNCTION_SMP_PASSTHROUGH: /* Check mf->PassthruFlags to determine if * transfer is ImmediateMode or not. * Immediate mode returns data in the ReplyFrame. * Else, we are sending request and response data * in two SGLs at the end of the mf. */ break; case MPI_FUNCTION_SATA_PASSTHROUGH: if (!ioc->sh) { printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " "SCSI driver is not loaded. 
\n", ioc->name, __FILE__, __LINE__); rc = -EFAULT; goto done_free_mem; } break; case MPI_FUNCTION_RAID_ACTION: /* Just add a SGE */ break; case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH: if (ioc->sh) { SCSIIORequest_t *pScsiReq = (SCSIIORequest_t *) mf; int qtag = MPI_SCSIIO_CONTROL_SIMPLEQ; int scsidir = MPI_SCSIIO_CONTROL_READ; int dataSize; pScsiReq->MsgFlags &= ~MPI_SCSIIO_MSGFLGS_SENSE_WIDTH; pScsiReq->MsgFlags |= mpt_msg_flags(ioc); /* verify that app has not requested * more sense data than driver * can provide, if so, reset this parameter * set the sense buffer pointer low address * update the control field to specify Q type */ if (karg.maxSenseBytes > MPT_SENSE_BUFFER_SIZE) pScsiReq->SenseBufferLength = MPT_SENSE_BUFFER_SIZE; else pScsiReq->SenseBufferLength = karg.maxSenseBytes; pScsiReq->SenseBufferLowAddr = cpu_to_le32(ioc->sense_buf_low_dma + (req_idx * MPT_SENSE_BUFFER_ALLOC)); /* All commands to physical devices are tagged */ /* Have the IOCTL driver set the direction based * on the dataOutSize (ordering issue with Sparc). */ if (karg.dataOutSize > 0) { scsidir = MPI_SCSIIO_CONTROL_WRITE; dataSize = karg.dataOutSize; } else { scsidir = MPI_SCSIIO_CONTROL_READ; dataSize = karg.dataInSize; } pScsiReq->Control = cpu_to_le32(scsidir | qtag); pScsiReq->DataLength = cpu_to_le32(dataSize); } else { printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " "SCSI driver is not loaded. \n", ioc->name, __FILE__, __LINE__); rc = -EFAULT; goto done_free_mem; } break; case MPI_FUNCTION_SCSI_TASK_MGMT: { SCSITaskMgmt_t *pScsiTm; pScsiTm = (SCSITaskMgmt_t *)mf; dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "\tTaskType=0x%x MsgFlags=0x%x " "TaskMsgContext=0x%x id=%d channel=%d\n", ioc->name, pScsiTm->TaskType, le32_to_cpu (pScsiTm->TaskMsgContext), pScsiTm->MsgFlags, pScsiTm->TargetID, pScsiTm->Bus)); break; } case MPI_FUNCTION_IOC_INIT: { IOCInit_t *pInit = (IOCInit_t *) mf; u32 high_addr, sense_high; /* Verify that all entries in the IOC INIT match * existing setup (and in LE format). */ if (sizeof(dma_addr_t) == sizeof(u64)) { high_addr = cpu_to_le32((u32)((u64)ioc->req_frames_dma >> 32)); sense_high= cpu_to_le32((u32)((u64)ioc->sense_buf_pool_dma >> 32)); } else { high_addr = 0; sense_high= 0; } if ((pInit->Flags != 0) || (pInit->MaxDevices != ioc->facts.MaxDevices) || (pInit->MaxBuses != ioc->facts.MaxBuses) || (pInit->ReplyFrameSize != cpu_to_le16(ioc->reply_sz)) || (pInit->HostMfaHighAddr != high_addr) || (pInit->SenseBufferHighAddr != sense_high)) { printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " "IOC_INIT issued with 1 or more incorrect parameters. Rejected.\n", ioc->name, __FILE__, __LINE__); rc = -EFAULT; goto done_free_mem; } } break; default: /* * MPI_FUNCTION_PORT_ENABLE * MPI_FUNCTION_TARGET_CMD_BUFFER_POST * MPI_FUNCTION_TARGET_ASSIST * MPI_FUNCTION_TARGET_STATUS_SEND * MPI_FUNCTION_TARGET_MODE_ABORT * MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET * MPI_FUNCTION_IO_UNIT_RESET * MPI_FUNCTION_HANDSHAKE * MPI_FUNCTION_REPLY_FRAME_REMOVAL * MPI_FUNCTION_EVENT_NOTIFICATION * (driver handles event notification) * MPI_FUNCTION_EVENT_ACK */ /* What to do with these??? CHECK ME!!! 
MPI_FUNCTION_FC_LINK_SRVC_BUF_POST MPI_FUNCTION_FC_LINK_SRVC_RSP MPI_FUNCTION_FC_ABORT MPI_FUNCTION_LAN_SEND MPI_FUNCTION_LAN_RECEIVE MPI_FUNCTION_LAN_RESET */ printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " "Illegal request (function 0x%x) \n", ioc->name, __FILE__, __LINE__, hdr->Function); rc = -EFAULT; goto done_free_mem; } /* Add the SGL ( at most one data in SGE and one data out SGE ) * In the case of two SGE's - the data out (write) will always * preceede the data in (read) SGE. psgList is used to free the * allocated memory. */ psge = (char *) (((int *) mf) + karg.dataSgeOffset); flagsLength = 0; if (karg.dataOutSize > 0) sgSize ++; if (karg.dataInSize > 0) sgSize ++; if (sgSize > 0) { /* Set up the dataOut memory allocation */ if (karg.dataOutSize > 0) { if (karg.dataInSize > 0) { flagsLength = ( MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER | MPI_SGE_FLAGS_DIRECTION) << MPI_SGE_FLAGS_SHIFT; } else { flagsLength = MPT_SGE_FLAGS_SSIMPLE_WRITE; } flagsLength |= karg.dataOutSize; bufOut.len = karg.dataOutSize; bufOut.kptr = pci_alloc_consistent( ioc->pcidev, bufOut.len, &dma_addr_out); if (bufOut.kptr == NULL) { rc = -ENOMEM; goto done_free_mem; } else { /* Set up this SGE. * Copy to MF and to sglbuf */ ioc->add_sge(psge, flagsLength, dma_addr_out); psge += ioc->SGE_size; /* Copy user data to kernel space. */ if (copy_from_user(bufOut.kptr, karg.dataOutBufPtr, bufOut.len)) { printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - Unable " "to read user data " "struct @ %p\n", ioc->name, __FILE__, __LINE__,karg.dataOutBufPtr); rc = -EFAULT; goto done_free_mem; } } } if (karg.dataInSize > 0) { flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ; flagsLength |= karg.dataInSize; bufIn.len = karg.dataInSize; bufIn.kptr = pci_alloc_consistent(ioc->pcidev, bufIn.len, &dma_addr_in); if (bufIn.kptr == NULL) { rc = -ENOMEM; goto done_free_mem; } else { /* Set up this SGE * Copy to MF and to sglbuf */ ioc->add_sge(psge, flagsLength, dma_addr_in); } } } else { /* Add a NULL SGE */ ioc->add_sge(psge, flagsLength, (dma_addr_t) -1); } SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context, hdr->MsgContext); INITIALIZE_MGMT_STATUS(ioc->ioctl_cmds.status) if (hdr->Function == MPI_FUNCTION_SCSI_TASK_MGMT) { mutex_lock(&ioc->taskmgmt_cmds.mutex); if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0) { mutex_unlock(&ioc->taskmgmt_cmds.mutex); goto done_free_mem; } DBG_DUMP_TM_REQUEST_FRAME(ioc, (u32 *)mf); if ((ioc->facts.IOCCapabilities & MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q) && (ioc->facts.MsgVersion >= MPI_VERSION_01_05)) mpt_put_msg_frame_hi_pri(mptctl_id, ioc, mf); else { rc =mpt_send_handshake_request(mptctl_id, ioc, sizeof(SCSITaskMgmt_t), (u32*)mf, CAN_SLEEP); if (rc != 0) { dfailprintk(ioc, printk(MYIOC_s_ERR_FMT "send_handshake FAILED! (ioc %p, mf %p)\n", ioc->name, ioc, mf)); mpt_clear_taskmgmt_in_progress_flag(ioc); rc = -ENODATA; mutex_unlock(&ioc->taskmgmt_cmds.mutex); goto done_free_mem; } } } else mpt_put_msg_frame(mptctl_id, ioc, mf); /* Now wait for the command to complete */ timeout = (karg.timeout > 0) ? 
karg.timeout : MPT_IOCTL_DEFAULT_TIMEOUT; retry_wait: timeleft = wait_for_completion_timeout(&ioc->ioctl_cmds.done, HZ*timeout); if (!(ioc->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) { rc = -ETIME; dfailprintk(ioc, printk(MYIOC_s_ERR_FMT "%s: TIMED OUT!\n", ioc->name, __func__)); if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) { if (function == MPI_FUNCTION_SCSI_TASK_MGMT) mutex_unlock(&ioc->taskmgmt_cmds.mutex); goto done_free_mem; } if (!timeleft) { printk(MYIOC_s_WARN_FMT "mpt cmd timeout, doorbell=0x%08x" " function=0x%x\n", ioc->name, mpt_GetIocState(ioc, 0), function); if (function == MPI_FUNCTION_SCSI_TASK_MGMT) mutex_unlock(&ioc->taskmgmt_cmds.mutex); mptctl_timeout_expired(ioc, mf); mf = NULL; } else goto retry_wait; goto done_free_mem; } if (function == MPI_FUNCTION_SCSI_TASK_MGMT) mutex_unlock(&ioc->taskmgmt_cmds.mutex); mf = NULL; /* If a valid reply frame, copy to the user. * Offset 2: reply length in U32's */ if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_RF_VALID) { if (karg.maxReplyBytes < ioc->reply_sz) { sz = min(karg.maxReplyBytes, 4*ioc->ioctl_cmds.reply[2]); } else { sz = min(ioc->reply_sz, 4*ioc->ioctl_cmds.reply[2]); } if (sz > 0) { if (copy_to_user(karg.replyFrameBufPtr, ioc->ioctl_cmds.reply, sz)){ printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " "Unable to write out reply frame %p\n", ioc->name, __FILE__, __LINE__, karg.replyFrameBufPtr); rc = -ENODATA; goto done_free_mem; } } } /* If valid sense data, copy to user. */ if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_SENSE_VALID) { sz = min(karg.maxSenseBytes, MPT_SENSE_BUFFER_SIZE); if (sz > 0) { if (copy_to_user(karg.senseDataPtr, ioc->ioctl_cmds.sense, sz)) { printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " "Unable to write sense data to user %p\n", ioc->name, __FILE__, __LINE__, karg.senseDataPtr); rc = -ENODATA; goto done_free_mem; } } } /* If the overall status is _GOOD and data in, copy data * to user. */ if ((ioc->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD) && (karg.dataInSize > 0) && (bufIn.kptr)) { if (copy_to_user(karg.dataInBufPtr, bufIn.kptr, karg.dataInSize)) { printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " "Unable to write data to user %p\n", ioc->name, __FILE__, __LINE__, karg.dataInBufPtr); rc = -ENODATA; } } done_free_mem: CLEAR_MGMT_STATUS(ioc->ioctl_cmds.status) SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context, 0); /* Free the allocated memory. */ if (bufOut.kptr != NULL) { pci_free_consistent(ioc->pcidev, bufOut.len, (void *) bufOut.kptr, dma_addr_out); } if (bufIn.kptr != NULL) { pci_free_consistent(ioc->pcidev, bufIn.len, (void *) bufIn.kptr, dma_addr_in); } /* mf is null if command issued successfully * otherwise, failure occurred after mf acquired. */ if (mf) mpt_free_msg_frame(ioc, mf); return rc; }
291
1
m4_mkstemp (struct obstack *obs, int argc, token_data **argv) { if (bad_argc (argv[0], argc, 2, 2)) return; mkstemp_helper (obs, ARG (1)); }
292
0
static int mptctl_do_reset(MPT_ADAPTER *iocp, unsigned long arg) { struct mpt_ioctl_diag_reset __user *urinfo = (void __user *) arg; struct mpt_ioctl_diag_reset krinfo; if (copy_from_user(&krinfo, urinfo, sizeof(struct mpt_ioctl_diag_reset))) { printk(KERN_ERR MYNAM "%s@%d::mptctl_do_reset - " "Unable to copy mpt_ioctl_diag_reset struct @ %p\n", __FILE__, __LINE__, urinfo); return -EFAULT; } dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "mptctl_do_reset called.\n", iocp->name)); if (mpt_HardResetHandler(iocp, CAN_SLEEP) != 0) { printk (MYIOC_s_ERR_FMT "%s@%d::mptctl_do_reset - reset failed.\n", iocp->name, __FILE__, __LINE__); return -1; } return 0; }
294
1
uint64_t qcow2_alloc_cluster_offset(BlockDriverState *bs, uint64_t offset, int n_start, int n_end, int *num, QCowL2Meta *m) { BDRVQcowState *s = bs->opaque; int l2_index, ret; uint64_t l2_offset, *l2_table, cluster_offset; int nb_clusters, i = 0; QCowL2Meta *old_alloc; ret = get_cluster_table(bs, offset, &l2_table, &l2_offset, &l2_index); if (ret == 0) return 0; nb_clusters = size_to_clusters(s, n_end << 9); nb_clusters = MIN(nb_clusters, s->l2_size - l2_index); cluster_offset = be64_to_cpu(l2_table[l2_index]); /* We keep all QCOW_OFLAG_COPIED clusters */ if (cluster_offset & QCOW_OFLAG_COPIED) { nb_clusters = count_contiguous_clusters(nb_clusters, s->cluster_size, &l2_table[l2_index], 0, 0); cluster_offset &= ~QCOW_OFLAG_COPIED; m->nb_clusters = 0; goto out; } /* for the moment, multiple compressed clusters are not managed */ if (cluster_offset & QCOW_OFLAG_COMPRESSED) nb_clusters = 1; /* how many available clusters ? */ while (i < nb_clusters) { i += count_contiguous_clusters(nb_clusters - i, s->cluster_size, &l2_table[l2_index], i, 0); if(be64_to_cpu(l2_table[l2_index + i])) break; i += count_contiguous_free_clusters(nb_clusters - i, &l2_table[l2_index + i]); cluster_offset = be64_to_cpu(l2_table[l2_index + i]); if ((cluster_offset & QCOW_OFLAG_COPIED) || (cluster_offset & QCOW_OFLAG_COMPRESSED)) break; } nb_clusters = i; /* * Check if there already is an AIO write request in flight which allocates * the same cluster. In this case we need to wait until the previous * request has completed and updated the L2 table accordingly. */ QLIST_FOREACH(old_alloc, &s->cluster_allocs, next_in_flight) { uint64_t end_offset = offset + nb_clusters * s->cluster_size; uint64_t old_offset = old_alloc->offset; uint64_t old_end_offset = old_alloc->offset + old_alloc->nb_clusters * s->cluster_size; if (end_offset < old_offset || offset > old_end_offset) { /* No intersection */ } else { if (offset < old_offset) { /* Stop at the start of a running allocation */ nb_clusters = (old_offset - offset) >> s->cluster_bits; } else { nb_clusters = 0; } if (nb_clusters == 0) { /* Set dependency and wait for a callback */ m->depends_on = old_alloc; m->nb_clusters = 0; *num = 0; return 0; } } } if (!nb_clusters) { abort(); } QLIST_INSERT_HEAD(&s->cluster_allocs, m, next_in_flight); /* allocate a new cluster */ cluster_offset = qcow2_alloc_clusters(bs, nb_clusters * s->cluster_size); /* save info needed for meta data update */ m->offset = offset; m->n_start = n_start; m->nb_clusters = nb_clusters; out: m->nb_available = MIN(nb_clusters << (s->cluster_bits - 9), n_end); *num = m->nb_available - n_start; return cluster_offset; }
295
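The QLIST_FOREACH walk in qcow2_alloc_cluster_offset() decides whether a new allocation intersects an in-flight one with a classic interval test: the ranges are disjoint only when one ends strictly before the other begins, so even touching ranges are treated as conflicting. A minimal sketch with hypothetical byte ranges:

#include <stdbool.h>
#include <stdio.h>

static bool ranges_conflict(unsigned long long a_start, unsigned long long a_end,
                            unsigned long long b_start, unsigned long long b_end)
{
    /* same shape as the test above: disjoint only with strict inequality */
    return !(a_end < b_start || a_start > b_end);
}

int main(void)
{
    printf("%d\n", ranges_conflict(0, 65536, 65536, 131072));  /* 1: touching */
    printf("%d\n", ranges_conflict(0, 65535, 131072, 262144)); /* 0: disjoint */
    return 0;
}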
0
xmlParserInputBufferCreateFilenameFunc
xmlThrDefParserInputBufferCreateFilenameDefault(xmlParserInputBufferCreateFilenameFunc func)
{
    xmlParserInputBufferCreateFilenameFunc old;

    xmlMutexLock(xmlThrDefMutex);
    old = xmlParserInputBufferCreateFilenameValueThrDef;
    if (old == NULL) {
        old = __xmlParserInputBufferCreateFilename;
    }
    xmlParserInputBufferCreateFilenameValueThrDef = func;
    xmlMutexUnlock(xmlThrDefMutex);
    return (old);
}
297
1
m4_maketemp (struct obstack *obs, int argc, token_data **argv) { if (bad_argc (argv[0], argc, 2, 2)) return; if (no_gnu_extensions) { /* POSIX states "any trailing 'X' characters [are] replaced with the current process ID as a string", without referencing the file system. Horribly insecure, but we have to do it when we are in traditional mode. For reference, Solaris m4 does: maketemp() -> `' maketemp(X) -> `X' maketemp(XX) -> `Xn', where n is last digit of pid maketemp(XXXXXXXX) -> `X00nnnnn', where nnnnn is 16-bit pid */ const char *str = ARG (1); int len = strlen (str); int i; int len2; M4ERROR ((warning_status, 0, "recommend using mkstemp instead")); for (i = len; i > 1; i--) if (str[i - 1] != 'X') break; obstack_grow (obs, str, i); str = ntoa ((int32_t) getpid (), 10); len2 = strlen (str); if (len2 > len - i) obstack_grow0 (obs, str + len2 - (len - i), len - i); else { while (i++ < len - len2) obstack_1grow (obs, '0'); obstack_grow0 (obs, str, len2); } } else mkstemp_helper (obs, ARG (1)); }
298
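The traditional-mode branch of m4_maketemp() implements exactly the comment's Solaris examples: the run of trailing X characters is overwritten with the low decimal digits of the PID, zero-padded on the left when the PID string is shorter than the run. A hedged userspace re-implementation of just that substitution (names hypothetical):

#include <stdio.h>
#include <string.h>

static void maketemp_traditional(const char *pat, long pid, char *out)
{
    int len = strlen(pat), i, len2;
    char digits[32];

    for (i = len; i > 1; i--)              /* find the run of trailing X's */
        if (pat[i - 1] != 'X')
            break;
    snprintf(digits, sizeof digits, "%ld", pid);
    len2 = strlen(digits);

    memcpy(out, pat, i);                   /* keep the non-X prefix */
    if (len2 > len - i)                    /* keep only the last digits */
        memcpy(out + i, digits + len2 - (len - i), len - i);
    else {                                 /* zero-pad, then all digits */
        memset(out + i, '0', len - i - len2);
        memcpy(out + i + (len - i - len2), digits, len2);
    }
    out[len] = '\0';
}

int main(void)
{
    char buf[64];
    maketemp_traditional("fooXXXXXX", 12345, buf);
    printf("%s\n", buf);                   /* foo012345 */
    return 0;
}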
1
void ff_xvmc_init_block(MpegEncContext *s) { struct xvmc_render_state *render = (struct xvmc_render_state*)s->current_picture.data[2]; assert(render); if (!render || render->magic != AV_XVMC_RENDER_MAGIC) { assert(0); return; // make sure that this is a render packet } s->block = (DCTELEM *)(render->data_blocks + render->next_free_data_block_num * 64); }
299
0
static int dissect_h245_CertSelectionCriteria(tvbuff_t *tvb _U_, int offset _U_, asn1_ctx_t *actx _U_, proto_tree *tree _U_, int hf_index _U_)
{
    offset = dissect_per_constrained_sequence_of(tvb, offset, actx, tree, hf_index,
                                                 ett_h245_CertSelectionCriteria,
                                                 CertSelectionCriteria_sequence_of,
                                                 1, 16, FALSE);
    return offset;
}
300
1
create_spnego_ctx(void) { spnego_gss_ctx_id_t spnego_ctx = NULL; spnego_ctx = (spnego_gss_ctx_id_t) malloc(sizeof (spnego_gss_ctx_id_rec)); if (spnego_ctx == NULL) { return (NULL); } spnego_ctx->magic_num = SPNEGO_MAGIC_ID; spnego_ctx->ctx_handle = GSS_C_NO_CONTEXT; spnego_ctx->mech_set = NULL; spnego_ctx->internal_mech = NULL; spnego_ctx->optionStr = NULL; spnego_ctx->DER_mechTypes.length = 0; spnego_ctx->DER_mechTypes.value = NULL; spnego_ctx->default_cred = GSS_C_NO_CREDENTIAL; spnego_ctx->mic_reqd = 0; spnego_ctx->mic_sent = 0; spnego_ctx->mic_rcvd = 0; spnego_ctx->mech_complete = 0; spnego_ctx->nego_done = 0; spnego_ctx->internal_name = GSS_C_NO_NAME; spnego_ctx->actual_mech = GSS_C_NO_OID; check_spnego_options(spnego_ctx); return (spnego_ctx); }
301
1
static int mptctl_do_reset(unsigned long arg) { struct mpt_ioctl_diag_reset __user *urinfo = (void __user *) arg; struct mpt_ioctl_diag_reset krinfo; MPT_ADAPTER *iocp; if (copy_from_user(&krinfo, urinfo, sizeof(struct mpt_ioctl_diag_reset))) { printk(KERN_ERR MYNAM "%s@%d::mptctl_do_reset - " "Unable to copy mpt_ioctl_diag_reset struct @ %p\n", __FILE__, __LINE__, urinfo); return -EFAULT; } if (mpt_verify_adapter(krinfo.hdr.iocnum, &iocp) < 0) { printk(KERN_DEBUG MYNAM "%s@%d::mptctl_do_reset - ioc%d not found!\n", __FILE__, __LINE__, krinfo.hdr.iocnum); return -ENODEV; /* (-6) No such device or address */ } dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "mptctl_do_reset called.\n", iocp->name)); if (mpt_HardResetHandler(iocp, CAN_SLEEP) != 0) { printk (MYIOC_s_ERR_FMT "%s@%d::mptctl_do_reset - reset failed.\n", iocp->name, __FILE__, __LINE__); return -1; } return 0; }
302
1
void do_adde (void) { T2 = T0; T0 += T1 + xer_ca; if (likely(!(T0 < T2 || (xer_ca == 1 && T0 == T2)))) { xer_ca = 0; } else { xer_ca = 1; } }
303
0
TEST_F(SSLErrorAssistantTest, DynamicInterstitialListMatch) {
  ASSERT_TRUE(embedded_test_server()->Start());
  EXPECT_EQ(1u, ssl_info().public_key_hashes.size());

  auto config_proto =
      std::make_unique<chrome_browser_ssl::SSLErrorAssistantConfig>();
  config_proto->set_version_id(kLargeVersionId);

  chrome_browser_ssl::DynamicInterstitial* filter =
      config_proto->add_dynamic_interstitial();
  filter->set_interstitial_type(chrome_browser_ssl::DynamicInterstitial::
                                    INTERSTITIAL_PAGE_CAPTIVE_PORTAL);
  filter->set_cert_error(
      chrome_browser_ssl::DynamicInterstitial::UNKNOWN_CERT_ERROR);
  filter->add_sha256_hash("sha256/nightjar");
  filter->add_sha256_hash("sha256/frogmouth");
  filter->add_sha256_hash("sha256/poorwill");
  filter->set_mitm_software_name("UwS");
  filter->set_issuer_common_name_regex("whippoorwill");

  filter = config_proto->add_dynamic_interstitial();
  filter->set_interstitial_type(
      chrome_browser_ssl::DynamicInterstitial::INTERSTITIAL_PAGE_SSL);
  filter->set_cert_error(
      chrome_browser_ssl::DynamicInterstitial::ERR_CERT_COMMON_NAME_INVALID);
  filter->add_sha256_hash("sha256/nuthatch");
  filter->add_sha256_hash(ssl_info().public_key_hashes[0].ToString());
  filter->add_sha256_hash("sha256/treecreeper");
  filter->set_mitm_software_name("UwS");
  filter->set_issuer_common_name_regex(issuer_common_name());
  filter->set_issuer_organization_regex(issuer_organization_name());

  error_assistant()->SetErrorAssistantProto(std::move(config_proto));

  base::Optional<DynamicInterstitialInfo> dynamic_interstitial =
      error_assistant()->MatchDynamicInterstitial(ssl_info());
  ASSERT_TRUE(dynamic_interstitial);
  EXPECT_EQ(chrome_browser_ssl::DynamicInterstitial::INTERSTITIAL_PAGE_SSL,
            dynamic_interstitial->interstitial_type);
}
305
0
mptctl_eventenable (MPT_ADAPTER *ioc, unsigned long arg) { struct mpt_ioctl_eventenable __user *uarg = (void __user *) arg; struct mpt_ioctl_eventenable karg; if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_eventenable))) { printk(KERN_ERR MYNAM "%s@%d::mptctl_eventenable - " "Unable to read in mpt_ioctl_eventenable struct @ %p\n", __FILE__, __LINE__, uarg); return -EFAULT; } dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_eventenable called.\n", ioc->name)); if (ioc->events == NULL) { /* Have not yet allocated memory - do so now. */ int sz = MPTCTL_EVENT_LOG_SIZE * sizeof(MPT_IOCTL_EVENTS); ioc->events = kzalloc(sz, GFP_KERNEL); if (!ioc->events) { printk(MYIOC_s_ERR_FMT ": ERROR - Insufficient memory to add adapter!\n", ioc->name); return -ENOMEM; } ioc->alloc_total += sz; ioc->eventContext = 0; } /* Update the IOC event logging flag. */ ioc->eventTypes = karg.eventTypes; return 0; }
307
1
mptctl_eventenable (unsigned long arg) { struct mpt_ioctl_eventenable __user *uarg = (void __user *) arg; struct mpt_ioctl_eventenable karg; MPT_ADAPTER *ioc; int iocnum; if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_eventenable))) { printk(KERN_ERR MYNAM "%s@%d::mptctl_eventenable - " "Unable to read in mpt_ioctl_eventenable struct @ %p\n", __FILE__, __LINE__, uarg); return -EFAULT; } if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || (ioc == NULL)) { printk(KERN_DEBUG MYNAM "%s::mptctl_eventenable() @%d - ioc%d not found!\n", __FILE__, __LINE__, iocnum); return -ENODEV; } dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_eventenable called.\n", ioc->name)); if (ioc->events == NULL) { /* Have not yet allocated memory - do so now. */ int sz = MPTCTL_EVENT_LOG_SIZE * sizeof(MPT_IOCTL_EVENTS); ioc->events = kzalloc(sz, GFP_KERNEL); if (!ioc->events) { printk(MYIOC_s_ERR_FMT ": ERROR - Insufficient memory to add adapter!\n", ioc->name); return -ENOMEM; } ioc->alloc_total += sz; ioc->eventContext = 0; } /* Update the IOC event logging flag. */ ioc->eventTypes = karg.eventTypes; return 0; }
308
0
static int dissect_h245_T_mobileOperationTransmitCapability(tvbuff_t *tvb _U_, int offset _U_, asn1_ctx_t *actx _U_, proto_tree *tree _U_, int hf_index _U_)
{
    offset = dissect_per_sequence(tvb, offset, actx, tree, hf_index,
                                  ett_h245_T_mobileOperationTransmitCapability,
                                  T_mobileOperationTransmitCapability_sequence);
    return offset;
}
309
1
static unsigned char asn1_oid_decode(struct asn1_ctx *ctx, unsigned char *eoc, unsigned long **oid, unsigned int *len) { unsigned long subid; unsigned int size; unsigned long *optr; size = eoc - ctx->pointer + 1; *oid = kmalloc(size * sizeof(unsigned long), GFP_ATOMIC); if (*oid == NULL) { if (net_ratelimit()) printk("OOM in bsalg (%d)\n", __LINE__); return 0; } optr = *oid; if (!asn1_subid_decode(ctx, &subid)) { kfree(*oid); *oid = NULL; return 0; } if (subid < 40) { optr [0] = 0; optr [1] = subid; } else if (subid < 80) { optr [0] = 1; optr [1] = subid - 40; } else { optr [0] = 2; optr [1] = subid - 80; } *len = 2; optr += 2; while (ctx->pointer < eoc) { if (++(*len) > size) { ctx->error = ASN1_ERR_DEC_BADVALUE; kfree(*oid); *oid = NULL; return 0; } if (!asn1_subid_decode(ctx, optr++)) { kfree(*oid); *oid = NULL; return 0; } } return 1; }
311
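The 40/80 thresholds in asn1_oid_decode() undo the BER rule that the first two OID arcs X.Y are packed into a single subidentifier Z = X*40 + Y (with X limited to 0, 1, or 2). A small standalone check of that split:

#include <stdio.h>

static void split_first_arc(unsigned long subid, unsigned long out[2])
{
    /* same three cases as the decoder above */
    if (subid < 40)      { out[0] = 0; out[1] = subid; }
    else if (subid < 80) { out[0] = 1; out[1] = subid - 40; }
    else                 { out[0] = 2; out[1] = subid - 80; }
}

int main(void)
{
    unsigned long arcs[2];
    split_first_arc(43, arcs);              /* 43 = 1*40 + 3, the ISO arc */
    printf("%lu.%lu\n", arcs[0], arcs[1]);  /* prints "1.3" */
    return 0;
}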
1
spnego_gss_accept_sec_context( OM_uint32 *minor_status, gss_ctx_id_t *context_handle, gss_cred_id_t verifier_cred_handle, gss_buffer_t input_token, gss_channel_bindings_t input_chan_bindings, gss_name_t *src_name, gss_OID *mech_type, gss_buffer_t output_token, OM_uint32 *ret_flags, OM_uint32 *time_rec, gss_cred_id_t *delegated_cred_handle) { OM_uint32 ret, tmpmin, negState; send_token_flag return_token; gss_buffer_t mechtok_in, mic_in, mic_out; gss_buffer_desc mechtok_out = GSS_C_EMPTY_BUFFER; spnego_gss_ctx_id_t sc = NULL; spnego_gss_cred_id_t spcred = NULL; int sendTokenInit = 0, tmpret; mechtok_in = mic_in = mic_out = GSS_C_NO_BUFFER; /* * This function works in three steps: * * 1. Perform mechanism negotiation. * 2. Invoke the negotiated mech's gss_accept_sec_context function * and examine the results. * 3. Process or generate MICs if necessary. * * Step one determines whether the negotiation requires a MIC exchange, * while steps two and three share responsibility for determining when * the exchange is complete. If the selected mech completes in this * call and no MIC exchange is expected, then step 2 will decide. If a * MIC exchange is expected, then step 3 will decide. If an error * occurs in any step, the exchange will be aborted, possibly with an * error token. * * negState determines the state of the negotiation, and is * communicated to the acceptor if a continuing token is sent. * return_token is used to indicate what type of token, if any, should * be generated. */ /* Validate arguments. */ if (minor_status != NULL) *minor_status = 0; if (output_token != GSS_C_NO_BUFFER) { output_token->length = 0; output_token->value = NULL; } if (minor_status == NULL || output_token == GSS_C_NO_BUFFER || context_handle == NULL) return GSS_S_CALL_INACCESSIBLE_WRITE; if (input_token == GSS_C_NO_BUFFER) return GSS_S_CALL_INACCESSIBLE_READ; /* Step 1: Perform mechanism negotiation. */ sc = (spnego_gss_ctx_id_t)*context_handle; spcred = (spnego_gss_cred_id_t)verifier_cred_handle; if (sc == NULL || sc->internal_mech == GSS_C_NO_OID) { /* Process an initial token or request for NegHints. */ if (src_name != NULL) *src_name = GSS_C_NO_NAME; if (mech_type != NULL) *mech_type = GSS_C_NO_OID; if (time_rec != NULL) *time_rec = 0; if (ret_flags != NULL) *ret_flags = 0; if (delegated_cred_handle != NULL) *delegated_cred_handle = GSS_C_NO_CREDENTIAL; if (input_token->length == 0) { ret = acc_ctx_hints(minor_status, context_handle, spcred, &mic_out, &negState, &return_token); if (ret != GSS_S_COMPLETE) goto cleanup; sendTokenInit = 1; ret = GSS_S_CONTINUE_NEEDED; } else { /* Can set negState to REQUEST_MIC */ ret = acc_ctx_new(minor_status, input_token, context_handle, spcred, &mechtok_in, &mic_in, &negState, &return_token); if (ret != GSS_S_COMPLETE) goto cleanup; ret = GSS_S_CONTINUE_NEEDED; } } else { /* Process a response token. Can set negState to * ACCEPT_INCOMPLETE. */ ret = acc_ctx_cont(minor_status, input_token, context_handle, &mechtok_in, &mic_in, &negState, &return_token); if (ret != GSS_S_COMPLETE) goto cleanup; ret = GSS_S_CONTINUE_NEEDED; } /* Step 2: invoke the negotiated mechanism's gss_accept_sec_context * function. */ sc = (spnego_gss_ctx_id_t)*context_handle; /* * Handle mechtok_in and mic_in only if they are * present in input_token. If neither is present, whether * this is an error depends on whether this is the first * round-trip. RET is set to a default value according to * whether it is the first round-trip. 
*/ if (negState != REQUEST_MIC && mechtok_in != GSS_C_NO_BUFFER) { ret = acc_ctx_call_acc(minor_status, sc, spcred, mechtok_in, mech_type, &mechtok_out, ret_flags, time_rec, delegated_cred_handle, &negState, &return_token); } /* Step 3: process or generate the MIC, if the negotiated mech is * complete and supports MICs. */ if (!HARD_ERROR(ret) && sc->mech_complete && (sc->ctx_flags & GSS_C_INTEG_FLAG)) { ret = handle_mic(minor_status, mic_in, (mechtok_out.length != 0), sc, &mic_out, &negState, &return_token); } cleanup: if (return_token == INIT_TOKEN_SEND && sendTokenInit) { assert(sc != NULL); tmpret = make_spnego_tokenInit_msg(sc, 1, mic_out, 0, GSS_C_NO_BUFFER, return_token, output_token); if (tmpret < 0) ret = GSS_S_FAILURE; } else if (return_token != NO_TOKEN_SEND && return_token != CHECK_MIC) { tmpret = make_spnego_tokenTarg_msg(negState, sc ? sc->internal_mech : GSS_C_NO_OID, &mechtok_out, mic_out, return_token, output_token); if (tmpret < 0) ret = GSS_S_FAILURE; } if (ret == GSS_S_COMPLETE) { *context_handle = (gss_ctx_id_t)sc->ctx_handle; if (sc->internal_name != GSS_C_NO_NAME && src_name != NULL) { *src_name = sc->internal_name; sc->internal_name = GSS_C_NO_NAME; } release_spnego_ctx(&sc); } else if (ret != GSS_S_CONTINUE_NEEDED) { if (sc != NULL) { gss_delete_sec_context(&tmpmin, &sc->ctx_handle, GSS_C_NO_BUFFER); release_spnego_ctx(&sc); } *context_handle = GSS_C_NO_CONTEXT; } gss_release_buffer(&tmpmin, &mechtok_out); if (mechtok_in != GSS_C_NO_BUFFER) { gss_release_buffer(&tmpmin, mechtok_in); free(mechtok_in); } if (mic_in != GSS_C_NO_BUFFER) { gss_release_buffer(&tmpmin, mic_in); free(mic_in); } if (mic_out != GSS_C_NO_BUFFER) { gss_release_buffer(&tmpmin, mic_out); free(mic_out); } return ret; }
312
1
asn1_oid_decode(struct asn1_ctx *ctx, unsigned char *eoc, unsigned long **oid, unsigned int *len) { unsigned long subid; unsigned int size; unsigned long *optr; size = eoc - ctx->pointer + 1; *oid = kmalloc(size * sizeof(unsigned long), GFP_ATOMIC); if (*oid == NULL) return 0; optr = *oid; if (!asn1_subid_decode(ctx, &subid)) { kfree(*oid); *oid = NULL; return 0; } if (subid < 40) { optr[0] = 0; optr[1] = subid; } else if (subid < 80) { optr[0] = 1; optr[1] = subid - 40; } else { optr[0] = 2; optr[1] = subid - 80; } *len = 2; optr += 2; while (ctx->pointer < eoc) { if (++(*len) > size) { ctx->error = ASN1_ERR_DEC_BADVALUE; kfree(*oid); *oid = NULL; return 0; } if (!asn1_subid_decode(ctx, optr++)) { kfree(*oid); *oid = NULL; return 0; } } return 1; }
314
0
static int sapi_uwsgi_read_post(char *buffer, uint count_bytes TSRMLS_DC)
#endif
{
    uint read_bytes = 0;
    struct wsgi_request *wsgi_req = (struct wsgi_request *) SG(server_context);

    count_bytes = MIN(count_bytes, wsgi_req->post_cl - SG(read_post_bytes));

    while (read_bytes < count_bytes) {
        ssize_t rlen = 0;
        char *buf = uwsgi_request_body_read(wsgi_req, count_bytes - read_bytes, &rlen);
        if (buf == uwsgi.empty)
            break;
        if (buf) {
            memcpy(buffer, buf, rlen);
            read_bytes += rlen;
            continue;
        }
        break;
    }

    return read_bytes;
}
315
0
mptctl_eventquery (MPT_ADAPTER *ioc, unsigned long arg) { struct mpt_ioctl_eventquery __user *uarg = (void __user *) arg; struct mpt_ioctl_eventquery karg; if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_eventquery))) { printk(KERN_ERR MYNAM "%s@%d::mptctl_eventquery - " "Unable to read in mpt_ioctl_eventquery struct @ %p\n", __FILE__, __LINE__, uarg); return -EFAULT; } dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_eventquery called.\n", ioc->name)); karg.eventEntries = MPTCTL_EVENT_LOG_SIZE; karg.eventTypes = ioc->eventTypes; /* Copy the data from kernel memory to user memory */ if (copy_to_user((char __user *)arg, &karg, sizeof(struct mpt_ioctl_eventquery))) { printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_eventquery - " "Unable to write out mpt_ioctl_eventquery struct @ %p\n", ioc->name, __FILE__, __LINE__, uarg); return -EFAULT; } return 0; }
316
1
asn1_header_decode(struct asn1_ctx *ctx, unsigned char **eoc, unsigned int *cls, unsigned int *con, unsigned int *tag) { unsigned int def = 0; unsigned int len = 0; if (!asn1_id_decode(ctx, cls, con, tag)) return 0; if (!asn1_length_decode(ctx, &def, &len)) return 0; if (def) *eoc = ctx->pointer + len; else *eoc = NULL; return 1; }
318
0
static int mp_decode_frame(MPADecodeContext *s, short *samples) { int i, nb_frames, ch; short *samples_ptr; init_get_bits(&s->gb, s->inbuf + HEADER_SIZE, s->inbuf_ptr - s->inbuf - HEADER_SIZE); /* skip error protection field */ if (s->error_protection) get_bits(&s->gb, 16); dprintf("frame %d:\n", s->frame_count); switch(s->layer) { case 1: nb_frames = mp_decode_layer1(s); break; case 2: nb_frames = mp_decode_layer2(s); break; case 3: default: nb_frames = mp_decode_layer3(s); break; } #if defined(DEBUG) for(i=0;i<nb_frames;i++) { for(ch=0;ch<s->nb_channels;ch++) { int j; printf("%d-%d:", i, ch); for(j=0;j<SBLIMIT;j++) printf(" %0.6f", (double)s->sb_samples[ch][i][j] / FRAC_ONE); printf("\n"); } } #endif /* apply the synthesis filter */ for(ch=0;ch<s->nb_channels;ch++) { samples_ptr = samples + ch; for(i=0;i<nb_frames;i++) { synth_filter(s, ch, samples_ptr, s->nb_channels, s->sb_samples[ch][i]); samples_ptr += 32 * s->nb_channels; } } #ifdef DEBUG s->frame_count++; #endif return nb_frames * 32 * sizeof(short) * s->nb_channels; }
319
1
mptctl_eventquery (unsigned long arg) { struct mpt_ioctl_eventquery __user *uarg = (void __user *) arg; struct mpt_ioctl_eventquery karg; MPT_ADAPTER *ioc; int iocnum; if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_eventquery))) { printk(KERN_ERR MYNAM "%s@%d::mptctl_eventquery - " "Unable to read in mpt_ioctl_eventquery struct @ %p\n", __FILE__, __LINE__, uarg); return -EFAULT; } if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || (ioc == NULL)) { printk(KERN_DEBUG MYNAM "%s::mptctl_eventquery() @%d - ioc%d not found!\n", __FILE__, __LINE__, iocnum); return -ENODEV; } dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_eventquery called.\n", ioc->name)); karg.eventEntries = MPTCTL_EVENT_LOG_SIZE; karg.eventTypes = ioc->eventTypes; /* Copy the data from kernel memory to user memory */ if (copy_to_user((char __user *)arg, &karg, sizeof(struct mpt_ioctl_eventquery))) { printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_eventquery - " "Unable to write out mpt_ioctl_eventquery struct @ %p\n", ioc->name, __FILE__, __LINE__, uarg); return -EFAULT; } return 0; }
320
1
static unsigned char asn1_header_decode(struct asn1_ctx *ctx, unsigned char **eoc, unsigned int *cls, unsigned int *con, unsigned int *tag) { unsigned int def, len; if (!asn1_id_decode(ctx, cls, con, tag)) return 0; def = len = 0; if (!asn1_length_decode(ctx, &def, &len)) return 0; if (def) *eoc = ctx->pointer + len; else *eoc = NULL; return 1; }
322
1
spnego_gss_complete_auth_token( OM_uint32 *minor_status, const gss_ctx_id_t context_handle, gss_buffer_t input_message_buffer) { OM_uint32 ret; ret = gss_complete_auth_token(minor_status, context_handle, input_message_buffer); return (ret); }
323
0
static int pcm_decode_frame(AVCodecContext *avctx, void *data, int *data_size, const uint8_t *buf, int buf_size) { PCMDecode *s = avctx->priv_data; int sample_size, c, n; short *samples; const uint8_t *src, *src8, *src2[MAX_CHANNELS]; uint8_t *dstu8; int16_t *dst_int16_t; int32_t *dst_int32_t; int64_t *dst_int64_t; uint16_t *dst_uint16_t; uint32_t *dst_uint32_t; samples = data; src = buf; if (avctx->sample_fmt!=avctx->codec->sample_fmts[0]) { av_log(avctx, AV_LOG_ERROR, "invalid sample_fmt\n"); return -1; } if(avctx->channels <= 0 || avctx->channels > MAX_CHANNELS){ av_log(avctx, AV_LOG_ERROR, "PCM channels out of bounds\n"); return -1; } sample_size = av_get_bits_per_sample(avctx->codec_id)/8; /* av_get_bits_per_sample returns 0 for CODEC_ID_PCM_DVD */ if (CODEC_ID_PCM_DVD == avctx->codec_id) /* 2 samples are interleaved per block in PCM_DVD */ sample_size = avctx->bits_per_coded_sample * 2 / 8; n = avctx->channels * sample_size; if(n && buf_size % n){ av_log(avctx, AV_LOG_ERROR, "invalid PCM packet\n"); return -1; } buf_size= FFMIN(buf_size, *data_size/2); *data_size=0; n = buf_size/sample_size; switch(avctx->codec->id) { case CODEC_ID_PCM_U32LE: DECODE(uint32_t, le32, src, samples, n, 0, 0x80000000) break; case CODEC_ID_PCM_U32BE: DECODE(uint32_t, be32, src, samples, n, 0, 0x80000000) break; case CODEC_ID_PCM_S24LE: DECODE(int32_t, le24, src, samples, n, 8, 0) break; case CODEC_ID_PCM_S24BE: DECODE(int32_t, be24, src, samples, n, 8, 0) break; case CODEC_ID_PCM_U24LE: DECODE(uint32_t, le24, src, samples, n, 8, 0x800000) break; case CODEC_ID_PCM_U24BE: DECODE(uint32_t, be24, src, samples, n, 8, 0x800000) break; case CODEC_ID_PCM_S24DAUD: for(;n>0;n--) { uint32_t v = bytestream_get_be24(&src); v >>= 4; // sync flags are here *samples++ = ff_reverse[(v >> 8) & 0xff] + (ff_reverse[v & 0xff] << 8); } break; case CODEC_ID_PCM_S16LE_PLANAR: n /= avctx->channels; for(c=0;c<avctx->channels;c++) src2[c] = &src[c*n*2]; for(;n>0;n--) for(c=0;c<avctx->channels;c++) *samples++ = bytestream_get_le16(&src2[c]); src = src2[avctx->channels-1]; break; case CODEC_ID_PCM_U16LE: DECODE(uint16_t, le16, src, samples, n, 0, 0x8000) break; case CODEC_ID_PCM_U16BE: DECODE(uint16_t, be16, src, samples, n, 0, 0x8000) break; case CODEC_ID_PCM_S8: dstu8= (uint8_t*)samples; for(;n>0;n--) { *dstu8++ = *src++ + 128; } samples= (short*)dstu8; break; #if WORDS_BIGENDIAN case CODEC_ID_PCM_F64LE: DECODE(int64_t, le64, src, samples, n, 0, 0) break; case CODEC_ID_PCM_S32LE: case CODEC_ID_PCM_F32LE: DECODE(int32_t, le32, src, samples, n, 0, 0) break; case CODEC_ID_PCM_S16LE: DECODE(int16_t, le16, src, samples, n, 0, 0) break; case CODEC_ID_PCM_F64BE: case CODEC_ID_PCM_F32BE: case CODEC_ID_PCM_S32BE: case CODEC_ID_PCM_S16BE: #else case CODEC_ID_PCM_F64BE: DECODE(int64_t, be64, src, samples, n, 0, 0) break; case CODEC_ID_PCM_F32BE: case CODEC_ID_PCM_S32BE: DECODE(int32_t, be32, src, samples, n, 0, 0) break; case CODEC_ID_PCM_S16BE: DECODE(int16_t, be16, src, samples, n, 0, 0) break; case CODEC_ID_PCM_F64LE: case CODEC_ID_PCM_F32LE: case CODEC_ID_PCM_S32LE: case CODEC_ID_PCM_S16LE: #endif /* WORDS_BIGENDIAN */ case CODEC_ID_PCM_U8: memcpy(samples, src, n*sample_size); src += n*sample_size; samples = (short*)((uint8_t*)data + n*sample_size); break; case CODEC_ID_PCM_ZORK: for(;n>0;n--) { int x= *src++; if(x&128) x-= 128; else x = -x; *samples++ = x << 8; } break; case CODEC_ID_PCM_ALAW: case CODEC_ID_PCM_MULAW: for(;n>0;n--) { *samples++ = s->table[*src++]; } break; case CODEC_ID_PCM_DVD: dst_int32_t = data; n /= avctx->channels; 
switch (avctx->bits_per_coded_sample) { case 20: while (n--) { c = avctx->channels; src8 = src + 4*c; while (c--) { *dst_int32_t++ = (bytestream_get_be16(&src) << 16) + ((*src8 &0xf0) << 8); *dst_int32_t++ = (bytestream_get_be16(&src) << 16) + ((*src8++ &0x0f) << 12); } src = src8; } break; case 24: while (n--) { c = avctx->channels; src8 = src + 4*c; while (c--) { *dst_int32_t++ = (bytestream_get_be16(&src) << 16) + ((*src8++) << 8); *dst_int32_t++ = (bytestream_get_be16(&src) << 16) + ((*src8++) << 8); } src = src8; } break; default: av_log(avctx, AV_LOG_ERROR, "PCM DVD unsupported sample depth\n"); return -1; break; } samples = (short *) dst_int32_t; break; default: return -1; } *data_size = (uint8_t *)samples - (uint8_t *)data; return src - buf; }
324
0
IN_PROC_BROWSER_TEST_F(FramebustBlockBrowserTest, AllowRadioButtonSelected) { const GURL url = embedded_test_server()->GetURL("/iframe.html"); ui_test_utils::NavigateToURL(browser(), url); auto* helper = GetFramebustTabHelper(); helper->AddBlockedUrl(url, base::BindOnce(&FramebustBlockBrowserTest::OnClick, base::Unretained(this))); EXPECT_TRUE(helper->HasBlockedUrls()); HostContentSettingsMap* settings_map = HostContentSettingsMapFactory::GetForProfile(browser()->profile()); EXPECT_EQ(CONTENT_SETTING_BLOCK, settings_map->GetContentSetting(url, GURL(), CONTENT_SETTINGS_TYPE_POPUPS, std::string())); { ContentSettingFramebustBlockBubbleModel framebust_block_bubble_model(browser()->content_setting_bubble_model_delegate(), GetWebContents(), browser()->profile()); framebust_block_bubble_model.OnRadioClicked(kAllowRadioButtonIndex); } EXPECT_EQ(CONTENT_SETTING_ALLOW, settings_map->GetContentSetting(url, GURL(), CONTENT_SETTINGS_TYPE_POPUPS, std::string())); }
325
0
static bool parse_policyConstraints(chunk_t blob, int level0, private_x509_cert_t *this) { asn1_parser_t *parser; chunk_t object; int objectID; bool success; parser = asn1_parser_create(policyConstraintsObjects, blob); parser->set_top_level(parser, level0); while (parser->iterate(parser, &objectID, &object)) { switch (objectID) { case POLICY_CONSTRAINT_EXPLICIT: this->require_explicit = parse_constraint(object); break; case POLICY_CONSTRAINT_INHIBIT: this->inhibit_mapping = parse_constraint(object); break; default: break; } } success = parser->success(parser); parser->destroy(parser); return success; }
326
0
static int write_trailer(AVFormatContext *s) { WVMuxContext *wc = s->priv_data; AVIOContext *pb = s->pb; ff_ape_write(s); if (pb->seekable) { avio_seek(pb, 12, SEEK_SET); avio_wl32(pb, wc->duration); avio_flush(pb); } return 0; }
328
0
void virtio_queue_set_align(VirtIODevice *vdev, int n, int align) { BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); /* virtio-1 compliant devices cannot change the alignment */ if (virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) { error_report("tried to modify queue alignment for virtio-1 device"); return; } /* Check that the transport told us it was going to do this * (so a buggy transport will immediately assert rather than * silently failing to migrate this state) */ assert(k->has_variable_vring_alignment); vdev->vq[n].vring.align = align; virtio_queue_update_rings(vdev, n); }
330
1
asn1_length_decode(struct asn1_ctx *ctx, unsigned int *def, unsigned int *len) { unsigned char ch, cnt; if (!asn1_octet_decode(ctx, &ch)) return 0; if (ch == 0x80) *def = 0; else { *def = 1; if (ch < 0x80) *len = ch; else { cnt = (unsigned char) (ch & 0x7F); *len = 0; while (cnt > 0) { if (!asn1_octet_decode(ctx, &ch)) return 0; *len <<= 8; *len |= ch; cnt--; } } } return 1; }
331
0
mptctl_fw_download(MPT_ADAPTER *iocp, unsigned long arg) { struct mpt_fw_xfer __user *ufwdl = (void __user *) arg; struct mpt_fw_xfer kfwdl; if (copy_from_user(&kfwdl, ufwdl, sizeof(struct mpt_fw_xfer))) { printk(KERN_ERR MYNAM "%s@%d::_ioctl_fwdl - " "Unable to copy mpt_fw_xfer struct @ %p\n", __FILE__, __LINE__, ufwdl); return -EFAULT; } return mptctl_do_fw_download(iocp, kfwdl.bufp, kfwdl.fwlen); }
332
1
spnego_gss_context_time( OM_uint32 *minor_status, const gss_ctx_id_t context_handle, OM_uint32 *time_rec) { OM_uint32 ret; ret = gss_context_time(minor_status, context_handle, time_rec); return (ret); }
334
0
static inline void RENAME(yuvPlanartouyvy)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst, long width, long height, long lumStride, long chromStride, long dstStride, long vertLumPerChroma) { long y; const x86_reg chromWidth= width>>1; for (y=0; y<height; y++) { #if COMPILE_TEMPLATE_MMX //FIXME handle 2 lines at once (fewer prefetches, reuse some chroma, but very likely memory-limited anyway) __asm__ volatile( "xor %%"REG_a", %%"REG_a" \n\t" ".p2align 4 \n\t" "1: \n\t" PREFETCH" 32(%1, %%"REG_a", 2) \n\t" PREFETCH" 32(%2, %%"REG_a") \n\t" PREFETCH" 32(%3, %%"REG_a") \n\t" "movq (%2, %%"REG_a"), %%mm0 \n\t" // U(0) "movq %%mm0, %%mm2 \n\t" // U(0) "movq (%3, %%"REG_a"), %%mm1 \n\t" // V(0) "punpcklbw %%mm1, %%mm0 \n\t" // UVUV UVUV(0) "punpckhbw %%mm1, %%mm2 \n\t" // UVUV UVUV(8) "movq (%1, %%"REG_a",2), %%mm3 \n\t" // Y(0) "movq 8(%1, %%"REG_a",2), %%mm5 \n\t" // Y(8) "movq %%mm0, %%mm4 \n\t" // Y(0) "movq %%mm2, %%mm6 \n\t" // Y(8) "punpcklbw %%mm3, %%mm0 \n\t" // YUYV YUYV(0) "punpckhbw %%mm3, %%mm4 \n\t" // YUYV YUYV(4) "punpcklbw %%mm5, %%mm2 \n\t" // YUYV YUYV(8) "punpckhbw %%mm5, %%mm6 \n\t" // YUYV YUYV(12) MOVNTQ" %%mm0, (%0, %%"REG_a", 4) \n\t" MOVNTQ" %%mm4, 8(%0, %%"REG_a", 4) \n\t" MOVNTQ" %%mm2, 16(%0, %%"REG_a", 4) \n\t" MOVNTQ" %%mm6, 24(%0, %%"REG_a", 4) \n\t" "add $8, %%"REG_a" \n\t" "cmp %4, %%"REG_a" \n\t" " jb 1b \n\t" ::"r"(dst), "r"(ysrc), "r"(usrc), "r"(vsrc), "g" (chromWidth) : "%"REG_a ); #else //FIXME adapt the Alpha ASM code from yv12->yuy2 #if HAVE_FAST_64BIT int i; uint64_t *ldst = (uint64_t *) dst; const uint8_t *yc = ysrc, *uc = usrc, *vc = vsrc; for (i = 0; i < chromWidth; i += 2) { uint64_t k, l; k = uc[0] + (yc[0] << 8) + (vc[0] << 16) + (yc[1] << 24); l = uc[1] + (yc[2] << 8) + (vc[1] << 16) + (yc[3] << 24); *ldst++ = k + (l << 32); yc += 4; uc += 2; vc += 2; } #else int i, *idst = (int32_t *) dst; const uint8_t *yc = ysrc, *uc = usrc, *vc = vsrc; for (i = 0; i < chromWidth; i++) { #if HAVE_BIGENDIAN *idst++ = (uc[0] << 24)+ (yc[0] << 16) + (vc[0] << 8) + (yc[1] << 0); #else *idst++ = uc[0] + (yc[0] << 8) + (vc[0] << 16) + (yc[1] << 24); #endif yc += 2; uc++; vc++; } #endif #endif if ((y&(vertLumPerChroma-1)) == vertLumPerChroma-1) { usrc += chromStride; vsrc += chromStride; } ysrc += lumStride; dst += dstStride; } #if COMPILE_TEMPLATE_MMX __asm__(EMMS" \n\t" SFENCE" \n\t" :::"memory"); #endif }
335
1
static int ipip6_rcv(struct sk_buff *skb) { struct iphdr *iph; struct ip_tunnel *tunnel; if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) goto out; iph = ip_hdr(skb); read_lock(&ipip6_lock); if ((tunnel = ipip6_tunnel_lookup(dev_net(skb->dev), iph->saddr, iph->daddr)) != NULL) { secpath_reset(skb); skb->mac_header = skb->network_header; skb_reset_network_header(skb); IPCB(skb)->flags = 0; skb->protocol = htons(ETH_P_IPV6); skb->pkt_type = PACKET_HOST; if ((tunnel->dev->priv_flags & IFF_ISATAP) && !isatap_chksrc(skb, iph, tunnel)) { tunnel->stat.rx_errors++; read_unlock(&ipip6_lock); kfree_skb(skb); return 0; } tunnel->stat.rx_packets++; tunnel->stat.rx_bytes += skb->len; skb->dev = tunnel->dev; dst_release(skb->dst); skb->dst = NULL; nf_reset(skb); ipip6_ecn_decapsulate(iph, skb); netif_rx(skb); read_unlock(&ipip6_lock); return 0; } icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); kfree_skb(skb); read_unlock(&ipip6_lock); out: return 0; }
336
1
static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se) { u64 slice = __sched_period(cfs_rq->nr_running); slice *= se->load.weight; do_div(slice, cfs_rq->load.weight); return slice; }
339
0
mptctl_getiocinfo (MPT_ADAPTER *ioc, unsigned long arg, unsigned int data_size) { struct mpt_ioctl_iocinfo __user *uarg = (void __user *) arg; struct mpt_ioctl_iocinfo *karg; struct pci_dev *pdev; unsigned int port; int cim_rev; struct scsi_device *sdev; VirtDevice *vdevice; /* Add of PCI INFO results in unaligned access for * IA64 and Sparc. Reset long to int. Return no PCI * data for obsolete format. */ if (data_size == sizeof(struct mpt_ioctl_iocinfo_rev0)) cim_rev = 0; else if (data_size == sizeof(struct mpt_ioctl_iocinfo_rev1)) cim_rev = 1; else if (data_size == sizeof(struct mpt_ioctl_iocinfo)) cim_rev = 2; else if (data_size == (sizeof(struct mpt_ioctl_iocinfo_rev0)+12)) cim_rev = 0; /* obsolete */ else return -EFAULT; karg = memdup_user(uarg, data_size); if (IS_ERR(karg)) { printk(KERN_ERR MYNAM "%s@%d::mpt_ioctl_iocinfo() - memdup_user returned error [%ld]\n", __FILE__, __LINE__, PTR_ERR(karg)); return PTR_ERR(karg); } /* Verify the data transfer size is correct. */ if (karg->hdr.maxDataSize != data_size) { printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_getiocinfo - " "Structure size mismatch. Command not completed.\n", ioc->name, __FILE__, __LINE__); kfree(karg); return -EFAULT; } dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_getiocinfo called.\n", ioc->name)); /* Fill in the data and return the structure to the calling * program */ if (ioc->bus_type == SAS) karg->adapterType = MPT_IOCTL_INTERFACE_SAS; else if (ioc->bus_type == FC) karg->adapterType = MPT_IOCTL_INTERFACE_FC; else karg->adapterType = MPT_IOCTL_INTERFACE_SCSI; if (karg->hdr.port > 1) { kfree(karg); return -EINVAL; } port = karg->hdr.port; karg->port = port; pdev = (struct pci_dev *) ioc->pcidev; karg->pciId = pdev->device; karg->hwRev = pdev->revision; karg->subSystemDevice = pdev->subsystem_device; karg->subSystemVendor = pdev->subsystem_vendor; if (cim_rev == 1) { /* Get the PCI bus, device, and function numbers for the IOC */ karg->pciInfo.u.bits.busNumber = pdev->bus->number; karg->pciInfo.u.bits.deviceNumber = PCI_SLOT( pdev->devfn ); karg->pciInfo.u.bits.functionNumber = PCI_FUNC( pdev->devfn ); } else if (cim_rev == 2) { /* Get the PCI bus, device, function and segment ID numbers for the IOC */ karg->pciInfo.u.bits.busNumber = pdev->bus->number; karg->pciInfo.u.bits.deviceNumber = PCI_SLOT( pdev->devfn ); karg->pciInfo.u.bits.functionNumber = PCI_FUNC( pdev->devfn ); karg->pciInfo.segmentID = pci_domain_nr(pdev->bus); } /* Get number of devices */ karg->numDevices = 0; if (ioc->sh) { shost_for_each_device(sdev, ioc->sh) { vdevice = sdev->hostdata; if (vdevice == NULL || vdevice->vtarget == NULL) continue; if (vdevice->vtarget->tflags & MPT_TARGET_FLAGS_RAID_COMPONENT) continue; karg->numDevices++; } } /* Set the BIOS and FW Version */ karg->FWVersion = ioc->facts.FWVersion.Word; karg->BIOSVersion = ioc->biosVersion; /* Set the Version Strings. */ strncpy (karg->driverVersion, MPT_LINUX_PACKAGE_NAME, MPT_IOCTL_VERSION_LENGTH); karg->driverVersion[MPT_IOCTL_VERSION_LENGTH-1]='\0'; karg->busChangeEvent = 0; karg->hostId = ioc->pfacts[port].PortSCSIID; karg->rsvd[0] = karg->rsvd[1] = 0; /* Copy the data from kernel memory to user memory */ if (copy_to_user((char __user *)arg, karg, data_size)) { printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_getiocinfo - " "Unable to write out mpt_ioctl_iocinfo struct @ %p\n", ioc->name, __FILE__, __LINE__, uarg); kfree(karg); return -EFAULT; } kfree(karg); return 0; }
340
0
static void dumpcffcidset(struct alltabs *at) { int gid, start; putc(2, at->charset); start = -1; for (gid = 1; gid < at->gi.gcnt; ++gid) { if (start == -1) start = gid; else if (at->gi.bygid[gid] - at->gi.bygid[start] != gid - start) { putshort(at->charset, at->gi.bygid[start]); putshort(at->charset, at->gi.bygid[gid - 1] - at->gi.bygid[start]); start = gid; } } if (start != -1) { putshort(at->charset, at->gi.bygid[start]); putshort(at->charset, at->gi.bygid[gid - 1] - at->gi.bygid[start]); } }
342
0
static void read_vec_element_i32(DisasContext *s, TCGv_i32 tcg_dest, int srcidx, int element, TCGMemOp memop) { int vect_off = vec_reg_offset(srcidx, element, memop & MO_SIZE); switch (memop) { case MO_8: tcg_gen_ld8u_i32(tcg_dest, cpu_env, vect_off); break; case MO_16: tcg_gen_ld16u_i32(tcg_dest, cpu_env, vect_off); break; case MO_8|MO_SIGN: tcg_gen_ld8s_i32(tcg_dest, cpu_env, vect_off); break; case MO_16|MO_SIGN: tcg_gen_ld16s_i32(tcg_dest, cpu_env, vect_off); break; case MO_32: case MO_32|MO_SIGN: tcg_gen_ld_i32(tcg_dest, cpu_env, vect_off); break; default: g_assert_not_reached(); } }
343
1
mptctl_getiocinfo (unsigned long arg, unsigned int data_size) { struct mpt_ioctl_iocinfo __user *uarg = (void __user *) arg; struct mpt_ioctl_iocinfo *karg; MPT_ADAPTER *ioc; struct pci_dev *pdev; int iocnum; unsigned int port; int cim_rev; struct scsi_device *sdev; VirtDevice *vdevice; /* Add of PCI INFO results in unaligned access for * IA64 and Sparc. Reset long to int. Return no PCI * data for obsolete format. */ if (data_size == sizeof(struct mpt_ioctl_iocinfo_rev0)) cim_rev = 0; else if (data_size == sizeof(struct mpt_ioctl_iocinfo_rev1)) cim_rev = 1; else if (data_size == sizeof(struct mpt_ioctl_iocinfo)) cim_rev = 2; else if (data_size == (sizeof(struct mpt_ioctl_iocinfo_rev0)+12)) cim_rev = 0; /* obsolete */ else return -EFAULT; karg = memdup_user(uarg, data_size); if (IS_ERR(karg)) { printk(KERN_ERR MYNAM "%s@%d::mpt_ioctl_iocinfo() - memdup_user returned error [%ld]\n", __FILE__, __LINE__, PTR_ERR(karg)); return PTR_ERR(karg); } if (((iocnum = mpt_verify_adapter(karg->hdr.iocnum, &ioc)) < 0) || (ioc == NULL)) { printk(KERN_DEBUG MYNAM "%s::mptctl_getiocinfo() @%d - ioc%d not found!\n", __FILE__, __LINE__, iocnum); kfree(karg); return -ENODEV; } /* Verify the data transfer size is correct. */ if (karg->hdr.maxDataSize != data_size) { printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_getiocinfo - " "Structure size mismatch. Command not completed.\n", ioc->name, __FILE__, __LINE__); kfree(karg); return -EFAULT; } dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_getiocinfo called.\n", ioc->name)); /* Fill in the data and return the structure to the calling * program */ if (ioc->bus_type == SAS) karg->adapterType = MPT_IOCTL_INTERFACE_SAS; else if (ioc->bus_type == FC) karg->adapterType = MPT_IOCTL_INTERFACE_FC; else karg->adapterType = MPT_IOCTL_INTERFACE_SCSI; if (karg->hdr.port > 1) { kfree(karg); return -EINVAL; } port = karg->hdr.port; karg->port = port; pdev = (struct pci_dev *) ioc->pcidev; karg->pciId = pdev->device; karg->hwRev = pdev->revision; karg->subSystemDevice = pdev->subsystem_device; karg->subSystemVendor = pdev->subsystem_vendor; if (cim_rev == 1) { /* Get the PCI bus, device, and function numbers for the IOC */ karg->pciInfo.u.bits.busNumber = pdev->bus->number; karg->pciInfo.u.bits.deviceNumber = PCI_SLOT( pdev->devfn ); karg->pciInfo.u.bits.functionNumber = PCI_FUNC( pdev->devfn ); } else if (cim_rev == 2) { /* Get the PCI bus, device, function and segment ID numbers for the IOC */ karg->pciInfo.u.bits.busNumber = pdev->bus->number; karg->pciInfo.u.bits.deviceNumber = PCI_SLOT( pdev->devfn ); karg->pciInfo.u.bits.functionNumber = PCI_FUNC( pdev->devfn ); karg->pciInfo.segmentID = pci_domain_nr(pdev->bus); } /* Get number of devices */ karg->numDevices = 0; if (ioc->sh) { shost_for_each_device(sdev, ioc->sh) { vdevice = sdev->hostdata; if (vdevice == NULL || vdevice->vtarget == NULL) continue; if (vdevice->vtarget->tflags & MPT_TARGET_FLAGS_RAID_COMPONENT) continue; karg->numDevices++; } } /* Set the BIOS and FW Version */ karg->FWVersion = ioc->facts.FWVersion.Word; karg->BIOSVersion = ioc->biosVersion; /* Set the Version Strings. 
*/ strncpy (karg->driverVersion, MPT_LINUX_PACKAGE_NAME, MPT_IOCTL_VERSION_LENGTH); karg->driverVersion[MPT_IOCTL_VERSION_LENGTH-1]='\0'; karg->busChangeEvent = 0; karg->hostId = ioc->pfacts[port].PortSCSIID; karg->rsvd[0] = karg->rsvd[1] = 0; /* Copy the data from kernel memory to user memory */ if (copy_to_user((char __user *)arg, karg, data_size)) { printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_getiocinfo - " "Unable to write out mpt_ioctl_iocinfo struct @ %p\n", ioc->name, __FILE__, __LINE__, uarg); kfree(karg); return -EFAULT; } kfree(karg); return 0; }
344
0
static gboolean lacks_extension_info(NautilusFile *file) { return file->details->pending_info_providers != NULL; }
346
1
static u64 sched_vslice_add(struct cfs_rq *cfs_rq, struct sched_entity *se) { return __sched_vslice(cfs_rq->load.weight + se->load.weight, cfs_rq->nr_running + 1); }
347
0
mptctl_gettargetinfo (MPT_ADAPTER *ioc, unsigned long arg) { struct mpt_ioctl_targetinfo __user *uarg = (void __user *) arg; struct mpt_ioctl_targetinfo karg; VirtDevice *vdevice; char *pmem; int *pdata; int numDevices = 0; int lun; int maxWordsLeft; int numBytes; u8 port; struct scsi_device *sdev; if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_targetinfo))) { printk(KERN_ERR MYNAM "%s@%d::mptctl_gettargetinfo - " "Unable to read in mpt_ioctl_targetinfo struct @ %p\n", __FILE__, __LINE__, uarg); return -EFAULT; } dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_gettargetinfo called.\n", ioc->name)); /* Get the port number and set the maximum number of bytes * in the returned structure. * Ignore the port setting. */ numBytes = karg.hdr.maxDataSize - sizeof(mpt_ioctl_header); maxWordsLeft = numBytes/sizeof(int); port = karg.hdr.port; if (maxWordsLeft <= 0) { printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_gettargetinfo() - no memory available!\n", ioc->name, __FILE__, __LINE__); return -ENOMEM; } /* Fill in the data and return the structure to the calling * program */ /* struct mpt_ioctl_targetinfo does not contain sufficient space * for the target structures so when the IOCTL is called, there is * not sufficient stack space for the structure. Allocate memory, * populate the memory, copy back to the user, then free memory. * targetInfo format: * bits 31-24: reserved * 23-16: LUN * 15- 8: Bus Number * 7- 0: Target ID */ pmem = kzalloc(numBytes, GFP_KERNEL); if (!pmem) { printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_gettargetinfo() - no memory available!\n", ioc->name, __FILE__, __LINE__); return -ENOMEM; } pdata = (int *) pmem; /* Get number of devices */ if (ioc->sh){ shost_for_each_device(sdev, ioc->sh) { if (!maxWordsLeft) continue; vdevice = sdev->hostdata; if (vdevice == NULL || vdevice->vtarget == NULL) continue; if (vdevice->vtarget->tflags & MPT_TARGET_FLAGS_RAID_COMPONENT) continue; lun = (vdevice->vtarget->raidVolume) ? 0x80 : vdevice->lun; *pdata = (((u8)lun << 16) + (vdevice->vtarget->channel << 8) + (vdevice->vtarget->id )); pdata++; numDevices++; --maxWordsLeft; } } karg.numDevices = numDevices; /* Copy part of the data from kernel memory to user memory */ if (copy_to_user((char __user *)arg, &karg, sizeof(struct mpt_ioctl_targetinfo))) { printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_gettargetinfo - " "Unable to write out mpt_ioctl_targetinfo struct @ %p\n", ioc->name, __FILE__, __LINE__, uarg); kfree(pmem); return -EFAULT; } /* Copy the remaining data from kernel memory to user memory */ if (copy_to_user(uarg->targetInfo, pmem, numBytes)) { printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_gettargetinfo - " "Unable to write out mpt_ioctl_targetinfo struct @ %p\n", ioc->name, __FILE__, __LINE__, pdata); kfree(pmem); return -EFAULT; } kfree(pmem); return 0; }
348
0
static void * Type_Data_Read(struct _cms_typehandler_struct *self, cmsIOHANDLER *io, cmsUInt32Number *nItems, cmsUInt32Number SizeOfTag) { cmsICCData *BinData; cmsUInt32Number LenOfData; *nItems = 0; if (SizeOfTag < sizeof(cmsUInt32Number)) return NULL; LenOfData = SizeOfTag - sizeof(cmsUInt32Number); if (LenOfData > INT_MAX) return NULL; BinData = (cmsICCData *) _cmsMalloc(self->ContextID, sizeof(cmsICCData) + LenOfData - 1); if (BinData == NULL) return NULL; BinData->len = LenOfData; if (!_cmsReadUInt32Number(io, &BinData->flag)) { _cmsFree(self->ContextID, BinData); return NULL; } if (io->Read(io, BinData->data, sizeof(cmsUInt8Number), LenOfData) != LenOfData) { _cmsFree(self->ContextID, BinData); return NULL; } *nItems = 1; return (void *) BinData; }
349
0
static void vring_desc_read(VirtIODevice *vdev, VRingDesc *desc, hwaddr desc_pa, int i) { address_space_read(&address_space_memory, desc_pa + i * sizeof(VRingDesc), MEMTXATTRS_UNSPECIFIED, (void *)desc, sizeof(VRingDesc)); virtio_tswap64s(vdev, &desc->addr); virtio_tswap32s(vdev, &desc->len); virtio_tswap16s(vdev, &desc->flags); virtio_tswap16s(vdev, &desc->next); }
350
1
spnego_gss_export_sec_context( OM_uint32 *minor_status, gss_ctx_id_t *context_handle, gss_buffer_t interprocess_token) { OM_uint32 ret; ret = gss_export_sec_context(minor_status, context_handle, interprocess_token); return (ret); }
351
1
static u64 __sched_vslice(unsigned long rq_weight, unsigned long nr_running) { u64 vslice = __sched_period(nr_running); vslice *= NICE_0_LOAD; do_div(vslice, rq_weight); return vslice; }
352
0
static AddressSpace *q35_host_dma_iommu(PCIBus *bus, void *opaque, int devfn) { IntelIOMMUState *s = opaque; VTDAddressSpace **pvtd_as; int bus_num = pci_bus_num(bus); assert(0 <= bus_num && bus_num <= VTD_PCI_BUS_MAX); assert(0 <= devfn && devfn <= VTD_PCI_DEVFN_MAX); pvtd_as = s->address_spaces[bus_num]; if (!pvtd_as) { /* No corresponding free() */ pvtd_as = g_malloc0(sizeof(VTDAddressSpace *) * VTD_PCI_DEVFN_MAX); s->address_spaces[bus_num] = pvtd_as; } if (!pvtd_as[devfn]) { pvtd_as[devfn] = g_malloc0(sizeof(VTDAddressSpace)); pvtd_as[devfn]->bus_num = (uint8_t)bus_num; pvtd_as[devfn]->devfn = (uint8_t)devfn; pvtd_as[devfn]->iommu_state = s; pvtd_as[devfn]->context_cache_entry.context_cache_gen = 0; memory_region_init_iommu(&pvtd_as[devfn]->iommu, OBJECT(s), &s->iommu_ops, "intel_iommu", UINT64_MAX); address_space_init(&pvtd_as[devfn]->as, &pvtd_as[devfn]->iommu, "intel_iommu"); } return &pvtd_as[devfn]->as; }
353
0
EVP_PKEY * d2i_PrivateKey_fp(FILE *fp, EVP_PKEY **a) { return ASN1_d2i_fp_of(EVP_PKEY, EVP_PKEY_new, d2i_AutoPrivateKey, fp, a); }
354
1
static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se) { return calc_delta_mine(__sched_period(cfs_rq->nr_running), se->load.weight, &cfs_rq->load); }
355
1
mptctl_gettargetinfo (unsigned long arg) { struct mpt_ioctl_targetinfo __user *uarg = (void __user *) arg; struct mpt_ioctl_targetinfo karg; MPT_ADAPTER *ioc; VirtDevice *vdevice; char *pmem; int *pdata; int iocnum; int numDevices = 0; int lun; int maxWordsLeft; int numBytes; u8 port; struct scsi_device *sdev; if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_targetinfo))) { printk(KERN_ERR MYNAM "%s@%d::mptctl_gettargetinfo - " "Unable to read in mpt_ioctl_targetinfo struct @ %p\n", __FILE__, __LINE__, uarg); return -EFAULT; } if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || (ioc == NULL)) { printk(KERN_DEBUG MYNAM "%s::mptctl_gettargetinfo() @%d - ioc%d not found!\n", __FILE__, __LINE__, iocnum); return -ENODEV; } dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_gettargetinfo called.\n", ioc->name)); /* Get the port number and set the maximum number of bytes * in the returned structure. * Ignore the port setting. */ numBytes = karg.hdr.maxDataSize - sizeof(mpt_ioctl_header); maxWordsLeft = numBytes/sizeof(int); port = karg.hdr.port; if (maxWordsLeft <= 0) { printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_gettargetinfo() - no memory available!\n", ioc->name, __FILE__, __LINE__); return -ENOMEM; } /* Fill in the data and return the structure to the calling * program */ /* struct mpt_ioctl_targetinfo does not contain sufficient space * for the target structures so when the IOCTL is called, there is * not sufficient stack space for the structure. Allocate memory, * populate the memory, copy back to the user, then free memory. * targetInfo format: * bits 31-24: reserved * 23-16: LUN * 15- 8: Bus Number * 7- 0: Target ID */ pmem = kzalloc(numBytes, GFP_KERNEL); if (!pmem) { printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_gettargetinfo() - no memory available!\n", ioc->name, __FILE__, __LINE__); return -ENOMEM; } pdata = (int *) pmem; /* Get number of devices */ if (ioc->sh){ shost_for_each_device(sdev, ioc->sh) { if (!maxWordsLeft) continue; vdevice = sdev->hostdata; if (vdevice == NULL || vdevice->vtarget == NULL) continue; if (vdevice->vtarget->tflags & MPT_TARGET_FLAGS_RAID_COMPONENT) continue; lun = (vdevice->vtarget->raidVolume) ? 0x80 : vdevice->lun; *pdata = (((u8)lun << 16) + (vdevice->vtarget->channel << 8) + (vdevice->vtarget->id )); pdata++; numDevices++; --maxWordsLeft; } } karg.numDevices = numDevices; /* Copy part of the data from kernel memory to user memory */ if (copy_to_user((char __user *)arg, &karg, sizeof(struct mpt_ioctl_targetinfo))) { printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_gettargetinfo - " "Unable to write out mpt_ioctl_targetinfo struct @ %p\n", ioc->name, __FILE__, __LINE__, uarg); kfree(pmem); return -EFAULT; } /* Copy the remaining data from kernel memory to user memory */ if (copy_to_user(uarg->targetInfo, pmem, numBytes)) { printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_gettargetinfo - " "Unable to write out mpt_ioctl_targetinfo struct @ %p\n", ioc->name, __FILE__, __LINE__, pdata); kfree(pmem); return -EFAULT; } kfree(pmem); return 0; }
356
0
static inline uint64_t vmdk_find_offset_in_cluster(VmdkExtent *extent, int64_t offset) { uint64_t offset_in_cluster, extent_begin_offset, extent_relative_offset; uint64_t cluster_size = extent->cluster_sectors * BDRV_SECTOR_SIZE; extent_begin_offset = (extent->end_sector - extent->sectors) * BDRV_SECTOR_SIZE; extent_relative_offset = offset - extent_begin_offset; offset_in_cluster = extent_relative_offset % cluster_size; return offset_in_cluster; }
357
1
static u64 sched_vslice_add(struct cfs_rq *cfs_rq, struct sched_entity *se) { unsigned long nr_running = cfs_rq->nr_running; unsigned long weight; u64 vslice; if (!se->on_rq) nr_running++; vslice = __sched_period(nr_running); for_each_sched_entity(se) { cfs_rq = cfs_rq_of(se); weight = cfs_rq->load.weight; if (!se->on_rq) weight += se->load.weight; vslice *= NICE_0_LOAD; do_div(vslice, weight); } return vslice; }
359
0
static int dissect_h225_T_invalidTerminalAliases(tvbuff_t *tvb _U_, int offset _U_, asn1_ctx_t *actx _U_, proto_tree *tree _U_, int hf_index _U_) { offset = dissect_per_sequence(tvb, offset, actx, tree, hf_index, ett_h225_T_invalidTerminalAliases, T_invalidTerminalAliases_sequence); return offset; }
360
0
mptctl_hp_hostinfo(MPT_ADAPTER *ioc, unsigned long arg, unsigned int data_size) { hp_host_info_t __user *uarg = (void __user *) arg; struct pci_dev *pdev; char *pbuf=NULL; dma_addr_t buf_dma; hp_host_info_t karg; CONFIGPARMS cfg; ConfigPageHeader_t hdr; int rc, cim_rev; ToolboxIstwiReadWriteRequest_t *IstwiRWRequest; MPT_FRAME_HDR *mf = NULL; unsigned long timeleft; int retval; u32 msgcontext; /* Reset long to int. Should affect IA64 and SPARC only */ if (data_size == sizeof(hp_host_info_t)) cim_rev = 1; else if (data_size == sizeof(hp_host_info_rev0_t)) cim_rev = 0; /* obsolete */ else return -EFAULT; if (copy_from_user(&karg, uarg, sizeof(hp_host_info_t))) { printk(KERN_ERR MYNAM "%s@%d::mptctl_hp_host_info - " "Unable to read in hp_host_info struct @ %p\n", __FILE__, __LINE__, uarg); return -EFAULT; } dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": mptctl_hp_hostinfo called.\n", ioc->name)); /* Fill in the data and return the structure to the calling * program */ pdev = (struct pci_dev *) ioc->pcidev; karg.vendor = pdev->vendor; karg.device = pdev->device; karg.subsystem_id = pdev->subsystem_device; karg.subsystem_vendor = pdev->subsystem_vendor; karg.devfn = pdev->devfn; karg.bus = pdev->bus->number; /* Save the SCSI host no. if * SCSI driver loaded */ if (ioc->sh != NULL) karg.host_no = ioc->sh->host_no; else karg.host_no = -1; /* Reformat the fw_version into a string */ snprintf(karg.fw_version, sizeof(karg.fw_version), "%.2hhu.%.2hhu.%.2hhu.%.2hhu", ioc->facts.FWVersion.Struct.Major, ioc->facts.FWVersion.Struct.Minor, ioc->facts.FWVersion.Struct.Unit, ioc->facts.FWVersion.Struct.Dev); /* Issue a config request to get the device serial number */ hdr.PageVersion = 0; hdr.PageLength = 0; hdr.PageNumber = 0; hdr.PageType = MPI_CONFIG_PAGETYPE_MANUFACTURING; cfg.cfghdr.hdr = &hdr; cfg.physAddr = -1; cfg.pageAddr = 0; cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; cfg.dir = 0; /* read */ cfg.timeout = 10; strncpy(karg.serial_number, " ", 24); if (mpt_config(ioc, &cfg) == 0) { if (cfg.cfghdr.hdr->PageLength > 0) { /* Issue the second config page request */ cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; pbuf = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4, &buf_dma); if (pbuf) { cfg.physAddr = buf_dma; if (mpt_config(ioc, &cfg) == 0) { ManufacturingPage0_t *pdata = (ManufacturingPage0_t *) pbuf; if (strlen(pdata->BoardTracerNumber) > 1) { strlcpy(karg.serial_number, pdata->BoardTracerNumber, 24); } } pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, pbuf, buf_dma); pbuf = NULL; } } } rc = mpt_GetIocState(ioc, 1); switch (rc) { case MPI_IOC_STATE_OPERATIONAL: karg.ioc_status = HP_STATUS_OK; break; case MPI_IOC_STATE_FAULT: karg.ioc_status = HP_STATUS_FAILED; break; case MPI_IOC_STATE_RESET: case MPI_IOC_STATE_READY: default: karg.ioc_status = HP_STATUS_OTHER; break; } karg.base_io_addr = pci_resource_start(pdev, 0); if ((ioc->bus_type == SAS) || (ioc->bus_type == FC)) karg.bus_phys_width = HP_BUS_WIDTH_UNK; else karg.bus_phys_width = HP_BUS_WIDTH_16; karg.hard_resets = 0; karg.soft_resets = 0; karg.timeouts = 0; if (ioc->sh != NULL) { MPT_SCSI_HOST *hd = shost_priv(ioc->sh); if (hd && (cim_rev == 1)) { karg.hard_resets = ioc->hard_resets; karg.soft_resets = ioc->soft_resets; karg.timeouts = ioc->timeouts; } } /* * Gather ISTWI(Industry Standard Two Wire Interface) Data */ if ((mf = mpt_get_msg_frame(mptctl_id, ioc)) == NULL) { dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, no msg frames!!\n", ioc->name, __func__)); goto out; } IstwiRWRequest = (ToolboxIstwiReadWriteRequest_t *)mf; 
msgcontext = IstwiRWRequest->MsgContext; memset(IstwiRWRequest,0,sizeof(ToolboxIstwiReadWriteRequest_t)); IstwiRWRequest->MsgContext = msgcontext; IstwiRWRequest->Function = MPI_FUNCTION_TOOLBOX; IstwiRWRequest->Tool = MPI_TOOLBOX_ISTWI_READ_WRITE_TOOL; IstwiRWRequest->Flags = MPI_TB_ISTWI_FLAGS_READ; IstwiRWRequest->NumAddressBytes = 0x01; IstwiRWRequest->DataLength = cpu_to_le16(0x04); if (pdev->devfn & 1) IstwiRWRequest->DeviceAddr = 0xB2; else IstwiRWRequest->DeviceAddr = 0xB0; pbuf = pci_alloc_consistent(ioc->pcidev, 4, &buf_dma); if (!pbuf) goto out; ioc->add_sge((char *)&IstwiRWRequest->SGL, (MPT_SGE_FLAGS_SSIMPLE_READ|4), buf_dma); retval = 0; SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context, IstwiRWRequest->MsgContext); INITIALIZE_MGMT_STATUS(ioc->ioctl_cmds.status) mpt_put_msg_frame(mptctl_id, ioc, mf); retry_wait: timeleft = wait_for_completion_timeout(&ioc->ioctl_cmds.done, HZ*MPT_IOCTL_DEFAULT_TIMEOUT); if (!(ioc->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) { retval = -ETIME; printk(MYIOC_s_WARN_FMT "%s: failed\n", ioc->name, __func__); if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) { mpt_free_msg_frame(ioc, mf); goto out; } if (!timeleft) { printk(MYIOC_s_WARN_FMT "HOST INFO command timeout, doorbell=0x%08x\n", ioc->name, mpt_GetIocState(ioc, 0)); mptctl_timeout_expired(ioc, mf); } else goto retry_wait; goto out; } /* *ISTWI Data Definition * pbuf[0] = FW_VERSION = 0x4 * pbuf[1] = Bay Count = 6 or 4 or 2, depending on * the config, you should be seeing one out of these three values * pbuf[2] = Drive Installed Map = bit pattern depend on which * bays have drives in them * pbuf[3] = Checksum (0x100 = (byte0 + byte2 + byte3) */ if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_RF_VALID) karg.rsvd = *(u32 *)pbuf; out: CLEAR_MGMT_STATUS(ioc->ioctl_cmds.status) SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context, 0); if (pbuf) pci_free_consistent(ioc->pcidev, 4, pbuf, buf_dma); /* Copy the data from kernel memory to user memory */ if (copy_to_user((char __user *)arg, &karg, sizeof(hp_host_info_t))) { printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_hpgethostinfo - " "Unable to write out hp_host_info @ %p\n", ioc->name, __FILE__, __LINE__, uarg); return -EFAULT; } return 0; }
361
0
static int test_streaming(xd3_stream *in_stream, uint8_t *encbuf, uint8_t *decbuf, uint8_t *delbuf, usize_t megs) { xd3_stream estream, dstream; int ret; usize_t i, delsize, decsize; xd3_config cfg; xd3_init_config(&cfg, in_stream->flags); cfg.flags |= XD3_COMPLEVEL_6; if ((ret = xd3_config_stream(&estream, &cfg)) || (ret = xd3_config_stream(&dstream, &cfg))) { goto fail; } for (i = 0; i < megs; i += 1) { ((usize_t *) encbuf)[0] = i; if ((i % 200) == 199) { DOT(); } if ((ret = xd3_process_stream(1, &estream, xd3_encode_input, 0, encbuf, 1 << 20, delbuf, &delsize, 1 << 20))) { in_stream->msg = estream.msg; goto fail; } if ((ret = xd3_process_stream(0, &dstream, xd3_decode_input, 0, delbuf, delsize, decbuf, &decsize, 1 << 20))) { in_stream->msg = dstream.msg; goto fail; } if (decsize != 1 << 20 || memcmp(encbuf, decbuf, 1 << 20) != 0) { in_stream->msg = "wrong result"; ret = XD3_INTERNAL; goto fail; } } if ((ret = xd3_close_stream(&estream)) || (ret = xd3_close_stream(&dstream))) { goto fail; } fail: xd3_free_stream(&estream); xd3_free_stream(&dstream); return ret; }
362
1
static unsigned long wakeup_gran(struct sched_entity *se) { unsigned long gran = sysctl_sched_wakeup_granularity; /* * More easily preempt - nice tasks, while not making * it harder for + nice tasks. */ if (unlikely(se->load.weight > NICE_0_LOAD)) gran = calc_delta_fair(gran, &se->load); return gran; }
363
1
mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size) { hp_host_info_t __user *uarg = (void __user *) arg; MPT_ADAPTER *ioc; struct pci_dev *pdev; char *pbuf=NULL; dma_addr_t buf_dma; hp_host_info_t karg; CONFIGPARMS cfg; ConfigPageHeader_t hdr; int iocnum; int rc, cim_rev; ToolboxIstwiReadWriteRequest_t *IstwiRWRequest; MPT_FRAME_HDR *mf = NULL; unsigned long timeleft; int retval; u32 msgcontext; /* Reset long to int. Should affect IA64 and SPARC only */ if (data_size == sizeof(hp_host_info_t)) cim_rev = 1; else if (data_size == sizeof(hp_host_info_rev0_t)) cim_rev = 0; /* obsolete */ else return -EFAULT; if (copy_from_user(&karg, uarg, sizeof(hp_host_info_t))) { printk(KERN_ERR MYNAM "%s@%d::mptctl_hp_host_info - " "Unable to read in hp_host_info struct @ %p\n", __FILE__, __LINE__, uarg); return -EFAULT; } if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) || (ioc == NULL)) { printk(KERN_DEBUG MYNAM "%s::mptctl_hp_hostinfo() @%d - ioc%d not found!\n", __FILE__, __LINE__, iocnum); return -ENODEV; } dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": mptctl_hp_hostinfo called.\n", ioc->name)); /* Fill in the data and return the structure to the calling * program */ pdev = (struct pci_dev *) ioc->pcidev; karg.vendor = pdev->vendor; karg.device = pdev->device; karg.subsystem_id = pdev->subsystem_device; karg.subsystem_vendor = pdev->subsystem_vendor; karg.devfn = pdev->devfn; karg.bus = pdev->bus->number; /* Save the SCSI host no. if * SCSI driver loaded */ if (ioc->sh != NULL) karg.host_no = ioc->sh->host_no; else karg.host_no = -1; /* Reformat the fw_version into a string */ snprintf(karg.fw_version, sizeof(karg.fw_version), "%.2hhu.%.2hhu.%.2hhu.%.2hhu", ioc->facts.FWVersion.Struct.Major, ioc->facts.FWVersion.Struct.Minor, ioc->facts.FWVersion.Struct.Unit, ioc->facts.FWVersion.Struct.Dev); /* Issue a config request to get the device serial number */ hdr.PageVersion = 0; hdr.PageLength = 0; hdr.PageNumber = 0; hdr.PageType = MPI_CONFIG_PAGETYPE_MANUFACTURING; cfg.cfghdr.hdr = &hdr; cfg.physAddr = -1; cfg.pageAddr = 0; cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; cfg.dir = 0; /* read */ cfg.timeout = 10; strncpy(karg.serial_number, " ", 24); if (mpt_config(ioc, &cfg) == 0) { if (cfg.cfghdr.hdr->PageLength > 0) { /* Issue the second config page request */ cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; pbuf = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4, &buf_dma); if (pbuf) { cfg.physAddr = buf_dma; if (mpt_config(ioc, &cfg) == 0) { ManufacturingPage0_t *pdata = (ManufacturingPage0_t *) pbuf; if (strlen(pdata->BoardTracerNumber) > 1) { strlcpy(karg.serial_number, pdata->BoardTracerNumber, 24); } } pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, pbuf, buf_dma); pbuf = NULL; } } } rc = mpt_GetIocState(ioc, 1); switch (rc) { case MPI_IOC_STATE_OPERATIONAL: karg.ioc_status = HP_STATUS_OK; break; case MPI_IOC_STATE_FAULT: karg.ioc_status = HP_STATUS_FAILED; break; case MPI_IOC_STATE_RESET: case MPI_IOC_STATE_READY: default: karg.ioc_status = HP_STATUS_OTHER; break; } karg.base_io_addr = pci_resource_start(pdev, 0); if ((ioc->bus_type == SAS) || (ioc->bus_type == FC)) karg.bus_phys_width = HP_BUS_WIDTH_UNK; else karg.bus_phys_width = HP_BUS_WIDTH_16; karg.hard_resets = 0; karg.soft_resets = 0; karg.timeouts = 0; if (ioc->sh != NULL) { MPT_SCSI_HOST *hd = shost_priv(ioc->sh); if (hd && (cim_rev == 1)) { karg.hard_resets = ioc->hard_resets; karg.soft_resets = ioc->soft_resets; karg.timeouts = ioc->timeouts; } } /* * Gather ISTWI(Industry Standard Two Wire Interface) Data 
*/ if ((mf = mpt_get_msg_frame(mptctl_id, ioc)) == NULL) { dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, no msg frames!!\n", ioc->name, __func__)); goto out; } IstwiRWRequest = (ToolboxIstwiReadWriteRequest_t *)mf; msgcontext = IstwiRWRequest->MsgContext; memset(IstwiRWRequest,0,sizeof(ToolboxIstwiReadWriteRequest_t)); IstwiRWRequest->MsgContext = msgcontext; IstwiRWRequest->Function = MPI_FUNCTION_TOOLBOX; IstwiRWRequest->Tool = MPI_TOOLBOX_ISTWI_READ_WRITE_TOOL; IstwiRWRequest->Flags = MPI_TB_ISTWI_FLAGS_READ; IstwiRWRequest->NumAddressBytes = 0x01; IstwiRWRequest->DataLength = cpu_to_le16(0x04); if (pdev->devfn & 1) IstwiRWRequest->DeviceAddr = 0xB2; else IstwiRWRequest->DeviceAddr = 0xB0; pbuf = pci_alloc_consistent(ioc->pcidev, 4, &buf_dma); if (!pbuf) goto out; ioc->add_sge((char *)&IstwiRWRequest->SGL, (MPT_SGE_FLAGS_SSIMPLE_READ|4), buf_dma); retval = 0; SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context, IstwiRWRequest->MsgContext); INITIALIZE_MGMT_STATUS(ioc->ioctl_cmds.status) mpt_put_msg_frame(mptctl_id, ioc, mf); retry_wait: timeleft = wait_for_completion_timeout(&ioc->ioctl_cmds.done, HZ*MPT_IOCTL_DEFAULT_TIMEOUT); if (!(ioc->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) { retval = -ETIME; printk(MYIOC_s_WARN_FMT "%s: failed\n", ioc->name, __func__); if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) { mpt_free_msg_frame(ioc, mf); goto out; } if (!timeleft) { printk(MYIOC_s_WARN_FMT "HOST INFO command timeout, doorbell=0x%08x\n", ioc->name, mpt_GetIocState(ioc, 0)); mptctl_timeout_expired(ioc, mf); } else goto retry_wait; goto out; } /* *ISTWI Data Definition * pbuf[0] = FW_VERSION = 0x4 * pbuf[1] = Bay Count = 6 or 4 or 2, depending on * the config, you should be seeing one out of these three values * pbuf[2] = Drive Installed Map = bit pattern depend on which * bays have drives in them * pbuf[3] = Checksum (0x100 = (byte0 + byte2 + byte3) */ if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_RF_VALID) karg.rsvd = *(u32 *)pbuf; out: CLEAR_MGMT_STATUS(ioc->ioctl_cmds.status) SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context, 0); if (pbuf) pci_free_consistent(ioc->pcidev, 4, pbuf, buf_dma); /* Copy the data from kernel memory to user memory */ if (copy_to_user((char __user *)arg, &karg, sizeof(hp_host_info_t))) { printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_hpgethostinfo - " "Unable to write out hp_host_info @ %p\n", ioc->name, __FILE__, __LINE__, uarg); return -EFAULT; } return 0; }
364
0
static unsigned int dec_move_pr(DisasContext *dc) { TCGv t0; DIS(fprintf (logfile, "move $p%u, $r%u\n", dc->op1, dc->op2)); cris_cc_mask(dc, 0); if (dc->op2 == PR_CCS) cris_evaluate_flags(dc); t0 = tcg_temp_new(TCG_TYPE_TL); t_gen_mov_TN_preg(t0, dc->op2); cris_alu(dc, CC_OP_MOVE, cpu_R[dc->op1], cpu_R[dc->op1], t0, preg_sizes[dc->op2]); tcg_temp_free(t0); return 2; }
365
0
static void U_CALLCONV _LMBCSOpen##n(UConverter *_this, UConverterLoadArgs *pArgs, UErrorCode *err) \ { _LMBCSOpenWorker(_this, pArgs, err, n); } static void _LMBCSOpenWorker(UConverter *_this, UConverterLoadArgs *pArgs, UErrorCode *err, ulmbcs_byte_t OptGroup) { UConverterDataLMBCS *extraInfo = (UConverterDataLMBCS *) uprv_malloc(sizeof(UConverterDataLMBCS)); _this->extraInfo = extraInfo; if (extraInfo != NULL) { UConverterNamePieces stackPieces; UConverterLoadArgs stackArgs = UCNV_LOAD_ARGS_INITIALIZER; ulmbcs_byte_t i; uprv_memset(extraInfo, 0, sizeof(UConverterDataLMBCS)); stackArgs.onlyTestIsLoadable = pArgs->onlyTestIsLoadable; for (i = 0; i <= ULMBCS_GRP_LAST && U_SUCCESS(*err); i++) { if (OptGroupByteToCPName[i] != NULL) { extraInfo->OptGrpConverter[i] = ucnv_loadSharedData(OptGroupByteToCPName[i], &stackPieces, &stackArgs, err); } } if (U_FAILURE(*err) || pArgs->onlyTestIsLoadable) { _LMBCSClose(_this); return; } extraInfo->OptGroup = OptGroup; extraInfo->localeConverterIndex = FindLMBCSLocale(pArgs->locale); } else { *err = U_MEMORY_ALLOCATION_ERROR; } }
U_CDECL_BEGIN static void U_CALLCONV _LMBCSClose(UConverter *_this) { if (_this->extraInfo != NULL) { ulmbcs_byte_t Ix; UConverterDataLMBCS *extraInfo = (UConverterDataLMBCS *) _this->extraInfo; for (Ix = 0; Ix <= ULMBCS_GRP_LAST; Ix++) { if (extraInfo->OptGrpConverter[Ix] != NULL) ucnv_unloadSharedDataIfReady(extraInfo->OptGrpConverter[Ix]); } if (!_this->isExtraLocal) { uprv_free(_this->extraInfo); _this->extraInfo = NULL; } } } typedef struct LMBCSClone { UConverter cnv; UConverterDataLMBCS lmbcs; } LMBCSClone; static UConverter * U_CALLCONV _LMBCSSafeClone(const UConverter *cnv, void *stackBuffer, int32_t *pBufferSize, UErrorCode *status) { (void) status; LMBCSClone *newLMBCS; UConverterDataLMBCS *extraInfo; int32_t i; if (*pBufferSize <= 0) { *pBufferSize = (int32_t) sizeof(LMBCSClone); return NULL; } extraInfo = (UConverterDataLMBCS *) cnv->extraInfo; newLMBCS = (LMBCSClone *) stackBuffer; uprv_memcpy(&newLMBCS->lmbcs, extraInfo, sizeof(UConverterDataLMBCS)); for (i = 0; i <= ULMBCS_GRP_LAST; ++i) { if (extraInfo->OptGrpConverter[i] != NULL) { ucnv_incrementRefCount(extraInfo->OptGrpConverter[i]); } } newLMBCS->cnv.extraInfo = &newLMBCS->lmbcs; newLMBCS->cnv.isExtraLocal = TRUE; return &newLMBCS->cnv; }
static size_t LMBCSConversionWorker(UConverterDataLMBCS *extraInfo, ulmbcs_byte_t group, ulmbcs_byte_t *pStartLMBCS, UChar *pUniChar, ulmbcs_byte_t *lastConverterIndex, UBool *groups_tried) { ulmbcs_byte_t *pLMBCS = pStartLMBCS; UConverterSharedData *xcnv = extraInfo->OptGrpConverter[group]; int bytesConverted; uint32_t value; ulmbcs_byte_t firstByte; U_ASSERT(xcnv); U_ASSERT(group < ULMBCS_GRP_UNICODE); bytesConverted = ucnv_MBCSFromUChar32(xcnv, *pUniChar, &value, FALSE); if (bytesConverted > 0) { firstByte = (ulmbcs_byte_t)(value >> ((bytesConverted - 1) * 8)); } else { groups_tried[group] = TRUE; return 0; } *lastConverterIndex = group; U_ASSERT((firstByte <= ULMBCS_C0END) || (firstByte >= ULMBCS_C1START) || (group == ULMBCS_GRP_EXCEPT)); if (group != ULMBCS_GRP_EXCEPT && extraInfo->OptGroup != group) { *pLMBCS++ = group; if (bytesConverted == 1 && group >= ULMBCS_DOUBLEOPTGROUP_START) { *pLMBCS++ = group; } } if (bytesConverted == 1 && firstByte < 0x20) return 0; switch (bytesConverted) { case 4: *pLMBCS++ = (ulmbcs_byte_t)(value >> 24); U_FALLTHROUGH; case 3: *pLMBCS++ = (ulmbcs_byte_t)(value >> 16); U_FALLTHROUGH; case 2: *pLMBCS++ = (ulmbcs_byte_t)(value >> 8); U_FALLTHROUGH; case 1: *pLMBCS++ = (ulmbcs_byte_t) value; U_FALLTHROUGH; default: break; } return (pLMBCS - pStartLMBCS); } static size_t LMBCSConvertUni(ulmbcs_byte_t *pLMBCS, UChar uniChar) { uint8_t LowCh = (uint8_t)(uniChar & 0x00FF); uint8_t HighCh = (uint8_t)(uniChar >> 8); *pLMBCS++ = ULMBCS_GRP_UNICODE; if (LowCh == 0) { *pLMBCS++ = ULMBCS_UNICOMPATZERO; *pLMBCS++ = HighCh; } else { *pLMBCS++ = HighCh; *pLMBCS++ = LowCh; } return ULMBCS_UNICODE_SIZE; }
static void U_CALLCONV _LMBCSFromUnicode(UConverterFromUnicodeArgs *args, UErrorCode *err) { ulmbcs_byte_t lastConverterIndex = 0; UChar uniChar; ulmbcs_byte_t LMBCS[ULMBCS_CHARSIZE_MAX]; ulmbcs_byte_t *pLMBCS; int32_t bytes_written; UBool groups_tried[ULMBCS_GRP_LAST + 1]; UConverterDataLMBCS *extraInfo = (UConverterDataLMBCS *) args->converter->extraInfo; int sourceIndex = 0; ulmbcs_byte_t OldConverterIndex = 0; while (args->source < args->sourceLimit && !U_FAILURE(*err)) { OldConverterIndex = extraInfo->localeConverterIndex; if (args->target >= args->targetLimit) { *err = U_BUFFER_OVERFLOW_ERROR; break; } uniChar = *(args->source); bytes_written = 0; pLMBCS = LMBCS; if ((uniChar >= 0x80) && (uniChar <= 0xff) && (uniChar != 0xB1) && (uniChar != 0xD7) && (uniChar != 0xF7) && (uniChar != 0xB0) && (uniChar != 0xB4) && (uniChar != 0xB6) && (uniChar != 0xA7) && (uniChar != 0xA8)) { extraInfo->localeConverterIndex = ULMBCS_GRP_L1; } if (((uniChar > ULMBCS_C0END) && (uniChar < ULMBCS_C1START)) || uniChar == 0 || uniChar == ULMBCS_HT || uniChar == ULMBCS_CR || uniChar == ULMBCS_LF || uniChar == ULMBCS_123SYSTEMRANGE) { *pLMBCS++ = (ulmbcs_byte_t) uniChar; bytes_written = 1; } if (!bytes_written) { ulmbcs_byte_t group = FindLMBCSUniRange(uniChar); if (group == ULMBCS_GRP_UNICODE) { pLMBCS += LMBCSConvertUni(pLMBCS, uniChar); bytes_written = (int32_t)(pLMBCS - LMBCS); } else if (group == ULMBCS_GRP_CTRL) { if (uniChar <= ULMBCS_C0END) { *pLMBCS++ = ULMBCS_GRP_CTRL; *pLMBCS++ = (ulmbcs_byte_t)(ULMBCS_CTRLOFFSET + uniChar); } else if (uniChar >= ULMBCS_C1START && uniChar <= ULMBCS_C1START + ULMBCS_CTRLOFFSET) { *pLMBCS++ = ULMBCS_GRP_CTRL; *pLMBCS++ = (ulmbcs_byte_t)(uniChar & 0x00FF); } bytes_written = (int32_t)(pLMBCS - LMBCS); } else if (group < ULMBCS_GRP_UNICODE) { bytes_written = (int32_t) LMBCSConversionWorker(extraInfo, group, pLMBCS, &uniChar, &lastConverterIndex, groups_tried); } if (!bytes_written) { uprv_memset(groups_tried, 0, sizeof(groups_tried)); if ((extraInfo->OptGroup != 1) && (ULMBCS_AMBIGUOUS_MATCH(group, extraInfo->OptGroup))) { if (extraInfo->localeConverterIndex < ULMBCS_DOUBLEOPTGROUP_START) { bytes_written = LMBCSConversionWorker(extraInfo, ULMBCS_GRP_L1, pLMBCS, &uniChar, &lastConverterIndex, groups_tried); if (!bytes_written) { bytes_written = LMBCSConversionWorker(extraInfo, ULMBCS_GRP_EXCEPT, pLMBCS, &uniChar, &lastConverterIndex, groups_tried); } if (!bytes_written) { bytes_written = LMBCSConversionWorker(extraInfo, extraInfo->localeConverterIndex, pLMBCS, &uniChar, &lastConverterIndex, groups_tried); } } else { bytes_written = LMBCSConversionWorker(extraInfo, extraInfo->localeConverterIndex, pLMBCS, &uniChar, &lastConverterIndex, groups_tried); } } if (!bytes_written && (extraInfo->localeConverterIndex) && (ULMBCS_AMBIGUOUS_MATCH(group, extraInfo->localeConverterIndex))) { bytes_written = (int32_t) LMBCSConversionWorker(extraInfo, extraInfo->localeConverterIndex, pLMBCS, &uniChar, &lastConverterIndex, groups_tried); } if (!bytes_written && (lastConverterIndex) && (ULMBCS_AMBIGUOUS_MATCH(group, lastConverterIndex))) { bytes_written = (int32_t) LMBCSConversionWorker(extraInfo, lastConverterIndex, pLMBCS, &uniChar, &lastConverterIndex, groups_tried); } if (!bytes_written) { ulmbcs_byte_t grp_start; ulmbcs_byte_t grp_end; ulmbcs_byte_t grp_ix; grp_start = (ulmbcs_byte_t)((group == ULMBCS_AMBIGUOUS_MBCS) ? ULMBCS_DOUBLEOPTGROUP_START : ULMBCS_GRP_L1); grp_end = (ulmbcs_byte_t)((group == ULMBCS_AMBIGUOUS_MBCS) ? ULMBCS_GRP_LAST : ULMBCS_GRP_TH); if (group == ULMBCS_AMBIGUOUS_ALL) { grp_start = ULMBCS_GRP_L1; grp_end = ULMBCS_GRP_LAST; } for (grp_ix = grp_start; grp_ix <= grp_end && !bytes_written; grp_ix++) { if (extraInfo->OptGrpConverter[grp_ix] && !groups_tried[grp_ix]) { bytes_written = (int32_t) LMBCSConversionWorker(extraInfo, grp_ix, pLMBCS, &uniChar, &lastConverterIndex, groups_tried); } } if (!bytes_written && grp_start == ULMBCS_GRP_L1) { bytes_written = (int32_t) LMBCSConversionWorker(extraInfo, ULMBCS_GRP_EXCEPT, pLMBCS, &uniChar, &lastConverterIndex, groups_tried); } } if (!bytes_written) { pLMBCS += LMBCSConvertUni(pLMBCS, uniChar); bytes_written = (int32_t)(pLMBCS - LMBCS); } } } args->source++; pLMBCS = LMBCS; while (args->target < args->targetLimit && bytes_written--) { *(args->target)++ = *pLMBCS++; if (args->offsets) { *(args->offsets)++ = sourceIndex; } } sourceIndex++; if (bytes_written > 0) { uint8_t *pErrorBuffer = args->converter->charErrorBuffer; *err = U_BUFFER_OVERFLOW_ERROR; args->converter->charErrorBufferLength = (int8_t) bytes_written; while (bytes_written--) { *pErrorBuffer++ = *pLMBCS++; } } extraInfo->localeConverterIndex = OldConverterIndex; } }
static UChar GetUniFromLMBCSUni(char const **ppLMBCSin) { uint8_t HighCh = *(*ppLMBCSin)++; uint8_t LowCh = *(*ppLMBCSin)++; if (HighCh == ULMBCS_UNICOMPATZERO) { HighCh = LowCh; LowCh = 0; } return (UChar)((HighCh << 8) | LowCh); } #define CHECK_SOURCE_LIMIT(index) if (args->source + index > args->sourceLimit) { *err = U_TRUNCATED_CHAR_FOUND; args->source = args->sourceLimit; return 0xffff; } static UChar32 U_CALLCONV _LMBCSGetNextUCharWorker(UConverterToUnicodeArgs *args, UErrorCode *err) { UChar32 uniChar = 0; ulmbcs_byte_t CurByte; if (args->source >= args->sourceLimit) { *err = U_ILLEGAL_ARGUMENT_ERROR; return 0xffff; } CurByte = *((ulmbcs_byte_t *)(args->source++)); if (((CurByte > ULMBCS_C0END) && (CurByte < ULMBCS_C1START)) || (CurByte == 0) || CurByte == ULMBCS_HT || CurByte == ULMBCS_CR || CurByte == ULMBCS_LF || CurByte == ULMBCS_123SYSTEMRANGE) { uniChar = CurByte; } else { UConverterDataLMBCS *extraInfo; ulmbcs_byte_t group; UConverterSharedData *cnv; if (CurByte == ULMBCS_GRP_CTRL) { ulmbcs_byte_t C0C1byte; CHECK_SOURCE_LIMIT(1); C0C1byte = *(args->source)++; uniChar = (C0C1byte < ULMBCS_C1START) ? C0C1byte - ULMBCS_CTRLOFFSET : C0C1byte; } else if (CurByte == ULMBCS_GRP_UNICODE) { CHECK_SOURCE_LIMIT(2); return GetUniFromLMBCSUni(&(args->source)); } else if (CurByte <= ULMBCS_CTRLOFFSET) { group = CurByte; extraInfo = (UConverterDataLMBCS *) args->converter->extraInfo; if (group > ULMBCS_GRP_LAST || (cnv = extraInfo->OptGrpConverter[group]) == NULL) { *err = U_INVALID_CHAR_FOUND; } else if (group >= ULMBCS_DOUBLEOPTGROUP_START) { CHECK_SOURCE_LIMIT(2); if (*args->source == group) { ++args->source; uniChar = ucnv_MBCSSimpleGetNextUChar(cnv, args->source, 1, FALSE); ++args->source; } else { uniChar = ucnv_MBCSSimpleGetNextUChar(cnv, args->source, 2, FALSE); args->source += 2; } } else { CHECK_SOURCE_LIMIT(1); CurByte = *(args->source)++; if (CurByte >= ULMBCS_C1START) { uniChar = _MBCS_SINGLE_SIMPLE_GET_NEXT_BMP(cnv, CurByte); } else { char bytes[2]; extraInfo = (UConverterDataLMBCS *) args->converter->extraInfo; cnv = extraInfo->OptGrpConverter[ULMBCS_GRP_EXCEPT]; bytes[0] = group; bytes[1] = CurByte; uniChar = ucnv_MBCSSimpleGetNextUChar(cnv, bytes, 2, FALSE); } } } else if (CurByte >= ULMBCS_C1START) { extraInfo = (UConverterDataLMBCS *) args->converter->extraInfo; group = extraInfo->OptGroup; cnv = extraInfo->OptGrpConverter[group]; if (group >= ULMBCS_DOUBLEOPTGROUP_START) { if (!ucnv_MBCSIsLeadByte(cnv, CurByte)) { CHECK_SOURCE_LIMIT(0); uniChar = ucnv_MBCSSimpleGetNextUChar(cnv, args->source - 1, 1, FALSE); } else { CHECK_SOURCE_LIMIT(1); uniChar = ucnv_MBCSSimpleGetNextUChar(cnv, args->source - 1, 2, FALSE); ++args->source; } } else { uniChar = _MBCS_SINGLE_SIMPLE_GET_NEXT_BMP(cnv, CurByte); } } } return uniChar; }
static void U_CALLCONV _LMBCSToUnicodeWithOffsets(UConverterToUnicodeArgs *args, UErrorCode *err) { char LMBCS[ULMBCS_CHARSIZE_MAX]; UChar uniChar; const char *saveSource; const char *pStartLMBCS = args->source; const char *errSource = NULL; int8_t savebytes = 0; while (U_SUCCESS(*err) && args->sourceLimit > args->source && args->targetLimit > args->target) { saveSource = args->source; if (args->converter->toULength) { const char *saveSourceLimit; size_t size_old = args->converter->toULength; size_t size_new_maybe_1 = sizeof(LMBCS) - size_old; size_t size_new_maybe_2 = args->sourceLimit - args->source; size_t size_new = (size_new_maybe_1 < size_new_maybe_2) ? size_new_maybe_1 : size_new_maybe_2; uprv_memcpy(LMBCS, args->converter->toUBytes, size_old); uprv_memcpy(LMBCS + size_old, args->source, size_new); saveSourceLimit = args->sourceLimit; args->source = errSource = LMBCS; args->sourceLimit = LMBCS + size_old + size_new; savebytes = (int8_t)(size_old + size_new); uniChar = (UChar) _LMBCSGetNextUCharWorker(args, err); args->source = saveSource + ((args->source - LMBCS) - size_old); args->sourceLimit = saveSourceLimit; if (*err == U_TRUNCATED_CHAR_FOUND) { args->converter->toULength = savebytes; uprv_memcpy(args->converter->toUBytes, LMBCS, savebytes); args->source = args->sourceLimit; *err = U_ZERO_ERROR; return; } else { args->converter->toULength = 0; } } else { errSource = saveSource; uniChar = (UChar) _LMBCSGetNextUCharWorker(args, err); savebytes = (int8_t)(args->source - saveSource); } if (U_SUCCESS(*err)) { if (uniChar < 0xfffe) { *(args->target)++ = uniChar; if (args->offsets) { *(args->offsets)++ = (int32_t)(saveSource - pStartLMBCS); } } else if (uniChar == 0xfffe) { *err = U_INVALID_CHAR_FOUND; } else { *err = U_ILLEGAL_CHAR_FOUND; } } } if (U_SUCCESS(*err) && args->sourceLimit > args->source && args->targetLimit <= args->target) { *err = U_BUFFER_OVERFLOW_ERROR; } else if (U_FAILURE(*err)) { args->converter->toULength = savebytes; if (savebytes > 0) { uprv_memcpy(args->converter->toUBytes, errSource, savebytes); } if (*err == U_TRUNCATED_CHAR_FOUND) { *err = U_ZERO_ERROR; } } } DEFINE_LMBCS_OPEN(1) DEFINE_LMBCS_OPEN(2)
366
0
static void virtio_balloon_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->init = virtio_balloon_init_pci;
    k->exit = virtio_balloon_exit_pci;
    k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    k->device_id = PCI_DEVICE_ID_VIRTIO_BALLOON;
    k->revision = VIRTIO_PCI_ABI_VERSION;
    k->class_id = PCI_CLASS_MEMORY_RAM;
    dc->alias = "virtio-balloon";
    dc->reset = virtio_pci_reset;
    dc->props = virtio_balloon_properties;
}
367
1
static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
    u64 slice = __sched_period(cfs_rq->nr_running);

    for_each_sched_entity(se) {
        cfs_rq = cfs_rq_of(se);
        slice *= se->load.weight;
        do_div(slice, cfs_rq->load.weight);
    }
    return slice;
}
368
1
spnego_gss_get_mic_iov(OM_uint32 *minor_status, gss_ctx_id_t context_handle,
                       gss_qop_t qop_req, gss_iov_buffer_desc *iov,
                       int iov_count)
{
    return gss_get_mic_iov(minor_status, context_handle, qop_req, iov,
                           iov_count);
}
369
0
mptctl_hp_targetinfo(MPT_ADAPTER *ioc, unsigned long arg)
{
    hp_target_info_t __user *uarg = (void __user *) arg;
    SCSIDevicePage0_t *pg0_alloc;
    SCSIDevicePage3_t *pg3_alloc;
    MPT_SCSI_HOST *hd = NULL;
    hp_target_info_t karg;
    int data_sz;
    dma_addr_t page_dma;
    CONFIGPARMS cfg;
    ConfigPageHeader_t hdr;
    int tmp, np, rc = 0;

    if (copy_from_user(&karg, uarg, sizeof(hp_target_info_t))) {
        printk(KERN_ERR MYNAM "%s@%d::mptctl_hp_targetinfo - "
            "Unable to read in hp_host_targetinfo struct @ %p\n",
            __FILE__, __LINE__, uarg);
        return -EFAULT;
    }

    if (karg.hdr.id >= MPT_MAX_FC_DEVICES)
        return -EINVAL;

    dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_hp_targetinfo called.\n",
        ioc->name));

    /* There is nothing to do for FCP parts. */
    if ((ioc->bus_type == SAS) || (ioc->bus_type == FC))
        return 0;

    if ((ioc->spi_data.sdp0length == 0) || (ioc->sh == NULL))
        return 0;

    if (ioc->sh->host_no != karg.hdr.host)
        return -ENODEV;

    /* Get the data transfer speeds */
    data_sz = ioc->spi_data.sdp0length * 4;
    pg0_alloc = (SCSIDevicePage0_t *) pci_alloc_consistent(ioc->pcidev,
        data_sz, &page_dma);
    if (pg0_alloc) {
        hdr.PageVersion = ioc->spi_data.sdp0version;
        hdr.PageLength = data_sz;
        hdr.PageNumber = 0;
        hdr.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE;

        cfg.cfghdr.hdr = &hdr;
        cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
        cfg.dir = 0;
        cfg.timeout = 0;
        cfg.physAddr = page_dma;
        cfg.pageAddr = (karg.hdr.channel << 8) | karg.hdr.id;

        if ((rc = mpt_config(ioc, &cfg)) == 0) {
            np = le32_to_cpu(pg0_alloc->NegotiatedParameters);
            karg.negotiated_width = np & MPI_SCSIDEVPAGE0_NP_WIDE ?
                HP_BUS_WIDTH_16 : HP_BUS_WIDTH_8;

            if (np & MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK) {
                tmp = (np & MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK) >> 8;
                if (tmp < 0x09)
                    karg.negotiated_speed = HP_DEV_SPEED_ULTRA320;
                else if (tmp <= 0x09)
                    karg.negotiated_speed = HP_DEV_SPEED_ULTRA160;
                else if (tmp <= 0x0A)
                    karg.negotiated_speed = HP_DEV_SPEED_ULTRA2;
                else if (tmp <= 0x0C)
                    karg.negotiated_speed = HP_DEV_SPEED_ULTRA;
                else if (tmp <= 0x25)
                    karg.negotiated_speed = HP_DEV_SPEED_FAST;
                else
                    karg.negotiated_speed = HP_DEV_SPEED_ASYNC;
            } else
                karg.negotiated_speed = HP_DEV_SPEED_ASYNC;
        }

        pci_free_consistent(ioc->pcidev, data_sz, (u8 *) pg0_alloc, page_dma);
    }

    /* Set defaults */
    karg.message_rejects = -1;
    karg.phase_errors = -1;
    karg.parity_errors = -1;
    karg.select_timeouts = -1;

    /* Get the target error parameters */
    hdr.PageVersion = 0;
    hdr.PageLength = 0;
    hdr.PageNumber = 3;
    hdr.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE;

    cfg.cfghdr.hdr = &hdr;
    cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
    cfg.dir = 0;
    cfg.timeout = 0;
    cfg.physAddr = -1;
    if ((mpt_config(ioc, &cfg) == 0) && (cfg.cfghdr.hdr->PageLength > 0)) {
        /* Issue the second config page request */
        cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
        data_sz = (int) cfg.cfghdr.hdr->PageLength * 4;
        pg3_alloc = (SCSIDevicePage3_t *) pci_alloc_consistent(
            ioc->pcidev, data_sz, &page_dma);
        if (pg3_alloc) {
            cfg.physAddr = page_dma;
            cfg.pageAddr = (karg.hdr.channel << 8) | karg.hdr.id;
            if ((rc = mpt_config(ioc, &cfg)) == 0) {
                karg.message_rejects = (u32) le16_to_cpu(pg3_alloc->MsgRejectCount);
                karg.phase_errors = (u32) le16_to_cpu(pg3_alloc->PhaseErrorCount);
                karg.parity_errors = (u32) le16_to_cpu(pg3_alloc->ParityErrorCount);
            }
            pci_free_consistent(ioc->pcidev, data_sz, (u8 *) pg3_alloc, page_dma);
        }
    }
    hd = shost_priv(ioc->sh);
    if (hd != NULL)
        karg.select_timeouts = hd->sel_timeout[karg.hdr.id];

    /* Copy the data from kernel memory to user memory */
    if (copy_to_user((char __user *)arg, &karg, sizeof(hp_target_info_t))) {
        printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_hp_target_info - "
            "Unable to write out mpt_ioctl_targetinfo struct @ %p\n",
            ioc->name, __FILE__, __LINE__, uarg);
        return -EFAULT;
    }

    return 0;
}
370