Dataset columns (string lengths observed across rows):
  rem      1 to 322k
  add      0 to 2.05M
  context  4 to 228k
  meta     156 to 215
def fmin_bfgs(f, x0, fprime=None, args=(), avegtol=1e-5, epsilon=_epsilon,
def fmin_bfgs(f, x0, fprime=None, args=(), maxgtol=1e-5, epsilon=_epsilon,
def fmin_bfgs(f, x0, fprime=None, args=(), avegtol=1e-5, epsilon=_epsilon,
              maxiter=None, full_output=0, disp=1, retall=0):
    """Minimize a function using the BFGS algorithm.

    Description:

      Optimize the function, f, whose gradient is given by fprime using the
      quasi-Newton method of Broyden, Fletcher, Goldfarb, and Shanno (BFGS)
      See Wright, and Nocedal 'Numerical Optimization', 1999, pg. 198.

    Inputs:

      f -- the Python function or method to be minimized.
      x0 -- the initial guess for the minimizer.
      fprime -- a function to compute the gradient of f.
      args -- extra arguments to f and fprime.
      avegtol -- minimum average value of gradient for stopping
      epsilon -- if fprime is approximated use this value for
                 the step size (can be scalar or vector)

    Outputs: (xopt, {fopt, gopt, Hopt, func_calls, grad_calls, warnflag}, <allvecs>)

      xopt -- the minimizer of f.
      fopt -- the value of f(xopt).
      gopt -- the value of f'(xopt).  (Should be near 0)
      Bopt -- the value of 1/f''(xopt).  (inverse hessian matrix)
      func_calls -- the number of function_calls.
      grad_calls -- the number of gradient calls.
      warnflag -- an integer warning flag:
                  1 : 'Maximum number of iterations exceeded.'
                  2 : 'Gradient and/or function calls not changing'
      allvecs -- a list of all iterates (only returned if retall==1)

    Additional Inputs:

      avegtol -- the minimum occurs when fprime(xopt)==0.  This specifies how
                 close to zero the average magnitude of fprime(xopt) needs
                 to be.
      maxiter -- the maximum number of iterations.
      full_output -- if non-zero then return fopt, func_calls, grad_calls,
                     and warnflag in addition to xopt.
      disp -- print convergence message if non-zero.
      retall -- return a list of results at each iteration if non-zero
    """
    app_fprime = 0
    if fprime is None:
        app_fprime = 1

    x0 = asarray(x0)
    if maxiter is None:
        maxiter = len(x0)*200
    func_calls = 0
    grad_calls = 0
    k = 0
    N = len(x0)
    gtol = N*avegtol
    I = MLab.eye(N)
    Hk = I
    old_fval = f(x0,*args)
    old_old_fval = old_fval + 5000
    func_calls += 1
    if app_fprime:
        gfk = apply(approx_fprime,(x0,f,epsilon)+args)
        myfprime = (approx_fprime,epsilon)
        func_calls = func_calls + len(x0) + 1
    else:
        gfk = apply(fprime,(x0,)+args)
        myfprime = fprime
        grad_calls = grad_calls + 1
    xk = x0
    if retall:
        allvecs = [x0]
    sk = [2*gtol]
    warnflag = 0
    while (Num.add.reduce(abs(gfk)) > gtol) and (k < maxiter):
        pk = -Num.dot(Hk,gfk)
        alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \
            line_search(f,myfprime,xk,pk,gfk,old_fval,old_old_fval,args=args)
        func_calls = func_calls + fc
        grad_calls = grad_calls + gc
        xkp1 = xk + alpha_k * pk
        if retall:
            allvecs.append(xkp1)
        sk = xkp1 - xk
        xk = xkp1
        if gfkp1 is None:
            if app_fprime:
                gfkp1 = apply(approx_fprime,(xkp1,f,epsilon)+args)
                func_calls = func_calls + len(x0) + 1
            else:
                gfkp1 = apply(fprime,(xkp1,)+args)
                grad_calls = grad_calls + 1
        yk = gfkp1 - gfk
        k = k + 1
        try:
            rhok = 1 / Num.dot(yk,sk)
        except ZeroDivisionError:
            warnflag = 2
            break
        A1 = I - sk[:,Num.NewAxis] * yk[Num.NewAxis,:] * rhok
        A2 = I - yk[:,Num.NewAxis] * sk[Num.NewAxis,:] * rhok
        Hk = Num.dot(A1,Num.dot(Hk,A2)) + rhok * sk[:,Num.NewAxis] \
             * sk[Num.NewAxis,:]
        gfk = gfkp1

    if disp or full_output:
        fval = old_fval
    if warnflag == 2:
        if disp:
            print "Warning: Desired error not necessarily achieved due to precision loss"
            print "         Current function value: %f" % fval
            print "         Iterations: %d" % k
            print "         Function evaluations: %d" % func_calls
            print "         Gradient evaluations: %d" % grad_calls
    elif k >= maxiter:
        warnflag = 1
        if disp:
            print "Warning: Maximum number of iterations has been exceeded"
            print "         Current function value: %f" % fval
            print "         Iterations: %d" % k
            print "         Function evaluations: %d" % func_calls
            print "         Gradient evaluations: %d" % grad_calls
    else:
        if disp:
            print "Optimization terminated successfully."
            print "         Current function value: %f" % fval
            print "         Iterations: %d" % k
            print "         Function evaluations: %d" % func_calls
            print "         Gradient evaluations: %d" % grad_calls

    if full_output:
        retlist = xk, fval, gfk, Hk, func_calls, grad_calls, warnflag
        if retall:
            retlist += (allvecs,)
    else:
        retlist = xk
        if retall:
            retlist = (xk, allvecs)

    return retlist
b23f472c9c0898a47e5db8f9f85123aa16d59ece /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/b23f472c9c0898a47e5db8f9f85123aa16d59ece/optimize.py
avegtol -- minimum average value of gradient for stopping
maxgtol -- maximum allowable gradient magnitude for stopping
[context field: duplicate of the fmin_bfgs listing in the first row above]
b23f472c9c0898a47e5db8f9f85123aa16d59ece /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/b23f472c9c0898a47e5db8f9f85123aa16d59ece/optimize.py
avegtol -- the minimum occurs when fprime(xopt)==0. This specifies how close to zero the average magnitude of fprime(xopt) needs to be.
[context field: duplicate of the fmin_bfgs listing in the first row above]
b23f472c9c0898a47e5db8f9f85123aa16d59ece /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/b23f472c9c0898a47e5db8f9f85123aa16d59ece/optimize.py
gtol = N*avegtol
[context field: duplicate of the fmin_bfgs listing in the first row above]
b23f472c9c0898a47e5db8f9f85123aa16d59ece /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/b23f472c9c0898a47e5db8f9f85123aa16d59ece/optimize.py
while (Num.add.reduce(abs(gfk)) > gtol) and (k < maxiter):
while (Num.maximum.reduce(abs(gfk)) > gtol) and (k < maxiter):
[context field: duplicate of the fmin_bfgs listing in the first row above]
b23f472c9c0898a47e5db8f9f85123aa16d59ece /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/b23f472c9c0898a47e5db8f9f85123aa16d59ece/optimize.py
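The diff above replaces the summed-gradient-magnitude stopping test (scaled by N) with a max-magnitude test. A minimal sketch in modern NumPy (data and variable values are illustrative, not from the commit) showing the two tests can disagree:

    import numpy as np

    gfk = np.array([1.5e-5, 1e-6, 1e-6])   # hypothetical current gradient
    N, avegtol, gtol = len(gfk), 1e-5, 1e-5

    # old test: keep iterating while the summed magnitude exceeds N*avegtol
    old_continue = np.add.reduce(np.abs(gfk)) > N * avegtol       # False: stops
    # new test: keep iterating while the largest component exceeds gtol
    new_continue = np.maximum.reduce(np.abs(gfk)) > gtol          # True: continues
    print(old_continue, new_continue)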
try:
try:
[context field: duplicate of the fmin_bfgs listing in the first row above]
b23f472c9c0898a47e5db8f9f85123aa16d59ece /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/b23f472c9c0898a47e5db8f9f85123aa16d59ece/optimize.py
    warnflag = 2
    break
A1 = I - sk[:,Num.NewAxis] * yk[Num.NewAxis,:] * rhok
A2 = I - yk[:,Num.NewAxis] * sk[Num.NewAxis,:] * rhok
Hk = Num.dot(A1,Num.dot(Hk,A2)) + rhok * sk[:,Num.NewAxis] \
     * sk[Num.NewAxis,:]
print "Divide by zero encountered: Hessian calculation reset." Hk = I else: A1 = I - sk[:,Num.NewAxis] * yk[Num.NewAxis,:] * rhok A2 = I - yk[:,Num.NewAxis] * sk[Num.NewAxis,:] * rhok Hk = Num.dot(A1,Num.dot(Hk,A2)) + rhok * sk[:,Num.NewAxis] \ * sk[Num.NewAxis,:]
[context field: duplicate of the fmin_bfgs listing in the first row above]
b23f472c9c0898a47e5db8f9f85123aa16d59ece /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/b23f472c9c0898a47e5db8f9f85123aa16d59ece/optimize.py
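The add side above stops aborting on a zero curvature denominator and instead resets the inverse-Hessian approximation to the identity. A self-contained sketch of one BFGS update with that reset behaviour (modern NumPy; bfgs_update is a hypothetical helper, not scipy's API):

    import numpy as np

    def bfgs_update(Hk, sk, yk):
        # One BFGS inverse-Hessian update; on zero curvature (yk.sk == 0),
        # reset to the identity instead of aborting, as in the diff above.
        I = np.eye(len(sk))
        denom = np.dot(yk, sk)
        if denom == 0:
            print("Divide by zero encountered: Hessian calculation reset.")
            return I
        rhok = 1.0 / denom
        A1 = I - np.outer(sk, yk) * rhok
        A2 = I - np.outer(yk, sk) * rhok
        return A1 @ Hk @ A2 + rhok * np.outer(sk, sk)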
x = fmin_bfgs(rosen, x0, avegtol=1e-4, maxiter=100)
x = fmin_bfgs(rosen, x0, gtol=1e-4, maxiter=100)
def _scalarfunc(*params):
    params = squeeze(asarray(params))
    return func(params,*args)
b23f472c9c0898a47e5db8f9f85123aa16d59ece /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/b23f472c9c0898a47e5db8f9f85123aa16d59ece/optimize.py
return a.var()
n = len(a)
return a.var()*(n/(n-1.))
def tvar(a, limits=None, inclusive=(1,1)):
    """Returns the sample variance of values in an array, (i.e., using N-1),
    ignoring values strictly outside the sequence passed to 'limits'.

    Note: either limit in the sequence, or the value of limits itself,
    can be set to None.  The inclusive list/tuple determines whether the
    lower and upper limiting bounds (respectively) are open/exclusive (0)
    or closed/inclusive (1).
    """
    a = asarray(a)
    a = a.astype(float).ravel()
    if limits is None:
        return a.var()
    am = mask_to_limits(a, limits, inclusive)
    return masked_var(am)
6482e3bdd39a9a78db8fc7a81920df517fce79ce /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/6482e3bdd39a9a78db8fc7a81920df517fce79ce/stats.py
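The diff above applies Bessel's correction, turning the biased (N-denominator) variance into the sample (N-1) variance. A quick check in modern NumPy, where the same correction is available through the ddof argument (data made up):

    import numpy as np

    a = np.array([2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0])
    n = len(a)
    print(a.var() * (n / (n - 1.0)))   # 4.571..., the N-1 sample variance
    print(a.var(ddof=1))               # identical result via ddof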
n = float(len(ravel(a)))
n = float(len(a))
def tsem(a, limits=None, inclusive=(True,True)):
    """Returns the standard error of the mean for the values in an array,
    (i.e., using N for the denominator), ignoring values strictly outside
    the sequence passed to 'limits'.

    Note: either limit in the sequence, or the value of limits itself,
    can be set to None.  The inclusive list/tuple determines whether the
    lower and upper limiting bounds (respectively) are open/exclusive (0)
    or closed/inclusive (1).
    """
    a = asarray(a).ravel()
    if limits is None:
        n = float(len(ravel(a)))
        return a.std()/sqrt(n)
    am = mask_to_limits(a.ravel(), limits, inclusive)
    sd = sqrt(masked_var(am))
    return sd / am.count()
6482e3bdd39a9a78db8fc7a81920df517fce79ce /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/6482e3bdd39a9a78db8fc7a81920df517fce79ce/stats.py
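The removed ravel in this diff was redundant: a had already been flattened two lines earlier. For the limits=None branch, the standard error of the mean is the (N-denominator) standard deviation over sqrt(n); an equivalent modern NumPy sketch with made-up data:

    import numpy as np

    a = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
    sem = a.std() / np.sqrt(len(a))   # std uses the N denominator, per the docstring
    print(sem)                        # 0.632...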
correction = np.sqrt(float(n-1) / n)
return a.std(axis)/a.mean(axis) * correction
return a.std(axis)/a.mean(axis)
def variation(a, axis=0):
    """Computes the coefficient of variation, the ratio of the biased
    standard deviation to the mean.

    Parameters
    ----------
    a : array
    axis : int or None

    References
    ----------
    [CRCProbStat2000] section 2.2.20
    """
    a, axis = _chk_asarray(a, axis)
    n = a.shape[axis]
    correction = np.sqrt(float(n-1) / n)
    return a.std(axis)/a.mean(axis) * correction
6482e3bdd39a9a78db8fc7a81920df517fce79ce /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/6482e3bdd39a9a78db8fc7a81920df517fce79ce/stats.py
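The diff drops the sqrt((n-1)/n) factor, so variation returns std/mean directly. With modern NumPy, a.std(axis) is already the biased (ddof=0) value, which is exactly what the docstring promises; a quick illustration with made-up data:

    import numpy as np

    a = np.array([10.0, 12.0, 9.0, 11.0])
    n = len(a)
    print(a.std() / a.mean())                               # kept by the diff
    print(a.std() / a.mean() * np.sqrt(float(n - 1) / n))   # removed correction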
rpb = (y1m - y0m)*np.sqrt(phat * (1-phat)) / (y.std() * np.sqrt((n-1)/float(n)))
rpb = (y1m - y0m)*np.sqrt(phat * (1-phat)) / y.std()
def pointbiserialr(x, y):
    # comment: I am changing the semantics somewhat.  The original function is
    # fairly general and accepts an x sequence that has any type of thing in
    # it as long as there are only two unique items.  I am going to restrict
    # this to a boolean array for my sanity.
    """Calculates a point biserial correlation coefficient and the associated
    p-value.

    The point biserial correlation is used to measure the relationship
    between a binary variable, x, and a continuous variable, y.  Like other
    correlation coefficients, this one varies between -1 and +1 with 0
    implying no correlation.  Correlations of -1 or +1 imply a determinative
    relationship.

    Parameters
    ----------
    x : array of bools
    y : array of floats

    Returns
    -------
    (point-biserial r, 2-tailed p-value)

    References
    ----------
    http://www.childrens-mercy.org/stats/definitions/biserial.htm
    """
    ## Test data: http://support.sas.com/ctx/samples/index.jsp?sid=490&tab=output
    # x = [1,0,1,1,1,1,0,1,0,0,0,1,1,0,0,0,1,1,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1]
    # y = [14.8,13.8,12.4,10.1,7.1,6.1,5.8,4.6,4.3,3.5,3.3,3.2,3.0,2.8,2.8,2.5,
    #      2.4,2.3,2.1,1.7,1.7,1.5,1.3,1.3,1.2,1.2,1.1,0.8,0.7,0.6,0.5,0.2,0.2,
    #      0.1]
    # rpb = 0.36149
    x = np.asarray(x, dtype=bool)
    y = np.asarray(y, dtype=float)
    n = len(x)

    # phat is the fraction of x values that are True
    phat = x.sum() / float(len(x))
    y0 = y[~x]  # y-values where x is False
    y1 = y[x]   # y-values where x is True
    y0m = y0.mean()
    y1m = y1.mean()

    rpb = (y1m - y0m)*np.sqrt(phat * (1-phat)) / (y.std() * np.sqrt((n-1)/float(n)))

    df = n-2
    # fixme: see comment about TINY in pearsonr()
    TINY = 1e-20
    t = rpb*np.sqrt(df/((1.0-rpb+TINY)*(1.0+rpb+TINY)))
    prob = betai(0.5*df, 0.5, df/(df+t*t))
    return rpb, prob
6482e3bdd39a9a78db8fc7a81920df517fce79ce /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/6482e3bdd39a9a78db8fc7a81920df517fce79ce/stats.py
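Dropping the sqrt((n-1)/n) factor leaves the textbook point-biserial formula with the population standard deviation, which is algebraically identical to Pearson's r for a 0/1-coded x. A small check with made-up data (modern NumPy):

    import numpy as np

    x = np.array([True, False, True, True, False, True, False, True])
    y = np.array([14.8, 13.8, 12.4, 10.1, 7.1, 6.1, 5.8, 4.6])

    phat = x.mean()
    rpb = (y[x].mean() - y[~x].mean()) * np.sqrt(phat * (1 - phat)) / y.std()
    print(np.isclose(rpb, np.corrcoef(x, y)[0, 1]))   # True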
def __init__(self,file_name,permission='r',format='n'):
    if type(file_name) == type(''):
        if sys.platform=='win32' and 'b' not in permission:
            print "Warning: Generally fopen is used for opening binary\n" + \
                  "files, which on this system requires attaching a 'b' \n" + \
                  "to the permission flag."
def __init__(self,file_name,permission='rb',format='n'):
    if 'b' not in permission:
        permission += 'b'
    if type(file_name) in (types.StringType, types.UnicodeType):
def __init__(self,file_name,permission='r',format='n'):
    if type(file_name) == type(''):
        if sys.platform=='win32' and 'b' not in permission:
            print "Warning: Generally fopen is used for opening binary\n" + \
                  "files, which on this system requires attaching a 'b' \n" + \
                  "to the permission flag."
        self.__dict__['fid'] = open(file_name,permission)
    elif 'fileno' in file_name.__methods__:  # first argument is an open file
        self.__dict__['fid'] = file_name
    if format in ['native','n','default']:
        self.__dict__['bs'] = 0
        self.__dict__['format'] = 'native'
    elif format in ['ieee-le','l','little-endian','le']:
        self.__dict__['bs'] = not LittleEndian
        self.__dict__['format'] = 'ieee-le'
    elif format in ['ieee-be','b','big-endian','be']:
        self.__dict__['bs'] = LittleEndian
        self.__dict__['format'] = 'ieee-be'
    else:
        raise ValueError, "Unrecognized format: " + format
6078c659529054b5167f615a37dce51fe41fc76c /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/6078c659529054b5167f615a37dce51fe41fc76c/mio.py
shape = tuple(count)
count = product(shape)
shape = list(count)
minus_ones = shape.count(-1)
if minus_ones == 0:
    count = product(shape)
elif minus_ones == 1:
    now = self.fid.tell()
    self.fid.seek(0,2)
    end = self.fid.tell()
    self.fid.seek(now)
    remaining_bytes = end - now
    know_dimensions_size = -product(count) * getsize_type(stype)[0]
    unknown_dimension_size, illegal = divmod(remaining_bytes,
                                             know_dimensions_size)
    if illegal:
        raise ValueError("unknown dimension doesn't match filesize")
    shape[shape.index(-1)] = unknown_dimension_size
    count = product(shape)
else:
    raise ValueError(
        "illegal count; can only specify one unknown dimension")
shape = tuple(shape)
def read(self,count,stype,rtype=None,bs=None):
    """Read data from file and return it in a Numeric array.
6078c659529054b5167f615a37dce51fe41fc76c /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/6078c659529054b5167f615a37dce51fe41fc76c/mio.py
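The added branch infers a single -1 dimension from the bytes remaining in the file, the same convention numpy.reshape uses; the original seeks to the end to count them, while the sketch below uses the file size directly. A hypothetical modern demo (the file name and 3-column layout are made up):

    import numpy as np, os, tempfile

    path = os.path.join(tempfile.gettempdir(), 'demo.bin')
    np.arange(12, dtype=np.float64).tofile(path)

    itemsize = np.dtype(np.float64).itemsize
    cols = 3                                            # the known dimension
    rows = os.path.getsize(path) // (cols * itemsize)   # the inferred -1 dimension
    arr = np.fromfile(path, dtype=np.float64).reshape(rows, cols)
    print(arr.shape)   # (4, 3)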
if type(fmt) == type(''):
if type(fmt) in (types.StringType, types.UnicodeType):
def fort_write(self,fmt,*args):
    """Write a Fortran binary record.
6078c659529054b5167f615a37dce51fe41fc76c /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/6078c659529054b5167f615a37dce51fe41fc76c/mio.py
fid = open(test_name,'r')
fid = open(test_name,'rb')
def loadmat(name, dict=None, appendmat=1):
    """Load the MATLAB mat file saved in level 1.0 format.

    If name is a full path name load it in.  Otherwise search for the file on
    the sys.path list and load the first one found (the current directory is
    searched first).

    Only Level 1.0 MAT files are supported so far.

    Inputs:

      name -- name of the mat file (don't need .mat extension)
      dict -- the dictionary to insert into.  If none the variables will be
              returned in a dictionary.
      appendmat -- non-zero to append the .mat extension to the end of the
                   given filename.

    Outputs:

      If dict is None, then a dictionary of names and objects representing
      the stored arrays is returned.
    """
    if appendmat and name[-4:] == ".mat":
        name = name[:-4]
    if os.sep in name:
        full_name = name
        if appendmat:
            full_name = name + ".mat"
    else:
        full_name = None
        junk,name = os.path.split(name)
        for path in sys.path:
            test_name = os.path.join(path,name)
            if appendmat:
                test_name += ".mat"
            try:
                fid = open(test_name,'r')
                fid.close()
                full_name = test_name
            except IOError:
                pass
    if full_name is None:
        raise IOError, "%s not found on the path." % name

    permis = 'r'
    if sys.platform=='win32':
        permis = 'rb'
    fid = fopen(full_name,permis)
    test_vals = fid.fread(4,'byte')
    if not (0 in test_vals):
        fid.close()
        raise ValueError, "Version 5.0 file format not supported."
    testtype = struct.unpack('i',test_vals.tostring())
    # Check to see if the number is positive and less than 5000.
    if testtype[0] < 0 or testtype[0] > 4999:
        # wrong byte-order
        if LittleEndian:
            format = 'ieee-be'
        else:
            format = 'ieee-le'
    else:  # otherwise we are O.K.
        if LittleEndian:
            format = 'ieee-le'
        else:
            format = 'ieee-be'
    fid.close()
    fid = fopen(full_name, permis, format)
    length = fid.size()
    fid.rewind()  # back to the beginning
    defnames = []
    thisdict = {}
    while 1:
        if (fid.tell() == length):
            break
        header = fid.fread(5,'int')
        if len(header) != 5:
            fid.close()
            print "Warning: Read error in file."
            break
        M,rest = divmod(header[0],1000)
        O,rest = divmod(rest,100)
        P,rest = divmod(rest,10)
        T = rest
        if (M > 1):
            fid.close()
            raise ValueError, "Unsupported binary format."
        if (O != 0):
            fid.close()
            raise ValueError, "Hundreds digit of first integer should be zero."
        if (P == 4):
            fid.close()
            raise ValueError, "No support for 16-bit unsigned integers."
        if (T not in [0,1]):
            fid.close()
            raise ValueError, "Cannot handle sparse matrices, yet."
        storage = {0:'d',1:'f',2:'i',3:'s',5:'b'}[P]
        varname = fid.fread(header[-1],'char')[:-1]
        varname = varname.tostring()
        defnames.append(varname)
        numels = header[1]*header[2]
        if T == 0:  # Text data
            data = r1array(fid.fread(numels,storage))
            if header[3]:  # imaginary data
                data2 = fid.fread(numels,storage)
                if data.typecode() == 'f' and data2.typecode() == 'f':
                    new = zeros(data.shape,'F')
                    new.real = data
                    new.imag = data2
                    data = new
                    del(new)
                    del(data2)
            if len(data) > 1:
                data.shape = (header[2], header[1])
                thisdict[varname] = transpose(squeeze(data))
            else:
                thisdict[varname] = data
        else:
            data = r1array(fid.fread(numels,storage,'char'))
            if len(data) > 1:
                data.shape = (header[2], header[1])
                thisdict[varname] = transpose(squeeze(data))
            else:
                thisdict[varname] = data
    fid.close()
    if dict is not None:
        print "Names defined = ", defnames
        dict.update(thisdict)
    else:
        return thisdict
6078c659529054b5167f615a37dce51fe41fc76c /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/6078c659529054b5167f615a37dce51fe41fc76c/mio.py
permis = 'r'
if sys.platform=='win32':
    permis = 'rb'
fid = fopen(full_name,permis)
fid = fopen(full_name,'rb')
[context field: duplicate of the loadmat listing above]
6078c659529054b5167f615a37dce51fe41fc76c /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/6078c659529054b5167f615a37dce51fe41fc76c/mio.py
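All three file-mode diffs above converge on always opening mat files in binary mode: on Windows, text mode translates line endings and treats Ctrl-Z as end-of-file, silently corrupting binary reads. A minimal demonstration (the file name is illustrative):

    payload = b'\x00\x01\r\n\x1a\x02'   # bytes that text mode would mangle on Windows
    with open('demo.bin', 'wb') as f:
        f.write(payload)
    with open('demo.bin', 'rb') as f:   # 'rb' returns the bytes verbatim everywhere
        assert f.read() == payload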
elif self.try_link("
elif self.try_link("
fpedef = "-DFPU_HPUX"
ac6a0577676678ec29e46db4c128bc775fbaa1fd /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/ac6a0577676678ec29e46db4c128bc775fbaa1fd/config_pygist.py
winnum = xplt.window()
winnum = gist.window()
def plot(x,*args,**keywds):
    """Plot curves.

    Description:

      Plot one or more curves on the same graph.

    Inputs:

      There can be a variable number of inputs which consist of pairs or
      triples.  The second variable is plotted against the first using the
      linetype specified by the optional third variable in the triple.  If
      only two plots are being compared, the x-axis does not have to be
      repeated.
    """
    try:
        override = 1
        savesys = gist.plsys(2)
        gist.plsys(savesys)
    except:
        override = 0
    global _hold
    if "hold" in keywds.keys():
        _hold = keywds['hold']
    if _hold or override:
        pass
    else:
        gist.fma()
    gist.animate(0)
    winnum = xplt.window()
    if winnum < 0:
        xplt.window(0)
    nargs = len(args)
    if nargs == 0:
        y = x
        x = Numeric.arange(0,len(y))
        if scipy.array_iscomplex(y):
            print "Warning: complex data plotting real part."
            y = y.real
        y = where(scipy.isfinite(y),y,0)
        gist.plg(y,x,type='solid',color='blue',marks=0)
        return
    y = args[0]
    argpos = 1
    nowplotting = 0
    clear_global_linetype()
    while 1:
        try:
            thearg = args[argpos]
        except IndexError:
            thearg = 0
        thetype,thecolor,themarker,tomark = _parse_type_arg(thearg,nowplotting)
        if themarker == 'Z':  # args[argpos] was data or non-existent.
            pass
            append_global_linetype(_rtypes[thetype]+_rcolors[thecolor])
        else:  # args[argpos] was a string
            argpos = argpos + 1
            if tomark:
                append_global_linetype(_rtypes[thetype]+_rcolors[thecolor]+_rmarkers[themarker])
            else:
                append_global_linetype(_rtypes[thetype]+_rcolors[thecolor])
        if scipy.array_iscomplex(x) or scipy.array_iscomplex(y):
            print "Warning: complex data provided, using only real part."
            x = scipy.real(x)
            y = scipy.real(y)
        y = where(scipy.isfinite(y),y,0)
        gist.plg(y,x,type=thetype,color=thecolor,marker=themarker,marks=tomark)

        nowplotting = nowplotting + 1

        ## Argpos is pointing to the next potential triple of data.
        ## Now one of four things can happen:
        ##
        ##   1:  argpos points to data, argpos+1 is a string
        ##   2:  argpos points to data, end
        ##   3:  argpos points to data, argpos+1 is data
        ##   4:  argpos points to data, argpos+1 is data, argpos+2 is a string

        if argpos >= nargs:
            break  # no more data
        if argpos == nargs-1:  # this is a single data value.
            x = x
            y = args[argpos]
            argpos = argpos+1
        elif type(args[argpos+1]) is types.StringType:
            x = x
            y = args[argpos]
            argpos = argpos+1
        else:  # 3
            x = args[argpos]
            y = args[argpos+1]
            argpos = argpos+2
    return
0b4c869b980175673fc3b21c39aa0653fdeae46b /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/0b4c869b980175673fc3b21c39aa0653fdeae46b/Mplot.py
xplt.window(0)
gist.window(0)
[context field: duplicate of the plot listing above]
0b4c869b980175673fc3b21c39aa0653fdeae46b /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/0b4c869b980175673fc3b21c39aa0653fdeae46b/Mplot.py
def setdiag(self, values, k=0):
    """Fills the diagonal elements {a_ii} with the values from the given
    sequence.  If k != 0, fills the off-diagonal elements {a_{i,i+k}}
    instead.
    """
    M, N = self.shape
    if len(values) > min(M, N+k):
        raise ValueError, "sequence of target values is too long"
    for i, v in enumerate(values):
        self[i, i+k] = v
    return
def mean(self, axis=None):
    """Average the matrix over the given axis.  If the axis is None,
    average over both rows and columns, returning a scalar.
    """
    if axis==0:
        mean = self.sum(0)
        mean *= 1.0 / self.shape[0]
        return mean
    elif axis==1:
        mean = self.sum(1)
        mean *= 1.0 / self.shape[1]
        return mean
    elif axis is None:
        return self.sum(None) * 1.0 / (self.shape[0]*self.shape[1])
    else:
        raise ValueError, "axis out of bounds"
ea0c6885c65a199b29ef216ee91e52a46b3ef770 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/ea0c6885c65a199b29ef216ee91e52a46b3ef770/sparse.py
def setdiag(self, values, k=0):
    M, N = self.shape
    assert len(values) >= max(M, N)
    for i in xrange(min(M, N-k)):
        self[i, i+k] = values[i]
    return
ea0c6885c65a199b29ef216ee91e52a46b3ef770 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/ea0c6885c65a199b29ef216ee91e52a46b3ef770/sparse.py
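The two setdiag variants above differ in how they validate the length of values; both write values[i] into entry (i, i+k). A dense sketch of the semantics (setdiag here is a hypothetical stand-alone helper, not the sparse class method):

    import numpy as np

    def setdiag(a, values, k=0):
        # dense sketch of the sparse setdiag semantics shown above
        for i, v in enumerate(values):
            a[i, i + k] = v

    a = np.zeros((3, 4))
    setdiag(a, [1, 2, 3], k=1)   # fills a[0,1], a[1,2], a[2,3]
    print(a)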
self.data[i] = [x for item in seq]
self.data[i] = [x for item in seq]
def __setitem__(self, index, x):
    try:
        assert len(index) == 2
    except (AssertionError, TypeError):
        raise IndexError, "invalid index"
    i, j = index
    if isinstance(i, int):
        if not (i>=0 and i<self.shape[0]):
            raise IndexError, "lil_matrix index out of range"
    else:
        if isinstance(i, slice):
            seq = xrange(i.start or 0, i.stop or self.shape[1], i.step or 1)
        elif operator.isSequenceType(i):
            seq = i
        else:
            raise IndexError, "invalid index"
        try:
            if not len(x) == len(seq):
                raise ValueError, "number of elements in source must be" \
                      " same as number of elements in destination"
        except TypeError:
            # Either x or seq is not a sequence.  Note that a sparse matrix
            # is also not a sequence under this definition.
            # Currently we don't support setting to/from non-sequence types.
            # This could be enhanced, though, to allow a scalar source,
            # and/or a sparse vector.
            raise TypeError, "unsupported type for lil_matrix.__setitem__"
        else:
            # Sequence: call __setitem__ recursively, once for each row
            for i in xrange(len(seq)):
                self[seq[i], index[1]] = x[i]
            return
ea0c6885c65a199b29ef216ee91e52a46b3ef770 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/ea0c6885c65a199b29ef216ee91e52a46b3ef770/sparse.py
if isdense(x):
if isinstance(x, lil_matrix):
    if x.shape == (1, self.shape[1]):
        self.rows[i] = x.rows[0]
        self.data[i] = x.data[0]
    elif x.shape == (1, len(seq)):
        for k, col in enumerate(seq):
            self[i, col] = x[0, k]
    else:
        raise ValueError, "source and destination must have" \
              " the same shape"
    return
elif isinstance(x, csr_matrix):
    if x.shape != (1, self.shape[1]):
        raise ValueError, "sparse matrix source must be (1 x n)"
    self.rows[i] = x.colind.tolist()
    self.data[i] = x.data.tolist()
else:
[context field: duplicate of the __setitem__ listing above]
ea0c6885c65a199b29ef216ee91e52a46b3ef770 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/ea0c6885c65a199b29ef216ee91e52a46b3ef770/sparse.py
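The added branch lets a whole lil_matrix row be assigned from another (1 x n) sparse row by copying its rows/data lists. Modern SciPy exposes the same operation through slice assignment; a small sketch, assuming current scipy.sparse behaviour:

    from scipy.sparse import lil_matrix

    A = lil_matrix((3, 4))
    A[1, :] = [0, 2.0, 0, 5.0]   # row from a dense sequence
    A[2, :] = A[1, :]            # row from another sparse (1 x n) row
    print(A.toarray())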
x = asarray(x).squeeze()
try:
    x = asarray(x).squeeze()
except Error, e:
    raise TypeError, "unsupported type for" \
          " lil_matrix.__setitem__"
[context field: duplicate of the __setitem__ listing above]
ea0c6885c65a199b29ef216ee91e52a46b3ef770 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/ea0c6885c65a199b29ef216ee91e52a46b3ef770/sparse.py
[context field: duplicate of the __setitem__ listing above]
ea0c6885c65a199b29ef216ee91e52a46b3ef770 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/ea0c6885c65a199b29ef216ee91e52a46b3ef770/sparse.py
elif isinstance(x, lil_matrix):
    if x.shape != (1, self.shape[1]):
        raise ValueError, "sparse matrix source must be (1 x n)"
    self.rows[i] = x.rows[0]
    self.data[i] = x.data[0]
elif isinstance(x, csr_matrix):
    if x.shape != (1, self.shape[1]):
        raise ValueError, "sparse matrix source must be (1 x n)"
    self.rows[i] = x.colind.tolist()
    self.data[i] = x.data.tolist()
else:
    raise TypeError, "unsupported type for" \
          " lil_matrix.__setitem__"
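The lil_matrix change above dispatches on the type of the assignment source. A minimal standalone sketch of the same dispatch idea, using plain Python lists in place of sparse rows (set_row and the fixed row length are illustrative, not the scipy API):

    def set_row(rows, i, x, ncols=3):
        # Accept only a sequence whose length matches the row, as the
        # lil_matrix fix does; reject anything else with TypeError.
        if isinstance(x, (list, tuple)):
            if len(x) != ncols:
                raise ValueError("number of elements in source must match row")
            rows[i] = list(x)
        else:
            raise TypeError("unsupported type for row assignment")

    rows = [[0, 0, 0], [0, 0, 0]]
    set_row(rows, 0, (1, 2, 3))
    print(rows)            # [[1, 2, 3], [0, 0, 0]]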
return gssv(N, lastel, data, index0, index1, b, csc, permc_spec)[0]
print "data-ftype: %s compared to data %s" % (ftype, data.dtype.char) print "Calling _superlu.%sgssv" % ftype return gssv(N, lastel, data, index0, index1, b, csc, permc_spec)[0]
def solve(A, b, permc_spec=2):
    if not hasattr(A, 'tocsr') and not hasattr(A, 'tocsc'):
        raise ValueError, "sparse matrix must be able to return CSC format--"\
              "A.tocsc()--or CSR format--A.tocsr()"
    if not hasattr(A, 'shape'):
        raise ValueError, "sparse matrix must be able to return shape" \
              " (rows, cols) = A.shape"
    M, N = A.shape
    if (M != N):
        raise ValueError, "matrix must be square"

    if isUmfpack and useUmfpack:
        mat = _toCS_umfpack( A )
        if mat.dtype.char not in 'dD':
            raise ValueError, "convert matrix data to double, please, using"\
                  " .astype(), or set sparse.useUmfpack = False"
        family = {'d' : 'di', 'D' : 'zi'}
        umf = umfpack.UmfpackContext( family[mat.dtype.char] )
        return umf.linsolve( umfpack.UMFPACK_A, mat, b, autoTranspose = True )
    else:
        mat, csc = _toCS_superLU( A )
        ftype, lastel, data, index0, index1 = \
            mat.ftype, mat.nnz, mat.data, mat.rowind, mat.indptr
        gssv = eval('_superlu.' + ftype + 'gssv')
        return gssv(N, lastel, data, index0, index1, b, csc, permc_spec)[0]
b31b66aa3bb672fb49716ef246940563bd20afa0 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/b31b66aa3bb672fb49716ef246940563bd20afa0/sparse.py
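solve() above selects a type-specific SuperLU routine by building its name from the one-letter type prefix and calling eval. getattr performs the same lookup without eval; a sketch with a stand-in namespace (FakeSuperLU is hypothetical, not the _superlu module):

    class FakeSuperLU(object):
        @staticmethod
        def dgssv(*args):
            return "double-precision gssv"

    def get_solver(module, ftype):
        # 'd' + 'gssv' -> 'dgssv', looked up without eval()
        return getattr(module, ftype + 'gssv')

    print(get_solver(FakeSuperLU, 'd')())   # double-precision gssv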
print "Adding a constant:" c += 5 print c
def _testme():
    a = csc_matrix((arange(1, 9),
                    numpy.transpose([[0, 1, 1, 2, 2, 3, 3, 4],
                                     [0, 1, 3, 0, 2, 3, 4, 4]])))
    print "Representation of a matrix:"
    print repr(a)
    print "How a matrix prints:"
    print a
    print "Adding two matrices:"
    b = a+a
    print b
    print "Subtracting two matrices:"
    c = b - a
    print c
    print "Multiplying a sparse matrix by a dense vector:"
    d = a*[1, 2, 3, 4, 5]
    print d
    print [1, 2, 3, 4, 5]*a
    print "Inverting a sparse linear system:"
    print "The sparse matrix (constructed from diagonals):"
    a = spdiags([[1, 2, 3, 4, 5], [6, 5, 8, 9, 10]], [0, 1], 5, 5)
    b = numpy.array([1, 2, 3, 4, 5])
    print "Solve: single precision complex:"
    useUmfpack = False
    a = a.astype('F')
b31b66aa3bb672fb49716ef246940563bd20afa0 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/b31b66aa3bb672fb49716ef246940563bd20afa0/sparse.py
bprime[j] = val
bprime[j] = real(val)
def bilinear(b,a,fs=1.0):
    """Return a digital filter from an analog filter using the bilinear transform.

    The bilinear transform substitutes (z-1) / (z+1) for s
    """
    fs = float(fs)
    a,b = map(atleast_1d,(a,b))
    D = len(a) - 1
    N = len(b) - 1
    artype = Num.Float
    M = max([N,D])
    Np = M
    Dp = M
    bprime = Num.zeros(Np+1,artype)
    aprime = Num.zeros(Dp+1,artype)
    for j in range(Np+1):
        val = 0.0
        for i in range(N+1):
            for k in range(i+1):
                for l in range(M-i+1):
                    if k+l == j:
                        val += comb(i,k)*comb(M-i,l)*b[N-i]*pow(2*fs,i)*(-1)**k
        bprime[j] = val
    for j in range(Dp+1):
        val = 0.0
        for i in range(D+1):
            for k in range(i+1):
                for l in range(M-i+1):
                    if k+l == j:
                        val += comb(i,k)*comb(M-i,l)*a[D-i]*pow(2*fs,i)*(-1)**k
        aprime[j] = val
    return normalize(bprime, aprime)
1fe1ff906d3ac15b6463cc44ffca19662075e1ca /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/1fe1ff906d3ac15b6463cc44ffca19662075e1ca/filter_design.py
aprime[j] = val
aprime[j] = real(val)
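The bilinear() fix keeps only the real part of each accumulated coefficient; real() discards the negligible imaginary residue that complex intermediates leave on a mathematically real result. A short illustration (modern numpy used only for the demo):

    import numpy as np

    # A product that is mathematically real, computed through complex
    # intermediates, keeps a tiny imaginary round-off component.
    val = (1.0 + 1e-18j) * (2.0 - 1e-18j)
    print(np.real(val))    # 2.0, the imaginary residue is discarded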
x,y = asarray(x, y)
x = asarray(x)
y = asarray(y)
def mannwhitneyu(x,y):
    """
466a3af4da1d2addb334dcd92710821d3bec4fd5 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/466a3af4da1d2addb334dcd92710821d3bec4fd5/stats.py
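The removed mannwhitneyu line, x,y = asarray(x, y), passes y where asarray expects a typecode/dtype argument, so the two inputs must be converted separately. A demonstration of the working form (modern numpy):

    import numpy as np

    x = [1, 2, 3]
    y = [4.0, 5.0, 6.0]
    x = np.asarray(x)      # one call per input; asarray's second
    y = np.asarray(y)      # argument is a dtype, not another array
    print(x.shape)         # (3,)
    print(y.dtype)         # float64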
result = squeeze(transpose(reshape(result,dims[::-1])))
result = squeeze(transpose(reshape(result,tupdims)))
def _parse_mimatrix(fid,bytes):
    dclass, cmplx, nzmax =_parse_array_flags(fid)
    dims = _get_element(fid)[0]
    name = ''.join(asarray(_get_element(fid)[0]).astype('c'))
    if dclass in mxArrays:
        result, unused =_get_element(fid)
        if type == mxCHAR_CLASS:
            result = ''.join(asarray(result).astype('c'))
        else:
            if cmplx:
                imag, unused =_get_element(fid)
                result = result + cast[imag.typecode()](1j) * imag
            result = squeeze(transpose(reshape(result,dims[::-1])))
    elif dclass == mxCELL_CLASS:
        length = product(dims)
        result = zeros(length, PyObject)
        for i in range(length):
            sa, unused = _get_element(fid)
            result[i]= sa
        result = squeeze(transpose(reshape(result,dims[::-1])))
        if rank(result)==0: result = result.toscalar()
    elif dclass == mxSTRUCT_CLASS:
        length = product(dims)
        result = zeros(length, PyObject)
        namelength = _get_element(fid)[0]
        # get field names
        names = _get_element(fid)[0]
        splitnames = [names[i:i+namelength] for i in \
                      xrange(0,len(names),namelength)]
        fieldnames = [''.join(asarray(x).astype('c')).strip('\x00')
                      for x in splitnames]
        for i in range(length):
            result[i] = mat_struct()
            for element in fieldnames:
                val,unused = _get_element(fid)
                result[i].__dict__[element] = val
        result = squeeze(transpose(reshape(result,dims[::-1])))
        if rank(result)==0: result = result.toscalar()
    # object is like a structure but with a class name
    elif dclass == mxOBJECT_CLASS:
        class_name = ''.join(asarray(_get_element(fid)[0]).astype('c'))
        length = product(dims)
        result = zeros(length, PyObject)
        namelength = _get_element(fid)[0]
        # get field names
        names = _get_element(fid)[0]
        splitnames = [names[i:i+namelength] for i in \
                      xrange(0,len(names),namelength)]
        fieldnames = [''.join(asarray(x).astype('c')).strip('\x00')
                      for x in splitnames]
        for i in range(length):
            result[i] = mat_obj()
            result[i]._classname = class_name
            for element in fieldnames:
                val,unused = _get_element(fid)
                result[i].__dict__[element] = val
        result = squeeze(transpose(reshape(result,dims[::-1])))
        if rank(result)==0: result = result.toscalar()
    elif dclass == mxSPARSE_CLASS:
        rowind, unused = _get_element(fid)
        colind, unused = _get_element(fid)
        res, unused = _get_element(fid)
        if cmplx:
            imag, unused = _get_element(fid)
            res = res + cast[imag.typecode()](1j)*imag
        if have_sparse:
            spmat = scipy.sparse.csc_matrix(res, (rowind[:len(res)], colind),
                                            M=dims[0],N=dims[1])
            result = spmat
        else:
            result = (dims, rowind, colind, res)

    return result, name
5e019f26834d259e6bbeec69c1511aad635cced1 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/5e019f26834d259e6bbeec69c1511aad635cced1/mio.py
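The reshape/transpose pattern in _parse_mimatrix converts MATLAB's column-major element order into a row-major array by reshaping with reversed dimensions and transposing. The same result via a Fortran-order reshape (modern numpy shown for illustration):

    import numpy as np

    data = np.arange(6)        # elements as stored by MATLAB (column-major)
    dims = (2, 3)

    a = np.transpose(np.reshape(data, dims[::-1]))   # pattern used above
    b = np.reshape(data, dims, order='F')            # equivalent single call
    print(np.array_equal(a, b))                      # True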
y = scipy.stats.linregress(BIG,X)
y = scipy.stats.linregress(X,BIG)
def check_linregressBIGX(self):
    """ W.II.F.  Regress BIG on X.
3c36215c8af3afa51ba2b65cccb51b1da48608be /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/3c36215c8af3afa51ba2b65cccb51b1da48608be/test_stats.py
lwork = calc_lwork.getri(getri.prefix,a1.shape[0])[1]
lwork = calc_lwork.getri(getri.prefix,a1.shape[0])
lwork = lwork[1]
lwork = int(1.01*lwork)
def inv(a, overwrite_a=0):
    """Return inverse of square matrix a.
    """
    a1 = asarray(a)
    if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]:
        raise ValueError, 'expected square matrix'
    overwrite_a = overwrite_a or a1 is not a
    #XXX: I found no advantage or disadvantage of using finv.
d2289b8c0d6dbe549538e64771e1bd75fab2ee06 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/d2289b8c0d6dbe549538e64771e1bd75fab2ee06/basic.py
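The inv() change pads the workspace size returned by the LAPACK query by one percent and truncates to an integer before allocating. The idiom in isolation (padded_lwork is an illustrative name):

    def padded_lwork(optimal):
        # Work-size queries can come back as floats; add a small safety
        # margin and force an integer element count.
        return int(1.01 * optimal)

    print(padded_lwork(4096.0))    # 4136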
def configuration(parent_package='',parent_path=None):
    config = Configuration('integrate', parent_package, parent_path)
    blas_opt = get_info('blas_opt')
    if not blas_opt:
        raise NotFoundError,'no blas resources found'
blas_opt = get_info('blas_opt',notfound_action=2)
def configuration(parent_package='',parent_path=None): config = Configuration('integrate', parent_package, parent_path) blas_opt = get_info('blas_opt') if not blas_opt: raise NotFoundError,'no blas resources found' config.add_library('linpack_lite', sources=[join('linpack_lite','*.f')]) config.add_library('mach', sources=[join('mach','*.f')]) config.add_library('quadpack', sources=[join('quadpack','*.f')]) config.add_library('odepack', sources=[join('odepack','*.f')]) # should we try to weed through files and replace with calls to # LAPACK routines? # Yes, someday... # Extensions # quadpack: config.add_extension('_quadpack', sources=['_quadpackmodule.c'], libraries=['quadpack', 'linpack_lite', 'mach']) # odepack libs = ['odepack','linpack_lite','mach'] # Remove libraries key from blas_opt if blas_opt.has_key('libraries'): # key doesn't exist on OS X ... libs.extend(blas_opt['libraries']) newblas = {} for key in blas_opt.keys(): if key == 'libraries': continue newblas[key] = blas_opt[key] config.add_extension('_odepack', sources=['_odepackmodule.c'], libraries=libs, **newblas) # vode config.add_extension('vode', sources=['vode.pyf'], libraries=libs, **newblas) config.add_data_dir('tests') return config
526c0e68927f27806f075bb9b8cc8fc0c8cdd317 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/526c0e68927f27806f075bb9b8cc8fc0c8cdd317/setup.py
setup(**configuration(parent_path=''))
setup(**configuration(top_path='').todict())
libs.extend(blas_opt['libraries'])
if blas_opt.has_key('libraries'):
    libs.extend(blas_opt['libraries'])
def configuration(parent_package='',parent_path=None): config = Configuration('integrate', parent_package, parent_path) blas_opt = get_info('blas_opt') if not blas_opt: raise NotFoundError,'no blas resources found' config.add_library('linpack_lite', sources=[join('linpack_lite','*.f')]) config.add_library('mach', sources=[join('mach','*.f')]) config.add_library('quadpack', sources=[join('quadpack','*.f')]) config.add_library('odepack', sources=[join('odepack','*.f')]) # should we try to weed through files and replace with calls to # LAPACK routines? # Yes, someday... # Extensions # quadpack: config.add_extension('_quadpack', sources=['_quadpackmodule.c'], libraries=['quadpack', 'linpack_lite', 'mach']) # odepack libs = ['odepack','linpack_lite','mach'] # remove libraries key from blas_opt libs.extend(blas_opt['libraries']) newblas = {} for key in blas_opt.keys(): if key == 'libraries': continue newblas[key] = blas_opt[key] config.add_extension('_odepack', sources=['_odepackmodule.c'], libraries=libs, **newblas) # vode config.add_extension('vode', sources=['vode.pyf'], libraries=libs, **newblas) return config
633ded47da3140d5efed4d73d3f90ba336613626 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/633ded47da3140d5efed4d73d3f90ba336613626/setup.py
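The integrate setup builds newblas as a copy of blas_opt with the 'libraries' key dropped, since those libraries are passed separately. A compact py2-compatible sketch of the same filtering (the sample dict contents are made up):

    blas_opt = {'libraries': ['blas'],
                'library_dirs': ['/usr/lib'],
                'define_macros': []}

    newblas = dict((k, v) for k, v in blas_opt.items() if k != 'libraries')
    print(sorted(newblas.keys()))   # ['define_macros', 'library_dirs']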
execstring = 'column = map(lambda x: x'+cnums+', listoflists)'
exec execstring
evalstring = 'map(lambda x: x'+cnums+', listoflists)'
column = eval(evalstring)
def colex (listoflists,cnums):
    """\nExtracts from listoflists the columns specified in the list 'cnums'
    (cnums can be an integer, a sequence of integers, or an expression that
c0f2c8fcb681d46766df2d66826517b6050b386f /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/c0f2c8fcb681d46766df2d66826517b6050b386f/pstat.py
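colex() builds an indexing expression as a string and evaluates it; for plain integer column numbers, operator.itemgetter does the same without eval. A sketch for that case:

    from operator import itemgetter

    listoflists = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
    column = list(map(itemgetter(1), listoflists))
    print(column)    # [2, 5, 8]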
fname = os.path.join(__path__[0],'plt','lena.dat')
fname = os.path.join(os.path.dirname(__file__),'plt','lena.dat')
def lena():
    import cPickle, os
    fname = os.path.join(__path__[0],'plt','lena.dat')
    f = open(fname,'rb')
    lena = scipy.array(cPickle.load(f))
    f.close()
    return lena
75f4e4a251208a09f471651ac783d89eae74e085 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/75f4e4a251208a09f471651ac783d89eae74e085/common.py
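The lena() fix locates the data file relative to the module itself; __file__ exists for any module, while __path__ is only defined for packages. The pattern in isolation (the file name is illustrative):

    import os

    def data_path(name):
        # Resolve a data file shipped alongside this module.
        return os.path.join(os.path.dirname(os.path.abspath(__file__)), name)

    print(data_path('lena.dat'))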
assert_equal(cephes.pro_ang1_cv(1,1,1,1,0),(1.0,0.0))
assert_array_almost_equal(cephes.pro_ang1_cv(1,1,1,1,0), array((1.0,0.0)))
def check_pro_ang1_cv(self):
    assert_equal(cephes.pro_ang1_cv(1,1,1,1,0),(1.0,0.0))
76b070c091fac9b2cebceb2c9e24712a1cd809ea /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/76b070c091fac9b2cebceb2c9e24712a1cd809ea/test_basic.py
ext = Extension(name="_lbfgsb",sources=sources, **lapack)
ext = Extension(dot_join(parent_package,package,"_lbfgsb"), sources=sources, **lapack)
def configuration(parent_package='',parent_path=None): package = 'optimize' config = default_config_dict(package,parent_package) local_path = get_path(__name__,parent_path) minpack = glob(os.path.join(local_path,'minpack','*.f')) config['fortran_libraries'].append(('minpack',{'sources':minpack})) sources = ['_minpackmodule.c'] sources = [os.path.join(local_path,x) for x in sources] ext = Extension(dot_join(parent_package,package,'_minpack'), sources, libraries = ['minpack']) config['ext_modules'].append(ext) rootfind = glob(os.path.join(local_path,'Zeros','*.c')) roothead = os.path.join(local_path,'zeros.h') config['libraries'].append(('rootfind',{'sources':rootfind, 'headers':roothead})) sources = ['zeros.c'] sources = [os.path.join(local_path,x) for x in sources] ext = Extension(dot_join(parent_package,package,'_zeros'), sources, libraries=['rootfind']) config['ext_modules'].append(ext) lapack = system_info.lapack_opt_info().get_info() sources = ['lbfgsb.pyf','routines.f'] sources = [os.path.join(local_path,'lbfgsb-0.9',x) for x in sources] ext = Extension(name="_lbfgsb",sources=sources, **lapack) config['ext_modules'].append(ext) sources = ['moduleTNC.c', 'tnc.c'] sources = [os.path.join(local_path,'tnc',x) for x in sources] ext = Extension(name="moduleTNC", sources=sources) config['ext_modules'].append(ext) return config
a2acd69529e1925ece31015eef16daa6b11d1262 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/a2acd69529e1925ece31015eef16daa6b11d1262/setup_optimize.py
ext = Extension(name="moduleTNC", sources=sources)
ext = Extension(dot_join(parent_package,package,'moduleTNC'), sources=sources)
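Both optimize setup fixes qualify the Extension name with the parent package via dot_join, so the built module lands inside the package instead of at the top level. scipy_distutils' dot_join behaves roughly like this sketch:

    def dot_join(*parts):
        # ('', 'optimize', '_lbfgsb') -> 'optimize._lbfgsb'; empty parts drop out
        return '.'.join([p for p in parts if p])

    print(dot_join('', 'optimize', 'moduleTNC'))    # optimize.moduleTNC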
width = fac*(x[2]-x[1])
width = fac*(x[1]-x[0])
def errorbars(x,y,err,ptcolor='r',linecolor='b',pttype='o',linetype='-',fac=0.25): """Draw connected points with errorbars. Description: Plot connected points with errorbars. Inputs: x, y -- The points to plot. err -- The error in the y values. ptcolor -- The color for the points. linecolor -- The color of the connecting lines and error bars. pttype -- The type of point ('o', 'x', '+', '.', 'x', '*') linetype -- The type of line ('-', '|', ':', '-.', '-:') fac -- Adjusts how long the horizontal lines are which make the top and bottom of the error bars. """ # create line arrays yb = y - err ye = y + err try: override = 1 savesys = gist.plsys(2) gist.plsys(savesys) except: override = 0 if _hold or override: pass else: gist.fma() y = where(scipy.isfinite(y),y,0) gist.plg(y,x,color=_colors[ptcolor],marker=_markers[pttype],type='none') gist.pldj(x,yb,x,ye,color=_colors[linecolor],type=_types[linetype]) viewp = gist.viewport() plotlims = gist.limits() conv_factorx = (viewp[1] - viewp[0]) / (plotlims[1]-plotlims[0]) conv_factory = (viewp[3] - viewp[2]) / (plotlims[3]-plotlims[2]) width = fac*(x[2]-x[1]) x0 = x-width/2.0 x1 = x+width/2.0 gist.pldj(x0,ye,x1,ye,color=_colors[linecolor],type=_types[linetype]) gist.pldj(x0,yb,x1,yb,color=_colors[linecolor],type=_types[linetype]) return
f78138bb751d44f8a30ef0f2410adddd8179fed8 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/f78138bb751d44f8a30ef0f2410adddd8179fed8/Mplot.py
print linetypes[k], text[k]
print llx+width+deltax, ypos-deltay
def legend(text,linetypes=None,lleft=None,color=None,tfont='helvetica',fontsize=14,nobox=0): """Construct and place a legend. Description: Build a legend and place it on the current plot with an interactive prompt. Inputs: text -- A list of strings which document the curves. linetypes -- If not given, then the text strings are associated with the curves in the order they were originally drawn. Otherwise, associate the text strings with the corresponding curve types given. See plot for description. """ global _hold global _textcolor if color is None: color = _textcolor else: _textcolor = color if color is None: color = 'black' sys = gist.plsys() if sys == 0: gist.plsys(1) viewp = gist.viewport() gist.plsys(sys) DX = viewp[1] - viewp[0] DY = viewp[3] - viewp[2] width = DY / 10.0; if lleft is None: lleft = gist.mouse(0,0,"Click on point for lower left coordinate.") llx = lleft[0] lly = lleft[1] else: llx,lly = lleft[:2] savesys = gist.plsys() dx = width / 3.0 legarr = Numeric.arange(llx,llx+width,dx) legy = Numeric.ones(legarr.shape) dy = fontsize*points*1.2 deltay = fontsize*points / 2.8 deltax = fontsize*points / 2.6 * DX / DY ypos = lly + deltay; if linetypes is None: linetypes = _GLOBAL_LINE_TYPES[:] # copy them out gist.plsys(0) savehold = _hold _hold = 1 for k in range(len(text)): plot(legarr,ypos*legy,linetypes[k]) #print llx+width+deltax, ypos-deltay if text[k] != "": gist.plt(text[k],llx+width+deltax,ypos-deltay, color=color,font=tfont,height=fontsize,tosys=0) ypos = ypos + dy _hold = savehold if nobox: pass else: gist.plsys(0) maxlen = MLab.max(map(len,text)) c1 = (llx-deltax,lly-deltay) c2 = (llx + width + deltax + fontsize*points* maxlen/1.8 + deltax, lly + len(text)*dy) linesx0 = [c1[0],c1[0],c2[0],c2[0]] linesy0 = [c1[1],c2[1],c2[1],c1[1]] linesx1 = [c1[0],c2[0],c2[0],c1[0]] linesy1 = [c2[1],c2[1],c1[1],c1[1]] gist.pldj(linesx0,linesy0,linesx1,linesy1,color=color) gist.plsys(savesys) return
config_list += map(get_separate_package_config,separate_packages)
config_list += map(get_package_config,scipy_packages)
def setup_package(ignore_packages=[]): old_path = os.getcwd() path = get_path(__name__) os.chdir(path) sys.path.insert(0,os.path.join(path,'Lib')) # setup files of subpackages require scipy_core: sys.path.insert(0,os.path.join(path,'scipy_core')) try: #sys.path.insert(0,os.path.join(path,'Lib')) from scipy_version import scipy_version #del sys.path[0] config_list = [{'packages':['scipy','scipy.tests'], 'package_dir': {'scipy':'Lib', 'scipy.tests':os.path.join('Lib','tests')}}] #new style packages: for d in ['scipy_core','Lib','Lib_chaco']: if sys.platform!='win32' and d=='Lib_chaco': # Currently chaco is working only on win32. continue config_list += get_packages(os.path.join(path,d),ignore_packages, parent_path=path) #old style packages: #config_list += map(get_separate_package_config,separate_packages) #config_list += map(get_package_config,scipy_packages) config_dict = merge_config_dicts(config_list) print 'SciPy Version %s' % scipy_version setup (name = "SciPy", version = scipy_version, maintainer = "SciPy Developers", maintainer_email = "scipy-dev@scipy.org", description = "Scientific Algorithms Library for Python", license = "SciPy License (BSD Style)", url = "http://www.scipy.org", **config_dict ) finally: del sys.path[0] del sys.path[0] os.chdir(old_path)
15a9b7aa0588a214ba25545f11bebed6067e9a97 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/15a9b7aa0588a214ba25545f11bebed6067e9a97/setup.py
def _import_wx_core(wx_pth, pexec):
    """Imports the core modules for wx.  This is necessary for
    wxPython-2.5.x.
    """
    # Find the suffix.
    suffix = '.so'
    for x in [x[0] for x in imp.get_suffixes() if x[-1] is imp.C_EXTENSION]:
        if os.path.exists(os.path.join(wx_pth, '_core_' + x)):
            suffix = x
            break
    # Now import the modules manually.
    pexec('import imp, os.path')
    code="""\
75f99be65d3ba60ecfc6845632d50e0354d73f63 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/75f99be65d3ba60ecfc6845632d50e0354d73f63/wxPython_thread.py
for i in ["_core_", "_controls_", "_misc_", "_windows_", "_gdi_"]:
for i in [\"_core_\", \"_controls_\", \"_misc_\", \"_windows_\", \"_gdi_\"]:
return 1
output = valarray(shape(cond),value=self.a)
output = valarray(shape(cond),value=self.a*scale + loc)
def ppf(self,q,*args,**kwds):
    loc,scale=map(kwds.get,['loc','scale'])
    args, loc, scale = self.__fix_loc_scale(args, loc, scale)
    q,loc,scale = map(arr,(q,loc,scale))
    args = tuple(map(arr,args))
    cond0 = self._argcheck(*args) & (scale > 0) & (loc==loc)
    cond1 = (q > 0) & (q < 1)
    cond2 = (q==1) & cond0
    cond = cond0 & cond1
    output = valarray(shape(cond),value=self.a)
    insert(output,(1-cond0)*(cond1==cond1), self.badvalue)
    insert(output,cond2,self.b)
    goodargs = argsreduce(cond, *((q,)+args+(scale,loc)))
    scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
    insert(output,cond,self._ppf(*goodargs)*scale + loc)
    return output
f93e20fb7c54e06df0cd658c3e31c9380b9d5cdd /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/f93e20fb7c54e06df0cd658c3e31c9380b9d5cdd/distributions.py
insert(output,cond2,self.b)
insert(output,cond2,self.b*scale + loc)
def _stats(self, x):
    return 0, 0.25, 0, -1.0
return c/3.0, (1.0-c+c*c)/18, sqrt(2)*(2*c-1)*(c+1)*(c-2) / \
return (c+1.0)/3.0, (1.0-c+c*c)/18, sqrt(2)*(2*c-1)*(c+1)*(c-2) / \
def _stats(self, c):
    return c/3.0, (1.0-c+c*c)/18, sqrt(2)*(2*c-1)*(c+1)*(c-2) / \
           (5*(1.0-c+c*c)**1.5), -3.0/5.0
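Both distribution fixes apply the loc/scale transform to the support endpoints a and b before using them as ppf values, because the shifted and scaled variable lives on [a*scale + loc, b*scale + loc]. For a distribution supported on [0, 1]:

    a, b = 0.0, 1.0           # support of the standard distribution
    loc, scale = 2.0, 3.0     # transformed support is [2, 5]
    print(a*scale + loc)      # 2.0, the correct ppf(0)
    print(b*scale + loc)      # 5.0, the correct ppf(1)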
if not 'MATHLIB' in os.environ:
if not os.environ.has_key('MATHLIB'):
def config_toplevel(self):
    print " ============= begin top level configuration ============="
bbfab57135c981d6ff6f4a633fd0e00d94e09de6 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/bbfab57135c981d6ff6f4a633fd0e00d94e09de6/setup_xplt.py
config['packages'].append(dot_join(parent_package,'stats'))
def configuration(parent_package=''):
    #if parent_package:
    #    parent_package += '.'
    local_path = get_path(__name__)
    test_path = os.path.join(local_path,'tests')
    config = default_config_dict()
    #config['packages'].append(dot_join(parent_package,'stats'))
    config['packages'].append(dot_join(parent_package,'stats.tests'))
    config['package_dir']['stats.tests'] = test_path

    # Extension
    sources = ['randmodule.c','ranlib_all.c']
    sources = [os.path.join(local_path,x) for x in sources]
    ext = Extension(dot_join(parent_package,'stats.rand'),sources)
    config['ext_modules'].append(ext)

    return config
e9a8204e44c8739bdee25f5a4f5be4b5331424c8 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/e9a8204e44c8739bdee25f5a4f5be4b5331424c8/setup_stats.py
R = dot(r, transpose(perm))
R = dot(r, perm)
def leastsq(func,x0,args=(),Dfun=None,full_output=0,col_deriv=0,ftol=1.49012e-8,xtol=1.49012e-8,gtol=0.0,maxfev=0,epsfcn=0.0,factor=100,diag=None): """Minimize the sum of squares of a set of equations. Description: Return the point which minimizes the sum of squares of M (non-linear) equations in N unknowns given a starting estimate, x0, using a modification of the Levenberg-Marquardt algorithm. x = arg min(sum(func(y)**2)) y Inputs: func -- A Python function or method which takes at least one (possibly length N vector) argument and returns M floating point numbers. x0 -- The starting estimate for the minimization. args -- Any extra arguments to func are placed in this tuple. Dfun -- A function or method to compute the Jacobian of func with derivatives across the rows. If this is None, the Jacobian will be estimated. full_output -- non-zero to return all optional outputs. col_deriv -- non-zero to specify that the Jacobian function computes derivatives down the columns (faster, because there is no transpose operation). Outputs: (x, {cov_x, infodict, ier}, mesg) x -- the solution (or the result of the last iteration for an unsuccessful call. infodict -- a dictionary of optional outputs with the keys: 'nfev' : the number of function calls 'njev' : the number of jacobian calls 'fvec' : the function evaluated at the output 'fjac' : A permutation of the R matrix of a QR factorization of the final approximate Jacobian matrix, stored column wise. Together with ipvt, the covariance of the estimate can be approximated. 'ipvt' : an integer array of length N which defines a permutation matrix, p, such that fjac*p = q*r, where r is upper triangular with diagonal elements of nonincreasing magnitude. Column j of p is column ipvt(j) of the identity matrix. 'qtf' : the vector (transpose(q) * fvec). ier -- an integer flag. If it is equal to 1 the solution was found. If it is not equal to 1, the solution was not found and the following message gives more information. mesg -- a string message giving information about the cause of failure. cov_x -- uses the fjac and ipvt optional outputs to construct an estimate of the covariance matrix of the solution. Extended Inputs: ftol -- Relative error desired in the sum of squares. xtol -- Relative error desired in the approximate solution. gtol -- Orthogonality desired between the function vector and the columns of the Jacobian. maxfev -- The maximum number of calls to the function. If zero, then 100*(N+1) is the maximum where N is the number of elements in x0. epsfcn -- A suitable step length for the forward-difference approximation of the Jacobian (for Dfun=None). If epsfcn is less than the machine precision, it is assumed that the relative errors in the functions are of the order of the machine precision. factor -- A parameter determining the initial step bound (factor * || diag * x||). Should be in interval (0.1,100). diag -- A sequency of N positive entries that serve as a scale factors for the variables. Remarks: "leastsq" is a wrapper around MINPACK's lmdif and lmder algorithms. 
""" x0 = atleast_1d(x0) n = len(x0) if type(args) != type(()): args = (args,) m = check_func(func,x0,args,n)[0] if Dfun is None: if (maxfev == 0): maxfev = 200*(n+1) retval = _minpack._lmdif(func,x0,args,full_output,ftol,xtol,gtol,maxfev,epsfcn,factor,diag) else: if col_deriv: check_func(Dfun,x0,args,n,(n,m)) else: check_func(Dfun,x0,args,n,(m,n)) if (maxfev == 0): maxfev = 100*(n+1) retval = _minpack._lmder(func,Dfun,x0,args,full_output,col_deriv,ftol,xtol,gtol,maxfev,factor,diag) errors = {0:["Improper input parameters.", TypeError], 1:["Both actual and predicted relative reductions in the sum of squares\n are at most %f" % ftol, None], 2:["The relative error between two consecutive iterates is at most %f" % xtol, None], 3:["Both actual and predicted relative reductions in the sum of squares\n are at most %f and the relative error between two consecutive iterates is at \n most %f" % (ftol,xtol), None], 4:["The cosine of the angle between func(x) and any column of the\n Jacobian is at most %f in absolute value" % gtol, None], 5:["Number of calls to function has reached maxfev = %d." % maxfev, ValueError], 6:["ftol=%f is too small, no further reduction in the sum of squares\n is possible.""" % ftol, ValueError], 7:["xtol=%f is too small, no further improvement in the approximate\n solution is possible." % xtol, ValueError], 8:["gtol=%f is too small, func(x) is orthogonal to the columns of\n the Jacobian to machine precision." % gtol, ValueError], 'unknown':["Unknown error.", TypeError]} info = retval[-1] # The FORTRAN return value if (info not in [1,2,3,4] and not full_output): if info in [5,6,7,8]: print "Warning: " + errors[info][0] else: try: raise errors[info][1], errors[info][0] except KeyError: raise errors['unknown'][1], errors['unknown'][0] if n == 1: retval = (retval[0][0],) + retval[1:] mesg = errors[info][0] if full_output: import scipy.linalg as sl perm = take(eye(n),retval[1]['ipvt']-1) r = sl.triu(transpose(retval[1]['fjac'])[:n,:]) R = dot(r, transpose(perm)) cov_x = sl.inv(dot(transpose(R),R)) return (retval[0], cov_x) + retval[1:] + (mesg,) else: return (retval[0], mesg)
e8035e8d7cfa2696816dcfe7607a81878f53a57b /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/e8035e8d7cfa2696816dcfe7607a81878f53a57b/minpack.py
def check_gradient(fcn,Dfcn,x0,col_deriv=0):
def check_gradient(fcn,Dfcn,x0,args=(),col_deriv=0):
def check_gradient(fcn,Dfcn,x0,col_deriv=0):
    """Perform a simple check on the gradient for correctness.
    """
    x = atleast_1d(x0)
    n = len(x)
    x.shape = (n,)
    fvec = atleast_1d(fcn(x))
    if 1 not in fvec.shape:
        raise ValueError, "Function does not return a 1-D array."
    m = len(fvec)
    fvec.shape = (m,)
    ldfjac = m
    fjac = atleast_1d(Dfcn(x))
    fjac.shape = (m,n)
    if col_deriv == 0:
        fjac = transpose(fjac)

    xp = zeros((n,),Float64)
    err = zeros((m,),Float64)
    fvecp = None
    _minpack._chkder(m,n,x,fvec,fjac,ldfjac,xp,fvecp,1,err)

    fvecp = atleast_1d(fcn(xp))
    fvecp.shape = (m,)
    _minpack._chkder(m,n,x,fvec,fjac,ldfjac,xp,fvecp,2,err)

    good = (product(greater(err,0.5)))

    return (good,err)
fvec = atleast_1d(fcn(x))
if 1 not in fvec.shape:
    raise ValueError, "Function does not return a 1-D array."
fvec = atleast_1d(fcn(x,*args))
fjac = atleast_1d(Dfcn(x))
fjac = atleast_1d(Dfcn(x,*args))
fvecp = atleast_1d(fcn(xp))
fvecp = atleast_1d(fcn(xp,*args))
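The check_gradient fixes thread a user-supplied args tuple through every call of the function and its Jacobian. The forwarding pattern in isolation (call_with_args is an illustrative name):

    def call_with_args(fcn, x, args=()):
        # Matches the fcn(x, *args) convention used by the minpack wrappers.
        return fcn(x, *args)

    print(call_with_args(lambda x, k: k * x, 3.0, args=(2.0,)))    # 6.0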
include_dirs = [numpy.get_numpy_include()],
sources = [join('src',f) for f in ['_intsamplermodule.c', 'sampler5tbl.c']] )
include_dirs = [numpy.get_numpy_include(), '/usr/include/python2.4/numpy/random/'],
libraries=['randomkit'],
sources = [join('src', f) for f in ['_intsamplermodule.c', 'compact5table.c']] )
def configuration(parent_package='', top_path=None):
    config = Configuration('montecarlo', parent_package, top_path)

    config.add_extension('_intsampler',
              include_dirs = [numpy.get_numpy_include()],
              sources = [join('src',f) for f in
                         ['_intsamplermodule.c', 'sampler5tbl.c']] )

    config.add_data_dir('tests')
    config.add_data_dir('examples')
    config.add_data_dir('doc')
    return config
b53525ac030c790936e371a5bc40053892d52535 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/b53525ac030c790936e371a5bc40053892d52535/setup.py
assert_equal(cephes.nrdtrimn(0.5,1,1),1.0)
assert_approx_equal(cephes.nrdtrimn(0.5,1,1),1.0)
def check_nrdtrimn(self):
    assert_equal(cephes.nrdtrimn(0.5,1,1),1.0)
cfc04ee254f7332508c879fd8e3ae3c8783628da /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/cfc04ee254f7332508c879fd8e3ae3c8783628da/test_basic.py
-7.940178689168587]),11)
-7.940178689168587]),10)
def check_bei_zeros(self):
    bi = bi_zeros(5)
    assert_array_almost_equal(bi[0],array([-1.173713222709127,
                                           -3.271093302836352,
                                           -4.830737841662016,
                                           -6.169852128310251,
                                           -7.376762079367764]),11)
assert_almost_equal(w,0.90047299861907959,7)
assert_almost_equal(pw,0.042089745402336121,7)
assert_almost_equal(w,0.90047299861907959,6)
assert_almost_equal(pw,0.042089745402336121,6)
def check_basic(self):
    x1 = [0.11,7.87,4.61,10.14,7.95,3.14,0.46,
          4.43,0.21,4.75,0.71,1.52,3.24,
          0.93,0.42,4.97,9.53,4.55,0.47,6.66]
    w,pw = scipy.stats.shapiro(x1)
    assert_almost_equal(w,0.90047299861907959,7)
    assert_almost_equal(pw,0.042089745402336121,7)
    x2 = [1.36,1.14,2.92,2.55,1.46,1.06,5.27,-1.11,
          3.48,1.10,0.88,-0.51,1.46,0.52,6.20,1.69,
          0.08,3.67,2.81,3.49]
    w,pw = scipy.stats.shapiro(x2)
    assert_almost_equal(w,0.9590269923210144,7)
    assert_almost_equal(pw,0.52459925413131714,7)
1b57dd2995121404b1732f25b5881a8cc5828b0e /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/1b57dd2995121404b1732f25b5881a8cc5828b0e/test_morestats.py
assert_almost_equal(w,0.9590269923210144,7)
assert_almost_equal(pw,0.52459925413131714,7)
assert_almost_equal(w,0.9590269923210144,6)
assert_almost_equal(pw,0.52459925413131714,6)
xk += update
xk = xk + update
def fmin_ncg(f, x0, fprime, fhess_p=None, fhess=None, args=(), avextol=1e-5, epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0, callback=None): """Description: Minimize the function, f, whose gradient is given by fprime using the Newton-CG method. fhess_p must compute the hessian times an arbitrary vector. If it is not given, finite-differences on fprime are used to compute it. See Wright, and Nocedal 'Numerical Optimization', 1999, pg. 140. Inputs: f -- the Python function or method to be minimized. x0 -- the initial guess for the minimizer. fprime -- a function to compute the gradient of f: fprime(x, *args) fhess_p -- a function to compute the Hessian of f times an arbitrary vector: fhess_p (x, p, *args) fhess -- a function to compute the Hessian matrix of f. args -- extra arguments for f, fprime, fhess_p, and fhess (the same set of extra arguments is supplied to all of these functions). epsilon -- if fhess is approximated use this value for the step size (can be scalar or vector) callback -- an optional user-supplied function to call after each iteration. It is called as callback(xk), where xk is the current parameter vector. Outputs: (xopt, {fopt, fcalls, gcalls, hcalls, warnflag},{allvecs}) xopt -- the minimizer of f fopt -- the value of the function at xopt: fopt = f(xopt) fcalls -- the number of function calls. gcalls -- the number of gradient calls. hcalls -- the number of hessian calls. warnflag -- algorithm warnings: 1 : 'Maximum number of iterations exceeded.' allvecs -- a list of all tried iterates Additional Inputs: avextol -- Convergence is assumed when the average relative error in the minimizer falls below this amount. maxiter -- Maximum number of iterations to allow. full_output -- If non-zero return the optional outputs. disp -- If non-zero print convergence message. retall -- return a list of results at each iteration if True Remarks: Only one of fhess_p or fhess need be given. If fhess is provided, then fhess_p will be ignored. If neither fhess nor fhess_p is provided, then the hessian product will be approximated using finite differences on fprime. """ x0 = asarray(x0) fcalls, f = wrap_function(f, args) gcalls, fprime = wrap_function(fprime, args) hcalls = 0 if maxiter is None: maxiter = len(x0)*200 xtol = len(x0)*avextol update = [2*xtol] xk = x0 if retall: allvecs = [xk] k = 0 old_fval = f(x0) while (numpy.add.reduce(abs(update)) > xtol) and (k < maxiter): # Compute a search direction pk by applying the CG method to # del2 f(xk) p = - grad f(xk) starting from 0. b = -fprime(xk) maggrad = numpy.add.reduce(abs(b)) eta = min([0.5,numpy.sqrt(maggrad)]) termcond = eta * maggrad xsupi = zeros(len(x0), x0.dtype.char) ri = -b psupi = -ri i = 0 dri0 = numpy.dot(ri,ri) if fhess is not None: # you want to compute hessian once. A = apply(fhess,(xk,)+args) hcalls = hcalls + 1 while numpy.add.reduce(abs(ri)) > termcond: if fhess is None: if fhess_p is None: Ap = approx_fhess_p(xk,psupi,fprime,epsilon) else: Ap = fhess_p(xk,psupi, *args) hcalls = hcalls + 1 else: Ap = numpy.dot(A,psupi) # check curvature curv = numpy.dot(psupi,Ap) if curv == 0.0: break elif curv < 0: if (i > 0): break else: xsupi = xsupi + dri0/curv * psupi break alphai = dri0 / curv xsupi = xsupi + alphai * psupi ri = ri + alphai * Ap dri1 = numpy.dot(ri,ri) betai = dri1 / dri0 psupi = -ri + betai * psupi i = i + 1 dri0 = dri1 # update numpy.dot(ri,ri) for next time. pk = xsupi # search direction is solution to system. 
gfk = -b # gradient at xk alphak, fc, gc, old_fval = line_search_BFGS(f,xk,pk,gfk,old_fval) update = alphak * pk xk += update if callback is not None: callback(xk) if retall: allvecs.append(xk) k += 1 if disp or full_output: fval = old_fval if k >= maxiter: warnflag = 1 if disp: print "Warning: Maximum number of iterations has been exceeded" print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % fcalls[0] print " Gradient evaluations: %d" % gcalls[0] print " Hessian evaluations: %d" % hcalls else: warnflag = 0 if disp: print "Optimization terminated successfully." print " Current function value: %f" % fval print " Iterations: %d" % k print " Function evaluations: %d" % fcalls[0] print " Gradient evaluations: %d" % gcalls[0] print " Hessian evaluations: %d" % hcalls if full_output: retlist = xk, fval, fcalls[0], gcalls[0], hcalls, warnflag if retall: retlist += (allvecs,) else: retlist = xk if retall: retlist = (xk, allvecs) return retlist
6484cd4111bec54cd7ab9f96da12f13b920e927f /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/6484cd4111bec54cd7ab9f96da12f13b920e927f/optimize.py
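The fmin_ncg change replaces in-place xk += update with a rebinding xk = xk + update. With retall set, allvecs stores references to xk, and in-place addition would silently rewrite every stored iterate. A demonstration (modern numpy):

    import numpy as np

    xk = np.zeros(2)
    history = [xk]            # stores a reference, not a copy
    xk += np.ones(2)          # in-place: mutates the array history holds
    print(history[0])         # [1. 1.], the stored iterate changed

    xk = np.zeros(2)
    history = [xk]
    xk = xk + np.ones(2)      # rebinding: a fresh array, history untouched
    print(history[0])         # [0. 0.]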
assert_equal(cephes.cbrt(1),1.0)
assert_approx_equal(cephes.cbrt(1),1.0)
def check_cbrt(self):
    assert_equal(cephes.cbrt(1),1.0)
e5297972670d13fef142a7671dfa40ec1089de07 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/e5297972670d13fef142a7671dfa40ec1089de07/test_basic.py
assert_equal(cephes.exp10(2),100.0)
assert_approx_equal(cephes.exp10(2),100.0)
def check_exp10(self):
    assert_equal(cephes.exp10(2),100.0)
assert_equal(cb,cbrl)
assert_approx_equal(cb,cbrl)
def check_cbrt(self):
    cb = cbrt(27)
    cbrl = 27**(1.0/3.0)
    assert_equal(cb,cbrl)
assert_equal(ex,exrl)
assert_approx_equal(ex,exrl)
def check_exp10(self):
    ex = exp10(2)
    exrl = 10**2
    assert_equal(ex,exrl)
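All of these test fixes replace exact equality with approximate comparison, since cephes returns floating-point results whose last bits depend on the platform libm. The difference in numpy.testing terms:

    from numpy.testing import assert_approx_equal

    x = 27 ** (1.0 / 3.0)         # not exactly 3.0 on typical platforms
    # x == 3.0 can fail; approximate comparison checks significant digits
    assert_approx_equal(x, 3.0)   # passes (7 significant digits by default)
    print("ok")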
if type(x) <> StringType:
if type(x) != StringType:
def makestr (x):
    if type(x) <> StringType:
        x = str(x)
    return x
9212912c52a95c6d6e2e12c52450ef94bffd1759 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/9212912c52a95c6d6e2e12c52450ef94bffd1759/_support.py
if sterr:
if stderr:
def collapse (a,keepcols,collapsecols,stderr=0,ns=0,cfcn=None):
    """Averages data in collapsecol, keeping all unique items in keepcols
    (using unique, which keeps unique LISTS of column numbers), retaining
    the unique sets of values in keepcols, the mean for each.  If the sterr or
    N of the mean are desired, set either or both parameters to 1.

    Returns: unique 'conditions' specified by the contents of columns specified
             by keepcols, abutted with the mean(s) of column(s) specified by
             collapsecols
    """
    if cfcn is None:
        cfcn = stats.mean
    a = asarray(a)
    if keepcols == []:
        avgcol = colex(a,collapsecols)
        means = cfcn(avgcol)
        return means
    else:
        if type(keepcols) not in [ListType,TupleType,N.ArrayType]:
            keepcols = [keepcols]
        values = colex(a,keepcols)   # so that "item" can be appended (below)
        uniques = unique(values)     # get a LIST, so .sort keeps rows intact
        uniques.sort()
        newlist = []
        for item in uniques:
            if type(item) not in [ListType,TupleType,N.ArrayType]:
                item =[item]
            tmprows = linexand(a,keepcols,item)
            for col in collapsecols:
                avgcol = colex(tmprows,col)
                item.append(cfcn(avgcol))
                if sterr:
                    if len(avgcol)>1:
                        item.append(stats.sterr(avgcol))
                    else:
                        item.append('N/A')
                if ns:
                    item.append(len(avgcol))
            newlist.append(item)
        try:
            new_a = N.array(newlist)
        except TypeError:
            new_a = N.array(newlist,'O')
        return new_a
9212912c52a95c6d6e2e12c52450ef94bffd1759 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/9212912c52a95c6d6e2e12c52450ef94bffd1759/_support.py
item.append(stats.sterr(avgcol))
item.append(stats.stderr(avgcol))
9212912c52a95c6d6e2e12c52450ef94bffd1759 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/9212912c52a95c6d6e2e12c52450ef94bffd1759/_support.py
if type(item) <> StringType:
if type(item) != StringType:
def makestr (item):
    if type(item) <> StringType:
        item = str(item)
    return item
9212912c52a95c6d6e2e12c52450ef94bffd1759 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/9212912c52a95c6d6e2e12c52450ef94bffd1759/_support.py
if type(inlist[i]) <> StringType:
if type(inlist[i]) != StringType:
def lineincustcols (inlist,colsizes):
    """\nReturns a string composed of elements in inlist, with each element
9212912c52a95c6d6e2e12c52450ef94bffd1759 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/9212912c52a95c6d6e2e12c52450ef94bffd1759/_support.py
return string.join(stringlist)
return "".join(stringlist)
def list2string (inlist):
    """\nConverts a 1D list to a single long string for file output, using
9212912c52a95c6d6e2e12c52450ef94bffd1759 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/9212912c52a95c6d6e2e12c52450ef94bffd1759/_support.py
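Worth noting about this record: string.join(stringlist) defaults to a single-space separator, while the replacement "".join(stringlist) uses no separator at all, so the two lines are not behaviorally identical. A quick check of the difference:

stringlist = ['1.0', '2.5', '3.0']
# string.join(stringlist) is equivalent to " ".join(stringlist):
assert " ".join(stringlist) == "1.0 2.5 3.0"
# the replacement joins with no separator:
assert "".join(stringlist) == "1.02.53.0"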
from scipy_distutils.mingw32_support import *
import scipy_distutils.mingw32_support
def configuration(parent_package=''):
    if sys.platform == 'win32':
        from scipy_distutils.mingw32_support import *
    from scipy_distutils.core import Extension
    from scipy_distutils.misc_util import get_path, default_config_dict
    from scipy_distutils.misc_util import fortran_library_item, dot_join
    from scipy_distutils.system_info import get_info,dict_append,\
         AtlasNotFoundError,LapackNotFoundError,BlasNotFoundError,\
         LapackSrcNotFoundError,BlasSrcNotFoundError
    package = 'linalg'
    from interface_gen import generate_interface
    config = default_config_dict(package,parent_package)
    local_path = get_path(__name__)
    m = re.compile(r'(build|install|bdist|run_f2py)')
    if not filter(m.match,sys.argv):
        sources = []
        sources += glob(os.path.join(local_path,'src','*.f'))
        sources += glob(os.path.join(local_path,'src','*.c'))
        sources += glob(os.path.join(local_path,'generic_*.pyf'))
        sources += [os.path.join(local_path,f) for f in \
                    ['flapack_user_routines.pyf','atlas_version.c']]
        config['ext_modules'].append(Extension(\
            name='fake_linalg_ext_module',
            sources = sources))
        return config
    atlas_info = get_info('atlas')
    #atlas_info = {} # uncomment if ATLAS is available but want to use
                     # Fortran LAPACK/BLAS; useful for testing
    f_libs = []
    atlas_version = None
    if atlas_info:
        # Try to determine ATLAS version
        cur_dir = os.getcwd()
        os.chdir(local_path)
        cmd = '%s %s build_ext --inplace --force'%\
              (sys.executable,
               os.path.join(local_path,'setup_atlas_version.py'))
        print cmd
        s,o=run_command(cmd)
        if not s:
            cmd = sys.executable+' -c "import atlas_version"'
            print cmd
            s,o=run_command(cmd)
            if not s:
                m = re.match(r'ATLAS version (?P<version>\d+[.]\d+[.]\d+)',o)
                if m:
                    atlas_version = m.group('version')
                    print 'ATLAS version',atlas_version
                if atlas_version is None:
                    if re.search(r'undefined symbol: ATL_buildinfo',o,re.M):
                        atlas_version = '3.2.1' # or pre 3.3.6
                        print 'ATLAS version',atlas_version,'(or pre 3.3.6)'
                    else:
                        print o
            else:
                print o
        if atlas_version is None:
            print 'Failed to determine ATLAS version'
        os.chdir(cur_dir)
    if ('ATLAS_WITHOUT_LAPACK',None) in atlas_info.get('define_macros',[]):
        lapack_info = get_info('lapack')
        if not lapack_info:
            warnings.warn(LapackNotFoundError.__doc__)
            lapack_src_info = get_info('lapack_src')
            if not lapack_src_info:
                raise LapackSrcNotFoundError,LapackSrcNotFoundError.__doc__
            dict_append(lapack_info,libraries=['lapack_src'])
            f_libs.append(fortran_library_item(\
                'lapack_src',lapack_src_info['sources'],
                ))
        dict_append(lapack_info,**atlas_info)
        atlas_info = lapack_info
    blas_info,lapack_info = {},{}
    if not atlas_info:
        warnings.warn(AtlasNotFoundError.__doc__)
        blas_info = get_info('blas')
        #blas_info = {} # test building BLAS from sources.
        if not blas_info:
            warnings.warn(BlasNotFoundError.__doc__)
            blas_src_info = get_info('blas_src')
            if not blas_src_info:
                raise BlasSrcNotFoundError,BlasSrcNotFoundError.__doc__
            dict_append(blas_info,libraries=['blas_src'])
            f_libs.append(fortran_library_item(\
                'blas_src',blas_src_info['sources'],
                ))
        lapack_info = get_info('lapack')
        #lapack_info = {} # test building LAPACK from sources.
        if not lapack_info:
            warnings.warn(LapackNotFoundError.__doc__)
            lapack_src_info = get_info('lapack_src')
            if not lapack_src_info:
                raise LapackSrcNotFoundError,LapackSrcNotFoundError.__doc__
            dict_append(lapack_info,libraries=['lapack_src'])
            f_libs.append(fortran_library_item(\
                'lapack_src',lapack_src_info['sources'],
                ))
    mod_sources = {}
    if atlas_info or blas_info:
        mod_sources['fblas'] = ['generic_fblas.pyf',
                                'generic_fblas1.pyf',
                                'generic_fblas2.pyf',
                                'generic_fblas3.pyf',
                                os.path.join('src','fblaswrap.f'),
                                ]
    if atlas_info or lapack_info:
        mod_sources['flapack'] = ['generic_flapack.pyf']
    if atlas_info:
        mod_sources['cblas'] = ['generic_cblas.pyf',
                                'generic_cblas1.pyf']
        mod_sources['clapack'] = ['generic_clapack.pyf']
    else:
        dict_append(atlas_info,**lapack_info)
        dict_append(atlas_info,**blas_info)
    skip_names = {'clapack':[],'flapack':[],'cblas':[],'fblas':[]}
    if skip_single_routines:
        skip_names['clapack'].extend(\
            'sgesv cgesv sgetrf cgetrf sgetrs cgetrs sgetri cgetri'\
            ' sposv cposv spotrf cpotrf spotrs cpotrs spotri cpotri'\
            ' slauum clauum strtri ctrtri'.split())
        skip_names['flapack'].extend(skip_names['clapack'])
        skip_names['flapack'].extend(\
            'sgesdd cgesdd sgelss cgelss sgeqrf cgeqrf sgeev cgeev'\
            ' sgegv cgegv ssyev cheev slaswp claswp sgees cgees'
            ' sggev cggev'.split())
        skip_names['cblas'].extend('saxpy caxpy'.split())
        skip_names['fblas'].extend(skip_names['cblas'])
        skip_names['fblas'].extend(\
            'srotg crotg srotmg srot csrot srotm sswap cswap sscal cscal'\
            ' csscal scopy ccopy sdot cdotu cdotc snrm2 scnrm2 sasum scasum'\
            ' isamax icamax sgemv cgemv chemv ssymv strmv ctrmv'\
            ' sgemm cgemm'.split())
    if atlas_version=='3.2.1':
        skip_names['clapack'].extend(\
            'sgetri dgetri cgetri zgetri spotri dpotri cpotri zpotri'\
            ' slauum dlauum clauum zlauum strtri dtrtri ctrtri ztrtri'.split())
    for mod_name,sources in mod_sources.items():
        sources = [os.path.join(local_path,s) for s in sources]
        pyf_sources = filter(lambda s:s[-4:]=='.pyf',sources)
        mod_file = os.path.join(local_path,mod_name+'.pyf')
        if dep_util.newer_group(pyf_sources,mod_file):
            generate_interface(mod_name,sources[0],mod_file,
                               skip_names.get(mod_name,[]))
        sources = filter(lambda s:s[-4:]!='.pyf',sources)
        ext_args = {'name':dot_join(parent_package,package,mod_name),
                    'sources':[mod_file]+sources}
        dict_append(ext_args,**atlas_info)
        ext = Extension(**ext_args)
        ext.need_fcompiler_opts = 1
        config['ext_modules'].append(ext)
    flinalg = []
    for f in ['det.f','lu.f',
              #'wrappers.c','inv.f',
              ]:
        flinalg.append(os.path.join(local_path,'src',f))
    ext_args = {'name':dot_join(parent_package,package,'_flinalg'),
                'sources':flinalg}
    dict_append(ext_args,**atlas_info)
    config['ext_modules'].append(Extension(**ext_args))
    ext_args = {'name':dot_join(parent_package,package,'calc_lwork'),
                'sources':[os.path.join(local_path,'src','calc_lwork.f')],
                }
    dict_append(ext_args,**atlas_info)
    config['ext_modules'].append(Extension(**ext_args))
    config['fortran_libraries'].extend(f_libs)
    return config
56dd6a368cdad4bb26c9004b60dc948a29ee3e2c /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/56dd6a368cdad4bb26c9004b60dc948a29ee3e2c/setup_linalg.py
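The import change in this record keeps the module's import-time side effects (registering the mingw32 compiler support) without star-importing its names into the setup namespace. A minimal sketch of the pattern, assuming the same scipy_distutils layout as the record:

import sys

if sys.platform == 'win32':
    # imported for its side effects only; no names are bound locally
    import scipy_distutils.mingw32_support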
def legend(text,linetypes=None,lleft=None,color='black',tfont='helvetica',fontsize=14,nobox=0):
def legend(text,linetypes=None,lleft=None,color=None,tfont='helvetica',fontsize=14,nobox=0):
def legend(text,linetypes=None,lleft=None,color='black',tfont='helvetica',fontsize=14,nobox=0):
    """Construct and place a legend.

    Description:

      Build a legend and place it on the current plot with an
      interactive prompt.

    Inputs:

      text -- A list of strings which document the curves.
      linetypes -- If not given, then the text strings are associated
                   with the curves in the order they were originally
                   drawn.  Otherwise, associate the text strings with the
                   corresponding curve types given.  See plot for description.
    """
    global _hold
    sys = gist.plsys()
    if sys == 0:
        gist.plsys(1)
    viewp = gist.viewport()
    gist.plsys(sys)
    DX = viewp[1] - viewp[0]
    DY = viewp[3] - viewp[2]
    width = DY / 10.0
    if lleft is None:
        lleft = gist.mouse(0,0,"Click on point for lower left coordinate.")
        llx = lleft[0]
        lly = lleft[1]
    else:
        llx,lly = lleft[:2]
    savesys = gist.plsys()
    dx = width / 3.0
    legarr = Numeric.arange(llx,llx+width,dx)
    legy = Numeric.ones(legarr.shape)
    dy = fontsize*points*1.2
    deltay = fontsize*points / 2.8
    deltax = fontsize*points / 2.6 * DX / DY
    ypos = lly + deltay
    if linetypes is None:
        linetypes = _GLOBAL_LINE_TYPES[:]  # copy them out
    gist.plsys(0)
    savehold = _hold
    _hold = 1
    for k in range(len(text)):
        plot(legarr,ypos*legy,linetypes[k])
        #print llx+width+deltax, ypos-deltay
        if text[k] != "":
            gist.plt(text[k],llx+width+deltax,ypos-deltay,
                     color=color,font=tfont,height=fontsize,tosys=0)
        ypos = ypos + dy
    _hold = savehold
    if nobox:
        pass
    else:
        gist.plsys(0)
        maxlen = MLab.max(map(len,text))
        c1 = (llx-deltax,lly-deltay)
        c2 = (llx + width + deltax + fontsize*points*maxlen/1.8 + deltax,
              lly + len(text)*dy)
        linesx0 = [c1[0],c1[0],c2[0],c2[0]]
        linesy0 = [c1[1],c2[1],c2[1],c1[1]]
        linesx1 = [c1[0],c2[0],c2[0],c1[0]]
        linesy1 = [c2[1],c2[1],c1[1],c1[1]]
        gist.pldj(linesx0,linesy0,linesx1,linesy1,color=color)
    gist.plsys(savesys)
    return
9121349dd46a03f04c040f9bcf23b3a2b2bfa73d /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/9121349dd46a03f04c040f9bcf23b3a2b2bfa73d/Mplot.py
"""
"""
9121349dd46a03f04c040f9bcf23b3a2b2bfa73d /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/9121349dd46a03f04c040f9bcf23b3a2b2bfa73d/Mplot.py
def title(text,color=None,font='helvetica',fontsize=18,deltax=0.0,deltay=0.0):
    """Set title for plot.

    To get symbol font for the next character precede by !.
    To get superscript enclose with ^^
    To get subscript enclose with _<text>_
    """
    global _textcolor
    if color is None:
        color = _textcolor
    else:
        _textcolor = color
    if color is None:
        color = 'black'
    vp = gist.viewport()
    xmidpt = (vp[0] + vp[1])/2.0 + deltax
    if text != "":
        gist.plt(text,xmidpt,vp[3] + 0.02 + deltay,
                 font=font, justify='CB', height=fontsize, color=color)
9121349dd46a03f04c040f9bcf23b3a2b2bfa73d /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/9121349dd46a03f04c040f9bcf23b3a2b2bfa73d/Mplot.py
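Both legend() and title() now use the same sentinel-default idiom: color=None means "fall back to the shared module default", while an explicit value updates that default. A stripped-down sketch of the idiom (names hypothetical, not the Mplot.py code itself):

_textcolor = None   # shared module-level default, as in Mplot.py

def pick_color(color=None):
    # None means "use the current default"; an explicit value
    # becomes the new default, mirroring title() above
    global _textcolor
    if color is None:
        color = _textcolor
    else:
        _textcolor = color
    if color is None:
        color = 'black'   # final fallback when no default is set yet
    return color

assert pick_color() == 'black'
assert pick_color('red') == 'red'
assert pick_color() == 'red'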
def _get_namespace(self): return self.__namespace or default_namespace
def _get_namespace(self):
    if isinstance(self.__namespace, N.ndarray):
        return self.__namespace
    else:
        return self.__namespace or default_namespace
def _get_namespace(self): return self.__namespace or default_namespace
3c5a494acfbc5e9e8197217036fc3890452a6248 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/3c5a494acfbc5e9e8197217036fc3890452a6248/formula.py
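The isinstance check added here sidesteps a real pitfall: `x or default` evaluates the truth of x, and a multi-element ndarray refuses to collapse to a single bool. A minimal demonstration of the failure the fix guards against:

import numpy

ns = numpy.array([0, 1])
try:
    result = ns or {}   # truth test on a 2-element array
except ValueError:
    result = ns         # "truth value ... is ambiguous": the original bug
assert result is ns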
n = Numeric.searchsorted(Numeric.sort(a), bins)
n = Numeric.concatenate([ n, [len(a)]])
n = searchsorted(sort(a), bins)
n = concatenate([ n, [len(a)]])
def histogram2(a, bins):
    """ histogram2(a,bins) -- Compute histogram of a using divisions in bins

    Description:
       Count the number of times values from array a fall into
       numerical ranges defined by bins.  Range x is given by
       bins[x] <= range_x < bins[x+1] where x =0,N and N is the
       length of the bins array.  The last range is given by
       bins[N] <= range_N < infinity.  Values less than bins[0] are
       not included in the histogram.

    Arguments:
       a -- 1D array.  The array of values to be divided into bins
       bins -- 1D array.  Defines the ranges of values to use during
               histogramming.

    Returns:
       1D array.  Each value represents the occurrences for a given
       bin (range) of values.

    Caveat:
       This should probably have an axis argument that would histogram
       along a specific axis (kinda like matlab)
    """
    n = Numeric.searchsorted(Numeric.sort(a), bins)
    n = Numeric.concatenate([ n, [len(a)]])
    return n[1:]-n[:-1]
53dbd51f8c72d530573a48659741a4012117c0b2 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/53dbd51f8c72d530573a48659741a4012117c0b2/stats.py
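The counting trick in histogram2, traced on made-up data: searchsorted finds where each bin edge would land in the sorted sample, and adjacent differences are the per-bin counts. Written against modern numpy for checkability:

from numpy import searchsorted, sort, concatenate

a = [1, 2, 2, 5, 7]
bins = [0, 2, 4, 6]
n = searchsorted(sort(a), bins)    # -> [0, 1, 3, 4]
n = concatenate([n, [len(a)]])     # -> [0, 1, 3, 4, 5]
counts = n[1:] - n[:-1]            # one count per bin, last bin open-ended
assert list(counts) == [1, 2, 1, 1]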
noise = Numeric.mean(Numeric.ravel(lVar))
noise = mean(Numeric.ravel(lVar))
def wiener(im,mysize=None,noise=None):
    """Perform a wiener filter on an N-dimensional array.

    Description:

      Apply a wiener filter to the N-dimensional array in.

    Inputs:

      in -- an N-dimensional array.
      kernel_size -- A scalar or an N-length list giving the size of the
                     median filter window in each dimension.  Elements of
                     kernel_size should be odd.  If kernel_size is a scalar,
                     then this scalar is used as the size in each dimension.
      noise -- The noise-power to use.  If None, then noise is estimated as
               the average of the local variance of the input.

    Outputs: (out,)

      out -- Wiener filtered result with the same shape as in.
    """
    im = Numeric.asarray(im)
    if mysize is None:
        mysize = [3] * len(im.shape)
    mysize = Numeric.asarray(mysize)

    # Estimate the local mean
    lMean = correlate(im,Numeric.ones(mysize),1) / Numeric.product(mysize)

    # Estimate the local variance
    lVar = correlate(im**2,Numeric.ones(mysize),1) / Numeric.product(mysize) \
           - lMean**2

    # Estimate the noise power if needed.
    if noise==None:
        noise = Numeric.mean(Numeric.ravel(lVar))

    # Compute result
    # f = lMean + (maximum(0, lVar - noise) ./
    #              maximum(lVar, noise)) * (im - lMean)
    #
    out = im - lMean
    im = lVar - noise
    im = Numeric.maximum(im,0)
    lVar = Numeric.maximum(lVar,noise)
    out = out / lVar
    out = out * im
    out = out + lMean

    return out
0bd917a72b16e9c9f4aa4b41256c5cfc1daf0d81 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/0bd917a72b16e9c9f4aa4b41256c5cfc1daf0d81/signaltools.py
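The commented f = ... formula in wiener, evaluated once with made-up scalars to show what the array expressions compute elementwise:

lMean, lVar, noise, im = 2.0, 4.0, 1.0, 5.0
out = lMean + (max(lVar - noise, 0.0) / max(lVar, noise)) * (im - lMean)
assert out == 2.0 + (3.0 / 4.0) * 3.0   # 4.25: pixel pulled toward local mean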
from scipy.stats import mean
def resample(x,num,t=None,axis=0,window=None):
    """Resample to num samples using Fourier method along the given axis.

    The resampled signal starts at the same value of x but is sampled with
    a spacing of len(x) / num * (spacing of x).  Because a Fourier method
    is used, the signal is assumed periodic.

    Window controls a Fourier-domain window that tapers the Fourier
    spectrum before zero-padding to alleviate ringing in the resampled
    values for sampled signals you didn't intend to be interpreted as
    band-limited.

    If window is a string then use the named window.  If window is a float,
    then it represents a value of beta for a kaiser window.  If window is a
    tuple, then the first component is a string representing the window,
    and the next arguments are parameters for that window.

    Possible windows are:
        'blackman'      ('black',   'blk')
        'hamming'       ('hamm',    'ham')
        'bartlett'      ('bart',    'brt')
        'hanning'       ('hann',    'han')
        'kaiser'        ('ksr')              # requires parameter (beta)
        'gaussian'      ('gauss',   'gss')   # requires parameter (std.)
        'general gauss' ('general', 'ggs')   # requires two parameters
                                             #   (power, width)

    The first sample of the returned vector is the same as the first sample
    of the input vector, the spacing between samples is changed from dx to
    dx * len(x) / num.

    If t is not None, then it represents the old sample positions, and the
    new sample positions will be returned as well as the new samples.
    """
    x = asarray(x)
    X = fft(x,axis=axis)
    Nx = x.shape[axis]
    if window is not None:
        W = ifftshift(get_window(window,Nx))
        newshape = ones(len(x.shape))
        newshape[axis] = len(W)
        W.shape = newshape
        X = X*W
    sl = [slice(None)]*len(x.shape)
    newshape = list(x.shape)
    newshape[axis] = num
    N = int(Numeric.minimum(num,Nx))
    Y = Numeric.zeros(newshape,'D')
    sl[axis] = slice(0,(N+1)/2)
    Y[sl] = X[sl]
    sl[axis] = slice(-(N-1)/2,None)
    Y[sl] = X[sl]
    y = ifft(Y,axis=axis)*(float(num)/float(Nx))
    if x.typecode() not in ['F','D']:
        y = y.real
    if t is None:
        return y
    else:
        new_t = arange(0,num)*(t[1]-t[0])* Nx / float(num) + t[0]
        return y, new_t
0bd917a72b16e9c9f4aa4b41256c5cfc1daf0d81 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/0bd917a72b16e9c9f4aa4b41256c5cfc1daf0d81/signaltools.py
try:
    from scipy.sandbox.numexpr import expressions
    modname = 'scipy.sandbox.numexpr.expressions'
except ImportError:
    from numexpr import expressions
    modname = 'numexpr.expressions'
modname = modname[__name__.rfind('.')-1:] + '.expressions'
def makeExpressions(context):
    """Make private copy of the expressions module with a custom
    get_context().

    An attempt was made to make this threadsafe, but I can't guarantee
    it's bulletproof.
    """
    import sys, imp
    try:
        from scipy.sandbox.numexpr import expressions
        modname = 'scipy.sandbox.numexpr.expressions'
    except ImportError:
        from numexpr import expressions
        modname = 'numexpr.expressions'
    # get our own, private copy of expressions
    imp.acquire_lock()
    try:
        old = sys.modules.pop(modname)
        import expressions
        private = sys.modules.pop(modname)
        sys.modules[modname] = old
    finally:
        imp.release_lock()
    def get_context():
        return context
    private.get_context = get_context
    return private
b2ceecb6b6df9eb5ee857b2b837c102eaea5e88d /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/b2ceecb6b6df9eb5ee857b2b837c102eaea5e88d/compiler.py
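The sys.modules shuffle in makeExpressions generalizes to any module: pop the cached entry, re-import to force a fresh copy, then restore the original for everyone else. A sketch with the standard-library json module standing in (the module choice is arbitrary; the imp locking API follows the original and is deprecated in modern Python):

import imp, sys
import json

imp.acquire_lock()
try:
    old = sys.modules.pop('json')
    import json as fresh        # re-executes the module: a private copy
    private = fresh
    sys.modules['json'] = old   # restore the shared copy
finally:
    imp.release_lock()

assert private is not old       # two independent module objects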
class dictsampler(object):
class dictsampler(genericsampler):
def sample(self, size, return_probs=0):
    """Generates a sample of the given size from the specified discrete
    distribution, optionally returning the probabilities under the
    distribution.
fa5a9fad06dfb3c9eb9207b72792b9d4d03890fa /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/fa5a9fad06dfb3c9eb9207b72792b9d4d03890fa/montecarlo.py
def __init__(self, mydict):
    # We can't use this:
    #   self.labels = numpy.array(mydict.keys(), object)
    # since numpy's construction of object arrays is dodgy.  Instead,
    # create an empty object array and fill it:
    self.labels = numpy.empty(len(mydict), dtype=object)
    for i, label in enumerate(mydict):
        self.labels[i] = label
    self.probs = numpy.array(mydict.values(), float)
    s = self.probs.sum()
    if s > 0:
        self.probs /= s
    else:
        raise ValueError, "sum of table frequencies must be > 0"
fa5a9fad06dfb3c9eb9207b72792b9d4d03890fa /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/fa5a9fad06dfb3c9eb9207b72792b9d4d03890fa/montecarlo.py
raise TypeError, "unsupported type for adding to a sparse matrix"
raise TypeError, "unsupported type for sparse matrix addition"
def __radd__(self, other):
    """ Function supporting the operation: self + other.
    This does not currently work correctly for self + dense.
    Perhaps dense matrices need some hooks to support this.
    """
    if isscalar(other) or (isdense(other) and rank(other)==0):
        raise NotImplementedError, 'adding a scalar to a CSC matrix is ' \
              'not yet supported'
    elif isspmatrix(other):
        ocs = other.tocsc()
        if (ocs.shape != self.shape):
            raise ValueError, "inconsistent shapes"
        dtypechar = _coerce_rules[(self.dtypechar, ocs.dtypechar)]
        nnz1, nnz2 = self.nnz, ocs.nnz
        data1, data2 = _convert_data(self.data[:nnz1], ocs.data[:nnz2],
                                     dtypechar)
        func = getattr(sparsetools, _transtabl[dtypechar]+'cscadd')
        c, rowc, ptrc, ierr = func(data1, self.rowind[:nnz1], self.indptr,
                                   data2, ocs.rowind[:nnz2], ocs.indptr)
        if ierr:
            raise ValueError, "ran out of space (but shouldn't have happened)"
        M, N = self.shape
        return csc_matrix((c, rowc, ptrc), dims=(M, N))
    elif isdense(other):
        # Convert this matrix to a dense matrix and add them.
        # This does not currently work.
        return self.todense() + other
    else:
        raise TypeError, "unsupported type for adding to a sparse matrix"
42a797e36c22c530c85f0fd893fc9733248799dd /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/42a797e36c22c530c85f0fd893fc9733248799dd/sparse.py
raise TypeError, "unknown type for sparse matrix addition"
raise TypeError, "unsupported type for sparse matrix addition"
def __add__(self, other):
    if isscalar(other) or (isdense(other) and rank(other)==0):
        raise NotImplementedError, 'adding a scalar to a CSC matrix is ' \
              'not yet supported'
    elif isspmatrix(other):
        ocs = other.tocsc()
        if (ocs.shape != self.shape):
            raise ValueError, "inconsistent shapes"
        dtypechar = _coerce_rules[(self.dtypechar, ocs.dtypechar)]
        nnz1, nnz2 = self.nnz, ocs.nnz
        data1, data2 = _convert_data(self.data[:nnz1], ocs.data[:nnz2],
                                     dtypechar)
        func = getattr(sparsetools, _transtabl[dtypechar]+'cscadd')
        c, rowc, ptrc, ierr = func(data1, self.rowind[:nnz1], self.indptr,
                                   data2, ocs.rowind[:nnz2], ocs.indptr)
        if ierr:
            raise ValueError, "ran out of space (but shouldn't have happened)"
        M, N = self.shape
        return csc_matrix((c, rowc, ptrc), dims=(M, N))
    elif isdense(other):
        # Convert this matrix to a dense matrix and add them
        return other + self.todense()
    else:
        raise TypeError, "unknown type for sparse matrix addition"
42a797e36c22c530c85f0fd893fc9733248799dd /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/42a797e36c22c530c85f0fd893fc9733248799dd/sparse.py
raise TypeError, "unsupported type for adding to a sparse matrix"
raise TypeError, "unsupported type for sparse matrix addition"
def __add__(self, other):
    # First check if argument is a scalar
    if isscalar(other) or (isdense(other) and rank(other)==0):
        # Now we would add this scalar to every element.
        raise NotImplementedError, 'adding a scalar to a sparse matrix ' \
              'is not yet supported'
    elif isspmatrix(other):
        ocs = other.tocsr()
        if (ocs.shape != self.shape):
            raise ValueError, "inconsistent shapes"
42a797e36c22c530c85f0fd893fc9733248799dd /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/42a797e36c22c530c85f0fd893fc9733248799dd/sparse.py
self.shape = (M, N)
assert M == int(M) and M > 0
assert N == int(N) and N > 0
self.shape = (int(M), int(N))
def __init__(self, A=None):
    """ Create a new dictionary-of-keys sparse matrix.  An optional
    argument A is accepted, which initializes the dok_matrix with it.
    This can be a tuple of dimensions (m, n) or a (dense) array
    to copy.
    """
    dict.__init__(self)
    spmatrix.__init__(self)
    self.shape = (0, 0)
    # If _validate is True, ensure __setitem__ keys are integer tuples
    self._validate = True
    if A is not None:
        if type(A) == tuple:
            # Interpret as dimensions
            try:
                dims = A
                (M, N) = dims
                self.shape = (M, N)
                return
            except (TypeError, ValueError):
                pass
        if isspmatrix(A):
            # For sparse matrices, this is too inefficient; we need
            # something else.
            raise NotImplementedError, "initializing a dok_matrix with " \
                  "a sparse matrix is not yet supported"
        elif isdense(A):
            A = asarray(A)
            if rank(A) == 2:
                M, N = A.shape
                self.shape = (M, N)
                for i in range(M):
                    for j in range(N):
                        if A[i, j] != 0:
                            self[i, j] = A[i, j]
            elif rank(A) == 1:
                M = A.shape[0]
                self.shape = (M, 1)
                for i in range(M):
                    if A[i] != 0:
                        self[i, 0] = A[i]
            else:
                raise TypeError, "array for initialization must have rank 2"
        else:
            raise TypeError, "argument should be a tuple of dimensions " \
                  "or a sparse or dense matrix"
42a797e36c22c530c85f0fd893fc9733248799dd /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/42a797e36c22c530c85f0fd893fc9733248799dd/sparse.py
except (TypeError, ValueError): pass
except (TypeError, ValueError, AssertionError):
    raise TypeError, "dimensions must be a 2-tuple of positive"\
                     " integers"
42a797e36c22c530c85f0fd893fc9733248799dd /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/42a797e36c22c530c85f0fd893fc9733248799dd/sparse.py
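The assertions added to dok_matrix turn a malformed shape tuple into a TypeError at construction time instead of silently storing a bad shape. The same check as a standalone helper (hypothetical name, py3-style raise for checkability):

def _check_dims(dims):
    # mirrors the added code: a 2-tuple of positive, integral dimensions
    try:
        (M, N) = dims
        assert M == int(M) and M > 0
        assert N == int(N) and N > 0
    except (TypeError, ValueError, AssertionError):
        raise TypeError("dimensions must be a 2-tuple of positive integers")
    return (int(M), int(N))

assert _check_dims((3, 4)) == (3, 4)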
N = len(values)
for n in range(N):
    self[n, n+k] = values[n]
M, N = self.shape
m = len(values)
for i in range(min(M, N-k)):
    self[i, i+k] = values[i]
def setdiag(self, values, k=0):
    N = len(values)
    for n in range(N):
        self[n, n+k] = values[n]
    return
42a797e36c22c530c85f0fd893fc9733248799dd /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/42a797e36c22c530c85f0fd893fc9733248799dd/sparse.py
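Why the setdiag fix clips at min(M, N-k): on a 3x3 matrix the k=1 superdiagonal has only two slots, so looping len(values) times would assign past the last column. A plain-dict trace of the corrected loop (values made up):

M, N, k = 3, 3, 1
values = [10, 20, 30]
d = {}
for i in range(min(M, N - k)):   # only 2 slots: (0,1) and (1,2)
    d[(i, i + k)] = values[i]
assert d == {(0, 1): 10, (1, 2): 20}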
if not (exists('src/randomkit.c') and exists('src/randomkit.h')):
    raise OSError, "Please copy or symlink randomkit.c and randomkit.h to montecarlo/src/ from numpy/random/mtrand/ in the NumPy source tree!"
def configuration(parent_package='', top_path=None):
    config = Configuration('montecarlo', parent_package, top_path)
    # This code requires 'randomkit.c' and 'randomkit.h' to have been copied
    # to (or symlinked to) montecarlo/src/.
    config.add_extension('_intsampler',
                         sources = [join('src', f) for f in
                                    ['_intsamplermodule.c',
                                     'compact5table.c',
                                     'randomkit.c']])
    config.add_data_dir('tests')
    config.add_data_dir('examples')
    config.add_data_dir('doc')
    return config
fd76054335486c6655f084bac58bb28eea4d2458 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/fd76054335486c6655f084bac58bb28eea4d2458/setup.py
from scipy_test.testing import ScipyTest
from scipy.test.testing import ScipyTest
def __init__(self,name,location,p_frame=None):
cf7437641c3970f9712152ab4ef39c45532e5297 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/cf7437641c3970f9712152ab4ef39c45532e5297/ppimport.py