Dataset columns: rem (string, length 0 to 322k), add (string, length 0 to 2.05M), context (string, length 8 to 228k).
print "Adding a constant:"
c += 5
print c
def _testme():
    a = csc_matrix((arange(1, 9),
                    numpy.transpose([[0, 1, 1, 2, 2, 3, 3, 4],
                                     [0, 1, 3, 0, 2, 3, 4, 4]])))
    print "Representation of a matrix:"
    print repr(a)
    print "How a matrix prints:"
    print a
    print "Adding two matrices:"
    b = a+a
    print b
    print "Subtracting two matrices:"
    c = b - a
    print c
    print "Multiplying a sparse matrix by a dense vector:"
    d = a*[1, 2, 3, 4, 5]
    print d
    print [1, 2, 3, 4, 5]*a
    print "Inverting a sparse linear system:"
    print "The sparse matrix (constructed from diagonals):"
    a = spdiags([[1, 2, 3, 4, 5], [6, 5, 8, 9, 10]], [0, 1], 5, 5)
    b = numpy.array([1, 2, 3, 4, 5])
    print "Solve: single precision complex:"
    useUmfpack = False
    a = a.astype('F')
bprime[j] = val
bprime[j] = real(val)
def bilinear(b,a,fs=1.0):
    """Return a digital filter from an analog filter using the bilinear transform.

    The bilinear transform substitutes (z-1) / (z+1) for s
    """
    fs = float(fs)
    a,b = map(atleast_1d,(a,b))
    D = len(a) - 1
    N = len(b) - 1
    artype = Num.Float
    M = max([N,D])
    Np = M
    Dp = M
    bprime = Num.zeros(Np+1,artype)
    aprime = Num.zeros(Dp+1,artype)
    for j in range(Np+1):
        val = 0.0
        for i in range(N+1):
            for k in range(i+1):
                for l in range(M-i+1):
                    if k+l == j:
                        val += comb(i,k)*comb(M-i,l)*b[N-i]*pow(2*fs,i)*(-1)**k
        bprime[j] = val
    for j in range(Dp+1):
        val = 0.0
        for i in range(D+1):
            for k in range(i+1):
                for l in range(M-i+1):
                    if k+l == j:
                        val += comb(i,k)*comb(M-i,l)*a[D-i]*pow(2*fs,i)*(-1)**k
        aprime[j] = val
    return normalize(bprime, aprime)
aprime[j] = val
aprime[j] = real(val)
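For context, the transform this routine implements is the substitution s -> 2*fs*(z-1)/(z+1). A minimal usage sketch against the modern scipy.signal API (assuming a current SciPy install; the filter here is illustrative):

from scipy import signal

# Analog first-order low-pass H(s) = 1/(s + 1), discretized at fs = 10 Hz
# via the bilinear transform, as in the historical code above.
b_digital, a_digital = signal.bilinear([1.0], [1.0, 1.0], fs=10.0)
print(b_digital, a_digital)   # two real-valued, already-normalized coefficient arrays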
x,y = asarray(x, y)
x = asarray(x)
y = asarray(y)
def mannwhitneyu(x,y):
    """
result = squeeze(transpose(reshape(result,dims[::-1])))
result = squeeze(transpose(reshape(result,tupdims)))
def _parse_mimatrix(fid,bytes):
    dclass, cmplx, nzmax = _parse_array_flags(fid)
    dims = _get_element(fid)[0]
    name = ''.join(asarray(_get_element(fid)[0]).astype('c'))
    if dclass in mxArrays:
        result, unused = _get_element(fid)
        if type == mxCHAR_CLASS:
            result = ''.join(asarray(result).astype('c'))
        else:
            if cmplx:
                imag, unused = _get_element(fid)
                result = result + cast[imag.typecode()](1j) * imag
            result = squeeze(transpose(reshape(result,dims[::-1])))
    elif dclass == mxCELL_CLASS:
        length = product(dims)
        result = zeros(length, PyObject)
        for i in range(length):
            sa, unused = _get_element(fid)
            result[i] = sa
        result = squeeze(transpose(reshape(result,dims[::-1])))
        if rank(result) == 0:
            result = result.toscalar()
    elif dclass == mxSTRUCT_CLASS:
        length = product(dims)
        result = zeros(length, PyObject)
        namelength = _get_element(fid)[0]
        # get field names
        names = _get_element(fid)[0]
        splitnames = [names[i:i+namelength] for i in \
                      xrange(0,len(names),namelength)]
        fieldnames = [''.join(asarray(x).astype('c')).strip('\x00')
                      for x in splitnames]
        for i in range(length):
            result[i] = mat_struct()
            for element in fieldnames:
                val, unused = _get_element(fid)
                result[i].__dict__[element] = val
        result = squeeze(transpose(reshape(result,dims[::-1])))
        if rank(result) == 0:
            result = result.toscalar()
    # object is like a structure with but with a class name
    elif dclass == mxOBJECT_CLASS:
        class_name = ''.join(asarray(_get_element(fid)[0]).astype('c'))
        length = product(dims)
        result = zeros(length, PyObject)
        namelength = _get_element(fid)[0]
        # get field names
        names = _get_element(fid)[0]
        splitnames = [names[i:i+namelength] for i in \
                      xrange(0,len(names),namelength)]
        fieldnames = [''.join(asarray(x).astype('c')).strip('\x00')
                      for x in splitnames]
        for i in range(length):
            result[i] = mat_obj()
            result[i]._classname = class_name
            for element in fieldnames:
                val, unused = _get_element(fid)
                result[i].__dict__[element] = val
        result = squeeze(transpose(reshape(result,dims[::-1])))
        if rank(result) == 0:
            result = result.toscalar()
    elif dclass == mxSPARSE_CLASS:
        rowind, unused = _get_element(fid)
        colind, unused = _get_element(fid)
        res, unused = _get_element(fid)
        if cmplx:
            imag, unused = _get_element(fid)
            res = res + cast[imag.typecode()](1j)*imag
        if have_sparse:
            spmat = scipy.sparse.csc_matrix(res, (rowind[:len(res)], colind),
                                            M=dims[0], N=dims[1])
            result = spmat
        else:
            result = (dims, rowind, colind, res)
    return result, name
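The reshape/transpose idiom above exists because MATLAB stores arrays column-major while NumPy defaults to row-major: reading the raw element stream into the reversed dimensions and transposing recovers the original layout. A self-contained sketch of that round trip (modern NumPy, illustrative data):

import numpy as np

m = np.array([[1, 2, 3],
              [4, 5, 6]])
stream = m.flatten(order='F')   # column-major element stream, as in a MAT-file
dims = (2, 3)

# Row-major reshape with reversed dims, then transpose, restores the matrix.
recovered = stream.reshape(dims[::-1]).transpose()
assert (recovered == m).all()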
y = scipy.stats.linregress(BIG,X)
y = scipy.stats.linregress(X,BIG)
def check_linregressBIGX(self):
    """ W.II.F. Regress BIG on X.
lwork = calc_lwork.getri(getri.prefix,a1.shape[0])[1]
lwork = calc_lwork.getri(getri.prefix,a1.shape[0])
lwork = lwork[1]
lwork = int(1.01*lwork)
def inv(a, overwrite_a=0):
    """Return inverse of square matrix a.
    """
    a1 = asarray(a)
    if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]:
        raise ValueError, 'expected square matrix'
    overwrite_a = overwrite_a or a1 is not a
    #XXX: I found no advantage or disadvantage of using finv.
def configuration(parent_package='',parent_path=None):
    config = Configuration('integrate', parent_package, parent_path)
    blas_opt = get_info('blas_opt')
    if not blas_opt:
        raise NotFoundError,'no blas resources found'
blas_opt = get_info('blas_opt',notfound_action=2)
def configuration(parent_package='',parent_path=None):
    config = Configuration('integrate', parent_package, parent_path)
    blas_opt = get_info('blas_opt')
    if not blas_opt:
        raise NotFoundError,'no blas resources found'
    config.add_library('linpack_lite',
                       sources=[join('linpack_lite','*.f')])
    config.add_library('mach',
                       sources=[join('mach','*.f')])
    config.add_library('quadpack',
                       sources=[join('quadpack','*.f')])
    config.add_library('odepack',
                       sources=[join('odepack','*.f')])
    # should we try to weed through files and replace with calls to
    # LAPACK routines?
    # Yes, someday...

    # Extensions
    # quadpack:
    config.add_extension('_quadpack',
                         sources=['_quadpackmodule.c'],
                         libraries=['quadpack', 'linpack_lite', 'mach'])
    # odepack
    libs = ['odepack','linpack_lite','mach']
    # Remove libraries key from blas_opt
    if blas_opt.has_key('libraries'):    # key doesn't exist on OS X ...
        libs.extend(blas_opt['libraries'])
    newblas = {}
    for key in blas_opt.keys():
        if key == 'libraries':
            continue
        newblas[key] = blas_opt[key]
    config.add_extension('_odepack',
                         sources=['_odepackmodule.c'],
                         libraries=libs,
                         **newblas)
    # vode
    config.add_extension('vode',
                         sources=['vode.pyf'],
                         libraries=libs,
                         **newblas)
    config.add_data_dir('tests')
    return config
setup(**configuration(parent_path=''))
setup(**configuration(top_path='').todict())
libs.extend(blas_opt['libraries'])
if blas_opt.has_key('libraries'):
    libs.extend(blas_opt['libraries'])
def configuration(parent_package='',parent_path=None):
    config = Configuration('integrate', parent_package, parent_path)
    blas_opt = get_info('blas_opt')
    if not blas_opt:
        raise NotFoundError,'no blas resources found'
    config.add_library('linpack_lite',
                       sources=[join('linpack_lite','*.f')])
    config.add_library('mach',
                       sources=[join('mach','*.f')])
    config.add_library('quadpack',
                       sources=[join('quadpack','*.f')])
    config.add_library('odepack',
                       sources=[join('odepack','*.f')])
    # should we try to weed through files and replace with calls to
    # LAPACK routines?
    # Yes, someday...

    # Extensions
    # quadpack:
    config.add_extension('_quadpack',
                         sources=['_quadpackmodule.c'],
                         libraries=['quadpack', 'linpack_lite', 'mach'])
    # odepack
    libs = ['odepack','linpack_lite','mach']
    # remove libraries key from blas_opt
    libs.extend(blas_opt['libraries'])
    newblas = {}
    for key in blas_opt.keys():
        if key == 'libraries':
            continue
        newblas[key] = blas_opt[key]
    config.add_extension('_odepack',
                         sources=['_odepackmodule.c'],
                         libraries=libs,
                         **newblas)
    # vode
    config.add_extension('vode',
                         sources=['vode.pyf'],
                         libraries=libs,
                         **newblas)
    return config
execstring = 'column = map(lambda x: x'+cnums+', listoflists)'
exec execstring
evalstring = 'map(lambda x: x'+cnums+', listoflists)'
column = eval(evalstring)
def colex (listoflists,cnums):
    """\nExtracts from listoflists the columns specified in the list 'cnums'
    (cnums can be an integer, a sequence of integers, or an expression that
fname = os.path.join(__path__[0],'plt','lena.dat')
fname = os.path.join(os.path.dirname(__file__),'plt','lena.dat')
def lena():
    import cPickle, os
    fname = os.path.join(__path__[0],'plt','lena.dat')
    f = open(fname,'rb')
    lena = scipy.array(cPickle.load(f))
    f.close()
    return lena
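The fix replaces __path__[0], which exists only on packages, with a path derived from __file__, which also works for plain modules. A small sketch of the pattern (the helper name and file layout are hypothetical):

import os

def data_path(relative):
    # Resolve a data file shipped next to this module; works whether or
    # not the enclosing code lives in a package.
    return os.path.join(os.path.dirname(os.path.abspath(__file__)), relative)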
assert_equal(cephes.pro_ang1_cv(1,1,1,1,0),(1.0,0.0))
assert_array_almost_equal(cephes.pro_ang1_cv(1,1,1,1,0), array((1.0,0.0)))
def check_pro_ang1_cv(self):
    assert_equal(cephes.pro_ang1_cv(1,1,1,1,0),(1.0,0.0))
ext = Extension(name="_lbfgsb",sources=sources, **lapack)
ext = Extension(dot_join(parent_package,package,"_lbfgsb"), sources=sources, **lapack)
def configuration(parent_package='',parent_path=None):
    package = 'optimize'
    config = default_config_dict(package,parent_package)
    local_path = get_path(__name__,parent_path)

    minpack = glob(os.path.join(local_path,'minpack','*.f'))
    config['fortran_libraries'].append(('minpack',{'sources':minpack}))

    sources = ['_minpackmodule.c']
    sources = [os.path.join(local_path,x) for x in sources]
    ext = Extension(dot_join(parent_package,package,'_minpack'),
                    sources,
                    libraries = ['minpack'])
    config['ext_modules'].append(ext)

    rootfind = glob(os.path.join(local_path,'Zeros','*.c'))
    roothead = os.path.join(local_path,'zeros.h')
    config['libraries'].append(('rootfind',{'sources':rootfind,
                                            'headers':roothead}))
    sources = ['zeros.c']
    sources = [os.path.join(local_path,x) for x in sources]
    ext = Extension(dot_join(parent_package,package,'_zeros'),
                    sources,
                    libraries=['rootfind'])
    config['ext_modules'].append(ext)

    lapack = system_info.lapack_opt_info().get_info()
    sources = ['lbfgsb.pyf','routines.f']
    sources = [os.path.join(local_path,'lbfgsb-0.9',x) for x in sources]
    ext = Extension(name="_lbfgsb",sources=sources, **lapack)
    config['ext_modules'].append(ext)

    sources = ['moduleTNC.c', 'tnc.c']
    sources = [os.path.join(local_path,'tnc',x) for x in sources]
    ext = Extension(name="moduleTNC", sources=sources)
    config['ext_modules'].append(ext)

    return config
ext = Extension(name="moduleTNC", sources=sources)
ext = Extension(dot_join(parent_package,package,'moduleTNC'), sources=sources)
width = fac*(x[2]-x[1])
width = fac*(x[1]-x[0])
def errorbars(x,y,err,ptcolor='r',linecolor='b',pttype='o',linetype='-',fac=0.25):
    """Draw connected points with errorbars.

    Description:

      Plot connected points with errorbars.

    Inputs:

      x, y -- The points to plot.
      err -- The error in the y values.
      ptcolor -- The color for the points.
      linecolor -- The color of the connecting lines and error bars.
      pttype -- The type of point ('o', 'x', '+', '.', 'x', '*')
      linetype -- The type of line ('-', '|', ':', '-.', '-:')
      fac -- Adjusts how long the horizontal lines are which make the
             top and bottom of the error bars.
    """
    # create line arrays
    yb = y - err
    ye = y + err
    try:
        override = 1
        savesys = gist.plsys(2)
        gist.plsys(savesys)
    except:
        override = 0
    if _hold or override:
        pass
    else:
        gist.fma()
    y = where(scipy.isfinite(y),y,0)
    gist.plg(y,x,color=_colors[ptcolor],marker=_markers[pttype],type='none')
    gist.pldj(x,yb,x,ye,color=_colors[linecolor],type=_types[linetype])
    viewp = gist.viewport()
    plotlims = gist.limits()
    conv_factorx = (viewp[1] - viewp[0]) / (plotlims[1]-plotlims[0])
    conv_factory = (viewp[3] - viewp[2]) / (plotlims[3]-plotlims[2])
    width = fac*(x[2]-x[1])
    x0 = x-width/2.0
    x1 = x+width/2.0
    gist.pldj(x0,ye,x1,ye,color=_colors[linecolor],type=_types[linetype])
    gist.pldj(x0,yb,x1,yb,color=_colors[linecolor],type=_types[linetype])
    return
print linetypes[k], text[k]
print llx+width+deltax, ypos-deltay
def legend(text,linetypes=None,lleft=None,color=None,tfont='helvetica',fontsize=14,nobox=0):
    """Construct and place a legend.

    Description:

      Build a legend and place it on the current plot with an
      interactive prompt.

    Inputs:

      text -- A list of strings which document the curves.
      linetypes -- If not given, then the text strings are associated
                   with the curves in the order they were originally
                   drawn.  Otherwise, associate the text strings with the
                   corresponding curve types given.  See plot for description.
    """
    global _hold
    global _textcolor
    if color is None:
        color = _textcolor
    else:
        _textcolor = color
    if color is None:
        color = 'black'
    sys = gist.plsys()
    if sys == 0:
        gist.plsys(1)
    viewp = gist.viewport()
    gist.plsys(sys)
    DX = viewp[1] - viewp[0]
    DY = viewp[3] - viewp[2]
    width = DY / 10.0;
    if lleft is None:
        lleft = gist.mouse(0,0,"Click on point for lower left coordinate.")
        llx = lleft[0]
        lly = lleft[1]
    else:
        llx,lly = lleft[:2]
    savesys = gist.plsys()
    dx = width / 3.0
    legarr = Numeric.arange(llx,llx+width,dx)
    legy = Numeric.ones(legarr.shape)
    dy = fontsize*points*1.2
    deltay = fontsize*points / 2.8
    deltax = fontsize*points / 2.6 * DX / DY
    ypos = lly + deltay;
    if linetypes is None:
        linetypes = _GLOBAL_LINE_TYPES[:]  # copy them out
    gist.plsys(0)
    savehold = _hold
    _hold = 1
    for k in range(len(text)):
        plot(legarr,ypos*legy,linetypes[k])
        #print llx+width+deltax, ypos-deltay
        if text[k] != "":
            gist.plt(text[k],llx+width+deltax,ypos-deltay,
                     color=color,font=tfont,height=fontsize,tosys=0)
        ypos = ypos + dy
    _hold = savehold
    if nobox:
        pass
    else:
        gist.plsys(0)
        maxlen = MLab.max(map(len,text))
        c1 = (llx-deltax,lly-deltay)
        c2 = (llx + width + deltax + fontsize*points* maxlen/1.8 + deltax,
              lly + len(text)*dy)
        linesx0 = [c1[0],c1[0],c2[0],c2[0]]
        linesy0 = [c1[1],c2[1],c2[1],c1[1]]
        linesx1 = [c1[0],c2[0],c2[0],c1[0]]
        linesy1 = [c2[1],c2[1],c1[1],c1[1]]
        gist.pldj(linesx0,linesy0,linesx1,linesy1,color=color)
    gist.plsys(savesys)
    return
config_list += map(get_separate_package_config,separate_packages)
config_list += map(get_package_config,scipy_packages)
def setup_package(ignore_packages=[]):
    old_path = os.getcwd()
    path = get_path(__name__)
    os.chdir(path)
    sys.path.insert(0,os.path.join(path,'Lib'))
    # setup files of subpackages require scipy_core:
    sys.path.insert(0,os.path.join(path,'scipy_core'))
    try:
        #sys.path.insert(0,os.path.join(path,'Lib'))
        from scipy_version import scipy_version
        #del sys.path[0]
        config_list = [{'packages':['scipy','scipy.tests'],
                        'package_dir':
                        {'scipy':'Lib',
                         'scipy.tests':os.path.join('Lib','tests')}}]

        #new style packages:
        for d in ['scipy_core','Lib','Lib_chaco']:
            if sys.platform!='win32' and d=='Lib_chaco':
                # Currently chaco is working only on win32.
                continue
            config_list += get_packages(os.path.join(path,d),ignore_packages,
                                        parent_path=path)

        #old style packages:
        #config_list += map(get_separate_package_config,separate_packages)
        #config_list += map(get_package_config,scipy_packages)

        config_dict = merge_config_dicts(config_list)

        print 'SciPy Version %s' % scipy_version
        setup (name = "SciPy",
               version = scipy_version,
               maintainer = "SciPy Developers",
               maintainer_email = "scipy-dev@scipy.org",
               description = "Scientific Algorithms Library for Python",
               license = "SciPy License (BSD Style)",
               url = "http://www.scipy.org",
               **config_dict
               )
    finally:
        del sys.path[0]
        del sys.path[0]
        os.chdir(old_path)
for i in ["_core_", "_controls_", "_misc_", "_windows_", "_gdi_"]:
for i in [\"_core_\", \"_controls_\", \"_misc_\", \"_windows_\", \"_gdi_\"]:
def _import_wx_core(wx_pth, pexec):
    """Imports the core modules for wx.  This is necessary for
    wxPython-2.5.x.
    """
    # Find the suffix.
    suffix = '.so'
    for x in [x[0] for x in imp.get_suffixes() if x[-1] is imp.C_EXTENSION]:
        if os.path.exists(os.path.join(wx_pth, '_core_' + x)):
            suffix = x
            break
    # Now import the modules manually.
    pexec('import imp, os.path')
    code="""\
return 1
output = valarray(shape(cond),value=self.a)
output = valarray(shape(cond),value=self.a*scale + loc)
def ppf(self,q,*args,**kwds):
    loc,scale=map(kwds.get,['loc','scale'])
    args, loc, scale = self.__fix_loc_scale(args, loc, scale)
    q,loc,scale = map(arr,(q,loc,scale))
    args = tuple(map(arr,args))
    cond0 = self._argcheck(*args) & (scale > 0) & (loc==loc)
    cond1 = (q > 0) & (q < 1)
    cond2 = (q==1) & cond0
    cond = cond0 & cond1
    output = valarray(shape(cond),value=self.a)
    insert(output,(1-cond0)*(cond1==cond1), self.badvalue)
    insert(output,cond2,self.b)
    goodargs = argsreduce(cond, *((q,)+args+(scale,loc)))
    scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
    insert(output,cond,self._ppf(*goodargs)*scale + loc)
    return output
insert(output,cond2,self.b)
insert(output,cond2,self.b*scale + loc)
return c/3.0, (1.0-c+c*c)/18, sqrt(2)*(2*c-1)*(c+1)*(c-2) / \
return (c+1.0)/3.0, (1.0-c+c*c)/18, sqrt(2)*(2*c-1)*(c+1)*(c-2) / \
def _stats(self, c):
    return c/3.0, (1.0-c+c*c)/18, sqrt(2)*(2*c-1)*(c+1)*(c-2) / \
           (5*(1.0-c+c*c)**1.5), -3.0/5.0
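The corrected first moment is the mean of the standard triangular distribution on [0, 1] with mode c, i.e. (0 + 1 + c)/3. A quick numerical check against the modern scipy.stats.triang API (assumed available):

import numpy as np
from scipy.stats import triang

c = 0.3
mean, var = triang.stats(c, moments='mv')
assert np.isclose(mean, (c + 1.0) / 3.0)   # (0 + 1 + c)/3, the fixed value above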
if not 'MATHLIB' in os.environ:
if not os.environ.has_key('MATHLIB'):
def config_toplevel(self):
    print " ============= begin top level configuration ============="
config['packages'].append(dot_join(parent_package,'stats'))
def configuration(parent_package=''):
    #if parent_package:
    #    parent_package += '.'
    local_path = get_path(__name__)
    test_path = os.path.join(local_path,'tests')
    config = default_config_dict()
    #config['packages'].append(dot_join(parent_package,'stats'))
    config['packages'].append(dot_join(parent_package,'stats.tests'))
    config['package_dir']['stats.tests'] = test_path

    # Extension
    sources = ['randmodule.c','ranlib_all.c']
    sources = [os.path.join(local_path,x) for x in sources]
    ext = Extension(dot_join(parent_package,'stats.rand'),sources)
    config['ext_modules'].append(ext)

    return config
R = dot(r, transpose(perm))
R = dot(r, perm)
def leastsq(func,x0,args=(),Dfun=None,full_output=0,col_deriv=0,ftol=1.49012e-8,xtol=1.49012e-8,gtol=0.0,maxfev=0,epsfcn=0.0,factor=100,diag=None):
    """Minimize the sum of squares of a set of equations.

    Description:

      Return the point which minimizes the sum of squares of M
      (non-linear) equations in N unknowns given a starting estimate, x0,
      using a modification of the Levenberg-Marquardt algorithm.

                x = arg min(sum(func(y)**2))
                         y

    Inputs:

      func -- A Python function or method which takes at least one
              (possibly length N vector) argument and returns M
              floating point numbers.
      x0 -- The starting estimate for the minimization.
      args -- Any extra arguments to func are placed in this tuple.
      Dfun -- A function or method to compute the Jacobian of func with
              derivatives across the rows. If this is None, the
              Jacobian will be estimated.
      full_output -- non-zero to return all optional outputs.
      col_deriv -- non-zero to specify that the Jacobian function
                   computes derivatives down the columns (faster, because
                   there is no transpose operation).

    Outputs: (x, {cov_x, infodict, ier}, mesg)

      x -- the solution (or the result of the last iteration for an
           unsuccessful call.
      infodict -- a dictionary of optional outputs with the keys:
                  'nfev' : the number of function calls
                  'njev' : the number of jacobian calls
                  'fvec' : the function evaluated at the output
                  'fjac' : A permutation of the R matrix of a QR
                           factorization of the final approximate Jacobian
                           matrix, stored column wise.  Together with ipvt,
                           the covariance of the estimate can be approximated.
                  'ipvt' : an integer array of length N which defines a
                           permutation matrix, p, such that fjac*p = q*r,
                           where r is upper triangular with diagonal elements
                           of nonincreasing magnitude. Column j of p is
                           column ipvt(j) of the identity matrix.
                  'qtf'  : the vector (transpose(q) * fvec).
      ier -- an integer flag.  If it is equal to 1 the solution was
             found.  If it is not equal to 1, the solution was not found
             and the following message gives more information.
      mesg -- a string message giving information about the cause of
              failure.
      cov_x -- uses the fjac and ipvt optional outputs to construct an
               estimate of the covariance matrix of the solution.

    Extended Inputs:

      ftol -- Relative error desired in the sum of squares.
      xtol -- Relative error desired in the approximate solution.
      gtol -- Orthogonality desired between the function vector and the
              columns of the Jacobian.
      maxfev -- The maximum number of calls to the function. If zero,
                then 100*(N+1) is the maximum where N is the number of
                elements in x0.
      epsfcn -- A suitable step length for the forward-difference
                approximation of the Jacobian (for Dfun=None). If
                epsfcn is less than the machine precision, it is assumed
                that the relative errors in the functions are of the
                order of the machine precision.
      factor -- A parameter determining the initial step bound
                (factor * || diag * x||). Should be in interval (0.1,100).
      diag -- A sequency of N positive entries that serve as a scale
              factors for the variables.

    Remarks:

      "leastsq" is a wrapper around MINPACK's lmdif and lmder algorithms.
    """
    x0 = atleast_1d(x0)
    n = len(x0)
    if type(args) != type(()):
        args = (args,)
    m = check_func(func,x0,args,n)[0]
    if Dfun is None:
        if (maxfev == 0):
            maxfev = 200*(n+1)
        retval = _minpack._lmdif(func,x0,args,full_output,ftol,xtol,gtol,maxfev,epsfcn,factor,diag)
    else:
        if col_deriv:
            check_func(Dfun,x0,args,n,(n,m))
        else:
            check_func(Dfun,x0,args,n,(m,n))
        if (maxfev == 0):
            maxfev = 100*(n+1)
        retval = _minpack._lmder(func,Dfun,x0,args,full_output,col_deriv,ftol,xtol,gtol,maxfev,factor,diag)

    errors = {0:["Improper input parameters.", TypeError],
              1:["Both actual and predicted relative reductions in the sum of squares\n  are at most %f" % ftol, None],
              2:["The relative error between two consecutive iterates is at most %f" % xtol, None],
              3:["Both actual and predicted relative reductions in the sum of squares\n  are at most %f and the relative error between two consecutive iterates is at \n  most %f" % (ftol,xtol), None],
              4:["The cosine of the angle between func(x) and any column of the\n  Jacobian is at most %f in absolute value" % gtol, None],
              5:["Number of calls to function has reached maxfev = %d." % maxfev, ValueError],
              6:["ftol=%f is too small, no further reduction in the sum of squares\n  is possible.""" % ftol, ValueError],
              7:["xtol=%f is too small, no further improvement in the approximate\n  solution is possible." % xtol, ValueError],
              8:["gtol=%f is too small, func(x) is orthogonal to the columns of\n  the Jacobian to machine precision." % gtol, ValueError],
              'unknown':["Unknown error.", TypeError]}

    info = retval[-1]    # The FORTRAN return value

    if (info not in [1,2,3,4] and not full_output):
        if info in [5,6,7,8]:
            print "Warning: " + errors[info][0]
        else:
            try:
                raise errors[info][1], errors[info][0]
            except KeyError:
                raise errors['unknown'][1], errors['unknown'][0]

    if n == 1:
        retval = (retval[0][0],) + retval[1:]

    mesg = errors[info][0]
    if full_output:
        import scipy.linalg as sl
        perm = take(eye(n),retval[1]['ipvt']-1)
        r = sl.triu(transpose(retval[1]['fjac'])[:n,:])
        R = dot(r, transpose(perm))
        cov_x = sl.inv(dot(transpose(R),R))
        return (retval[0], cov_x) + retval[1:] + (mesg,)
    else:
        return (retval[0], mesg)
def check_gradient(fcn,Dfcn,x0,col_deriv=0):
def check_gradient(fcn,Dfcn,x0,args=(),col_deriv=0):
def check_gradient(fcn,Dfcn,x0,col_deriv=0):
    """Perform a simple check on the gradient for correctness.
    """
    x = atleast_1d(x0)
    n = len(x)
    x.shape = (n,)
    fvec = atleast_1d(fcn(x))
    if 1 not in fvec.shape:
        raise ValueError, "Function does not return a 1-D array."
    m = len(fvec)
    fvec.shape = (m,)
    ldfjac = m
    fjac = atleast_1d(Dfcn(x))
    fjac.shape = (m,n)
    if col_deriv == 0:
        fjac = transpose(fjac)
    xp = zeros((n,),Float64)
    err = zeros((m,),Float64)
    fvecp = None
    _minpack._chkder(m,n,x,fvec,fjac,ldfjac,xp,fvecp,1,err)
    fvecp = atleast_1d(fcn(xp))
    fvecp.shape = (m,)
    _minpack._chkder(m,n,x,fvec,fjac,ldfjac,xp,fvecp,2,err)
    good = (product(greater(err,0.5)))
    return (good,err)
fvec = atleast_1d(fcn(x))
if 1 not in fvec.shape:
    raise ValueError, "Function does not return a 1-D array."
fvec = atleast_1d(fcn(x,*args))
fjac = atleast_1d(Dfcn(x))
fjac = atleast_1d(Dfcn(x,*args))
fvecp = atleast_1d(fcn(xp))
fvecp = atleast_1d(fcn(xp,*args))
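The series of diffs above threads args through every call to fcn and Dfcn so that extra parameters reach both the function and its Jacobian. The same idea survives in the modern scipy.optimize.check_grad helper; a short sketch (assuming a current SciPy):

import numpy as np
from scipy.optimize import check_grad

def f(x, scale):            # objective with one extra argument
    return scale * np.sum(x ** 2)

def grad_f(x, scale):       # its analytic gradient, taking the same argument
    return 2.0 * scale * x

err = check_grad(f, grad_f, np.array([1.0, 2.0]), 3.0)
print(err)   # near zero when gradient and function agree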
include_dirs = [numpy.get_numpy_include()],
sources = [join('src',f) for f in ['_intsamplermodule.c',
                                   'sampler5tbl.c']] )
include_dirs = [numpy.get_numpy_include(),
                '/usr/include/python2.4/numpy/random/'],
libraries=['randomkit'],
sources = [join('src', f) for f in ['_intsamplermodule.c',
                                    'compact5table.c']] )
def configuration(parent_package='', top_path=None):
    config = Configuration('montecarlo', parent_package, top_path)

    config.add_extension('_intsampler',
                         include_dirs = [numpy.get_numpy_include()],
                         sources = [join('src',f) for f in
                                    ['_intsamplermodule.c', 'sampler5tbl.c']] )

    config.add_data_dir('tests')
    config.add_data_dir('examples')
    config.add_data_dir('doc')
    return config
assert_equal(cephes.nrdtrimn(0.5,1,1),1.0)
assert_approx_equal(cephes.nrdtrimn(0.5,1,1),1.0)
def check_nrdtrimn(self):
    assert_equal(cephes.nrdtrimn(0.5,1,1),1.0)
-7.940178689168587]),11)
-7.940178689168587]),10)
def check_bei_zeros(self):
    bi = bi_zeros(5)
    assert_array_almost_equal(bi[0],array([-1.173713222709127,
                                           -3.271093302836352,
                                           -4.830737841662016,
                                           -6.169852128310251,
                                           -7.376762079367764]),11)
assert_almost_equal(w,0.90047299861907959,7)
assert_almost_equal(pw,0.042089745402336121,7)
assert_almost_equal(w,0.90047299861907959,6)
assert_almost_equal(pw,0.042089745402336121,6)
def check_basic(self):
    x1 = [0.11,7.87,4.61,10.14,7.95,3.14,0.46,
          4.43,0.21,4.75,0.71,1.52,3.24,
          0.93,0.42,4.97,9.53,4.55,0.47,6.66]
    w,pw = scipy.stats.shapiro(x1)
    assert_almost_equal(w,0.90047299861907959,7)
    assert_almost_equal(pw,0.042089745402336121,7)
    x2 = [1.36,1.14,2.92,2.55,1.46,1.06,5.27,-1.11,
          3.48,1.10,0.88,-0.51,1.46,0.52,6.20,1.69,
          0.08,3.67,2.81,3.49]
    w,pw = scipy.stats.shapiro(x2)
    assert_almost_equal(w,0.9590269923210144,7)
    assert_almost_equal(pw,0.52459925413131714,7)
assert_almost_equal(w,0.9590269923210144,7)
assert_almost_equal(pw,0.52459925413131714,7)
assert_almost_equal(w,0.9590269923210144,6)
assert_almost_equal(pw,0.52459925413131714,6)
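Loosening these checks from 7 to 6 decimal places concedes that the reference values reproduce only to within platform-dependent rounding. The public API itself is unchanged; a minimal sketch against modern scipy.stats (assumed available):

import numpy as np
from scipy.stats import shapiro

rng = np.random.default_rng(0)
w, pw = shapiro(rng.normal(size=50))
print(w, pw)   # W close to 1 and a non-small p-value for Gaussian samples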
xk += update
xk = xk + update
def fmin_ncg(f, x0, fprime, fhess_p=None, fhess=None, args=(), avextol=1e-5,
             epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0,
             callback=None):
    """Description:

      Minimize the function, f, whose gradient is given by fprime using the
      Newton-CG method.  fhess_p must compute the hessian times an arbitrary
      vector. If it is not given, finite-differences on fprime are used to
      compute it. See Wright, and Nocedal 'Numerical Optimization', 1999,
      pg. 140.

    Inputs:

      f -- the Python function or method to be minimized.
      x0 -- the initial guess for the minimizer.
      fprime -- a function to compute the gradient of f: fprime(x, *args)
      fhess_p -- a function to compute the Hessian of f times an
                 arbitrary vector: fhess_p (x, p, *args)
      fhess -- a function to compute the Hessian matrix of f.
      args -- extra arguments for f, fprime, fhess_p, and fhess (the same
              set of extra arguments is supplied to all of these functions).
      epsilon -- if fhess is approximated use this value for
                 the step size (can be scalar or vector)
      callback -- an optional user-supplied function to call after each
                  iteration.  It is called as callback(xk), where xk is the
                  current parameter vector.

    Outputs: (xopt, {fopt, fcalls, gcalls, hcalls, warnflag},{allvecs})

      xopt -- the minimizer of f
      fopt -- the value of the function at xopt: fopt = f(xopt)
      fcalls -- the number of function calls.
      gcalls -- the number of gradient calls.
      hcalls -- the number of hessian calls.
      warnflag -- algorithm warnings:
                  1 : 'Maximum number of iterations exceeded.'
      allvecs -- a list of all tried iterates

    Additional Inputs:

      avextol -- Convergence is assumed when the average relative error in
                 the minimizer falls below this amount.
      maxiter -- Maximum number of iterations to allow.
      full_output -- If non-zero return the optional outputs.
      disp -- If non-zero print convergence message.
      retall -- return a list of results at each iteration if True

    Remarks:

      Only one of fhess_p or fhess need be given.  If fhess is provided,
      then fhess_p will be ignored.  If neither fhess nor fhess_p is
      provided, then the hessian product will be approximated using finite
      differences on fprime.
    """
    x0 = asarray(x0)
    fcalls, f = wrap_function(f, args)
    gcalls, fprime = wrap_function(fprime, args)
    hcalls = 0
    if maxiter is None:
        maxiter = len(x0)*200

    xtol = len(x0)*avextol
    update = [2*xtol]
    xk = x0
    if retall:
        allvecs = [xk]
    k = 0
    old_fval = f(x0)
    while (numpy.add.reduce(abs(update)) > xtol) and (k < maxiter):
        # Compute a search direction pk by applying the CG method to
        #  del2 f(xk) p = - grad f(xk) starting from 0.
        b = -fprime(xk)
        maggrad = numpy.add.reduce(abs(b))
        eta = min([0.5,numpy.sqrt(maggrad)])
        termcond = eta * maggrad
        xsupi = zeros(len(x0), x0.dtype.char)
        ri = -b
        psupi = -ri
        i = 0
        dri0 = numpy.dot(ri,ri)

        if fhess is not None:             # you want to compute hessian once.
            A = apply(fhess,(xk,)+args)
            hcalls = hcalls + 1

        while numpy.add.reduce(abs(ri)) > termcond:
            if fhess is None:
                if fhess_p is None:
                    Ap = approx_fhess_p(xk,psupi,fprime,epsilon)
                else:
                    Ap = fhess_p(xk,psupi, *args)
                    hcalls = hcalls + 1
            else:
                Ap = numpy.dot(A,psupi)
            # check curvature
            curv = numpy.dot(psupi,Ap)
            if curv == 0.0:
                break
            elif curv < 0:
                if (i > 0):
                    break
                else:
                    xsupi = xsupi + dri0/curv * psupi
                    break
            alphai = dri0 / curv
            xsupi = xsupi + alphai * psupi
            ri = ri + alphai * Ap
            dri1 = numpy.dot(ri,ri)
            betai = dri1 / dri0
            psupi = -ri + betai * psupi
            i = i + 1
            dri0 = dri1          # update numpy.dot(ri,ri) for next time.

        pk = xsupi  # search direction is solution to system.
        gfk = -b    # gradient at xk
        alphak, fc, gc, old_fval = line_search_BFGS(f,xk,pk,gfk,old_fval)

        update = alphak * pk
        xk += update
        if callback is not None:
            callback(xk)
        if retall:
            allvecs.append(xk)
        k += 1

    if disp or full_output:
        fval = old_fval
    if k >= maxiter:
        warnflag = 1
        if disp:
            print "Warning: Maximum number of iterations has been exceeded"
            print "         Current function value: %f" % fval
            print "         Iterations: %d" % k
            print "         Function evaluations: %d" % fcalls[0]
            print "         Gradient evaluations: %d" % gcalls[0]
            print "         Hessian evaluations: %d" % hcalls
    else:
        warnflag = 0
        if disp:
            print "Optimization terminated successfully."
            print "         Current function value: %f" % fval
            print "         Iterations: %d" % k
            print "         Function evaluations: %d" % fcalls[0]
            print "         Gradient evaluations: %d" % gcalls[0]
            print "         Hessian evaluations: %d" % hcalls

    if full_output:
        retlist = xk, fval, fcalls[0], gcalls[0], hcalls, warnflag
        if retall:
            retlist += (allvecs,)
    else:
        retlist = xk
        if retall:
            retlist = (xk, allvecs)

    return retlist
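One plausible motivation for replacing xk += update with xk = xk + update: the in-place form mutates the array object already stored in allvecs (and handed to callback), so every recorded iterate ends up aliasing the final one. A small demonstration:

import numpy as np

xk = np.zeros(2)
history = [xk]
xk += np.array([1.0, 1.0])      # mutates the array already in history
print(history[0])               # [1. 1.] -- the stored iterate changed

xk = np.zeros(2)
history = [xk]
xk = xk + np.array([1.0, 1.0])  # binds a fresh array instead
print(history[0])               # [0. 0.] -- history preserved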
assert_equal(cephes.cbrt(1),1.0)
assert_approx_equal(cephes.cbrt(1),1.0)
def check_cbrt(self):
    assert_equal(cephes.cbrt(1),1.0)
assert_equal(cephes.exp10(2),100.0)
assert_approx_equal(cephes.exp10(2),100.0)
def check_exp10(self):
    assert_equal(cephes.exp10(2),100.0)
assert_equal(cb,cbrl)
assert_approx_equal(cb,cbrl)
def check_cbrt(self):
    cb = cbrt(27)
    cbrl = 27**(1.0/3.0)
    assert_equal(cb,cbrl)
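The switch from assert_equal to assert_approx_equal in these tests reflects that cube roots and powers computed through logarithms need not round to the mathematically exact value, so exact floating-point comparison is fragile. For example:

# On typical IEEE-754 doubles the reference value itself is inexact:
print(27 ** (1.0 / 3.0))   # 3.0000000000000004, not exactly 3.0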
assert_equal(ex,exrl)
assert_approx_equal(ex,exrl)
def check_exp10(self):
    ex = exp10(2)
    exrl = 10**2
    assert_equal(ex,exrl)
if type(x) <> StringType:
if type(x) != StringType:
def makestr (x):
    if type(x) <> StringType:
        x = str(x)
    return x
if sterr:
if stderr:
def collapse (a,keepcols,collapsecols,stderr=0,ns=0,cfcn=None):
    """Averages data in collapsecol, keeping all unique items in keepcols
    (using unique, which keeps unique LISTS of column numbers), retaining
    the unique sets of values in keepcols, the mean for each.  If the sterr or
    N of the mean are desired, set either or both parameters to 1.

    Returns: unique 'conditions' specified by the contents of columns specified
    by keepcols, abutted with the mean(s) of column(s) specified by
    collapsecols
    """
    if cfcn is None:
        cfcn = stats.mean
    a = asarray(a)
    if keepcols == []:
        avgcol = colex(a,collapsecols)
        means = cfcn(avgcol)
        return means
    else:
        if type(keepcols) not in [ListType,TupleType,N.ArrayType]:
            keepcols = [keepcols]
        values = colex(a,keepcols)   # so that "item" can be appended (below)
        uniques = unique(values)     # get a LIST, so .sort keeps rows intact
        uniques.sort()
        newlist = []
        for item in uniques:
            if type(item) not in [ListType,TupleType,N.ArrayType]:
                item = [item]
            tmprows = linexand(a,keepcols,item)
            for col in collapsecols:
                avgcol = colex(tmprows,col)
                item.append(cfcn(avgcol))
                if sterr:
                    if len(avgcol) > 1:
                        item.append(stats.sterr(avgcol))
                    else:
                        item.append('N/A')
                if ns:
                    item.append(len(avgcol))
            newlist.append(item)
        try:
            new_a = N.array(newlist)
        except TypeError:
            new_a = N.array(newlist,'O')
        return new_a
item.append(stats.sterr(avgcol))
item.append(stats.stderr(avgcol))
if type(item) <> StringType:
if type(item) != StringType:
def makestr (item):
    if type(item) <> StringType:
        item = str(item)
    return item
if type(inlist[i]) <> StringType:
if type(inlist[i]) != StringType:
def lineincustcols (inlist,colsizes):
    """\nReturns a string composed of elements in inlist, with each element
return string.join(stringlist)
return "".join(stringlist)
def list2string (inlist):
    """\nConverts a 1D list to a single long string for file output, using
from scipy_distutils.mingw32_support import *
import scipy_distutils.mingw32_support
def configuration(parent_package=''):
    if sys.platform == 'win32':
        from scipy_distutils.mingw32_support import *
    from scipy_distutils.core import Extension
    from scipy_distutils.misc_util import get_path, default_config_dict
    from scipy_distutils.misc_util import fortran_library_item, dot_join
    from scipy_distutils.system_info import get_info,dict_append,\
         AtlasNotFoundError,LapackNotFoundError,BlasNotFoundError,\
         LapackSrcNotFoundError,BlasSrcNotFoundError

    package = 'linalg'
    from interface_gen import generate_interface
    config = default_config_dict(package,parent_package)
    local_path = get_path(__name__)

    m = re.compile(r'(build|install|bdist|run_f2py)')
    if not filter(m.match,sys.argv):
        sources = []
        sources += glob(os.path.join(local_path,'src','*.f'))
        sources += glob(os.path.join(local_path,'src','*.c'))
        sources += glob(os.path.join(local_path,'generic_*.pyf'))
        sources += [os.path.join(local_path,f) for f in [\
            'flapack_user_routines.pyf','atlas_version.c']]
        config['ext_modules'].append(Extension(\
            name='fake_linalg_ext_module',
            sources = sources))
        return config

    atlas_info = get_info('atlas')
    #atlas_info = {} # uncomment if ATLAS is available but want to use
                     # Fortran LAPACK/BLAS; useful for testing
    f_libs = []
    atlas_version = None
    if atlas_info:
        # Try to determine ATLAS version
        cur_dir = os.getcwd()
        os.chdir(local_path)
        cmd = '%s %s build_ext --inplace --force'%\
              (sys.executable,
               os.path.join(local_path,'setup_atlas_version.py'))
        print cmd
        s,o=run_command(cmd)
        if not s:
            cmd = sys.executable+' -c "import atlas_version"'
            print cmd
            s,o=run_command(cmd)
            if not s:
                m = re.match(r'ATLAS version (?P<version>\d+[.]\d+[.]\d+)',o)
                if m:
                    atlas_version = m.group('version')
                    print 'ATLAS version',atlas_version
            if atlas_version is None:
                if re.search(r'undefined symbol: ATL_buildinfo',o,re.M):
                    atlas_version = '3.2.1' # or pre 3.3.6
                    print 'ATLAS version',atlas_version,'(or pre 3.3.6)'
                else:
                    print o
        else:
            print o
        if atlas_version is None:
            print 'Failed to determine ATLAS version'
        os.chdir(cur_dir)

    if ('ATLAS_WITHOUT_LAPACK',None) in atlas_info.get('define_macros',[]):
        lapack_info = get_info('lapack')
        if not lapack_info:
            warnings.warn(LapackNotFoundError.__doc__)
            lapack_src_info = get_info('lapack_src')
            if not lapack_src_info:
                raise LapackSrcNotFoundError,LapackSrcNotFoundError.__doc__
            dict_append(lapack_info,libraries=['lapack_src'])
            f_libs.append(fortran_library_item(\
                'lapack_src',lapack_src_info['sources'],
                ))
        dict_append(lapack_info,**atlas_info)
        atlas_info = lapack_info

    blas_info,lapack_info = {},{}
    if not atlas_info:
        warnings.warn(AtlasNotFoundError.__doc__)
        blas_info = get_info('blas')
        #blas_info = {} # test building BLAS from sources.
        if not blas_info:
            warnings.warn(BlasNotFoundError.__doc__)
            blas_src_info = get_info('blas_src')
            if not blas_src_info:
                raise BlasSrcNotFoundError,BlasSrcNotFoundError.__doc__
            dict_append(blas_info,libraries=['blas_src'])
            f_libs.append(fortran_library_item(\
                'blas_src',blas_src_info['sources'],
                ))
        lapack_info = get_info('lapack')
        #lapack_info = {} # test building LAPACK from sources.
        if not lapack_info:
            warnings.warn(LapackNotFoundError.__doc__)
            lapack_src_info = get_info('lapack_src')
            if not lapack_src_info:
                raise LapackSrcNotFoundError,LapackSrcNotFoundError.__doc__
            dict_append(lapack_info,libraries=['lapack_src'])
            f_libs.append(fortran_library_item(\
                'lapack_src',lapack_src_info['sources'],
                ))

    mod_sources = {}
    if atlas_info or blas_info:
        mod_sources['fblas'] = ['generic_fblas.pyf',
                                'generic_fblas1.pyf',
                                'generic_fblas2.pyf',
                                'generic_fblas3.pyf',
                                os.path.join('src','fblaswrap.f'),
                                ]
    if atlas_info or lapack_info:
        mod_sources['flapack'] = ['generic_flapack.pyf']
    if atlas_info:
        mod_sources['cblas'] = ['generic_cblas.pyf',
                                'generic_cblas1.pyf']
        mod_sources['clapack'] = ['generic_clapack.pyf']
    else:
        dict_append(atlas_info,**lapack_info)
        dict_append(atlas_info,**blas_info)

    skip_names = {'clapack':[],'flapack':[],'cblas':[],'fblas':[]}
    if skip_single_routines:
        skip_names['clapack'].extend(\
            'sgesv cgesv sgetrf cgetrf sgetrs cgetrs sgetri cgetri'\
            ' sposv cposv spotrf cpotrf spotrs cpotrs spotri cpotri'\
            ' slauum clauum strtri ctrtri'.split())
        skip_names['flapack'].extend(skip_names['clapack'])
        skip_names['flapack'].extend(\
            'sgesdd cgesdd sgelss cgelss sgeqrf cgeqrf sgeev cgeev'\
            ' sgegv cgegv ssyev cheev slaswp claswp sgees cgees'
            ' sggev cggev'.split())
        skip_names['cblas'].extend('saxpy caxpy'.split())
        skip_names['fblas'].extend(skip_names['cblas'])
        skip_names['fblas'].extend(\
            'srotg crotg srotmg srot csrot srotm sswap cswap sscal cscal'\
            ' csscal scopy ccopy sdot cdotu cdotc snrm2 scnrm2 sasum scasum'\
            ' isamax icamax sgemv cgemv chemv ssymv strmv ctrmv'\
            ' sgemm cgemm'.split())

    if atlas_version=='3.2.1':
        skip_names['clapack'].extend(\
            'sgetri dgetri cgetri zgetri spotri dpotri cpotri zpotri'\
            ' slauum dlauum clauum zlauum strtri dtrtri ctrtri ztrtri'.split())

    for mod_name,sources in mod_sources.items():
        sources = [os.path.join(local_path,s) for s in sources]
        pyf_sources = filter(lambda s:s[-4:]=='.pyf',sources)
        mod_file = os.path.join(local_path,mod_name+'.pyf')
        if dep_util.newer_group(pyf_sources,mod_file):
            generate_interface(mod_name,sources[0],mod_file,
                               skip_names.get(mod_name,[]))
        sources = filter(lambda s:s[-4:]!='.pyf',sources)
        ext_args = {'name':dot_join(parent_package,package,mod_name),
                    'sources':[mod_file]+sources}
        dict_append(ext_args,**atlas_info)
        ext = Extension(**ext_args)
        ext.need_fcompiler_opts = 1
        config['ext_modules'].append(ext)

    flinalg = []
    for f in ['det.f','lu.f', #'wrappers.c','inv.f',
              ]:
        flinalg.append(os.path.join(local_path,'src',f))
    ext_args = {'name':dot_join(parent_package,package,'_flinalg'),
                'sources':flinalg}
    dict_append(ext_args,**atlas_info)
    config['ext_modules'].append(Extension(**ext_args))

    ext_args = {'name':dot_join(parent_package,package,'calc_lwork'),
                'sources':[os.path.join(local_path,'src','calc_lwork.f')],
                }
    dict_append(ext_args,**atlas_info)
    config['ext_modules'].append(Extension(**ext_args))

    config['fortran_libraries'].extend(f_libs)
    return config
def legend(text,linetypes=None,lleft=None,color='black',tfont='helvetica',fontsize=14,nobox=0):
def legend(text,linetypes=None,lleft=None,color=None,tfont='helvetica',fontsize=14,nobox=0):
def legend(text,linetypes=None,lleft=None,color='black',tfont='helvetica',fontsize=14,nobox=0):
    """Construct and place a legend.

    Description:

      Build a legend and place it on the current plot with an
      interactive prompt.

    Inputs:

      text -- A list of strings which document the curves.
      linetypes -- If not given, then the text strings are associated
                   with the curves in the order they were originally
                   drawn.  Otherwise, associate the text strings with the
                   corresponding curve types given.  See plot for description.
    """
    global _hold
    sys = gist.plsys()
    if sys == 0:
        gist.plsys(1)
    viewp = gist.viewport()
    gist.plsys(sys)
    DX = viewp[1] - viewp[0]
    DY = viewp[3] - viewp[2]
    width = DY / 10.0;
    if lleft is None:
        lleft = gist.mouse(0,0,"Click on point for lower left coordinate.")
        llx = lleft[0]
        lly = lleft[1]
    else:
        llx,lly = lleft[:2]
    savesys = gist.plsys()
    dx = width / 3.0
    legarr = Numeric.arange(llx,llx+width,dx)
    legy = Numeric.ones(legarr.shape)
    dy = fontsize*points*1.2
    deltay = fontsize*points / 2.8
    deltax = fontsize*points / 2.6 * DX / DY
    ypos = lly + deltay;
    if linetypes is None:
        linetypes = _GLOBAL_LINE_TYPES[:]  # copy them out
    gist.plsys(0)
    savehold = _hold
    _hold = 1
    for k in range(len(text)):
        plot(legarr,ypos*legy,linetypes[k])
        #print llx+width+deltax, ypos-deltay
        if text[k] != "":
            gist.plt(text[k],llx+width+deltax,ypos-deltay,
                     color=color,font=tfont,height=fontsize,tosys=0)
        ypos = ypos + dy
    _hold = savehold
    if nobox:
        pass
    else:
        gist.plsys(0)
        maxlen = MLab.max(map(len,text))
        c1 = (llx-deltax,lly-deltay)
        c2 = (llx + width + deltax + fontsize*points* maxlen/1.8 + deltax,
              lly + len(text)*dy)
        linesx0 = [c1[0],c1[0],c2[0],c2[0]]
        linesy0 = [c1[1],c2[1],c2[1],c1[1]]
        linesx1 = [c1[0],c2[0],c2[0],c1[0]]
        linesy1 = [c2[1],c2[1],c1[1],c1[1]]
        gist.pldj(linesx0,linesy0,linesx1,linesy1,color=color)
    gist.plsys(savesys)
    return
def _get_namespace(self):
    return self.__namespace or default_namespace
def _get_namespace(self):
    if isinstance(self.__namespace, N.ndarray):
        return self.__namespace
    else:
        return self.__namespace or default_namespace
n = Numeric.searchsorted(Numeric.sort(a), bins)
n = Numeric.concatenate([ n, [len(a)]])
n = searchsorted(sort(a), bins)
n = concatenate([ n, [len(a)]])
def histogram2(a, bins):
    """ histogram2(a,bins) -- Compute histogram of a using divisions in bins

    Description:
       Count the number of times values from array a fall into
       numerical ranges defined by bins.  Range x is given by
       bins[x] <= range_x < bins[x+1] where x =0,N and N is the
       length of the bins array.  The last range is given by
       bins[N] <= range_N < infinity.  Values less than bins[0] are
       not included in the histogram.

    Arguments:
       a -- 1D array.  The array of values to be divied into bins
       bins -- 1D array.  Defines the ranges of values to use during
             histogramming.

    Returns:
       1D array.  Each value represents the occurences for a given
       bin (range) of values.

    Caveat:
       This should probably have an axis argument that would histogram
       along a specific axis (kinda like matlab)
    """
    n = Numeric.searchsorted(Numeric.sort(a), bins)
    n = Numeric.concatenate([ n, [len(a)]])
    return n[ 1:]-n[:-1]
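The counting idiom here is two searchsorted passes: positions of the bin edges within the sorted data, then adjacent differences. A self-contained check with modern NumPy (illustrative data):

import numpy as np

a = np.array([0.2, 0.4, 0.9, 1.5, 1.7, 2.2])
bins = np.array([0.0, 1.0, 2.0])
n = np.searchsorted(np.sort(a), bins)
n = np.concatenate([n, [len(a)]])
print(n[1:] - n[:-1])   # [3 2 1]: per-bin counts, last bin open-ended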
noise = Numeric.mean(Numeric.ravel(lVar))
noise = mean(Numeric.ravel(lVar))
def wiener(im,mysize=None,noise=None):
    """Perform a wiener filter on an N-dimensional array.

    Description:

      Apply a wiener filter to the N-dimensional array in.

    Inputs:

      in -- an N-dimensional array.
      kernel_size -- A scalar or an N-length list giving the size of the
                     median filter window in each dimension.  Elements of
                     kernel_size should be odd.  If kernel_size is a scalar,
                     then this scalar is used as the size in each dimension.
      noise -- The noise-power to use.  If None, then noise is estimated as
               the average of the local variance of the input.

    Outputs: (out,)

      out -- Wiener filtered result with the same shape as in.
    """
    im = Numeric.asarray(im)
    if mysize is None:
        mysize = [3] * len(im.shape)
    mysize = Numeric.asarray(mysize);

    # Estimate the local mean
    lMean = correlate(im,Numeric.ones(mysize),1) / Numeric.product(mysize)

    # Estimate the local variance
    lVar = correlate(im**2,Numeric.ones(mysize),1) / Numeric.product(mysize) - lMean**2

    # Estimate the noise power if needed.
    if noise==None:
        noise = Numeric.mean(Numeric.ravel(lVar))

    # Compute result
    # f = lMean + (maximum(0, lVar - noise) ./
    #               maximum(lVar, noise)) * (im - lMean)
    #
    out = im - lMean
    im = lVar - noise
    im = Numeric.maximum(im,0)
    lVar = Numeric.maximum(lVar,noise)
    out = out / lVar
    out = out * im
    out = out + lMean

    return out
from scipy.stats import mean
def resample(x,num,t=None,axis=0,window=None):
    """Resample to num samples using Fourier method along the given axis.

    The resampled signal starts at the same value of x but is sampled
    with a spacing of len(x) / num * (spacing of x).  Because a
    Fourier method is used, the signal is assumed periodic.

    Window controls a Fourier-domain window that tapers the Fourier
    spectrum before zero-padding to aleviate ringing in the resampled
    values for sampled signals you didn't intend to be interpreted as
    band-limited.

    If window is a string then use the named window.  If window is a
    float, then it represents a value of beta for a kaiser window.  If
    window is a tuple, then the first component is a string
    representing the window, and the next arguments are parameters for
    that window.

    Possible windows are:
        'blackman'       ('black',   'blk')
        'hamming'        ('hamm',    'ham')
        'bartlett'       ('bart',    'brt')
        'hanning'        ('hann',    'han')
        'kaiser'         ('ksr')             # requires parameter (beta)
        'gaussian'       ('gauss',   'gss')  # requires parameter (std.)
        'general gauss'  ('general', 'ggs')  # requires two parameters
                                             #   (power, width)

    The first sample of the returned vector is the same as the first
    sample of the input vector, the spacing between samples is changed
    from dx to dx * len(x) / num

    If t is not None, then it represents the old sample positions, and
    the new sample positions will be returned as well as the new
    samples.
    """
    x = asarray(x)
    X = fft(x,axis=axis)
    Nx = x.shape[axis]
    if window is not None:
        W = ifftshift(get_window(window,Nx))
        newshape = ones(len(x.shape))
        newshape[axis] = len(W)
        W.shape = newshape
        X = X*W
    sl = [slice(None)]*len(x.shape)
    newshape = list(x.shape)
    newshape[axis] = num
    N = int(Numeric.minimum(num,Nx))
    Y = Numeric.zeros(newshape,'D')
    sl[axis] = slice(0,(N+1)/2)
    Y[sl] = X[sl]
    sl[axis] = slice(-(N-1)/2,None)
    Y[sl] = X[sl]
    y = ifft(Y,axis=axis)*(float(num)/float(Nx))
    if x.typecode() not in ['F','D']:
        y = y.real
    if t is None:
        return y
    else:
        new_t = arange(0,num)*(t[1]-t[0])* Nx / float(num) + t[0]
        return y, new_t
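The same Fourier-domain scheme lives on as scipy.signal.resample, including the (y, new_t) return when the old sample positions are passed. A brief usage sketch (assuming a current SciPy):

import numpy as np
from scipy.signal import resample

t = np.linspace(0.0, 1.0, 100, endpoint=False)
x = np.sin(2 * np.pi * 5 * t)           # periodic, as the method assumes
y, t_new = resample(x, 25, t=t)
print(y.shape, t_new[1] - t_new[0])     # (25,) and 4x the original spacing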
try:
    from scipy.sandbox.numexpr import expressions
    modname = 'scipy.sandbox.numexpr.expressions'
except ImportError:
    from numexpr import expressions
    modname = 'numexpr.expressions'
modname = modname[__name__.rfind('.')-1:] + '.expressions'
def makeExpressions(context):
    """Make private copy of the expressions module with a custom
    get_context().

    An attempt was made to make this threadsafe, but I can't guarantee
    it's bulletproof.
    """
    import sys, imp
    try:
        from scipy.sandbox.numexpr import expressions
        modname = 'scipy.sandbox.numexpr.expressions'
    except ImportError:
        from numexpr import expressions
        modname = 'numexpr.expressions'
    # get our own, private copy of expressions
    imp.acquire_lock()
    try:
        old = sys.modules.pop(modname)
        import expressions
        private = sys.modules.pop(modname)
        sys.modules[modname] = old
    finally:
        imp.release_lock()
    def get_context():
        return context
    private.get_context = get_context
    return private
class dictsampler(object):
class dictsampler(genericsampler):
def sample(self, size, return_probs=0):
    """Generates a sample of the given size from the specified discrete
    distribution, optionally returning the probabilities under the
    distribution.
raise TypeError, "unsupported type for adding to a sparse matrix"
raise TypeError, "unsupported type for sparse matrix addition"
def __radd__(self, other):
    """ Function supporting the operation: self + other.
    This does not currently work correctly for self + dense.
    Perhaps dense matrices need some hooks to support this.
    """
    if isscalar(other) or (isdense(other) and rank(other)==0):
        raise NotImplementedError, 'adding a scalar to a CSC matrix is ' \
              'not yet supported'
    elif isspmatrix(other):
        ocs = other.tocsc()
        if (ocs.shape != self.shape):
            raise ValueError, "inconsistent shapes"
        dtypechar = _coerce_rules[(self.dtypechar, ocs.dtypechar)]
        nnz1, nnz2 = self.nnz, ocs.nnz
        data1, data2 = _convert_data(self.data[:nnz1], ocs.data[:nnz2],
                                     dtypechar)
        func = getattr(sparsetools, _transtabl[dtypechar]+'cscadd')
        c, rowc, ptrc, ierr = func(data1, self.rowind[:nnz1], self.indptr,
                                   data2, ocs.rowind[:nnz2], ocs.indptr)
        if ierr:
            raise ValueError, "ran out of space (but shouldn't have happened)"
        M, N = self.shape
        return csc_matrix((c, rowc, ptrc), dims=(M, N))
    elif isdense(other):
        # Convert this matrix to a dense matrix and add them.
        # This does not currently work.
        return self.todense() + other
    else:
        raise TypeError, "unsupported type for adding to a sparse matrix"
raise TypeError, "unknown type for sparse matrix addition"
raise TypeError, "unsupported type for sparse matrix addition"
def __add__(self, other):
    if isscalar(other) or (isdense(other) and rank(other)==0):
        raise NotImplementedError, 'adding a scalar to a CSC matrix is ' \
              'not yet supported'
    elif isspmatrix(other):
        ocs = other.tocsc()
        if (ocs.shape != self.shape):
            raise ValueError, "inconsistent shapes"
        dtypechar = _coerce_rules[(self.dtypechar, ocs.dtypechar)]
        nnz1, nnz2 = self.nnz, ocs.nnz
        data1, data2 = _convert_data(self.data[:nnz1], ocs.data[:nnz2],
                                     dtypechar)
        func = getattr(sparsetools, _transtabl[dtypechar]+'cscadd')
        c, rowc, ptrc, ierr = func(data1, self.rowind[:nnz1], self.indptr,
                                   data2, ocs.rowind[:nnz2], ocs.indptr)
        if ierr:
            raise ValueError, "ran out of space (but shouldn't have happened)"
        M, N = self.shape
        return csc_matrix((c, rowc, ptrc), dims=(M, N))
    elif isdense(other):
        # Convert this matrix to a dense matrix and add them
        return other + self.todense()
    else:
        raise TypeError, "unknown type for sparse matrix addition"
raise TypeError, "unsupported type for adding to a sparse matrix"
raise TypeError, "unsupported type for sparse matrix addition"
def __add__(self, other):
    # First check if argument is a scalar
    if isscalar(other) or (isdense(other) and rank(other)==0):
        # Now we would add this scalar to every element.
        raise NotImplementedError, 'adding a scalar to a sparse matrix ' \
              'is not yet supported'
    elif isspmatrix(other):
        ocs = other.tocsr()
        if (ocs.shape != self.shape):
            raise ValueError, "inconsistent shapes"
self.shape = (M, N)
assert M == int(M) and M > 0
assert N == int(N) and N > 0
self.shape = (int(M), int(N))
def __init__(self, A=None):
    """ Create a new dictionary-of-keys sparse matrix.  An optional
    argument A is accepted, which initializes the dok_matrix with it.
    This can be a tuple of dimensions (m, n) or a (dense) array
    to copy.
    """
    dict.__init__(self)
    spmatrix.__init__(self)
    self.shape = (0, 0)
    # If _validate is True, ensure __setitem__ keys are integer tuples
    self._validate = True
    if A is not None:
        if type(A) == tuple:
            # Interpret as dimensions
            try:
                dims = A
                (M, N) = dims
                self.shape = (M, N)
                return
            except (TypeError, ValueError):
                pass
        if isspmatrix(A):
            # For sparse matrices, this is too inefficient; we need
            # something else.
            raise NotImplementedError, "initializing a dok_matrix with " \
                  "a sparse matrix is not yet supported"
        elif isdense(A):
            A = asarray(A)
            if rank(A) == 2:
                M, N = A.shape
                self.shape = (M, N)
                for i in range(M):
                    for j in range(N):
                        if A[i, j] != 0:
                            self[i, j] = A[i, j]
            elif rank(A) == 1:
                M = A.shape[0]
                self.shape = (M, 1)
                for i in range(M):
                    if A[i] != 0:
                        self[i, 0] = A[i]
            else:
                raise TypeError, "array for initialization must have rank 2"
        else:
            raise TypeError, "argument should be a tuple of dimensions " \
                  "or a sparse or dense matrix"
except (TypeError, ValueError):
    pass
except (TypeError, ValueError, AssertionError):
    raise TypeError, "dimensions must be a 2-tuple of positive"\
          " integers"
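A usage sketch for this constructor against today's scipy.sparse (hypothetical values), showing the two accepted initializers, a dimension tuple and a dense array:

from numpy import array
from scipy.sparse import dok_matrix

d = dok_matrix((3, 3))                    # from a dimension tuple
d[0, 1] = 2.0
e = dok_matrix(array([[1, 0], [0, 4]]))   # from a dense array
assert d[2, 2] == 0.0 and e[1, 1] == 4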
N = len(values)
for n in range(N):
    self[n, n+k] = values[n]
M, N = self.shape
m = len(values)
for i in range(min(M, N-k)):
    self[i, i+k] = values[i]
def setdiag(self, values, k=0):
    N = len(values)
    for n in range(N):
        self[n, n+k] = values[n]
    return
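The point of the replacement loop is the min(M, N-k) bound: the old code writes past the edge of the matrix whenever len(values) exceeds the length of the k-th diagonal. A plain-dict illustration with hypothetical sizes:

M, N, k = 3, 3, 1
values = [10, 20, 30, 40, 50]
d = {}
for i in range(min(M, N - k)):   # the first superdiagonal holds only 2 entries
    d[(i, i + k)] = values[i]
assert d == {(0, 1): 10, (1, 2): 20}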
if not (exists('src/randomkit.c') and exists('src/randomkit.h')):
    raise OSError, "Please copy or symlink randomkit.c and randomkit.h to montecarlo/src/ from numpy/random/mtrand/ in the NumPy source tree!"
def configuration(parent_package='', top_path=None):
    config = Configuration('montecarlo', parent_package, top_path)

    # This code requires 'randomkit.c' and 'randomkit.h' to have been copied
    # to (or symlinked to) montecarlo/src/.
    config.add_extension('_intsampler',
                         sources = [join('src', f) for f in
                                    ['_intsamplermodule.c', 'compact5table.c',
                                     'randomkit.c']])
    config.add_data_dir('tests')
    config.add_data_dir('examples')
    config.add_data_dir('doc')
    return config
from scipy_test.testing import ScipyTest
from scipy.test.testing import ScipyTest
def __init__(self,name,location,p_frame=None):
from scipy_test.testing import ScipyTest
from scipy.test.testing import ScipyTest
def _ppimport_importer(self):
    name = self.__name__
finished = threading.Event()
evt = gui_thread_guts.proxy_event(event_catcher.Close, (),{},finished)
event_poster.post(evt)
finished.wait()
if in_proxy_call:
    event_catcher.Close()
else:
    finished = threading.Event()
    evt = gui_thread_guts.proxy_event(event_catcher.Close, (),{},finished)
    event_poster.post(evt)
    finished.wait()
def exit_gui_thread(last_exit = oldexitfunc):
    # don't wait on MS platforms -- it hangs.
    # On X11, we have to shut down the secondary thread.
    if running_in_second_thread and os.name != 'nt':
        import gui_thread_guts
        event_poster = gui_thread_guts.proxy_base()
        event_catcher = event_poster.catcher
        finished = threading.Event()
        evt = gui_thread_guts.proxy_event(event_catcher.Close, (),{},finished)
        event_poster.post(evt)
        # wait for event to get handled
        finished.wait()
        # wait for the gui_thread to die.
        gui_thread_finished.wait()
    if last_exit:
        last_exit()
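These gui_thread records all revolve around one pattern: wrap a call in an event, post it to the GUI thread, and block on a threading.Event until it has run. Stripped of the wx specifics, a generic sketch (post is assumed to hand a callable to the event-loop thread; none of these names are from the library):

import threading

def call_in_gui_thread(post, func, *args):
    done = threading.Event()
    box = {}
    def wrapper():
        box['result'] = func(*args)   # runs on the event-loop thread
        done.set()
    post(wrapper)
    done.wait()                       # caller blocks until the call completes
    return box['result']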
if not hasattr(wx_class, 'init2'):
    wx_class.init2 = wx_class.__init__
if not hasattr(wx_class, '_iNiT2'):
    if hasattr(wx_class, '__init__'):
        wx_class._iNiT2 = wx_class.__init__
    else:
        wx_class._iNiT2 = None
def register(wx_class):
    """ Create a gui_thread compatible version of wx_class.

    Test whether a proxy is necessary.  If so, generate and return the
    proxy class.  If not, just return the wx_class unaltered.
    """
    if running_in_second_thread:
        #print 'proxy generated'
        return proxify(wx_class)
    else:
        if not hasattr(wx_class, 'init2'):
            wx_class.init2 = wx_class.__init__
        wx_class.__init__ = plain_class__init__
        return wx_class
self.init2(*args,**kw)
if self._iNiT2:
    self._iNiT2(*args,**kw)
def plain_class__init__(self,*args,**kw):
    self.init2(*args,**kw)
    add_close_event_handler(self)
    self.proxy_object_alive = 1
results = 'self.wx_obj = finished._result;' \
results = 'self.wx_obj = ret_val;' \
def generate_method(method,wx_class):
    """ Create a proxy method.

    This first creates a text version of the method, accounting for
    slight differences between __init__ methods and all other methods.
    It converts the text to a code object (using exec) and returns the
    code object.  The code is never actually written to a file.  It
    takes about .4 seconds on a wxFrame object with 150 methods.  This
    is a one time cost at start up.  It might be beneficial if we use
    the same code over and over to actually write the proxy class to a
    module.  (.pyc file?)
    """
    module_name = wx_class.__module__
    class_name = wx_class.__name__
    import_statement = 'from %s import %s' % (module_name,class_name)
    documentation = ""
    try:
        documentation = getattr(getattr(wx_class, method), '__doc__')
    except AttributeError:
        pass
    if method == '__init__':
        call_method = class_name
        pre_test = ''
        #pre_test = 'from gui_thread_guts import proxy_base;'\
        #           'proxy_base.__init__(self)'
        arguments = 'arg_list = args'
        results = 'self.wx_obj = finished._result;' \
                  'add_close_event_handler(self);' \
                  'self.proxy_object_alive = 1;'
    elif (method == '__getattr__') or (method == '__del__'):
        return None
    else:
        pre_test = "if not self.proxy_object_alive: proxy_error()"
        call_method = '%s.%s' % (class_name,method)
        arguments = 'arg_list = tuple([self.wx_obj] + list(args))'
        results = 'return smart_return(finished._result, self)'
    body = """def %(method)s(self,*args,**kw):
        \"\"\"%(documentation)s\"\"\"
        %(pre_test)s
        from gui_thread_guts import proxy_event, smart_return
        %(import_statement)s #import statement
        finished = threading.Event()
        # remove proxies if present
        args = dereference_arglist(args)
        %(arguments)s #arguments
        evt = proxy_event(%(call_method)s,arg_list,kw,finished)
        self.post(evt)
        finished.wait()
        if finished.exception_info:
            raise finished.exception_info[0],finished.exception_info[1]
        %(results)s #results\n""" %locals()
    #if method == '__init__':
    #    print body
    exec(body)
    return eval(method)
results = 'return smart_return(finished._result, self)'
results = 'return smart_return(ret_val)'
%(import_statement)s
finished = threading.Event()
%(import_statement)s
body = """def %(method)s(self,*args,**kw):
        \"\"\"%(documentation)s\"\"\"
        %(pre_test)s
        from gui_thread_guts import proxy_event, smart_return
        %(import_statement)s #import statement
        finished = threading.Event()
        # remove proxies if present
        args = dereference_arglist(args)
        %(arguments)s #arguments
        evt = proxy_event(%(call_method)s,arg_list,kw,finished)
        self.post(evt)
        finished.wait()
        if finished.exception_info:
            raise finished.exception_info[0],finished.exception_info[1]
        %(results)s #results\n""" %locals()
%(arguments)s
evt = proxy_event(%(call_method)s,arg_list,kw,finished)
self.post(evt)
finished.wait()
if finished.exception_info:
    raise finished.exception_info[0],finished.exception_info[1]
%(arguments)s
ret_val = None
if in_proxy_call:
    ret_val = apply(%(call_method)s, arg_list, kw)
else:
    finished = threading.Event()
    evt = proxy_event(%(call_method)s,arg_list,kw,finished)
    self.post(evt)
    finished.wait()
    if finished.exception_info:
        raise finished.exception_info[0], \
              finished.exception_info[1]
    ret_val = finished._result
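The proxy machinery above builds each method as source text and exec's it into a namespace. The core trick, reduced to a few lines under a hypothetical method name:

method = 'double'
body = "def %s(self, x):\n    return 2 * x\n" % method
ns = {}
exec(body, ns)     # a statement in Python 2, a call in Python 3; both accept this form
assert ns[method](None, 21) == 42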
return hasattr(x, '_proxy_attr__proxy')
hasattr(x, 'x._proxy_attr__dont_mess_with_me_unless_you_know_what_youre_doing')
def is_proxy_attr(x):
    return hasattr(x, '_proxy_attr__proxy')
return self._cdf(k,*args) - self._cdf(k-1,*args)
return self.cdf(k,*args) - self.cdf(k-1,*args)
def _pdf(self, k, *args):
    return self._cdf(k,*args) - self._cdf(k-1,*args)
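The differencing here is the general identity pmf(k) = cdf(k) - cdf(k-1) for integer-valued distributions. A quick numerical check with scipy.stats (assuming a current scipy; the Poisson choice is arbitrary):

from scipy.stats import poisson

mu, k = 3.0, 2
lhs = poisson.pmf(k, mu)
rhs = poisson.cdf(k, mu) - poisson.cdf(k - 1, mu)
assert abs(lhs - rhs) < 1e-12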
def logL(self, b, Y, **extra):
def logL(self, b, Y):
def logL(self, b, Y, **extra):
    return -scipy.linalg.norm(self.whiten(Y) - N.dot(self.wdesign, b))**2 / 2.
def __init__(self, design, **keywords):
    LikelihoodModel.__init__(self, **keywords)
def __init__(self, design):
    LikelihoodModel.__init__(self)
def __init__(self, design, **keywords):
    LikelihoodModel.__init__(self, **keywords)
    self.initialize(design)
def initialize(self, design, **keywords):
def initialize(self, design):
def initialize(self, design, **keywords):
    self.design = design
    self.wdesign = self.whiten(design)
    self.calc_beta = L.pinv(self.wdesign)
    self.normalized_cov_beta = N.dot(self.calc_beta,
                                     N.transpose(self.calc_beta))
    self.df_resid = self.wdesign.shape[0] - utils.rank(self.design)
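initialize() reduces fitting to a single pseudoinverse: beta = pinv(wdesign) * Y minimizes the (whitened) residual sum of squares. A tiny self-contained check of that least-squares property, on hypothetical data:

import numpy as np

X = np.array([[1., 0.], [1., 1.], [1., 2.]])
y = np.array([0.9, 2.1, 2.9])
beta = np.dot(np.linalg.pinv(X), y)
resid = y - np.dot(X, beta)
# at the least-squares solution the residual is orthogonal to col(X)
assert np.allclose(np.dot(X.T, resid), 0.0)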
def fit(self, Y, **keywords):
def fit(self, Y):
def fit(self, Y, **keywords):
    """
    Full \'fit\' of the model including estimate of covariance matrix,
    (whitened) residuals and scale.
def __init__(self, design, rho=0, **keywords):
    LikelihoodModel.__init__(self, **keywords)
def __init__(self, design, rho=0):
def __init__(self, design, rho=0, **keywords):
    LikelihoodModel.__init__(self, **keywords)
    self.rho = rho
    self.initialize(design)
self.initialize(design)
ols_model.__init__(self, design)
def __init__(self, design, rho=0, **keywords):
    LikelihoodModel.__init__(self, **keywords)
    self.rho = rho
    self.initialize(design)
class wls_model(ar_model):
class wls_model(ols_model):
def whiten(self, X):
    factor = 1. / N.sqrt(1 - self.rho**2)
    return N.concatenate([[X[0]], (X[1:] - self.rho * X[0:-1]) * factor])
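whiten() undoes AR(1) correlation: if e[t] = rho*e[t-1] + sqrt(1-rho**2)*z[t] with z white noise, then (e[1:] - rho*e[:-1]) / sqrt(1-rho**2) recovers z exactly. A numerical sketch under that assumed model (values are arbitrary):

import numpy as np

rho = 0.6
z = np.random.standard_normal(200)
e = np.empty_like(z)
e[0] = z[0]
for t in range(1, len(z)):
    e[t] = rho * e[t - 1] + np.sqrt(1 - rho**2) * z[t]
w = (e[1:] - rho * e[:-1]) / np.sqrt(1 - rho**2)
assert np.allclose(w, z[1:])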
def __init__(self, design, weights=1, **keywords):
    LikelihoodModel.__init__(self, **keywords)
def __init__(self, design, weights=1):
def __init__(self, design, weights=1, **keywords):
    LikelihoodModel.__init__(self, **keywords)
    self.weights = weights
    self.initialize(design)
self.initialize(design)
ols_model.__init__(self, design)
def __init__(self, design, weights=1, **keywords):
    LikelihoodModel.__init__(self, **keywords)
    self.weights = weights
    self.initialize(design)
norm_resid = self.resid * N.multiply.outer(N.ones(self.Y.shape[0]), sdd)
return norm_resid
return self.resid * N.multiply.outer(N.ones(self.Y.shape[0]), sdd)
def norm_resid(self):
    """
    Residuals, normalized to have unit length.
range = abs(upper - lower)
if range == 0.:
rng = abs(upper - lower)
if rng == 0.:
def auto_ticks(data_bounds, bounds_info = default_bounds):
    """ Find locations for axis tick marks.

        Calculate the location for tick marks on an axis. data_bounds is a
        sequence of 2 numbers specifying the maximum and minimum values of
        the data along this axis. bounds_info is a sequence of 3 values that
        specify how the axis end points and tick interval are calculated. An
        array of tick mark locations is returned from the function.  The
        first and last tick entries are the axis end points.

        data_bounds -- (lower,upper). The maximum and minimum values of the
                       data along this axis. If any of the settings in
                       bounds_info are 'auto' or 'fit', the axis properties
                       are calculated automatically from these settings.
        bounds_info -- (lower,upper,interval). Each entry can either be a
                       numerical value or a string. If a number, the axis
                       property is set to that value. If the entry is 'auto',
                       the property is calculated automatically. lower and
                       upper can also be 'fit' in which case the axis end
                       points are set equal to the values in data_bounds.
    """
    # pretty ugly code...
    # man, this needs some testing.
    if is_number(bounds_info[0]):
        lower = bounds_info[0]
    else:
        lower = data_bounds[0]
    if is_number(bounds_info[1]):
        upper = bounds_info[1]
    else:
        upper = data_bounds[1]
    interval = bounds_info[2]
    #print 'raw interval:', interval
    if interval in ['linear','auto']:
        range = abs(upper - lower)
        if range == 0.:
            # anything more intelligent to do here?
            interval = .5
            lower,upper = data_bounds + array((-.5,.5))
        if is_base2(range) and is_base2(upper) and range > 4:
            if range == 2:
                interval = 1
            elif range == 4:
                interval = 4
            else:
                interval = range / 4 # maybe we want it 8
        else:
            interval = auto_interval((lower,upper))
    elif type(interval) in [type(0.0),type(0)]:
        pass
    else:
        #print 'interval: ', interval
        raise ValueError, interval + " is an unknown value for interval: " \
                          " expects 'auto' or 'linear', or a number"
    # If the lower or upper bound are set to 'auto',
    # calculate them based on the newly chosen interval.
    #print 'interval:', interval
    auto_lower,auto_upper = auto_bounds(data_bounds,interval)
    # if the lower and upper bound span 0, make sure ticks
    # will hit exactly on zero.
    if lower < 0 and upper > 0:
        hi_ticks = arange(0,auto_upper+interval,interval)
        low_ticks = - arange(interval,-auto_lower+interval,interval)
        ticks = concatenate((low_ticks[::-1],hi_ticks))
    else:
        # otherwise the ticks start and end on the lower and
        # upper values.
        ticks = arange(auto_lower,auto_upper+interval,interval)
    if bounds_info[0] == 'fit':
        ticks[0] = lower
    if bounds_info[1] == 'fit':
        ticks[-1] = upper
    return ticks
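The zero-straddling branch is the subtle part of auto_ticks: ticks are generated outward from 0 in both directions so 0 is always hit exactly. That logic, isolated from the helpers above (which are not shown here), with arbitrary bounds:

from numpy import arange, concatenate

lower, upper, interval = -2.5, 4.0, 1.0
hi_ticks = arange(0, upper + interval, interval)
low_ticks = -arange(interval, -lower + interval, interval)
ticks = concatenate((low_ticks[::-1], hi_ticks))
# -> [-3. -2. -1.  0.  1.  2.  3.  4.]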
if is_base2(range) and is_base2(upper) and range > 4:
    if range == 2:
if is_base2(rng) and is_base2(upper) and rng > 4:
    if rng == 2:
elif range == 4:
elif rng == 4:
interval = range / 4
interval = rng / 4
hi_ticks = arange(0,auto_upper+interval,interval)
low_ticks = - arange(interval,-auto_lower+interval,interval)
hi_ticks = arange(0,upper+interval,interval)
low_ticks = - arange(interval,-lower+interval,interval)
ticks = arange(auto_lower,auto_upper+interval,interval)
ticks = arange(lower,upper+interval,interval)
if len(probs) == 1:
if not isscalar(probs) and len(probs) == 1:
def ttest_ind (a, b, axis=0, printit=False, name1='Samp1', name2='Samp2',
               writemode='a'):
    """
raise ValueError, 'Unequal length arrays.'
raise ValueError, 'unequal length arrays'
def ttest_rel (a,b,axis=None,printit=False,name1='Samp1',name2='Samp2',
               writemode='a'):
    """
if len(probs) == 1:
if not isscalar(probs) and len(probs) == 1:
def ttest_rel (a,b,axis=None,printit=False,name1='Samp1',name2='Samp2',
               writemode='a'):
    """
def _drv_pdf(self, xk, *args):
def _drv_pmf(self, xk, *args):
def _drv_pdf(self, xk, *args):
    try:
        return self.P[xk]
    except KeyError:
        return 0.0
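The dict lookup with a KeyError fallback implements a sparsely stored pmf that is zero off the listed support. The same behavior in two lines, with a hypothetical probability table:

P = {0: 0.2, 1: 0.5, 2: 0.3}
pmf = lambda k: P.get(k, 0.0)   # zero for unlisted support points
assert pmf(1) == 0.5 and pmf(7) == 0.0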
ext = Extension(parent_package+'integrate._quadpack',sources)
ext = Extension(parent_package+'integrate._quadpack',sources,
                library_dirs=atlas_library_dirs,
                libraries=['quadpack','linpack_lite'] + blas_libraries)
def configuration(parent_package=''):
    if parent_package:
        parent_package += '.'
    local_path = get_path(__name__)

    config = default_config_dict()
    if parent_package:
        config['packages'].append(parent_package+'integrate')
    #config['packages'].append(parent_package+'integrate.tests')

    quadpack = glob(os.path.join(local_path,'quadpack','*.f'))
    config['fortran_libraries'].append(('quadpack',{'sources':quadpack}))

    odepack = glob(os.path.join(local_path,'odepack','*.f'))
    config['fortran_libraries'].append(('odepack',{'sources':odepack}))

    # should we try to weed through files and replace with calls to
    # LAPACK routines?
    linpack_lite = glob(os.path.join(local_path,'linpack_lite','*.f'))
    config['fortran_libraries'].append(('linpack_lite',{'sources':linpack_lite}))

    mach = glob(os.path.join(local_path,'mach','*.f'))
    config['fortran_libraries'].append(('mach',{'sources':mach}))

    # Extension
    # flibraries.append(('blas',{'sources':blas}))

    # Note that all extension modules will be linked against all c and
    # fortran libraries.  But it is a good idea to at least comment
    # the dependencies in the section for each subpackage.

    sources = ['_quadpackmodule.c']
    sources = [os.path.join(local_path,x) for x in sources]
    ext = Extension(parent_package+'integrate._quadpack',sources)
    config['ext_modules'].append(ext)

    # need info about blas -- how to get this???
    blas_libraries, lapack_libraries, atlas_library_dirs = get_atlas_info()

    sources = ['_odepackmodule.c']
    sources = [os.path.join(local_path,x) for x in sources]
    ext = Extension(parent_package+'integrate._odepack',sources,
                    library_dirs=atlas_library_dirs,
                    libraries=['odepack','linpack_lite',] + blas_libraries)
    config['ext_modules'].append(ext)

    # vode
    sources = [os.path.join(local_path,'vode.pyf')]
    ext = Extension(parent_package+'integrate.vode',
                    sources,
                    library_dirs=atlas_library_dirs,
                    libraries=['odepack','linpack_lite'] + blas_libraries,
                    )
    config['ext_modules'].append(ext)
    return config
blas_libraries, lapack_libraries, atlas_library_dirs = get_atlas_info()
libraries=['odepack','linpack_lite',] + blas_libraries)
libraries=['odepack','linpack_lite'] + blas_libraries)
assert_equal(numstring,array([0.,0.1,0.2,0.3,
assert_almost_equal(numstring,array([0.,0.1,0.2,0.3,
def check_arange(self):
    numstring = arange(0,2.21,.1)
    assert_equal(numstring,array([0.,0.1,0.2,0.3,
                                  0.4,0.5,0.6,0.7,
                                  0.8,0.9,1.,1.1,
                                  1.2,1.3,1.4,1.5,
                                  1.6,1.7,1.8,1.9,
                                  2.,2.1,2.2]))
    numstringa = arange(3,4,.3)
    assert_array_equal(numstringa, array([3.,3.3,3.6,3.9]))
    numstringb = arange(3,27,3)
    assert_array_equal(numstringb,array([3,6,9,12,
                                         15,18,21,24]))
    numstringc = arange(3.3,27,4)
    assert_array_equal(numstringc,array([3.3,7.3,11.3,15.3,
                                         19.3,23.3]))
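The assert_equal to assert_almost_equal changes in these test records are about binary floating point: arange accumulates rounding error, so exact comparison against decimal literals can fail even when the values agree to a dozen digits. For instance:

a = 0.1 + 0.1 + 0.1
assert a != 0.3                 # exact equality fails
assert abs(a - 0.3) < 1e-12     # but the values agree to many decimals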
assert_equal(lag2.c,array([1,-2*(k+2),(k+1.)*(k+2.)])/2.0)
assert_equal(lag3.c,array([-1,3*(k+3),-3*(k+2)*(k+3),(k+1)*(k+2)*(k+3)])/6.0)
assert_almost_equal(lag2.c,array([1,-2*(k+2),(k+1.)*(k+2.)])/2.0)
assert_almost_equal(lag3.c,array([-1,3*(k+3),-3*(k+2)*(k+3),(k+1)*(k+2)*(k+3)])/6.0)
def check_genlaguerre(self):
    k = 5*rand()-0.9
    lag0 = genlaguerre(0,k)
    lag1 = genlaguerre(1,k)
    lag2 = genlaguerre(2,k)
    lag3 = genlaguerre(3,k)
    assert_equal(lag0.c,[1])
    assert_equal(lag1.c,[-1,k+1])
    assert_equal(lag2.c,array([1,-2*(k+2),(k+1.)*(k+2.)])/2.0)
    assert_equal(lag3.c,array([-1,3*(k+3),-3*(k+2)*(k+3),(k+1)*(k+2)*(k+3)])/6.0)
assert_equal(leg3.c,array([5,0,-3,0])/2.0)
assert_equal(leg4.c,array([35,0,-30,0,3])/8.0)
assert_equal(leg5.c,array([63,0,-70,0,15,0])/8.0)
assert_almost_equal(leg3.c,array([5,0,-3,0])/2.0)
assert_almost_equal(leg4.c,array([35,0,-30,0,3])/8.0)
assert_almost_equal(leg5.c,array([63,0,-70,0,15,0])/8.0)
def check_legendre(self):
    leg0 = legendre(0)
    leg1 = legendre(1)
    leg2 = legendre(2)
    leg3 = legendre(3)
    leg4 = legendre(4)
    leg5 = legendre(5)
    assert_equal(leg0.c,[1])
    assert_equal(leg1.c,[1,0])
    assert_equal(leg2.c,array([3,0,-1])/2.0)
    assert_equal(leg3.c,array([5,0,-3,0])/2.0)
    assert_equal(leg4.c,array([35,0,-30,0,3])/8.0)
    assert_equal(leg5.c,array([63,0,-70,0,15,0])/8.0)
axis('equal')
try:
    axis('equal')
except AttributeError:
    _active.client.layout_all()
    axis('equal')
def image(img,x=None,y=None,colormap = 'grey',scale='no'):
    """Colormap should really default to the current colormap..."""
    # check for hold here
    validate_active()
    image = wxplt.image_object(img,x,y,colormap=colormap,scale=scale)
    if not _active.hold in ['on','yes']:
        _active.line_list.data = [] # clear it out
        _active.image_list.data = [] # clear it out
        _active.image_list.append(image)
        axis('equal')
    else:
        _active.image_list.append(image)
    _active.update()
    return _active
assert_array_almost_equal(dot(a,v[:,i]),w[i]*v[:,i])
assert_array_almost_equal(dot(a,v[:,i]),w[i]*v[:,i],self.decimal)
def check_heev_complex(self,level=1,suffix=''):
    a= [[1,2-2j,3+7j],[2+2j,2,3],[3-7j,3,5]]
    exact_w=[-6.305141710654834,2.797880950890922,11.50726075976392]
    f = getattr(self.lapack,'heev'+suffix)
    w,v,info=f(a)
    assert not info,`info`
    assert_array_almost_equal(w,exact_w)
    for i in range(3):
        assert_array_almost_equal(dot(a,v[:,i]),w[i]*v[:,i])
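The loop verifies the defining property A v_i = w_i v_i column by column. The same check for this Hermitian matrix, done with numpy alone (assuming numpy is available):

import numpy as np

a = np.array([[1, 2 - 2j, 3 + 7j],
              [2 + 2j, 2, 3],
              [3 - 7j, 3, 5]])
w, v = np.linalg.eigh(a)        # eigh: the Hermitian eigensolver
for i in range(3):
    assert np.allclose(np.dot(a, v[:, i]), w[i] * v[:, i])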