Dataset schema (one record per changed line): rem — removed source line (string, 1 to 322k chars); add — added source line (string, 0 to 2.05M chars); context — enclosing function (string, 4 to 228k chars); meta — commit hash and file path (string, 156 to 215 chars).
"""Takes a PIL image and returns a copy of the image in a Numeric container. If the image is RGB returns a 3-dimensional array: arr[:,:,n] is each channel
"""Takes a PIL image and returns a copy of the image in a Numeric container. If the image is RGB returns a 3-dimensional array: arr[:,:,n] is each channel
def fromimage(im, flatten=0): """Takes a PIL image and returns a copy of the image in a Numeric container. If the image is RGB returns a 3-dimensional array: arr[:,:,n] is each channel Optional arguments: - flatten (0): if true, the image is flattened by calling convert('F') on the image object before extracting the numerical data. This flattens the color layers into a single grayscale layer. Note that the supplied image object is NOT modified. """ assert Image.isImageType(im), "Not a PIL image." if flatten: im = im.convert('F') mode = im.mode adjust = 0 if mode == '1': im = im.convert(mode='L') mode = 'L' adjust = 1 str = im.tostring() type = 'b' if mode == 'F': type = 'f' if mode == 'I': type = 'i' arr = Numeric.fromstring(str,type) shape = list(im.size) shape.reverse() if mode == 'P': arr.shape = shape if im.palette.rawmode != 'RGB': print "Warning: Image has invalid palette." return arr pal = Numeric.fromstring(im.palette.data,type) N = len(pal) pal.shape = (int(N/3.0),3) return arr, pal if mode in ['RGB','YCbCr']: shape += [3] elif mode in ['CMYK','RGBA']: shape += [4] arr.shape = shape if adjust: arr = (arr != 0) return arr
8a6662fcba07c2c4d22f646995e8b6f3ab31d7ff /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/8a6662fcba07c2c4d22f646995e8b6f3ab31d7ff/pilutil.py
- flatten (0): if true, the image is flattened by calling convert('F') on the image object before extracting the numerical data. This flattens the color layers into a single grayscale layer. Note that the supplied image object is NOT modified.
- flatten (0): if true, the image is flattened by calling convert('F') on the image object before extracting the numerical data. This flattens the color layers into a single grayscale layer. Note that the supplied image object is NOT modified.
def fromimage(im, flatten=0): """Takes a PIL image and returns a copy of the image in a Numeric container. If the image is RGB returns a 3-dimensional array: arr[:,:,n] is each channel Optional arguments: - flatten (0): if true, the image is flattened by calling convert('F') on the image object before extracting the numerical data. This flattens the color layers into a single grayscale layer. Note that the supplied image object is NOT modified. """ assert Image.isImageType(im), "Not a PIL image." if flatten: im = im.convert('F') mode = im.mode adjust = 0 if mode == '1': im = im.convert(mode='L') mode = 'L' adjust = 1 str = im.tostring() type = 'b' if mode == 'F': type = 'f' if mode == 'I': type = 'i' arr = Numeric.fromstring(str,type) shape = list(im.size) shape.reverse() if mode == 'P': arr.shape = shape if im.palette.rawmode != 'RGB': print "Warning: Image has invalid palette." return arr pal = Numeric.fromstring(im.palette.data,type) N = len(pal) pal.shape = (int(N/3.0),3) return arr, pal if mode in ['RGB','YCbCr']: shape += [3] elif mode in ['CMYK','RGBA']: shape += [4] arr.shape = shape if adjust: arr = (arr != 0) return arr
8a6662fcba07c2c4d22f646995e8b6f3ab31d7ff /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/8a6662fcba07c2c4d22f646995e8b6f3ab31d7ff/pilutil.py
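The fromimage() rows above describe how a PIL image is copied into an array, channel by channel. As an illustrative aside (the original targets the long-obsolete Numeric package), the same layout can be reproduced with Pillow and numpy:

# Hedged sketch, not the original fromimage(): modern Pillow + numpy equivalent.
import numpy as np
from PIL import Image

img = Image.new('RGB', (4, 3), color=(10, 20, 30))  # width 4, height 3
arr = np.asarray(img)                               # shape (height, width, 3)
print(arr.shape)                                    # (3, 4, 3); arr[:, :, n] is one channel
gray = np.asarray(img.convert('F'))                 # "flatten" to a float grayscale layer
print(gray.shape)                                   # (3, 4)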
print "di"
def __init__(self, x, y, z, kind='linear', copy=True, bounds_error=False, fill_value=np.nan): """ Initialize a 2D interpolator.
0fd926738190f2ebd369125b367fed04eae880c8 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/0fd926738190f2ebd369125b367fed04eae880c8/interpolate.py
print self.x, self.y, self.z
def __init__(self, x, y, z, kind='linear', copy=True, bounds_error=False, fill_value=np.nan): """ Initialize a 2D interpolator.
0fd926738190f2ebd369125b367fed04eae880c8 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/0fd926738190f2ebd369125b367fed04eae880c8/interpolate.py
expr = numexpr("2.0*a+3.0*c",[('a',float),('c', float)]) assert_array_equal(expr(a,c), 2.0*a+3.0*c) def check_all_scalar(self): a = 3. b = 4. assert_equal(evaluate("a+b"), a+b) expr = numexpr("2*a+3*b",[('a',float),('b', float)]) assert_equal(expr(a,b), 2*a+3*b) def check_run(self): a = arange(100).reshape(10,10)[::2] b = arange(10) expr = numexpr("2*a+3*b",[('a',float),('b', float)]) assert_array_equal(expr(a,b), expr.run(a,b))
def check_broadcasting(self): a = arange(100).reshape(10,10)[::2] c = arange(10) d = arange(5).reshape(5,1) assert_array_equal(evaluate("a+c"), a+c) assert_array_equal(evaluate("a+d"), a+d)
a98cf77a23aaabba56e1f91f8502448bd9dcae34 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/a98cf77a23aaabba56e1f91f8502448bd9dcae34/test_numexpr.py
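The numexpr tests above exercise broadcasting. A minimal sketch with the present-day numexpr package (assumed to behave like the snippet under test):

# Hedged sketch using the current numexpr API; variables are picked up from the local scope.
import numpy as np
import numexpr as ne

a = np.arange(100, dtype=float).reshape(10, 10)[::2]  # shape (5, 10)
c = np.arange(10, dtype=float)                        # broadcasts across rows
d = np.arange(5, dtype=float).reshape(5, 1)           # broadcasts across columns
np.testing.assert_array_equal(ne.evaluate("a + c"), a + c)
np.testing.assert_array_equal(ne.evaluate("a + d"), a + d)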
def Construct(s, ij=None, M=None ,N=None, nzmax=100, dtype='d', copy=False): """ Allows constructing a csc_matrix by passing: - data, ij, {M,N,nzmax} a[ij[k,0],ij[k,1]] = data[k] - data, (row, ptr) """ # Moved out of the __init__ function for now for simplicity. # I think this should eventually be moved to be a module-level # function. Otherwise we overload the __init__ method too much, # given Python's weak type checking. This should also remove # some code duplication.
a2bf57fc75113a68b861da7355c48c019c1ddc40 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/a2bf57fc75113a68b861da7355c48c019c1ddc40/sparse.py
M = amax(new.rowind) + 1
M = int(amax(new.rowind)) + 1
def Construct(s, ij=None, M=None ,N=None, nzmax=100, dtype='d', copy=False): """ Allows constructing a csc_matrix by passing: - data, ij, {M,N,nzmax} a[ij[k,0],ij[k,1]] = data[k] - data, (row, ptr) """ # Moved out of the __init__ function for now for simplicity. # I think this should eventually be moved to be a module-level # function. Otherwise we overload the __init__ method too much, # given Python's weak type checking. This should also remove # some code duplication.
a2bf57fc75113a68b861da7355c48c019c1ddc40 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/a2bf57fc75113a68b861da7355c48c019c1ddc40/sparse.py
new.data = new.data * other
new.data *= other
def __mul__(self, other): # implement matrix multiplication and matrix-vector multiplication if isspmatrix(other): return self.matmat(other) elif isscalar(other): new = self.copy() new.data = new.data * other new._dtypechar = new.data.dtypechar new.ftype = _transtabl[new._dtypechar] return new else: return self.matvec(other)
a2bf57fc75113a68b861da7355c48c019c1ddc40 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/a2bf57fc75113a68b861da7355c48c019c1ddc40/sparse.py
def __rsub__(self, other): # implement other - self ocs = csc_matrix(other) if (ocs.shape != self.shape): raise ValueError, "Inconsistent shapes." dtypechar = _coerce_rules[(self._dtypechar,ocs._dtypechar)] data1, data2 = _convert_data(self.data, ocs.data, dtypechar) func = getattr(sparsetools,_transtabl[dtypechar]+'cscadd') c,rowc,ptrc,ierr = func(-data1,self.rowind,self.indptr,data2,ocs.rowind,ocs.indptr) if ierr: raise ValueError, "Ran out of space (but shouldn't have happened)." M, N = self.shape return csc_matrix.Construct(c,(rowc,ptrc),M=M,N=N)
a2bf57fc75113a68b861da7355c48c019c1ddc40 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/a2bf57fc75113a68b861da7355c48c019c1ddc40/sparse.py
if isinstance(other, type(3)): raise NotImplementedError elif isscalar(other):
""" Element-by-element power (unless other is a scalar, in which case return the matrix power.) """ if isscalar(other):
def __pow__(self, other): if isinstance(other, type(3)): raise NotImplementedError elif isscalar(other): new = self.copy() new.data = new.data * other new._dtypechar = new.data.dtypechar new.ftype = _transtabl[new._dtypechar] return new else: ocs = csc_matrix(other) if (ocs.shape != self.shape): raise ValueError, "Inconsistent shapes." dtypechar = _coerce_rules[(self._dtypechar,ocs._dtypechar)] nnz1, nnz2 = self.nnz, ocs.nnz data1, data2 = _convert_data(self.data[:nnz1], ocs.data[:nnz2], dtypechar) func = getattr(sparsetools,_transtabl[dtypechar]+'cscmul') c,rowc,ptrc,ierr = func(data1,self.rowind[:nnz1],self.indptr,data2,ocs.rowind[:nnz2],ocs.indptr) if ierr: raise ValueError, "Ran out of space (but shouldn't have happened)." M, N = self.shape return csc_matrix.Construct(c,(rowc,ptrc),M=M,N=N)
a2bf57fc75113a68b861da7355c48c019c1ddc40 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/a2bf57fc75113a68b861da7355c48c019c1ddc40/sparse.py
new.data = new.data * other
new.data = new.data ** other
def __pow__(self, other): if isinstance(other, type(3)): raise NotImplementedError elif isscalar(other): new = self.copy() new.data = new.data * other new._dtypechar = new.data.dtypechar new.ftype = _transtabl[new._dtypechar] return new else: ocs = csc_matrix(other) if (ocs.shape != self.shape): raise ValueError, "Inconsistent shapes." dtypechar = _coerce_rules[(self._dtypechar,ocs._dtypechar)] nnz1, nnz2 = self.nnz, ocs.nnz data1, data2 = _convert_data(self.data[:nnz1], ocs.data[:nnz2], dtypechar) func = getattr(sparsetools,_transtabl[dtypechar]+'cscmul') c,rowc,ptrc,ierr = func(data1,self.rowind[:nnz1],self.indptr,data2,ocs.rowind[:nnz2],ocs.indptr) if ierr: raise ValueError, "Ran out of space (but shouldn't have happened)." M, N = self.shape return csc_matrix.Construct(c,(rowc,ptrc),M=M,N=N)
a2bf57fc75113a68b861da7355c48c019c1ddc40 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/a2bf57fc75113a68b861da7355c48c019c1ddc40/sparse.py
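The csc_matrix rows above fix scalar multiplication (new.data *= other) and element-wise power (new.data ** other). As a hedged aside, today's scipy.sparse exposes both operations directly:

# Illustrative sketch with the current scipy.sparse API, not the 2006 csc_matrix code.
import numpy as np
from scipy.sparse import csc_matrix

A = csc_matrix(np.array([[1.0, 0.0], [0.0, 3.0]]))
print((A * 2.0).toarray())   # scalar multiplication scales the stored .data: [[2. 0.] [0. 6.]]
print(A.power(2).toarray())  # element-wise power: [[1. 0.] [0. 9.]]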
new = csr_matrix(N,M,nzmax=0,dtype=self._dtypechar)
new = csr_matrix((N,M), nzmax=0, dtype=self._dtypechar)
def transpose(self, copy=False): M,N = self.shape new = csr_matrix(N,M,nzmax=0,dtype=self._dtypechar) if copy: new.data = self.data.copy() new.colind = self.rowind.copy() new.indptr = self.indptr.copy() else: new.data = self.data new.colind = self.rowind new.indptr = self.indptr new._check() return new
a2bf57fc75113a68b861da7355c48c019c1ddc40 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/a2bf57fc75113a68b861da7355c48c019c1ddc40/sparse.py
elif isinstance(key,type(3)):
elif type(key) == int:
def __getitem__(self, key): if isinstance(key,types.TupleType): row = key[0] col = key[1] func = getattr(sparsetools,self.ftype+'cscgetel') M, N = self.shape if not (0<=row<M) or not (0<=col<N): raise KeyError, "Index out of bounds." ind, val = func(self.data, self.rowind, self.indptr, row, col) return val elif isinstance(key,type(3)): return self.data[key] else: raise NotImplementedError
a2bf57fc75113a68b861da7355c48c019c1ddc40 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/a2bf57fc75113a68b861da7355c48c019c1ddc40/sparse.py
M, N = self.shape
def copy(self): M, N = self.shape dtype = self._dtypechar new = csc_matrix.Construct(M, N, nzmax=0, dtype=dtype) new.data = self.data.copy() new.rowind = self.rowind.copy() new.indptr = self.indptr.copy() new._check() return new
a2bf57fc75113a68b861da7355c48c019c1ddc40 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/a2bf57fc75113a68b861da7355c48c019c1ddc40/sparse.py
new = csc_matrix.Construct(M, N, nzmax=0, dtype=dtype)
new = csc_matrix(self.shape, nzmax=0, dtype=dtype)
def copy(self): M, N = self.shape dtype = self._dtypechar new = csc_matrix.Construct(M, N, nzmax=0, dtype=dtype) new.data = self.data.copy() new.rowind = self.rowind.copy() new.indptr = self.indptr.copy() new._check() return new
a2bf57fc75113a68b861da7355c48c019c1ddc40 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/a2bf57fc75113a68b861da7355c48c019c1ddc40/sparse.py
N = amax(new.colind) + 1
N = int(amax(new.colind)) + 1
def Construct(s, ij=None, M=None ,N=None, nzmax=100, dtype='d', copy=False): """ Allows constructing a csr_matrix by passing: - data, ij, {M,N,nzmax} a[ij[k,0],ij[k,1]] = data[k] - data, (row, ptr) """ # Moved out of the __init__ function for now for simplicity. # I think this should eventually be moved to be a module-level # function. Otherwise we overload the __init__ method too much, # given Python's weak type checking. This should also remove # some code duplication. if (isinstance(s, ArrayType) or \ isinstance(s, type([]))): s = asarray(s) if (rank(s) == 2): # converting from a full array ocsc = csc_matrix(transpose(s)) dims = (ocsc.shape[1], ocsc.shape[0]) new = csr_matrix(dims) new.shape = dims new.colind = ocsc.rowind new.indptr = ocsc.indptr new.data = ocsc.data elif isinstance(ij, ArrayType) and (rank(ij) == 2) and (shape(ij) == (len(s),2)): ijnew = ij.copy() ijnew[:,0] = ij[:,1] ijnew[:,1] = ij[:,0] temp = coo_matrix(s,ijnew,M=M,N=N,nzmax=nzmax, dtype=dtype) temp = temp.tocsc() dims = temp.shape new = csr_matrix(dims) new.data = temp.data new.colind = temp.colind new.indptr = temp.indptr # new.shape = temp.shape elif isinstance(ij, types.TupleType) and (len(ij)==2): # What are the new dimensions? Do we need to know them now? dims = (0,0) new = csr_matrix(dims) new.data = asarray(s) new.colind = ij[0] new.indptr = ij[1] if N is None: try: N = amax(new.colind) + 1 except ValueError: N = 0 if M is None: M = len(new.indptr) - 1 if M == -1: M = 0 new.shape = (M,N) else: raise ValueError, "Unrecognized form for csr_matrix constructor." else: raise ValueError, "Unrecognized form for csr_matrix constructor."
a2bf57fc75113a68b861da7355c48c019c1ddc40 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/a2bf57fc75113a68b861da7355c48c019c1ddc40/sparse.py
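The Construct() helper above accepts either data with (i, j) coordinates or data with a (row, ptr) pair. A short sketch of the same two routes in current scipy.sparse, offered as an assumed modern stand-in for Construct():

# Hedged sketch: two equivalent CSR construction routes in today's scipy.sparse.
import numpy as np
from scipy.sparse import csr_matrix

data = np.array([1.0, 2.0, 3.0])
row = np.array([0, 1, 2])
col = np.array([2, 0, 1])
A = csr_matrix((data, (row, col)), shape=(3, 3))                   # coordinate form
B = csr_matrix((data, col, np.array([0, 1, 2, 3])), shape=(3, 3))  # (data, indices, indptr) form
print(np.array_equal(A.toarray(), B.toarray()))                    # True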
M,N = self.shape
M, N = self.shape
def _check(self): M,N = self.shape nnz = self.indptr[-1] nzmax = len(self.colind) if (rank(self.data) != 1) or (rank(self.colind) != 1) or \ (rank(self.indptr) != 1): raise ValueError, "Data, colind, and indptr arrays "\ "should be rank 1." if (len(self.data) != nzmax): raise ValueError, "Data and row list should have same length" if (len(self.indptr) != M+1): raise ValueError, "Index pointer should be of length #rows + 1" if (nnz>0) and (max(self.colind[:nnz]) >= N): raise ValueError, "Column-values must be < N." if (nnz > nzmax): raise ValueError, \ "Last value of index list should be less than "\ "the size of data list" self.nnz = nnz self.nzmax = nzmax self._dtypechar = self.data.dtypechar if self._dtypechar not in 'fdFD': self.data = self.data + 0.0 self._dtypechar = self.data.dtypechar self.ftype = _transtabl[self._dtypechar]
a2bf57fc75113a68b861da7355c48c019c1ddc40 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/a2bf57fc75113a68b861da7355c48c019c1ddc40/sparse.py
new.data = new.data * other
new.data *= other
def __mul__(self, other): # implement matrix multiplication and matrix-vector multiplication if isspmatrix(other): return self.matmat(other) elif isscalar(other): new = self.copy() new.data = new.data * other new._dtypechar = new.data.dtypechar new.ftype = _transtabl[new._dtypechar] return new else: return self.matvec(other)
a2bf57fc75113a68b861da7355c48c019c1ddc40 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/a2bf57fc75113a68b861da7355c48c019c1ddc40/sparse.py
def __rsub__(self, other): # implement other - self ocs = csr_matrix(other) if (ocs.shape != self.shape): raise ValueError, "Inconsistent shapes." dtypechar = _coerce_rules[(self._dtypechar,other._dtypechar)] data1, data2 = _convert_data(self.data, other.data, dtypechar) func = getattr(sparsetools,_transtabl[dtypechar]+'cscadd') c,colc,ptrc,ierr = func(-data1,self.colind,self.indptr,data2,other.colind,other.indptr) if ierr: raise ValueError, "Ran out of space (but shouldn't have happened)." M, N = self.shape return csr_matrix.Construct(c,(colc,ptrc),M=M,N=N)
a2bf57fc75113a68b861da7355c48c019c1ddc40 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/a2bf57fc75113a68b861da7355c48c019c1ddc40/sparse.py
if isinstance(other, type(3)): raise NotImplementedError elif isscalar(other):
""" Element-by-element power (unless other is a scalar, in which case return the matrix power.) """ if isscalar(other):
def __pow__(self, other): if isinstance(other, type(3)): raise NotImplementedError elif isscalar(other): new = self.copy() new.data = new.data * other new._dtypechar = new.data.dtypechar new.ftype = _transtabl[new._dtypechar] return new else: ocs = csr_matrix(other) if (ocs.shape != self.shape): raise ValueError, "Inconsistent shapes." dtypechar = _coerce_rules[(self._dtypechar,ocs._dtypechar)] data1, data2 = _convert_data(self.data, ocs.data, dtypechar) func = getattr(sparsetools,_transtabl[dtypechar]+'cscmul') c,colc,ptrc,ierr = func(data1,self.colind,self.indptr,data2,ocs.colind,ocs.indptr) if ierr: raise ValueError, "Ran out of space (but shouldn't have happened)." M, N = self.shape return csr_matrix.Construct(c,(colc,ptrc),M=M,N=N)
a2bf57fc75113a68b861da7355c48c019c1ddc40 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/a2bf57fc75113a68b861da7355c48c019c1ddc40/sparse.py
new.data = new.data * other
new.data = new.data ** other
def __pow__(self, other): if isinstance(other, type(3)): raise NotImplementedError elif isscalar(other): new = self.copy() new.data = new.data * other new._dtypechar = new.data.dtypechar new.ftype = _transtabl[new._dtypechar] return new else: ocs = csr_matrix(other) if (ocs.shape != self.shape): raise ValueError, "Inconsistent shapes." dtypechar = _coerce_rules[(self._dtypechar,ocs._dtypechar)] data1, data2 = _convert_data(self.data, ocs.data, dtypechar) func = getattr(sparsetools,_transtabl[dtypechar]+'cscmul') c,colc,ptrc,ierr = func(data1,self.colind,self.indptr,data2,ocs.colind,ocs.indptr) if ierr: raise ValueError, "Ran out of space (but shouldn't have happened)." M, N = self.shape return csr_matrix.Construct(c,(colc,ptrc),M=M,N=N)
a2bf57fc75113a68b861da7355c48c019c1ddc40 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/a2bf57fc75113a68b861da7355c48c019c1ddc40/sparse.py
elif isinstance(key,type(3)):
elif type(key) == int:
def __getitem__(self, key): if isinstance(key,types.TupleType): row = key[0] col = key[1] func = getattr(sparsetools,self.ftype+'cscgetel') M, N = self.shape if (row < 0): row = M + row if (col < 0): col = N + col if (row >= M ) or (col >= N) or (row < 0) or (col < 0): raise IndexError, "Index out of bounds." ind, val = func(self.data, self.colind, self.indptr, col, row) return val elif isinstance(key,type(3)): return self.data[key] else: raise NotImplementedError
a2bf57fc75113a68b861da7355c48c019c1ddc40 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/a2bf57fc75113a68b861da7355c48c019c1ddc40/sparse.py
M, N = self.shape new = csr_matrix(M, N, nzmax=0, dtype=self._dtypechar)
new = csr_matrix(self.shape, nzmax=0, dtype=self._dtypechar)
def copy(self): M, N = self.shape new = csr_matrix(M, N, nzmax=0, dtype=self._dtypechar) new.data = self.data.copy() new.colind = self.colind.copy() new.indptr = self.indptr.copy() new._check() return new
a2bf57fc75113a68b861da7355c48c019c1ddc40 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/a2bf57fc75113a68b861da7355c48c019c1ddc40/sparse.py
keys = self.keys()
def matvec(self, other): other = asarray(other) if other.shape[0] != self.shape[1]: raise ValueError, "Dimensions do not match." keys = self.keys() res = [0]*self.shape[0] for key in keys: res[int(key[0])] += self[key] * other[int(key[1]),...] return array(res)
a2bf57fc75113a68b861da7355c48c019c1ddc40 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/a2bf57fc75113a68b861da7355c48c019c1ddc40/sparse.py
for key in keys:
for key in self.keys():
def matvec(self, other): other = asarray(other) if other.shape[0] != self.shape[1]: raise ValueError, "Dimensions do not match." keys = self.keys() res = [0]*self.shape[0] for key in keys: res[int(key[0])] += self[key] * other[int(key[1]),...] return array(res)
a2bf57fc75113a68b861da7355c48c019c1ddc40 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/a2bf57fc75113a68b861da7355c48c019c1ddc40/sparse.py
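The matvec() loop above is a dictionary-of-keys matrix-vector product. For illustration only, scipy's dok_matrix performs the same product through the usual operator today:

# Hedged sketch with scipy.sparse.dok_matrix; the 2006 loop-based matvec is shown above.
import numpy as np
from scipy.sparse import dok_matrix

A = dok_matrix((3, 3))
A[0, 1] = 2.0
A[2, 0] = 5.0
x = np.array([1.0, 2.0, 3.0])
print(A @ x)   # [4. 0. 5.]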
keys = self.keys()
def rmatvec(self, other): other = asarray(other)
a2bf57fc75113a68b861da7355c48c019c1ddc40 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/a2bf57fc75113a68b861da7355c48c019c1ddc40/sparse.py
for key in keys:
for key in self.keys():
def rmatvec(self, other): other = asarray(other)
a2bf57fc75113a68b861da7355c48c019c1ddc40 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/a2bf57fc75113a68b861da7355c48c019c1ddc40/sparse.py
M = amax(ij[0])
M = int(amax(ij[0]))
def __init__(self, obj, ij, M=None, N=None, nzmax=None, dtype=None): spmatrix.__init__(self) if type(ij) is type(()) and len(ij)==2: if M is None: M = amax(ij[0]) if N is None: N = amax(ij[1]) self.row = asarray(ij[0],'i') self.col = asarray(ij[1],'i') else: aij = asarray(ij,'i') if M is None: M = amax(aij[:,0]) if N is None: N = amax(aij[:,1]) self.row = aij[:,0] self.col = aij[:,1] aobj = asarray(obj,dtype=dtype) self.shape = (M,N) if nzmax is None: nzmax = len(aobj) self.nzmax = nzmax self.data = aobj self._dtypechar = aobj.dtypechar self._check()
a2bf57fc75113a68b861da7355c48c019c1ddc40 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/a2bf57fc75113a68b861da7355c48c019c1ddc40/sparse.py
N = amax(ij[1])
N = int(amax(ij[1]))
def __init__(self, obj, ij, M=None, N=None, nzmax=None, dtype=None): spmatrix.__init__(self) if type(ij) is type(()) and len(ij)==2: if M is None: M = amax(ij[0]) if N is None: N = amax(ij[1]) self.row = asarray(ij[0],'i') self.col = asarray(ij[1],'i') else: aij = asarray(ij,'i') if M is None: M = amax(aij[:,0]) if N is None: N = amax(aij[:,1]) self.row = aij[:,0] self.col = aij[:,1] aobj = asarray(obj,dtype=dtype) self.shape = (M,N) if nzmax is None: nzmax = len(aobj) self.nzmax = nzmax self.data = aobj self._dtypechar = aobj.dtypechar self._check()
a2bf57fc75113a68b861da7355c48c019c1ddc40 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/a2bf57fc75113a68b861da7355c48c019c1ddc40/sparse.py
M = amax(aij[:,0])
M = int(amax(aij[:,0]))
def __init__(self, obj, ij, M=None, N=None, nzmax=None, dtype=None): spmatrix.__init__(self) if type(ij) is type(()) and len(ij)==2: if M is None: M = amax(ij[0]) if N is None: N = amax(ij[1]) self.row = asarray(ij[0],'i') self.col = asarray(ij[1],'i') else: aij = asarray(ij,'i') if M is None: M = amax(aij[:,0]) if N is None: N = amax(aij[:,1]) self.row = aij[:,0] self.col = aij[:,1] aobj = asarray(obj,dtype=dtype) self.shape = (M,N) if nzmax is None: nzmax = len(aobj) self.nzmax = nzmax self.data = aobj self._dtypechar = aobj.dtypechar self._check()
a2bf57fc75113a68b861da7355c48c019c1ddc40 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/a2bf57fc75113a68b861da7355c48c019c1ddc40/sparse.py
N = amax(aij[:,1])
N = int(amax(aij[:,1]))
def __init__(self, obj, ij, M=None, N=None, nzmax=None, dtype=None): spmatrix.__init__(self) if type(ij) is type(()) and len(ij)==2: if M is None: M = amax(ij[0]) if N is None: N = amax(ij[1]) self.row = asarray(ij[0],'i') self.col = asarray(ij[1],'i') else: aij = asarray(ij,'i') if M is None: M = amax(aij[:,0]) if N is None: N = amax(aij[:,1]) self.row = aij[:,0] self.col = aij[:,1] aobj = asarray(obj,dtype=dtype) self.shape = (M,N) if nzmax is None: nzmax = len(aobj) self.nzmax = nzmax self.data = aobj self._dtypechar = aobj.dtypechar self._check()
a2bf57fc75113a68b861da7355c48c019c1ddc40 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/a2bf57fc75113a68b861da7355c48c019c1ddc40/sparse.py
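In the coo-style constructor above, amax() over the row and column indices supplies the matrix dimensions when M or N is omitted. Today's coo_matrix infers the shape the same way; a small sketch:

# Hedged sketch: shape inference in the current scipy.sparse.coo_matrix.
import numpy as np
from scipy.sparse import coo_matrix

data = np.array([4.0, 7.0, 9.0])
row = np.array([0, 3, 1])
col = np.array([0, 3, 2])
A = coo_matrix((data, (row, col)))   # shape inferred from the largest indices
print(A.shape)                       # (4, 4)
print(A.toarray()[3, 3])             # 7.0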
M,N = A.shape
M, N = A.shape
def solve(A,b,permc_spec=2): if not hasattr(A, 'tocsr') and not hasattr(A, 'tocsc'): raise ValueError, "Sparse matrix must be able to return CSC format--"\ "A.tocsc()--or CSR format--A.tocsr()" if not hasattr(A,'shape'): raise ValueError, "Sparse matrix must be able to return shape (rows,cols) = A.shape" M,N = A.shape if (M != N): raise ValueError, "Matrix must be square." if hasattr(A, 'tocsc'): mat = A.tocsc() ftype, lastel, data, index0, index1 = \ mat.ftype, mat.nnz, mat.data, mat.rowind, mat.indptr csc = 1 else: mat = A.tocsr() ftype, lastel, data, index0, index1 = \ mat.ftype, mat.nnz, mat.data, mat.colind, mat.indptr csc = 0 gssv = eval('_superlu.' + ftype + 'gssv') return gssv(N,lastel,data,index0,index1,b,csc,permc_spec)[0]
a2bf57fc75113a68b861da7355c48c019c1ddc40 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/a2bf57fc75113a68b861da7355c48c019c1ddc40/sparse.py
M,N = A.shape
M, N = A.shape
def lu_factor(A, permc_spec=2, diag_pivot_thresh=1.0, drop_tol=0.0, relax=1, panel_size=10): M,N = A.shape if (M != N): raise ValueError, "Can only factor square matrices." csc = A.tocsc() gstrf = eval('_superlu.' + csc.ftype + 'gstrf') return gstrf(N,csc.nnz,csc.data,csc.rowind,csc.indptr,permc_spec, diag_pivot_thresh, drop_tol, relax, panel_size)
a2bf57fc75113a68b861da7355c48c019c1ddc40 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/a2bf57fc75113a68b861da7355c48c019c1ddc40/sparse.py
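solve() and lu_factor() above hand a square CSC/CSR matrix to SuperLU. The modern counterpart, offered here only as an assumed equivalent, is scipy.sparse.linalg.spsolve:

# Hedged sketch: direct sparse solve with today's scipy.sparse.linalg.
import numpy as np
from scipy.sparse import csc_matrix
from scipy.sparse.linalg import spsolve

A = csc_matrix(np.array([[3.0, 0.0], [1.0, 2.0]]))
b = np.array([6.0, 5.0])
x = spsolve(A, b)
print(x)        # [2.  1.5]
print(A @ x)    # reproduces b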
edges.update(zip(self.triangle_nodes[border[:,0]][:,1], self.triangle_nodes[border[:,0]][:,2])) edges.update(zip(self.triangle_nodes[border[:,1]][:,2], self.triangle_nodes[border[:,1]][:,0])) edges.update(zip(self.triangle_nodes[border[:,2]][:,0], self.triangle_nodes[border[:,2]][:,1]))
edges.update(dict(zip(self.triangle_nodes[border[:,0]][:,1], self.triangle_nodes[border[:,0]][:,2]))) edges.update(dict(zip(self.triangle_nodes[border[:,1]][:,2], self.triangle_nodes[border[:,1]][:,0]))) edges.update(dict(zip(self.triangle_nodes[border[:,2]][:,0], self.triangle_nodes[border[:,2]][:,1])))
def _compute_convex_hull(self): """Extract the convex hull from the triangulation information.
c210adcdba8dd94fdea864daa246de58ea492f77 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/c210adcdba8dd94fdea864daa246de58ea492f77/triangulate.py
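_compute_convex_hull() above extracts the hull edges from a triangulation. As an aside, scipy.spatial (the eventual home of this functionality) exposes both pieces directly; a minimal sketch:

# Hedged sketch with scipy.spatial.Delaunay, not the sandbox triangulate module above.
import numpy as np
from scipy.spatial import Delaunay

pts = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [1.0, 1.0], [0.5, 0.5]])
tri = Delaunay(pts)
print(tri.simplices.shape)   # (ntriangles, 3): node indices per triangle
print(tri.convex_hull)       # (nedges, 2): node indices of boundary edges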
def configuration(parent_package='',parent_path=None): from scipy.distutils.system_info import get_info package = 'cluster' local_path = get_path(__name__,parent_path) config = Configuration(package,parent_package)
def configuration(parent_package='',top_path=None): from scipy.distutils.misc_util import Configuration config = Configuration('cluster',parent_package,top_path) config.add_data_dir('tests')
def configuration(parent_package='',parent_path=None): from scipy.distutils.system_info import get_info package = 'cluster' local_path = get_path(__name__,parent_path) config = Configuration(package,parent_package) config.add_extension('_vq', sources=[join('src', 'vq_wrap.cpp')]) return config
64d85ae8d3fb0f7cebd49730623ca86bac49fef6 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/64d85ae8d3fb0f7cebd49730623ca86bac49fef6/setup.py
**configuration()
**configuration(top_path='').todict()
def configuration(parent_package='',parent_path=None): from scipy.distutils.system_info import get_info package = 'cluster' local_path = get_path(__name__,parent_path) config = Configuration(package,parent_package) config.add_extension('_vq', sources=[join('src', 'vq_wrap.cpp')]) return config
64d85ae8d3fb0f7cebd49730623ca86bac49fef6 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/64d85ae8d3fb0f7cebd49730623ca86bac49fef6/setup.py
def __init__(self,freq,year=None, month=None, day=None, seconds=None,quarter=None, date=None, val=None): if hasattr(freq,'freq'):
def __init__(self, freq, year=None, month=None, day=None, seconds=None,quarter=None, mxDate=None, val=None): if hasattr(freq, 'freq'):
def __init__(self,freq,year=None, month=None, day=None, seconds=None,quarter=None, date=None, val=None): if hasattr(freq,'freq'): self.freq = corelib.fmtFreq(freq.freq) else: self.freq = corelib.fmtFreq(freq) self.type = corelib.freqToType(self.freq) if val is not None: if self.freq == 'D': self.__date = val+originDate elif self.freq == 'B': self.__date = originDate + val + (val//5)*7 - (val//5)*5 elif self.freq == 'S': self.__date = secondlyOriginDate + mx.DateTime.DateTimeDeltaFromSeconds(val) elif self.freq == 'M': self.__date = originDate + mx.DateTime.RelativeDateTime(months=val, day=-1) elif self.freq == 'A': self.__date = originDate + mx.DateTime.RelativeDateTime(years=val, month=-1, day=-1) elif self.freq == 'Q': self.__date = originDate + 1 + mx.DateTime.RelativeDateTime(years=int(val/4), month=int(12 * (float(val)/4 - val/4)), day=-1) elif date is not None: self.__date = date else: error = ValueError("Insufficient parameters given to create a date at the given frequency")
c207acc5c7f49d5bd86329331c20462583407dd3 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/c207acc5c7f49d5bd86329331c20462583407dd3/tsdate.py
elif date is not None: self.__date = date
elif mxDate is not None: self.__date = mxDate
def __init__(self,freq,year=None, month=None, day=None, seconds=None,quarter=None, date=None, val=None): if hasattr(freq,'freq'): self.freq = corelib.fmtFreq(freq.freq) else: self.freq = corelib.fmtFreq(freq) self.type = corelib.freqToType(self.freq) if val is not None: if self.freq == 'D': self.__date = val+originDate elif self.freq == 'B': self.__date = originDate + val + (val//5)*7 - (val//5)*5 elif self.freq == 'S': self.__date = secondlyOriginDate + mx.DateTime.DateTimeDeltaFromSeconds(val) elif self.freq == 'M': self.__date = originDate + mx.DateTime.RelativeDateTime(months=val, day=-1) elif self.freq == 'A': self.__date = originDate + mx.DateTime.RelativeDateTime(years=val, month=-1, day=-1) elif self.freq == 'Q': self.__date = originDate + 1 + mx.DateTime.RelativeDateTime(years=int(val/4), month=int(12 * (float(val)/4 - val/4)), day=-1) elif date is not None: self.__date = date else: error = ValueError("Insufficient parameters given to create a date at the given frequency")
c207acc5c7f49d5bd86329331c20462583407dd3 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/c207acc5c7f49d5bd86329331c20462583407dd3/tsdate.py
if self.freq in ('B','D'):
if self.freq in ('B', 'D'):
def __init__(self,freq,year=None, month=None, day=None, seconds=None,quarter=None, date=None, val=None): if hasattr(freq,'freq'): self.freq = corelib.fmtFreq(freq.freq) else: self.freq = corelib.fmtFreq(freq) self.type = corelib.freqToType(self.freq) if val is not None: if self.freq == 'D': self.__date = val+originDate elif self.freq == 'B': self.__date = originDate + val + (val//5)*7 - (val//5)*5 elif self.freq == 'S': self.__date = secondlyOriginDate + mx.DateTime.DateTimeDeltaFromSeconds(val) elif self.freq == 'M': self.__date = originDate + mx.DateTime.RelativeDateTime(months=val, day=-1) elif self.freq == 'A': self.__date = originDate + mx.DateTime.RelativeDateTime(years=val, month=-1, day=-1) elif self.freq == 'Q': self.__date = originDate + 1 + mx.DateTime.RelativeDateTime(years=int(val/4), month=int(12 * (float(val)/4 - val/4)), day=-1) elif date is not None: self.__date = date else: error = ValueError("Insufficient parameters given to create a date at the given frequency")
c207acc5c7f49d5bd86329331c20462583407dd3 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/c207acc5c7f49d5bd86329331c20462583407dd3/tsdate.py
def strfmt(self,fmt): qFmt = fmt.replace("%q","XXXX")
def strfmt(self, fmt): qFmt = fmt.replace("%q", "XXXX")
def strfmt(self,fmt): qFmt = fmt.replace("%q","XXXX") tmpStr = self.__date.strftime(qFmt) return tmpStr.replace("XXXX",str(self.quarter()))
c207acc5c7f49d5bd86329331c20462583407dd3 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/c207acc5c7f49d5bd86329331c20462583407dd3/tsdate.py
return tmpStr.replace("XXXX",str(self.quarter()))
return tmpStr.replace("XXXX", str(self.quarter()))
def strfmt(self,fmt): qFmt = fmt.replace("%q","XXXX") tmpStr = self.__date.strftime(qFmt) return tmpStr.replace("XXXX",str(self.quarter()))
c207acc5c7f49d5bd86329331c20462583407dd3 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/c207acc5c7f49d5bd86329331c20462583407dd3/tsdate.py
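strfmt() above smuggles a quarter number through strftime by swapping "%q" for a placeholder first. The same trick works with the standard library; a hedged stdlib-only sketch:

# Illustrative stdlib version of the strfmt() placeholder trick (names here are hypothetical).
import datetime as dt

def strfmt(d, fmt):
    quarter = (d.month - 1) // 3 + 1
    return d.strftime(fmt.replace("%q", "XXXX")).replace("XXXX", str(quarter))

print(strfmt(dt.date(2006, 11, 5), "%Yq%q"))   # 2006q4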
if self.freq in ("B","D"): return self.__date.strftime("%d-%b-%y")
if self.freq in ("B", "D"): return self.strfmt("%d-%b-%y")
def __str__(self): if self.freq in ("B","D"): return self.__date.strftime("%d-%b-%y") elif self.freq == "S": return self.__date.strftime("%d-%b-%Y %H:%M:%S") elif self.freq == "M": return self.__date.strftime("%b-%Y") elif self.freq == "Q": return str(self.year())+"q"+str(self.quarter()) elif self.freq == "A": return str(self.year()) else: return self.__date.strftime("%d-%b-%y")
c207acc5c7f49d5bd86329331c20462583407dd3 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/c207acc5c7f49d5bd86329331c20462583407dd3/tsdate.py
return self.__date.strftime("%d-%b-%Y %H:%M:%S")
return self.strfmt("%d-%b-%Y %H:%M:%S")
def __str__(self): if self.freq in ("B","D"): return self.__date.strftime("%d-%b-%y") elif self.freq == "S": return self.__date.strftime("%d-%b-%Y %H:%M:%S") elif self.freq == "M": return self.__date.strftime("%b-%Y") elif self.freq == "Q": return str(self.year())+"q"+str(self.quarter()) elif self.freq == "A": return str(self.year()) else: return self.__date.strftime("%d-%b-%y")
c207acc5c7f49d5bd86329331c20462583407dd3 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/c207acc5c7f49d5bd86329331c20462583407dd3/tsdate.py
return self.__date.strftime("%b-%Y")
return self.strfmt("%b-%Y")
def __str__(self): if self.freq in ("B","D"): return self.__date.strftime("%d-%b-%y") elif self.freq == "S": return self.__date.strftime("%d-%b-%Y %H:%M:%S") elif self.freq == "M": return self.__date.strftime("%b-%Y") elif self.freq == "Q": return str(self.year())+"q"+str(self.quarter()) elif self.freq == "A": return str(self.year()) else: return self.__date.strftime("%d-%b-%y")
c207acc5c7f49d5bd86329331c20462583407dd3 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/c207acc5c7f49d5bd86329331c20462583407dd3/tsdate.py
return str(self.year())+"q"+str(self.quarter())
return self.strfmt("%Yq%q")
def __str__(self): if self.freq in ("B","D"): return self.__date.strftime("%d-%b-%y") elif self.freq == "S": return self.__date.strftime("%d-%b-%Y %H:%M:%S") elif self.freq == "M": return self.__date.strftime("%b-%Y") elif self.freq == "Q": return str(self.year())+"q"+str(self.quarter()) elif self.freq == "A": return str(self.year()) else: return self.__date.strftime("%d-%b-%y")
c207acc5c7f49d5bd86329331c20462583407dd3 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/c207acc5c7f49d5bd86329331c20462583407dd3/tsdate.py
return str(self.year())
return self.strfmt("%Y")
def __str__(self): if self.freq in ("B","D"): return self.__date.strftime("%d-%b-%y") elif self.freq == "S": return self.__date.strftime("%d-%b-%Y %H:%M:%S") elif self.freq == "M": return self.__date.strftime("%b-%Y") elif self.freq == "Q": return str(self.year())+"q"+str(self.quarter()) elif self.freq == "A": return str(self.year()) else: return self.__date.strftime("%d-%b-%y")
c207acc5c7f49d5bd86329331c20462583407dd3 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/c207acc5c7f49d5bd86329331c20462583407dd3/tsdate.py
return self.__date.strftime("%d-%b-%y")
return self.strfmt("%d-%b-%y")
def __str__(self): if self.freq in ("B","D"): return self.__date.strftime("%d-%b-%y") elif self.freq == "S": return self.__date.strftime("%d-%b-%Y %H:%M:%S") elif self.freq == "M": return self.__date.strftime("%b-%Y") elif self.freq == "Q": return str(self.year())+"q"+str(self.quarter()) elif self.freq == "A": return str(self.year()) else: return self.__date.strftime("%d-%b-%y")
c207acc5c7f49d5bd86329331c20462583407dd3 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/c207acc5c7f49d5bd86329331c20462583407dd3/tsdate.py
if self.freq <> other.freq: raise ValueError("Cannont subtract dates of different frequency (" + str(self.freq) + " <> " + str(other.freq) + ")")
if self.freq != other.freq: raise ValueError("Cannot subtract dates of different frequency (" + str(self.freq) + " != " + str(other.freq) + ")")
def __sub__(self, other): try: return self + (-1) * other except: pass try: if self.freq <> other.freq: raise ValueError("Cannont subtract dates of different frequency (" + str(self.freq) + " <> " + str(other.freq) + ")") return int(self) - int(other) except TypeError: raise TypeError("Could not subtract types " + str(type(self)) + " and " + str(type(other)))
c207acc5c7f49d5bd86329331c20462583407dd3 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/c207acc5c7f49d5bd86329331c20462583407dd3/tsdate.py
if self.freq <> other.freq:
if self.freq != other.freq:
def __eq__(self, other): if self.freq <> other.freq: raise TypeError("frequencies are not equal!") return int(self) == int(other)
c207acc5c7f49d5bd86329331c20462583407dd3 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/c207acc5c7f49d5bd86329331c20462583407dd3/tsdate.py
if self.freq <> other.freq:
if self.freq != other.freq:
def __cmp__(self, other): if self.freq <> other.freq: raise TypeError("frequencies are not equal!") return int(self)-int(other)
c207acc5c7f49d5bd86329331c20462583407dd3 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/c207acc5c7f49d5bd86329331c20462583407dd3/tsdate.py
return Date(freq, date=tempDate)
return Date(freq, mxDate=tempDate)
def thisday(freq): freq = corelib.fmtFreq(freq) tempDate = mx.DateTime.now() # if it is Saturday or Sunday currently, freq==B, then we want to use Friday if freq == 'B' and tempDate.day_of_week >= 5: tempDate -= (tempDate.day_of_week - 4) if freq == 'B' or freq == 'D' or freq == 'S': return Date(freq, date=tempDate) elif freq == 'M': return Date(freq,tempDate.year,tempDate.month) elif freq == 'Q': return Date(freq,tempDate.year,quarter=monthToQuarter(tempDate.month)) elif freq == 'A': return Date(freq,tempDate.year)
c207acc5c7f49d5bd86329331c20462583407dd3 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/c207acc5c7f49d5bd86329331c20462583407dd3/tsdate.py
return Date(freq,tempDate.year,tempDate.month)
return Date(freq, year=tempDate.year, month=tempDate.month)
def thisday(freq): freq = corelib.fmtFreq(freq) tempDate = mx.DateTime.now() # if it is Saturday or Sunday currently, freq==B, then we want to use Friday if freq == 'B' and tempDate.day_of_week >= 5: tempDate -= (tempDate.day_of_week - 4) if freq == 'B' or freq == 'D' or freq == 'S': return Date(freq, date=tempDate) elif freq == 'M': return Date(freq,tempDate.year,tempDate.month) elif freq == 'Q': return Date(freq,tempDate.year,quarter=monthToQuarter(tempDate.month)) elif freq == 'A': return Date(freq,tempDate.year)
c207acc5c7f49d5bd86329331c20462583407dd3 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/c207acc5c7f49d5bd86329331c20462583407dd3/tsdate.py
return Date(freq,tempDate.year,quarter=monthToQuarter(tempDate.month))
return Date(freq, year=tempDate.year, quarter=monthToQuarter(tempDate.month))
def thisday(freq): freq = corelib.fmtFreq(freq) tempDate = mx.DateTime.now() # if it is Saturday or Sunday currently, freq==B, then we want to use Friday if freq == 'B' and tempDate.day_of_week >= 5: tempDate -= (tempDate.day_of_week - 4) if freq == 'B' or freq == 'D' or freq == 'S': return Date(freq, date=tempDate) elif freq == 'M': return Date(freq,tempDate.year,tempDate.month) elif freq == 'Q': return Date(freq,tempDate.year,quarter=monthToQuarter(tempDate.month)) elif freq == 'A': return Date(freq,tempDate.year)
c207acc5c7f49d5bd86329331c20462583407dd3 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/c207acc5c7f49d5bd86329331c20462583407dd3/tsdate.py
return Date(freq,tempDate.year) def prevbusday(day_end_hour=18,day_end_min=0):
return Date(freq, year=tempDate.year) def prevbusday(day_end_hour=18, day_end_min=0):
def thisday(freq): freq = corelib.fmtFreq(freq) tempDate = mx.DateTime.now() # if it is Saturday or Sunday currently, freq==B, then we want to use Friday if freq == 'B' and tempDate.day_of_week >= 5: tempDate -= (tempDate.day_of_week - 4) if freq == 'B' or freq == 'D' or freq == 'S': return Date(freq, date=tempDate) elif freq == 'M': return Date(freq,tempDate.year,tempDate.month) elif freq == 'Q': return Date(freq,tempDate.year,quarter=monthToQuarter(tempDate.month)) elif freq == 'A': return Date(freq,tempDate.year)
c207acc5c7f49d5bd86329331c20462583407dd3 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/c207acc5c7f49d5bd86329331c20462583407dd3/tsdate.py
def dateOf(_date,_destFreq,_relation="BEFORE"): _destFreq = corelib.fmtFreq(_destFreq) _rel = _relation.upper()[0] if _date.freq == _destFreq: return _date elif _date.freq == 'D': if _destFreq == 'B': tempDate = _date.mxDate() if _rel == "B":
def dateOf(date, toFreq, relation="BEFORE"): toFreq = corelib.fmtFreq(toFreq) _rel = relation.upper()[0] if date.freq == toFreq: return date elif date.freq == 'D': if toFreq == 'B': tempDate = date.mxDate() if _rel == 'B':
def prevbusday(day_end_hour=18,day_end_min=0): tempDate = mx.DateTime.localtime() dateNum = tempDate.hour + float(tempDate.minute)/60 checkNum = day_end_hour + float(day_end_min)/60 if dateNum < checkNum: return thisday('B') - 1 else: return thisday('B')
c207acc5c7f49d5bd86329331c20462583407dd3 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/c207acc5c7f49d5bd86329331c20462583407dd3/tsdate.py
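prevbusday() above returns the previous business day until a configurable end-of-day cutoff has passed. A rough stdlib sketch of that idea (the original relies on mx.DateTime and routes weekend handling through thisday('B')):

# Hedged sketch only; prev_business_day is a hypothetical stand-in for prevbusday().
import datetime as dt

def prev_business_day(now=None, day_end_hour=18, day_end_min=0):
    now = now or dt.datetime.now()
    day = now.date()
    if (now.hour, now.minute) < (day_end_hour, day_end_min):
        day -= dt.timedelta(days=1)      # business day not finished yet
    while day.weekday() >= 5:            # roll Saturday/Sunday back to Friday
        day -= dt.timedelta(days=1)
    return day

print(prev_business_day(dt.datetime(2006, 7, 10, 9, 0)))   # 2006-07-07 (Friday)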
'blas_src',blas_src_info['sources'],
'blas_src',blas_src_info['sources'] + \ [os.path.join(local_path,'src','fblaswrap.f')],
def configuration(parent_package=''): if sys.platform == 'win32': import scipy_distutils.mingw32_support from scipy_distutils.core import Extension from scipy_distutils.misc_util import get_path, default_config_dict from scipy_distutils.misc_util import fortran_library_item, dot_join from scipy_distutils.system_info import get_info,dict_append,\ AtlasNotFoundError,LapackNotFoundError,BlasNotFoundError,\ LapackSrcNotFoundError,BlasSrcNotFoundError package = 'linalg' from interface_gen import generate_interface config = default_config_dict(package,parent_package) local_path = get_path(__name__) m = re.compile(r'(build|install|bdist|run_f2py)') if not filter(m.match,sys.argv): sources = [] sources += glob(os.path.join(local_path,'src','*.f')) sources += glob(os.path.join(local_path,'src','*.c')) sources += glob(os.path.join(local_path,'generic_*.pyf')) sources += [os.path.join(local_path,f) for f in [\ 'flapack_user_routines.pyf','atlas_version.c']] config['ext_modules'].append(Extension(\ name='fake_linalg_ext_module', sources = sources)) return config atlas_info = get_info('atlas') #atlas_info = {} # uncomment if ATLAS is available but want to use # Fortran LAPACK/BLAS; useful for testing f_libs = [] atlas_version = None if atlas_info: # Try to determine ATLAS version cur_dir = os.getcwd() os.chdir(local_path) cmd = '%s %s build_ext --inplace --force'%\ (sys.executable, os.path.join(local_path,'setup_atlas_version.py')) print cmd s,o=run_command(cmd) if not s: cmd = sys.executable+' -c "import atlas_version"' print cmd s,o=run_command(cmd) if not s: m = re.match(r'ATLAS version (?P<version>\d+[.]\d+[.]\d+)',o) if m: atlas_version = m.group('version') print 'ATLAS version',atlas_version if atlas_version is None: if re.search(r'undefined symbol: ATL_buildinfo',o,re.M): atlas_version = '3.2.1' # or pre 3.3.6 print 'ATLAS version',atlas_version,'(or pre 3.3.6)' else: print o else: print o if atlas_version is None: print 'Failed to determine ATLAS version' os.chdir(cur_dir) if ('ATLAS_WITHOUT_LAPACK',None) in atlas_info.get('define_macros',[]): lapack_info = get_info('lapack') if not lapack_info: warnings.warn(LapackNotFoundError.__doc__) lapack_src_info = get_info('lapack_src') if not lapack_src_info: raise LapackSrcNotFoundError,LapackSrcNotFoundError.__doc__ dict_append(lapack_info,libraries=['lapack_src']) f_libs.append(fortran_library_item(\ 'lapack_src',lapack_src_info['sources'], )) dict_append(lapack_info,**atlas_info) atlas_info = lapack_info blas_info,lapack_info = {},{} if not atlas_info: warnings.warn(AtlasNotFoundError.__doc__) blas_info = get_info('blas') #blas_info = {} # test building BLAS from sources. if not blas_info: warnings.warn(BlasNotFoundError.__doc__) blas_src_info = get_info('blas_src') if not blas_src_info: raise BlasSrcNotFoundError,BlasSrcNotFoundError.__doc__ dict_append(blas_info,libraries=['blas_src']) f_libs.append(fortran_library_item(\ 'blas_src',blas_src_info['sources'], )) lapack_info = get_info('lapack') #lapack_info = {} # test building LAPACK from sources. 
if not lapack_info: warnings.warn(LapackNotFoundError.__doc__) lapack_src_info = get_info('lapack_src') if not lapack_src_info: raise LapackSrcNotFoundError,LapackSrcNotFoundError.__doc__ dict_append(lapack_info,libraries=['lapack_src']) f_libs.append(fortran_library_item(\ 'lapack_src',lapack_src_info['sources'], )) mod_sources = {} if atlas_info or blas_info: mod_sources['fblas'] = ['generic_fblas.pyf', 'generic_fblas1.pyf', 'generic_fblas2.pyf', 'generic_fblas3.pyf', os.path.join('src','fblaswrap.f'), ] if atlas_info or lapack_info: mod_sources['flapack'] = ['generic_flapack.pyf'] if atlas_info: mod_sources['cblas'] = ['generic_cblas.pyf', 'generic_cblas1.pyf'] mod_sources['clapack'] = ['generic_clapack.pyf'] else: dict_append(atlas_info,**lapack_info) dict_append(atlas_info,**blas_info) skip_names = {'clapack':[],'flapack':[],'cblas':[],'fblas':[]} if skip_single_routines: skip_names['clapack'].extend(\ 'sgesv cgesv sgetrf cgetrf sgetrs cgetrs sgetri cgetri'\ ' sposv cposv spotrf cpotrf spotrs cpotrs spotri cpotri'\ ' slauum clauum strtri ctrtri'.split()) skip_names['flapack'].extend(skip_names['clapack']) skip_names['flapack'].extend(\ 'sgesdd cgesdd sgelss cgelss sgeqrf cgeqrf sgeev cgeev'\ ' sgegv cgegv ssyev cheev slaswp claswp sgees cgees' ' sggev cggev'.split()) skip_names['cblas'].extend('saxpy caxpy'.split()) skip_names['fblas'].extend(skip_names['cblas']) skip_names['fblas'].extend(\ 'srotg crotg srotmg srot csrot srotm sswap cswap sscal cscal'\ ' csscal scopy ccopy sdot cdotu cdotc snrm2 scnrm2 sasum scasum'\ ' isamax icamax sgemv cgemv chemv ssymv strmv ctrmv'\ ' sgemm cgemm'.split()) if using_lapack_blas: skip_names['fblas'].extend(\ 'drotmg srotmg drotm srotm'.split()) if atlas_version=='3.2.1': skip_names['clapack'].extend(\ 'sgetri dgetri cgetri zgetri spotri dpotri cpotri zpotri'\ ' slauum dlauum clauum zlauum strtri dtrtri ctrtri ztrtri'.split()) for mod_name,sources in mod_sources.items(): sources = [os.path.join(local_path,s) for s in sources] pyf_sources = filter(lambda s:s[-4:]=='.pyf',sources) mod_file = os.path.join(local_path,mod_name+'.pyf') if dep_util.newer_group(pyf_sources,mod_file): generate_interface(mod_name,sources[0],mod_file, skip_names.get(mod_name,[])) sources = filter(lambda s:s[-4:]!='.pyf',sources) ext_args = {'name':dot_join(parent_package,package,mod_name), 'sources':[mod_file]+sources} dict_append(ext_args,**atlas_info) ext = Extension(**ext_args) ext.need_fcompiler_opts = 1 config['ext_modules'].append(ext) flinalg = [] for f in ['det.f','lu.f', #'wrappers.c','inv.f', ]: flinalg.append(os.path.join(local_path,'src',f)) ext_args = {'name':dot_join(parent_package,package,'_flinalg'), 'sources':flinalg} dict_append(ext_args,**atlas_info) config['ext_modules'].append(Extension(**ext_args)) ext_args = {'name':dot_join(parent_package,package,'calc_lwork'), 'sources':[os.path.join(local_path,'src','calc_lwork.f')], } dict_append(ext_args,**atlas_info) config['ext_modules'].append(Extension(**ext_args)) config['fortran_libraries'].extend(f_libs) return config
b1e1ccbf85736bc67a87e9364025451a1d59dadd /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/b1e1ccbf85736bc67a87e9364025451a1d59dadd/setup_linalg.py
sys.args.insert(0,'scipy_core')
sys.argv.insert(0,'scipy_core')
def get_package_config(name): sys.path.insert(0,os.path.join('scipy_core',name)) try: mod = __import__('setup_'+name) config = mod.configuration() finally: del sys.path[0] return config
b1e1ccbf85736bc67a87e9364025451a1d59dadd /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/b1e1ccbf85736bc67a87e9364025451a1d59dadd/setup_linalg.py
from scipy.special import binomcdf, binomcdfc, binomcdfinv, betacdf, betaq, fcdf, \ fcdfc, fp, gammacdf, gammacdfc, gammaq, negbinomcdf, negbinomcdfinv, \ possioncdf, poissioncdfc, possioncdfinv, studentcdf, studentq, \ chi2cdf, chi2cdfc, chi2p, normalcdf, normalq, smirnovcdfc, smirnovp, \ kolmogorovcdfc, kolmogorovp
def friedmanchisquare(*args): """
b5687aa43d7494456b3a512e58b25567d07f7305 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/b5687aa43d7494456b3a512e58b25567d07f7305/stats.py
fcdfc, fp, gammacdf, gammacdfc, gammaq, negbinomcdf, negbinomcdfinv, \ possioncdf, poissioncdfc, possioncdfinv, studentcdf, studentq, \ chi2cdf, chi2cdfc, chi2p, normalcdf, normalq, smirnovcdfc, smirnovp, \ kolmogorovcdfc, kolmogorovp
fcdfc, fp, gammacdf, gammacdfc, gammaq, negbinomcdf, negbinomcdfinv from scipy.special import poissoncdf, poissoncdfc, poissoncdfinv, studentcdf, \ studentq, chi2cdf, chi2cdfc, chi2p, normalcdf, normalq, smirnovcdfc from scipy.special import smirnovp, kolmogorovcdfc, kolmogorovp
def friedmanchisquare(*args): """
576e0a3361df2e89cd086a66b330cb03edf197ae /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/576e0a3361df2e89cd086a66b330cb03edf197ae/stats.py
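The rows above rework the special-function imports feeding friedmanchisquare(). The public entry point survives in scipy.stats; a short usage sketch (the sample data is made up for illustration):

# Hedged sketch: Friedman test via today's scipy.stats, one sequence per treatment.
from scipy.stats import friedmanchisquare

before  = [72, 96, 88, 92, 74, 76, 82]
after1m = [120, 120, 132, 120, 101, 96, 112]
after5m = [76, 95, 104, 96, 84, 72, 76]
stat, p = friedmanchisquare(before, after1m, after5m)
print(round(stat, 3), round(p, 4))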
def __del__(self):
b7aff2a07a384ad500a3cb77f124eacba4d7ec26 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/b7aff2a07a384ad500a3cb77f124eacba4d7ec26/cox.py
lin = 1. + b * X
lin = 1 + b*X
def information(self, b, ties='breslow'):
b7aff2a07a384ad500a3cb77f124eacba4d7ec26 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/b7aff2a07a384ad500a3cb77f124eacba4d7ec26/cox.py
maxfun=None, full_output=0, disp=1, retall=0, callback=None):
maxfun=None, full_output=0, disp=1, retall=0, callback=None, direc=None):
def fmin_powell(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None, maxfun=None, full_output=0, disp=1, retall=0, callback=None): """Minimize a function using modified Powell's method. Description: Uses a modification of Powell's method to find the minimum of a function of N variables Inputs: func -- the Python function or method to be minimized. x0 -- the initial guess. args -- extra arguments for func. callback -- an optional user-supplied function to call after each iteration. It is called as callback(xk), where xk is the current parameter vector. Outputs: (xopt, {fopt, xi, direc, iter, funcalls, warnflag}, {allvecs}) xopt -- minimizer of function fopt -- value of function at minimum: fopt = func(xopt) direc -- current direction set iter -- number of iterations funcalls -- number of function calls warnflag -- Integer warning flag: 1 : 'Maximum number of function evaluations.' 2 : 'Maximum number of iterations.' allvecs -- a list of solutions at each iteration Additional Inputs: xtol -- line-search error tolerance. ftol -- acceptable relative error in func(xopt) for convergence. maxiter -- the maximum number of iterations to perform. maxfun -- the maximum number of function evaluations. full_output -- non-zero if fval and warnflag outputs are desired. disp -- non-zero to print convergence messages. retall -- non-zero to return a list of the solution at each iteration See also: fmin, fmin_powell, fmin_cg, fmin_bfgs, fmin_ncg -- multivariate local optimizers leastsq -- nonlinear least squares minimizer fmin_l_bfgs_b, fmin_tnc, fmin_cobyla -- constrained multivariate optimizers anneal, brute -- global optimizers fminbound, brent, golden, bracket -- local scalar minimizers fsolve -- n-dimenstional root-finding brentq, brenth, ridder, bisect, newton -- one-dimensional root-finding fixed_point -- scalar fixed-point finder """ # we need to use a mutable object here that we can update in the # wrapper function fcalls, func = wrap_function(func, args) x = asarray(x0) if retall: allvecs = [x] N = len(x) rank = len(x.shape) if not -1 < rank < 2: raise ValueError, "Initial guess must be a scalar or rank-1 sequence." if maxiter is None: maxiter = N * 1000 if maxfun is None: maxfun = N * 1000 direc = eye(N,dtype=float) fval = squeeze(func(x)) x1 = x.copy() iter = 0; ilist = range(N) while True: fx = fval bigind = 0 delta = 0.0 for i in ilist: direc1 = direc[i] fx2 = fval fval, x, direc1 = _linesearch_powell(func, x, direc1, tol=xtol*100) if (fx2 - fval) > delta: delta = fx2 - fval bigind = i iter += 1 if callback is not None: callback(x) if retall: allvecs.append(x) if (2.0*(fx - fval) <= ftol*(abs(fx)+abs(fval))+1e-20): break if fcalls[0] >= maxfun: break if iter >= maxiter: break # Construct the extrapolated point direc1 = x - x1 x2 = 2*x - x1 x1 = x.copy() fx2 = squeeze(func(x2)) if (fx > fx2): t = 2.0*(fx+fx2-2.0*fval) temp = (fx-fval-delta) t *= temp*temp temp = fx-fx2 t -= delta*temp*temp if t < 0.0: fval, x, direc1 = _linesearch_powell(func, x, direc1, tol=xtol*100) direc[bigind] = direc[-1] direc[-1] = direc1 warnflag = 0 if fcalls[0] >= maxfun: warnflag = 1 if disp: print "Warning: Maximum number of function evaluations has "\ "been exceeded." elif iter >= maxiter: warnflag = 2 if disp: print "Warning: Maximum number of iterations has been exceeded" else: if disp: print "Optimization terminated successfully." 
print " Current function value: %f" % fval print " Iterations: %d" % iter print " Function evaluations: %d" % fcalls[0] x = squeeze(x) if full_output: retlist = x, fval, direc, iter, fcalls[0], warnflag if retall: retlist += (allvecs,) else: retlist = x if retall: retlist = (x, allvecs) return retlist
8974d1f4da239d0c491cb9e9b4582fc60c0f37d4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/8974d1f4da239d0c491cb9e9b4582fc60c0f37d4/optimize.py
direc = eye(N,dtype=float)
if direc is None: direc = eye(N, dtype=float) else: direc = asarray(direc, dtype=float)
def fmin_powell(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None, maxfun=None, full_output=0, disp=1, retall=0, callback=None): """Minimize a function using modified Powell's method. Description: Uses a modification of Powell's method to find the minimum of a function of N variables Inputs: func -- the Python function or method to be minimized. x0 -- the initial guess. args -- extra arguments for func. callback -- an optional user-supplied function to call after each iteration. It is called as callback(xk), where xk is the current parameter vector. Outputs: (xopt, {fopt, xi, direc, iter, funcalls, warnflag}, {allvecs}) xopt -- minimizer of function fopt -- value of function at minimum: fopt = func(xopt) direc -- current direction set iter -- number of iterations funcalls -- number of function calls warnflag -- Integer warning flag: 1 : 'Maximum number of function evaluations.' 2 : 'Maximum number of iterations.' allvecs -- a list of solutions at each iteration Additional Inputs: xtol -- line-search error tolerance. ftol -- acceptable relative error in func(xopt) for convergence. maxiter -- the maximum number of iterations to perform. maxfun -- the maximum number of function evaluations. full_output -- non-zero if fval and warnflag outputs are desired. disp -- non-zero to print convergence messages. retall -- non-zero to return a list of the solution at each iteration See also: fmin, fmin_powell, fmin_cg, fmin_bfgs, fmin_ncg -- multivariate local optimizers leastsq -- nonlinear least squares minimizer fmin_l_bfgs_b, fmin_tnc, fmin_cobyla -- constrained multivariate optimizers anneal, brute -- global optimizers fminbound, brent, golden, bracket -- local scalar minimizers fsolve -- n-dimenstional root-finding brentq, brenth, ridder, bisect, newton -- one-dimensional root-finding fixed_point -- scalar fixed-point finder """ # we need to use a mutable object here that we can update in the # wrapper function fcalls, func = wrap_function(func, args) x = asarray(x0) if retall: allvecs = [x] N = len(x) rank = len(x.shape) if not -1 < rank < 2: raise ValueError, "Initial guess must be a scalar or rank-1 sequence." if maxiter is None: maxiter = N * 1000 if maxfun is None: maxfun = N * 1000 direc = eye(N,dtype=float) fval = squeeze(func(x)) x1 = x.copy() iter = 0; ilist = range(N) while True: fx = fval bigind = 0 delta = 0.0 for i in ilist: direc1 = direc[i] fx2 = fval fval, x, direc1 = _linesearch_powell(func, x, direc1, tol=xtol*100) if (fx2 - fval) > delta: delta = fx2 - fval bigind = i iter += 1 if callback is not None: callback(x) if retall: allvecs.append(x) if (2.0*(fx - fval) <= ftol*(abs(fx)+abs(fval))+1e-20): break if fcalls[0] >= maxfun: break if iter >= maxiter: break # Construct the extrapolated point direc1 = x - x1 x2 = 2*x - x1 x1 = x.copy() fx2 = squeeze(func(x2)) if (fx > fx2): t = 2.0*(fx+fx2-2.0*fval) temp = (fx-fval-delta) t *= temp*temp temp = fx-fx2 t -= delta*temp*temp if t < 0.0: fval, x, direc1 = _linesearch_powell(func, x, direc1, tol=xtol*100) direc[bigind] = direc[-1] direc[-1] = direc1 warnflag = 0 if fcalls[0] >= maxfun: warnflag = 1 if disp: print "Warning: Maximum number of function evaluations has "\ "been exceeded." elif iter >= maxiter: warnflag = 2 if disp: print "Warning: Maximum number of iterations has been exceeded" else: if disp: print "Optimization terminated successfully." 
print " Current function value: %f" % fval print " Iterations: %d" % iter print " Function evaluations: %d" % fcalls[0] x = squeeze(x) if full_output: retlist = x, fval, direc, iter, fcalls[0], warnflag if retall: retlist += (allvecs,) else: retlist = x if retall: retlist = (x, allvecs) return retlist
8974d1f4da239d0c491cb9e9b4582fc60c0f37d4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/8974d1f4da239d0c491cb9e9b4582fc60c0f37d4/optimize.py
print "Gegenbauer, a = ", a
def check_gegenbauer(self): a = 5*rand()-0.5 if any(a==0): a = -0.2 print "Gegenbauer, a = ", a Ca0 = gegenbauer(0,a) Ca1 = gegenbauer(1,a) Ca2 = gegenbauer(2,a) Ca3 = gegenbauer(3,a) Ca4 = gegenbauer(4,a) Ca5 = gegenbauer(5,a)
c578408812d8d2503357dbbbd47520ad7539cee1 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/c578408812d8d2503357dbbbd47520ad7539cee1/test_basic.py
jc = jv(0,.1) assert_almost_equal(jc,0.99750156206604002,8)
values = [[0, 0.1, 0.99750156206604002], [2./3, 1e-8, 0.3239028506761532e-5], [2./3, 1e-10, 0.1503423854873779e-6], [3.1, 1e-10, 0.1711956265409013e-32], [2./3, 4.0, -0.2325440850267039], ] for i, (v, x, y) in enumerate(values): yc = jv(v, x) assert_almost_equal(yc, y, 8, err_msg='test
def check_jv(self): jc = jv(0,.1) assert_almost_equal(jc,0.99750156206604002,8)
c578408812d8d2503357dbbbd47520ad7539cee1 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/c578408812d8d2503357dbbbd47520ad7539cee1/test_basic.py
maxnfeval : max. number of function evaluation
maxfun : max. number of function evaluation
def fmin_tnc(func, x0, fprime=None, args=(), approx_grad=False, bounds=None, epsilon=1e-8, scale=None, messages=MSG_ALL, maxCGit=-1, maxfun=None, eta=-1, stepmx=0, accuracy=0, fmin=0, ftol=0, rescale=-1): """Minimize a function with variables subject to bounds, using gradient information. returns (rc, nfeval, x). Inputs: func -- function to minimize. Called as func(x, *args) x0 -- initial guess to minimum fprime -- gradient of func. If None, then func returns the function value and the gradient ( f, g = func(x, *args) ). Called as fprime(x, *args) args -- arguments to pass to function approx_grad -- if true, approximate the gradient numerically bounds -- a list of (min, max) pairs for each element in x, defining the bounds on that parameter. Use None for one of min or max when there is no bound in that direction scale : scaling factors to apply to each variable (a list of floats) if None, the factors are up-low for interval bounded variables and 1+|x] fo the others. defaults to None messages : bit mask used to select messages display during minimization values defined in the optimize.tnc.MSGS dict. defaults to optimize.tnc.MGS_ALL maxCGit : max. number of hessian*vector evaluation per main iteration if maxCGit == 0, the direction chosen is -gradient if maxCGit < 0, maxCGit is set to max(1,min(50,n/2)) defaults to -1 maxnfeval : max. number of function evaluation if None, maxnfeval is set to max(1000, 100*len(x0)) defaults to None eta : severity of the line search. if < 0 or > 1, set to 0.25 defaults to -1 stepmx : maximum step for the line search. may be increased during call if too small, will be set to 10.0 defaults to 0 accuracy : relative precision for finite difference calculations if <= machine_precision, set to sqrt(machine_precision) defaults to 0 fmin : minimum function value estimate defaults to 0 ftol : precision goal for the value of f in the stoping criterion relative to the machine precision and the value of f. if ftol < 0.0, ftol is set to 0.0 defaults to 0 rescale : Scaling factor (in log10) used to trigger rescaling if 0, rescale at each iteration if a large value, never rescale if < 0, rescale is set to 1.3 Outputs: x : the solution (a list of floats) nfeval : the number of function evaluations rc : return code (corresponding message in optimize.tnc.RCSTRINGS) """ n = len(x0) if bounds is None: bounds = [(None,None)] * n if len(bounds) != n: raise ValueError('length of x0 != length of bounds') if approx_grad: def func_and_grad(x): x = asarray(x) f = func(x, *args) g = approx_fprime(x, func, epsilon, *args) return f, list(g) elif fprime is None: def func_and_grad(x): x = asarray(x) f, g = func(x, *args) return f, list(g) else: def func_and_grad(x): x = asarray(x) f = func(x, *args) g = fprime(x, *args) return f, list(g) low = [0]*n up = [0]*n for i in range(n): l,u = bounds[i] if l is None: low[i] = -HUGE_VAL else: low[i] = l if u is None: up[i] = HUGE_VAL else: up[i] = l if scale == None: scale = [] if maxfun == None: maxfun = max(1000, 100*len(x0)) return moduleTNC.minimize(func_and_grad, x0, low, up, scale, messages, maxCGit, maxfun, eta, stepmx, accuracy, fmin, ftol, rescale)
3138646cae18d97c5f5303e1ff1e0679b7fa2422 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/3138646cae18d97c5f5303e1ff1e0679b7fa2422/tnc.py
up[i] = l
up[i] = u
def func_and_grad(x): x = asarray(x) f = func(x, *args) g = fprime(x, *args) return f, list(g)
3138646cae18d97c5f5303e1ff1e0679b7fa2422 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/3138646cae18d97c5f5303e1ff1e0679b7fa2422/tnc.py
rc, nf, x = minimize(function, [-7, 3], bounds=([-10, 10], [1, 10]))
rc, nf, x = fmin_tnc(function, [-7, 3], bounds=([-10, 10], [1, 10]))
def function(x): f = pow(x[0],2.0)+pow(abs(x[1]),3.0) g = [0,0] g[0] = 2.0*x[0] g[1] = 3.0*pow(abs(x[1]),2.0) if x[1]<0: g[1] = -g[1] return f, g
3138646cae18d97c5f5303e1ff1e0679b7fa2422 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/3138646cae18d97c5f5303e1ff1e0679b7fa2422/tnc.py
rc, nf, x = minimize(fg, x, bounds=bounds, messages = MSG_NONE, maxnfeval = 200)
rc, nf, x = fmin_tnc(fg, x, bounds=bounds, messages = MSG_NONE, maxnfeval = 200)
def test(fg, x, bounds, xopt): print "** Test", fg.__name__ rc, nf, x = minimize(fg, x, bounds=bounds, messages = MSG_NONE, maxnfeval = 200) print "After", nf, "function evaluations, TNC returned:", RCSTRINGS[rc] print "x =", x print "exact value =", xopt enorm = 0.0 norm = 1.0 for y,yo in zip(x, xopt): enorm += (y-yo)*(y-yo) norm += yo*yo e = pow(enorm/norm, 0.5) print "Error =", e if e > 1e-8: raise "Test "+fg.__name__+" failed"
3138646cae18d97c5f5303e1ff1e0679b7fa2422 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/3138646cae18d97c5f5303e1ff1e0679b7fa2422/tnc.py
assert isinstance(ij, ArrayType) and (rank(ij) == 2) and (shape(ij) == (len(s), 2))
assert isinstance(ij, ArrayType) and (rank(ij) == 2) \ and (shape(ij) == (2, len(s)))
def __init__(self, arg1, dims=None, nzmax=NZMAX, dtype=None, copy=False): spmatrix.__init__(self) if isdense(arg1): self.dtype = getdtype(dtype, arg1) # Convert the dense array or matrix arg1 to CSC format if rank(arg1) == 1: # Convert to a row vector arg1 = arg1.reshape(1, arg1.shape[0]) if rank(arg1) == 2: #s = asarray(arg1) s = arg1 if s.dtype.char not in 'fdFD': # Use a double array as the source (but leave it alone) s = s*1.0 if (rank(s) == 2): M, N = s.shape dtype = s.dtype func = getattr(sparsetools, _transtabl[dtype.char]+'fulltocsc') ierr = irow = jcol = 0 nnz = (s != 0.0).sum() a = zeros((nnz,), self.dtype) rowa = zeros((nnz,), intc) ptra = zeros((N+1,), intc) while 1: a, rowa, ptra, irow, jcol, ierr = \ func(s, a, rowa, ptra, irow, jcol, ierr) if (ierr == 0): break nnz = nnz + ALLOCSIZE a = resize1d(a, nnz) rowa = resize1d(rowa, nnz) self.data = a self.rowind = rowa self.indptr = ptra self.shape = (M, N) else: raise ValueError, "dense array must have rank 1 or 2" elif isspmatrix(arg1): s = arg1 self.dtype = getdtype(dtype, s) if isinstance(s, csc_matrix): # do nothing but copy information self.shape = s.shape if copy: self.data = s.data.copy() self.rowind = s.rowind.copy() self.indptr = s.indptr.copy() else: self.data = s.data self.rowind = s.rowind self.indptr = s.indptr elif isinstance(s, csr_matrix): self.shape = s.shape func = getattr(sparsetools, s.ftype+'transp') self.data, self.rowind, self.indptr = \ func(s.shape[1], s.data, s.colind, s.indptr) else: temp = s.tocsc() self.data = temp.data self.rowind = temp.rowind self.indptr = temp.indptr self.shape = temp.shape elif type(arg1) == tuple: if isshape(arg1): self.dtype = getdtype(dtype, default=float) # It's a tuple of matrix dimensions (M, N) M, N = arg1 self.data = zeros((nzmax,), self.dtype) self.rowind = zeros((nzmax,), intc) self.indptr = zeros((N+1,), intc) self.shape = (M, N) else: try: # Try interpreting it as (data, ij) (s, ij) = arg1 assert isinstance(ij, ArrayType) and (rank(ij) == 2) and (shape(ij) == (len(s), 2)) except (AssertionError, TypeError, ValueError): try: # Try interpreting it as (data, rowind, indptr) (s, rowind, indptr) = arg1 self.dtype = getdtype(dtype, s) if copy: self.data = array(s) self.rowind = array(rowind) self.indptr = array(indptr) else: self.data = asarray(s) self.rowind = asarray(rowind) self.indptr = asarray(indptr) except: raise ValueError, "unrecognized form for csc_matrix constructor" else: # (data, ij) format self.dtype = getdtype(dtype, s) temp = coo_matrix((s, ij), dims=dims, dtype=dtype).tocsc() self.shape = temp.shape self.data = temp.data self.rowind = temp.rowind self.indptr = temp.indptr else: raise ValueError, "unrecognized form for csc_matrix constructor"
d5ad978cd0a938588df0380720c9a573220ef0f4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/d5ad978cd0a938588df0380720c9a573220ef0f4/sparse.py
temp = coo_matrix((s, ij), dims=dims, dtype=dtype).tocsc()
ijnew = array(ij, copy=copy) temp = coo_matrix((s, ijnew), dims=dims, \ dtype=self.dtype).tocsc()
def __init__(self, arg1, dims=None, nzmax=NZMAX, dtype=None, copy=False): spmatrix.__init__(self) if isdense(arg1): self.dtype = getdtype(dtype, arg1) # Convert the dense array or matrix arg1 to CSC format if rank(arg1) == 1: # Convert to a row vector arg1 = arg1.reshape(1, arg1.shape[0]) if rank(arg1) == 2: #s = asarray(arg1) s = arg1 if s.dtype.char not in 'fdFD': # Use a double array as the source (but leave it alone) s = s*1.0 if (rank(s) == 2): M, N = s.shape dtype = s.dtype func = getattr(sparsetools, _transtabl[dtype.char]+'fulltocsc') ierr = irow = jcol = 0 nnz = (s != 0.0).sum() a = zeros((nnz,), self.dtype) rowa = zeros((nnz,), intc) ptra = zeros((N+1,), intc) while 1: a, rowa, ptra, irow, jcol, ierr = \ func(s, a, rowa, ptra, irow, jcol, ierr) if (ierr == 0): break nnz = nnz + ALLOCSIZE a = resize1d(a, nnz) rowa = resize1d(rowa, nnz) self.data = a self.rowind = rowa self.indptr = ptra self.shape = (M, N) else: raise ValueError, "dense array must have rank 1 or 2" elif isspmatrix(arg1): s = arg1 self.dtype = getdtype(dtype, s) if isinstance(s, csc_matrix): # do nothing but copy information self.shape = s.shape if copy: self.data = s.data.copy() self.rowind = s.rowind.copy() self.indptr = s.indptr.copy() else: self.data = s.data self.rowind = s.rowind self.indptr = s.indptr elif isinstance(s, csr_matrix): self.shape = s.shape func = getattr(sparsetools, s.ftype+'transp') self.data, self.rowind, self.indptr = \ func(s.shape[1], s.data, s.colind, s.indptr) else: temp = s.tocsc() self.data = temp.data self.rowind = temp.rowind self.indptr = temp.indptr self.shape = temp.shape elif type(arg1) == tuple: if isshape(arg1): self.dtype = getdtype(dtype, default=float) # It's a tuple of matrix dimensions (M, N) M, N = arg1 self.data = zeros((nzmax,), self.dtype) self.rowind = zeros((nzmax,), intc) self.indptr = zeros((N+1,), intc) self.shape = (M, N) else: try: # Try interpreting it as (data, ij) (s, ij) = arg1 assert isinstance(ij, ArrayType) and (rank(ij) == 2) and (shape(ij) == (len(s), 2)) except (AssertionError, TypeError, ValueError): try: # Try interpreting it as (data, rowind, indptr) (s, rowind, indptr) = arg1 self.dtype = getdtype(dtype, s) if copy: self.data = array(s) self.rowind = array(rowind) self.indptr = array(indptr) else: self.data = asarray(s) self.rowind = asarray(rowind) self.indptr = asarray(indptr) except: raise ValueError, "unrecognized form for csc_matrix constructor" else: # (data, ij) format self.dtype = getdtype(dtype, s) temp = coo_matrix((s, ij), dims=dims, dtype=dtype).tocsc() self.shape = temp.shape self.data = temp.data self.rowind = temp.rowind self.indptr = temp.indptr else: raise ValueError, "unrecognized form for csc_matrix constructor"
d5ad978cd0a938588df0380720c9a573220ef0f4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/d5ad978cd0a938588df0380720c9a573220ef0f4/sparse.py
a[ij[k, 0], ij[k, 1]] = data[k]
a[ij[0, k], ij[1, k]] = data[k]
def copy(self): new = csc_matrix(self.shape, nzmax=self.nzmax, dtype=self.dtype) new.data = self.data.copy() new.rowind = self.rowind.copy() new.indptr = self.indptr.copy() new._check() return new
d5ad978cd0a938588df0380720c9a573220ef0f4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/d5ad978cd0a938588df0380720c9a573220ef0f4/sparse.py
assert isinstance(ij, ArrayType) and (rank(ij) == 2) and (shape(ij) == (len(s), 2))
assert isinstance(ij, ArrayType) and (rank(ij) == 2) \ and (shape(ij) == (2, len(s)))
def __init__(self, arg1, dims=None, nzmax=NZMAX, dtype=None, copy=False): spmatrix.__init__(self) if isdense(arg1): self.dtype = getdtype(dtype, arg1) # Convert the dense array or matrix arg1 to CSR format if rank(arg1) == 1: # Convert to a row vector arg1 = arg1.reshape(1, arg1.shape[0]) if rank(arg1) == 2: s = arg1 ocsc = csc_matrix(transpose(s)) self.colind = ocsc.rowind self.indptr = ocsc.indptr self.data = ocsc.data self.shape = (ocsc.shape[1], ocsc.shape[0]) else: raise ValueError, "dense array must have rank 1 or 2" elif isspmatrix(arg1): s = arg1 self.dtype = getdtype(dtype, s) if isinstance(s, csr_matrix): # do nothing but copy information self.shape = s.shape if copy: self.data = s.data.copy() self.colind = s.colind.copy() self.indptr = s.indptr.copy() else: self.data = s.data self.colind = s.colind self.indptr = s.indptr elif isinstance(s, csc_matrix): self.shape = s.shape func = getattr(sparsetools, s.ftype+'transp') self.data, self.colind, self.indptr = \ func(s.shape[1], s.data, s.rowind, s.indptr) else: try: temp = s.tocsr() except AttributeError: temp = csr_matrix(s.tocsc()) self.data = temp.data self.colind = temp.colind self.indptr = temp.indptr self.shape = temp.shape elif type(arg1) == tuple: if isshape(arg1): # It's a tuple of matrix dimensions (M, N) M, N = arg1 self.dtype = getdtype(dtype, default=float) self.data = zeros((nzmax,), self.dtype) self.colind = zeros((nzmax,), intc) self.indptr = zeros((M+1,), intc) self.shape = (M, N) else: try: # Try interpreting it as (data, ij) (s, ij) = arg1 assert isinstance(ij, ArrayType) and (rank(ij) == 2) and (shape(ij) == (len(s), 2)) except (AssertionError, TypeError, ValueError, AttributeError): try: # Try interpreting it as (data, colind, indptr) (s, colind, indptr) = arg1 self.dtype = getdtype(dtype, s) if copy: self.data = array(s, dtype=self.dtype) self.colind = array(colind) self.indptr = array(indptr) else: self.data = asarray(s, dtype=self.dtype) self.colind = asarray(colind) self.indptr = asarray(indptr) except: raise ValueError, "unrecognized form for csr_matrix constructor" else: # (data, ij) format ijnew = ij.copy() ijnew[:, 0] = ij[:, 1] ijnew[:, 1] = ij[:, 0] temp = coo_matrix((s, ijnew), dims=dims, dtype=dtype).tocsr() self.shape = temp.shape self.data = temp.data self.colind = temp.colind self.indptr = temp.indptr self.dtype = temp.dtype else: raise ValueError, "unrecognized form for csr_matrix constructor"
d5ad978cd0a938588df0380720c9a573220ef0f4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/d5ad978cd0a938588df0380720c9a573220ef0f4/sparse.py
except: raise ValueError, "unrecognized form for csr_matrix constructor"
def __init__(self, arg1, dims=None, nzmax=NZMAX, dtype=None, copy=False): spmatrix.__init__(self) if isdense(arg1): self.dtype = getdtype(dtype, arg1) # Convert the dense array or matrix arg1 to CSR format if rank(arg1) == 1: # Convert to a row vector arg1 = arg1.reshape(1, arg1.shape[0]) if rank(arg1) == 2: s = arg1 ocsc = csc_matrix(transpose(s)) self.colind = ocsc.rowind self.indptr = ocsc.indptr self.data = ocsc.data self.shape = (ocsc.shape[1], ocsc.shape[0]) else: raise ValueError, "dense array must have rank 1 or 2" elif isspmatrix(arg1): s = arg1 self.dtype = getdtype(dtype, s) if isinstance(s, csr_matrix): # do nothing but copy information self.shape = s.shape if copy: self.data = s.data.copy() self.colind = s.colind.copy() self.indptr = s.indptr.copy() else: self.data = s.data self.colind = s.colind self.indptr = s.indptr elif isinstance(s, csc_matrix): self.shape = s.shape func = getattr(sparsetools, s.ftype+'transp') self.data, self.colind, self.indptr = \ func(s.shape[1], s.data, s.rowind, s.indptr) else: try: temp = s.tocsr() except AttributeError: temp = csr_matrix(s.tocsc()) self.data = temp.data self.colind = temp.colind self.indptr = temp.indptr self.shape = temp.shape elif type(arg1) == tuple: if isshape(arg1): # It's a tuple of matrix dimensions (M, N) M, N = arg1 self.dtype = getdtype(dtype, default=float) self.data = zeros((nzmax,), self.dtype) self.colind = zeros((nzmax,), intc) self.indptr = zeros((M+1,), intc) self.shape = (M, N) else: try: # Try interpreting it as (data, ij) (s, ij) = arg1 assert isinstance(ij, ArrayType) and (rank(ij) == 2) and (shape(ij) == (len(s), 2)) except (AssertionError, TypeError, ValueError, AttributeError): try: # Try interpreting it as (data, colind, indptr) (s, colind, indptr) = arg1 self.dtype = getdtype(dtype, s) if copy: self.data = array(s, dtype=self.dtype) self.colind = array(colind) self.indptr = array(indptr) else: self.data = asarray(s, dtype=self.dtype) self.colind = asarray(colind) self.indptr = asarray(indptr) except: raise ValueError, "unrecognized form for csr_matrix constructor" else: # (data, ij) format ijnew = ij.copy() ijnew[:, 0] = ij[:, 1] ijnew[:, 1] = ij[:, 0] temp = coo_matrix((s, ijnew), dims=dims, dtype=dtype).tocsr() self.shape = temp.shape self.data = temp.data self.colind = temp.colind self.indptr = temp.indptr self.dtype = temp.dtype else: raise ValueError, "unrecognized form for csr_matrix constructor"
d5ad978cd0a938588df0380720c9a573220ef0f4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/d5ad978cd0a938588df0380720c9a573220ef0f4/sparse.py
ijnew = ij.copy() ijnew[:, 0] = ij[:, 1] ijnew[:, 1] = ij[:, 0] temp = coo_matrix((s, ijnew), dims=dims, dtype=dtype).tocsr()
self.dtype = getdtype(dtype, s) ijnew = array([ij[1], ij[0]], copy=copy) temp = coo_matrix((s, ijnew), dims=dims, \ dtype=self.dtype).tocsr()
def __init__(self, arg1, dims=None, nzmax=NZMAX, dtype=None, copy=False): spmatrix.__init__(self) if isdense(arg1): self.dtype = getdtype(dtype, arg1) # Convert the dense array or matrix arg1 to CSR format if rank(arg1) == 1: # Convert to a row vector arg1 = arg1.reshape(1, arg1.shape[0]) if rank(arg1) == 2: s = arg1 ocsc = csc_matrix(transpose(s)) self.colind = ocsc.rowind self.indptr = ocsc.indptr self.data = ocsc.data self.shape = (ocsc.shape[1], ocsc.shape[0]) else: raise ValueError, "dense array must have rank 1 or 2" elif isspmatrix(arg1): s = arg1 self.dtype = getdtype(dtype, s) if isinstance(s, csr_matrix): # do nothing but copy information self.shape = s.shape if copy: self.data = s.data.copy() self.colind = s.colind.copy() self.indptr = s.indptr.copy() else: self.data = s.data self.colind = s.colind self.indptr = s.indptr elif isinstance(s, csc_matrix): self.shape = s.shape func = getattr(sparsetools, s.ftype+'transp') self.data, self.colind, self.indptr = \ func(s.shape[1], s.data, s.rowind, s.indptr) else: try: temp = s.tocsr() except AttributeError: temp = csr_matrix(s.tocsc()) self.data = temp.data self.colind = temp.colind self.indptr = temp.indptr self.shape = temp.shape elif type(arg1) == tuple: if isshape(arg1): # It's a tuple of matrix dimensions (M, N) M, N = arg1 self.dtype = getdtype(dtype, default=float) self.data = zeros((nzmax,), self.dtype) self.colind = zeros((nzmax,), intc) self.indptr = zeros((M+1,), intc) self.shape = (M, N) else: try: # Try interpreting it as (data, ij) (s, ij) = arg1 assert isinstance(ij, ArrayType) and (rank(ij) == 2) and (shape(ij) == (len(s), 2)) except (AssertionError, TypeError, ValueError, AttributeError): try: # Try interpreting it as (data, colind, indptr) (s, colind, indptr) = arg1 self.dtype = getdtype(dtype, s) if copy: self.data = array(s, dtype=self.dtype) self.colind = array(colind) self.indptr = array(indptr) else: self.data = asarray(s, dtype=self.dtype) self.colind = asarray(colind) self.indptr = asarray(indptr) except: raise ValueError, "unrecognized form for csr_matrix constructor" else: # (data, ij) format ijnew = ij.copy() ijnew[:, 0] = ij[:, 1] ijnew[:, 1] = ij[:, 0] temp = coo_matrix((s, ijnew), dims=dims, dtype=dtype).tocsr() self.shape = temp.shape self.data = temp.data self.colind = temp.colind self.indptr = temp.indptr self.dtype = temp.dtype else: raise ValueError, "unrecognized form for csr_matrix constructor"
d5ad978cd0a938588df0380720c9a573220ef0f4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/d5ad978cd0a938588df0380720c9a573220ef0f4/sparse.py
self.dtype = temp.dtype
def __init__(self, arg1, dims=None, nzmax=NZMAX, dtype=None, copy=False): spmatrix.__init__(self) if isdense(arg1): self.dtype = getdtype(dtype, arg1) # Convert the dense array or matrix arg1 to CSR format if rank(arg1) == 1: # Convert to a row vector arg1 = arg1.reshape(1, arg1.shape[0]) if rank(arg1) == 2: s = arg1 ocsc = csc_matrix(transpose(s)) self.colind = ocsc.rowind self.indptr = ocsc.indptr self.data = ocsc.data self.shape = (ocsc.shape[1], ocsc.shape[0]) else: raise ValueError, "dense array must have rank 1 or 2" elif isspmatrix(arg1): s = arg1 self.dtype = getdtype(dtype, s) if isinstance(s, csr_matrix): # do nothing but copy information self.shape = s.shape if copy: self.data = s.data.copy() self.colind = s.colind.copy() self.indptr = s.indptr.copy() else: self.data = s.data self.colind = s.colind self.indptr = s.indptr elif isinstance(s, csc_matrix): self.shape = s.shape func = getattr(sparsetools, s.ftype+'transp') self.data, self.colind, self.indptr = \ func(s.shape[1], s.data, s.rowind, s.indptr) else: try: temp = s.tocsr() except AttributeError: temp = csr_matrix(s.tocsc()) self.data = temp.data self.colind = temp.colind self.indptr = temp.indptr self.shape = temp.shape elif type(arg1) == tuple: if isshape(arg1): # It's a tuple of matrix dimensions (M, N) M, N = arg1 self.dtype = getdtype(dtype, default=float) self.data = zeros((nzmax,), self.dtype) self.colind = zeros((nzmax,), intc) self.indptr = zeros((M+1,), intc) self.shape = (M, N) else: try: # Try interpreting it as (data, ij) (s, ij) = arg1 assert isinstance(ij, ArrayType) and (rank(ij) == 2) and (shape(ij) == (len(s), 2)) except (AssertionError, TypeError, ValueError, AttributeError): try: # Try interpreting it as (data, colind, indptr) (s, colind, indptr) = arg1 self.dtype = getdtype(dtype, s) if copy: self.data = array(s, dtype=self.dtype) self.colind = array(colind) self.indptr = array(indptr) else: self.data = asarray(s, dtype=self.dtype) self.colind = asarray(colind) self.indptr = asarray(indptr) except: raise ValueError, "unrecognized form for csr_matrix constructor" else: # (data, ij) format ijnew = ij.copy() ijnew[:, 0] = ij[:, 1] ijnew[:, 1] = ij[:, 0] temp = coo_matrix((s, ijnew), dims=dims, dtype=dtype).tocsr() self.shape = temp.shape self.data = temp.data self.colind = temp.colind self.indptr = temp.indptr self.dtype = temp.dtype else: raise ValueError, "unrecognized form for csr_matrix constructor"
d5ad978cd0a938588df0380720c9a573220ef0f4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/d5ad978cd0a938588df0380720c9a573220ef0f4/sparse.py
A = coo_matrix(obj, ij, [dims])
A = coo_matrix((obj, ij), [dims])
def resize(self, shape): """ Resize the matrix to dimensions given by 'shape', removing any non-zero elements that lie outside. """ M, N = self.shape try: newM, newN = shape assert newM == int(newM) and newM > 0 assert newN == int(newN) and newN > 0 except (TypeError, ValueError, AssertionError): raise TypeError, "dimensions must be a 2-tuple of positive"\ " integers" if newM < M or newN < N: # Remove all elements outside new dimensions for (i,j) in self.keys(): if i >= newM or j >= newN: del self[i,j] self.shape = (newM, newN)
d5ad978cd0a938588df0380720c9a573220ef0f4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/d5ad978cd0a938588df0380720c9a573220ef0f4/sparse.py
ij[:][0] and ij[:][1]
ij[0][:] and ij[1][:]
def resize(self, shape): """ Resize the matrix to dimensions given by 'shape', removing any non-zero elements that lie outside. """ M, N = self.shape try: newM, newN = shape assert newM == int(newM) and newM > 0 assert newN == int(newN) and newN > 0 except (TypeError, ValueError, AssertionError): raise TypeError, "dimensions must be a 2-tuple of positive"\ " integers" if newM < M or newN < N: # Remove all elements outside new dimensions for (i,j) in self.keys(): if i >= newM or j >= newN: del self[i,j] self.shape = (newM, newN)
d5ad978cd0a938588df0380720c9a573220ef0f4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/d5ad978cd0a938588df0380720c9a573220ef0f4/sparse.py
1. obj[:]: the entries of the matrix, in any order 2. ij[:][0]: the row indices of the matrix entries 3. ij[:][1]: the column indices of the matrix entries
1. obj[:] the entries of the matrix, in any order 2. ij[0][:] the row indices of the matrix entries 3. ij[1][:] the column indices of the matrix entries
def resize(self, shape): """ Resize the matrix to dimensions given by 'shape', removing any non-zero elements that lie outside. """ M, N = self.shape try: newM, newN = shape assert newM == int(newM) and newM > 0 assert newN == int(newN) and newN > 0 except (TypeError, ValueError, AssertionError): raise TypeError, "dimensions must be a 2-tuple of positive"\ " integers" if newM < M or newN < N: # Remove all elements outside new dimensions for (i,j) in self.keys(): if i >= newM or j >= newN: del self[i,j] self.shape = (newM, newN)
d5ad978cd0a938588df0380720c9a573220ef0f4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/d5ad978cd0a938588df0380720c9a573220ef0f4/sparse.py
A[ij[k][0], ij[k][1]] = obj[k]
A[ij[0][k], ij[1][k] = obj[k]
def resize(self, shape): """ Resize the matrix to dimensions given by 'shape', removing any non-zero elements that lie outside. """ M, N = self.shape try: newM, newN = shape assert newM == int(newM) and newM > 0 assert newN == int(newN) and newN > 0 except (TypeError, ValueError, AssertionError): raise TypeError, "dimensions must be a 2-tuple of positive"\ " integers" if newM < M or newN < N: # Remove all elements outside new dimensions for (i,j) in self.keys(): if i >= newM or j >= newN: del self[i,j] self.shape = (newM, newN)
d5ad978cd0a938588df0380720c9a573220ef0f4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/d5ad978cd0a938588df0380720c9a573220ef0f4/sparse.py
obj, ij_in = arg1
obj, ij = arg1
def __init__(self, arg1, dims=None, dtype=None): spmatrix.__init__(self) if isinstance(arg1, tuple): try: obj, ij_in = arg1 except: raise TypeError, "invalid input format" elif arg1 is None: # clumsy! We should make ALL arguments # keyword arguments instead! # Initialize an empty matrix. if not isinstance(dims, tuple) or not isinstance(dims[0], int): raise TypeError, "dimensions not understood" self.shape = dims self.dtype = getdtype(dtype, default=float) self.data = array([]) self.row = array([]) self.col = array([]) self._check() return self.dtype = getdtype(dtype, obj, default=float) try: # Assume the first calling convention # assert len(ij) == 2 if len(ij_in) != 2: if isdense( ij_in ) and (ij_in.shape[1] == 2): ij = (ij_in[:,0], ij_in[:,1]) else: raise AssertionError else: ij = ij_in if dims is None: M = int(amax(ij[0])) + 1 N = int(amax(ij[1])) + 1 self.shape = (M, N) else: # Use 2 steps to ensure dims has length 2. M, N = dims self.shape = (M, N) self.row = asarray(ij[0]) self.col = asarray(ij[1]) self.data = asarray(obj, dtype=self.dtype) self._check() except Exception: raise TypeError, "invalid input format"
d5ad978cd0a938588df0380720c9a573220ef0f4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/d5ad978cd0a938588df0380720c9a573220ef0f4/sparse.py
if len(ij_in) != 2: if isdense( ij_in ) and (ij_in.shape[1] == 2): ij = (ij_in[:,0], ij_in[:,1]) else: raise AssertionError else: ij = ij_in if dims is None: M = int(amax(ij[0])) + 1 N = int(amax(ij[1])) + 1 self.shape = (M, N) else: M, N = dims self.shape = (M, N) self.row = asarray(ij[0]) self.col = asarray(ij[1]) self.data = asarray(obj, dtype=self.dtype) self._check() except Exception:
if len(ij) != 2: raise TypeError except TypeError:
def __init__(self, arg1, dims=None, dtype=None): spmatrix.__init__(self) if isinstance(arg1, tuple): try: obj, ij_in = arg1 except: raise TypeError, "invalid input format" elif arg1 is None: # clumsy! We should make ALL arguments # keyword arguments instead! # Initialize an empty matrix. if not isinstance(dims, tuple) or not isinstance(dims[0], int): raise TypeError, "dimensions not understood" self.shape = dims self.dtype = getdtype(dtype, default=float) self.data = array([]) self.row = array([]) self.col = array([]) self._check() return self.dtype = getdtype(dtype, obj, default=float) try: # Assume the first calling convention # assert len(ij) == 2 if len(ij_in) != 2: if isdense( ij_in ) and (ij_in.shape[1] == 2): ij = (ij_in[:,0], ij_in[:,1]) else: raise AssertionError else: ij = ij_in if dims is None: M = int(amax(ij[0])) + 1 N = int(amax(ij[1])) + 1 self.shape = (M, N) else: # Use 2 steps to ensure dims has length 2. M, N = dims self.shape = (M, N) self.row = asarray(ij[0]) self.col = asarray(ij[1]) self.data = asarray(obj, dtype=self.dtype) self._check() except Exception: raise TypeError, "invalid input format"
d5ad978cd0a938588df0380720c9a573220ef0f4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/d5ad978cd0a938588df0380720c9a573220ef0f4/sparse.py
def complex(a, b, complex=__builtins__.complex): c = zeros(a.shape, dtype=complex)
def complex(a, b): c = zeros(a.shape, dtype=complex_)
def complex(a, b, complex=__builtins__.complex): c = zeros(a.shape, dtype=complex) c.real = a c.imag = b return c
5a55ddd6f5dccf6a603d032b045d6859a6e2a4f6 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/5a55ddd6f5dccf6a603d032b045d6859a6e2a4f6/test_numexpr.py
tests.append(('OPERATIONS', optests))
def complex(a, b, complex=__builtins__.complex): c = zeros(a.shape, dtype=complex) c.real = a c.imag = b return c
5a55ddd6f5dccf6a603d032b045d6859a6e2a4f6 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/5a55ddd6f5dccf6a603d032b045d6859a6e2a4f6/test_numexpr.py
x = random.randint(1,2**31-1)
x = random.randint(1,2**31-2)
def seed(x=0,y=0): """seed(x, y), set the seed using the integers x, y; Set a random one from clock if y == 0 """ if type (x) != types.IntType or type (y) != types.IntType : raise ArgumentError, "seed requires integer arguments." if y == 0: import random y = int(rv.initial_seed()) x = random.randint(1,2**31-1) rand.set_seeds(x,y)
dcca2a1be393d7a19d876d002c9ee41602d4cb5f /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/dcca2a1be393d7a19d876d002c9ee41602d4cb5f/distributions.py
self.isCSR = 1
self.isCSR = 0
def _getIndx( self, mtx ):
1ab51e60f923d1b3d3f9fae58333f00489f07b56 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/1ab51e60f923d1b3d3f9fae58333f00489f07b56/umfpack.py
self.isCSR = 0
self.isCSR = 1
def _getIndx( self, mtx ):
1ab51e60f923d1b3d3f9fae58333f00489f07b56 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/1ab51e60f923d1b3d3f9fae58333f00489f07b56/umfpack.py
"""Print all status information."""
"""Print all status information. Output depends on self.control[UMFPACK_PRL]."""
def report_info( self ): """Print all status information.""" self.funs.report_info( self.control, self.info )
1ab51e60f923d1b3d3f9fae58333f00489f07b56 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/1ab51e60f923d1b3d3f9fae58333f00489f07b56/umfpack.py
assert rt == T, 'Expected %s, got %s type' % (T, rt)
assert N.dtype(rt) == N.dtype(T), \ 'Expected %s, got %s type' % (T, rt)
def test_smallest_int_sctype(self): # Smallest int sctype with testing recaster params = sctype_attributes() mmax = params[N.int32]['max'] mmin = params[N.int32]['min'] for kind in ('int', 'uint'): for T in N.sctypes[kind]: mx = params[T]['max'] mn = params[T]['min'] rt = self.recaster.smallest_int_sctype(mx, mn) if mx <= mmax and mn >= mmin: assert rt == N.int32, 'Expected int32 type' else: assert rt is None, 'Expected None, got %s for %s' % (T, rt) # Smallest int sctype with full recaster RF = Recaster() test_triples = [(N.uint8, 0, 255), (N.int8, -128, 0), (N.uint16, 0, params[N.uint16]['max']), (N.int16, params[N.int16]['min'], 0), (N.uint32, 0, params[N.uint32]['max']), (N.int32, params[N.int32]['min'], 0), (N.uint64, 0, params[N.uint64]['max']), (N.int64, params[N.int64]['min'], 0)] for T, mn, mx in test_triples: rt = RF.smallest_int_sctype(mx, mn) assert rt == T, 'Expected %s, got %s type' % (T, rt)
a701824a0efd9e5698d8a33c1423c306e04dc0b4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/a701824a0efd9e5698d8a33c1423c306e04dc0b4/test_recaster.py
## def __del__(self):
10a9f586935a179066a0a38a59a0ccd013cd4a66 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/10a9f586935a179066a0a38a59a0ccd013cd4a66/test_gui_thread.py
def __init__(self, parent):
10a9f586935a179066a0a38a59a0ccd013cd4a66 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/10a9f586935a179066a0a38a59a0ccd013cd4a66/test_gui_thread.py
panel = TestPanel(self)
self.panel = TestPanel(self)
def __init__(self, parent):
10a9f586935a179066a0a38a59a0ccd013cd4a66 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/10a9f586935a179066a0a38a59a0ccd013cd4a66/test_gui_thread.py
def __init__(self, parent):
10a9f586935a179066a0a38a59a0ccd013cd4a66 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/10a9f586935a179066a0a38a59a0ccd013cd4a66/test_gui_thread.py
def is_alive(obj): if obj() is None: return 0 else: return 1
def is_alive(obj): if obj() is None: return 0 else: return 1
10a9f586935a179066a0a38a59a0ccd013cd4a66 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/10a9f586935a179066a0a38a59a0ccd013cd4a66/test_gui_thread.py
time.sleep(0.25)
yield()
def check_wx_class(self): "Checking a wxFrame proxied class" for i in range(5): f = gui_thread.register(TestFrame) a = f(None) p = weakref.ref(a) a.Close(1) del a time.sleep(0.25) # sync threads # this checks for memory leaks self.assertEqual(is_alive(p), 0)
10a9f586935a179066a0a38a59a0ccd013cd4a66 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/10a9f586935a179066a0a38a59a0ccd013cd4a66/test_gui_thread.py
def check_normal_class(self): "Checking non-wxWindows proxied class " f = gui_thread.register(TestClass) a = f() p = weakref.ref(a) # the reference count has to be 2. self.assertEqual(sys.getrefcount(a), 2) del a self.assertEqual(is_alive(p), 0)
10a9f586935a179066a0a38a59a0ccd013cd4a66 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/10a9f586935a179066a0a38a59a0ccd013cd4a66/test_gui_thread.py
class NoThreadTestFrame(wxFrame):
class TesterApp (wxApp): def OnInit (self): f = TesterFrame(None) return true class TesterFrame(wxFrame):
def test(): all_tests = test_suite() runner = unittest.TextTestRunner(verbosity=2) runner.run(all_tests)
10a9f586935a179066a0a38a59a0ccd013cd4a66 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/10a9f586935a179066a0a38a59a0ccd013cd4a66/test_gui_thread.py
wxFrame.__init__(self, parent, -1, "Hello Test")
wxFrame.__init__(self, parent, -1, "Tester") self.CreateStatusBar() sizer = wxBoxSizer(wxHORIZONTAL) ID = NewId() btn = wxButton(self, ID, "Start Test") EVT_BUTTON(self, ID, self.OnStart) msg = "Click to start running tests. "\ "Tester Output will be shown on the shell." btn.SetToolTip(wxToolTip(msg)) sizer.Add(btn, 1, wxEXPAND) ID = NewId() btn = wxButton(self, ID, "Close") EVT_BUTTON(self, ID, self.OnClose) btn.SetToolTip(wxToolTip("Click to close the tester.")) sizer.Add(btn, 1, wxEXPAND) sizer.Fit(self) self.SetAutoLayout(true) self.SetSizer(sizer) self.Show(1) def OnStart(self, evt): self.SetStatusText("Running Tests")
def __init__(self, parent): wxFrame.__init__(self, parent, -1, "Hello Test") test() self.Close(1)
10a9f586935a179066a0a38a59a0ccd013cd4a66 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/10a9f586935a179066a0a38a59a0ccd013cd4a66/test_gui_thread.py
app = wxPySimpleApp() frame = NoThreadTestFrame(None)
app = TesterApp()
def __init__(self, parent): wxFrame.__init__(self, parent, -1, "Hello Test") test() self.Close(1)
10a9f586935a179066a0a38a59a0ccd013cd4a66 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/10a9f586935a179066a0a38a59a0ccd013cd4a66/test_gui_thread.py
'libraries' : ['specfun']
'libraries' : ['specfun'], 'depends':specfun
def configuration(parent_package='',parent_path=None): from scipy_distutils.core import Extension from scipy_distutils.misc_util import get_path,\ default_config_dict, dot_join from scipy_distutils.system_info import dict_append, get_info package = 'special' config = default_config_dict(package,parent_package) local_path = get_path(__name__,parent_path) numpy_info = get_info('numpy',notfound_action=2) define_macros = [] if sys.byteorder == "little": define_macros.append(('USE_MCONF_LE',None)) else: define_macros.append(('USE_MCONF_BE',None)) if sys.platform=='win32': define_macros.append(('NOINFINITIES',None)) define_macros.append(('NONANS',None)) c_misc = glob(os.path.join(local_path,'c_misc','*.c')) cephes = glob(os.path.join(local_path,'cephes','*.c')) if sys.platform=='win32': cephes = [f for f in cephes if os.path.basename(f)!='fabs.c'] mach = glob(os.path.join(local_path,'mach','*.f')) amos = glob(os.path.join(local_path,'amos','*.f')) toms = glob(os.path.join(local_path,'toms','*.f')) cdf = glob(os.path.join(local_path,'cdflib','*.f')) specfun = glob(os.path.join(local_path, 'specfun','*.f')) # C libraries config['libraries'].append(('c_misc',{'sources':c_misc})) config['libraries'].append(('cephes',{'sources':cephes, 'macros':define_macros})) # Fortran libraries config['libraries'].append(('mach',{'sources':mach})) config['libraries'].append(('amos',{'sources':amos})) config['libraries'].append(('toms',{'sources':toms})) config['libraries'].append(('cdf',{'sources':cdf})) config['libraries'].append(('specfun',{'sources':specfun})) # Extension sources = ['cephesmodule.c', 'amos_wrappers.c', 'specfun_wrappers.c', 'toms_wrappers.c','cdf_wrappers.c','ufunc_extras.c'] sources = [os.path.join(local_path,x) for x in sources] ext_args = {} dict_append(ext_args, name=dot_join(parent_package,package,'cephes'), sources = sources, libraries = ['amos','toms','c_misc','cephes','mach', 'cdf', 'specfun'], define_macros = define_macros ) dict_append(ext_args,**numpy_info) ext = Extension(**ext_args) config['ext_modules'].append(ext) ext_args = {'name':dot_join(parent_package,package,'specfun'), 'sources':[os.path.join(local_path,'specfun.pyf')], 'f2py_options':['--no-wrap-functions'], #'define_macros':[('F2PY_REPORT_ATEXIT_DISABLE',None)], 'libraries' : ['specfun'] } dict_append(ext_args,**numpy_info) ext = Extension(**ext_args) ext.need_fcompiler_opts = 1 config['ext_modules'].append(ext) return config
3e5294d3405d9e791160ac9b52973d66c8599116 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12971/3e5294d3405d9e791160ac9b52973d66c8599116/setup_special.py

Dataset Card for CoCoNuT-Python(2010)

Dataset Summary

Part of the data used to train the models in the "CoCoNuT: Combining Context-Aware Neural Translation Models using Ensemble for Program Repair" paper. These datasets contain raw data extracted from GitHub, GitLab, and Bitbucket, and have neither been shuffled nor tokenized. The year in the dataset's name is the cutoff year: the newest commit included in the dataset dates from that year.

Languages

  • Python

Dataset Structure

Data Fields

The dataset consists of 4 columns: add, rem, context, and meta. These match the original dataset files: add.txt, rem.txt, context.txt, and meta.txt.
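
As a minimal sketch, the four columns can be accessed directly once the dataset is loaded with the datasets library; the repository id h4iku/coconut_python2010 and the "train" split name are assumptions and may need to be adapted to the actual hosting location:

from datasets import load_dataset

# Load the raw (unshuffled, untokenized) data; repo id and split name are assumed.
ds = load_dataset("h4iku/coconut_python2010", split="train")

example = ds[0]
print(example["rem"])      # buggy line/hunk
print(example["add"])      # fixed line/hunk
print(example["context"])  # in-lined buggy function
print(example["meta"])     # project id and path metadata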

Data Instances

The 4 columns are aligned row by row: the i-th entry of rem, add, context, and meta all describe the same instance. For example:

First 5 rows of rem (i.e., the buggy line/hunk):

1 public synchronized StringBuffer append(char ch)
2 ensureCapacity_unsynchronized(count + 1); value[count++] = ch; return this;
3 public String substring(int beginIndex, int endIndex)
4 if (beginIndex < 0 || endIndex > count || beginIndex > endIndex) throw new StringIndexOutOfBoundsException(); if (beginIndex == 0 && endIndex == count) return this; int len = endIndex - beginIndex;  return new String(value, beginIndex + offset, len, (len << 2) >= value.length);
5 public Object next() {

First 5 rows of add (i.e., the fixed line/hunk):

1 public StringBuffer append(Object obj)
2 return append(obj == null ? "null" : obj.toString());
3 public String substring(int begin)
4 return substring(begin, count);
5 public FSEntry next() {

These map to the 5 instances:

- public synchronized StringBuffer append(char ch)
+ public StringBuffer append(Object obj)
- ensureCapacity_unsynchronized(count + 1); value[count++] = ch; return this;
+ return append(obj == null ? "null" : obj.toString());
- public String substring(int beginIndex, int endIndex)
+ public String substring(int begin)
- if (beginIndex < 0 || endIndex > count || beginIndex > endIndex) throw new StringIndexOutOfBoundsException(); if (beginIndex == 0 && endIndex == count) return this; int len = endIndex - beginIndex;  return new String(value, beginIndex + offset, len, (len << 2) >= value.length);
+ return substring(begin, count);
- public Object next() {
+ public FSEntry next() { 
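
The same pairing can be reproduced programmatically; this is an illustrative sketch that reuses the ds object loaded above and relies on the datasets library's select method:

# Print the first five instances as minimal diffs (rem = buggy, add = fixed).
for row in ds.select(range(5)):
    print("-", row["rem"])
    print("+", row["add"])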

The context column contains the associated "context": the (in-lined) buggy function, including the buggy lines and comments. For example, the context of

public synchronized StringBuffer append(char ch)

is its associated function:

public synchronized StringBuffer append(char ch)  {    ensureCapacity_unsynchronized(count + 1);    value[count++] = ch;    return this;  }

The meta column contains some metadata about the project:

1056	/local/tlutelli/issta_data/temp/all_java0context/java/2006_temp/2006/1056/68a6301301378680519f2b146daec37812a1bc22/StringBuffer.java/buggy/core/src/classpath/java/java/lang/StringBuffer.java

1056 is the project id. /local/... is the absolute path to the buggy file. This path can be parsed to extract the commit id (68a6301301378680519f2b146daec37812a1bc22), the file name (StringBuffer.java), and the original path within the project (core/src/classpath/java/java/lang/StringBuffer.java).
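
A minimal parsing sketch, assuming every meta entry follows the layout of the example above (project id, whitespace, then a path containing a 40-character commit hash, the file name, and a buggy marker); the helper name parse_meta is hypothetical:

import re

def parse_meta(meta_line):
    # Split the project id from the absolute path.
    project_id, path = meta_line.split(maxsplit=1)
    parts = path.strip().split("/")
    # The commit id is assumed to be the 40-character hexadecimal path component.
    idx = next(i for i, p in enumerate(parts) if re.fullmatch(r"[0-9a-f]{40}", p))
    commit_id = parts[idx]
    file_name = parts[idx + 1]
    # parts[idx + 2] is the "buggy" marker; the rest is the path inside the project.
    original_path = "/".join(parts[idx + 3:])
    return project_id, commit_id, file_name, original_path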

Number of projects: 13,899
Number of instances: 480,777

Dataset Creation

Curation Rationale

Data is collected to train automated program repair (APR) models.

Citation Information

@inproceedings{lutellierCoCoNuTCombiningContextaware2020,
  title = {{{CoCoNuT}}: Combining Context-Aware Neural Translation Models Using Ensemble for Program Repair},
  shorttitle = {{{CoCoNuT}}},
  booktitle = {Proceedings of the 29th {{ACM SIGSOFT International Symposium}} on {{Software Testing}} and {{Analysis}}},
  author = {Lutellier, Thibaud and Pham, Hung Viet and Pang, Lawrence and Li, Yitong and Wei, Moshi and Tan, Lin},
  year = {2020},
  month = jul,
  series = {{{ISSTA}} 2020},
  pages = {101--114},
  publisher = {{Association for Computing Machinery}},
  address = {{New York, NY, USA}},
  doi = {10.1145/3395363.3397369},
  url = {https://doi.org/10.1145/3395363.3397369},
  urldate = {2022-12-06},
  isbn = {978-1-4503-8008-9},
  keywords = {AI and Software Engineering,Automated program repair,Deep Learning,Neural Machine Translation}
}