Dataset schema (field: type, observed range):

    repository_name            string, length 7-54
    func_path_in_repository    string, length 4-175
    func_name                  string, length 1-129
    whole_func_string          string, length 91-50.9k
    language                   string, 1 distinct value
    func_code_string           string, length 91-50.9k
    func_code_tokens           sequence
    func_documentation_string  string, length 1-31.6k
    func_documentation_tokens  sequence
    split_name                 string, 1 distinct value
    func_code_url              string, length 89-268
    score                      float64, range 0-0.09

repository_name: PmagPy/PmagPy
func_path_in_repository: SPD/spd.py
func_name: PintPars.get_ptrm_dec_and_inc
language: python
whole_func_string:

def get_ptrm_dec_and_inc(self):
    """not included in spd."""
    PTRMS = self.PTRMS[1:]
    CART_pTRMS_orig = numpy.array([lib_direct.dir2cart(row[1:4]) for row in PTRMS])
    #B_lab_dir = [self.B_lab_dir[0], self.B_lab_dir[1], 1.] # dir
    tmin, tmax = self.t_Arai[0], self.t_Arai[-1]
    (ptrms_dec_Free, ptrms_inc_Free, ptrm_best_fit_vector_Free, ptrm_tau_Free,
     ptrm_v_Free, ptrm_mass_center_Free, ptrm_PCA_sigma_Free) = lib_direct.get_dec_and_inc(
        CART_pTRMS_orig, self.t_Arai, tmin, tmax, anchored=False)
    ptrms_angle = lib_direct.get_ptrms_angle(ptrm_best_fit_vector_Free, self.B_lab_cart)
    self.pars['ptrms_dec_Free'], self.pars['ptrms_inc_Free'] = ptrms_dec_Free, ptrms_inc_Free
    self.pars['ptrms_tau_Free'] = ptrm_tau_Free
    self.pars['ptrms_angle_Free'] = ptrms_angle
split_name: train
func_code_url: https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/SPD/spd.py#L391-L401
score: 0.008454

repository_name: molmod/molmod
func_path_in_repository: molmod/graphs.py
func_name: Graph.full_match
language: python
whole_func_string:

def full_match(self, other):
    """Find the mapping between vertex indexes in self and other.

    This also works on disconnected graphs. Derived classes should just
    implement get_vertex_string and get_edge_string to make this method
    aware of the different nature of certain vertices. In case molecules,
    this would make the algorithm sensitive to atom numbers etc.
    """
    # we need normalize subgraphs because these graphs are used as patterns.
    graphs0 = [
        self.get_subgraph(group, normalize=True)
        for group in self.independent_vertices
    ]
    graphs1 = [
        other.get_subgraph(group)
        for group in other.independent_vertices
    ]
    if len(graphs0) != len(graphs1):
        return
    matches = []
    for graph0 in graphs0:
        pattern = EqualPattern(graph0)
        found_match = False
        for i, graph1 in enumerate(graphs1):
            local_matches = list(GraphSearch(pattern)(graph1, one_match=True))
            if len(local_matches) == 1:
                match = local_matches[0]
                # we need to restore the relation between the normalized
                # graph0 and its original indexes
                old_to_new = OneToOne((
                    (j, i) for i, j in enumerate(graph0._old_vertex_indexes)
                ))
                matches.append(match * old_to_new)
                del graphs1[i]
                found_match = True
                break
        if not found_match:
            return
    result = OneToOne()
    for match in matches:
        result.add_relations(match.forward.items())
    return result
split_name: train
func_code_url: https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/graphs.py#L782-L828
score: 0.00275

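Not part of the dataset record: a hedged usage sketch for full_match, assuming
molmod's Graph constructor accepts an iterable of vertex-index pairs as its
documentation describes.

# Two small isomorphic graphs, each with two disconnected components.
from molmod.graphs import Graph

g0 = Graph([(0, 1), (1, 2), (3, 4)])  # a 3-vertex path plus a lone edge
g1 = Graph([(2, 0), (0, 1), (3, 4)])  # same shape, different vertex labels

match = g0.full_match(g1)
if match is None:
    print("graphs do not match")
else:
    print(match.forward)  # vertex-index mapping from g0 into g1
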
repository_name: spacetelescope/drizzlepac
func_path_in_repository: drizzlepac/pixtosky.py
func_name: xy2rd
language: python
whole_func_string:

def xy2rd(input, x=None, y=None, coords=None, coordfile=None, colnames=None,
          separator=None, hms=True, precision=6, output=None, verbose=True):
    """ Primary interface to perform coordinate transformations from
        pixel to sky coordinates using STWCS and full distortion models
        read from the input image header.
    """
    single_coord = False
    # Only use value provided in `coords` if nothing has been specified for coordfile
    if coords is not None and coordfile is None:
        coordfile = coords
        warnings.simplefilter('always', DeprecationWarning)
        warnings.warn("Please update calling code to pass in `coordfile` instead of `coords`.",
                      category=DeprecationWarning)
        warnings.simplefilter('default', DeprecationWarning)

    if coordfile is not None:
        if colnames in blank_list:
            colnames = ['c1', 'c2']
        # Determine columns which contain pixel positions
        cols = util.parse_colnames(colnames, coordfile)
        # read in columns from input coordinates file
        xyvals = np.loadtxt(coordfile, usecols=cols, delimiter=separator)
        if xyvals.ndim == 1:  # only 1 entry in coordfile
            xlist = [xyvals[0].copy()]
            ylist = [xyvals[1].copy()]
        else:
            xlist = xyvals[:, 0].copy()
            ylist = xyvals[:, 1].copy()
        del xyvals
    else:
        if isinstance(x, np.ndarray):
            xlist = x.tolist()
            ylist = y.tolist()
        elif not isinstance(x, list):
            xlist = [x]
            ylist = [y]
            single_coord = True
        else:
            xlist = x
            ylist = y

    # start by reading in WCS+distortion info for input image
    inwcs = wcsutil.HSTWCS(input)
    if inwcs.wcs.is_unity():
        print("####\nNo valid WCS found in {}.\n Results may be invalid.\n####\n".format(input))

    # Now, convert pixel coordinates into sky coordinates
    dra, ddec = inwcs.all_pix2world(xlist, ylist, 1)

    # convert to HH:MM:SS.S format, if specified
    if hms:
        ra, dec = wcs_functions.ddtohms(dra, ddec, precision=precision)
        rastr = ra
        decstr = dec
    else:
        # add formatting based on precision here...
        rastr = []
        decstr = []
        fmt = "%." + repr(precision) + "f"
        for r, d in zip(dra, ddec):
            rastr.append(fmt % r)
            decstr.append(fmt % d)
        ra = dra
        dec = ddec

    if verbose or (not verbose and util.is_blank(output)):
        print('# Coordinate transformations for ', input)
        print('# X Y RA Dec\n')
        for x, y, r, d in zip(xlist, ylist, rastr, decstr):
            print("%.4f %.4f %s %s" % (x, y, r, d))

    # Create output file, if specified
    if output:
        f = open(output, mode='w')
        f.write("# Coordinates converted from %s\n" % input)
        for r, d in zip(rastr, decstr):
            f.write('%s %s\n' % (r, d))
        f.close()
        print('Wrote out results to: ', output)

    if single_coord:
        ra = ra[0]
        dec = dec[0]
    return ra, dec
split_name: train
func_code_url: https://github.com/spacetelescope/drizzlepac/blob/15bec3c929a6a869d9e71b9398ced43ede0620f1/drizzlepac/pixtosky.py#L102-L187
score: 0.01685

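Not part of the dataset record: a hedged usage sketch following the call style
in the drizzlepac documentation; "input_flt.fits" is a hypothetical calibrated
image name.

from drizzlepac import pixtosky

# One pixel position; hms=True (the default) returns sexagesimal strings,
# and a single scalar input yields scalar outputs.
ra, dec = pixtosky.xy2rd("input_flt.fits[sci,1]", x=100.0, y=100.0)
print(ra, dec)
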
repository_name: oscarlazoarjona/fast
func_path_in_repository: fast/bloch.py
func_name: independent_get_coefficients
language: python
whole_func_string:

def independent_get_coefficients(coef, rhouv, s, i, j, k, u, v, unfolding,
                                 matrix_form):
    r"""Get the indices mu, nu, and term coefficients for linear terms.

    >>> from fast.symbolic import define_density_matrix
    >>> Ne = 2
    >>> coef = 1+2j
    >>> rhouv = define_density_matrix(Ne)[1, 1]
    >>> s, i, j, k, u, v = (1, 1, 0, 1, 1, 1)
    >>> unfolding = Unfolding(Ne, real=True, normalized=True)
    >>> independent_get_coefficients(coef, rhouv, s, i, j, k, u, v,
    ...                              unfolding, False)
    [[1, None, -2.00000000000000, False, False]]

    """
    if matrix_form:
        coef = -coef
    Mu = unfolding.Mu
    mu = Mu(s, i, j)
    rhouv_isconjugated = False
    if s == 1:
        coef_list = [[mu, None, -im(coef), matrix_form, rhouv_isconjugated]]
    elif s == -1:
        coef_list = [[mu, None, re(coef), matrix_form, rhouv_isconjugated]]
    else:
        coef_list = [[mu, None, coef, matrix_form, rhouv_isconjugated]]
    return coef_list
split_name: train
func_code_url: https://github.com/oscarlazoarjona/fast/blob/3e5400672af2a7b7cc616e7f4aa10d7672720222/fast/bloch.py#L1354-L1381
score: 0.000974

repository_name: ethereum/py-evm
func_path_in_repository: eth/vm/state.py
func_name: BaseState.apply_transaction
language: python
whole_func_string:

def apply_transaction(
        self,
        transaction: BaseOrSpoofTransaction) -> 'BaseComputation':
    """
    Apply transaction to the vm state

    :param transaction: the transaction to apply
    :return: the computation
    """
    if self.state_root != BLANK_ROOT_HASH and not self._account_db.has_root(self.state_root):
        raise StateRootNotFound(self.state_root)
    else:
        return self.execute_transaction(transaction)
split_name: train
func_code_url: https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/vm/state.py#L311-L323
score: 0.006173

repository_name: theislab/scanpy
func_path_in_repository: scanpy/utils.py
func_name: select_groups
language: python
whole_func_string:

def select_groups(adata, groups_order_subset='all', key='groups'):
    """Get subset of groups in adata.obs[key]."""
    groups_order = adata.obs[key].cat.categories
    if key + '_masks' in adata.uns:
        groups_masks = adata.uns[key + '_masks']
    else:
        groups_masks = np.zeros(
            (len(adata.obs[key].cat.categories), adata.obs[key].values.size),
            dtype=bool)
        for iname, name in enumerate(adata.obs[key].cat.categories):
            # if the name is not found, fallback to index retrieval
            if adata.obs[key].cat.categories[iname] in adata.obs[key].values:
                mask = adata.obs[key].cat.categories[iname] == adata.obs[key].values
            else:
                mask = str(iname) == adata.obs[key].values
            groups_masks[iname] = mask
    groups_ids = list(range(len(groups_order)))
    if groups_order_subset != 'all':
        groups_ids = []
        for name in groups_order_subset:
            groups_ids.append(
                np.where(adata.obs[key].cat.categories.values == name)[0][0])
        if len(groups_ids) == 0:
            # fallback to index retrieval
            groups_ids = np.where(
                np.in1d(np.arange(len(adata.obs[key].cat.categories)).astype(str),
                        np.array(groups_order_subset)))[0]
        if len(groups_ids) == 0:
            logg.m(np.array(groups_order_subset),
                   'invalid! specify valid groups_order (or indices) one of',
                   adata.obs[key].cat.categories)
            from sys import exit
            exit(0)
        groups_masks = groups_masks[groups_ids]
        groups_order_subset = adata.obs[key].cat.categories[groups_ids].values
    else:
        groups_order_subset = groups_order.values
    return groups_order_subset, groups_masks
split_name: train
func_code_url: https://github.com/theislab/scanpy/blob/9e4e5ee02e04cf618872d9b098e24f0542e8b227/scanpy/utils.py#L686-L723
score: 0.002163

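Not part of the dataset record: a minimal sketch of select_groups on a toy
AnnData object; assumes the anndata package and that the function is imported
from scanpy.utils as in this version of the codebase.

import anndata
import numpy as np
import pandas as pd
from scanpy.utils import select_groups

# Four observations in two categorical groups under the default key 'groups'.
obs = pd.DataFrame({'groups': pd.Categorical(['a', 'a', 'b', 'b'])})
adata = anndata.AnnData(X=np.zeros((4, 1)), obs=obs)

order, masks = select_groups(adata, groups_order_subset=['b'])
print(order)  # ['b']
print(masks)  # [[False False  True  True]]
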
repository_name: pantsbuild/pants
func_path_in_repository: src/python/pants/backend/jvm/ivy_utils.py
func_name: IvyInfo.traverse_dependency_graph
language: python
whole_func_string:

def traverse_dependency_graph(self, ref, collector, memo=None):
    """Traverses module graph, starting with ref, collecting values for each ref into the
    sets created by the collector function.

    :param ref an IvyModuleRef to start traversing the ivy dependency graph
    :param collector a function that takes a ref and returns a new set of values to collect
           for that ref, which will also be updated with all the dependencies accumulated values
    :param memo is a dict of ref -> set that memoizes the results of each node in the graph.
                If provided, allows for retaining cache across calls.
    :returns the accumulated set for ref
    """
    resolved_ref = self.refs_by_unversioned_refs.get(ref.unversioned)
    if resolved_ref:
        ref = resolved_ref
    if memo is None:
        memo = dict()
    visited = set()
    return self._do_traverse_dependency_graph(ref, collector, memo, visited)
split_name: train
func_code_url: https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/backend/jvm/ivy_utils.py#L632-L650
score: 0.007568

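Not part of the dataset record: the collector/memo pattern the docstring
describes, as a standalone sketch over a plain adjacency dict (hypothetical
data, not pants' IvyModuleRef graph).

def traverse(graph, node, collector, memo=None, visiting=None):
    """Accumulate collector(node) plus the accumulated sets of its deps."""
    if memo is None:
        memo = {}
    if visiting is None:
        visiting = set()
    if node in memo:
        return memo[node]  # memo allows retaining the cache across calls
    if node in visiting:
        return set()       # cycle guard: contribute nothing extra
    visiting.add(node)
    acc = collector(node)
    for dep in graph.get(node, ()):
        acc |= traverse(graph, dep, collector, memo, visiting)
    visiting.discard(node)
    memo[node] = acc
    return acc

deps = {'app': ['libA', 'libB'], 'libA': ['libB'], 'libB': []}
print(traverse(deps, 'app', lambda ref: {ref}))  # {'app', 'libA', 'libB'}
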
repository_name: revelc/pyaccumulo
func_path_in_repository: pyaccumulo/proxy/AccumuloProxy.py
func_name: Client.importTable
language: python
whole_func_string:

def importTable(self, login, tableName, importDir):
    """
    Parameters:
     - login
     - tableName
     - importDir
    """
    self.send_importTable(login, tableName, importDir)
    self.recv_importTable()
split_name: train
func_code_url: https://github.com/revelc/pyaccumulo/blob/8adcf535bb82ba69c749efce785c9efc487e85de/pyaccumulo/proxy/AccumuloProxy.py#L1435-L1443
score: 0.004695

repository_name: BlackEarth/bxml
func_path_in_repository: bxml/xml.py
func_name: XML.tobytes
language: python
whole_func_string:

def tobytes(
    self,
    root=None,
    encoding='UTF-8',
    doctype=None,
    canonicalized=True,
    xml_declaration=True,
    pretty_print=True,
    with_comments=True,
):
    """return the content of the XML document as a byte string suitable for writing"""
    if root is None:
        root = self.root
    if canonicalized == True:
        return self.canonicalized_bytes(root)
    else:
        return etree.tostring(
            root,
            encoding=encoding or self.info.encoding,
            doctype=doctype or self.info.doctype,
            xml_declaration=xml_declaration,
            pretty_print=pretty_print,
            with_comments=with_comments,
        )
split_name: train
func_code_url: https://github.com/BlackEarth/bxml/blob/8fbea5dad7fadc7b854ddbeff6ecfb55aaceeb77/bxml/xml.py#L176-L199
score: 0.006266

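Not part of the dataset record: the non-canonicalized branch reduces to lxml's
etree.tostring, shown here as a standalone sketch with a toy document.

from lxml import etree

root = etree.fromstring("<doc><p>hello</p></doc>")
data = etree.tostring(
    root,
    encoding="UTF-8",
    xml_declaration=True,
    pretty_print=True,
)
print(data.decode("UTF-8"))
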
repository_name: dadadel/pyment
func_path_in_repository: pyment/docstring.py
func_name: DocToolsBase.get_return_list
language: python
whole_func_string:

def get_return_list(self, data):
    """Get the list of returned values.
    The list contains tuples (name=None, desc, type=None)

    :param data: the data to proceed
    """
    return_list = []
    lst = self.get_list_key(data, 'return')
    for l in lst:
        name, desc, rtype = l
        if l[2] is None:
            rtype = l[0]
            name = None
            desc = desc.strip()
        return_list.append((name, desc, rtype))
    return return_list
split_name: train
func_code_url: https://github.com/dadadel/pyment/blob/3d1bdf87d083ff56230bd0bf7c5252e20552b7b6/pyment/docstring.py#L190-L207
score: 0.005725

repository_name: apache/incubator-superset
func_path_in_repository: superset/views/tags.py
func_name: TagView.post
language: python
whole_func_string:

def post(self, object_type, object_id):
    """Add new tags to an object."""
    if object_id == 0:
        return Response(status=404)

    tagged_objects = []
    for name in request.get_json(force=True):
        if ':' in name:
            type_name = name.split(':', 1)[0]
            type_ = TagTypes[type_name]
        else:
            type_ = TagTypes.custom

        tag = db.session.query(Tag).filter_by(name=name, type=type_).first()
        if not tag:
            tag = Tag(name=name, type=type_)

        tagged_objects.append(
            TaggedObject(
                object_id=object_id,
                object_type=object_type,
                tag=tag,
            ),
        )

    db.session.add_all(tagged_objects)
    db.session.commit()

    return Response(status=201)
split_name: train
func_code_url: https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/views/tags.py#L91-L119
score: 0.003394

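Not part of the dataset record: a hedged client-side sketch of the request this
endpoint expects, a JSON list of tag names where a "type:" prefix selects the
tag type and bare names fall back to TagTypes.custom. The URL is hypothetical;
the real route depends on how TagView is registered.

import requests

requests.post(
    "http://localhost:8088/tagview/tags/chart/42/",
    json=["owner:alice", "favorited_by:bob", "needs_review"],  # last one is custom
)
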
repository_name: cggh/scikit-allel
func_path_in_repository: allel/stats/hw.py
func_name: heterozygosity_observed
language: python
whole_func_string:

def heterozygosity_observed(g, fill=np.nan):
    """Calculate the rate of observed heterozygosity for each variant.

    Parameters
    ----------
    g : array_like, int, shape (n_variants, n_samples, ploidy)
        Genotype array.
    fill : float, optional
        Use this value for variants where all calls are missing.

    Returns
    -------
    ho : ndarray, float, shape (n_variants,)
        Observed heterozygosity

    Examples
    --------
    >>> import allel
    >>> g = allel.GenotypeArray([[[0, 0], [0, 0], [0, 0]],
    ...                          [[0, 0], [0, 1], [1, 1]],
    ...                          [[0, 0], [1, 1], [2, 2]],
    ...                          [[1, 1], [1, 2], [-1, -1]]])
    >>> allel.heterozygosity_observed(g)
    array([0.        , 0.33333333, 0.        , 0.5       ])

    """
    # check inputs
    if not hasattr(g, 'count_het') or not hasattr(g, 'count_called'):
        g = GenotypeArray(g, copy=False)

    # count hets
    n_het = np.asarray(g.count_het(axis=1))
    n_called = np.asarray(g.count_called(axis=1))

    # calculate rate of observed heterozygosity, accounting for variants
    # where all calls are missing
    with ignore_invalid():
        ho = np.where(n_called > 0, n_het / n_called, fill)

    return ho
split_name: train
func_code_url: https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/hw.py#L12-L55
score: 0.000781

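Not part of the dataset record: the core computation in isolation. Here
np.errstate stands in for the library's ignore_invalid() context manager (an
assumption about which warnings it suppresses).

import numpy as np

n_het = np.array([0, 1, 0, 1])
n_called = np.array([3, 3, 0, 2])  # third variant: no called genotypes
with np.errstate(invalid="ignore", divide="ignore"):
    ho = np.where(n_called > 0, n_het / n_called, np.nan)
print(ho)  # [0.         0.33333333        nan 0.5       ]
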
repository_name: nkmathew/yasi-sexp-indenter
func_path_in_repository: yasi.py
func_name: assign_indent_numbers
language: python
whole_func_string:

def assign_indent_numbers(lst, inum, dic=collections.defaultdict(int)):
    """ Associate keywords with their respective indentation numbers """
    for i in lst:
        dic[i] = inum
    return dic
split_name: train
func_code_url: https://github.com/nkmathew/yasi-sexp-indenter/blob/6ec2a4675e79606c555bcb67494a0ba994b05805/yasi.py#L571-L576
score: 0.004926

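Not part of the dataset record: a usage sketch. Note the mutable default
argument: dic=collections.defaultdict(int) is created once at definition time,
so repeated calls that omit dic share and keep extending the same dictionary.

keywords = assign_indent_numbers(['define', 'lambda'], 1)
keywords = assign_indent_numbers(['let', 'let*'], 2, keywords)
print(dict(keywords))  # {'define': 1, 'lambda': 1, 'let': 2, 'let*': 2}
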
repository_name: sashahart/vex
func_path_in_repository: vex/options.py
func_name: make_arg_parser
language: python
whole_func_string:

def make_arg_parser():
    """Return a standard ArgumentParser object.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawTextHelpFormatter,
        usage="vex [OPTIONS] VIRTUALENV_NAME COMMAND_TO_RUN ...",
    )
    make = parser.add_argument_group(title='To make a new virtualenv')
    make.add_argument(
        '-m', '--make',
        action="store_true",
        help="make named virtualenv before running command"
    )
    make.add_argument(
        '--python',
        help="specify which python for virtualenv to be made",
        action="store",
        default=None,
    )
    make.add_argument(
        '--site-packages',
        help="allow site package imports from new virtualenv",
        action="store_true",
    )
    make.add_argument(
        '--always-copy',
        help="use copies instead of symlinks in new virtualenv",
        action="store_true",
    )
    remove = parser.add_argument_group(title='To remove a virtualenv')
    remove.add_argument(
        '-r', '--remove',
        action="store_true",
        help="remove the named virtualenv after running command"
    )
    parser.add_argument(
        "--path",
        metavar="DIR",
        help="absolute path to virtualenv to use",
        action="store"
    )
    parser.add_argument(
        '--cwd',
        metavar="DIR",
        action="store",
        default='.',
        help="path to run command in (default: '.' aka $PWD)",
    )
    parser.add_argument(
        "--config",
        metavar="FILE",
        default=None,
        action="store",
        help="path to config file to read (default: '~/.vexrc')"
    )
    parser.add_argument(
        '--shell-config',
        metavar="SHELL",
        dest="shell_to_configure",
        action="store",
        default=None,
        help="print optional config for the specified shell"
    )
    parser.add_argument(
        '--list',
        metavar="PREFIX",
        nargs="?",
        const="",
        default=None,
        help="print a list of available virtualenvs [matching PREFIX]",
        action="store"
    )
    parser.add_argument(
        '--version',
        help="print the version of vex that is being run",
        action="store_true"
    )
    parser.add_argument(
        "rest",
        nargs=argparse.REMAINDER,
        help=argparse.SUPPRESS)
    return parser
split_name: train
func_code_url: https://github.com/sashahart/vex/blob/b7680c40897b8cbe6aae55ec9812b4fb11738192/vex/options.py#L5-L90
score: 0.000423

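Not part of the dataset record: a usage sketch. Because the "rest" positional
uses argparse.REMAINDER, the virtualenv name and the command to run arrive
together in opts.rest.

parser = make_arg_parser()
opts = parser.parse_args(['--make', 'myenv', 'python', '-V'])
print(opts.make)  # True
print(opts.rest)  # ['myenv', 'python', '-V']
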
repository_name: Julius2342/pyvlx
func_path_in_repository: pyvlx/login.py
func_name: Login.handle_frame
language: python
whole_func_string:

async def handle_frame(self, frame):
    """Handle incoming API frame, return True if this was the expected frame."""
    if not isinstance(frame, FramePasswordEnterConfirmation):
        return False
    if frame.status == PasswordEnterConfirmationStatus.FAILED:
        PYVLXLOG.warning('Failed to authenticate with password "%s****"', self.password[:2])
        self.success = False
    if frame.status == PasswordEnterConfirmationStatus.SUCCESSFUL:
        self.success = True
    return True
split_name: train
func_code_url: https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/login.py#L18-L27
score: 0.007519

repository_name: saltstack/salt
func_path_in_repository: salt/modules/win_file.py
func_name: get_attributes
language: python
whole_func_string:

def get_attributes(path):
    '''
    Return a dictionary object with the Windows file attributes for a file.

    Args:
        path (str): The path to the file or directory

    Returns:
        dict: A dictionary of file attributes

    CLI Example:

    .. code-block:: bash

        salt '*' file.get_attributes c:\\temp\\a.txt
    '''
    if not os.path.exists(path):
        raise CommandExecutionError('Path not found: {0}'.format(path))

    # set up dictionary for attribute values
    attributes = {}

    # Get cumulative int value of attributes
    intAttributes = win32file.GetFileAttributes(path)

    # Assign individual attributes
    attributes['archive'] = (intAttributes & 32) == 32
    attributes['reparsePoint'] = (intAttributes & 1024) == 1024
    attributes['compressed'] = (intAttributes & 2048) == 2048
    attributes['directory'] = (intAttributes & 16) == 16
    attributes['encrypted'] = (intAttributes & 16384) == 16384
    attributes['hidden'] = (intAttributes & 2) == 2
    attributes['normal'] = (intAttributes & 128) == 128
    attributes['notIndexed'] = (intAttributes & 8192) == 8192
    attributes['offline'] = (intAttributes & 4096) == 4096
    attributes['readonly'] = (intAttributes & 1) == 1
    attributes['system'] = (intAttributes & 4) == 4
    attributes['temporary'] = (intAttributes & 256) == 256

    # check if it's a Mounted Volume
    attributes['mountedVolume'] = False
    if attributes['reparsePoint'] is True and attributes['directory'] is True:
        fileIterator = win32file.FindFilesIterator(path)
        findDataTuple = next(fileIterator)
        if findDataTuple[6] == 0xA0000003:
            attributes['mountedVolume'] = True

    # check if it's a soft (symbolic) link
    # Note: os.path.islink() does not work in
    # Python 2.7 for the Windows NTFS file system.
    # The following code does, however, work (tested in Windows 8)
    attributes['symbolicLink'] = False
    if attributes['reparsePoint'] is True:
        fileIterator = win32file.FindFilesIterator(path)
        findDataTuple = next(fileIterator)
        if findDataTuple[6] == 0xA000000C:
            attributes['symbolicLink'] = True

    return attributes
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_file.py#L879-L939
0.000454
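A minimal sketch of the bitmask decoding that get_attributes performs, with no Windows dependency; the flag values are the documented FILE_ATTRIBUTE_* constants and the sample input is hypothetical.

# Table of FILE_ATTRIBUTE_* bit flags used by the function above.
FLAGS = {
    'readonly': 1, 'hidden': 2, 'system': 4, 'directory': 16,
    'archive': 32, 'normal': 128, 'temporary': 256,
    'reparsePoint': 1024, 'compressed': 2048, 'offline': 4096,
    'notIndexed': 8192, 'encrypted': 16384,
}

def decode_attributes(int_attributes):
    # An attribute is set exactly when its bit is set in the cumulative value.
    return {name: bool(int_attributes & bit) for name, bit in FLAGS.items()}

# Hypothetical raw value: archive | hidden == 32 | 2 == 34.
attrs = decode_attributes(34)
print(attrs['archive'], attrs['hidden'], attrs['readonly'])  # True True False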
google/grr
grr/server/grr_response_server/databases/mem_flows.py
InMemoryDBFlowMixin.WriteClientActionRequests
def WriteClientActionRequests(self, requests): """Writes messages that should go to the client to the db.""" for r in requests: req_dict = self.flow_requests.get((r.client_id, r.flow_id), {}) if r.request_id not in req_dict: request_keys = [(r.client_id, r.flow_id, r.request_id) for r in requests ] raise db.AtLeastOneUnknownRequestError(request_keys) for r in requests: request_key = (r.client_id, r.flow_id, r.request_id) self.client_action_requests[request_key] = r
python
def WriteClientActionRequests(self, requests): """Writes messages that should go to the client to the db.""" for r in requests: req_dict = self.flow_requests.get((r.client_id, r.flow_id), {}) if r.request_id not in req_dict: request_keys = [(r.client_id, r.flow_id, r.request_id) for r in requests ] raise db.AtLeastOneUnknownRequestError(request_keys) for r in requests: request_key = (r.client_id, r.flow_id, r.request_id) self.client_action_requests[request_key] = r
[ "def", "WriteClientActionRequests", "(", "self", ",", "requests", ")", ":", "for", "r", "in", "requests", ":", "req_dict", "=", "self", ".", "flow_requests", ".", "get", "(", "(", "r", ".", "client_id", ",", "r", ".", "flow_id", ")", ",", "{", "}", ")", "if", "r", ".", "request_id", "not", "in", "req_dict", ":", "request_keys", "=", "[", "(", "r", ".", "client_id", ",", "r", ".", "flow_id", ",", "r", ".", "request_id", ")", "for", "r", "in", "requests", "]", "raise", "db", ".", "AtLeastOneUnknownRequestError", "(", "request_keys", ")", "for", "r", "in", "requests", ":", "request_key", "=", "(", "r", ".", "client_id", ",", "r", ".", "flow_id", ",", "r", ".", "request_id", ")", "self", ".", "client_action_requests", "[", "request_key", "]", "=", "r" ]
Writes messages that should go to the client to the db.
[ "Writes", "messages", "that", "should", "go", "to", "the", "client", "to", "the", "db", "." ]
train
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/databases/mem_flows.py#L211-L222
0.012844
PinLin/KCOJ_api
KCOJ_api/api.py
KCOJ.get_question_passers
def get_question_passers(self, number: str) -> list:
        """
        Get the list of students who passed a specific problem in the course
        """
        try:
            # Parameters needed for the request
            params = {
                'HW_ID': number
            }
            # Fetch the page
            response = self.__session.get(
                self.__url + '/success.jsp', params=params, timeout=0.5, verify=False)
            soup = BeautifulSoup(response.text, 'html.parser')
            # Collect the passers
            passers = []
            for tag in soup.find_all('tr'):
                # Extract the passer's student ID
                passer = tag.get_text().replace('\n', '').strip()
                # Skip the header row ('學號' means "student ID")
                if passer != '學號':
                    passers.append(passer)
            # Return the result
            return passers

        except requests.exceptions.Timeout:
            return ["Timeout"]
python
def get_question_passers(self, number: str) -> list:
        """
        Get the list of students who passed a specific problem in the course
        """
        try:
            # Parameters needed for the request
            params = {
                'HW_ID': number
            }
            # Fetch the page
            response = self.__session.get(
                self.__url + '/success.jsp', params=params, timeout=0.5, verify=False)
            soup = BeautifulSoup(response.text, 'html.parser')
            # Collect the passers
            passers = []
            for tag in soup.find_all('tr'):
                # Extract the passer's student ID
                passer = tag.get_text().replace('\n', '').strip()
                # Skip the header row ('學號' means "student ID")
                if passer != '學號':
                    passers.append(passer)
            # Return the result
            return passers

        except requests.exceptions.Timeout:
            return ["Timeout"]
[ "def", "get_question_passers", "(", "self", ",", "number", ":", "str", ")", "->", "list", ":", "try", ":", "# 操作所需資訊", "params", "=", "{", "'HW_ID'", ":", "number", "}", "# 取得資料", "response", "=", "self", ".", "__session", ".", "get", "(", "self", ".", "__url", "+", "'/success.jsp'", ",", "params", "=", "params", ",", "timeout", "=", "0.5", ",", "verify", "=", "False", ")", "soup", "=", "BeautifulSoup", "(", "response", ".", "text", ",", "'html.parser'", ")", "# 整理通過者資訊", "passers", "=", "[", "]", "for", "tag", "in", "soup", ".", "find_all", "(", "'tr'", ")", ":", "# 取得通過者學號", "passer", "=", "tag", ".", "get_text", "(", ")", ".", "replace", "(", "'\\n'", ",", "''", ")", ".", "strip", "(", ")", "# 跳過標題列", "if", "passer", "!=", "'學號':", "", "passers", ".", "append", "(", "passer", ")", "# 回傳結果", "return", "passers", "except", "requests", ".", "exceptions", ".", "Timeout", ":", "return", "[", "\"Timeout\"", "]" ]
Get the list of students who passed a specific problem in the course
[ "取得課程中特定題目通過者列表" ]
train
https://github.com/PinLin/KCOJ_api/blob/64f6ef0f9e64dc1efd692cbe6d5738ee7cfb78ec/KCOJ_api/api.py#L133-L158
0.003654
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py
MAVLink.position_target_local_ned_send
def position_target_local_ned_send(self, time_boot_ms, coordinate_frame, type_mask, x, y, z, vx, vy, vz, afx, afy, afz, yaw, yaw_rate, force_mavlink1=False): ''' Reports the current commanded vehicle position, velocity, and acceleration as specified by the autopilot. This should match the commands sent in SET_POSITION_TARGET_LOCAL_NED if the vehicle is being controlled this way. time_boot_ms : Timestamp in milliseconds since system boot (uint32_t) coordinate_frame : Valid options are: MAV_FRAME_LOCAL_NED = 1, MAV_FRAME_LOCAL_OFFSET_NED = 7, MAV_FRAME_BODY_NED = 8, MAV_FRAME_BODY_OFFSET_NED = 9 (uint8_t) type_mask : Bitmask to indicate which dimensions should be ignored by the vehicle: a value of 0b0000000000000000 or 0b0000001000000000 indicates that none of the setpoint dimensions should be ignored. If bit 10 is set the floats afx afy afz should be interpreted as force instead of acceleration. Mapping: bit 1: x, bit 2: y, bit 3: z, bit 4: vx, bit 5: vy, bit 6: vz, bit 7: ax, bit 8: ay, bit 9: az, bit 10: is force setpoint, bit 11: yaw, bit 12: yaw rate (uint16_t) x : X Position in NED frame in meters (float) y : Y Position in NED frame in meters (float) z : Z Position in NED frame in meters (note, altitude is negative in NED) (float) vx : X velocity in NED frame in meter / s (float) vy : Y velocity in NED frame in meter / s (float) vz : Z velocity in NED frame in meter / s (float) afx : X acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float) afy : Y acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float) afz : Z acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float) yaw : yaw setpoint in rad (float) yaw_rate : yaw rate setpoint in rad/s (float) ''' return self.send(self.position_target_local_ned_encode(time_boot_ms, coordinate_frame, type_mask, x, y, z, vx, vy, vz, afx, afy, afz, yaw, yaw_rate), force_mavlink1=force_mavlink1)
python
def position_target_local_ned_send(self, time_boot_ms, coordinate_frame, type_mask, x, y, z, vx, vy, vz, afx, afy, afz, yaw, yaw_rate, force_mavlink1=False): ''' Reports the current commanded vehicle position, velocity, and acceleration as specified by the autopilot. This should match the commands sent in SET_POSITION_TARGET_LOCAL_NED if the vehicle is being controlled this way. time_boot_ms : Timestamp in milliseconds since system boot (uint32_t) coordinate_frame : Valid options are: MAV_FRAME_LOCAL_NED = 1, MAV_FRAME_LOCAL_OFFSET_NED = 7, MAV_FRAME_BODY_NED = 8, MAV_FRAME_BODY_OFFSET_NED = 9 (uint8_t) type_mask : Bitmask to indicate which dimensions should be ignored by the vehicle: a value of 0b0000000000000000 or 0b0000001000000000 indicates that none of the setpoint dimensions should be ignored. If bit 10 is set the floats afx afy afz should be interpreted as force instead of acceleration. Mapping: bit 1: x, bit 2: y, bit 3: z, bit 4: vx, bit 5: vy, bit 6: vz, bit 7: ax, bit 8: ay, bit 9: az, bit 10: is force setpoint, bit 11: yaw, bit 12: yaw rate (uint16_t) x : X Position in NED frame in meters (float) y : Y Position in NED frame in meters (float) z : Z Position in NED frame in meters (note, altitude is negative in NED) (float) vx : X velocity in NED frame in meter / s (float) vy : Y velocity in NED frame in meter / s (float) vz : Z velocity in NED frame in meter / s (float) afx : X acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float) afy : Y acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float) afz : Z acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float) yaw : yaw setpoint in rad (float) yaw_rate : yaw rate setpoint in rad/s (float) ''' return self.send(self.position_target_local_ned_encode(time_boot_ms, coordinate_frame, type_mask, x, y, z, vx, vy, vz, afx, afy, afz, yaw, yaw_rate), force_mavlink1=force_mavlink1)
[ "def", "position_target_local_ned_send", "(", "self", ",", "time_boot_ms", ",", "coordinate_frame", ",", "type_mask", ",", "x", ",", "y", ",", "z", ",", "vx", ",", "vy", ",", "vz", ",", "afx", ",", "afy", ",", "afz", ",", "yaw", ",", "yaw_rate", ",", "force_mavlink1", "=", "False", ")", ":", "return", "self", ".", "send", "(", "self", ".", "position_target_local_ned_encode", "(", "time_boot_ms", ",", "coordinate_frame", ",", "type_mask", ",", "x", ",", "y", ",", "z", ",", "vx", ",", "vy", ",", "vz", ",", "afx", ",", "afy", ",", "afz", ",", "yaw", ",", "yaw_rate", ")", ",", "force_mavlink1", "=", "force_mavlink1", ")" ]
Reports the current commanded vehicle position, velocity, and acceleration as specified by the autopilot. This should match the commands sent in SET_POSITION_TARGET_LOCAL_NED if the vehicle is being controlled this way. time_boot_ms : Timestamp in milliseconds since system boot (uint32_t) coordinate_frame : Valid options are: MAV_FRAME_LOCAL_NED = 1, MAV_FRAME_LOCAL_OFFSET_NED = 7, MAV_FRAME_BODY_NED = 8, MAV_FRAME_BODY_OFFSET_NED = 9 (uint8_t) type_mask : Bitmask to indicate which dimensions should be ignored by the vehicle: a value of 0b0000000000000000 or 0b0000001000000000 indicates that none of the setpoint dimensions should be ignored. If bit 10 is set the floats afx afy afz should be interpreted as force instead of acceleration. Mapping: bit 1: x, bit 2: y, bit 3: z, bit 4: vx, bit 5: vy, bit 6: vz, bit 7: ax, bit 8: ay, bit 9: az, bit 10: is force setpoint, bit 11: yaw, bit 12: yaw rate (uint16_t) x : X Position in NED frame in meters (float) y : Y Position in NED frame in meters (float) z : Z Position in NED frame in meters (note, altitude is negative in NED) (float) vx : X velocity in NED frame in meter / s (float) vy : Y velocity in NED frame in meter / s (float) vz : Z velocity in NED frame in meter / s (float) afx : X acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float) afy : Y acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float) afz : Z acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float) yaw : yaw setpoint in rad (float) yaw_rate : yaw rate setpoint in rad/s (float)
[ "Reports", "the", "current", "commanded", "vehicle", "position", "velocity", "and", "acceleration", "as", "specified", "by", "the", "autopilot", ".", "This", "should", "match", "the", "commands", "sent", "in", "SET_POSITION_TARGET_LOCAL_NED", "if", "the", "vehicle", "is", "being", "controlled", "this", "way", "." ]
train
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py#L10645-L10669
0.006116
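The type_mask documentation above is easiest to digest with a small builder. This sketch assumes "bit n" in the docstring means (1 << (n - 1)), which matches the common MAVLink convention that a set bit tells the autopilot to ignore that dimension; it derives the familiar position-only mask.

# Bit positions from the docstring above.
BITS = {'x': 1, 'y': 2, 'z': 3, 'vx': 4, 'vy': 5, 'vz': 6,
        'ax': 7, 'ay': 8, 'az': 9, 'is_force': 10,
        'yaw': 11, 'yaw_rate': 12}

def ignore(*names):
    # OR together the "ignore" bits for the named dimensions.
    mask = 0
    for name in names:
        mask |= 1 << (BITS[name] - 1)
    return mask

# Command position only: ignore velocity, acceleration, yaw and yaw rate.
position_only = ignore('vx', 'vy', 'vz', 'ax', 'ay', 'az', 'yaw', 'yaw_rate')
print(bin(position_only))  # 0b110111111000, i.e. 3576 (0x0DF8)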
openstack/horizon
openstack_auth/utils.py
_augment_url_with_version
def _augment_url_with_version(auth_url): """Optionally augment auth_url path with version suffix. Check if path component already contains version suffix and if it does not, append version suffix to the end of path, not erasing the previous path contents, since keystone web endpoint (like /identity) could be there. Keystone version needs to be added to endpoint because as of Kilo, the identity URLs returned by Keystone might no longer contain API versions, leaving the version choice up to the user. """ if has_in_url_path(auth_url, ["/v2.0", "/v3"]): return auth_url if get_keystone_version() >= 3: return url_path_append(auth_url, "/v3") else: return url_path_append(auth_url, "/v2.0")
python
def _augment_url_with_version(auth_url): """Optionally augment auth_url path with version suffix. Check if path component already contains version suffix and if it does not, append version suffix to the end of path, not erasing the previous path contents, since keystone web endpoint (like /identity) could be there. Keystone version needs to be added to endpoint because as of Kilo, the identity URLs returned by Keystone might no longer contain API versions, leaving the version choice up to the user. """ if has_in_url_path(auth_url, ["/v2.0", "/v3"]): return auth_url if get_keystone_version() >= 3: return url_path_append(auth_url, "/v3") else: return url_path_append(auth_url, "/v2.0")
[ "def", "_augment_url_with_version", "(", "auth_url", ")", ":", "if", "has_in_url_path", "(", "auth_url", ",", "[", "\"/v2.0\"", ",", "\"/v3\"", "]", ")", ":", "return", "auth_url", "if", "get_keystone_version", "(", ")", ">=", "3", ":", "return", "url_path_append", "(", "auth_url", ",", "\"/v3\"", ")", "else", ":", "return", "url_path_append", "(", "auth_url", ",", "\"/v2.0\"", ")" ]
Optionally augment auth_url path with version suffix. Check if path component already contains version suffix and if it does not, append version suffix to the end of path, not erasing the previous path contents, since keystone web endpoint (like /identity) could be there. Keystone version needs to be added to endpoint because as of Kilo, the identity URLs returned by Keystone might no longer contain API versions, leaving the version choice up to the user.
[ "Optionally", "augment", "auth_url", "path", "with", "version", "suffix", "." ]
train
https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/openstack_auth/utils.py#L287-L303
0.001314
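A self-contained approximation of the behaviour, using simplified stand-ins for has_in_url_path and url_path_append (the real helpers live in the same openstack_auth module); the keystone URL is a placeholder.

from urllib.parse import urlparse, urlunparse

def augment(auth_url, keystone_version=3):
    parts = urlparse(auth_url)
    # already versioned: leave the URL alone
    if any(seg in ('v2.0', 'v3') for seg in parts.path.split('/')):
        return auth_url
    suffix = '/v3' if keystone_version >= 3 else '/v2.0'
    return urlunparse(parts._replace(path=parts.path.rstrip('/') + suffix))

print(augment('http://keystone:5000/identity'))     # http://keystone:5000/identity/v3
print(augment('http://keystone:5000/identity/v3'))  # unchanged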
gwastro/pycbc
pycbc/cosmology.py
z_at_value
def z_at_value(func, fval, unit, zmax=1000., **kwargs):
    r"""Wrapper around astropy.cosmology.z_at_value to handle numpy arrays.

    Getting a z for a cosmological quantity involves numerically inverting
    ``func``. The ``zmax`` argument sets how large of a z to guess (see
    :py:func:`astropy.cosmology.z_at_value` for details). If a z is larger
    than ``zmax``, this will try a larger zmax up to ``zmax * 10**5``. If
    that still is not large enough, will just return ``numpy.inf``.

    Parameters
    ----------
    func : function or method
        A function that takes redshift as input.
    fval : float
        The value of ``func(z)``.
    unit : astropy.unit
        The unit of ``fval``.
    zmax : float, optional
        The initial maximum search limit for ``z``. Default is 1000.
    \**kwargs :
        All other keyword arguments are passed to
        :py:func:``astropy.cosmology.z_at_value``.

    Returns
    -------
    float
        The redshift at the requested values.
    """
    fval, input_is_array = ensurearray(fval)
    # make sure fval is at least 1D
    if fval.size == 1 and fval.ndim == 0:
        fval = fval.reshape(1)
    zs = numpy.zeros(fval.shape, dtype=float)  # the output array
    for (ii, val) in enumerate(fval):
        try:
            zs[ii] = astropy.cosmology.z_at_value(func, val*unit, zmax=zmax,
                                                  **kwargs)
        except CosmologyError:
            # we'll get this if the z was larger than zmax; in that case we'll
            # try bumping up zmax later to get a value
            zs[ii] = numpy.inf
    # check if there were any zs > zmax
    replacemask = numpy.isinf(zs)
    # try bumping up zmax to get a result
    if replacemask.any():
        # we'll keep bumping up the maxz until we can get a result
        counter = 0  # to prevent running forever
        while replacemask.any():
            kwargs['zmin'] = zmax
            zmax = 10 * zmax
            # numpy.where returns a tuple; take the index array so the loop
            # below sees scalar indices rather than the whole array at once
            idx = numpy.where(replacemask)[0]
            for ii in idx:
                val = fval[ii]
                try:
                    zs[ii] = astropy.cosmology.z_at_value(
                        func, val*unit, zmax=zmax, **kwargs)
                    replacemask[ii] = False
                except CosmologyError:
                    # didn't work, try on next loop
                    pass
            counter += 1
            if counter == 5:
                # give up and warn the user
                logging.warning("One or more values correspond to a "
                                "redshift > {0:.1e}. The redshift for these "
                                "have been set to inf. If you would like "
                                "better precision, call God.".format(zmax))
                break
    return formatreturn(zs, input_is_array)
python
def z_at_value(func, fval, unit, zmax=1000., **kwargs):
    r"""Wrapper around astropy.cosmology.z_at_value to handle numpy arrays.

    Getting a z for a cosmological quantity involves numerically inverting
    ``func``. The ``zmax`` argument sets how large of a z to guess (see
    :py:func:`astropy.cosmology.z_at_value` for details). If a z is larger
    than ``zmax``, this will try a larger zmax up to ``zmax * 10**5``. If
    that still is not large enough, will just return ``numpy.inf``.

    Parameters
    ----------
    func : function or method
        A function that takes redshift as input.
    fval : float
        The value of ``func(z)``.
    unit : astropy.unit
        The unit of ``fval``.
    zmax : float, optional
        The initial maximum search limit for ``z``. Default is 1000.
    \**kwargs :
        All other keyword arguments are passed to
        :py:func:``astropy.cosmology.z_at_value``.

    Returns
    -------
    float
        The redshift at the requested values.
    """
    fval, input_is_array = ensurearray(fval)
    # make sure fval is at least 1D
    if fval.size == 1 and fval.ndim == 0:
        fval = fval.reshape(1)
    zs = numpy.zeros(fval.shape, dtype=float)  # the output array
    for (ii, val) in enumerate(fval):
        try:
            zs[ii] = astropy.cosmology.z_at_value(func, val*unit, zmax=zmax,
                                                  **kwargs)
        except CosmologyError:
            # we'll get this if the z was larger than zmax; in that case we'll
            # try bumping up zmax later to get a value
            zs[ii] = numpy.inf
    # check if there were any zs > zmax
    replacemask = numpy.isinf(zs)
    # try bumping up zmax to get a result
    if replacemask.any():
        # we'll keep bumping up the maxz until we can get a result
        counter = 0  # to prevent running forever
        while replacemask.any():
            kwargs['zmin'] = zmax
            zmax = 10 * zmax
            # numpy.where returns a tuple; take the index array so the loop
            # below sees scalar indices rather than the whole array at once
            idx = numpy.where(replacemask)[0]
            for ii in idx:
                val = fval[ii]
                try:
                    zs[ii] = astropy.cosmology.z_at_value(
                        func, val*unit, zmax=zmax, **kwargs)
                    replacemask[ii] = False
                except CosmologyError:
                    # didn't work, try on next loop
                    pass
            counter += 1
            if counter == 5:
                # give up and warn the user
                logging.warning("One or more values correspond to a "
                                "redshift > {0:.1e}. The redshift for these "
                                "have been set to inf. If you would like "
                                "better precision, call God.".format(zmax))
                break
    return formatreturn(zs, input_is_array)
[ "def", "z_at_value", "(", "func", ",", "fval", ",", "unit", ",", "zmax", "=", "1000.", ",", "*", "*", "kwargs", ")", ":", "fval", ",", "input_is_array", "=", "ensurearray", "(", "fval", ")", "# make sure fval is atleast 1D", "if", "fval", ".", "size", "==", "1", "and", "fval", ".", "ndim", "==", "0", ":", "fval", "=", "fval", ".", "reshape", "(", "1", ")", "zs", "=", "numpy", ".", "zeros", "(", "fval", ".", "shape", ",", "dtype", "=", "float", ")", "# the output array", "for", "(", "ii", ",", "val", ")", "in", "enumerate", "(", "fval", ")", ":", "try", ":", "zs", "[", "ii", "]", "=", "astropy", ".", "cosmology", ".", "z_at_value", "(", "func", ",", "val", "*", "unit", ",", "zmax", "=", "zmax", ",", "*", "*", "kwargs", ")", "except", "CosmologyError", ":", "# we'll get this if the z was larger than zmax; in that case we'll", "# try bumping up zmax later to get a value", "zs", "[", "ii", "]", "=", "numpy", ".", "inf", "# check if there were any zs > zmax", "replacemask", "=", "numpy", ".", "isinf", "(", "zs", ")", "# try bumping up zmax to get a result", "if", "replacemask", ".", "any", "(", ")", ":", "# we'll keep bumping up the maxz until we can get a result", "counter", "=", "0", "# to prevent running forever", "while", "replacemask", ".", "any", "(", ")", ":", "kwargs", "[", "'zmin'", "]", "=", "zmax", "zmax", "=", "10", "*", "zmax", "idx", "=", "numpy", ".", "where", "(", "replacemask", ")", "for", "ii", "in", "idx", ":", "val", "=", "fval", "[", "ii", "]", "try", ":", "zs", "[", "ii", "]", "=", "astropy", ".", "cosmology", ".", "z_at_value", "(", "func", ",", "val", "*", "unit", ",", "zmax", "=", "zmax", ",", "*", "*", "kwargs", ")", "replacemask", "[", "ii", "]", "=", "False", "except", "CosmologyError", ":", "# didn't work, try on next loop", "pass", "counter", "+=", "1", "if", "counter", "==", "5", ":", "# give up and warn the user", "logging", ".", "warning", "(", "\"One or more values correspond to a \"", "\"redshift > {0:.1e}. The redshift for these \"", "\"have been set to inf. If you would like \"", "\"better precision, call God.\"", ".", "format", "(", "zmax", ")", ")", "break", "return", "formatreturn", "(", "zs", ",", "input_is_array", ")" ]
r"""Wrapper around astropy.cosmology.z_at_value to handle numpy arrays. Getting a z for a cosmological quantity involves numerically inverting ``func``. The ``zmax`` argument sets how large of a z to guess (see :py:func:`astropy.cosmology.z_at_value` for details). If a z is larger than ``zmax``, this will try a larger zmax up to ``zmax * 10**5``. If that still is not large enough, will just return ``numpy.inf``. Parameters ---------- func : function or method A function that takes redshift as input. fval : float The value of ``func(z)``. unit : astropy.unit The unit of ``fval``. zmax : float, optional The initial maximum search limit for ``z``. Default is 1000. \**kwargs : All other keyword arguments are passed to :py:func:``astropy.cosmology.z_at_value``. Returns ------- float The redshift at the requested values.
[ "r", "Wrapper", "around", "astropy", ".", "cosmology", ".", "z_at_value", "to", "handle", "numpy", "arrays", "." ]
train
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/cosmology.py#L105-L173
0.000353
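Hypothetical usage, assuming astropy and pycbc are installed: invert an array of luminosity distances back to redshifts in one call; the distances are illustrative.

import numpy
from astropy import units
from astropy.cosmology import Planck15
from pycbc.cosmology import z_at_value

dists = numpy.array([100., 500., 1000.])  # luminosity distances in Mpc
zs = z_at_value(Planck15.luminosity_distance, dists, units.Mpc)
print(zs)  # one redshift per input distance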
seequent/properties
properties/basic.py
GettableProperty.equal
def equal(self, value_a, value_b): #pylint: disable=no-self-use """Check if two valid Property values are equal .. note:: This method assumes that :code:`None` and :code:`properties.undefined` are never passed in as values """ equal = value_a == value_b if hasattr(equal, '__iter__'): return all(equal) return equal
python
def equal(self, value_a, value_b): #pylint: disable=no-self-use """Check if two valid Property values are equal .. note:: This method assumes that :code:`None` and :code:`properties.undefined` are never passed in as values """ equal = value_a == value_b if hasattr(equal, '__iter__'): return all(equal) return equal
[ "def", "equal", "(", "self", ",", "value_a", ",", "value_b", ")", ":", "#pylint: disable=no-self-use", "equal", "=", "value_a", "==", "value_b", "if", "hasattr", "(", "equal", ",", "'__iter__'", ")", ":", "return", "all", "(", "equal", ")", "return", "equal" ]
Check if two valid Property values are equal .. note:: This method assumes that :code:`None` and :code:`properties.undefined` are never passed in as values
[ "Check", "if", "two", "valid", "Property", "values", "are", "equal" ]
train
https://github.com/seequent/properties/blob/096b07012fff86b0a880c8c018320c3b512751b9/properties/basic.py#L264-L275
0.00907
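Why the __iter__ check matters: comparing array-like values yields an elementwise result rather than a single bool. A standalone sketch of the same logic:

import numpy as np

def equal(value_a, value_b):
    result = value_a == value_b
    if hasattr(result, '__iter__'):
        return all(result)   # collapse the elementwise comparison to one bool
    return result

print(equal(1, 1))                                # True
print(equal(np.array([1, 2]), np.array([1, 2])))  # True
print(equal(np.array([1, 2]), np.array([1, 3])))  # False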
mfussenegger/cr8
cr8/clients.py
_plain_or_callable
def _plain_or_callable(obj):
    """Returns the value of the called object if obj is a callable,
    otherwise the plain object.
    Returns None if obj is None.

    >>> obj = None
    >>> _plain_or_callable(obj)

    >>> stmt = 'select * from sys.nodes'
    >>> _plain_or_callable(stmt)
    'select * from sys.nodes'

    >>> def _args():
    ...     return [1, 'name']
    >>> _plain_or_callable(_args)
    [1, 'name']

    >>> _plain_or_callable((x for x in range(10)))
    0

    >>> class BulkArgsGenerator:
    ...     def __call__(self):
    ...         return [[1, 'foo'], [2, 'bar'], [3, 'foobar']]
    >>> _plain_or_callable(BulkArgsGenerator())
    [[1, 'foo'], [2, 'bar'], [3, 'foobar']]
    """
    if callable(obj):
        return obj()
    elif isinstance(obj, types.GeneratorType):
        return next(obj)
    else:
        return obj
python
def _plain_or_callable(obj):
    """Returns the value of the called object if obj is a callable,
    otherwise the plain object.
    Returns None if obj is None.

    >>> obj = None
    >>> _plain_or_callable(obj)

    >>> stmt = 'select * from sys.nodes'
    >>> _plain_or_callable(stmt)
    'select * from sys.nodes'

    >>> def _args():
    ...     return [1, 'name']
    >>> _plain_or_callable(_args)
    [1, 'name']

    >>> _plain_or_callable((x for x in range(10)))
    0

    >>> class BulkArgsGenerator:
    ...     def __call__(self):
    ...         return [[1, 'foo'], [2, 'bar'], [3, 'foobar']]
    >>> _plain_or_callable(BulkArgsGenerator())
    [[1, 'foo'], [2, 'bar'], [3, 'foobar']]
    """
    if callable(obj):
        return obj()
    elif isinstance(obj, types.GeneratorType):
        return next(obj)
    else:
        return obj
[ "def", "_plain_or_callable", "(", "obj", ")", ":", "if", "callable", "(", "obj", ")", ":", "return", "obj", "(", ")", "elif", "isinstance", "(", "obj", ",", "types", ".", "GeneratorType", ")", ":", "return", "next", "(", "obj", ")", "else", ":", "return", "obj" ]
Returns the value of the called object if obj is a callable,
    otherwise the plain object.
    Returns None if obj is None.

    >>> obj = None
    >>> _plain_or_callable(obj)

    >>> stmt = 'select * from sys.nodes'
    >>> _plain_or_callable(stmt)
    'select * from sys.nodes'

    >>> def _args():
    ...     return [1, 'name']
    >>> _plain_or_callable(_args)
    [1, 'name']

    >>> _plain_or_callable((x for x in range(10)))
    0

    >>> class BulkArgsGenerator:
    ...     def __call__(self):
    ...         return [[1, 'foo'], [2, 'bar'], [3, 'foobar']]
    >>> _plain_or_callable(BulkArgsGenerator())
    [[1, 'foo'], [2, 'bar'], [3, 'foobar']]
[ "Returns", "the", "value", "of", "the", "called", "object", "of", "obj", "is", "a", "callable", "otherwise", "the", "plain", "object", ".", "Returns", "None", "if", "obj", "is", "None", "." ]
train
https://github.com/mfussenegger/cr8/blob/a37d6049f1f9fee2d0556efae2b7b7f8761bffe8/cr8/clients.py#L97-L128
0.001174
aws/aws-iot-device-sdk-python
AWSIoTPythonSDK/core/protocol/paho/client.py
Client.loop_stop
def loop_stop(self, force=False): """This is part of the threaded client interface. Call this once to stop the network thread previously created with loop_start(). This call will block until the network thread finishes. The force parameter is currently ignored. """ if self._thread is None: return MQTT_ERR_INVAL self._thread_terminate = True self._thread.join() self._thread = None
python
def loop_stop(self, force=False): """This is part of the threaded client interface. Call this once to stop the network thread previously created with loop_start(). This call will block until the network thread finishes. The force parameter is currently ignored. """ if self._thread is None: return MQTT_ERR_INVAL self._thread_terminate = True self._thread.join() self._thread = None
[ "def", "loop_stop", "(", "self", ",", "force", "=", "False", ")", ":", "if", "self", ".", "_thread", "is", "None", ":", "return", "MQTT_ERR_INVAL", "self", ".", "_thread_terminate", "=", "True", "self", ".", "_thread", ".", "join", "(", ")", "self", ".", "_thread", "=", "None" ]
This is part of the threaded client interface. Call this once to stop the network thread previously created with loop_start(). This call will block until the network thread finishes. The force parameter is currently ignored.
[ "This", "is", "part", "of", "the", "threaded", "client", "interface", ".", "Call", "this", "once", "to", "stop", "the", "network", "thread", "previously", "created", "with", "loop_start", "()", ".", "This", "call", "will", "block", "until", "the", "network", "thread", "finishes", "." ]
train
https://github.com/aws/aws-iot-device-sdk-python/blob/f0aa2ce34b21dd2e44f4fb7e1d058656aaf2fc62/AWSIoTPythonSDK/core/protocol/paho/client.py#L1399-L1411
0.004274
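Typical pairing with loop_start() on the threaded interface; the broker host and topic below are placeholders, and the import path assumes the bundled paho client shown in this record.

from AWSIoTPythonSDK.core.protocol.paho.client import Client

client = Client()
client.connect("broker.example.com", 1883)
client.loop_start()             # spawn the network thread
client.publish("sensors/t1", "22.5")
client.loop_stop()              # blocks until the network thread exits
client.disconnect()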
jupyterhub/kubespawner
kubespawner/spawner.py
KubeSpawner.poll
def poll(self): """ Check if the pod is still running. Uses the same interface as subprocess.Popen.poll(): if the pod is still running, returns None. If the pod has exited, return the exit code if we can determine it, or 1 if it has exited but we don't know how. These are the return values JupyterHub expects. Note that a clean exit will have an exit code of zero, so it is necessary to check that the returned value is None, rather than just Falsy, to determine that the pod is still running. """ # have to wait for first load of data before we have a valid answer if not self.pod_reflector.first_load_future.done(): yield self.pod_reflector.first_load_future data = self.pod_reflector.pods.get(self.pod_name, None) if data is not None: if data.status.phase == 'Pending': return None ctr_stat = data.status.container_statuses if ctr_stat is None: # No status, no container (we hope) # This seems to happen when a pod is idle-culled. return 1 for c in ctr_stat: # return exit code if notebook container has terminated if c.name == 'notebook': if c.state.terminated: # call self.stop to delete the pod if self.delete_stopped_pods: yield self.stop(now=True) return c.state.terminated.exit_code break # None means pod is running or starting up return None # pod doesn't exist or has been deleted return 1
python
def poll(self): """ Check if the pod is still running. Uses the same interface as subprocess.Popen.poll(): if the pod is still running, returns None. If the pod has exited, return the exit code if we can determine it, or 1 if it has exited but we don't know how. These are the return values JupyterHub expects. Note that a clean exit will have an exit code of zero, so it is necessary to check that the returned value is None, rather than just Falsy, to determine that the pod is still running. """ # have to wait for first load of data before we have a valid answer if not self.pod_reflector.first_load_future.done(): yield self.pod_reflector.first_load_future data = self.pod_reflector.pods.get(self.pod_name, None) if data is not None: if data.status.phase == 'Pending': return None ctr_stat = data.status.container_statuses if ctr_stat is None: # No status, no container (we hope) # This seems to happen when a pod is idle-culled. return 1 for c in ctr_stat: # return exit code if notebook container has terminated if c.name == 'notebook': if c.state.terminated: # call self.stop to delete the pod if self.delete_stopped_pods: yield self.stop(now=True) return c.state.terminated.exit_code break # None means pod is running or starting up return None # pod doesn't exist or has been deleted return 1
[ "def", "poll", "(", "self", ")", ":", "# have to wait for first load of data before we have a valid answer", "if", "not", "self", ".", "pod_reflector", ".", "first_load_future", ".", "done", "(", ")", ":", "yield", "self", ".", "pod_reflector", ".", "first_load_future", "data", "=", "self", ".", "pod_reflector", ".", "pods", ".", "get", "(", "self", ".", "pod_name", ",", "None", ")", "if", "data", "is", "not", "None", ":", "if", "data", ".", "status", ".", "phase", "==", "'Pending'", ":", "return", "None", "ctr_stat", "=", "data", ".", "status", ".", "container_statuses", "if", "ctr_stat", "is", "None", ":", "# No status, no container (we hope)", "# This seems to happen when a pod is idle-culled.", "return", "1", "for", "c", "in", "ctr_stat", ":", "# return exit code if notebook container has terminated", "if", "c", ".", "name", "==", "'notebook'", ":", "if", "c", ".", "state", ".", "terminated", ":", "# call self.stop to delete the pod", "if", "self", ".", "delete_stopped_pods", ":", "yield", "self", ".", "stop", "(", "now", "=", "True", ")", "return", "c", ".", "state", ".", "terminated", ".", "exit_code", "break", "# None means pod is running or starting up", "return", "None", "# pod doesn't exist or has been deleted", "return", "1" ]
Check if the pod is still running. Uses the same interface as subprocess.Popen.poll(): if the pod is still running, returns None. If the pod has exited, return the exit code if we can determine it, or 1 if it has exited but we don't know how. These are the return values JupyterHub expects. Note that a clean exit will have an exit code of zero, so it is necessary to check that the returned value is None, rather than just Falsy, to determine that the pod is still running.
[ "Check", "if", "the", "pod", "is", "still", "running", "." ]
train
https://github.com/jupyterhub/kubespawner/blob/46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13/kubespawner/spawner.py#L1456-L1492
0.00115
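A small sketch of the exit-code contract the docstring describes; the caller must distinguish None (still running) from 0 (clean exit), so a truthiness test is not enough. The poll function here is a placeholder for the coroutine above.

def describe(status):
    # status follows the subprocess.Popen.poll() convention used above.
    if status is None:
        return "pod still running"
    if status == 0:
        return "pod exited cleanly"
    return "pod exited with code {}".format(status)

for s in (None, 0, 1):
    print(describe(s))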
jxtech/wechatpy
wechatpy/pay/api/order.py
WeChatOrder.close
def close(self, out_trade_no):
        """
        Close an order

        :param out_trade_no: Merchant's internal order number
        :return: The returned result data
        """
        data = {
            'appid': self.appid,
            'out_trade_no': out_trade_no,
        }
        return self._post('pay/closeorder', data=data)
python
def close(self, out_trade_no):
        """
        Close an order

        :param out_trade_no: Merchant's internal order number
        :return: The returned result data
        """
        data = {
            'appid': self.appid,
            'out_trade_no': out_trade_no,
        }
        return self._post('pay/closeorder', data=data)
[ "def", "close", "(", "self", ",", "out_trade_no", ")", ":", "data", "=", "{", "'appid'", ":", "self", ".", "appid", ",", "'out_trade_no'", ":", "out_trade_no", ",", "}", "return", "self", ".", "_post", "(", "'pay/closeorder'", ",", "data", "=", "data", ")" ]
Close an order

        :param out_trade_no: Merchant's internal order number
        :return: The returned result data
[ "关闭订单" ]
train
https://github.com/jxtech/wechatpy/blob/4df0da795618c0895a10f1c2cde9e9d5c0a93aaa/wechatpy/pay/api/order.py#L97-L108
0.006897
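Hypothetical usage sketch: closing an unpaid order through the pay client. The credentials and order number below are placeholders, and the exact WeChatPay constructor arguments are an assumption, not taken from this record.

from wechatpy.pay import WeChatPay

pay = WeChatPay(appid='wx1234567890abcdef',
                api_key='your-32-char-api-key',
                mch_id='1230000109')
result = pay.order.close(out_trade_no='20150806125346')
print(result)  # parsed response of the pay/closeorder call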
jgilchrist/pybib
pybib/formatters.py
color_parts
def color_parts(parts): """Adds colors to each part of the citation""" return parts._replace( title=Fore.GREEN + parts.title + Style.RESET_ALL, doi=Fore.CYAN + parts.doi + Style.RESET_ALL )
python
def color_parts(parts): """Adds colors to each part of the citation""" return parts._replace( title=Fore.GREEN + parts.title + Style.RESET_ALL, doi=Fore.CYAN + parts.doi + Style.RESET_ALL )
[ "def", "color_parts", "(", "parts", ")", ":", "return", "parts", ".", "_replace", "(", "title", "=", "Fore", ".", "GREEN", "+", "parts", ".", "title", "+", "Style", ".", "RESET_ALL", ",", "doi", "=", "Fore", ".", "CYAN", "+", "parts", ".", "doi", "+", "Style", ".", "RESET_ALL", ")" ]
Adds colors to each part of the citation
[ "Adds", "colors", "to", "each", "part", "of", "the", "citation" ]
train
https://github.com/jgilchrist/pybib/blob/da2130d281bb02e930728ed7c1d0c1dffa747ee0/pybib/formatters.py#L22-L27
0.004608
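A minimal demonstration of the namedtuple _replace pattern above; Citation is a stand-in for whatever parts type pybib actually passes in.

from collections import namedtuple
from colorama import Fore, Style

Citation = namedtuple('Citation', ['title', 'doi'])

def color_parts(parts):
    # Wrap two fields in colorama escape codes, leaving the rest untouched.
    return parts._replace(
        title=Fore.GREEN + parts.title + Style.RESET_ALL,
        doi=Fore.CYAN + parts.doi + Style.RESET_ALL)

print(color_parts(Citation(title='A Paper', doi='10.1000/xyz123')))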
zhmcclient/python-zhmcclient
zhmcclient_mock/_urihandler.py
CpcStopHandler.post
def post(method, hmc, uri, uri_parms, body, logon_required, wait_for_completion): """Operation: Stop CPC (requires DPM mode).""" assert wait_for_completion is True # async not supported yet cpc_oid = uri_parms[0] try: cpc = hmc.cpcs.lookup_by_oid(cpc_oid) except KeyError: raise InvalidResourceError(method, uri) if not cpc.dpm_enabled: raise CpcNotInDpmError(method, uri, cpc) cpc.properties['status'] = 'not-operating'
python
def post(method, hmc, uri, uri_parms, body, logon_required, wait_for_completion): """Operation: Stop CPC (requires DPM mode).""" assert wait_for_completion is True # async not supported yet cpc_oid = uri_parms[0] try: cpc = hmc.cpcs.lookup_by_oid(cpc_oid) except KeyError: raise InvalidResourceError(method, uri) if not cpc.dpm_enabled: raise CpcNotInDpmError(method, uri, cpc) cpc.properties['status'] = 'not-operating'
[ "def", "post", "(", "method", ",", "hmc", ",", "uri", ",", "uri_parms", ",", "body", ",", "logon_required", ",", "wait_for_completion", ")", ":", "assert", "wait_for_completion", "is", "True", "# async not supported yet", "cpc_oid", "=", "uri_parms", "[", "0", "]", "try", ":", "cpc", "=", "hmc", ".", "cpcs", ".", "lookup_by_oid", "(", "cpc_oid", ")", "except", "KeyError", ":", "raise", "InvalidResourceError", "(", "method", ",", "uri", ")", "if", "not", "cpc", ".", "dpm_enabled", ":", "raise", "CpcNotInDpmError", "(", "method", ",", "uri", ",", "cpc", ")", "cpc", ".", "properties", "[", "'status'", "]", "=", "'not-operating'" ]
Operation: Stop CPC (requires DPM mode).
[ "Operation", ":", "Stop", "CPC", "(", "requires", "DPM", "mode", ")", "." ]
train
https://github.com/zhmcclient/python-zhmcclient/blob/9657563e5d9184c51d3c903442a58b9725fdf335/zhmcclient_mock/_urihandler.py#L1110-L1121
0.005703
iamjarret/pystockfish
pystockfish.py
Match.move
def move(self): """ Advance game by single move, if possible. @return: logical indicator if move was performed. """ if len(self.moves) == MAX_MOVES: return False elif len(self.moves) % 2: active_engine = self.black_engine active_engine_name = self.black inactive_engine = self.white_engine inactive_engine_name = self.white else: active_engine = self.white_engine active_engine_name = self.white inactive_engine = self.black_engine inactive_engine_name = self.black active_engine.setposition(self.moves) movedict = active_engine.bestmove() bestmove = movedict.get('move') info = movedict.get('info') ponder = movedict.get('ponder') self.moves.append(bestmove) if info["score"]["eval"] == "mate": matenum = info["score"]["value"] if matenum > 0: self.winner_engine = active_engine self.winner = active_engine_name elif matenum < 0: self.winner_engine = inactive_engine self.winner = inactive_engine_name return False if ponder != '(none)': return True
python
def move(self): """ Advance game by single move, if possible. @return: logical indicator if move was performed. """ if len(self.moves) == MAX_MOVES: return False elif len(self.moves) % 2: active_engine = self.black_engine active_engine_name = self.black inactive_engine = self.white_engine inactive_engine_name = self.white else: active_engine = self.white_engine active_engine_name = self.white inactive_engine = self.black_engine inactive_engine_name = self.black active_engine.setposition(self.moves) movedict = active_engine.bestmove() bestmove = movedict.get('move') info = movedict.get('info') ponder = movedict.get('ponder') self.moves.append(bestmove) if info["score"]["eval"] == "mate": matenum = info["score"]["value"] if matenum > 0: self.winner_engine = active_engine self.winner = active_engine_name elif matenum < 0: self.winner_engine = inactive_engine self.winner = inactive_engine_name return False if ponder != '(none)': return True
[ "def", "move", "(", "self", ")", ":", "if", "len", "(", "self", ".", "moves", ")", "==", "MAX_MOVES", ":", "return", "False", "elif", "len", "(", "self", ".", "moves", ")", "%", "2", ":", "active_engine", "=", "self", ".", "black_engine", "active_engine_name", "=", "self", ".", "black", "inactive_engine", "=", "self", ".", "white_engine", "inactive_engine_name", "=", "self", ".", "white", "else", ":", "active_engine", "=", "self", ".", "white_engine", "active_engine_name", "=", "self", ".", "white", "inactive_engine", "=", "self", ".", "black_engine", "inactive_engine_name", "=", "self", ".", "black", "active_engine", ".", "setposition", "(", "self", ".", "moves", ")", "movedict", "=", "active_engine", ".", "bestmove", "(", ")", "bestmove", "=", "movedict", ".", "get", "(", "'move'", ")", "info", "=", "movedict", ".", "get", "(", "'info'", ")", "ponder", "=", "movedict", ".", "get", "(", "'ponder'", ")", "self", ".", "moves", ".", "append", "(", "bestmove", ")", "if", "info", "[", "\"score\"", "]", "[", "\"eval\"", "]", "==", "\"mate\"", ":", "matenum", "=", "info", "[", "\"score\"", "]", "[", "\"value\"", "]", "if", "matenum", ">", "0", ":", "self", ".", "winner_engine", "=", "active_engine", "self", ".", "winner", "=", "active_engine_name", "elif", "matenum", "<", "0", ":", "self", ".", "winner_engine", "=", "inactive_engine", "self", ".", "winner", "=", "inactive_engine_name", "return", "False", "if", "ponder", "!=", "'(none)'", ":", "return", "True" ]
Advance game by single move, if possible. @return: logical indicator if move was performed.
[ "Advance", "game", "by", "single", "move", "if", "possible", "." ]
train
https://github.com/iamjarret/pystockfish/blob/ae34a4b4d29c577c888b72691fcf0cb5a89b1792/pystockfish.py#L54-L90
0.001533
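move() returns True while the game can continue and a falsy value otherwise (False once a mate score appears, None when the engine has no ponder move), so a match reduces to a simple driver loop. The Engine/Match constructor arguments here are an assumption about this library's API, not taken from the record.

from pystockfish import Engine, Match

match = Match(engines={'white': Engine(depth=10), 'black': Engine(depth=15)})
while match.move():      # advance one half-move at a time
    pass
print(match.winner)                      # winning side, or None on a draw
print(len(match.moves), 'half-moves played')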
hyperledger/indy-plenum
plenum/server/view_change/view_changer.py
ViewChanger.process_instance_change_msg
def process_instance_change_msg(self, instChg: InstanceChange, frm: str) -> None: """ Validate and process an instance change request. :param instChg: the instance change request :param frm: the name of the node that sent this `msg` """ if frm not in self.provider.connected_nodes(): self.provider.discard( instChg, "received instance change request: {} from {} " "which is not in connected list: {}".format( instChg, frm, self.provider.connected_nodes()), logger.info) return logger.info("{} received instance change request: {} from {}".format(self, instChg, frm)) # TODO: add sender to blacklist? if not isinstance(instChg.viewNo, int): self.provider.discard( instChg, "{}field view_no has incorrect type: {}".format( VIEW_CHANGE_PREFIX, type(instChg.viewNo))) elif instChg.viewNo <= self.view_no: self.provider.discard( instChg, "Received instance change request with view no {} " "which is not more than its view no {}".format( instChg.viewNo, self.view_no), logger.info) else: # Record instance changes for views but send instance change # only when found master to be degraded. if quorum of view changes # found then change view even if master not degraded self._on_verified_instance_change_msg(instChg, frm) if self.instance_changes.has_inst_chng_from(instChg.viewNo, self.name): logger.info("{} received instance change message {} but has already " "sent an instance change message".format(self, instChg)) elif not self.provider.is_master_degraded(): logger.info("{} received instance change message {} but did not " "find the master to be slow".format(self, instChg)) else: logger.display("{}{} found master degraded after receiving instance change" " message from {}".format(VIEW_CHANGE_PREFIX, self, frm)) self.sendInstanceChange(instChg.viewNo)
python
def process_instance_change_msg(self, instChg: InstanceChange, frm: str) -> None: """ Validate and process an instance change request. :param instChg: the instance change request :param frm: the name of the node that sent this `msg` """ if frm not in self.provider.connected_nodes(): self.provider.discard( instChg, "received instance change request: {} from {} " "which is not in connected list: {}".format( instChg, frm, self.provider.connected_nodes()), logger.info) return logger.info("{} received instance change request: {} from {}".format(self, instChg, frm)) # TODO: add sender to blacklist? if not isinstance(instChg.viewNo, int): self.provider.discard( instChg, "{}field view_no has incorrect type: {}".format( VIEW_CHANGE_PREFIX, type(instChg.viewNo))) elif instChg.viewNo <= self.view_no: self.provider.discard( instChg, "Received instance change request with view no {} " "which is not more than its view no {}".format( instChg.viewNo, self.view_no), logger.info) else: # Record instance changes for views but send instance change # only when found master to be degraded. if quorum of view changes # found then change view even if master not degraded self._on_verified_instance_change_msg(instChg, frm) if self.instance_changes.has_inst_chng_from(instChg.viewNo, self.name): logger.info("{} received instance change message {} but has already " "sent an instance change message".format(self, instChg)) elif not self.provider.is_master_degraded(): logger.info("{} received instance change message {} but did not " "find the master to be slow".format(self, instChg)) else: logger.display("{}{} found master degraded after receiving instance change" " message from {}".format(VIEW_CHANGE_PREFIX, self, frm)) self.sendInstanceChange(instChg.viewNo)
[ "def", "process_instance_change_msg", "(", "self", ",", "instChg", ":", "InstanceChange", ",", "frm", ":", "str", ")", "->", "None", ":", "if", "frm", "not", "in", "self", ".", "provider", ".", "connected_nodes", "(", ")", ":", "self", ".", "provider", ".", "discard", "(", "instChg", ",", "\"received instance change request: {} from {} \"", "\"which is not in connected list: {}\"", ".", "format", "(", "instChg", ",", "frm", ",", "self", ".", "provider", ".", "connected_nodes", "(", ")", ")", ",", "logger", ".", "info", ")", "return", "logger", ".", "info", "(", "\"{} received instance change request: {} from {}\"", ".", "format", "(", "self", ",", "instChg", ",", "frm", ")", ")", "# TODO: add sender to blacklist?", "if", "not", "isinstance", "(", "instChg", ".", "viewNo", ",", "int", ")", ":", "self", ".", "provider", ".", "discard", "(", "instChg", ",", "\"{}field view_no has incorrect type: {}\"", ".", "format", "(", "VIEW_CHANGE_PREFIX", ",", "type", "(", "instChg", ".", "viewNo", ")", ")", ")", "elif", "instChg", ".", "viewNo", "<=", "self", ".", "view_no", ":", "self", ".", "provider", ".", "discard", "(", "instChg", ",", "\"Received instance change request with view no {} \"", "\"which is not more than its view no {}\"", ".", "format", "(", "instChg", ".", "viewNo", ",", "self", ".", "view_no", ")", ",", "logger", ".", "info", ")", "else", ":", "# Record instance changes for views but send instance change", "# only when found master to be degraded. if quorum of view changes", "# found then change view even if master not degraded", "self", ".", "_on_verified_instance_change_msg", "(", "instChg", ",", "frm", ")", "if", "self", ".", "instance_changes", ".", "has_inst_chng_from", "(", "instChg", ".", "viewNo", ",", "self", ".", "name", ")", ":", "logger", ".", "info", "(", "\"{} received instance change message {} but has already \"", "\"sent an instance change message\"", ".", "format", "(", "self", ",", "instChg", ")", ")", "elif", "not", "self", ".", "provider", ".", "is_master_degraded", "(", ")", ":", "logger", ".", "info", "(", "\"{} received instance change message {} but did not \"", "\"find the master to be slow\"", ".", "format", "(", "self", ",", "instChg", ")", ")", "else", ":", "logger", ".", "display", "(", "\"{}{} found master degraded after receiving instance change\"", "\" message from {}\"", ".", "format", "(", "VIEW_CHANGE_PREFIX", ",", "self", ",", "frm", ")", ")", "self", ".", "sendInstanceChange", "(", "instChg", ".", "viewNo", ")" ]
Validate and process an instance change request. :param instChg: the instance change request :param frm: the name of the node that sent this `msg`
[ "Validate", "and", "process", "an", "instance", "change", "request", "." ]
train
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/view_change/view_changer.py#L373-L416
0.004772
UCL-INGI/INGInious
inginious/frontend/template_helper.py
TemplateHelper._generic_hook
def _generic_hook(self, name, **kwargs): """ A generic hook that links the TemplateHelper with PluginManager """ entries = [entry for entry in self._plugin_manager.call_hook(name, **kwargs) if entry is not None] return "\n".join(entries)
python
def _generic_hook(self, name, **kwargs): """ A generic hook that links the TemplateHelper with PluginManager """ entries = [entry for entry in self._plugin_manager.call_hook(name, **kwargs) if entry is not None] return "\n".join(entries)
[ "def", "_generic_hook", "(", "self", ",", "name", ",", "*", "*", "kwargs", ")", ":", "entries", "=", "[", "entry", "for", "entry", "in", "self", ".", "_plugin_manager", ".", "call_hook", "(", "name", ",", "*", "*", "kwargs", ")", "if", "entry", "is", "not", "None", "]", "return", "\"\\n\"", ".", "join", "(", "entries", ")" ]
A generic hook that links the TemplateHelper with PluginManager
[ "A", "generic", "hook", "that", "links", "the", "TemplateHelper", "with", "PluginManager" ]
train
https://github.com/UCL-INGI/INGInious/blob/cbda9a9c7f2b8e8eb1e6d7d51f0d18092086300c/inginious/frontend/template_helper.py#L148-L151
0.011494
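The joining behaviour in isolation: hook callbacks that return None are dropped, and the rest become one newline-separated template fragment.

def join_hook_entries(entries):
    # Mirrors the filter-then-join done by _generic_hook above.
    return "\n".join(entry for entry in entries if entry is not None)

print(join_hook_entries(['<script src="plugin.js"></script>', None, '<!-- x -->']))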
quantmind/pulsar
pulsar/utils/importer.py
expand_star
def expand_star(mod_name): """Expand something like 'unuk.tasks.*' into a list of all the modules there. """ expanded = [] mod_dir = os.path.dirname( __import__(mod_name[:-2], {}, {}, ['']).__file__) for f in glob.glob1(mod_dir, "[!_]*.py"): expanded.append('%s.%s' % (mod_name[:-2], f[:-3])) return expanded
python
def expand_star(mod_name): """Expand something like 'unuk.tasks.*' into a list of all the modules there. """ expanded = [] mod_dir = os.path.dirname( __import__(mod_name[:-2], {}, {}, ['']).__file__) for f in glob.glob1(mod_dir, "[!_]*.py"): expanded.append('%s.%s' % (mod_name[:-2], f[:-3])) return expanded
[ "def", "expand_star", "(", "mod_name", ")", ":", "expanded", "=", "[", "]", "mod_dir", "=", "os", ".", "path", ".", "dirname", "(", "__import__", "(", "mod_name", "[", ":", "-", "2", "]", ",", "{", "}", ",", "{", "}", ",", "[", "''", "]", ")", ".", "__file__", ")", "for", "f", "in", "glob", ".", "glob1", "(", "mod_dir", ",", "\"[!_]*.py\"", ")", ":", "expanded", ".", "append", "(", "'%s.%s'", "%", "(", "mod_name", "[", ":", "-", "2", "]", ",", "f", "[", ":", "-", "3", "]", ")", ")", "return", "expanded" ]
Expand something like 'unuk.tasks.*' into a list of all the modules there.
[ "Expand", "something", "like", "unuk", ".", "tasks", ".", "*", "into", "a", "list", "of", "all", "the", "modules", "there", "." ]
train
https://github.com/quantmind/pulsar/blob/fee44e871954aa6ca36d00bb5a3739abfdb89b26/pulsar/utils/importer.py#L20-L29
0.002841
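A self-contained illustration: build a throwaway package on disk so the __import__ inside expand_star has something to resolve. It assumes pulsar is importable; otherwise paste the function above into scope. Note the "[!_]*.py" pattern excludes both __init__.py and any _private.py modules.

import os, sys, tempfile
from pulsar.utils.importer import expand_star

tmp = tempfile.mkdtemp()
pkg = os.path.join(tmp, 'mypkg')
os.mkdir(pkg)
for name in ('__init__.py', 'alpha.py', 'beta.py', '_private.py'):
    open(os.path.join(pkg, name), 'w').close()
sys.path.insert(0, tmp)

print(sorted(expand_star('mypkg.*')))  # ['mypkg.alpha', 'mypkg.beta']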
facelessuser/wcmatch
wcmatch/glob.py
Glob._get_starting_paths
def _get_starting_paths(self, curdir): """ Get the starting location. For case sensitive paths, we have to "glob" for it first as Python doesn't like for its users to think about case. By scanning for it, we can get the actual casing and then compare. """ results = [curdir] if not self._is_parent(curdir) and not self._is_this(curdir): fullpath = os.path.abspath(curdir) basename = os.path.basename(fullpath) dirname = os.path.dirname(fullpath) if basename: matcher = self._get_matcher(basename) results = [os.path.basename(name) for name in self._glob_dir(dirname, matcher, self)] return results
python
def _get_starting_paths(self, curdir): """ Get the starting location. For case sensitive paths, we have to "glob" for it first as Python doesn't like for its users to think about case. By scanning for it, we can get the actual casing and then compare. """ results = [curdir] if not self._is_parent(curdir) and not self._is_this(curdir): fullpath = os.path.abspath(curdir) basename = os.path.basename(fullpath) dirname = os.path.dirname(fullpath) if basename: matcher = self._get_matcher(basename) results = [os.path.basename(name) for name in self._glob_dir(dirname, matcher, self)] return results
[ "def", "_get_starting_paths", "(", "self", ",", "curdir", ")", ":", "results", "=", "[", "curdir", "]", "if", "not", "self", ".", "_is_parent", "(", "curdir", ")", "and", "not", "self", ".", "_is_this", "(", "curdir", ")", ":", "fullpath", "=", "os", ".", "path", ".", "abspath", "(", "curdir", ")", "basename", "=", "os", ".", "path", ".", "basename", "(", "fullpath", ")", "dirname", "=", "os", ".", "path", ".", "dirname", "(", "fullpath", ")", "if", "basename", ":", "matcher", "=", "self", ".", "_get_matcher", "(", "basename", ")", "results", "=", "[", "os", ".", "path", ".", "basename", "(", "name", ")", "for", "name", "in", "self", ".", "_glob_dir", "(", "dirname", ",", "matcher", ",", "self", ")", "]", "return", "results" ]
Get the starting location. For case sensitive paths, we have to "glob" for it first as Python doesn't like for its users to think about case. By scanning for it, we can get the actual casing and then compare.
[ "Get", "the", "starting", "location", "." ]
train
https://github.com/facelessuser/wcmatch/blob/d153e7007cc73b994ae1ba553dc4584039f5c212/wcmatch/glob.py#L317-L337
0.003942
tensorflow/mesh
mesh_tensorflow/placement_mesh_impl.py
PlacementMeshImpl.receive
def receive(self, x, mesh_axis, source_pcoord): """Collective receive in groups. Each group contains the processors that differ only in mesh_axis. ```python group_size = self.shape[mesh_axis].size ``` Args: x: a LaidOutTensor mesh_axis: an integer source_pcoord: a list of optional integers. Each element is either None or an integer in [0, group_size). If source_pcoord[k] is None, then the output for the k-th processor in each group is a zero tensor. If source_pcoord[k] is not None, then the output for the k-th processor in each group is equal to the input for the source_pcoord[k]-th processor in that group. Returns: a LaidOutTensor """ x = x.to_laid_out_tensor() shape = x.tensor_list[0].shape dtype = x.tensor_list[0].dtype def _collective_receive(tensor_list, device_list): ret = [] for pcoord, device in enumerate(device_list): with tf.device(device): if source_pcoord[pcoord] is None: ret.append(tf.zeros(shape, dtype)) else: ret.append(tf.identity(tensor_list[source_pcoord[pcoord]])) return ret return self._collective_with_groups( x, [mesh_axis], _collective_receive)
python
def receive(self, x, mesh_axis, source_pcoord): """Collective receive in groups. Each group contains the processors that differ only in mesh_axis. ```python group_size = self.shape[mesh_axis].size ``` Args: x: a LaidOutTensor mesh_axis: an integer source_pcoord: a list of optional integers. Each element is either None or an integer in [0, group_size). If source_pcoord[k] is None, then the output for the k-th processor in each group is a zero tensor. If source_pcoord[k] is not None, then the output for the k-th processor in each group is equal to the input for the source_pcoord[k]-th processor in that group. Returns: a LaidOutTensor """ x = x.to_laid_out_tensor() shape = x.tensor_list[0].shape dtype = x.tensor_list[0].dtype def _collective_receive(tensor_list, device_list): ret = [] for pcoord, device in enumerate(device_list): with tf.device(device): if source_pcoord[pcoord] is None: ret.append(tf.zeros(shape, dtype)) else: ret.append(tf.identity(tensor_list[source_pcoord[pcoord]])) return ret return self._collective_with_groups( x, [mesh_axis], _collective_receive)
[ "def", "receive", "(", "self", ",", "x", ",", "mesh_axis", ",", "source_pcoord", ")", ":", "x", "=", "x", ".", "to_laid_out_tensor", "(", ")", "shape", "=", "x", ".", "tensor_list", "[", "0", "]", ".", "shape", "dtype", "=", "x", ".", "tensor_list", "[", "0", "]", ".", "dtype", "def", "_collective_receive", "(", "tensor_list", ",", "device_list", ")", ":", "ret", "=", "[", "]", "for", "pcoord", ",", "device", "in", "enumerate", "(", "device_list", ")", ":", "with", "tf", ".", "device", "(", "device", ")", ":", "if", "source_pcoord", "[", "pcoord", "]", "is", "None", ":", "ret", ".", "append", "(", "tf", ".", "zeros", "(", "shape", ",", "dtype", ")", ")", "else", ":", "ret", ".", "append", "(", "tf", ".", "identity", "(", "tensor_list", "[", "source_pcoord", "[", "pcoord", "]", "]", ")", ")", "return", "ret", "return", "self", ".", "_collective_with_groups", "(", "x", ",", "[", "mesh_axis", "]", ",", "_collective_receive", ")" ]
Collective receive in groups. Each group contains the processors that differ only in mesh_axis. ```python group_size = self.shape[mesh_axis].size ``` Args: x: a LaidOutTensor mesh_axis: an integer source_pcoord: a list of optional integers. Each element is either None or an integer in [0, group_size). If source_pcoord[k] is None, then the output for the k-th processor in each group is a zero tensor. If source_pcoord[k] is not None, then the output for the k-th processor in each group is equal to the input for the source_pcoord[k]-th processor in that group. Returns: a LaidOutTensor
[ "Collective", "receive", "in", "groups", "." ]
train
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/placement_mesh_impl.py#L248-L283
0.005469
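A pure-Python picture of the source_pcoord semantics for a single group: output k is a zero tensor when source_pcoord[k] is None, otherwise a copy of the value held by processor source_pcoord[k] in the same group.

def group_receive(values, source_pcoord, zero=0):
    # One entry per processor in the group, in processor-coordinate order.
    return [zero if src is None else values[src] for src in source_pcoord]

# A shift by one within a 4-way group, as used for halo exchanges:
print(group_receive(['a', 'b', 'c', 'd'], [None, 0, 1, 2]))  # [0, 'a', 'b', 'c']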
bunq/sdk_python
bunq/sdk/json/converter.py
JsonAdapter._fill_default_values
def _fill_default_values(cls, cls_context, dict_): """ :type cls_context: type :type dict_: dict :rtype: dict """ dict_with_default_values = dict(dict_) params = re.findall(cls._PATTERN_PARAM_NAME_TYPED_ANY, cls_context.__doc__) for param in params: if param not in dict_with_default_values: dict_with_default_values[param] = None return dict_with_default_values
python
def _fill_default_values(cls, cls_context, dict_): """ :type cls_context: type :type dict_: dict :rtype: dict """ dict_with_default_values = dict(dict_) params = re.findall(cls._PATTERN_PARAM_NAME_TYPED_ANY, cls_context.__doc__) for param in params: if param not in dict_with_default_values: dict_with_default_values[param] = None return dict_with_default_values
[ "def", "_fill_default_values", "(", "cls", ",", "cls_context", ",", "dict_", ")", ":", "dict_with_default_values", "=", "dict", "(", "dict_", ")", "params", "=", "re", ".", "findall", "(", "cls", ".", "_PATTERN_PARAM_NAME_TYPED_ANY", ",", "cls_context", ".", "__doc__", ")", "for", "param", "in", "params", ":", "if", "param", "not", "in", "dict_with_default_values", ":", "dict_with_default_values", "[", "param", "]", "=", "None", "return", "dict_with_default_values" ]
:type cls_context: type :type dict_: dict :rtype: dict
[ ":", "type", "cls_context", ":", "type", ":", "type", "dict_", ":", "dict" ]
train
https://github.com/bunq/sdk_python/blob/da6c9b83e6d83ee8062617f53c6eb7293c0d863d/bunq/sdk/json/converter.py#L394-L410
0.004049
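The idea in miniature: harvest parameter names from a class docstring and default the missing ones to None. The regex is a simplified stand-in for _PATTERN_PARAM_NAME_TYPED_ANY and the Payment class is hypothetical.

import re

class Payment:
    """
    :type amount_: str
    :type counterparty_: str
    :type description_: str
    """

params = re.findall(r':type (\w+):', Payment.__doc__)
dict_ = {'amount_': '10.00'}
filled = dict(dict_, **{p: None for p in params if p not in dict_})
print(filled)  # {'amount_': '10.00', 'counterparty_': None, 'description_': None}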
CiscoUcs/UcsPythonSDK
src/UcsSdk/UcsBase.py
UcsUtils.GetUcsPropertyMeta
def GetUcsPropertyMeta(classId, key):
    """ Method returns the property meta of the provided key for the given classId. """
    if classId in _ManagedObjectMeta:
        if key in _ManagedObjectMeta[classId]:
            return _ManagedObjectMeta[classId][key]
    return None
python
def GetUcsPropertyMeta(classId, key):
    """ Method returns the property meta of the provided key for the given classId. """
    if classId in _ManagedObjectMeta:
        if key in _ManagedObjectMeta[classId]:
            return _ManagedObjectMeta[classId][key]
    return None
[ "def", "GetUcsPropertyMeta", "(", "classId", ",", "key", ")", ":", "if", "classId", "in", "_ManagedObjectMeta", ":", "if", "key", "in", "_ManagedObjectMeta", "[", "classId", "]", ":", "return", "_ManagedObjectMeta", "[", "classId", "]", "[", "key", "]", "return", "None" ]
Method returns the property meta of the provided key for the given classId.
[ "Methods", "returns", "the", "property", "meta", "of", "the", "provided", "key", "for", "the", "given", "classId", "." ]
train
https://github.com/CiscoUcs/UcsPythonSDK/blob/bf6b07d6abeacb922c92b198352eda4eb9e4629b/src/UcsSdk/UcsBase.py#L518-L523
0.030769
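The nested membership tests above collapse to chained `dict.get` calls; a toy sketch with a made-up metadata table:

```python
# Toy stand-in for the module-level _ManagedObjectMeta table.
_ManagedObjectMeta = {
    "lsServer": {"Dn": {"access": "read-only"}},
}

def get_ucs_property_meta(class_id, key):
    # Return the property meta for (class_id, key), or None if either is absent.
    return _ManagedObjectMeta.get(class_id, {}).get(key)

print(get_ucs_property_meta("lsServer", "Dn"))    # -> {'access': 'read-only'}
print(get_ucs_property_meta("lsServer", "Uuid"))  # -> None
```

Both forms behave identically; the `.get` chain just avoids the double lookup.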
jobovy/galpy
galpy/orbit/planarOrbit.py
planarOrbitTop.rap
def rap(self,analytic=False,pot=None,**kwargs): """ NAME: rap PURPOSE: return the apocenter radius INPUT: analytic - compute this analytically pot - potential to use for analytical calculation OUTPUT: R_ap HISTORY: 2010-09-20 - Written - Bovy (NYU) """ if analytic: self._setupaA(pot=pot,type='adiabatic') (rperi,rap)= self._aA.calcRapRperi(self) return rap if not hasattr(self,'orbit'): raise AttributeError("Integrate the orbit first") if not hasattr(self,'rs'): self.rs= self.orbit[:,0] return nu.amax(self.rs)
python
def rap(self,analytic=False,pot=None,**kwargs): """ NAME: rap PURPOSE: return the apocenter radius INPUT: analytic - compute this analytically pot - potential to use for analytical calculation OUTPUT: R_ap HISTORY: 2010-09-20 - Written - Bovy (NYU) """ if analytic: self._setupaA(pot=pot,type='adiabatic') (rperi,rap)= self._aA.calcRapRperi(self) return rap if not hasattr(self,'orbit'): raise AttributeError("Integrate the orbit first") if not hasattr(self,'rs'): self.rs= self.orbit[:,0] return nu.amax(self.rs)
[ "def", "rap", "(", "self", ",", "analytic", "=", "False", ",", "pot", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "analytic", ":", "self", ".", "_setupaA", "(", "pot", "=", "pot", ",", "type", "=", "'adiabatic'", ")", "(", "rperi", ",", "rap", ")", "=", "self", ".", "_aA", ".", "calcRapRperi", "(", "self", ")", "return", "rap", "if", "not", "hasattr", "(", "self", ",", "'orbit'", ")", ":", "raise", "AttributeError", "(", "\"Integrate the orbit first\"", ")", "if", "not", "hasattr", "(", "self", ",", "'rs'", ")", ":", "self", ".", "rs", "=", "self", ".", "orbit", "[", ":", ",", "0", "]", "return", "nu", ".", "amax", "(", "self", ".", "rs", ")" ]
NAME: rap PURPOSE: return the apocenter radius INPUT: analytic - compute this analytically pot - potential to use for analytical calculation OUTPUT: R_ap HISTORY: 2010-09-20 - Written - Bovy (NYU)
[ "NAME", ":", "rap", "PURPOSE", ":", "return", "the", "apocenter", "radius", "INPUT", ":", "analytic", "-", "compute", "this", "analytically", "pot", "-", "potential", "to", "use", "for", "analytical", "calculation", "OUTPUT", ":", "R_ap", "HISTORY", ":", "2010", "-", "09", "-", "20", "-", "Written", "-", "Bovy", "(", "NYU", ")" ]
train
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/orbit/planarOrbit.py#L128-L150
0.016506
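In the non-analytic branch, the apocenter is just the largest radius reached along the integrated orbit. A small numpy sketch with made-up orbit samples:

```python
import numpy as np

# Toy stand-in for the integrated orbit: column 0 holds R at each time step.
orbit = np.array([[1.0, 0.1], [1.4, 0.0], [0.8, -0.1]])

rs = orbit[:, 0]      # radii along the orbit, as rap() caches in self.rs
print(np.amax(rs))    # apocenter R_ap -> 1.4
print(np.amin(rs))    # the analogous pericenter R_peri -> 0.8
```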
LeastAuthority/txkube
src/txkube/_invariants.py
instance_of
def instance_of(cls): """ Create an invariant requiring the value is an instance of ``cls``. """ def check(value): return ( isinstance(value, cls), u"{value!r} is instance of {actual!s}, required {required!s}".format( value=value, actual=fullyQualifiedName(type(value)), required=fullyQualifiedName(cls), ), ) return check
python
def instance_of(cls): """ Create an invariant requiring the value is an instance of ``cls``. """ def check(value): return ( isinstance(value, cls), u"{value!r} is instance of {actual!s}, required {required!s}".format( value=value, actual=fullyQualifiedName(type(value)), required=fullyQualifiedName(cls), ), ) return check
[ "def", "instance_of", "(", "cls", ")", ":", "def", "check", "(", "value", ")", ":", "return", "(", "isinstance", "(", "value", ",", "cls", ")", ",", "u\"{value!r} is instance of {actual!s}, required {required!s}\"", ".", "format", "(", "value", "=", "value", ",", "actual", "=", "fullyQualifiedName", "(", "type", "(", "value", ")", ")", ",", "required", "=", "fullyQualifiedName", "(", "cls", ")", ",", ")", ",", ")", "return", "check" ]
Create an invariant requiring the value is an instance of ``cls``.
[ "Create", "an", "invariant", "requiring", "the", "value", "is", "an", "instance", "of", "cls", "." ]
train
https://github.com/LeastAuthority/txkube/blob/a7e555d00535ff787d4b1204c264780da40cf736/src/txkube/_invariants.py#L10-L23
0.004525
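`instance_of` is a closure-based invariant factory: it returns a checker producing a `(passed, message)` pair. The same pattern without the Twisted `fullyQualifiedName` helper, as a sketch:

```python
def instance_of(cls):
    # Return an invariant: value -> (ok, human-readable failure message).
    def check(value):
        return (
            isinstance(value, cls),
            "{!r} is instance of {}, required {}".format(
                value, type(value).__name__, cls.__name__),
        )
    return check

is_int = instance_of(int)
print(is_int(3))    # -> (True, '3 is instance of int, required int')
print(is_int("3"))  # -> (False, "'3' is instance of str, required int")
```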
PmagPy/PmagPy
programs/demag_gui.py
Demag_GUI.on_btn_delete_fit
def on_btn_delete_fit(self, event): """ removes the current interpretation Parameters ---------- event : the wx.ButtonEvent that triggered this function """ self.delete_fit(self.current_fit, specimen=self.s)
python
def on_btn_delete_fit(self, event): """ removes the current interpretation Parameters ---------- event : the wx.ButtonEvent that triggered this function """ self.delete_fit(self.current_fit, specimen=self.s)
[ "def", "on_btn_delete_fit", "(", "self", ",", "event", ")", ":", "self", ".", "delete_fit", "(", "self", ".", "current_fit", ",", "specimen", "=", "self", ".", "s", ")" ]
removes the current interpretation Parameters ---------- event : the wx.ButtonEvent that triggered this function
[ "removes", "the", "current", "interpretation" ]
train
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/demag_gui.py#L8481-L8489
0.007576
cytoscape/py2cytoscape
py2cytoscape/data/cynetwork.py
CyNetwork.add_nodes
def add_nodes(self, node_name_list, dataframe=False): """ Add new nodes to the network :param node_name_list: list of node names, e.g. ['a', 'b', 'c'] :param dataframe: If True, return a pandas dataframe instead of a dict. :return: A dict mapping names to SUIDs for the newly-created nodes. """ res = self.session.post(self.__url + 'nodes', data=json.dumps(node_name_list), headers=HEADERS) check_response(res) nodes = res.json() if dataframe: return pd.DataFrame(nodes).set_index(['SUID']) else: return {node['name']: node['SUID'] for node in nodes}
python
def add_nodes(self, node_name_list, dataframe=False): """ Add new nodes to the network :param node_name_list: list of node names, e.g. ['a', 'b', 'c'] :param dataframe: If True, return a pandas dataframe instead of a dict. :return: A dict mapping names to SUIDs for the newly-created nodes. """ res = self.session.post(self.__url + 'nodes', data=json.dumps(node_name_list), headers=HEADERS) check_response(res) nodes = res.json() if dataframe: return pd.DataFrame(nodes).set_index(['SUID']) else: return {node['name']: node['SUID'] for node in nodes}
[ "def", "add_nodes", "(", "self", ",", "node_name_list", ",", "dataframe", "=", "False", ")", ":", "res", "=", "self", ".", "session", ".", "post", "(", "self", ".", "__url", "+", "'nodes'", ",", "data", "=", "json", ".", "dumps", "(", "node_name_list", ")", ",", "headers", "=", "HEADERS", ")", "check_response", "(", "res", ")", "nodes", "=", "res", ".", "json", "(", ")", "if", "dataframe", ":", "return", "pd", ".", "DataFrame", "(", "nodes", ")", ".", "set_index", "(", "[", "'SUID'", "]", ")", "else", ":", "return", "{", "node", "[", "'name'", "]", ":", "node", "[", "'SUID'", "]", "for", "node", "in", "nodes", "}" ]
Add new nodes to the network :param node_name_list: list of node names, e.g. ['a', 'b', 'c'] :param dataframe: If True, return a pandas dataframe instead of a dict. :return: A dict mapping names to SUIDs for the newly-created nodes.
[ "Add", "new", "nodes", "to", "the", "network" ]
train
https://github.com/cytoscape/py2cytoscape/blob/dd34de8d028f512314d0057168df7fef7c5d5195/py2cytoscape/data/cynetwork.py#L88-L102
0.004525
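The interesting part of `add_nodes` is the return shaping: the REST response is a list of `{name, SUID}` records that becomes either a lookup dict or a SUID-indexed DataFrame. A sketch with a hard-coded response standing in for the HTTP call:

```python
import pandas as pd

# Hypothetical response body from POST .../nodes, as add_nodes() receives it.
nodes = [{"name": "a", "SUID": 101}, {"name": "b", "SUID": 102}]

# dataframe=False path: name -> SUID mapping.
print({node["name"]: node["SUID"] for node in nodes})  # -> {'a': 101, 'b': 102}

# dataframe=True path: one 'name' column indexed by SUID.
print(pd.DataFrame(nodes).set_index(["SUID"]))
```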
Esri/ArcREST
src/arcrest/web/_base.py
BaseWebOperations._get
def _get(self, url,
         param_dict=None,
         securityHandler=None,
         additional_headers=None,
         handlers=None,
         proxy_url=None,
         proxy_port=None,
         compress=True,
         custom_handlers=None,
         out_folder=None,
         file_name=None):
    """
    Performs a GET operation
    Inputs:

    Output:
       returns dictionary, string or None
    """
    # ensure that no spaces are in the url
    url = url.replace(" ", "%20")
    pass_headers = {}
    if custom_handlers is None:
        custom_handlers = []
    if handlers is None:
        handlers = []
    if param_dict is None:
        param_dict = {}
    self._last_method = "GET"
    CHUNK = 4056
    param_dict, handler, cj = self._processHandler(securityHandler, param_dict)
    if additional_headers is not None:
        headers = [] + additional_headers
    else:
        headers = []
    pass_headers = {}
    if securityHandler and securityHandler.referer_url:
        pass_headers['referer'] = securityHandler.referer_url
    for h in headers:
        pass_headers[h[0]] = h[1]
    if compress:
        pass_headers['Accept-encoding'] = 'gzip'
    else:
        pass_headers['Accept-encoding'] = ""
    #headers.append(('User-Agent', USER_AGENT))
    pass_headers['User-Agent'] = self.useragent
    if len(param_dict.keys()) == 0:
        param_dict = None
    if handlers is None:
        handlers = []
    if handler is not None:
        handlers.append(handler)
    handlers.append(RedirectHandler())
    if self._verify == False and \
       sys.version_info[0:3] >= (2, 7, 9) and \
       hasattr(ssl,'create_default_context'):
        ctx = ssl.create_default_context()
        ctx.check_hostname = False
        ctx.verify_mode = ssl.CERT_NONE
        handlers.append(request.HTTPSHandler(context=ctx))
    if cj is not None:
        handlers.append(request.HTTPCookieProcessor(cj))
    if proxy_url is not None:
        if proxy_port is None:
            proxy_port = 80
        proxies = {"http":"http://%s:%s" % (proxy_url, proxy_port),
                   "https":"https://%s:%s" % (proxy_url, proxy_port)}
        proxy_support = request.ProxyHandler(proxies)
        handlers.append(proxy_support)
    opener = request.build_opener(*handlers)
    opener.addheaders = headers
    request.install_opener(opener)
    ctx = None
    hasContext = False
    if self._verify == False and \
       'context' in self._has_context(request.urlopen) and \
       sys.version_info[0:3] >= (2, 7, 9) and \
       hasattr(ssl,'create_default_context'):
        ctx = ssl.create_default_context()
        ctx.check_hostname = False
        ctx.verify_mode = ssl.CERT_NONE
        hasContext = True
    if hasContext == False:
        if param_dict is None:
            req = request.Request(self._asString(url), headers=pass_headers)
            resp = request.urlopen(req)
        elif len(str(urlencode(param_dict))) + len(url) >= 1999:
            return self._post(
                url=url,
                param_dict=param_dict,
                files=None,
                securityHandler=securityHandler,
                additional_headers=additional_headers,
                custom_handlers=custom_handlers,
                proxy_url=proxy_url,
                proxy_port=proxy_port,
                compress=compress,
                out_folder=out_folder,
                file_name=file_name,
                force_form_post=False)
        else:
            format_url = self._asString(url) + "?%s" % urlencode(param_dict)
            req = request.Request(format_url, headers=pass_headers)
            resp = request.urlopen(req)
    else:
        if param_dict is None:
            req = request.Request(self._asString(url), headers=pass_headers)
            resp = request.urlopen(req, context=ctx)
        elif len(str(urlencode(param_dict))) + len(url) >= 1999:
            return self._post(
                url=url,
                param_dict=param_dict,
                files=None,
                securityHandler=securityHandler,
                additional_headers=additional_headers,
                custom_handlers=custom_handlers,
                proxy_url=proxy_url,
                proxy_port=proxy_port,
                compress=compress,
                out_folder=out_folder,
                file_name=file_name,
                force_form_post=False)
        else:
            format_url = self._asString(url) + "?%s" % urlencode(param_dict)
            req = request.Request(format_url, headers=pass_headers)
            resp = request.urlopen(req, context=ctx)
    self._last_code = resp.getcode()
    self._last_url = resp.geturl()
    # Get some headers from the response
    maintype = self._mainType(resp)
    contentDisposition = resp.headers.get('content-disposition')
    contentMD5 = resp.headers.get('Content-MD5')
    #contentEncoding = resp.headers.get('content-encoding')
    contentType = resp.headers.get('content-Type').split(';')[0].lower()
    contentLength = resp.headers.get('content-length')
    if maintype.lower() in ('image', 'application/x-zip-compressed') or \
       contentType in ('application/x-zip-compressed', 'application/octet-stream') or \
       contentMD5 is not None or \
       (contentDisposition is not None and \
        contentDisposition.lower().find('attachment;') > -1):
        fname = self._get_file_name(
            contentDisposition=contentDisposition,
            url=url)
        if out_folder is None:
            out_folder = tempfile.gettempdir()
        if contentLength is not None:
            max_length = int(contentLength)
            if max_length < CHUNK:
                CHUNK = max_length
        file_name = os.path.join(out_folder, fname)
        with open(file_name, 'wb') as writer:
            for data in self._chunk(response=resp, size=CHUNK):
                writer.write(data)
                writer.flush()
            writer.flush()
        del writer
        return file_name
    else:
        read = ""
        for data in self._chunk(response=resp, size=CHUNK):
            if self.PY3 == True:
                read += data.decode('utf-8')
            else:
                read += data
            del data
        try:
            results = json.loads(read)
            if 'error' in results:
                if 'message' in results['error']:
                    if results['error']['message'] == 'Request not made over ssl':
                        if url.startswith('http://'):
                            url = url.replace('http://', 'https://')
                            return self._get(url, param_dict, securityHandler,
                                             additional_headers, handlers,
                                             proxy_url, proxy_port,
                                             compress, custom_handlers,
                                             out_folder, file_name)
            return results
        except:
            return read
python
def _get(self, url,
         param_dict=None,
         securityHandler=None,
         additional_headers=None,
         handlers=None,
         proxy_url=None,
         proxy_port=None,
         compress=True,
         custom_handlers=None,
         out_folder=None,
         file_name=None):
    """
    Performs a GET operation
    Inputs:

    Output:
       returns dictionary, string or None
    """
    # ensure that no spaces are in the url
    url = url.replace(" ", "%20")
    pass_headers = {}
    if custom_handlers is None:
        custom_handlers = []
    if handlers is None:
        handlers = []
    if param_dict is None:
        param_dict = {}
    self._last_method = "GET"
    CHUNK = 4056
    param_dict, handler, cj = self._processHandler(securityHandler, param_dict)
    if additional_headers is not None:
        headers = [] + additional_headers
    else:
        headers = []
    pass_headers = {}
    if securityHandler and securityHandler.referer_url:
        pass_headers['referer'] = securityHandler.referer_url
    for h in headers:
        pass_headers[h[0]] = h[1]
    if compress:
        pass_headers['Accept-encoding'] = 'gzip'
    else:
        pass_headers['Accept-encoding'] = ""
    #headers.append(('User-Agent', USER_AGENT))
    pass_headers['User-Agent'] = self.useragent
    if len(param_dict.keys()) == 0:
        param_dict = None
    if handlers is None:
        handlers = []
    if handler is not None:
        handlers.append(handler)
    handlers.append(RedirectHandler())
    if self._verify == False and \
       sys.version_info[0:3] >= (2, 7, 9) and \
       hasattr(ssl,'create_default_context'):
        ctx = ssl.create_default_context()
        ctx.check_hostname = False
        ctx.verify_mode = ssl.CERT_NONE
        handlers.append(request.HTTPSHandler(context=ctx))
    if cj is not None:
        handlers.append(request.HTTPCookieProcessor(cj))
    if proxy_url is not None:
        if proxy_port is None:
            proxy_port = 80
        proxies = {"http":"http://%s:%s" % (proxy_url, proxy_port),
                   "https":"https://%s:%s" % (proxy_url, proxy_port)}
        proxy_support = request.ProxyHandler(proxies)
        handlers.append(proxy_support)
    opener = request.build_opener(*handlers)
    opener.addheaders = headers
    request.install_opener(opener)
    ctx = None
    hasContext = False
    if self._verify == False and \
       'context' in self._has_context(request.urlopen) and \
       sys.version_info[0:3] >= (2, 7, 9) and \
       hasattr(ssl,'create_default_context'):
        ctx = ssl.create_default_context()
        ctx.check_hostname = False
        ctx.verify_mode = ssl.CERT_NONE
        hasContext = True
    if hasContext == False:
        if param_dict is None:
            req = request.Request(self._asString(url), headers=pass_headers)
            resp = request.urlopen(req)
        elif len(str(urlencode(param_dict))) + len(url) >= 1999:
            return self._post(
                url=url,
                param_dict=param_dict,
                files=None,
                securityHandler=securityHandler,
                additional_headers=additional_headers,
                custom_handlers=custom_handlers,
                proxy_url=proxy_url,
                proxy_port=proxy_port,
                compress=compress,
                out_folder=out_folder,
                file_name=file_name,
                force_form_post=False)
        else:
            format_url = self._asString(url) + "?%s" % urlencode(param_dict)
            req = request.Request(format_url, headers=pass_headers)
            resp = request.urlopen(req)
    else:
        if param_dict is None:
            req = request.Request(self._asString(url), headers=pass_headers)
            resp = request.urlopen(req, context=ctx)
        elif len(str(urlencode(param_dict))) + len(url) >= 1999:
            return self._post(
                url=url,
                param_dict=param_dict,
                files=None,
                securityHandler=securityHandler,
                additional_headers=additional_headers,
                custom_handlers=custom_handlers,
                proxy_url=proxy_url,
                proxy_port=proxy_port,
                compress=compress,
                out_folder=out_folder,
                file_name=file_name,
                force_form_post=False)
        else:
            format_url = self._asString(url) + "?%s" % urlencode(param_dict)
            req = request.Request(format_url, headers=pass_headers)
            resp = request.urlopen(req, context=ctx)
    self._last_code = resp.getcode()
    self._last_url = resp.geturl()
    # Get some headers from the response
    maintype = self._mainType(resp)
    contentDisposition = resp.headers.get('content-disposition')
    contentMD5 = resp.headers.get('Content-MD5')
    #contentEncoding = resp.headers.get('content-encoding')
    contentType = resp.headers.get('content-Type').split(';')[0].lower()
    contentLength = resp.headers.get('content-length')
    if maintype.lower() in ('image', 'application/x-zip-compressed') or \
       contentType in ('application/x-zip-compressed', 'application/octet-stream') or \
       contentMD5 is not None or \
       (contentDisposition is not None and \
        contentDisposition.lower().find('attachment;') > -1):
        fname = self._get_file_name(
            contentDisposition=contentDisposition,
            url=url)
        if out_folder is None:
            out_folder = tempfile.gettempdir()
        if contentLength is not None:
            max_length = int(contentLength)
            if max_length < CHUNK:
                CHUNK = max_length
        file_name = os.path.join(out_folder, fname)
        with open(file_name, 'wb') as writer:
            for data in self._chunk(response=resp, size=CHUNK):
                writer.write(data)
                writer.flush()
            writer.flush()
        del writer
        return file_name
    else:
        read = ""
        for data in self._chunk(response=resp, size=CHUNK):
            if self.PY3 == True:
                read += data.decode('utf-8')
            else:
                read += data
            del data
        try:
            results = json.loads(read)
            if 'error' in results:
                if 'message' in results['error']:
                    if results['error']['message'] == 'Request not made over ssl':
                        if url.startswith('http://'):
                            url = url.replace('http://', 'https://')
                            return self._get(url, param_dict, securityHandler,
                                             additional_headers, handlers,
                                             proxy_url, proxy_port,
                                             compress, custom_handlers,
                                             out_folder, file_name)
            return results
        except:
            return read
[ "def", "_get", "(", "self", ",", "url", ",", "param_dict", "=", "None", ",", "securityHandler", "=", "None", ",", "additional_headers", "=", "None", ",", "handlers", "=", "None", ",", "proxy_url", "=", "None", ",", "proxy_port", "=", "None", ",", "compress", "=", "True", ",", "custom_handlers", "=", "None", ",", "out_folder", "=", "None", ",", "file_name", "=", "None", ")", ":", "# ensure that no spaces are in the url", "url", "=", "url", ".", "replace", "(", "\" \"", ",", "\"%20\"", ")", "pass_headers", "=", "{", "}", "if", "custom_handlers", "is", "None", ":", "custom_handlers", "=", "[", "]", "if", "handlers", "is", "None", ":", "handlers", "=", "[", "]", "if", "param_dict", "is", "None", ":", "param_dict", "=", "{", "}", "self", ".", "_last_method", "=", "\"GET\"", "CHUNK", "=", "4056", "param_dict", ",", "handler", ",", "cj", "=", "self", ".", "_processHandler", "(", "securityHandler", ",", "param_dict", ")", "if", "additional_headers", "is", "not", "None", ":", "headers", "=", "[", "]", "+", "additional_headers", "else", ":", "headers", "=", "[", "]", "pass_headers", "=", "{", "}", "if", "securityHandler", "and", "securityHandler", ".", "referer_url", ":", "pass_headers", "[", "'referer'", "]", "=", "securityHandler", ".", "referer_url", "for", "h", "in", "headers", ":", "pass_headers", "[", "h", "[", "0", "]", "]", "=", "h", "[", "1", "]", "if", "compress", ":", "pass_headers", "[", "'Accept-encoding'", "]", "=", "'gzip'", "else", ":", "pass_headers", "[", "'Accept-encoding'", "]", "=", "\"\"", "#headers.append(('User-Agent', USER_AGENT))", "pass_headers", "[", "'User-Agent'", "]", "=", "self", ".", "useragent", "if", "len", "(", "param_dict", ".", "keys", "(", ")", ")", "==", "0", ":", "param_dict", "=", "None", "if", "handlers", "is", "None", ":", "handlers", "=", "[", "]", "if", "handler", "is", "not", "None", ":", "handlers", ".", "append", "(", "handler", ")", "handlers", ".", "append", "(", "RedirectHandler", "(", ")", ")", "if", "self", ".", "_verify", "==", "False", "and", "sys", ".", "version_info", "[", "0", ":", "3", "]", ">=", "(", "2", ",", "7", ",", "9", ")", "and", "hasattr", "(", "ssl", ",", "'create_default_context'", ")", ":", "ctx", "=", "ssl", ".", "create_default_context", "(", ")", "ctx", ".", "check_hostname", "=", "False", "ctx", ".", "verify_mode", "=", "ssl", ".", "CERT_NONE", "handlers", ".", "append", "(", "request", ".", "HTTPSHandler", "(", "context", "=", "ctx", ")", ")", "if", "cj", "is", "not", "None", ":", "handlers", ".", "append", "(", "request", ".", "HTTPCookieProcessor", "(", "cj", ")", ")", "if", "proxy_url", "is", "not", "None", ":", "if", "proxy_port", "is", "None", ":", "proxy_port", "=", "80", "proxies", "=", "{", "\"http\"", ":", "\"http://%s:%s\"", "%", "(", "proxy_url", ",", "proxy_port", ")", ",", "\"https\"", ":", "\"https://%s:%s\"", "%", "(", "proxy_url", ",", "proxy_port", ")", "}", "proxy_support", "=", "request", ".", "ProxyHandler", "(", "proxies", ")", "handlers", ".", "append", "(", "proxy_support", ")", "opener", "=", "request", ".", "build_opener", "(", "*", "handlers", ")", "opener", ".", "addheaders", "=", "headers", "request", ".", "install_opener", "(", "opener", ")", "ctx", "=", "None", "hasContext", "=", "False", "if", "self", ".", "_verify", "==", "False", "and", "'context'", "in", "self", ".", "_has_context", "(", "request", ".", "urlopen", ")", "and", "sys", ".", "version_info", "[", "0", ":", "3", "]", ">=", "(", "2", ",", "7", ",", "9", ")", "and", "hasattr", "(", "ssl", ",", "'create_default_context'", ")", 
":", "ctx", "=", "ssl", ".", "create_default_context", "(", ")", "ctx", ".", "check_hostname", "=", "False", "ctx", ".", "verify_mode", "=", "ssl", ".", "CERT_NONE", "hasContext", "=", "True", "if", "hasContext", "==", "False", ":", "if", "param_dict", "is", "None", ":", "req", "=", "request", ".", "Request", "(", "self", ".", "_asString", "(", "url", ")", ",", "headers", "=", "pass_headers", ")", "resp", "=", "request", ".", "urlopen", "(", "req", ")", "elif", "len", "(", "str", "(", "urlencode", "(", "param_dict", ")", ")", ")", "+", "len", "(", "url", ")", ">=", "1999", ":", "return", "self", ".", "_post", "(", "url", "=", "url", ",", "param_dict", "=", "param_dict", ",", "files", "=", "None", ",", "securityHandler", "=", "securityHandler", ",", "additional_headers", "=", "additional_headers", ",", "custom_handlers", "=", "custom_handlers", ",", "proxy_url", "=", "proxy_url", ",", "proxy_port", "=", "proxy_port", ",", "compress", "=", "compress", ",", "out_folder", "=", "out_folder", ",", "file_name", "=", "file_name", ",", "force_form_post", "=", "False", ")", "else", ":", "format_url", "=", "self", ".", "_asString", "(", "url", ")", "+", "\"?%s\"", "%", "urlencode", "(", "param_dict", ")", "req", "=", "request", ".", "Request", "(", "format_url", ",", "headers", "=", "pass_headers", ")", "resp", "=", "request", ".", "urlopen", "(", "req", ")", "else", ":", "if", "param_dict", "is", "None", ":", "req", "=", "request", ".", "Request", "(", "self", ".", "_asString", "(", "url", ")", ",", "headers", "=", "pass_headers", ")", "resp", "=", "request", ".", "urlopen", "(", "req", ",", "context", "=", "ctx", ")", "elif", "len", "(", "str", "(", "urlencode", "(", "param_dict", ")", ")", ")", "+", "len", "(", "url", ")", ">=", "1999", ":", "return", "self", ".", "_post", "(", "url", "=", "url", ",", "param_dict", "=", "param_dict", ",", "files", "=", "None", ",", "securityHandler", "=", "securityHandler", ",", "additional_headers", "=", "additional_headers", ",", "custom_handlers", "=", "custom_handlers", ",", "proxy_url", "=", "proxy_url", ",", "proxy_port", "=", "proxy_port", ",", "compress", "=", "compress", ",", "out_folder", "=", "out_folder", ",", "file_name", "=", "file_name", ",", "force_form_post", "=", "False", ")", "else", ":", "format_url", "=", "self", ".", "_asString", "(", "url", ")", "+", "\"?%s\"", "%", "urlencode", "(", "param_dict", ")", "req", "=", "request", ".", "Request", "(", "format_url", ",", "headers", "=", "pass_headers", ")", "resp", "=", "request", ".", "urlopen", "(", "req", ",", "context", "=", "ctx", ")", "self", ".", "_last_code", "=", "resp", ".", "getcode", "(", ")", "self", ".", "_last_url", "=", "resp", ".", "geturl", "(", ")", "# Get some headers from the response", "maintype", "=", "self", ".", "_mainType", "(", "resp", ")", "contentDisposition", "=", "resp", ".", "headers", ".", "get", "(", "'content-disposition'", ")", "contentMD5", "=", "resp", ".", "headers", ".", "get", "(", "'Content-MD5'", ")", "#contentEncoding = resp.headers.get('content-encoding')", "contentType", "=", "resp", ".", "headers", ".", "get", "(", "'content-Type'", ")", ".", "split", "(", "';'", ")", "[", "0", "]", ".", "lower", "(", ")", "contentLength", "=", "resp", ".", "headers", ".", "get", "(", "'content-length'", ")", "if", "maintype", ".", "lower", "(", ")", "in", "(", "'image'", ",", "'application/x-zip-compressed'", ")", "or", "contentType", "in", "(", "'application/x-zip-compressed'", ",", "'application/octet-stream'", ")", "or", "contentMD5", "is", "not", "None", "or", 
"(", "contentDisposition", "is", "not", "None", "and", "contentDisposition", ".", "lower", "(", ")", ".", "find", "(", "'attachment;'", ")", ">", "-", "1", ")", ":", "fname", "=", "self", ".", "_get_file_name", "(", "contentDisposition", "=", "contentDisposition", ",", "url", "=", "url", ")", "if", "out_folder", "is", "None", ":", "out_folder", "=", "tempfile", ".", "gettempdir", "(", ")", "if", "contentLength", "is", "not", "None", ":", "max_length", "=", "int", "(", "contentLength", ")", "if", "max_length", "<", "CHUNK", ":", "CHUNK", "=", "max_length", "file_name", "=", "os", ".", "path", ".", "join", "(", "out_folder", ",", "fname", ")", "with", "open", "(", "file_name", ",", "'wb'", ")", "as", "writer", ":", "for", "data", "in", "self", ".", "_chunk", "(", "response", "=", "resp", ",", "size", "=", "CHUNK", ")", ":", "writer", ".", "write", "(", "data", ")", "writer", ".", "flush", "(", ")", "writer", ".", "flush", "(", ")", "del", "writer", "return", "file_name", "else", ":", "read", "=", "\"\"", "for", "data", "in", "self", ".", "_chunk", "(", "response", "=", "resp", ",", "size", "=", "CHUNK", ")", ":", "if", "self", ".", "PY3", "==", "True", ":", "read", "+=", "data", ".", "decode", "(", "'utf-8'", ")", "else", ":", "read", "+=", "data", "del", "data", "try", ":", "results", "=", "json", ".", "loads", "(", "read", ")", "if", "'error'", "in", "results", ":", "if", "'message'", "in", "results", "[", "'error'", "]", ":", "if", "results", "[", "'error'", "]", "[", "'message'", "]", "==", "'Request not made over ssl'", ":", "if", "url", ".", "startswith", "(", "'http://'", ")", ":", "url", "=", "url", ".", "replace", "(", "'http://'", ",", "'https://'", ")", "return", "self", ".", "_get", "(", "url", ",", "param_dict", ",", "securityHandler", ",", "additional_headers", ",", "handlers", ",", "proxy_url", ",", "proxy_port", ",", "compress", ",", "custom_handlers", ",", "out_folder", ",", "file_name", ")", "return", "results", "except", ":", "return", "read" ]
Performs a GET operation Inputs: Output: returns dictionary, string or None
[ "Performs", "a", "GET", "operation", "Inputs", ":" ]
train
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/web/_base.py#L551-L751
0.003829
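Underneath the handler plumbing, the attachment branch of `_get` is a standard chunked copy from an HTTP response to disk. A stripped-down standard-library sketch of that loop (the URL is a placeholder):

```python
import os
import tempfile
from urllib import request

def download(url, chunk=4056, out_folder=None):
    # Stream the response to a file in fixed-size chunks, as _get() does
    # for image/zip/attachment responses.
    out_folder = out_folder or tempfile.gettempdir()
    file_name = os.path.join(out_folder, "download.bin")
    with request.urlopen(url) as resp, open(file_name, "wb") as writer:
        while True:
            data = resp.read(chunk)
            if not data:
                break
            writer.write(data)
    return file_name

# download("https://example.com/file.zip")  # placeholder URL, not fetched here
```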
google/apitools
apitools/base/py/transfer.py
Download.StreamMedia
def StreamMedia(self, callback=None, finish_callback=None, additional_headers=None, use_chunks=True): """Stream the entire download. Args: callback: (default: None) Callback to call as each chunk is completed. finish_callback: (default: None) Callback to call when the download is complete. additional_headers: (default: None) Additional headers to include in fetching bytes. use_chunks: (bool, default: True) If False, ignore self.chunksize and stream this download in a single request. Returns: None. Streams bytes into self.stream. """ callback = callback or self.progress_callback finish_callback = finish_callback or self.finish_callback self.EnsureInitialized() while True: if self.__initial_response is not None: response = self.__initial_response self.__initial_response = None else: end_byte = self.__ComputeEndByte(self.progress, use_chunks=use_chunks) response = self.__GetChunk( self.progress, end_byte, additional_headers=additional_headers) if self.total_size is None: self.__SetTotal(response.info) response = self.__ProcessResponse(response) self._ExecuteCallback(callback, response) if (response.status_code == http_client.OK or self.progress >= self.total_size): break self._ExecuteCallback(finish_callback, response)
python
def StreamMedia(self, callback=None, finish_callback=None, additional_headers=None, use_chunks=True): """Stream the entire download. Args: callback: (default: None) Callback to call as each chunk is completed. finish_callback: (default: None) Callback to call when the download is complete. additional_headers: (default: None) Additional headers to include in fetching bytes. use_chunks: (bool, default: True) If False, ignore self.chunksize and stream this download in a single request. Returns: None. Streams bytes into self.stream. """ callback = callback or self.progress_callback finish_callback = finish_callback or self.finish_callback self.EnsureInitialized() while True: if self.__initial_response is not None: response = self.__initial_response self.__initial_response = None else: end_byte = self.__ComputeEndByte(self.progress, use_chunks=use_chunks) response = self.__GetChunk( self.progress, end_byte, additional_headers=additional_headers) if self.total_size is None: self.__SetTotal(response.info) response = self.__ProcessResponse(response) self._ExecuteCallback(callback, response) if (response.status_code == http_client.OK or self.progress >= self.total_size): break self._ExecuteCallback(finish_callback, response)
[ "def", "StreamMedia", "(", "self", ",", "callback", "=", "None", ",", "finish_callback", "=", "None", ",", "additional_headers", "=", "None", ",", "use_chunks", "=", "True", ")", ":", "callback", "=", "callback", "or", "self", ".", "progress_callback", "finish_callback", "=", "finish_callback", "or", "self", ".", "finish_callback", "self", ".", "EnsureInitialized", "(", ")", "while", "True", ":", "if", "self", ".", "__initial_response", "is", "not", "None", ":", "response", "=", "self", ".", "__initial_response", "self", ".", "__initial_response", "=", "None", "else", ":", "end_byte", "=", "self", ".", "__ComputeEndByte", "(", "self", ".", "progress", ",", "use_chunks", "=", "use_chunks", ")", "response", "=", "self", ".", "__GetChunk", "(", "self", ".", "progress", ",", "end_byte", ",", "additional_headers", "=", "additional_headers", ")", "if", "self", ".", "total_size", "is", "None", ":", "self", ".", "__SetTotal", "(", "response", ".", "info", ")", "response", "=", "self", ".", "__ProcessResponse", "(", "response", ")", "self", ".", "_ExecuteCallback", "(", "callback", ",", "response", ")", "if", "(", "response", ".", "status_code", "==", "http_client", ".", "OK", "or", "self", ".", "progress", ">=", "self", ".", "total_size", ")", ":", "break", "self", ".", "_ExecuteCallback", "(", "finish_callback", ",", "response", ")" ]
Stream the entire download. Args: callback: (default: None) Callback to call as each chunk is completed. finish_callback: (default: None) Callback to call when the download is complete. additional_headers: (default: None) Additional headers to include in fetching bytes. use_chunks: (bool, default: True) If False, ignore self.chunksize and stream this download in a single request. Returns: None. Streams bytes into self.stream.
[ "Stream", "the", "entire", "download", "." ]
train
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/transfer.py#L506-L544
0.001749
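The control flow of `StreamMedia` — one progress callback per chunk, one finish callback at the end — can be exercised without any HTTP. A sketch with an in-memory "download":

```python
def stream_media(chunks, callback=None, finish_callback=None):
    # Replay the per-chunk / on-finish callback protocol used by StreamMedia.
    progress = 0
    for chunk in chunks:
        progress += len(chunk)
        if callback is not None:
            callback(progress)
    if finish_callback is not None:
        finish_callback(progress)

stream_media([b"ab", b"cde"],
             callback=lambda p: print("got", p, "bytes"),
             finish_callback=lambda p: print("done at", p))
# -> got 2 bytes / got 5 bytes / done at 5
```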
hozn/keepassdb
keepassdb/db.py
Database.load
def load(self, dbfile, password=None, keyfile=None, readonly=False):
    """
    Load the database from file/stream.

    :param dbfile: The database file path/stream.
    :type dbfile: str or file-like object
    :param password: The password for the database.
    :type password: str
    :param keyfile: Path to a keyfile (or a stream) that can be used instead of or in conjunction with password for database.
    :type keyfile: str or file-like object
    :param readonly: Whether to open the database read-only.
    :type readonly: bool
    """
    self._clear()
    buf = None
    is_stream = hasattr(dbfile, 'read')
    if is_stream:
        buf = dbfile.read()
    else:
        if not os.path.exists(dbfile):
            raise IOError("File does not exist: {0}".format(dbfile))
        with open(dbfile, 'rb') as fp:
            buf = fp.read()

    self.load_from_buffer(buf, password=password, keyfile=keyfile, readonly=readonly)

    # Once we have successfully loaded the file, go ahead and set the internal attribute
    # (in the LockingDatabase subclass, this will effectively take out the lock on the file)
    if not is_stream:
        self.filepath = dbfile
python
def load(self, dbfile, password=None, keyfile=None, readonly=False):
    """
    Load the database from file/stream.

    :param dbfile: The database file path/stream.
    :type dbfile: str or file-like object
    :param password: The password for the database.
    :type password: str
    :param keyfile: Path to a keyfile (or a stream) that can be used instead of or in conjunction with password for database.
    :type keyfile: str or file-like object
    :param readonly: Whether to open the database read-only.
    :type readonly: bool
    """
    self._clear()
    buf = None
    is_stream = hasattr(dbfile, 'read')
    if is_stream:
        buf = dbfile.read()
    else:
        if not os.path.exists(dbfile):
            raise IOError("File does not exist: {0}".format(dbfile))
        with open(dbfile, 'rb') as fp:
            buf = fp.read()

    self.load_from_buffer(buf, password=password, keyfile=keyfile, readonly=readonly)

    # Once we have successfully loaded the file, go ahead and set the internal attribute
    # (in the LockingDatabase subclass, this will effectively take out the lock on the file)
    if not is_stream:
        self.filepath = dbfile
[ "def", "load", "(", "self", ",", "dbfile", ",", "password", "=", "None", ",", "keyfile", "=", "None", ",", "readonly", "=", "False", ")", ":", "self", ".", "_clear", "(", ")", "buf", "=", "None", "is_stream", "=", "hasattr", "(", "dbfile", ",", "'read'", ")", "if", "is_stream", ":", "buf", "=", "dbfile", ".", "read", "(", ")", "else", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "dbfile", ")", ":", "raise", "IOError", "(", "\"File does not exist: {0}\"", ".", "format", "(", "dbfile", ")", ")", "with", "open", "(", "dbfile", ",", "'rb'", ")", "as", "fp", ":", "buf", "=", "fp", ".", "read", "(", ")", "self", ".", "load_from_buffer", "(", "buf", ",", "password", "=", "password", ",", "keyfile", "=", "keyfile", ",", "readonly", "=", "readonly", ")", "# One we have successfully loaded the file, go ahead and set the internal attribute", "# (in the LockingDatabase subclass, this will effectivley take out the lock on the file)", "if", "not", "is_stream", ":", "self", ".", "filepath", "=", "dbfile" ]
Load the database from file/stream. :param dbfile: The database file path/stream. :type dbfile: str or file-like object :param password: The password for the database. :type password: str :param keyfile: Path to a keyfile (or a stream) that can be used instead of or in conjunction with password for database. :type keyfile: str or file-like object :param readonly: Whether to open the database read-only. :type readonly: bool
[ "Load", "the", "database", "from", "file", "/", "stream", ".", ":", "param", "dbfile", ":", "The", "database", "file", "path", "/", "stream", ".", ":", "type", "dbfile", ":", "str", "or", "file", "-", "like", "object", ":", "param", "password", ":", "The", "password", "for", "the", "database", ".", ":", "type", "password", ":", "str", ":", "param", "keyfile", ":", "Path", "to", "a", "keyfile", "(", "or", "a", "stream", ")", "that", "can", "be", "used", "instead", "of", "or", "in", "conjunction", "with", "password", "for", "database", ".", ":", "type", "keyfile", ":", "str", "or", "file", "-", "like", "object", ":", "param", "readonly", ":", "Whether", "to", "open", "the", "database", "read", "-", "only", ".", ":", "type", "readonly", ":", "bool" ]
train
https://github.com/hozn/keepassdb/blob/cb24985d1ed04e7d7db99ecdddf80dd1a91ee48b/keepassdb/db.py#L121-L152
0.009002
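`load` accepts either a path or a stream by duck-typing on `read`; that dispatch is easy to lift out on its own. A minimal sketch:

```python
import io
import os

def read_all(dbfile):
    # Accept a filesystem path or any object with .read(), as Database.load() does.
    if hasattr(dbfile, "read"):
        return dbfile.read()
    if not os.path.exists(dbfile):
        raise IOError("File does not exist: {0}".format(dbfile))
    with open(dbfile, "rb") as fp:
        return fp.read()

# Both call styles work:
print(read_all(io.BytesIO(b"raw bytes")))  # -> b'raw bytes'
```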
Azure/azure-event-hubs-python
azure/eventprocessorhost/azure_blob_lease.py
AzureBlobLease.with_blob
def with_blob(self, blob): """ Init Azure Blob Lease with existing blob. """ content = json.loads(blob.content) self.partition_id = content["partition_id"] self.owner = content["owner"] self.token = content["token"] self.epoch = content["epoch"] self.offset = content["offset"] self.sequence_number = content["sequence_number"] self.event_processor_context = content.get("event_processor_context")
python
def with_blob(self, blob): """ Init Azure Blob Lease with existing blob. """ content = json.loads(blob.content) self.partition_id = content["partition_id"] self.owner = content["owner"] self.token = content["token"] self.epoch = content["epoch"] self.offset = content["offset"] self.sequence_number = content["sequence_number"] self.event_processor_context = content.get("event_processor_context")
[ "def", "with_blob", "(", "self", ",", "blob", ")", ":", "content", "=", "json", ".", "loads", "(", "blob", ".", "content", ")", "self", ".", "partition_id", "=", "content", "[", "\"partition_id\"", "]", "self", ".", "owner", "=", "content", "[", "\"owner\"", "]", "self", ".", "token", "=", "content", "[", "\"token\"", "]", "self", ".", "epoch", "=", "content", "[", "\"epoch\"", "]", "self", ".", "offset", "=", "content", "[", "\"offset\"", "]", "self", ".", "sequence_number", "=", "content", "[", "\"sequence_number\"", "]", "self", ".", "event_processor_context", "=", "content", ".", "get", "(", "\"event_processor_context\"", ")" ]
Init Azure Blob Lease with existing blob.
[ "Init", "Azure", "Blob", "Lease", "with", "existing", "blob", "." ]
train
https://github.com/Azure/azure-event-hubs-python/blob/737c5f966557ada2cf10fa0d8f3c19671ae96348/azure/eventprocessorhost/azure_blob_lease.py#L40-L51
0.004124
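Rehydrating the lease is plain JSON-to-attributes mapping; note the mix above of `content[...]` for required fields and `content.get(...)` for the optional one. A reduced sketch (field set trimmed for illustration):

```python
import json

class Lease:
    def with_blob(self, blob_content):
        content = json.loads(blob_content)
        self.partition_id = content["partition_id"]  # required: raises if missing
        self.offset = content["offset"]
        # optional: absent key becomes None instead of raising
        self.event_processor_context = content.get("event_processor_context")

lease = Lease()
lease.with_blob('{"partition_id": "0", "offset": "42"}')
print(lease.partition_id, lease.offset, lease.event_processor_context)
# -> 0 42 None
```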
jaywink/federation
federation/protocols/diaspora/protocol.py
Protocol.get_message_content
def get_message_content(self): """ Given the Slap XML, extract out the payload. """ body = self.doc.find( ".//{http://salmon-protocol.org/ns/magic-env}data").text body = urlsafe_b64decode(body.encode("ascii")) logger.debug("diaspora.protocol.get_message_content: %s", body) return body
python
def get_message_content(self): """ Given the Slap XML, extract out the payload. """ body = self.doc.find( ".//{http://salmon-protocol.org/ns/magic-env}data").text body = urlsafe_b64decode(body.encode("ascii")) logger.debug("diaspora.protocol.get_message_content: %s", body) return body
[ "def", "get_message_content", "(", "self", ")", ":", "body", "=", "self", ".", "doc", ".", "find", "(", "\".//{http://salmon-protocol.org/ns/magic-env}data\"", ")", ".", "text", "body", "=", "urlsafe_b64decode", "(", "body", ".", "encode", "(", "\"ascii\"", ")", ")", "logger", ".", "debug", "(", "\"diaspora.protocol.get_message_content: %s\"", ",", "body", ")", "return", "body" ]
Given the Slap XML, extract out the payload.
[ "Given", "the", "Slap", "XML", "extract", "out", "the", "payload", "." ]
train
https://github.com/jaywink/federation/blob/59d31bb37e662891dbea72c1dee05dc53146c78b/federation/protocols/diaspora/protocol.py#L114-L124
0.005634
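The Salmon payload extraction reduces to finding the namespaced `data` element and reversing its URL-safe base64 encoding. A self-contained sketch that builds a tiny envelope inline rather than parsing a real Slap:

```python
from base64 import urlsafe_b64decode, urlsafe_b64encode
from xml.etree import ElementTree

NS = "{http://salmon-protocol.org/ns/magic-env}"

# Encode a demo payload and wrap it in a minimal magic envelope.
payload = urlsafe_b64encode(b"<status>hello</status>").decode("ascii")
xml = ('<env xmlns="http://salmon-protocol.org/ns/magic-env">'
       '<data>%s</data></env>' % payload)

doc = ElementTree.fromstring(xml)
body = doc.find(".//" + NS + "data").text
print(urlsafe_b64decode(body.encode("ascii")))  # -> b'<status>hello</status>'
```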

Dataset Card for "code_search_data-pep8"

More Information needed
