Column schema (the pipe-separated table rows below follow this order):

- repository_name: string, 7–54 chars
- func_path_in_repository: string, 4–175 chars
- func_name: string, 1–129 chars
- whole_func_string: string, 91–50.9k chars
- language: string, 1 class (value: "python")
- func_code_string: string, 91–50.9k chars (identical to whole_func_string in the rows shown)
- func_code_tokens: sequence (string tokens of the function code)
- func_documentation_string: string, 1–31.6k chars
- func_documentation_tokens: sequence (string tokens of the docstring)
- split_name: string, 1 class (value: "train")
- func_code_url: string, 89–268 chars
- score: float64, 0–0.09
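For reference, a minimal sketch of how records with this schema can be consumed. The JSON Lines packaging and the file name `train.jsonl` are assumptions for illustration, not something stated by the schema above:

```python
# Minimal sketch: iterate over records matching the column schema above.
# Assumption: the "train" split is stored as JSON Lines in "train.jsonl"
# (hypothetical path), one JSON object per row, keyed by the field names.
import json

with open("train.jsonl", encoding="utf-8") as fh:
    for line in fh:
        record = json.loads(line)
        # Each record pairs a Python function with its documentation.
        print(record["repository_name"], record["func_name"])
        print(record["func_documentation_string"][:80])
        print(record["func_code_url"], record["score"])
```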
repository_name | func_path_in_repository | func_name | whole_func_string | language | func_code_string | func_code_tokens | func_documentation_string | func_documentation_tokens | split_name | func_code_url | score
---|---|---|---|---|---|---|---|---|---|---|---|
PmagPy/PmagPy | SPD/spd.py | PintPars.get_ptrm_dec_and_inc | def get_ptrm_dec_and_inc(self):
"""not included in spd."""
PTRMS = self.PTRMS[1:]
CART_pTRMS_orig = numpy.array([lib_direct.dir2cart(row[1:4]) for row in PTRMS])
#B_lab_dir = [self.B_lab_dir[0], self.B_lab_dir[1], 1.] # dir
tmin, tmax = self.t_Arai[0], self.t_Arai[-1]
ptrms_dec_Free, ptrms_inc_Free, ptrm_best_fit_vector_Free, ptrm_tau_Free, ptrm_v_Free, ptrm_mass_center_Free, ptrm_PCA_sigma_Free = lib_direct.get_dec_and_inc(CART_pTRMS_orig, self.t_Arai, tmin, tmax, anchored=False)
ptrms_angle = lib_direct.get_ptrms_angle(ptrm_best_fit_vector_Free, self.B_lab_cart)
self.pars['ptrms_dec_Free'], self.pars['ptrms_inc_Free'] = ptrms_dec_Free, ptrms_inc_Free
self.pars['ptrms_tau_Free'] = ptrm_tau_Free
self.pars['ptrms_angle_Free'] = ptrms_angle | python | def get_ptrm_dec_and_inc(self):
"""not included in spd."""
PTRMS = self.PTRMS[1:]
CART_pTRMS_orig = numpy.array([lib_direct.dir2cart(row[1:4]) for row in PTRMS])
#B_lab_dir = [self.B_lab_dir[0], self.B_lab_dir[1], 1.] # dir
tmin, tmax = self.t_Arai[0], self.t_Arai[-1]
ptrms_dec_Free, ptrms_inc_Free, ptrm_best_fit_vector_Free, ptrm_tau_Free, ptrm_v_Free, ptrm_mass_center_Free, ptrm_PCA_sigma_Free = lib_direct.get_dec_and_inc(CART_pTRMS_orig, self.t_Arai, tmin, tmax, anchored=False)
ptrms_angle = lib_direct.get_ptrms_angle(ptrm_best_fit_vector_Free, self.B_lab_cart)
self.pars['ptrms_dec_Free'], self.pars['ptrms_inc_Free'] = ptrms_dec_Free, ptrms_inc_Free
self.pars['ptrms_tau_Free'] = ptrm_tau_Free
self.pars['ptrms_angle_Free'] = ptrms_angle | [
"def",
"get_ptrm_dec_and_inc",
"(",
"self",
")",
":",
"PTRMS",
"=",
"self",
".",
"PTRMS",
"[",
"1",
":",
"]",
"CART_pTRMS_orig",
"=",
"numpy",
".",
"array",
"(",
"[",
"lib_direct",
".",
"dir2cart",
"(",
"row",
"[",
"1",
":",
"4",
"]",
")",
"for",
"row",
"in",
"PTRMS",
"]",
")",
"#B_lab_dir = [self.B_lab_dir[0], self.B_lab_dir[1], 1.] # dir",
"tmin",
",",
"tmax",
"=",
"self",
".",
"t_Arai",
"[",
"0",
"]",
",",
"self",
".",
"t_Arai",
"[",
"-",
"1",
"]",
"ptrms_dec_Free",
",",
"ptrms_inc_Free",
",",
"ptrm_best_fit_vector_Free",
",",
"ptrm_tau_Free",
",",
"ptrm_v_Free",
",",
"ptrm_mass_center_Free",
",",
"ptrm_PCA_sigma_Free",
"=",
"lib_direct",
".",
"get_dec_and_inc",
"(",
"CART_pTRMS_orig",
",",
"self",
".",
"t_Arai",
",",
"tmin",
",",
"tmax",
",",
"anchored",
"=",
"False",
")",
"ptrms_angle",
"=",
"lib_direct",
".",
"get_ptrms_angle",
"(",
"ptrm_best_fit_vector_Free",
",",
"self",
".",
"B_lab_cart",
")",
"self",
".",
"pars",
"[",
"'ptrms_dec_Free'",
"]",
",",
"self",
".",
"pars",
"[",
"'ptrms_inc_Free'",
"]",
"=",
"ptrms_dec_Free",
",",
"ptrms_inc_Free",
"self",
".",
"pars",
"[",
"'ptrms_tau_Free'",
"]",
"=",
"ptrm_tau_Free",
"self",
".",
"pars",
"[",
"'ptrms_angle_Free'",
"]",
"=",
"ptrms_angle"
] | not included in spd. | [
"not",
"included",
"in",
"spd",
"."
] | train | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/SPD/spd.py#L391-L401 | 0.008454 |
molmod/molmod | molmod/graphs.py | Graph.full_match | def full_match(self, other):
"""Find the mapping between vertex indexes in self and other.
This also works on disconnected graphs. Derived classes should just
implement get_vertex_string and get_edge_string to make this method
aware of the different nature of certain vertices. In case molecules,
this would make the algorithm sensitive to atom numbers etc.
"""
# we need normalize subgraphs because these graphs are used as patterns.
graphs0 = [
self.get_subgraph(group, normalize=True)
for group in self.independent_vertices
]
graphs1 = [
other.get_subgraph(group)
for group in other.independent_vertices
]
if len(graphs0) != len(graphs1):
return
matches = []
for graph0 in graphs0:
pattern = EqualPattern(graph0)
found_match = False
for i, graph1 in enumerate(graphs1):
local_matches = list(GraphSearch(pattern)(graph1, one_match=True))
if len(local_matches) == 1:
match = local_matches[0]
# we need to restore the relation between the normalized
# graph0 and its original indexes
old_to_new = OneToOne((
(j, i) for i, j
in enumerate(graph0._old_vertex_indexes)
))
matches.append(match * old_to_new)
del graphs1[i]
found_match = True
break
if not found_match:
return
result = OneToOne()
for match in matches:
result.add_relations(match.forward.items())
return result | python | def full_match(self, other):
"""Find the mapping between vertex indexes in self and other.
This also works on disconnected graphs. Derived classes should just
implement get_vertex_string and get_edge_string to make this method
aware of the different nature of certain vertices. In case molecules,
this would make the algorithm sensitive to atom numbers etc.
"""
# we need normalize subgraphs because these graphs are used as patterns.
graphs0 = [
self.get_subgraph(group, normalize=True)
for group in self.independent_vertices
]
graphs1 = [
other.get_subgraph(group)
for group in other.independent_vertices
]
if len(graphs0) != len(graphs1):
return
matches = []
for graph0 in graphs0:
pattern = EqualPattern(graph0)
found_match = False
for i, graph1 in enumerate(graphs1):
local_matches = list(GraphSearch(pattern)(graph1, one_match=True))
if len(local_matches) == 1:
match = local_matches[0]
# we need to restore the relation between the normalized
# graph0 and its original indexes
old_to_new = OneToOne((
(j, i) for i, j
in enumerate(graph0._old_vertex_indexes)
))
matches.append(match * old_to_new)
del graphs1[i]
found_match = True
break
if not found_match:
return
result = OneToOne()
for match in matches:
result.add_relations(match.forward.items())
return result | [
"def",
"full_match",
"(",
"self",
",",
"other",
")",
":",
"# we need normalize subgraphs because these graphs are used as patterns.",
"graphs0",
"=",
"[",
"self",
".",
"get_subgraph",
"(",
"group",
",",
"normalize",
"=",
"True",
")",
"for",
"group",
"in",
"self",
".",
"independent_vertices",
"]",
"graphs1",
"=",
"[",
"other",
".",
"get_subgraph",
"(",
"group",
")",
"for",
"group",
"in",
"other",
".",
"independent_vertices",
"]",
"if",
"len",
"(",
"graphs0",
")",
"!=",
"len",
"(",
"graphs1",
")",
":",
"return",
"matches",
"=",
"[",
"]",
"for",
"graph0",
"in",
"graphs0",
":",
"pattern",
"=",
"EqualPattern",
"(",
"graph0",
")",
"found_match",
"=",
"False",
"for",
"i",
",",
"graph1",
"in",
"enumerate",
"(",
"graphs1",
")",
":",
"local_matches",
"=",
"list",
"(",
"GraphSearch",
"(",
"pattern",
")",
"(",
"graph1",
",",
"one_match",
"=",
"True",
")",
")",
"if",
"len",
"(",
"local_matches",
")",
"==",
"1",
":",
"match",
"=",
"local_matches",
"[",
"0",
"]",
"# we need to restore the relation between the normalized",
"# graph0 and its original indexes",
"old_to_new",
"=",
"OneToOne",
"(",
"(",
"(",
"j",
",",
"i",
")",
"for",
"i",
",",
"j",
"in",
"enumerate",
"(",
"graph0",
".",
"_old_vertex_indexes",
")",
")",
")",
"matches",
".",
"append",
"(",
"match",
"*",
"old_to_new",
")",
"del",
"graphs1",
"[",
"i",
"]",
"found_match",
"=",
"True",
"break",
"if",
"not",
"found_match",
":",
"return",
"result",
"=",
"OneToOne",
"(",
")",
"for",
"match",
"in",
"matches",
":",
"result",
".",
"add_relations",
"(",
"match",
".",
"forward",
".",
"items",
"(",
")",
")",
"return",
"result"
] | Find the mapping between vertex indexes in self and other.
This also works on disconnected graphs. Derived classes should just
implement get_vertex_string and get_edge_string to make this method
aware of the different nature of certain vertices. In case molecules,
this would make the algorithm sensitive to atom numbers etc. | [
"Find",
"the",
"mapping",
"between",
"vertex",
"indexes",
"in",
"self",
"and",
"other",
"."
] | train | https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/graphs.py#L782-L828 | 0.00275 |
spacetelescope/drizzlepac | drizzlepac/pixtosky.py | xy2rd | def xy2rd(input,x=None,y=None,coords=None, coordfile=None,colnames=None,separator=None,
hms=True, precision=6,output=None,verbose=True):
""" Primary interface to perform coordinate transformations from
pixel to sky coordinates using STWCS and full distortion models
read from the input image header.
"""
single_coord = False
# Only use value provided in `coords` if nothing has been specified for coordfile
if coords is not None and coordfile is None:
coordfile = coords
warnings.simplefilter('always',DeprecationWarning)
warnings.warn("Please update calling code to pass in `coordfile` instead of `coords`.",
category=DeprecationWarning)
warnings.simplefilter('default',DeprecationWarning)
if coordfile is not None:
if colnames in blank_list:
colnames = ['c1','c2']
# Determine columns which contain pixel positions
cols = util.parse_colnames(colnames,coordfile)
# read in columns from input coordinates file
xyvals = np.loadtxt(coordfile,usecols=cols,delimiter=separator)
if xyvals.ndim == 1: # only 1 entry in coordfile
xlist = [xyvals[0].copy()]
ylist = [xyvals[1].copy()]
else:
xlist = xyvals[:,0].copy()
ylist = xyvals[:,1].copy()
del xyvals
else:
if isinstance(x, np.ndarray):
xlist = x.tolist()
ylist = y.tolist()
elif not isinstance(x,list):
xlist = [x]
ylist = [y]
single_coord = True
else:
xlist = x
ylist = y
# start by reading in WCS+distortion info for input image
inwcs = wcsutil.HSTWCS(input)
if inwcs.wcs.is_unity():
print("####\nNo valid WCS found in {}.\n Results may be invalid.\n####\n".format(input))
# Now, convert pixel coordinates into sky coordinates
dra,ddec = inwcs.all_pix2world(xlist,ylist,1)
# convert to HH:MM:SS.S format, if specified
if hms:
ra,dec = wcs_functions.ddtohms(dra,ddec,precision=precision)
rastr = ra
decstr = dec
else:
# add formatting based on precision here...
rastr = []
decstr = []
fmt = "%."+repr(precision)+"f"
for r,d in zip(dra,ddec):
rastr.append(fmt%r)
decstr.append(fmt%d)
ra = dra
dec = ddec
if verbose or (not verbose and util.is_blank(output)):
print('# Coordinate transformations for ',input)
print('# X Y RA Dec\n')
for x,y,r,d in zip(xlist,ylist,rastr,decstr):
print("%.4f %.4f %s %s"%(x,y,r,d))
# Create output file, if specified
if output:
f = open(output,mode='w')
f.write("# Coordinates converted from %s\n"%input)
for r,d in zip(rastr,decstr):
f.write('%s %s\n'%(r,d))
f.close()
print('Wrote out results to: ',output)
if single_coord:
ra = ra[0]
dec = dec[0]
return ra,dec | python | def xy2rd(input,x=None,y=None,coords=None, coordfile=None,colnames=None,separator=None,
hms=True, precision=6,output=None,verbose=True):
""" Primary interface to perform coordinate transformations from
pixel to sky coordinates using STWCS and full distortion models
read from the input image header.
"""
single_coord = False
# Only use value provided in `coords` if nothing has been specified for coordfile
if coords is not None and coordfile is None:
coordfile = coords
warnings.simplefilter('always',DeprecationWarning)
warnings.warn("Please update calling code to pass in `coordfile` instead of `coords`.",
category=DeprecationWarning)
warnings.simplefilter('default',DeprecationWarning)
if coordfile is not None:
if colnames in blank_list:
colnames = ['c1','c2']
# Determine columns which contain pixel positions
cols = util.parse_colnames(colnames,coordfile)
# read in columns from input coordinates file
xyvals = np.loadtxt(coordfile,usecols=cols,delimiter=separator)
if xyvals.ndim == 1: # only 1 entry in coordfile
xlist = [xyvals[0].copy()]
ylist = [xyvals[1].copy()]
else:
xlist = xyvals[:,0].copy()
ylist = xyvals[:,1].copy()
del xyvals
else:
if isinstance(x, np.ndarray):
xlist = x.tolist()
ylist = y.tolist()
elif not isinstance(x,list):
xlist = [x]
ylist = [y]
single_coord = True
else:
xlist = x
ylist = y
# start by reading in WCS+distortion info for input image
inwcs = wcsutil.HSTWCS(input)
if inwcs.wcs.is_unity():
print("####\nNo valid WCS found in {}.\n Results may be invalid.\n####\n".format(input))
# Now, convert pixel coordinates into sky coordinates
dra,ddec = inwcs.all_pix2world(xlist,ylist,1)
# convert to HH:MM:SS.S format, if specified
if hms:
ra,dec = wcs_functions.ddtohms(dra,ddec,precision=precision)
rastr = ra
decstr = dec
else:
# add formatting based on precision here...
rastr = []
decstr = []
fmt = "%."+repr(precision)+"f"
for r,d in zip(dra,ddec):
rastr.append(fmt%r)
decstr.append(fmt%d)
ra = dra
dec = ddec
if verbose or (not verbose and util.is_blank(output)):
print('# Coordinate transformations for ',input)
print('# X Y RA Dec\n')
for x,y,r,d in zip(xlist,ylist,rastr,decstr):
print("%.4f %.4f %s %s"%(x,y,r,d))
# Create output file, if specified
if output:
f = open(output,mode='w')
f.write("# Coordinates converted from %s\n"%input)
for r,d in zip(rastr,decstr):
f.write('%s %s\n'%(r,d))
f.close()
print('Wrote out results to: ',output)
if single_coord:
ra = ra[0]
dec = dec[0]
return ra,dec | [
"def",
"xy2rd",
"(",
"input",
",",
"x",
"=",
"None",
",",
"y",
"=",
"None",
",",
"coords",
"=",
"None",
",",
"coordfile",
"=",
"None",
",",
"colnames",
"=",
"None",
",",
"separator",
"=",
"None",
",",
"hms",
"=",
"True",
",",
"precision",
"=",
"6",
",",
"output",
"=",
"None",
",",
"verbose",
"=",
"True",
")",
":",
"single_coord",
"=",
"False",
"# Only use value provided in `coords` if nothing has been specified for coordfile",
"if",
"coords",
"is",
"not",
"None",
"and",
"coordfile",
"is",
"None",
":",
"coordfile",
"=",
"coords",
"warnings",
".",
"simplefilter",
"(",
"'always'",
",",
"DeprecationWarning",
")",
"warnings",
".",
"warn",
"(",
"\"Please update calling code to pass in `coordfile` instead of `coords`.\"",
",",
"category",
"=",
"DeprecationWarning",
")",
"warnings",
".",
"simplefilter",
"(",
"'default'",
",",
"DeprecationWarning",
")",
"if",
"coordfile",
"is",
"not",
"None",
":",
"if",
"colnames",
"in",
"blank_list",
":",
"colnames",
"=",
"[",
"'c1'",
",",
"'c2'",
"]",
"# Determine columns which contain pixel positions",
"cols",
"=",
"util",
".",
"parse_colnames",
"(",
"colnames",
",",
"coordfile",
")",
"# read in columns from input coordinates file",
"xyvals",
"=",
"np",
".",
"loadtxt",
"(",
"coordfile",
",",
"usecols",
"=",
"cols",
",",
"delimiter",
"=",
"separator",
")",
"if",
"xyvals",
".",
"ndim",
"==",
"1",
":",
"# only 1 entry in coordfile",
"xlist",
"=",
"[",
"xyvals",
"[",
"0",
"]",
".",
"copy",
"(",
")",
"]",
"ylist",
"=",
"[",
"xyvals",
"[",
"1",
"]",
".",
"copy",
"(",
")",
"]",
"else",
":",
"xlist",
"=",
"xyvals",
"[",
":",
",",
"0",
"]",
".",
"copy",
"(",
")",
"ylist",
"=",
"xyvals",
"[",
":",
",",
"1",
"]",
".",
"copy",
"(",
")",
"del",
"xyvals",
"else",
":",
"if",
"isinstance",
"(",
"x",
",",
"np",
".",
"ndarray",
")",
":",
"xlist",
"=",
"x",
".",
"tolist",
"(",
")",
"ylist",
"=",
"y",
".",
"tolist",
"(",
")",
"elif",
"not",
"isinstance",
"(",
"x",
",",
"list",
")",
":",
"xlist",
"=",
"[",
"x",
"]",
"ylist",
"=",
"[",
"y",
"]",
"single_coord",
"=",
"True",
"else",
":",
"xlist",
"=",
"x",
"ylist",
"=",
"y",
"# start by reading in WCS+distortion info for input image",
"inwcs",
"=",
"wcsutil",
".",
"HSTWCS",
"(",
"input",
")",
"if",
"inwcs",
".",
"wcs",
".",
"is_unity",
"(",
")",
":",
"print",
"(",
"\"####\\nNo valid WCS found in {}.\\n Results may be invalid.\\n####\\n\"",
".",
"format",
"(",
"input",
")",
")",
"# Now, convert pixel coordinates into sky coordinates",
"dra",
",",
"ddec",
"=",
"inwcs",
".",
"all_pix2world",
"(",
"xlist",
",",
"ylist",
",",
"1",
")",
"# convert to HH:MM:SS.S format, if specified",
"if",
"hms",
":",
"ra",
",",
"dec",
"=",
"wcs_functions",
".",
"ddtohms",
"(",
"dra",
",",
"ddec",
",",
"precision",
"=",
"precision",
")",
"rastr",
"=",
"ra",
"decstr",
"=",
"dec",
"else",
":",
"# add formatting based on precision here...",
"rastr",
"=",
"[",
"]",
"decstr",
"=",
"[",
"]",
"fmt",
"=",
"\"%.\"",
"+",
"repr",
"(",
"precision",
")",
"+",
"\"f\"",
"for",
"r",
",",
"d",
"in",
"zip",
"(",
"dra",
",",
"ddec",
")",
":",
"rastr",
".",
"append",
"(",
"fmt",
"%",
"r",
")",
"decstr",
".",
"append",
"(",
"fmt",
"%",
"d",
")",
"ra",
"=",
"dra",
"dec",
"=",
"ddec",
"if",
"verbose",
"or",
"(",
"not",
"verbose",
"and",
"util",
".",
"is_blank",
"(",
"output",
")",
")",
":",
"print",
"(",
"'# Coordinate transformations for '",
",",
"input",
")",
"print",
"(",
"'# X Y RA Dec\\n'",
")",
"for",
"x",
",",
"y",
",",
"r",
",",
"d",
"in",
"zip",
"(",
"xlist",
",",
"ylist",
",",
"rastr",
",",
"decstr",
")",
":",
"print",
"(",
"\"%.4f %.4f %s %s\"",
"%",
"(",
"x",
",",
"y",
",",
"r",
",",
"d",
")",
")",
"# Create output file, if specified",
"if",
"output",
":",
"f",
"=",
"open",
"(",
"output",
",",
"mode",
"=",
"'w'",
")",
"f",
".",
"write",
"(",
"\"# Coordinates converted from %s\\n\"",
"%",
"input",
")",
"for",
"r",
",",
"d",
"in",
"zip",
"(",
"rastr",
",",
"decstr",
")",
":",
"f",
".",
"write",
"(",
"'%s %s\\n'",
"%",
"(",
"r",
",",
"d",
")",
")",
"f",
".",
"close",
"(",
")",
"print",
"(",
"'Wrote out results to: '",
",",
"output",
")",
"if",
"single_coord",
":",
"ra",
"=",
"ra",
"[",
"0",
"]",
"dec",
"=",
"dec",
"[",
"0",
"]",
"return",
"ra",
",",
"dec"
] | Primary interface to perform coordinate transformations from
pixel to sky coordinates using STWCS and full distortion models
read from the input image header. | [
"Primary",
"interface",
"to",
"perform",
"coordinate",
"transformations",
"from",
"pixel",
"to",
"sky",
"coordinates",
"using",
"STWCS",
"and",
"full",
"distortion",
"models",
"read",
"from",
"the",
"input",
"image",
"header",
"."
] | train | https://github.com/spacetelescope/drizzlepac/blob/15bec3c929a6a869d9e71b9398ced43ede0620f1/drizzlepac/pixtosky.py#L102-L187 | 0.01685 |
oscarlazoarjona/fast | fast/bloch.py | independent_get_coefficients | def independent_get_coefficients(coef, rhouv, s, i, j, k, u, v,
unfolding, matrix_form):
r"""Get the indices mu, nu, and term coefficients for linear terms.
>>> from fast.symbolic import define_density_matrix
>>> Ne = 2
>>> coef = 1+2j
>>> rhouv = define_density_matrix(Ne)[1, 1]
>>> s, i, j, k, u, v = (1, 1, 0, 1, 1, 1)
>>> unfolding = Unfolding(Ne, real=True, normalized=True)
>>> independent_get_coefficients(coef, rhouv, s, i, j, k, u, v,
... unfolding, False)
[[1, None, -2.00000000000000, False, False]]
"""
if matrix_form:
coef = -coef
Mu = unfolding.Mu
mu = Mu(s, i, j)
rhouv_isconjugated = False
if s == 1:
coef_list = [[mu, None, -im(coef), matrix_form, rhouv_isconjugated]]
elif s == -1:
coef_list = [[mu, None, re(coef), matrix_form, rhouv_isconjugated]]
else:
coef_list = [[mu, None, coef, matrix_form, rhouv_isconjugated]]
return coef_list | python | def independent_get_coefficients(coef, rhouv, s, i, j, k, u, v,
unfolding, matrix_form):
r"""Get the indices mu, nu, and term coefficients for linear terms.
>>> from fast.symbolic import define_density_matrix
>>> Ne = 2
>>> coef = 1+2j
>>> rhouv = define_density_matrix(Ne)[1, 1]
>>> s, i, j, k, u, v = (1, 1, 0, 1, 1, 1)
>>> unfolding = Unfolding(Ne, real=True, normalized=True)
>>> independent_get_coefficients(coef, rhouv, s, i, j, k, u, v,
... unfolding, False)
[[1, None, -2.00000000000000, False, False]]
"""
if matrix_form:
coef = -coef
Mu = unfolding.Mu
mu = Mu(s, i, j)
rhouv_isconjugated = False
if s == 1:
coef_list = [[mu, None, -im(coef), matrix_form, rhouv_isconjugated]]
elif s == -1:
coef_list = [[mu, None, re(coef), matrix_form, rhouv_isconjugated]]
else:
coef_list = [[mu, None, coef, matrix_form, rhouv_isconjugated]]
return coef_list | [
"def",
"independent_get_coefficients",
"(",
"coef",
",",
"rhouv",
",",
"s",
",",
"i",
",",
"j",
",",
"k",
",",
"u",
",",
"v",
",",
"unfolding",
",",
"matrix_form",
")",
":",
"if",
"matrix_form",
":",
"coef",
"=",
"-",
"coef",
"Mu",
"=",
"unfolding",
".",
"Mu",
"mu",
"=",
"Mu",
"(",
"s",
",",
"i",
",",
"j",
")",
"rhouv_isconjugated",
"=",
"False",
"if",
"s",
"==",
"1",
":",
"coef_list",
"=",
"[",
"[",
"mu",
",",
"None",
",",
"-",
"im",
"(",
"coef",
")",
",",
"matrix_form",
",",
"rhouv_isconjugated",
"]",
"]",
"elif",
"s",
"==",
"-",
"1",
":",
"coef_list",
"=",
"[",
"[",
"mu",
",",
"None",
",",
"re",
"(",
"coef",
")",
",",
"matrix_form",
",",
"rhouv_isconjugated",
"]",
"]",
"else",
":",
"coef_list",
"=",
"[",
"[",
"mu",
",",
"None",
",",
"coef",
",",
"matrix_form",
",",
"rhouv_isconjugated",
"]",
"]",
"return",
"coef_list"
] | r"""Get the indices mu, nu, and term coefficients for linear terms.
>>> from fast.symbolic import define_density_matrix
>>> Ne = 2
>>> coef = 1+2j
>>> rhouv = define_density_matrix(Ne)[1, 1]
>>> s, i, j, k, u, v = (1, 1, 0, 1, 1, 1)
>>> unfolding = Unfolding(Ne, real=True, normalized=True)
>>> independent_get_coefficients(coef, rhouv, s, i, j, k, u, v,
... unfolding, False)
[[1, None, -2.00000000000000, False, False]] | [
"r",
"Get",
"the",
"indices",
"mu",
"nu",
"and",
"term",
"coefficients",
"for",
"linear",
"terms",
"."
] | train | https://github.com/oscarlazoarjona/fast/blob/3e5400672af2a7b7cc616e7f4aa10d7672720222/fast/bloch.py#L1354-L1381 | 0.000974 |
ethereum/py-evm | eth/vm/state.py | BaseState.apply_transaction | def apply_transaction(
self,
transaction: BaseOrSpoofTransaction) -> 'BaseComputation':
"""
Apply transaction to the vm state
:param transaction: the transaction to apply
:return: the computation
"""
if self.state_root != BLANK_ROOT_HASH and not self._account_db.has_root(self.state_root):
raise StateRootNotFound(self.state_root)
else:
return self.execute_transaction(transaction) | python | def apply_transaction(
self,
transaction: BaseOrSpoofTransaction) -> 'BaseComputation':
"""
Apply transaction to the vm state
:param transaction: the transaction to apply
:return: the computation
"""
if self.state_root != BLANK_ROOT_HASH and not self._account_db.has_root(self.state_root):
raise StateRootNotFound(self.state_root)
else:
return self.execute_transaction(transaction) | [
"def",
"apply_transaction",
"(",
"self",
",",
"transaction",
":",
"BaseOrSpoofTransaction",
")",
"->",
"'BaseComputation'",
":",
"if",
"self",
".",
"state_root",
"!=",
"BLANK_ROOT_HASH",
"and",
"not",
"self",
".",
"_account_db",
".",
"has_root",
"(",
"self",
".",
"state_root",
")",
":",
"raise",
"StateRootNotFound",
"(",
"self",
".",
"state_root",
")",
"else",
":",
"return",
"self",
".",
"execute_transaction",
"(",
"transaction",
")"
] | Apply transaction to the vm state
:param transaction: the transaction to apply
:return: the computation | [
"Apply",
"transaction",
"to",
"the",
"vm",
"state"
] | train | https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/vm/state.py#L311-L323 | 0.006173 |
theislab/scanpy | scanpy/utils.py | select_groups | def select_groups(adata, groups_order_subset='all', key='groups'):
"""Get subset of groups in adata.obs[key].
"""
groups_order = adata.obs[key].cat.categories
if key + '_masks' in adata.uns:
groups_masks = adata.uns[key + '_masks']
else:
groups_masks = np.zeros((len(adata.obs[key].cat.categories),
adata.obs[key].values.size), dtype=bool)
for iname, name in enumerate(adata.obs[key].cat.categories):
# if the name is not found, fallback to index retrieval
if adata.obs[key].cat.categories[iname] in adata.obs[key].values:
mask = adata.obs[key].cat.categories[iname] == adata.obs[key].values
else:
mask = str(iname) == adata.obs[key].values
groups_masks[iname] = mask
groups_ids = list(range(len(groups_order)))
if groups_order_subset != 'all':
groups_ids = []
for name in groups_order_subset:
groups_ids.append(
np.where(adata.obs[key].cat.categories.values == name)[0][0])
if len(groups_ids) == 0:
# fallback to index retrieval
groups_ids = np.where(
np.in1d(np.arange(len(adata.obs[key].cat.categories)).astype(str),
np.array(groups_order_subset)))[0]
if len(groups_ids) == 0:
logg.m(np.array(groups_order_subset),
'invalid! specify valid groups_order (or indices) one of',
adata.obs[key].cat.categories)
from sys import exit
exit(0)
groups_masks = groups_masks[groups_ids]
groups_order_subset = adata.obs[key].cat.categories[groups_ids].values
else:
groups_order_subset = groups_order.values
return groups_order_subset, groups_masks | python | def select_groups(adata, groups_order_subset='all', key='groups'):
"""Get subset of groups in adata.obs[key].
"""
groups_order = adata.obs[key].cat.categories
if key + '_masks' in adata.uns:
groups_masks = adata.uns[key + '_masks']
else:
groups_masks = np.zeros((len(adata.obs[key].cat.categories),
adata.obs[key].values.size), dtype=bool)
for iname, name in enumerate(adata.obs[key].cat.categories):
# if the name is not found, fallback to index retrieval
if adata.obs[key].cat.categories[iname] in adata.obs[key].values:
mask = adata.obs[key].cat.categories[iname] == adata.obs[key].values
else:
mask = str(iname) == adata.obs[key].values
groups_masks[iname] = mask
groups_ids = list(range(len(groups_order)))
if groups_order_subset != 'all':
groups_ids = []
for name in groups_order_subset:
groups_ids.append(
np.where(adata.obs[key].cat.categories.values == name)[0][0])
if len(groups_ids) == 0:
# fallback to index retrieval
groups_ids = np.where(
np.in1d(np.arange(len(adata.obs[key].cat.categories)).astype(str),
np.array(groups_order_subset)))[0]
if len(groups_ids) == 0:
logg.m(np.array(groups_order_subset),
'invalid! specify valid groups_order (or indices) one of',
adata.obs[key].cat.categories)
from sys import exit
exit(0)
groups_masks = groups_masks[groups_ids]
groups_order_subset = adata.obs[key].cat.categories[groups_ids].values
else:
groups_order_subset = groups_order.values
return groups_order_subset, groups_masks | [
"def",
"select_groups",
"(",
"adata",
",",
"groups_order_subset",
"=",
"'all'",
",",
"key",
"=",
"'groups'",
")",
":",
"groups_order",
"=",
"adata",
".",
"obs",
"[",
"key",
"]",
".",
"cat",
".",
"categories",
"if",
"key",
"+",
"'_masks'",
"in",
"adata",
".",
"uns",
":",
"groups_masks",
"=",
"adata",
".",
"uns",
"[",
"key",
"+",
"'_masks'",
"]",
"else",
":",
"groups_masks",
"=",
"np",
".",
"zeros",
"(",
"(",
"len",
"(",
"adata",
".",
"obs",
"[",
"key",
"]",
".",
"cat",
".",
"categories",
")",
",",
"adata",
".",
"obs",
"[",
"key",
"]",
".",
"values",
".",
"size",
")",
",",
"dtype",
"=",
"bool",
")",
"for",
"iname",
",",
"name",
"in",
"enumerate",
"(",
"adata",
".",
"obs",
"[",
"key",
"]",
".",
"cat",
".",
"categories",
")",
":",
"# if the name is not found, fallback to index retrieval",
"if",
"adata",
".",
"obs",
"[",
"key",
"]",
".",
"cat",
".",
"categories",
"[",
"iname",
"]",
"in",
"adata",
".",
"obs",
"[",
"key",
"]",
".",
"values",
":",
"mask",
"=",
"adata",
".",
"obs",
"[",
"key",
"]",
".",
"cat",
".",
"categories",
"[",
"iname",
"]",
"==",
"adata",
".",
"obs",
"[",
"key",
"]",
".",
"values",
"else",
":",
"mask",
"=",
"str",
"(",
"iname",
")",
"==",
"adata",
".",
"obs",
"[",
"key",
"]",
".",
"values",
"groups_masks",
"[",
"iname",
"]",
"=",
"mask",
"groups_ids",
"=",
"list",
"(",
"range",
"(",
"len",
"(",
"groups_order",
")",
")",
")",
"if",
"groups_order_subset",
"!=",
"'all'",
":",
"groups_ids",
"=",
"[",
"]",
"for",
"name",
"in",
"groups_order_subset",
":",
"groups_ids",
".",
"append",
"(",
"np",
".",
"where",
"(",
"adata",
".",
"obs",
"[",
"key",
"]",
".",
"cat",
".",
"categories",
".",
"values",
"==",
"name",
")",
"[",
"0",
"]",
"[",
"0",
"]",
")",
"if",
"len",
"(",
"groups_ids",
")",
"==",
"0",
":",
"# fallback to index retrieval",
"groups_ids",
"=",
"np",
".",
"where",
"(",
"np",
".",
"in1d",
"(",
"np",
".",
"arange",
"(",
"len",
"(",
"adata",
".",
"obs",
"[",
"key",
"]",
".",
"cat",
".",
"categories",
")",
")",
".",
"astype",
"(",
"str",
")",
",",
"np",
".",
"array",
"(",
"groups_order_subset",
")",
")",
")",
"[",
"0",
"]",
"if",
"len",
"(",
"groups_ids",
")",
"==",
"0",
":",
"logg",
".",
"m",
"(",
"np",
".",
"array",
"(",
"groups_order_subset",
")",
",",
"'invalid! specify valid groups_order (or indices) one of'",
",",
"adata",
".",
"obs",
"[",
"key",
"]",
".",
"cat",
".",
"categories",
")",
"from",
"sys",
"import",
"exit",
"exit",
"(",
"0",
")",
"groups_masks",
"=",
"groups_masks",
"[",
"groups_ids",
"]",
"groups_order_subset",
"=",
"adata",
".",
"obs",
"[",
"key",
"]",
".",
"cat",
".",
"categories",
"[",
"groups_ids",
"]",
".",
"values",
"else",
":",
"groups_order_subset",
"=",
"groups_order",
".",
"values",
"return",
"groups_order_subset",
",",
"groups_masks"
] | Get subset of groups in adata.obs[key]. | [
"Get",
"subset",
"of",
"groups",
"in",
"adata",
".",
"obs",
"[",
"key",
"]",
"."
] | train | https://github.com/theislab/scanpy/blob/9e4e5ee02e04cf618872d9b098e24f0542e8b227/scanpy/utils.py#L686-L723 | 0.002163 |
pantsbuild/pants | src/python/pants/backend/jvm/ivy_utils.py | IvyInfo.traverse_dependency_graph | def traverse_dependency_graph(self, ref, collector, memo=None):
"""Traverses module graph, starting with ref, collecting values for each ref into the sets
created by the collector function.
:param ref an IvyModuleRef to start traversing the ivy dependency graph
:param collector a function that takes a ref and returns a new set of values to collect for
that ref, which will also be updated with all the dependencies accumulated values
:param memo is a dict of ref -> set that memoizes the results of each node in the graph.
If provided, allows for retaining cache across calls.
:returns the accumulated set for ref
"""
resolved_ref = self.refs_by_unversioned_refs.get(ref.unversioned)
if resolved_ref:
ref = resolved_ref
if memo is None:
memo = dict()
visited = set()
return self._do_traverse_dependency_graph(ref, collector, memo, visited) | python | def traverse_dependency_graph(self, ref, collector, memo=None):
"""Traverses module graph, starting with ref, collecting values for each ref into the sets
created by the collector function.
:param ref an IvyModuleRef to start traversing the ivy dependency graph
:param collector a function that takes a ref and returns a new set of values to collect for
that ref, which will also be updated with all the dependencies accumulated values
:param memo is a dict of ref -> set that memoizes the results of each node in the graph.
If provided, allows for retaining cache across calls.
:returns the accumulated set for ref
"""
resolved_ref = self.refs_by_unversioned_refs.get(ref.unversioned)
if resolved_ref:
ref = resolved_ref
if memo is None:
memo = dict()
visited = set()
return self._do_traverse_dependency_graph(ref, collector, memo, visited) | [
"def",
"traverse_dependency_graph",
"(",
"self",
",",
"ref",
",",
"collector",
",",
"memo",
"=",
"None",
")",
":",
"resolved_ref",
"=",
"self",
".",
"refs_by_unversioned_refs",
".",
"get",
"(",
"ref",
".",
"unversioned",
")",
"if",
"resolved_ref",
":",
"ref",
"=",
"resolved_ref",
"if",
"memo",
"is",
"None",
":",
"memo",
"=",
"dict",
"(",
")",
"visited",
"=",
"set",
"(",
")",
"return",
"self",
".",
"_do_traverse_dependency_graph",
"(",
"ref",
",",
"collector",
",",
"memo",
",",
"visited",
")"
] | Traverses module graph, starting with ref, collecting values for each ref into the sets
created by the collector function.
:param ref an IvyModuleRef to start traversing the ivy dependency graph
:param collector a function that takes a ref and returns a new set of values to collect for
that ref, which will also be updated with all the dependencies accumulated values
:param memo is a dict of ref -> set that memoizes the results of each node in the graph.
If provided, allows for retaining cache across calls.
:returns the accumulated set for ref | [
"Traverses",
"module",
"graph",
"starting",
"with",
"ref",
"collecting",
"values",
"for",
"each",
"ref",
"into",
"the",
"sets",
"created",
"by",
"the",
"collector",
"function",
"."
] | train | https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/backend/jvm/ivy_utils.py#L632-L650 | 0.007568 |
revelc/pyaccumulo | pyaccumulo/proxy/AccumuloProxy.py | Client.importTable | def importTable(self, login, tableName, importDir):
"""
Parameters:
- login
- tableName
- importDir
"""
self.send_importTable(login, tableName, importDir)
self.recv_importTable() | python | def importTable(self, login, tableName, importDir):
"""
Parameters:
- login
- tableName
- importDir
"""
self.send_importTable(login, tableName, importDir)
self.recv_importTable() | [
"def",
"importTable",
"(",
"self",
",",
"login",
",",
"tableName",
",",
"importDir",
")",
":",
"self",
".",
"send_importTable",
"(",
"login",
",",
"tableName",
",",
"importDir",
")",
"self",
".",
"recv_importTable",
"(",
")"
] | Parameters:
- login
- tableName
- importDir | [
"Parameters",
":",
"-",
"login",
"-",
"tableName",
"-",
"importDir"
] | train | https://github.com/revelc/pyaccumulo/blob/8adcf535bb82ba69c749efce785c9efc487e85de/pyaccumulo/proxy/AccumuloProxy.py#L1435-L1443 | 0.004695 |
BlackEarth/bxml | bxml/xml.py | XML.tobytes | def tobytes(
self,
root=None,
encoding='UTF-8',
doctype=None,
canonicalized=True,
xml_declaration=True,
pretty_print=True,
with_comments=True,
):
"""return the content of the XML document as a byte string suitable for writing"""
if root is None:
root = self.root
if canonicalized == True:
return self.canonicalized_bytes(root)
else:
return etree.tostring(
root,
encoding=encoding or self.info.encoding,
doctype=doctype or self.info.doctype,
xml_declaration=xml_declaration,
pretty_print=pretty_print,
with_comments=with_comments,
) | python | def tobytes(
self,
root=None,
encoding='UTF-8',
doctype=None,
canonicalized=True,
xml_declaration=True,
pretty_print=True,
with_comments=True,
):
"""return the content of the XML document as a byte string suitable for writing"""
if root is None:
root = self.root
if canonicalized == True:
return self.canonicalized_bytes(root)
else:
return etree.tostring(
root,
encoding=encoding or self.info.encoding,
doctype=doctype or self.info.doctype,
xml_declaration=xml_declaration,
pretty_print=pretty_print,
with_comments=with_comments,
) | [
"def",
"tobytes",
"(",
"self",
",",
"root",
"=",
"None",
",",
"encoding",
"=",
"'UTF-8'",
",",
"doctype",
"=",
"None",
",",
"canonicalized",
"=",
"True",
",",
"xml_declaration",
"=",
"True",
",",
"pretty_print",
"=",
"True",
",",
"with_comments",
"=",
"True",
",",
")",
":",
"if",
"root",
"is",
"None",
":",
"root",
"=",
"self",
".",
"root",
"if",
"canonicalized",
"==",
"True",
":",
"return",
"self",
".",
"canonicalized_bytes",
"(",
"root",
")",
"else",
":",
"return",
"etree",
".",
"tostring",
"(",
"root",
",",
"encoding",
"=",
"encoding",
"or",
"self",
".",
"info",
".",
"encoding",
",",
"doctype",
"=",
"doctype",
"or",
"self",
".",
"info",
".",
"doctype",
",",
"xml_declaration",
"=",
"xml_declaration",
",",
"pretty_print",
"=",
"pretty_print",
",",
"with_comments",
"=",
"with_comments",
",",
")"
] | return the content of the XML document as a byte string suitable for writing | [
"return",
"the",
"content",
"of",
"the",
"XML",
"document",
"as",
"a",
"byte",
"string",
"suitable",
"for",
"writing"
] | train | https://github.com/BlackEarth/bxml/blob/8fbea5dad7fadc7b854ddbeff6ecfb55aaceeb77/bxml/xml.py#L176-L199 | 0.006266 |
dadadel/pyment | pyment/docstring.py | DocToolsBase.get_return_list | def get_return_list(self, data):
"""Get the list of returned values.
The list contains tuples (name=None, desc, type=None)
:param data: the data to proceed
"""
return_list = []
lst = self.get_list_key(data, 'return')
for l in lst:
name, desc, rtype = l
if l[2] is None:
rtype = l[0]
name = None
desc = desc.strip()
return_list.append((name, desc, rtype))
return return_list | python | def get_return_list(self, data):
"""Get the list of returned values.
The list contains tuples (name=None, desc, type=None)
:param data: the data to proceed
"""
return_list = []
lst = self.get_list_key(data, 'return')
for l in lst:
name, desc, rtype = l
if l[2] is None:
rtype = l[0]
name = None
desc = desc.strip()
return_list.append((name, desc, rtype))
return return_list | [
"def",
"get_return_list",
"(",
"self",
",",
"data",
")",
":",
"return_list",
"=",
"[",
"]",
"lst",
"=",
"self",
".",
"get_list_key",
"(",
"data",
",",
"'return'",
")",
"for",
"l",
"in",
"lst",
":",
"name",
",",
"desc",
",",
"rtype",
"=",
"l",
"if",
"l",
"[",
"2",
"]",
"is",
"None",
":",
"rtype",
"=",
"l",
"[",
"0",
"]",
"name",
"=",
"None",
"desc",
"=",
"desc",
".",
"strip",
"(",
")",
"return_list",
".",
"append",
"(",
"(",
"name",
",",
"desc",
",",
"rtype",
")",
")",
"return",
"return_list"
] | Get the list of returned values.
The list contains tuples (name=None, desc, type=None)
:param data: the data to proceed | [
"Get",
"the",
"list",
"of",
"returned",
"values",
".",
"The",
"list",
"contains",
"tuples",
"(",
"name",
"=",
"None",
"desc",
"type",
"=",
"None",
")"
] | train | https://github.com/dadadel/pyment/blob/3d1bdf87d083ff56230bd0bf7c5252e20552b7b6/pyment/docstring.py#L190-L207 | 0.005725 |
apache/incubator-superset | superset/views/tags.py | TagView.post | def post(self, object_type, object_id):
"""Add new tags to an object."""
if object_id == 0:
return Response(status=404)
tagged_objects = []
for name in request.get_json(force=True):
if ':' in name:
type_name = name.split(':', 1)[0]
type_ = TagTypes[type_name]
else:
type_ = TagTypes.custom
tag = db.session.query(Tag).filter_by(name=name, type=type_).first()
if not tag:
tag = Tag(name=name, type=type_)
tagged_objects.append(
TaggedObject(
object_id=object_id,
object_type=object_type,
tag=tag,
),
)
db.session.add_all(tagged_objects)
db.session.commit()
return Response(status=201) | python | def post(self, object_type, object_id):
"""Add new tags to an object."""
if object_id == 0:
return Response(status=404)
tagged_objects = []
for name in request.get_json(force=True):
if ':' in name:
type_name = name.split(':', 1)[0]
type_ = TagTypes[type_name]
else:
type_ = TagTypes.custom
tag = db.session.query(Tag).filter_by(name=name, type=type_).first()
if not tag:
tag = Tag(name=name, type=type_)
tagged_objects.append(
TaggedObject(
object_id=object_id,
object_type=object_type,
tag=tag,
),
)
db.session.add_all(tagged_objects)
db.session.commit()
return Response(status=201) | [
"def",
"post",
"(",
"self",
",",
"object_type",
",",
"object_id",
")",
":",
"if",
"object_id",
"==",
"0",
":",
"return",
"Response",
"(",
"status",
"=",
"404",
")",
"tagged_objects",
"=",
"[",
"]",
"for",
"name",
"in",
"request",
".",
"get_json",
"(",
"force",
"=",
"True",
")",
":",
"if",
"':'",
"in",
"name",
":",
"type_name",
"=",
"name",
".",
"split",
"(",
"':'",
",",
"1",
")",
"[",
"0",
"]",
"type_",
"=",
"TagTypes",
"[",
"type_name",
"]",
"else",
":",
"type_",
"=",
"TagTypes",
".",
"custom",
"tag",
"=",
"db",
".",
"session",
".",
"query",
"(",
"Tag",
")",
".",
"filter_by",
"(",
"name",
"=",
"name",
",",
"type",
"=",
"type_",
")",
".",
"first",
"(",
")",
"if",
"not",
"tag",
":",
"tag",
"=",
"Tag",
"(",
"name",
"=",
"name",
",",
"type",
"=",
"type_",
")",
"tagged_objects",
".",
"append",
"(",
"TaggedObject",
"(",
"object_id",
"=",
"object_id",
",",
"object_type",
"=",
"object_type",
",",
"tag",
"=",
"tag",
",",
")",
",",
")",
"db",
".",
"session",
".",
"add_all",
"(",
"tagged_objects",
")",
"db",
".",
"session",
".",
"commit",
"(",
")",
"return",
"Response",
"(",
"status",
"=",
"201",
")"
] | Add new tags to an object. | [
"Add",
"new",
"tags",
"to",
"an",
"object",
"."
] | train | https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/views/tags.py#L91-L119 | 0.003394 |
cggh/scikit-allel | allel/stats/hw.py | heterozygosity_observed | def heterozygosity_observed(g, fill=np.nan):
"""Calculate the rate of observed heterozygosity for each variant.
Parameters
----------
g : array_like, int, shape (n_variants, n_samples, ploidy)
Genotype array.
fill : float, optional
Use this value for variants where all calls are missing.
Returns
-------
ho : ndarray, float, shape (n_variants,)
Observed heterozygosity
Examples
--------
>>> import allel
>>> g = allel.GenotypeArray([[[0, 0], [0, 0], [0, 0]],
... [[0, 0], [0, 1], [1, 1]],
... [[0, 0], [1, 1], [2, 2]],
... [[1, 1], [1, 2], [-1, -1]]])
>>> allel.heterozygosity_observed(g)
array([0. , 0.33333333, 0. , 0.5 ])
"""
# check inputs
if not hasattr(g, 'count_het') or not hasattr(g, 'count_called'):
g = GenotypeArray(g, copy=False)
# count hets
n_het = np.asarray(g.count_het(axis=1))
n_called = np.asarray(g.count_called(axis=1))
# calculate rate of observed heterozygosity, accounting for variants
# where all calls are missing
with ignore_invalid():
ho = np.where(n_called > 0, n_het / n_called, fill)
return ho | python | def heterozygosity_observed(g, fill=np.nan):
"""Calculate the rate of observed heterozygosity for each variant.
Parameters
----------
g : array_like, int, shape (n_variants, n_samples, ploidy)
Genotype array.
fill : float, optional
Use this value for variants where all calls are missing.
Returns
-------
ho : ndarray, float, shape (n_variants,)
Observed heterozygosity
Examples
--------
>>> import allel
>>> g = allel.GenotypeArray([[[0, 0], [0, 0], [0, 0]],
... [[0, 0], [0, 1], [1, 1]],
... [[0, 0], [1, 1], [2, 2]],
... [[1, 1], [1, 2], [-1, -1]]])
>>> allel.heterozygosity_observed(g)
array([0. , 0.33333333, 0. , 0.5 ])
"""
# check inputs
if not hasattr(g, 'count_het') or not hasattr(g, 'count_called'):
g = GenotypeArray(g, copy=False)
# count hets
n_het = np.asarray(g.count_het(axis=1))
n_called = np.asarray(g.count_called(axis=1))
# calculate rate of observed heterozygosity, accounting for variants
# where all calls are missing
with ignore_invalid():
ho = np.where(n_called > 0, n_het / n_called, fill)
return ho | [
"def",
"heterozygosity_observed",
"(",
"g",
",",
"fill",
"=",
"np",
".",
"nan",
")",
":",
"# check inputs",
"if",
"not",
"hasattr",
"(",
"g",
",",
"'count_het'",
")",
"or",
"not",
"hasattr",
"(",
"g",
",",
"'count_called'",
")",
":",
"g",
"=",
"GenotypeArray",
"(",
"g",
",",
"copy",
"=",
"False",
")",
"# count hets",
"n_het",
"=",
"np",
".",
"asarray",
"(",
"g",
".",
"count_het",
"(",
"axis",
"=",
"1",
")",
")",
"n_called",
"=",
"np",
".",
"asarray",
"(",
"g",
".",
"count_called",
"(",
"axis",
"=",
"1",
")",
")",
"# calculate rate of observed heterozygosity, accounting for variants",
"# where all calls are missing",
"with",
"ignore_invalid",
"(",
")",
":",
"ho",
"=",
"np",
".",
"where",
"(",
"n_called",
">",
"0",
",",
"n_het",
"/",
"n_called",
",",
"fill",
")",
"return",
"ho"
] | Calculate the rate of observed heterozygosity for each variant.
Parameters
----------
g : array_like, int, shape (n_variants, n_samples, ploidy)
Genotype array.
fill : float, optional
Use this value for variants where all calls are missing.
Returns
-------
ho : ndarray, float, shape (n_variants,)
Observed heterozygosity
Examples
--------
>>> import allel
>>> g = allel.GenotypeArray([[[0, 0], [0, 0], [0, 0]],
... [[0, 0], [0, 1], [1, 1]],
... [[0, 0], [1, 1], [2, 2]],
... [[1, 1], [1, 2], [-1, -1]]])
>>> allel.heterozygosity_observed(g)
array([0. , 0.33333333, 0. , 0.5 ]) | [
"Calculate",
"the",
"rate",
"of",
"observed",
"heterozygosity",
"for",
"each",
"variant",
"."
] | train | https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/stats/hw.py#L12-L55 | 0.000781 |
nkmathew/yasi-sexp-indenter | yasi.py | assign_indent_numbers | def assign_indent_numbers(lst, inum, dic=collections.defaultdict(int)):
""" Associate keywords with their respective indentation numbers
"""
for i in lst:
dic[i] = inum
return dic | python | def assign_indent_numbers(lst, inum, dic=collections.defaultdict(int)):
""" Associate keywords with their respective indentation numbers
"""
for i in lst:
dic[i] = inum
return dic | [
"def",
"assign_indent_numbers",
"(",
"lst",
",",
"inum",
",",
"dic",
"=",
"collections",
".",
"defaultdict",
"(",
"int",
")",
")",
":",
"for",
"i",
"in",
"lst",
":",
"dic",
"[",
"i",
"]",
"=",
"inum",
"return",
"dic"
] | Associate keywords with their respective indentation numbers | [
"Associate",
"keywords",
"with",
"their",
"respective",
"indentation",
"numbers"
] | train | https://github.com/nkmathew/yasi-sexp-indenter/blob/6ec2a4675e79606c555bcb67494a0ba994b05805/yasi.py#L571-L576 | 0.004926 |
sashahart/vex | vex/options.py | make_arg_parser | def make_arg_parser():
"""Return a standard ArgumentParser object.
"""
parser = argparse.ArgumentParser(
formatter_class=argparse.RawTextHelpFormatter,
usage="vex [OPTIONS] VIRTUALENV_NAME COMMAND_TO_RUN ...",
)
make = parser.add_argument_group(title='To make a new virtualenv')
make.add_argument(
'-m', '--make',
action="store_true",
help="make named virtualenv before running command"
)
make.add_argument(
'--python',
help="specify which python for virtualenv to be made",
action="store",
default=None,
)
make.add_argument(
'--site-packages',
help="allow site package imports from new virtualenv",
action="store_true",
)
make.add_argument(
'--always-copy',
help="use copies instead of symlinks in new virtualenv",
action="store_true",
)
remove = parser.add_argument_group(title='To remove a virtualenv')
remove.add_argument(
'-r', '--remove',
action="store_true",
help="remove the named virtualenv after running command"
)
parser.add_argument(
"--path",
metavar="DIR",
help="absolute path to virtualenv to use",
action="store"
)
parser.add_argument(
'--cwd',
metavar="DIR",
action="store",
default='.',
help="path to run command in (default: '.' aka $PWD)",
)
parser.add_argument(
"--config",
metavar="FILE",
default=None,
action="store",
help="path to config file to read (default: '~/.vexrc')"
)
parser.add_argument(
'--shell-config',
metavar="SHELL",
dest="shell_to_configure",
action="store",
default=None,
help="print optional config for the specified shell"
)
parser.add_argument(
'--list',
metavar="PREFIX",
nargs="?",
const="",
default=None,
help="print a list of available virtualenvs [matching PREFIX]",
action="store"
)
parser.add_argument(
'--version',
help="print the version of vex that is being run",
action="store_true"
)
parser.add_argument(
"rest",
nargs=argparse.REMAINDER,
help=argparse.SUPPRESS)
return parser | python | def make_arg_parser():
"""Return a standard ArgumentParser object.
"""
parser = argparse.ArgumentParser(
formatter_class=argparse.RawTextHelpFormatter,
usage="vex [OPTIONS] VIRTUALENV_NAME COMMAND_TO_RUN ...",
)
make = parser.add_argument_group(title='To make a new virtualenv')
make.add_argument(
'-m', '--make',
action="store_true",
help="make named virtualenv before running command"
)
make.add_argument(
'--python',
help="specify which python for virtualenv to be made",
action="store",
default=None,
)
make.add_argument(
'--site-packages',
help="allow site package imports from new virtualenv",
action="store_true",
)
make.add_argument(
'--always-copy',
help="use copies instead of symlinks in new virtualenv",
action="store_true",
)
remove = parser.add_argument_group(title='To remove a virtualenv')
remove.add_argument(
'-r', '--remove',
action="store_true",
help="remove the named virtualenv after running command"
)
parser.add_argument(
"--path",
metavar="DIR",
help="absolute path to virtualenv to use",
action="store"
)
parser.add_argument(
'--cwd',
metavar="DIR",
action="store",
default='.',
help="path to run command in (default: '.' aka $PWD)",
)
parser.add_argument(
"--config",
metavar="FILE",
default=None,
action="store",
help="path to config file to read (default: '~/.vexrc')"
)
parser.add_argument(
'--shell-config',
metavar="SHELL",
dest="shell_to_configure",
action="store",
default=None,
help="print optional config for the specified shell"
)
parser.add_argument(
'--list',
metavar="PREFIX",
nargs="?",
const="",
default=None,
help="print a list of available virtualenvs [matching PREFIX]",
action="store"
)
parser.add_argument(
'--version',
help="print the version of vex that is being run",
action="store_true"
)
parser.add_argument(
"rest",
nargs=argparse.REMAINDER,
help=argparse.SUPPRESS)
return parser | [
"def",
"make_arg_parser",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"formatter_class",
"=",
"argparse",
".",
"RawTextHelpFormatter",
",",
"usage",
"=",
"\"vex [OPTIONS] VIRTUALENV_NAME COMMAND_TO_RUN ...\"",
",",
")",
"make",
"=",
"parser",
".",
"add_argument_group",
"(",
"title",
"=",
"'To make a new virtualenv'",
")",
"make",
".",
"add_argument",
"(",
"'-m'",
",",
"'--make'",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"make named virtualenv before running command\"",
")",
"make",
".",
"add_argument",
"(",
"'--python'",
",",
"help",
"=",
"\"specify which python for virtualenv to be made\"",
",",
"action",
"=",
"\"store\"",
",",
"default",
"=",
"None",
",",
")",
"make",
".",
"add_argument",
"(",
"'--site-packages'",
",",
"help",
"=",
"\"allow site package imports from new virtualenv\"",
",",
"action",
"=",
"\"store_true\"",
",",
")",
"make",
".",
"add_argument",
"(",
"'--always-copy'",
",",
"help",
"=",
"\"use copies instead of symlinks in new virtualenv\"",
",",
"action",
"=",
"\"store_true\"",
",",
")",
"remove",
"=",
"parser",
".",
"add_argument_group",
"(",
"title",
"=",
"'To remove a virtualenv'",
")",
"remove",
".",
"add_argument",
"(",
"'-r'",
",",
"'--remove'",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"remove the named virtualenv after running command\"",
")",
"parser",
".",
"add_argument",
"(",
"\"--path\"",
",",
"metavar",
"=",
"\"DIR\"",
",",
"help",
"=",
"\"absolute path to virtualenv to use\"",
",",
"action",
"=",
"\"store\"",
")",
"parser",
".",
"add_argument",
"(",
"'--cwd'",
",",
"metavar",
"=",
"\"DIR\"",
",",
"action",
"=",
"\"store\"",
",",
"default",
"=",
"'.'",
",",
"help",
"=",
"\"path to run command in (default: '.' aka $PWD)\"",
",",
")",
"parser",
".",
"add_argument",
"(",
"\"--config\"",
",",
"metavar",
"=",
"\"FILE\"",
",",
"default",
"=",
"None",
",",
"action",
"=",
"\"store\"",
",",
"help",
"=",
"\"path to config file to read (default: '~/.vexrc')\"",
")",
"parser",
".",
"add_argument",
"(",
"'--shell-config'",
",",
"metavar",
"=",
"\"SHELL\"",
",",
"dest",
"=",
"\"shell_to_configure\"",
",",
"action",
"=",
"\"store\"",
",",
"default",
"=",
"None",
",",
"help",
"=",
"\"print optional config for the specified shell\"",
")",
"parser",
".",
"add_argument",
"(",
"'--list'",
",",
"metavar",
"=",
"\"PREFIX\"",
",",
"nargs",
"=",
"\"?\"",
",",
"const",
"=",
"\"\"",
",",
"default",
"=",
"None",
",",
"help",
"=",
"\"print a list of available virtualenvs [matching PREFIX]\"",
",",
"action",
"=",
"\"store\"",
")",
"parser",
".",
"add_argument",
"(",
"'--version'",
",",
"help",
"=",
"\"print the version of vex that is being run\"",
",",
"action",
"=",
"\"store_true\"",
")",
"parser",
".",
"add_argument",
"(",
"\"rest\"",
",",
"nargs",
"=",
"argparse",
".",
"REMAINDER",
",",
"help",
"=",
"argparse",
".",
"SUPPRESS",
")",
"return",
"parser"
] | Return a standard ArgumentParser object. | [
"Return",
"a",
"standard",
"ArgumentParser",
"object",
"."
] | train | https://github.com/sashahart/vex/blob/b7680c40897b8cbe6aae55ec9812b4fb11738192/vex/options.py#L5-L90 | 0.000423 |
Julius2342/pyvlx | pyvlx/login.py | Login.handle_frame | async def handle_frame(self, frame):
"""Handle incoming API frame, return True if this was the expected frame."""
if not isinstance(frame, FramePasswordEnterConfirmation):
return False
if frame.status == PasswordEnterConfirmationStatus.FAILED:
PYVLXLOG.warning('Failed to authenticate with password "%s****"', self.password[:2])
self.success = False
if frame.status == PasswordEnterConfirmationStatus.SUCCESSFUL:
self.success = True
return True | python | async def handle_frame(self, frame):
"""Handle incoming API frame, return True if this was the expected frame."""
if not isinstance(frame, FramePasswordEnterConfirmation):
return False
if frame.status == PasswordEnterConfirmationStatus.FAILED:
PYVLXLOG.warning('Failed to authenticate with password "%s****"', self.password[:2])
self.success = False
if frame.status == PasswordEnterConfirmationStatus.SUCCESSFUL:
self.success = True
return True | [
"async",
"def",
"handle_frame",
"(",
"self",
",",
"frame",
")",
":",
"if",
"not",
"isinstance",
"(",
"frame",
",",
"FramePasswordEnterConfirmation",
")",
":",
"return",
"False",
"if",
"frame",
".",
"status",
"==",
"PasswordEnterConfirmationStatus",
".",
"FAILED",
":",
"PYVLXLOG",
".",
"warning",
"(",
"'Failed to authenticate with password \"%s****\"'",
",",
"self",
".",
"password",
"[",
":",
"2",
"]",
")",
"self",
".",
"success",
"=",
"False",
"if",
"frame",
".",
"status",
"==",
"PasswordEnterConfirmationStatus",
".",
"SUCCESSFUL",
":",
"self",
".",
"success",
"=",
"True",
"return",
"True"
] | Handle incoming API frame, return True if this was the expected frame. | [
"Handle",
"incoming",
"API",
"frame",
"return",
"True",
"if",
"this",
"was",
"the",
"expected",
"frame",
"."
] | train | https://github.com/Julius2342/pyvlx/blob/ee78e1324bcb1be5b8d1a9d05ab5496b72eae848/pyvlx/login.py#L18-L27 | 0.007519 |
saltstack/salt | salt/modules/win_file.py | get_attributes | def get_attributes(path):
'''
Return a dictionary object with the Windows
file attributes for a file.
Args:
path (str): The path to the file or directory
Returns:
dict: A dictionary of file attributes
CLI Example:
.. code-block:: bash
salt '*' file.get_attributes c:\\temp\\a.txt
'''
if not os.path.exists(path):
raise CommandExecutionError('Path not found: {0}'.format(path))
# set up dictionary for attribute values
attributes = {}
# Get cumulative int value of attributes
intAttributes = win32file.GetFileAttributes(path)
# Assign individual attributes
attributes['archive'] = (intAttributes & 32) == 32
attributes['reparsePoint'] = (intAttributes & 1024) == 1024
attributes['compressed'] = (intAttributes & 2048) == 2048
attributes['directory'] = (intAttributes & 16) == 16
attributes['encrypted'] = (intAttributes & 16384) == 16384
attributes['hidden'] = (intAttributes & 2) == 2
attributes['normal'] = (intAttributes & 128) == 128
attributes['notIndexed'] = (intAttributes & 8192) == 8192
attributes['offline'] = (intAttributes & 4096) == 4096
attributes['readonly'] = (intAttributes & 1) == 1
attributes['system'] = (intAttributes & 4) == 4
attributes['temporary'] = (intAttributes & 256) == 256
# check if it's a Mounted Volume
attributes['mountedVolume'] = False
if attributes['reparsePoint'] is True and attributes['directory'] is True:
fileIterator = win32file.FindFilesIterator(path)
findDataTuple = next(fileIterator)
if findDataTuple[6] == 0xA0000003:
attributes['mountedVolume'] = True
# check if it's a soft (symbolic) link
# Note: os.path.islink() does not work in
# Python 2.7 for the Windows NTFS file system.
# The following code does, however, work (tested in Windows 8)
attributes['symbolicLink'] = False
if attributes['reparsePoint'] is True:
fileIterator = win32file.FindFilesIterator(path)
findDataTuple = next(fileIterator)
if findDataTuple[6] == 0xA000000C:
attributes['symbolicLink'] = True
return attributes | python | def get_attributes(path):
'''
Return a dictionary object with the Windows
file attributes for a file.
Args:
path (str): The path to the file or directory
Returns:
dict: A dictionary of file attributes
CLI Example:
.. code-block:: bash
salt '*' file.get_attributes c:\\temp\\a.txt
'''
if not os.path.exists(path):
raise CommandExecutionError('Path not found: {0}'.format(path))
# set up dictionary for attribute values
attributes = {}
# Get cumulative int value of attributes
intAttributes = win32file.GetFileAttributes(path)
# Assign individual attributes
attributes['archive'] = (intAttributes & 32) == 32
attributes['reparsePoint'] = (intAttributes & 1024) == 1024
attributes['compressed'] = (intAttributes & 2048) == 2048
attributes['directory'] = (intAttributes & 16) == 16
attributes['encrypted'] = (intAttributes & 16384) == 16384
attributes['hidden'] = (intAttributes & 2) == 2
attributes['normal'] = (intAttributes & 128) == 128
attributes['notIndexed'] = (intAttributes & 8192) == 8192
attributes['offline'] = (intAttributes & 4096) == 4096
attributes['readonly'] = (intAttributes & 1) == 1
attributes['system'] = (intAttributes & 4) == 4
attributes['temporary'] = (intAttributes & 256) == 256
# check if it's a Mounted Volume
attributes['mountedVolume'] = False
if attributes['reparsePoint'] is True and attributes['directory'] is True:
fileIterator = win32file.FindFilesIterator(path)
findDataTuple = next(fileIterator)
if findDataTuple[6] == 0xA0000003:
attributes['mountedVolume'] = True
# check if it's a soft (symbolic) link
# Note: os.path.islink() does not work in
# Python 2.7 for the Windows NTFS file system.
# The following code does, however, work (tested in Windows 8)
attributes['symbolicLink'] = False
if attributes['reparsePoint'] is True:
fileIterator = win32file.FindFilesIterator(path)
findDataTuple = next(fileIterator)
if findDataTuple[6] == 0xA000000C:
attributes['symbolicLink'] = True
return attributes | [
"def",
"get_attributes",
"(",
"path",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"raise",
"CommandExecutionError",
"(",
"'Path not found: {0}'",
".",
"format",
"(",
"path",
")",
")",
"# set up dictionary for attribute values",
"attributes",
"=",
"{",
"}",
"# Get cumulative int value of attributes",
"intAttributes",
"=",
"win32file",
".",
"GetFileAttributes",
"(",
"path",
")",
"# Assign individual attributes",
"attributes",
"[",
"'archive'",
"]",
"=",
"(",
"intAttributes",
"&",
"32",
")",
"==",
"32",
"attributes",
"[",
"'reparsePoint'",
"]",
"=",
"(",
"intAttributes",
"&",
"1024",
")",
"==",
"1024",
"attributes",
"[",
"'compressed'",
"]",
"=",
"(",
"intAttributes",
"&",
"2048",
")",
"==",
"2048",
"attributes",
"[",
"'directory'",
"]",
"=",
"(",
"intAttributes",
"&",
"16",
")",
"==",
"16",
"attributes",
"[",
"'encrypted'",
"]",
"=",
"(",
"intAttributes",
"&",
"16384",
")",
"==",
"16384",
"attributes",
"[",
"'hidden'",
"]",
"=",
"(",
"intAttributes",
"&",
"2",
")",
"==",
"2",
"attributes",
"[",
"'normal'",
"]",
"=",
"(",
"intAttributes",
"&",
"128",
")",
"==",
"128",
"attributes",
"[",
"'notIndexed'",
"]",
"=",
"(",
"intAttributes",
"&",
"8192",
")",
"==",
"8192",
"attributes",
"[",
"'offline'",
"]",
"=",
"(",
"intAttributes",
"&",
"4096",
")",
"==",
"4096",
"attributes",
"[",
"'readonly'",
"]",
"=",
"(",
"intAttributes",
"&",
"1",
")",
"==",
"1",
"attributes",
"[",
"'system'",
"]",
"=",
"(",
"intAttributes",
"&",
"4",
")",
"==",
"4",
"attributes",
"[",
"'temporary'",
"]",
"=",
"(",
"intAttributes",
"&",
"256",
")",
"==",
"256",
"# check if it's a Mounted Volume",
"attributes",
"[",
"'mountedVolume'",
"]",
"=",
"False",
"if",
"attributes",
"[",
"'reparsePoint'",
"]",
"is",
"True",
"and",
"attributes",
"[",
"'directory'",
"]",
"is",
"True",
":",
"fileIterator",
"=",
"win32file",
".",
"FindFilesIterator",
"(",
"path",
")",
"findDataTuple",
"=",
"next",
"(",
"fileIterator",
")",
"if",
"findDataTuple",
"[",
"6",
"]",
"==",
"0xA0000003",
":",
"attributes",
"[",
"'mountedVolume'",
"]",
"=",
"True",
"# check if it's a soft (symbolic) link",
"# Note: os.path.islink() does not work in",
"# Python 2.7 for the Windows NTFS file system.",
"# The following code does, however, work (tested in Windows 8)",
"attributes",
"[",
"'symbolicLink'",
"]",
"=",
"False",
"if",
"attributes",
"[",
"'reparsePoint'",
"]",
"is",
"True",
":",
"fileIterator",
"=",
"win32file",
".",
"FindFilesIterator",
"(",
"path",
")",
"findDataTuple",
"=",
"next",
"(",
"fileIterator",
")",
"if",
"findDataTuple",
"[",
"6",
"]",
"==",
"0xA000000C",
":",
"attributes",
"[",
"'symbolicLink'",
"]",
"=",
"True",
"return",
"attributes"
] | Return a dictionary object with the Windows
file attributes for a file.
Args:
path (str): The path to the file or directory
Returns:
dict: A dictionary of file attributes
CLI Example:
.. code-block:: bash
salt '*' file.get_attributes c:\\temp\\a.txt | [
"Return",
"a",
"dictionary",
"object",
"with",
"the",
"Windows",
"file",
"attributes",
"for",
"a",
"file",
"."
] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_file.py#L879-L939 | 0.000454 |
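A minimal usage sketch for the row above (hedged: assumes a Windows host with pywin32 installed and that the function is importable from the module path in the row's URL; the file path is a placeholder):

# Sketch only -- module path taken from the row's URL; 'c:\\temp\\a.txt' is hypothetical.
from salt.modules.win_file import get_attributes
attrs = get_attributes('c:\\temp\\a.txt')
# Each key decodes one bit of the GetFileAttributes() bitmask shown above.
print(attrs['readonly'], attrs['hidden'], attrs['symbolicLink'])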
google/grr | grr/server/grr_response_server/databases/mem_flows.py | InMemoryDBFlowMixin.WriteClientActionRequests | def WriteClientActionRequests(self, requests):
"""Writes messages that should go to the client to the db."""
for r in requests:
req_dict = self.flow_requests.get((r.client_id, r.flow_id), {})
if r.request_id not in req_dict:
request_keys = [(r.client_id, r.flow_id, r.request_id) for r in requests
]
raise db.AtLeastOneUnknownRequestError(request_keys)
for r in requests:
request_key = (r.client_id, r.flow_id, r.request_id)
self.client_action_requests[request_key] = r | python | def WriteClientActionRequests(self, requests):
"""Writes messages that should go to the client to the db."""
for r in requests:
req_dict = self.flow_requests.get((r.client_id, r.flow_id), {})
if r.request_id not in req_dict:
request_keys = [(r.client_id, r.flow_id, r.request_id) for r in requests
]
raise db.AtLeastOneUnknownRequestError(request_keys)
for r in requests:
request_key = (r.client_id, r.flow_id, r.request_id)
self.client_action_requests[request_key] = r | [
"def",
"WriteClientActionRequests",
"(",
"self",
",",
"requests",
")",
":",
"for",
"r",
"in",
"requests",
":",
"req_dict",
"=",
"self",
".",
"flow_requests",
".",
"get",
"(",
"(",
"r",
".",
"client_id",
",",
"r",
".",
"flow_id",
")",
",",
"{",
"}",
")",
"if",
"r",
".",
"request_id",
"not",
"in",
"req_dict",
":",
"request_keys",
"=",
"[",
"(",
"r",
".",
"client_id",
",",
"r",
".",
"flow_id",
",",
"r",
".",
"request_id",
")",
"for",
"r",
"in",
"requests",
"]",
"raise",
"db",
".",
"AtLeastOneUnknownRequestError",
"(",
"request_keys",
")",
"for",
"r",
"in",
"requests",
":",
"request_key",
"=",
"(",
"r",
".",
"client_id",
",",
"r",
".",
"flow_id",
",",
"r",
".",
"request_id",
")",
"self",
".",
"client_action_requests",
"[",
"request_key",
"]",
"=",
"r"
] | Writes messages that should go to the client to the db. | [
"Writes",
"messages",
"that",
"should",
"go",
"to",
"the",
"client",
"to",
"the",
"db",
"."
] | train | https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/databases/mem_flows.py#L211-L222 | 0.012844 |
PinLin/KCOJ_api | KCOJ_api/api.py | KCOJ.get_question_passers | def get_question_passers(self, number: str) -> list:
"""
        Get the list of students who passed a specific problem in the course
"""
try:
            # Parameters required for the request
params = {
'HW_ID': number
}
            # Fetch the data
response = self.__session.get(
self.__url + '/success.jsp', params=params, timeout=0.5, verify=False)
soup = BeautifulSoup(response.text, 'html.parser')
            # Collect the passers' information
passers = []
for tag in soup.find_all('tr'):
                # Get the passer's student ID
passer = tag.get_text().replace('\n', '').strip()
                # Skip the header row (the '學號' header cell)
if passer != '學號':
passers.append(passer)
            # Return the result
return passers
except requests.exceptions.Timeout:
return ["Timeout"] | python | def get_question_passers(self, number: str) -> list:
"""
        Get the list of students who passed a specific problem in the course
"""
try:
            # Parameters required for the request
params = {
'HW_ID': number
}
            # Fetch the data
response = self.__session.get(
self.__url + '/success.jsp', params=params, timeout=0.5, verify=False)
soup = BeautifulSoup(response.text, 'html.parser')
            # Collect the passers' information
passers = []
for tag in soup.find_all('tr'):
                # Get the passer's student ID
passer = tag.get_text().replace('\n', '').strip()
                # Skip the header row (the '學號' header cell)
if passer != '學號':
passers.append(passer)
            # Return the result
return passers
except requests.exceptions.Timeout:
return ["Timeout"] | [
"def",
"get_question_passers",
"(",
"self",
",",
"number",
":",
"str",
")",
"->",
"list",
":",
"try",
":",
"# 操作所需資訊",
"params",
"=",
"{",
"'HW_ID'",
":",
"number",
"}",
"# 取得資料",
"response",
"=",
"self",
".",
"__session",
".",
"get",
"(",
"self",
".",
"__url",
"+",
"'/success.jsp'",
",",
"params",
"=",
"params",
",",
"timeout",
"=",
"0.5",
",",
"verify",
"=",
"False",
")",
"soup",
"=",
"BeautifulSoup",
"(",
"response",
".",
"text",
",",
"'html.parser'",
")",
"# 整理通過者資訊",
"passers",
"=",
"[",
"]",
"for",
"tag",
"in",
"soup",
".",
"find_all",
"(",
"'tr'",
")",
":",
"# 取得通過者學號",
"passer",
"=",
"tag",
".",
"get_text",
"(",
")",
".",
"replace",
"(",
"'\\n'",
",",
"''",
")",
".",
"strip",
"(",
")",
"# 跳過標題列",
"if",
"passer",
"!=",
"'學號':",
"",
"passers",
".",
"append",
"(",
"passer",
")",
"# 回傳結果",
"return",
"passers",
"except",
"requests",
".",
"exceptions",
".",
"Timeout",
":",
"return",
"[",
"\"Timeout\"",
"]"
] | Get the list of students who passed a specific problem in the course | [
"取得課程中特定題目通過者列表"
] | train | https://github.com/PinLin/KCOJ_api/blob/64f6ef0f9e64dc1efd692cbe6d5738ee7cfb78ec/KCOJ_api/api.py#L133-L158 | 0.003654 |
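Hedged usage sketch for the row above (the KCOJ constructor and login flow are outside this row, so `api` is assumed to be an already-authenticated KCOJ instance; the problem number is made up):

# Assumption: `api` is a logged-in KCOJ instance; problem number '1' is hypothetical.
passers = api.get_question_passers('1')
print(passers)   # a list of student IDs, or ["Timeout"] if the server is slow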
JdeRobot/base | src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py | MAVLink.position_target_local_ned_send | def position_target_local_ned_send(self, time_boot_ms, coordinate_frame, type_mask, x, y, z, vx, vy, vz, afx, afy, afz, yaw, yaw_rate, force_mavlink1=False):
'''
Reports the current commanded vehicle position, velocity, and
acceleration as specified by the autopilot. This
should match the commands sent in
SET_POSITION_TARGET_LOCAL_NED if the vehicle is being
controlled this way.
time_boot_ms : Timestamp in milliseconds since system boot (uint32_t)
coordinate_frame : Valid options are: MAV_FRAME_LOCAL_NED = 1, MAV_FRAME_LOCAL_OFFSET_NED = 7, MAV_FRAME_BODY_NED = 8, MAV_FRAME_BODY_OFFSET_NED = 9 (uint8_t)
type_mask : Bitmask to indicate which dimensions should be ignored by the vehicle: a value of 0b0000000000000000 or 0b0000001000000000 indicates that none of the setpoint dimensions should be ignored. If bit 10 is set the floats afx afy afz should be interpreted as force instead of acceleration. Mapping: bit 1: x, bit 2: y, bit 3: z, bit 4: vx, bit 5: vy, bit 6: vz, bit 7: ax, bit 8: ay, bit 9: az, bit 10: is force setpoint, bit 11: yaw, bit 12: yaw rate (uint16_t)
x : X Position in NED frame in meters (float)
y : Y Position in NED frame in meters (float)
z : Z Position in NED frame in meters (note, altitude is negative in NED) (float)
vx : X velocity in NED frame in meter / s (float)
vy : Y velocity in NED frame in meter / s (float)
vz : Z velocity in NED frame in meter / s (float)
afx : X acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float)
afy : Y acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float)
afz : Z acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float)
yaw : yaw setpoint in rad (float)
yaw_rate : yaw rate setpoint in rad/s (float)
'''
return self.send(self.position_target_local_ned_encode(time_boot_ms, coordinate_frame, type_mask, x, y, z, vx, vy, vz, afx, afy, afz, yaw, yaw_rate), force_mavlink1=force_mavlink1) | python | def position_target_local_ned_send(self, time_boot_ms, coordinate_frame, type_mask, x, y, z, vx, vy, vz, afx, afy, afz, yaw, yaw_rate, force_mavlink1=False):
'''
Reports the current commanded vehicle position, velocity, and
acceleration as specified by the autopilot. This
should match the commands sent in
SET_POSITION_TARGET_LOCAL_NED if the vehicle is being
controlled this way.
time_boot_ms : Timestamp in milliseconds since system boot (uint32_t)
coordinate_frame : Valid options are: MAV_FRAME_LOCAL_NED = 1, MAV_FRAME_LOCAL_OFFSET_NED = 7, MAV_FRAME_BODY_NED = 8, MAV_FRAME_BODY_OFFSET_NED = 9 (uint8_t)
type_mask : Bitmask to indicate which dimensions should be ignored by the vehicle: a value of 0b0000000000000000 or 0b0000001000000000 indicates that none of the setpoint dimensions should be ignored. If bit 10 is set the floats afx afy afz should be interpreted as force instead of acceleration. Mapping: bit 1: x, bit 2: y, bit 3: z, bit 4: vx, bit 5: vy, bit 6: vz, bit 7: ax, bit 8: ay, bit 9: az, bit 10: is force setpoint, bit 11: yaw, bit 12: yaw rate (uint16_t)
x : X Position in NED frame in meters (float)
y : Y Position in NED frame in meters (float)
z : Z Position in NED frame in meters (note, altitude is negative in NED) (float)
vx : X velocity in NED frame in meter / s (float)
vy : Y velocity in NED frame in meter / s (float)
vz : Z velocity in NED frame in meter / s (float)
afx : X acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float)
afy : Y acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float)
afz : Z acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float)
yaw : yaw setpoint in rad (float)
yaw_rate : yaw rate setpoint in rad/s (float)
'''
return self.send(self.position_target_local_ned_encode(time_boot_ms, coordinate_frame, type_mask, x, y, z, vx, vy, vz, afx, afy, afz, yaw, yaw_rate), force_mavlink1=force_mavlink1) | [
"def",
"position_target_local_ned_send",
"(",
"self",
",",
"time_boot_ms",
",",
"coordinate_frame",
",",
"type_mask",
",",
"x",
",",
"y",
",",
"z",
",",
"vx",
",",
"vy",
",",
"vz",
",",
"afx",
",",
"afy",
",",
"afz",
",",
"yaw",
",",
"yaw_rate",
",",
"force_mavlink1",
"=",
"False",
")",
":",
"return",
"self",
".",
"send",
"(",
"self",
".",
"position_target_local_ned_encode",
"(",
"time_boot_ms",
",",
"coordinate_frame",
",",
"type_mask",
",",
"x",
",",
"y",
",",
"z",
",",
"vx",
",",
"vy",
",",
"vz",
",",
"afx",
",",
"afy",
",",
"afz",
",",
"yaw",
",",
"yaw_rate",
")",
",",
"force_mavlink1",
"=",
"force_mavlink1",
")"
] | Reports the current commanded vehicle position, velocity, and
acceleration as specified by the autopilot. This
should match the commands sent in
SET_POSITION_TARGET_LOCAL_NED if the vehicle is being
controlled this way.
time_boot_ms : Timestamp in milliseconds since system boot (uint32_t)
coordinate_frame : Valid options are: MAV_FRAME_LOCAL_NED = 1, MAV_FRAME_LOCAL_OFFSET_NED = 7, MAV_FRAME_BODY_NED = 8, MAV_FRAME_BODY_OFFSET_NED = 9 (uint8_t)
type_mask : Bitmask to indicate which dimensions should be ignored by the vehicle: a value of 0b0000000000000000 or 0b0000001000000000 indicates that none of the setpoint dimensions should be ignored. If bit 10 is set the floats afx afy afz should be interpreted as force instead of acceleration. Mapping: bit 1: x, bit 2: y, bit 3: z, bit 4: vx, bit 5: vy, bit 6: vz, bit 7: ax, bit 8: ay, bit 9: az, bit 10: is force setpoint, bit 11: yaw, bit 12: yaw rate (uint16_t)
x : X Position in NED frame in meters (float)
y : Y Position in NED frame in meters (float)
z : Z Position in NED frame in meters (note, altitude is negative in NED) (float)
vx : X velocity in NED frame in meter / s (float)
vy : Y velocity in NED frame in meter / s (float)
vz : Z velocity in NED frame in meter / s (float)
afx : X acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float)
afy : Y acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float)
afz : Z acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float)
yaw : yaw setpoint in rad (float)
yaw_rate : yaw rate setpoint in rad/s (float) | [
"Reports",
"the",
"current",
"commanded",
"vehicle",
"position",
"velocity",
"and",
"acceleration",
"as",
"specified",
"by",
"the",
"autopilot",
".",
"This",
"should",
"match",
"the",
"commands",
"sent",
"in",
"SET_POSITION_TARGET_LOCAL_NED",
"if",
"the",
"vehicle",
"is",
"being",
"controlled",
"this",
"way",
"."
] | train | https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py#L10645-L10669 | 0.006116 |
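The type_mask bitmask is the non-obvious part of the row above; this sketch derives a position-only mask from the docstring's own mapping (bits numbered from 1; a set bit means "ignore this dimension" -- the constants are computed here, not taken from the dataset):

# Command position only: ignore velocity, acceleration, yaw and yaw rate.
IGNORE_VELOCITY = 0b0000000000111000   # bits 4-6: vx, vy, vz
IGNORE_ACCEL    = 0b0000000111000000   # bits 7-9: afx, afy, afz (bit 10, force flag, left clear)
IGNORE_YAW      = 0b0000110000000000   # bits 11-12: yaw, yaw rate
type_mask = IGNORE_VELOCITY | IGNORE_ACCEL | IGNORE_YAW   # == 0x0DF8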
openstack/horizon | openstack_auth/utils.py | _augment_url_with_version | def _augment_url_with_version(auth_url):
"""Optionally augment auth_url path with version suffix.
Check if path component already contains version suffix and if it does
not, append version suffix to the end of path, not erasing the previous
path contents, since keystone web endpoint (like /identity) could be
there. Keystone version needs to be added to endpoint because as of Kilo,
the identity URLs returned by Keystone might no longer contain API
versions, leaving the version choice up to the user.
"""
if has_in_url_path(auth_url, ["/v2.0", "/v3"]):
return auth_url
if get_keystone_version() >= 3:
return url_path_append(auth_url, "/v3")
else:
return url_path_append(auth_url, "/v2.0") | python | def _augment_url_with_version(auth_url):
"""Optionally augment auth_url path with version suffix.
Check if path component already contains version suffix and if it does
not, append version suffix to the end of path, not erasing the previous
path contents, since keystone web endpoint (like /identity) could be
there. Keystone version needs to be added to endpoint because as of Kilo,
the identity URLs returned by Keystone might no longer contain API
versions, leaving the version choice up to the user.
"""
if has_in_url_path(auth_url, ["/v2.0", "/v3"]):
return auth_url
if get_keystone_version() >= 3:
return url_path_append(auth_url, "/v3")
else:
return url_path_append(auth_url, "/v2.0") | [
"def",
"_augment_url_with_version",
"(",
"auth_url",
")",
":",
"if",
"has_in_url_path",
"(",
"auth_url",
",",
"[",
"\"/v2.0\"",
",",
"\"/v3\"",
"]",
")",
":",
"return",
"auth_url",
"if",
"get_keystone_version",
"(",
")",
">=",
"3",
":",
"return",
"url_path_append",
"(",
"auth_url",
",",
"\"/v3\"",
")",
"else",
":",
"return",
"url_path_append",
"(",
"auth_url",
",",
"\"/v2.0\"",
")"
] | Optionally augment auth_url path with version suffix.
Check if path component already contains version suffix and if it does
not, append version suffix to the end of path, not erasing the previous
path contents, since keystone web endpoint (like /identity) could be
there. Keystone version needs to be added to endpoint because as of Kilo,
the identity URLs returned by Keystone might no longer contain API
versions, leaving the version choice up to the user. | [
"Optionally",
"augment",
"auth_url",
"path",
"with",
"version",
"suffix",
"."
] | train | https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/openstack_auth/utils.py#L287-L303 | 0.001314 |
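A doctest-style sketch of the intended behaviour of the row above (hosts are hypothetical; has_in_url_path and url_path_append are helpers defined elsewhere in the same module, and a Keystone v3 deployment is assumed):

# Expected behaviour when get_keystone_version() >= 3:
#   _augment_url_with_version('http://host/identity')    -> 'http://host/identity/v3'
#   _augment_url_with_version('http://host/identity/v3') -> 'http://host/identity/v3'  (already versioned)
#   _augment_url_with_version('http://host:5000/v2.0')   -> 'http://host:5000/v2.0'    (already versioned)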
gwastro/pycbc | pycbc/cosmology.py | z_at_value | def z_at_value(func, fval, unit, zmax=1000., **kwargs):
r"""Wrapper around astropy.cosmology.z_at_value to handle numpy arrays.
Getting a z for a cosmological quantity involves numerically inverting
``func``. The ``zmax`` argument sets how large of a z to guess (see
:py:func:`astropy.cosmology.z_at_value` for details). If a z is larger than
``zmax``, this will try a larger zmax up to ``zmax * 10**5``. If that still
is not large enough, will just return ``numpy.inf``.
Parameters
----------
func : function or method
A function that takes redshift as input.
fval : float
The value of ``func(z)``.
unit : astropy.unit
The unit of ``fval``.
zmax : float, optional
The initial maximum search limit for ``z``. Default is 1000.
\**kwargs :
All other keyword arguments are passed to
        :py:func:`astropy.cosmology.z_at_value`.
Returns
-------
float
The redshift at the requested values.
"""
fval, input_is_array = ensurearray(fval)
# make sure fval is atleast 1D
if fval.size == 1 and fval.ndim == 0:
fval = fval.reshape(1)
zs = numpy.zeros(fval.shape, dtype=float) # the output array
for (ii, val) in enumerate(fval):
try:
zs[ii] = astropy.cosmology.z_at_value(func, val*unit, zmax=zmax,
**kwargs)
except CosmologyError:
# we'll get this if the z was larger than zmax; in that case we'll
# try bumping up zmax later to get a value
zs[ii] = numpy.inf
# check if there were any zs > zmax
replacemask = numpy.isinf(zs)
# try bumping up zmax to get a result
if replacemask.any():
# we'll keep bumping up the maxz until we can get a result
counter = 0 # to prevent running forever
while replacemask.any():
kwargs['zmin'] = zmax
zmax = 10 * zmax
idx = numpy.where(replacemask)
for ii in idx:
val = fval[ii]
try:
zs[ii] = astropy.cosmology.z_at_value(
func, val*unit, zmax=zmax, **kwargs)
replacemask[ii] = False
except CosmologyError:
# didn't work, try on next loop
pass
counter += 1
if counter == 5:
# give up and warn the user
logging.warning("One or more values correspond to a "
"redshift > {0:.1e}. The redshift for these "
"have been set to inf. If you would like "
"better precision, call God.".format(zmax))
break
return formatreturn(zs, input_is_array) | python | def z_at_value(func, fval, unit, zmax=1000., **kwargs):
r"""Wrapper around astropy.cosmology.z_at_value to handle numpy arrays.
Getting a z for a cosmological quantity involves numerically inverting
``func``. The ``zmax`` argument sets how large of a z to guess (see
:py:func:`astropy.cosmology.z_at_value` for details). If a z is larger than
``zmax``, this will try a larger zmax up to ``zmax * 10**5``. If that still
is not large enough, will just return ``numpy.inf``.
Parameters
----------
func : function or method
A function that takes redshift as input.
fval : float
The value of ``func(z)``.
unit : astropy.unit
The unit of ``fval``.
zmax : float, optional
The initial maximum search limit for ``z``. Default is 1000.
\**kwargs :
All other keyword arguments are passed to
        :py:func:`astropy.cosmology.z_at_value`.
Returns
-------
float
The redshift at the requested values.
"""
fval, input_is_array = ensurearray(fval)
# make sure fval is atleast 1D
if fval.size == 1 and fval.ndim == 0:
fval = fval.reshape(1)
zs = numpy.zeros(fval.shape, dtype=float) # the output array
for (ii, val) in enumerate(fval):
try:
zs[ii] = astropy.cosmology.z_at_value(func, val*unit, zmax=zmax,
**kwargs)
except CosmologyError:
# we'll get this if the z was larger than zmax; in that case we'll
# try bumping up zmax later to get a value
zs[ii] = numpy.inf
# check if there were any zs > zmax
replacemask = numpy.isinf(zs)
# try bumping up zmax to get a result
if replacemask.any():
# we'll keep bumping up the maxz until we can get a result
counter = 0 # to prevent running forever
while replacemask.any():
kwargs['zmin'] = zmax
zmax = 10 * zmax
idx = numpy.where(replacemask)
for ii in idx:
val = fval[ii]
try:
zs[ii] = astropy.cosmology.z_at_value(
func, val*unit, zmax=zmax, **kwargs)
replacemask[ii] = False
except CosmologyError:
# didn't work, try on next loop
pass
counter += 1
if counter == 5:
# give up and warn the user
logging.warning("One or more values correspond to a "
"redshift > {0:.1e}. The redshift for these "
"have been set to inf. If you would like "
"better precision, call God.".format(zmax))
break
return formatreturn(zs, input_is_array) | [
"def",
"z_at_value",
"(",
"func",
",",
"fval",
",",
"unit",
",",
"zmax",
"=",
"1000.",
",",
"*",
"*",
"kwargs",
")",
":",
"fval",
",",
"input_is_array",
"=",
"ensurearray",
"(",
"fval",
")",
"# make sure fval is atleast 1D",
"if",
"fval",
".",
"size",
"==",
"1",
"and",
"fval",
".",
"ndim",
"==",
"0",
":",
"fval",
"=",
"fval",
".",
"reshape",
"(",
"1",
")",
"zs",
"=",
"numpy",
".",
"zeros",
"(",
"fval",
".",
"shape",
",",
"dtype",
"=",
"float",
")",
"# the output array",
"for",
"(",
"ii",
",",
"val",
")",
"in",
"enumerate",
"(",
"fval",
")",
":",
"try",
":",
"zs",
"[",
"ii",
"]",
"=",
"astropy",
".",
"cosmology",
".",
"z_at_value",
"(",
"func",
",",
"val",
"*",
"unit",
",",
"zmax",
"=",
"zmax",
",",
"*",
"*",
"kwargs",
")",
"except",
"CosmologyError",
":",
"# we'll get this if the z was larger than zmax; in that case we'll",
"# try bumping up zmax later to get a value",
"zs",
"[",
"ii",
"]",
"=",
"numpy",
".",
"inf",
"# check if there were any zs > zmax",
"replacemask",
"=",
"numpy",
".",
"isinf",
"(",
"zs",
")",
"# try bumping up zmax to get a result",
"if",
"replacemask",
".",
"any",
"(",
")",
":",
"# we'll keep bumping up the maxz until we can get a result",
"counter",
"=",
"0",
"# to prevent running forever",
"while",
"replacemask",
".",
"any",
"(",
")",
":",
"kwargs",
"[",
"'zmin'",
"]",
"=",
"zmax",
"zmax",
"=",
"10",
"*",
"zmax",
"idx",
"=",
"numpy",
".",
"where",
"(",
"replacemask",
")",
"for",
"ii",
"in",
"idx",
":",
"val",
"=",
"fval",
"[",
"ii",
"]",
"try",
":",
"zs",
"[",
"ii",
"]",
"=",
"astropy",
".",
"cosmology",
".",
"z_at_value",
"(",
"func",
",",
"val",
"*",
"unit",
",",
"zmax",
"=",
"zmax",
",",
"*",
"*",
"kwargs",
")",
"replacemask",
"[",
"ii",
"]",
"=",
"False",
"except",
"CosmologyError",
":",
"# didn't work, try on next loop",
"pass",
"counter",
"+=",
"1",
"if",
"counter",
"==",
"5",
":",
"# give up and warn the user",
"logging",
".",
"warning",
"(",
"\"One or more values correspond to a \"",
"\"redshift > {0:.1e}. The redshift for these \"",
"\"have been set to inf. If you would like \"",
"\"better precision, call God.\"",
".",
"format",
"(",
"zmax",
")",
")",
"break",
"return",
"formatreturn",
"(",
"zs",
",",
"input_is_array",
")"
] | r"""Wrapper around astropy.cosmology.z_at_value to handle numpy arrays.
Getting a z for a cosmological quantity involves numerically inverting
``func``. The ``zmax`` argument sets how large of a z to guess (see
:py:func:`astropy.cosmology.z_at_value` for details). If a z is larger than
``zmax``, this will try a larger zmax up to ``zmax * 10**5``. If that still
is not large enough, will just return ``numpy.inf``.
Parameters
----------
func : function or method
A function that takes redshift as input.
fval : float
The value of ``func(z)``.
unit : astropy.unit
The unit of ``fval``.
zmax : float, optional
The initial maximum search limit for ``z``. Default is 1000.
\**kwargs :
All other keyword arguments are passed to
    :py:func:`astropy.cosmology.z_at_value`.
Returns
-------
float
The redshift at the requested values. | [
"r",
"Wrapper",
"around",
"astropy",
".",
"cosmology",
".",
"z_at_value",
"to",
"handle",
"numpy",
"arrays",
"."
] | train | https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/cosmology.py#L105-L173 | 0.000353 |
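A usage sketch for the array-aware wrapper above (assumes astropy is installed; Planck15, units.Mpc and luminosity_distance are standard astropy names, and the array input is precisely what this wrapper adds over astropy's scalar-only z_at_value):

import numpy
from astropy import units
from astropy.cosmology import Planck15
# Redshifts at which Planck15 predicts luminosity distances of 100 and 1000 Mpc:
zs = z_at_value(Planck15.luminosity_distance, numpy.array([100., 1000.]), units.Mpc)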
seequent/properties | properties/basic.py | GettableProperty.equal | def equal(self, value_a, value_b): #pylint: disable=no-self-use
"""Check if two valid Property values are equal
.. note::
This method assumes that :code:`None` and
:code:`properties.undefined` are never passed in as values
"""
equal = value_a == value_b
if hasattr(equal, '__iter__'):
return all(equal)
return equal | python | def equal(self, value_a, value_b): #pylint: disable=no-self-use
"""Check if two valid Property values are equal
.. note::
This method assumes that :code:`None` and
:code:`properties.undefined` are never passed in as values
"""
equal = value_a == value_b
if hasattr(equal, '__iter__'):
return all(equal)
return equal | [
"def",
"equal",
"(",
"self",
",",
"value_a",
",",
"value_b",
")",
":",
"#pylint: disable=no-self-use",
"equal",
"=",
"value_a",
"==",
"value_b",
"if",
"hasattr",
"(",
"equal",
",",
"'__iter__'",
")",
":",
"return",
"all",
"(",
"equal",
")",
"return",
"equal"
] | Check if two valid Property values are equal
.. note::
This method assumes that :code:`None` and
:code:`properties.undefined` are never passed in as values | [
"Check",
"if",
"two",
"valid",
"Property",
"values",
"are",
"equal"
] | train | https://github.com/seequent/properties/blob/096b07012fff86b0a880c8c018320c3b512751b9/properties/basic.py#L264-L275 | 0.00907 |
mfussenegger/cr8 | cr8/clients.py | _plain_or_callable | def _plain_or_callable(obj):
"""Returns the value of the called object of obj is a callable,
otherwise the plain object.
Returns None if obj is None.
>>> obj = None
>>> _plain_or_callable(obj)
>>> stmt = 'select * from sys.nodes'
>>> _plain_or_callable(stmt)
'select * from sys.nodes'
>>> def _args():
... return [1, 'name']
>>> _plain_or_callable(_args)
[1, 'name']
>>> _plain_or_callable((x for x in range(10)))
0
>>> class BulkArgsGenerator:
... def __call__(self):
... return [[1, 'foo'], [2, 'bar'], [3, 'foobar']]
>>> _plain_or_callable(BulkArgsGenerator())
[[1, 'foo'], [2, 'bar'], [3, 'foobar']]
"""
if callable(obj):
return obj()
elif isinstance(obj, types.GeneratorType):
return next(obj)
else:
return obj | python | def _plain_or_callable(obj):
"""Returns the value of the called object of obj is a callable,
otherwise the plain object.
Returns None if obj is None.
>>> obj = None
>>> _plain_or_callable(obj)
>>> stmt = 'select * from sys.nodes'
>>> _plain_or_callable(stmt)
'select * from sys.nodes'
>>> def _args():
... return [1, 'name']
>>> _plain_or_callable(_args)
[1, 'name']
>>> _plain_or_callable((x for x in range(10)))
0
>>> class BulkArgsGenerator:
... def __call__(self):
... return [[1, 'foo'], [2, 'bar'], [3, 'foobar']]
>>> _plain_or_callable(BulkArgsGenerator())
[[1, 'foo'], [2, 'bar'], [3, 'foobar']]
"""
if callable(obj):
return obj()
elif isinstance(obj, types.GeneratorType):
return next(obj)
else:
return obj | [
"def",
"_plain_or_callable",
"(",
"obj",
")",
":",
"if",
"callable",
"(",
"obj",
")",
":",
"return",
"obj",
"(",
")",
"elif",
"isinstance",
"(",
"obj",
",",
"types",
".",
"GeneratorType",
")",
":",
"return",
"next",
"(",
"obj",
")",
"else",
":",
"return",
"obj"
] | Returns the value of the called object if obj is a callable,
otherwise the plain object.
Returns None if obj is None.
>>> obj = None
>>> _plain_or_callable(obj)
>>> stmt = 'select * from sys.nodes'
>>> _plain_or_callable(stmt)
'select * from sys.nodes'
>>> def _args():
... return [1, 'name']
>>> _plain_or_callable(_args)
[1, 'name']
>>> _plain_or_callable((x for x in range(10)))
0
>>> class BulkArgsGenerator:
... def __call__(self):
... return [[1, 'foo'], [2, 'bar'], [3, 'foobar']]
>>> _plain_or_callable(BulkArgsGenerator())
[[1, 'foo'], [2, 'bar'], [3, 'foobar']] | [
"Returns",
"the",
"value",
"of",
"the",
"called",
"object",
"of",
"obj",
"is",
"a",
"callable",
"otherwise",
"the",
"plain",
"object",
".",
"Returns",
"None",
"if",
"obj",
"is",
"None",
"."
] | train | https://github.com/mfussenegger/cr8/blob/a37d6049f1f9fee2d0556efae2b7b7f8761bffe8/cr8/clients.py#L97-L128 | 0.001174 |
aws/aws-iot-device-sdk-python | AWSIoTPythonSDK/core/protocol/paho/client.py | Client.loop_stop | def loop_stop(self, force=False):
"""This is part of the threaded client interface. Call this once to
stop the network thread previously created with loop_start(). This call
will block until the network thread finishes.
The force parameter is currently ignored.
"""
if self._thread is None:
return MQTT_ERR_INVAL
self._thread_terminate = True
self._thread.join()
self._thread = None | python | def loop_stop(self, force=False):
"""This is part of the threaded client interface. Call this once to
stop the network thread previously created with loop_start(). This call
will block until the network thread finishes.
The force parameter is currently ignored.
"""
if self._thread is None:
return MQTT_ERR_INVAL
self._thread_terminate = True
self._thread.join()
self._thread = None | [
"def",
"loop_stop",
"(",
"self",
",",
"force",
"=",
"False",
")",
":",
"if",
"self",
".",
"_thread",
"is",
"None",
":",
"return",
"MQTT_ERR_INVAL",
"self",
".",
"_thread_terminate",
"=",
"True",
"self",
".",
"_thread",
".",
"join",
"(",
")",
"self",
".",
"_thread",
"=",
"None"
] | This is part of the threaded client interface. Call this once to
stop the network thread previously created with loop_start(). This call
will block until the network thread finishes.
The force parameter is currently ignored. | [
"This",
"is",
"part",
"of",
"the",
"threaded",
"client",
"interface",
".",
"Call",
"this",
"once",
"to",
"stop",
"the",
"network",
"thread",
"previously",
"created",
"with",
"loop_start",
"()",
".",
"This",
"call",
"will",
"block",
"until",
"the",
"network",
"thread",
"finishes",
"."
] | train | https://github.com/aws/aws-iot-device-sdk-python/blob/f0aa2ce34b21dd2e44f4fb7e1d058656aaf2fc62/AWSIoTPythonSDK/core/protocol/paho/client.py#L1399-L1411 | 0.004274 |
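The usual pairing with loop_start(), sketched against the classic paho-mqtt 1.x API that this AWS-vendored copy mirrors (broker host is a placeholder):

import paho.mqtt.client as mqtt
client = mqtt.Client()
client.connect('broker.example.com')   # placeholder host
client.loop_start()                    # spawns the network thread
# ... publish/subscribe work happens here ...
client.loop_stop()                     # blocks until the network thread exits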
jupyterhub/kubespawner | kubespawner/spawner.py | KubeSpawner.poll | def poll(self):
"""
Check if the pod is still running.
Uses the same interface as subprocess.Popen.poll(): if the pod is
still running, returns None. If the pod has exited, return the
exit code if we can determine it, or 1 if it has exited but we
don't know how. These are the return values JupyterHub expects.
Note that a clean exit will have an exit code of zero, so it is
necessary to check that the returned value is None, rather than
just Falsy, to determine that the pod is still running.
"""
# have to wait for first load of data before we have a valid answer
if not self.pod_reflector.first_load_future.done():
yield self.pod_reflector.first_load_future
data = self.pod_reflector.pods.get(self.pod_name, None)
if data is not None:
if data.status.phase == 'Pending':
return None
ctr_stat = data.status.container_statuses
if ctr_stat is None: # No status, no container (we hope)
# This seems to happen when a pod is idle-culled.
return 1
for c in ctr_stat:
# return exit code if notebook container has terminated
if c.name == 'notebook':
if c.state.terminated:
# call self.stop to delete the pod
if self.delete_stopped_pods:
yield self.stop(now=True)
return c.state.terminated.exit_code
break
# None means pod is running or starting up
return None
# pod doesn't exist or has been deleted
return 1 | python | def poll(self):
"""
Check if the pod is still running.
Uses the same interface as subprocess.Popen.poll(): if the pod is
still running, returns None. If the pod has exited, return the
exit code if we can determine it, or 1 if it has exited but we
don't know how. These are the return values JupyterHub expects.
Note that a clean exit will have an exit code of zero, so it is
necessary to check that the returned value is None, rather than
just Falsy, to determine that the pod is still running.
"""
# have to wait for first load of data before we have a valid answer
if not self.pod_reflector.first_load_future.done():
yield self.pod_reflector.first_load_future
data = self.pod_reflector.pods.get(self.pod_name, None)
if data is not None:
if data.status.phase == 'Pending':
return None
ctr_stat = data.status.container_statuses
if ctr_stat is None: # No status, no container (we hope)
# This seems to happen when a pod is idle-culled.
return 1
for c in ctr_stat:
# return exit code if notebook container has terminated
if c.name == 'notebook':
if c.state.terminated:
# call self.stop to delete the pod
if self.delete_stopped_pods:
yield self.stop(now=True)
return c.state.terminated.exit_code
break
# None means pod is running or starting up
return None
# pod doesn't exist or has been deleted
return 1 | [
"def",
"poll",
"(",
"self",
")",
":",
"# have to wait for first load of data before we have a valid answer",
"if",
"not",
"self",
".",
"pod_reflector",
".",
"first_load_future",
".",
"done",
"(",
")",
":",
"yield",
"self",
".",
"pod_reflector",
".",
"first_load_future",
"data",
"=",
"self",
".",
"pod_reflector",
".",
"pods",
".",
"get",
"(",
"self",
".",
"pod_name",
",",
"None",
")",
"if",
"data",
"is",
"not",
"None",
":",
"if",
"data",
".",
"status",
".",
"phase",
"==",
"'Pending'",
":",
"return",
"None",
"ctr_stat",
"=",
"data",
".",
"status",
".",
"container_statuses",
"if",
"ctr_stat",
"is",
"None",
":",
"# No status, no container (we hope)",
"# This seems to happen when a pod is idle-culled.",
"return",
"1",
"for",
"c",
"in",
"ctr_stat",
":",
"# return exit code if notebook container has terminated",
"if",
"c",
".",
"name",
"==",
"'notebook'",
":",
"if",
"c",
".",
"state",
".",
"terminated",
":",
"# call self.stop to delete the pod",
"if",
"self",
".",
"delete_stopped_pods",
":",
"yield",
"self",
".",
"stop",
"(",
"now",
"=",
"True",
")",
"return",
"c",
".",
"state",
".",
"terminated",
".",
"exit_code",
"break",
"# None means pod is running or starting up",
"return",
"None",
"# pod doesn't exist or has been deleted",
"return",
"1"
] | Check if the pod is still running.
Uses the same interface as subprocess.Popen.poll(): if the pod is
still running, returns None. If the pod has exited, return the
exit code if we can determine it, or 1 if it has exited but we
don't know how. These are the return values JupyterHub expects.
Note that a clean exit will have an exit code of zero, so it is
necessary to check that the returned value is None, rather than
just Falsy, to determine that the pod is still running. | [
"Check",
"if",
"the",
"pod",
"is",
"still",
"running",
"."
] | train | https://github.com/jupyterhub/kubespawner/blob/46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13/kubespawner/spawner.py#L1456-L1492 | 0.00115 |
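A sketch of how a caller reads the Popen-style contract described above (`spawner` stands in for a KubeSpawner instance; the yield reflects that poll() is a coroutine here):

status = yield spawner.poll()   # inside a coroutine, as JupyterHub calls it
if status is None:
    pass                        # pod still running -- note that 0 means a *clean* exit
else:
    print('pod exited with code', status)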
jxtech/wechatpy | wechatpy/pay/api/order.py | WeChatOrder.close | def close(self, out_trade_no):
"""
        Close an order
        :param out_trade_no: The merchant's internal order number
        :return: The returned result data
"""
data = {
'appid': self.appid,
'out_trade_no': out_trade_no,
}
return self._post('pay/closeorder', data=data) | python | def close(self, out_trade_no):
"""
        Close an order
        :param out_trade_no: The merchant's internal order number
        :return: The returned result data
"""
data = {
'appid': self.appid,
'out_trade_no': out_trade_no,
}
return self._post('pay/closeorder', data=data) | [
"def",
"close",
"(",
"self",
",",
"out_trade_no",
")",
":",
"data",
"=",
"{",
"'appid'",
":",
"self",
".",
"appid",
",",
"'out_trade_no'",
":",
"out_trade_no",
",",
"}",
"return",
"self",
".",
"_post",
"(",
"'pay/closeorder'",
",",
"data",
"=",
"data",
")"
] | Close an order
        :param out_trade_no: The merchant's internal order number
        :return: The returned result data | [
"关闭订单"
] | train | https://github.com/jxtech/wechatpy/blob/4df0da795618c0895a10f1c2cde9e9d5c0a93aaa/wechatpy/pay/api/order.py#L97-L108 | 0.006897 |
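Hedged usage sketch (the WeChatPay constructor values are placeholders and its exact signature is not shown in this row; the `.order` attribute exposing the WeChatOrder API matches the class above):

from wechatpy.pay import WeChatPay
pay = WeChatPay(appid='wx...', api_key='...', mch_id='...')   # placeholder credentials
pay.order.close('out-trade-no-123')                           # hypothetical out_trade_no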
jgilchrist/pybib | pybib/formatters.py | color_parts | def color_parts(parts):
"""Adds colors to each part of the citation"""
return parts._replace(
title=Fore.GREEN + parts.title + Style.RESET_ALL,
doi=Fore.CYAN + parts.doi + Style.RESET_ALL
) | python | def color_parts(parts):
"""Adds colors to each part of the citation"""
return parts._replace(
title=Fore.GREEN + parts.title + Style.RESET_ALL,
doi=Fore.CYAN + parts.doi + Style.RESET_ALL
) | [
"def",
"color_parts",
"(",
"parts",
")",
":",
"return",
"parts",
".",
"_replace",
"(",
"title",
"=",
"Fore",
".",
"GREEN",
"+",
"parts",
".",
"title",
"+",
"Style",
".",
"RESET_ALL",
",",
"doi",
"=",
"Fore",
".",
"CYAN",
"+",
"parts",
".",
"doi",
"+",
"Style",
".",
"RESET_ALL",
")"
] | Adds colors to each part of the citation | [
"Adds",
"colors",
"to",
"each",
"part",
"of",
"the",
"citation"
] | train | https://github.com/jgilchrist/pybib/blob/da2130d281bb02e930728ed7c1d0c1dffa747ee0/pybib/formatters.py#L22-L27 | 0.004608 |
zhmcclient/python-zhmcclient | zhmcclient_mock/_urihandler.py | CpcStopHandler.post | def post(method, hmc, uri, uri_parms, body, logon_required,
wait_for_completion):
"""Operation: Stop CPC (requires DPM mode)."""
assert wait_for_completion is True # async not supported yet
cpc_oid = uri_parms[0]
try:
cpc = hmc.cpcs.lookup_by_oid(cpc_oid)
except KeyError:
raise InvalidResourceError(method, uri)
if not cpc.dpm_enabled:
raise CpcNotInDpmError(method, uri, cpc)
cpc.properties['status'] = 'not-operating' | python | def post(method, hmc, uri, uri_parms, body, logon_required,
wait_for_completion):
"""Operation: Stop CPC (requires DPM mode)."""
assert wait_for_completion is True # async not supported yet
cpc_oid = uri_parms[0]
try:
cpc = hmc.cpcs.lookup_by_oid(cpc_oid)
except KeyError:
raise InvalidResourceError(method, uri)
if not cpc.dpm_enabled:
raise CpcNotInDpmError(method, uri, cpc)
cpc.properties['status'] = 'not-operating' | [
"def",
"post",
"(",
"method",
",",
"hmc",
",",
"uri",
",",
"uri_parms",
",",
"body",
",",
"logon_required",
",",
"wait_for_completion",
")",
":",
"assert",
"wait_for_completion",
"is",
"True",
"# async not supported yet",
"cpc_oid",
"=",
"uri_parms",
"[",
"0",
"]",
"try",
":",
"cpc",
"=",
"hmc",
".",
"cpcs",
".",
"lookup_by_oid",
"(",
"cpc_oid",
")",
"except",
"KeyError",
":",
"raise",
"InvalidResourceError",
"(",
"method",
",",
"uri",
")",
"if",
"not",
"cpc",
".",
"dpm_enabled",
":",
"raise",
"CpcNotInDpmError",
"(",
"method",
",",
"uri",
",",
"cpc",
")",
"cpc",
".",
"properties",
"[",
"'status'",
"]",
"=",
"'not-operating'"
] | Operation: Stop CPC (requires DPM mode). | [
"Operation",
":",
"Stop",
"CPC",
"(",
"requires",
"DPM",
"mode",
")",
"."
] | train | https://github.com/zhmcclient/python-zhmcclient/blob/9657563e5d9184c51d3c903442a58b9725fdf335/zhmcclient_mock/_urihandler.py#L1110-L1121 | 0.005703 |
iamjarret/pystockfish | pystockfish.py | Match.move | def move(self):
"""
Advance game by single move, if possible.
@return: logical indicator if move was performed.
"""
if len(self.moves) == MAX_MOVES:
return False
elif len(self.moves) % 2:
active_engine = self.black_engine
active_engine_name = self.black
inactive_engine = self.white_engine
inactive_engine_name = self.white
else:
active_engine = self.white_engine
active_engine_name = self.white
inactive_engine = self.black_engine
inactive_engine_name = self.black
active_engine.setposition(self.moves)
movedict = active_engine.bestmove()
bestmove = movedict.get('move')
info = movedict.get('info')
ponder = movedict.get('ponder')
self.moves.append(bestmove)
if info["score"]["eval"] == "mate":
matenum = info["score"]["value"]
if matenum > 0:
self.winner_engine = active_engine
self.winner = active_engine_name
elif matenum < 0:
self.winner_engine = inactive_engine
self.winner = inactive_engine_name
return False
if ponder != '(none)':
return True | python | def move(self):
"""
Advance game by single move, if possible.
@return: logical indicator if move was performed.
"""
if len(self.moves) == MAX_MOVES:
return False
elif len(self.moves) % 2:
active_engine = self.black_engine
active_engine_name = self.black
inactive_engine = self.white_engine
inactive_engine_name = self.white
else:
active_engine = self.white_engine
active_engine_name = self.white
inactive_engine = self.black_engine
inactive_engine_name = self.black
active_engine.setposition(self.moves)
movedict = active_engine.bestmove()
bestmove = movedict.get('move')
info = movedict.get('info')
ponder = movedict.get('ponder')
self.moves.append(bestmove)
if info["score"]["eval"] == "mate":
matenum = info["score"]["value"]
if matenum > 0:
self.winner_engine = active_engine
self.winner = active_engine_name
elif matenum < 0:
self.winner_engine = inactive_engine
self.winner = inactive_engine_name
return False
if ponder != '(none)':
return True | [
"def",
"move",
"(",
"self",
")",
":",
"if",
"len",
"(",
"self",
".",
"moves",
")",
"==",
"MAX_MOVES",
":",
"return",
"False",
"elif",
"len",
"(",
"self",
".",
"moves",
")",
"%",
"2",
":",
"active_engine",
"=",
"self",
".",
"black_engine",
"active_engine_name",
"=",
"self",
".",
"black",
"inactive_engine",
"=",
"self",
".",
"white_engine",
"inactive_engine_name",
"=",
"self",
".",
"white",
"else",
":",
"active_engine",
"=",
"self",
".",
"white_engine",
"active_engine_name",
"=",
"self",
".",
"white",
"inactive_engine",
"=",
"self",
".",
"black_engine",
"inactive_engine_name",
"=",
"self",
".",
"black",
"active_engine",
".",
"setposition",
"(",
"self",
".",
"moves",
")",
"movedict",
"=",
"active_engine",
".",
"bestmove",
"(",
")",
"bestmove",
"=",
"movedict",
".",
"get",
"(",
"'move'",
")",
"info",
"=",
"movedict",
".",
"get",
"(",
"'info'",
")",
"ponder",
"=",
"movedict",
".",
"get",
"(",
"'ponder'",
")",
"self",
".",
"moves",
".",
"append",
"(",
"bestmove",
")",
"if",
"info",
"[",
"\"score\"",
"]",
"[",
"\"eval\"",
"]",
"==",
"\"mate\"",
":",
"matenum",
"=",
"info",
"[",
"\"score\"",
"]",
"[",
"\"value\"",
"]",
"if",
"matenum",
">",
"0",
":",
"self",
".",
"winner_engine",
"=",
"active_engine",
"self",
".",
"winner",
"=",
"active_engine_name",
"elif",
"matenum",
"<",
"0",
":",
"self",
".",
"winner_engine",
"=",
"inactive_engine",
"self",
".",
"winner",
"=",
"inactive_engine_name",
"return",
"False",
"if",
"ponder",
"!=",
"'(none)'",
":",
"return",
"True"
] | Advance game by single move, if possible.
@return: logical indicator if move was performed. | [
"Advance",
"game",
"by",
"single",
"move",
"if",
"possible",
"."
] | train | https://github.com/iamjarret/pystockfish/blob/ae34a4b4d29c577c888b72691fcf0cb5a89b1792/pystockfish.py#L54-L90 | 0.001533 |
hyperledger/indy-plenum | plenum/server/view_change/view_changer.py | ViewChanger.process_instance_change_msg | def process_instance_change_msg(self, instChg: InstanceChange, frm: str) -> None:
"""
Validate and process an instance change request.
:param instChg: the instance change request
:param frm: the name of the node that sent this `msg`
"""
if frm not in self.provider.connected_nodes():
self.provider.discard(
instChg,
"received instance change request: {} from {} "
"which is not in connected list: {}".format(
instChg, frm, self.provider.connected_nodes()), logger.info)
return
logger.info("{} received instance change request: {} from {}".format(self, instChg, frm))
# TODO: add sender to blacklist?
if not isinstance(instChg.viewNo, int):
self.provider.discard(
instChg, "{}field view_no has incorrect type: {}".format(
VIEW_CHANGE_PREFIX, type(instChg.viewNo)))
elif instChg.viewNo <= self.view_no:
self.provider.discard(
instChg,
"Received instance change request with view no {} "
"which is not more than its view no {}".format(
instChg.viewNo, self.view_no), logger.info)
else:
            # Record instance changes for views, but send an instance change
            # only when the master is found to be degraded. If a quorum of view
            # changes is found, change the view even if the master is not degraded
self._on_verified_instance_change_msg(instChg, frm)
if self.instance_changes.has_inst_chng_from(instChg.viewNo, self.name):
logger.info("{} received instance change message {} but has already "
"sent an instance change message".format(self, instChg))
elif not self.provider.is_master_degraded():
logger.info("{} received instance change message {} but did not "
"find the master to be slow".format(self, instChg))
else:
logger.display("{}{} found master degraded after receiving instance change"
" message from {}".format(VIEW_CHANGE_PREFIX, self, frm))
self.sendInstanceChange(instChg.viewNo) | python | def process_instance_change_msg(self, instChg: InstanceChange, frm: str) -> None:
"""
Validate and process an instance change request.
:param instChg: the instance change request
:param frm: the name of the node that sent this `msg`
"""
if frm not in self.provider.connected_nodes():
self.provider.discard(
instChg,
"received instance change request: {} from {} "
"which is not in connected list: {}".format(
instChg, frm, self.provider.connected_nodes()), logger.info)
return
logger.info("{} received instance change request: {} from {}".format(self, instChg, frm))
# TODO: add sender to blacklist?
if not isinstance(instChg.viewNo, int):
self.provider.discard(
instChg, "{}field view_no has incorrect type: {}".format(
VIEW_CHANGE_PREFIX, type(instChg.viewNo)))
elif instChg.viewNo <= self.view_no:
self.provider.discard(
instChg,
"Received instance change request with view no {} "
"which is not more than its view no {}".format(
instChg.viewNo, self.view_no), logger.info)
else:
            # Record instance changes for views, but send an instance change
            # only when the master is found to be degraded. If a quorum of view
            # changes is found, change the view even if the master is not degraded
self._on_verified_instance_change_msg(instChg, frm)
if self.instance_changes.has_inst_chng_from(instChg.viewNo, self.name):
logger.info("{} received instance change message {} but has already "
"sent an instance change message".format(self, instChg))
elif not self.provider.is_master_degraded():
logger.info("{} received instance change message {} but did not "
"find the master to be slow".format(self, instChg))
else:
logger.display("{}{} found master degraded after receiving instance change"
" message from {}".format(VIEW_CHANGE_PREFIX, self, frm))
self.sendInstanceChange(instChg.viewNo) | [
"def",
"process_instance_change_msg",
"(",
"self",
",",
"instChg",
":",
"InstanceChange",
",",
"frm",
":",
"str",
")",
"->",
"None",
":",
"if",
"frm",
"not",
"in",
"self",
".",
"provider",
".",
"connected_nodes",
"(",
")",
":",
"self",
".",
"provider",
".",
"discard",
"(",
"instChg",
",",
"\"received instance change request: {} from {} \"",
"\"which is not in connected list: {}\"",
".",
"format",
"(",
"instChg",
",",
"frm",
",",
"self",
".",
"provider",
".",
"connected_nodes",
"(",
")",
")",
",",
"logger",
".",
"info",
")",
"return",
"logger",
".",
"info",
"(",
"\"{} received instance change request: {} from {}\"",
".",
"format",
"(",
"self",
",",
"instChg",
",",
"frm",
")",
")",
"# TODO: add sender to blacklist?",
"if",
"not",
"isinstance",
"(",
"instChg",
".",
"viewNo",
",",
"int",
")",
":",
"self",
".",
"provider",
".",
"discard",
"(",
"instChg",
",",
"\"{}field view_no has incorrect type: {}\"",
".",
"format",
"(",
"VIEW_CHANGE_PREFIX",
",",
"type",
"(",
"instChg",
".",
"viewNo",
")",
")",
")",
"elif",
"instChg",
".",
"viewNo",
"<=",
"self",
".",
"view_no",
":",
"self",
".",
"provider",
".",
"discard",
"(",
"instChg",
",",
"\"Received instance change request with view no {} \"",
"\"which is not more than its view no {}\"",
".",
"format",
"(",
"instChg",
".",
"viewNo",
",",
"self",
".",
"view_no",
")",
",",
"logger",
".",
"info",
")",
"else",
":",
"# Record instance changes for views but send instance change",
"# only when found master to be degraded. if quorum of view changes",
"# found then change view even if master not degraded",
"self",
".",
"_on_verified_instance_change_msg",
"(",
"instChg",
",",
"frm",
")",
"if",
"self",
".",
"instance_changes",
".",
"has_inst_chng_from",
"(",
"instChg",
".",
"viewNo",
",",
"self",
".",
"name",
")",
":",
"logger",
".",
"info",
"(",
"\"{} received instance change message {} but has already \"",
"\"sent an instance change message\"",
".",
"format",
"(",
"self",
",",
"instChg",
")",
")",
"elif",
"not",
"self",
".",
"provider",
".",
"is_master_degraded",
"(",
")",
":",
"logger",
".",
"info",
"(",
"\"{} received instance change message {} but did not \"",
"\"find the master to be slow\"",
".",
"format",
"(",
"self",
",",
"instChg",
")",
")",
"else",
":",
"logger",
".",
"display",
"(",
"\"{}{} found master degraded after receiving instance change\"",
"\" message from {}\"",
".",
"format",
"(",
"VIEW_CHANGE_PREFIX",
",",
"self",
",",
"frm",
")",
")",
"self",
".",
"sendInstanceChange",
"(",
"instChg",
".",
"viewNo",
")"
] | Validate and process an instance change request.
:param instChg: the instance change request
:param frm: the name of the node that sent this `msg` | [
"Validate",
"and",
"process",
"an",
"instance",
"change",
"request",
"."
] | train | https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/view_change/view_changer.py#L373-L416 | 0.004772 |
UCL-INGI/INGInious | inginious/frontend/template_helper.py | TemplateHelper._generic_hook | def _generic_hook(self, name, **kwargs):
""" A generic hook that links the TemplateHelper with PluginManager """
entries = [entry for entry in self._plugin_manager.call_hook(name, **kwargs) if entry is not None]
return "\n".join(entries) | python | def _generic_hook(self, name, **kwargs):
""" A generic hook that links the TemplateHelper with PluginManager """
entries = [entry for entry in self._plugin_manager.call_hook(name, **kwargs) if entry is not None]
return "\n".join(entries) | [
"def",
"_generic_hook",
"(",
"self",
",",
"name",
",",
"*",
"*",
"kwargs",
")",
":",
"entries",
"=",
"[",
"entry",
"for",
"entry",
"in",
"self",
".",
"_plugin_manager",
".",
"call_hook",
"(",
"name",
",",
"*",
"*",
"kwargs",
")",
"if",
"entry",
"is",
"not",
"None",
"]",
"return",
"\"\\n\"",
".",
"join",
"(",
"entries",
")"
] | A generic hook that links the TemplateHelper with PluginManager | [
"A",
"generic",
"hook",
"that",
"links",
"the",
"TemplateHelper",
"with",
"PluginManager"
] | train | https://github.com/UCL-INGI/INGInious/blob/cbda9a9c7f2b8e8eb1e6d7d51f0d18092086300c/inginious/frontend/template_helper.py#L148-L151 | 0.011494 |
quantmind/pulsar | pulsar/utils/importer.py | expand_star | def expand_star(mod_name):
"""Expand something like 'unuk.tasks.*' into a list of all the modules
there.
"""
expanded = []
mod_dir = os.path.dirname(
__import__(mod_name[:-2], {}, {}, ['']).__file__)
for f in glob.glob1(mod_dir, "[!_]*.py"):
expanded.append('%s.%s' % (mod_name[:-2], f[:-3]))
return expanded | python | def expand_star(mod_name):
"""Expand something like 'unuk.tasks.*' into a list of all the modules
there.
"""
expanded = []
mod_dir = os.path.dirname(
__import__(mod_name[:-2], {}, {}, ['']).__file__)
for f in glob.glob1(mod_dir, "[!_]*.py"):
expanded.append('%s.%s' % (mod_name[:-2], f[:-3]))
return expanded | [
"def",
"expand_star",
"(",
"mod_name",
")",
":",
"expanded",
"=",
"[",
"]",
"mod_dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"__import__",
"(",
"mod_name",
"[",
":",
"-",
"2",
"]",
",",
"{",
"}",
",",
"{",
"}",
",",
"[",
"''",
"]",
")",
".",
"__file__",
")",
"for",
"f",
"in",
"glob",
".",
"glob1",
"(",
"mod_dir",
",",
"\"[!_]*.py\"",
")",
":",
"expanded",
".",
"append",
"(",
"'%s.%s'",
"%",
"(",
"mod_name",
"[",
":",
"-",
"2",
"]",
",",
"f",
"[",
":",
"-",
"3",
"]",
")",
")",
"return",
"expanded"
] | Expand something like 'unuk.tasks.*' into a list of all the modules
there. | [
"Expand",
"something",
"like",
"unuk",
".",
"tasks",
".",
"*",
"into",
"a",
"list",
"of",
"all",
"the",
"modules",
"there",
"."
] | train | https://github.com/quantmind/pulsar/blob/fee44e871954aa6ca36d00bb5a3739abfdb89b26/pulsar/utils/importer.py#L20-L29 | 0.002841 |
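A sketch of the expansion (package layout is hypothetical):

# Given files mypkg/tasks/foo.py and mypkg/tasks/bar.py on the import path:
expand_star('mypkg.tasks.*')   # -> ['mypkg.tasks.foo', 'mypkg.tasks.bar']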
facelessuser/wcmatch | wcmatch/glob.py | Glob._get_starting_paths | def _get_starting_paths(self, curdir):
"""
Get the starting location.
For case sensitive paths, we have to "glob" for
it first as Python doesn't like for its users to
think about case. By scanning for it, we can get
the actual casing and then compare.
"""
results = [curdir]
if not self._is_parent(curdir) and not self._is_this(curdir):
fullpath = os.path.abspath(curdir)
basename = os.path.basename(fullpath)
dirname = os.path.dirname(fullpath)
if basename:
matcher = self._get_matcher(basename)
results = [os.path.basename(name) for name in self._glob_dir(dirname, matcher, self)]
return results | python | def _get_starting_paths(self, curdir):
"""
Get the starting location.
For case sensitive paths, we have to "glob" for
it first as Python doesn't like for its users to
think about case. By scanning for it, we can get
the actual casing and then compare.
"""
results = [curdir]
if not self._is_parent(curdir) and not self._is_this(curdir):
fullpath = os.path.abspath(curdir)
basename = os.path.basename(fullpath)
dirname = os.path.dirname(fullpath)
if basename:
matcher = self._get_matcher(basename)
results = [os.path.basename(name) for name in self._glob_dir(dirname, matcher, self)]
return results | [
"def",
"_get_starting_paths",
"(",
"self",
",",
"curdir",
")",
":",
"results",
"=",
"[",
"curdir",
"]",
"if",
"not",
"self",
".",
"_is_parent",
"(",
"curdir",
")",
"and",
"not",
"self",
".",
"_is_this",
"(",
"curdir",
")",
":",
"fullpath",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"curdir",
")",
"basename",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"fullpath",
")",
"dirname",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"fullpath",
")",
"if",
"basename",
":",
"matcher",
"=",
"self",
".",
"_get_matcher",
"(",
"basename",
")",
"results",
"=",
"[",
"os",
".",
"path",
".",
"basename",
"(",
"name",
")",
"for",
"name",
"in",
"self",
".",
"_glob_dir",
"(",
"dirname",
",",
"matcher",
",",
"self",
")",
"]",
"return",
"results"
] | Get the starting location.
For case sensitive paths, we have to "glob" for
it first as Python doesn't like for its users to
think about case. By scanning for it, we can get
the actual casing and then compare. | [
"Get",
"the",
"starting",
"location",
"."
] | train | https://github.com/facelessuser/wcmatch/blob/d153e7007cc73b994ae1ba553dc4584039f5c212/wcmatch/glob.py#L317-L337 | 0.003942 |
tensorflow/mesh | mesh_tensorflow/placement_mesh_impl.py | PlacementMeshImpl.receive | def receive(self, x, mesh_axis, source_pcoord):
"""Collective receive in groups.
Each group contains the processors that differ only in mesh_axis.
```python
group_size = self.shape[mesh_axis].size
```
Args:
x: a LaidOutTensor
mesh_axis: an integer
source_pcoord: a list of optional integers. Each element is either None
or an integer in [0, group_size). If source_pcoord[k] is None, then the
output for the k-th processor in each group is a zero tensor. If
source_pcoord[k] is not None, then the output for the k-th processor in
each group is equal to the input for the source_pcoord[k]-th processor
in that group.
Returns:
a LaidOutTensor
"""
x = x.to_laid_out_tensor()
shape = x.tensor_list[0].shape
dtype = x.tensor_list[0].dtype
def _collective_receive(tensor_list, device_list):
ret = []
for pcoord, device in enumerate(device_list):
with tf.device(device):
if source_pcoord[pcoord] is None:
ret.append(tf.zeros(shape, dtype))
else:
ret.append(tf.identity(tensor_list[source_pcoord[pcoord]]))
return ret
return self._collective_with_groups(
        x, [mesh_axis], _collective_receive) | python | (func_code_string omitted: identical to the whole_func_string above) | [
…func_code_tokens omitted: the lexical tokens of func_code_string, an exact token-level duplicate of the code above…
] | Collective receive in groups.
Each group contains the processors that differ only in mesh_axis.
```python
group_size = self.shape[mesh_axis].size
```
Args:
x: a LaidOutTensor
mesh_axis: an integer
source_pcoord: a list of optional integers. Each element is either None
or an integer in [0, group_size). If source_pcoord[k] is None, then the
output for the k-th processor in each group is a zero tensor. If
source_pcoord[k] is not None, then the output for the k-th processor in
each group is equal to the input for the source_pcoord[k]-th processor
in that group.
Returns:
a LaidOutTensor | [
"Collective", "receive", "in", "groups", "."
] | train | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/placement_mesh_impl.py#L248-L283 | 0.005469 |
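The permutation semantics of source_pcoord are easiest to see in a plain-Python analogue of the inner _collective_receive above, with None standing in for the zero tensor; this sketch drops the device placement and the TensorFlow ops.

def collective_receive(tensor_list, source_pcoord):
    # Output slot k receives the input of processor source_pcoord[k] in the
    # same group; None plays the role of tf.zeros(shape, dtype).
    return [None if src is None else tensor_list[src]
            for src in source_pcoord]

print(collective_receive(["t0", "t1", "t2"], [2, None, 0]))
# -> ['t2', None, 't0']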
bunq/sdk_python | bunq/sdk/json/converter.py | JsonAdapter._fill_default_values | def _fill_default_values(cls, cls_context, dict_):
"""
:type cls_context: type
:type dict_: dict
:rtype: dict
"""
dict_with_default_values = dict(dict_)
params = re.findall(cls._PATTERN_PARAM_NAME_TYPED_ANY,
cls_context.__doc__)
for param in params:
if param not in dict_with_default_values:
dict_with_default_values[param] = None
return dict_with_default_values | python | (func_code_string omitted: identical to the whole_func_string above) | [
…func_code_tokens omitted: the lexical tokens of func_code_string, an exact token-level duplicate of the code above…
] | :type cls_context: type
:type dict_: dict
:rtype: dict | [
":", "type", "cls_context", ":", "type", ":", "type", "dict_", ":", "dict"
] | train | https://github.com/bunq/sdk_python/blob/da6c9b83e6d83ee8062617f53c6eb7293c0d863d/bunq/sdk/json/converter.py#L394-L410 | 0.004049 |
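A sketch of the default-filling behavior above, using a stand-in regex because the actual cls._PATTERN_PARAM_NAME_TYPED_ANY constant is not shown in this row; the Payment class and its docstring are invented for illustration.

import re

PATTERN = r':param (\w+):'  # stand-in for _PATTERN_PARAM_NAME_TYPED_ANY

class Payment:  # hypothetical class whose docstring lists its parameters
    """
    :param amount: Money amount.
    :param description: Free-form text.
    """

values = {'amount': '10.00'}
filled = dict(values)
for param in re.findall(PATTERN, Payment.__doc__):
    if param not in filled:
        filled[param] = None
print(filled)  # {'amount': '10.00', 'description': None}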
CiscoUcs/UcsPythonSDK | src/UcsSdk/UcsBase.py | UcsUtils.GetUcsPropertyMeta | def GetUcsPropertyMeta(classId, key):
""" Methods returns the property meta of the provided key for the given classId. """
if classId in _ManagedObjectMeta:
if key in _ManagedObjectMeta[classId]:
return _ManagedObjectMeta[classId][key]
return None | python | (func_code_string omitted: identical to the whole_func_string above) | [
…func_code_tokens omitted: the lexical tokens of func_code_string, an exact token-level duplicate of the code above…
] | Returns the property meta of the provided key for the given classId. | [
"Returns", "the", "property", "meta", "of", "the", "provided", "key", "for", "the", "given", "classId", "."
] | train | https://github.com/CiscoUcs/UcsPythonSDK/blob/bf6b07d6abeacb922c92b198352eda4eb9e4629b/src/UcsSdk/UcsBase.py#L518-L523 | 0.030769 |
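The nested membership test above is equivalent to chained dict .get() calls with an empty-dict default; a toy stand-in for _ManagedObjectMeta makes this concrete.

meta = {'LsServer': {'Dn': 'dn-property-meta'}}  # toy stand-in for _ManagedObjectMeta

def get_ucs_property_meta(class_id, key, meta):
    # Same result as the nested `in` checks above: None on any miss.
    return meta.get(class_id, {}).get(key)

print(get_ucs_property_meta('LsServer', 'Dn', meta))  # 'dn-property-meta'
print(get_ucs_property_meta('LsServer', 'Rn', meta))  # None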
jobovy/galpy | galpy/orbit/planarOrbit.py | planarOrbitTop.rap | def rap(self,analytic=False,pot=None,**kwargs):
"""
NAME:
rap
PURPOSE:
return the apocenter radius
INPUT:
analytic - compute this analytically
pot - potential to use for analytical calculation
OUTPUT:
R_ap
HISTORY:
2010-09-20 - Written - Bovy (NYU)
"""
if analytic:
self._setupaA(pot=pot,type='adiabatic')
(rperi,rap)= self._aA.calcRapRperi(self)
return rap
if not hasattr(self,'orbit'):
raise AttributeError("Integrate the orbit first")
if not hasattr(self,'rs'):
self.rs= self.orbit[:,0]
return nu.amax(self.rs) | python | (func_code_string omitted: identical to the whole_func_string above) | [
…func_code_tokens omitted: the lexical tokens of func_code_string, an exact token-level duplicate of the code above…
] | NAME:
rap
PURPOSE:
return the apocenter radius
INPUT:
analytic - compute this analytically
pot - potential to use for analytical calculation
OUTPUT:
R_ap
HISTORY:
2010-09-20 - Written - Bovy (NYU) | [
"NAME", ":", "rap", "PURPOSE", ":", "return", "the", "apocenter", "radius", "INPUT", ":", "analytic", "-", "compute", "this", "analytically",
"pot", "-", "potential", "to", "use", "for", "analytical", "calculation", "OUTPUT", ":", "R_ap",
"HISTORY", ":", "2010", "-", "09", "-", "20", "-", "Written", "-", "Bovy", "(", "NYU", ")"
] | train | https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/orbit/planarOrbit.py#L128-L150 | 0.016506 |
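A usage sketch, assuming the public galpy entry points (Orbit, MWPotential2014) rather than instantiating planarOrbitTop directly; the initial conditions and time grid are arbitrary.

import numpy
from galpy.orbit import Orbit
from galpy.potential import MWPotential2014

o = Orbit([1., 0.1, 1.1, 0.1])        # planar orbit: R, vR, vT, phi
ts = numpy.linspace(0., 100., 2001)
o.integrate(ts, MWPotential2014)      # rap() needs the integrated self.orbit
print(o.rap())                        # apocenter as max of the stored rs
print(o.rap(analytic=True, pot=MWPotential2014))  # adiabatic estimate instead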
LeastAuthority/txkube | src/txkube/_invariants.py | instance_of | def instance_of(cls):
"""
Create an invariant requiring the value is an instance of ``cls``.
"""
def check(value):
return (
isinstance(value, cls),
u"{value!r} is instance of {actual!s}, required {required!s}".format(
value=value,
actual=fullyQualifiedName(type(value)),
required=fullyQualifiedName(cls),
),
)
return check | python | (func_code_string omitted: identical to the whole_func_string above) | [
…func_code_tokens omitted: the lexical tokens of func_code_string, an exact token-level duplicate of the code above…
] | Create an invariant requiring the value is an instance of ``cls``. | [
"Create", "an", "invariant", "requiring", "the", "value", "is", "an", "instance", "of", "cls", "."
] | train | https://github.com/LeastAuthority/txkube/blob/a7e555d00535ff787d4b1204c264780da40cf736/src/txkube/_invariants.py#L10-L23 | 0.004525 |
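The returned check follows the pyrsistent invariant convention of a (bool, message) pair, so it can be called directly or handed to a pyrsistent field(invariant=...). A direct-call sketch, importing from the private module path shown in this row:

from txkube._invariants import instance_of  # private module; path from this row

check = instance_of(dict)
ok, message = check({"a": 1})
print(ok)            # True
ok, message = check([1, 2])
print(ok, message)   # False "... is instance of ...list, required ...dict"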
PmagPy/PmagPy | programs/demag_gui.py | Demag_GUI.on_btn_delete_fit | def on_btn_delete_fit(self, event):
"""
removes the current interpretation
Parameters
----------
event : the wx.ButtonEvent that triggered this function
"""
self.delete_fit(self.current_fit, specimen=self.s) | python | (func_code_string omitted: identical to the whole_func_string above) | [
…func_code_tokens omitted: the lexical tokens of func_code_string, an exact token-level duplicate of the code above…
] | removes the current interpretation
Parameters
----------
event : the wx.ButtonEvent that triggered this function | [
"removes", "the", "current", "interpretation"
] | train | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/demag_gui.py#L8481-L8489 | 0.007576 |
cytoscape/py2cytoscape | py2cytoscape/data/cynetwork.py | CyNetwork.add_nodes | def add_nodes(self, node_name_list, dataframe=False):
"""
Add new nodes to the network
:param node_name_list: list of node names, e.g. ['a', 'b', 'c']
:param dataframe: If True, return a pandas dataframe instead of a dict.
:return: A dict mapping names to SUIDs for the newly-created nodes.
"""
res = self.session.post(self.__url + 'nodes', data=json.dumps(node_name_list), headers=HEADERS)
check_response(res)
nodes = res.json()
if dataframe:
return pd.DataFrame(nodes).set_index(['SUID'])
else:
return {node['name']: node['SUID'] for node in nodes} | python | (func_code_string omitted: identical to the whole_func_string above) | [
…func_code_tokens omitted: the lexical tokens of func_code_string, an exact token-level duplicate of the code above…
] | Add new nodes to the network
:param node_name_list: list of node names, e.g. ['a', 'b', 'c']
:param dataframe: If True, return a pandas dataframe instead of a dict.
:return: A dict mapping names to SUIDs for the newly-created nodes. | [
"Add", "new", "nodes", "to", "the", "network"
] | train | https://github.com/cytoscape/py2cytoscape/blob/dd34de8d028f512314d0057168df7fef7c5d5195/py2cytoscape/data/cynetwork.py#L88-L102 | 0.004525 |
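A usage sketch, assuming the usual CyRestClient entry point and a running Cytoscape instance with cyREST on localhost; the network name is illustrative.

from py2cytoscape.data.cyrest_client import CyRestClient

cy = CyRestClient()                    # assumes Cytoscape + cyREST on localhost
net = cy.network.create(name='demo')   # hypothetical empty network
ids = net.add_nodes(['a', 'b', 'c'])   # dict: {'a': <SUID>, 'b': <SUID>, ...}
df = net.add_nodes(['d', 'e'], dataframe=True)  # pandas frame indexed by SUID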
Esri/ArcREST | src/arcrest/web/_base.py | BaseWebOperations._get | def _get(self, url,
param_dict=None,
securityHandler=None,
additional_headers=None,
handlers=None,
proxy_url=None,
proxy_port=None,
compress=True,
custom_handlers=None,
out_folder=None,
file_name=None):
"""
Performs a GET operation
Inputs:
Output:
returns dictionary, string or None
"""
# ensure that no spaces are in the url
url = url.replace(" ", "%20")
pass_headers = {}
if custom_handlers is None:
custom_handlers = []
if handlers is None:
handlers = []
if param_dict is None:
param_dict = {}
self._last_method = "GET"
CHUNK = 4056
param_dict, handler, cj = self._processHandler(securityHandler, param_dict)
if additional_headers is not None:
headers = [] + additional_headers
else:
headers = []
pass_headers = {}
if securityHandler and securityHandler.referer_url:
pass_headers['referer'] = securityHandler.referer_url
for h in headers:
pass_headers[h[0]] = h[1]
if compress:
pass_headers['Accept-encoding'] = 'gzip'
else:
pass_headers['Accept-encoding'] = ""
#headers.append(('User-Agent', USER_AGENT))
pass_headers['User-Agent'] = self.useragent
if len(param_dict.keys()) == 0:
param_dict = None
if handlers is None:
handlers = []
if handler is not None:
handlers.append(handler)
handlers.append(RedirectHandler())
if self._verify == False and \
sys.version_info[0:3] >= (2, 7, 9) and \
hasattr(ssl,'create_default_context'):
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
handlers.append(request.HTTPSHandler(context=ctx))
if cj is not None:
handlers.append(request.HTTPCookieProcessor(cj))
if proxy_url is not None:
if proxy_port is None:
proxy_port = 80
proxies = {"http":"http://%s:%s" % (proxy_url, proxy_port),
"https":"https://%s:%s" % (proxy_url, proxy_port)}
proxy_support = request.ProxyHandler(proxies)
handlers.append(proxy_support)
opener = request.build_opener(*handlers)
opener.addheaders = headers
request.install_opener(opener)
ctx = None
hasContext = False
if self._verify == False and \
'context' in self._has_context(request.urlopen) and \
sys.version_info[0:3] >= (2, 7, 9) and \
hasattr(ssl,'create_default_context'):
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
hasContext = True
if hasContext == False:
if param_dict is None:
req = request.Request(self._asString(url),
headers=pass_headers)
resp = request.urlopen(req)
elif len(str(urlencode(param_dict))) + len(url) >= 1999:
return self._post(
url=url,
param_dict=param_dict,
files=None,
securityHandler=securityHandler,
additional_headers=additional_headers,
custom_handlers=custom_handlers,
proxy_url=proxy_url,
proxy_port=proxy_port,
compress=compress,
out_folder=out_folder,
file_name=file_name,
force_form_post=False)
else:
format_url = self._asString(url) + "?%s" % urlencode(param_dict)
req = request.Request(format_url,
headers=pass_headers)
resp = request.urlopen(req)
else:
if param_dict is None:
req = request.Request(self._asString(url),
headers=pass_headers)
resp = request.urlopen(req,
context=ctx)
elif len(str(urlencode(param_dict))) + len(url) >= 1999:
return self._post(
url=url,
param_dict=param_dict,
files=None,
securityHandler=securityHandler,
additional_headers=additional_headers,
custom_handlers=custom_handlers,
proxy_url=proxy_url,
proxy_port=proxy_port,
compress=compress,
out_folder=out_folder,
file_name=file_name,
force_form_post=False)
else:
format_url = self._asString(url) + "?%s" % urlencode(param_dict)
req = request.Request(format_url,
headers=pass_headers)
resp = request.urlopen(req,
context=ctx)
self._last_code = resp.getcode()
self._last_url = resp.geturl()
# Get some headers from the response
maintype = self._mainType(resp)
contentDisposition = resp.headers.get('content-disposition')
contentMD5 = resp.headers.get('Content-MD5')
#contentEncoding = resp.headers.get('content-encoding')
contentType = resp.headers.get('content-Type').split(';')[0].lower()
contentLength = resp.headers.get('content-length')
if maintype.lower() in ('image',
'application/x-zip-compressed') or \
contentType in ('application/x-zip-compressed', 'application/octet-stream') or \
contentMD5 is not None or\
(contentDisposition is not None and \
contentDisposition.lower().find('attachment;') > -1):
fname = self._get_file_name(
contentDisposition=contentDisposition,
url=url)
if out_folder is None:
out_folder = tempfile.gettempdir()
if contentLength is not None:
max_length = int(contentLength)
if max_length < CHUNK:
CHUNK = max_length
file_name = os.path.join(out_folder, fname)
with open(file_name, 'wb') as writer:
for data in self._chunk(response=resp,
size=CHUNK):
writer.write(data)
writer.flush()
writer.flush()
del writer
return file_name
else:
read = ""
for data in self._chunk(response=resp,
size=CHUNK):
if self.PY3 == True:
read += data.decode('utf-8')
else:
read += data
del data
try:
results = json.loads(read)
if 'error' in results:
if 'message' in results['error']:
if results['error']['message'] == 'Request not made over ssl':
if url.startswith('http://'):
url = url.replace('http://', 'https://')
return self._get(url,
param_dict,
securityHandler,
additional_headers,
handlers,
proxy_url,
proxy_port,
compress,
custom_handlers,
out_folder,
file_name)
return results
except:
return read | python | (func_code_string omitted: identical to the whole_func_string above) | [
…func_code_tokens omitted: the lexical tokens of func_code_string, an exact token-level duplicate of the code above…
] | Performs a GET operation
Inputs:
Output:
returns dictionary, string or None | [
"Performs", "a", "GET", "operation", "Inputs", ":"
] | train | https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/web/_base.py#L551-L751 | 0.003829 |
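Two details of _get worth isolating: the fallback to a form POST when the encoded query plus URL would exceed the ~2000-character limit, and the chunked read loop. A standalone sketch of the length check (URL and parameters are hypothetical; the module itself uses a Python 2/3 urlencode shim):

from urllib.parse import urlencode

url = "https://example.org/arcgis/rest/services/layer/query"  # hypothetical
params = {"where": "1=1", "outFields": "*"}

if len(str(urlencode(params))) + len(url) >= 1999:  # same threshold as above
    print("query too long for GET; fall back to a form POST")
else:
    print("GET", url + "?" + urlencode(params))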
google/apitools | apitools/base/py/transfer.py | Download.StreamMedia | def StreamMedia(self, callback=None, finish_callback=None,
additional_headers=None, use_chunks=True):
"""Stream the entire download.
Args:
callback: (default: None) Callback to call as each chunk is
completed.
finish_callback: (default: None) Callback to call when the
download is complete.
additional_headers: (default: None) Additional headers to
include in fetching bytes.
use_chunks: (bool, default: True) If False, ignore self.chunksize
and stream this download in a single request.
Returns:
None. Streams bytes into self.stream.
"""
callback = callback or self.progress_callback
finish_callback = finish_callback or self.finish_callback
self.EnsureInitialized()
while True:
if self.__initial_response is not None:
response = self.__initial_response
self.__initial_response = None
else:
end_byte = self.__ComputeEndByte(self.progress,
use_chunks=use_chunks)
response = self.__GetChunk(
self.progress, end_byte,
additional_headers=additional_headers)
if self.total_size is None:
self.__SetTotal(response.info)
response = self.__ProcessResponse(response)
self._ExecuteCallback(callback, response)
if (response.status_code == http_client.OK or
self.progress >= self.total_size):
break
self._ExecuteCallback(finish_callback, response) | python | (func_code_string omitted: identical to the whole_func_string above) | [
…func_code_tokens omitted: the lexical tokens of func_code_string, an exact token-level duplicate of the code above…
] | Stream the entire download.
Args:
callback: (default: None) Callback to call as each chunk is
completed.
finish_callback: (default: None) Callback to call when the
download is complete.
additional_headers: (default: None) Additional headers to
include in fetching bytes.
use_chunks: (bool, default: True) If False, ignore self.chunksize
and stream this download in a single request.
Returns:
None. Streams bytes into self.stream. | [
"Stream", "the", "entire", "download", "."
] | train | https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/transfer.py#L506-L544 | 0.001749 |
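Stripped of the HTTP and callback machinery, the control flow above is a standard chunked copy loop; a self-contained analogue:

import io

def stream(src, dst, chunksize=4):
    # Analogue of the StreamMedia loop: fetch a chunk, write it, stop at EOF
    # (EOF plays the role of `progress >= total_size`).
    while True:
        data = src.read(chunksize)
        if not data:
            break
        dst.write(data)

src, dst = io.BytesIO(b"0123456789"), io.BytesIO()
stream(src, dst)
assert dst.getvalue() == b"0123456789"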
hozn/keepassdb | keepassdb/db.py | Database.load | def load(self, dbfile, password=None, keyfile=None, readonly=False):
"""
Load the database from file/stream.
:param dbfile: The database file path/stream.
:type dbfile: str or file-like object
:param password: The password for the database.
:type password: str
:param keyfile: Path to a keyfile (or a stream) that can be used instead of or in conjunction with password for database.
:type keyfile: str or file-like object
:param readonly: Whether to open the database read-only.
:type readonly: bool
"""
self._clear()
buf = None
is_stream = hasattr(dbfile, 'read')
if is_stream:
buf = dbfile.read()
else:
if not os.path.exists(dbfile):
raise IOError("File does not exist: {0}".format(dbfile))
with open(dbfile, 'rb') as fp:
buf = fp.read()
self.load_from_buffer(buf, password=password, keyfile=keyfile, readonly=readonly)
# Once we have successfully loaded the file, go ahead and set the internal attribute
# (in the LockingDatabase subclass, this will effectively take out the lock on the file)
if not is_stream:
self.filepath = dbfile | python | (func_code_string omitted: identical to the whole_func_string above) | [
…func_code_tokens omitted: the lexical tokens of func_code_string, an exact token-level duplicate of the code above…
:param dbfile: The database file path/stream.
:type dbfile: str or file-like object
:param password: The password for the database.
:type password: str
:param keyfile: Path to a keyfile (or a stream) that can be used instead of or in conjunction with password for database.
:type keyfile: str or file-like object
:param readonly: Whether to open the database read-only.
:type readonly: bool | [
"Load", "the", "database", "from", "file", "/", "stream", ".", ":", "param", "dbfile", ":", "The", "database", "file", "path", "/", "stream", ".",
":", "type", "dbfile", ":", "str", "or", "file", "-", "like", "object", ":", "param", "password", ":", "The", "password", "for", "the", "database", ".",
":", "type", "password", ":", "str", ":", "param", "keyfile", ":", "Path", "to", "a", "keyfile", "(", "or", "a", "stream", ")", "that", "can", "be", "used",
"instead", "of", "or", "in", "conjunction", "with", "password", "for", "database", ".", ":", "type", "keyfile", ":", "str", "or", "file", "-", "like", "object",
":", "param", "readonly", ":", "Whether", "to", "open", "the", "database", "read", "-", "only", ".", ":", "type", "readonly", ":", "bool"
] | train | https://github.com/hozn/keepassdb/blob/cb24985d1ed04e7d7db99ecdddf80dd1a91ee48b/keepassdb/db.py#L121-L152 | 0.009002 |
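A usage sketch; the file name and password are hypothetical, and the stream form relies only on the hasattr(dbfile, 'read') duck-typing shown above. The default Database() constructor is assumed.

import io
from keepassdb.db import Database  # module path taken from this row

db = Database()
db.load('test.kdb', password='secret', readonly=True)  # hypothetical file/credentials

# Stream form: anything with .read() is accepted per the duck-typing above;
# note that filepath is only set for real paths, not streams.
with open('test.kdb', 'rb') as fp:
    db2 = Database()
    db2.load(io.BytesIO(fp.read()), password='secret')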
Azure/azure-event-hubs-python | azure/eventprocessorhost/azure_blob_lease.py | AzureBlobLease.with_blob | def with_blob(self, blob):
"""
Init Azure Blob Lease with existing blob.
"""
content = json.loads(blob.content)
self.partition_id = content["partition_id"]
self.owner = content["owner"]
self.token = content["token"]
self.epoch = content["epoch"]
self.offset = content["offset"]
self.sequence_number = content["sequence_number"]
self.event_processor_context = content.get("event_processor_context") | python | (func_code_string omitted: identical to the whole_func_string above) | [
…func_code_tokens omitted: the lexical tokens of func_code_string, an exact token-level duplicate of the code above…
] | Init Azure Blob Lease with existing blob. | [
"Init", "Azure", "Blob", "Lease", "with", "existing", "blob", "."
] | train | https://github.com/Azure/azure-event-hubs-python/blob/737c5f966557ada2cf10fa0d8f3c19671ae96348/azure/eventprocessorhost/azure_blob_lease.py#L40-L51 | 0.004124 |
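A round-trip sketch with a stand-in blob object, since with_blob only reads the .content attribute; the default AzureBlobLease() constructor is assumed.

import json

class FakeBlob:
    # stand-in for the Azure blob result; only .content is used above
    def __init__(self, content):
        self.content = content

blob = FakeBlob(json.dumps({
    "partition_id": "0", "owner": "host-1", "token": "abc123",
    "epoch": 1, "offset": "1024", "sequence_number": 17,
}))
lease = AzureBlobLease()  # assumed default constructor
lease.with_blob(blob)
print(lease.partition_id, lease.offset)  # '0' '1024'
# event_processor_context is read with .get(), so older blobs lacking the
# key still load, yielding None here.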
jaywink/federation | federation/protocols/diaspora/protocol.py | Protocol.get_message_content | def get_message_content(self):
"""
Given the Slap XML, extract out the payload.
"""
body = self.doc.find(
".//{http://salmon-protocol.org/ns/magic-env}data").text
body = urlsafe_b64decode(body.encode("ascii"))
logger.debug("diaspora.protocol.get_message_content: %s", body)
return body | python | (func_code_string omitted: identical to the whole_func_string above) | [
…func_code_tokens omitted: the lexical tokens of func_code_string, an exact token-level duplicate of the code above…
] | Given the Slap XML, extract out the payload. | [
"Given", "the", "Slap", "XML", "extract", "out", "the", "payload", "."
] | train | https://github.com/jaywink/federation/blob/59d31bb37e662891dbea72c1dee05dc53146c78b/federation/protocols/diaspora/protocol.py#L114-L124 | 0.005634 |
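The extraction reduces to a namespaced find plus a urlsafe base64 decode; a self-contained sketch with a minimal toy envelope, using stdlib ElementTree instead of the document wrapper used above:

from base64 import urlsafe_b64decode
import xml.etree.ElementTree as etree

xml_bytes = (b'<me:env xmlns:me="http://salmon-protocol.org/ns/magic-env">'
             b'<me:data>aGVsbG8=</me:data></me:env>')  # toy Slap envelope

doc = etree.fromstring(xml_bytes)
data = doc.find(".//{http://salmon-protocol.org/ns/magic-env}data").text
print(urlsafe_b64decode(data.encode("ascii")))  # b'hello'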
dh1tw/pyhamtools | pyhamtools/callinfo.py | Callinfo._dismantle_callsign | def _dismantle_callsign(self, callsign, timestamp=timestamp_now):
""" try to identify the callsign's identity by analyzing it in the following order:
Args:
callsign (str): Amateur Radio callsign
timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)
Raises:
KeyError: Callsign could not be identified
"""
entire_callsign = callsign.upper()
if re.search('[/A-Z0-9\-]{3,15}', entire_callsign): # make sure the call has at least 3 characters
if re.search('\-\d{1,3}$', entire_callsign): # cut off any -10 / -02 appendixes
callsign = re.sub('\-\d{1,3}$', '', entire_callsign)
if re.search('/[A-Z0-9]{1,4}/[A-Z0-9]{1,4}$', callsign):
callsign = re.sub('/[A-Z0-9]{1,4}$', '', callsign) # cut off 2. appendix DH1TW/HC2/P -> DH1TW/HC2
# multiple character appendix (callsign/xxx)
if re.search('[A-Z0-9]{4,10}/[A-Z0-9]{2,4}$', callsign): # case call/xxx, but ignoring /p and /m or /5
appendix = re.search('/[A-Z0-9]{2,4}$', callsign)
appendix = re.sub('/', '', appendix.group(0))
self._logger.debug("appendix: " + appendix)
if appendix == 'MM': # special case Maritime Mobile
#self._mm = True
return {
'adif': 999,
'continent': '',
'country': 'MARITIME MOBILE',
'cqz': 0,
'latitude': 0.0,
'longitude': 0.0
}
elif appendix == 'AM': # special case Aeronautic Mobile
return {
'adif': 998,
'continent': '',
'country': 'AIRCRAFT MOBILE',
'cqz': 0,
'latitude': 0.0,
'longitude': 0.0
}
elif appendix == 'QRP': # special case QRP
callsign = re.sub('/QRP', '', callsign)
return self._iterate_prefix(callsign, timestamp)
elif appendix == 'QRPP': # special case QRPP
callsign = re.sub('/QRPP', '', callsign)
return self._iterate_prefix(callsign, timestamp)
elif appendix == 'BCN': # filter all beacons
callsign = re.sub('/BCN', '', callsign)
data = self._iterate_prefix(callsign, timestamp).copy()
data[const.BEACON] = True
return data
elif appendix == "LH": # Filter all Lighthouses
callsign = re.sub('/LH', '', callsign)
return self._iterate_prefix(callsign, timestamp)
elif re.search('[A-Z]{3}', appendix): #case of US county(?) contest N3HBX/UAL
callsign = re.sub('/[A-Z]{3}$', '', callsign)
return self._iterate_prefix(callsign, timestamp)
else:
# check if the appendix is a valid country prefix
return self._iterate_prefix(re.sub('/', '', appendix), timestamp)
# Single character appendix (callsign/x)
elif re.search('/[A-Z0-9]$', callsign): # case call/p or /b /m or /5 etc.
appendix = re.search('/[A-Z0-9]$', callsign)
appendix = re.sub('/', '', appendix.group(0))
if appendix == 'B': # special case Beacon
callsign = re.sub('/B', '', callsign)
data = self._iterate_prefix(callsign, timestamp).copy()
data[const.BEACON] = True
return data
elif re.search('\d$', appendix):
area_nr = re.search('\d$', appendix).group(0)
callsign = re.sub('/\d$', '', callsign) #remove /number
if len(re.findall(r'\d+', callsign)) == 1: #call has just one digit e.g. DH1TW
callsign = re.sub('[\d]+', area_nr, callsign)
else: # call has several digits e.g. 7N4AAL
pass # no (two) digit prefix countries known where appendix would change entity
return self._iterate_prefix(callsign, timestamp)
else:
return self._iterate_prefix(callsign, timestamp)
# regular callsigns, without prefix or appendix
elif re.match('^[\d]{0,1}[A-Z]{1,2}\d([A-Z]{1,4}|\d{3,3}|\d{1,3}[A-Z])[A-Z]{0,5}$', callsign):
return self._iterate_prefix(callsign, timestamp)
# callsigns with prefixes (xxx/callsign)
elif re.search('^[A-Z0-9]{1,4}/', entire_callsign):
pfx = re.search('^[A-Z0-9]{1,4}/', entire_callsign)
pfx = re.sub('/', '', pfx.group(0))
#make sure that the remaining part is actually a callsign (avoid: OZ/JO81)
rest = re.search('/[A-Z0-9]+', entire_callsign)
rest = re.sub('/', '', rest.group(0))
if re.match('^[\d]{0,1}[A-Z]{1,2}\d([A-Z]{1,4}|\d{3,3}|\d{1,3}[A-Z])[A-Z]{0,5}$', rest):
return self._iterate_prefix(pfx)
if entire_callsign in callsign_exceptions:
return self._iterate_prefix(callsign_exceptions[entire_callsign])
self._logger.debug("Could not decode " + callsign)
raise KeyError("Callsign could not be decoded") | python | (func_code_string omitted: identical to the whole_func_string above) | [
…func_code_tokens omitted: the lexical tokens of func_code_string, an exact token-level duplicate of the code above…
] | try to identify the callsign's identity by analyzing it in the following order:
Args:
callsign (str): Amateur Radio callsign
timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)
Raises:
KeyError: Callsign could not be identified | [
"try",
"to",
"identify",
"the",
"callsign",
"s",
"identity",
"by",
"analyzing",
"it",
"in",
"the",
"following",
"order",
":"
] | train | https://github.com/dh1tw/pyhamtools/blob/ee7e4b8732e23c298da10e07163748156c16d0fa/pyhamtools/callinfo.py#L112-L222 | 0.007511 |
saltstack/salt | salt/proxy/arista_pyeapi.py | call | def call(method, *args, **kwargs):
'''
Calls an arbitrary pyeapi method.
'''
kwargs = clean_kwargs(**kwargs)
return getattr(pyeapi_device['connection'], method)(*args, **kwargs) | python | def call(method, *args, **kwargs):
'''
Calls an arbitrary pyeapi method.
'''
kwargs = clean_kwargs(**kwargs)
return getattr(pyeapi_device['connection'], method)(*args, **kwargs) | [
"def",
"call",
"(",
"method",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"=",
"clean_kwargs",
"(",
"*",
"*",
"kwargs",
")",
"return",
"getattr",
"(",
"pyeapi_device",
"[",
"'connection'",
"]",
",",
"method",
")",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | Calls an arbitrary pyeapi method. | [
"Calls",
"an",
"arbitrary",
"pyeapi",
"method",
"."
] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/proxy/arista_pyeapi.py#L173-L178 | 0.005076 |
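For context, a hedged sketch of what `call('run_commands', ['show version'])` ultimately does: the method name is looked up on the underlying `pyeapi` connection object and invoked. The connection profile name below is invented, and this assumes the standard `pyeapi` client API rather than anything specific to the salt proxy:

```python
# Illustrative only -- 'veos01' is a made-up profile from ~/.eapi.conf.
import pyeapi

node = pyeapi.connect_to('veos01')

# Equivalent of the proxy's call('run_commands', ['show version']):
result = getattr(node, 'run_commands')(['show version'])
print(result)
```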
upsight/doctor | doctor/utils/__init__.py | copy_func | def copy_func(func: Callable) -> Callable:
"""Returns a copy of a function.
:param func: The function to copy.
:returns: The copied function.
"""
copied = types.FunctionType(
func.__code__, func.__globals__, name=func.__name__,
argdefs=func.__defaults__, closure=func.__closure__)
copied = functools.update_wrapper(copied, func)
copied.__kwdefaults__ = func.__kwdefaults__
return copied | python | def copy_func(func: Callable) -> Callable:
"""Returns a copy of a function.
:param func: The function to copy.
:returns: The copied function.
"""
copied = types.FunctionType(
func.__code__, func.__globals__, name=func.__name__,
argdefs=func.__defaults__, closure=func.__closure__)
copied = functools.update_wrapper(copied, func)
copied.__kwdefaults__ = func.__kwdefaults__
return copied | [
"def",
"copy_func",
"(",
"func",
":",
"Callable",
")",
"->",
"Callable",
":",
"copied",
"=",
"types",
".",
"FunctionType",
"(",
"func",
".",
"__code__",
",",
"func",
".",
"__globals__",
",",
"name",
"=",
"func",
".",
"__name__",
",",
"argdefs",
"=",
"func",
".",
"__defaults__",
",",
"closure",
"=",
"func",
".",
"__closure__",
")",
"copied",
"=",
"functools",
".",
"update_wrapper",
"(",
"copied",
",",
"func",
")",
"copied",
".",
"__kwdefaults__",
"=",
"func",
".",
"__kwdefaults__",
"return",
"copied"
] | Returns a copy of a function.
:param func: The function to copy.
:returns: The copied function. | [
"Returns",
"a",
"copy",
"of",
"a",
"function",
"."
] | train | https://github.com/upsight/doctor/blob/2cf1d433f6f1aa1355644b449a757c0660793cdd/doctor/utils/__init__.py#L25-L36 | 0.002299 |
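Since `copy_func` builds a genuinely independent function object, its defaults can be changed without touching the original. A self-contained check using the same recipe (the `greet` function is a stand-in, not from the record):

```python
import functools
import types


def greet(name, greeting="hello"):
    return "{}, {}".format(greeting, name)


# Same recipe as copy_func above.
copied = types.FunctionType(
    greet.__code__, greet.__globals__, name=greet.__name__,
    argdefs=greet.__defaults__, closure=greet.__closure__)
copied = functools.update_wrapper(copied, greet)
copied.__kwdefaults__ = greet.__kwdefaults__

copied.__defaults__ = ("hi",)        # mutate the copy's defaults only
assert greet("ada") == "hello, ada"  # original is unchanged
assert copied("ada") == "hi, ada"
```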
cohorte/cohorte-herald | python/snippets/herald_irc/bonus.py | BasicCommandsBot.cmd_join | def cmd_join(self, connection, sender, target, payload):
"""
Asks the bot to join a channel
"""
if payload:
connection.join(payload)
else:
raise ValueError("No channel given") | python | def cmd_join(self, connection, sender, target, payload):
"""
Asks the bot to join a channel
"""
if payload:
connection.join(payload)
else:
raise ValueError("No channel given") | [
"def",
"cmd_join",
"(",
"self",
",",
"connection",
",",
"sender",
",",
"target",
",",
"payload",
")",
":",
"if",
"payload",
":",
"connection",
".",
"join",
"(",
"payload",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"No channel given\"",
")"
] | Asks the bot to join a channel | [
"Asks",
"the",
"bot",
"to",
"join",
"a",
"channel"
] | train | https://github.com/cohorte/cohorte-herald/blob/bb3445d0031c8b3abad71e6219cc559b49faa3ee/python/snippets/herald_irc/bonus.py#L20-L27 | 0.008368 |
drastus/unicover | unicover/unicover.py | UniCover.dispatch | def dispatch(self, args):
"""
Calls proper method depending on command-line arguments.
"""
if not args.list and not args.group:
if not args.font and not args.char and not args.block:
self.info()
return
else:
args.list = args.group = True
self._display = {k: args.__dict__[k] for k in ('list', 'group', 'omit_summary')}
if args.char:
char = self._getChar(args.char)
if args.font:
self.fontChar(args.font, char)
else:
self.char(char)
else:
block = self._getBlock(args.block)
self.chars(args.font, block) | python | def dispatch(self, args):
"""
Calls proper method depending on command-line arguments.
"""
if not args.list and not args.group:
if not args.font and not args.char and not args.block:
self.info()
return
else:
args.list = args.group = True
self._display = {k: args.__dict__[k] for k in ('list', 'group', 'omit_summary')}
if args.char:
char = self._getChar(args.char)
if args.font:
self.fontChar(args.font, char)
else:
self.char(char)
else:
block = self._getBlock(args.block)
self.chars(args.font, block) | [
"def",
"dispatch",
"(",
"self",
",",
"args",
")",
":",
"if",
"not",
"args",
".",
"list",
"and",
"not",
"args",
".",
"group",
":",
"if",
"not",
"args",
".",
"font",
"and",
"not",
"args",
".",
"char",
"and",
"not",
"args",
".",
"block",
":",
"self",
".",
"info",
"(",
")",
"return",
"else",
":",
"args",
".",
"list",
"=",
"args",
".",
"group",
"=",
"True",
"self",
".",
"_display",
"=",
"{",
"k",
":",
"args",
".",
"__dict__",
"[",
"k",
"]",
"for",
"k",
"in",
"(",
"'list'",
",",
"'group'",
",",
"'omit_summary'",
")",
"}",
"if",
"args",
".",
"char",
":",
"char",
"=",
"self",
".",
"_getChar",
"(",
"args",
".",
"char",
")",
"if",
"args",
".",
"font",
":",
"self",
".",
"fontChar",
"(",
"args",
".",
"font",
",",
"char",
")",
"else",
":",
"self",
".",
"char",
"(",
"char",
")",
"else",
":",
"block",
"=",
"self",
".",
"_getBlock",
"(",
"args",
".",
"block",
")",
"self",
".",
"chars",
"(",
"args",
".",
"font",
",",
"block",
")"
] | Calls proper method depending on command-line arguments. | [
"Calls",
"proper",
"method",
"depending",
"on",
"command",
"-",
"line",
"arguments",
"."
] | train | https://github.com/drastus/unicover/blob/4702d0151c63d525c25718a838396afe62302255/unicover/unicover.py#L55-L74 | 0.004161 |
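A hypothetical driver for the dispatcher above, mimicking the namespace `argparse` would hand it. Both the import path and the no-argument construction of `UniCover` are assumptions:

```python
from argparse import Namespace
from unicover.unicover import UniCover   # assumed import path

args = Namespace(list=False, group=False, omit_summary=False,
                 font=None, char='A', block=None)
UniCover().dispatch(args)   # flips list/group to True, runs the char branch
```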
taskcluster/taskcluster-client.py | taskcluster/github.py | Github.githubWebHookConsumer | def githubWebHookConsumer(self, *args, **kwargs):
"""
Consume GitHub WebHook
Capture a GitHub event and publish it via pulse, if it's a push,
release or pull request.
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["githubWebHookConsumer"], *args, **kwargs) | python | def githubWebHookConsumer(self, *args, **kwargs):
"""
Consume GitHub WebHook
Capture a GitHub event and publish it via pulse, if it's a push,
release or pull request.
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["githubWebHookConsumer"], *args, **kwargs) | [
"def",
"githubWebHookConsumer",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"_makeApiCall",
"(",
"self",
".",
"funcinfo",
"[",
"\"githubWebHookConsumer\"",
"]",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | Consume GitHub WebHook
Capture a GitHub event and publish it via pulse, if it's a push,
release or pull request.
This method is ``experimental`` | [
"Consume",
"GitHub",
"WebHook"
] | train | https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/github.py#L43-L53 | 0.008746 |
all-umass/graphs | graphs/construction/b_matching.py | _updateB | def _updateB(oldB, B, W, degrees, damping, inds, backinds): # pragma: no cover
'''belief update function.'''
for j,d in enumerate(degrees):
kk = inds[j]
bk = backinds[j]
if d == 0:
B[kk,bk] = -np.inf
continue
belief = W[kk,bk] + W[j]
oldBj = oldB[j]
if d == oldBj.shape[0]:
bth = quickselect(-oldBj, d-1)
bplus = -1
else:
bth,bplus = quickselect(-oldBj, d-1, d)
belief -= np.where(oldBj >= oldBj[bth], oldBj[bplus], oldBj[bth])
B[kk,bk] = damping*belief + (1-damping)*oldB[kk,bk] | python | def _updateB(oldB, B, W, degrees, damping, inds, backinds): # pragma: no cover
'''belief update function.'''
for j,d in enumerate(degrees):
kk = inds[j]
bk = backinds[j]
if d == 0:
B[kk,bk] = -np.inf
continue
belief = W[kk,bk] + W[j]
oldBj = oldB[j]
if d == oldBj.shape[0]:
bth = quickselect(-oldBj, d-1)
bplus = -1
else:
bth,bplus = quickselect(-oldBj, d-1, d)
belief -= np.where(oldBj >= oldBj[bth], oldBj[bplus], oldBj[bth])
B[kk,bk] = damping*belief + (1-damping)*oldB[kk,bk] | [
"def",
"_updateB",
"(",
"oldB",
",",
"B",
",",
"W",
",",
"degrees",
",",
"damping",
",",
"inds",
",",
"backinds",
")",
":",
"# pragma: no cover\r",
"for",
"j",
",",
"d",
"in",
"enumerate",
"(",
"degrees",
")",
":",
"kk",
"=",
"inds",
"[",
"j",
"]",
"bk",
"=",
"backinds",
"[",
"j",
"]",
"if",
"d",
"==",
"0",
":",
"B",
"[",
"kk",
",",
"bk",
"]",
"=",
"-",
"np",
".",
"inf",
"continue",
"belief",
"=",
"W",
"[",
"kk",
",",
"bk",
"]",
"+",
"W",
"[",
"j",
"]",
"oldBj",
"=",
"oldB",
"[",
"j",
"]",
"if",
"d",
"==",
"oldBj",
".",
"shape",
"[",
"0",
"]",
":",
"bth",
"=",
"quickselect",
"(",
"-",
"oldBj",
",",
"d",
"-",
"1",
")",
"bplus",
"=",
"-",
"1",
"else",
":",
"bth",
",",
"bplus",
"=",
"quickselect",
"(",
"-",
"oldBj",
",",
"d",
"-",
"1",
",",
"d",
")",
"belief",
"-=",
"np",
".",
"where",
"(",
"oldBj",
">=",
"oldBj",
"[",
"bth",
"]",
",",
"oldBj",
"[",
"bplus",
"]",
",",
"oldBj",
"[",
"bth",
"]",
")",
"B",
"[",
"kk",
",",
"bk",
"]",
"=",
"damping",
"*",
"belief",
"+",
"(",
"1",
"-",
"damping",
")",
"*",
"oldB",
"[",
"kk",
",",
"bk",
"]"
] | belief update function. | [
"belief",
"update",
"function",
"."
] | train | https://github.com/all-umass/graphs/blob/4fbeb025dfe33340335f34300f58dd3809228822/graphs/construction/b_matching.py#L127-L146 | 0.026224 |
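The heart of the loop above is a damped message revision, `B_new = damping * belief + (1 - damping) * B_old`, which keeps loopy belief propagation from oscillating. A toy numpy illustration with invented numbers:

```python
import numpy as np

damping = 0.5
old_belief = np.array([0.2, -1.0, 3.5])
new_belief = np.array([1.0, 0.0, 2.0])

# Move only part of the way toward the new belief.
updated = damping * new_belief + (1 - damping) * old_belief
print(updated)   # [ 0.6  -0.5   2.75]
```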
bwhite/hadoopy | hadoopy/thirdparty/pyinstaller/PyInstaller/lib/altgraph/ObjectGraph.py | ObjectGraph.removeNode | def removeNode(self, node):
"""
Remove the given node from the graph if it exists
"""
ident = self.getIdent(node)
if ident is not None:
self.graph.hide_node(ident) | python | def removeNode(self, node):
"""
Remove the given node from the graph if it exists
"""
ident = self.getIdent(node)
if ident is not None:
self.graph.hide_node(ident) | [
"def",
"removeNode",
"(",
"self",
",",
"node",
")",
":",
"ident",
"=",
"self",
".",
"getIdent",
"(",
"node",
")",
"if",
"ident",
"is",
"not",
"None",
":",
"self",
".",
"graph",
".",
"hide_node",
"(",
"ident",
")"
] | Remove the given node from the graph if it exists | [
"Remove",
"the",
"given",
"node",
"from",
"the",
"graph",
"if",
"it",
"exists"
] | train | https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/thirdparty/pyinstaller/PyInstaller/lib/altgraph/ObjectGraph.py#L78-L84 | 0.009302 |
NASA-AMMOS/AIT-Core | ait/core/dtype.py | ArrayType.parse | def parse (name):
"""parse(name) -> [typename | None, nelems | None]
Parses an ArrayType name to return the element type name and
number of elements, e.g.:
>>> ArrayType.parse('MSB_U16[32]')
['MSB_U16', 32]
If typename cannot be determined, None is returned.
Similarly, if nelems is not an integer or less than one (1),
None is returned.
"""
parts = [None, None]
start = name.find('[')
if start != -1:
stop = name.find(']', start)
if stop != -1:
try:
parts[0] = name[:start]
parts[1] = int(name[start + 1:stop])
if parts[1] <= 0:
raise ValueError
except ValueError:
msg = 'ArrayType specification: "%s" must have an '
msg += 'integer greater than zero in square brackets.'
raise ValueError(msg % name)
return parts | python | def parse (name):
"""parse(name) -> [typename | None, nelems | None]
Parses an ArrayType name to return the element type name and
number of elements, e.g.:
>>> ArrayType.parse('MSB_U16[32]')
['MSB_U16', 32]
If typename cannot be determined, None is returned.
Similarly, if nelems is not an integer or less than one (1),
None is returned.
"""
parts = [None, None]
start = name.find('[')
if start != -1:
stop = name.find(']', start)
if stop != -1:
try:
parts[0] = name[:start]
parts[1] = int(name[start + 1:stop])
if parts[1] <= 0:
raise ValueError
except ValueError:
msg = 'ArrayType specification: "%s" must have an '
msg += 'integer greater than zero in square brackets.'
raise ValueError(msg % name)
return parts | [
"def",
"parse",
"(",
"name",
")",
":",
"parts",
"=",
"[",
"None",
",",
"None",
"]",
"start",
"=",
"name",
".",
"find",
"(",
"'['",
")",
"if",
"start",
"!=",
"-",
"1",
":",
"stop",
"=",
"name",
".",
"find",
"(",
"']'",
",",
"start",
")",
"if",
"stop",
"!=",
"-",
"1",
":",
"try",
":",
"parts",
"[",
"0",
"]",
"=",
"name",
"[",
":",
"start",
"]",
"parts",
"[",
"1",
"]",
"=",
"int",
"(",
"name",
"[",
"start",
"+",
"1",
":",
"stop",
"]",
")",
"if",
"parts",
"[",
"1",
"]",
"<=",
"0",
":",
"raise",
"ValueError",
"except",
"ValueError",
":",
"msg",
"=",
"'ArrayType specification: \"%s\" must have an '",
"msg",
"+=",
"'integer greater than zero in square brackets.'",
"raise",
"ValueError",
"(",
"msg",
"%",
"name",
")",
"return",
"parts"
] | parse(name) -> [typename | None, nelems | None]
Parses an ArrayType name to return the element type name and
number of elements, e.g.:
>>> ArrayType.parse('MSB_U16[32]')
['MSB_U16', 32]
If typename cannot be determined, None is returned.
Similarly, if nelems is not an integer or less than one (1),
None is returned. | [
"parse",
"(",
"name",
")",
"-",
">",
"[",
"typename",
"|",
"None",
"nelems",
"|",
"None",
"]"
] | train | https://github.com/NASA-AMMOS/AIT-Core/blob/9d85bd9c738e7a6a6fbdff672bea708238b02a3a/ait/core/dtype.py#L415-L444 | 0.003876 |
bsolomon1124/pyfinance | pyfinance/ols.py | RollingOLS._ss_reg | def _ss_reg(self):
"""Sum of squares of the regression."""
return np.sum(
np.square(self._predicted - np.expand_dims(self._ybar, axis=1)),
axis=1,
) | python | def _ss_reg(self):
"""Sum of squares of the regression."""
return np.sum(
np.square(self._predicted - np.expand_dims(self._ybar, axis=1)),
axis=1,
) | [
"def",
"_ss_reg",
"(",
"self",
")",
":",
"return",
"np",
".",
"sum",
"(",
"np",
".",
"square",
"(",
"self",
".",
"_predicted",
"-",
"np",
".",
"expand_dims",
"(",
"self",
".",
"_ybar",
",",
"axis",
"=",
"1",
")",
")",
",",
"axis",
"=",
"1",
",",
")"
] | Sum of squares of the regression. | [
"Sum",
"of",
"squares",
"of",
"the",
"regression",
"."
] | train | https://github.com/bsolomon1124/pyfinance/blob/c95925209a809b4e648e79cbeaf7711d8e5ff1a6/pyfinance/ols.py#L469-L474 | 0.00995 |
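The property computes the regression sum of squares per window, SS_reg = sum((y_hat_i - y_bar)^2). A one-window numpy check with made-up values:

```python
import numpy as np

predicted = np.array([1.0, 2.0, 3.0, 4.0])   # fitted values, one window
ybar = 2.5                                   # mean of observed y

ss_reg = np.sum(np.square(predicted - ybar))
print(ss_reg)   # 5.0 = (-1.5)**2 + (-0.5)**2 + 0.5**2 + 1.5**2
```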
glue-viz/glue-vispy-viewers | glue_vispy_viewers/extern/vispy/plot/plotwidget.py | PlotWidget.volume | def volume(self, vol, clim=None, method='mip', threshold=None,
cmap='grays'):
"""Show a 3D volume
Parameters
----------
vol : ndarray
Volume to render.
clim : tuple of two floats | None
The contrast limits. The values in the volume are mapped to
black and white corresponding to these values. Default maps
between min and max.
method : {'mip', 'iso', 'translucent', 'additive'}
The render style to use. See corresponding docs for details.
Default 'mip'.
threshold : float
The threshold to use for the isosurface render style. By default
the mean of the given volume is used.
cmap : str
The colormap to use.
Returns
-------
volume : instance of Volume
The volume visualization.
See also
--------
Volume
"""
self._configure_3d()
volume = scene.Volume(vol, clim, method, threshold, cmap=cmap)
self.view.add(volume)
self.view.camera.set_range()
return volume | python | def volume(self, vol, clim=None, method='mip', threshold=None,
cmap='grays'):
"""Show a 3D volume
Parameters
----------
vol : ndarray
Volume to render.
clim : tuple of two floats | None
The contrast limits. The values in the volume are mapped to
black and white corresponding to these values. Default maps
between min and max.
method : {'mip', 'iso', 'translucent', 'additive'}
The render style to use. See corresponding docs for details.
Default 'mip'.
threshold : float
The threshold to use for the isosurface render style. By default
the mean of the given volume is used.
cmap : str
The colormap to use.
Returns
-------
volume : instance of Volume
The volume visualization.
See also
--------
Volume
"""
self._configure_3d()
volume = scene.Volume(vol, clim, method, threshold, cmap=cmap)
self.view.add(volume)
self.view.camera.set_range()
return volume | [
"def",
"volume",
"(",
"self",
",",
"vol",
",",
"clim",
"=",
"None",
",",
"method",
"=",
"'mip'",
",",
"threshold",
"=",
"None",
",",
"cmap",
"=",
"'grays'",
")",
":",
"self",
".",
"_configure_3d",
"(",
")",
"volume",
"=",
"scene",
".",
"Volume",
"(",
"vol",
",",
"clim",
",",
"method",
",",
"threshold",
",",
"cmap",
"=",
"cmap",
")",
"self",
".",
"view",
".",
"add",
"(",
"volume",
")",
"self",
".",
"view",
".",
"camera",
".",
"set_range",
"(",
")",
"return",
"volume"
] | Show a 3D volume
Parameters
----------
vol : ndarray
Volume to render.
clim : tuple of two floats | None
The contrast limits. The values in the volume are mapped to
black and white corresponding to these values. Default maps
between min and max.
method : {'mip', 'iso', 'translucent', 'additive'}
The render style to use. See corresponding docs for details.
Default 'mip'.
threshold : float
The threshold to use for the isosurface render style. By default
the mean of the given volume is used.
cmap : str
The colormap to use.
Returns
-------
volume : instance of Volume
The volume visualization.
See also
--------
Volume | [
"Show",
"a",
"3D",
"volume"
] | train | https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/plot/plotwidget.py#L372-L406 | 0.002604 |
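A hedged usage sketch through `vispy.plot`, where indexing a `Fig` yields a `PlotWidget` like the one above; the random volume is dummy data:

```python
import numpy as np
from vispy import plot as vp

vol = np.random.rand(32, 32, 32).astype(np.float32)   # dummy volume

fig = vp.Fig(show=False)
plot = fig[0, 0]                        # a PlotWidget
plot.volume(vol, method='mip', cmap='grays')

if __name__ == '__main__':
    fig.show(run=True)
```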
chardet/chardet | convert_language_model.py | convert_sbcs_model | def convert_sbcs_model(old_model, alphabet):
"""Create a SingleByteCharSetModel object representing the charset."""
# Setup tables necessary for computing transition frequencies for model
char_to_order = {i: order
for i, order in enumerate(old_model['char_to_order_map'])}
pos_ratio = old_model['typical_positive_ratio']
keep_ascii_letters = old_model['keep_english_letter']
curr_model = SingleByteCharSetModel(charset_name=old_model['charset_name'],
language=old_model['language'],
char_to_order_map=char_to_order,
# language_model is filled in later
language_model=None,
typical_positive_ratio=pos_ratio,
keep_ascii_letters=keep_ascii_letters,
alphabet=alphabet)
return curr_model | python | def convert_sbcs_model(old_model, alphabet):
"""Create a SingleByteCharSetModel object representing the charset."""
# Setup tables necessary for computing transition frequencies for model
char_to_order = {i: order
for i, order in enumerate(old_model['char_to_order_map'])}
pos_ratio = old_model['typical_positive_ratio']
keep_ascii_letters = old_model['keep_english_letter']
curr_model = SingleByteCharSetModel(charset_name=old_model['charset_name'],
language=old_model['language'],
char_to_order_map=char_to_order,
# language_model is filled in later
language_model=None,
typical_positive_ratio=pos_ratio,
keep_ascii_letters=keep_ascii_letters,
alphabet=alphabet)
return curr_model | [
"def",
"convert_sbcs_model",
"(",
"old_model",
",",
"alphabet",
")",
":",
"# Setup tables necessary for computing transition frequencies for model",
"char_to_order",
"=",
"{",
"i",
":",
"order",
"for",
"i",
",",
"order",
"in",
"enumerate",
"(",
"old_model",
"[",
"'char_to_order_map'",
"]",
")",
"}",
"pos_ratio",
"=",
"old_model",
"[",
"'typical_positive_ratio'",
"]",
"keep_ascii_letters",
"=",
"old_model",
"[",
"'keep_english_letter'",
"]",
"curr_model",
"=",
"SingleByteCharSetModel",
"(",
"charset_name",
"=",
"old_model",
"[",
"'charset_name'",
"]",
",",
"language",
"=",
"old_model",
"[",
"'language'",
"]",
",",
"char_to_order_map",
"=",
"char_to_order",
",",
"# language_model is filled in later",
"language_model",
"=",
"None",
",",
"typical_positive_ratio",
"=",
"pos_ratio",
",",
"keep_ascii_letters",
"=",
"keep_ascii_letters",
",",
"alphabet",
"=",
"alphabet",
")",
"return",
"curr_model"
] | Create a SingleByteCharSetModel object representing the charset. | [
"Create",
"a",
"SingleByteCharSetModel",
"object",
"representing",
"the",
"charset",
"."
] | train | https://github.com/chardet/chardet/blob/b5194bf8250b7d180ac4edff51e09cab9d99febe/convert_language_model.py#L57-L73 | 0.000988 |
mongodb/mongo-python-driver | pymongo/message.py | _delete_compressed | def _delete_compressed(collection_name, spec, opts, flags, ctx):
"""Internal compressed unacknowledged delete message helper."""
op_delete, max_bson_size = _delete(collection_name, spec, opts, flags)
rid, msg = _compress(2006, op_delete, ctx)
return rid, msg, max_bson_size | python | def _delete_compressed(collection_name, spec, opts, flags, ctx):
"""Internal compressed unacknowledged delete message helper."""
op_delete, max_bson_size = _delete(collection_name, spec, opts, flags)
rid, msg = _compress(2006, op_delete, ctx)
return rid, msg, max_bson_size | [
"def",
"_delete_compressed",
"(",
"collection_name",
",",
"spec",
",",
"opts",
",",
"flags",
",",
"ctx",
")",
":",
"op_delete",
",",
"max_bson_size",
"=",
"_delete",
"(",
"collection_name",
",",
"spec",
",",
"opts",
",",
"flags",
")",
"rid",
",",
"msg",
"=",
"_compress",
"(",
"2006",
",",
"op_delete",
",",
"ctx",
")",
"return",
"rid",
",",
"msg",
",",
"max_bson_size"
] | Internal compressed unacknowledged delete message helper. | [
"Internal",
"compressed",
"unacknowledged",
"delete",
"message",
"helper",
"."
] | train | https://github.com/mongodb/mongo-python-driver/blob/c29c21449e3aae74154207058cf85fd94018d4cd/pymongo/message.py#L806-L810 | 0.00346 |
inspirehep/refextract | refextract/references/engine.py | search_for_book_in_misc | def search_for_book_in_misc(citation, kbs):
"""Searches for books in the misc_txt field if the citation is not recognized as anything like a journal, book, etc.
"""
citation_year = year_from_citation(citation)
for citation_element in citation:
LOGGER.debug(u"Searching for book title in: %s", citation_element['misc_txt'])
for title in kbs['books']:
startIndex = find_substring_ignore_special_chars(citation_element['misc_txt'], title)
if startIndex != -1:
line = kbs['books'][title.upper()]
book_year = line[2].strip(';')
book_authors = line[0]
book_found = False
if citation_year == book_year:
# For now consider the citation as valid, we are using
# an exact search, we don't need to check the authors
# However, the code below will be useful if we decide
# to introduce fuzzy matching.
book_found = True
for author in get_possible_author_names(citation):
if find_substring_ignore_special_chars(book_authors, author) != -1:
book_found = True
for author in re.findall('[a-zA-Z]{4,}', book_authors):
if find_substring_ignore_special_chars(citation_element['misc_txt'], author) != -1:
book_found = True
if book_found:
LOGGER.debug(u"Book found: %s", title)
book_element = {'type': 'BOOK',
'misc_txt': '',
'authors': book_authors,
'title': line[1],
'year': book_year}
citation.append(book_element)
citation_element['misc_txt'] = cut_substring_with_special_chars(citation_element['misc_txt'], title, startIndex)
# Remove year from misc txt
citation_element['misc_txt'] = remove_year(citation_element['misc_txt'], book_year)
return True
LOGGER.debug("Book not found!")
return False | python | def search_for_book_in_misc(citation, kbs):
"""Searches for books in the misc_txt field if the citation is not recognized as anything like a journal, book, etc.
"""
citation_year = year_from_citation(citation)
for citation_element in citation:
LOGGER.debug(u"Searching for book title in: %s", citation_element['misc_txt'])
for title in kbs['books']:
startIndex = find_substring_ignore_special_chars(citation_element['misc_txt'], title)
if startIndex != -1:
line = kbs['books'][title.upper()]
book_year = line[2].strip(';')
book_authors = line[0]
book_found = False
if citation_year == book_year:
# For now consider the citation as valid, we are using
# an exact search, we don't need to check the authors
# However, the code below will be useful if we decide
# to introduce fuzzy matching.
book_found = True
for author in get_possible_author_names(citation):
if find_substring_ignore_special_chars(book_authors, author) != -1:
book_found = True
for author in re.findall('[a-zA-Z]{4,}', book_authors):
if find_substring_ignore_special_chars(citation_element['misc_txt'], author) != -1:
book_found = True
if book_found:
LOGGER.debug(u"Book found: %s", title)
book_element = {'type': 'BOOK',
'misc_txt': '',
'authors': book_authors,
'title': line[1],
'year': book_year}
citation.append(book_element)
citation_element['misc_txt'] = cut_substring_with_special_chars(citation_element['misc_txt'], title, startIndex)
# Remove year from misc txt
citation_element['misc_txt'] = remove_year(citation_element['misc_txt'], book_year)
return True
LOGGER.debug("Book not found!")
return False | [
"def",
"search_for_book_in_misc",
"(",
"citation",
",",
"kbs",
")",
":",
"citation_year",
"=",
"year_from_citation",
"(",
"citation",
")",
"for",
"citation_element",
"in",
"citation",
":",
"LOGGER",
".",
"debug",
"(",
"u\"Searching for book title in: %s\"",
",",
"citation_element",
"[",
"'misc_txt'",
"]",
")",
"for",
"title",
"in",
"kbs",
"[",
"'books'",
"]",
":",
"startIndex",
"=",
"find_substring_ignore_special_chars",
"(",
"citation_element",
"[",
"'misc_txt'",
"]",
",",
"title",
")",
"if",
"startIndex",
"!=",
"-",
"1",
":",
"line",
"=",
"kbs",
"[",
"'books'",
"]",
"[",
"title",
".",
"upper",
"(",
")",
"]",
"book_year",
"=",
"line",
"[",
"2",
"]",
".",
"strip",
"(",
"';'",
")",
"book_authors",
"=",
"line",
"[",
"0",
"]",
"book_found",
"=",
"False",
"if",
"citation_year",
"==",
"book_year",
":",
"# For now consider the citation as valid, we are using",
"# an exact search, we don't need to check the authors",
"# However, the code below will be useful if we decide",
"# to introduce fuzzy matching.",
"book_found",
"=",
"True",
"for",
"author",
"in",
"get_possible_author_names",
"(",
"citation",
")",
":",
"if",
"find_substring_ignore_special_chars",
"(",
"book_authors",
",",
"author",
")",
"!=",
"-",
"1",
":",
"book_found",
"=",
"True",
"for",
"author",
"in",
"re",
".",
"findall",
"(",
"'[a-zA-Z]{4,}'",
",",
"book_authors",
")",
":",
"if",
"find_substring_ignore_special_chars",
"(",
"citation_element",
"[",
"'misc_txt'",
"]",
",",
"author",
")",
"!=",
"-",
"1",
":",
"book_found",
"=",
"True",
"if",
"book_found",
":",
"LOGGER",
".",
"debug",
"(",
"u\"Book found: %s\"",
",",
"title",
")",
"book_element",
"=",
"{",
"'type'",
":",
"'BOOK'",
",",
"'misc_txt'",
":",
"''",
",",
"'authors'",
":",
"book_authors",
",",
"'title'",
":",
"line",
"[",
"1",
"]",
",",
"'year'",
":",
"book_year",
"}",
"citation",
".",
"append",
"(",
"book_element",
")",
"citation_element",
"[",
"'misc_txt'",
"]",
"=",
"cut_substring_with_special_chars",
"(",
"citation_element",
"[",
"'misc_txt'",
"]",
",",
"title",
",",
"startIndex",
")",
"# Remove year from misc txt",
"citation_element",
"[",
"'misc_txt'",
"]",
"=",
"remove_year",
"(",
"citation_element",
"[",
"'misc_txt'",
"]",
",",
"book_year",
")",
"return",
"True",
"LOGGER",
".",
"debug",
"(",
"\"Book not found!\"",
")",
"return",
"False"
] | Searches for books in the misc_txt field if the citation is not recognized as anything like a journal, book, etc. | [
"Searches",
"for",
"books",
"in",
"the",
"misc_txt",
"field",
"if",
"the",
"citation",
"is",
"not",
"recognized",
"as",
"anything",
"like",
"a",
"journal",
"book",
"etc",
"."
] | train | https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/engine.py#L736-L779 | 0.003444 |
mitsei/dlkit | dlkit/records/repository/vcb/vcb_records.py | TimeStampFormRecord.set_start_timestamp | def set_start_timestamp(self, start_timestamp=None):
"""stub"""
if start_timestamp is None:
raise NullArgument()
if self.get_start_timestamp_metadata().is_read_only():
raise NoAccess()
if not self.my_osid_object_form._is_valid_integer(
start_timestamp,
self.get_start_timestamp_metadata()):
raise InvalidArgument()
self.my_osid_object_form._my_map['startTimestamp'] = start_timestamp | python | def set_start_timestamp(self, start_timestamp=None):
"""stub"""
if start_timestamp is None:
raise NullArgument()
if self.get_start_timestamp_metadata().is_read_only():
raise NoAccess()
if not self.my_osid_object_form._is_valid_integer(
start_timestamp,
self.get_start_timestamp_metadata()):
raise InvalidArgument()
self.my_osid_object_form._my_map['startTimestamp'] = start_timestamp | [
"def",
"set_start_timestamp",
"(",
"self",
",",
"start_timestamp",
"=",
"None",
")",
":",
"if",
"start_timestamp",
"is",
"None",
":",
"raise",
"NullArgument",
"(",
")",
"if",
"self",
".",
"get_start_timestamp_metadata",
"(",
")",
".",
"is_read_only",
"(",
")",
":",
"raise",
"NoAccess",
"(",
")",
"if",
"not",
"self",
".",
"my_osid_object_form",
".",
"_is_valid_integer",
"(",
"start_timestamp",
",",
"self",
".",
"get_start_timestamp_metadata",
"(",
")",
")",
":",
"raise",
"InvalidArgument",
"(",
")",
"self",
".",
"my_osid_object_form",
".",
"_my_map",
"[",
"'startTimestamp'",
"]",
"=",
"start_timestamp"
] | stub | [
"stub"
] | train | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/records/repository/vcb/vcb_records.py#L111-L121 | 0.004073 |
mattja/sdeint | sdeint/wiener.py | deltaW | def deltaW(N, m, h):
"""Generate sequence of Wiener increments for m independent Wiener
processes W_j(t) j=0..m-1 for each of N time intervals of length h.
Returns:
dW (array of shape (N, m)): The [n, j] element has the value
W_j((n+1)*h) - W_j(n*h)
"""
return np.random.normal(0.0, np.sqrt(h), (N, m)) | python | def deltaW(N, m, h):
"""Generate sequence of Wiener increments for m independent Wiener
processes W_j(t) j=0..m-1 for each of N time intervals of length h.
Returns:
dW (array of shape (N, m)): The [n, j] element has the value
W_j((n+1)*h) - W_j(n*h)
"""
return np.random.normal(0.0, np.sqrt(h), (N, m)) | [
"def",
"deltaW",
"(",
"N",
",",
"m",
",",
"h",
")",
":",
"return",
"np",
".",
"random",
".",
"normal",
"(",
"0.0",
",",
"np",
".",
"sqrt",
"(",
"h",
")",
",",
"(",
"N",
",",
"m",
")",
")"
] | Generate sequence of Wiener increments for m independent Wiener
processes W_j(t) j=0..m-1 for each of N time intervals of length h.
Returns:
dW (array of shape (N, m)): The [n, j] element has the value
W_j((n+1)*h) - W_j(n*h) | [
"Generate",
"sequence",
"of",
"Wiener",
"increments",
"for",
"m",
"independent",
"Wiener",
"processes",
"W_j",
"(",
"t",
")",
"j",
"=",
"0",
"..",
"m",
"-",
"1",
"for",
"each",
"of",
"N",
"time",
"intervals",
"of",
"length",
"h",
"."
] | train | https://github.com/mattja/sdeint/blob/7cf807cdf97b3bb39d29e1c2dc834b519499b601/sdeint/wiener.py#L42-L50 | 0.008824 |
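Because a Wiener path is the running sum of its increments, the helper converts directly into sample Brownian paths; the sample variance of the increments should sit near `h`. The function is redefined inline so the snippet runs standalone:

```python
import numpy as np

def deltaW(N, m, h):
    return np.random.normal(0.0, np.sqrt(h), (N, m))

N, m, h = 1000, 3, 0.01
dW = deltaW(N, m, h)            # increments W_j((n+1)h) - W_j(nh)
W = np.cumsum(dW, axis=0)       # approximate Brownian paths W_j(t)

print(dW.var(), h)              # sample variance of increments ~ 0.01
```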
ihmeuw/vivarium | src/vivarium/framework/state_machine.py | Machine.setup | def setup(self, builder):
"""Performs this component's simulation setup and return sub-components.
Parameters
----------
builder : `engine.Builder`
Interface to several simulation tools including access to common random
number generation, in particular.
Returns
-------
iterable
This component's sub-components.
"""
builder.components.add_components(self.states)
self.population_view = builder.population.get_view([self.state_column]) | python | def setup(self, builder):
"""Performs this component's simulation setup and return sub-components.
Parameters
----------
builder : `engine.Builder`
Interface to several simulation tools including access to common random
number generation, in particular.
Returns
-------
iterable
This component's sub-components.
"""
builder.components.add_components(self.states)
self.population_view = builder.population.get_view([self.state_column]) | [
"def",
"setup",
"(",
"self",
",",
"builder",
")",
":",
"builder",
".",
"components",
".",
"add_components",
"(",
"self",
".",
"states",
")",
"self",
".",
"population_view",
"=",
"builder",
".",
"population",
".",
"get_view",
"(",
"[",
"self",
".",
"state_column",
"]",
")"
] | Performs this component's simulation setup and returns sub-components.
Parameters
----------
builder : `engine.Builder`
Interface to several simulation tools including access to common random
number generation, in particular.
Returns
-------
iterable
This component's sub-components. | [
"Performs",
"this",
"component",
"s",
"simulation",
"setup",
"and",
"return",
"sub",
"-",
"components",
"."
] | train | https://github.com/ihmeuw/vivarium/blob/c5f5d50f775c8bf337d3aae1ff7c57c025a8e258/src/vivarium/framework/state_machine.py#L377-L392 | 0.007246 |
dshean/pygeotools | pygeotools/lib/filtlib.py | circular_mask | def circular_mask(size):
"""Create a circular mask for an array
Useful when sampling rasters for a laser shot
"""
r = size/2
c = (r,r)
y,x = np.ogrid[-c[0]:size-c[0], -c[1]:size-c[1]]
mask = ~(x*x + y*y <= r*r)
return mask | python | def circular_mask(size):
"""Create a circular mask for an array
Useful when sampling rasters for a laser shot
"""
r = size/2
c = (r,r)
y,x = np.ogrid[-c[0]:size-c[0], -c[1]:size-c[1]]
mask = ~(x*x + y*y <= r*r)
return mask | [
"def",
"circular_mask",
"(",
"size",
")",
":",
"r",
"=",
"size",
"/",
"2",
"c",
"=",
"(",
"r",
",",
"r",
")",
"y",
",",
"x",
"=",
"np",
".",
"ogrid",
"[",
"-",
"c",
"[",
"0",
"]",
":",
"size",
"-",
"c",
"[",
"0",
"]",
",",
"-",
"c",
"[",
"1",
"]",
":",
"size",
"-",
"c",
"[",
"1",
"]",
"]",
"mask",
"=",
"~",
"(",
"x",
"*",
"x",
"+",
"y",
"*",
"y",
"<=",
"r",
"*",
"r",
")",
"return",
"mask"
] | Create a circular mask for an array
Useful when sampling rasters for a laser shot | [
"Create",
"a",
"circular",
"mask",
"for",
"an",
"array",
"Useful",
"when",
"sampling",
"rasters",
"for",
"a",
"laser",
"shot"
] | train | https://github.com/dshean/pygeotools/blob/5ac745717c0098d01eb293ff1fe32fd7358c76ab/pygeotools/lib/filtlib.py#L303-L312 | 0.015444 |
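Typical use is to mask a square window of raster samples down to the inscribed circle, e.g. with a masked array. The function is redefined inline and the window contents are arbitrary:

```python
import numpy as np

def circular_mask(size):
    r = size / 2
    c = (r, r)
    y, x = np.ogrid[-c[0]:size - c[0], -c[1]:size - c[1]]
    return ~(x * x + y * y <= r * r)

size = 11
window = np.arange(size * size, dtype=float).reshape(size, size)

# True marks cells outside the circle, so they are dropped here.
sample = np.ma.array(window, mask=circular_mask(size))
print(sample.count(), 'of', window.size, 'pixels kept')
print(sample.mean())   # statistic over the circular footprint only
```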
odlgroup/odl | odl/operator/operator.py | FunctionalLeftVectorMult._call | def _call(self, x, out=None):
"""Implement ``self(x[, out])``."""
if out is None:
return self.vector * self.functional(x)
else:
scalar = self.functional(x)
out.lincomb(scalar, self.vector) | python | def _call(self, x, out=None):
"""Implement ``self(x[, out])``."""
if out is None:
return self.vector * self.functional(x)
else:
scalar = self.functional(x)
out.lincomb(scalar, self.vector) | [
"def",
"_call",
"(",
"self",
",",
"x",
",",
"out",
"=",
"None",
")",
":",
"if",
"out",
"is",
"None",
":",
"return",
"self",
".",
"vector",
"*",
"self",
".",
"functional",
"(",
"x",
")",
"else",
":",
"scalar",
"=",
"self",
".",
"functional",
"(",
"x",
")",
"out",
".",
"lincomb",
"(",
"scalar",
",",
"self",
".",
"vector",
")"
] | Implement ``self(x[, out])``. | [
"Implement",
"self",
"(",
"x",
"[",
"out",
"]",
")",
"."
] | train | https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/operator/operator.py#L1923-L1929 | 0.008065 |
radjkarl/fancyWidgets | DUMP/pyqtgraphBased/parametertree/parameterTypes.py | WidgetParameterItem.optsChanged | def optsChanged(self, param, opts):
"""Called when any options are changed that are not
name, value, default, or limits"""
# print "opts changed:", opts
ParameterItem.optsChanged(self, param, opts)
w = self.widget
if 'readonly' in opts:
self.updateDefaultBtn()
if isinstance(w, (QtWidgets.QCheckBox, ColorButton)):
w.setEnabled(not opts['readonly'])
# If widget is a SpinBox, pass options straight through
if isinstance(self.widget, SpinBox):
if 'units' in opts and 'suffix' not in opts:
opts['suffix'] = opts['units']
w.setOpts(**opts)
self.updateDisplayLabel() | python | def optsChanged(self, param, opts):
"""Called when any options are changed that are not
name, value, default, or limits"""
# print "opts changed:", opts
ParameterItem.optsChanged(self, param, opts)
w = self.widget
if 'readonly' in opts:
self.updateDefaultBtn()
if isinstance(w, (QtWidgets.QCheckBox, ColorButton)):
w.setEnabled(not opts['readonly'])
# If widget is a SpinBox, pass options straight through
if isinstance(self.widget, SpinBox):
if 'units' in opts and 'suffix' not in opts:
opts['suffix'] = opts['units']
w.setOpts(**opts)
self.updateDisplayLabel() | [
"def",
"optsChanged",
"(",
"self",
",",
"param",
",",
"opts",
")",
":",
"# print \"opts changed:\", opts",
"ParameterItem",
".",
"optsChanged",
"(",
"self",
",",
"param",
",",
"opts",
")",
"w",
"=",
"self",
".",
"widget",
"if",
"'readonly'",
"in",
"opts",
":",
"self",
".",
"updateDefaultBtn",
"(",
")",
"if",
"isinstance",
"(",
"w",
",",
"(",
"QtWidgets",
".",
"QCheckBox",
",",
"ColorButton",
")",
")",
":",
"w",
".",
"setEnabled",
"(",
"not",
"opts",
"[",
"'readonly'",
"]",
")",
"# If widget is a SpinBox, pass options straight through",
"if",
"isinstance",
"(",
"self",
".",
"widget",
",",
"SpinBox",
")",
":",
"if",
"'units'",
"in",
"opts",
"and",
"'suffix'",
"not",
"in",
"opts",
":",
"opts",
"[",
"'suffix'",
"]",
"=",
"opts",
"[",
"'units'",
"]",
"w",
".",
"setOpts",
"(",
"*",
"*",
"opts",
")",
"self",
".",
"updateDisplayLabel",
"(",
")"
] | Called when any options are changed that are not
name, value, default, or limits | [
"Called",
"when",
"any",
"options",
"are",
"changed",
"that",
"are",
"not",
"name",
"value",
"default",
"or",
"limits"
] | train | https://github.com/radjkarl/fancyWidgets/blob/ffe0d5747c5296c78575f0e0909af915a4a5698f/DUMP/pyqtgraphBased/parametertree/parameterTypes.py#L287-L303 | 0.002782 |
googleapis/google-cloud-python | bigtable/google/cloud/bigtable/row.py | ConditionalRow.commit | def commit(self):
"""Makes a ``CheckAndMutateRow`` API request.
If no mutations have been created in the row, no request is made.
The mutations will be applied conditionally, based on whether the
filter matches any cells in the :class:`ConditionalRow` or not. (Each
method which adds a mutation has a ``state`` parameter for this
purpose.)
Mutations are applied atomically and in order, meaning that earlier
mutations can be masked / negated by later ones. Cells already present
in the row are left unchanged unless explicitly changed by a mutation.
After committing the accumulated mutations, resets the local
mutations.
For example:
.. literalinclude:: snippets_table.py
:start-after: [START bigtable_row_commit]
:end-before: [END bigtable_row_commit]
:rtype: bool
:returns: Flag indicating if the filter was matched (which also
indicates which set of mutations were applied by the server).
:raises: :class:`ValueError <exceptions.ValueError>` if the number of
mutations exceeds the :data:`MAX_MUTATIONS`.
"""
true_mutations = self._get_mutations(state=True)
false_mutations = self._get_mutations(state=False)
num_true_mutations = len(true_mutations)
num_false_mutations = len(false_mutations)
if num_true_mutations == 0 and num_false_mutations == 0:
return
if num_true_mutations > MAX_MUTATIONS or num_false_mutations > MAX_MUTATIONS:
raise ValueError(
"Exceed the maximum allowable mutations (%d). Had %s true "
"mutations and %d false mutations."
% (MAX_MUTATIONS, num_true_mutations, num_false_mutations)
)
data_client = self._table._instance._client.table_data_client
resp = data_client.check_and_mutate_row(
table_name=self._table.name,
row_key=self._row_key,
predicate_filter=self._filter.to_pb(),
true_mutations=true_mutations,
false_mutations=false_mutations,
)
self.clear()
return resp.predicate_matched | python | def commit(self):
"""Makes a ``CheckAndMutateRow`` API request.
If no mutations have been created in the row, no request is made.
The mutations will be applied conditionally, based on whether the
filter matches any cells in the :class:`ConditionalRow` or not. (Each
method which adds a mutation has a ``state`` parameter for this
purpose.)
Mutations are applied atomically and in order, meaning that earlier
mutations can be masked / negated by later ones. Cells already present
in the row are left unchanged unless explicitly changed by a mutation.
After committing the accumulated mutations, resets the local
mutations.
For example:
.. literalinclude:: snippets_table.py
:start-after: [START bigtable_row_commit]
:end-before: [END bigtable_row_commit]
:rtype: bool
:returns: Flag indicating if the filter was matched (which also
indicates which set of mutations were applied by the server).
:raises: :class:`ValueError <exceptions.ValueError>` if the number of
mutations exceeds the :data:`MAX_MUTATIONS`.
"""
true_mutations = self._get_mutations(state=True)
false_mutations = self._get_mutations(state=False)
num_true_mutations = len(true_mutations)
num_false_mutations = len(false_mutations)
if num_true_mutations == 0 and num_false_mutations == 0:
return
if num_true_mutations > MAX_MUTATIONS or num_false_mutations > MAX_MUTATIONS:
raise ValueError(
"Exceed the maximum allowable mutations (%d). Had %s true "
"mutations and %d false mutations."
% (MAX_MUTATIONS, num_true_mutations, num_false_mutations)
)
data_client = self._table._instance._client.table_data_client
resp = data_client.check_and_mutate_row(
table_name=self._table.name,
row_key=self._row_key,
predicate_filter=self._filter.to_pb(),
true_mutations=true_mutations,
false_mutations=false_mutations,
)
self.clear()
return resp.predicate_matched | [
"def",
"commit",
"(",
"self",
")",
":",
"true_mutations",
"=",
"self",
".",
"_get_mutations",
"(",
"state",
"=",
"True",
")",
"false_mutations",
"=",
"self",
".",
"_get_mutations",
"(",
"state",
"=",
"False",
")",
"num_true_mutations",
"=",
"len",
"(",
"true_mutations",
")",
"num_false_mutations",
"=",
"len",
"(",
"false_mutations",
")",
"if",
"num_true_mutations",
"==",
"0",
"and",
"num_false_mutations",
"==",
"0",
":",
"return",
"if",
"num_true_mutations",
">",
"MAX_MUTATIONS",
"or",
"num_false_mutations",
">",
"MAX_MUTATIONS",
":",
"raise",
"ValueError",
"(",
"\"Exceed the maximum allowable mutations (%d). Had %s true \"",
"\"mutations and %d false mutations.\"",
"%",
"(",
"MAX_MUTATIONS",
",",
"num_true_mutations",
",",
"num_false_mutations",
")",
")",
"data_client",
"=",
"self",
".",
"_table",
".",
"_instance",
".",
"_client",
".",
"table_data_client",
"resp",
"=",
"data_client",
".",
"check_and_mutate_row",
"(",
"table_name",
"=",
"self",
".",
"_table",
".",
"name",
",",
"row_key",
"=",
"self",
".",
"_row_key",
",",
"predicate_filter",
"=",
"self",
".",
"_filter",
".",
"to_pb",
"(",
")",
",",
"true_mutations",
"=",
"true_mutations",
",",
"false_mutations",
"=",
"false_mutations",
",",
")",
"self",
".",
"clear",
"(",
")",
"return",
"resp",
".",
"predicate_matched"
] | Makes a ``CheckAndMutateRow`` API request.
If no mutations have been created in the row, no request is made.
The mutations will be applied conditionally, based on whether the
filter matches any cells in the :class:`ConditionalRow` or not. (Each
method which adds a mutation has a ``state`` parameter for this
purpose.)
Mutations are applied atomically and in order, meaning that earlier
mutations can be masked / negated by later ones. Cells already present
in the row are left unchanged unless explicitly changed by a mutation.
After committing the accumulated mutations, resets the local
mutations.
For example:
.. literalinclude:: snippets_table.py
:start-after: [START bigtable_row_commit]
:end-before: [END bigtable_row_commit]
:rtype: bool
:returns: Flag indicating if the filter was matched (which also
indicates which set of mutations were applied by the server).
:raises: :class:`ValueError <exceptions.ValueError>` if the number of
mutations exceeds the :data:`MAX_MUTATIONS`. | [
"Makes",
"a",
"CheckAndMutateRow",
"API",
"request",
"."
] | train | https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigtable/google/cloud/bigtable/row.py#L538-L589 | 0.001333 |
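A hedged end-to-end sketch of the conditional-write pattern; project, instance, table, and column names are placeholders, the filter choice is illustrative, and `table.row(..., filter_=...)` returning a `ConditionalRow` is assumed to match this library version:

```python
from google.cloud import bigtable
from google.cloud.bigtable import row_filters

client = bigtable.Client(project='my-project')            # placeholder
table = client.instance('my-instance').table('my-table')  # placeholder

# Predicate: any cell whose value matches b'active'.
predicate = row_filters.ValueRegexFilter(b'active')

row = table.row(b'row-key-1', filter_=predicate)    # a ConditionalRow
row.set_cell('cf1', b'seen', b'1', state=True)      # if filter matches
row.set_cell('cf1', b'seen', b'0', state=False)     # otherwise

matched = row.commit()   # True if the predicate matched
print(matched)
```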
saltstack/salt | salt/cloud/clouds/softlayer.py | avail_images | def avail_images(call=None):
'''
Return a dict of all available VM images on the cloud provider.
'''
if call == 'action':
raise SaltCloudSystemExit(
'The avail_images function must be called with '
'-f or --function, or with the --list-images option'
)
ret = {}
conn = get_conn()
response = conn.getCreateObjectOptions()
for image in response['operatingSystems']:
ret[image['itemPrice']['item']['description']] = {
'name': image['itemPrice']['item']['description'],
'template': image['template']['operatingSystemReferenceCode'],
}
return ret | python | def avail_images(call=None):
'''
Return a dict of all available VM images on the cloud provider.
'''
if call == 'action':
raise SaltCloudSystemExit(
'The avail_images function must be called with '
'-f or --function, or with the --list-images option'
)
ret = {}
conn = get_conn()
response = conn.getCreateObjectOptions()
for image in response['operatingSystems']:
ret[image['itemPrice']['item']['description']] = {
'name': image['itemPrice']['item']['description'],
'template': image['template']['operatingSystemReferenceCode'],
}
return ret | [
"def",
"avail_images",
"(",
"call",
"=",
"None",
")",
":",
"if",
"call",
"==",
"'action'",
":",
"raise",
"SaltCloudSystemExit",
"(",
"'The avail_images function must be called with '",
"'-f or --function, or with the --list-images option'",
")",
"ret",
"=",
"{",
"}",
"conn",
"=",
"get_conn",
"(",
")",
"response",
"=",
"conn",
".",
"getCreateObjectOptions",
"(",
")",
"for",
"image",
"in",
"response",
"[",
"'operatingSystems'",
"]",
":",
"ret",
"[",
"image",
"[",
"'itemPrice'",
"]",
"[",
"'item'",
"]",
"[",
"'description'",
"]",
"]",
"=",
"{",
"'name'",
":",
"image",
"[",
"'itemPrice'",
"]",
"[",
"'item'",
"]",
"[",
"'description'",
"]",
",",
"'template'",
":",
"image",
"[",
"'template'",
"]",
"[",
"'operatingSystemReferenceCode'",
"]",
",",
"}",
"return",
"ret"
] | Return a dict of all available VM images on the cloud provider. | [
"Return",
"a",
"dict",
"of",
"all",
"available",
"VM",
"images",
"on",
"the",
"cloud",
"provider",
"."
] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/softlayer.py#L179-L197 | 0.00152 |
haikuginger/beekeeper | beekeeper/variables.py | Variable.has_value_of_type | def has_value_of_type(self, var_type):
"""
Does the variable both have the given type and
have a variable value we can use?
"""
if self.has_value() and self.has_type(var_type):
return True
return False | python | def has_value_of_type(self, var_type):
"""
Does the variable both have the given type and
have a variable value we can use?
"""
if self.has_value() and self.has_type(var_type):
return True
return False | [
"def",
"has_value_of_type",
"(",
"self",
",",
"var_type",
")",
":",
"if",
"self",
".",
"has_value",
"(",
")",
"and",
"self",
".",
"has_type",
"(",
"var_type",
")",
":",
"return",
"True",
"return",
"False"
] | Does the variable both have the given type and
have a variable value we can use? | [
"Does",
"the",
"variable",
"both",
"have",
"the",
"given",
"type",
"and",
"have",
"a",
"variable",
"value",
"we",
"can",
"use?"
] | train | https://github.com/haikuginger/beekeeper/blob/b647d3add0b407ec5dc3a2a39c4f6dac31243b18/beekeeper/variables.py#L101-L108 | 0.007663 |
OpenHydrology/floodestimation | floodestimation/analysis.py | QmedAnalysis._qmed_from_descriptors_2008 | def _qmed_from_descriptors_2008(self, as_rural=False, donor_catchments=None):
"""
Return QMED estimation based on FEH catchment descriptors, 2008 methodology.
Methodology source: Science Report SC050050, p. 36
:param as_rural: assume catchment is fully rural. Default: false.
:type as rural: bool
:param donor_catchments: override donor catchment to improve QMED catchment. If `None` (default),
donor catchment will be searched automatically, if empty list, no donors will be used.
:type donor_catchments: :class:`Catchment`
:return: QMED in m³/s
:rtype: float
"""
try:
# Basis rural QMED from descriptors
lnqmed_rural = 2.1170 \
+ 0.8510 * log(self.catchment.descriptors.dtm_area) \
- 1.8734 * 1000 / self.catchment.descriptors.saar \
+ 3.4451 * log(self.catchment.descriptors.farl) \
- 3.0800 * self.catchment.descriptors.bfihost ** 2.0
qmed_rural = exp(lnqmed_rural)
# Log intermediate results
self.results_log['qmed_descr_rural'] = qmed_rural
if donor_catchments is None:
# If no donor catchments are provided, find the nearest 25
donor_catchments = self.find_donor_catchments()
if donor_catchments:
# If found multiply rural estimate with weighted adjustment factors from all donors
weights = self._vec_alpha(donor_catchments)
errors = self._vec_lnqmed_residuals(donor_catchments)
correction = np.dot(weights, errors)
lnqmed_rural += correction
qmed_rural = exp(lnqmed_rural)
# Log intermediate results
self.results_log['donors'] = donor_catchments
for i, donor in enumerate(self.results_log['donors']):
donor.weight = weights[i]
donor.factor = exp(errors[i])
self.results_log['donor_adj_factor'] = exp(correction)
self.results_log['qmed_adj_rural'] = qmed_rural
if as_rural:
return qmed_rural
else:
# Apply urbanisation adjustment
urban_adj_factor = self.urban_adj_factor()
# Log intermediate results
self.results_log['qmed_descr_urban'] = self.results_log['qmed_descr_rural'] * urban_adj_factor
return qmed_rural * urban_adj_factor
except (TypeError, KeyError):
raise InsufficientDataError("Catchment `descriptors` attribute must be set first.") | python | def _qmed_from_descriptors_2008(self, as_rural=False, donor_catchments=None):
"""
Return QMED estimation based on FEH catchment descriptors, 2008 methodology.
Methodology source: Science Report SC050050, p. 36
:param as_rural: assume catchment is fully rural. Default: false.
:type as rural: bool
:param donor_catchments: override donor catchment to improve QMED catchment. If `None` (default),
donor catchment will be searched automatically, if empty list, no donors will be used.
:type donor_catchments: :class:`Catchment`
:return: QMED in m³/s
:rtype: float
"""
try:
# Basis rural QMED from descriptors
lnqmed_rural = 2.1170 \
+ 0.8510 * log(self.catchment.descriptors.dtm_area) \
- 1.8734 * 1000 / self.catchment.descriptors.saar \
+ 3.4451 * log(self.catchment.descriptors.farl) \
- 3.0800 * self.catchment.descriptors.bfihost ** 2.0
qmed_rural = exp(lnqmed_rural)
# Log intermediate results
self.results_log['qmed_descr_rural'] = qmed_rural
if donor_catchments is None:
# If no donor catchments are provided, find the nearest 25
donor_catchments = self.find_donor_catchments()
if donor_catchments:
# If found multiply rural estimate with weighted adjustment factors from all donors
weights = self._vec_alpha(donor_catchments)
errors = self._vec_lnqmed_residuals(donor_catchments)
correction = np.dot(weights, errors)
lnqmed_rural += correction
qmed_rural = exp(lnqmed_rural)
# Log intermediate results
self.results_log['donors'] = donor_catchments
for i, donor in enumerate(self.results_log['donors']):
donor.weight = weights[i]
donor.factor = exp(errors[i])
self.results_log['donor_adj_factor'] = exp(correction)
self.results_log['qmed_adj_rural'] = qmed_rural
if as_rural:
return qmed_rural
else:
# Apply urbanisation adjustment
urban_adj_factor = self.urban_adj_factor()
# Log intermediate results
self.results_log['qmed_descr_urban'] = self.results_log['qmed_descr_rural'] * urban_adj_factor
return qmed_rural * urban_adj_factor
except (TypeError, KeyError):
raise InsufficientDataError("Catchment `descriptors` attribute must be set first.") | [
"def",
"_qmed_from_descriptors_2008",
"(",
"self",
",",
"as_rural",
"=",
"False",
",",
"donor_catchments",
"=",
"None",
")",
":",
"try",
":",
"# Basis rural QMED from descriptors",
"lnqmed_rural",
"=",
"2.1170",
"+",
"0.8510",
"*",
"log",
"(",
"self",
".",
"catchment",
".",
"descriptors",
".",
"dtm_area",
")",
"-",
"1.8734",
"*",
"1000",
"/",
"self",
".",
"catchment",
".",
"descriptors",
".",
"saar",
"+",
"3.4451",
"*",
"log",
"(",
"self",
".",
"catchment",
".",
"descriptors",
".",
"farl",
")",
"-",
"3.0800",
"*",
"self",
".",
"catchment",
".",
"descriptors",
".",
"bfihost",
"**",
"2.0",
"qmed_rural",
"=",
"exp",
"(",
"lnqmed_rural",
")",
"# Log intermediate results",
"self",
".",
"results_log",
"[",
"'qmed_descr_rural'",
"]",
"=",
"qmed_rural",
"if",
"donor_catchments",
"is",
"None",
":",
"# If no donor catchments are provided, find the nearest 25",
"donor_catchments",
"=",
"self",
".",
"find_donor_catchments",
"(",
")",
"if",
"donor_catchments",
":",
"# If found multiply rural estimate with weighted adjustment factors from all donors",
"weights",
"=",
"self",
".",
"_vec_alpha",
"(",
"donor_catchments",
")",
"errors",
"=",
"self",
".",
"_vec_lnqmed_residuals",
"(",
"donor_catchments",
")",
"correction",
"=",
"np",
".",
"dot",
"(",
"weights",
",",
"errors",
")",
"lnqmed_rural",
"+=",
"correction",
"qmed_rural",
"=",
"exp",
"(",
"lnqmed_rural",
")",
"# Log intermediate results",
"self",
".",
"results_log",
"[",
"'donors'",
"]",
"=",
"donor_catchments",
"for",
"i",
",",
"donor",
"in",
"enumerate",
"(",
"self",
".",
"results_log",
"[",
"'donors'",
"]",
")",
":",
"donor",
".",
"weight",
"=",
"weights",
"[",
"i",
"]",
"donor",
".",
"factor",
"=",
"exp",
"(",
"errors",
"[",
"i",
"]",
")",
"self",
".",
"results_log",
"[",
"'donor_adj_factor'",
"]",
"=",
"exp",
"(",
"correction",
")",
"self",
".",
"results_log",
"[",
"'qmed_adj_rural'",
"]",
"=",
"qmed_rural",
"if",
"as_rural",
":",
"return",
"qmed_rural",
"else",
":",
"# Apply urbanisation adjustment",
"urban_adj_factor",
"=",
"self",
".",
"urban_adj_factor",
"(",
")",
"# Log intermediate results",
"self",
".",
"results_log",
"[",
"'qmed_descr_urban'",
"]",
"=",
"self",
".",
"results_log",
"[",
"'qmed_descr_rural'",
"]",
"*",
"urban_adj_factor",
"return",
"qmed_rural",
"*",
"urban_adj_factor",
"except",
"(",
"TypeError",
",",
"KeyError",
")",
":",
"raise",
"InsufficientDataError",
"(",
"\"Catchment `descriptors` attribute must be set first.\"",
")"
] | Return QMED estimation based on FEH catchment descriptors, 2008 methodology.
Methodology source: Science Report SC050050, p. 36
:param as_rural: assume catchment is fully rural. Default: false.
:type as rural: bool
:param donor_catchments: override donor catchment to improve QMED catchment. If `None` (default),
donor catchment will be searched automatically, if empty list, no donors will be used.
:type donor_catchments: :class:`Catchment`
:return: QMED in m³/s
:rtype: float | [
"Return",
"QMED",
"estimation",
"based",
"on",
"FEH",
"catchment",
"descriptors",
"2008",
"methodology",
"."
] | train | https://github.com/OpenHydrology/floodestimation/blob/782da7c5abd1348923129efe89fb70003ebb088c/floodestimation/analysis.py#L359-L416 | 0.003288 |
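To ground the regression, a worked evaluation of the rural-QMED formula with invented descriptor values (AREA in km², SAAR in mm/year; none of these numbers describe a real catchment):

```python
from math import exp, log

dtm_area = 25.0    # km^2, hypothetical
saar = 1000.0      # mm/year, hypothetical
farl = 1.0         # no lake/reservoir attenuation
bfihost = 0.40     # hypothetical baseflow index

lnqmed_rural = (2.1170
                + 0.8510 * log(dtm_area)
                - 1.8734 * 1000 / saar
                + 3.4451 * log(farl)
                - 3.0800 * bfihost ** 2)
print(exp(lnqmed_rural))   # ~12.1 m^3/s for these inputs
```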
juju/charm-helpers | charmhelpers/contrib/openstack/amulet/utils.py | OpenStackAmuletUtils.authenticate_swift_user | def authenticate_swift_user(self, keystone, user, password, tenant):
"""Authenticates a regular user with swift api."""
self.log.debug('Authenticating swift user ({})...'.format(user))
ep = keystone.service_catalog.url_for(service_type='identity',
interface='publicURL')
if keystone.session:
return swiftclient.Connection(session=keystone.session)
else:
return swiftclient.Connection(authurl=ep,
user=user,
key=password,
tenant_name=tenant,
auth_version='2.0') | python | def authenticate_swift_user(self, keystone, user, password, tenant):
"""Authenticates a regular user with swift api."""
self.log.debug('Authenticating swift user ({})...'.format(user))
ep = keystone.service_catalog.url_for(service_type='identity',
interface='publicURL')
if keystone.session:
return swiftclient.Connection(session=keystone.session)
else:
return swiftclient.Connection(authurl=ep,
user=user,
key=password,
tenant_name=tenant,
auth_version='2.0') | [
"def",
"authenticate_swift_user",
"(",
"self",
",",
"keystone",
",",
"user",
",",
"password",
",",
"tenant",
")",
":",
"self",
".",
"log",
".",
"debug",
"(",
"'Authenticating swift user ({})...'",
".",
"format",
"(",
"user",
")",
")",
"ep",
"=",
"keystone",
".",
"service_catalog",
".",
"url_for",
"(",
"service_type",
"=",
"'identity'",
",",
"interface",
"=",
"'publicURL'",
")",
"if",
"keystone",
".",
"session",
":",
"return",
"swiftclient",
".",
"Connection",
"(",
"session",
"=",
"keystone",
".",
"session",
")",
"else",
":",
"return",
"swiftclient",
".",
"Connection",
"(",
"authurl",
"=",
"ep",
",",
"user",
"=",
"user",
",",
"key",
"=",
"password",
",",
"tenant_name",
"=",
"tenant",
",",
"auth_version",
"=",
"'2.0'",
")"
] | Authenticates a regular user with swift api. | [
"Authenticates",
"a",
"regular",
"user",
"with",
"swift",
"api",
"."
] | train | https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/openstack/amulet/utils.py#L660-L672 | 0.00271 |
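A sketch of calling the helper above from an Amulet test; `utils` (an OpenStackAmuletUtils instance) and an authenticated `keystone` client are assumed to have been set up earlier.
swift = utils.authenticate_swift_user(keystone,
                                      user='demo',
                                      password='openstack',
                                      tenant='demo')
headers, containers = swift.get_account()     # quick smoke test of the returned connection
print([c['name'] for c in containers])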
sentinel-hub/sentinelhub-py | sentinelhub/config.py | SHConfig.get_config_dict | def get_config_dict(self):
""" Get a dictionary representation of `SHConfig` class
:return: A dictionary with configuration parameters
:rtype: OrderedDict
"""
return OrderedDict((prop, getattr(self, prop)) for prop in self._instance.CONFIG_PARAMS) | python | def get_config_dict(self):
""" Get a dictionary representation of `SHConfig` class
:return: A dictionary with configuration parameters
:rtype: OrderedDict
"""
return OrderedDict((prop, getattr(self, prop)) for prop in self._instance.CONFIG_PARAMS) | [
"def",
"get_config_dict",
"(",
"self",
")",
":",
"return",
"OrderedDict",
"(",
"(",
"prop",
",",
"getattr",
"(",
"self",
",",
"prop",
")",
")",
"for",
"prop",
"in",
"self",
".",
"_instance",
".",
"CONFIG_PARAMS",
")"
] | Get a dictionary representation of the `SHConfig` class
:return: A dictionary with configuration parameters
:rtype: OrderedDict | [
"Get",
"a",
"dictionary",
"representation",
"of",
"SHConfig",
"class"
] | train | https://github.com/sentinel-hub/sentinelhub-py/blob/08a83b7f1e289187159a643336995d8369860fea/sentinelhub/config.py#L213-L219 | 0.010417 |
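The method above can be exercised directly; `SHConfig` is the package's public configuration object.
from sentinelhub import SHConfig
config = SHConfig()                            # loads the stored package configuration
for name, value in config.get_config_dict().items():
    print(name, '=', value)                    # ordered parameter -> value pairs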
minhhoit/yacms | yacms/blog/views.py | blog_post_detail | def blog_post_detail(request, slug, year=None, month=None, day=None,
template="blog/blog_post_detail.html",
extra_context=None):
""". Custom templates are checked for using the name
``blog/blog_post_detail_XXX.html`` where ``XXX`` is the blog
posts's slug.
"""
blog_posts = BlogPost.objects.published(
for_user=request.user).select_related()
blog_post = get_object_or_404(blog_posts, slug=slug)
related_posts = blog_post.related_posts.published(for_user=request.user)
context = {"blog_post": blog_post, "editable_obj": blog_post,
"related_posts": related_posts}
context.update(extra_context or {})
templates = [u"blog/blog_post_detail_%s.html" % str(slug), template]
return TemplateResponse(request, templates, context) | python | def blog_post_detail(request, slug, year=None, month=None, day=None,
template="blog/blog_post_detail.html",
extra_context=None):
""". Custom templates are checked for using the name
``blog/blog_post_detail_XXX.html`` where ``XXX`` is the blog
posts's slug.
"""
blog_posts = BlogPost.objects.published(
for_user=request.user).select_related()
blog_post = get_object_or_404(blog_posts, slug=slug)
related_posts = blog_post.related_posts.published(for_user=request.user)
context = {"blog_post": blog_post, "editable_obj": blog_post,
"related_posts": related_posts}
context.update(extra_context or {})
templates = [u"blog/blog_post_detail_%s.html" % str(slug), template]
return TemplateResponse(request, templates, context) | [
"def",
"blog_post_detail",
"(",
"request",
",",
"slug",
",",
"year",
"=",
"None",
",",
"month",
"=",
"None",
",",
"day",
"=",
"None",
",",
"template",
"=",
"\"blog/blog_post_detail.html\"",
",",
"extra_context",
"=",
"None",
")",
":",
"blog_posts",
"=",
"BlogPost",
".",
"objects",
".",
"published",
"(",
"for_user",
"=",
"request",
".",
"user",
")",
".",
"select_related",
"(",
")",
"blog_post",
"=",
"get_object_or_404",
"(",
"blog_posts",
",",
"slug",
"=",
"slug",
")",
"related_posts",
"=",
"blog_post",
".",
"related_posts",
".",
"published",
"(",
"for_user",
"=",
"request",
".",
"user",
")",
"context",
"=",
"{",
"\"blog_post\"",
":",
"blog_post",
",",
"\"editable_obj\"",
":",
"blog_post",
",",
"\"related_posts\"",
":",
"related_posts",
"}",
"context",
".",
"update",
"(",
"extra_context",
"or",
"{",
"}",
")",
"templates",
"=",
"[",
"u\"blog/blog_post_detail_%s.html\"",
"%",
"str",
"(",
"slug",
")",
",",
"template",
"]",
"return",
"TemplateResponse",
"(",
"request",
",",
"templates",
",",
"context",
")"
] | Custom templates are checked for using the name
``blog/blog_post_detail_XXX.html`` where ``XXX`` is the blog
post's slug. | [
".",
"Custom",
"templates",
"are",
"checked",
"for",
"using",
"the",
"name",
"blog",
"/",
"blog_post_detail_XXX",
".",
"html",
"where",
"XXX",
"is",
"the",
"blog",
"posts",
"s",
"slug",
"."
] | train | https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/blog/views.py#L66-L81 | 0.001167 |
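The view above is wired through a URLconf; a hypothetical Django 1.x route (yacms ships its own blog urls, so this pattern is only illustrative).
from django.conf.urls import url
from yacms.blog.views import blog_post_detail
urlpatterns = [
    url(r"^blog/(?P<slug>[\w-]+)/$", blog_post_detail, name="blog_post_detail"),
]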
SKA-ScienceDataProcessor/integration-prototype | sip/execution_control/docker_api/sip_docker_swarm/docker_swarm_client.py | DockerSwarmClient.get_node_details | def get_node_details(self, node_id: str) -> dict:
"""Get details of a node.
        Only manager nodes can retrieve details of a node.
        Args:
            node_id (str): ID of the node to inspect
Returns:
dict, details of the node
"""
# Raise an exception if we are not a manager
if not self._manager:
raise RuntimeError('Only the Swarm manager node can '
'retrieve node details.')
node = self._client.nodes.get(node_id)
        return node.attrs | python | def get_node_details(self, node_id: str) -> dict:
"""Get details of a node.
        Only manager nodes can retrieve details of a node.
        Args:
            node_id (str): ID of the node to inspect
Returns:
dict, details of the node
"""
# Raise an exception if we are not a manager
if not self._manager:
raise RuntimeError('Only the Swarm manager node can '
'retrieve node details.')
node = self._client.nodes.get(node_id)
return node.attrs | [
"def",
"get_node_details",
"(",
"self",
",",
"node_id",
":",
"list",
")",
"->",
"dict",
":",
"# Raise an exception if we are not a manager",
"if",
"not",
"self",
".",
"_manager",
":",
"raise",
"RuntimeError",
"(",
"'Only the Swarm manager node can '",
"'retrieve node details.'",
")",
"node",
"=",
"self",
".",
"_client",
".",
"nodes",
".",
"get",
"(",
"node_id",
")",
"return",
"node",
".",
"attrs"
] | Get details of a node.
Only manager nodes can retrieve details of a node.
Args:
node_id (str): ID of the node to inspect
Returns:
dict, details of the node | [
"Get",
"details",
"of",
"a",
"node",
"."
] | train | https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/execution_control/docker_api/sip_docker_swarm/docker_swarm_client.py#L332-L350 | 0.003604 |
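Sketch of using the method above against a swarm manager; the import path and constructor arguments are assumptions from the file layout, and the node ID placeholder would come from `docker node ls`.
from sip_docker_swarm.docker_swarm_client import DockerSwarmClient  # import path assumed
client = DockerSwarmClient()                        # must be created on a manager node
details = client.get_node_details('ab12cd34ef56')   # placeholder node ID
print(details['Description']['Hostname'])           # standard docker node attrs layout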
lwcook/horsetail-matching | horsetailmatching/hm.py | _matrix_integration | def _matrix_integration(q, h, t):
''' Returns the dp metric for a single horsetail
curve at a given value of the epistemic uncertainties'''
N = len(q)
# correction if CDF has gone out of trapezium range
if h[-1] < 0.9: h[-1] = 1.0
W = np.zeros([N, N])
for i in range(N):
W[i, i] = 0.5*(h[min(i+1, N-1)] - h[max(i-1, 0)])
dp = (q - t).T.dot(W).dot(q - t)
return dp | python | def _matrix_integration(q, h, t):
''' Returns the dp metric for a single horsetail
curve at a given value of the epistemic uncertainties'''
N = len(q)
# correction if CDF has gone out of trapezium range
if h[-1] < 0.9: h[-1] = 1.0
W = np.zeros([N, N])
for i in range(N):
W[i, i] = 0.5*(h[min(i+1, N-1)] - h[max(i-1, 0)])
dp = (q - t).T.dot(W).dot(q - t)
return dp | [
"def",
"_matrix_integration",
"(",
"q",
",",
"h",
",",
"t",
")",
":",
"N",
"=",
"len",
"(",
"q",
")",
"# correction if CDF has gone out of trapezium range",
"if",
"h",
"[",
"-",
"1",
"]",
"<",
"0.9",
":",
"h",
"[",
"-",
"1",
"]",
"=",
"1.0",
"W",
"=",
"np",
".",
"zeros",
"(",
"[",
"N",
",",
"N",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"N",
")",
":",
"W",
"[",
"i",
",",
"i",
"]",
"=",
"0.5",
"*",
"(",
"h",
"[",
"min",
"(",
"i",
"+",
"1",
",",
"N",
"-",
"1",
")",
"]",
"-",
"h",
"[",
"max",
"(",
"i",
"-",
"1",
",",
"0",
")",
"]",
")",
"dp",
"=",
"(",
"q",
"-",
"t",
")",
".",
"T",
".",
"dot",
"(",
"W",
")",
".",
"dot",
"(",
"q",
"-",
"t",
")",
"return",
"dp"
] | Returns the dp metric for a single horsetail
curve at a given value of the epistemic uncertainties | [
"Returns",
"the",
"dp",
"metric",
"for",
"a",
"single",
"horsetail",
"curve",
"at",
"a",
"given",
"value",
"of",
"the",
"epistemic",
"uncertainties"
] | train | https://github.com/lwcook/horsetail-matching/blob/f3d5f8d01249debbca978f412ce4eae017458119/horsetailmatching/hm.py#L921-L936 | 0.004854 |
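The quadrature above is self-contained numpy; a runnable re-statement of the weighting scheme on toy data:
import numpy as np
q = np.linspace(0.0, 1.0, 5)                # quantile curve
h = np.array([0.0, 0.3, 0.6, 0.85, 1.0])    # CDF heights at those quantiles
t = np.zeros(5)                             # target curve
if h[-1] < 0.9:                             # same out-of-range correction as above
    h[-1] = 1.0
N = len(q)
W = np.zeros([N, N])
for i in range(N):
    W[i, i] = 0.5 * (h[min(i + 1, N - 1)] - h[max(i - 1, 0)])
dp = (q - t).T.dot(W).dot(q - t)            # weighted squared distance
print(dp)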
tanghaibao/jcvi | jcvi/compara/catalog.py | layout | def layout(args):
"""
%prog layout omgfile taxa
    Build column-formatted gene lists after omgparse(). Use a species list
    separated by commas in place of taxa, e.g. "BR,BO,AN,CN"
"""
p = OptionParser(layout.__doc__)
p.add_option("--sort",
help="Sort layout file based on bedfile [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
omgfile, taxa = args
listfile = omgfile.rsplit(".", 1)[0] + ".list"
taxa = taxa.split(",")
ntaxa = len(taxa)
fw = open(listfile, "w")
data = []
fp = open(omgfile)
for row in fp:
genes, idxs = row.split()
row = ["."] * ntaxa
genes = genes.split(",")
ixs = [int(x) for x in idxs.split(",")]
for gene, idx in zip(genes, ixs):
row[idx] = gene
txs = ",".join(taxa[x] for x in ixs)
print("\t".join(("\t".join(row), txs)), file=fw)
data.append(row)
    coldata = list(zip(*data))
ngenes = []
for i, tx in enumerate(taxa):
genes = [x for x in coldata[i] if x != '.']
genes = set(x.strip("|") for x in genes)
ngenes.append((len(genes), tx))
details = ", ".join("{0} {1}".format(a, b) for a, b in ngenes)
total = sum(a for a, b in ngenes)
s = "A list of {0} orthologous families that collectively".format(len(data))
s += " contain a total of {0} genes ({1})".format(total, details)
print(s, file=sys.stderr)
fw.close()
lastcolumn = ntaxa + 1
cmd = "sort -k{0},{0} {1} -o {1}".format(lastcolumn, listfile)
sh(cmd)
logging.debug("List file written to `{0}`.".format(listfile))
sort = opts.sort
if sort:
thread = Bed(sort)
sort_layout(thread, listfile) | python | def layout(args):
"""
%prog layout omgfile taxa
    Build column-formatted gene lists after omgparse(). Use a species list
    separated by commas in place of taxa, e.g. "BR,BO,AN,CN"
"""
p = OptionParser(layout.__doc__)
p.add_option("--sort",
help="Sort layout file based on bedfile [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
omgfile, taxa = args
listfile = omgfile.rsplit(".", 1)[0] + ".list"
taxa = taxa.split(",")
ntaxa = len(taxa)
fw = open(listfile, "w")
data = []
fp = open(omgfile)
for row in fp:
genes, idxs = row.split()
row = ["."] * ntaxa
genes = genes.split(",")
ixs = [int(x) for x in idxs.split(",")]
for gene, idx in zip(genes, ixs):
row[idx] = gene
txs = ",".join(taxa[x] for x in ixs)
print("\t".join(("\t".join(row), txs)), file=fw)
data.append(row)
    coldata = list(zip(*data))
ngenes = []
for i, tx in enumerate(taxa):
genes = [x for x in coldata[i] if x != '.']
genes = set(x.strip("|") for x in genes)
ngenes.append((len(genes), tx))
details = ", ".join("{0} {1}".format(a, b) for a, b in ngenes)
total = sum(a for a, b in ngenes)
s = "A list of {0} orthologous families that collectively".format(len(data))
s += " contain a total of {0} genes ({1})".format(total, details)
print(s, file=sys.stderr)
fw.close()
lastcolumn = ntaxa + 1
cmd = "sort -k{0},{0} {1} -o {1}".format(lastcolumn, listfile)
sh(cmd)
logging.debug("List file written to `{0}`.".format(listfile))
sort = opts.sort
if sort:
thread = Bed(sort)
sort_layout(thread, listfile) | [
"def",
"layout",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"layout",
".",
"__doc__",
")",
"p",
".",
"add_option",
"(",
"\"--sort\"",
",",
"help",
"=",
"\"Sort layout file based on bedfile [default: %default]\"",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"2",
":",
"sys",
".",
"exit",
"(",
"not",
"p",
".",
"print_help",
"(",
")",
")",
"omgfile",
",",
"taxa",
"=",
"args",
"listfile",
"=",
"omgfile",
".",
"rsplit",
"(",
"\".\"",
",",
"1",
")",
"[",
"0",
"]",
"+",
"\".list\"",
"taxa",
"=",
"taxa",
".",
"split",
"(",
"\",\"",
")",
"ntaxa",
"=",
"len",
"(",
"taxa",
")",
"fw",
"=",
"open",
"(",
"listfile",
",",
"\"w\"",
")",
"data",
"=",
"[",
"]",
"fp",
"=",
"open",
"(",
"omgfile",
")",
"for",
"row",
"in",
"fp",
":",
"genes",
",",
"idxs",
"=",
"row",
".",
"split",
"(",
")",
"row",
"=",
"[",
"\".\"",
"]",
"*",
"ntaxa",
"genes",
"=",
"genes",
".",
"split",
"(",
"\",\"",
")",
"ixs",
"=",
"[",
"int",
"(",
"x",
")",
"for",
"x",
"in",
"idxs",
".",
"split",
"(",
"\",\"",
")",
"]",
"for",
"gene",
",",
"idx",
"in",
"zip",
"(",
"genes",
",",
"ixs",
")",
":",
"row",
"[",
"idx",
"]",
"=",
"gene",
"txs",
"=",
"\",\"",
".",
"join",
"(",
"taxa",
"[",
"x",
"]",
"for",
"x",
"in",
"ixs",
")",
"print",
"(",
"\"\\t\"",
".",
"join",
"(",
"(",
"\"\\t\"",
".",
"join",
"(",
"row",
")",
",",
"txs",
")",
")",
",",
"file",
"=",
"fw",
")",
"data",
".",
"append",
"(",
"row",
")",
"coldata",
"=",
"zip",
"(",
"*",
"data",
")",
"ngenes",
"=",
"[",
"]",
"for",
"i",
",",
"tx",
"in",
"enumerate",
"(",
"taxa",
")",
":",
"genes",
"=",
"[",
"x",
"for",
"x",
"in",
"coldata",
"[",
"i",
"]",
"if",
"x",
"!=",
"'.'",
"]",
"genes",
"=",
"set",
"(",
"x",
".",
"strip",
"(",
"\"|\"",
")",
"for",
"x",
"in",
"genes",
")",
"ngenes",
".",
"append",
"(",
"(",
"len",
"(",
"genes",
")",
",",
"tx",
")",
")",
"details",
"=",
"\", \"",
".",
"join",
"(",
"\"{0} {1}\"",
".",
"format",
"(",
"a",
",",
"b",
")",
"for",
"a",
",",
"b",
"in",
"ngenes",
")",
"total",
"=",
"sum",
"(",
"a",
"for",
"a",
",",
"b",
"in",
"ngenes",
")",
"s",
"=",
"\"A list of {0} orthologous families that collectively\"",
".",
"format",
"(",
"len",
"(",
"data",
")",
")",
"s",
"+=",
"\" contain a total of {0} genes ({1})\"",
".",
"format",
"(",
"total",
",",
"details",
")",
"print",
"(",
"s",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"fw",
".",
"close",
"(",
")",
"lastcolumn",
"=",
"ntaxa",
"+",
"1",
"cmd",
"=",
"\"sort -k{0},{0} {1} -o {1}\"",
".",
"format",
"(",
"lastcolumn",
",",
"listfile",
")",
"sh",
"(",
"cmd",
")",
"logging",
".",
"debug",
"(",
"\"List file written to `{0}`.\"",
".",
"format",
"(",
"listfile",
")",
")",
"sort",
"=",
"opts",
".",
"sort",
"if",
"sort",
":",
"thread",
"=",
"Bed",
"(",
"sort",
")",
"sort_layout",
"(",
"thread",
",",
"listfile",
")"
] | %prog layout omgfile taxa
Build column-formatted gene lists after omgparse(). Use a species list
separated by commas in place of taxa, e.g. "BR,BO,AN,CN" | [
"%prog",
"layout",
"omgfile",
"taxa"
] | train | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/compara/catalog.py#L295-L351 | 0.001123 |
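`layout` is a jcvi action, so it takes an argv-style list; a driving sketch (file names are placeholders):
from jcvi.compara.catalog import layout
layout(["groups.omg", "BR,BO,AN,CN"])                       # writes groups.list
layout(["groups.omg", "BR,BO,AN,CN", "--sort", "BR.bed"])   # also sorts by the bed file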
clalancette/pycdlib | pycdlib/utils.py | copy_data | def copy_data(data_length, blocksize, infp, outfp):
# type: (int, int, BinaryIO, BinaryIO) -> None
'''
A utility function to copy data from the input file object to the output
file object. This function will use the most efficient copy method available,
which is often sendfile.
Parameters:
data_length - The amount of data to copy.
blocksize - How much data to copy per iteration.
infp - The file object to copy data from.
outfp - The file object to copy data to.
Returns:
Nothing.
'''
use_sendfile = False
if have_sendfile:
# Python 3 implements the fileno method for all file-like objects, so
# we can't just use the existence of the method to tell whether it is
# available. Instead, we try to assign it, and if we fail, then we
# assume it is not available.
try:
x_unused = infp.fileno() # NOQA
y_unused = outfp.fileno() # NOQA
use_sendfile = True
except (AttributeError, io.UnsupportedOperation):
pass
if use_sendfile:
# This is one of those instances where using the file object and the
# file descriptor causes problems. The sendfile() call actually updates
# the underlying file descriptor, but the file object does not know
# about it. To get around this, we instead get the offset, allow
# sendfile() to update the offset, then manually seek the file object
# to the right location. This ensures that the file object gets updated
# properly.
in_offset = infp.tell()
out_offset = outfp.tell()
sendfile(outfp.fileno(), infp.fileno(), in_offset, data_length)
infp.seek(in_offset + data_length)
outfp.seek(out_offset + data_length)
else:
left = data_length
readsize = blocksize
while left > 0:
if left < readsize:
readsize = left
data = infp.read(readsize)
# We have seen ISOs in the wild (Tribes Vengeance 1of4.iso) that
# lie about the size of their files, causing reads to fail (since
# we hit EOF before the supposed end of the file). If we are using
# sendfile above, sendfile just silently returns as much data as it
# can, with no additional checking. We should do the same here, so
# if we got less data than we asked for, abort the loop silently.
data_len = len(data)
if data_len != readsize:
data_len = left
outfp.write(data)
left -= data_len | python | def copy_data(data_length, blocksize, infp, outfp):
# type: (int, int, BinaryIO, BinaryIO) -> None
'''
A utility function to copy data from the input file object to the output
file object. This function will use the most efficient copy method available,
which is often sendfile.
Parameters:
data_length - The amount of data to copy.
blocksize - How much data to copy per iteration.
infp - The file object to copy data from.
outfp - The file object to copy data to.
Returns:
Nothing.
'''
use_sendfile = False
if have_sendfile:
# Python 3 implements the fileno method for all file-like objects, so
# we can't just use the existence of the method to tell whether it is
# available. Instead, we try to assign it, and if we fail, then we
# assume it is not available.
try:
x_unused = infp.fileno() # NOQA
y_unused = outfp.fileno() # NOQA
use_sendfile = True
except (AttributeError, io.UnsupportedOperation):
pass
if use_sendfile:
# This is one of those instances where using the file object and the
# file descriptor causes problems. The sendfile() call actually updates
# the underlying file descriptor, but the file object does not know
# about it. To get around this, we instead get the offset, allow
# sendfile() to update the offset, then manually seek the file object
# to the right location. This ensures that the file object gets updated
# properly.
in_offset = infp.tell()
out_offset = outfp.tell()
sendfile(outfp.fileno(), infp.fileno(), in_offset, data_length)
infp.seek(in_offset + data_length)
outfp.seek(out_offset + data_length)
else:
left = data_length
readsize = blocksize
while left > 0:
if left < readsize:
readsize = left
data = infp.read(readsize)
# We have seen ISOs in the wild (Tribes Vengeance 1of4.iso) that
# lie about the size of their files, causing reads to fail (since
# we hit EOF before the supposed end of the file). If we are using
# sendfile above, sendfile just silently returns as much data as it
# can, with no additional checking. We should do the same here, so
# if we got less data than we asked for, abort the loop silently.
data_len = len(data)
if data_len != readsize:
data_len = left
outfp.write(data)
left -= data_len | [
"def",
"copy_data",
"(",
"data_length",
",",
"blocksize",
",",
"infp",
",",
"outfp",
")",
":",
"# type: (int, int, BinaryIO, BinaryIO) -> None",
"use_sendfile",
"=",
"False",
"if",
"have_sendfile",
":",
"# Python 3 implements the fileno method for all file-like objects, so",
"# we can't just use the existence of the method to tell whether it is",
"# available. Instead, we try to assign it, and if we fail, then we",
"# assume it is not available.",
"try",
":",
"x_unused",
"=",
"infp",
".",
"fileno",
"(",
")",
"# NOQA",
"y_unused",
"=",
"outfp",
".",
"fileno",
"(",
")",
"# NOQA",
"use_sendfile",
"=",
"True",
"except",
"(",
"AttributeError",
",",
"io",
".",
"UnsupportedOperation",
")",
":",
"pass",
"if",
"use_sendfile",
":",
"# This is one of those instances where using the file object and the",
"# file descriptor causes problems. The sendfile() call actually updates",
"# the underlying file descriptor, but the file object does not know",
"# about it. To get around this, we instead get the offset, allow",
"# sendfile() to update the offset, then manually seek the file object",
"# to the right location. This ensures that the file object gets updated",
"# properly.",
"in_offset",
"=",
"infp",
".",
"tell",
"(",
")",
"out_offset",
"=",
"outfp",
".",
"tell",
"(",
")",
"sendfile",
"(",
"outfp",
".",
"fileno",
"(",
")",
",",
"infp",
".",
"fileno",
"(",
")",
",",
"in_offset",
",",
"data_length",
")",
"infp",
".",
"seek",
"(",
"in_offset",
"+",
"data_length",
")",
"outfp",
".",
"seek",
"(",
"out_offset",
"+",
"data_length",
")",
"else",
":",
"left",
"=",
"data_length",
"readsize",
"=",
"blocksize",
"while",
"left",
">",
"0",
":",
"if",
"left",
"<",
"readsize",
":",
"readsize",
"=",
"left",
"data",
"=",
"infp",
".",
"read",
"(",
"readsize",
")",
"# We have seen ISOs in the wild (Tribes Vengeance 1of4.iso) that",
"# lie about the size of their files, causing reads to fail (since",
"# we hit EOF before the supposed end of the file). If we are using",
"# sendfile above, sendfile just silently returns as much data as it",
"# can, with no additional checking. We should do the same here, so",
"# if we got less data than we asked for, abort the loop silently.",
"data_len",
"=",
"len",
"(",
"data",
")",
"if",
"data_len",
"!=",
"readsize",
":",
"data_len",
"=",
"left",
"outfp",
".",
"write",
"(",
"data",
")",
"left",
"-=",
"data_len"
] | A utility function to copy data from the input file object to the output
file object. This function will use the most efficient copy method available,
which is often sendfile.
Parameters:
data_length - The amount of data to copy.
blocksize - How much data to copy per iteration.
infp - The file object to copy data from.
outfp - The file object to copy data to.
Returns:
Nothing. | [
"A",
"utility",
"function",
"to",
"copy",
"data",
"from",
"the",
"input",
"file",
"object",
"to",
"the",
"output",
"file",
"object",
".",
"This",
"function",
"will",
"use",
"the",
"most",
"efficient",
"copy",
"method",
"available",
"which",
"is",
"often",
"sendfile",
"."
] | train | https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/utils.py#L93-L151 | 0.001518 |
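The fallback path of `copy_data` can be exercised with in-memory streams, since `BytesIO.fileno()` raises and disables the sendfile branch:
import io
from pycdlib.utils import copy_data
src = io.BytesIO(b'x' * 10000)
dst = io.BytesIO()
copy_data(10000, 4096, src, dst)         # read/write loop in 4096-byte chunks
assert dst.getvalue() == b'x' * 10000    # all bytes copied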
srsudar/eg | eg/core.py | _parse_arguments | def _parse_arguments():
"""
Constructs and parses the command line arguments for eg. Returns an args
object as returned by parser.parse_args().
"""
parser = argparse.ArgumentParser(
description='eg provides examples of common command usage.'
)
parser.add_argument(
'-v',
'--version',
action='store_true',
help='Display version information about eg'
)
parser.add_argument(
'-f',
'--config-file',
help='Path to the .egrc file, if it is not in the default location.'
)
parser.add_argument(
'-e',
'--edit',
action='store_true',
help="""Edit the custom examples for the given command. If editor-cmd
is not set in your .egrc and $VISUAL and $EDITOR are not set, prints a
message and does nothing."""
)
parser.add_argument(
'--examples-dir',
help='The location to the examples/ dir that ships with eg'
)
parser.add_argument(
'-c',
'--custom-dir',
help='Path to a directory containing user-defined examples.'
)
parser.add_argument(
'-p',
'--pager-cmd',
help='String literal that will be invoked to page output.'
)
parser.add_argument(
'-l',
'--list',
action='store_true',
help='Show all the programs with eg entries.'
)
parser.add_argument(
'--color',
action='store_true',
dest='use_color',
default=None,
help='Colorize output.'
)
parser.add_argument(
'-s',
'--squeeze',
action='store_true',
default=None,
help='Show fewer blank lines in output.'
)
parser.add_argument(
'--no-color',
action='store_false',
dest='use_color',
help='Do not colorize output.'
)
parser.add_argument(
'program',
nargs='?',
help='The program for which to display examples.'
)
args = parser.parse_args()
if len(sys.argv) < 2:
# Too few arguments. We can't specify this using argparse alone, so we
# have to manually check.
parser.print_help()
parser.exit()
elif not args.version and not args.list and not args.program:
parser.error(_MSG_BAD_ARGS)
else:
return args | python | def _parse_arguments():
"""
Constructs and parses the command line arguments for eg. Returns an args
object as returned by parser.parse_args().
"""
parser = argparse.ArgumentParser(
description='eg provides examples of common command usage.'
)
parser.add_argument(
'-v',
'--version',
action='store_true',
help='Display version information about eg'
)
parser.add_argument(
'-f',
'--config-file',
help='Path to the .egrc file, if it is not in the default location.'
)
parser.add_argument(
'-e',
'--edit',
action='store_true',
help="""Edit the custom examples for the given command. If editor-cmd
is not set in your .egrc and $VISUAL and $EDITOR are not set, prints a
message and does nothing."""
)
parser.add_argument(
'--examples-dir',
help='The location to the examples/ dir that ships with eg'
)
parser.add_argument(
'-c',
'--custom-dir',
help='Path to a directory containing user-defined examples.'
)
parser.add_argument(
'-p',
'--pager-cmd',
help='String literal that will be invoked to page output.'
)
parser.add_argument(
'-l',
'--list',
action='store_true',
help='Show all the programs with eg entries.'
)
parser.add_argument(
'--color',
action='store_true',
dest='use_color',
default=None,
help='Colorize output.'
)
parser.add_argument(
'-s',
'--squeeze',
action='store_true',
default=None,
help='Show fewer blank lines in output.'
)
parser.add_argument(
'--no-color',
action='store_false',
dest='use_color',
help='Do not colorize output.'
)
parser.add_argument(
'program',
nargs='?',
help='The program for which to display examples.'
)
args = parser.parse_args()
if len(sys.argv) < 2:
# Too few arguments. We can't specify this using argparse alone, so we
# have to manually check.
parser.print_help()
parser.exit()
elif not args.version and not args.list and not args.program:
parser.error(_MSG_BAD_ARGS)
else:
return args | [
"def",
"_parse_arguments",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"'eg provides examples of common command usage.'",
")",
"parser",
".",
"add_argument",
"(",
"'-v'",
",",
"'--version'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"'Display version information about eg'",
")",
"parser",
".",
"add_argument",
"(",
"'-f'",
",",
"'--config-file'",
",",
"help",
"=",
"'Path to the .egrc file, if it is not in the default location.'",
")",
"parser",
".",
"add_argument",
"(",
"'-e'",
",",
"'--edit'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"\"\"\"Edit the custom examples for the given command. If editor-cmd\n is not set in your .egrc and $VISUAL and $EDITOR are not set, prints a\n message and does nothing.\"\"\"",
")",
"parser",
".",
"add_argument",
"(",
"'--examples-dir'",
",",
"help",
"=",
"'The location to the examples/ dir that ships with eg'",
")",
"parser",
".",
"add_argument",
"(",
"'-c'",
",",
"'--custom-dir'",
",",
"help",
"=",
"'Path to a directory containing user-defined examples.'",
")",
"parser",
".",
"add_argument",
"(",
"'-p'",
",",
"'--pager-cmd'",
",",
"help",
"=",
"'String literal that will be invoked to page output.'",
")",
"parser",
".",
"add_argument",
"(",
"'-l'",
",",
"'--list'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"'Show all the programs with eg entries.'",
")",
"parser",
".",
"add_argument",
"(",
"'--color'",
",",
"action",
"=",
"'store_true'",
",",
"dest",
"=",
"'use_color'",
",",
"default",
"=",
"None",
",",
"help",
"=",
"'Colorize output.'",
")",
"parser",
".",
"add_argument",
"(",
"'-s'",
",",
"'--squeeze'",
",",
"action",
"=",
"'store_true'",
",",
"default",
"=",
"None",
",",
"help",
"=",
"'Show fewer blank lines in output.'",
")",
"parser",
".",
"add_argument",
"(",
"'--no-color'",
",",
"action",
"=",
"'store_false'",
",",
"dest",
"=",
"'use_color'",
",",
"help",
"=",
"'Do not colorize output.'",
")",
"parser",
".",
"add_argument",
"(",
"'program'",
",",
"nargs",
"=",
"'?'",
",",
"help",
"=",
"'The program for which to display examples.'",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"if",
"len",
"(",
"sys",
".",
"argv",
")",
"<",
"2",
":",
"# Too few arguments. We can't specify this using argparse alone, so we",
"# have to manually check.",
"parser",
".",
"print_help",
"(",
")",
"parser",
".",
"exit",
"(",
")",
"elif",
"not",
"args",
".",
"version",
"and",
"not",
"args",
".",
"list",
"and",
"not",
"args",
".",
"program",
":",
"parser",
".",
"error",
"(",
"_MSG_BAD_ARGS",
")",
"else",
":",
"return",
"args"
] | Constructs and parses the command line arguments for eg. Returns an args
object as returned by parser.parse_args(). | [
"Constructs",
"and",
"parses",
"the",
"command",
"line",
"arguments",
"for",
"eg",
".",
"Returns",
"an",
"args",
"object",
"as",
"returned",
"by",
"parser",
".",
"parse_args",
"()",
"."
] | train | https://github.com/srsudar/eg/blob/96142a74f4416b4a7000c85032c070df713b849e/eg/core.py#L66-L160 | 0.000423 |
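`_parse_arguments` reads `sys.argv` directly; a sketch that simulates a command line (it is a private helper, normally reached through the `eg` entry point):
import sys
from eg.core import _parse_arguments
sys.argv = ['eg', '--no-color', 'find']   # simulated invocation
args = _parse_arguments()
print(args.program, args.use_color)       # -> find False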
wglass/lighthouse | lighthouse/haproxy/control.py | HAProxyControl.get_info | def get_info(self):
"""
Parses the output of a "show info" HAProxy command and returns a
simple dictionary of the results.
"""
info_response = self.send_command("show info")
if not info_response:
return {}
def convert_camel_case(string):
return all_cap_re.sub(
r'\1_\2',
first_cap_re.sub(r'\1_\2', string)
).lower()
return dict(
(convert_camel_case(label), value)
for label, value in [
line.split(": ")
for line in info_response.split("\n")
]
) | python | def get_info(self):
"""
Parses the output of a "show info" HAProxy command and returns a
simple dictionary of the results.
"""
info_response = self.send_command("show info")
if not info_response:
return {}
def convert_camel_case(string):
return all_cap_re.sub(
r'\1_\2',
first_cap_re.sub(r'\1_\2', string)
).lower()
return dict(
(convert_camel_case(label), value)
for label, value in [
line.split(": ")
for line in info_response.split("\n")
]
) | [
"def",
"get_info",
"(",
"self",
")",
":",
"info_response",
"=",
"self",
".",
"send_command",
"(",
"\"show info\"",
")",
"if",
"not",
"info_response",
":",
"return",
"{",
"}",
"def",
"convert_camel_case",
"(",
"string",
")",
":",
"return",
"all_cap_re",
".",
"sub",
"(",
"r'\\1_\\2'",
",",
"first_cap_re",
".",
"sub",
"(",
"r'\\1_\\2'",
",",
"string",
")",
")",
".",
"lower",
"(",
")",
"return",
"dict",
"(",
"(",
"convert_camel_case",
"(",
"label",
")",
",",
"value",
")",
"for",
"label",
",",
"value",
"in",
"[",
"line",
".",
"split",
"(",
"\": \"",
")",
"for",
"line",
"in",
"info_response",
".",
"split",
"(",
"\"\\n\"",
")",
"]",
")"
] | Parses the output of a "show info" HAProxy command and returns a
simple dictionary of the results. | [
"Parses",
"the",
"output",
"of",
"a",
"show",
"info",
"HAProxy",
"command",
"and",
"returns",
"a",
"simple",
"dictionary",
"of",
"the",
"results",
"."
] | train | https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/haproxy/control.py#L97-L119 | 0.003053 |
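The camel-case conversion relies on two module-level regexes whose definitions live elsewhere in lighthouse; the standard camel-to-snake idiom below is assumed to match them:
import re
first_cap_re = re.compile('(.)([A-Z][a-z]+)')   # assumed definition
all_cap_re = re.compile('([a-z0-9])([A-Z])')    # assumed definition
def convert_camel_case(string):
    return all_cap_re.sub(r'\1_\2', first_cap_re.sub(r'\1_\2', string)).lower()
print(convert_camel_case('CurrConns'))    # -> curr_conns
print(convert_camel_case('PoolFailed'))   # -> pool_failed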
saltstack/salt | salt/cloud/clouds/vultrpy.py | _lookup_vultrid | def _lookup_vultrid(which_key, availkey, keyname):
'''
Helper function to retrieve a Vultr ID
'''
if DETAILS == {}:
_cache_provider_details()
which_key = six.text_type(which_key)
try:
return DETAILS[availkey][which_key][keyname]
except KeyError:
return False | python | def _lookup_vultrid(which_key, availkey, keyname):
'''
Helper function to retrieve a Vultr ID
'''
if DETAILS == {}:
_cache_provider_details()
which_key = six.text_type(which_key)
try:
return DETAILS[availkey][which_key][keyname]
except KeyError:
return False | [
"def",
"_lookup_vultrid",
"(",
"which_key",
",",
"availkey",
",",
"keyname",
")",
":",
"if",
"DETAILS",
"==",
"{",
"}",
":",
"_cache_provider_details",
"(",
")",
"which_key",
"=",
"six",
".",
"text_type",
"(",
"which_key",
")",
"try",
":",
"return",
"DETAILS",
"[",
"availkey",
"]",
"[",
"which_key",
"]",
"[",
"keyname",
"]",
"except",
"KeyError",
":",
"return",
"False"
] | Helper function to retrieve a Vultr ID | [
"Helper",
"function",
"to",
"retrieve",
"a",
"Vultr",
"ID"
] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/vultrpy.py#L251-L262 | 0.003215 |
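The helper is a guarded three-level dict lookup; a standalone sketch with an illustrative `DETAILS` cache (real entries are filled by `_cache_provider_details()`):
DETAILS = {'avail_locations': {'New Jersey': {'DCID': '1'}}}   # illustrative shape only
def _lookup_vultrid(which_key, availkey, keyname):
    which_key = str(which_key)   # six.text_type in the original
    try:
        return DETAILS[availkey][which_key][keyname]
    except KeyError:
        return False
print(_lookup_vultrid('New Jersey', 'avail_locations', 'DCID'))   # -> '1'
print(_lookup_vultrid('Nowhere', 'avail_locations', 'DCID'))      # -> False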
CalebBell/ht | ht/hx.py | temperature_effectiveness_TEMA_H | def temperature_effectiveness_TEMA_H(R1, NTU1, Ntp, optimal=True):
r'''Returns temperature effectiveness `P1` of a TEMA H type heat exchanger
with a specified heat capacity ratio, number of transfer units `NTU1`,
    and number of tube passes `Ntp`. For the two tube pass case, there are
two possible orientations, one inefficient and one efficient controlled
by the `optimal` option. The supported cases are as follows:
* One tube pass (tube fluid split into two streams individually mixed,
shell fluid mixed)
* Two tube passes (shell fluid mixed, tube pass mixed between passes)
* Two tube passes (shell fluid mixed, tube pass mixed between passes, inlet
tube side next to inlet shell-side)
1-1 TEMA H, tube fluid split into two streams individually mixed, shell
fluid mixed:
.. math::
P_1 = E[1 + (1 - BR_1/2)(1 - A R_1/2 + ABR_1)] - AB(1 - BR_1/2)
A = \frac{1}{1 + R_1/2}\{1 - \exp[-NTU_1(1 + R_1/2)/2]\}
B = \frac{1-D}{1-R_1 D/2}
D = \exp[-NTU_1(1-R_1/2)/2]
E = (A + B - ABR_1/2)/2
1-2 TEMA H, shell and tube fluids mixed in each pass at the cross section:
.. math::
P_1 = \frac{1}{R_1}\left[1 - \frac{(1-D)^4}{B - 4G/R_1}\right]
B = (1+H)(1+E)^2
G = (1-D)^2(D^2 + E^2) + D^2(1 + E)^2
H = [1 - \exp(-2\beta)]/(4/R_1 -1)
E = [1 - \exp(-\beta)]/(4/R_1 - 1)
D = [1 - \exp(-\alpha)]/(4/R_1 + 1)
\alpha = NTU_1(4 + R_1)/8
\beta = NTU_1(4-R_1)/8
1-2 TEMA H, shell and tube fluids mixed in each pass at the cross section
but with the inlet tube stream coming in next to the shell fluid inlet
in an inefficient way (this is only shown in [2]_, and the stream 1/2
convention in it is different but converted here; P1 is still returned):
.. math::
P_2 = \left[1 - \frac{B + 4GR_2}{(1-D)^4}\right]
B = (1 + H)(1 + E)^2
G = (1-D)^2(D^2 + E^2) + D^2(1 + E)^2
D = \frac{1 - \exp(-\alpha)}{1 - 4R_2}
E = \frac{\exp(-\beta) - 1}{4R_2 +1}
H = \frac{\exp(-2\beta) - 1}{4R_2 +1}
\alpha = \frac{NTU_2}{8}(4R_2 -1)
\beta = \frac{NTU_2}{8}(4R_2 +1)
Parameters
----------
R1 : float
Heat capacity ratio of the heat exchanger in the P-NTU method,
calculated with respect to stream 1 (shell side = 1, tube side = 2) [-]
NTU1 : float
Thermal number of transfer units of the heat exchanger in the P-NTU
method, calculated with respect to stream 1 (shell side = 1, tube side
= 2) [-]
Ntp : int
Number of tube passes, 1, or 2, [-]
optimal : bool, optional
Whether or not the arrangement is configured to give more of a
countercurrent and efficient (True) case or an inefficient parallel
case, [-]
Returns
-------
P1 : float
Thermal effectiveness of the heat exchanger in the P-NTU method,
calculated with respect to stream 1 [-]
Notes
-----
For numbers of tube passes greater than 1 or 2, an exception is raised.
    The convention for the formulas in [1]_ and [3]_ is with the shell side
as side 1, and the tube side as side 2. [2]_ has formulas with the
opposite convention.
Examples
--------
>>> temperature_effectiveness_TEMA_H(R1=1/3., NTU1=1., Ntp=1)
0.5730728284905833
References
----------
.. [1] Shah, Ramesh K., and Dusan P. Sekulic. Fundamentals of Heat
Exchanger Design. 1st edition. Hoboken, NJ: Wiley, 2002.
.. [2] Thulukkanam, Kuppan. Heat Exchanger Design Handbook, Second Edition.
CRC Press, 2013.
.. [3] Rohsenow, Warren and James Hartnett and Young Cho. Handbook of Heat
Transfer, 3E. New York: McGraw-Hill, 1998.
'''
if Ntp == 1:
A = 1./(1 + R1/2.)*(1. - exp(-NTU1*(1. + R1/2.)/2.))
D = exp(-NTU1*(1. - R1/2.)/2.)
if R1 != 2:
B = (1. - D)/(1. - R1*D/2.)
else:
B = NTU1/(2. + NTU1)
E = (A + B - A*B*R1/2.)/2.
P1 = E*(1. + (1. - B*R1/2.)*(1. - A*R1/2. + A*B*R1)) - A*B*(1. - B*R1/2.)
elif Ntp == 2 and optimal:
alpha = NTU1*(4. + R1)/8.
beta = NTU1*(4. - R1)/8.
D = (1. - exp(-alpha))/(4./R1 + 1)
if R1 != 4:
E = (1. - exp(-beta))/(4./R1 - 1.)
H = (1. - exp(-2.*beta))/(4./R1 - 1.)
else:
E = NTU1/2.
H = NTU1
G = (1-D)**2*(D**2 + E**2) + D**2*(1+E)**2
B = (1. + H)*(1. + E)**2
P1 = 1./R1*(1. - (1. - D)**4/(B - 4.*G/R1))
elif Ntp == 2 and not optimal:
R1_orig = R1
#NTU2 = NTU1*R1_orig but we want to treat it as NTU1 in this case
NTU1 = NTU1*R1_orig # switch 1
# R2 = 1/R1 but we want to treat it as R1 in this case
R1 = 1./R1_orig # switch 2
beta = NTU1*(4.*R1 + 1)/8.
alpha = NTU1/8.*(4.*R1 - 1.)
H = (exp(-2.*beta) - 1.)/(4.*R1 + 1.)
E = (exp(-beta) - 1.)/(4.*R1 + 1.)
B = (1. + H)*(1. + E)**2
if R1 != 0.25:
D = (1. - exp(-alpha))/(1. - 4.*R1)
G = (1. - D)**2*(D**2 + E**2) + D**2*(1. + E)**2
P1 = (1. - (B + 4.*G*R1)/(1. - D)**4)
else:
D = -NTU1/8.
G = (1. - D)**2*(D**2 + E**2) + D**2*(1. + E)**2
P1 = (1. - (B + 4.*G*R1)/(1. - D)**4)
P1 = P1/R1_orig # switch 3, confirmed
else:
raise Exception('Supported numbers of tube passes are 1 and 2.')
return P1 | python | def temperature_effectiveness_TEMA_H(R1, NTU1, Ntp, optimal=True):
r'''Returns temperature effectiveness `P1` of a TEMA H type heat exchanger
with a specified heat capacity ratio, number of transfer units `NTU1`,
    and number of tube passes `Ntp`. For the two tube pass case, there are
two possible orientations, one inefficient and one efficient controlled
by the `optimal` option. The supported cases are as follows:
* One tube pass (tube fluid split into two streams individually mixed,
shell fluid mixed)
* Two tube passes (shell fluid mixed, tube pass mixed between passes)
* Two tube passes (shell fluid mixed, tube pass mixed between passes, inlet
tube side next to inlet shell-side)
1-1 TEMA H, tube fluid split into two streams individually mixed, shell
fluid mixed:
.. math::
P_1 = E[1 + (1 - BR_1/2)(1 - A R_1/2 + ABR_1)] - AB(1 - BR_1/2)
A = \frac{1}{1 + R_1/2}\{1 - \exp[-NTU_1(1 + R_1/2)/2]\}
B = \frac{1-D}{1-R_1 D/2}
D = \exp[-NTU_1(1-R_1/2)/2]
E = (A + B - ABR_1/2)/2
1-2 TEMA H, shell and tube fluids mixed in each pass at the cross section:
.. math::
P_1 = \frac{1}{R_1}\left[1 - \frac{(1-D)^4}{B - 4G/R_1}\right]
B = (1+H)(1+E)^2
G = (1-D)^2(D^2 + E^2) + D^2(1 + E)^2
H = [1 - \exp(-2\beta)]/(4/R_1 -1)
E = [1 - \exp(-\beta)]/(4/R_1 - 1)
D = [1 - \exp(-\alpha)]/(4/R_1 + 1)
\alpha = NTU_1(4 + R_1)/8
\beta = NTU_1(4-R_1)/8
1-2 TEMA H, shell and tube fluids mixed in each pass at the cross section
but with the inlet tube stream coming in next to the shell fluid inlet
in an inefficient way (this is only shown in [2]_, and the stream 1/2
convention in it is different but converted here; P1 is still returned):
.. math::
P_2 = \left[1 - \frac{B + 4GR_2}{(1-D)^4}\right]
B = (1 + H)(1 + E)^2
G = (1-D)^2(D^2 + E^2) + D^2(1 + E)^2
D = \frac{1 - \exp(-\alpha)}{1 - 4R_2}
E = \frac{\exp(-\beta) - 1}{4R_2 +1}
H = \frac{\exp(-2\beta) - 1}{4R_2 +1}
\alpha = \frac{NTU_2}{8}(4R_2 -1)
\beta = \frac{NTU_2}{8}(4R_2 +1)
Parameters
----------
R1 : float
Heat capacity ratio of the heat exchanger in the P-NTU method,
calculated with respect to stream 1 (shell side = 1, tube side = 2) [-]
NTU1 : float
Thermal number of transfer units of the heat exchanger in the P-NTU
method, calculated with respect to stream 1 (shell side = 1, tube side
= 2) [-]
Ntp : int
Number of tube passes, 1, or 2, [-]
optimal : bool, optional
Whether or not the arrangement is configured to give more of a
countercurrent and efficient (True) case or an inefficient parallel
case, [-]
Returns
-------
P1 : float
Thermal effectiveness of the heat exchanger in the P-NTU method,
calculated with respect to stream 1 [-]
Notes
-----
For numbers of tube passes greater than 1 or 2, an exception is raised.
    The convention for the formulas in [1]_ and [3]_ is with the shell side
as side 1, and the tube side as side 2. [2]_ has formulas with the
opposite convention.
Examples
--------
>>> temperature_effectiveness_TEMA_H(R1=1/3., NTU1=1., Ntp=1)
0.5730728284905833
References
----------
.. [1] Shah, Ramesh K., and Dusan P. Sekulic. Fundamentals of Heat
Exchanger Design. 1st edition. Hoboken, NJ: Wiley, 2002.
.. [2] Thulukkanam, Kuppan. Heat Exchanger Design Handbook, Second Edition.
CRC Press, 2013.
.. [3] Rohsenow, Warren and James Hartnett and Young Cho. Handbook of Heat
Transfer, 3E. New York: McGraw-Hill, 1998.
'''
if Ntp == 1:
A = 1./(1 + R1/2.)*(1. - exp(-NTU1*(1. + R1/2.)/2.))
D = exp(-NTU1*(1. - R1/2.)/2.)
if R1 != 2:
B = (1. - D)/(1. - R1*D/2.)
else:
B = NTU1/(2. + NTU1)
E = (A + B - A*B*R1/2.)/2.
P1 = E*(1. + (1. - B*R1/2.)*(1. - A*R1/2. + A*B*R1)) - A*B*(1. - B*R1/2.)
elif Ntp == 2 and optimal:
alpha = NTU1*(4. + R1)/8.
beta = NTU1*(4. - R1)/8.
D = (1. - exp(-alpha))/(4./R1 + 1)
if R1 != 4:
E = (1. - exp(-beta))/(4./R1 - 1.)
H = (1. - exp(-2.*beta))/(4./R1 - 1.)
else:
E = NTU1/2.
H = NTU1
G = (1-D)**2*(D**2 + E**2) + D**2*(1+E)**2
B = (1. + H)*(1. + E)**2
P1 = 1./R1*(1. - (1. - D)**4/(B - 4.*G/R1))
elif Ntp == 2 and not optimal:
R1_orig = R1
#NTU2 = NTU1*R1_orig but we want to treat it as NTU1 in this case
NTU1 = NTU1*R1_orig # switch 1
# R2 = 1/R1 but we want to treat it as R1 in this case
R1 = 1./R1_orig # switch 2
beta = NTU1*(4.*R1 + 1)/8.
alpha = NTU1/8.*(4.*R1 - 1.)
H = (exp(-2.*beta) - 1.)/(4.*R1 + 1.)
E = (exp(-beta) - 1.)/(4.*R1 + 1.)
B = (1. + H)*(1. + E)**2
if R1 != 0.25:
D = (1. - exp(-alpha))/(1. - 4.*R1)
G = (1. - D)**2*(D**2 + E**2) + D**2*(1. + E)**2
P1 = (1. - (B + 4.*G*R1)/(1. - D)**4)
else:
D = -NTU1/8.
G = (1. - D)**2*(D**2 + E**2) + D**2*(1. + E)**2
P1 = (1. - (B + 4.*G*R1)/(1. - D)**4)
P1 = P1/R1_orig # switch 3, confirmed
else:
raise Exception('Supported numbers of tube passes are 1 and 2.')
return P1 | [
"def",
"temperature_effectiveness_TEMA_H",
"(",
"R1",
",",
"NTU1",
",",
"Ntp",
",",
"optimal",
"=",
"True",
")",
":",
"if",
"Ntp",
"==",
"1",
":",
"A",
"=",
"1.",
"/",
"(",
"1",
"+",
"R1",
"/",
"2.",
")",
"*",
"(",
"1.",
"-",
"exp",
"(",
"-",
"NTU1",
"*",
"(",
"1.",
"+",
"R1",
"/",
"2.",
")",
"/",
"2.",
")",
")",
"D",
"=",
"exp",
"(",
"-",
"NTU1",
"*",
"(",
"1.",
"-",
"R1",
"/",
"2.",
")",
"/",
"2.",
")",
"if",
"R1",
"!=",
"2",
":",
"B",
"=",
"(",
"1.",
"-",
"D",
")",
"/",
"(",
"1.",
"-",
"R1",
"*",
"D",
"/",
"2.",
")",
"else",
":",
"B",
"=",
"NTU1",
"/",
"(",
"2.",
"+",
"NTU1",
")",
"E",
"=",
"(",
"A",
"+",
"B",
"-",
"A",
"*",
"B",
"*",
"R1",
"/",
"2.",
")",
"/",
"2.",
"P1",
"=",
"E",
"*",
"(",
"1.",
"+",
"(",
"1.",
"-",
"B",
"*",
"R1",
"/",
"2.",
")",
"*",
"(",
"1.",
"-",
"A",
"*",
"R1",
"/",
"2.",
"+",
"A",
"*",
"B",
"*",
"R1",
")",
")",
"-",
"A",
"*",
"B",
"*",
"(",
"1.",
"-",
"B",
"*",
"R1",
"/",
"2.",
")",
"elif",
"Ntp",
"==",
"2",
"and",
"optimal",
":",
"alpha",
"=",
"NTU1",
"*",
"(",
"4.",
"+",
"R1",
")",
"/",
"8.",
"beta",
"=",
"NTU1",
"*",
"(",
"4.",
"-",
"R1",
")",
"/",
"8.",
"D",
"=",
"(",
"1.",
"-",
"exp",
"(",
"-",
"alpha",
")",
")",
"/",
"(",
"4.",
"/",
"R1",
"+",
"1",
")",
"if",
"R1",
"!=",
"4",
":",
"E",
"=",
"(",
"1.",
"-",
"exp",
"(",
"-",
"beta",
")",
")",
"/",
"(",
"4.",
"/",
"R1",
"-",
"1.",
")",
"H",
"=",
"(",
"1.",
"-",
"exp",
"(",
"-",
"2.",
"*",
"beta",
")",
")",
"/",
"(",
"4.",
"/",
"R1",
"-",
"1.",
")",
"else",
":",
"E",
"=",
"NTU1",
"/",
"2.",
"H",
"=",
"NTU1",
"G",
"=",
"(",
"1",
"-",
"D",
")",
"**",
"2",
"*",
"(",
"D",
"**",
"2",
"+",
"E",
"**",
"2",
")",
"+",
"D",
"**",
"2",
"*",
"(",
"1",
"+",
"E",
")",
"**",
"2",
"B",
"=",
"(",
"1.",
"+",
"H",
")",
"*",
"(",
"1.",
"+",
"E",
")",
"**",
"2",
"P1",
"=",
"1.",
"/",
"R1",
"*",
"(",
"1.",
"-",
"(",
"1.",
"-",
"D",
")",
"**",
"4",
"/",
"(",
"B",
"-",
"4.",
"*",
"G",
"/",
"R1",
")",
")",
"elif",
"Ntp",
"==",
"2",
"and",
"not",
"optimal",
":",
"R1_orig",
"=",
"R1",
"#NTU2 = NTU1*R1_orig but we want to treat it as NTU1 in this case",
"NTU1",
"=",
"NTU1",
"*",
"R1_orig",
"# switch 1",
"# R2 = 1/R1 but we want to treat it as R1 in this case",
"R1",
"=",
"1.",
"/",
"R1_orig",
"# switch 2",
"beta",
"=",
"NTU1",
"*",
"(",
"4.",
"*",
"R1",
"+",
"1",
")",
"/",
"8.",
"alpha",
"=",
"NTU1",
"/",
"8.",
"*",
"(",
"4.",
"*",
"R1",
"-",
"1.",
")",
"H",
"=",
"(",
"exp",
"(",
"-",
"2.",
"*",
"beta",
")",
"-",
"1.",
")",
"/",
"(",
"4.",
"*",
"R1",
"+",
"1.",
")",
"E",
"=",
"(",
"exp",
"(",
"-",
"beta",
")",
"-",
"1.",
")",
"/",
"(",
"4.",
"*",
"R1",
"+",
"1.",
")",
"B",
"=",
"(",
"1.",
"+",
"H",
")",
"*",
"(",
"1.",
"+",
"E",
")",
"**",
"2",
"if",
"R1",
"!=",
"0.25",
":",
"D",
"=",
"(",
"1.",
"-",
"exp",
"(",
"-",
"alpha",
")",
")",
"/",
"(",
"1.",
"-",
"4.",
"*",
"R1",
")",
"G",
"=",
"(",
"1.",
"-",
"D",
")",
"**",
"2",
"*",
"(",
"D",
"**",
"2",
"+",
"E",
"**",
"2",
")",
"+",
"D",
"**",
"2",
"*",
"(",
"1.",
"+",
"E",
")",
"**",
"2",
"P1",
"=",
"(",
"1.",
"-",
"(",
"B",
"+",
"4.",
"*",
"G",
"*",
"R1",
")",
"/",
"(",
"1.",
"-",
"D",
")",
"**",
"4",
")",
"else",
":",
"D",
"=",
"-",
"NTU1",
"/",
"8.",
"G",
"=",
"(",
"1.",
"-",
"D",
")",
"**",
"2",
"*",
"(",
"D",
"**",
"2",
"+",
"E",
"**",
"2",
")",
"+",
"D",
"**",
"2",
"*",
"(",
"1.",
"+",
"E",
")",
"**",
"2",
"P1",
"=",
"(",
"1.",
"-",
"(",
"B",
"+",
"4.",
"*",
"G",
"*",
"R1",
")",
"/",
"(",
"1.",
"-",
"D",
")",
"**",
"4",
")",
"P1",
"=",
"P1",
"/",
"R1_orig",
"# switch 3, confirmed",
"else",
":",
"raise",
"Exception",
"(",
"'Supported numbers of tube passes are 1 and 2.'",
")",
"return",
"P1"
] | r'''Returns temperature effectiveness `P1` of a TEMA H type heat exchanger
with a specified heat capacity ratio, number of transfer units `NTU1`,
and number of tube passes `Ntp`. For the two tube pass case, there are
two possible orientations, one inefficient and one efficient controlled
by the `optimal` option. The supported cases are as follows:
* One tube pass (tube fluid split into two streams individually mixed,
shell fluid mixed)
* Two tube passes (shell fluid mixed, tube pass mixed between passes)
* Two tube passes (shell fluid mixed, tube pass mixed between passes, inlet
tube side next to inlet shell-side)
1-1 TEMA H, tube fluid split into two streams individually mixed, shell
fluid mixed:
.. math::
P_1 = E[1 + (1 - BR_1/2)(1 - A R_1/2 + ABR_1)] - AB(1 - BR_1/2)
A = \frac{1}{1 + R_1/2}\{1 - \exp[-NTU_1(1 + R_1/2)/2]\}
B = \frac{1-D}{1-R_1 D/2}
D = \exp[-NTU_1(1-R_1/2)/2]
E = (A + B - ABR_1/2)/2
1-2 TEMA H, shell and tube fluids mixed in each pass at the cross section:
.. math::
P_1 = \frac{1}{R_1}\left[1 - \frac{(1-D)^4}{B - 4G/R_1}\right]
B = (1+H)(1+E)^2
G = (1-D)^2(D^2 + E^2) + D^2(1 + E)^2
H = [1 - \exp(-2\beta)]/(4/R_1 -1)
E = [1 - \exp(-\beta)]/(4/R_1 - 1)
D = [1 - \exp(-\alpha)]/(4/R_1 + 1)
\alpha = NTU_1(4 + R_1)/8
\beta = NTU_1(4-R_1)/8
1-2 TEMA H, shell and tube fluids mixed in each pass at the cross section
but with the inlet tube stream coming in next to the shell fluid inlet
in an inefficient way (this is only shown in [2]_, and the stream 1/2
convention in it is different but converted here; P1 is still returned):
.. math::
P_2 = \left[1 - \frac{B + 4GR_2}{(1-D)^4}\right]
B = (1 + H)(1 + E)^2
G = (1-D)^2(D^2 + E^2) + D^2(1 + E)^2
D = \frac{1 - \exp(-\alpha)}{1 - 4R_2}
E = \frac{\exp(-\beta) - 1}{4R_2 +1}
H = \frac{\exp(-2\beta) - 1}{4R_2 +1}
\alpha = \frac{NTU_2}{8}(4R_2 -1)
\beta = \frac{NTU_2}{8}(4R_2 +1)
Parameters
----------
R1 : float
Heat capacity ratio of the heat exchanger in the P-NTU method,
calculated with respect to stream 1 (shell side = 1, tube side = 2) [-]
NTU1 : float
Thermal number of transfer units of the heat exchanger in the P-NTU
method, calculated with respect to stream 1 (shell side = 1, tube side
= 2) [-]
Ntp : int
Number of tube passes, 1, or 2, [-]
optimal : bool, optional
Whether or not the arrangement is configured to give more of a
countercurrent and efficient (True) case or an inefficient parallel
case, [-]
Returns
-------
P1 : float
Thermal effectiveness of the heat exchanger in the P-NTU method,
calculated with respect to stream 1 [-]
Notes
-----
For numbers of tube passes greater than 1 or 2, an exception is raised.
The convention for the formulas in [1]_ and [3]_ is with the shell side
as side 1, and the tube side as side 2. [2]_ has formulas with the
opposite convention.
Examples
--------
>>> temperature_effectiveness_TEMA_H(R1=1/3., NTU1=1., Ntp=1)
0.5730728284905833
References
----------
.. [1] Shah, Ramesh K., and Dusan P. Sekulic. Fundamentals of Heat
Exchanger Design. 1st edition. Hoboken, NJ: Wiley, 2002.
.. [2] Thulukkanam, Kuppan. Heat Exchanger Design Handbook, Second Edition.
CRC Press, 2013.
.. [3] Rohsenow, Warren and James Hartnett and Young Cho. Handbook of Heat
Transfer, 3E. New York: McGraw-Hill, 1998. | [
"r",
"Returns",
"temperature",
"effectiveness",
"P1",
"of",
"a",
"TEMA",
"H",
"type",
"heat",
"exchanger",
"with",
"a",
"specified",
"heat",
"capacity",
"ratio",
"number",
"of",
"transfer",
"units",
"NTU1",
"and",
"of",
"number",
"of",
"tube",
"passes",
"Ntp",
".",
"For",
"the",
"two",
"tube",
"pass",
"case",
"there",
"are",
"two",
"possible",
"orientations",
"one",
"inefficient",
"and",
"one",
"efficient",
"controlled",
"by",
"the",
"optimal",
"option",
".",
"The",
"supported",
"cases",
"are",
"as",
"follows",
":",
"*",
"One",
"tube",
"pass",
"(",
"tube",
"fluid",
"split",
"into",
"two",
"streams",
"individually",
"mixed",
"shell",
"fluid",
"mixed",
")",
"*",
"Two",
"tube",
"passes",
"(",
"shell",
"fluid",
"mixed",
"tube",
"pass",
"mixed",
"between",
"passes",
")",
"*",
"Two",
"tube",
"passes",
"(",
"shell",
"fluid",
"mixed",
"tube",
"pass",
"mixed",
"between",
"passes",
"inlet",
"tube",
"side",
"next",
"to",
"inlet",
"shell",
"-",
"side",
")",
"1",
"-",
"1",
"TEMA",
"H",
"tube",
"fluid",
"split",
"into",
"two",
"streams",
"individually",
"mixed",
"shell",
"fluid",
"mixed",
":"
] | train | https://github.com/CalebBell/ht/blob/3097ef9524c4cf0068ad453c17b10ec9ce551eee/ht/hx.py#L1469-L1625 | 0.007123 |
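Beyond the doctest in the docstring, the two 1-2 orientations can be compared directly (assuming the function is re-exported at the package top level, as ht's hx functions are):
from ht import temperature_effectiveness_TEMA_H
P1_eff = temperature_effectiveness_TEMA_H(R1=1/3., NTU1=1., Ntp=2)                    # efficient inlet
P1_ineff = temperature_effectiveness_TEMA_H(R1=1/3., NTU1=1., Ntp=2, optimal=False)   # inefficient inlet
print(P1_eff, P1_ineff)   # the optimal arrangement should yield the larger P1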
PythonCharmers/python-future | src/future/backports/email/_header_value_parser.py | get_attribute | def get_attribute(value):
""" [CFWS] 1*attrtext [CFWS]
This version of the BNF makes the CFWS explicit, and as usual we use a
value terminal for the actual run of characters. The RFC equivalent of
attrtext is the token characters, with the subtraction of '*', "'", and '%'.
We include tab in the excluded set just as we do for token.
"""
attribute = Attribute()
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
attribute.append(token)
if value and value[0] in ATTRIBUTE_ENDS:
raise errors.HeaderParseError(
"expected token but found '{}'".format(value))
token, value = get_attrtext(value)
attribute.append(token)
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
attribute.append(token)
return attribute, value | python | def get_attribute(value):
""" [CFWS] 1*attrtext [CFWS]
This version of the BNF makes the CFWS explicit, and as usual we use a
value terminal for the actual run of characters. The RFC equivalent of
attrtext is the token characters, with the subtraction of '*', "'", and '%'.
We include tab in the excluded set just as we do for token.
"""
attribute = Attribute()
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
attribute.append(token)
if value and value[0] in ATTRIBUTE_ENDS:
raise errors.HeaderParseError(
"expected token but found '{}'".format(value))
token, value = get_attrtext(value)
attribute.append(token)
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
attribute.append(token)
return attribute, value | [
"def",
"get_attribute",
"(",
"value",
")",
":",
"attribute",
"=",
"Attribute",
"(",
")",
"if",
"value",
"and",
"value",
"[",
"0",
"]",
"in",
"CFWS_LEADER",
":",
"token",
",",
"value",
"=",
"get_cfws",
"(",
"value",
")",
"attribute",
".",
"append",
"(",
"token",
")",
"if",
"value",
"and",
"value",
"[",
"0",
"]",
"in",
"ATTRIBUTE_ENDS",
":",
"raise",
"errors",
".",
"HeaderParseError",
"(",
"\"expected token but found '{}'\"",
".",
"format",
"(",
"value",
")",
")",
"token",
",",
"value",
"=",
"get_attrtext",
"(",
"value",
")",
"attribute",
".",
"append",
"(",
"token",
")",
"if",
"value",
"and",
"value",
"[",
"0",
"]",
"in",
"CFWS_LEADER",
":",
"token",
",",
"value",
"=",
"get_cfws",
"(",
"value",
")",
"attribute",
".",
"append",
"(",
"token",
")",
"return",
"attribute",
",",
"value"
] | [CFWS] 1*attrtext [CFWS]
This version of the BNF makes the CFWS explicit, and as usual we use a
value terminal for the actual run of characters. The RFC equivalent of
attrtext is the token characters, with the subtraction of '*', "'", and '%'.
We include tab in the excluded set just as we do for token. | [
"[",
"CFWS",
"]",
"1",
"*",
"attrtext",
"[",
"CFWS",
"]"
] | train | https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/email/_header_value_parser.py#L2527-L2548 | 0.002336 |
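The same parser ships as a private module of the CPython standard library, so the behaviour can be checked there (the module path is private and may change between versions):
from email._header_value_parser import get_attribute
token, rest = get_attribute(' filename=report.pdf')
print(repr(str(token)))   # ' filename' -- leading CFWS is kept inside the token
print(repr(rest))         # '=report.pdf' -- parsing stops at ATTRIBUTE_ENDS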
sdispater/orator | orator/schema/grammars/grammar.py | SchemaGrammar.prefix_list | def prefix_list(self, prefix, values):
"""
Add a prefix to a list of values.
"""
return list(map(lambda value: prefix + " " + value, values)) | python | def prefix_list(self, prefix, values):
"""
Add a prefix to a list of values.
"""
return list(map(lambda value: prefix + " " + value, values)) | [
"def",
"prefix_list",
"(",
"self",
",",
"prefix",
",",
"values",
")",
":",
"return",
"list",
"(",
"map",
"(",
"lambda",
"value",
":",
"prefix",
"+",
"\" \"",
"+",
"value",
",",
"values",
")",
")"
] | Add a prefix to a list of values. | [
"Add",
"a",
"prefix",
"to",
"a",
"list",
"of",
"values",
"."
] | train | https://github.com/sdispater/orator/blob/bd90bf198ee897751848f9a92e49d18e60a74136/orator/schema/grammars/grammar.py#L169-L173 | 0.011561 |
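A standalone re-statement of what the helper produces (list comprehension instead of map, same result):
def prefix_list(prefix, values):
    return [prefix + ' ' + value for value in values]
print(prefix_list('add', ['column foo integer', 'column bar text']))
# -> ['add column foo integer', 'add column bar text']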
timothydmorton/VESPA | vespa/fpp.py | from_ini | def from_ini(cls, folder, ini_file='fpp.ini', ichrone='mist', recalc=False,
refit_trap=False, **kwargs):
"""
        To enable simple usage, initializes an FPPCalculation from a .ini file.
By default, a file called ``fpp.ini`` will be looked for in the
current folder. Also present must be a ``star.ini`` file that
contains the observed properties of the target star.
``fpp.ini`` must be of the following form::
name = k2oi
ra = 11:30:14.510
dec = +07:35:18.21
period = 32.988 #days
rprs = 0.0534 #Rp/Rstar
photfile = lc_k2oi.csv
[constraints]
maxrad = 10 #exclusion radius [arcsec]
secthresh = 0.001 #maximum allowed secondary signal depth
#This variable defines contrast curves
#ccfiles = Keck_J.cc, Lick_J.cc
Photfile must be a text file with columns ``(days_from_midtransit,
flux, flux_err)``. Both whitespace- and comma-delimited
will be tried, using ``np.loadtxt``. Photfile need not be there
if there is a pickled :class:`TransitSignal` saved in the same
directory as ``ini_file``, named ``trsig.pkl`` (or another name
as defined by ``trsig`` keyword in ``.ini`` file).
``star.ini`` should look something like the following::
B = 15.005, 0.06
V = 13.496, 0.05
g = 14.223, 0.05
r = 12.858, 0.04
i = 11.661, 0.08
J = 9.763, 0.03
H = 9.135, 0.03
K = 8.899, 0.02
W1 = 8.769, 0.023
W2 = 8.668, 0.02
W3 = 8.552, 0.025
Kepler = 12.473
#Teff = 3503, 80
#feh = 0.09, 0.09
#logg = 4.89, 0.1
Any star properties can be defined; if errors are included
then they will be used in the :class:`isochrones.StarModel`
MCMC fit.
Spectroscopic parameters (``Teff, feh, logg``) are optional.
If included, then they will also be included in
:class:`isochrones.StarModel` fit. A magnitude for the
band in which the transit signal is observed (e.g., ``Kepler``)
is required, though need not have associated uncertainty.
:param folder:
Folder to find configuration files.
:param ini_file:
Input configuration file.
:param star_ini_file:
Input config file for :class:`isochrones.StarModel` fits.
:param recalc:
Whether to re-calculate :class:`PopulationSet`, if a
``popset.h5`` file is already present
:param **kwargs:
Keyword arguments passed to :class:`PopulationSet`.
Creates:
* ``trsig.pkl``: the pickled :class:`vespa.TransitSignal` object.
* ``starfield.h5``: the TRILEGAL field star simulation
* ``starmodel.h5``: the :class:`isochrones.StarModel` fit
* ``popset.h5``: the :class:`vespa.PopulationSet` object
representing the model population simulations.
Raises
------
RuntimeError :
If single, double, and triple starmodels are
not computed, then raises with admonition to run
`starfit --all`.
AttributeError :
If `trsig.pkl` not present in folder, and
`photfile` is not defined in config file.
"""
# Check if all starmodel fits are done.
# If not, tell user to run 'starfit --all'
config = ConfigObj(os.path.join(folder, ini_file))
# Load required entries from ini_file
try:
name = config['name']
ra, dec = config['ra'], config['dec']
period = float(config['period'])
rprs = float(config['rprs'])
except KeyError as err:
raise KeyError('Missing required element of ini file: {}'.format(err))
try:
cadence = float(config['cadence'])
except KeyError:
logging.warning('Cadence not provided in fpp.ini; defaulting to Kepler cadence.')
logging.warning('If this is not a Kepler target, please set cadence (in days).')
cadence = 1626./86400 # Default to Kepler cadence
def fullpath(filename):
if os.path.isabs(filename):
return filename
else:
return os.path.join(folder, filename)
# Non-required entries with default values
popset_file = fullpath(config.get('popset', 'popset.h5'))
starfield_file = fullpath(config.get('starfield', 'starfield.h5'))
trsig_file = fullpath(config.get('trsig', 'trsig.pkl'))
# Check for StarModel fits
starmodel_basename = config.get('starmodel_basename',
'{}_starmodel'.format(ichrone))
single_starmodel_file = os.path.join(folder,'{}_single.h5'.format(starmodel_basename))
binary_starmodel_file = os.path.join(folder,'{}_binary.h5'.format(starmodel_basename))
triple_starmodel_file = os.path.join(folder,'{}_triple.h5'.format(starmodel_basename))
try:
single_starmodel = StarModel.load_hdf(single_starmodel_file)
binary_starmodel = StarModel.load_hdf(binary_starmodel_file)
triple_starmodel = StarModel.load_hdf(triple_starmodel_file)
except Exception as e:
print(e)
raise RuntimeError('Cannot load StarModels. ' +
'Please run `starfit --all {}`.'.format(folder))
# Create (or load) TransitSignal
if os.path.exists(trsig_file):
logging.info('Loading transit signal from {}...'.format(trsig_file))
with open(trsig_file, 'rb') as f:
trsig = pickle.load(f)
else:
try:
photfile = fullpath(config['photfile'])
except KeyError:
raise AttributeError('If transit pickle file (trsig.pkl) ' +
'not present, "photfile" must be' +
'defined.')
trsig = TransitSignal.from_ascii(photfile, P=period, name=name)
if not trsig.hasMCMC or refit_trap:
logging.info('Fitting transitsignal with MCMC...')
trsig.MCMC()
trsig.save(trsig_file)
# Create (or load) PopulationSet
do_only = DEFAULT_MODELS
if os.path.exists(popset_file):
if recalc:
os.remove(popset_file)
else:
with pd.HDFStore(popset_file) as store:
do_only = [m for m in DEFAULT_MODELS if m not in store]
# Check that properties of saved population match requested
try:
popset = PopulationSet.load_hdf(popset_file)
for pop in popset.poplist:
if pop.cadence != cadence:
raise ValueError('Requested cadence ({}) '.format(cadence) +
                                             'does not match stored ({})! Set recalc=True.'.format(pop.cadence))
except:
raise
if do_only:
logging.info('Generating {} models for PopulationSet...'.format(do_only))
else:
logging.info('Populations ({}) already generated.'.format(DEFAULT_MODELS))
popset = PopulationSet(period=period, cadence=cadence,
mags=single_starmodel.mags,
ra=ra, dec=dec,
trilegal_filename=starfield_file, # Maybe change parameter name?
starmodel=single_starmodel,
binary_starmodel=binary_starmodel,
triple_starmodel=triple_starmodel,
rprs=rprs, do_only=do_only,
savefile=popset_file, **kwargs)
fpp = cls(trsig, popset, folder=folder)
#############
# Apply constraints
# Exclusion radius
maxrad = float(config['constraints']['maxrad'])
fpp.set_maxrad(maxrad)
if 'secthresh' in config['constraints']:
secthresh = float(config['constraints']['secthresh'])
if not np.isnan(secthresh):
fpp.apply_secthresh(secthresh)
# Odd-even constraint
diff = 3 * np.max(trsig.depthfit[1])
fpp.constrain_oddeven(diff)
#apply contrast curve constraints if present
if 'ccfiles' in config['constraints']:
ccfiles = config['constraints']['ccfiles']
if isinstance(ccfiles, string_types):
ccfiles = [ccfiles]
for ccfile in ccfiles:
if not os.path.isabs(ccfile):
ccfile = os.path.join(folder, ccfile)
m = re.search('(\w+)_(\w+)\.cc',os.path.basename(ccfile))
if not m:
logging.warning('Invalid CC filename ({}); '.format(ccfile) +
'skipping.')
continue
else:
band = m.group(2)
inst = m.group(1)
name = '{} {}-band'.format(inst, band)
cc = ContrastCurveFromFile(ccfile, band, name=name)
fpp.apply_cc(cc)
#apply "velocity contrast curve" if present
if 'vcc' in config['constraints']:
dv = float(config['constraints']['vcc'][0])
dmag = float(config['constraints']['vcc'][1])
vcc = VelocityContrastCurve(dv, dmag)
fpp.apply_vcc(vcc)
return fpp | python | def from_ini(cls, folder, ini_file='fpp.ini', ichrone='mist', recalc=False,
refit_trap=False, **kwargs):
"""
To enable simple usage, initializes a FPPCalculation from a .ini file
By default, a file called ``fpp.ini`` will be looked for in the
current folder. Also present must be a ``star.ini`` file that
contains the observed properties of the target star.
``fpp.ini`` must be of the following form::
name = k2oi
ra = 11:30:14.510
dec = +07:35:18.21
period = 32.988 #days
rprs = 0.0534 #Rp/Rstar
photfile = lc_k2oi.csv
[constraints]
maxrad = 10 #exclusion radius [arcsec]
secthresh = 0.001 #maximum allowed secondary signal depth
#This variable defines contrast curves
#ccfiles = Keck_J.cc, Lick_J.cc
Photfile must be a text file with columns ``(days_from_midtransit,
flux, flux_err)``. Both whitespace- and comma-delimited formats
will be tried, using ``np.loadtxt``. Photfile need not be there
if there is a pickled :class:`TransitSignal` saved in the same
directory as ``ini_file``, named ``trsig.pkl`` (or another name
as defined by ``trsig`` keyword in ``.ini`` file).
``star.ini`` should look something like the following::
B = 15.005, 0.06
V = 13.496, 0.05
g = 14.223, 0.05
r = 12.858, 0.04
i = 11.661, 0.08
J = 9.763, 0.03
H = 9.135, 0.03
K = 8.899, 0.02
W1 = 8.769, 0.023
W2 = 8.668, 0.02
W3 = 8.552, 0.025
Kepler = 12.473
#Teff = 3503, 80
#feh = 0.09, 0.09
#logg = 4.89, 0.1
Any star properties can be defined; if errors are included
then they will be used in the :class:`isochrones.StarModel`
MCMC fit.
Spectroscopic parameters (``Teff, feh, logg``) are optional.
If included, then they will also be included in
:class:`isochrones.StarModel` fit. A magnitude for the
band in which the transit signal is observed (e.g., ``Kepler``)
is required, though need not have associated uncertainty.
:param folder:
Folder to find configuration files.
:param ini_file:
Input configuration file.
:param star_ini_file:
Input config file for :class:`isochrones.StarModel` fits.
:param recalc:
Whether to re-calculate :class:`PopulationSet`, if a
``popset.h5`` file is already present
:param **kwargs:
Keyword arguments passed to :class:`PopulationSet`.
Creates:
* ``trsig.pkl``: the pickled :class:`vespa.TransitSignal` object.
* ``starfield.h5``: the TRILEGAL field star simulation
* ``starmodel.h5``: the :class:`isochrones.StarModel` fit
* ``popset.h5``: the :class:`vespa.PopulationSet` object
representing the model population simulations.
Raises
------
RuntimeError :
If single, double, and triple starmodels are
not computed, then raises with admonition to run
`starfit --all`.
AttributeError :
If `trsig.pkl` not present in folder, and
`photfile` is not defined in config file.
"""
# Check if all starmodel fits are done.
# If not, tell user to run 'starfit --all'
config = ConfigObj(os.path.join(folder, ini_file))
# Load required entries from ini_file
try:
name = config['name']
ra, dec = config['ra'], config['dec']
period = float(config['period'])
rprs = float(config['rprs'])
except KeyError as err:
raise KeyError('Missing required element of ini file: {}'.format(err))
try:
cadence = float(config['cadence'])
except KeyError:
logging.warning('Cadence not provided in fpp.ini; defaulting to Kepler cadence.')
logging.warning('If this is not a Kepler target, please set cadence (in days).')
cadence = 1626./86400 # Default to Kepler cadence
def fullpath(filename):
if os.path.isabs(filename):
return filename
else:
return os.path.join(folder, filename)
# Non-required entries with default values
popset_file = fullpath(config.get('popset', 'popset.h5'))
starfield_file = fullpath(config.get('starfield', 'starfield.h5'))
trsig_file = fullpath(config.get('trsig', 'trsig.pkl'))
# Check for StarModel fits
starmodel_basename = config.get('starmodel_basename',
'{}_starmodel'.format(ichrone))
single_starmodel_file = os.path.join(folder,'{}_single.h5'.format(starmodel_basename))
binary_starmodel_file = os.path.join(folder,'{}_binary.h5'.format(starmodel_basename))
triple_starmodel_file = os.path.join(folder,'{}_triple.h5'.format(starmodel_basename))
try:
single_starmodel = StarModel.load_hdf(single_starmodel_file)
binary_starmodel = StarModel.load_hdf(binary_starmodel_file)
triple_starmodel = StarModel.load_hdf(triple_starmodel_file)
except Exception as e:
print(e)
raise RuntimeError('Cannot load StarModels. ' +
'Please run `starfit --all {}`.'.format(folder))
# Create (or load) TransitSignal
if os.path.exists(trsig_file):
logging.info('Loading transit signal from {}...'.format(trsig_file))
with open(trsig_file, 'rb') as f:
trsig = pickle.load(f)
else:
try:
photfile = fullpath(config['photfile'])
except KeyError:
raise AttributeError('If transit pickle file (trsig.pkl) ' +
                                 'not present, "photfile" must be ' +
'defined.')
trsig = TransitSignal.from_ascii(photfile, P=period, name=name)
if not trsig.hasMCMC or refit_trap:
logging.info('Fitting transitsignal with MCMC...')
trsig.MCMC()
trsig.save(trsig_file)
# Create (or load) PopulationSet
do_only = DEFAULT_MODELS
if os.path.exists(popset_file):
if recalc:
os.remove(popset_file)
else:
with pd.HDFStore(popset_file) as store:
do_only = [m for m in DEFAULT_MODELS if m not in store]
# Check that properties of saved population match requested
try:
popset = PopulationSet.load_hdf(popset_file)
for pop in popset.poplist:
if pop.cadence != cadence:
raise ValueError('Requested cadence ({}) '.format(cadence) +
                                             'does not match stored ({})! Set recalc=True.'.format(pop.cadence))
except:
raise
if do_only:
logging.info('Generating {} models for PopulationSet...'.format(do_only))
else:
logging.info('Populations ({}) already generated.'.format(DEFAULT_MODELS))
popset = PopulationSet(period=period, cadence=cadence,
mags=single_starmodel.mags,
ra=ra, dec=dec,
trilegal_filename=starfield_file, # Maybe change parameter name?
starmodel=single_starmodel,
binary_starmodel=binary_starmodel,
triple_starmodel=triple_starmodel,
rprs=rprs, do_only=do_only,
savefile=popset_file, **kwargs)
fpp = cls(trsig, popset, folder=folder)
#############
# Apply constraints
# Exclusion radius
maxrad = float(config['constraints']['maxrad'])
fpp.set_maxrad(maxrad)
if 'secthresh' in config['constraints']:
secthresh = float(config['constraints']['secthresh'])
if not np.isnan(secthresh):
fpp.apply_secthresh(secthresh)
# Odd-even constraint
diff = 3 * np.max(trsig.depthfit[1])
fpp.constrain_oddeven(diff)
#apply contrast curve constraints if present
if 'ccfiles' in config['constraints']:
ccfiles = config['constraints']['ccfiles']
if isinstance(ccfiles, string_types):
ccfiles = [ccfiles]
for ccfile in ccfiles:
if not os.path.isabs(ccfile):
ccfile = os.path.join(folder, ccfile)
m = re.search('(\w+)_(\w+)\.cc',os.path.basename(ccfile))
if not m:
logging.warning('Invalid CC filename ({}); '.format(ccfile) +
'skipping.')
continue
else:
band = m.group(2)
inst = m.group(1)
name = '{} {}-band'.format(inst, band)
cc = ContrastCurveFromFile(ccfile, band, name=name)
fpp.apply_cc(cc)
#apply "velocity contrast curve" if present
if 'vcc' in config['constraints']:
dv = float(config['constraints']['vcc'][0])
dmag = float(config['constraints']['vcc'][1])
vcc = VelocityContrastCurve(dv, dmag)
fpp.apply_vcc(vcc)
return fpp | [
"def",
"from_ini",
"(",
"cls",
",",
"folder",
",",
"ini_file",
"=",
"'fpp.ini'",
",",
"ichrone",
"=",
"'mist'",
",",
"recalc",
"=",
"False",
",",
"refit_trap",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"# Check if all starmodel fits are done.",
"# If not, tell user to run 'starfit --all'",
"config",
"=",
"ConfigObj",
"(",
"os",
".",
"path",
".",
"join",
"(",
"folder",
",",
"ini_file",
")",
")",
"# Load required entries from ini_file",
"try",
":",
"name",
"=",
"config",
"[",
"'name'",
"]",
"ra",
",",
"dec",
"=",
"config",
"[",
"'ra'",
"]",
",",
"config",
"[",
"'dec'",
"]",
"period",
"=",
"float",
"(",
"config",
"[",
"'period'",
"]",
")",
"rprs",
"=",
"float",
"(",
"config",
"[",
"'rprs'",
"]",
")",
"except",
"KeyError",
"as",
"err",
":",
"raise",
"KeyError",
"(",
"'Missing required element of ini file: {}'",
".",
"format",
"(",
"err",
")",
")",
"try",
":",
"cadence",
"=",
"float",
"(",
"config",
"[",
"'cadence'",
"]",
")",
"except",
"KeyError",
":",
"logging",
".",
"warning",
"(",
"'Cadence not provided in fpp.ini; defaulting to Kepler cadence.'",
")",
"logging",
".",
"warning",
"(",
"'If this is not a Kepler target, please set cadence (in days).'",
")",
"cadence",
"=",
"1626.",
"/",
"86400",
"# Default to Kepler cadence",
"def",
"fullpath",
"(",
"filename",
")",
":",
"if",
"os",
".",
"path",
".",
"isabs",
"(",
"filename",
")",
":",
"return",
"filename",
"else",
":",
"return",
"os",
".",
"path",
".",
"join",
"(",
"folder",
",",
"filename",
")",
"# Non-required entries with default values",
"popset_file",
"=",
"fullpath",
"(",
"config",
".",
"get",
"(",
"'popset'",
",",
"'popset.h5'",
")",
")",
"starfield_file",
"=",
"fullpath",
"(",
"config",
".",
"get",
"(",
"'starfield'",
",",
"'starfield.h5'",
")",
")",
"trsig_file",
"=",
"fullpath",
"(",
"config",
".",
"get",
"(",
"'trsig'",
",",
"'trsig.pkl'",
")",
")",
"# Check for StarModel fits",
"starmodel_basename",
"=",
"config",
".",
"get",
"(",
"'starmodel_basename'",
",",
"'{}_starmodel'",
".",
"format",
"(",
"ichrone",
")",
")",
"single_starmodel_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"folder",
",",
"'{}_single.h5'",
".",
"format",
"(",
"starmodel_basename",
")",
")",
"binary_starmodel_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"folder",
",",
"'{}_binary.h5'",
".",
"format",
"(",
"starmodel_basename",
")",
")",
"triple_starmodel_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"folder",
",",
"'{}_triple.h5'",
".",
"format",
"(",
"starmodel_basename",
")",
")",
"try",
":",
"single_starmodel",
"=",
"StarModel",
".",
"load_hdf",
"(",
"single_starmodel_file",
")",
"binary_starmodel",
"=",
"StarModel",
".",
"load_hdf",
"(",
"binary_starmodel_file",
")",
"triple_starmodel",
"=",
"StarModel",
".",
"load_hdf",
"(",
"triple_starmodel_file",
")",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"e",
")",
"raise",
"RuntimeError",
"(",
"'Cannot load StarModels. '",
"+",
"'Please run `starfit --all {}`.'",
".",
"format",
"(",
"folder",
")",
")",
"# Create (or load) TransitSignal",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"trsig_file",
")",
":",
"logging",
".",
"info",
"(",
"'Loading transit signal from {}...'",
".",
"format",
"(",
"trsig_file",
")",
")",
"with",
"open",
"(",
"trsig_file",
",",
"'rb'",
")",
"as",
"f",
":",
"trsig",
"=",
"pickle",
".",
"load",
"(",
"f",
")",
"else",
":",
"try",
":",
"photfile",
"=",
"fullpath",
"(",
"config",
"[",
"'photfile'",
"]",
")",
"except",
"KeyError",
":",
"raise",
"AttributeError",
"(",
"'If transit pickle file (trsig.pkl) '",
"+",
"'not present, \"photfile\" must be'",
"+",
"'defined.'",
")",
"trsig",
"=",
"TransitSignal",
".",
"from_ascii",
"(",
"photfile",
",",
"P",
"=",
"period",
",",
"name",
"=",
"name",
")",
"if",
"not",
"trsig",
".",
"hasMCMC",
"or",
"refit_trap",
":",
"logging",
".",
"info",
"(",
"'Fitting transitsignal with MCMC...'",
")",
"trsig",
".",
"MCMC",
"(",
")",
"trsig",
".",
"save",
"(",
"trsig_file",
")",
"# Create (or load) PopulationSet",
"do_only",
"=",
"DEFAULT_MODELS",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"popset_file",
")",
":",
"if",
"recalc",
":",
"os",
".",
"remove",
"(",
"popset_file",
")",
"else",
":",
"with",
"pd",
".",
"HDFStore",
"(",
"popset_file",
")",
"as",
"store",
":",
"do_only",
"=",
"[",
"m",
"for",
"m",
"in",
"DEFAULT_MODELS",
"if",
"m",
"not",
"in",
"store",
"]",
"# Check that properties of saved population match requested",
"try",
":",
"popset",
"=",
"PopulationSet",
".",
"load_hdf",
"(",
"popset_file",
")",
"for",
"pop",
"in",
"popset",
".",
"poplist",
":",
"if",
"pop",
".",
"cadence",
"!=",
"cadence",
":",
"raise",
"ValueError",
"(",
"'Requested cadence ({}) '",
".",
"format",
"(",
"cadence",
")",
"+",
"'does not match stored {})! Set recalc=True.'",
".",
"format",
"(",
"pop",
".",
"cadence",
")",
")",
"except",
":",
"raise",
"if",
"do_only",
":",
"logging",
".",
"info",
"(",
"'Generating {} models for PopulationSet...'",
".",
"format",
"(",
"do_only",
")",
")",
"else",
":",
"logging",
".",
"info",
"(",
"'Populations ({}) already generated.'",
".",
"format",
"(",
"DEFAULT_MODELS",
")",
")",
"popset",
"=",
"PopulationSet",
"(",
"period",
"=",
"period",
",",
"cadence",
"=",
"cadence",
",",
"mags",
"=",
"single_starmodel",
".",
"mags",
",",
"ra",
"=",
"ra",
",",
"dec",
"=",
"dec",
",",
"trilegal_filename",
"=",
"starfield_file",
",",
"# Maybe change parameter name?",
"starmodel",
"=",
"single_starmodel",
",",
"binary_starmodel",
"=",
"binary_starmodel",
",",
"triple_starmodel",
"=",
"triple_starmodel",
",",
"rprs",
"=",
"rprs",
",",
"do_only",
"=",
"do_only",
",",
"savefile",
"=",
"popset_file",
",",
"*",
"*",
"kwargs",
")",
"fpp",
"=",
"cls",
"(",
"trsig",
",",
"popset",
",",
"folder",
"=",
"folder",
")",
"#############",
"# Apply constraints",
"# Exclusion radius",
"maxrad",
"=",
"float",
"(",
"config",
"[",
"'constraints'",
"]",
"[",
"'maxrad'",
"]",
")",
"fpp",
".",
"set_maxrad",
"(",
"maxrad",
")",
"if",
"'secthresh'",
"in",
"config",
"[",
"'constraints'",
"]",
":",
"secthresh",
"=",
"float",
"(",
"config",
"[",
"'constraints'",
"]",
"[",
"'secthresh'",
"]",
")",
"if",
"not",
"np",
".",
"isnan",
"(",
"secthresh",
")",
":",
"fpp",
".",
"apply_secthresh",
"(",
"secthresh",
")",
"# Odd-even constraint",
"diff",
"=",
"3",
"*",
"np",
".",
"max",
"(",
"trsig",
".",
"depthfit",
"[",
"1",
"]",
")",
"fpp",
".",
"constrain_oddeven",
"(",
"diff",
")",
"#apply contrast curve constraints if present",
"if",
"'ccfiles'",
"in",
"config",
"[",
"'constraints'",
"]",
":",
"ccfiles",
"=",
"config",
"[",
"'constraints'",
"]",
"[",
"'ccfiles'",
"]",
"if",
"isinstance",
"(",
"ccfiles",
",",
"string_types",
")",
":",
"ccfiles",
"=",
"[",
"ccfiles",
"]",
"for",
"ccfile",
"in",
"ccfiles",
":",
"if",
"not",
"os",
".",
"path",
".",
"isabs",
"(",
"ccfile",
")",
":",
"ccfile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"folder",
",",
"ccfile",
")",
"m",
"=",
"re",
".",
"search",
"(",
"'(\\w+)_(\\w+)\\.cc'",
",",
"os",
".",
"path",
".",
"basename",
"(",
"ccfile",
")",
")",
"if",
"not",
"m",
":",
"logging",
".",
"warning",
"(",
"'Invalid CC filename ({}); '",
".",
"format",
"(",
"ccfile",
")",
"+",
"'skipping.'",
")",
"continue",
"else",
":",
"band",
"=",
"m",
".",
"group",
"(",
"2",
")",
"inst",
"=",
"m",
".",
"group",
"(",
"1",
")",
"name",
"=",
"'{} {}-band'",
".",
"format",
"(",
"inst",
",",
"band",
")",
"cc",
"=",
"ContrastCurveFromFile",
"(",
"ccfile",
",",
"band",
",",
"name",
"=",
"name",
")",
"fpp",
".",
"apply_cc",
"(",
"cc",
")",
"#apply \"velocity contrast curve\" if present",
"if",
"'vcc'",
"in",
"config",
"[",
"'constraints'",
"]",
":",
"dv",
"=",
"float",
"(",
"config",
"[",
"'constraints'",
"]",
"[",
"'vcc'",
"]",
"[",
"0",
"]",
")",
"dmag",
"=",
"float",
"(",
"config",
"[",
"'constraints'",
"]",
"[",
"'vcc'",
"]",
"[",
"1",
"]",
")",
"vcc",
"=",
"VelocityContrastCurve",
"(",
"dv",
",",
"dmag",
")",
"fpp",
".",
"apply_vcc",
"(",
"vcc",
")",
"return",
"fpp"
] | To enable simple usage, initializes a FPPCalculation from a .ini file
By default, a file called ``fpp.ini`` will be looked for in the
current folder. Also present must be a ``star.ini`` file that
contains the observed properties of the target star.
``fpp.ini`` must be of the following form::
name = k2oi
ra = 11:30:14.510
dec = +07:35:18.21
period = 32.988 #days
rprs = 0.0534 #Rp/Rstar
photfile = lc_k2oi.csv
[constraints]
maxrad = 10 #exclusion radius [arcsec]
secthresh = 0.001 #maximum allowed secondary signal depth
#This variable defines contrast curves
#ccfiles = Keck_J.cc, Lick_J.cc
Photfile must be a text file with columns ``(days_from_midtransit,
flux, flux_err)``. Both whitespace- and comma-delimited
will be tried, using ``np.loadtxt``. Photfile need not be there
if there is a pickled :class:`TransitSignal` saved in the same
directory as ``ini_file``, named ``trsig.pkl`` (or another name
as defined by ``trsig`` keyword in ``.ini`` file).
``star.ini`` should look something like the following::
B = 15.005, 0.06
V = 13.496, 0.05
g = 14.223, 0.05
r = 12.858, 0.04
i = 11.661, 0.08
J = 9.763, 0.03
H = 9.135, 0.03
K = 8.899, 0.02
W1 = 8.769, 0.023
W2 = 8.668, 0.02
W3 = 8.552, 0.025
Kepler = 12.473
#Teff = 3503, 80
#feh = 0.09, 0.09
#logg = 4.89, 0.1
Any star properties can be defined; if errors are included
then they will be used in the :class:`isochrones.StarModel`
MCMC fit.
Spectroscopic parameters (``Teff, feh, logg``) are optional.
If included, then they will also be included in
:class:`isochrones.StarModel` fit. A magnitude for the
band in which the transit signal is observed (e.g., ``Kepler``)
is required, though need not have associated uncertainty.
:param folder:
Folder to find configuration files.
:param ini_file:
Input configuration file.
:param star_ini_file:
Input config file for :class:`isochrones.StarModel` fits.
:param recalc:
Whether to re-calculate :class:`PopulationSet`, if a
``popset.h5`` file is already present
:param **kwargs:
Keyword arguments passed to :class:`PopulationSet`.
Creates:
* ``trsig.pkl``: the pickled :class:`vespa.TransitSignal` object.
* ``starfield.h5``: the TRILEGAL field star simulation
* ``starmodel.h5``: the :class:`isochrones.StarModel` fit
* ``popset.h5``: the :class:`vespa.PopulationSet` object
representing the model population simulations.
Raises
------
RuntimeError :
If single, double, and triple starmodels are
not computed, then raises with admonition to run
`starfit --all`.
AttributeError :
If `trsig.pkl` not present in folder, and
`photfile` is not defined in config file. | [
"To",
"enable",
"simple",
"usage",
"initializes",
"a",
"FPPCalculation",
"from",
"a",
".",
"ini",
"file"
] | train | https://github.com/timothydmorton/VESPA/blob/0446b54d48009f3655cfd1a3957ceea21d3adcaa/vespa/fpp.py#L91-L336 | 0.003278 |
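A minimal usage sketch for the `from_ini` classmethod documented above. It assumes the class is exposed as `vespa.FPPCalculation` and that `starfit --all` has already produced the single/binary/triple StarModel HDF5 files; the folder path is hypothetical.

from vespa import FPPCalculation

folder = '/data/targets/k2oi'   # hypothetical folder holding fpp.ini and star.ini
fpp = FPPCalculation.from_ini(folder, ini_file='fpp.ini',
                              recalc=False, refit_trap=False)
# The constraints section of fpp.ini (maxrad, secthresh, ccfiles, vcc)
# has already been applied to the returned object.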
tensorflow/tensor2tensor | tensor2tensor/data_generators/vqa.py | _get_vqa_v2_image_feature_dataset | def _get_vqa_v2_image_feature_dataset(
directory, feature_url, feature_filename="mscoco_feat.tar.gz"):
"""Extract the VQA V2 feature data set to directory unless it's there."""
feature_file = generator_utils.maybe_download_from_drive(
directory, feature_filename, feature_url)
with tarfile.open(feature_file, "r:gz") as feature_tar:
feature_tar.extractall(directory) | python | def _get_vqa_v2_image_feature_dataset(
directory, feature_url, feature_filename="mscoco_feat.tar.gz"):
"""Extract the VQA V2 feature data set to directory unless it's there."""
feature_file = generator_utils.maybe_download_from_drive(
directory, feature_filename, feature_url)
with tarfile.open(feature_file, "r:gz") as feature_tar:
feature_tar.extractall(directory) | [
"def",
"_get_vqa_v2_image_feature_dataset",
"(",
"directory",
",",
"feature_url",
",",
"feature_filename",
"=",
"\"mscoco_feat.tar.gz\"",
")",
":",
"feature_file",
"=",
"generator_utils",
".",
"maybe_download_from_drive",
"(",
"directory",
",",
"feature_filename",
",",
"feature_url",
")",
"with",
"tarfile",
".",
"open",
"(",
"feature_file",
",",
"\"r:gz\"",
")",
"as",
"feature_tar",
":",
"feature_tar",
".",
"extractall",
"(",
"directory",
")"
] | Extract the VQA V2 feature data set to directory unless it's there. | [
"Extract",
"the",
"VQA",
"V2",
"feature",
"data",
"set",
"to",
"directory",
"unless",
"it",
"s",
"there",
"."
] | train | https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/vqa.py#L65-L71 | 0.012953 |
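The helper above delegates downloading to `generator_utils.maybe_download_from_drive` and then untars in place. A plain-stdlib sketch of the same download-then-extract pattern, with a hypothetical URL and filename:

import os
import tarfile
import urllib.request

def fetch_and_extract(directory, url, filename="features.tar.gz"):
    # Download a .tar.gz into `directory` once, then extract it there.
    os.makedirs(directory, exist_ok=True)
    path = os.path.join(directory, filename)
    if not os.path.exists(path):
        urllib.request.urlretrieve(url, path)
    with tarfile.open(path, "r:gz") as tar:
        tar.extractall(directory)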
DarkEnergySurvey/ugali | ugali/utils/plotting.py | twoDimensionalScatter | def twoDimensionalScatter(title, title_x, title_y,
x, y,
lim_x = None, lim_y = None,
color = 'b', size = 20, alpha=None):
"""
Create a two-dimensional scatter plot.
    INPUTS
    title, title_x, title_y : plot title and axis labels
    x, y : coordinate arrays to plot
    lim_x, lim_y : optional (min, max) axis limits
    color, size, alpha : marker styling passed to plt.scatter
"""
plt.figure()
plt.scatter(x, y, c=color, s=size, alpha=alpha, edgecolors='none')
plt.xlabel(title_x)
plt.ylabel(title_y)
plt.title(title)
if type(color) is not str:
plt.colorbar()
if lim_x:
plt.xlim(lim_x[0], lim_x[1])
if lim_y:
plt.ylim(lim_y[0], lim_y[1]) | python | def twoDimensionalScatter(title, title_x, title_y,
x, y,
lim_x = None, lim_y = None,
color = 'b', size = 20, alpha=None):
"""
Create a two-dimensional scatter plot.
    INPUTS
    title, title_x, title_y : plot title and axis labels
    x, y : coordinate arrays to plot
    lim_x, lim_y : optional (min, max) axis limits
    color, size, alpha : marker styling passed to plt.scatter
"""
plt.figure()
plt.scatter(x, y, c=color, s=size, alpha=alpha, edgecolors='none')
plt.xlabel(title_x)
plt.ylabel(title_y)
plt.title(title)
if type(color) is not str:
plt.colorbar()
if lim_x:
plt.xlim(lim_x[0], lim_x[1])
if lim_y:
plt.ylim(lim_y[0], lim_y[1]) | [
"def",
"twoDimensionalScatter",
"(",
"title",
",",
"title_x",
",",
"title_y",
",",
"x",
",",
"y",
",",
"lim_x",
"=",
"None",
",",
"lim_y",
"=",
"None",
",",
"color",
"=",
"'b'",
",",
"size",
"=",
"20",
",",
"alpha",
"=",
"None",
")",
":",
"plt",
".",
"figure",
"(",
")",
"plt",
".",
"scatter",
"(",
"x",
",",
"y",
",",
"c",
"=",
"color",
",",
"s",
"=",
"size",
",",
"alpha",
"=",
"alpha",
",",
"edgecolors",
"=",
"'none'",
")",
"plt",
".",
"xlabel",
"(",
"title_x",
")",
"plt",
".",
"ylabel",
"(",
"title_y",
")",
"plt",
".",
"title",
"(",
"title",
")",
"if",
"type",
"(",
"color",
")",
"is",
"not",
"str",
":",
"plt",
".",
"colorbar",
"(",
")",
"if",
"lim_x",
":",
"plt",
".",
"xlim",
"(",
"lim_x",
"[",
"0",
"]",
",",
"lim_x",
"[",
"1",
"]",
")",
"if",
"lim_y",
":",
"plt",
".",
"ylim",
"(",
"lim_y",
"[",
"0",
"]",
",",
"lim_y",
"[",
"1",
"]",
")"
] | Create a two-dimensional scatter plot.
INPUTS
title, title_x, title_y : plot title and axis labels
x, y : coordinate arrays to plot
lim_x, lim_y : optional (min, max) axis limits
color, size, alpha : marker styling passed to plt.scatter
"Create",
"a",
"two",
"-",
"dimensional",
"scatter",
"plot",
"."
] | train | https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/utils/plotting.py#L105-L127 | 0.016949 |
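A usage sketch for `twoDimensionalScatter` with synthetic data; the import path follows the file location shown in the URL above. Passing an array for `color` exercises the colorbar branch.

import numpy as np
import matplotlib.pyplot as plt
from ugali.utils.plotting import twoDimensionalScatter

x = np.random.normal(size=200)                     # synthetic abscissa
y = 2 * x + np.random.normal(scale=0.5, size=200)  # synthetic ordinate
twoDimensionalScatter('toy scatter', 'x', 'y', x, y,
                      color=y, size=15, alpha=0.7)
plt.show()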
pycontribs/pyrax | pyrax/object_storage.py | StorageClient.fetch_object | def fetch_object(self, container, obj, include_meta=False,
chunk_size=None, size=None, extra_info=None):
"""
Fetches the object from storage.
If 'include_meta' is False, only the bytes representing the
stored object are returned.
Note: if 'chunk_size' is defined, you must fully read the object's
contents before making another request.
If 'size' is specified, only the first 'size' bytes of the object will
        be returned. If the object is smaller than 'size', the entire object is
returned.
When 'include_meta' is True, what is returned from this method is a
2-tuple:
Element 0: a dictionary containing metadata about the file.
Element 1: a stream of bytes representing the object's contents.
The 'extra_info' parameter is included for backwards compatibility. It
is no longer used at all, and will not be modified with swiftclient
info, since swiftclient is not used any more.
"""
return self._manager.fetch_object(container, obj,
include_meta=include_meta, chunk_size=chunk_size, size=size) | python | def fetch_object(self, container, obj, include_meta=False,
chunk_size=None, size=None, extra_info=None):
"""
Fetches the object from storage.
If 'include_meta' is False, only the bytes representing the
stored object are returned.
Note: if 'chunk_size' is defined, you must fully read the object's
contents before making another request.
If 'size' is specified, only the first 'size' bytes of the object will
        be returned. If the object is smaller than 'size', the entire object is
returned.
When 'include_meta' is True, what is returned from this method is a
2-tuple:
Element 0: a dictionary containing metadata about the file.
Element 1: a stream of bytes representing the object's contents.
The 'extra_info' parameter is included for backwards compatibility. It
is no longer used at all, and will not be modified with swiftclient
info, since swiftclient is not used any more.
"""
return self._manager.fetch_object(container, obj,
include_meta=include_meta, chunk_size=chunk_size, size=size) | [
"def",
"fetch_object",
"(",
"self",
",",
"container",
",",
"obj",
",",
"include_meta",
"=",
"False",
",",
"chunk_size",
"=",
"None",
",",
"size",
"=",
"None",
",",
"extra_info",
"=",
"None",
")",
":",
"return",
"self",
".",
"_manager",
".",
"fetch_object",
"(",
"container",
",",
"obj",
",",
"include_meta",
"=",
"include_meta",
",",
"chunk_size",
"=",
"chunk_size",
",",
"size",
"=",
"size",
")"
] | Fetches the object from storage.
If 'include_meta' is False, only the bytes representing the
stored object are returned.
Note: if 'chunk_size' is defined, you must fully read the object's
contents before making another request.
If 'size' is specified, only the first 'size' bytes of the object will
be returned. If the object is smaller than 'size', the entire object is
returned.
When 'include_meta' is True, what is returned from this method is a
2-tuple:
Element 0: a dictionary containing metadata about the file.
Element 1: a stream of bytes representing the object's contents.
The 'extra_info' parameter is included for backwards compatibility. It
is no longer used at all, and will not be modified with swiftclient
info, since swiftclient is not used any more. | [
"Fetches",
"the",
"object",
"from",
"storage",
"."
] | train | https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/object_storage.py#L2763-L2788 | 0.003401 |
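Typical calls against `fetch_object`, assuming Rackspace credentials in a pyrax credential file; container and object names are hypothetical, and element 1 of the `include_meta` tuple is treated as an iterable of chunks per the docstring's `chunk_size` note.

import pyrax

pyrax.set_credential_file('~/.pyrax.cfg')   # hypothetical credentials file
cf = pyrax.cloudfiles
data = cf.fetch_object('backups', 'notes.txt')            # whole object as bytes
meta, stream = cf.fetch_object('backups', 'notes.txt',    # metadata + chunked stream
                               include_meta=True, chunk_size=8192)
with open('notes.txt', 'wb') as out:
    for chunk in stream:
        out.write(chunk)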
sdss/tree | bin/setup_tree.py | write_file | def write_file(environ, term='bash', out_dir=None, tree_dir=None):
''' Write a tree environment file
Loops over the tree environ and writes them out to a bash, tsch, or
modules file
Parameters:
environ (dict):
The tree dictionary environment
term (str):
The type of shell header to write, can be "bash", "tsch", or "modules"
tree_dir (str):
The path to this repository
out_dir (str):
The output path to write the files (default is etc/)
'''
# get the proper name, header and file extension
name = environ['default']['name']
header = write_header(term=term, name=name, tree_dir=tree_dir)
exts = {'bash': '.sh', 'tsch': '.csh', 'modules': '.module'}
ext = exts[term]
# shell command
if term == 'bash':
cmd = 'export {0}={1}\n'
else:
cmd = 'setenv {0} {1}\n'
# write the environment config files
filename = os.path.join(out_dir, name + ext)
with open(filename, 'w') as f:
f.write(header + '\n')
for key, values in environ.items():
if key != 'default':
# write separator
f.write('#\n# {0}\n#\n'.format(key))
# write tree names and paths
for tree_name, tree_path in values.items():
f.write(cmd.format(tree_name.upper(), tree_path))
# write default .version file for modules
modules_version = write_version(name)
if term == 'modules' and environ['default']['current']:
version_name = os.path.join(out_dir, '.version')
with open(version_name, 'w') as f:
f.write(modules_version) | python | def write_file(environ, term='bash', out_dir=None, tree_dir=None):
''' Write a tree environment file
Loops over the tree environ and writes them out to a bash, tsch, or
modules file
Parameters:
environ (dict):
The tree dictionary environment
term (str):
The type of shell header to write, can be "bash", "tsch", or "modules"
tree_dir (str):
The path to this repository
out_dir (str):
The output path to write the files (default is etc/)
'''
# get the proper name, header and file extension
name = environ['default']['name']
header = write_header(term=term, name=name, tree_dir=tree_dir)
exts = {'bash': '.sh', 'tsch': '.csh', 'modules': '.module'}
ext = exts[term]
# shell command
if term == 'bash':
cmd = 'export {0}={1}\n'
else:
cmd = 'setenv {0} {1}\n'
# write the environment config files
filename = os.path.join(out_dir, name + ext)
with open(filename, 'w') as f:
f.write(header + '\n')
for key, values in environ.items():
if key != 'default':
# write separator
f.write('#\n# {0}\n#\n'.format(key))
# write tree names and paths
for tree_name, tree_path in values.items():
f.write(cmd.format(tree_name.upper(), tree_path))
# write default .version file for modules
modules_version = write_version(name)
if term == 'modules' and environ['default']['current']:
version_name = os.path.join(out_dir, '.version')
with open(version_name, 'w') as f:
f.write(modules_version) | [
"def",
"write_file",
"(",
"environ",
",",
"term",
"=",
"'bash'",
",",
"out_dir",
"=",
"None",
",",
"tree_dir",
"=",
"None",
")",
":",
"# get the proper name, header and file extension",
"name",
"=",
"environ",
"[",
"'default'",
"]",
"[",
"'name'",
"]",
"header",
"=",
"write_header",
"(",
"term",
"=",
"term",
",",
"name",
"=",
"name",
",",
"tree_dir",
"=",
"tree_dir",
")",
"exts",
"=",
"{",
"'bash'",
":",
"'.sh'",
",",
"'tsch'",
":",
"'.csh'",
",",
"'modules'",
":",
"'.module'",
"}",
"ext",
"=",
"exts",
"[",
"term",
"]",
"# shell command",
"if",
"term",
"==",
"'bash'",
":",
"cmd",
"=",
"'export {0}={1}\\n'",
"else",
":",
"cmd",
"=",
"'setenv {0} {1}\\n'",
"# write the environment config files",
"filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"out_dir",
",",
"name",
"+",
"ext",
")",
"with",
"open",
"(",
"filename",
",",
"'w'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"header",
"+",
"'\\n'",
")",
"for",
"key",
",",
"values",
"in",
"environ",
".",
"items",
"(",
")",
":",
"if",
"key",
"!=",
"'default'",
":",
"# write separator",
"f",
".",
"write",
"(",
"'#\\n# {0}\\n#\\n'",
".",
"format",
"(",
"key",
")",
")",
"# write tree names and paths",
"for",
"tree_name",
",",
"tree_path",
"in",
"values",
".",
"items",
"(",
")",
":",
"f",
".",
"write",
"(",
"cmd",
".",
"format",
"(",
"tree_name",
".",
"upper",
"(",
")",
",",
"tree_path",
")",
")",
"# write default .version file for modules",
"modules_version",
"=",
"write_version",
"(",
"name",
")",
"if",
"term",
"==",
"'modules'",
"and",
"environ",
"[",
"'default'",
"]",
"[",
"'current'",
"]",
":",
"version_name",
"=",
"os",
".",
"path",
".",
"join",
"(",
"out_dir",
",",
"'.version'",
")",
"with",
"open",
"(",
"version_name",
",",
"'w'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"modules_version",
")"
] | Write a tree environment file
Loops over the tree environ and writes them out to a bash, tsch, or
modules file
Parameters:
environ (dict):
The tree dictionary environment
term (str):
The type of shell header to write, can be "bash", "tsch", or "modules"
tree_dir (str):
The path to this repository
out_dir (str):
The output path to write the files (default is etc/) | [
"Write",
"a",
"tree",
"environment",
"file"
] | train | https://github.com/sdss/tree/blob/f61fe0876c138ccb61874912d4b8590dadfa835c/bin/setup_tree.py#L272-L319 | 0.001183 |
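A sketch of driving `write_file` directly, assuming it (and the `write_header`/`write_version` siblings it calls) from bin/setup_tree.py are in scope. The environment dictionary is a hypothetical minimal example of the shape the function expects: a 'default' section plus named sections mapping variable names to paths.

import os

environ = {
    'default': {'name': 'sdsswork', 'current': True},  # drives the filename and .version
    'general': {'sas_base_dir': '/sas', 'mywork_dir': '/sas/mywork'},
}
os.makedirs('etc', exist_ok=True)
for term in ('bash', 'tsch', 'modules'):   # 'tsch' is the key the function recognizes
    write_file(environ, term=term, out_dir='etc', tree_dir='/opt/tree')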
cloudera/cm_api | python/src/cm_api/endpoints/services.py | ApiService.sync_hue_db | def sync_hue_db(self, *servers):
"""
Synchronize the Hue server's database.
@param servers: Name of Hue Server roles to synchronize. Not required starting with API v10.
@return: List of submitted commands.
"""
actual_version = self._get_resource_root().version
if actual_version < 10:
return self._role_cmd('hueSyncDb', servers)
return self._cmd('hueSyncDb', api_version=10) | python | def sync_hue_db(self, *servers):
"""
Synchronize the Hue server's database.
@param servers: Name of Hue Server roles to synchronize. Not required starting with API v10.
@return: List of submitted commands.
"""
actual_version = self._get_resource_root().version
if actual_version < 10:
return self._role_cmd('hueSyncDb', servers)
return self._cmd('hueSyncDb', api_version=10) | [
"def",
"sync_hue_db",
"(",
"self",
",",
"*",
"servers",
")",
":",
"actual_version",
"=",
"self",
".",
"_get_resource_root",
"(",
")",
".",
"version",
"if",
"actual_version",
"<",
"10",
":",
"return",
"self",
".",
"_role_cmd",
"(",
"'hueSyncDb'",
",",
"servers",
")",
"return",
"self",
".",
"_cmd",
"(",
"'hueSyncDb'",
",",
"api_version",
"=",
"10",
")"
] | Synchronize the Hue server's database.
@param servers: Name of Hue Server roles to synchronize. Not required starting with API v10.
@return: List of submitted commands. | [
"Synchronize",
"the",
"Hue",
"server",
"s",
"database",
"."
] | train | https://github.com/cloudera/cm_api/blob/5d2512375bd94684b4da36df9e0d9177865ffcbb/python/src/cm_api/endpoints/services.py#L1337-L1349 | 0.007229 |
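A hedged usage sketch; the host, credentials, cluster, service, and role names are hypothetical, and the import follows the cm_api client convention.

from cm_api.api_client import ApiResource

api = ApiResource('cm-host.example.com', username='admin', password='admin')
hue = api.get_cluster('cluster1').get_service('hue1')
cmds = hue.sync_hue_db()                      # API v10+: no role names needed
# cmds = hue.sync_hue_db('hue-HUE_SERVER-1')  # pre-v10: pass Hue Server role names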
awslabs/aws-serverlessrepo-python | serverlessrepo/parser.py | get_app_metadata | def get_app_metadata(template_dict):
"""
Get the application metadata from a SAM template.
:param template_dict: SAM template as a dictionary
:type template_dict: dict
:return: Application metadata as defined in the template
:rtype: ApplicationMetadata
:raises ApplicationMetadataNotFoundError
"""
if SERVERLESS_REPO_APPLICATION in template_dict.get(METADATA, {}):
app_metadata_dict = template_dict.get(METADATA).get(SERVERLESS_REPO_APPLICATION)
return ApplicationMetadata(app_metadata_dict)
raise ApplicationMetadataNotFoundError(
error_message='missing {} section in template Metadata'.format(SERVERLESS_REPO_APPLICATION)) | python | def get_app_metadata(template_dict):
"""
Get the application metadata from a SAM template.
:param template_dict: SAM template as a dictionary
:type template_dict: dict
:return: Application metadata as defined in the template
:rtype: ApplicationMetadata
:raises ApplicationMetadataNotFoundError
"""
if SERVERLESS_REPO_APPLICATION in template_dict.get(METADATA, {}):
app_metadata_dict = template_dict.get(METADATA).get(SERVERLESS_REPO_APPLICATION)
return ApplicationMetadata(app_metadata_dict)
raise ApplicationMetadataNotFoundError(
error_message='missing {} section in template Metadata'.format(SERVERLESS_REPO_APPLICATION)) | [
"def",
"get_app_metadata",
"(",
"template_dict",
")",
":",
"if",
"SERVERLESS_REPO_APPLICATION",
"in",
"template_dict",
".",
"get",
"(",
"METADATA",
",",
"{",
"}",
")",
":",
"app_metadata_dict",
"=",
"template_dict",
".",
"get",
"(",
"METADATA",
")",
".",
"get",
"(",
"SERVERLESS_REPO_APPLICATION",
")",
"return",
"ApplicationMetadata",
"(",
"app_metadata_dict",
")",
"raise",
"ApplicationMetadataNotFoundError",
"(",
"error_message",
"=",
"'missing {} section in template Metadata'",
".",
"format",
"(",
"SERVERLESS_REPO_APPLICATION",
")",
")"
] | Get the application metadata from a SAM template.
:param template_dict: SAM template as a dictionary
:type template_dict: dict
:return: Application metadata as defined in the template
:rtype: ApplicationMetadata
:raises ApplicationMetadataNotFoundError | [
"Get",
"the",
"application",
"metadata",
"from",
"a",
"SAM",
"template",
"."
] | train | https://github.com/awslabs/aws-serverlessrepo-python/blob/e2126cee0191266cfb8a3a2bc3270bf50330907c/serverlessrepo/parser.py#L98-L113 | 0.004348 |
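A usage sketch assuming `SERVERLESS_REPO_APPLICATION` is the standard 'AWS::ServerlessRepo::Application' metadata key; the application fields are hypothetical.

from serverlessrepo.parser import get_app_metadata

template_dict = {
    'Metadata': {
        'AWS::ServerlessRepo::Application': {
            'Name': 'hello-world',
            'Description': 'A sample application',
            'Author': 'Jane Doe',
        }
    }
}
metadata = get_app_metadata(template_dict)  # ApplicationMetadata instance
# A template without that Metadata section raises ApplicationMetadataNotFoundError.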
daviddrysdale/python-phonenumbers | python/phonenumbers/phonenumberutil.py | _parse_prefix_as_idd | def _parse_prefix_as_idd(idd_pattern, number):
"""Strips the IDD from the start of the number if present.
Helper function used by _maybe_strip_i18n_prefix_and_normalize().
Returns a 2-tuple:
- Boolean indicating if IDD was stripped
- Number with IDD stripped
"""
match = idd_pattern.match(number)
if match:
match_end = match.end()
# Only strip this if the first digit after the match is not a 0, since
# country calling codes cannot begin with 0.
digit_match = _CAPTURING_DIGIT_PATTERN.search(number[match_end:])
if digit_match:
normalized_group = normalize_digits_only(digit_match.group(1))
if normalized_group == U_ZERO:
return (False, number)
return (True, number[match_end:])
return (False, number) | python | def _parse_prefix_as_idd(idd_pattern, number):
"""Strips the IDD from the start of the number if present.
Helper function used by _maybe_strip_i18n_prefix_and_normalize().
Returns a 2-tuple:
- Boolean indicating if IDD was stripped
- Number with IDD stripped
"""
match = idd_pattern.match(number)
if match:
match_end = match.end()
# Only strip this if the first digit after the match is not a 0, since
# country calling codes cannot begin with 0.
digit_match = _CAPTURING_DIGIT_PATTERN.search(number[match_end:])
if digit_match:
normalized_group = normalize_digits_only(digit_match.group(1))
if normalized_group == U_ZERO:
return (False, number)
return (True, number[match_end:])
return (False, number) | [
"def",
"_parse_prefix_as_idd",
"(",
"idd_pattern",
",",
"number",
")",
":",
"match",
"=",
"idd_pattern",
".",
"match",
"(",
"number",
")",
"if",
"match",
":",
"match_end",
"=",
"match",
".",
"end",
"(",
")",
"# Only strip this if the first digit after the match is not a 0, since",
"# country calling codes cannot begin with 0.",
"digit_match",
"=",
"_CAPTURING_DIGIT_PATTERN",
".",
"search",
"(",
"number",
"[",
"match_end",
":",
"]",
")",
"if",
"digit_match",
":",
"normalized_group",
"=",
"normalize_digits_only",
"(",
"digit_match",
".",
"group",
"(",
"1",
")",
")",
"if",
"normalized_group",
"==",
"U_ZERO",
":",
"return",
"(",
"False",
",",
"number",
")",
"return",
"(",
"True",
",",
"number",
"[",
"match_end",
":",
"]",
")",
"return",
"(",
"False",
",",
"number",
")"
] | Strips the IDD from the start of the number if present.
Helper function used by _maybe_strip_i18n_prefix_and_normalize().
Returns a 2-tuple:
- Boolean indicating if IDD was stripped
- Number with IDD stripped | [
"Strips",
"the",
"IDD",
"from",
"the",
"start",
"of",
"the",
"number",
"if",
"present",
"."
] | train | https://github.com/daviddrysdale/python-phonenumbers/blob/9cc5bb4ab5e661e70789b4c64bf7a9383c7bdc20/python/phonenumbers/phonenumberutil.py#L2552-L2572 | 0.001202 |
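A small demonstration of the stripping logic; the compiled pattern below is a simplified stand-in, since real IDD patterns come from the per-region metadata.

import re
from phonenumbers.phonenumberutil import _parse_prefix_as_idd

idd = re.compile('011')
print(_parse_prefix_as_idd(idd, '011442079460000'))
# -> (True, '442079460000'): IDD stripped, next digit is non-zero
print(_parse_prefix_as_idd(idd, '0110442079460000'))
# -> (False, '0110442079460000'): digit after the prefix is 0, so nothing is stripped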
tcalmant/ipopo | pelix/ipopo/handlers/requiresvarfilter.py | _VariableFilterMixIn.update_filter | def update_filter(self):
"""
Update the filter according to the new properties
:return: True if the filter changed, else False
:raise ValueError: The filter is invalid
"""
# Consider the filter invalid
self.valid_filter = False
try:
# Format the new filter
filter_str = self._original_filter.format(
**self._component_context.properties
)
except KeyError as ex:
# An entry is missing: abandon
logging.warning("Missing filter value: %s", ex)
raise ValueError("Missing filter value")
try:
# Parse the new LDAP filter
new_filter = ldapfilter.get_ldap_filter(filter_str)
except (TypeError, ValueError) as ex:
logging.warning("Error parsing filter: %s", ex)
raise ValueError("Error parsing filter")
# The filter is valid
self.valid_filter = True
# Compare to the "old" one
if new_filter != self.requirement.filter:
# Replace the requirement filter
self.requirement.filter = new_filter
return True
# Same filter
return False | python | def update_filter(self):
"""
Update the filter according to the new properties
:return: True if the filter changed, else False
:raise ValueError: The filter is invalid
"""
# Consider the filter invalid
self.valid_filter = False
try:
# Format the new filter
filter_str = self._original_filter.format(
**self._component_context.properties
)
except KeyError as ex:
# An entry is missing: abandon
logging.warning("Missing filter value: %s", ex)
raise ValueError("Missing filter value")
try:
# Parse the new LDAP filter
new_filter = ldapfilter.get_ldap_filter(filter_str)
except (TypeError, ValueError) as ex:
logging.warning("Error parsing filter: %s", ex)
raise ValueError("Error parsing filter")
# The filter is valid
self.valid_filter = True
# Compare to the "old" one
if new_filter != self.requirement.filter:
# Replace the requirement filter
self.requirement.filter = new_filter
return True
# Same filter
return False | [
"def",
"update_filter",
"(",
"self",
")",
":",
"# Consider the filter invalid",
"self",
".",
"valid_filter",
"=",
"False",
"try",
":",
"# Format the new filter",
"filter_str",
"=",
"self",
".",
"_original_filter",
".",
"format",
"(",
"*",
"*",
"self",
".",
"_component_context",
".",
"properties",
")",
"except",
"KeyError",
"as",
"ex",
":",
"# An entry is missing: abandon",
"logging",
".",
"warning",
"(",
"\"Missing filter value: %s\"",
",",
"ex",
")",
"raise",
"ValueError",
"(",
"\"Missing filter value\"",
")",
"try",
":",
"# Parse the new LDAP filter",
"new_filter",
"=",
"ldapfilter",
".",
"get_ldap_filter",
"(",
"filter_str",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
"as",
"ex",
":",
"logging",
".",
"warning",
"(",
"\"Error parsing filter: %s\"",
",",
"ex",
")",
"raise",
"ValueError",
"(",
"\"Error parsing filter\"",
")",
"# The filter is valid",
"self",
".",
"valid_filter",
"=",
"True",
"# Compare to the \"old\" one",
"if",
"new_filter",
"!=",
"self",
".",
"requirement",
".",
"filter",
":",
"# Replace the requirement filter",
"self",
".",
"requirement",
".",
"filter",
"=",
"new_filter",
"return",
"True",
"# Same filter",
"return",
"False"
] | Update the filter according to the new properties
:return: True if the filter changed, else False
:raise ValueError: The filter is invalid | [
"Update",
"the",
"filter",
"according",
"to",
"the",
"new",
"properties"
] | train | https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/ipopo/handlers/requiresvarfilter.py#L173-L210 | 0.001623 |
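The core of the mixin is a substitute-then-parse step, sketched standalone below; the property names and filter template are hypothetical, while `get_ldap_filter` is the same `pelix.ldapfilter` helper the method calls.

from pelix.ldapfilter import get_ldap_filter

properties = {'lang': 'python', 'level': 3}       # stand-in for component properties
template = '(&(language={lang})(level={level}))'  # hypothetical variable filter
ldap_filter = get_ldap_filter(template.format(**properties))
# A missing key raises KeyError (mapped to ValueError above); a malformed result
# makes get_ldap_filter raise, which the method also maps to ValueError.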
siemens/django-mantis-stix-importer | mantis_stix_importer/importer.py | STIX_Import.cybox_defined_object_in_fact_term_handler | def cybox_defined_object_in_fact_term_handler(self, enrichment, fact, attr_info, add_fact_kargs):
"""
From CybOX 1.x to Cybox 2.0.0, there was a structural change in the way
observable properties are included in the XML: in CybOX 1.x, they
were embedded in an element called 'Defined_Object' -- since CybOX 2.x,
we have the 'Properties' element. Here, we rename occurrences of 'Defined_Object'
in a fact term with 'Properties'. As a result, fact terms for, e.g.,
'Properties/Header/To/Recipient/AddressValue' for an Email object are the same
for imports from Cybox 1.x and Cybox 2.x.
"""
add_fact_kargs['fact_term_name'] = self.RE_DEFINED_OBJECT.sub('Properties', fact['term'])
return True | python | def cybox_defined_object_in_fact_term_handler(self, enrichment, fact, attr_info, add_fact_kargs):
"""
From CybOX 1.x to Cybox 2.0.0, there was a structural change in the way
observable properties are included in the XML: in CybOX 1.x, they
were embedded in an element called 'Defined_Object' -- since CybOX 2.x,
we have the 'Properties' element. Here, we rename occurrences of 'Defined_Object'
in a fact term with 'Properties'. As a result, fact terms for, e.g.,
'Properties/Header/To/Recipient/AddressValue' for an Email object are the same
for imports from Cybox 1.x and Cybox 2.x.
"""
add_fact_kargs['fact_term_name'] = self.RE_DEFINED_OBJECT.sub('Properties', fact['term'])
return True | [
"def",
"cybox_defined_object_in_fact_term_handler",
"(",
"self",
",",
"enrichment",
",",
"fact",
",",
"attr_info",
",",
"add_fact_kargs",
")",
":",
"add_fact_kargs",
"[",
"'fact_term_name'",
"]",
"=",
"self",
".",
"RE_DEFINED_OBJECT",
".",
"sub",
"(",
"'Properties'",
",",
"fact",
"[",
"'term'",
"]",
")",
"return",
"True"
] | From CybOX 1.x to Cybox 2.0.0, there was a structural change in the way
observable properties are included in the XML: in CybOX 1.x, they
were embedded in an element called 'Defined_Object' -- since CybOX 2.x,
we have the 'Properties' element. Here, we rename occurrences of 'Defined_Object'
in a fact term with 'Properties'. As a result, fact terms for, e.g.,
'Properties/Header/To/Recipient/AddressValue' for an Email object are the same
for imports from Cybox 1.x and Cybox 2.x. | [
"From",
"CybOX",
"1",
".",
"x",
"to",
"Cybox",
"2",
".",
"0",
".",
"0",
"there",
"was",
"a",
"structural",
"change",
"in",
"the",
"way",
"observable",
"properties",
"are",
"included",
"in",
"the",
"XML",
":",
"in",
"CybOX",
"1",
".",
"x",
"they",
"were",
"embedded",
"in",
"an",
"element",
"called",
"Defined_Object",
"--",
"since",
"CybOX",
"2",
".",
"x",
"we",
"have",
"the",
"Properties",
"element",
".",
"Here",
"we",
"rename",
"occurrences",
"of",
"Defined_Object",
"in",
"a",
"fact",
"term",
"with",
"Properties",
".",
"As",
"a",
"result",
"fact",
"terms",
"for",
"e",
".",
"g",
".",
"Properties",
"/",
"Header",
"/",
"To",
"/",
"Recipient",
"/",
"AddressValue",
"for",
"an",
"Email",
"object",
"are",
"the",
"same",
"for",
"imports",
"from",
"Cybox",
"1",
".",
"x",
"and",
"Cybox",
"2",
".",
"x",
"."
] | train | https://github.com/siemens/django-mantis-stix-importer/blob/20f5709e068101dad299f58134513d8873c91ba5/mantis_stix_importer/importer.py#L949-L960 | 0.007722 |
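The handler reduces to one regex substitution over the fact term. A standalone sketch; the compiled pattern is assumed to be a plain literal match, since the actual `RE_DEFINED_OBJECT` attribute is not shown in this record.

import re

RE_DEFINED_OBJECT = re.compile(r'Defined_Object')  # assumed shape of the class attribute
term = 'Defined_Object/Header/To/Recipient/Address_Value'
print(RE_DEFINED_OBJECT.sub('Properties', term))
# -> Properties/Header/To/Recipient/Address_Value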
potatolondon/gae-pytz | makezoneinfo.py | extract_tar | def extract_tar(fileobj):
"""Yields 3-tuples of (name, modified, bytes)."""
import time
archive = tarfile.open(fileobj=fileobj)
filenames = [info.name for info in archive.getmembers() if info.isfile()]
for src_name, dst_name in filter_tzfiles(filenames):
mtime = archive.getmember(src_name).mtime
modified = tuple(time.gmtime(mtime)[:6])
bytes = archive.extractfile(src_name).read()
yield dst_name, modified, bytes | python | def extract_tar(fileobj):
"""Yields 3-tuples of (name, modified, bytes)."""
import time
archive = tarfile.open(fileobj=fileobj)
filenames = [info.name for info in archive.getmembers() if info.isfile()]
for src_name, dst_name in filter_tzfiles(filenames):
mtime = archive.getmember(src_name).mtime
modified = tuple(time.gmtime(mtime)[:6])
bytes = archive.extractfile(src_name).read()
yield dst_name, modified, bytes | [
"def",
"extract_tar",
"(",
"fileobj",
")",
":",
"import",
"time",
"archive",
"=",
"tarfile",
".",
"open",
"(",
"fileobj",
"=",
"fileobj",
")",
"filenames",
"=",
"[",
"info",
".",
"name",
"for",
"info",
"in",
"archive",
".",
"getmembers",
"(",
")",
"if",
"info",
".",
"isfile",
"(",
")",
"]",
"for",
"src_name",
",",
"dst_name",
"in",
"filter_tzfiles",
"(",
"filenames",
")",
":",
"mtime",
"=",
"archive",
".",
"getmember",
"(",
"src_name",
")",
".",
"mtime",
"modified",
"=",
"tuple",
"(",
"time",
".",
"gmtime",
"(",
"mtime",
")",
"[",
":",
"6",
"]",
")",
"bytes",
"=",
"archive",
".",
"extractfile",
"(",
"src_name",
")",
".",
"read",
"(",
")",
"yield",
"dst_name",
",",
"modified",
",",
"bytes"
] | Yields 3-tuples of (name, modified, bytes). | [
"Yields",
"3",
"-",
"tuples",
"of",
"(",
"name",
"modified",
"bytes",
")",
"."
] | train | https://github.com/potatolondon/gae-pytz/blob/24741951a7af3e79cd8727ae3f79265decc93fef/makezoneinfo.py#L37-L49 | 0.002132 |
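A usage sketch, assuming `extract_tar` (and the `filter_tzfiles` helper it calls) from makezoneinfo.py are in scope; the tarball filename is hypothetical.

with open('tzdata-latest.tar.gz', 'rb') as f:   # hypothetical IANA tz tarball
    for name, modified, data in extract_tar(f):
        print('%s %s %d bytes' % (name, modified, len(data)))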
SuperCowPowers/workbench | workbench/server/data_store.py | DataStore.sample_storage_size | def sample_storage_size(self):
"""Get the storage size of the samples storage collection."""
try:
coll_stats = self.database.command('collStats', 'fs.chunks')
sample_storage_size = coll_stats['size']/1024.0/1024.0
return sample_storage_size
except pymongo.errors.OperationFailure:
return 0 | python | def sample_storage_size(self):
"""Get the storage size of the samples storage collection."""
try:
coll_stats = self.database.command('collStats', 'fs.chunks')
sample_storage_size = coll_stats['size']/1024.0/1024.0
return sample_storage_size
except pymongo.errors.OperationFailure:
return 0 | [
"def",
"sample_storage_size",
"(",
"self",
")",
":",
"try",
":",
"coll_stats",
"=",
"self",
".",
"database",
".",
"command",
"(",
"'collStats'",
",",
"'fs.chunks'",
")",
"sample_storage_size",
"=",
"coll_stats",
"[",
"'size'",
"]",
"/",
"1024.0",
"/",
"1024.0",
"return",
"sample_storage_size",
"except",
"pymongo",
".",
"errors",
".",
"OperationFailure",
":",
"return",
"0"
] | Get the storage size of the samples storage collection. | [
"Get",
"the",
"storage",
"size",
"of",
"the",
"samples",
"storage",
"collection",
"."
] | train | https://github.com/SuperCowPowers/workbench/blob/710232756dd717f734253315e3d0b33c9628dafb/workbench/server/data_store.py#L104-L112 | 0.005525 |
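The same collStats query outside the class, as a standalone pymongo sketch; the MongoDB host and database name are hypothetical.

import pymongo

client = pymongo.MongoClient()   # hypothetical local MongoDB
db = client['workbench']         # hypothetical database name
try:
    size_mb = db.command('collStats', 'fs.chunks')['size'] / 1024.0 / 1024.0
except pymongo.errors.OperationFailure:   # collection or stats unavailable
    size_mb = 0
print('sample storage: %.2f MB' % size_mb)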
biolink/ontobio | ontobio/ontol.py | Ontology.all_synonyms | def all_synonyms(self, include_label=False):
"""
Retrieves all synonyms
Arguments
---------
include_label : bool
If True, include label/names as Synonym objects
Returns
-------
list[Synonym]
:class:`Synonym` objects
"""
syns = []
for n in self.nodes():
syns = syns + self.synonyms(n, include_label=include_label)
return syns | python | def all_synonyms(self, include_label=False):
"""
Retrieves all synonyms
Arguments
---------
include_label : bool
If True, include label/names as Synonym objects
Returns
-------
list[Synonym]
:class:`Synonym` objects
"""
syns = []
for n in self.nodes():
syns = syns + self.synonyms(n, include_label=include_label)
return syns | [
"def",
"all_synonyms",
"(",
"self",
",",
"include_label",
"=",
"False",
")",
":",
"syns",
"=",
"[",
"]",
"for",
"n",
"in",
"self",
".",
"nodes",
"(",
")",
":",
"syns",
"=",
"syns",
"+",
"self",
".",
"synonyms",
"(",
"n",
",",
"include_label",
"=",
"include_label",
")",
"return",
"syns"
] | Retrieves all synonyms
Arguments
---------
include_label : bool
If True, include label/names as Synonym objects
Returns
-------
list[Synonym]
:class:`Synonym` objects | [
"Retrieves",
"all",
"synonyms"
] | train | https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/ontol.py#L865-L882 | 0.004367 |
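A usage sketch assuming the usual ontobio factory entry point; creating the ontology may fetch it over the network, and the handle is hypothetical.

from ontobio.ontol_factory import OntologyFactory

ont = OntologyFactory().create('go')   # hypothetical handle for the Gene Ontology
for syn in ont.all_synonyms(include_label=True)[:5]:
    print(syn)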
ejeschke/ginga | ginga/rv/plugins/WCSMatch.py | WCSMatch.xfmset_cb | def xfmset_cb(self, setting, value, chviewer, info):
"""This callback is called when a channel window is transformed
(flipped, or swap axes).
"""
return self.xfmset(chviewer, info.chinfo) | python | def xfmset_cb(self, setting, value, chviewer, info):
"""This callback is called when a channel window is transformed
(flipped, or swap axes).
"""
return self.xfmset(chviewer, info.chinfo) | [
"def",
"xfmset_cb",
"(",
"self",
",",
"setting",
",",
"value",
",",
"chviewer",
",",
"info",
")",
":",
"return",
"self",
".",
"xfmset",
"(",
"chviewer",
",",
"info",
".",
"chinfo",
")"
] | This callback is called when a channel window is transformed
(flipped, or swap axes). | [
"This",
"callback",
"is",
"called",
"when",
"a",
"channel",
"window",
"is",
"transformed",
"(",
"flipped",
"or",
"swap",
"axes",
")",
"."
] | train | https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/rv/plugins/WCSMatch.py#L402-L406 | 0.009132 |
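A hedged sketch of how such a callback is typically wired up inside the plugin, assuming Ginga's usual transform setting names; Ginga invokes the callback as callback(setting, value, *args), which matches the (setting, value, chviewer, info) signature above.

settings = chviewer.get_settings()
for name in ('flip_x', 'flip_y', 'swap_xy'):   # assumed transform setting names
    settings.get_setting(name).add_callback('set', self.xfmset_cb, chviewer, info)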
gem/oq-engine | openquake/hazardlib/gsim/chiou_youngs_2014.py | ChiouYoungs2014._get_mean | def _get_mean(self, sites, C, ln_y_ref, exp1, exp2):
"""
Add site effects to an intensity.
Implements eq. 13b.
"""
# we do not support estimating of basin depth and instead
# rely on it being available (since we require it).
# centered_z1pt0
centered_z1pt0 = self._get_centered_z1pt0(sites)
# we consider random variables being zero since we want
# to find the exact mean value.
eta = epsilon = 0.
ln_y = (
# first line of eq. 12
ln_y_ref + eta
# second line
+ C['phi1'] * np.log(sites.vs30 / 1130).clip(-np.inf, 0)
# third line
+ C['phi2'] * (exp1 - exp2)
* np.log((np.exp(ln_y_ref) * np.exp(eta) + C['phi4']) / C['phi4'])
# fourth line
+ C['phi5']
* (1.0 - np.exp(-1. * centered_z1pt0 / C['phi6']))
# fifth line
+ epsilon
)
return ln_y | python | def _get_mean(self, sites, C, ln_y_ref, exp1, exp2):
"""
Add site effects to an intensity.
Implements eq. 13b.
"""
# we do not support estimating of basin depth and instead
# rely on it being available (since we require it).
# centered_z1pt0
centered_z1pt0 = self._get_centered_z1pt0(sites)
# we consider random variables being zero since we want
# to find the exact mean value.
eta = epsilon = 0.
ln_y = (
# first line of eq. 12
ln_y_ref + eta
# second line
+ C['phi1'] * np.log(sites.vs30 / 1130).clip(-np.inf, 0)
# third line
+ C['phi2'] * (exp1 - exp2)
* np.log((np.exp(ln_y_ref) * np.exp(eta) + C['phi4']) / C['phi4'])
# fourth line
+ C['phi5']
* (1.0 - np.exp(-1. * centered_z1pt0 / C['phi6']))
# fifth line
+ epsilon
)
return ln_y | [
"def",
"_get_mean",
"(",
"self",
",",
"sites",
",",
"C",
",",
"ln_y_ref",
",",
"exp1",
",",
"exp2",
")",
":",
"# we do not support estimating of basin depth and instead",
"# rely on it being available (since we require it).",
"# centered_z1pt0",
"centered_z1pt0",
"=",
"self",
".",
"_get_centered_z1pt0",
"(",
"sites",
")",
"# we consider random variables being zero since we want",
"# to find the exact mean value.",
"eta",
"=",
"epsilon",
"=",
"0.",
"ln_y",
"=",
"(",
"# first line of eq. 12",
"ln_y_ref",
"+",
"eta",
"# second line",
"+",
"C",
"[",
"'phi1'",
"]",
"*",
"np",
".",
"log",
"(",
"sites",
".",
"vs30",
"/",
"1130",
")",
".",
"clip",
"(",
"-",
"np",
".",
"inf",
",",
"0",
")",
"# third line",
"+",
"C",
"[",
"'phi2'",
"]",
"*",
"(",
"exp1",
"-",
"exp2",
")",
"*",
"np",
".",
"log",
"(",
"(",
"np",
".",
"exp",
"(",
"ln_y_ref",
")",
"*",
"np",
".",
"exp",
"(",
"eta",
")",
"+",
"C",
"[",
"'phi4'",
"]",
")",
"/",
"C",
"[",
"'phi4'",
"]",
")",
"# fourth line",
"+",
"C",
"[",
"'phi5'",
"]",
"*",
"(",
"1.0",
"-",
"np",
".",
"exp",
"(",
"-",
"1.",
"*",
"centered_z1pt0",
"/",
"C",
"[",
"'phi6'",
"]",
")",
")",
"# fifth line",
"+",
"epsilon",
")",
"return",
"ln_y"
] | Add site effects to an intensity.
Implements eq. 13b. | [
"Add",
"site",
"effects",
"to",
"an",
"intensity",
"."
] | train | https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/chiou_youngs_2014.py#L93-L122 | 0.002008 |
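Transcribing the line-by-line computation above into one expression (with the random terms eta = epsilon = 0 for the mean, e1/e2 the precomputed exp1/exp2 arguments, and Delta Z_{1.0} the centered basin depth), the returned value is, in LaTeX:

\ln y = \ln y_{\mathrm{ref}}
      + \phi_1 \min\big(\ln(V_{S30}/1130),\, 0\big)
      + \phi_2 (e_1 - e_2)\,\ln\frac{e^{\ln y_{\mathrm{ref}}} + \phi_4}{\phi_4}
      + \phi_5 \big(1 - e^{-\Delta Z_{1.0}/\phi_6}\big)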
scanny/python-pptx | pptx/shapes/shapetree.py | SlideShapeFactory | def SlideShapeFactory(shape_elm, parent):
"""
Return an instance of the appropriate shape proxy class for *shape_elm*
on a slide.
"""
if shape_elm.has_ph_elm:
return _SlidePlaceholderFactory(shape_elm, parent)
return BaseShapeFactory(shape_elm, parent) | python | def SlideShapeFactory(shape_elm, parent):
"""
Return an instance of the appropriate shape proxy class for *shape_elm*
on a slide.
"""
if shape_elm.has_ph_elm:
return _SlidePlaceholderFactory(shape_elm, parent)
return BaseShapeFactory(shape_elm, parent) | [
"def",
"SlideShapeFactory",
"(",
"shape_elm",
",",
"parent",
")",
":",
"if",
"shape_elm",
".",
"has_ph_elm",
":",
"return",
"_SlidePlaceholderFactory",
"(",
"shape_elm",
",",
"parent",
")",
"return",
"BaseShapeFactory",
"(",
"shape_elm",
",",
"parent",
")"
] | Return an instance of the appropriate shape proxy class for *shape_elm*
on a slide. | [
"Return",
"an",
"instance",
"of",
"the",
"appropriate",
"shape",
"proxy",
"class",
"for",
"*",
"shape_elm",
"*",
"on",
"a",
"slide",
"."
] | train | https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/pptx/shapes/shapetree.py#L812-L819 | 0.003521 |
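The factory runs behind the public shapes API; a usage sketch with a hypothetical file, where every proxy yielded by the iterator comes through SlideShapeFactory.

from pptx import Presentation

prs = Presentation('deck.pptx')     # hypothetical presentation file
for shape in prs.slides[0].shapes:
    print(shape.shape_id, shape.name, shape.is_placeholder)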
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/publisher/plos.py | PLoS.make_article_info_dates | def make_article_info_dates(self):
"""
Makes the section containing important dates for the article: typically
Received, Accepted, and Published.
"""
dates_div = etree.Element('div', {'id': 'article-dates'})
d = './front/article-meta/history/date'
received = self.article.root.xpath(d + "[@date-type='received']")
accepted = self.article.root.xpath(d + "[@date-type='accepted']")
if received:
b = etree.SubElement(dates_div, 'b')
b.text = 'Received: '
dt = self.date_tuple_from_date(received[0], 'Received')
formatted_date_string = self.format_date_string(dt)
append_new_text(dates_div, formatted_date_string + '; ')
if accepted:
b = etree.SubElement(dates_div, 'b')
b.text = 'Accepted: '
dt = self.date_tuple_from_date(accepted[0], 'Accepted')
formatted_date_string = self.format_date_string(dt)
append_new_text(dates_div, formatted_date_string + '; ')
#Published date is required
pub_date = self.article.root.xpath("./front/article-meta/pub-date[@pub-type='epub']")[0]
b = etree.SubElement(dates_div, 'b')
b.text = 'Published: '
dt = self.date_tuple_from_date(pub_date, 'Published')
formatted_date_string = self.format_date_string(dt)
append_new_text(dates_div, formatted_date_string)
return dates_div | python | def make_article_info_dates(self):
"""
Makes the section containing important dates for the article: typically
Received, Accepted, and Published.
"""
dates_div = etree.Element('div', {'id': 'article-dates'})
d = './front/article-meta/history/date'
received = self.article.root.xpath(d + "[@date-type='received']")
accepted = self.article.root.xpath(d + "[@date-type='accepted']")
if received:
b = etree.SubElement(dates_div, 'b')
b.text = 'Received: '
dt = self.date_tuple_from_date(received[0], 'Received')
formatted_date_string = self.format_date_string(dt)
append_new_text(dates_div, formatted_date_string + '; ')
if accepted:
b = etree.SubElement(dates_div, 'b')
b.text = 'Accepted: '
dt = self.date_tuple_from_date(accepted[0], 'Accepted')
formatted_date_string = self.format_date_string(dt)
append_new_text(dates_div, formatted_date_string + '; ')
#Published date is required
pub_date = self.article.root.xpath("./front/article-meta/pub-date[@pub-type='epub']")[0]
b = etree.SubElement(dates_div, 'b')
b.text = 'Published: '
dt = self.date_tuple_from_date(pub_date, 'Published')
formatted_date_string = self.format_date_string(dt)
append_new_text(dates_div, formatted_date_string)
return dates_div | [
"def",
"make_article_info_dates",
"(",
"self",
")",
":",
"dates_div",
"=",
"etree",
".",
"Element",
"(",
"'div'",
",",
"{",
"'id'",
":",
"'article-dates'",
"}",
")",
"d",
"=",
"'./front/article-meta/history/date'",
"received",
"=",
"self",
".",
"article",
".",
"root",
".",
"xpath",
"(",
"d",
"+",
"\"[@date-type='received']\"",
")",
"accepted",
"=",
"self",
".",
"article",
".",
"root",
".",
"xpath",
"(",
"d",
"+",
"\"[@date-type='accepted']\"",
")",
"if",
"received",
":",
"b",
"=",
"etree",
".",
"SubElement",
"(",
"dates_div",
",",
"'b'",
")",
"b",
".",
"text",
"=",
"'Received: '",
"dt",
"=",
"self",
".",
"date_tuple_from_date",
"(",
"received",
"[",
"0",
"]",
",",
"'Received'",
")",
"formatted_date_string",
"=",
"self",
".",
"format_date_string",
"(",
"dt",
")",
"append_new_text",
"(",
"dates_div",
",",
"formatted_date_string",
"+",
"'; '",
")",
"if",
"accepted",
":",
"b",
"=",
"etree",
".",
"SubElement",
"(",
"dates_div",
",",
"'b'",
")",
"b",
".",
"text",
"=",
"'Accepted: '",
"dt",
"=",
"self",
".",
"date_tuple_from_date",
"(",
"accepted",
"[",
"0",
"]",
",",
"'Accepted'",
")",
"formatted_date_string",
"=",
"self",
".",
"format_date_string",
"(",
"dt",
")",
"append_new_text",
"(",
"dates_div",
",",
"formatted_date_string",
"+",
"'; '",
")",
"#Published date is required",
"pub_date",
"=",
"self",
".",
"article",
".",
"root",
".",
"xpath",
"(",
"\"./front/article-meta/pub-date[@pub-type='epub']\"",
")",
"[",
"0",
"]",
"b",
"=",
"etree",
".",
"SubElement",
"(",
"dates_div",
",",
"'b'",
")",
"b",
".",
"text",
"=",
"'Published: '",
"dt",
"=",
"self",
".",
"date_tuple_from_date",
"(",
"pub_date",
",",
"'Published'",
")",
"formatted_date_string",
"=",
"self",
".",
"format_date_string",
"(",
"dt",
")",
"append_new_text",
"(",
"dates_div",
",",
"formatted_date_string",
")",
"return",
"dates_div"
] | Makes the section containing important dates for the article: typically
Received, Accepted, and Published. | [
"Makes",
"the",
"section",
"containing",
"important",
"dates",
"for",
"the",
"article",
":",
"typically",
"Received",
"Accepted",
"and",
"Published",
"."
] | train | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/publisher/plos.py#L524-L554 | 0.002723 |
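The method above builds an HTML dates block by alternating bold <b> labels with plain tail text. A dependency-free sketch of the same pattern is below, using the standard library's xml.etree.ElementTree instead of lxml; the date strings are hypothetical, and append_new_text is re-implemented inline since its definition lies outside this record.

import xml.etree.ElementTree as ET

def append_new_text(parent, text):
    # Append text after the last child (as its tail), or to the element
    # itself if it has no children yet.
    children = list(parent)
    if children:
        children[-1].tail = (children[-1].tail or '') + text
    else:
        parent.text = (parent.text or '') + text

dates_div = ET.Element('div', {'id': 'article-dates'})
# Hypothetical formatted dates standing in for format_date_string output.
for label, datestr, sep in [('Received: ', '3 Jan 2013', '; '),
                            ('Accepted: ', '5 Mar 2013', '; '),
                            ('Published: ', '30 Apr 2013', '')]:
    b = ET.SubElement(dates_div, 'b')
    b.text = label
    append_new_text(dates_div, datestr + sep)

print(ET.tostring(dates_div, encoding='unicode'))
# <div id="article-dates"><b>Received: </b>3 Jan 2013; <b>Accepted: </b>...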
KarchinLab/probabilistic2020 | prob2020/python/permutation.py | effect_permutation | def effect_permutation(context_counts,
context_to_mut,
seq_context,
gene_seq,
num_permutations=10000,
pseudo_count=0):
"""Performs null-permutations for effect-based mutation statistics
in a single gene.
Parameters
----------
context_counts : pd.Series
number of mutations for each context
context_to_mut : dict
dictionary mapping nucleotide context to a list of observed
somatic base changes.
seq_context : SequenceContext
Sequence context for the entire gene sequence (regardless
of where mutations occur). The nucleotide contexts are
identified at positions along the gene.
gene_seq : GeneSequence
Sequence of gene of interest
num_permutations : int, default: 10000
number of permutations to create for null
pseudo_count : int, default: 0
Pseudo-count for number of recurrent missense mutations for each
permutation for the null distribution. Increasing pseudo_count
makes the statistical test more stringent.
Returns
-------
effect_entropy_list : list
list of entropy of effect values under the null
recur_list : list
number of recurrent missense mutations
inactivating_list : list
number of inactivating mutations
"""
mycontexts = context_counts.index.tolist()
somatic_base = [base
for one_context in mycontexts
for base in context_to_mut[one_context]]
# get random positions determined by sequence context
tmp_contxt_pos = seq_context.random_pos(context_counts.iteritems(),
num_permutations)
tmp_mut_pos = np.hstack(pos_array for base, pos_array in tmp_contxt_pos)
# calculate position-based statistics as a result of random positions
effect_entropy_list, recur_list, inactivating_list = [], [], []
for row in tmp_mut_pos:
# get info about mutations
tmp_mut_info = mc.get_aa_mut_info(row,
somatic_base,
gene_seq)
# calculate position info
tmp_entropy, tmp_recur, tmp_inactivating = cutils.calc_effect_info(tmp_mut_info['Codon Pos'],
tmp_mut_info['Reference AA'],
tmp_mut_info['Somatic AA'],
pseudo_count=pseudo_count,
is_obs=0)
effect_entropy_list.append(tmp_entropy)
recur_list.append(tmp_recur)
inactivating_list.append(tmp_inactivating)
return effect_entropy_list, recur_list, inactivating_list | python | def effect_permutation(context_counts,
context_to_mut,
seq_context,
gene_seq,
num_permutations=10000,
pseudo_count=0):
"""Performs null-permutations for effect-based mutation statistics
in a single gene.
Parameters
----------
context_counts : pd.Series
number of mutations for each context
context_to_mut : dict
dictionary mapping nucleotide context to a list of observed
somatic base changes.
seq_context : SequenceContext
Sequence context for the entire gene sequence (regardless
of where mutations occur). The nucleotide contexts are
identified at positions along the gene.
gene_seq : GeneSequence
Sequence of gene of interest
num_permutations : int, default: 10000
number of permutations to create for null
pseudo_count : int, default: 0
Pseudo-count for number of recurrent missense mutations for each
permutation for the null distribution. Increasing pseudo_count
makes the statistical test more stringent.
Returns
-------
effect_entropy_list : list
list of entropy of effect values under the null
recur_list : list
number of recurrent missense mutations
inactivating_list : list
number of inactivating mutations
"""
mycontexts = context_counts.index.tolist()
somatic_base = [base
for one_context in mycontexts
for base in context_to_mut[one_context]]
# get random positions determined by sequence context
tmp_contxt_pos = seq_context.random_pos(context_counts.iteritems(),
num_permutations)
tmp_mut_pos = np.hstack(pos_array for base, pos_array in tmp_contxt_pos)
# calculate position-based statistics as a result of random positions
effect_entropy_list, recur_list, inactivating_list = [], [], []
for row in tmp_mut_pos:
# get info about mutations
tmp_mut_info = mc.get_aa_mut_info(row,
somatic_base,
gene_seq)
# calculate position info
tmp_entropy, tmp_recur, tmp_inactivating = cutils.calc_effect_info(tmp_mut_info['Codon Pos'],
tmp_mut_info['Reference AA'],
tmp_mut_info['Somatic AA'],
pseudo_count=pseudo_count,
is_obs=0)
effect_entropy_list.append(tmp_entropy)
recur_list.append(tmp_recur)
inactivating_list.append(tmp_inactivating)
return effect_entropy_list, recur_list, inactivating_list | [
"def",
"effect_permutation",
"(",
"context_counts",
",",
"context_to_mut",
",",
"seq_context",
",",
"gene_seq",
",",
"num_permutations",
"=",
"10000",
",",
"pseudo_count",
"=",
"0",
")",
":",
"mycontexts",
"=",
"context_counts",
".",
"index",
".",
"tolist",
"(",
")",
"somatic_base",
"=",
"[",
"base",
"for",
"one_context",
"in",
"mycontexts",
"for",
"base",
"in",
"context_to_mut",
"[",
"one_context",
"]",
"]",
"# get random positions determined by sequence context",
"tmp_contxt_pos",
"=",
"seq_context",
".",
"random_pos",
"(",
"context_counts",
".",
"iteritems",
"(",
")",
",",
"num_permutations",
")",
"tmp_mut_pos",
"=",
"np",
".",
"hstack",
"(",
"pos_array",
"for",
"base",
",",
"pos_array",
"in",
"tmp_contxt_pos",
")",
"# calculate position-based statistics as a result of random positions",
"effect_entropy_list",
",",
"recur_list",
",",
"inactivating_list",
"=",
"[",
"]",
",",
"[",
"]",
",",
"[",
"]",
"for",
"row",
"in",
"tmp_mut_pos",
":",
"# get info about mutations",
"tmp_mut_info",
"=",
"mc",
".",
"get_aa_mut_info",
"(",
"row",
",",
"somatic_base",
",",
"gene_seq",
")",
"# calculate position info",
"tmp_entropy",
",",
"tmp_recur",
",",
"tmp_inactivating",
"=",
"cutils",
".",
"calc_effect_info",
"(",
"tmp_mut_info",
"[",
"'Codon Pos'",
"]",
",",
"tmp_mut_info",
"[",
"'Reference AA'",
"]",
",",
"tmp_mut_info",
"[",
"'Somatic AA'",
"]",
",",
"pseudo_count",
"=",
"pseudo_count",
",",
"is_obs",
"=",
"0",
")",
"effect_entropy_list",
".",
"append",
"(",
"tmp_entropy",
")",
"recur_list",
".",
"append",
"(",
"tmp_recur",
")",
"inactivating_list",
".",
"append",
"(",
"tmp_inactivating",
")",
"return",
"effect_entropy_list",
",",
"recur_list",
",",
"inactivating_list"
] | Performs null-permutations for effect-based mutation statistics
in a single gene.
Parameters
----------
context_counts : pd.Series
number of mutations for each context
context_to_mut : dict
dictionary mapping nucleotide context to a list of observed
somatic base changes.
seq_context : SequenceContext
Sequence context for the entire gene sequence (regardless
of where mutations occur). The nucleotide contexts are
identified at positions along the gene.
gene_seq : GeneSequence
Sequence of gene of interest
num_permutations : int, default: 10000
number of permutations to create for null
pseudo_count : int, default: 0
Pseudo-count for number of recurrent missense mutations for each
permutation for the null distribution. Increasing pseudo_count
makes the statistical test more stringent.
Returns
-------
effect_entropy_list : list
list of entropy of effect values under the null
recur_list : list
number of recurrent missense mutations
inactivating_list : list
number of inactivating mutations | [
"Performs",
"null",
"-",
"permutations",
"for",
"effect",
"-",
"based",
"mutation",
"statistics",
"in",
"a",
"single",
"gene",
"."
] | train | https://github.com/KarchinLab/probabilistic2020/blob/5d70583b0a7c07cfe32e95f3a70e05df412acb84/prob2020/python/permutation.py#L486-L552 | 0.002027 |
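The routine above builds a null distribution by repeatedly drawing context-conditioned random mutation positions and scoring each draw. A dependency-free sketch of that loop follows; Shannon entropy stands in for calc_effect_info, and the context-to-position map is a hypothetical simplification of the SequenceContext machinery, not the package's API.

import math
import random
from collections import Counter

def entropy(positions):
    # Shannon entropy of the position distribution for one permutation.
    counts = Counter(positions)
    total = sum(counts.values())
    return -sum((c / total) * math.log2(c / total) for c in counts.values())

def null_permutation(context_counts, context_positions,
                     num_permutations=1000, seed=0):
    # For each permutation, draw the observed number of mutations per
    # context from that context's eligible positions, then score the draw.
    rng = random.Random(seed)
    null_scores = []
    for _ in range(num_permutations):
        positions = []
        for context, n in context_counts.items():
            positions.extend(rng.choices(context_positions[context], k=n))
        null_scores.append(entropy(positions))
    return null_scores

# Toy data: two trinucleotide contexts with hypothetical eligible positions.
context_counts = {'ACA': 3, 'TCG': 2}
context_positions = {'ACA': [4, 9, 17, 30], 'TCG': [2, 22, 41]}
print(null_permutation(context_counts, context_positions, num_permutations=5))

Conditioning the random positions on sequence context, as the recorded function does, keeps the null distribution faithful to the gene's mutational opportunity rather than assuming uniform placement.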