| repository_name (stringlengths 7-55) | func_path_in_repository (stringlengths 4-223) | func_name (stringlengths 1-134) | whole_func_string (stringlengths 75-104k) | language (stringclasses: 1 value) | func_code_string (stringlengths 75-104k) | func_code_tokens (sequencelengths 19-28.4k) | func_documentation_string (stringlengths 1-46.9k) | func_documentation_tokens (sequencelengths 1-1.97k) | split_name (stringclasses: 1 value) | func_code_url (stringlengths 87-315) |
|---|---|---|---|---|---|---|---|---|---|---|
RI-imaging/nrefocus | nrefocus/_autofocus.py | autofocus | def autofocus(field, nm, res, ival, roi=None,
metric="average gradient", padding=True,
ret_d=False, ret_grad=False, num_cpus=1):
"""Numerical autofocusing of a field using the Helmholtz equation.
Parameters
----------
field : 1d or 2d ndarray
Electric field is BG-Corrected, i.e. field = EX/BEx
nm : float
Refractive index of medium.
res : float
Size of wavelength in pixels.
ival : tuple of floats
Approximate interval to search for optimal focus in px.
roi : rectangular region of interest (x1, y1, x2, y2)
Region of interest of `field` for which the metric will be
minimized. If not given, the entire `field` will be used.
metric : str
- "average gradient" : average gradient metric of amplitude
- "rms contrast" : RMS contrast of phase data
- "spectrum" : sum of filtered Fourier coefficients
padding: bool
Perform padding with linear ramp from edge to average
to reduce ringing artifacts.
.. versionchanged:: 0.1.4
improved padding value and padding location
ret_d : bool
Return the autofocusing distance in pixels. Defaults to False.
ret_grad : bool
Return the computed gradients as a list.
num_cpus : int
Not implemented.
Returns
-------
field, [d, [grad]]
The focused field and optionally, the optimal focusing distance and
the computed gradients.
"""
if metric == "average gradient":
def metric_func(x): return metrics.average_gradient(np.abs(x))
elif metric == "rms contrast":
def metric_func(x): return -metrics.contrast_rms(np.angle(x))
elif metric == "spectrum":
def metric_func(x): return metrics.spectral(np.abs(x), res)
else:
raise ValueError("No such metric: {}".format(metric))
field, d, grad = minimize_metric(field, metric_func, nm, res, ival,
roi=roi, padding=padding)
ret_list = [field]
if ret_d:
ret_list += [d]
if ret_grad:
ret_list += [grad]
if len(ret_list) == 1:
return ret_list[0]
else:
return tuple(ret_list) | python | def autofocus(field, nm, res, ival, roi=None,
metric="average gradient", padding=True,
ret_d=False, ret_grad=False, num_cpus=1):
"""Numerical autofocusing of a field using the Helmholtz equation.
Parameters
----------
field : 1d or 2d ndarray
Electric field is BG-Corrected, i.e. field = EX/BEx
nm : float
Refractive index of medium.
res : float
Size of wavelength in pixels.
ival : tuple of floats
Approximate interval to search for optimal focus in px.
roi : rectangular region of interest (x1, y1, x2, y2)
Region of interest of `field` for which the metric will be
minimized. If not given, the entire `field` will be used.
metric : str
- "average gradient" : average gradient metric of amplitude
- "rms contrast" : RMS contrast of phase data
- "spectrum" : sum of filtered Fourier coefficients
padding: bool
Perform padding with linear ramp from edge to average
to reduce ringing artifacts.
.. versionchanged:: 0.1.4
improved padding value and padding location
ret_d : bool
Return the autofocusing distance in pixels. Defaults to False.
ret_grad : bool
Return the computed gradients as a list.
num_cpus : int
Not implemented.
Returns
-------
field, [d, [grad]]
The focused field and optionally, the optimal focusing distance and
the computed gradients.
"""
if metric == "average gradient":
def metric_func(x): return metrics.average_gradient(np.abs(x))
elif metric == "rms contrast":
def metric_func(x): return -metrics.contrast_rms(np.angle(x))
elif metric == "spectrum":
def metric_func(x): return metrics.spectral(np.abs(x), res)
else:
raise ValueError("No such metric: {}".format(metric))
field, d, grad = minimize_metric(field, metric_func, nm, res, ival,
roi=roi, padding=padding)
ret_list = [field]
if ret_d:
ret_list += [d]
if ret_grad:
ret_list += [grad]
if len(ret_list) == 1:
return ret_list[0]
else:
return tuple(ret_list) | [
"def",
"autofocus",
"(",
"field",
",",
"nm",
",",
"res",
",",
"ival",
",",
"roi",
"=",
"None",
",",
"metric",
"=",
"\"average gradient\"",
",",
"padding",
"=",
"True",
",",
"ret_d",
"=",
"False",
",",
"ret_grad",
"=",
"False",
",",
"num_cpus",
"=",
"1",
")",
":",
"if",
"metric",
"==",
"\"average gradient\"",
":",
"def",
"metric_func",
"(",
"x",
")",
":",
"return",
"metrics",
".",
"average_gradient",
"(",
"np",
".",
"abs",
"(",
"x",
")",
")",
"elif",
"metric",
"==",
"\"rms contrast\"",
":",
"def",
"metric_func",
"(",
"x",
")",
":",
"return",
"-",
"metrics",
".",
"contrast_rms",
"(",
"np",
".",
"angle",
"(",
"x",
")",
")",
"elif",
"metric",
"==",
"\"spectrum\"",
":",
"def",
"metric_func",
"(",
"x",
")",
":",
"return",
"metrics",
".",
"spectral",
"(",
"np",
".",
"abs",
"(",
"x",
")",
",",
"res",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"No such metric: {}\"",
".",
"format",
"(",
"metric",
")",
")",
"field",
",",
"d",
",",
"grad",
"=",
"minimize_metric",
"(",
"field",
",",
"metric_func",
",",
"nm",
",",
"res",
",",
"ival",
",",
"roi",
"=",
"roi",
",",
"padding",
"=",
"padding",
")",
"ret_list",
"=",
"[",
"field",
"]",
"if",
"ret_d",
":",
"ret_list",
"+=",
"[",
"d",
"]",
"if",
"ret_grad",
":",
"ret_list",
"+=",
"[",
"grad",
"]",
"if",
"len",
"(",
"ret_list",
")",
"==",
"1",
":",
"return",
"ret_list",
"[",
"0",
"]",
"else",
":",
"return",
"tuple",
"(",
"ret_list",
")"
] | Numerical autofocusing of a field using the Helmholtz equation.
Parameters
----------
field : 1d or 2d ndarray
Electric field is BG-Corrected, i.e. field = EX/BEx
nm : float
Refractive index of medium.
res : float
Size of wavelength in pixels.
ival : tuple of floats
Approximate interval to search for optimal focus in px.
roi : rectangular region of interest (x1, y1, x2, y2)
Region of interest of `field` for which the metric will be
minimized. If not given, the entire `field` will be used.
metric : str
- "average gradient" : average gradient metric of amplitude
- "rms contrast" : RMS contrast of phase data
- "spectrum" : sum of filtered Fourier coefficients
padding: bool
Perform padding with linear ramp from edge to average
to reduce ringing artifacts.
.. versionchanged:: 0.1.4
improved padding value and padding location
ret_d : bool
Return the autofocusing distance in pixels. Defaults to False.
ret_grad : bool
Return the computed gradients as a list.
num_cpus : int
Not implemented.
Returns
-------
field, [d, [grad]]
The focused field and optionally, the optimal focusing distance and
the computed gradients. | [
"Numerical",
"autofocusing",
"of",
"a",
"field",
"using",
"the",
"Helmholtz",
"equation",
"."
] | train | https://github.com/RI-imaging/nrefocus/blob/ad09aeecace609ab8f9effcb662d2b7d50826080/nrefocus/_autofocus.py#L19-L83 |
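
A minimal usage sketch of `autofocus` follows; it assumes `nrefocus` exposes the function at package level with the signature shown in this record and that `numpy` is installed. The field, refractive index, wavelength resolution, and search interval are illustrative values, not taken from the record.

```python
import numpy as np
import nrefocus

# Illustrative background-corrected field (unit amplitude, small random phase)
rng = np.random.default_rng(0)
field = np.exp(1j * 0.1 * rng.random((64, 64)))

# nm: refractive index of the medium, res: wavelength in pixels,
# ival: (min, max) interval to search for the focus, in pixels
refocused, d = nrefocus.autofocus(field, nm=1.333, res=8.0,
                                  ival=(-50, 50), ret_d=True)
print("optimal refocusing distance [px]:", d)
```
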
RI-imaging/nrefocus | nrefocus/_autofocus.py | autofocus_stack | def autofocus_stack(fieldstack, nm, res, ival, roi=None,
metric="average gradient", padding=True,
same_dist=False, ret_ds=False, ret_grads=False,
num_cpus=_cpu_count, copy=True):
"""Numerical autofocusing of a stack using the Helmholtz equation.
Parameters
----------
fieldstack : 2d or 3d ndarray
Electric field is BG-Corrected, i.e. Field = EX/BEx
nm : float
Refractive index of medium.
res : float
Size of wavelength in pixels.
ival : tuple of floats
Approximate interval to search for optimal focus in px.
metric : str
see `autofocus_field`.
padding : bool
Perform padding with linear ramp from edge to average
to reduce ringing artifacts.
.. versionchanged:: 0.1.4
improved padding value and padding location
ret_dopt : bool
Return optimized distance and gradient plotting data.
same_dist : bool
Refocus entire sinogram with one distance.
ret_ds : bool
Return the autofocusing distances in pixels. Defaults to False.
If same_dist is True, this still returns the autofocusing distances
of the first pass; the refocusing distance actually used is their
average.
ret_grads : bool
Return the computed gradients as a list.
copy : bool
If False, overwrites input array.
Returns
-------
The focused field (and the refocussing distance + data if d is None)
"""
dopt = list()
grad = list()
M = fieldstack.shape[0]
# setup arguments
stackargs = list()
for s in range(M):
stackargs.append([fieldstack[s].copy(copy), nm, res, ival,
roi, metric, padding, True, True, 1])
# perform first pass
p = mp.Pool(num_cpus)
result = p.map_async(_autofocus_wrapper, stackargs).get()
p.close()
p.terminate()
p.join()
# result = []
# for arg in stackargs:
# result += _autofocus_wrapper(arg)
newstack = np.zeros(fieldstack.shape, dtype=fieldstack.dtype)
for s in range(M):
field, ds, gs = result[s]
dopt.append(ds)
grad.append(gs)
newstack[s] = field
# perform second pass if `same_dist` is True
if same_dist:
# find average dopt
davg = np.average(dopt)
newstack = refocus_stack(fieldstack, davg, nm, res,
num_cpus=num_cpus, copy=copy,
padding=padding)
ret_list = [newstack]
if ret_ds:
ret_list += [dopt]
if ret_grads:
ret_list += [grad]
if len(ret_list) == 1:
return ret_list[0]
else:
return tuple(ret_list) | python | def autofocus_stack(fieldstack, nm, res, ival, roi=None,
metric="average gradient", padding=True,
same_dist=False, ret_ds=False, ret_grads=False,
num_cpus=_cpu_count, copy=True):
"""Numerical autofocusing of a stack using the Helmholtz equation.
Parameters
----------
fieldstack : 2d or 3d ndarray
Electric field is BG-Corrected, i.e. Field = EX/BEx
nm : float
Refractive index of medium.
res : float
Size of wavelength in pixels.
ival : tuple of floats
Approximate interval to search for optimal focus in px.
metric : str
see `autofocus_field`.
padding : bool
Perform padding with linear ramp from edge to average
to reduce ringing artifacts.
.. versionchanged:: 0.1.4
improved padding value and padding location
ret_dopt : bool
Return optimized distance and gradient plotting data.
same_dist : bool
Refocus entire sinogram with one distance.
ret_ds : bool
Return the autofocusing distances in pixels. Defaults to False.
If same_dist is True, this still returns the autofocusing distances
of the first pass; the refocusing distance actually used is their
average.
ret_grads : bool
Return the computed gradients as a list.
copy : bool
If False, overwrites input array.
Returns
-------
The focused field (and the refocussing distance + data if d is None)
"""
dopt = list()
grad = list()
M = fieldstack.shape[0]
# setup arguments
stackargs = list()
for s in range(M):
stackargs.append([fieldstack[s].copy(copy), nm, res, ival,
roi, metric, padding, True, True, 1])
# perform first pass
p = mp.Pool(num_cpus)
result = p.map_async(_autofocus_wrapper, stackargs).get()
p.close()
p.terminate()
p.join()
# result = []
# for arg in stackargs:
# result += _autofocus_wrapper(arg)
newstack = np.zeros(fieldstack.shape, dtype=fieldstack.dtype)
for s in range(M):
field, ds, gs = result[s]
dopt.append(ds)
grad.append(gs)
newstack[s] = field
# perform second pass if `same_dist` is True
if same_dist:
# find average dopt
davg = np.average(dopt)
newstack = refocus_stack(fieldstack, davg, nm, res,
num_cpus=num_cpus, copy=copy,
padding=padding)
ret_list = [newstack]
if ret_ds:
ret_list += [dopt]
if ret_grads:
ret_list += [grad]
if len(ret_list) == 1:
return ret_list[0]
else:
return tuple(ret_list) | [
"def",
"autofocus_stack",
"(",
"fieldstack",
",",
"nm",
",",
"res",
",",
"ival",
",",
"roi",
"=",
"None",
",",
"metric",
"=",
"\"average gradient\"",
",",
"padding",
"=",
"True",
",",
"same_dist",
"=",
"False",
",",
"ret_ds",
"=",
"False",
",",
"ret_grads",
"=",
"False",
",",
"num_cpus",
"=",
"_cpu_count",
",",
"copy",
"=",
"True",
")",
":",
"dopt",
"=",
"list",
"(",
")",
"grad",
"=",
"list",
"(",
")",
"M",
"=",
"fieldstack",
".",
"shape",
"[",
"0",
"]",
"# setup arguments",
"stackargs",
"=",
"list",
"(",
")",
"for",
"s",
"in",
"range",
"(",
"M",
")",
":",
"stackargs",
".",
"append",
"(",
"[",
"fieldstack",
"[",
"s",
"]",
".",
"copy",
"(",
"copy",
")",
",",
"nm",
",",
"res",
",",
"ival",
",",
"roi",
",",
"metric",
",",
"padding",
",",
"True",
",",
"True",
",",
"1",
"]",
")",
"# perform first pass",
"p",
"=",
"mp",
".",
"Pool",
"(",
"num_cpus",
")",
"result",
"=",
"p",
".",
"map_async",
"(",
"_autofocus_wrapper",
",",
"stackargs",
")",
".",
"get",
"(",
")",
"p",
".",
"close",
"(",
")",
"p",
".",
"terminate",
"(",
")",
"p",
".",
"join",
"(",
")",
"# result = []",
"# for arg in stackargs:",
"# result += _autofocus_wrapper(arg)",
"newstack",
"=",
"np",
".",
"zeros",
"(",
"fieldstack",
".",
"shape",
",",
"dtype",
"=",
"fieldstack",
".",
"dtype",
")",
"for",
"s",
"in",
"range",
"(",
"M",
")",
":",
"field",
",",
"ds",
",",
"gs",
"=",
"result",
"[",
"s",
"]",
"dopt",
".",
"append",
"(",
"ds",
")",
"grad",
".",
"append",
"(",
"gs",
")",
"newstack",
"[",
"s",
"]",
"=",
"field",
"# perform second pass if `same_dist` is True",
"if",
"same_dist",
":",
"# find average dopt",
"davg",
"=",
"np",
".",
"average",
"(",
"dopt",
")",
"newstack",
"=",
"refocus_stack",
"(",
"fieldstack",
",",
"davg",
",",
"nm",
",",
"res",
",",
"num_cpus",
"=",
"num_cpus",
",",
"copy",
"=",
"copy",
",",
"padding",
"=",
"padding",
")",
"ret_list",
"=",
"[",
"newstack",
"]",
"if",
"ret_ds",
":",
"ret_list",
"+=",
"[",
"dopt",
"]",
"if",
"ret_grads",
":",
"ret_list",
"+=",
"[",
"grad",
"]",
"if",
"len",
"(",
"ret_list",
")",
"==",
"1",
":",
"return",
"ret_list",
"[",
"0",
"]",
"else",
":",
"return",
"tuple",
"(",
"ret_list",
")"
] | Numerical autofocusing of a stack using the Helmholtz equation.
Parameters
----------
fieldstack : 2d or 3d ndarray
Electric field is BG-Corrected, i.e. Field = EX/BEx
nm : float
Refractive index of medium.
res : float
Size of wavelength in pixels.
ival : tuple of floats
Approximate interval to search for optimal focus in px.
metric : str
see `autofocus_field`.
padding : bool
Perform padding with linear ramp from edge to average
to reduce ringing artifacts.
.. versionchanged:: 0.1.4
improved padding value and padding location
ret_dopt : bool
Return optimized distance and gradient plotting data.
same_dist : bool
Refocus entire sinogram with one distance.
ret_ds : bool
Return the autofocusing distances in pixels. Defaults to False.
If same_dist is True, this still returns the autofocusing distances
of the first pass; the refocusing distance actually used is their
average.
ret_grads : bool
Return the computed gradients as a list.
copy : bool
If False, overwrites input array.
Returns
-------
The focused field (and the refocussing distance + data if d is None) | [
"Numerical",
"autofocusing",
"of",
"a",
"stack",
"using",
"the",
"Helmholtz",
"equation",
"."
] | train | https://github.com/RI-imaging/nrefocus/blob/ad09aeecace609ab8f9effcb662d2b7d50826080/nrefocus/_autofocus.py#L86-L175 |
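
A sketch of the stack variant under the same assumptions (package-level `autofocus_stack` with the signature shown in this record); the 3D stack below is illustrative.

```python
import numpy as np
import nrefocus

rng = np.random.default_rng(1)
stack = np.exp(1j * 0.1 * rng.random((5, 64, 64)))  # five illustrative fields

refocused, dists = nrefocus.autofocus_stack(
    stack, nm=1.333, res=8.0, ival=(-50, 50),
    same_dist=True,   # second pass refocuses every slice with the averaged distance
    ret_ds=True)      # also return the per-slice distances from the first pass
print("first-pass distances [px]:", dists)
```
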
RI-imaging/nrefocus | nrefocus/_autofocus.py | minimize_metric | def minimize_metric(field, metric_func, nm, res, ival, roi=None,
coarse_acc=1, fine_acc=.005,
return_gradient=True, padding=True):
"""Find the focus by minimizing the `metric` of an image
Parameters
----------
field : 2d array
electric field
metric_func : callable
some metric to be minimized
ival : tuple of floats
(minimum, maximum) of interval to search in pixels
nm : float
RI of medium
res : float
wavelength in pixels
roi : rectangular region of interest (x1, y1, x2, y2)
Region of interest of `field` for which the metric will be
minimized. If not given, the entire `field` will be used.
coarse_acc : float
accuracy for determination of global minimum in pixels
fine_acc : float
accuracy for fine localization, as a percentage of gradient change
return_gradient : bool
return x and y values of computed gradient
padding : bool
perform padding with linear ramp from edge to average
to reduce ringing artifacts.
.. versionchanged:: 0.1.4
improved padding value and padding location
"""
if roi is not None:
assert len(roi) == len(field.shape) * \
2, "ROI must match field dimension"
initshape = field.shape
Fshape = len(initshape)
propfunc = fft_propagate
if roi is None:
if Fshape == 2:
roi = (0, 0, field.shape[0], field.shape[1])
else:
roi = (0, field.shape[0])
roi = 1*np.array(roi)
if padding:
# Pad with correct complex number
field = pad.pad_add(field)
if ival[0] > ival[1]:
ival = (ival[1], ival[0])
# set coarse interval
# coarse_acc = int(np.ceil(ival[1]-ival[0]))/100
N = 100 / coarse_acc
zc = np.linspace(ival[0], ival[1], N, endpoint=True)
# compute fft of field
fftfield = np.fft.fftn(field)
# fftplan = fftw3.Plan(fftfield.copy(), None, nthreads = _ncores,
# direction="backward", flags=_fftwflags)
# initiate gradient vector
gradc = np.zeros(zc.shape)
for i in range(len(zc)):
d = zc[i]
# fsp = propfunc(fftfield, d, nm, res, fftplan=fftplan)
fsp = propfunc(fftfield, d, nm, res)
if Fshape == 2:
gradc[i] = metric_func(fsp[roi[0]:roi[2], roi[1]:roi[3]])
else:
gradc[i] = metric_func(fsp[roi[0]:roi[1]])
minid = np.argmin(gradc)
if minid == 0:
zc -= zc[1] - zc[0]
minid += 1
if minid == len(zc) - 1:
zc += zc[1] - zc[0]
minid -= 1
zf = 1*zc
gradf = 1 * gradc
numfine = 10
mingrad = gradc[minid]
while True:
gradf = np.zeros(numfine)
ival = (zf[minid - 1], zf[minid + 1])
zf = np.linspace(ival[0], ival[1], numfine)
for i in range(len(zf)):
d = zf[i]
fsp = propfunc(fftfield, d, nm, res)
if Fshape == 2:
gradf[i] = metric_func(fsp[roi[0]:roi[2], roi[1]:roi[3]])
else:
gradf[i] = metric_func(fsp[roi[0]:roi[1]])
minid = np.argmin(gradf)
if minid == 0:
zf -= zf[1] - zf[0]
minid += 1
if minid == len(zf) - 1:
zf += zf[1] - zf[0]
minid -= 1
if abs(mingrad - gradf[minid]) / 100 < fine_acc:
break
minid = np.argmin(gradf)
fsp = propfunc(fftfield, zf[minid], nm, res)
if padding:
fsp = pad.pad_rem(fsp)
if return_gradient:
return fsp, zf[minid], [(zc, gradc), (zf, gradf)]
return fsp, zf[minid] | python | def minimize_metric(field, metric_func, nm, res, ival, roi=None,
coarse_acc=1, fine_acc=.005,
return_gradient=True, padding=True):
"""Find the focus by minimizing the `metric` of an image
Parameters
----------
field : 2d array
electric field
metric_func : callable
some metric to be minimized
ival : tuple of floats
(minimum, maximum) of interval to search in pixels
nm : float
RI of medium
res : float
wavelength in pixels
roi : rectangular region of interest (x1, y1, x2, y2)
Region of interest of `field` for which the metric will be
minimized. If not given, the entire `field` will be used.
coarse_acc : float
accuracy for determination of global minimum in pixels
fine_acc : float
accuracy for fine localization, as a percentage of gradient change
return_gradient : bool
return x and y values of computed gradient
padding : bool
perform padding with linear ramp from edge to average
to reduce ringing artifacts.
.. versionchanged:: 0.1.4
improved padding value and padding location
"""
if roi is not None:
assert len(roi) == len(field.shape) * \
2, "ROI must match field dimension"
initshape = field.shape
Fshape = len(initshape)
propfunc = fft_propagate
if roi is None:
if Fshape == 2:
roi = (0, 0, field.shape[0], field.shape[1])
else:
roi = (0, field.shape[0])
roi = 1*np.array(roi)
if padding:
# Pad with correct complex number
field = pad.pad_add(field)
if ival[0] > ival[1]:
ival = (ival[1], ival[0])
# set coarse interval
# coarse_acc = int(np.ceil(ival[1]-ival[0]))/100
N = 100 / coarse_acc
zc = np.linspace(ival[0], ival[1], N, endpoint=True)
# compute fft of field
fftfield = np.fft.fftn(field)
# fftplan = fftw3.Plan(fftfield.copy(), None, nthreads = _ncores,
# direction="backward", flags=_fftwflags)
# initiate gradient vector
gradc = np.zeros(zc.shape)
for i in range(len(zc)):
d = zc[i]
# fsp = propfunc(fftfield, d, nm, res, fftplan=fftplan)
fsp = propfunc(fftfield, d, nm, res)
if Fshape == 2:
gradc[i] = metric_func(fsp[roi[0]:roi[2], roi[1]:roi[3]])
else:
gradc[i] = metric_func(fsp[roi[0]:roi[1]])
minid = np.argmin(gradc)
if minid == 0:
zc -= zc[1] - zc[0]
minid += 1
if minid == len(zc) - 1:
zc += zc[1] - zc[0]
minid -= 1
zf = 1*zc
gradf = 1 * gradc
numfine = 10
mingrad = gradc[minid]
while True:
gradf = np.zeros(numfine)
ival = (zf[minid - 1], zf[minid + 1])
zf = np.linspace(ival[0], ival[1], numfine)
for i in range(len(zf)):
d = zf[i]
fsp = propfunc(fftfield, d, nm, res)
if Fshape == 2:
gradf[i] = metric_func(fsp[roi[0]:roi[2], roi[1]:roi[3]])
else:
gradf[i] = metric_func(fsp[roi[0]:roi[1]])
minid = np.argmin(gradf)
if minid == 0:
zf -= zf[1] - zf[0]
minid += 1
if minid == len(zf) - 1:
zf += zf[1] - zf[0]
minid -= 1
if abs(mingrad - gradf[minid]) / 100 < fine_acc:
break
minid = np.argmin(gradf)
fsp = propfunc(fftfield, zf[minid], nm, res)
if padding:
fsp = pad.pad_rem(fsp)
if return_gradient:
return fsp, zf[minid], [(zc, gradc), (zf, gradf)]
return fsp, zf[minid] | [
"def",
"minimize_metric",
"(",
"field",
",",
"metric_func",
",",
"nm",
",",
"res",
",",
"ival",
",",
"roi",
"=",
"None",
",",
"coarse_acc",
"=",
"1",
",",
"fine_acc",
"=",
".005",
",",
"return_gradient",
"=",
"True",
",",
"padding",
"=",
"True",
")",
":",
"if",
"roi",
"is",
"not",
"None",
":",
"assert",
"len",
"(",
"roi",
")",
"==",
"len",
"(",
"field",
".",
"shape",
")",
"*",
"2",
",",
"\"ROI must match field dimension\"",
"initshape",
"=",
"field",
".",
"shape",
"Fshape",
"=",
"len",
"(",
"initshape",
")",
"propfunc",
"=",
"fft_propagate",
"if",
"roi",
"is",
"None",
":",
"if",
"Fshape",
"==",
"2",
":",
"roi",
"=",
"(",
"0",
",",
"0",
",",
"field",
".",
"shape",
"[",
"0",
"]",
",",
"field",
".",
"shape",
"[",
"1",
"]",
")",
"else",
":",
"roi",
"=",
"(",
"0",
",",
"field",
".",
"shape",
"[",
"0",
"]",
")",
"roi",
"=",
"1",
"*",
"np",
".",
"array",
"(",
"roi",
")",
"if",
"padding",
":",
"# Pad with correct complex number",
"field",
"=",
"pad",
".",
"pad_add",
"(",
"field",
")",
"if",
"ival",
"[",
"0",
"]",
">",
"ival",
"[",
"1",
"]",
":",
"ival",
"=",
"(",
"ival",
"[",
"1",
"]",
",",
"ival",
"[",
"0",
"]",
")",
"# set coarse interval",
"# coarse_acc = int(np.ceil(ival[1]-ival[0]))/100",
"N",
"=",
"100",
"/",
"coarse_acc",
"zc",
"=",
"np",
".",
"linspace",
"(",
"ival",
"[",
"0",
"]",
",",
"ival",
"[",
"1",
"]",
",",
"N",
",",
"endpoint",
"=",
"True",
")",
"# compute fft of field",
"fftfield",
"=",
"np",
".",
"fft",
".",
"fftn",
"(",
"field",
")",
"# fftplan = fftw3.Plan(fftfield.copy(), None, nthreads = _ncores,",
"# direction=\"backward\", flags=_fftwflags)",
"# initiate gradient vector",
"gradc",
"=",
"np",
".",
"zeros",
"(",
"zc",
".",
"shape",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"zc",
")",
")",
":",
"d",
"=",
"zc",
"[",
"i",
"]",
"# fsp = propfunc(fftfield, d, nm, res, fftplan=fftplan)",
"fsp",
"=",
"propfunc",
"(",
"fftfield",
",",
"d",
",",
"nm",
",",
"res",
")",
"if",
"Fshape",
"==",
"2",
":",
"gradc",
"[",
"i",
"]",
"=",
"metric_func",
"(",
"fsp",
"[",
"roi",
"[",
"0",
"]",
":",
"roi",
"[",
"2",
"]",
",",
"roi",
"[",
"1",
"]",
":",
"roi",
"[",
"3",
"]",
"]",
")",
"else",
":",
"gradc",
"[",
"i",
"]",
"=",
"metric_func",
"(",
"fsp",
"[",
"roi",
"[",
"0",
"]",
":",
"roi",
"[",
"1",
"]",
"]",
")",
"minid",
"=",
"np",
".",
"argmin",
"(",
"gradc",
")",
"if",
"minid",
"==",
"0",
":",
"zc",
"-=",
"zc",
"[",
"1",
"]",
"-",
"zc",
"[",
"0",
"]",
"minid",
"+=",
"1",
"if",
"minid",
"==",
"len",
"(",
"zc",
")",
"-",
"1",
":",
"zc",
"+=",
"zc",
"[",
"1",
"]",
"-",
"zc",
"[",
"0",
"]",
"minid",
"-=",
"1",
"zf",
"=",
"1",
"*",
"zc",
"gradf",
"=",
"1",
"*",
"gradc",
"numfine",
"=",
"10",
"mingrad",
"=",
"gradc",
"[",
"minid",
"]",
"while",
"True",
":",
"gradf",
"=",
"np",
".",
"zeros",
"(",
"numfine",
")",
"ival",
"=",
"(",
"zf",
"[",
"minid",
"-",
"1",
"]",
",",
"zf",
"[",
"minid",
"+",
"1",
"]",
")",
"zf",
"=",
"np",
".",
"linspace",
"(",
"ival",
"[",
"0",
"]",
",",
"ival",
"[",
"1",
"]",
",",
"numfine",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"zf",
")",
")",
":",
"d",
"=",
"zf",
"[",
"i",
"]",
"fsp",
"=",
"propfunc",
"(",
"fftfield",
",",
"d",
",",
"nm",
",",
"res",
")",
"if",
"Fshape",
"==",
"2",
":",
"gradf",
"[",
"i",
"]",
"=",
"metric_func",
"(",
"fsp",
"[",
"roi",
"[",
"0",
"]",
":",
"roi",
"[",
"2",
"]",
",",
"roi",
"[",
"1",
"]",
":",
"roi",
"[",
"3",
"]",
"]",
")",
"else",
":",
"gradf",
"[",
"i",
"]",
"=",
"metric_func",
"(",
"fsp",
"[",
"roi",
"[",
"0",
"]",
":",
"roi",
"[",
"1",
"]",
"]",
")",
"minid",
"=",
"np",
".",
"argmin",
"(",
"gradf",
")",
"if",
"minid",
"==",
"0",
":",
"zf",
"-=",
"zf",
"[",
"1",
"]",
"-",
"zf",
"[",
"0",
"]",
"minid",
"+=",
"1",
"if",
"minid",
"==",
"len",
"(",
"zf",
")",
"-",
"1",
":",
"zf",
"+=",
"zf",
"[",
"1",
"]",
"-",
"zf",
"[",
"0",
"]",
"minid",
"-=",
"1",
"if",
"abs",
"(",
"mingrad",
"-",
"gradf",
"[",
"minid",
"]",
")",
"/",
"100",
"<",
"fine_acc",
":",
"break",
"minid",
"=",
"np",
".",
"argmin",
"(",
"gradf",
")",
"fsp",
"=",
"propfunc",
"(",
"fftfield",
",",
"zf",
"[",
"minid",
"]",
",",
"nm",
",",
"res",
")",
"if",
"padding",
":",
"fsp",
"=",
"pad",
".",
"pad_rem",
"(",
"fsp",
")",
"if",
"return_gradient",
":",
"return",
"fsp",
",",
"zf",
"[",
"minid",
"]",
",",
"[",
"(",
"zc",
",",
"gradc",
")",
",",
"(",
"zf",
",",
"gradf",
")",
"]",
"return",
"fsp",
",",
"zf",
"[",
"minid",
"]"
] | Find the focus by minimizing the `metric` of an image
Parameters
----------
field : 2d array
electric field
metric_func : callable
some metric to be minimized
ival : tuple of floats
(minimum, maximum) of interval to search in pixels
nm : float
RI of medium
res : float
wavelength in pixels
roi : rectangular region of interest (x1, y1, x2, y2)
Region of interest of `field` for which the metric will be
minimized. If not given, the entire `field` will be used.
coarse_acc : float
accuracy for determination of global minimum in pixels
fine_acc : float
accuracy for fine localization, as a percentage of gradient change
return_gradient : bool
return x and y values of computed gradient
padding : bool
perform padding with linear ramp from edge to average
to reduce ringing artifacts.
.. versionchanged:: 0.1.4
improved padding value and padding location | [
"Find",
"the",
"focus",
"by",
"minimizing",
"the",
"metric",
"of",
"an",
"image"
] | train | https://github.com/RI-imaging/nrefocus/blob/ad09aeecace609ab8f9effcb662d2b7d50826080/nrefocus/_autofocus.py#L178-L297 |
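
`minimize_metric` is an internal helper; the sketch below calls it directly and assumes the private module paths `nrefocus.metrics` and `nrefocus._autofocus` keep the names and signatures shown in this record.

```python
import numpy as np
from nrefocus import metrics
from nrefocus._autofocus import minimize_metric

rng = np.random.default_rng(2)
field = np.exp(1j * 0.1 * rng.random((64, 64)))

def metric_func(x):
    # same metric the "average gradient" option selects in autofocus()
    return metrics.average_gradient(np.abs(x))

fsp, d_opt, grads = minimize_metric(field, metric_func, nm=1.333, res=8.0,
                                    ival=(-50, 50))
(zc, gradc), (zf, gradf) = grads  # coarse and fine metric curves
print("focus found at", d_opt, "px")
```
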
anteater/anteater | anteater/src/project_scan.py | prepare_project | def prepare_project(project, project_dir, binaries, ips, urls):
"""
Generates blacklists / whitelists
"""
# Get Various Lists / Project Waivers
lists = get_lists.GetLists()
# Get file name black list and project waivers
file_audit_list, file_audit_project_list = lists.file_audit_list(project)
# Get file content black list and project waivers
flag_list, ignore_list = lists.file_content_list(project)
# Get File Ignore Lists
file_ignore = lists.file_ignore()
ignore_directories = lists.ignore_directories(project)
# Get URL Ignore Lists
url_ignore = lists.url_ignore(project)
# Get IP Ignore Lists
ip_ignore = lists.ip_ignore(project)
# Get Binary Ignore Lists
hashlist = get_lists.GetLists()
if binaries or ips or urls:
try:
apikey = os.environ["VT_KEY"]
except KeyError:
logger.error("Please set your virustotal.com API key as an environment variable")
sys.exit(1)
try:
vt_rate_type = config.get('config', 'vt_rate_type')
except six.moves.configparser.NoSectionError:
logger.error("A config section is required for vt_rate_type with a public | private option")
sys.exit(1)
patten = re.compile(r'\bpublic\b|\bprivate\b')
if not patten.match(vt_rate_type):
logger.error("Unrecognized %s option for vt_rate_type", vt_rate_type)
sys.exit(1)
# Perform rudimentary scans
scan_file(project, project_dir, binaries, ips, urls, file_audit_list,
file_audit_project_list, flag_list, ignore_list, hashlist,
file_ignore, ignore_directories, url_ignore, ip_ignore, apikey) | python | def prepare_project(project, project_dir, binaries, ips, urls):
"""
Generates blacklists / whitelists
"""
# Get Various Lists / Project Waivers
lists = get_lists.GetLists()
# Get file name black list and project waivers
file_audit_list, file_audit_project_list = lists.file_audit_list(project)
# Get file content black list and project waivers
flag_list, ignore_list = lists.file_content_list(project)
# Get File Ignore Lists
file_ignore = lists.file_ignore()
ignore_directories = lists.ignore_directories(project)
# Get URL Ignore Lists
url_ignore = lists.url_ignore(project)
# Get IP Ignore Lists
ip_ignore = lists.ip_ignore(project)
# Get Binary Ignore Lists
hashlist = get_lists.GetLists()
if binaries or ips or urls:
try:
apikey = os.environ["VT_KEY"]
except KeyError:
logger.error("Please set your virustotal.com API key as an environment variable")
sys.exit(1)
try:
vt_rate_type = config.get('config', 'vt_rate_type')
except six.moves.configparser.NoSectionError:
logger.error("A config section is required for vt_rate_type with a public | private option")
sys.exit(1)
patten = re.compile(r'\bpublic\b|\bprivate\b')
if not patten.match(vt_rate_type):
logger.error("Unrecognized %s option for vt_rate_type", vt_rate_type)
sys.exit(1)
# Perform rudimentary scans
scan_file(project, project_dir, binaries, ips, urls, file_audit_list,
file_audit_project_list, flag_list, ignore_list, hashlist,
file_ignore, ignore_directories, url_ignore, ip_ignore, apikey) | [
"def",
"prepare_project",
"(",
"project",
",",
"project_dir",
",",
"binaries",
",",
"ips",
",",
"urls",
")",
":",
"# Get Various Lists / Project Waivers",
"lists",
"=",
"get_lists",
".",
"GetLists",
"(",
")",
"# Get file name black list and project waivers",
"file_audit_list",
",",
"file_audit_project_list",
"=",
"lists",
".",
"file_audit_list",
"(",
"project",
")",
"# Get file content black list and project waivers",
"flag_list",
",",
"ignore_list",
"=",
"lists",
".",
"file_content_list",
"(",
"project",
")",
"# Get File Ignore Lists",
"file_ignore",
"=",
"lists",
".",
"file_ignore",
"(",
")",
"ignore_directories",
"=",
"lists",
".",
"ignore_directories",
"(",
"project",
")",
"# Get URL Ignore Lists",
"url_ignore",
"=",
"lists",
".",
"url_ignore",
"(",
"project",
")",
"# Get IP Ignore Lists",
"ip_ignore",
"=",
"lists",
".",
"ip_ignore",
"(",
"project",
")",
"# Get Binary Ignore Lists",
"hashlist",
"=",
"get_lists",
".",
"GetLists",
"(",
")",
"if",
"binaries",
"or",
"ips",
"or",
"urls",
":",
"try",
":",
"apikey",
"=",
"os",
".",
"environ",
"[",
"\"VT_KEY\"",
"]",
"except",
"KeyError",
":",
"logger",
".",
"error",
"(",
"\"Please set your virustotal.com API key as an environment variable\"",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"try",
":",
"vt_rate_type",
"=",
"config",
".",
"get",
"(",
"'config'",
",",
"'vt_rate_type'",
")",
"except",
"six",
".",
"moves",
".",
"configparser",
".",
"NoSectionError",
":",
"logger",
".",
"error",
"(",
"\"A config section is required for vt_rate_type with a public | private option\"",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"patten",
"=",
"re",
".",
"compile",
"(",
"r'\\bpublic\\b|\\bprivate\\b'",
")",
"if",
"not",
"patten",
".",
"match",
"(",
"vt_rate_type",
")",
":",
"logger",
".",
"error",
"(",
"\"Unrecognized %s option for vt_rate_type\"",
",",
"vt_rate_type",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"# Perform rudimentary scans",
"scan_file",
"(",
"project",
",",
"project_dir",
",",
"binaries",
",",
"ips",
",",
"urls",
",",
"file_audit_list",
",",
"file_audit_project_list",
",",
"flag_list",
",",
"ignore_list",
",",
"hashlist",
",",
"file_ignore",
",",
"ignore_directories",
",",
"url_ignore",
",",
"ip_ignore",
",",
"apikey",
")"
] | Generates blacklists / whitelists | [
"Generates",
"blacklists",
"/",
"whitelists"
] | train | https://github.com/anteater/anteater/blob/a980adbed8563ef92494f565acd371e91f50f155/anteater/src/project_scan.py#L41-L89 |
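
A hedged sketch of driving `prepare_project`; it assumes anteater's module layout matches the path shown in this record and that a `[config]` section with `vt_rate_type` exists in anteater's configuration, as the function requires. The project name, directory, and API key are placeholders.

```python
import os
from anteater.src.project_scan import prepare_project

# The VirusTotal key is only consulted when binaries, ips or urls is True
os.environ["VT_KEY"] = "<your-virustotal-api-key>"

prepare_project(project="example-project",
                project_dir="/path/to/example-project",
                binaries=True, ips=True, urls=True)
```
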
anteater/anteater | anteater/src/project_scan.py | scan_file | def scan_file(project, project_dir, binaries, ips, urls, file_audit_list,
file_audit_project_list, flag_list, ignore_list, hashlist,
file_ignore, ignore_directories, url_ignore, ip_ignore, apikey):
"""
Main scan tasks begin
"""
logger.info("Commencing scan tasks..")
for root, dirs, files in os.walk(project_dir):
# Filter out ignored directories from list.
for items in files:
full_path = os.path.join(root, items)
split_path = full_path.split(project + '/', 1)[-1]
# Check for Blacklisted file names
if not any(x in split_path for x in ignore_directories):
if file_audit_list.search(full_path) and not \
file_audit_project_list.search(full_path):
match = file_audit_list.search(full_path)
logger.error('Blacklisted filename: %s', full_path)
logger.error('Matched String: %s', match.group())
with open(reports_dir + "file-names_" + project + ".log",
"a") as gate_report:
gate_report. \
write('Blacklisted filename: {0}\n'.
format(full_path))
gate_report. \
write('Matched String: {0}'.
format(match.group()))
# Check if Binary is whitelisted
if is_binary(full_path) and binaries:
split_path = full_path.split(project + '/', 1)[-1]
binary_hash = hashlist.binary_hash(project, split_path)
with open(full_path, 'rb') as afile:
hasher = hashlib.sha256()
buf = afile.read()
hasher.update(buf)
sha256hash = hasher.hexdigest()
if sha256hash in binary_hash:
logger.info('Found matching file hash for: %s', full_path)
logger.info('No further action needed for: %s', full_path)
else:
logger.info('Non Whitelisted Binary file: %s', full_path)
scan_binary(full_path, split_path, project, sha256hash, apikey)
else:
if not items.endswith(tuple(file_ignore)) and not is_binary(full_path):
try:
fo = open(full_path, 'r')
lines = fo.readlines()
except IOError:
logger.error('%s does not exist', full_path)
for line in lines:
# Find IP Addresses and send for report to Virus Total
if ips:
ipaddr = re.findall(r'(?:\d{1,3}\.)+(?:\d{1,3})', line)
if ipaddr:
ipaddr = ipaddr[0]
if re.search(ip_ignore, ipaddr):
logger.info('%s is in IP ignore list.', ipaddr)
else:
try:
ipaddress.ip_address(ipaddr).is_global
scan_ipaddr(ipaddr, line, project, split_path, apikey)
except:
pass # Ok to pass here, as this captures the odd string which is not an IP Address
# Check for URLs and send for report to Virus Total
if urls:
url = re.search("(?P<url>https?://[^\s]+)", line) or re.search("(?P<url>www[^\s]+)", line)
if url:
url = url.group("url")
if re.search(url_ignore, url):
logger.info('%s is in URL ignore list.', url)
else:
scan_url(url, line, project, split_path, apikey)
# Check flagged content in files
for key, value in flag_list.items():
regex = value['regex']
desc = value['desc']
if re.search(regex, line) and not re.search(ignore_list, line):
logger.error('File contains violation: %s', full_path)
logger.error('Flagged Content: %s', line.rstrip())
logger.error('Matched Regular Exp: %s', regex)
logger.error('Rationale: %s', desc.rstrip())
with open(reports_dir + "contents-" + project + ".log", "a") as gate_report:
gate_report.write('File contains violation: {0}\n'.format(full_path))
gate_report.write('Flagged Content: {0}'.format(line))
gate_report.write('Matched Regular Exp: {0}'.format(regex))
gate_report.write('Rationale: {0}\n'.format(desc.rstrip())) | python | def scan_file(project, project_dir, binaries, ips, urls, file_audit_list,
file_audit_project_list, flag_list, ignore_list, hashlist,
file_ignore, ignore_directories, url_ignore, ip_ignore, apikey):
"""
Main scan tasks begin
"""
logger.info("Commencing scan tasks..")
for root, dirs, files in os.walk(project_dir):
# Filter out ignored directories from list.
for items in files:
full_path = os.path.join(root, items)
split_path = full_path.split(project + '/', 1)[-1]
# Check for Blacklisted file names
if not any(x in split_path for x in ignore_directories):
if file_audit_list.search(full_path) and not \
file_audit_project_list.search(full_path):
match = file_audit_list.search(full_path)
logger.error('Blacklisted filename: %s', full_path)
logger.error('Matched String: %s', match.group())
with open(reports_dir + "file-names_" + project + ".log",
"a") as gate_report:
gate_report. \
write('Blacklisted filename: {0}\n'.
format(full_path))
gate_report. \
write('Matched String: {0}'.
format(match.group()))
# Check if Binary is whitelisted
if is_binary(full_path) and binaries:
split_path = full_path.split(project + '/', 1)[-1]
binary_hash = hashlist.binary_hash(project, split_path)
with open(full_path, 'rb') as afile:
hasher = hashlib.sha256()
buf = afile.read()
hasher.update(buf)
sha256hash = hasher.hexdigest()
if sha256hash in binary_hash:
logger.info('Found matching file hash for: %s', full_path)
logger.info('No further action needed for: %s', full_path)
else:
logger.info('Non Whitelisted Binary file: %s', full_path)
scan_binary(full_path, split_path, project, sha256hash, apikey)
else:
if not items.endswith(tuple(file_ignore)) and not is_binary(full_path):
try:
fo = open(full_path, 'r')
lines = fo.readlines()
except IOError:
logger.error('%s does not exist', full_path)
for line in lines:
# Find IP Addresses and send for report to Virus Total
if ips:
ipaddr = re.findall(r'(?:\d{1,3}\.)+(?:\d{1,3})', line)
if ipaddr:
ipaddr = ipaddr[0]
if re.search(ip_ignore, ipaddr):
logger.info('%s is in IP ignore list.', ipaddr)
else:
try:
ipaddress.ip_address(ipaddr).is_global
scan_ipaddr(ipaddr, line, project, split_path, apikey)
except:
pass # Ok to pass here, as this captures the odd string which is not an IP Address
# Check for URLs and send for report to Virus Total
if urls:
url = re.search("(?P<url>https?://[^\s]+)", line) or re.search("(?P<url>www[^\s]+)", line)
if url:
url = url.group("url")
if re.search(url_ignore, url):
logger.info('%s is in URL ignore list.', url)
else:
scan_url(url, line, project, split_path, apikey)
# Check flagged content in files
for key, value in flag_list.items():
regex = value['regex']
desc = value['desc']
if re.search(regex, line) and not re.search(ignore_list, line):
logger.error('File contains violation: %s', full_path)
logger.error('Flagged Content: %s', line.rstrip())
logger.error('Matched Regular Exp: %s', regex)
logger.error('Rationale: %s', desc.rstrip())
with open(reports_dir + "contents-" + project + ".log", "a") as gate_report:
gate_report.write('File contains violation: {0}\n'.format(full_path))
gate_report.write('Flagged Content: {0}'.format(line))
gate_report.write('Matched Regular Exp: {0}'.format(regex))
gate_report.write('Rationale: {0}\n'.format(desc.rstrip())) | [
"def",
"scan_file",
"(",
"project",
",",
"project_dir",
",",
"binaries",
",",
"ips",
",",
"urls",
",",
"file_audit_list",
",",
"file_audit_project_list",
",",
"flag_list",
",",
"ignore_list",
",",
"hashlist",
",",
"file_ignore",
",",
"ignore_directories",
",",
"url_ignore",
",",
"ip_ignore",
",",
"apikey",
")",
":",
"logger",
".",
"info",
"(",
"\"Commencing scan tasks..\"",
")",
"for",
"root",
",",
"dirs",
",",
"files",
"in",
"os",
".",
"walk",
"(",
"project_dir",
")",
":",
"# Filter out ignored directories from list.",
"for",
"items",
"in",
"files",
":",
"full_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"items",
")",
"split_path",
"=",
"full_path",
".",
"split",
"(",
"project",
"+",
"'/'",
",",
"1",
")",
"[",
"-",
"1",
"]",
"# Check for Blacklisted file names",
"if",
"not",
"any",
"(",
"x",
"in",
"split_path",
"for",
"x",
"in",
"ignore_directories",
")",
":",
"if",
"file_audit_list",
".",
"search",
"(",
"full_path",
")",
"and",
"not",
"file_audit_project_list",
".",
"search",
"(",
"full_path",
")",
":",
"match",
"=",
"file_audit_list",
".",
"search",
"(",
"full_path",
")",
"logger",
".",
"error",
"(",
"'Blacklisted filename: %s'",
",",
"full_path",
")",
"logger",
".",
"error",
"(",
"'Matched String: %s'",
",",
"match",
".",
"group",
"(",
")",
")",
"with",
"open",
"(",
"reports_dir",
"+",
"\"file-names_\"",
"+",
"project",
"+",
"\".log\"",
",",
"\"a\"",
")",
"as",
"gate_report",
":",
"gate_report",
".",
"write",
"(",
"'Blacklisted filename: {0}\\n'",
".",
"format",
"(",
"full_path",
")",
")",
"gate_report",
".",
"write",
"(",
"'Matched String: {0}'",
".",
"format",
"(",
"match",
".",
"group",
"(",
")",
")",
")",
"# Check if Binary is whitelisted",
"if",
"is_binary",
"(",
"full_path",
")",
"and",
"binaries",
":",
"split_path",
"=",
"full_path",
".",
"split",
"(",
"project",
"+",
"'/'",
",",
"1",
")",
"[",
"-",
"1",
"]",
"binary_hash",
"=",
"hashlist",
".",
"binary_hash",
"(",
"project",
",",
"split_path",
")",
"with",
"open",
"(",
"full_path",
",",
"'rb'",
")",
"as",
"afile",
":",
"hasher",
"=",
"hashlib",
".",
"sha256",
"(",
")",
"buf",
"=",
"afile",
".",
"read",
"(",
")",
"hasher",
".",
"update",
"(",
"buf",
")",
"sha256hash",
"=",
"hasher",
".",
"hexdigest",
"(",
")",
"if",
"sha256hash",
"in",
"binary_hash",
":",
"logger",
".",
"info",
"(",
"'Found matching file hash for: %s'",
",",
"full_path",
")",
"logger",
".",
"info",
"(",
"'No further action needed for: %s'",
",",
"full_path",
")",
"else",
":",
"logger",
".",
"info",
"(",
"'Non Whitelisted Binary file: %s'",
",",
"full_path",
")",
"scan_binary",
"(",
"full_path",
",",
"split_path",
",",
"project",
",",
"sha256hash",
",",
"apikey",
")",
"else",
":",
"if",
"not",
"items",
".",
"endswith",
"(",
"tuple",
"(",
"file_ignore",
")",
")",
"and",
"not",
"is_binary",
"(",
"full_path",
")",
":",
"try",
":",
"fo",
"=",
"open",
"(",
"full_path",
",",
"'r'",
")",
"lines",
"=",
"fo",
".",
"readlines",
"(",
")",
"except",
"IOError",
":",
"logger",
".",
"error",
"(",
"'%s does not exist'",
",",
"full_path",
")",
"for",
"line",
"in",
"lines",
":",
"# Find IP Addresses and send for report to Virus Total",
"if",
"ips",
":",
"ipaddr",
"=",
"re",
".",
"findall",
"(",
"r'(?:\\d{1,3}\\.)+(?:\\d{1,3})'",
",",
"line",
")",
"if",
"ipaddr",
":",
"ipaddr",
"=",
"ipaddr",
"[",
"0",
"]",
"if",
"re",
".",
"search",
"(",
"ip_ignore",
",",
"ipaddr",
")",
":",
"logger",
".",
"info",
"(",
"'%s is in IP ignore list.'",
",",
"ipaddr",
")",
"else",
":",
"try",
":",
"ipaddress",
".",
"ip_address",
"(",
"ipaddr",
")",
".",
"is_global",
"scan_ipaddr",
"(",
"ipaddr",
",",
"line",
",",
"project",
",",
"split_path",
",",
"apikey",
")",
"except",
":",
"pass",
"# Ok to pass here, as this captures the odd string which is nt an IP Address",
"# Check for URLs and send for report to Virus Total",
"if",
"urls",
":",
"url",
"=",
"re",
".",
"search",
"(",
"\"(?P<url>https?://[^\\s]+)\"",
",",
"line",
")",
"or",
"re",
".",
"search",
"(",
"\"(?P<url>www[^\\s]+)\"",
",",
"line",
")",
"if",
"url",
":",
"url",
"=",
"url",
".",
"group",
"(",
"\"url\"",
")",
"if",
"re",
".",
"search",
"(",
"url_ignore",
",",
"url",
")",
":",
"logger",
".",
"info",
"(",
"'%s is in URL ignore list.'",
",",
"url",
")",
"else",
":",
"scan_url",
"(",
"url",
",",
"line",
",",
"project",
",",
"split_path",
",",
"apikey",
")",
"# Check flagged content in files",
"for",
"key",
",",
"value",
"in",
"flag_list",
".",
"items",
"(",
")",
":",
"regex",
"=",
"value",
"[",
"'regex'",
"]",
"desc",
"=",
"value",
"[",
"'desc'",
"]",
"if",
"re",
".",
"search",
"(",
"regex",
",",
"line",
")",
"and",
"not",
"re",
".",
"search",
"(",
"ignore_list",
",",
"line",
")",
":",
"logger",
".",
"error",
"(",
"'File contains violation: %s'",
",",
"full_path",
")",
"logger",
".",
"error",
"(",
"'Flagged Content: %s'",
",",
"line",
".",
"rstrip",
"(",
")",
")",
"logger",
".",
"error",
"(",
"'Matched Regular Exp: %s'",
",",
"regex",
")",
"logger",
".",
"error",
"(",
"'Rationale: %s'",
",",
"desc",
".",
"rstrip",
"(",
")",
")",
"with",
"open",
"(",
"reports_dir",
"+",
"\"contents-\"",
"+",
"project",
"+",
"\".log\"",
",",
"\"a\"",
")",
"as",
"gate_report",
":",
"gate_report",
".",
"write",
"(",
"'File contains violation: {0}\\n'",
".",
"format",
"(",
"full_path",
")",
")",
"gate_report",
".",
"write",
"(",
"'Flagged Content: {0}'",
".",
"format",
"(",
"line",
")",
")",
"gate_report",
".",
"write",
"(",
"'Matched Regular Exp: {0}'",
".",
"format",
"(",
"regex",
")",
")",
"gate_report",
".",
"write",
"(",
"'Rationale: {0}\\n'",
".",
"format",
"(",
"desc",
".",
"rstrip",
"(",
")",
")",
")"
] | Main scan tasks begin | [
"Main",
"scan",
"tasks",
"begin"
] | train | https://github.com/anteater/anteater/blob/a980adbed8563ef92494f565acd371e91f50f155/anteater/src/project_scan.py#L92-L183 |
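
The IP and URL detection inside `scan_file` relies on two regular expressions; the stdlib-only snippet below exercises them in isolation on an illustrative line so their behaviour is easier to see.

```python
import re

line = "db_host = 10.0.0.12  # docs: https://example.org/setup"

# Same patterns used when walking each file line by line
ipaddrs = re.findall(r'(?:\d{1,3}\.)+(?:\d{1,3})', line)
url = (re.search(r"(?P<url>https?://[^\s]+)", line)
       or re.search(r"(?P<url>www[^\s]+)", line))

print(ipaddrs[0])         # 10.0.0.12
print(url.group("url"))   # https://example.org/setup
```
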
anteater/anteater | anteater/src/project_scan.py | scan_ipaddr | def scan_ipaddr(ipaddr, line, project, split_path, apikey):
"""
If an IP Address is found, scan it
"""
logger.info('Found what I believe is an IP Address: %s', line.strip())
logger.info('File %s. Parsed IP Address: %s', split_path, ipaddr)
with open(reports_dir + "ips-" + project + ".log", "a") as gate_report:
gate_report.write('File {} contains what I believe is an IP Address: {}\n'.format(split_path, ipaddr))
v_api = virus_total.VirusTotal()
scan_ip = v_api.send_ip(ipaddr, apikey)
response_code = scan_ip['response_code']
verbose_msg = scan_ip['verbose_msg']
urls = scan_ip['detected_urls']
with open(reports_dir + "ips-" + project + ".log", "a") as gate_report:
if urls:
logger.error('%s has been known to resolve to the following malicious urls:', ipaddr)
gate_report.write('{} has been known to resolve to the following malicious urls:\n'.format(ipaddr))
for url in urls:
logger.info('%s on date: %s', url['url'], url['scan_date'])
gate_report.write('{} on {}\n'.format(url['url'], url['scan_date']))
sleep(0.2)
else:
logger.info('No malicious DNS history found for: %s', ipaddr)
gate_report.write('No malicious DNS history found for: {}\n'.format(ipaddr)) | python | def scan_ipaddr(ipaddr, line, project, split_path, apikey):
"""
If an IP Address is found, scan it
"""
logger.info('Found what I believe is an IP Address: %s', line.strip())
logger.info('File %s. Parsed IP Address: %s', split_path, ipaddr)
with open(reports_dir + "ips-" + project + ".log", "a") as gate_report:
gate_report.write('File {} contains what I believe is an IP Address: {}\n'.format(split_path, ipaddr))
v_api = virus_total.VirusTotal()
scan_ip = v_api.send_ip(ipaddr, apikey)
response_code = scan_ip['response_code']
verbose_msg = scan_ip['verbose_msg']
urls = scan_ip['detected_urls']
with open(reports_dir + "ips-" + project + ".log", "a") as gate_report:
if urls:
logger.error('%s has been known to resolve to the following malicious urls:', ipaddr)
gate_report.write('{} has been known to resolve to the following malicious urls:\n'.format(ipaddr))
for url in urls:
logger.info('%s on date: %s', url['url'], url['scan_date'])
gate_report.write('{} on {}\n'.format(url['url'], url['scan_date']))
sleep(0.2)
else:
logger.info('No malicious DNS history found for: %s', ipaddr)
gate_report.write('No malicious DNS history found for: {}\n'.format(ipaddr)) | [
"def",
"scan_ipaddr",
"(",
"ipaddr",
",",
"line",
",",
"project",
",",
"split_path",
",",
"apikey",
")",
":",
"logger",
".",
"info",
"(",
"'Found what I believe is an IP Address: %s'",
",",
"line",
".",
"strip",
"(",
")",
")",
"logger",
".",
"info",
"(",
"'File %s. Parsed IP Address: %s'",
",",
"split_path",
",",
"ipaddr",
")",
"with",
"open",
"(",
"reports_dir",
"+",
"\"ips-\"",
"+",
"project",
"+",
"\".log\"",
",",
"\"a\"",
")",
"as",
"gate_report",
":",
"gate_report",
".",
"write",
"(",
"'File {} contains what I believe is an IP Address: {}\\n'",
".",
"format",
"(",
"split_path",
",",
"ipaddr",
")",
")",
"v_api",
"=",
"virus_total",
".",
"VirusTotal",
"(",
")",
"scan_ip",
"=",
"v_api",
".",
"send_ip",
"(",
"ipaddr",
",",
"apikey",
")",
"response_code",
"=",
"scan_ip",
"[",
"'response_code'",
"]",
"verbose_msg",
"=",
"scan_ip",
"[",
"'verbose_msg'",
"]",
"urls",
"=",
"scan_ip",
"[",
"'detected_urls'",
"]",
"with",
"open",
"(",
"reports_dir",
"+",
"\"ips-\"",
"+",
"project",
"+",
"\".log\"",
",",
"\"a\"",
")",
"as",
"gate_report",
":",
"if",
"urls",
":",
"logger",
".",
"error",
"(",
"'%s has been known to resolve to the following malicious urls:'",
",",
"ipaddr",
")",
"gate_report",
".",
"write",
"(",
"'{} has been known to resolve to the following malicious urls:\\n'",
".",
"format",
"(",
"ipaddr",
")",
")",
"for",
"url",
"in",
"urls",
":",
"logger",
".",
"info",
"(",
"'%s on date: %s'",
",",
"url",
"[",
"'url'",
"]",
",",
"url",
"[",
"'scan_date'",
"]",
")",
"gate_report",
".",
"write",
"(",
"'{} on {}\\n'",
".",
"format",
"(",
"url",
"[",
"'url'",
"]",
",",
"url",
"[",
"'scan_date'",
"]",
")",
")",
"sleep",
"(",
"0.2",
")",
"else",
":",
"logger",
".",
"info",
"(",
"'No malicious DNS history found for: %s'",
",",
"ipaddr",
")",
"gate_report",
".",
"write",
"(",
"'No malicious DNS history found for: {}\\n'",
".",
"format",
"(",
"ipaddr",
")",
")"
] | If an IP Address is found, scan it | [
"If",
"an",
"IP",
"Address",
"is",
"found",
"scan",
"it"
] | train | https://github.com/anteater/anteater/blob/a980adbed8563ef92494f565acd371e91f50f155/anteater/src/project_scan.py#L252-L276 |
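
`scan_ipaddr` is only reached after the caller has checked `ipaddress.ip_address(...).is_global`; a small stdlib-only sketch of that validity and scope check, with illustrative addresses.

```python
import ipaddress

for candidate in ("8.8.8.8", "192.168.1.5", "10.1.2"):
    try:
        # is_global distinguishes public addresses from private/reserved ranges
        print(candidate, "is_global:", ipaddress.ip_address(candidate).is_global)
    except ValueError:
        # non-addresses (e.g. version-like strings caught by the regex) land here
        print(candidate, "is not a valid IP address")
```
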
anteater/anteater | anteater/src/project_scan.py | scan_url | def scan_url(url, line, project, split_path, apikey):
"""
If URL is found, scan it
"""
logger.info('File %s contains what I believe is a URL: %s', split_path, line.strip())
logger.info('Scanning: %s', url)
with open(reports_dir + "urls-" + project + ".log", "a") as gate_report:
gate_report.write('File {} contains what I believe is a URL: {}\n'.format(split_path, url))
v_api = virus_total.VirusTotal()
while True:
url_report = v_api.url_report(url, apikey)
response_code = url_report['response_code']
# report does not exist, need to scan
if response_code == -2:
logger.info('Report job still queued..')
if response_code == 0:
logger.info('No report for %s', url)
break
if response_code == 1:
logger.info('Report found, job complete for %s.', url)
break
try:
positives = url_report['positives']
if positives > 0:
for site, results in url_report['scans'].items():
printed = False
if results['detected']:
with open(reports_dir + "urls-" + project + ".log", "a") as gate_report:
logger.error("%s is recorded as a %s by %s", url, results['result'], site)
gate_report.write('{} is recorded as a {} by {}\n'.format(url, results['result'], site))
if not printed:
printed = True
with open(reports_dir + "urls-" + project + ".log", "a") as gate_report:
logger.error("Full report available here: %s", url_report['permalink'])
gate_report.write('Full report available here: {}\n'.format(url_report['permalink']))
else:
logger.info("%s is recorded as a clean", url)
with open(reports_dir + "urls-" + project + ".log", "a") as gate_report:
gate_report.write('{} is recorded as a clean\n'.format(url))
except:
# No positives so we can pass this
pass | python | def scan_url(url, line, project, split_path, apikey):
"""
If URL is found, scan it
"""
logger.info('File %s contains what I believe is a URL: %s', split_path, line.strip())
logger.info('Scanning: %s', url)
with open(reports_dir + "urls-" + project + ".log", "a") as gate_report:
gate_report.write('File {} contains what I believe is a URL: {}\n'.format(split_path, url))
v_api = virus_total.VirusTotal()
while True:
url_report = v_api.url_report(url, apikey)
response_code = url_report['response_code']
# report does not exist, need to scan
if response_code == -2:
logger.info('Report job still queued..')
if response_code == 0:
logger.info('No report for %s', url)
break
if response_code == 1:
logger.info('Report found, job complete for %s.', url)
break
try:
positives = url_report['positives']
if positives > 0:
for site, results in url_report['scans'].items():
printed = False
if results['detected']:
with open(reports_dir + "urls-" + project + ".log", "a") as gate_report:
logger.error("%s is recorded as a %s by %s", url, results['result'], site)
gate_report.write('{} is recorded as a {} by {}\n'.format(url, results['result'], site))
if not printed:
printed = True
with open(reports_dir + "urls-" + project + ".log", "a") as gate_report:
logger.error("Full report available here: %s", url_report['permalink'])
gate_report.write('Full report available here: {}\n'.format(url_report['permalink']))
else:
logger.info("%s is recorded as a clean", url)
with open(reports_dir + "urls-" + project + ".log", "a") as gate_report:
gate_report.write('{} is recorded as a clean\n'.format(url))
except:
# No positives so we can pass this
pass | [
"def",
"scan_url",
"(",
"url",
",",
"line",
",",
"project",
",",
"split_path",
",",
"apikey",
")",
":",
"logger",
".",
"info",
"(",
"'File %s contains what I believe is a URL: %s'",
",",
"split_path",
",",
"line",
".",
"strip",
"(",
")",
")",
"logger",
".",
"info",
"(",
"'Scanning: %s'",
",",
"url",
")",
"with",
"open",
"(",
"reports_dir",
"+",
"\"urls-\"",
"+",
"project",
"+",
"\".log\"",
",",
"\"a\"",
")",
"as",
"gate_report",
":",
"gate_report",
".",
"write",
"(",
"'File {} contains what I believe is a URL: {}\\n'",
".",
"format",
"(",
"split_path",
",",
"url",
")",
")",
"v_api",
"=",
"virus_total",
".",
"VirusTotal",
"(",
")",
"while",
"True",
":",
"url_report",
"=",
"v_api",
".",
"url_report",
"(",
"url",
",",
"apikey",
")",
"response_code",
"=",
"url_report",
"[",
"'response_code'",
"]",
"# report does not exist, need to scan",
"if",
"response_code",
"==",
"-",
"2",
":",
"logger",
".",
"info",
"(",
"'Report job still queued..'",
")",
"if",
"response_code",
"==",
"0",
":",
"logger",
".",
"info",
"(",
"'No report for %s'",
",",
"url",
")",
"break",
"if",
"response_code",
"==",
"1",
":",
"logger",
".",
"info",
"(",
"'Report found, job complete for %s.'",
",",
"url",
")",
"break",
"try",
":",
"positives",
"=",
"url_report",
"[",
"'positives'",
"]",
"if",
"positives",
">",
"0",
":",
"for",
"site",
",",
"results",
"in",
"url_report",
"[",
"'scans'",
"]",
".",
"items",
"(",
")",
":",
"printed",
"=",
"False",
"if",
"results",
"[",
"'detected'",
"]",
":",
"with",
"open",
"(",
"reports_dir",
"+",
"\"urls-\"",
"+",
"project",
"+",
"\".log\"",
",",
"\"a\"",
")",
"as",
"gate_report",
":",
"logger",
".",
"error",
"(",
"\"%s is recorded as a %s by %s\"",
",",
"url",
",",
"results",
"[",
"'result'",
"]",
",",
"site",
")",
"gate_report",
".",
"write",
"(",
"'{} is recorded as a {} by {}\\n'",
".",
"format",
"(",
"url",
",",
"results",
"[",
"'result'",
"]",
",",
"site",
")",
")",
"if",
"not",
"printed",
":",
"printed",
"=",
"True",
"with",
"open",
"(",
"reports_dir",
"+",
"\"urls-\"",
"+",
"project",
"+",
"\".log\"",
",",
"\"a\"",
")",
"as",
"gate_report",
":",
"logger",
".",
"error",
"(",
"\"Full report available here: %s\"",
",",
"url_report",
"[",
"'permalink'",
"]",
")",
"gate_report",
".",
"write",
"(",
"'Full report available here: {}\\n'",
".",
"format",
"(",
"url_report",
"[",
"'permalink'",
"]",
")",
")",
"else",
":",
"logger",
".",
"info",
"(",
"\"%s is recorded as a clean\"",
",",
"url",
")",
"with",
"open",
"(",
"reports_dir",
"+",
"\"urls-\"",
"+",
"project",
"+",
"\".log\"",
",",
"\"a\"",
")",
"as",
"gate_report",
":",
"gate_report",
".",
"write",
"(",
"'{} is recorded as a clean\\n'",
".",
"format",
"(",
"url",
")",
")",
"except",
":",
"# No positives so we can pass this",
"pass"
] | If URL is found, scan it | [
"If",
"URL",
"is",
"found",
"scan",
"it"
] | train | https://github.com/anteater/anteater/blob/a980adbed8563ef92494f565acd371e91f50f155/anteater/src/project_scan.py#L279-L327 |
ttinies/sc2gameMapRepo | sc2maptool/index.py | getIndex | def getIndex(folderPath=None):
"""parse the 'Maps' subfolder directory divining criteria for valid maps"""
try: return cache.structure
except AttributeError: pass # if it doesn't exist, generate and cache the map file data
if folderPath == None:
from sc2maptool.startup import setup
folderPath = setup()
############################################################################
def folderSearch(path, attrList=[]):
ret = []
for item in glob(os.path.join(path, '*')):
if item == os.sep: continue
itemName = os.path.basename(item)
if os.path.isdir(item): ret += folderSearch(item, attrList + [itemName])
elif itemName.endswith(c.SC2_MAP_EXT): ret.append( MapRecord(itemName, item, attrList) )
return ret
############################################################################
cache.structure = folderSearch(folderPath)
return cache.structure | python | def getIndex(folderPath=None):
"""parse the 'Maps' subfolder directory divining criteria for valid maps"""
try: return cache.structure
except AttributeError: pass # if it doesn't exist, generate and cache the map file data
if folderPath == None:
from sc2maptool.startup import setup
folderPath = setup()
############################################################################
def folderSearch(path, attrList=[]):
ret = []
for item in glob(os.path.join(path, '*')):
if item == os.sep: continue
itemName = os.path.basename(item)
if os.path.isdir(item): ret += folderSearch(item, attrList + [itemName])
elif itemName.endswith(c.SC2_MAP_EXT): ret.append( MapRecord(itemName, item, attrList) )
return ret
############################################################################
cache.structure = folderSearch(folderPath)
return cache.structure | [
"def",
"getIndex",
"(",
"folderPath",
"=",
"None",
")",
":",
"try",
":",
"return",
"cache",
".",
"structure",
"except",
"AttributeError",
":",
"pass",
"# if it doesn't exist, generate and cache the map file data",
"if",
"folderPath",
"==",
"None",
":",
"from",
"sc2maptool",
".",
"startup",
"import",
"setup",
"folderPath",
"=",
"setup",
"(",
")",
"############################################################################",
"def",
"folderSearch",
"(",
"path",
",",
"attrList",
"=",
"[",
"]",
")",
":",
"ret",
"=",
"[",
"]",
"for",
"item",
"in",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"'*'",
")",
")",
":",
"if",
"item",
"==",
"os",
".",
"sep",
":",
"continue",
"itemName",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"item",
")",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"item",
")",
":",
"ret",
"+=",
"folderSearch",
"(",
"item",
",",
"attrList",
"+",
"[",
"itemName",
"]",
")",
"elif",
"itemName",
".",
"endswith",
"(",
"c",
".",
"SC2_MAP_EXT",
")",
":",
"ret",
".",
"append",
"(",
"MapRecord",
"(",
"itemName",
",",
"item",
",",
"attrList",
")",
")",
"return",
"ret",
"############################################################################",
"cache",
".",
"structure",
"=",
"folderSearch",
"(",
"folderPath",
")",
"return",
"cache",
".",
"structure"
] | parse the 'Maps' subfolder directory divining criteria for valid maps | [
"parse",
"the",
"Maps",
"subfolder",
"directory",
"divining",
"criteria",
"for",
"valid",
"maps"
] | train | https://github.com/ttinies/sc2gameMapRepo/blob/3a215067fae8f86f6a3ffe37272fbd7a5461cfab/sc2maptool/index.py#L16-L34 |
lablup/backend.ai-common | src/ai/backend/common/types.py | _stringify_number | def _stringify_number(v):
'''
Stringify a number, preventing unwanted scientific notations.
'''
if isinstance(v, (float, Decimal)):
if math.isinf(v) and v > 0:
v = 'Infinity'
elif math.isinf(v) and v < 0:
v = '-Infinity'
else:
v = '{:f}'.format(v)
elif isinstance(v, BinarySize):
v = '{:d}'.format(int(v))
elif isinstance(v, int):
v = '{:d}'.format(v)
else:
v = str(v)
return v | python | def _stringify_number(v):
'''
Stringify a number, preventing unwanted scientific notations.
'''
if isinstance(v, (float, Decimal)):
if math.isinf(v) and v > 0:
v = 'Infinity'
elif math.isinf(v) and v < 0:
v = '-Infinity'
else:
v = '{:f}'.format(v)
elif isinstance(v, BinarySize):
v = '{:d}'.format(int(v))
elif isinstance(v, int):
v = '{:d}'.format(v)
else:
v = str(v)
return v | [
"def",
"_stringify_number",
"(",
"v",
")",
":",
"if",
"isinstance",
"(",
"v",
",",
"(",
"float",
",",
"Decimal",
")",
")",
":",
"if",
"math",
".",
"isinf",
"(",
"v",
")",
"and",
"v",
">",
"0",
":",
"v",
"=",
"'Infinity'",
"elif",
"math",
".",
"isinf",
"(",
"v",
")",
"and",
"v",
"<",
"0",
":",
"v",
"=",
"'-Infinity'",
"else",
":",
"v",
"=",
"'{:f}'",
".",
"format",
"(",
"v",
")",
"elif",
"isinstance",
"(",
"v",
",",
"BinarySize",
")",
":",
"v",
"=",
"'{:d}'",
".",
"format",
"(",
"int",
"(",
"v",
")",
")",
"elif",
"isinstance",
"(",
"v",
",",
"int",
")",
":",
"v",
"=",
"'{:d}'",
".",
"format",
"(",
"v",
")",
"else",
":",
"v",
"=",
"str",
"(",
"v",
")",
"return",
"v"
] | Stringify a number, preventing unwanted scientific notations. | [
"Stringify",
"a",
"number",
"preventing",
"unwanted",
"scientific",
"notations",
"."
] | train | https://github.com/lablup/backend.ai-common/blob/20b3a2551ee5bb3b88e7836471bc244a70ad0ae6/src/ai/backend/common/types.py#L692-L709 |
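The helper above exists because str() on floats and Decimals can fall back to scientific notation, while the '{:f}' format spec always emits a plain decimal expansion. A minimal self-contained illustration (values invented):

from decimal import Decimal

v = 1e16
print(str(v))              # 1e+16  -- scientific notation
print('{:f}'.format(v))    # 10000000000000000.000000  -- plain fixed-point

d = Decimal('1E-10')
print(str(d))              # 1E-10
print('{:f}'.format(d))    # 0.0000000001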
lablup/backend.ai-common | src/ai/backend/common/types.py | ImageRef.resolve_alias | async def resolve_alias(cls, alias_key: str, etcd: etcd.AsyncEtcd):
'''
Resolve the tag using etcd so that the current instance indicates
a concrete, latest image.
Note that alias resolving does not take the registry component into
account.
'''
alias_target = None
repeats = 0
while repeats < 8:
prev_alias_key = alias_key
alias_key = await etcd.get(f'images/_aliases/{alias_key}')
if alias_key is None:
alias_target = prev_alias_key
break
repeats += 1
else:
raise AliasResolutionFailed('Could not resolve the given image name!')
known_registries = await get_known_registries(etcd)
return cls(alias_target, known_registries) | python | async def resolve_alias(cls, alias_key: str, etcd: etcd.AsyncEtcd):
'''
Resolve the tag using etcd so that the current instance indicates
a concrete, latest image.
Note that alias resolving does not take the registry component into
account.
'''
alias_target = None
repeats = 0
while repeats < 8:
prev_alias_key = alias_key
alias_key = await etcd.get(f'images/_aliases/{alias_key}')
if alias_key is None:
alias_target = prev_alias_key
break
repeats += 1
else:
raise AliasResolutionFailed('Could not resolve the given image name!')
known_registries = await get_known_registries(etcd)
return cls(alias_target, known_registries) | [
"async",
"def",
"resolve_alias",
"(",
"cls",
",",
"alias_key",
":",
"str",
",",
"etcd",
":",
"etcd",
".",
"AsyncEtcd",
")",
":",
"alias_target",
"=",
"None",
"repeats",
"=",
"0",
"while",
"repeats",
"<",
"8",
":",
"prev_alias_key",
"=",
"alias_key",
"alias_key",
"=",
"await",
"etcd",
".",
"get",
"(",
"f'images/_aliases/{alias_key}'",
")",
"if",
"alias_key",
"is",
"None",
":",
"alias_target",
"=",
"prev_alias_key",
"break",
"repeats",
"+=",
"1",
"else",
":",
"raise",
"AliasResolutionFailed",
"(",
"'Could not resolve the given image name!'",
")",
"known_registries",
"=",
"await",
"get_known_registries",
"(",
"etcd",
")",
"return",
"cls",
"(",
"alias_target",
",",
"known_registries",
")"
] | Resolve the tag using etcd so that the current instance indicates
a concrete, latest image.
Note that alias resolving does not take the registry component into
account. | [
"Resolve",
"the",
"tag",
"using",
"etcd",
"so",
"that",
"the",
"current",
"instance",
"indicates",
"a",
"concrete",
"latest",
"image",
"."
] | train | https://github.com/lablup/backend.ai-common/blob/20b3a2551ee5bb3b88e7836471bc244a70ad0ae6/src/ai/backend/common/types.py#L249-L269 |
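The resolution above walks a chain of aliases with a hard cap of 8 hops; the while/else raises only if the cap is exhausted before reaching a key with no further alias. A synchronous, self-contained sketch of the same bounded walk, using a plain dict in place of the etcd images/_aliases/ namespace (the alias names are made up):

# Illustrative stand-in for the etcd-backed alias table.
aliases = {
    "python": "python:3.6",
    "python:3.6": "python:3.6-ubuntu",
}

def resolve(alias_key, table, max_hops=8):
    target = None
    hops = 0
    while hops < max_hops:
        prev = alias_key
        alias_key = table.get(alias_key)   # None ends the chain
        if alias_key is None:
            target = prev
            break
        hops += 1
    else:
        raise RuntimeError("Could not resolve the given image name!")
    return target

print(resolve("python", aliases))   # python:3.6-ubuntu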
BlueBrain/nat | nat/ontoServ.py | getOntoCategory | def getOntoCategory(curie, alwaysFetch=False):
"""
Accessing web-based ontology service is too long, so we cache the
information in a pickle file and query the services only if the info
has not already been cached.
"""
fileName = os.path.join(os.path.dirname(__file__), "ontoCategories.bin")
if not alwaysFetch:
try:
with open(fileName, "rb") as catFile:
ontoCat = pickle.load(catFile)
if curie in ontoCat:
return ontoCat[curie]
except:
ontoCat = {}
base = bases["KS"]
query = base + "/vocabulary/id/" + curie
response = requests.get(query)
if not response.ok:
ontoCat[curie] = []
else:
try:
concepts = response.json()
except ValueError:
print(query)
print(response)
raise
if len(concepts["categories"]):
ontoCat[curie] = concepts["categories"]
else:
ontoCat[curie] = []
try:
with open(fileName, "wb") as catFile:
pickle.dump(ontoCat, catFile)
except:
pass
return ontoCat[curie] | python | def getOntoCategory(curie, alwaysFetch=False):
"""
Accessing web-based ontology service is too long, so we cache the
information in a pickle file and query the services only if the info
has not already been cached.
"""
fileName = os.path.join(os.path.dirname(__file__), "ontoCategories.bin")
if not alwaysFetch:
try:
with open(fileName, "rb") as catFile:
ontoCat = pickle.load(catFile)
if curie in ontoCat:
return ontoCat[curie]
except:
ontoCat = {}
base = bases["KS"]
query = base + "/vocabulary/id/" + curie
response = requests.get(query)
if not response.ok:
ontoCat[curie] = []
else:
try:
concepts = response.json()
except ValueError:
print(query)
print(response)
raise
if len(concepts["categories"]):
ontoCat[curie] = concepts["categories"]
else:
ontoCat[curie] = []
try:
with open(fileName, "wb") as catFile:
pickle.dump(ontoCat, catFile)
except:
pass
return ontoCat[curie] | [
"def",
"getOntoCategory",
"(",
"curie",
",",
"alwaysFetch",
"=",
"False",
")",
":",
"fileName",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
",",
"\"ontoCategories.bin\"",
")",
"if",
"not",
"alwaysFetch",
":",
"try",
":",
"with",
"open",
"(",
"fileName",
",",
"\"rb\"",
")",
"as",
"catFile",
":",
"ontoCat",
"=",
"pickle",
".",
"load",
"(",
"catFile",
")",
"if",
"curie",
"in",
"ontoCat",
":",
"return",
"ontoCat",
"[",
"curie",
"]",
"except",
":",
"ontoCat",
"=",
"{",
"}",
"base",
"=",
"bases",
"[",
"\"KS\"",
"]",
"query",
"=",
"base",
"+",
"\"/vocabulary/id/\"",
"+",
"curie",
"response",
"=",
"requests",
".",
"get",
"(",
"query",
")",
"if",
"not",
"response",
".",
"ok",
":",
"ontoCat",
"[",
"curie",
"]",
"=",
"[",
"]",
"else",
":",
"try",
":",
"concepts",
"=",
"response",
".",
"json",
"(",
")",
"except",
"ValueError",
":",
"print",
"(",
"query",
")",
"print",
"(",
"response",
")",
"raise",
"if",
"len",
"(",
"concepts",
"[",
"\"categories\"",
"]",
")",
":",
"ontoCat",
"[",
"curie",
"]",
"=",
"concepts",
"[",
"\"categories\"",
"]",
"else",
":",
"ontoCat",
"[",
"curie",
"]",
"=",
"[",
"]",
"try",
":",
"with",
"open",
"(",
"fileName",
",",
"\"wb\"",
")",
"as",
"catFile",
":",
"pickle",
".",
"dump",
"(",
"ontoCat",
",",
"catFile",
")",
"except",
":",
"pass",
"return",
"ontoCat",
"[",
"curie",
"]"
] | Accessing web-based ontology service is too long, so we cache the
information in a pickle file and query the services only if the info
has not already been cached. | [
"Accessing",
"web",
"-",
"based",
"ontology",
"service",
"is",
"too",
"long",
"so",
"we",
"cache",
"the",
"information",
"in",
"a",
"pickle",
"file",
"and",
"query",
"the",
"services",
"only",
"if",
"the",
"info",
"has",
"not",
"already",
"been",
"cached",
"."
] | train | https://github.com/BlueBrain/nat/blob/0934f06e48e6efedf55a9617b15becae0d7b277c/nat/ontoServ.py#L16-L60 |
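A minimal usage sketch, assuming the module is importable as nat.ontoServ (matching the file path above); the CURIE is a made-up example. The first call for a given CURIE queries the remote vocabulary service (bases["KS"] in the code), and a repeat call is normally answered from the pickled ontoCategories.bin cache next to the module:

from nat.ontoServ import getOntoCategory

cats = getOntoCategory("UBERON:0000955")   # hypothetical CURIE
print(cats)                                # list of category strings, or []

# A repeat call for the same CURIE is normally served from the local cache.
cats_again = getOntoCategory("UBERON:0000955")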
anteater/anteater | anteater/main.py | _init_logging | def _init_logging(anteater_log):
""" Setup root logger for package """
LOG.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - '
'%(levelname)s - %(message)s')
ch.setFormatter(formatter)
ch.setLevel(logging.DEBUG)
# create the directory if it does not exist
path = os.path.dirname(anteater_log)
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
handler = logging.FileHandler(anteater_log)
handler.setFormatter(formatter)
handler.setLevel(logging.DEBUG)
del logging.root.handlers[:]
logging.root.addHandler(ch)
logging.root.addHandler(handler) | python | def _init_logging(anteater_log):
""" Setup root logger for package """
LOG.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - '
'%(levelname)s - %(message)s')
ch.setFormatter(formatter)
ch.setLevel(logging.DEBUG)
# create the directory if it does not exist
path = os.path.dirname(anteater_log)
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
handler = logging.FileHandler(anteater_log)
handler.setFormatter(formatter)
handler.setLevel(logging.DEBUG)
del logging.root.handlers[:]
logging.root.addHandler(ch)
logging.root.addHandler(handler) | [
"def",
"_init_logging",
"(",
"anteater_log",
")",
":",
"LOG",
".",
"setLevel",
"(",
"logging",
".",
"DEBUG",
")",
"ch",
"=",
"logging",
".",
"StreamHandler",
"(",
")",
"formatter",
"=",
"logging",
".",
"Formatter",
"(",
"'%(asctime)s - %(name)s - '",
"'%(levelname)s - %(message)s'",
")",
"ch",
".",
"setFormatter",
"(",
"formatter",
")",
"ch",
".",
"setLevel",
"(",
"logging",
".",
"DEBUG",
")",
"# create the directory if it does not exist",
"path",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"anteater_log",
")",
"try",
":",
"os",
".",
"makedirs",
"(",
"path",
")",
"except",
"OSError",
"as",
"e",
":",
"if",
"e",
".",
"errno",
"!=",
"errno",
".",
"EEXIST",
":",
"raise",
"handler",
"=",
"logging",
".",
"FileHandler",
"(",
"anteater_log",
")",
"handler",
".",
"setFormatter",
"(",
"formatter",
")",
"handler",
".",
"setLevel",
"(",
"logging",
".",
"DEBUG",
")",
"del",
"logging",
".",
"root",
".",
"handlers",
"[",
":",
"]",
"logging",
".",
"root",
".",
"addHandler",
"(",
"ch",
")",
"logging",
".",
"root",
".",
"addHandler",
"(",
"handler",
")"
] | Setup root logger for package | [
"Setup",
"root",
"logger",
"for",
"package"
] | train | https://github.com/anteater/anteater/blob/a980adbed8563ef92494f565acd371e91f50f155/anteater/main.py#L43-L66 |
anteater/anteater | anteater/main.py | check_dir | def check_dir():
""" Creates a directory for scan reports """
try:
os.makedirs(reports_dir)
logger.info('Creating reports directory: %s', reports_dir)
except OSError as e:
if e.errno != errno.EEXIST:
raise | python | def check_dir():
""" Creates a directory for scan reports """
try:
os.makedirs(reports_dir)
logger.info('Creating reports directory: %s', reports_dir)
except OSError as e:
if e.errno != errno.EEXIST:
raise | [
"def",
"check_dir",
"(",
")",
":",
"try",
":",
"os",
".",
"makedirs",
"(",
"reports_dir",
")",
"logger",
".",
"info",
"(",
"'Creating reports directory: %s'",
",",
"reports_dir",
")",
"except",
"OSError",
"as",
"e",
":",
"if",
"e",
".",
"errno",
"!=",
"errno",
".",
"EEXIST",
":",
"raise"
] | Creates a directory for scan reports | [
"Creates",
"a",
"directory",
"for",
"scan",
"reports"
] | train | https://github.com/anteater/anteater/blob/a980adbed8563ef92494f565acd371e91f50f155/anteater/main.py#L69-L76 |
anteater/anteater | anteater/main.py | main | def main():
""" Main function, mostly for passing arguments """
_init_logging(config.get('config', 'anteater_log'))
check_dir()
arguments = docopt(__doc__, version=__version__)
if arguments['<patchset>']:
prepare_patchset(arguments['<project>'], arguments['<patchset>'],
arguments['--binaries'], arguments['--ips'], arguments['--urls'])
elif arguments['<project_path>']:
prepare_project(arguments['<project>'], arguments['<project_path>'],
arguments['--binaries'], arguments['--ips'], arguments['--urls']) | python | def main():
""" Main function, mostly for passing arguments """
_init_logging(config.get('config', 'anteater_log'))
check_dir()
arguments = docopt(__doc__, version=__version__)
if arguments['<patchset>']:
prepare_patchset(arguments['<project>'], arguments['<patchset>'],
arguments['--binaries'], arguments['--ips'], arguments['--urls'])
elif arguments['<project_path>']:
prepare_project(arguments['<project>'], arguments['<project_path>'],
arguments['--binaries'], arguments['--ips'], arguments['--urls']) | [
"def",
"main",
"(",
")",
":",
"_init_logging",
"(",
"config",
".",
"get",
"(",
"'config'",
",",
"'anteater_log'",
")",
")",
"check_dir",
"(",
")",
"arguments",
"=",
"docopt",
"(",
"__doc__",
",",
"version",
"=",
"__version__",
")",
"if",
"arguments",
"[",
"'<patchset>'",
"]",
":",
"prepare_patchset",
"(",
"arguments",
"[",
"'<project>'",
"]",
",",
"arguments",
"[",
"'<patchset>'",
"]",
",",
"arguments",
"[",
"'--binaries'",
"]",
",",
"arguments",
"[",
"'--ips'",
"]",
",",
"arguments",
"[",
"'--urls'",
"]",
")",
"elif",
"arguments",
"[",
"'<project_path>'",
"]",
":",
"prepare_project",
"(",
"arguments",
"[",
"'<project>'",
"]",
",",
"arguments",
"[",
"'<project_path>'",
"]",
",",
"arguments",
"[",
"'--binaries'",
"]",
",",
"arguments",
"[",
"'--ips'",
"]",
",",
"arguments",
"[",
"'--urls'",
"]",
")"
] | Main function, mostly for passing arguments | [
"Main",
"function",
"mostly",
"for",
"passing",
"arguments"
] | train | https://github.com/anteater/anteater/blob/a980adbed8563ef92494f565acd371e91f50f155/anteater/main.py#L79-L90 |
hammerlab/stanity | stanity/fit.py | fit | def fit(model_code, *args, **kwargs):
"""
Fit a Stan model. Caches the compiled model.
*args and **kwargs are passed to the pystan.stan function.
Arguments you most likely want to pass: data, init, iter, chains.
Unlike pystan.stan, if the n_jobs kwarg is not specified, it defaults to
-1.
Parameters
-------------------
model_code : string
Stan model
Returns
-------------------
pystan StanFit4Model instance : the fit model
"""
kwargs = dict(kwargs)
kwargs['model_code'] = model_code
if 'n_jobs' not in kwargs:
kwargs['n_jobs'] = -1
if model_code in FIT_CACHE:
print("Reusing model.")
kwargs['fit'] = FIT_CACHE[model_code]
else:
print("NOT reusing model.")
start = time.time()
FIT_CACHE[model_code] = pystan.stan(*args, **kwargs)
print("Ran in %0.3f sec." % (time.time() - start))
return FIT_CACHE[model_code] | python | def fit(model_code, *args, **kwargs):
"""
Fit a Stan model. Caches the compiled model.
*args and **kwargs are passed to the pystan.stan function.
Arguments you most likely want to pass: data, init, iter, chains.
Unlike pystan.stan, if the n_jobs kwarg is not specified, it defaults to
-1.
Parameters
-------------------
model_code : string
Stan model
Returns
-------------------
pystan StanFit4Model instance : the fit model
"""
kwargs = dict(kwargs)
kwargs['model_code'] = model_code
if 'n_jobs' not in kwargs:
kwargs['n_jobs'] = -1
if model_code in FIT_CACHE:
print("Reusing model.")
kwargs['fit'] = FIT_CACHE[model_code]
else:
print("NOT reusing model.")
start = time.time()
FIT_CACHE[model_code] = pystan.stan(*args, **kwargs)
print("Ran in %0.3f sec." % (time.time() - start))
return FIT_CACHE[model_code] | [
"def",
"fit",
"(",
"model_code",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"=",
"dict",
"(",
"kwargs",
")",
"kwargs",
"[",
"'model_code'",
"]",
"=",
"model_code",
"if",
"'n_jobs'",
"not",
"in",
"kwargs",
":",
"kwargs",
"[",
"'n_jobs'",
"]",
"=",
"-",
"1",
"if",
"model_code",
"in",
"FIT_CACHE",
":",
"print",
"(",
"\"Reusing model.\"",
")",
"kwargs",
"[",
"'fit'",
"]",
"=",
"FIT_CACHE",
"[",
"model_code",
"]",
"else",
":",
"print",
"(",
"\"NOT reusing model.\"",
")",
"start",
"=",
"time",
".",
"time",
"(",
")",
"FIT_CACHE",
"[",
"model_code",
"]",
"=",
"pystan",
".",
"stan",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"print",
"(",
"\"Ran in %0.3f sec.\"",
"%",
"(",
"time",
".",
"time",
"(",
")",
"-",
"start",
")",
")",
"return",
"FIT_CACHE",
"[",
"model_code",
"]"
] | Fit a Stan model. Caches the compiled model.
*args and **kwargs are passed to the pystan.stan function.
Arguments you most likely want to pass: data, init, iter, chains.
Unlike pystan.stan, if the n_jobs kwarg is not specified, it defaults to
-1.
Parameters
-------------------
model_code : string
Stan model
Returns
-------------------
pystan StanFit4Model instance : the fit model | [
"Fit",
"a",
"Stan",
"model",
".",
"Caches",
"the",
"compiled",
"model",
"."
] | train | https://github.com/hammerlab/stanity/blob/6c36abc207c4ce94f78968501dab839a56f35a41/stanity/fit.py#L6-L39 |
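A hedged usage sketch of the caching wrapper above, assuming fit is exposed at the stanity package top level; the Stan program and data are invented. The second call with identical model_code reuses the compiled model through the fit= argument to pystan.stan, as the cache logic in the function shows:

import stanity

model_code = """
data { int<lower=0> N; real y[N]; }
parameters { real mu; }
model { y ~ normal(mu, 1); }
"""

fit1 = stanity.fit(model_code, data={'N': 3, 'y': [1.0, 2.0, 3.0]},
                   iter=1000, chains=2)   # compiles, then caches
fit2 = stanity.fit(model_code, data={'N': 3, 'y': [4.0, 5.0, 6.0]},
                   iter=1000, chains=2)   # prints "Reusing model."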
rainwoodman/kdcount | kdcount/__init__.py | KDNode.enumiter | def enumiter(self, other, rmax, bunch=100000):
""" cross correlate with other, for all pairs
closer than rmax, iterate.
for r, i, j in A.enumiter(...):
...
where r is the distance, i and j are the original
input array index of the data.
This uses a thread to convert from KDNode.enum.
"""
def feeder(process):
self.enum(other, rmax, process, bunch)
for r, i, j in makeiter(feeder):
yield r, i, j | python | def enumiter(self, other, rmax, bunch=100000):
""" cross correlate with other, for all pairs
closer than rmax, iterate.
for r, i, j in A.enumiter(...):
...
where r is the distance, i and j are the original
input array index of the data.
This uses a thread to convert from KDNode.enum.
"""
def feeder(process):
self.enum(other, rmax, process, bunch)
for r, i, j in makeiter(feeder):
yield r, i, j | [
"def",
"enumiter",
"(",
"self",
",",
"other",
",",
"rmax",
",",
"bunch",
"=",
"100000",
")",
":",
"def",
"feeder",
"(",
"process",
")",
":",
"self",
".",
"enum",
"(",
"other",
",",
"rmax",
",",
"process",
",",
"bunch",
")",
"for",
"r",
",",
"i",
",",
"j",
"in",
"makeiter",
"(",
"feeder",
")",
":",
"yield",
"r",
",",
"i",
",",
"j"
] | cross correlate with other, for all pairs
closer than rmax, iterate.
for r, i, j in A.enumiter(...):
...
where r is the distance, i and j are the original
input array index of the data.
This uses a thread to convert from KDNode.enum. | [
"cross",
"correlate",
"with",
"other",
"for",
"all",
"pairs",
"closer",
"than",
"rmax",
"iterate",
"."
] | train | https://github.com/rainwoodman/kdcount/blob/483548f6d27a4f245cd5d98880b5f4edd6cc8dc1/kdcount/__init__.py#L13-L28 |
rainwoodman/kdcount | kdcount/__init__.py | KDNode.enum | def enum(self, other, rmax, process=None, bunch=100000, **kwargs):
""" cross correlate with other, for all pairs
closer than rmax, iterate.
>>> def process(r, i, j, **kwargs):
>>> ...
>>> A.enum(... process, **kwargs):
>>> ...
where r is the distance, i and j are the original
input array index of the data. arbitrary args can be passed
to process via kwargs.
"""
rall = None
if process is None:
rall = [numpy.empty(0, 'f8')]
iall = [numpy.empty(0, 'intp')]
jall = [numpy.empty(0, 'intp')]
def process(r1, i1, j1, **kwargs):
rall[0] = numpy.append(rall[0], r1)
iall[0] = numpy.append(iall[0], i1)
jall[0] = numpy.append(jall[0], j1)
_core.KDNode.enum(self, other, rmax, process, bunch, **kwargs)
if rall is not None:
return rall[0], iall[0], jall[0]
else:
return None | python | def enum(self, other, rmax, process=None, bunch=100000, **kwargs):
""" cross correlate with other, for all pairs
closer than rmax, iterate.
>>> def process(r, i, j, **kwargs):
>>> ...
>>> A.enum(... process, **kwargs):
>>> ...
where r is the distance, i and j are the original
input array index of the data. arbitrary args can be passed
to process via kwargs.
"""
rall = None
if process is None:
rall = [numpy.empty(0, 'f8')]
iall = [numpy.empty(0, 'intp')]
jall = [numpy.empty(0, 'intp')]
def process(r1, i1, j1, **kwargs):
rall[0] = numpy.append(rall[0], r1)
iall[0] = numpy.append(iall[0], i1)
jall[0] = numpy.append(jall[0], j1)
_core.KDNode.enum(self, other, rmax, process, bunch, **kwargs)
if rall is not None:
return rall[0], iall[0], jall[0]
else:
return None | [
"def",
"enum",
"(",
"self",
",",
"other",
",",
"rmax",
",",
"process",
"=",
"None",
",",
"bunch",
"=",
"100000",
",",
"*",
"*",
"kwargs",
")",
":",
"rall",
"=",
"None",
"if",
"process",
"is",
"None",
":",
"rall",
"=",
"[",
"numpy",
".",
"empty",
"(",
"0",
",",
"'f8'",
")",
"]",
"iall",
"=",
"[",
"numpy",
".",
"empty",
"(",
"0",
",",
"'intp'",
")",
"]",
"jall",
"=",
"[",
"numpy",
".",
"empty",
"(",
"0",
",",
"'intp'",
")",
"]",
"def",
"process",
"(",
"r1",
",",
"i1",
",",
"j1",
",",
"*",
"*",
"kwargs",
")",
":",
"rall",
"[",
"0",
"]",
"=",
"numpy",
".",
"append",
"(",
"rall",
"[",
"0",
"]",
",",
"r1",
")",
"iall",
"[",
"0",
"]",
"=",
"numpy",
".",
"append",
"(",
"iall",
"[",
"0",
"]",
",",
"i1",
")",
"jall",
"[",
"0",
"]",
"=",
"numpy",
".",
"append",
"(",
"jall",
"[",
"0",
"]",
",",
"j1",
")",
"_core",
".",
"KDNode",
".",
"enum",
"(",
"self",
",",
"other",
",",
"rmax",
",",
"process",
",",
"bunch",
",",
"*",
"*",
"kwargs",
")",
"if",
"rall",
"is",
"not",
"None",
":",
"return",
"rall",
"[",
"0",
"]",
",",
"iall",
"[",
"0",
"]",
",",
"jall",
"[",
"0",
"]",
"else",
":",
"return",
"None"
] | cross correlate with other, for all pairs
closer than rmax, iterate.
>>> def process(r, i, j, **kwargs):
>>> ...
>>> A.enum(... process, **kwargs):
>>> ...
where r is the distance, i and j are the original
input array index of the data. arbitrary args can be passed
to process via kwargs. | [
"cross",
"correlate",
"with",
"other",
"for",
"all",
"pairs",
"closer",
"than",
"rmax",
"iterate",
"."
] | train | https://github.com/rainwoodman/kdcount/blob/483548f6d27a4f245cd5d98880b5f4edd6cc8dc1/kdcount/__init__.py#L30-L59 |
rainwoodman/kdcount | kdcount/__init__.py | KDNode.count | def count(self, other, r, attrs=None, info={}):
""" Gray & Moore based fast dual tree counting.
r is the edge of bins:
-inf or r[i-1] < count[i] <= r[i]
attrs: None or tuple
if tuple, attrs = (attr_self, attr_other)
Returns: count,
count, weight of attrs is not None
"""
r = numpy.array(r, dtype='f8')
return _core.KDNode.count(self, other, r, attrs, info=info) | python | def count(self, other, r, attrs=None, info={}):
""" Gray & Moore based fast dual tree counting.
r is the edge of bins:
-inf or r[i-1] < count[i] <= r[i]
attrs: None or tuple
if tuple, attrs = (attr_self, attr_other)
Returns: count,
count, weight of attrs is not None
"""
r = numpy.array(r, dtype='f8')
return _core.KDNode.count(self, other, r, attrs, info=info) | [
"def",
"count",
"(",
"self",
",",
"other",
",",
"r",
",",
"attrs",
"=",
"None",
",",
"info",
"=",
"{",
"}",
")",
":",
"r",
"=",
"numpy",
".",
"array",
"(",
"r",
",",
"dtype",
"=",
"'f8'",
")",
"return",
"_core",
".",
"KDNode",
".",
"count",
"(",
"self",
",",
"other",
",",
"r",
",",
"attrs",
",",
"info",
"=",
"info",
")"
] | Gray & Moore based fast dual tree counting.
r is the edge of bins:
-inf or r[i-1] < count[i] <= r[i]
attrs: None or tuple
if tuple, attrs = (attr_self, attr_other)
Returns: count,
count, weight of attrs is not None | [
"Gray",
"&",
"Moore",
"based",
"fast",
"dual",
"tree",
"counting",
"."
] | train | https://github.com/rainwoodman/kdcount/blob/483548f6d27a4f245cd5d98880b5f4edd6cc8dc1/kdcount/__init__.py#L61-L77 |
rainwoodman/kdcount | kdcount/__init__.py | KDNode.fof | def fof(self, linkinglength, out=None, method='splay'):
""" Friend-of-Friend clustering with linking length.
Returns: the label
"""
if out is None:
out = numpy.empty(self.size, dtype='intp')
return _core.KDNode.fof(self, linkinglength, out, method) | python | def fof(self, linkinglength, out=None, method='splay'):
""" Friend-of-Friend clustering with linking length.
Returns: the label
"""
if out is None:
out = numpy.empty(self.size, dtype='intp')
return _core.KDNode.fof(self, linkinglength, out, method) | [
"def",
"fof",
"(",
"self",
",",
"linkinglength",
",",
"out",
"=",
"None",
",",
"method",
"=",
"'splay'",
")",
":",
"if",
"out",
"is",
"None",
":",
"out",
"=",
"numpy",
".",
"empty",
"(",
"self",
".",
"size",
",",
"dtype",
"=",
"'intp'",
")",
"return",
"_core",
".",
"KDNode",
".",
"fof",
"(",
"self",
",",
"linkinglength",
",",
"out",
",",
"method",
")"
] | Friend-of-Friend clustering with linking length.
Returns: the label | [
"Friend",
"-",
"of",
"-",
"Friend",
"clustering",
"with",
"linking",
"length",
"."
] | train | https://github.com/rainwoodman/kdcount/blob/483548f6d27a4f245cd5d98880b5f4edd6cc8dc1/kdcount/__init__.py#L79-L86 |
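A usage sketch for the friend-of-friend labelling above. This excerpt only shows KDNode methods, so the KDTree construction below is an assumption about the rest of the package and may differ:

import numpy
import kdcount

pos = numpy.random.uniform(size=(1000, 3))
root = kdcount.KDTree(pos).root          # assumed constructor, not shown in this excerpt

labels = root.fof(0.05)                  # label groups linked within 0.05
print(len(numpy.unique(labels)), "groups")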
rainwoodman/kdcount | kdcount/__init__.py | KDNode.integrate | def integrate(self, min, max, attr=None, info={}):
""" Calculate the total number of points between [min, max).
If attr is given, also calculate the sum of the weight.
This is a M log(N) operation, where M is the number of min/max
queries and N is number of points.
"""
if numpy.isscalar(min):
min = [min for i in range(self.ndims)]
if numpy.isscalar(max):
max = [max for i in range(self.ndims)]
min = numpy.array(min, dtype='f8', order='C')
max = numpy.array(max, dtype='f8', order='C')
if (min).shape[-1] != self.ndims:
raise ValueError("dimension of min does not match Node")
if (max).shape[-1] != self.ndims:
raise ValueError("dimension of max does not match Node")
min, max = broadcast_arrays(min, max)
return _core.KDNode.integrate(self, min, max, attr, info) | python | def integrate(self, min, max, attr=None, info={}):
""" Calculate the total number of points between [min, max).
If attr is given, also calculate the sum of the weight.
This is a M log(N) operation, where M is the number of min/max
queries and N is number of points.
"""
if numpy.isscalar(min):
min = [min for i in range(self.ndims)]
if numpy.isscalar(max):
max = [max for i in range(self.ndims)]
min = numpy.array(min, dtype='f8', order='C')
max = numpy.array(max, dtype='f8', order='C')
if (min).shape[-1] != self.ndims:
raise ValueError("dimension of min does not match Node")
if (max).shape[-1] != self.ndims:
raise ValueError("dimension of max does not match Node")
min, max = broadcast_arrays(min, max)
return _core.KDNode.integrate(self, min, max, attr, info) | [
"def",
"integrate",
"(",
"self",
",",
"min",
",",
"max",
",",
"attr",
"=",
"None",
",",
"info",
"=",
"{",
"}",
")",
":",
"if",
"numpy",
".",
"isscalar",
"(",
"min",
")",
":",
"min",
"=",
"[",
"min",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"ndims",
")",
"]",
"if",
"numpy",
".",
"isscalar",
"(",
"max",
")",
":",
"max",
"=",
"[",
"max",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"ndims",
")",
"]",
"min",
"=",
"numpy",
".",
"array",
"(",
"min",
",",
"dtype",
"=",
"'f8'",
",",
"order",
"=",
"'C'",
")",
"max",
"=",
"numpy",
".",
"array",
"(",
"max",
",",
"dtype",
"=",
"'f8'",
",",
"order",
"=",
"'C'",
")",
"if",
"(",
"min",
")",
".",
"shape",
"[",
"-",
"1",
"]",
"!=",
"self",
".",
"ndims",
":",
"raise",
"ValueError",
"(",
"\"dimension of min does not match Node\"",
")",
"if",
"(",
"max",
")",
".",
"shape",
"[",
"-",
"1",
"]",
"!=",
"self",
".",
"ndims",
":",
"raise",
"ValueError",
"(",
"\"dimension of max does not match Node\"",
")",
"min",
",",
"max",
"=",
"broadcast_arrays",
"(",
"min",
",",
"max",
")",
"return",
"_core",
".",
"KDNode",
".",
"integrate",
"(",
"self",
",",
"min",
",",
"max",
",",
"attr",
",",
"info",
")"
] | Calculate the total number of points between [min, max).
If attr is given, also calculate the sum of the weight.
This is a M log(N) operation, where M is the number of min/max
queries and N is number of points. | [
"Calculate",
"the",
"total",
"number",
"of",
"points",
"between",
"[",
"min",
"max",
")",
"."
] | train | https://github.com/rainwoodman/kdcount/blob/483548f6d27a4f245cd5d98880b5f4edd6cc8dc1/kdcount/__init__.py#L88-L110 |
rainwoodman/kdcount | kdcount/__init__.py | KDNode.make_forest | def make_forest(self, chunksize):
""" Divide a tree branch to a forest,
each subtree of size at most chunksize """
heap = []
heappush(heap, (-self.size, self))
while True:
w, x = heappop(heap)
if w == 0:
heappush(heap, (0, x))
break
if x.less is None \
or (x.size < chunksize):
heappush(heap, (0, x))
continue
heappush(heap, (x.less.size, x.less))
heappush(heap, (x.greater.size, x.greater))
for w, x in heap:
yield x | python | def make_forest(self, chunksize):
""" Divide a tree branch to a forest,
each subtree of size at most chunksize """
heap = []
heappush(heap, (-self.size, self))
while True:
w, x = heappop(heap)
if w == 0:
heappush(heap, (0, x))
break
if x.less is None \
or (x.size < chunksize):
heappush(heap, (0, x))
continue
heappush(heap, (x.less.size, x.less))
heappush(heap, (x.greater.size, x.greater))
for w, x in heap:
yield x | [
"def",
"make_forest",
"(",
"self",
",",
"chunksize",
")",
":",
"heap",
"=",
"[",
"]",
"heappush",
"(",
"heap",
",",
"(",
"-",
"self",
".",
"size",
",",
"self",
")",
")",
"while",
"True",
":",
"w",
",",
"x",
"=",
"heappop",
"(",
"heap",
")",
"if",
"w",
"==",
"0",
":",
"heappush",
"(",
"heap",
",",
"(",
"0",
",",
"x",
")",
")",
"break",
"if",
"x",
".",
"less",
"is",
"None",
"or",
"(",
"x",
".",
"size",
"<",
"chunksize",
")",
":",
"heappush",
"(",
"heap",
",",
"(",
"0",
",",
"x",
")",
")",
"continue",
"heappush",
"(",
"heap",
",",
"(",
"x",
".",
"less",
".",
"size",
",",
"x",
".",
"less",
")",
")",
"heappush",
"(",
"heap",
",",
"(",
"x",
".",
"greater",
".",
"size",
",",
"x",
".",
"greater",
")",
")",
"for",
"w",
",",
"x",
"in",
"heap",
":",
"yield",
"x"
] | Divide a tree branch to a forest,
each subtree of size at most chunksize | [
"Divide",
"a",
"tree",
"branch",
"to",
"a",
"forest",
"each",
"subtree",
"of",
"size",
"at",
"most",
"chunksize"
] | train | https://github.com/rainwoodman/kdcount/blob/483548f6d27a4f245cd5d98880b5f4edd6cc8dc1/kdcount/__init__.py#L112-L129 |
Numigi/gitoo | src/cli.py | _install_one | def _install_one(
repo_url, branch, destination, commit='', patches=None,
exclude_modules=None, include_modules=None, base=False, work_directory=''
):
""" Install a third party odoo add-on
:param string repo_url: url of the repo that contains the patch.
:param string branch: name of the branch to checkout.
:param string destination: the folder where the add-on should end up at.
    :param string commit: Optional commit rev to check out. If given, it takes precedence over the branch.
:param string work_directory: the path to the directory of the yaml file.
:param list patches: Optional list of patches to apply.
"""
patches = patches or []
patches = [
core.FilePatch(file=patch['file'], work_directory=work_directory)
if 'file' in patch else core.Patch(**patch)
for patch in patches
]
addon_cls = core.Base if base else core.Addon
addon = addon_cls(
repo_url, branch, commit=commit, patches=patches,
exclude_modules=exclude_modules, include_modules=include_modules)
addon.install(destination) | python | def _install_one(
repo_url, branch, destination, commit='', patches=None,
exclude_modules=None, include_modules=None, base=False, work_directory=''
):
""" Install a third party odoo add-on
:param string repo_url: url of the repo that contains the patch.
:param string branch: name of the branch to checkout.
:param string destination: the folder where the add-on should end up at.
    :param string commit: Optional commit rev to check out. If given, it takes precedence over the branch.
:param string work_directory: the path to the directory of the yaml file.
:param list patches: Optional list of patches to apply.
"""
patches = patches or []
patches = [
core.FilePatch(file=patch['file'], work_directory=work_directory)
if 'file' in patch else core.Patch(**patch)
for patch in patches
]
addon_cls = core.Base if base else core.Addon
addon = addon_cls(
repo_url, branch, commit=commit, patches=patches,
exclude_modules=exclude_modules, include_modules=include_modules)
addon.install(destination) | [
"def",
"_install_one",
"(",
"repo_url",
",",
"branch",
",",
"destination",
",",
"commit",
"=",
"''",
",",
"patches",
"=",
"None",
",",
"exclude_modules",
"=",
"None",
",",
"include_modules",
"=",
"None",
",",
"base",
"=",
"False",
",",
"work_directory",
"=",
"''",
")",
":",
"patches",
"=",
"patches",
"or",
"[",
"]",
"patches",
"=",
"[",
"core",
".",
"FilePatch",
"(",
"file",
"=",
"patch",
"[",
"'file'",
"]",
",",
"work_directory",
"=",
"work_directory",
")",
"if",
"'file'",
"in",
"patch",
"else",
"core",
".",
"Patch",
"(",
"*",
"*",
"patch",
")",
"for",
"patch",
"in",
"patches",
"]",
"addon_cls",
"=",
"core",
".",
"Base",
"if",
"base",
"else",
"core",
".",
"Addon",
"addon",
"=",
"addon_cls",
"(",
"repo_url",
",",
"branch",
",",
"commit",
"=",
"commit",
",",
"patches",
"=",
"patches",
",",
"exclude_modules",
"=",
"exclude_modules",
",",
"include_modules",
"=",
"include_modules",
")",
"addon",
".",
"install",
"(",
"destination",
")"
] | Install a third party odoo add-on
:param string repo_url: url of the repo that contains the patch.
:param string branch: name of the branch to checkout.
:param string destination: the folder where the add-on should end up at.
:param string commit: Optional commit rev to check out. If given, it takes precedence over the branch.
:param string work_directory: the path to the directory of the yaml file.
:param list patches: Optional list of patches to apply. | [
"Install",
"a",
"third",
"party",
"odoo",
"add",
"-",
"on"
] | train | https://github.com/Numigi/gitoo/blob/0921f5fb8a948021760bb0373a40f9fbe8a4a2e5/src/cli.py#L43-L66 |
Numigi/gitoo | src/cli.py | _install_all | def _install_all(destination='', conf_file=''):
"""Use the conf file to list all the third party Odoo add-ons that will be installed
and the patches that should be applied.
:param string destination: the folder where add-ons should end up at.
Default: pwd/3rd
    :param string conf_file: path to a conf file that describes the add-ons to install.
Default: pwd/third_party_addons.yaml
"""
dir_path = os.path.dirname(os.path.realpath(__file__))
destination = destination or os.path.join(dir_path, '..', '3rd')
conf_file = conf_file or os.path.join(dir_path, '..', "third_party_addons.yaml")
work_directory = os.path.dirname(os.path.realpath(conf_file))
with open(conf_file, "r") as conf_data:
data = yaml.load(conf_data)
for addons in data:
_install_one(
addons['url'],
addons['branch'],
os.path.abspath(destination),
commit=addons.get('commit'),
patches=addons.get('patches'),
exclude_modules=addons.get('excludes'),
include_modules=addons.get('includes'),
base=addons.get('base'),
work_directory=work_directory,
) | python | def _install_all(destination='', conf_file=''):
"""Use the conf file to list all the third party Odoo add-ons that will be installed
and the patches that should be applied.
:param string destination: the folder where add-ons should end up at.
Default: pwd/3rd
    :param string conf_file: path to a conf file that describes the add-ons to install.
Default: pwd/third_party_addons.yaml
"""
dir_path = os.path.dirname(os.path.realpath(__file__))
destination = destination or os.path.join(dir_path, '..', '3rd')
conf_file = conf_file or os.path.join(dir_path, '..', "third_party_addons.yaml")
work_directory = os.path.dirname(os.path.realpath(conf_file))
with open(conf_file, "r") as conf_data:
data = yaml.load(conf_data)
for addons in data:
_install_one(
addons['url'],
addons['branch'],
os.path.abspath(destination),
commit=addons.get('commit'),
patches=addons.get('patches'),
exclude_modules=addons.get('excludes'),
include_modules=addons.get('includes'),
base=addons.get('base'),
work_directory=work_directory,
) | [
"def",
"_install_all",
"(",
"destination",
"=",
"''",
",",
"conf_file",
"=",
"''",
")",
":",
"dir_path",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"realpath",
"(",
"__file__",
")",
")",
"destination",
"=",
"destination",
"or",
"os",
".",
"path",
".",
"join",
"(",
"dir_path",
",",
"'..'",
",",
"'3rd'",
")",
"conf_file",
"=",
"conf_file",
"or",
"os",
".",
"path",
".",
"join",
"(",
"dir_path",
",",
"'..'",
",",
"\"third_party_addons.yaml\"",
")",
"work_directory",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"realpath",
"(",
"conf_file",
")",
")",
"with",
"open",
"(",
"conf_file",
",",
"\"r\"",
")",
"as",
"conf_data",
":",
"data",
"=",
"yaml",
".",
"load",
"(",
"conf_data",
")",
"for",
"addons",
"in",
"data",
":",
"_install_one",
"(",
"addons",
"[",
"'url'",
"]",
",",
"addons",
"[",
"'branch'",
"]",
",",
"os",
".",
"path",
".",
"abspath",
"(",
"destination",
")",
",",
"commit",
"=",
"addons",
".",
"get",
"(",
"'commit'",
")",
",",
"patches",
"=",
"addons",
".",
"get",
"(",
"'patches'",
")",
",",
"exclude_modules",
"=",
"addons",
".",
"get",
"(",
"'excludes'",
")",
",",
"include_modules",
"=",
"addons",
".",
"get",
"(",
"'includes'",
")",
",",
"base",
"=",
"addons",
".",
"get",
"(",
"'base'",
")",
",",
"work_directory",
"=",
"work_directory",
",",
")"
] | Use the conf file to list all the third party Odoo add-ons that will be installed
and the patches that should be applied.
:param string destination: the folder where add-ons should end up at.
Default: pwd/3rd
:param string conf_file: path to a conf file that describes the add-ons to install.
Default: pwd/third_party_addons.yaml | [
"Use",
"the",
"conf",
"file",
"to",
"list",
"all",
"the",
"third",
"party",
"Odoo",
"add",
"-",
"ons",
"that",
"will",
"be",
"installed",
"and",
"the",
"patches",
"that",
"should",
"be",
"applied",
"."
] | train | https://github.com/Numigi/gitoo/blob/0921f5fb8a948021760bb0373a40f9fbe8a4a2e5/src/cli.py#L69-L96 |
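For reference, a sketch of the YAML structure _install_all expects, derived only from the keys the function reads (url, branch, and the optional commit, patches, excludes, includes, base); the repositories, branch and commit values are placeholders:

import yaml

sample_conf = """
- url: https://github.com/example-org/example-addons
  branch: "11.0"
- url: https://github.com/example-org/odoo-base
  branch: "11.0"
  commit: abc123def
  excludes:
    - unwanted_module
  base: true
"""
print(yaml.safe_load(sample_conf))   # list of dicts, one per add-on repo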
ISA-tools/biopy-isatab | bcbio/isatab/parser.py | find_lt | def find_lt(a, x):
"""Find rightmost value less than x"""
i = bisect.bisect_left(a, x)
if i:
return a[i-1]
raise ValueError | python | def find_lt(a, x):
"""Find rightmost value less than x"""
i = bisect.bisect_left(a, x)
if i:
return a[i-1]
raise ValueError | [
"def",
"find_lt",
"(",
"a",
",",
"x",
")",
":",
"i",
"=",
"bisect",
".",
"bisect_left",
"(",
"a",
",",
"x",
")",
"if",
"i",
":",
"return",
"a",
"[",
"i",
"-",
"1",
"]",
"raise",
"ValueError"
] | Find rightmost value less than x | [
"Find",
"rightmost",
"value",
"less",
"than",
"x"
] | train | https://github.com/ISA-tools/biopy-isatab/blob/fe42c98184d5eb5f28d8c0b7c3fc63a9b9729f27/bcbio/isatab/parser.py#L36-L41 |
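A quick standalone illustration of the bisect helper above; the function is repeated here so the snippet runs on its own, and the grid values are invented:

import bisect

def find_lt(a, x):
    """Find rightmost value less than x (same logic as above)."""
    i = bisect.bisect_left(a, x)
    if i:
        return a[i - 1]
    raise ValueError

grid = [1, 4, 6, 9]
print(find_lt(grid, 6))    # 4  -- rightmost value strictly less than 6
print(find_lt(grid, 10))   # 9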
ISA-tools/biopy-isatab | bcbio/isatab/parser.py | parse | def parse(isatab_ref):
"""Entry point to parse an ISA-Tab directory.
isatab_ref can point to a directory of ISA-Tab data, in which case we
search for the investigator file, or be a reference to the high level
investigation file.
"""
if os.path.isdir(isatab_ref):
fnames = glob.glob(os.path.join(isatab_ref, "i_*.txt")) + \
glob.glob(os.path.join(isatab_ref, "*.idf.txt"))
assert len(fnames) == 1
isatab_ref = fnames[0]
assert os.path.exists(isatab_ref), "Did not find investigation file: %s" % isatab_ref
i_parser = InvestigationParser()
with open(isatab_ref, "rU") as in_handle:
rec = i_parser.parse(in_handle)
s_parser = StudyAssayParser(isatab_ref)
rec = s_parser.parse(rec)
return rec | python | def parse(isatab_ref):
"""Entry point to parse an ISA-Tab directory.
isatab_ref can point to a directory of ISA-Tab data, in which case we
search for the investigator file, or be a reference to the high level
investigation file.
"""
if os.path.isdir(isatab_ref):
fnames = glob.glob(os.path.join(isatab_ref, "i_*.txt")) + \
glob.glob(os.path.join(isatab_ref, "*.idf.txt"))
assert len(fnames) == 1
isatab_ref = fnames[0]
assert os.path.exists(isatab_ref), "Did not find investigation file: %s" % isatab_ref
i_parser = InvestigationParser()
with open(isatab_ref, "rU") as in_handle:
rec = i_parser.parse(in_handle)
s_parser = StudyAssayParser(isatab_ref)
rec = s_parser.parse(rec)
return rec | [
"def",
"parse",
"(",
"isatab_ref",
")",
":",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"isatab_ref",
")",
":",
"fnames",
"=",
"glob",
".",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"isatab_ref",
",",
"\"i_*.txt\"",
")",
")",
"+",
"glob",
".",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"isatab_ref",
",",
"\"*.idf.txt\"",
")",
")",
"assert",
"len",
"(",
"fnames",
")",
"==",
"1",
"isatab_ref",
"=",
"fnames",
"[",
"0",
"]",
"assert",
"os",
".",
"path",
".",
"exists",
"(",
"isatab_ref",
")",
",",
"\"Did not find investigation file: %s\"",
"%",
"isatab_ref",
"i_parser",
"=",
"InvestigationParser",
"(",
")",
"with",
"open",
"(",
"isatab_ref",
",",
"\"rU\"",
")",
"as",
"in_handle",
":",
"rec",
"=",
"i_parser",
".",
"parse",
"(",
"in_handle",
")",
"s_parser",
"=",
"StudyAssayParser",
"(",
"isatab_ref",
")",
"rec",
"=",
"s_parser",
".",
"parse",
"(",
"rec",
")",
"return",
"rec"
] | Entry point to parse an ISA-Tab directory.
isatab_ref can point to a directory of ISA-Tab data, in which case we
search for the investigator file, or be a reference to the high level
investigation file. | [
"Entry",
"point",
"to",
"parse",
"an",
"ISA",
"-",
"Tab",
"directory",
".",
"isatab_ref",
"can",
"point",
"to",
"a",
"directory",
"of",
"ISA",
"-",
"Tab",
"data",
"in",
"which",
"case",
"we",
"search",
"for",
"the",
"investigator",
"file",
"or",
"be",
"a",
"reference",
"to",
"the",
"high",
"level",
"investigation",
"file",
"."
] | train | https://github.com/ISA-tools/biopy-isatab/blob/fe42c98184d5eb5f28d8c0b7c3fc63a9b9729f27/bcbio/isatab/parser.py#L51-L68 |
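A hedged usage sketch of the entry point above; the path is a placeholder, and the attributes touched (studies, metadata, nodes, assays) are the ones the parsers populate in this file:

from bcbio.isatab.parser import parse

rec = parse("/path/to/isatab_directory")        # placeholder path
for study in rec.studies:
    print(study.metadata.get("Study File Name"))
    print(len(study.nodes), "study nodes")
    for assay in study.assays:
        print(len(assay.nodes), "assay nodes")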
ISA-tools/biopy-isatab | bcbio/isatab/parser.py | InvestigationParser._parse_region | def _parse_region(self, rec, line_iter):
"""Parse a section of an ISA-Tab, assigning information to a supplied record.
"""
had_info = False
keyvals, section = self._parse_keyvals(line_iter)
if keyvals:
rec.metadata = keyvals[0]
while section and section[0] != "STUDY":
had_info = True
keyvals, next_section = self._parse_keyvals(line_iter)
attr_name = self._sections[section[0]]
if attr_name in self._nolist:
try:
keyvals = keyvals[0]
except IndexError:
keyvals = {}
setattr(rec, attr_name, keyvals)
section = next_section
return rec, had_info | python | def _parse_region(self, rec, line_iter):
"""Parse a section of an ISA-Tab, assigning information to a supplied record.
"""
had_info = False
keyvals, section = self._parse_keyvals(line_iter)
if keyvals:
rec.metadata = keyvals[0]
while section and section[0] != "STUDY":
had_info = True
keyvals, next_section = self._parse_keyvals(line_iter)
attr_name = self._sections[section[0]]
if attr_name in self._nolist:
try:
keyvals = keyvals[0]
except IndexError:
keyvals = {}
setattr(rec, attr_name, keyvals)
section = next_section
return rec, had_info | [
"def",
"_parse_region",
"(",
"self",
",",
"rec",
",",
"line_iter",
")",
":",
"had_info",
"=",
"False",
"keyvals",
",",
"section",
"=",
"self",
".",
"_parse_keyvals",
"(",
"line_iter",
")",
"if",
"keyvals",
":",
"rec",
".",
"metadata",
"=",
"keyvals",
"[",
"0",
"]",
"while",
"section",
"and",
"section",
"[",
"0",
"]",
"!=",
"\"STUDY\"",
":",
"had_info",
"=",
"True",
"keyvals",
",",
"next_section",
"=",
"self",
".",
"_parse_keyvals",
"(",
"line_iter",
")",
"attr_name",
"=",
"self",
".",
"_sections",
"[",
"section",
"[",
"0",
"]",
"]",
"if",
"attr_name",
"in",
"self",
".",
"_nolist",
":",
"try",
":",
"keyvals",
"=",
"keyvals",
"[",
"0",
"]",
"except",
"IndexError",
":",
"keyvals",
"=",
"{",
"}",
"setattr",
"(",
"rec",
",",
"attr_name",
",",
"keyvals",
")",
"section",
"=",
"next_section",
"return",
"rec",
",",
"had_info"
] | Parse a section of an ISA-Tab, assigning information to a supplied record. | [
"Parse",
"a",
"section",
"of",
"an",
"ISA",
"-",
"Tab",
"assigning",
"information",
"to",
"a",
"supplied",
"record",
"."
] | train | https://github.com/ISA-tools/biopy-isatab/blob/fe42c98184d5eb5f28d8c0b7c3fc63a9b9729f27/bcbio/isatab/parser.py#L109-L129 |
ISA-tools/biopy-isatab | bcbio/isatab/parser.py | InvestigationParser._line_iter | def _line_iter(self, in_handle):
"""Read tab delimited file, handling ISA-Tab special case headers.
"""
reader = csv.reader(in_handle, dialect="excel-tab")
for line in reader:
if len(line) > 0 and line[0]:
# check for section headers; all uppercase and a single value
if line[0].upper() == line[0] and "".join(line[1:]) == "":
line = [line[0]]
yield line | python | def _line_iter(self, in_handle):
"""Read tab delimited file, handling ISA-Tab special case headers.
"""
reader = csv.reader(in_handle, dialect="excel-tab")
for line in reader:
if len(line) > 0 and line[0]:
# check for section headers; all uppercase and a single value
if line[0].upper() == line[0] and "".join(line[1:]) == "":
line = [line[0]]
yield line | [
"def",
"_line_iter",
"(",
"self",
",",
"in_handle",
")",
":",
"reader",
"=",
"csv",
".",
"reader",
"(",
"in_handle",
",",
"dialect",
"=",
"\"excel-tab\"",
")",
"for",
"line",
"in",
"reader",
":",
"if",
"len",
"(",
"line",
")",
">",
"0",
"and",
"line",
"[",
"0",
"]",
":",
"# check for section headers; all uppercase and a single value",
"if",
"line",
"[",
"0",
"]",
".",
"upper",
"(",
")",
"==",
"line",
"[",
"0",
"]",
"and",
"\"\"",
".",
"join",
"(",
"line",
"[",
"1",
":",
"]",
")",
"==",
"\"\"",
":",
"line",
"=",
"[",
"line",
"[",
"0",
"]",
"]",
"yield",
"line"
] | Read tab delimited file, handling ISA-Tab special case headers. | [
"Read",
"tab",
"delimited",
"file",
"handling",
"ISA",
"-",
"Tab",
"special",
"case",
"headers",
"."
] | train | https://github.com/ISA-tools/biopy-isatab/blob/fe42c98184d5eb5f28d8c0b7c3fc63a9b9729f27/bcbio/isatab/parser.py#L131-L140 |
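The section-header test in _line_iter (first cell equal to its own upper-case form, all remaining cells empty) can be checked in isolation; the rows below are invented ISA-Tab-like lines:

rows = [
    ["ONTOLOGY SOURCE REFERENCE", "", ""],
    ["Term Source Name", "OBI", "NCBITaxon"],
    ["STUDY"],
]
for line in rows:
    is_section = line[0].upper() == line[0] and "".join(line[1:]) == ""
    print(line[0], "->", "section header" if is_section else "key/value row")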
ISA-tools/biopy-isatab | bcbio/isatab/parser.py | InvestigationParser._parse_keyvals | def _parse_keyvals(self, line_iter):
"""Generate dictionary from key/value pairs.
"""
out = None
line = None
for line in line_iter:
if len(line) == 1 and line[0].upper() == line[0]:
break
else:
# setup output dictionaries, trimming off blank columns
if out is None:
while not line[-1]:
line = line[:-1]
out = [{} for _ in line[1:]]
# add blank values if the line is stripped
while len(line) < len(out) + 1:
line.append("")
for i in range(len(out)):
out[i][line[0]] = line[i+1].strip()
line = None
return out, line | python | def _parse_keyvals(self, line_iter):
"""Generate dictionary from key/value pairs.
"""
out = None
line = None
for line in line_iter:
if len(line) == 1 and line[0].upper() == line[0]:
break
else:
# setup output dictionaries, trimming off blank columns
if out is None:
while not line[-1]:
line = line[:-1]
out = [{} for _ in line[1:]]
# add blank values if the line is stripped
while len(line) < len(out) + 1:
line.append("")
for i in range(len(out)):
out[i][line[0]] = line[i+1].strip()
line = None
return out, line | [
"def",
"_parse_keyvals",
"(",
"self",
",",
"line_iter",
")",
":",
"out",
"=",
"None",
"line",
"=",
"None",
"for",
"line",
"in",
"line_iter",
":",
"if",
"len",
"(",
"line",
")",
"==",
"1",
"and",
"line",
"[",
"0",
"]",
".",
"upper",
"(",
")",
"==",
"line",
"[",
"0",
"]",
":",
"break",
"else",
":",
"# setup output dictionaries, trimming off blank columns",
"if",
"out",
"is",
"None",
":",
"while",
"not",
"line",
"[",
"-",
"1",
"]",
":",
"line",
"=",
"line",
"[",
":",
"-",
"1",
"]",
"out",
"=",
"[",
"{",
"}",
"for",
"_",
"in",
"line",
"[",
"1",
":",
"]",
"]",
"# add blank values if the line is stripped",
"while",
"len",
"(",
"line",
")",
"<",
"len",
"(",
"out",
")",
"+",
"1",
":",
"line",
".",
"append",
"(",
"\"\"",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"out",
")",
")",
":",
"out",
"[",
"i",
"]",
"[",
"line",
"[",
"0",
"]",
"]",
"=",
"line",
"[",
"i",
"+",
"1",
"]",
".",
"strip",
"(",
")",
"line",
"=",
"None",
"return",
"out",
",",
"line"
] | Generate dictionary from key/value pairs. | [
"Generate",
"dictionary",
"from",
"key",
"/",
"value",
"pairs",
"."
] | train | https://github.com/ISA-tools/biopy-isatab/blob/fe42c98184d5eb5f28d8c0b7c3fc63a9b9729f27/bcbio/isatab/parser.py#L142-L162 |
ISA-tools/biopy-isatab | bcbio/isatab/parser.py | StudyAssayParser.parse | def parse(self, rec):
"""Retrieve row data from files associated with the ISATabRecord.
"""
final_studies = []
for study in rec.studies:
source_data = self._parse_study(study.metadata["Study File Name"],
["Source Name", "Sample Name", "Comment[ENA_SAMPLE]"])
if source_data:
study.nodes = source_data
final_assays = []
for assay in study.assays:
cur_assay = ISATabAssayRecord(assay)
assay_data = self._parse_study(assay["Study Assay File Name"],
["Sample Name","Extract Name","Raw Data File","Derived Data File", "Image File", "Acquisition Parameter Data File", "Free Induction Decay Data File"])
cur_assay.nodes = assay_data
self._get_process_nodes(assay["Study Assay File Name"], cur_assay)
final_assays.append(cur_assay)
study.assays = final_assays
#get process nodes
self._get_process_nodes(study.metadata["Study File Name"], study)
final_studies.append(study)
rec.studies = final_studies
return rec | python | def parse(self, rec):
"""Retrieve row data from files associated with the ISATabRecord.
"""
final_studies = []
for study in rec.studies:
source_data = self._parse_study(study.metadata["Study File Name"],
["Source Name", "Sample Name", "Comment[ENA_SAMPLE]"])
if source_data:
study.nodes = source_data
final_assays = []
for assay in study.assays:
cur_assay = ISATabAssayRecord(assay)
assay_data = self._parse_study(assay["Study Assay File Name"],
["Sample Name","Extract Name","Raw Data File","Derived Data File", "Image File", "Acquisition Parameter Data File", "Free Induction Decay Data File"])
cur_assay.nodes = assay_data
self._get_process_nodes(assay["Study Assay File Name"], cur_assay)
final_assays.append(cur_assay)
study.assays = final_assays
#get process nodes
self._get_process_nodes(study.metadata["Study File Name"], study)
final_studies.append(study)
rec.studies = final_studies
return rec | [
"def",
"parse",
"(",
"self",
",",
"rec",
")",
":",
"final_studies",
"=",
"[",
"]",
"for",
"study",
"in",
"rec",
".",
"studies",
":",
"source_data",
"=",
"self",
".",
"_parse_study",
"(",
"study",
".",
"metadata",
"[",
"\"Study File Name\"",
"]",
",",
"[",
"\"Source Name\"",
",",
"\"Sample Name\"",
",",
"\"Comment[ENA_SAMPLE]\"",
"]",
")",
"if",
"source_data",
":",
"study",
".",
"nodes",
"=",
"source_data",
"final_assays",
"=",
"[",
"]",
"for",
"assay",
"in",
"study",
".",
"assays",
":",
"cur_assay",
"=",
"ISATabAssayRecord",
"(",
"assay",
")",
"assay_data",
"=",
"self",
".",
"_parse_study",
"(",
"assay",
"[",
"\"Study Assay File Name\"",
"]",
",",
"[",
"\"Sample Name\"",
",",
"\"Extract Name\"",
",",
"\"Raw Data File\"",
",",
"\"Derived Data File\"",
",",
"\"Image File\"",
",",
"\"Acquisition Parameter Data File\"",
",",
"\"Free Induction Decay Data File\"",
"]",
")",
"cur_assay",
".",
"nodes",
"=",
"assay_data",
"self",
".",
"_get_process_nodes",
"(",
"assay",
"[",
"\"Study Assay File Name\"",
"]",
",",
"cur_assay",
")",
"final_assays",
".",
"append",
"(",
"cur_assay",
")",
"study",
".",
"assays",
"=",
"final_assays",
"#get process nodes",
"self",
".",
"_get_process_nodes",
"(",
"study",
".",
"metadata",
"[",
"\"Study File Name\"",
"]",
",",
"study",
")",
"final_studies",
".",
"append",
"(",
"study",
")",
"rec",
".",
"studies",
"=",
"final_studies",
"return",
"rec"
] | Retrieve row data from files associated with the ISATabRecord. | [
"Retrieve",
"row",
"data",
"from",
"files",
"associated",
"with",
"the",
"ISATabRecord",
"."
] | train | https://github.com/ISA-tools/biopy-isatab/blob/fe42c98184d5eb5f28d8c0b7c3fc63a9b9729f27/bcbio/isatab/parser.py#L193-L216 |
ISA-tools/biopy-isatab | bcbio/isatab/parser.py | StudyAssayParser._parse_study | def _parse_study(self, fname, node_types):
"""Parse study or assay row oriented file around the supplied base node.
"""
if not os.path.exists(os.path.join(self._dir, fname)):
return None
nodes = {}
with open(os.path.join(self._dir, fname), "rU") as in_handle:
reader = csv.reader(in_handle, dialect="excel-tab")
header = self._swap_synonyms(next(reader))
hgroups = self._collapse_header(header)
htypes = self._characterize_header(header, hgroups)
for node_type in node_types:
try:
name_index = header.index(node_type)
except ValueError:
name_index = None
if name_index is None:
#print "Could not find standard header name: %s in %s" \
# % (node_type, header)
continue
in_handle.seek(0, 0)
for line in reader:
name = line[name_index]
#to deal with same name used for different node types (e.g. Source Name and Sample Name using the same string)
node_index = self._build_node_index(node_type,name)
#skip the header line and empty lines
if name in header:
continue
if (not name):
continue
try:
node = nodes[node_index]
except KeyError:
#print("creating node ", name, " index", node_index)
node = NodeRecord(name, node_type)
node.metadata = collections.defaultdict(set)
nodes[node_index] = node
attrs = self._line_keyvals(line, header, hgroups, htypes, node.metadata)
nodes[node_index].metadata = attrs
return dict([(k, self._finalize_metadata(v)) for k, v in nodes.items()]) | python | def _parse_study(self, fname, node_types):
"""Parse study or assay row oriented file around the supplied base node.
"""
if not os.path.exists(os.path.join(self._dir, fname)):
return None
nodes = {}
with open(os.path.join(self._dir, fname), "rU") as in_handle:
reader = csv.reader(in_handle, dialect="excel-tab")
header = self._swap_synonyms(next(reader))
hgroups = self._collapse_header(header)
htypes = self._characterize_header(header, hgroups)
for node_type in node_types:
try:
name_index = header.index(node_type)
except ValueError:
name_index = None
if name_index is None:
#print "Could not find standard header name: %s in %s" \
# % (node_type, header)
continue
in_handle.seek(0, 0)
for line in reader:
name = line[name_index]
#to deal with same name used for different node types (e.g. Source Name and Sample Name using the same string)
node_index = self._build_node_index(node_type,name)
#skip the header line and empty lines
if name in header:
continue
if (not name):
continue
try:
node = nodes[node_index]
except KeyError:
#print("creating node ", name, " index", node_index)
node = NodeRecord(name, node_type)
node.metadata = collections.defaultdict(set)
nodes[node_index] = node
attrs = self._line_keyvals(line, header, hgroups, htypes, node.metadata)
nodes[node_index].metadata = attrs
return dict([(k, self._finalize_metadata(v)) for k, v in nodes.items()]) | [
"def",
"_parse_study",
"(",
"self",
",",
"fname",
",",
"node_types",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"_dir",
",",
"fname",
")",
")",
":",
"return",
"None",
"nodes",
"=",
"{",
"}",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"_dir",
",",
"fname",
")",
",",
"\"rU\"",
")",
"as",
"in_handle",
":",
"reader",
"=",
"csv",
".",
"reader",
"(",
"in_handle",
",",
"dialect",
"=",
"\"excel-tab\"",
")",
"header",
"=",
"self",
".",
"_swap_synonyms",
"(",
"next",
"(",
"reader",
")",
")",
"hgroups",
"=",
"self",
".",
"_collapse_header",
"(",
"header",
")",
"htypes",
"=",
"self",
".",
"_characterize_header",
"(",
"header",
",",
"hgroups",
")",
"for",
"node_type",
"in",
"node_types",
":",
"try",
":",
"name_index",
"=",
"header",
".",
"index",
"(",
"node_type",
")",
"except",
"ValueError",
":",
"name_index",
"=",
"None",
"if",
"name_index",
"is",
"None",
":",
"#print \"Could not find standard header name: %s in %s\" \\",
"# % (node_type, header)",
"continue",
"in_handle",
".",
"seek",
"(",
"0",
",",
"0",
")",
"for",
"line",
"in",
"reader",
":",
"name",
"=",
"line",
"[",
"name_index",
"]",
"#to deal with same name used for different node types (e.g. Source Name and Sample Name using the same string)",
"node_index",
"=",
"self",
".",
"_build_node_index",
"(",
"node_type",
",",
"name",
")",
"#skip the header line and empty lines",
"if",
"name",
"in",
"header",
":",
"continue",
"if",
"(",
"not",
"name",
")",
":",
"continue",
"try",
":",
"node",
"=",
"nodes",
"[",
"node_index",
"]",
"except",
"KeyError",
":",
"#print(\"creating node \", name, \" index\", node_index)",
"node",
"=",
"NodeRecord",
"(",
"name",
",",
"node_type",
")",
"node",
".",
"metadata",
"=",
"collections",
".",
"defaultdict",
"(",
"set",
")",
"nodes",
"[",
"node_index",
"]",
"=",
"node",
"attrs",
"=",
"self",
".",
"_line_keyvals",
"(",
"line",
",",
"header",
",",
"hgroups",
",",
"htypes",
",",
"node",
".",
"metadata",
")",
"nodes",
"[",
"node_index",
"]",
".",
"metadata",
"=",
"attrs",
"return",
"dict",
"(",
"[",
"(",
"k",
",",
"self",
".",
"_finalize_metadata",
"(",
"v",
")",
")",
"for",
"k",
",",
"v",
"in",
"nodes",
".",
"items",
"(",
")",
"]",
")"
] | Parse study or assay row oriented file around the supplied base node. | [
"Parse",
"study",
"or",
"assay",
"row",
"oriented",
"file",
"around",
"the",
"supplied",
"base",
"node",
"."
] | train | https://github.com/ISA-tools/biopy-isatab/blob/fe42c98184d5eb5f28d8c0b7c3fc63a9b9729f27/bcbio/isatab/parser.py#L292-L335 |
ISA-tools/biopy-isatab | bcbio/isatab/parser.py | StudyAssayParser._finalize_metadata | def _finalize_metadata(self, node):
"""Convert node metadata back into a standard dictionary and list.
"""
final = {}
for key, val in iter(node.metadata.items()):
#val = list(val)
#if isinstance(val[0], tuple):
# val = [dict(v) for v in val]
final[key] = list(val)
node.metadata = final
return node | python | def _finalize_metadata(self, node):
"""Convert node metadata back into a standard dictionary and list.
"""
final = {}
for key, val in iter(node.metadata.items()):
#val = list(val)
#if isinstance(val[0], tuple):
# val = [dict(v) for v in val]
final[key] = list(val)
node.metadata = final
return node | [
"def",
"_finalize_metadata",
"(",
"self",
",",
"node",
")",
":",
"final",
"=",
"{",
"}",
"for",
"key",
",",
"val",
"in",
"iter",
"(",
"node",
".",
"metadata",
".",
"items",
"(",
")",
")",
":",
"#val = list(val)",
"#if isinstance(val[0], tuple):",
"# val = [dict(v) for v in val]",
"final",
"[",
"key",
"]",
"=",
"list",
"(",
"val",
")",
"node",
".",
"metadata",
"=",
"final",
"return",
"node"
] | Convert node metadata back into a standard dictionary and list. | [
"Convert",
"node",
"metadata",
"back",
"into",
"a",
"standard",
"dictionary",
"and",
"list",
"."
] | train | https://github.com/ISA-tools/biopy-isatab/blob/fe42c98184d5eb5f28d8c0b7c3fc63a9b9729f27/bcbio/isatab/parser.py#L337-L347 |
ISA-tools/biopy-isatab | bcbio/isatab/parser.py | StudyAssayParser._line_by_type | def _line_by_type(self, line, header, hgroups, htypes, out, want_type,
collapse_quals_fn = None):
"""Parse out key value pairs for line information based on a group of values.
"""
for index, htype in ((i, t) for i, t in enumerate(htypes) if t == want_type):
col = hgroups[index][0]
key = header[col]#self._clean_header(header[col])
if collapse_quals_fn:
val = collapse_quals_fn(line, header, hgroups[index])
else:
val = line[col]
out[key].add(val)
return out | python | def _line_by_type(self, line, header, hgroups, htypes, out, want_type,
collapse_quals_fn = None):
"""Parse out key value pairs for line information based on a group of values.
"""
for index, htype in ((i, t) for i, t in enumerate(htypes) if t == want_type):
col = hgroups[index][0]
key = header[col]#self._clean_header(header[col])
if collapse_quals_fn:
val = collapse_quals_fn(line, header, hgroups[index])
else:
val = line[col]
out[key].add(val)
return out | [
"def",
"_line_by_type",
"(",
"self",
",",
"line",
",",
"header",
",",
"hgroups",
",",
"htypes",
",",
"out",
",",
"want_type",
",",
"collapse_quals_fn",
"=",
"None",
")",
":",
"for",
"index",
",",
"htype",
"in",
"(",
"(",
"i",
",",
"t",
")",
"for",
"i",
",",
"t",
"in",
"enumerate",
"(",
"htypes",
")",
"if",
"t",
"==",
"want_type",
")",
":",
"col",
"=",
"hgroups",
"[",
"index",
"]",
"[",
"0",
"]",
"key",
"=",
"header",
"[",
"col",
"]",
"#self._clean_header(header[col])",
"if",
"collapse_quals_fn",
":",
"val",
"=",
"collapse_quals_fn",
"(",
"line",
",",
"header",
",",
"hgroups",
"[",
"index",
"]",
")",
"else",
":",
"val",
"=",
"line",
"[",
"col",
"]",
"out",
"[",
"key",
"]",
".",
"add",
"(",
"val",
")",
"return",
"out"
] | Parse out key value pairs for line information based on a group of values. | [
"Parse",
"out",
"key",
"value",
"pairs",
"for",
"line",
"information",
"based",
"on",
"a",
"group",
"of",
"values",
"."
] | train | https://github.com/ISA-tools/biopy-isatab/blob/fe42c98184d5eb5f28d8c0b7c3fc63a9b9729f27/bcbio/isatab/parser.py#L357-L369 |
ISA-tools/biopy-isatab | bcbio/isatab/parser.py | StudyAssayParser._collapse_attributes | def _collapse_attributes(self, line, header, indexes):
"""Combine attributes in multiple columns into single named tuple.
"""
names = []
vals = []
pat = re.compile("[\W]+")
for i in indexes:
names.append(pat.sub("_", self._clean_header(header[i])))
vals.append(line[i])
Attrs = collections.namedtuple('Attrs', names)
return Attrs(*vals) | python | def _collapse_attributes(self, line, header, indexes):
"""Combine attributes in multiple columns into single named tuple.
"""
names = []
vals = []
pat = re.compile("[\W]+")
for i in indexes:
names.append(pat.sub("_", self._clean_header(header[i])))
vals.append(line[i])
Attrs = collections.namedtuple('Attrs', names)
return Attrs(*vals) | [
"def",
"_collapse_attributes",
"(",
"self",
",",
"line",
",",
"header",
",",
"indexes",
")",
":",
"names",
"=",
"[",
"]",
"vals",
"=",
"[",
"]",
"pat",
"=",
"re",
".",
"compile",
"(",
"\"[\\W]+\"",
")",
"for",
"i",
"in",
"indexes",
":",
"names",
".",
"append",
"(",
"pat",
".",
"sub",
"(",
"\"_\"",
",",
"self",
".",
"_clean_header",
"(",
"header",
"[",
"i",
"]",
")",
")",
")",
"vals",
".",
"append",
"(",
"line",
"[",
"i",
"]",
")",
"Attrs",
"=",
"collections",
".",
"namedtuple",
"(",
"'Attrs'",
",",
"names",
")",
"return",
"Attrs",
"(",
"*",
"vals",
")"
] | Combine attributes in multiple columns into single named tuple. | [
"Combine",
"attributes",
"in",
"multiple",
"columns",
"into",
"single",
"named",
"tuple",
"."
] | train | https://github.com/ISA-tools/biopy-isatab/blob/fe42c98184d5eb5f28d8c0b7c3fc63a9b9729f27/bcbio/isatab/parser.py#L371-L381 |
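A minimal sketch of the grouping behaviour, using a hypothetical ISA-Tab header and row; the column group [1, 2] stands in for what _collapse_header would produce, and the logic is copied standalone because the method is bound to a parser instance.

import collections
import re

# Hypothetical ISA-Tab columns: a Characteristics column plus its qualifier.
header = ["Sample Name", "Characteristics[organism]", "Term Source REF"]
line = ["sample1", "Homo sapiens", "NCBITAXON"]
indexes = [1, 2]

# Clean each grouped header, replace non-word characters with "_",
# and pack the matching cells into a named tuple (as the method does):
pat = re.compile(r"[\W]+")
names = [pat.sub("_", h) for h in ("organism", "Term Source REF")]
Attrs = collections.namedtuple("Attrs", names)
print(Attrs(*[line[i] for i in indexes]))
# Attrs(organism='Homo sapiens', Term_Source_REF='NCBITAXON')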
ISA-tools/biopy-isatab | bcbio/isatab/parser.py | StudyAssayParser._clean_header | def _clean_header(self, header):
"""Remove ISA-Tab specific information from Header[real name] headers.
"""
if header.find("[") >= 0:
header = header.replace("]", "").split("[")[-1]
# ISATab can start with numbers but this is not supported in
# the python datastructure, so prefix with isa_ to make legal
try:
int(header[0])
header = "isa_" + header
except ValueError:
pass
return header | python | def _clean_header(self, header):
"""Remove ISA-Tab specific information from Header[real name] headers.
"""
if header.find("[") >= 0:
header = header.replace("]", "").split("[")[-1]
# ISATab can start with numbers but this is not supported in
# the python datastructure, so prefix with isa_ to make legal
try:
int(header[0])
header = "isa_" + header
except ValueError:
pass
return header | [
"def",
"_clean_header",
"(",
"self",
",",
"header",
")",
":",
"if",
"header",
".",
"find",
"(",
"\"[\"",
")",
">=",
"0",
":",
"header",
"=",
"header",
".",
"replace",
"(",
"\"]\"",
",",
"\"\"",
")",
".",
"split",
"(",
"\"[\"",
")",
"[",
"-",
"1",
"]",
"# ISATab can start with numbers but this is not supported in",
"# the python datastructure, so prefix with isa_ to make legal",
"try",
":",
"int",
"(",
"header",
"[",
"0",
"]",
")",
"header",
"=",
"\"isa_\"",
"+",
"header",
"except",
"ValueError",
":",
"pass",
"return",
"header"
] | Remove ISA-Tab specific information from Header[real name] headers. | [
"Remove",
"ISA",
"-",
"Tab",
"specific",
"information",
"from",
"Header",
"[",
"real",
"name",
"]",
"headers",
"."
] | train | https://github.com/ISA-tools/biopy-isatab/blob/fe42c98184d5eb5f28d8c0b7c3fc63a9b9729f27/bcbio/isatab/parser.py#L383-L395 |
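A runnable sketch of the header cleaning, using a standalone copy of the logic above; the example header strings are hypothetical.

def clean_header(header):
    # Standalone copy of _clean_header's logic, for illustration only.
    if header.find("[") >= 0:
        header = header.replace("]", "").split("[")[-1]
    try:
        int(header[0])
        header = "isa_" + header
    except ValueError:
        pass
    return header

assert clean_header("Characteristics[organism]") == "organism"
assert clean_header("Factor Value[10 mg dose]") == "isa_10 mg dose"  # leading digit gets the isa_ prefix
assert clean_header("Sample Name") == "Sample Name"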
ISA-tools/biopy-isatab | bcbio/isatab/parser.py | StudyAssayParser._characterize_header | def _characterize_header(self, header, hgroups):
"""Characterize header groups into different data types.
"""
out = []
for h in [header[g[0]] for g in hgroups]:
this_ctype = None
for ctype, names in self._col_types.items():
if h.startswith(names):
this_ctype = ctype
break
out.append(this_ctype)
return out | python | def _characterize_header(self, header, hgroups):
"""Characterize header groups into different data types.
"""
out = []
for h in [header[g[0]] for g in hgroups]:
this_ctype = None
for ctype, names in self._col_types.items():
if h.startswith(names):
this_ctype = ctype
break
out.append(this_ctype)
return out | [
"def",
"_characterize_header",
"(",
"self",
",",
"header",
",",
"hgroups",
")",
":",
"out",
"=",
"[",
"]",
"for",
"h",
"in",
"[",
"header",
"[",
"g",
"[",
"0",
"]",
"]",
"for",
"g",
"in",
"hgroups",
"]",
":",
"this_ctype",
"=",
"None",
"for",
"ctype",
",",
"names",
"in",
"self",
".",
"_col_types",
".",
"items",
"(",
")",
":",
"if",
"h",
".",
"startswith",
"(",
"names",
")",
":",
"this_ctype",
"=",
"ctype",
"break",
"out",
".",
"append",
"(",
"this_ctype",
")",
"return",
"out"
] | Characterize header groups into different data types. | [
"Characterize",
"header",
"groups",
"into",
"different",
"data",
"types",
"."
] | train | https://github.com/ISA-tools/biopy-isatab/blob/fe42c98184d5eb5f28d8c0b7c3fc63a9b9729f27/bcbio/isatab/parser.py#L397-L408 |
ISA-tools/biopy-isatab | bcbio/isatab/parser.py | StudyAssayParser._collapse_header | def _collapse_header(self, header):
"""Combine header columns into related groups.
"""
out = []
for i, h in enumerate(header):
if h.startswith(self._col_quals):
out[-1].append(i)
else:
out.append([i])
return out | python | def _collapse_header(self, header):
"""Combine header columns into related groups.
"""
out = []
for i, h in enumerate(header):
if h.startswith(self._col_quals):
out[-1].append(i)
else:
out.append([i])
return out | [
"def",
"_collapse_header",
"(",
"self",
",",
"header",
")",
":",
"out",
"=",
"[",
"]",
"for",
"i",
",",
"h",
"in",
"enumerate",
"(",
"header",
")",
":",
"if",
"h",
".",
"startswith",
"(",
"self",
".",
"_col_quals",
")",
":",
"out",
"[",
"-",
"1",
"]",
".",
"append",
"(",
"i",
")",
"else",
":",
"out",
".",
"append",
"(",
"[",
"i",
"]",
")",
"return",
"out"
] | Combine header columns into related groups. | [
"Combine",
"header",
"columns",
"into",
"related",
"groups",
"."
] | train | https://github.com/ISA-tools/biopy-isatab/blob/fe42c98184d5eb5f28d8c0b7c3fc63a9b9729f27/bcbio/isatab/parser.py#L410-L419 |
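A standalone sketch of the grouping, assuming self._col_quals holds qualifier-column prefixes such as the ones below (that attribute is defined elsewhere in the class, so its contents here are an assumption).

col_quals = ("Term Source REF", "Term Accession Number", "Unit")  # assumed contents

header = ["Sample Name", "Characteristics[organism]", "Term Source REF",
          "Term Accession Number", "Protocol REF"]
out = []
for i, h in enumerate(header):
    if h.startswith(col_quals):
        out[-1].append(i)  # qualifier columns attach to the preceding group
    else:
        out.append([i])
print(out)  # [[0], [1, 2, 3], [4]]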
ttinies/sc2gameMapRepo | sc2maptool/cli.py | main | def main(): # mini/unit test
"""
PURPOSE: command-line interface for map information
"""
options = optionsParser().parse_args()
params = getSelectionParams(options)
if options.list or options.details:
specifiedMaps = filterMapNames(
options.mapname,
records = filterMapAttrs(**params),
excludeRegex=options.exclude,
closestMatch=options.best
)
if specifiedMaps:
for v in specifiedMaps:
if options.details: v.display()
else: print(v)
print("Found %d maps that match given criteria."%(len(specifiedMaps)))
else:
print("No matching maps found.")
else:
try:
specifiedMaps = selectMap(
options.mapname,
excludeName =options.exclude,
closestMatch=options.best,
**params)
except Exception as e:
specifiedMaps = []
print("No matching maps found: %s"%e)
if not isinstance(specifiedMaps, list):
specifiedMaps = [specifiedMaps]
for m in specifiedMaps:
if options.path: print(m.path)
else: print(m.name) | python | def main(): # mini/unit test
"""
PURPOSE: command-line interface for map information
"""
options = optionsParser().parse_args()
params = getSelectionParams(options)
if options.list or options.details:
specifiedMaps = filterMapNames(
options.mapname,
records = filterMapAttrs(**params),
excludeRegex=options.exclude,
closestMatch=options.best
)
if specifiedMaps:
for v in specifiedMaps:
if options.details: v.display()
else: print(v)
print("Found %d maps that match given criteria."%(len(specifiedMaps)))
else:
print("No matching maps found.")
else:
try:
specifiedMaps = selectMap(
options.mapname,
excludeName =options.exclude,
closestMatch=options.best,
**params)
except Exception as e:
specifiedMaps = []
print("No matching maps found: %s"%e)
if not isinstance(specifiedMaps, list):
specifiedMaps = [specifiedMaps]
for m in specifiedMaps:
if options.path: print(m.path)
else: print(m.name) | [
"def",
"main",
"(",
")",
":",
"# mini/unit test",
"options",
"=",
"optionsParser",
"(",
")",
".",
"parse_args",
"(",
")",
"params",
"=",
"getSelectionParams",
"(",
"options",
")",
"if",
"options",
".",
"list",
"or",
"options",
".",
"details",
":",
"specifiedMaps",
"=",
"filterMapNames",
"(",
"options",
".",
"mapname",
",",
"records",
"=",
"filterMapAttrs",
"(",
"*",
"*",
"params",
")",
",",
"excludeRegex",
"=",
"options",
".",
"exclude",
",",
"closestMatch",
"=",
"options",
".",
"best",
")",
"if",
"specifiedMaps",
":",
"for",
"v",
"in",
"specifiedMaps",
":",
"if",
"options",
".",
"details",
":",
"v",
".",
"display",
"(",
")",
"else",
":",
"print",
"(",
"v",
")",
"print",
"(",
"\"Found %d maps that match given criteria.\"",
"%",
"(",
"len",
"(",
"specifiedMaps",
")",
")",
")",
"else",
":",
"print",
"(",
"\"No matching maps found.\"",
")",
"else",
":",
"try",
":",
"specifiedMaps",
"=",
"selectMap",
"(",
"options",
".",
"mapname",
",",
"excludeName",
"=",
"options",
".",
"exclude",
",",
"closestMatch",
"=",
"options",
".",
"best",
",",
"*",
"*",
"params",
")",
"except",
"Exception",
"as",
"e",
":",
"specifiedMaps",
"=",
"[",
"]",
"print",
"(",
"\"No matching maps found: %s\"",
"%",
"e",
")",
"if",
"not",
"isinstance",
"(",
"specifiedMaps",
",",
"list",
")",
":",
"specifiedMaps",
"=",
"[",
"specifiedMaps",
"]",
"for",
"m",
"in",
"specifiedMaps",
":",
"if",
"options",
".",
"path",
":",
"print",
"(",
"m",
".",
"path",
")",
"else",
":",
"print",
"(",
"m",
".",
"name",
")"
] | PURPOSE: command-line interface for map information | [
"PURPOSE",
":",
"command",
"-",
"line",
"interface",
"for",
"map",
"information"
] | train | https://github.com/ttinies/sc2gameMapRepo/blob/3a215067fae8f86f6a3ffe37272fbd7a5461cfab/sc2maptool/cli.py#L56-L90 |
MacHu-GWU/dataIO-project | dataIO/pk.py | is_pickle_file | def is_pickle_file(abspath):
"""Parse file extension.
- *.pickle: uncompressed, utf-8 encode pickle file
- *.gz: compressed, utf-8 encode pickle file
"""
abspath = abspath.lower()
fname, ext = os.path.splitext(abspath)
if ext in [".pickle", ".pk", ".p"]:
is_pickle = True
elif ext == ".gz":
is_pickle = False
elif ext == ".tmp":
return is_pickle_file(fname)
else:
raise PickleExtError(
"'%s' is not a valid pickle file. "
"extension has to be '.pickle' for uncompressed, '.gz' "
"for compressed." % abspath)
return is_pickle | python | def is_pickle_file(abspath):
"""Parse file extension.
- *.pickle: uncompressed, utf-8 encode pickle file
- *.gz: compressed, utf-8 encode pickle file
"""
abspath = abspath.lower()
fname, ext = os.path.splitext(abspath)
if ext in [".pickle", ".pk", ".p"]:
is_pickle = True
elif ext == ".gz":
is_pickle = False
elif ext == ".tmp":
return is_pickle_file(fname)
else:
raise PickleExtError(
"'%s' is not a valid pickle file. "
"extension has to be '.pickle' for uncompressed, '.gz' "
"for compressed." % abspath)
return is_pickle | [
"def",
"is_pickle_file",
"(",
"abspath",
")",
":",
"abspath",
"=",
"abspath",
".",
"lower",
"(",
")",
"fname",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"abspath",
")",
"if",
"ext",
"in",
"[",
"\".pickle\"",
",",
"\".pk\"",
",",
"\".p\"",
"]",
":",
"is_pickle",
"=",
"True",
"elif",
"ext",
"==",
"\".gz\"",
":",
"is_pickle",
"=",
"False",
"elif",
"ext",
"==",
"\".tmp\"",
":",
"return",
"is_pickle_file",
"(",
"fname",
")",
"else",
":",
"raise",
"PickleExtError",
"(",
"\"'%s' is not a valid pickle file. \"",
"\"extension has to be '.pickle' for uncompressed, '.gz' \"",
"\"for compressed.\"",
"%",
"abspath",
")",
"return",
"is_pickle"
] | Parse file extension.
- *.pickle: uncompressed, utf-8 encode pickle file
- *.gz: compressed, utf-8 encode pickle file | [
"Parse",
"file",
"extension",
".",
"-",
"*",
".",
"pickle",
":",
"uncompressed",
"utf",
"-",
"8",
"encode",
"pickle",
"file",
"-",
"*",
".",
"gz",
":",
"compressed",
"utf",
"-",
"8",
"encode",
"pickle",
"file"
] | train | https://github.com/MacHu-GWU/dataIO-project/blob/7e1cc192b5e53426eed6dbd742918619b8fd60ab/dataIO/pk.py#L43-L62 |
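A short usage sketch, assuming the module is imported as in the docstrings below (from dataIO import pk); the file names are hypothetical.

from dataIO import pk

pk.is_pickle_file("data.pickle")      # True  (uncompressed pickle)
pk.is_pickle_file("data.gz")          # False (gzip-compressed pickle)
pk.is_pickle_file("data.pickle.tmp")  # True  (.tmp resolves against the real extension)
# pk.is_pickle_file("data.json")      # would raise PickleExtError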
MacHu-GWU/dataIO-project | dataIO/pk.py | load | def load(abspath, default=None, enable_verbose=True):
"""Load Pickle from file. If file are not exists, returns ``default``.
:param abspath: file path. use absolute path as much as you can.
extension has to be ``.pickle`` or ``.gz`` (for compressed Pickle).
:type abspath: string
:param default: default ``dict()``, if ``abspath`` not exists, return the
default Python object instead.
:param enable_verbose: default ``True``, help-message-display trigger.
:type enable_verbose: boolean
Usage::
>>> from dataIO import pk
>>> pk.load("test.pickle") # if you have a pickle file
Load from `test.pickle` ...
Complete! Elapse 0.000432 sec.
{'a': 1, 'b': 2}
**中文文档**
从Pickle文件中读取数据
:param abspath: Pickle文件绝对路径, 扩展名需为 ``.pickle`` 或 ``.gz``, 其中 ``.gz``
是被压缩后的Pickle文件
:type abspath: ``字符串``
:param default: 默认 ``dict()``, 如果文件路径不存在, 则会返回指定的默认值
:param enable_verbose: 默认 ``True``, 信息提示的开关, 批处理时建议关闭
:type enable_verbose: ``布尔值``
"""
if default is None:
default = dict()
prt("\nLoad from '%s' ..." % abspath, enable_verbose)
abspath = lower_ext(str(abspath))
is_pickle = is_pickle_file(abspath)
if not os.path.exists(abspath):
prt(" File not found, use default value: %r" % default,
enable_verbose)
return default
st = time.clock()
if is_pickle:
data = pickle.loads(textfile.readbytes(abspath))
else:
data = pickle.loads(compress.read_gzip(abspath))
prt(" Complete! Elapse %.6f sec." % (time.clock() - st), enable_verbose)
return data | python | def load(abspath, default=None, enable_verbose=True):
"""Load Pickle from file. If file are not exists, returns ``default``.
:param abspath: file path. use absolute path as much as you can.
extension has to be ``.pickle`` or ``.gz`` (for compressed Pickle).
:type abspath: string
:param default: default ``dict()``, if ``abspath`` not exists, return the
default Python object instead.
:param enable_verbose: default ``True``, help-message-display trigger.
:type enable_verbose: boolean
Usage::
>>> from dataIO import pk
>>> pk.load("test.pickle") # if you have a pickle file
Load from `test.pickle` ...
Complete! Elapse 0.000432 sec.
{'a': 1, 'b': 2}
**中文文档**
从Pickle文件中读取数据
:param abspath: Pickle文件绝对路径, 扩展名需为 ``.pickle`` 或 ``.gz``, 其中 ``.gz``
是被压缩后的Pickle文件
:type abspath: ``字符串``
:param default: 默认 ``dict()``, 如果文件路径不存在, 则会返回指定的默认值
:param enable_verbose: 默认 ``True``, 信息提示的开关, 批处理时建议关闭
:type enable_verbose: ``布尔值``
"""
if default is None:
default = dict()
prt("\nLoad from '%s' ..." % abspath, enable_verbose)
abspath = lower_ext(str(abspath))
is_pickle = is_pickle_file(abspath)
if not os.path.exists(abspath):
prt(" File not found, use default value: %r" % default,
enable_verbose)
return default
st = time.clock()
if is_pickle:
data = pickle.loads(textfile.readbytes(abspath))
else:
data = pickle.loads(compress.read_gzip(abspath))
prt(" Complete! Elapse %.6f sec." % (time.clock() - st), enable_verbose)
return data | [
"def",
"load",
"(",
"abspath",
",",
"default",
"=",
"None",
",",
"enable_verbose",
"=",
"True",
")",
":",
"if",
"default",
"is",
"None",
":",
"default",
"=",
"dict",
"(",
")",
"prt",
"(",
"\"\\nLoad from '%s' ...\"",
"%",
"abspath",
",",
"enable_verbose",
")",
"abspath",
"=",
"lower_ext",
"(",
"str",
"(",
"abspath",
")",
")",
"is_pickle",
"=",
"is_pickle_file",
"(",
"abspath",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"abspath",
")",
":",
"prt",
"(",
"\" File not found, use default value: %r\"",
"%",
"default",
",",
"enable_verbose",
")",
"return",
"default",
"st",
"=",
"time",
".",
"clock",
"(",
")",
"if",
"is_pickle",
":",
"data",
"=",
"pickle",
".",
"loads",
"(",
"textfile",
".",
"readbytes",
"(",
"abspath",
")",
")",
"else",
":",
"data",
"=",
"pickle",
".",
"loads",
"(",
"compress",
".",
"read_gzip",
"(",
"abspath",
")",
")",
"prt",
"(",
"\" Complete! Elapse %.6f sec.\"",
"%",
"(",
"time",
".",
"clock",
"(",
")",
"-",
"st",
")",
",",
"enable_verbose",
")",
"return",
"data"
] | Load Pickle from file. If the file does not exist, return ``default``.
:param abspath: file path. use absolute path as much as you can.
extension has to be ``.pickle`` or ``.gz`` (for compressed Pickle).
:type abspath: string
:param default: default ``dict()``, if ``abspath`` not exists, return the
default Python object instead.
:param enable_verbose: default ``True``, help-message-display trigger.
:type enable_verbose: boolean
Usage::
>>> from dataIO import pk
>>> pk.load("test.pickle") # if you have a pickle file
Load from `test.pickle` ...
Complete! Elapse 0.000432 sec.
{'a': 1, 'b': 2}
**中文文档**
从Pickle文件中读取数据
:param abspath: Pickle文件绝对路径, 扩展名需为 ``.pickle`` 或 ``.gz``, 其中 ``.gz``
是被压缩后的Pickle文件
:type abspath: ``字符串``
:param default: 默认 ``dict()``, 如果文件路径不存在, 则会返回指定的默认值
:param enable_verbose: 默认 ``True``, 信息提示的开关, 批处理时建议关闭
:type enable_verbose: ``布尔值`` | [
"Load",
"Pickle",
"from",
"file",
".",
"If",
"file",
"are",
"not",
"exists",
"returns",
"default",
"."
] | train | https://github.com/MacHu-GWU/dataIO-project/blob/7e1cc192b5e53426eed6dbd742918619b8fd60ab/dataIO/pk.py#L72-L126 |
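A minimal round-trip sketch for load and dump together, assuming the import style shown in the docstrings; the path is hypothetical.

from dataIO import pk

pk.dump({"a": 1, "b": 2}, "test.pickle", overwrite=True)
assert pk.load("test.pickle") == {"a": 1, "b": 2}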
MacHu-GWU/dataIO-project | dataIO/pk.py | dump | def dump(data, abspath, pk_protocol=py23.pk_protocol,
overwrite=False, enable_verbose=True):
"""Dump picklable object to file.
Provides multiple choice to customize the behavior.
:param data: picklable python object.
:type data: dict or list
:param abspath: ``save as`` path, file extension has to be ``.pickle`` or ``.gz``
(for compressed Pickle)
:type abspath: string
:param pk_protocol: default = your python version, use 2, to make a
py2.x/3.x compatible pickle file. But 3 is faster.
:type pk_protocol: int
:param overwrite: default ``False``, If ``True``, when you dump to existing
file, it silently overwrites it. If ``False``, an alert message is shown.
Default setting ``False`` is to prevent overwrite file by mistake.
:type overwrite: boolean
:param enable_verbose: default True, help-message-display trigger.
:type enable_verbose: boolean
Usage::
>>> from dataIO import pk
>>> data = {"a": 1, "b": 2}
>>> dump(data, "test.pickle", overwrite=True)
Dump to `test.pickle` ...
Complete! Elapse 0.002432 sec
**中文文档**
将Python中可被序列化的"字典", "列表"以及他们的组合, 按照Json的编码方式写入文件
文件
参数列表
:param data: 可Pickle化的Python对象
:type data: ``字典`` 或 ``列表``
:param abspath: Pickle文件绝对路径, 扩展名需为 ``.pickle`` 或 ``.gz``, 其中 ``.gz``
是被压缩后的Pickle文件
:type abspath: ``字符串``
:param pk_protocol: 默认值为你的Python大版本号, 使用2可以使得Python2/3都能
兼容你的Pickle文件。不过Python3的速度更快。
:type pk_protocol: int
:param overwrite: 默认 ``False``, 当为``True``时, 如果写入路径已经存在, 则会
自动覆盖原文件。而为``False``时, 则会打印警告文件, 防止误操作覆盖源文件。
:type overwrite: "布尔值"
:param enable_verbose: 默认 ``True``, 信息提示的开关, 批处理时建议关闭
:type enable_verbose: ``布尔值``
"""
prt("\nDump to '%s' ..." % abspath, enable_verbose)
abspath = lower_ext(str(abspath))
is_pickle = is_pickle_file(abspath)
if os.path.exists(abspath):
if not overwrite: # 存在, 并且overwrite=False
prt(" Stop! File exists and overwrite is not allowed",
enable_verbose)
return
st = time.clock()
content = pickle.dumps(data, pk_protocol)
if is_pickle:
textfile.writebytes(content, abspath)
else:
compress.write_gzip(content, abspath)
prt(" Complete! Elapse %.6f sec." % (time.clock() - st), enable_verbose) | python | def dump(data, abspath, pk_protocol=py23.pk_protocol,
overwrite=False, enable_verbose=True):
"""Dump picklable object to file.
Provides multiple choice to customize the behavior.
:param data: picklable python object.
:type data: dict or list
:param abspath: ``save as`` path, file extension has to be ``.pickle`` or ``.gz``
(for compressed Pickle)
:type abspath: string
:param pk_protocol: default = your python version, use 2, to make a
py2.x/3.x compatible pickle file. But 3 is faster.
:type pk_protocol: int
:param overwrite: default ``False``, If ``True``, when you dump to existing
file, it silently overwrites it. If ``False``, an alert message is shown.
Default setting ``False`` is to prevent overwrite file by mistake.
:type overwrite: boolean
:param enable_verbose: default True, help-message-display trigger.
:type enable_verbose: boolean
Usage::
>>> from dataIO import pk
>>> data = {"a": 1, "b": 2}
>>> dump(data, "test.pickle", overwrite=True)
Dump to `test.pickle` ...
Complete! Elapse 0.002432 sec
**中文文档**
将Python中可被序列化的"字典", "列表"以及他们的组合, 按照Json的编码方式写入文件
文件
参数列表
:param data: 可Pickle化的Python对象
:type data: ``字典`` 或 ``列表``
:param abspath: Pickle文件绝对路径, 扩展名需为 ``.pickle`` 或 ``.gz``, 其中 ``.gz``
是被压缩后的Pickle文件
:type abspath: ``字符串``
:param pk_protocol: 默认值为你的Python大版本号, 使用2可以使得Python2/3都能
兼容你的Pickle文件。不过Python3的速度更快。
:type pk_protocol: int
:param overwrite: 默认 ``False``, 当为``True``时, 如果写入路径已经存在, 则会
自动覆盖原文件。而为``False``时, 则会打印警告文件, 防止误操作覆盖源文件。
:type overwrite: "布尔值"
:param enable_verbose: 默认 ``True``, 信息提示的开关, 批处理时建议关闭
:type enable_verbose: ``布尔值``
"""
prt("\nDump to '%s' ..." % abspath, enable_verbose)
abspath = lower_ext(str(abspath))
is_pickle = is_pickle_file(abspath)
if os.path.exists(abspath):
if not overwrite: # 存在, 并且overwrite=False
prt(" Stop! File exists and overwrite is not allowed",
enable_verbose)
return
st = time.clock()
content = pickle.dumps(data, pk_protocol)
if is_pickle:
textfile.writebytes(content, abspath)
else:
compress.write_gzip(content, abspath)
prt(" Complete! Elapse %.6f sec." % (time.clock() - st), enable_verbose) | [
"def",
"dump",
"(",
"data",
",",
"abspath",
",",
"pk_protocol",
"=",
"py23",
".",
"pk_protocol",
",",
"overwrite",
"=",
"False",
",",
"enable_verbose",
"=",
"True",
")",
":",
"prt",
"(",
"\"\\nDump to '%s' ...\"",
"%",
"abspath",
",",
"enable_verbose",
")",
"abspath",
"=",
"lower_ext",
"(",
"str",
"(",
"abspath",
")",
")",
"is_pickle",
"=",
"is_pickle_file",
"(",
"abspath",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"abspath",
")",
":",
"if",
"not",
"overwrite",
":",
"# 存在, 并且overwrite=False",
"prt",
"(",
"\" Stop! File exists and overwrite is not allowed\"",
",",
"enable_verbose",
")",
"return",
"st",
"=",
"time",
".",
"clock",
"(",
")",
"content",
"=",
"pickle",
".",
"dumps",
"(",
"data",
",",
"pk_protocol",
")",
"if",
"is_pickle",
":",
"textfile",
".",
"writebytes",
"(",
"content",
",",
"abspath",
")",
"else",
":",
"compress",
".",
"write_gzip",
"(",
"content",
",",
"abspath",
")",
"prt",
"(",
"\" Complete! Elapse %.6f sec.\"",
"%",
"(",
"time",
".",
"clock",
"(",
")",
"-",
"st",
")",
",",
"enable_verbose",
")"
] | Dump picklable object to file.
Provides multiple choice to customize the behavior.
:param data: picklable python object.
:type data: dict or list
:param abspath: ``save as`` path, file extension has to be ``.pickle`` or ``.gz``
(for compressed Pickle)
:type abspath: string
:param pk_protocol: default = your python version, use 2, to make a
py2.x/3.x compatible pickle file. But 3 is faster.
:type pk_protocol: int
:param overwrite: default ``False``, If ``True``, when you dump to existing
file, it silently overwrites it. If ``False``, an alert message is shown.
Default setting ``False`` is to prevent overwrite file by mistake.
:type overwrite: boolean
:param enable_verbose: default True, help-message-display trigger.
:type enable_verbose: boolean
Usage::
>>> from dataIO import pk
>>> data = {"a": 1, "b": 2}
>>> dump(data, "test.pickle", overwrite=True)
Dump to `test.pickle` ...
Complete! Elapse 0.002432 sec
**中文文档**
将Python中可被序列化的"字典", "列表"以及他们的组合, 按照Json的编码方式写入文件
文件
参数列表
:param data: 可Pickle化的Python对象
:type data: ``字典`` 或 ``列表``
:param abspath: Pickle文件绝对路径, 扩展名需为 ``.pickle`` 或 ``.gz``, 其中 ``.gz``
是被压缩后的Pickle文件
:type abspath: ``字符串``
:param pk_protocol: 默认值为你的Python大版本号, 使用2可以使得Python2/3都能
兼容你的Pickle文件。不过Python3的速度更快。
:type pk_protocol: int
:param overwrite: 默认 ``False``, 当为``True``时, 如果写入路径已经存在, 则会
自动覆盖原文件。而为``False``时, 则会打印警告文件, 防止误操作覆盖源文件。
:type overwrite: "布尔值"
:param enable_verbose: 默认 ``True``, 信息提示的开关, 批处理时建议关闭
:type enable_verbose: ``布尔值`` | [
"Dump",
"picklable",
"object",
"to",
"file",
".",
"Provides",
"multiple",
"choice",
"to",
"customize",
"the",
"behavior",
"."
] | train | https://github.com/MacHu-GWU/dataIO-project/blob/7e1cc192b5e53426eed6dbd742918619b8fd60ab/dataIO/pk.py#L129-L204 |
MacHu-GWU/dataIO-project | dataIO/pk.py | safe_dump | def safe_dump(data, abspath, pk_protocol=py23.pk_protocol, enable_verbose=True):
"""A stable version of :func:`dump`, this method will silently overwrite
existing file.
There's an issue with :func:`dump`: If your program is interrupted while
writing, you got an incomplete file, and you also lose the original file.
So this method write pickle to a temporary file first, then rename to what
you expect, and silently overwrite old one. This way can guarantee atomic
write.
**中文文档**
在对文件进行写入时, 如果程序中断, 则会留下一个不完整的文件。如果使用了覆盖式
写入, 则我们即没有得到新文件, 同时也丢失了原文件。所以为了保证写操作的原子性
(要么全部完成, 要么全部都不完成), 更好的方法是: 首先将文件写入一个临时文件中,
完成后再讲文件重命名, 覆盖旧文件。这样即使中途程序被中断, 也仅仅是留下了一个
未完成的临时文件而已, 不会影响原文件。
"""
abspath = lower_ext(str(abspath))
abspath_temp = "%s.tmp" % abspath
dump(data, abspath_temp,
pk_protocol=pk_protocol, enable_verbose=enable_verbose)
shutil.move(abspath_temp, abspath) | python | def safe_dump(data, abspath, pk_protocol=py23.pk_protocol, enable_verbose=True):
"""A stable version of :func:`dump`, this method will silently overwrite
existing file.
There's an issue with :func:`dump`: If your program is interrupted while
writing, you got an incomplete file, and you also lose the original file.
So this method write pickle to a temporary file first, then rename to what
you expect, and silently overwrite old one. This way can guarantee atomic
write.
**中文文档**
在对文件进行写入时, 如果程序中断, 则会留下一个不完整的文件。如果使用了覆盖式
写入, 则我们即没有得到新文件, 同时也丢失了原文件。所以为了保证写操作的原子性
(要么全部完成, 要么全部都不完成), 更好的方法是: 首先将文件写入一个临时文件中,
完成后再讲文件重命名, 覆盖旧文件。这样即使中途程序被中断, 也仅仅是留下了一个
未完成的临时文件而已, 不会影响原文件。
"""
abspath = lower_ext(str(abspath))
abspath_temp = "%s.tmp" % abspath
dump(data, abspath_temp,
pk_protocol=pk_protocol, enable_verbose=enable_verbose)
shutil.move(abspath_temp, abspath) | [
"def",
"safe_dump",
"(",
"data",
",",
"abspath",
",",
"pk_protocol",
"=",
"py23",
".",
"pk_protocol",
",",
"enable_verbose",
"=",
"True",
")",
":",
"abspath",
"=",
"lower_ext",
"(",
"str",
"(",
"abspath",
")",
")",
"abspath_temp",
"=",
"\"%s.tmp\"",
"%",
"abspath",
"dump",
"(",
"data",
",",
"abspath_temp",
",",
"pk_protocol",
"=",
"pk_protocol",
",",
"enable_verbose",
"=",
"enable_verbose",
")",
"shutil",
".",
"move",
"(",
"abspath_temp",
",",
"abspath",
")"
] | A stable version of :func:`dump`, this method will silently overwrite
existing file.
There's an issue with :func:`dump`: If your program is interrupted while
writing, you got an incomplete file, and you also lose the original file.
So this method write pickle to a temporary file first, then rename to what
you expect, and silently overwrite old one. This way can guarantee atomic
write.
**中文文档**
在对文件进行写入时, 如果程序中断, 则会留下一个不完整的文件。如果使用了覆盖式
写入, 则我们即没有得到新文件, 同时也丢失了原文件。所以为了保证写操作的原子性
(要么全部完成, 要么全部都不完成), 更好的方法是: 首先将文件写入一个临时文件中,
完成后再讲文件重命名, 覆盖旧文件。这样即使中途程序被中断, 也仅仅是留下了一个
未完成的临时文件而已, 不会影响原文件。 | [
"A",
"stable",
"version",
"of",
":",
"func",
":",
"dump",
"this",
"method",
"will",
"silently",
"overwrite",
"existing",
"file",
".",
"There",
"s",
"a",
"issue",
"with",
":",
"func",
":",
"dump",
":",
"If",
"your",
"program",
"is",
"interrupted",
"while",
"writing",
"you",
"got",
"an",
"incomplete",
"file",
"and",
"you",
"also",
"lose",
"the",
"original",
"file",
".",
"So",
"this",
"method",
"write",
"pickle",
"to",
"a",
"temporary",
"file",
"first",
"then",
"rename",
"to",
"what",
"you",
"expect",
"and",
"silently",
"overwrite",
"old",
"one",
".",
"This",
"way",
"can",
"guarantee",
"atomic",
"write",
".",
"**",
"中文文档",
"**"
] | train | https://github.com/MacHu-GWU/dataIO-project/blob/7e1cc192b5e53426eed6dbd742918619b8fd60ab/dataIO/pk.py#L207-L229 |
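A minimal usage sketch based on the signature above; the data and path are hypothetical.

from dataIO import pk

data = {"a": 1, "b": 2}
# Writes to "test.pickle.tmp" first, then renames over "test.pickle",
# so an interrupted write never clobbers an existing file.
pk.safe_dump(data, "test.pickle")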
MacHu-GWU/dataIO-project | dataIO/pk.py | obj2bytes | def obj2bytes(obj, pk_protocol=py23.pk_protocol):
"""Convert arbitrary pickable Python Object to bytes.
**中文文档**
将可Pickle化的Python对象转化为bytestr
"""
return pickle.dumps(obj, protocol=pk_protocol) | python | def obj2bytes(obj, pk_protocol=py23.pk_protocol):
"""Convert arbitrary pickable Python Object to bytes.
**中文文档**
将可Pickle化的Python对象转化为bytestr
"""
return pickle.dumps(obj, protocol=pk_protocol) | [
"def",
"obj2bytes",
"(",
"obj",
",",
"pk_protocol",
"=",
"py23",
".",
"pk_protocol",
")",
":",
"return",
"pickle",
".",
"dumps",
"(",
"obj",
",",
"protocol",
"=",
"pk_protocol",
")"
] | Convert arbitrary pickable Python Object to bytes.
**中文文档**
将可Pickle化的Python对象转化为bytestr | [
"Convert",
"arbitrary",
"pickable",
"Python",
"Object",
"to",
"bytes",
"."
] | train | https://github.com/MacHu-GWU/dataIO-project/blob/7e1cc192b5e53426eed6dbd742918619b8fd60ab/dataIO/pk.py#L232-L239 |
MacHu-GWU/dataIO-project | dataIO/pk.py | obj2str | def obj2str(obj, pk_protocol=py23.pk_protocol):
"""Convert arbitrary object to base64 encoded string.
**中文文档**
将可Pickle化的Python对象转化为utf-8编码的 ``纯ASCII字符串``
"""
return base64.urlsafe_b64encode(pickle.dumps(
obj, protocol=pk_protocol)).decode("utf-8") | python | def obj2str(obj, pk_protocol=py23.pk_protocol):
"""Convert arbitrary object to base64 encoded string.
**中文文档**
将可Pickle化的Python对象转化为utf-8编码的 ``纯ASCII字符串``
"""
return base64.urlsafe_b64encode(pickle.dumps(
obj, protocol=pk_protocol)).decode("utf-8") | [
"def",
"obj2str",
"(",
"obj",
",",
"pk_protocol",
"=",
"py23",
".",
"pk_protocol",
")",
":",
"return",
"base64",
".",
"urlsafe_b64encode",
"(",
"pickle",
".",
"dumps",
"(",
"obj",
",",
"protocol",
"=",
"pk_protocol",
")",
")",
".",
"decode",
"(",
"\"utf-8\"",
")"
] | Convert arbitrary object to base64 encoded string.
**中文文档**
将可Pickle化的Python对象转化为utf-8编码的 ``纯ASCII字符串`` | [
"Convert",
"arbitrary",
"object",
"to",
"base64",
"encoded",
"string",
"."
] | train | https://github.com/MacHu-GWU/dataIO-project/blob/7e1cc192b5e53426eed6dbd742918619b8fd60ab/dataIO/pk.py#L252-L260 |
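A round-trip sketch for obj2bytes and obj2str; decoding uses the standard library directly, mirroring how the values are produced above (whether the module offers a dedicated decoder is not shown in these records).

import base64
import pickle

from dataIO import pk

obj = {"a": 1, "b": [2, 3]}

raw = pk.obj2bytes(obj)          # plain pickle bytes
assert pickle.loads(raw) == obj

text = pk.obj2str(obj)           # URL-safe base64, pure ASCII
assert pickle.loads(base64.urlsafe_b64decode(text.encode("utf-8"))) == obj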
rainwoodman/kdcount | kdcount/utils.py | bincount | def bincount(dig, weight, minlength):
""" bincount supporting scalar and vector weight """
if numpy.isscalar(weight):
return numpy.bincount(dig, minlength=minlength) * weight
else:
return numpy.bincount(dig, weight, minlength) | python | def bincount(dig, weight, minlength):
""" bincount supporting scalar and vector weight """
if numpy.isscalar(weight):
return numpy.bincount(dig, minlength=minlength) * weight
else:
return numpy.bincount(dig, weight, minlength) | [
"def",
"bincount",
"(",
"dig",
",",
"weight",
",",
"minlength",
")",
":",
"if",
"numpy",
".",
"isscalar",
"(",
"weight",
")",
":",
"return",
"numpy",
".",
"bincount",
"(",
"dig",
",",
"minlength",
"=",
"minlength",
")",
"*",
"weight",
"else",
":",
"return",
"numpy",
".",
"bincount",
"(",
"dig",
",",
"weight",
",",
"minlength",
")"
] | bincount supporting scalar and vector weight | [
"bincount",
"supporting",
"scalar",
"and",
"vector",
"weight"
] | train | https://github.com/rainwoodman/kdcount/blob/483548f6d27a4f245cd5d98880b5f4edd6cc8dc1/kdcount/utils.py#L27-L32 |
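A small sketch of the scalar-versus-vector weight behaviour; the import path is assumed from the file location above.

import numpy
from kdcount.utils import bincount  # import path assumed from kdcount/utils.py

dig = numpy.array([0, 1, 1, 2])

# Scalar weight: an ordinary bincount scaled by the weight.
print(bincount(dig, 2.0, 4))                                 # [2. 4. 2. 0.]

# Vector weight: per-item weights are summed into each bin.
print(bincount(dig, numpy.array([0.5, 1.0, 1.5, 2.0]), 4))   # [0.5 2.5 2.  0. ]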
timothycrosley/concentration | concentration/run.py | reset_network | def reset_network(message):
"""Resets the users network to make changes take effect"""
for command in settings.RESTART_NETWORK:
try:
subprocess.check_call(command)
except:
pass
print(message) | python | def reset_network(message):
"""Resets the users network to make changes take effect"""
for command in settings.RESTART_NETWORK:
try:
subprocess.check_call(command)
except:
pass
print(message) | [
"def",
"reset_network",
"(",
"message",
")",
":",
"for",
"command",
"in",
"settings",
".",
"RESTART_NETWORK",
":",
"try",
":",
"subprocess",
".",
"check_call",
"(",
"command",
")",
"except",
":",
"pass",
"print",
"(",
"message",
")"
] | Resets the users network to make changes take effect | [
"Resets",
"the",
"users",
"network",
"to",
"make",
"changes",
"take",
"effect"
] | train | https://github.com/timothycrosley/concentration/blob/5d07a79cdf56054c42b6e2d1c95ea51bc6678fc4/concentration/run.py#L15-L22 |
timothycrosley/concentration | concentration/run.py | improve | def improve():
"""Disables access to websites that are defined as 'distractors'"""
with open(settings.HOSTS_FILE, "r+") as hosts_file:
contents = hosts_file.read()
if not settings.START_TOKEN in contents and not settings.END_TOKEN in contents:
hosts_file.write(settings.START_TOKEN + "\n")
for site in set(settings.DISTRACTORS):
hosts_file.write("{0}\t{1}\n".format(settings.REDIRECT_TO, site))
for sub_domain in settings.SUB_DOMAINS:
hosts_file.write("{0}\t{1}.{2}\n".format(settings.REDIRECT_TO, sub_domain, site))
hosts_file.write(settings.END_TOKEN + "\n")
reset_network("Concentration is now improved :D!") | python | def improve():
"""Disables access to websites that are defined as 'distractors'"""
with open(settings.HOSTS_FILE, "r+") as hosts_file:
contents = hosts_file.read()
if not settings.START_TOKEN in contents and not settings.END_TOKEN in contents:
hosts_file.write(settings.START_TOKEN + "\n")
for site in set(settings.DISTRACTORS):
hosts_file.write("{0}\t{1}\n".format(settings.REDIRECT_TO, site))
for sub_domain in settings.SUB_DOMAINS:
hosts_file.write("{0}\t{1}.{2}\n".format(settings.REDIRECT_TO, sub_domain, site))
hosts_file.write(settings.END_TOKEN + "\n")
reset_network("Concentration is now improved :D!") | [
"def",
"improve",
"(",
")",
":",
"with",
"open",
"(",
"settings",
".",
"HOSTS_FILE",
",",
"\"r+\"",
")",
"as",
"hosts_file",
":",
"contents",
"=",
"hosts_file",
".",
"read",
"(",
")",
"if",
"not",
"settings",
".",
"START_TOKEN",
"in",
"contents",
"and",
"not",
"settings",
".",
"END_TOKEN",
"in",
"contents",
":",
"hosts_file",
".",
"write",
"(",
"settings",
".",
"START_TOKEN",
"+",
"\"\\n\"",
")",
"for",
"site",
"in",
"set",
"(",
"settings",
".",
"DISTRACTORS",
")",
":",
"hosts_file",
".",
"write",
"(",
"\"{0}\\t{1}\\n\"",
".",
"format",
"(",
"settings",
".",
"REDIRECT_TO",
",",
"site",
")",
")",
"for",
"sub_domain",
"in",
"settings",
".",
"SUB_DOMAINS",
":",
"hosts_file",
".",
"write",
"(",
"\"{0}\\t{1}.{2}\\n\"",
".",
"format",
"(",
"settings",
".",
"REDIRECT_TO",
",",
"sub_domain",
",",
"site",
")",
")",
"hosts_file",
".",
"write",
"(",
"settings",
".",
"END_TOKEN",
"+",
"\"\\n\"",
")",
"reset_network",
"(",
"\"Concentration is now improved :D!\"",
")"
] | Disables access to websites that are defined as 'distractors | [
"Disables",
"access",
"to",
"websites",
"that",
"are",
"defined",
"as",
"distractors"
] | train | https://github.com/timothycrosley/concentration/blob/5d07a79cdf56054c42b6e2d1c95ea51bc6678fc4/concentration/run.py#L26-L38 |
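A sketch of the hosts-file block the function appends, using hypothetical stand-ins for the settings constants (REDIRECT_TO, START_TOKEN, END_TOKEN, DISTRACTORS and SUB_DOMAINS live in concentration's settings module; their real values are not shown here).

REDIRECT_TO = "127.0.0.1"              # hypothetical values
START_TOKEN = "## start-concentration"
END_TOKEN = "## end-concentration"
DISTRACTORS = ["example.com"]
SUB_DOMAINS = ["www"]

block = [START_TOKEN]
for site in DISTRACTORS:
    block.append("{0}\t{1}".format(REDIRECT_TO, site))
    for sub in SUB_DOMAINS:
        block.append("{0}\t{1}.{2}".format(REDIRECT_TO, sub, site))
block.append(END_TOKEN)
print("\n".join(block))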
timothycrosley/concentration | concentration/run.py | lose | def lose():
"""Enables access to websites that are defined as 'distractors'"""
changed = False
with open(settings.HOSTS_FILE, "r") as hosts_file:
new_file = []
in_block = False
for line in hosts_file:
if in_block:
if line.strip() == settings.END_TOKEN:
in_block = False
changed = True
elif line.strip() == settings.START_TOKEN:
in_block = True
else:
new_file.append(line)
if changed:
with open(settings.HOSTS_FILE, "w") as hosts_file:
hosts_file.write("".join(new_file))
reset_network("Concentration is now lost :(.") | python | def lose():
"""Enables access to websites that are defined as 'distractors'"""
changed = False
with open(settings.HOSTS_FILE, "r") as hosts_file:
new_file = []
in_block = False
for line in hosts_file:
if in_block:
if line.strip() == settings.END_TOKEN:
in_block = False
changed = True
elif line.strip() == settings.START_TOKEN:
in_block = True
else:
new_file.append(line)
if changed:
with open(settings.HOSTS_FILE, "w") as hosts_file:
hosts_file.write("".join(new_file))
reset_network("Concentration is now lost :(.") | [
"def",
"lose",
"(",
")",
":",
"changed",
"=",
"False",
"with",
"open",
"(",
"settings",
".",
"HOSTS_FILE",
",",
"\"r\"",
")",
"as",
"hosts_file",
":",
"new_file",
"=",
"[",
"]",
"in_block",
"=",
"False",
"for",
"line",
"in",
"hosts_file",
":",
"if",
"in_block",
":",
"if",
"line",
".",
"strip",
"(",
")",
"==",
"settings",
".",
"END_TOKEN",
":",
"in_block",
"=",
"False",
"changed",
"=",
"True",
"elif",
"line",
".",
"strip",
"(",
")",
"==",
"settings",
".",
"START_TOKEN",
":",
"in_block",
"=",
"True",
"else",
":",
"new_file",
".",
"append",
"(",
"line",
")",
"if",
"changed",
":",
"with",
"open",
"(",
"settings",
".",
"HOSTS_FILE",
",",
"\"w\"",
")",
"as",
"hosts_file",
":",
"hosts_file",
".",
"write",
"(",
"\"\"",
".",
"join",
"(",
"new_file",
")",
")",
"reset_network",
"(",
"\"Concentration is now lost :(.\"",
")"
] | Enables access to websites that are defined as 'distractors | [
"Enables",
"access",
"to",
"websites",
"that",
"are",
"defined",
"as",
"distractors"
] | train | https://github.com/timothycrosley/concentration/blob/5d07a79cdf56054c42b6e2d1c95ea51bc6678fc4/concentration/run.py#L42-L61 |
timothycrosley/concentration | concentration/run.py | take_break | def take_break(minutes: hug.types.number=5):
"""Enables temporarily breaking concentration"""
print("")
print("######################################### ARE YOU SURE? #####################################")
try:
for remaining in range(60, -1, -1):
sys.stdout.write("\r")
sys.stdout.write("{:2d} seconds to change your mind. Won't you prefer programming? Or a book?".format(remaining))
sys.stdout.flush()
time.sleep(1)
except KeyboardInterrupt:
print("")
print("")
print(":D :D :D\nGood on you! <3")
return
# The user insisted on breaking concentration.
lose()
print("")
print("######################################### TAKING A BREAK ####################################")
try:
for remaining in range(minutes * 60, -1, -1):
sys.stdout.write("\r")
sys.stdout.write("{:2d} seconds remaining without concentration.".format(remaining))
sys.stdout.flush()
time.sleep(1)
except KeyboardInterrupt:
pass
finally:
sys.stdout.write("\rEnough distraction! \n")
print("######################################### BREAK OVER :) #####################################")
print("")
improve() | python | def take_break(minutes: hug.types.number=5):
"""Enables temporarily breaking concentration"""
print("")
print("######################################### ARE YOU SURE? #####################################")
try:
for remaining in range(60, -1, -1):
sys.stdout.write("\r")
sys.stdout.write("{:2d} seconds to change your mind. Won't you prefer programming? Or a book?".format(remaining))
sys.stdout.flush()
time.sleep(1)
except KeyboardInterrupt:
print("")
print("")
print(":D :D :D\nGood on you! <3")
return
# The user insisted on breaking concentration.
lose()
print("")
print("######################################### TAKING A BREAK ####################################")
try:
for remaining in range(minutes * 60, -1, -1):
sys.stdout.write("\r")
sys.stdout.write("{:2d} seconds remaining without concentration.".format(remaining))
sys.stdout.flush()
time.sleep(1)
except KeyboardInterrupt:
pass
finally:
sys.stdout.write("\rEnough distraction! \n")
print("######################################### BREAK OVER :) #####################################")
print("")
improve() | [
"def",
"take_break",
"(",
"minutes",
":",
"hug",
".",
"types",
".",
"number",
"=",
"5",
")",
":",
"print",
"(",
"\"\"",
")",
"print",
"(",
"\"######################################### ARE YOU SURE? #####################################\"",
")",
"try",
":",
"for",
"remaining",
"in",
"range",
"(",
"60",
",",
"-",
"1",
",",
"-",
"1",
")",
":",
"sys",
".",
"stdout",
".",
"write",
"(",
"\"\\r\"",
")",
"sys",
".",
"stdout",
".",
"write",
"(",
"\"{:2d} seconds to change your mind. Won't you prefer programming? Or a book?\"",
".",
"format",
"(",
"remaining",
")",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"time",
".",
"sleep",
"(",
"1",
")",
"except",
"KeyboardInterrupt",
":",
"print",
"(",
"\"\"",
")",
"print",
"(",
"\"\"",
")",
"print",
"(",
"\":D :D :D\\nGood on you! <3\"",
")",
"return",
"# The user insisted on breaking concentration.",
"lose",
"(",
")",
"print",
"(",
"\"\"",
")",
"print",
"(",
"\"######################################### TAKING A BREAK ####################################\"",
")",
"try",
":",
"for",
"remaining",
"in",
"range",
"(",
"minutes",
"*",
"60",
",",
"-",
"1",
",",
"-",
"1",
")",
":",
"sys",
".",
"stdout",
".",
"write",
"(",
"\"\\r\"",
")",
"sys",
".",
"stdout",
".",
"write",
"(",
"\"{:2d} seconds remaining without concentration.\"",
".",
"format",
"(",
"remaining",
")",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"time",
".",
"sleep",
"(",
"1",
")",
"except",
"KeyboardInterrupt",
":",
"pass",
"finally",
":",
"sys",
".",
"stdout",
".",
"write",
"(",
"\"\\rEnough distraction! \\n\"",
")",
"print",
"(",
"\"######################################### BREAK OVER :) #####################################\"",
")",
"print",
"(",
"\"\"",
")",
"improve",
"(",
")"
] | Enables temporarily breaking concentration | [
"Enables",
"temporarily",
"breaking",
"concentration"
] | train | https://github.com/timothycrosley/concentration/blob/5d07a79cdf56054c42b6e2d1c95ea51bc6678fc4/concentration/run.py#L65-L97 |
managedbyq/mbq.metrics | mbq/metrics/contrib/django/middleware/connection_stats.py | ConnectionStatsMiddleware.local_port_range | def local_port_range(self):
"""Tuple of (low_port, high_port) reflecting the local port range
assigned to outbound connections. We use this as part of a heuristic
to determine whether a connection is inbound or outbound.
"""
if self._local_port_range is None:
with open('/proc/sys/net/ipv4/ip_local_port_range', 'r') as f:
self._local_port_range = tuple(map(int, f.read().split('\t')))
return self._local_port_range | python | def local_port_range(self):
"""Tuple of (low_port, high_port) reflecting the local port range
assigned to outbound connections. We use this as part of a heuristic
to determine whether a connection is inbound or outbound.
"""
if self._local_port_range is None:
with open('/proc/sys/net/ipv4/ip_local_port_range', 'r') as f:
self._local_port_range = tuple(map(int, f.read().split('\t')))
return self._local_port_range | [
"def",
"local_port_range",
"(",
"self",
")",
":",
"if",
"self",
".",
"_local_port_range",
"is",
"None",
":",
"with",
"open",
"(",
"'/proc/sys/net/ipv4/ip_local_port_range'",
",",
"'r'",
")",
"as",
"f",
":",
"self",
".",
"_local_port_range",
"=",
"tuple",
"(",
"map",
"(",
"int",
",",
"f",
".",
"read",
"(",
")",
".",
"split",
"(",
"'\\t'",
")",
")",
")",
"return",
"self",
".",
"_local_port_range"
] | Tuple of (low_port, high_port) reflecting the local port range
assigned to outbound connections. We use this as part of a heuristic
to determine whether a connection is inbound or outbound. | [
"Tuple",
"of",
"(",
"low_port",
"high_port",
")",
"reflecting",
"the",
"local",
"port",
"range",
"assigned",
"to",
"outbound",
"connections",
".",
"We",
"use",
"this",
"as",
"part",
"of",
"a",
"heuristic",
"to",
"determine",
"whether",
"a",
"connection",
"is",
"inbound",
"or",
"outbound",
"."
] | train | https://github.com/managedbyq/mbq.metrics/blob/22ce48dbf132f9ddd4adf86d25df6f58e3d7a520/mbq/metrics/contrib/django/middleware/connection_stats.py#L44-L52 |
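A sketch of what the property reads and how the inbound/outbound heuristic from the docstring can use it; the port values mentioned are common Linux defaults, not guaranteed.

# /proc/sys/net/ipv4/ip_local_port_range holds two tab-separated integers,
# commonly 32768 and 60999 on Linux.
with open('/proc/sys/net/ipv4/ip_local_port_range', 'r') as f:
    low, high = tuple(map(int, f.read().split('\t')))

def looks_outbound(local_port):
    # Heuristic from the docstring: a local port inside the ephemeral range
    # was most likely assigned to a connection we opened ourselves.
    return low <= local_port <= high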
MozillaSecurity/fuzzfetch | src/fuzzfetch/fetch.py | _si | def _si(number):
"""Format a number using base-2 SI prefixes"""
prefixes = ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y']
while number > 1024:
number /= 1024.0
prefixes.pop(0)
return '%0.2f%s' % (number, prefixes.pop(0)) | python | def _si(number):
"""Format a number using base-2 SI prefixes"""
prefixes = ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y']
while number > 1024:
number /= 1024.0
prefixes.pop(0)
return '%0.2f%s' % (number, prefixes.pop(0)) | [
"def",
"_si",
"(",
"number",
")",
":",
"prefixes",
"=",
"[",
"''",
",",
"'K'",
",",
"'M'",
",",
"'G'",
",",
"'T'",
",",
"'P'",
",",
"'E'",
",",
"'Z'",
",",
"'Y'",
"]",
"while",
"number",
">",
"1024",
":",
"number",
"/=",
"1024.0",
"prefixes",
".",
"pop",
"(",
"0",
")",
"return",
"'%0.2f%s'",
"%",
"(",
"number",
",",
"prefixes",
".",
"pop",
"(",
"0",
")",
")"
] | Format a number using base-2 SI prefixes | [
"Format",
"a",
"number",
"using",
"base",
"-",
"2",
"SI",
"prefixes"
] | train | https://github.com/MozillaSecurity/fuzzfetch/blob/166cbfc71b679db019b9ac777dce12ccfdfc2c10/src/fuzzfetch/fetch.py#L72-L78 |
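Values implied by the implementation, assuming _si from fuzzfetch.fetch is in scope.

assert _si(512) == '512.00'            # below 1024: no prefix
assert _si(2048) == '2.00K'
assert _si(3 * 1024 * 1024) == '3.00M'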
MozillaSecurity/fuzzfetch | src/fuzzfetch/fetch.py | _get_url | def _get_url(url):
"""Retrieve requested URL"""
try:
data = HTTP_SESSION.get(url, stream=True)
data.raise_for_status()
except requests.exceptions.RequestException as exc:
raise FetcherException(exc)
return data | python | def _get_url(url):
"""Retrieve requested URL"""
try:
data = HTTP_SESSION.get(url, stream=True)
data.raise_for_status()
except requests.exceptions.RequestException as exc:
raise FetcherException(exc)
return data | [
"def",
"_get_url",
"(",
"url",
")",
":",
"try",
":",
"data",
"=",
"HTTP_SESSION",
".",
"get",
"(",
"url",
",",
"stream",
"=",
"True",
")",
"data",
".",
"raise_for_status",
"(",
")",
"except",
"requests",
".",
"exceptions",
".",
"RequestException",
"as",
"exc",
":",
"raise",
"FetcherException",
"(",
"exc",
")",
"return",
"data"
] | Retrieve requested URL | [
"Retrieve",
"requested",
"URL"
] | train | https://github.com/MozillaSecurity/fuzzfetch/blob/166cbfc71b679db019b9ac777dce12ccfdfc2c10/src/fuzzfetch/fetch.py#L81-L89 |
MozillaSecurity/fuzzfetch | src/fuzzfetch/fetch.py | _extract_file | def _extract_file(zip_fp, info, path):
"""Extract files while explicitly setting the proper permissions"""
zip_fp.extract(info.filename, path=path)
out_path = os.path.join(path, info.filename)
perm = info.external_attr >> 16
perm |= stat.S_IREAD # make sure we're not accidentally setting this to 0
os.chmod(out_path, perm) | python | def _extract_file(zip_fp, info, path):
"""Extract files while explicitly setting the proper permissions"""
zip_fp.extract(info.filename, path=path)
out_path = os.path.join(path, info.filename)
perm = info.external_attr >> 16
perm |= stat.S_IREAD # make sure we're not accidentally setting this to 0
os.chmod(out_path, perm) | [
"def",
"_extract_file",
"(",
"zip_fp",
",",
"info",
",",
"path",
")",
":",
"zip_fp",
".",
"extract",
"(",
"info",
".",
"filename",
",",
"path",
"=",
"path",
")",
"out_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"info",
".",
"filename",
")",
"perm",
"=",
"info",
".",
"external_attr",
">>",
"16",
"perm",
"|=",
"stat",
".",
"S_IREAD",
"# make sure we're not accidentally setting this to 0",
"os",
".",
"chmod",
"(",
"out_path",
",",
"perm",
")"
] | Extract files while explicitly setting the proper permissions | [
"Extract",
"files",
"while",
"explicitly",
"setting",
"the",
"proper",
"permissions"
] | train | https://github.com/MozillaSecurity/fuzzfetch/blob/166cbfc71b679db019b9ac777dce12ccfdfc2c10/src/fuzzfetch/fetch.py#L110-L117 |
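A standard-library sketch of the permission bits the helper restores; the archive path and member name are hypothetical.

import stat
import zipfile

with zipfile.ZipFile("build.zip") as zip_fp:        # hypothetical archive
    info = zip_fp.getinfo("firefox/firefox")        # hypothetical member
    # Unix mode bits live in the high 16 bits of external_attr; the helper
    # re-applies them (plus S_IREAD) after extracting, which a bare
    # ZipFile.extract() would otherwise not preserve.
    mode = (info.external_attr >> 16) | stat.S_IREAD
    print(oct(mode))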
MozillaSecurity/fuzzfetch | src/fuzzfetch/fetch.py | BuildFlags.build_string | def build_string(self):
"""
Taskcluster denotes builds in one of two formats - i.e. linux64-asan or linux64-asan-opt
The latter is generated. If it fails, the caller should try the former.
"""
return (('-ccov' if self.coverage else '') +
('-fuzzing' if self.fuzzing else '') +
('-asan' if self.asan else '') +
('-valgrind' if self.valgrind else '') +
('-debug' if self.debug else '-opt')) | python | def build_string(self):
"""
Taskcluster denotes builds in one of two formats - i.e. linux64-asan or linux64-asan-opt
The latter is generated. If it fails, the caller should try the former.
"""
return (('-ccov' if self.coverage else '') +
('-fuzzing' if self.fuzzing else '') +
('-asan' if self.asan else '') +
('-valgrind' if self.valgrind else '') +
('-debug' if self.debug else '-opt')) | [
"def",
"build_string",
"(",
"self",
")",
":",
"return",
"(",
"(",
"'-ccov'",
"if",
"self",
".",
"coverage",
"else",
"''",
")",
"+",
"(",
"'-fuzzing'",
"if",
"self",
".",
"fuzzing",
"else",
"''",
")",
"+",
"(",
"'-asan'",
"if",
"self",
".",
"asan",
"else",
"''",
")",
"+",
"(",
"'-valgrind'",
"if",
"self",
".",
"valgrind",
"else",
"''",
")",
"+",
"(",
"'-debug'",
"if",
"self",
".",
"debug",
"else",
"'-opt'",
")",
")"
] | Taskcluster denotes builds in one of two formats - i.e. linux64-asan or linux64-asan-opt
The latter is generated. If it fails, the caller should try the former. | [
"Taskcluster",
"denotes",
"builds",
"in",
"one",
"of",
"two",
"formats",
"-",
"i",
".",
"e",
".",
"linux64",
"-",
"asan",
"or",
"linux64",
"-",
"asan",
"-",
"opt",
"The",
"latter",
"is",
"generated",
".",
"If",
"it",
"fails",
"the",
"caller",
"should",
"try",
"the",
"former",
"."
] | train | https://github.com/MozillaSecurity/fuzzfetch/blob/166cbfc71b679db019b9ac777dce12ccfdfc2c10/src/fuzzfetch/fetch.py#L129-L138 |
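A standalone copy of the concatenation, so the resulting suffixes are visible without constructing a BuildFlags value (its constructor is not shown in this record).

def build_string(coverage=False, fuzzing=False, asan=False, valgrind=False, debug=False):
    # Mirrors BuildFlags.build_string for illustration only.
    return (('-ccov' if coverage else '') +
            ('-fuzzing' if fuzzing else '') +
            ('-asan' if asan else '') +
            ('-valgrind' if valgrind else '') +
            ('-debug' if debug else '-opt'))

assert build_string(debug=True) == '-debug'
assert build_string(asan=True, fuzzing=True) == '-fuzzing-asan-opt'
assert build_string(coverage=True) == '-ccov-opt'
assert build_string() == '-opt'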
MozillaSecurity/fuzzfetch | src/fuzzfetch/fetch.py | Platform.auto_name_prefix | def auto_name_prefix(self):
"""
Generate platform prefix for cross-platform downloads.
"""
# if the platform is not native, auto_name would clobber native downloads.
# make a prefix to avoid this
native_system = std_platform.system()
native_machine = self.CPU_ALIASES.get(std_platform.machine(), std_platform.machine())
if native_system == self.system and native_machine == self.machine:
return ''
platform = {
'linux': 'linux32',
'android-api-16': 'android-arm',
'android-aarch64': 'android-arm64',
}.get(self.gecko_platform, self.gecko_platform)
return platform + '-' | python | def auto_name_prefix(self):
"""
Generate platform prefix for cross-platform downloads.
"""
# if the platform is not native, auto_name would clobber native downloads.
# make a prefix to avoid this
native_system = std_platform.system()
native_machine = self.CPU_ALIASES.get(std_platform.machine(), std_platform.machine())
if native_system == self.system and native_machine == self.machine:
return ''
platform = {
'linux': 'linux32',
'android-api-16': 'android-arm',
'android-aarch64': 'android-arm64',
}.get(self.gecko_platform, self.gecko_platform)
return platform + '-' | [
"def",
"auto_name_prefix",
"(",
"self",
")",
":",
"# if the platform is not native, auto_name would clobber native downloads.",
"# make a prefix to avoid this",
"native_system",
"=",
"std_platform",
".",
"system",
"(",
")",
"native_machine",
"=",
"self",
".",
"CPU_ALIASES",
".",
"get",
"(",
"std_platform",
".",
"machine",
"(",
")",
",",
"std_platform",
".",
"machine",
"(",
")",
")",
"if",
"native_system",
"==",
"self",
".",
"system",
"and",
"native_machine",
"==",
"self",
".",
"machine",
":",
"return",
"''",
"platform",
"=",
"{",
"'linux'",
":",
"'linux32'",
",",
"'android-api-16'",
":",
"'android-arm'",
",",
"'android-aarch64'",
":",
"'android-arm64'",
",",
"}",
".",
"get",
"(",
"self",
".",
"gecko_platform",
",",
"self",
".",
"gecko_platform",
")",
"return",
"platform",
"+",
"'-'"
] | Generate platform prefix for cross-platform downloads. | [
"Generate",
"platform",
"prefix",
"for",
"cross",
"-",
"platform",
"downloads",
"."
] | train | https://github.com/MozillaSecurity/fuzzfetch/blob/166cbfc71b679db019b9ac777dce12ccfdfc2c10/src/fuzzfetch/fetch.py#L171-L186 |
MozillaSecurity/fuzzfetch | src/fuzzfetch/fetch.py | BuildTask.iterall | def iterall(cls, build, branch, flags, platform=None):
"""Generator for all possible BuildTasks with these parameters"""
# Prepare build type
if platform is None:
platform = Platform()
target_platform = platform.gecko_platform
is_namespace = False
if cls.RE_DATE.match(build):
task_urls = map(''.join,
itertools.product(cls._pushdate_urls(build.replace('-', '.'), branch, target_platform),
(flags.build_string(),)))
elif cls.RE_REV.match(build):
task_urls = (cls._revision_url(build.lower(), branch, target_platform) + flags.build_string(),)
elif build == 'latest':
namespace = 'gecko.v2.mozilla-' + branch + '.latest'
product = 'mobile' if 'android' in target_platform else 'firefox'
task_urls = (cls.URL_BASE + '/task/' + namespace + '.' + product + '.' + target_platform +
flags.build_string(),)
else:
# try to use build argument directly as a namespace
task_urls = (cls.URL_BASE + '/task/' + build,)
is_namespace = True
for (url, try_wo_opt) in itertools.product(task_urls, (False, True)):
if try_wo_opt:
if '-opt' not in url or is_namespace:
continue
url = url.replace('-opt', '')
try:
data = HTTP_SESSION.get(url)
data.raise_for_status()
except requests.exceptions.RequestException:
continue
obj = cls(None, None, None, _blank=True)
obj.url = url
obj._data = data.json() # pylint: disable=protected-access
LOG.debug('Found archive for %s', cls._debug_str(build))
yield obj | python | def iterall(cls, build, branch, flags, platform=None):
"""Generator for all possible BuildTasks with these parameters"""
# Prepare build type
if platform is None:
platform = Platform()
target_platform = platform.gecko_platform
is_namespace = False
if cls.RE_DATE.match(build):
task_urls = map(''.join,
itertools.product(cls._pushdate_urls(build.replace('-', '.'), branch, target_platform),
(flags.build_string(),)))
elif cls.RE_REV.match(build):
task_urls = (cls._revision_url(build.lower(), branch, target_platform) + flags.build_string(),)
elif build == 'latest':
namespace = 'gecko.v2.mozilla-' + branch + '.latest'
product = 'mobile' if 'android' in target_platform else 'firefox'
task_urls = (cls.URL_BASE + '/task/' + namespace + '.' + product + '.' + target_platform +
flags.build_string(),)
else:
# try to use build argument directly as a namespace
task_urls = (cls.URL_BASE + '/task/' + build,)
is_namespace = True
for (url, try_wo_opt) in itertools.product(task_urls, (False, True)):
if try_wo_opt:
if '-opt' not in url or is_namespace:
continue
url = url.replace('-opt', '')
try:
data = HTTP_SESSION.get(url)
data.raise_for_status()
except requests.exceptions.RequestException:
continue
obj = cls(None, None, None, _blank=True)
obj.url = url
obj._data = data.json() # pylint: disable=protected-access
LOG.debug('Found archive for %s', cls._debug_str(build))
yield obj | [
"def",
"iterall",
"(",
"cls",
",",
"build",
",",
"branch",
",",
"flags",
",",
"platform",
"=",
"None",
")",
":",
"# Prepare build type",
"if",
"platform",
"is",
"None",
":",
"platform",
"=",
"Platform",
"(",
")",
"target_platform",
"=",
"platform",
".",
"gecko_platform",
"is_namespace",
"=",
"False",
"if",
"cls",
".",
"RE_DATE",
".",
"match",
"(",
"build",
")",
":",
"task_urls",
"=",
"map",
"(",
"''",
".",
"join",
",",
"itertools",
".",
"product",
"(",
"cls",
".",
"_pushdate_urls",
"(",
"build",
".",
"replace",
"(",
"'-'",
",",
"'.'",
")",
",",
"branch",
",",
"target_platform",
")",
",",
"(",
"flags",
".",
"build_string",
"(",
")",
",",
")",
")",
")",
"elif",
"cls",
".",
"RE_REV",
".",
"match",
"(",
"build",
")",
":",
"task_urls",
"=",
"(",
"cls",
".",
"_revision_url",
"(",
"build",
".",
"lower",
"(",
")",
",",
"branch",
",",
"target_platform",
")",
"+",
"flags",
".",
"build_string",
"(",
")",
",",
")",
"elif",
"build",
"==",
"'latest'",
":",
"namespace",
"=",
"'gecko.v2.mozilla-'",
"+",
"branch",
"+",
"'.latest'",
"product",
"=",
"'mobile'",
"if",
"'android'",
"in",
"target_platform",
"else",
"'firefox'",
"task_urls",
"=",
"(",
"cls",
".",
"URL_BASE",
"+",
"'/task/'",
"+",
"namespace",
"+",
"'.'",
"+",
"product",
"+",
"'.'",
"+",
"target_platform",
"+",
"flags",
".",
"build_string",
"(",
")",
",",
")",
"else",
":",
"# try to use build argument directly as a namespace",
"task_urls",
"=",
"(",
"cls",
".",
"URL_BASE",
"+",
"'/task/'",
"+",
"build",
",",
")",
"is_namespace",
"=",
"True",
"for",
"(",
"url",
",",
"try_wo_opt",
")",
"in",
"itertools",
".",
"product",
"(",
"task_urls",
",",
"(",
"False",
",",
"True",
")",
")",
":",
"if",
"try_wo_opt",
":",
"if",
"'-opt'",
"not",
"in",
"url",
"or",
"is_namespace",
":",
"continue",
"url",
"=",
"url",
".",
"replace",
"(",
"'-opt'",
",",
"''",
")",
"try",
":",
"data",
"=",
"HTTP_SESSION",
".",
"get",
"(",
"url",
")",
"data",
".",
"raise_for_status",
"(",
")",
"except",
"requests",
".",
"exceptions",
".",
"RequestException",
":",
"continue",
"obj",
"=",
"cls",
"(",
"None",
",",
"None",
",",
"None",
",",
"_blank",
"=",
"True",
")",
"obj",
".",
"url",
"=",
"url",
"obj",
".",
"_data",
"=",
"data",
".",
"json",
"(",
")",
"# pylint: disable=protected-access",
"LOG",
".",
"debug",
"(",
"'Found archive for %s'",
",",
"cls",
".",
"_debug_str",
"(",
"build",
")",
")",
"yield",
"obj"
] | Generator for all possible BuildTasks with these parameters | [
"Generator",
"for",
"all",
"possible",
"BuildTasks",
"with",
"these",
"parameters"
] | train | https://github.com/MozillaSecurity/fuzzfetch/blob/166cbfc71b679db019b9ac777dce12ccfdfc2c10/src/fuzzfetch/fetch.py#L220-L265 |
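A usage sketch (not taken from the repository itself): enumerating candidate tasks for a pushdate build might look like the following. The BuildFlags tuple order is taken from Fetcher.from_args further down in this file; each yielded BuildTask already carries a resolved task URL and its JSON payload.

from fuzzfetch.fetch import BuildTask, BuildFlags, Platform

flags = BuildFlags(False, True, False, False, False)  # (asan, debug, fuzzing, coverage, valgrind)
for task in BuildTask.iterall('2017-01-01', 'central', flags, platform=Platform('Linux', 'x86_64')):
    print(task.url)  # the first reachable task is typically the one Fetcher ends up using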
MozillaSecurity/fuzzfetch | src/fuzzfetch/fetch.py | BuildTask._pushdate_urls | def _pushdate_urls(cls, pushdate, branch, target_platform):
"""Multiple entries exist per push date. Iterate over all until a working entry is found"""
url_base = cls.URL_BASE + '/namespaces/gecko.v2.mozilla-' + branch + '.pushdate.' + pushdate
try:
base = HTTP_SESSION.post(url_base, json={})
base.raise_for_status()
except requests.exceptions.RequestException as exc:
raise FetcherException(exc)
product = 'mobile' if 'android' in target_platform else 'firefox'
json = base.json()
for namespace in sorted(json['namespaces'], key=lambda x: x['name']):
yield cls.URL_BASE + '/task/' + namespace['namespace'] + '.' + product + '.' + target_platform | python | def _pushdate_urls(cls, pushdate, branch, target_platform):
"""Multiple entries exist per push date. Iterate over all until a working entry is found"""
url_base = cls.URL_BASE + '/namespaces/gecko.v2.mozilla-' + branch + '.pushdate.' + pushdate
try:
base = HTTP_SESSION.post(url_base, json={})
base.raise_for_status()
except requests.exceptions.RequestException as exc:
raise FetcherException(exc)
product = 'mobile' if 'android' in target_platform else 'firefox'
json = base.json()
for namespace in sorted(json['namespaces'], key=lambda x: x['name']):
yield cls.URL_BASE + '/task/' + namespace['namespace'] + '.' + product + '.' + target_platform | [
"def",
"_pushdate_urls",
"(",
"cls",
",",
"pushdate",
",",
"branch",
",",
"target_platform",
")",
":",
"url_base",
"=",
"cls",
".",
"URL_BASE",
"+",
"'/namespaces/gecko.v2.mozilla-'",
"+",
"branch",
"+",
"'.pushdate.'",
"+",
"pushdate",
"try",
":",
"base",
"=",
"HTTP_SESSION",
".",
"post",
"(",
"url_base",
",",
"json",
"=",
"{",
"}",
")",
"base",
".",
"raise_for_status",
"(",
")",
"except",
"requests",
".",
"exceptions",
".",
"RequestException",
"as",
"exc",
":",
"raise",
"FetcherException",
"(",
"exc",
")",
"product",
"=",
"'mobile'",
"if",
"'android'",
"in",
"target_platform",
"else",
"'firefox'",
"json",
"=",
"base",
".",
"json",
"(",
")",
"for",
"namespace",
"in",
"sorted",
"(",
"json",
"[",
"'namespaces'",
"]",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"'name'",
"]",
")",
":",
"yield",
"cls",
".",
"URL_BASE",
"+",
"'/task/'",
"+",
"namespace",
"[",
"'namespace'",
"]",
"+",
"'.'",
"+",
"product",
"+",
"'.'",
"+",
"target_platform"
] | Multiple entries exist per push date. Iterate over all until a working entry is found | [
"Multiple",
"entries",
"exist",
"per",
"push",
"date",
".",
"Iterate",
"over",
"all",
"until",
"a",
"working",
"entry",
"is",
"found"
] | train | https://github.com/MozillaSecurity/fuzzfetch/blob/166cbfc71b679db019b9ac777dce12ccfdfc2c10/src/fuzzfetch/fetch.py#L273-L286 |
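For orientation, the request/response shape is roughly the following. URL_BASE is defined elsewhere in fetch.py (the Taskcluster index root), and the response layout is an assumption beyond the 'namespaces'/'namespace' keys the code actually reads.

POST {URL_BASE}/namespaces/gecko.v2.mozilla-central.pushdate.2017.01.01
  -> {"namespaces": [{"name": ..., "namespace": "gecko.v2.mozilla-central.pushdate.2017.01.01.<push>"}, ...]}
for each namespace (sorted by name) the generator yields
  {URL_BASE}/task/<namespace>.firefox.linux64      (product becomes 'mobile' for android targets)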
MozillaSecurity/fuzzfetch | src/fuzzfetch/fetch.py | BuildTask._revision_url | def _revision_url(cls, rev, branch, target_platform):
"""Retrieve the URL for revision based builds"""
namespace = 'gecko.v2.mozilla-' + branch + '.revision.' + rev
product = 'mobile' if 'android' in target_platform else 'firefox'
return cls.URL_BASE + '/task/' + namespace + '.' + product + '.' + target_platform | python | def _revision_url(cls, rev, branch, target_platform):
"""Retrieve the URL for revision based builds"""
namespace = 'gecko.v2.mozilla-' + branch + '.revision.' + rev
product = 'mobile' if 'android' in target_platform else 'firefox'
return cls.URL_BASE + '/task/' + namespace + '.' + product + '.' + target_platform | [
"def",
"_revision_url",
"(",
"cls",
",",
"rev",
",",
"branch",
",",
"target_platform",
")",
":",
"namespace",
"=",
"'gecko.v2.mozilla-'",
"+",
"branch",
"+",
"'.revision.'",
"+",
"rev",
"product",
"=",
"'mobile'",
"if",
"'android'",
"in",
"target_platform",
"else",
"'firefox'",
"return",
"cls",
".",
"URL_BASE",
"+",
"'/task/'",
"+",
"namespace",
"+",
"'.'",
"+",
"product",
"+",
"'.'",
"+",
"target_platform"
] | Retrieve the URL for revision based builds | [
"Retrieve",
"the",
"URL",
"for",
"revision",
"based",
"builds"
] | train | https://github.com/MozillaSecurity/fuzzfetch/blob/166cbfc71b679db019b9ac777dce12ccfdfc2c10/src/fuzzfetch/fetch.py#L289-L293 |
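A concrete example of the namespace this helper produces (illustrative; the revision is the one used in the CLI help text below, and URL_BASE is defined elsewhere in this file):

_revision_url('57b37213d81150642f5139764e7044b07b9dccc3', 'central', 'linux64')
  -> URL_BASE + '/task/gecko.v2.mozilla-central.revision.57b37213d81150642f5139764e7044b07b9dccc3.firefox.linux64'
# 'mobile' is substituted for 'firefox' whenever the target platform contains 'android'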
MozillaSecurity/fuzzfetch | src/fuzzfetch/fetch.py | Fetcher.iterall | def iterall(cls, target, branch, build, flags, platform=None):
"""Return an iterable for all available builds matching a particular build type"""
flags = BuildFlags(*flags)
for task in BuildTask.iterall(build, branch, flags, platform):
yield cls(target, branch, task, flags, platform) | python | def iterall(cls, target, branch, build, flags, platform=None):
"""Return an iterable for all available builds matching a particular build type"""
flags = BuildFlags(*flags)
for task in BuildTask.iterall(build, branch, flags, platform):
yield cls(target, branch, task, flags, platform) | [
"def",
"iterall",
"(",
"cls",
",",
"target",
",",
"branch",
",",
"build",
",",
"flags",
",",
"platform",
"=",
"None",
")",
":",
"flags",
"=",
"BuildFlags",
"(",
"*",
"flags",
")",
"for",
"task",
"in",
"BuildTask",
".",
"iterall",
"(",
"build",
",",
"branch",
",",
"flags",
",",
"platform",
")",
":",
"yield",
"cls",
"(",
"target",
",",
"branch",
",",
"task",
",",
"flags",
",",
"platform",
")"
] | Return an iterable for all available builds matching a particular build type | [
"Return",
"an",
"iterable",
"for",
"all",
"available",
"builds",
"matching",
"a",
"particular",
"build",
"type"
] | train | https://github.com/MozillaSecurity/fuzzfetch/blob/166cbfc71b679db019b9ac777dce12ccfdfc2c10/src/fuzzfetch/fetch.py#L386-L390 |
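A hedged usage sketch: flags may be any 5-item iterable, since it is immediately re-wrapped with BuildFlags(*flags).

from fuzzfetch.fetch import Fetcher

for fetcher in Fetcher.iterall('firefox', 'central', '2017-01-01',
                               (False, False, True, False, False)):  # (asan, debug, fuzzing, coverage, valgrind)
    print(fetcher.build_id, fetcher.changeset)  # both attributes are consumed later by _write_fuzzmanagerconf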
MozillaSecurity/fuzzfetch | src/fuzzfetch/fetch.py | Fetcher._artifacts | def _artifacts(self):
"""Retrieve the artifacts json object"""
if '_artifacts' not in self._memo:
json = _get_url(self._artifacts_url).json()
self._memo['_artifacts'] = json['artifacts']
return self._memo['_artifacts'] | python | def _artifacts(self):
"""Retrieve the artifacts json object"""
if '_artifacts' not in self._memo:
json = _get_url(self._artifacts_url).json()
self._memo['_artifacts'] = json['artifacts']
return self._memo['_artifacts'] | [
"def",
"_artifacts",
"(",
"self",
")",
":",
"if",
"'_artifacts'",
"not",
"in",
"self",
".",
"_memo",
":",
"json",
"=",
"_get_url",
"(",
"self",
".",
"_artifacts_url",
")",
".",
"json",
"(",
")",
"self",
".",
"_memo",
"[",
"'_artifacts'",
"]",
"=",
"json",
"[",
"'artifacts'",
"]",
"return",
"self",
".",
"_memo",
"[",
"'_artifacts'",
"]"
] | Retrieve the artifacts json object | [
"Retrieve",
"the",
"artifacts",
"json",
"object"
] | train | https://github.com/MozillaSecurity/fuzzfetch/blob/166cbfc71b679db019b9ac777dce12ccfdfc2c10/src/fuzzfetch/fetch.py#L393-L398 |
MozillaSecurity/fuzzfetch | src/fuzzfetch/fetch.py | Fetcher._artifact_base | def _artifact_base(self):
"""
Build the artifact basename
Builds are base.tar.bz2, info is base.json, shell is base.jsshell.zip...
"""
if '_artifact_base' not in self._memo:
for artifact in self._artifacts:
if self.re_target.search(artifact['name']) is not None:
artifact_base = os.path.splitext(artifact['name'])[0]
break
else:
raise FetcherException('Could not find build info in artifacts')
self._memo['_artifact_base'] = artifact_base
return self._memo['_artifact_base'] | python | def _artifact_base(self):
"""
Build the artifact basename
Builds are base.tar.bz2, info is base.json, shell is base.jsshell.zip...
"""
if '_artifact_base' not in self._memo:
for artifact in self._artifacts:
if self.re_target.search(artifact['name']) is not None:
artifact_base = os.path.splitext(artifact['name'])[0]
break
else:
raise FetcherException('Could not find build info in artifacts')
self._memo['_artifact_base'] = artifact_base
return self._memo['_artifact_base'] | [
"def",
"_artifact_base",
"(",
"self",
")",
":",
"if",
"'_artifact_base'",
"not",
"in",
"self",
".",
"_memo",
":",
"for",
"artifact",
"in",
"self",
".",
"_artifacts",
":",
"if",
"self",
".",
"re_target",
".",
"search",
"(",
"artifact",
"[",
"'name'",
"]",
")",
"is",
"not",
"None",
":",
"artifact_base",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"artifact",
"[",
"'name'",
"]",
")",
"[",
"0",
"]",
"break",
"else",
":",
"raise",
"FetcherException",
"(",
"'Could not find build info in artifacts'",
")",
"self",
".",
"_memo",
"[",
"'_artifact_base'",
"]",
"=",
"artifact_base",
"return",
"self",
".",
"_memo",
"[",
"'_artifact_base'",
"]"
] | Build the artifact basename
Builds are base.tar.bz2, info is base.json, shell is base.jsshell.zip... | [
"Build",
"the",
"artifact",
"basename",
"Builds",
"are",
"base",
".",
"tar",
".",
"bz2",
"info",
"is",
"base",
".",
"json",
"shell",
"is",
"base",
".",
"jsshell",
".",
"zip",
"..."
] | train | https://github.com/MozillaSecurity/fuzzfetch/blob/166cbfc71b679db019b9ac777dce12ccfdfc2c10/src/fuzzfetch/fetch.py#L401-L414 |
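Worth noting for the record above: os.path.splitext() strips only the final extension, so the stored base keeps any earlier suffix; other artifact names are then derived from this base by artifact_url(), which is defined elsewhere in this file. Two illustrative calls:

os.path.splitext('public/build/target.zip')[0]      # -> 'public/build/target'
os.path.splitext('public/build/target.tar.bz2')[0]  # -> 'public/build/target.tar'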
MozillaSecurity/fuzzfetch | src/fuzzfetch/fetch.py | Fetcher.build_info | def build_info(self):
"""Return the build's info"""
if 'build_info' not in self._memo:
self._memo['build_info'] = _get_url(self.artifact_url('json')).json()
return self._memo['build_info'] | python | def build_info(self):
"""Return the build's info"""
if 'build_info' not in self._memo:
self._memo['build_info'] = _get_url(self.artifact_url('json')).json()
return self._memo['build_info'] | [
"def",
"build_info",
"(",
"self",
")",
":",
"if",
"'build_info'",
"not",
"in",
"self",
".",
"_memo",
":",
"self",
".",
"_memo",
"[",
"'build_info'",
"]",
"=",
"_get_url",
"(",
"self",
".",
"artifact_url",
"(",
"'json'",
")",
")",
".",
"json",
"(",
")",
"return",
"self",
".",
"_memo",
"[",
"'build_info'",
"]"
] | Return the build's info | [
"Return",
"the",
"build",
"s",
"info"
] | train | https://github.com/MozillaSecurity/fuzzfetch/blob/166cbfc71b679db019b9ac777dce12ccfdfc2c10/src/fuzzfetch/fetch.py#L432-L436 |
MozillaSecurity/fuzzfetch | src/fuzzfetch/fetch.py | Fetcher.moz_info | def moz_info(self):
"""Return the build's mozinfo"""
if 'moz_info' not in self._memo:
self._memo['moz_info'] = _get_url(self.artifact_url('mozinfo.json')).json()
return self._memo['moz_info'] | python | def moz_info(self):
"""Return the build's mozinfo"""
if 'moz_info' not in self._memo:
self._memo['moz_info'] = _get_url(self.artifact_url('mozinfo.json')).json()
return self._memo['moz_info'] | [
"def",
"moz_info",
"(",
"self",
")",
":",
"if",
"'moz_info'",
"not",
"in",
"self",
".",
"_memo",
":",
"self",
".",
"_memo",
"[",
"'moz_info'",
"]",
"=",
"_get_url",
"(",
"self",
".",
"artifact_url",
"(",
"'mozinfo.json'",
")",
")",
".",
"json",
"(",
")",
"return",
"self",
".",
"_memo",
"[",
"'moz_info'",
"]"
] | Return the build's mozinfo | [
"Return",
"the",
"build",
"s",
"mozinfo"
] | train | https://github.com/MozillaSecurity/fuzzfetch/blob/166cbfc71b679db019b9ac777dce12ccfdfc2c10/src/fuzzfetch/fetch.py#L444-L448 |
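Both build_info and moz_info are lazily fetched JSON blobs cached in self._memo. A hedged access sketch, assuming the Fetcher constructor signature mirrors the from_args call further down; the keys shown are the ones this module itself reads.

from fuzzfetch.fetch import Fetcher, BuildFlags

fetcher = Fetcher('firefox', 'central', 'latest', BuildFlags(False, False, False, False, False))
print(fetcher.moz_info['topsrcdir'])   # written to fuzzmanagerconf as pathPrefix
print(fetcher.moz_info['processor'])   # written to fuzzmanagerconf as platform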
MozillaSecurity/fuzzfetch | src/fuzzfetch/fetch.py | Fetcher.extract_build | def extract_build(self, path='.', tests=None, full_symbols=False):
"""
Download and extract the build and requested extra artifacts
@type path:
@param path:
@type tests:
@param tests:
@type full_symbols:
@param full_symbols:
"""
if self._target == 'js':
self.extract_zip('jsshell.zip', path=os.path.join(path))
else:
if self._platform.system == 'Linux':
self.extract_tar('tar.bz2', path)
elif self._platform.system == 'Darwin':
self.extract_dmg(path)
elif self._platform.system == 'Windows':
self.extract_zip('zip', path)
# windows builds are extracted under 'firefox/'
# move everything under firefox/ up a level to the destination path
firefox = os.path.join(path, 'firefox')
for root, dirs, files in os.walk(firefox):
newroot = root.replace(firefox, path)
for dirname in dirs:
os.mkdir(os.path.join(newroot, dirname))
for filename in files:
os.rename(os.path.join(root, filename), os.path.join(newroot, filename))
shutil.rmtree(firefox, onerror=onerror)
elif self._platform.system == 'Android':
self.download_apk(path)
else:
raise FetcherException("'%s' is not a supported platform" % self._platform.system)
if tests:
# validate tests
tests = set(tests or [])
if not tests.issubset(self.TEST_CHOICES):
invalid_test = tuple(tests - self.TEST_CHOICES)[0]
raise FetcherException("'%s' is not a supported test type" % invalid_test)
os.mkdir(os.path.join(path, 'tests'))
if 'common' in tests:
try:
self.extract_tar('common.tests.tar.gz', path=os.path.join(path, 'tests'))
except FetcherException:
self.extract_zip('common.tests.zip', path=os.path.join(path, 'tests'))
if 'reftests' in tests:
try:
self.extract_tar('reftest.tests.tar.gz', path=os.path.join(path, 'tests'))
except FetcherException:
self.extract_zip('reftest.tests.zip', path=os.path.join(path, 'tests'))
if 'gtest' in tests:
try:
self.extract_tar('gtest.tests.tar.gz', path=path)
except FetcherException:
self.extract_zip('gtest.tests.zip', path=path)
if self._platform.system == 'Windows':
libxul = 'xul.dll'
elif self._platform.system == 'Linux':
libxul = 'libxul.so'
elif self._platform.system == 'Darwin':
libxul = 'XUL'
else:
raise FetcherException("'%s' is not a supported platform for gtest" % self._platform.system)
os.rename(os.path.join(path, 'gtest', 'gtest_bin', 'gtest', libxul),
os.path.join(path, 'gtest', libxul))
shutil.copy(os.path.join(path, 'gtest', 'dependentlibs.list.gtest'),
os.path.join(path, 'dependentlibs.list.gtest'))
if self._flags.coverage:
self.extract_zip('code-coverage-gcno.zip', path=path)
if not self._flags.asan and not self._flags.valgrind:
if full_symbols:
symbols = 'crashreporter-symbols-full.zip'
else:
symbols = 'crashreporter-symbols.zip'
os.mkdir(os.path.join(path, 'symbols'))
self.extract_zip(symbols, path=os.path.join(path, 'symbols'))
self._layout_for_domfuzz(path)
self._write_fuzzmanagerconf(path) | python | def extract_build(self, path='.', tests=None, full_symbols=False):
"""
Download and extract the build and requested extra artifacts
@type path:
@param path:
@type tests:
@param tests:
@type full_symbols:
@param full_symbols:
"""
if self._target == 'js':
self.extract_zip('jsshell.zip', path=os.path.join(path))
else:
if self._platform.system == 'Linux':
self.extract_tar('tar.bz2', path)
elif self._platform.system == 'Darwin':
self.extract_dmg(path)
elif self._platform.system == 'Windows':
self.extract_zip('zip', path)
# windows builds are extracted under 'firefox/'
# move everything under firefox/ up a level to the destination path
firefox = os.path.join(path, 'firefox')
for root, dirs, files in os.walk(firefox):
newroot = root.replace(firefox, path)
for dirname in dirs:
os.mkdir(os.path.join(newroot, dirname))
for filename in files:
os.rename(os.path.join(root, filename), os.path.join(newroot, filename))
shutil.rmtree(firefox, onerror=onerror)
elif self._platform.system == 'Android':
self.download_apk(path)
else:
raise FetcherException("'%s' is not a supported platform" % self._platform.system)
if tests:
# validate tests
tests = set(tests or [])
if not tests.issubset(self.TEST_CHOICES):
invalid_test = tuple(tests - self.TEST_CHOICES)[0]
raise FetcherException("'%s' is not a supported test type" % invalid_test)
os.mkdir(os.path.join(path, 'tests'))
if 'common' in tests:
try:
self.extract_tar('common.tests.tar.gz', path=os.path.join(path, 'tests'))
except FetcherException:
self.extract_zip('common.tests.zip', path=os.path.join(path, 'tests'))
if 'reftests' in tests:
try:
self.extract_tar('reftest.tests.tar.gz', path=os.path.join(path, 'tests'))
except FetcherException:
self.extract_zip('reftest.tests.zip', path=os.path.join(path, 'tests'))
if 'gtest' in tests:
try:
self.extract_tar('gtest.tests.tar.gz', path=path)
except FetcherException:
self.extract_zip('gtest.tests.zip', path=path)
if self._platform.system == 'Windows':
libxul = 'xul.dll'
elif self._platform.system == 'Linux':
libxul = 'libxul.so'
elif self._platform.system == 'Darwin':
libxul = 'XUL'
else:
raise FetcherException("'%s' is not a supported platform for gtest" % self._platform.system)
os.rename(os.path.join(path, 'gtest', 'gtest_bin', 'gtest', libxul),
os.path.join(path, 'gtest', libxul))
shutil.copy(os.path.join(path, 'gtest', 'dependentlibs.list.gtest'),
os.path.join(path, 'dependentlibs.list.gtest'))
if self._flags.coverage:
self.extract_zip('code-coverage-gcno.zip', path=path)
if not self._flags.asan and not self._flags.valgrind:
if full_symbols:
symbols = 'crashreporter-symbols-full.zip'
else:
symbols = 'crashreporter-symbols.zip'
os.mkdir(os.path.join(path, 'symbols'))
self.extract_zip(symbols, path=os.path.join(path, 'symbols'))
self._layout_for_domfuzz(path)
self._write_fuzzmanagerconf(path) | [
"def",
"extract_build",
"(",
"self",
",",
"path",
"=",
"'.'",
",",
"tests",
"=",
"None",
",",
"full_symbols",
"=",
"False",
")",
":",
"if",
"self",
".",
"_target",
"==",
"'js'",
":",
"self",
".",
"extract_zip",
"(",
"'jsshell.zip'",
",",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
")",
")",
"else",
":",
"if",
"self",
".",
"_platform",
".",
"system",
"==",
"'Linux'",
":",
"self",
".",
"extract_tar",
"(",
"'tar.bz2'",
",",
"path",
")",
"elif",
"self",
".",
"_platform",
".",
"system",
"==",
"'Darwin'",
":",
"self",
".",
"extract_dmg",
"(",
"path",
")",
"elif",
"self",
".",
"_platform",
".",
"system",
"==",
"'Windows'",
":",
"self",
".",
"extract_zip",
"(",
"'zip'",
",",
"path",
")",
"# windows builds are extracted under 'firefox/'",
"# move everything under firefox/ up a level to the destination path",
"firefox",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"'firefox'",
")",
"for",
"root",
",",
"dirs",
",",
"files",
"in",
"os",
".",
"walk",
"(",
"firefox",
")",
":",
"newroot",
"=",
"root",
".",
"replace",
"(",
"firefox",
",",
"path",
")",
"for",
"dirname",
"in",
"dirs",
":",
"os",
".",
"mkdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"newroot",
",",
"dirname",
")",
")",
"for",
"filename",
"in",
"files",
":",
"os",
".",
"rename",
"(",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"filename",
")",
",",
"os",
".",
"path",
".",
"join",
"(",
"newroot",
",",
"filename",
")",
")",
"shutil",
".",
"rmtree",
"(",
"firefox",
",",
"onerror",
"=",
"onerror",
")",
"elif",
"self",
".",
"_platform",
".",
"system",
"==",
"'Android'",
":",
"self",
".",
"download_apk",
"(",
"path",
")",
"else",
":",
"raise",
"FetcherException",
"(",
"\"'%s' is not a supported platform\"",
"%",
"self",
".",
"_platform",
".",
"system",
")",
"if",
"tests",
":",
"# validate tests",
"tests",
"=",
"set",
"(",
"tests",
"or",
"[",
"]",
")",
"if",
"not",
"tests",
".",
"issubset",
"(",
"self",
".",
"TEST_CHOICES",
")",
":",
"invalid_test",
"=",
"tuple",
"(",
"tests",
"-",
"self",
".",
"TEST_CHOICES",
")",
"[",
"0",
"]",
"raise",
"FetcherException",
"(",
"\"'%s' is not a supported test type\"",
"%",
"invalid_test",
")",
"os",
".",
"mkdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"'tests'",
")",
")",
"if",
"'common'",
"in",
"tests",
":",
"try",
":",
"self",
".",
"extract_tar",
"(",
"'common.tests.tar.gz'",
",",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"'tests'",
")",
")",
"except",
"FetcherException",
":",
"self",
".",
"extract_zip",
"(",
"'common.tests.zip'",
",",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"'tests'",
")",
")",
"if",
"'reftests'",
"in",
"tests",
":",
"try",
":",
"self",
".",
"extract_tar",
"(",
"'reftest.tests.tar.gz'",
",",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"'tests'",
")",
")",
"except",
"FetcherException",
":",
"self",
".",
"extract_zip",
"(",
"'reftest.tests.zip'",
",",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"'tests'",
")",
")",
"if",
"'gtest'",
"in",
"tests",
":",
"try",
":",
"self",
".",
"extract_tar",
"(",
"'gtest.tests.tar.gz'",
",",
"path",
"=",
"path",
")",
"except",
"FetcherException",
":",
"self",
".",
"extract_zip",
"(",
"'gtest.tests.zip'",
",",
"path",
"=",
"path",
")",
"if",
"self",
".",
"_platform",
".",
"system",
"==",
"'Windows'",
":",
"libxul",
"=",
"'xul.dll'",
"elif",
"self",
".",
"_platform",
".",
"system",
"==",
"'Linux'",
":",
"libxul",
"=",
"'libxul.so'",
"elif",
"self",
".",
"_platform",
".",
"system",
"==",
"'Darwin'",
":",
"libxul",
"=",
"'XUL'",
"else",
":",
"raise",
"FetcherException",
"(",
"\"'%s' is not a supported platform for gtest\"",
"%",
"self",
".",
"_platform",
".",
"system",
")",
"os",
".",
"rename",
"(",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"'gtest'",
",",
"'gtest_bin'",
",",
"'gtest'",
",",
"libxul",
")",
",",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"'gtest'",
",",
"libxul",
")",
")",
"shutil",
".",
"copy",
"(",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"'gtest'",
",",
"'dependentlibs.list.gtest'",
")",
",",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"'dependentlibs.list.gtest'",
")",
")",
"if",
"self",
".",
"_flags",
".",
"coverage",
":",
"self",
".",
"extract_zip",
"(",
"'code-coverage-gcno.zip'",
",",
"path",
"=",
"path",
")",
"if",
"not",
"self",
".",
"_flags",
".",
"asan",
"and",
"not",
"self",
".",
"_flags",
".",
"valgrind",
":",
"if",
"full_symbols",
":",
"symbols",
"=",
"'crashreporter-symbols-full.zip'",
"else",
":",
"symbols",
"=",
"'crashreporter-symbols.zip'",
"os",
".",
"mkdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"'symbols'",
")",
")",
"self",
".",
"extract_zip",
"(",
"symbols",
",",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"'symbols'",
")",
")",
"self",
".",
"_layout_for_domfuzz",
"(",
"path",
")",
"self",
".",
"_write_fuzzmanagerconf",
"(",
"path",
")"
] | Download and extract the build and requested extra artifacts
@type path:
@param path:
@type tests:
@param tests:
@type full_symbols:
@param full_symbols: | [
"Download",
"and",
"extract",
"the",
"build",
"and",
"requested",
"extra",
"artifacts"
] | train | https://github.com/MozillaSecurity/fuzzfetch/blob/166cbfc71b679db019b9ac777dce12ccfdfc2c10/src/fuzzfetch/fetch.py#L485-L569 |
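A hedged end-to-end sketch of downloading and unpacking a debug build plus two test archives. The output directory is hypothetical and must already exist: extract_build creates subdirectories (tests/, symbols/, dist/) inside it but not the directory itself.

import os
from fuzzfetch.fetch import Fetcher, BuildFlags

flags = BuildFlags(False, True, False, False, False)   # (asan, debug, fuzzing, coverage, valgrind)
fetcher = Fetcher('firefox', 'central', 'latest', flags)
out_dir = os.path.realpath('firefox-central-debug')    # hypothetical destination
os.makedirs(out_dir)
fetcher.extract_build(out_dir, tests=['common', 'gtest'])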
MozillaSecurity/fuzzfetch | src/fuzzfetch/fetch.py | Fetcher._layout_for_domfuzz | def _layout_for_domfuzz(self, path):
"""
Update directory to work with DOMFuzz
@type path: str
@param path: A string representation of the fuzzmanager config path
"""
old_dir = os.getcwd()
os.chdir(os.path.join(path))
try:
os.mkdir('dist')
link_name = os.path.join('dist', 'bin')
if self._platform.system == 'Darwin' and self._target == 'firefox':
ff_loc = glob.glob('*.app/Contents/MacOS/firefox')
assert len(ff_loc) == 1
os.symlink(os.path.join(os.pardir, os.path.dirname(ff_loc[0])), # pylint: disable=no-member
link_name)
os.symlink(os.path.join(os.pardir, os.pardir, os.pardir, 'symbols'), # pylint: disable=no-member
os.path.join(os.path.dirname(ff_loc[0]), 'symbols'))
elif self._platform.system == 'Linux':
os.symlink(os.pardir, link_name) # pylint: disable=no-member
elif self._platform.system == 'Windows':
# create a junction point at dist\bin pointing to the firefox.exe path
junction_path.symlink(os.curdir, link_name)
finally:
os.chdir(old_dir) | python | def _layout_for_domfuzz(self, path):
"""
Update directory to work with DOMFuzz
@type path: str
@param path: A string representation of the fuzzmanager config path
"""
old_dir = os.getcwd()
os.chdir(os.path.join(path))
try:
os.mkdir('dist')
link_name = os.path.join('dist', 'bin')
if self._platform.system == 'Darwin' and self._target == 'firefox':
ff_loc = glob.glob('*.app/Contents/MacOS/firefox')
assert len(ff_loc) == 1
os.symlink(os.path.join(os.pardir, os.path.dirname(ff_loc[0])), # pylint: disable=no-member
link_name)
os.symlink(os.path.join(os.pardir, os.pardir, os.pardir, 'symbols'), # pylint: disable=no-member
os.path.join(os.path.dirname(ff_loc[0]), 'symbols'))
elif self._platform.system == 'Linux':
os.symlink(os.pardir, link_name) # pylint: disable=no-member
elif self._platform.system == 'Windows':
# create a junction point at dist\bin pointing to the firefox.exe path
junction_path.symlink(os.curdir, link_name)
finally:
os.chdir(old_dir) | [
"def",
"_layout_for_domfuzz",
"(",
"self",
",",
"path",
")",
":",
"old_dir",
"=",
"os",
".",
"getcwd",
"(",
")",
"os",
".",
"chdir",
"(",
"os",
".",
"path",
".",
"join",
"(",
"path",
")",
")",
"try",
":",
"os",
".",
"mkdir",
"(",
"'dist'",
")",
"link_name",
"=",
"os",
".",
"path",
".",
"join",
"(",
"'dist'",
",",
"'bin'",
")",
"if",
"self",
".",
"_platform",
".",
"system",
"==",
"'Darwin'",
"and",
"self",
".",
"_target",
"==",
"'firefox'",
":",
"ff_loc",
"=",
"glob",
".",
"glob",
"(",
"'*.app/Contents/MacOS/firefox'",
")",
"assert",
"len",
"(",
"ff_loc",
")",
"==",
"1",
"os",
".",
"symlink",
"(",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"pardir",
",",
"os",
".",
"path",
".",
"dirname",
"(",
"ff_loc",
"[",
"0",
"]",
")",
")",
",",
"# pylint: disable=no-member",
"link_name",
")",
"os",
".",
"symlink",
"(",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"pardir",
",",
"os",
".",
"pardir",
",",
"os",
".",
"pardir",
",",
"'symbols'",
")",
",",
"# pylint: disable=no-member",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"ff_loc",
"[",
"0",
"]",
")",
",",
"'symbols'",
")",
")",
"elif",
"self",
".",
"_platform",
".",
"system",
"==",
"'Linux'",
":",
"os",
".",
"symlink",
"(",
"os",
".",
"pardir",
",",
"link_name",
")",
"# pylint: disable=no-member",
"elif",
"self",
".",
"_platform",
".",
"system",
"==",
"'Windows'",
":",
"# create a junction point at dist\\bin pointing to the firefox.exe path",
"junction_path",
".",
"symlink",
"(",
"os",
".",
"curdir",
",",
"link_name",
")",
"finally",
":",
"os",
".",
"chdir",
"(",
"old_dir",
")"
] | Update directory to work with DOMFuzz
@type path: str
@param path: A string representation of the fuzzmanager config path | [
"Update",
"directory",
"to",
"work",
"with",
"DOMFuzz"
] | train | https://github.com/MozillaSecurity/fuzzfetch/blob/166cbfc71b679db019b9ac777dce12ccfdfc2c10/src/fuzzfetch/fetch.py#L571-L596 |
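The net effect on Linux is a dist/bin link pointing back at the build root, so harnesses that expect an objdir-style layout keep working. An illustrative result for the Linux case (directory names other than dist/bin and symbols/ are examples):

<build dir>/
    firefox, libxul.so, ...        (the extracted build)
    symbols/
    dist/
        bin -> ..                  (symlink created by this helper)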
MozillaSecurity/fuzzfetch | src/fuzzfetch/fetch.py | Fetcher._write_fuzzmanagerconf | def _write_fuzzmanagerconf(self, path):
"""
Write fuzzmanager config file for selected build
@type path: basestring
@param path: A string representation of the fuzzmanager config path
"""
output = configparser.RawConfigParser()
output.add_section('Main')
output.set('Main', 'platform', self.moz_info['processor'].replace('_', '-'))
output.set('Main', 'product', 'mozilla-' + self._branch)
output.set('Main', 'product_version', '%.8s-%.12s' % (self.build_id, self.changeset))
# make sure 'os' match what FM expects
os_name = self.moz_info['os'].lower()
if os_name.startswith('android'):
output.set('Main', 'os', 'android')
elif os_name.startswith('lin'):
output.set('Main', 'os', 'linux')
elif os_name.startswith('mac'):
output.set('Main', 'os', 'macosx')
elif os_name.startswith('win'):
output.set('Main', 'os', 'windows')
else:
output.set('Main', 'os', self.moz_info['os'])
output.add_section('Metadata')
output.set('Metadata', 'pathPrefix', self.moz_info['topsrcdir'])
output.set('Metadata', 'buildFlags', self._flags.build_string().lstrip('-'))
if self._platform.system == "Windows":
fm_name = self._target + '.exe.fuzzmanagerconf'
conf_path = os.path.join(path, 'dist', 'bin', fm_name)
elif self._platform.system == "Android":
conf_path = os.path.join(path, 'target.apk.fuzzmanagerconf')
else:
fm_name = self._target + '.fuzzmanagerconf'
conf_path = os.path.join(path, 'dist', 'bin', fm_name)
with open(conf_path, 'w') as conf_fp:
output.write(conf_fp) | python | def _write_fuzzmanagerconf(self, path):
"""
Write fuzzmanager config file for selected build
@type path: basestring
@param path: A string representation of the fuzzmanager config path
"""
output = configparser.RawConfigParser()
output.add_section('Main')
output.set('Main', 'platform', self.moz_info['processor'].replace('_', '-'))
output.set('Main', 'product', 'mozilla-' + self._branch)
output.set('Main', 'product_version', '%.8s-%.12s' % (self.build_id, self.changeset))
# make sure 'os' match what FM expects
os_name = self.moz_info['os'].lower()
if os_name.startswith('android'):
output.set('Main', 'os', 'android')
elif os_name.startswith('lin'):
output.set('Main', 'os', 'linux')
elif os_name.startswith('mac'):
output.set('Main', 'os', 'macosx')
elif os_name.startswith('win'):
output.set('Main', 'os', 'windows')
else:
output.set('Main', 'os', self.moz_info['os'])
output.add_section('Metadata')
output.set('Metadata', 'pathPrefix', self.moz_info['topsrcdir'])
output.set('Metadata', 'buildFlags', self._flags.build_string().lstrip('-'))
if self._platform.system == "Windows":
fm_name = self._target + '.exe.fuzzmanagerconf'
conf_path = os.path.join(path, 'dist', 'bin', fm_name)
elif self._platform.system == "Android":
conf_path = os.path.join(path, 'target.apk.fuzzmanagerconf')
else:
fm_name = self._target + '.fuzzmanagerconf'
conf_path = os.path.join(path, 'dist', 'bin', fm_name)
with open(conf_path, 'w') as conf_fp:
output.write(conf_fp) | [
"def",
"_write_fuzzmanagerconf",
"(",
"self",
",",
"path",
")",
":",
"output",
"=",
"configparser",
".",
"RawConfigParser",
"(",
")",
"output",
".",
"add_section",
"(",
"'Main'",
")",
"output",
".",
"set",
"(",
"'Main'",
",",
"'platform'",
",",
"self",
".",
"moz_info",
"[",
"'processor'",
"]",
".",
"replace",
"(",
"'_'",
",",
"'-'",
")",
")",
"output",
".",
"set",
"(",
"'Main'",
",",
"'product'",
",",
"'mozilla-'",
"+",
"self",
".",
"_branch",
")",
"output",
".",
"set",
"(",
"'Main'",
",",
"'product_version'",
",",
"'%.8s-%.12s'",
"%",
"(",
"self",
".",
"build_id",
",",
"self",
".",
"changeset",
")",
")",
"# make sure 'os' match what FM expects",
"os_name",
"=",
"self",
".",
"moz_info",
"[",
"'os'",
"]",
".",
"lower",
"(",
")",
"if",
"os_name",
".",
"startswith",
"(",
"'android'",
")",
":",
"output",
".",
"set",
"(",
"'Main'",
",",
"'os'",
",",
"'android'",
")",
"elif",
"os_name",
".",
"startswith",
"(",
"'lin'",
")",
":",
"output",
".",
"set",
"(",
"'Main'",
",",
"'os'",
",",
"'linux'",
")",
"elif",
"os_name",
".",
"startswith",
"(",
"'mac'",
")",
":",
"output",
".",
"set",
"(",
"'Main'",
",",
"'os'",
",",
"'macosx'",
")",
"elif",
"os_name",
".",
"startswith",
"(",
"'win'",
")",
":",
"output",
".",
"set",
"(",
"'Main'",
",",
"'os'",
",",
"'windows'",
")",
"else",
":",
"output",
".",
"set",
"(",
"'Main'",
",",
"'os'",
",",
"self",
".",
"moz_info",
"[",
"'os'",
"]",
")",
"output",
".",
"add_section",
"(",
"'Metadata'",
")",
"output",
".",
"set",
"(",
"'Metadata'",
",",
"'pathPrefix'",
",",
"self",
".",
"moz_info",
"[",
"'topsrcdir'",
"]",
")",
"output",
".",
"set",
"(",
"'Metadata'",
",",
"'buildFlags'",
",",
"self",
".",
"_flags",
".",
"build_string",
"(",
")",
".",
"lstrip",
"(",
"'-'",
")",
")",
"if",
"self",
".",
"_platform",
".",
"system",
"==",
"\"Windows\"",
":",
"fm_name",
"=",
"self",
".",
"_target",
"+",
"'.exe.fuzzmanagerconf'",
"conf_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"'dist'",
",",
"'bin'",
",",
"fm_name",
")",
"elif",
"self",
".",
"_platform",
".",
"system",
"==",
"\"Android\"",
":",
"conf_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"'target.apk.fuzzmanagerconf'",
")",
"else",
":",
"fm_name",
"=",
"self",
".",
"_target",
"+",
"'.fuzzmanagerconf'",
"conf_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"'dist'",
",",
"'bin'",
",",
"fm_name",
")",
"with",
"open",
"(",
"conf_path",
",",
"'w'",
")",
"as",
"conf_fp",
":",
"output",
".",
"write",
"(",
"conf_fp",
")"
] | Write fuzzmanager config file for selected build
@type path: basestring
@param path: A string representation of the fuzzmanager config path | [
"Write",
"fuzzmanager",
"config",
"file",
"for",
"selected",
"build"
] | train | https://github.com/MozillaSecurity/fuzzfetch/blob/166cbfc71b679db019b9ac777dce12ccfdfc2c10/src/fuzzfetch/fetch.py#L598-L635 |
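The written file is a small INI document. An illustrative example with made-up values follows; the real values come from moz_info, the build id/changeset, and the build flags, formatted exactly as in the code above.

[Main]
platform = x86-64
product = mozilla-central
product_version = 20170101-57b37213d811
os = linux

[Metadata]
pathPrefix = /builds/worker/workspace/build/src/
buildFlags = debug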
MozillaSecurity/fuzzfetch | src/fuzzfetch/fetch.py | Fetcher.extract_zip | def extract_zip(self, suffix, path='.'):
"""
Download and extract a zip artifact
@type suffix:
@param suffix:
@type path:
@param path:
"""
zip_fd, zip_fn = tempfile.mkstemp(prefix='fuzzfetch-', suffix='.zip')
os.close(zip_fd)
try:
_download_url(self.artifact_url(suffix), zip_fn)
LOG.info('.. extracting')
with zipfile.ZipFile(zip_fn) as zip_fp:
for info in zip_fp.infolist():
_extract_file(zip_fp, info, path)
finally:
os.unlink(zip_fn) | python | def extract_zip(self, suffix, path='.'):
"""
Download and extract a zip artifact
@type suffix:
@param suffix:
@type path:
@param path:
"""
zip_fd, zip_fn = tempfile.mkstemp(prefix='fuzzfetch-', suffix='.zip')
os.close(zip_fd)
try:
_download_url(self.artifact_url(suffix), zip_fn)
LOG.info('.. extracting')
with zipfile.ZipFile(zip_fn) as zip_fp:
for info in zip_fp.infolist():
_extract_file(zip_fp, info, path)
finally:
os.unlink(zip_fn) | [
"def",
"extract_zip",
"(",
"self",
",",
"suffix",
",",
"path",
"=",
"'.'",
")",
":",
"zip_fd",
",",
"zip_fn",
"=",
"tempfile",
".",
"mkstemp",
"(",
"prefix",
"=",
"'fuzzfetch-'",
",",
"suffix",
"=",
"'.zip'",
")",
"os",
".",
"close",
"(",
"zip_fd",
")",
"try",
":",
"_download_url",
"(",
"self",
".",
"artifact_url",
"(",
"suffix",
")",
",",
"zip_fn",
")",
"LOG",
".",
"info",
"(",
"'.. extracting'",
")",
"with",
"zipfile",
".",
"ZipFile",
"(",
"zip_fn",
")",
"as",
"zip_fp",
":",
"for",
"info",
"in",
"zip_fp",
".",
"infolist",
"(",
")",
":",
"_extract_file",
"(",
"zip_fp",
",",
"info",
",",
"path",
")",
"finally",
":",
"os",
".",
"unlink",
"(",
"zip_fn",
")"
] | Download and extract a zip artifact
@type suffix:
@param suffix:
@type path:
@param path: | [
"Download",
"and",
"extract",
"a",
"zip",
"artifact"
] | train | https://github.com/MozillaSecurity/fuzzfetch/blob/166cbfc71b679db019b9ac777dce12ccfdfc2c10/src/fuzzfetch/fetch.py#L637-L656 |
MozillaSecurity/fuzzfetch | src/fuzzfetch/fetch.py | Fetcher.extract_tar | def extract_tar(self, suffix, path='.'):
"""
Extract builds with .tar.(*) extension
When unpacking a build archive, only extract the firefox directory
@type suffix:
@param suffix:
@type path:
@param path:
"""
mode = suffix.split('.')[-1]
tar_fd, tar_fn = tempfile.mkstemp(prefix='fuzzfetch-', suffix='.tar.%s' % mode)
os.close(tar_fd)
try:
_download_url(self.artifact_url(suffix), tar_fn)
LOG.info('.. extracting')
with tarfile.open(tar_fn, mode='r:%s' % mode) as tar:
members = []
for member in tar.getmembers():
if member.path.startswith("firefox/"):
member.path = member.path[8:]
members.append(member)
elif member.path != "firefox":
# Ignore top-level build directory
members.append(member)
tar.extractall(members=members, path=path)
finally:
os.unlink(tar_fn) | python | def extract_tar(self, suffix, path='.'):
"""
Extract builds with .tar.(*) extension
When unpacking a build archive, only extract the firefox directory
@type suffix:
@param suffix:
@type path:
@param path:
"""
mode = suffix.split('.')[-1]
tar_fd, tar_fn = tempfile.mkstemp(prefix='fuzzfetch-', suffix='.tar.%s' % mode)
os.close(tar_fd)
try:
_download_url(self.artifact_url(suffix), tar_fn)
LOG.info('.. extracting')
with tarfile.open(tar_fn, mode='r:%s' % mode) as tar:
members = []
for member in tar.getmembers():
if member.path.startswith("firefox/"):
member.path = member.path[8:]
members.append(member)
elif member.path != "firefox":
# Ignore top-level build directory
members.append(member)
tar.extractall(members=members, path=path)
finally:
os.unlink(tar_fn) | [
"def",
"extract_tar",
"(",
"self",
",",
"suffix",
",",
"path",
"=",
"'.'",
")",
":",
"mode",
"=",
"suffix",
".",
"split",
"(",
"'.'",
")",
"[",
"-",
"1",
"]",
"tar_fd",
",",
"tar_fn",
"=",
"tempfile",
".",
"mkstemp",
"(",
"prefix",
"=",
"'fuzzfetch-'",
",",
"suffix",
"=",
"'.tar.%s'",
"%",
"mode",
")",
"os",
".",
"close",
"(",
"tar_fd",
")",
"try",
":",
"_download_url",
"(",
"self",
".",
"artifact_url",
"(",
"suffix",
")",
",",
"tar_fn",
")",
"LOG",
".",
"info",
"(",
"'.. extracting'",
")",
"with",
"tarfile",
".",
"open",
"(",
"tar_fn",
",",
"mode",
"=",
"'r:%s'",
"%",
"mode",
")",
"as",
"tar",
":",
"members",
"=",
"[",
"]",
"for",
"member",
"in",
"tar",
".",
"getmembers",
"(",
")",
":",
"if",
"member",
".",
"path",
".",
"startswith",
"(",
"\"firefox/\"",
")",
":",
"member",
".",
"path",
"=",
"member",
".",
"path",
"[",
"8",
":",
"]",
"members",
".",
"append",
"(",
"member",
")",
"elif",
"member",
".",
"path",
"!=",
"\"firefox\"",
":",
"# Ignore top-level build directory",
"members",
".",
"append",
"(",
"member",
")",
"tar",
".",
"extractall",
"(",
"members",
"=",
"members",
",",
"path",
"=",
"path",
")",
"finally",
":",
"os",
".",
"unlink",
"(",
"tar_fn",
")"
] | Extract builds with .tar.(*) extension
When unpacking a build archive, only extract the firefox directory
@type suffix:
@param suffix:
@type path:
@param path: | [
"Extract",
"builds",
"with",
".",
"tar",
".",
"(",
"*",
")",
"extension",
"When",
"unpacking",
"a",
"build",
"archive",
"only",
"extract",
"the",
"firefox",
"directory"
] | train | https://github.com/MozillaSecurity/fuzzfetch/blob/166cbfc71b679db019b9ac777dce12ccfdfc2c10/src/fuzzfetch/fetch.py#L658-L686 |
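A concrete illustration of the member renaming performed above (archive contents are examples):

firefox/firefox      -> <path>/firefox
firefox/libxul.so    -> <path>/libxul.so
firefox              -> skipped (the top-level directory entry is dropped)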
MozillaSecurity/fuzzfetch | src/fuzzfetch/fetch.py | Fetcher.download_apk | def download_apk(self, path='.'):
"""
Download Android .apk
@type path:
@param path:
"""
apk_fd, apk_fn = tempfile.mkstemp(prefix='fuzzfetch-', suffix='.apk')
os.close(apk_fd)
try:
_download_url(self.artifact_url('apk'), apk_fn)
shutil.copy(apk_fn, os.path.join(path, 'target.apk'))
finally:
os.unlink(apk_fn) | python | def download_apk(self, path='.'):
"""
Download Android .apk
@type path:
@param path:
"""
apk_fd, apk_fn = tempfile.mkstemp(prefix='fuzzfetch-', suffix='.apk')
os.close(apk_fd)
try:
_download_url(self.artifact_url('apk'), apk_fn)
shutil.copy(apk_fn, os.path.join(path, 'target.apk'))
finally:
os.unlink(apk_fn) | [
"def",
"download_apk",
"(",
"self",
",",
"path",
"=",
"'.'",
")",
":",
"apk_fd",
",",
"apk_fn",
"=",
"tempfile",
".",
"mkstemp",
"(",
"prefix",
"=",
"'fuzzfetch-'",
",",
"suffix",
"=",
"'.apk'",
")",
"os",
".",
"close",
"(",
"apk_fd",
")",
"try",
":",
"_download_url",
"(",
"self",
".",
"artifact_url",
"(",
"'apk'",
")",
",",
"apk_fn",
")",
"shutil",
".",
"copy",
"(",
"apk_fn",
",",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"'target.apk'",
")",
")",
"finally",
":",
"os",
".",
"unlink",
"(",
"apk_fn",
")"
] | Download Android .apk
@type path:
@param path: | [
"Download",
"Android",
".",
"apk"
] | train | https://github.com/MozillaSecurity/fuzzfetch/blob/166cbfc71b679db019b9ac777dce12ccfdfc2c10/src/fuzzfetch/fetch.py#L688-L701 |
MozillaSecurity/fuzzfetch | src/fuzzfetch/fetch.py | Fetcher.extract_dmg | def extract_dmg(self, path='.'):
"""
Extract builds with .dmg extension
Will only work if `hdiutil` is available.
@type path:
@param path:
"""
dmg_fd, dmg_fn = tempfile.mkstemp(prefix='fuzzfetch-', suffix='.dmg')
os.close(dmg_fd)
out_tmp = tempfile.mkdtemp(prefix='fuzzfetch-', suffix='.tmp')
try:
_download_url(self.artifact_url('dmg'), dmg_fn)
if std_platform.system() == 'Darwin':
LOG.info('.. extracting')
subprocess.check_call(['hdiutil', 'attach', '-quiet', '-mountpoint', out_tmp, dmg_fn])
try:
apps = [mt for mt in os.listdir(out_tmp) if mt.endswith('app')]
assert len(apps) == 1
shutil.copytree(os.path.join(out_tmp, apps[0]), os.path.join(path, apps[0]), symlinks=True)
finally:
subprocess.check_call(['hdiutil', 'detach', '-quiet', out_tmp])
else:
LOG.warning('.. can\'t extract target.dmg on %s', std_platform.system())
shutil.copy(dmg_fn, os.path.join(path, 'target.dmg'))
finally:
shutil.rmtree(out_tmp, onerror=onerror)
os.unlink(dmg_fn) | python | def extract_dmg(self, path='.'):
"""
Extract builds with .dmg extension
Will only work if `hdiutil` is available.
@type path:
@param path:
"""
dmg_fd, dmg_fn = tempfile.mkstemp(prefix='fuzzfetch-', suffix='.dmg')
os.close(dmg_fd)
out_tmp = tempfile.mkdtemp(prefix='fuzzfetch-', suffix='.tmp')
try:
_download_url(self.artifact_url('dmg'), dmg_fn)
if std_platform.system() == 'Darwin':
LOG.info('.. extracting')
subprocess.check_call(['hdiutil', 'attach', '-quiet', '-mountpoint', out_tmp, dmg_fn])
try:
apps = [mt for mt in os.listdir(out_tmp) if mt.endswith('app')]
assert len(apps) == 1
shutil.copytree(os.path.join(out_tmp, apps[0]), os.path.join(path, apps[0]), symlinks=True)
finally:
subprocess.check_call(['hdiutil', 'detach', '-quiet', out_tmp])
else:
LOG.warning('.. can\'t extract target.dmg on %s', std_platform.system())
shutil.copy(dmg_fn, os.path.join(path, 'target.dmg'))
finally:
shutil.rmtree(out_tmp, onerror=onerror)
os.unlink(dmg_fn) | [
"def",
"extract_dmg",
"(",
"self",
",",
"path",
"=",
"'.'",
")",
":",
"dmg_fd",
",",
"dmg_fn",
"=",
"tempfile",
".",
"mkstemp",
"(",
"prefix",
"=",
"'fuzzfetch-'",
",",
"suffix",
"=",
"'.dmg'",
")",
"os",
".",
"close",
"(",
"dmg_fd",
")",
"out_tmp",
"=",
"tempfile",
".",
"mkdtemp",
"(",
"prefix",
"=",
"'fuzzfetch-'",
",",
"suffix",
"=",
"'.tmp'",
")",
"try",
":",
"_download_url",
"(",
"self",
".",
"artifact_url",
"(",
"'dmg'",
")",
",",
"dmg_fn",
")",
"if",
"std_platform",
".",
"system",
"(",
")",
"==",
"'Darwin'",
":",
"LOG",
".",
"info",
"(",
"'.. extracting'",
")",
"subprocess",
".",
"check_call",
"(",
"[",
"'hdiutil'",
",",
"'attach'",
",",
"'-quiet'",
",",
"'-mountpoint'",
",",
"out_tmp",
",",
"dmg_fn",
"]",
")",
"try",
":",
"apps",
"=",
"[",
"mt",
"for",
"mt",
"in",
"os",
".",
"listdir",
"(",
"out_tmp",
")",
"if",
"mt",
".",
"endswith",
"(",
"'app'",
")",
"]",
"assert",
"len",
"(",
"apps",
")",
"==",
"1",
"shutil",
".",
"copytree",
"(",
"os",
".",
"path",
".",
"join",
"(",
"out_tmp",
",",
"apps",
"[",
"0",
"]",
")",
",",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"apps",
"[",
"0",
"]",
")",
",",
"symlinks",
"=",
"True",
")",
"finally",
":",
"subprocess",
".",
"check_call",
"(",
"[",
"'hdiutil'",
",",
"'detach'",
",",
"'-quiet'",
",",
"out_tmp",
"]",
")",
"else",
":",
"LOG",
".",
"warning",
"(",
"'.. can\\'t extract target.dmg on %s'",
",",
"std_platform",
".",
"system",
"(",
")",
")",
"shutil",
".",
"copy",
"(",
"dmg_fn",
",",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"'target.dmg'",
")",
")",
"finally",
":",
"shutil",
".",
"rmtree",
"(",
"out_tmp",
",",
"onerror",
"=",
"onerror",
")",
"os",
".",
"unlink",
"(",
"dmg_fn",
")"
] | Extract builds with .dmg extension
Will only work if `hdiutil` is available.
@type path:
@param path: | [
"Extract",
"builds",
"with",
".",
"dmg",
"extension"
] | train | https://github.com/MozillaSecurity/fuzzfetch/blob/166cbfc71b679db019b9ac777dce12ccfdfc2c10/src/fuzzfetch/fetch.py#L703-L731 |
MozillaSecurity/fuzzfetch | src/fuzzfetch/fetch.py | Fetcher.from_args | def from_args(cls, args=None, skip_dir_check=False):
"""
Construct a Fetcher from given command line arguments.
@type args: list(str)
@param args: Command line arguments (optional). Default is to use args from sys.argv
@type skip_dir_check: bool
@param skip_dir_check: Boolean identifying whether to check for existing build directory
@rtype: tuple(Fetcher, output path)
@return: Returns a Fetcher object and keyword arguments for extract_build.
"""
parser = argparse.ArgumentParser()
parser.set_defaults(target='firefox', build='latest', tests=None) # branch default is set after parsing
target_group = parser.add_argument_group('Target')
target_group.add_argument('--target', choices=sorted(cls.TARGET_CHOICES),
help=('Specify the build target. (default: %(default)s)'))
target_group.add_argument('--os', choices=sorted(Platform.SUPPORTED),
help=('Specify the target system. (default: ' + std_platform.system() + ')'))
cpu_choices = sorted(set(itertools.chain(itertools.chain.from_iterable(Platform.SUPPORTED.values()),
Platform.CPU_ALIASES)))
target_group.add_argument('--cpu', choices=cpu_choices,
help=('Specify the target CPU. (default: ' + std_platform.machine() + ')'))
type_group = parser.add_argument_group('Build')
type_group.add_argument('--build', metavar='DATE|REV|NS',
help='Specify the build to download, (default: %(default)s)'
' Accepts values in format YYYY-MM-DD (2017-01-01)'
' revision (57b37213d81150642f5139764e7044b07b9dccc3)'
' or TaskCluster namespace (gecko.v2....)')
branch_group = parser.add_argument_group('Branch')
branch_args = branch_group.add_mutually_exclusive_group()
branch_args.add_argument('--inbound', action='store_const', const='inbound', dest='branch',
help='Download from mozilla-inbound')
branch_args.add_argument('--central', action='store_const', const='central', dest='branch',
help='Download from mozilla-central (default)')
branch_args.add_argument('--release', action='store_const', const='release', dest='branch',
help='Download from mozilla-release')
branch_args.add_argument('--beta', action='store_const', const='beta', dest='branch',
help='Download from mozilla-beta')
branch_args.add_argument('--esr52', action='store_const', const='esr52', dest='branch',
help='Download from mozilla-esr52')
branch_args.add_argument('--esr', action='store_const', const='esr60', dest='branch',
help='Download from mozilla-esr60')
build_group = parser.add_argument_group('Build Arguments')
build_group.add_argument('-d', '--debug', action='store_true',
help='Get debug builds w/ symbols (default=optimized).')
build_group.add_argument('-a', '--asan', action='store_true',
help='Download AddressSanitizer builds.')
build_group.add_argument('--fuzzing', action='store_true',
help='Download --enable-fuzzing builds.')
build_group.add_argument('--coverage', action='store_true',
help='Download --coverage builds. This also pulls down the *.gcno files')
build_group.add_argument('--valgrind', action='store_true',
help='Download Valgrind builds.')
test_group = parser.add_argument_group('Test Arguments')
test_group.add_argument('--tests', nargs='+', metavar='', choices=cls.TEST_CHOICES,
help=('Download tests associated with this build. Acceptable values are: ' +
', '.join(cls.TEST_CHOICES)))
test_group.add_argument('--full-symbols', action='store_true',
help='Download the full crashreport-symbols.zip archive.')
misc_group = parser.add_argument_group('Misc. Arguments')
misc_group.add_argument('-n', '--name',
help='Specify a name (default=auto)')
misc_group.add_argument('-o', '--out', default=os.getcwd(),
help='Specify output directory (default=.)')
misc_group.add_argument('--dry-run', action='store_true',
help="Search for build and output metadata only, don't download anything.")
args = parser.parse_args(args=args)
if re.match(r'(\d{4}-\d{2}-\d{2}|[0-9A-Fa-f]{40}|latest)$', args.build) is None:
# this is a custom build
# ensure conflicting options are not set
if args.branch is not None:
parser.error('Cannot specify --build namespace and branch argument: %s' % args.branch)
if args.debug:
parser.error('Cannot specify --build namespace and --debug')
if args.asan:
parser.error('Cannot specify --build namespace and --asan')
if args.fuzzing:
parser.error('Cannot specify --build namespace and --fuzzing')
if args.coverage:
parser.error('Cannot specify --build namespace and --coverage')
if args.valgrind:
parser.error('Cannot specify --build namespace and --valgrind')
# do this default manually so we can error if combined with --build namespace
# parser.set_defaults(branch='central')
elif args.branch is None:
args.branch = 'central'
flags = BuildFlags(args.asan, args.debug, args.fuzzing, args.coverage, args.valgrind)
obj = cls(args.target, args.branch, args.build, flags, Platform(args.os, args.cpu))
if args.name is None:
args.name = obj.get_auto_name()
final_dir = os.path.realpath(os.path.join(args.out, args.name))
if not skip_dir_check and os.path.exists(final_dir):
parser.error('Folder exists: %s .. exiting' % final_dir)
extract_options = {
'dry_run': args.dry_run,
'out': final_dir,
'full_symbols': args.full_symbols,
'tests': args.tests
}
return obj, extract_options | python | def from_args(cls, args=None, skip_dir_check=False):
"""
Construct a Fetcher from given command line arguments.
@type args: list(str)
@param args: Command line arguments (optional). Default is to use args from sys.argv
@type skip_dir_check: bool
@param skip_dir_check: Boolean identifying whether to check for existing build directory
@rtype: tuple(Fetcher, output path)
@return: Returns a Fetcher object and keyword arguments for extract_build.
"""
parser = argparse.ArgumentParser()
parser.set_defaults(target='firefox', build='latest', tests=None) # branch default is set after parsing
target_group = parser.add_argument_group('Target')
target_group.add_argument('--target', choices=sorted(cls.TARGET_CHOICES),
help=('Specify the build target. (default: %(default)s)'))
target_group.add_argument('--os', choices=sorted(Platform.SUPPORTED),
help=('Specify the target system. (default: ' + std_platform.system() + ')'))
cpu_choices = sorted(set(itertools.chain(itertools.chain.from_iterable(Platform.SUPPORTED.values()),
Platform.CPU_ALIASES)))
target_group.add_argument('--cpu', choices=cpu_choices,
help=('Specify the target CPU. (default: ' + std_platform.machine() + ')'))
type_group = parser.add_argument_group('Build')
type_group.add_argument('--build', metavar='DATE|REV|NS',
help='Specify the build to download, (default: %(default)s)'
' Accepts values in format YYYY-MM-DD (2017-01-01)'
' revision (57b37213d81150642f5139764e7044b07b9dccc3)'
' or TaskCluster namespace (gecko.v2....)')
branch_group = parser.add_argument_group('Branch')
branch_args = branch_group.add_mutually_exclusive_group()
branch_args.add_argument('--inbound', action='store_const', const='inbound', dest='branch',
help='Download from mozilla-inbound')
branch_args.add_argument('--central', action='store_const', const='central', dest='branch',
help='Download from mozilla-central (default)')
branch_args.add_argument('--release', action='store_const', const='release', dest='branch',
help='Download from mozilla-release')
branch_args.add_argument('--beta', action='store_const', const='beta', dest='branch',
help='Download from mozilla-beta')
branch_args.add_argument('--esr52', action='store_const', const='esr52', dest='branch',
help='Download from mozilla-esr52')
branch_args.add_argument('--esr', action='store_const', const='esr60', dest='branch',
help='Download from mozilla-esr60')
build_group = parser.add_argument_group('Build Arguments')
build_group.add_argument('-d', '--debug', action='store_true',
help='Get debug builds w/ symbols (default=optimized).')
build_group.add_argument('-a', '--asan', action='store_true',
help='Download AddressSanitizer builds.')
build_group.add_argument('--fuzzing', action='store_true',
help='Download --enable-fuzzing builds.')
build_group.add_argument('--coverage', action='store_true',
help='Download --coverage builds. This also pulls down the *.gcno files')
build_group.add_argument('--valgrind', action='store_true',
help='Download Valgrind builds.')
test_group = parser.add_argument_group('Test Arguments')
test_group.add_argument('--tests', nargs='+', metavar='', choices=cls.TEST_CHOICES,
help=('Download tests associated with this build. Acceptable values are: ' +
', '.join(cls.TEST_CHOICES)))
test_group.add_argument('--full-symbols', action='store_true',
help='Download the full crashreport-symbols.zip archive.')
misc_group = parser.add_argument_group('Misc. Arguments')
misc_group.add_argument('-n', '--name',
help='Specify a name (default=auto)')
misc_group.add_argument('-o', '--out', default=os.getcwd(),
help='Specify output directory (default=.)')
misc_group.add_argument('--dry-run', action='store_true',
help="Search for build and output metadata only, don't download anything.")
args = parser.parse_args(args=args)
if re.match(r'(\d{4}-\d{2}-\d{2}|[0-9A-Fa-f]{40}|latest)$', args.build) is None:
# this is a custom build
# ensure conflicting options are not set
if args.branch is not None:
parser.error('Cannot specify --build namespace and branch argument: %s' % args.branch)
if args.debug:
parser.error('Cannot specify --build namespace and --debug')
if args.asan:
parser.error('Cannot specify --build namespace and --asan')
if args.fuzzing:
parser.error('Cannot specify --build namespace and --fuzzing')
if args.coverage:
parser.error('Cannot specify --build namespace and --coverage')
if args.valgrind:
parser.error('Cannot specify --build namespace and --valgrind')
# do this default manually so we can error if combined with --build namespace
# parser.set_defaults(branch='central')
elif args.branch is None:
args.branch = 'central'
flags = BuildFlags(args.asan, args.debug, args.fuzzing, args.coverage, args.valgrind)
obj = cls(args.target, args.branch, args.build, flags, Platform(args.os, args.cpu))
if args.name is None:
args.name = obj.get_auto_name()
final_dir = os.path.realpath(os.path.join(args.out, args.name))
if not skip_dir_check and os.path.exists(final_dir):
parser.error('Folder exists: %s .. exiting' % final_dir)
extract_options = {
'dry_run': args.dry_run,
'out': final_dir,
'full_symbols': args.full_symbols,
'tests': args.tests
}
return obj, extract_options | [
"def",
"from_args",
"(",
"cls",
",",
"args",
"=",
"None",
",",
"skip_dir_check",
"=",
"False",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
")",
"parser",
".",
"set_defaults",
"(",
"target",
"=",
"'firefox'",
",",
"build",
"=",
"'latest'",
",",
"tests",
"=",
"None",
")",
"# branch default is set after parsing",
"target_group",
"=",
"parser",
".",
"add_argument_group",
"(",
"'Target'",
")",
"target_group",
".",
"add_argument",
"(",
"'--target'",
",",
"choices",
"=",
"sorted",
"(",
"cls",
".",
"TARGET_CHOICES",
")",
",",
"help",
"=",
"(",
"'Specify the build target. (default: %(default)s)'",
")",
")",
"target_group",
".",
"add_argument",
"(",
"'--os'",
",",
"choices",
"=",
"sorted",
"(",
"Platform",
".",
"SUPPORTED",
")",
",",
"help",
"=",
"(",
"'Specify the target system. (default: '",
"+",
"std_platform",
".",
"system",
"(",
")",
"+",
"')'",
")",
")",
"cpu_choices",
"=",
"sorted",
"(",
"set",
"(",
"itertools",
".",
"chain",
"(",
"itertools",
".",
"chain",
".",
"from_iterable",
"(",
"Platform",
".",
"SUPPORTED",
".",
"values",
"(",
")",
")",
",",
"Platform",
".",
"CPU_ALIASES",
")",
")",
")",
"target_group",
".",
"add_argument",
"(",
"'--cpu'",
",",
"choices",
"=",
"cpu_choices",
",",
"help",
"=",
"(",
"'Specify the target CPU. (default: '",
"+",
"std_platform",
".",
"machine",
"(",
")",
"+",
"')'",
")",
")",
"type_group",
"=",
"parser",
".",
"add_argument_group",
"(",
"'Build'",
")",
"type_group",
".",
"add_argument",
"(",
"'--build'",
",",
"metavar",
"=",
"'DATE|REV|NS'",
",",
"help",
"=",
"'Specify the build to download, (default: %(default)s)'",
"' Accepts values in format YYYY-MM-DD (2017-01-01)'",
"' revision (57b37213d81150642f5139764e7044b07b9dccc3)'",
"' or TaskCluster namespace (gecko.v2....)'",
")",
"branch_group",
"=",
"parser",
".",
"add_argument_group",
"(",
"'Branch'",
")",
"branch_args",
"=",
"branch_group",
".",
"add_mutually_exclusive_group",
"(",
")",
"branch_args",
".",
"add_argument",
"(",
"'--inbound'",
",",
"action",
"=",
"'store_const'",
",",
"const",
"=",
"'inbound'",
",",
"dest",
"=",
"'branch'",
",",
"help",
"=",
"'Download from mozilla-inbound'",
")",
"branch_args",
".",
"add_argument",
"(",
"'--central'",
",",
"action",
"=",
"'store_const'",
",",
"const",
"=",
"'central'",
",",
"dest",
"=",
"'branch'",
",",
"help",
"=",
"'Download from mozilla-central (default)'",
")",
"branch_args",
".",
"add_argument",
"(",
"'--release'",
",",
"action",
"=",
"'store_const'",
",",
"const",
"=",
"'release'",
",",
"dest",
"=",
"'branch'",
",",
"help",
"=",
"'Download from mozilla-release'",
")",
"branch_args",
".",
"add_argument",
"(",
"'--beta'",
",",
"action",
"=",
"'store_const'",
",",
"const",
"=",
"'beta'",
",",
"dest",
"=",
"'branch'",
",",
"help",
"=",
"'Download from mozilla-beta'",
")",
"branch_args",
".",
"add_argument",
"(",
"'--esr52'",
",",
"action",
"=",
"'store_const'",
",",
"const",
"=",
"'esr52'",
",",
"dest",
"=",
"'branch'",
",",
"help",
"=",
"'Download from mozilla-esr52'",
")",
"branch_args",
".",
"add_argument",
"(",
"'--esr'",
",",
"action",
"=",
"'store_const'",
",",
"const",
"=",
"'esr60'",
",",
"dest",
"=",
"'branch'",
",",
"help",
"=",
"'Download from mozilla-esr60'",
")",
"build_group",
"=",
"parser",
".",
"add_argument_group",
"(",
"'Build Arguments'",
")",
"build_group",
".",
"add_argument",
"(",
"'-d'",
",",
"'--debug'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"'Get debug builds w/ symbols (default=optimized).'",
")",
"build_group",
".",
"add_argument",
"(",
"'-a'",
",",
"'--asan'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"'Download AddressSanitizer builds.'",
")",
"build_group",
".",
"add_argument",
"(",
"'--fuzzing'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"'Download --enable-fuzzing builds.'",
")",
"build_group",
".",
"add_argument",
"(",
"'--coverage'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"'Download --coverage builds. This also pulls down the *.gcno files'",
")",
"build_group",
".",
"add_argument",
"(",
"'--valgrind'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"'Download Valgrind builds.'",
")",
"test_group",
"=",
"parser",
".",
"add_argument_group",
"(",
"'Test Arguments'",
")",
"test_group",
".",
"add_argument",
"(",
"'--tests'",
",",
"nargs",
"=",
"'+'",
",",
"metavar",
"=",
"''",
",",
"choices",
"=",
"cls",
".",
"TEST_CHOICES",
",",
"help",
"=",
"(",
"'Download tests associated with this build. Acceptable values are: '",
"+",
"', '",
".",
"join",
"(",
"cls",
".",
"TEST_CHOICES",
")",
")",
")",
"test_group",
".",
"add_argument",
"(",
"'--full-symbols'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"'Download the full crashreport-symbols.zip archive.'",
")",
"misc_group",
"=",
"parser",
".",
"add_argument_group",
"(",
"'Misc. Arguments'",
")",
"misc_group",
".",
"add_argument",
"(",
"'-n'",
",",
"'--name'",
",",
"help",
"=",
"'Specify a name (default=auto)'",
")",
"misc_group",
".",
"add_argument",
"(",
"'-o'",
",",
"'--out'",
",",
"default",
"=",
"os",
".",
"getcwd",
"(",
")",
",",
"help",
"=",
"'Specify output directory (default=.)'",
")",
"misc_group",
".",
"add_argument",
"(",
"'--dry-run'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"\"Search for build and output metadata only, don't download anything.\"",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
"args",
"=",
"args",
")",
"if",
"re",
".",
"match",
"(",
"r'(\\d{4}-\\d{2}-\\d{2}|[0-9A-Fa-f]{40}|latest)$'",
",",
"args",
".",
"build",
")",
"is",
"None",
":",
"# this is a custom build",
"# ensure conflicting options are not set",
"if",
"args",
".",
"branch",
"is",
"not",
"None",
":",
"parser",
".",
"error",
"(",
"'Cannot specify --build namespace and branch argument: %s'",
"%",
"args",
".",
"branch",
")",
"if",
"args",
".",
"debug",
":",
"parser",
".",
"error",
"(",
"'Cannot specify --build namespace and --debug'",
")",
"if",
"args",
".",
"asan",
":",
"parser",
".",
"error",
"(",
"'Cannot specify --build namespace and --asan'",
")",
"if",
"args",
".",
"fuzzing",
":",
"parser",
".",
"error",
"(",
"'Cannot specify --build namespace and --fuzzing'",
")",
"if",
"args",
".",
"coverage",
":",
"parser",
".",
"error",
"(",
"'Cannot specify --build namespace and --coverage'",
")",
"if",
"args",
".",
"valgrind",
":",
"parser",
".",
"error",
"(",
"'Cannot specify --build namespace and --valgrind'",
")",
"# do this default manually so we can error if combined with --build namespace",
"# parser.set_defaults(branch='central')",
"elif",
"args",
".",
"branch",
"is",
"None",
":",
"args",
".",
"branch",
"=",
"'central'",
"flags",
"=",
"BuildFlags",
"(",
"args",
".",
"asan",
",",
"args",
".",
"debug",
",",
"args",
".",
"fuzzing",
",",
"args",
".",
"coverage",
",",
"args",
".",
"valgrind",
")",
"obj",
"=",
"cls",
"(",
"args",
".",
"target",
",",
"args",
".",
"branch",
",",
"args",
".",
"build",
",",
"flags",
",",
"Platform",
"(",
"args",
".",
"os",
",",
"args",
".",
"cpu",
")",
")",
"if",
"args",
".",
"name",
"is",
"None",
":",
"args",
".",
"name",
"=",
"obj",
".",
"get_auto_name",
"(",
")",
"final_dir",
"=",
"os",
".",
"path",
".",
"realpath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"args",
".",
"out",
",",
"args",
".",
"name",
")",
")",
"if",
"not",
"skip_dir_check",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"final_dir",
")",
":",
"parser",
".",
"error",
"(",
"'Folder exists: %s .. exiting'",
"%",
"final_dir",
")",
"extract_options",
"=",
"{",
"'dry_run'",
":",
"args",
".",
"dry_run",
",",
"'out'",
":",
"final_dir",
",",
"'full_symbols'",
":",
"args",
".",
"full_symbols",
",",
"'tests'",
":",
"args",
".",
"tests",
"}",
"return",
"obj",
",",
"extract_options"
] | Construct a Fetcher from given command line arguments.
@type args: list(str)
@param args: Command line arguments (optional). Default is to use args from sys.argv
@type skip_dir_check: bool
@param skip_dir_check: Boolean identifying whether to check for existing build directory
@rtype: tuple(Fetcher, output path)
@return: Returns a Fetcher object and keyword arguments for extract_build. | [
"Construct",
"a",
"Fetcher",
"from",
"given",
"command",
"line",
"arguments",
"."
] | train | https://github.com/MozillaSecurity/fuzzfetch/blob/166cbfc71b679db019b9ac777dce12ccfdfc2c10/src/fuzzfetch/fetch.py#L734-L849 |
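A minimal usage sketch for Fetcher.from_args; the flag values and output directory are illustrative, skip_dir_check=True only bypasses the folder-exists check, and it is assumed that constructing the Fetcher resolves the requested build against TaskCluster (so network access is needed).

# Parse illustrative CLI flags into a Fetcher plus keyword arguments for extract_build().
fetcher, extract_opts = Fetcher.from_args(
    ['--central', '--asan', '-o', '/tmp/builds'], skip_dir_check=True)
print(extract_opts['out'], extract_opts['dry_run'], extract_opts['tests'])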
MozillaSecurity/fuzzfetch | src/fuzzfetch/fetch.py | Fetcher.main | def main(cls):
"""
fuzzfetch main entry point
Run with --help for usage
"""
log_level = logging.INFO
log_fmt = '[%(asctime)s] %(message)s'
if bool(os.getenv('DEBUG')):
log_level = logging.DEBUG
log_fmt = '%(levelname).1s %(name)s [%(asctime)s] %(message)s'
logging.basicConfig(format=log_fmt, datefmt='%Y-%m-%d %H:%M:%S', level=log_level)
logging.getLogger('requests').setLevel(logging.WARNING)
obj, extract_args = cls.from_args()
LOG.info('Identified task: %s', obj.task_url)
LOG.info('> Task ID: %s', obj.task_id)
LOG.info('> Rank: %s', obj.rank)
LOG.info('> Changeset: %s', obj.changeset)
LOG.info('> Build ID: %s', obj.build_id)
if extract_args['dry_run']:
return
out = extract_args['out']
os.mkdir(out)
try:
obj.extract_build(out, tests=extract_args['tests'], full_symbols=extract_args['full_symbols'])
os.makedirs(os.path.join(out, 'download'))
with open(os.path.join(out, 'download', 'firefox-temp.txt'), 'a') as dl_fd:
dl_fd.write('buildID=' + obj.build_id + os.linesep)
except: # noqa
if os.path.isdir(out):
junction_path.rmtree(out)
raise | python | def main(cls):
"""
fuzzfetch main entry point
Run with --help for usage
"""
log_level = logging.INFO
log_fmt = '[%(asctime)s] %(message)s'
if bool(os.getenv('DEBUG')):
log_level = logging.DEBUG
log_fmt = '%(levelname).1s %(name)s [%(asctime)s] %(message)s'
logging.basicConfig(format=log_fmt, datefmt='%Y-%m-%d %H:%M:%S', level=log_level)
logging.getLogger('requests').setLevel(logging.WARNING)
obj, extract_args = cls.from_args()
LOG.info('Identified task: %s', obj.task_url)
LOG.info('> Task ID: %s', obj.task_id)
LOG.info('> Rank: %s', obj.rank)
LOG.info('> Changeset: %s', obj.changeset)
LOG.info('> Build ID: %s', obj.build_id)
if extract_args['dry_run']:
return
out = extract_args['out']
os.mkdir(out)
try:
obj.extract_build(out, tests=extract_args['tests'], full_symbols=extract_args['full_symbols'])
os.makedirs(os.path.join(out, 'download'))
with open(os.path.join(out, 'download', 'firefox-temp.txt'), 'a') as dl_fd:
dl_fd.write('buildID=' + obj.build_id + os.linesep)
except: # noqa
if os.path.isdir(out):
junction_path.rmtree(out)
raise | [
"def",
"main",
"(",
"cls",
")",
":",
"log_level",
"=",
"logging",
".",
"INFO",
"log_fmt",
"=",
"'[%(asctime)s] %(message)s'",
"if",
"bool",
"(",
"os",
".",
"getenv",
"(",
"'DEBUG'",
")",
")",
":",
"log_level",
"=",
"logging",
".",
"DEBUG",
"log_fmt",
"=",
"'%(levelname).1s %(name)s [%(asctime)s] %(message)s'",
"logging",
".",
"basicConfig",
"(",
"format",
"=",
"log_fmt",
",",
"datefmt",
"=",
"'%Y-%m-%d %H:%M:%S'",
",",
"level",
"=",
"log_level",
")",
"logging",
".",
"getLogger",
"(",
"'requests'",
")",
".",
"setLevel",
"(",
"logging",
".",
"WARNING",
")",
"obj",
",",
"extract_args",
"=",
"cls",
".",
"from_args",
"(",
")",
"LOG",
".",
"info",
"(",
"'Identified task: %s'",
",",
"obj",
".",
"task_url",
")",
"LOG",
".",
"info",
"(",
"'> Task ID: %s'",
",",
"obj",
".",
"task_id",
")",
"LOG",
".",
"info",
"(",
"'> Rank: %s'",
",",
"obj",
".",
"rank",
")",
"LOG",
".",
"info",
"(",
"'> Changeset: %s'",
",",
"obj",
".",
"changeset",
")",
"LOG",
".",
"info",
"(",
"'> Build ID: %s'",
",",
"obj",
".",
"build_id",
")",
"if",
"extract_args",
"[",
"'dry_run'",
"]",
":",
"return",
"out",
"=",
"extract_args",
"[",
"'out'",
"]",
"os",
".",
"mkdir",
"(",
"out",
")",
"try",
":",
"obj",
".",
"extract_build",
"(",
"out",
",",
"tests",
"=",
"extract_args",
"[",
"'tests'",
"]",
",",
"full_symbols",
"=",
"extract_args",
"[",
"'full_symbols'",
"]",
")",
"os",
".",
"makedirs",
"(",
"os",
".",
"path",
".",
"join",
"(",
"out",
",",
"'download'",
")",
")",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"out",
",",
"'download'",
",",
"'firefox-temp.txt'",
")",
",",
"'a'",
")",
"as",
"dl_fd",
":",
"dl_fd",
".",
"write",
"(",
"'buildID='",
"+",
"obj",
".",
"build_id",
"+",
"os",
".",
"linesep",
")",
"except",
":",
"# noqa",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"out",
")",
":",
"junction_path",
".",
"rmtree",
"(",
"out",
")",
"raise"
] | fuzzfetch main entry point
Run with --help for usage | [
"fuzzfetch",
"main",
"entry",
"point"
] | train | https://github.com/MozillaSecurity/fuzzfetch/blob/166cbfc71b679db019b9ac777dce12ccfdfc2c10/src/fuzzfetch/fetch.py#L852-L888 |
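A sketch of driving the fuzzfetch CLI entry point programmatically; the argv values are illustrative and, because main() resolves and logs a real TaskCluster task, it needs network access.

import sys
sys.argv = ['fuzzfetch', '--central', '--dry-run']   # --dry-run returns before any download
Fetcher.main()                                        # logs task id, changeset and build id only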
ClericPy/torequests | torequests/crawlers.py | CommonRequests.init_original_response | def init_original_response(self):
"""Get the original response for comparing, confirm ``is_cookie_necessary``"""
if 'json' in self.request:
self.request['data'] = json.dumps(self.request.pop('json')).encode(
self.encoding)
r1 = self.req.request(
retry=self.retry, timeout=self.timeout, **self.request)
resp = r1.x
assert resp, ValueError(
'original_response should not be failed. %s' % self.request)
self.encoding = self.encoding or resp.encoding
self.original_response = self.ensure_response(r1)
return self.original_response | python | def init_original_response(self):
"""Get the original response for comparing, confirm ``is_cookie_necessary``"""
if 'json' in self.request:
self.request['data'] = json.dumps(self.request.pop('json')).encode(
self.encoding)
r1 = self.req.request(
retry=self.retry, timeout=self.timeout, **self.request)
resp = r1.x
assert resp, ValueError(
'original_response should not be failed. %s' % self.request)
self.encoding = self.encoding or resp.encoding
self.original_response = self.ensure_response(r1)
return self.original_response | [
"def",
"init_original_response",
"(",
"self",
")",
":",
"if",
"'json'",
"in",
"self",
".",
"request",
":",
"self",
".",
"request",
"[",
"'data'",
"]",
"=",
"json",
".",
"dumps",
"(",
"self",
".",
"request",
".",
"pop",
"(",
"'json'",
")",
")",
".",
"encode",
"(",
"self",
".",
"encoding",
")",
"r1",
"=",
"self",
".",
"req",
".",
"request",
"(",
"retry",
"=",
"self",
".",
"retry",
",",
"timeout",
"=",
"self",
".",
"timeout",
",",
"*",
"*",
"self",
".",
"request",
")",
"resp",
"=",
"r1",
".",
"x",
"assert",
"resp",
",",
"ValueError",
"(",
"'original_response should not be failed. %s'",
"%",
"self",
".",
"request",
")",
"self",
".",
"encoding",
"=",
"self",
".",
"encoding",
"or",
"resp",
".",
"encoding",
"self",
".",
"original_response",
"=",
"self",
".",
"ensure_response",
"(",
"r1",
")",
"return",
"self",
".",
"original_response"
] | Get the original response for comparing, confirm ``is_cookie_necessary`` | [
"Get",
"the",
"original",
"response",
"for",
"comparing",
"confirm",
"is_cookie_necessary"
] | train | https://github.com/ClericPy/torequests/blob/1793261688d7a47e1c3a0830d83f8552f5e3e5d9/torequests/crawlers.py#L75-L87 |
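A sketch of taking the baseline response with CommonRequests.init_original_response; cr stands for an already-configured CommonRequests instance, whose constructor is not shown in this row.

baseline = cr.init_original_response()    # performs the reference request once
assert baseline == cr.original_response   # the comparable form is cached on the instance
print(cr.encoding)                         # falls back to the response encoding when unset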
ClericPy/torequests | torequests/crawlers.py | CleanRequest.init_original_response | def init_original_response(self):
"""Get the original response for comparing, confirm is_cookie_necessary"""
no_cookie_resp = None
self.is_cookie_necessary = True
if 'json' in self.request:
self.request['data'] = json.dumps(self.request.pop('json')).encode(
self.encoding)
r1 = self.req.request(
retry=self.retry, timeout=self.timeout, **self.request)
if 'headers' in self.request:
# test is_cookie_necessary
cookie = self.request['headers'].get('Cookie', None)
if cookie:
new_request = deepcopy(self.request)
new_request['headers']['Cookie'] = ''
r2 = self.req.request(
retry=self.retry, timeout=self.timeout, **new_request)
no_cookie_resp = self.ensure_response(r2)
resp = r1.x
assert resp, ValueError(
'original_response should not be failed. %s' % self.request)
self.original_response = self.ensure_response(r1)
self.encoding = self.encoding or resp.encoding
if no_cookie_resp == self.original_response:
self.ignore['headers'].append('Cookie')
self.is_cookie_necessary = False
return self.original_response | python | def init_original_response(self):
"""Get the original response for comparing, confirm is_cookie_necessary"""
no_cookie_resp = None
self.is_cookie_necessary = True
if 'json' in self.request:
self.request['data'] = json.dumps(self.request.pop('json')).encode(
self.encoding)
r1 = self.req.request(
retry=self.retry, timeout=self.timeout, **self.request)
if 'headers' in self.request:
# test is_cookie_necessary
cookie = self.request['headers'].get('Cookie', None)
if cookie:
new_request = deepcopy(self.request)
new_request['headers']['Cookie'] = ''
r2 = self.req.request(
retry=self.retry, timeout=self.timeout, **new_request)
no_cookie_resp = self.ensure_response(r2)
resp = r1.x
assert resp, ValueError(
'original_response should not be failed. %s' % self.request)
self.original_response = self.ensure_response(r1)
self.encoding = self.encoding or resp.encoding
if no_cookie_resp == self.original_response:
self.ignore['headers'].append('Cookie')
self.is_cookie_necessary = False
return self.original_response | [
"def",
"init_original_response",
"(",
"self",
")",
":",
"no_cookie_resp",
"=",
"None",
"self",
".",
"is_cookie_necessary",
"=",
"True",
"if",
"'json'",
"in",
"self",
".",
"request",
":",
"self",
".",
"request",
"[",
"'data'",
"]",
"=",
"json",
".",
"dumps",
"(",
"self",
".",
"request",
".",
"pop",
"(",
"'json'",
")",
")",
".",
"encode",
"(",
"self",
".",
"encoding",
")",
"r1",
"=",
"self",
".",
"req",
".",
"request",
"(",
"retry",
"=",
"self",
".",
"retry",
",",
"timeout",
"=",
"self",
".",
"timeout",
",",
"*",
"*",
"self",
".",
"request",
")",
"if",
"'headers'",
"in",
"self",
".",
"request",
":",
"# test is_cookie_necessary",
"cookie",
"=",
"self",
".",
"request",
"[",
"'headers'",
"]",
".",
"get",
"(",
"'Cookie'",
",",
"None",
")",
"if",
"cookie",
":",
"new_request",
"=",
"deepcopy",
"(",
"self",
".",
"request",
")",
"new_request",
"[",
"'headers'",
"]",
"[",
"'Cookie'",
"]",
"=",
"''",
"r2",
"=",
"self",
".",
"req",
".",
"request",
"(",
"retry",
"=",
"self",
".",
"retry",
",",
"timeout",
"=",
"self",
".",
"timeout",
",",
"*",
"*",
"new_request",
")",
"no_cookie_resp",
"=",
"self",
".",
"ensure_response",
"(",
"r2",
")",
"resp",
"=",
"r1",
".",
"x",
"assert",
"resp",
",",
"ValueError",
"(",
"'original_response should not be failed. %s'",
"%",
"self",
".",
"request",
")",
"self",
".",
"original_response",
"=",
"self",
".",
"ensure_response",
"(",
"r1",
")",
"self",
".",
"encoding",
"=",
"self",
".",
"encoding",
"or",
"resp",
".",
"encoding",
"if",
"no_cookie_resp",
"==",
"self",
".",
"original_response",
":",
"self",
".",
"ignore",
"[",
"'headers'",
"]",
".",
"append",
"(",
"'Cookie'",
")",
"self",
".",
"is_cookie_necessary",
"=",
"False",
"return",
"self",
".",
"original_response"
] | Get the original response for comparing, confirm is_cookie_necessary | [
"Get",
"the",
"original",
"response",
"for",
"comparing",
"confirm",
"is_cookie_necessary"
] | train | https://github.com/ClericPy/torequests/blob/1793261688d7a47e1c3a0830d83f8552f5e3e5d9/torequests/crawlers.py#L145-L171 |
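For the CleanRequest variant the same call additionally probes whether the Cookie header matters; cr is again an assumed, pre-built CleanRequest.

cr.init_original_response()
if not cr.is_cookie_necessary:
    print('Cookie header does not change the response; it was added to the ignore list.')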
ClericPy/torequests | torequests/crawlers.py | CleanRequest.sort_url_qsl | def sort_url_qsl(cls, raw_url, **kwargs):
"""Do nothing but sort the params of url.
raw_url: the raw url to be sorted;
kwargs: (optional) same kwargs for ``sorted``.
"""
parsed_url = urlparse(raw_url)
qsl = parse_qsl(parsed_url.query)
return cls._join_url(parsed_url, sorted(qsl, **kwargs)) | python | def sort_url_qsl(cls, raw_url, **kwargs):
"""Do nothing but sort the params of url.
raw_url: the raw url to be sorted;
kwargs: (optional) same kwargs for ``sorted``.
"""
parsed_url = urlparse(raw_url)
qsl = parse_qsl(parsed_url.query)
return cls._join_url(parsed_url, sorted(qsl, **kwargs)) | [
"def",
"sort_url_qsl",
"(",
"cls",
",",
"raw_url",
",",
"*",
"*",
"kwargs",
")",
":",
"parsed_url",
"=",
"urlparse",
"(",
"raw_url",
")",
"qsl",
"=",
"parse_qsl",
"(",
"parsed_url",
".",
"query",
")",
"return",
"cls",
".",
"_join_url",
"(",
"parsed_url",
",",
"sorted",
"(",
"qsl",
",",
"*",
"*",
"kwargs",
")",
")"
] | Do nothing but sort the params of url.
raw_url: the raw url to be sorted;
kwargs: (optional) same kwargs for ``sorted``. | [
"Do",
"nothing",
"but",
"sort",
"the",
"params",
"of",
"url",
"."
] | train | https://github.com/ClericPy/torequests/blob/1793261688d7a47e1c3a0830d83f8552f5e3e5d9/torequests/crawlers.py#L174-L182 |
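A sketch of the query-string sorting, assuming the method is exposed as a classmethod (the decorator is not shown in this row); the URL is illustrative and the exact joined form depends on the _join_url helper.

url = 'http://example.com/search?b=2&a=1&c=3'
print(CleanRequest.sort_url_qsl(url))                 # parameters reordered to a, b, c
print(CleanRequest.sort_url_qsl(url, reverse=True))   # extra kwargs go straight to sorted()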
ClericPy/torequests | torequests/crawlers.py | CleanRequest.clean_url | def clean_url(self):
"""Only clean the url params and return self."""
raw_url = self.request['url']
parsed_url = urlparse(raw_url)
qsl = parse_qsl(parsed_url.query)
for qs in qsl:
new_url = self._join_url(parsed_url,
[i for i in qsl if i is not qs])
new_request = deepcopy(self.request)
new_request['url'] = new_url
self._add_task('qsl', qs, new_request)
return self | python | def clean_url(self):
"""Only clean the url params and return self."""
raw_url = self.request['url']
parsed_url = urlparse(raw_url)
qsl = parse_qsl(parsed_url.query)
for qs in qsl:
new_url = self._join_url(parsed_url,
[i for i in qsl if i is not qs])
new_request = deepcopy(self.request)
new_request['url'] = new_url
self._add_task('qsl', qs, new_request)
return self | [
"def",
"clean_url",
"(",
"self",
")",
":",
"raw_url",
"=",
"self",
".",
"request",
"[",
"'url'",
"]",
"parsed_url",
"=",
"urlparse",
"(",
"raw_url",
")",
"qsl",
"=",
"parse_qsl",
"(",
"parsed_url",
".",
"query",
")",
"for",
"qs",
"in",
"qsl",
":",
"new_url",
"=",
"self",
".",
"_join_url",
"(",
"parsed_url",
",",
"[",
"i",
"for",
"i",
"in",
"qsl",
"if",
"i",
"is",
"not",
"qs",
"]",
")",
"new_request",
"=",
"deepcopy",
"(",
"self",
".",
"request",
")",
"new_request",
"[",
"'url'",
"]",
"=",
"new_url",
"self",
".",
"_add_task",
"(",
"'qsl'",
",",
"qs",
",",
"new_request",
")",
"return",
"self"
] | Only clean the url params and return self. | [
"Only",
"clean",
"the",
"url",
"params",
"and",
"return",
"self",
"."
] | train | https://github.com/ClericPy/torequests/blob/1793261688d7a47e1c3a0830d83f8552f5e3e5d9/torequests/crawlers.py#L201-L212 |
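clean_url only schedules the trial requests and returns self; cr is an assumed CleanRequest whose URL carries query parameters, and tasks is assumed to be the list that result() later consumes.

cr.clean_url()          # queue one request per query-string pair, each with that pair removed
print(len(cr.tasks))    # number of comparisons scheduled so far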
ClericPy/torequests | torequests/crawlers.py | CleanRequest.clean_post_data | def clean_post_data(self):
"""Only clean the post-data and return self.
Including form-data / bytes-data / json-data."""
data = self.request.get('data')
if not (data and self.request['method'] == 'post'):
return self
# case of total_data
new_request = deepcopy(self.request)
new_request.pop('data')
self._add_task('total_data', data, new_request)
# case of form_data
if isinstance(data, dict):
for key in data:
new_request = deepcopy(self.request)
new_form = deepcopy(data)
new_form.pop(key)
new_request['data'] = new_form
self._add_task('form_data', key, new_request)
return self
# case of json_data
try:
json_data = json.loads(data.decode(self.encoding))
for key in json_data:
new_request = deepcopy(self.request)
new_json = deepcopy(json_data)
new_json.pop(key)
new_request['data'] = json.dumps(new_json).encode(self.encoding)
self._add_task('json_data', key, new_request)
self.has_json_data = True
return self
except JSONDecodeError:
return self | python | def clean_post_data(self):
"""Only clean the post-data and return self.
Including form-data / bytes-data / json-data."""
data = self.request.get('data')
if not (data and self.request['method'] == 'post'):
return self
# case of total_data
new_request = deepcopy(self.request)
new_request.pop('data')
self._add_task('total_data', data, new_request)
# case of form_data
if isinstance(data, dict):
for key in data:
new_request = deepcopy(self.request)
new_form = deepcopy(data)
new_form.pop(key)
new_request['data'] = new_form
self._add_task('form_data', key, new_request)
return self
# case of json_data
try:
json_data = json.loads(data.decode(self.encoding))
for key in json_data:
new_request = deepcopy(self.request)
new_json = deepcopy(json_data)
new_json.pop(key)
new_request['data'] = json.dumps(new_json).encode(self.encoding)
self._add_task('json_data', key, new_request)
self.has_json_data = True
return self
except JSONDecodeError:
return self | [
"def",
"clean_post_data",
"(",
"self",
")",
":",
"data",
"=",
"self",
".",
"request",
".",
"get",
"(",
"'data'",
")",
"if",
"not",
"(",
"data",
"and",
"self",
".",
"request",
"[",
"'method'",
"]",
"==",
"'post'",
")",
":",
"return",
"self",
"# case of total_data",
"new_request",
"=",
"deepcopy",
"(",
"self",
".",
"request",
")",
"new_request",
".",
"pop",
"(",
"'data'",
")",
"self",
".",
"_add_task",
"(",
"'total_data'",
",",
"data",
",",
"new_request",
")",
"# case of form_data",
"if",
"isinstance",
"(",
"data",
",",
"dict",
")",
":",
"for",
"key",
"in",
"data",
":",
"new_request",
"=",
"deepcopy",
"(",
"self",
".",
"request",
")",
"new_form",
"=",
"deepcopy",
"(",
"data",
")",
"new_form",
".",
"pop",
"(",
"key",
")",
"new_request",
"[",
"'data'",
"]",
"=",
"new_form",
"self",
".",
"_add_task",
"(",
"'form_data'",
",",
"key",
",",
"new_request",
")",
"return",
"self",
"# case of json_data",
"try",
":",
"json_data",
"=",
"json",
".",
"loads",
"(",
"data",
".",
"decode",
"(",
"self",
".",
"encoding",
")",
")",
"for",
"key",
"in",
"json_data",
":",
"new_request",
"=",
"deepcopy",
"(",
"self",
".",
"request",
")",
"new_json",
"=",
"deepcopy",
"(",
"json_data",
")",
"new_json",
".",
"pop",
"(",
"key",
")",
"new_request",
"[",
"'data'",
"]",
"=",
"json",
".",
"dumps",
"(",
"new_json",
")",
".",
"encode",
"(",
"self",
".",
"encoding",
")",
"self",
".",
"_add_task",
"(",
"'json_data'",
",",
"key",
",",
"new_request",
")",
"self",
".",
"has_json_data",
"=",
"True",
"return",
"self",
"except",
"JSONDecodeError",
":",
"return",
"self"
] | Only clean the post-data and return self.
Including form-data / bytes-data / json-data. | [
"Only",
"clean",
"the",
"post",
"-",
"data",
"and",
"return",
"self",
".",
"Including",
"form",
"-",
"data",
"/",
"bytes",
"-",
"data",
"/",
"json",
"-",
"data",
"."
] | train | https://github.com/ClericPy/torequests/blob/1793261688d7a47e1c3a0830d83f8552f5e3e5d9/torequests/crawlers.py#L214-L248 |
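A sketch for POST bodies; only a request with method 'post' and a data payload schedules anything, and cr is again an assumed instance.

cr.clean_post_data()     # trials drop the whole body, single form keys, or single JSON keys
print(cr.has_json_data)  # set to True only when the body decoded as JSON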
ClericPy/torequests | torequests/crawlers.py | CleanRequest.clean_cookie | def clean_cookie(self):
"""Only clean the cookie from headers and return self."""
if not self.is_cookie_necessary:
return self
headers = self.request.get('headers', {})
cookies = SimpleCookie(headers['Cookie'])
for k, v in cookies.items():
new_cookie = '; '.join(
[i.OutputString() for i in cookies.values() if i != v])
new_request = deepcopy(self.request)
new_request['headers']['Cookie'] = new_cookie
self._add_task('Cookie', k, new_request)
return self | python | def clean_cookie(self):
"""Only clean the cookie from headers and return self."""
if not self.is_cookie_necessary:
return self
headers = self.request.get('headers', {})
cookies = SimpleCookie(headers['Cookie'])
for k, v in cookies.items():
new_cookie = '; '.join(
[i.OutputString() for i in cookies.values() if i != v])
new_request = deepcopy(self.request)
new_request['headers']['Cookie'] = new_cookie
self._add_task('Cookie', k, new_request)
return self | [
"def",
"clean_cookie",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"is_cookie_necessary",
":",
"return",
"self",
"headers",
"=",
"self",
".",
"request",
".",
"get",
"(",
"'headers'",
",",
"{",
"}",
")",
"cookies",
"=",
"SimpleCookie",
"(",
"headers",
"[",
"'Cookie'",
"]",
")",
"for",
"k",
",",
"v",
"in",
"cookies",
".",
"items",
"(",
")",
":",
"new_cookie",
"=",
"'; '",
".",
"join",
"(",
"[",
"i",
".",
"OutputString",
"(",
")",
"for",
"i",
"in",
"cookies",
".",
"values",
"(",
")",
"if",
"i",
"!=",
"v",
"]",
")",
"new_request",
"=",
"deepcopy",
"(",
"self",
".",
"request",
")",
"new_request",
"[",
"'headers'",
"]",
"[",
"'Cookie'",
"]",
"=",
"new_cookie",
"self",
".",
"_add_task",
"(",
"'Cookie'",
",",
"k",
",",
"new_request",
")",
"return",
"self"
] | Only clean the cookie from headers and return self. | [
"Only",
"clean",
"the",
"cookie",
"from",
"headers",
"and",
"return",
"self",
"."
] | train | https://github.com/ClericPy/torequests/blob/1793261688d7a47e1c3a0830d83f8552f5e3e5d9/torequests/crawlers.py#L250-L262 |
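clean_cookie schedules one trial per cookie key and is also reached through clean_headers below, so a direct call is mainly useful for cookie-only runs (cr assumed as before).

cr.clean_cookie()   # no-op when the earlier probe marked the Cookie header unnecessary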
ClericPy/torequests | torequests/crawlers.py | CleanRequest.clean_headers | def clean_headers(self):
"""Only clean the headers (cookie include) and return self."""
if not isinstance(self.request.get('headers'), dict):
return self
headers = self.request['headers']
if 'Cookie' in headers:
self.clean_cookie()
for key in headers:
# cookie will be checked in other methods.
if key == 'Cookie':
continue
new_request = deepcopy(self.request)
new_headers = deepcopy(headers)
new_headers.pop(key)
new_request['headers'] = new_headers
self._add_task('headers', key, new_request)
return self | python | def clean_headers(self):
"""Only clean the headers (cookie include) and return self."""
if not isinstance(self.request.get('headers'), dict):
return self
headers = self.request['headers']
if 'Cookie' in headers:
self.clean_cookie()
for key in headers:
# cookie will be checked in other methods.
if key == 'Cookie':
continue
new_request = deepcopy(self.request)
new_headers = deepcopy(headers)
new_headers.pop(key)
new_request['headers'] = new_headers
self._add_task('headers', key, new_request)
return self | [
"def",
"clean_headers",
"(",
"self",
")",
":",
"if",
"not",
"isinstance",
"(",
"self",
".",
"request",
".",
"get",
"(",
"'headers'",
")",
",",
"dict",
")",
":",
"return",
"self",
"headers",
"=",
"self",
".",
"request",
"[",
"'headers'",
"]",
"if",
"'Cookie'",
"in",
"headers",
":",
"self",
".",
"clean_cookie",
"(",
")",
"for",
"key",
"in",
"headers",
":",
"# cookie will be checked in other methods.",
"if",
"key",
"==",
"'Cookie'",
":",
"continue",
"new_request",
"=",
"deepcopy",
"(",
"self",
".",
"request",
")",
"new_headers",
"=",
"deepcopy",
"(",
"headers",
")",
"new_headers",
".",
"pop",
"(",
"key",
")",
"new_request",
"[",
"'headers'",
"]",
"=",
"new_headers",
"self",
".",
"_add_task",
"(",
"'headers'",
",",
"key",
",",
"new_request",
")",
"return",
"self"
] | Only clean the headers (cookie include) and return self. | [
"Only",
"clean",
"the",
"headers",
"(",
"cookie",
"include",
")",
"and",
"return",
"self",
"."
] | train | https://github.com/ClericPy/torequests/blob/1793261688d7a47e1c3a0830d83f8552f5e3e5d9/torequests/crawlers.py#L264-L280 |
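clean_headers covers every header, delegates Cookie handling to clean_cookie, and keeps the fluent style.

cr.clean_headers()   # one trial request per header, each with that header removed; returns self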
ClericPy/torequests | torequests/crawlers.py | CleanRequest.reset_new_request | def reset_new_request(self):
"""Remove the non-sense args from the self.ignore, return self.new_request"""
raw_url = self.new_request['url']
parsed_url = urlparse(raw_url)
qsl = parse_qsl(parsed_url.query)
new_url = self._join_url(
parsed_url, [i for i in qsl if i not in self.ignore['qsl']])
self.new_request['url'] = new_url
self.logger_function('ignore: %s' % self.ignore)
for key in self.ignore['headers']:
self.new_request['headers'].pop(key)
if not self.new_request.get('headers'):
self.new_request.pop('headers', None)
if self.ignore['Cookie'] and 'Cookie' not in self.ignore['headers']:
headers = self.new_request['headers']
headers = {key.title(): headers[key] for key in headers}
if 'Cookie' in headers:
cookies = SimpleCookie(headers['Cookie'])
new_cookie = '; '.join([
i[1].OutputString()
for i in cookies.items()
if i[0] not in self.ignore['Cookie']
])
self.new_request['headers']['Cookie'] = new_cookie
if self.new_request['method'] == 'post':
data = self.new_request.get('data')
if data:
if isinstance(data, dict):
for key in self.ignore['form_data']:
data.pop(key)
if (not data) or self.ignore['total_data']:
# not need data any more
self.new_request.pop('data', None)
if self.has_json_data and 'data' in self.new_request:
json_data = json.loads(data.decode(self.encoding))
for key in self.ignore['json_data']:
json_data.pop(key)
self.new_request['data'] = json.dumps(json_data).encode(
self.encoding)
return self.new_request | python | def reset_new_request(self):
"""Remove the non-sense args from the self.ignore, return self.new_request"""
raw_url = self.new_request['url']
parsed_url = urlparse(raw_url)
qsl = parse_qsl(parsed_url.query)
new_url = self._join_url(
parsed_url, [i for i in qsl if i not in self.ignore['qsl']])
self.new_request['url'] = new_url
self.logger_function('ignore: %s' % self.ignore)
for key in self.ignore['headers']:
self.new_request['headers'].pop(key)
if not self.new_request.get('headers'):
self.new_request.pop('headers', None)
if self.ignore['Cookie'] and 'Cookie' not in self.ignore['headers']:
headers = self.new_request['headers']
headers = {key.title(): headers[key] for key in headers}
if 'Cookie' in headers:
cookies = SimpleCookie(headers['Cookie'])
new_cookie = '; '.join([
i[1].OutputString()
for i in cookies.items()
if i[0] not in self.ignore['Cookie']
])
self.new_request['headers']['Cookie'] = new_cookie
if self.new_request['method'] == 'post':
data = self.new_request.get('data')
if data:
if isinstance(data, dict):
for key in self.ignore['form_data']:
data.pop(key)
if (not data) or self.ignore['total_data']:
# not need data any more
self.new_request.pop('data', None)
if self.has_json_data and 'data' in self.new_request:
json_data = json.loads(data.decode(self.encoding))
for key in self.ignore['json_data']:
json_data.pop(key)
self.new_request['data'] = json.dumps(json_data).encode(
self.encoding)
return self.new_request | [
"def",
"reset_new_request",
"(",
"self",
")",
":",
"raw_url",
"=",
"self",
".",
"new_request",
"[",
"'url'",
"]",
"parsed_url",
"=",
"urlparse",
"(",
"raw_url",
")",
"qsl",
"=",
"parse_qsl",
"(",
"parsed_url",
".",
"query",
")",
"new_url",
"=",
"self",
".",
"_join_url",
"(",
"parsed_url",
",",
"[",
"i",
"for",
"i",
"in",
"qsl",
"if",
"i",
"not",
"in",
"self",
".",
"ignore",
"[",
"'qsl'",
"]",
"]",
")",
"self",
".",
"new_request",
"[",
"'url'",
"]",
"=",
"new_url",
"self",
".",
"logger_function",
"(",
"'ignore: %s'",
"%",
"self",
".",
"ignore",
")",
"for",
"key",
"in",
"self",
".",
"ignore",
"[",
"'headers'",
"]",
":",
"self",
".",
"new_request",
"[",
"'headers'",
"]",
".",
"pop",
"(",
"key",
")",
"if",
"not",
"self",
".",
"new_request",
".",
"get",
"(",
"'headers'",
")",
":",
"self",
".",
"new_request",
".",
"pop",
"(",
"'headers'",
",",
"None",
")",
"if",
"self",
".",
"ignore",
"[",
"'Cookie'",
"]",
"and",
"'Cookie'",
"not",
"in",
"self",
".",
"ignore",
"[",
"'headers'",
"]",
":",
"headers",
"=",
"self",
".",
"new_request",
"[",
"'headers'",
"]",
"headers",
"=",
"{",
"key",
".",
"title",
"(",
")",
":",
"headers",
"[",
"key",
"]",
"for",
"key",
"in",
"headers",
"}",
"if",
"'Cookie'",
"in",
"headers",
":",
"cookies",
"=",
"SimpleCookie",
"(",
"headers",
"[",
"'Cookie'",
"]",
")",
"new_cookie",
"=",
"'; '",
".",
"join",
"(",
"[",
"i",
"[",
"1",
"]",
".",
"OutputString",
"(",
")",
"for",
"i",
"in",
"cookies",
".",
"items",
"(",
")",
"if",
"i",
"[",
"0",
"]",
"not",
"in",
"self",
".",
"ignore",
"[",
"'Cookie'",
"]",
"]",
")",
"self",
".",
"new_request",
"[",
"'headers'",
"]",
"[",
"'Cookie'",
"]",
"=",
"new_cookie",
"if",
"self",
".",
"new_request",
"[",
"'method'",
"]",
"==",
"'post'",
":",
"data",
"=",
"self",
".",
"new_request",
".",
"get",
"(",
"'data'",
")",
"if",
"data",
":",
"if",
"isinstance",
"(",
"data",
",",
"dict",
")",
":",
"for",
"key",
"in",
"self",
".",
"ignore",
"[",
"'form_data'",
"]",
":",
"data",
".",
"pop",
"(",
"key",
")",
"if",
"(",
"not",
"data",
")",
"or",
"self",
".",
"ignore",
"[",
"'total_data'",
"]",
":",
"# not need data any more",
"self",
".",
"new_request",
".",
"pop",
"(",
"'data'",
",",
"None",
")",
"if",
"self",
".",
"has_json_data",
"and",
"'data'",
"in",
"self",
".",
"new_request",
":",
"json_data",
"=",
"json",
".",
"loads",
"(",
"data",
".",
"decode",
"(",
"self",
".",
"encoding",
")",
")",
"for",
"key",
"in",
"self",
".",
"ignore",
"[",
"'json_data'",
"]",
":",
"json_data",
".",
"pop",
"(",
"key",
")",
"self",
".",
"new_request",
"[",
"'data'",
"]",
"=",
"json",
".",
"dumps",
"(",
"json_data",
")",
".",
"encode",
"(",
"self",
".",
"encoding",
")",
"return",
"self",
".",
"new_request"
] | Remove the non-sense args from the self.ignore, return self.new_request | [
"Remove",
"the",
"non",
"-",
"sense",
"args",
"from",
"the",
"self",
".",
"ignore",
"return",
"self",
".",
"new_request"
] | train | https://github.com/ClericPy/torequests/blob/1793261688d7a47e1c3a0830d83f8552f5e3e5d9/torequests/crawlers.py#L282-L323 |
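reset_new_request is normally reached through result(); a direct call just rebuilds new_request from whatever the ignore dict already contains (cr assumed as before).

slim = cr.reset_new_request()
assert slim is cr.new_request   # the pruned request is also kept on the instance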
ClericPy/torequests | torequests/crawlers.py | CleanRequest.result | def result(self):
"""Whole task, clean_all + reset_new_request, return self.new_request."""
if not self.tasks:
self.clean_all()
tasks_length = len(self.tasks)
self.logger_function(
'%s tasks of request, will cost at least %s seconds.' %
(tasks_length,
round(self.req.interval / self.req.n * tasks_length, 2)))
self.req.x
for task in self.tasks:
key, value, fut = task
if fut.x and fut.cx:
# fut.x == req success & fut.cx == response not changed.
self.ignore[key].append(value)
return self.reset_new_request() | python | def result(self):
"""Whole task, clean_all + reset_new_request, return self.new_request."""
if not self.tasks:
self.clean_all()
tasks_length = len(self.tasks)
self.logger_function(
'%s tasks of request, will cost at least %s seconds.' %
(tasks_length,
round(self.req.interval / self.req.n * tasks_length, 2)))
self.req.x
for task in self.tasks:
key, value, fut = task
if fut.x and fut.cx:
# fut.x == req success & fut.cx == response not changed.
self.ignore[key].append(value)
return self.reset_new_request() | [
"def",
"result",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"tasks",
":",
"self",
".",
"clean_all",
"(",
")",
"tasks_length",
"=",
"len",
"(",
"self",
".",
"tasks",
")",
"self",
".",
"logger_function",
"(",
"'%s tasks of request, will cost at least %s seconds.'",
"%",
"(",
"tasks_length",
",",
"round",
"(",
"self",
".",
"req",
".",
"interval",
"/",
"self",
".",
"req",
".",
"n",
"*",
"tasks_length",
",",
"2",
")",
")",
")",
"self",
".",
"req",
".",
"x",
"for",
"task",
"in",
"self",
".",
"tasks",
":",
"key",
",",
"value",
",",
"fut",
"=",
"task",
"if",
"fut",
".",
"x",
"and",
"fut",
".",
"cx",
":",
"# fut.x == req success & fut.cx == response not changed.",
"self",
".",
"ignore",
"[",
"key",
"]",
".",
"append",
"(",
"value",
")",
"return",
"self",
".",
"reset_new_request",
"(",
")"
] | Whole task, clean_all + reset_new_request, return self.new_request. | [
"Whole",
"task",
"clean_all",
"+",
"reset_new_request",
"return",
"self",
".",
"new_request",
"."
] | train | https://github.com/ClericPy/torequests/blob/1793261688d7a47e1c3a0830d83f8552f5e3e5d9/torequests/crawlers.py#L329-L344 |
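An end-to-end sketch: every clean_* method returns self, so the reduction chains, and result() falls back to clean_all() when nothing was queued (cr assumed as before).

minimal = cr.clean_url().clean_post_data().clean_headers().result()
print(minimal['url'])
print(minimal.get('headers'))   # may be missing entirely if every header proved unnecessary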
ClericPy/torequests | torequests/crawlers.py | Seed.as_json | def as_json(self, ensure_ascii=False):
"""Property return key-value json-string from __slots__."""
return json.dumps(self.as_dict, ensure_ascii=ensure_ascii) | python | def as_json(self, ensure_ascii=False):
"""Property return key-value json-string from __slots__."""
return json.dumps(self.as_dict, ensure_ascii=ensure_ascii) | [
"def",
"as_json",
"(",
"self",
",",
"ensure_ascii",
"=",
"False",
")",
":",
"return",
"json",
".",
"dumps",
"(",
"self",
".",
"as_dict",
",",
"ensure_ascii",
"=",
"ensure_ascii",
")"
] | Property return key-value json-string from __slots__. | [
"Property",
"return",
"key",
"-",
"value",
"json",
"-",
"string",
"from",
"__slots__",
"."
] | train | https://github.com/ClericPy/torequests/blob/1793261688d7a47e1c3a0830d83f8552f5e3e5d9/torequests/crawlers.py#L382-L384 |
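A sketch for the serializer; seed stands for an assumed Seed instance with its __slots__ fields populated.

print(seed.as_json())                    # non-ASCII characters kept as-is by default
print(seed.as_json(ensure_ascii=True))   # force \uXXXX escapes instead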
vsoch/helpme | helpme/client/__init__.py | main | def main():
'''the main entry point for the HelpMe Command line application. Currently,
the user can request help or set config values for a particular helper.
'''
# Customize parser
parser = get_parser()
subparsers = get_subparsers(parser)
def help(return_code=0):
'''print help, including the software version and active client
and exit with return code.
'''
version = helpme.__version__
bot.custom(message='Command Line Tool v%s' %version,
prefix='\n[HelpMe] ',
color='CYAN')
parser.print_help()
sys.exit(return_code)
# If the user didn't provide any arguments, show the full help
if len(sys.argv) == 1:
help()
try:
args, unknown = parser.parse_known_args()
except:
sys.exit(0)
extras = None
if args.command in HELPME_HELPERS and len(unknown) > 0:
extras = unknown
# if environment logging variable not set, make silent
if args.debug is False:
os.environ['MESSAGELEVEL'] = "INFO"
# Show the version and exit
if args.version is True:
print(helpme.__version__)
sys.exit(0)
if args.command == "config": from .config import main
if args.command == "list": from .list import main
if args.command in HELPME_HELPERS: from .help import main
# Pass on to the correct parser
return_code = 0
try:
main(args, extras)
sys.exit(return_code)
except UnboundLocalError:
return_code = 1
help(return_code) | python | def main():
'''the main entry point for the HelpMe Command line application. Currently,
the user can request help or set config values for a particular helper.
'''
# Customize parser
parser = get_parser()
subparsers = get_subparsers(parser)
def help(return_code=0):
'''print help, including the software version and active client
and exit with return code.
'''
version = helpme.__version__
bot.custom(message='Command Line Tool v%s' %version,
prefix='\n[HelpMe] ',
color='CYAN')
parser.print_help()
sys.exit(return_code)
# If the user didn't provide any arguments, show the full help
if len(sys.argv) == 1:
help()
try:
args, unknown = parser.parse_known_args()
except:
sys.exit(0)
extras = None
if args.command in HELPME_HELPERS and len(unknown) > 0:
extras = unknown
# if environment logging variable not set, make silent
if args.debug is False:
os.environ['MESSAGELEVEL'] = "INFO"
# Show the version and exit
if args.version is True:
print(helpme.__version__)
sys.exit(0)
if args.command == "config": from .config import main
if args.command == "list": from .list import main
if args.command in HELPME_HELPERS: from .help import main
# Pass on to the correct parser
return_code = 0
try:
main(args, extras)
sys.exit(return_code)
except UnboundLocalError:
return_code = 1
help(return_code) | [
"def",
"main",
"(",
")",
":",
"# Customize parser",
"parser",
"=",
"get_parser",
"(",
")",
"subparsers",
"=",
"get_subparsers",
"(",
"parser",
")",
"def",
"help",
"(",
"return_code",
"=",
"0",
")",
":",
"'''print help, including the software version and active client \n and exit with return code.\n '''",
"version",
"=",
"helpme",
".",
"__version__",
"bot",
".",
"custom",
"(",
"message",
"=",
"'Command Line Tool v%s'",
"%",
"version",
",",
"prefix",
"=",
"'\\n[HelpMe] '",
",",
"color",
"=",
"'CYAN'",
")",
"parser",
".",
"print_help",
"(",
")",
"sys",
".",
"exit",
"(",
"return_code",
")",
"# If the user didn't provide any arguments, show the full help",
"if",
"len",
"(",
"sys",
".",
"argv",
")",
"==",
"1",
":",
"help",
"(",
")",
"try",
":",
"args",
",",
"unknown",
"=",
"parser",
".",
"parse_known_args",
"(",
")",
"except",
":",
"sys",
".",
"exit",
"(",
"0",
")",
"extras",
"=",
"None",
"if",
"args",
".",
"command",
"in",
"HELPME_HELPERS",
"and",
"len",
"(",
"unknown",
")",
">",
"0",
":",
"extras",
"=",
"unknown",
"# if environment logging variable not set, make silent",
"if",
"args",
".",
"debug",
"is",
"False",
":",
"os",
".",
"environ",
"[",
"'MESSAGELEVEL'",
"]",
"=",
"\"INFO\"",
"# Show the version and exit",
"if",
"args",
".",
"version",
"is",
"True",
":",
"print",
"(",
"helpme",
".",
"__version__",
")",
"sys",
".",
"exit",
"(",
"0",
")",
"if",
"args",
".",
"command",
"==",
"\"config\"",
":",
"from",
".",
"config",
"import",
"main",
"if",
"args",
".",
"command",
"==",
"\"list\"",
":",
"from",
".",
"list",
"import",
"main",
"if",
"args",
".",
"command",
"in",
"HELPME_HELPERS",
":",
"from",
".",
"help",
"import",
"main",
"# Pass on to the correct parser",
"return_code",
"=",
"0",
"try",
":",
"main",
"(",
"args",
",",
"extras",
")",
"sys",
".",
"exit",
"(",
"return_code",
")",
"except",
"UnboundLocalError",
":",
"return_code",
"=",
"1",
"help",
"(",
"return_code",
")"
] | the main entry point for the HelpMe Command line application. Currently,
the user can request help or set config values for a particular helper. | [
"the",
"main",
"entry",
"point",
"for",
"the",
"HelpMe",
"Command",
"line",
"application",
".",
"Currently",
"the",
"user",
"can",
"request",
"help",
"or",
"set",
"config",
"values",
"for",
"a",
"particular",
"helper",
"."
] | train | https://github.com/vsoch/helpme/blob/e609172260b10cddadb2d2023ab26da8082a9feb/helpme/client/__init__.py#L95-L153 |
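A sketch of the HelpMe entry point driven programmatically; the argv values are illustrative, and it is assumed that get_parser() defines the --version and --debug flags used above.

import sys
sys.argv = ['helpme', '--version']
main()   # prints helpme.__version__ and then calls sys.exit(0)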
mjirik/io3d | io3d/cachefile.py | CacheFile.get_or_save_default | def get_or_save_default(self, key, default_value):
"""
Get value stored in cache file or store there default value.
:param key:
:param default_value:
:return:
"""
val = self.get_or_none(key)
if val is None:
self.update(key, default_value)
val = default_value
return val | python | def get_or_save_default(self, key, default_value):
"""
Get value stored in cache file or store there default value.
:param key:
:param default_value:
:return:
"""
val = self.get_or_none(key)
if val is None:
self.update(key, default_value)
val = default_value
return val | [
"def",
"get_or_save_default",
"(",
"self",
",",
"key",
",",
"default_value",
")",
":",
"val",
"=",
"self",
".",
"get_or_none",
"(",
"key",
")",
"if",
"val",
"is",
"None",
":",
"self",
".",
"update",
"(",
"key",
",",
"default_value",
")",
"val",
"=",
"default_value",
"return",
"val"
] | Get value stored in cache file or store there default value.
:param key:
:param default_value:
:return: | [
"Get",
"value",
"stored",
"in",
"cache",
"file",
"or",
"store",
"there",
"default",
"value",
".",
":",
"param",
"key",
":",
":",
"param",
"default_value",
":",
":",
"return",
":"
] | train | https://github.com/mjirik/io3d/blob/ccaf3e378dcc967f2565d477fc27583fd0f61fcc/io3d/cachefile.py#L46-L57 |
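A sketch of the read-or-initialize pattern; cache is an assumed CacheFile instance and the key and default value are illustrative.

voxelsize = cache.get_or_save_default('voxelsize_mm', [1.0, 1.0, 1.0])
# the first call stores the default, later calls return whatever the cache file already holds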
mjirik/io3d | io3d/datawriter.py | saveOverlayToDicomCopy | def saveOverlayToDicomCopy(input_dcmfilelist, output_dicom_dir, overlays,
crinfo, orig_shape):
""" Save overlay to dicom. """
from . import datawriter as dwriter
# import qmisc
if not os.path.exists(output_dicom_dir):
os.makedirs(output_dicom_dir)
import imtools.image_manipulation
# uncrop all overlays
for key in overlays:
overlays[key] = imtools.image_manipulation.uncrop(overlays[key], crinfo, orig_shape)
dw = dwriter.DataWriter()
dw.DataCopyWithOverlay(input_dcmfilelist, output_dicom_dir, overlays) | python | def saveOverlayToDicomCopy(input_dcmfilelist, output_dicom_dir, overlays,
crinfo, orig_shape):
""" Save overlay to dicom. """
from . import datawriter as dwriter
# import qmisc
if not os.path.exists(output_dicom_dir):
os.makedirs(output_dicom_dir)
import imtools.image_manipulation
# uncrop all overlays
for key in overlays:
overlays[key] = imtools.image_manipulation.uncrop(overlays[key], crinfo, orig_shape)
dw = dwriter.DataWriter()
dw.DataCopyWithOverlay(input_dcmfilelist, output_dicom_dir, overlays) | [
"def",
"saveOverlayToDicomCopy",
"(",
"input_dcmfilelist",
",",
"output_dicom_dir",
",",
"overlays",
",",
"crinfo",
",",
"orig_shape",
")",
":",
"from",
".",
"import",
"datawriter",
"as",
"dwriter",
"# import qmisc",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"output_dicom_dir",
")",
":",
"os",
".",
"makedirs",
"(",
"output_dicom_dir",
")",
"import",
"imtools",
".",
"image_manipulation",
"# uncrop all overlays",
"for",
"key",
"in",
"overlays",
":",
"overlays",
"[",
"key",
"]",
"=",
"imtools",
".",
"image_manipulation",
".",
"uncrop",
"(",
"overlays",
"[",
"key",
"]",
",",
"crinfo",
",",
"orig_shape",
")",
"dw",
"=",
"dwriter",
".",
"DataWriter",
"(",
")",
"dw",
".",
"DataCopyWithOverlay",
"(",
"input_dcmfilelist",
",",
"output_dicom_dir",
",",
"overlays",
")"
] | Save overlay to dicom. | [
"Save",
"overlay",
"to",
"dicom",
"."
] | train | https://github.com/mjirik/io3d/blob/ccaf3e378dcc967f2565d477fc27583fd0f61fcc/io3d/datawriter.py#L527-L542 |
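A heavily hedged call sketch: the paths are illustrative, overlays maps an overlay index to a binary 3D mask, and crinfo/orig_shape are assumed to come from the cropping step of the segmentation pipeline (their structure is not shown here).

import glob
saveOverlayToDicomCopy(sorted(glob.glob('/data/case01/*.dcm')),   # source DICOM slices
                       '/data/case01_with_overlay',                # output dir, created if missing
                       {1: liver_mask},                            # overlay index -> binary array (assumed)
                       crinfo, orig_shape)                         # uncropping info (assumed available)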
mjirik/io3d | io3d/datawriter.py | DataWriter.__get_segmentation_path | def __get_segmentation_path(self, path):
""" Create path with "_segmentation" suffix and keep extension.
:param path:
:return:
"""
startpath, ext = os.path.splitext(path)
segmentation_path = startpath + "_segmentation" + ext
return segmentation_path | python | def __get_segmentation_path(self, path):
""" Create path with "_segmentation" suffix and keep extension.
:param path:
:return:
"""
startpath, ext = os.path.splitext(path)
segmentation_path = startpath + "_segmentation" + ext
return segmentation_path | [
"def",
"__get_segmentation_path",
"(",
"self",
",",
"path",
")",
":",
"startpath",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"path",
")",
"segmentation_path",
"=",
"startpath",
"+",
"\"_segmentation\"",
"+",
"ext",
"return",
"segmentation_path"
] | Create path with "_segmentation" suffix and keep extension.
:param path:
:return: | [
"Create",
"path",
"with",
"_segmentation",
"suffix",
"and",
"keep",
"extension",
"."
] | train | https://github.com/mjirik/io3d/blob/ccaf3e378dcc967f2565d477fc27583fd0f61fcc/io3d/datawriter.py#L48-L56 |
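The private helper only inserts a suffix before the extension; the same transformation can be sketched with the standard library (the filename is illustrative).

import os
start, ext = os.path.splitext('liver_ct.mhd')
print(start + '_segmentation' + ext)   # -> liver_ct_segmentation.mhd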
mjirik/io3d | io3d/datawriter.py | DataWriter.Write3DData | def Write3DData(self, data3d, path, filetype='auto', metadata=None, progress_callback=None, sfin=True):
"""
:param data3d: input ndarray data
:param path: output path, to specify slice number advanced formatting options (like {:06d}) can be used
Check function filename_format() for more details.
:param metadata: {'voxelsize_mm': [1, 1, 1]}
:param filetype: dcm, vtk, rawiv, image_stack
        :param progress_callback: function for progressbar f.e. callback(value, minimum, maximum)
:param sfin: Use separate file for segmentation if necessary
"""
self.orig_path = path
path = os.path.expanduser(path)
try:
d3d = data3d.pop('data3d')
metadata = data3d
data3d = d3d
except:
pass
if progress_callback is not None:
self.progress_callback = progress_callback
if filetype == 'auto':
startpath, ext = os.path.splitext(path)
filetype = ext[1:].lower()
segmentation = None
if metadata is not None and "segmentation" in metadata.keys():
segmentation_path = self.__get_segmentation_path(path)
segmentation = metadata["segmentation"]
mtd = {'voxelsize_mm': [1, 1, 1]}
if metadata is not None:
mtd.update(metadata)
metadata=mtd
if path.find('{') >= 0:
filetype = 'image_stack'
# one_file_per_slice = True
# if one_file_per_slice:
# self._one_file_per_slice(self, data3d, path, filetype, metadata)
# else:
# self._all_in_one_file(self, data3d, path, filetype, metadata)
#
# def _all_in_one_file(self, data3d, path, filetype, metadata):
if filetype in ['vtk', 'tiff', 'tif', "mhd", "nii", "raw"]:
self._write_with_sitk(path, data3d, metadata)
if sfin and segmentation is not None:
self._write_with_sitk(segmentation_path, segmentation, metadata)
elif filetype in ['dcm', 'DCM', 'dicom']:
self._write_with_sitk(path, data3d, metadata)
self._fix_sitk_bug(path, metadata)
if sfin and segmentation is not None:
self._write_with_sitk(segmentation_path, segmentation, metadata)
self._fix_sitk_bug(segmentation_path, metadata)
elif filetype in ['rawiv']:
rawN.write(path, data3d, metadata)
elif filetype in ['image_stack']:
self.save_image_stack(data3d, path, metadata)
elif filetype in ['hdf5', 'hdf', 'h5', 'he5']:
self.save_hdf5(data3d, path, metadata)
elif filetype in ['pkl', 'pklz']:
from . import misc
metadata['data3d'] = data3d
datap = metadata
misc.obj_to_file(datap, path)
else:
logger.error('Unknown filetype: "' + filetype + '"')
raise ValueError("Unknown filetype: '" + filetype + "'") | python | def Write3DData(self, data3d, path, filetype='auto', metadata=None, progress_callback=None, sfin=True):
"""
:param data3d: input ndarray data
:param path: output path, to specify slice number advanced formatting options (like {:06d}) can be used
Check function filename_format() for more details.
:param metadata: {'voxelsize_mm': [1, 1, 1]}
:param filetype: dcm, vtk, rawiv, image_stack
        :param progress_callback: function for progressbar f.e. callback(value, minimum, maximum)
:param sfin: Use separate file for segmentation if necessary
"""
self.orig_path = path
path = os.path.expanduser(path)
try:
d3d = data3d.pop('data3d')
metadata = data3d
data3d = d3d
except:
pass
if progress_callback is not None:
self.progress_callback = progress_callback
if filetype == 'auto':
startpath, ext = os.path.splitext(path)
filetype = ext[1:].lower()
segmentation = None
if metadata is not None and "segmentation" in metadata.keys():
segmentation_path = self.__get_segmentation_path(path)
segmentation = metadata["segmentation"]
mtd = {'voxelsize_mm': [1, 1, 1]}
if metadata is not None:
mtd.update(metadata)
metadata=mtd
if path.find('{') >= 0:
filetype = 'image_stack'
# one_file_per_slice = True
# if one_file_per_slice:
# self._one_file_per_slice(self, data3d, path, filetype, metadata)
# else:
# self._all_in_one_file(self, data3d, path, filetype, metadata)
#
# def _all_in_one_file(self, data3d, path, filetype, metadata):
if filetype in ['vtk', 'tiff', 'tif', "mhd", "nii", "raw"]:
self._write_with_sitk(path, data3d, metadata)
if sfin and segmentation is not None:
self._write_with_sitk(segmentation_path, segmentation, metadata)
elif filetype in ['dcm', 'DCM', 'dicom']:
self._write_with_sitk(path, data3d, metadata)
self._fix_sitk_bug(path, metadata)
if sfin and segmentation is not None:
self._write_with_sitk(segmentation_path, segmentation, metadata)
self._fix_sitk_bug(segmentation_path, metadata)
elif filetype in ['rawiv']:
rawN.write(path, data3d, metadata)
elif filetype in ['image_stack']:
self.save_image_stack(data3d, path, metadata)
elif filetype in ['hdf5', 'hdf', 'h5', 'he5']:
self.save_hdf5(data3d, path, metadata)
elif filetype in ['pkl', 'pklz']:
from . import misc
metadata['data3d'] = data3d
datap = metadata
misc.obj_to_file(datap, path)
else:
logger.error('Unknown filetype: "' + filetype + '"')
raise ValueError("Unknown filetype: '" + filetype + "'") | [
"def",
"Write3DData",
"(",
"self",
",",
"data3d",
",",
"path",
",",
"filetype",
"=",
"'auto'",
",",
"metadata",
"=",
"None",
",",
"progress_callback",
"=",
"None",
",",
"sfin",
"=",
"True",
")",
":",
"self",
".",
"orig_path",
"=",
"path",
"path",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"path",
")",
"try",
":",
"d3d",
"=",
"data3d",
".",
"pop",
"(",
"'data3d'",
")",
"metadata",
"=",
"data3d",
"data3d",
"=",
"d3d",
"except",
":",
"pass",
"if",
"progress_callback",
"is",
"not",
"None",
":",
"self",
".",
"progress_callback",
"=",
"progress_callback",
"if",
"filetype",
"==",
"'auto'",
":",
"startpath",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"path",
")",
"filetype",
"=",
"ext",
"[",
"1",
":",
"]",
".",
"lower",
"(",
")",
"segmentation",
"=",
"None",
"if",
"metadata",
"is",
"not",
"None",
"and",
"\"segmentation\"",
"in",
"metadata",
".",
"keys",
"(",
")",
":",
"segmentation_path",
"=",
"self",
".",
"__get_segmentation_path",
"(",
"path",
")",
"segmentation",
"=",
"metadata",
"[",
"\"segmentation\"",
"]",
"mtd",
"=",
"{",
"'voxelsize_mm'",
":",
"[",
"1",
",",
"1",
",",
"1",
"]",
"}",
"if",
"metadata",
"is",
"not",
"None",
":",
"mtd",
".",
"update",
"(",
"metadata",
")",
"metadata",
"=",
"mtd",
"if",
"path",
".",
"find",
"(",
"'{'",
")",
">=",
"0",
":",
"filetype",
"=",
"'image_stack'",
"# one_file_per_slice = True",
"# if one_file_per_slice:",
"# self._one_file_per_slice(self, data3d, path, filetype, metadata)",
"# else:",
"# self._all_in_one_file(self, data3d, path, filetype, metadata)",
"#",
"# def _all_in_one_file(self, data3d, path, filetype, metadata):",
"if",
"filetype",
"in",
"[",
"'vtk'",
",",
"'tiff'",
",",
"'tif'",
",",
"\"mhd\"",
",",
"\"nii\"",
",",
"\"raw\"",
"]",
":",
"self",
".",
"_write_with_sitk",
"(",
"path",
",",
"data3d",
",",
"metadata",
")",
"if",
"sfin",
"and",
"segmentation",
"is",
"not",
"None",
":",
"self",
".",
"_write_with_sitk",
"(",
"segmentation_path",
",",
"segmentation",
",",
"metadata",
")",
"elif",
"filetype",
"in",
"[",
"'dcm'",
",",
"'DCM'",
",",
"'dicom'",
"]",
":",
"self",
".",
"_write_with_sitk",
"(",
"path",
",",
"data3d",
",",
"metadata",
")",
"self",
".",
"_fix_sitk_bug",
"(",
"path",
",",
"metadata",
")",
"if",
"sfin",
"and",
"segmentation",
"is",
"not",
"None",
":",
"self",
".",
"_write_with_sitk",
"(",
"segmentation_path",
",",
"segmentation",
",",
"metadata",
")",
"self",
".",
"_fix_sitk_bug",
"(",
"segmentation_path",
",",
"metadata",
")",
"elif",
"filetype",
"in",
"[",
"'rawiv'",
"]",
":",
"rawN",
".",
"write",
"(",
"path",
",",
"data3d",
",",
"metadata",
")",
"elif",
"filetype",
"in",
"[",
"'image_stack'",
"]",
":",
"self",
".",
"save_image_stack",
"(",
"data3d",
",",
"path",
",",
"metadata",
")",
"elif",
"filetype",
"in",
"[",
"'hdf5'",
",",
"'hdf'",
",",
"'h5'",
",",
"'he5'",
"]",
":",
"self",
".",
"save_hdf5",
"(",
"data3d",
",",
"path",
",",
"metadata",
")",
"elif",
"filetype",
"in",
"[",
"'pkl'",
",",
"'pklz'",
"]",
":",
"from",
".",
"import",
"misc",
"metadata",
"[",
"'data3d'",
"]",
"=",
"data3d",
"datap",
"=",
"metadata",
"misc",
".",
"obj_to_file",
"(",
"datap",
",",
"path",
")",
"else",
":",
"logger",
".",
"error",
"(",
"'Unknown filetype: \"'",
"+",
"filetype",
"+",
"'\"'",
")",
"raise",
"ValueError",
"(",
"\"Unknown filetype: '\"",
"+",
"filetype",
"+",
"\"'\"",
")"
] | :param data3d: input ndarray data
:param path: output path, to specify slice number advanced formatting options (like {:06d}) can be used
Check function filename_format() for more details.
:param metadata: {'voxelsize_mm': [1, 1, 1]}
:param filetype: dcm, vtk, rawiv, image_stack
:param progress_callback: function for progressbar f.e. callback(value, minimum, maximum)
:param sfin: Use separate file for segmentation if necessary | [
":",
"param",
"data3d",
":",
"input",
"ndarray",
"data",
":",
"param",
"path",
":",
"output",
"path",
"to",
"specify",
"slice",
"number",
"advanced",
"formatting",
"options",
"(",
"like",
"{",
":",
"06d",
"}",
")",
"can",
"be",
"used",
"Check",
"function",
"filename_format",
"()",
"for",
"more",
"details",
".",
":",
"param",
"metadata",
":",
"{",
"voxelsize_mm",
":",
"[",
"1",
"1",
"1",
"]",
"}",
":",
"param",
"filetype",
":",
"dcm",
"vtk",
"rawiv",
"image_stack",
":",
"param",
"progress_callback",
":",
"fuction",
"for",
"progressbar",
"f",
".",
"e",
".",
"callback",
"(",
"value",
"minimum",
"maximum",
")",
":",
"param",
"sfin",
":",
"Use",
"separate",
"file",
"for",
"segmentation",
"if",
"necessary"
] | train | https://github.com/mjirik/io3d/blob/ccaf3e378dcc967f2565d477fc27583fd0f61fcc/io3d/datawriter.py#L59-L137 |
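A usage sketch with an illustrative volume; DataWriter() is constructed without arguments, as in saveOverlayToDicomCopy above, and the output path and voxel size are made up.

import numpy as np
dw = DataWriter()
data3d = np.zeros([10, 64, 64], dtype=np.int16)   # dummy CT-like volume
dw.Write3DData(data3d, '/tmp/volume.mhd', metadata={'voxelsize_mm': [5.0, 0.5, 0.5]})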
mjirik/io3d | io3d/datawriter.py | DataWriter._fix_sitk_bug | def _fix_sitk_bug(self, path, metadata):
"""
There is a bug in simple ITK for Z axis in 3D images. This is a fix
:param path:
:param metadata:
:return:
"""
ds = dicom.read_file(path)
ds.SpacingBetweenSlices = str(metadata["voxelsize_mm"][0])[:16]
dicom.write_file(path, ds) | python | def _fix_sitk_bug(self, path, metadata):
"""
There is a bug in simple ITK for Z axis in 3D images. This is a fix
:param path:
:param metadata:
:return:
"""
ds = dicom.read_file(path)
ds.SpacingBetweenSlices = str(metadata["voxelsize_mm"][0])[:16]
dicom.write_file(path, ds) | [
"def",
"_fix_sitk_bug",
"(",
"self",
",",
"path",
",",
"metadata",
")",
":",
"ds",
"=",
"dicom",
".",
"read_file",
"(",
"path",
")",
"ds",
".",
"SpacingBetweenSlices",
"=",
"str",
"(",
"metadata",
"[",
"\"voxelsize_mm\"",
"]",
"[",
"0",
"]",
")",
"[",
":",
"16",
"]",
"dicom",
".",
"write_file",
"(",
"path",
",",
"ds",
")"
] | There is a bug in simple ITK for Z axis in 3D images. This is a fix
:param path:
:param metadata:
:return: | [
"There",
"is",
"a",
"bug",
"in",
"simple",
"ITK",
"for",
"Z",
"axis",
"in",
"3D",
"images",
".",
"This",
"is",
"a",
"fix",
":",
"param",
"path",
":",
":",
"param",
"metadata",
":",
":",
"return",
":"
] | train | https://github.com/mjirik/io3d/blob/ccaf3e378dcc967f2565d477fc27583fd0f61fcc/io3d/datawriter.py#L150-L159 |
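For context, a hedged sketch of the same workaround using the current pydicom API instead of the older dicom module imported by io3d; the file name and voxel size are illustrative:

import pydicom

metadata = {"voxelsize_mm": [2.5, 0.7, 0.7]}

ds = pydicom.dcmread("volume.dcm")
# DICOM decimal strings (DS) are capped at 16 characters, hence the slice.
ds.SpacingBetweenSlices = str(metadata["voxelsize_mm"][0])[:16]
ds.save_as("volume.dcm")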
mjirik/io3d | io3d/datawriter.py | DataWriter.DataCopyWithOverlay | def DataCopyWithOverlay(self, dcmfilelist, out_dir, overlays):
"""
Function make 3D data from dicom file slices
:dcmfilelist list of sorted .dcm files
:overlays dictionary of binary overlays. {1:np.array([...]), 3:...}
:out_dir output directory
"""
dcmlist = dcmfilelist
# data3d = []
for i in range(len(dcmlist)):
onefile = dcmlist[i]
logger.info(onefile)
data = dicom.read_file(onefile)
for i_overlay in overlays.keys():
overlay3d = overlays[i_overlay]
data = self.encode_overlay_slice(data,
overlay3d[-1 - i, :, :],
i_overlay)
# construct output path
head, tail = os.path.split(os.path.normpath(onefile))
filename_out = os.path.join(out_dir, tail)
# save
data.save_as(filename_out) | python | def DataCopyWithOverlay(self, dcmfilelist, out_dir, overlays):
"""
Function make 3D data from dicom file slices
:dcmfilelist list of sorted .dcm files
:overlays dictionary of binary overlays. {1:np.array([...]), 3:...}
:out_dir output directory
"""
dcmlist = dcmfilelist
# data3d = []
for i in range(len(dcmlist)):
onefile = dcmlist[i]
logger.info(onefile)
data = dicom.read_file(onefile)
for i_overlay in overlays.keys():
overlay3d = overlays[i_overlay]
data = self.encode_overlay_slice(data,
overlay3d[-1 - i, :, :],
i_overlay)
# construct output path
head, tail = os.path.split(os.path.normpath(onefile))
filename_out = os.path.join(out_dir, tail)
# save
data.save_as(filename_out) | [
"def",
"DataCopyWithOverlay",
"(",
"self",
",",
"dcmfilelist",
",",
"out_dir",
",",
"overlays",
")",
":",
"dcmlist",
"=",
"dcmfilelist",
"# data3d = []",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"dcmlist",
")",
")",
":",
"onefile",
"=",
"dcmlist",
"[",
"i",
"]",
"logger",
".",
"info",
"(",
"onefile",
")",
"data",
"=",
"dicom",
".",
"read_file",
"(",
"onefile",
")",
"for",
"i_overlay",
"in",
"overlays",
".",
"keys",
"(",
")",
":",
"overlay3d",
"=",
"overlays",
"[",
"i_overlay",
"]",
"data",
"=",
"self",
".",
"encode_overlay_slice",
"(",
"data",
",",
"overlay3d",
"[",
"-",
"1",
"-",
"i",
",",
":",
",",
":",
"]",
",",
"i_overlay",
")",
"# construct output path",
"head",
",",
"tail",
"=",
"os",
".",
"path",
".",
"split",
"(",
"os",
".",
"path",
".",
"normpath",
"(",
"onefile",
")",
")",
"filename_out",
"=",
"os",
".",
"path",
".",
"join",
"(",
"out_dir",
",",
"tail",
")",
"# save",
"data",
".",
"save_as",
"(",
"filename_out",
")"
] | Function make 3D data from dicom file slices
:dcmfilelist list of sorted .dcm files
:overlays dictionary of binary overlays. {1:np.array([...]), 3:...}
:out_dir output directory | [
"Function",
"make",
"3D",
"data",
"from",
"dicom",
"file",
"slices"
] | train | https://github.com/mjirik/io3d/blob/ccaf3e378dcc967f2565d477fc27583fd0f61fcc/io3d/datawriter.py#L208-L237 |
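A hedged usage sketch for DataCopyWithOverlay; the directory names and the overlay mask are illustrative, and the no-argument DataWriter constructor is an assumption. Note that the overlay volume is indexed back to front (overlay3d[-1 - i, :, :]), so its slice order must match the reversed order of the sorted DICOM list:

import glob
import numpy as np
from io3d.datawriter import DataWriter

dcm_files = sorted(glob.glob("input_series/*.dcm"))
n_slices = len(dcm_files)

# Overlay group 1: one binary mask slice per DICOM file.
overlays = {1: np.zeros((n_slices, 512, 512), dtype=np.uint8)}
overlays[1][:, 100:200, 150:300] = 1

DataWriter().DataCopyWithOverlay(dcm_files, "output_series", overlays)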
mjirik/io3d | io3d/datawriter.py | DataWriter.add_overlay_to_slice_file | def add_overlay_to_slice_file(
self,
filename,
overlay,
i_overlay,
filename_out=None
):
""" Function adds overlay to existing file.
"""
if filename_out is None:
filename_out = filename
filename = op.expanduser(filename)
data = dicom.read_file(filename)
data = self.encode_overlay_slice(data, overlay, i_overlay)
data.save_as(filename_out) | python | def add_overlay_to_slice_file(
self,
filename,
overlay,
i_overlay,
filename_out=None
):
""" Function adds overlay to existing file.
"""
if filename_out is None:
filename_out = filename
filename = op.expanduser(filename)
data = dicom.read_file(filename)
data = self.encode_overlay_slice(data, overlay, i_overlay)
data.save_as(filename_out) | [
"def",
"add_overlay_to_slice_file",
"(",
"self",
",",
"filename",
",",
"overlay",
",",
"i_overlay",
",",
"filename_out",
"=",
"None",
")",
":",
"if",
"filename_out",
"is",
"None",
":",
"filename_out",
"=",
"filename",
"filename",
"=",
"op",
".",
"expanduser",
"(",
"filename",
")",
"data",
"=",
"dicom",
".",
"read_file",
"(",
"filename",
")",
"data",
"=",
"self",
".",
"encode_overlay_slice",
"(",
"data",
",",
"overlay",
",",
"i_overlay",
")",
"data",
".",
"save_as",
"(",
"filename_out",
")"
] | Function adds overlay to existing file. | [
"Function",
"adds",
"overlay",
"to",
"existing",
"file",
"."
] | train | https://github.com/mjirik/io3d/blob/ccaf3e378dcc967f2565d477fc27583fd0f61fcc/io3d/datawriter.py#L240-L254 |
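The single-slice variant can be used in much the same way; a hedged sketch with illustrative paths and mask:

import numpy as np
from io3d.datawriter import DataWriter

mask = np.zeros((512, 512), dtype=np.uint8)
mask[100:200, 150:300] = 1

# Writes the overlay into group 1 and saves to a new file; omitting
# filename_out would overwrite the input slice in place.
DataWriter().add_overlay_to_slice_file(
    "slice_0001.dcm", mask, 1, filename_out="slice_0001_overlay.dcm")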
mjirik/io3d | io3d/deprecation.py | deprecated | def deprecated(instructions):
"""
Flags a method as deprecated.
:param instructions: A human-friendly string of instructions, such as: 'Please migrate to add_proxy() ASAP.'
:return: DeprecatedWarning
"""
def decorator(func):
"""This is a decorator which can be used to mark functions as deprecated.
It will result in a warning being emitted when the function is used.
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
message = 'Call to deprecated function {}. {}'.format(func.__name__,
instructions)
frame = inspect.currentframe().f_back
warnings.warn_explicit(message,
category=DeprecatedWarning,
filename=inspect.getfile(frame.f_code),
lineno=frame.f_lineno)
return func(*args, **kwargs)
return wrapper
return decorator | python | def deprecated(instructions):
"""
Flags a method as deprecated.
:param instructions: A human-friendly string of instructions, such as: 'Please migrate to add_proxy() ASAP.'
:return: DeprecatedWarning
"""
def decorator(func):
"""This is a decorator which can be used to mark functions as deprecated.
It will result in a warning being emitted when the function is used.
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
message = 'Call to deprecated function {}. {}'.format(func.__name__,
instructions)
frame = inspect.currentframe().f_back
warnings.warn_explicit(message,
category=DeprecatedWarning,
filename=inspect.getfile(frame.f_code),
lineno=frame.f_lineno)
return func(*args, **kwargs)
return wrapper
return decorator | [
"def",
"deprecated",
"(",
"instructions",
")",
":",
"def",
"decorator",
"(",
"func",
")",
":",
"\"\"\"This is a decorator which can be used to mark functions as deprecated.\n\n It will result in a warning being emitted when the function is used.\n \"\"\"",
"@",
"functools",
".",
"wraps",
"(",
"func",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"message",
"=",
"'Call to deprecated function {}. {}'",
".",
"format",
"(",
"func",
".",
"__name__",
",",
"instructions",
")",
"frame",
"=",
"inspect",
".",
"currentframe",
"(",
")",
".",
"f_back",
"warnings",
".",
"warn_explicit",
"(",
"message",
",",
"category",
"=",
"DeprecatedWarning",
",",
"filename",
"=",
"inspect",
".",
"getfile",
"(",
"frame",
".",
"f_code",
")",
",",
"lineno",
"=",
"frame",
".",
"f_lineno",
")",
"return",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"wrapper",
"return",
"decorator"
] | Flags a method as deprecated.
:param instructions: A human-friendly string of instructions, such as: 'Please migrate to add_proxy() ASAP.'
:return: DeprecatedWarning | [
"Flags",
"a",
"method",
"as",
"deprecated",
"."
] | train | https://github.com/mjirik/io3d/blob/ccaf3e378dcc967f2565d477fc27583fd0f61fcc/io3d/deprecation.py#L15-L38 |
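A hedged usage sketch for the decorator above; the wrapped function is illustrative. The warning is reported at the caller's file and line, which is why the decorator walks one frame back before calling warn_explicit:

import warnings
from io3d.deprecation import deprecated

@deprecated("Please migrate to add_proxy() ASAP.")
def add_proxy_legacy(host, port):
    return "{}:{}".format(host, port)

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    add_proxy_legacy("localhost", 8080)

print(caught[0].message)
# Call to deprecated function add_proxy_legacy. Please migrate to add_proxy() ASAP.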
SetBased/py-stratum | pystratum/style/PyStratumStyle.py | PyStratumStyle.log_verbose | def log_verbose(self, message):
"""
Logs a message only when logging level is verbose.
:param str|list[str] message: The message.
"""
if self.get_verbosity() >= Output.VERBOSITY_VERBOSE:
self.writeln(message) | python | def log_verbose(self, message):
"""
Logs a message only when logging level is verbose.
:param str|list[str] message: The message.
"""
if self.get_verbosity() >= Output.VERBOSITY_VERBOSE:
self.writeln(message) | [
"def",
"log_verbose",
"(",
"self",
",",
"message",
")",
":",
"if",
"self",
".",
"get_verbosity",
"(",
")",
">=",
"Output",
".",
"VERBOSITY_VERBOSE",
":",
"self",
".",
"writeln",
"(",
"message",
")"
] | Logs a message only when logging level is verbose.
:param str|list[str] message: The message. | [
"Logs",
"a",
"message",
"only",
"when",
"logging",
"level",
"is",
"verbose",
"."
] | train | https://github.com/SetBased/py-stratum/blob/7c5ffaa2fdd03f865832a5190b5897ff2c0e3155/pystratum/style/PyStratumStyle.py#L50-L57 |
SetBased/py-stratum | pystratum/style/PyStratumStyle.py | PyStratumStyle.log_very_verbose | def log_very_verbose(self, message):
"""
Logs a message only when logging level is very verbose.
:param str|list[str] message: The message.
"""
if self.get_verbosity() >= Output.VERBOSITY_VERY_VERBOSE:
self.writeln(message) | python | def log_very_verbose(self, message):
"""
Logs a message only when logging level is very verbose.
:param str|list[str] message: The message.
"""
if self.get_verbosity() >= Output.VERBOSITY_VERY_VERBOSE:
self.writeln(message) | [
"def",
"log_very_verbose",
"(",
"self",
",",
"message",
")",
":",
"if",
"self",
".",
"get_verbosity",
"(",
")",
">=",
"Output",
".",
"VERBOSITY_VERY_VERBOSE",
":",
"self",
".",
"writeln",
"(",
"message",
")"
] | Logs a message only when logging level is very verbose.
:param str|list[str] message: The message. | [
"Logs",
"a",
"message",
"only",
"when",
"logging",
"level",
"is",
"very",
"verbose",
"."
] | train | https://github.com/SetBased/py-stratum/blob/7c5ffaa2fdd03f865832a5190b5897ff2c0e3155/pystratum/style/PyStratumStyle.py#L60-L67 |
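Both helpers above gate a writeln() call on the configured verbosity. A self-contained sketch of that behaviour with a minimal stand-in object (the real PyStratumStyle inherits its verbosity constants and writeln() from the cleo-style output class it extends, so the constant values below are illustrative):

class StubStyle:
    VERBOSITY_NORMAL, VERBOSITY_VERBOSE, VERBOSITY_VERY_VERBOSE = 1, 2, 3

    def __init__(self, verbosity):
        self._verbosity = verbosity

    def get_verbosity(self):
        return self._verbosity

    def writeln(self, message):
        print(message)

    # Same gating as log_verbose() / log_very_verbose() above.
    def log_verbose(self, message):
        if self.get_verbosity() >= self.VERBOSITY_VERBOSE:
            self.writeln(message)

    def log_very_verbose(self, message):
        if self.get_verbosity() >= self.VERBOSITY_VERY_VERBOSE:
            self.writeln(message)

style = StubStyle(StubStyle.VERBOSITY_VERBOSE)
style.log_verbose("printed at -v")
style.log_very_verbose("suppressed until -vv")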
PlaidWeb/Pushl | pushl/feeds.py | get_feed | async def get_feed(config, url):
""" Get a feed
Arguments:
config -- the configuration
url -- The URL of the feed
retval -- a tuple of feed,previous_version,changed
"""
LOGGER.debug("++WAIT: cache get feed %s", url)
previous = config.cache.get(
'feed', url, schema_version=SCHEMA_VERSION) if config.cache else None
LOGGER.debug("++DONE: cache get feed %s", url)
headers = previous.caching if previous else None
LOGGER.debug("++WAIT: request get %s", url)
request = await utils.retry_get(config, url, headers=headers)
LOGGER.debug("++DONE: request get %s", url)
if not request or not request.success:
LOGGER.error("Could not get feed %s: %d",
url,
request.status if request else -1)
return None, previous, False
if request.cached:
LOGGER.debug("%s: Reusing cached version", url)
return previous, previous, False
current = Feed(request)
if config.cache:
LOGGER.debug("%s: Saving to cache", url)
LOGGER.debug("++WAIT: cache set feed %s", url)
config.cache.set('feed', url, current)
LOGGER.debug("++DONE: cache set feed %s", url)
LOGGER.debug("%s: Returning new content", url)
return current, previous, (not previous
or current.digest != previous.digest
or current.status != previous.status) | python | async def get_feed(config, url):
""" Get a feed
Arguments:
config -- the configuration
url -- The URL of the feed
retval -- a tuple of feed,previous_version,changed
"""
LOGGER.debug("++WAIT: cache get feed %s", url)
previous = config.cache.get(
'feed', url, schema_version=SCHEMA_VERSION) if config.cache else None
LOGGER.debug("++DONE: cache get feed %s", url)
headers = previous.caching if previous else None
LOGGER.debug("++WAIT: request get %s", url)
request = await utils.retry_get(config, url, headers=headers)
LOGGER.debug("++DONE: request get %s", url)
if not request or not request.success:
LOGGER.error("Could not get feed %s: %d",
url,
request.status if request else -1)
return None, previous, False
if request.cached:
LOGGER.debug("%s: Reusing cached version", url)
return previous, previous, False
current = Feed(request)
if config.cache:
LOGGER.debug("%s: Saving to cache", url)
LOGGER.debug("++WAIT: cache set feed %s", url)
config.cache.set('feed', url, current)
LOGGER.debug("++DONE: cache set feed %s", url)
LOGGER.debug("%s: Returning new content", url)
return current, previous, (not previous
or current.digest != previous.digest
or current.status != previous.status) | [
"async",
"def",
"get_feed",
"(",
"config",
",",
"url",
")",
":",
"LOGGER",
".",
"debug",
"(",
"\"++WAIT: cache get feed %s\"",
",",
"url",
")",
"previous",
"=",
"config",
".",
"cache",
".",
"get",
"(",
"'feed'",
",",
"url",
",",
"schema_version",
"=",
"SCHEMA_VERSION",
")",
"if",
"config",
".",
"cache",
"else",
"None",
"LOGGER",
".",
"debug",
"(",
"\"++DONE: cache get feed %s\"",
",",
"url",
")",
"headers",
"=",
"previous",
".",
"caching",
"if",
"previous",
"else",
"None",
"LOGGER",
".",
"debug",
"(",
"\"++WAIT: request get %s\"",
",",
"url",
")",
"request",
"=",
"await",
"utils",
".",
"retry_get",
"(",
"config",
",",
"url",
",",
"headers",
"=",
"headers",
")",
"LOGGER",
".",
"debug",
"(",
"\"++DONE: request get %s\"",
",",
"url",
")",
"if",
"not",
"request",
"or",
"not",
"request",
".",
"success",
":",
"LOGGER",
".",
"error",
"(",
"\"Could not get feed %s: %d\"",
",",
"url",
",",
"request",
".",
"status",
"if",
"request",
"else",
"-",
"1",
")",
"return",
"None",
",",
"previous",
",",
"False",
"if",
"request",
".",
"cached",
":",
"LOGGER",
".",
"debug",
"(",
"\"%s: Reusing cached version\"",
",",
"url",
")",
"return",
"previous",
",",
"previous",
",",
"False",
"current",
"=",
"Feed",
"(",
"request",
")",
"if",
"config",
".",
"cache",
":",
"LOGGER",
".",
"debug",
"(",
"\"%s: Saving to cache\"",
",",
"url",
")",
"LOGGER",
".",
"debug",
"(",
"\"++WAIT: cache set feed %s\"",
",",
"url",
")",
"config",
".",
"cache",
".",
"set",
"(",
"'feed'",
",",
"url",
",",
"current",
")",
"LOGGER",
".",
"debug",
"(",
"\"++DONE: cache set feed %s\"",
",",
"url",
")",
"LOGGER",
".",
"debug",
"(",
"\"%s: Returning new content\"",
",",
"url",
")",
"return",
"current",
",",
"previous",
",",
"(",
"not",
"previous",
"or",
"current",
".",
"digest",
"!=",
"previous",
".",
"digest",
"or",
"current",
".",
"status",
"!=",
"previous",
".",
"status",
")"
] | Get a feed
Arguments:
config -- the configuration
url -- The URL of the feed
retval -- a tuple of feed,previous_version,changed | [
"Get",
"a",
"feed"
] | train | https://github.com/PlaidWeb/Pushl/blob/5ea92275c37a6c1989e3d5f53e26c6e0ebfb9a8c/pushl/feeds.py#L95-L137 |
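A hedged sketch of consuming get_feed(); config stands for the pushl configuration object used throughout the package (it only needs the .cache attribute and whatever utils.retry_get expects), and the feed URL is illustrative:

import asyncio
from pushl import feeds

async def process(config, url):
    feed, previous, changed = await feeds.get_feed(config, url)
    if feed is None:
        print("could not fetch", url)
    elif not changed:
        print("cached copy is still current for", url)
    else:
        # New or modified content; previous may be None on the first fetch.
        print("feed changed, new digest:", feed.digest)

# asyncio.run(process(config, "https://example.com/feed.xml"))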
PlaidWeb/Pushl | pushl/feeds.py | Feed.archive_namespace | def archive_namespace(self):
""" Returns the known namespace of the RFC5005 extension, if any """
try:
for ns_prefix, url in self.feed.namespaces.items():
if url == 'http://purl.org/syndication/history/1.0':
return ns_prefix
except AttributeError:
pass
return None | python | def archive_namespace(self):
""" Returns the known namespace of the RFC5005 extension, if any """
try:
for ns_prefix, url in self.feed.namespaces.items():
if url == 'http://purl.org/syndication/history/1.0':
return ns_prefix
except AttributeError:
pass
return None | [
"def",
"archive_namespace",
"(",
"self",
")",
":",
"try",
":",
"for",
"ns_prefix",
",",
"url",
"in",
"self",
".",
"feed",
".",
"namespaces",
".",
"items",
"(",
")",
":",
"if",
"url",
"==",
"'http://purl.org/syndication/history/1.0'",
":",
"return",
"ns_prefix",
"except",
"AttributeError",
":",
"pass",
"return",
"None"
] | Returns the known namespace of the RFC5005 extension, if any | [
"Returns",
"the",
"known",
"namespace",
"of",
"the",
"RFC5005",
"extension",
"if",
"any"
] | train | https://github.com/PlaidWeb/Pushl/blob/5ea92275c37a6c1989e3d5f53e26c6e0ebfb9a8c/pushl/feeds.py#L33-L41 |
PlaidWeb/Pushl | pushl/feeds.py | Feed.entry_links | def entry_links(self):
""" Given a parsed feed, return the links to its entries, including ones
which disappeared (as a quick-and-dirty way to support deletions)
"""
return {entry['link'] for entry in self.feed.entries if entry and entry.get('link')} | python | def entry_links(self):
""" Given a parsed feed, return the links to its entries, including ones
which disappeared (as a quick-and-dirty way to support deletions)
"""
return {entry['link'] for entry in self.feed.entries if entry and entry.get('link')} | [
"def",
"entry_links",
"(",
"self",
")",
":",
"return",
"{",
"entry",
"[",
"'link'",
"]",
"for",
"entry",
"in",
"self",
".",
"feed",
".",
"entries",
"if",
"entry",
"and",
"entry",
".",
"get",
"(",
"'link'",
")",
"}"
] | Given a parsed feed, return the links to its entries, including ones
which disappeared (as a quick-and-dirty way to support deletions) | [
"Given",
"a",
"parsed",
"feed",
"return",
"the",
"links",
"to",
"its",
"entries",
"including",
"ones",
"which",
"disappeared",
"(",
"as",
"a",
"quick",
"-",
"and",
"-",
"dirty",
"way",
"to",
"support",
"deletions",
")"
] | train | https://github.com/PlaidWeb/Pushl/blob/5ea92275c37a6c1989e3d5f53e26c6e0ebfb9a8c/pushl/feeds.py#L44-L48 |
PlaidWeb/Pushl | pushl/feeds.py | Feed.is_archive | def is_archive(self):
""" Given a parsed feed, returns True if this is an archive feed """
ns_prefix = self.archive_namespace
if ns_prefix:
if ns_prefix + '_archive' in self.feed.feed:
# This is declared to be an archive view
return True
if ns_prefix + '_current' in self.feed.feed:
# This is declared to be the current view
return False
# Either we don't have the namespace, or the view wasn't declared.
rels = collections.defaultdict(list)
for link in self.feed.feed.links:
rels[link.rel].append(link.href)
return ('current' in rels and
('self' not in rels or
rels['self'] != rels['current'])) | python | def is_archive(self):
""" Given a parsed feed, returns True if this is an archive feed """
ns_prefix = self.archive_namespace
if ns_prefix:
if ns_prefix + '_archive' in self.feed.feed:
# This is declared to be an archive view
return True
if ns_prefix + '_current' in self.feed.feed:
# This is declared to be the current view
return False
# Either we don't have the namespace, or the view wasn't declared.
rels = collections.defaultdict(list)
for link in self.feed.feed.links:
rels[link.rel].append(link.href)
return ('current' in rels and
('self' not in rels or
rels['self'] != rels['current'])) | [
"def",
"is_archive",
"(",
"self",
")",
":",
"ns_prefix",
"=",
"self",
".",
"archive_namespace",
"if",
"ns_prefix",
":",
"if",
"ns_prefix",
"+",
"'_archive'",
"in",
"self",
".",
"feed",
".",
"feed",
":",
"# This is declared to be an archive view",
"return",
"True",
"if",
"ns_prefix",
"+",
"'_current'",
"in",
"self",
".",
"feed",
".",
"feed",
":",
"# This is declared to be the current view",
"return",
"False",
"# Either we don't have the namespace, or the view wasn't declared.",
"rels",
"=",
"collections",
".",
"defaultdict",
"(",
"list",
")",
"for",
"link",
"in",
"self",
".",
"feed",
".",
"feed",
".",
"links",
":",
"rels",
"[",
"link",
".",
"rel",
"]",
".",
"append",
"(",
"link",
".",
"href",
")",
"return",
"(",
"'current'",
"in",
"rels",
"and",
"(",
"'self'",
"not",
"in",
"rels",
"or",
"rels",
"[",
"'self'",
"]",
"!=",
"rels",
"[",
"'current'",
"]",
")",
")"
] | Given a parsed feed, returns True if this is an archive feed | [
"Given",
"a",
"parsed",
"feed",
"returns",
"True",
"if",
"this",
"is",
"an",
"archive",
"feed"
] | train | https://github.com/PlaidWeb/Pushl/blob/5ea92275c37a6c1989e3d5f53e26c6e0ebfb9a8c/pushl/feeds.py#L51-L70 |
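A standalone sketch of the same RFC 5005 check using feedparser directly, mirroring the precedence in archive_namespace and is_archive above: an explicit fh:archive / fh:current marker wins, otherwise a rel="current" link that differs from rel="self" marks an archive page. The feed URL is illustrative:

import collections
import feedparser

def looks_like_archive(parsed):
    ns_prefix = None
    for prefix, url in parsed.namespaces.items():
        if url == "http://purl.org/syndication/history/1.0":
            ns_prefix = prefix
    if ns_prefix:
        if ns_prefix + "_archive" in parsed.feed:
            return True
        if ns_prefix + "_current" in parsed.feed:
            return False
    rels = collections.defaultdict(list)
    for link in parsed.feed.get("links", []):
        rels[link.rel].append(link.href)
    return "current" in rels and ("self" not in rels or
                                  rels["self"] != rels["current"])

parsed = feedparser.parse("https://example.com/feed-archive-1.xml")
print(looks_like_archive(parsed))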
PlaidWeb/Pushl | pushl/feeds.py | Feed.update_websub | async def update_websub(self, config, hub):
""" Update WebSub hub to know about this feed """
try:
LOGGER.debug("WebSub: Notifying %s of %s", hub, self.url)
request = await utils.retry_post(
config,
hub,
data={
'hub.mode': 'publish',
'hub.url': self.url
})
if request.success:
LOGGER.info("%s: WebSub notification sent to %s",
self.url, hub)
else:
LOGGER.warning("%s: Hub %s returned status code %s: %s", self.url, hub,
request.status, request.text)
except Exception as err: # pylint:disable=broad-except
LOGGER.warning("WebSub %s: got %s: %s",
hub, err.__class__.__name__, err) | python | async def update_websub(self, config, hub):
""" Update WebSub hub to know about this feed """
try:
LOGGER.debug("WebSub: Notifying %s of %s", hub, self.url)
request = await utils.retry_post(
config,
hub,
data={
'hub.mode': 'publish',
'hub.url': self.url
})
if request.success:
LOGGER.info("%s: WebSub notification sent to %s",
self.url, hub)
else:
LOGGER.warning("%s: Hub %s returned status code %s: %s", self.url, hub,
request.status, request.text)
except Exception as err: # pylint:disable=broad-except
LOGGER.warning("WebSub %s: got %s: %s",
hub, err.__class__.__name__, err) | [
"async",
"def",
"update_websub",
"(",
"self",
",",
"config",
",",
"hub",
")",
":",
"try",
":",
"LOGGER",
".",
"debug",
"(",
"\"WebSub: Notifying %s of %s\"",
",",
"hub",
",",
"self",
".",
"url",
")",
"request",
"=",
"await",
"utils",
".",
"retry_post",
"(",
"config",
",",
"hub",
",",
"data",
"=",
"{",
"'hub.mode'",
":",
"'publish'",
",",
"'hub.url'",
":",
"self",
".",
"url",
"}",
")",
"if",
"request",
".",
"success",
":",
"LOGGER",
".",
"info",
"(",
"\"%s: WebSub notification sent to %s\"",
",",
"self",
".",
"url",
",",
"hub",
")",
"else",
":",
"LOGGER",
".",
"warning",
"(",
"\"%s: Hub %s returned status code %s: %s\"",
",",
"self",
".",
"url",
",",
"hub",
",",
"request",
".",
"status",
",",
"request",
".",
"text",
")",
"except",
"Exception",
"as",
"err",
":",
"# pylint:disable=broad-except",
"LOGGER",
".",
"warning",
"(",
"\"WebSub %s: got %s: %s\"",
",",
"hub",
",",
"err",
".",
"__class__",
".",
"__name__",
",",
"err",
")"
] | Update WebSub hub to know about this feed | [
"Update",
"WebSub",
"hub",
"to",
"know",
"about",
"this",
"feed"
] | train | https://github.com/PlaidWeb/Pushl/blob/5ea92275c37a6c1989e3d5f53e26c6e0ebfb9a8c/pushl/feeds.py#L72-L92 |
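For comparison, a hedged standalone sketch of the same WebSub publish ping sent with aiohttp; whether that matches what utils.retry_post does internally is an assumption, and the hub and feed URLs are illustrative:

import asyncio
import aiohttp

async def notify_hub(hub_url, feed_url):
    async with aiohttp.ClientSession() as session:
        async with session.post(hub_url, data={"hub.mode": "publish",
                                               "hub.url": feed_url}) as resp:
            if 200 <= resp.status < 300:
                print("hub accepted the notification")
            else:
                print("hub returned", resp.status, await resp.text())

# asyncio.run(notify_hub("https://pubsubhubbub.appspot.com/",
#                        "https://example.com/feed.xml"))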
SetBased/py-stratum | pystratum/DocBlockReflection.py | DocBlockReflection.get_tags | def get_tags(self, name):
"""
Returns a list of tags.
@param str name: The name of the tag.
:rtype: list[str]
"""
tags = list()
for tag in self._tags:
if tag[0] == name:
tags.append(tag[1])
return tags | python | def get_tags(self, name):
"""
Returns a list of tags.
@param str name: The name of the tag.
:rtype: list[str]
"""
tags = list()
for tag in self._tags:
if tag[0] == name:
tags.append(tag[1])
return tags | [
"def",
"get_tags",
"(",
"self",
",",
"name",
")",
":",
"tags",
"=",
"list",
"(",
")",
"for",
"tag",
"in",
"self",
".",
"_tags",
":",
"if",
"tag",
"[",
"0",
"]",
"==",
"name",
":",
"tags",
".",
"append",
"(",
"tag",
"[",
"1",
"]",
")",
"return",
"tags"
] | Returns a list of tags.
@param str name: The name of the tag.
:rtype: list[str] | [
"Returns",
"a",
"list",
"of",
"tags",
"."
] | train | https://github.com/SetBased/py-stratum/blob/7c5ffaa2fdd03f865832a5190b5897ff2c0e3155/pystratum/DocBlockReflection.py#L68-L81 |
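A small self-contained sketch of the filtering that get_tags() performs; the real class parses a docblock into (name, text) pairs stored on self._tags, and the pairs below are illustrative:

tags = [("param", "@param p_name  The name of the user."),
        ("param", "@param p_email The email address of the user."),
        ("return", "@return int The ID of the new user.")]

def get_tags(name):
    # Same filter as DocBlockReflection.get_tags(): keep the text of matching tags.
    return [text for tag_name, text in tags if tag_name == name]

print(get_tags("param"))   # both @param texts
print(get_tags("return"))  # the single @return text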
SetBased/py-stratum | pystratum/DocBlockReflection.py | DocBlockReflection.__remove_leading_empty_lines | def __remove_leading_empty_lines(lines):
"""
Removes leading empty lines from a list of lines.
:param list[str] lines: The lines.
"""
tmp = list()
empty = True
for i in range(0, len(lines)):
empty = empty and lines[i] == ''
if not empty:
tmp.append(lines[i])
return tmp | python | def __remove_leading_empty_lines(lines):
"""
Removes leading empty lines from a list of lines.
:param list[str] lines: The lines.
"""
tmp = list()
empty = True
for i in range(0, len(lines)):
empty = empty and lines[i] == ''
if not empty:
tmp.append(lines[i])
return tmp | [
"def",
"__remove_leading_empty_lines",
"(",
"lines",
")",
":",
"tmp",
"=",
"list",
"(",
")",
"empty",
"=",
"True",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"lines",
")",
")",
":",
"empty",
"=",
"empty",
"and",
"lines",
"[",
"i",
"]",
"==",
"''",
"if",
"not",
"empty",
":",
"tmp",
".",
"append",
"(",
"lines",
"[",
"i",
"]",
")",
"return",
"tmp"
] | Removes leading empty lines from a list of lines.
:param list[str] lines: The lines. | [
"Removes",
"leading",
"empty",
"lines",
"from",
"a",
"list",
"of",
"lines",
"."
] | train | https://github.com/SetBased/py-stratum/blob/7c5ffaa2fdd03f865832a5190b5897ff2c0e3155/pystratum/DocBlockReflection.py#L94-L107 |
SetBased/py-stratum | pystratum/DocBlockReflection.py | DocBlockReflection.__remove_trailing_empty_lines | def __remove_trailing_empty_lines(lines):
"""
Removes leading empty lines from a list of lines.
:param list[str] lines: The lines.
"""
lines.reverse()
tmp = DocBlockReflection.__remove_leading_empty_lines(lines)
lines.reverse()
tmp.reverse()
return tmp | python | def __remove_trailing_empty_lines(lines):
"""
Removes leading empty lines from a list of lines.
:param list[str] lines: The lines.
"""
lines.reverse()
tmp = DocBlockReflection.__remove_leading_empty_lines(lines)
lines.reverse()
tmp.reverse()
return tmp | [
"def",
"__remove_trailing_empty_lines",
"(",
"lines",
")",
":",
"lines",
".",
"reverse",
"(",
")",
"tmp",
"=",
"DocBlockReflection",
".",
"__remove_leading_empty_lines",
"(",
"lines",
")",
"lines",
".",
"reverse",
"(",
")",
"tmp",
".",
"reverse",
"(",
")",
"return",
"tmp"
] | Removes leading empty lines from a list of lines.
:param list[str] lines: The lines. | [
"Removes",
"leading",
"empty",
"lines",
"from",
"a",
"list",
"of",
"lines",
"."
] | train | https://github.com/SetBased/py-stratum/blob/7c5ffaa2fdd03f865832a5190b5897ff2c0e3155/pystratum/DocBlockReflection.py#L111-L122 |
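A self-contained illustration of the two helpers above: the trailing variant simply reverses the list, reuses the leading-removal logic, and reverses back (its upstream docstring still says "leading", which looks like a copy-paste slip). The version below is a pure-function sketch, whereas the original reverses the input list in place:

def remove_leading_empty_lines(lines):
    out, still_empty = [], True
    for line in lines:
        still_empty = still_empty and line == ""
        if not still_empty:
            out.append(line)
    return out

def remove_trailing_empty_lines(lines):
    # Reverse, strip what are now the leading empties, reverse back.
    return list(reversed(remove_leading_empty_lines(list(reversed(lines)))))

print(remove_leading_empty_lines(["", "", "a", "", "b", ""]))   # ['a', '', 'b', '']
print(remove_trailing_empty_lines(["", "a", "", "b", "", ""]))  # ['', 'a', '', 'b']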