nwo (string, 5-91 chars) | sha (string, 40 chars) | path (string, 5-174 chars) | language (string, 1 class) | identifier (string, 1-120 chars) | parameters (string, 0-3.15k chars) | argument_list (string, 1 class) | return_statement (string, 0-24.1k chars) | docstring (string, 0-27.3k chars) | docstring_summary (string, 0-13.8k chars) | docstring_tokens (sequence) | function (string, 22-139k chars) | function_tokens (sequence) | url (string, 87-283 chars)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
PaddlePaddle/Research | 2da0bd6c72d60e9df403aff23a7802779561c4a1 | CV/CLPI-Collaborative-Learning-for-Diabetic-Retinopathy-Grading/opencv_transforms.py | python | RandomRotation.__call__ | (self, img) | return F.rotate(img, angle, self.resample, self.expand, self.center) | img (numpy ndarray): Image to be rotated.
Returns:
numpy ndarray: Rotated image. | img (numpy ndarray): Image to be rotated.
Returns:
numpy ndarray: Rotated image. | [
"img",
"(",
"numpy",
"ndarray",
")",
":",
"Image",
"to",
"be",
"rotated",
".",
"Returns",
":",
"numpy",
"ndarray",
":",
"Rotated",
"image",
"."
] | def __call__(self, img):
"""
img (numpy ndarray): Image to be rotated.
Returns:
numpy ndarray: Rotated image.
"""
angle = self.get_params(self.degrees)
return F.rotate(img, angle, self.resample, self.expand, self.center) | [
"def",
"__call__",
"(",
"self",
",",
"img",
")",
":",
"angle",
"=",
"self",
".",
"get_params",
"(",
"self",
".",
"degrees",
")",
"return",
"F",
".",
"rotate",
"(",
"img",
",",
"angle",
",",
"self",
".",
"resample",
",",
"self",
".",
"expand",
",",
"self",
".",
"center",
")"
] | https://github.com/PaddlePaddle/Research/blob/2da0bd6c72d60e9df403aff23a7802779561c4a1/CV/CLPI-Collaborative-Learning-for-Diabetic-Retinopathy-Grading/opencv_transforms.py#L691-L700 |
|
enthought/chaco | 0907d1dedd07a499202efbaf2fe2a4e51b4c8e5f | chaco/plots/contour/contour_poly_plot.py | python | ContourPolyPlot._update_polys | (self) | Updates the cache of contour polygons | Updates the cache of contour polygons | [
"Updates",
"the",
"cache",
"of",
"contour",
"polygons"
] | def _update_polys(self):
""" Updates the cache of contour polygons """
if self.value.is_masked():
# XXX masked data and get_data_mask not currently implemented
data, mask = self.value.get_data_mask()
mask &= isfinite(data)
else:
data = self.value.get_data()
mask = isfinite(data)
x_data, y_data = self.index.get_data()
xs = x_data.get_data()
ys = y_data.get_data()
xg, yg = meshgrid(xs, ys)
# note: contour wants mask True in invalid locations
c = Cntr(xg, yg, data, ~mask)
self._cached_contours = {}
for i in range(len(self._levels) - 1):
key = (self._levels[i], self._levels[i + 1])
self._cached_polys[key] = []
polys = c.trace(*key)
for poly in polys:
self._cached_polys[key].append(transpose(poly))
self._poly_cache_valid = True | [
"def",
"_update_polys",
"(",
"self",
")",
":",
"if",
"self",
".",
"value",
".",
"is_masked",
"(",
")",
":",
"# XXX masked data and get_data_mask not currently implemented",
"data",
",",
"mask",
"=",
"self",
".",
"value",
".",
"get_data_mask",
"(",
")",
"mask",
"&=",
"isfinite",
"(",
"data",
")",
"else",
":",
"data",
"=",
"self",
".",
"value",
".",
"get_data",
"(",
")",
"mask",
"=",
"isfinite",
"(",
"data",
")",
"x_data",
",",
"y_data",
"=",
"self",
".",
"index",
".",
"get_data",
"(",
")",
"xs",
"=",
"x_data",
".",
"get_data",
"(",
")",
"ys",
"=",
"y_data",
".",
"get_data",
"(",
")",
"xg",
",",
"yg",
"=",
"meshgrid",
"(",
"xs",
",",
"ys",
")",
"# note: contour wants mask True in invalid locations",
"c",
"=",
"Cntr",
"(",
"xg",
",",
"yg",
",",
"data",
",",
"~",
"mask",
")",
"self",
".",
"_cached_contours",
"=",
"{",
"}",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"_levels",
")",
"-",
"1",
")",
":",
"key",
"=",
"(",
"self",
".",
"_levels",
"[",
"i",
"]",
",",
"self",
".",
"_levels",
"[",
"i",
"+",
"1",
"]",
")",
"self",
".",
"_cached_polys",
"[",
"key",
"]",
"=",
"[",
"]",
"polys",
"=",
"c",
".",
"trace",
"(",
"*",
"key",
")",
"for",
"poly",
"in",
"polys",
":",
"self",
".",
"_cached_polys",
"[",
"key",
"]",
".",
"append",
"(",
"transpose",
"(",
"poly",
")",
")",
"self",
".",
"_poly_cache_valid",
"=",
"True"
] | https://github.com/enthought/chaco/blob/0907d1dedd07a499202efbaf2fe2a4e51b4c8e5f/chaco/plots/contour/contour_poly_plot.py#L79-L104 |
||
F8LEFT/DecLLVM | d38e45e3d0dd35634adae1d0cf7f96f3bd96e74c | python/idaapi.py | python | type_pair_t.__init__ | (self, *args) | __init__(self) -> type_pair_t
__init__(self, l) -> type_pair_t
__init__(self, l, g) -> type_pair_t | __init__(self) -> type_pair_t
__init__(self, l) -> type_pair_t
__init__(self, l, g) -> type_pair_t | [
"__init__",
"(",
"self",
")",
"-",
">",
"type_pair_t",
"__init__",
"(",
"self",
"l",
")",
"-",
">",
"type_pair_t",
"__init__",
"(",
"self",
"l",
"g",
")",
"-",
">",
"type_pair_t"
] | def __init__(self, *args):
"""
__init__(self) -> type_pair_t
__init__(self, l) -> type_pair_t
__init__(self, l, g) -> type_pair_t
"""
this = _idaapi.new_type_pair_t(*args)
try: self.this.append(this)
except: self.this = this | [
"def",
"__init__",
"(",
"self",
",",
"*",
"args",
")",
":",
"this",
"=",
"_idaapi",
".",
"new_type_pair_t",
"(",
"*",
"args",
")",
"try",
":",
"self",
".",
"this",
".",
"append",
"(",
"this",
")",
"except",
":",
"self",
".",
"this",
"=",
"this"
] | https://github.com/F8LEFT/DecLLVM/blob/d38e45e3d0dd35634adae1d0cf7f96f3bd96e74c/python/idaapi.py#L32542-L32550 |
||
zhl2008/awd-platform | 0416b31abea29743387b10b3914581fbe8e7da5e | web_flaskbb/lib/python2.7/site-packages/click/core.py | python | Option.get_default | (self, ctx) | return Parameter.get_default(self, ctx) | [] | def get_default(self, ctx):
# If we're a non boolean flag out default is more complex because
# we need to look at all flags in the same group to figure out
# if we're the the default one in which case we return the flag
# value as default.
if self.is_flag and not self.is_bool_flag:
for param in ctx.command.params:
if param.name == self.name and param.default:
return param.flag_value
return None
return Parameter.get_default(self, ctx) | [
"def",
"get_default",
"(",
"self",
",",
"ctx",
")",
":",
"# If we're a non boolean flag out default is more complex because",
"# we need to look at all flags in the same group to figure out",
"# if we're the the default one in which case we return the flag",
"# value as default.",
"if",
"self",
".",
"is_flag",
"and",
"not",
"self",
".",
"is_bool_flag",
":",
"for",
"param",
"in",
"ctx",
".",
"command",
".",
"params",
":",
"if",
"param",
".",
"name",
"==",
"self",
".",
"name",
"and",
"param",
".",
"default",
":",
"return",
"param",
".",
"flag_value",
"return",
"None",
"return",
"Parameter",
".",
"get_default",
"(",
"self",
",",
"ctx",
")"
] | https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/lib/python2.7/site-packages/click/core.py#L1626-L1636 |
|||
saltstack/salt | fae5bc757ad0f1716483ce7ae180b451545c2058 | salt/modules/boto3_elasticsearch.py | python | add_tags | (
domain_name=None,
arn=None,
tags=None,
region=None,
key=None,
keyid=None,
profile=None,
) | return ret | Attaches tags to an existing Elasticsearch domain.
Tags are a set of case-sensitive key value pairs.
An Elasticsearch domain may have up to 10 tags.
:param str domain_name: The name of the Elasticsearch domain you want to add tags to.
:param str arn: The ARN of the Elasticsearch domain you want to add tags to.
Specifying this overrides ``domain_name``.
:param dict tags: The dict of tags to add to the Elasticsearch domain.
:rtype: dict
:return: Dictionary with key 'result' and as value a boolean denoting success or failure.
Upon failure, also contains a key 'error' with the error message as value.
.. versionadded:: 3001
CLI Example:
.. code-block:: bash
salt myminion boto3_elasticsearch.add_tags domain_name=mydomain tags='{"foo": "bar", "baz": "qux"}' | Attaches tags to an existing Elasticsearch domain.
Tags are a set of case-sensitive key value pairs.
An Elasticsearch domain may have up to 10 tags. | [
"Attaches",
"tags",
"to",
"an",
"existing",
"Elasticsearch",
"domain",
".",
"Tags",
"are",
"a",
"set",
"of",
"case",
"-",
"sensitive",
"key",
"value",
"pairs",
".",
"An",
"Elasticsearch",
"domain",
"may",
"have",
"up",
"to",
"10",
"tags",
"."
] | def add_tags(
domain_name=None,
arn=None,
tags=None,
region=None,
key=None,
keyid=None,
profile=None,
):
"""
Attaches tags to an existing Elasticsearch domain.
Tags are a set of case-sensitive key value pairs.
An Elasticsearch domain may have up to 10 tags.
:param str domain_name: The name of the Elasticsearch domain you want to add tags to.
:param str arn: The ARN of the Elasticsearch domain you want to add tags to.
Specifying this overrides ``domain_name``.
:param dict tags: The dict of tags to add to the Elasticsearch domain.
:rtype: dict
:return: Dictionary with key 'result' and as value a boolean denoting success or failure.
Upon failure, also contains a key 'error' with the error message as value.
.. versionadded:: 3001
CLI Example:
.. code-block:: bash
salt myminion boto3_elasticsearch.add_tags domain_name=mydomain tags='{"foo": "bar", "baz": "qux"}'
"""
if not any((arn, domain_name)):
raise SaltInvocationError(
"At least one of domain_name or arn must be specified."
)
ret = {"result": False}
if arn is None:
res = describe_elasticsearch_domain(
domain_name=domain_name,
region=region,
key=key,
keyid=keyid,
profile=profile,
)
if "error" in res:
ret.update(res)
elif not res["result"]:
ret.update(
{
"error": 'The domain with name "{}" does not exist.'.format(
domain_name
)
}
)
else:
arn = res["response"].get("ARN")
if arn:
boto_params = {
"ARN": arn,
"TagList": [
{"Key": k, "Value": value} for k, value in (tags or {}).items()
],
}
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
conn.add_tags(**boto_params)
ret["result"] = True
except (ParamValidationError, ClientError) as exp:
ret.update({"error": __utils__["boto3.get_error"](exp)["message"]})
return ret | [
"def",
"add_tags",
"(",
"domain_name",
"=",
"None",
",",
"arn",
"=",
"None",
",",
"tags",
"=",
"None",
",",
"region",
"=",
"None",
",",
"key",
"=",
"None",
",",
"keyid",
"=",
"None",
",",
"profile",
"=",
"None",
",",
")",
":",
"if",
"not",
"any",
"(",
"(",
"arn",
",",
"domain_name",
")",
")",
":",
"raise",
"SaltInvocationError",
"(",
"\"At least one of domain_name or arn must be specified.\"",
")",
"ret",
"=",
"{",
"\"result\"",
":",
"False",
"}",
"if",
"arn",
"is",
"None",
":",
"res",
"=",
"describe_elasticsearch_domain",
"(",
"domain_name",
"=",
"domain_name",
",",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
",",
")",
"if",
"\"error\"",
"in",
"res",
":",
"ret",
".",
"update",
"(",
"res",
")",
"elif",
"not",
"res",
"[",
"\"result\"",
"]",
":",
"ret",
".",
"update",
"(",
"{",
"\"error\"",
":",
"'The domain with name \"{}\" does not exist.'",
".",
"format",
"(",
"domain_name",
")",
"}",
")",
"else",
":",
"arn",
"=",
"res",
"[",
"\"response\"",
"]",
".",
"get",
"(",
"\"ARN\"",
")",
"if",
"arn",
":",
"boto_params",
"=",
"{",
"\"ARN\"",
":",
"arn",
",",
"\"TagList\"",
":",
"[",
"{",
"\"Key\"",
":",
"k",
",",
"\"Value\"",
":",
"value",
"}",
"for",
"k",
",",
"value",
"in",
"(",
"tags",
"or",
"{",
"}",
")",
".",
"items",
"(",
")",
"]",
",",
"}",
"try",
":",
"conn",
"=",
"_get_conn",
"(",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
")",
"conn",
".",
"add_tags",
"(",
"*",
"*",
"boto_params",
")",
"ret",
"[",
"\"result\"",
"]",
"=",
"True",
"except",
"(",
"ParamValidationError",
",",
"ClientError",
")",
"as",
"exp",
":",
"ret",
".",
"update",
"(",
"{",
"\"error\"",
":",
"__utils__",
"[",
"\"boto3.get_error\"",
"]",
"(",
"exp",
")",
"[",
"\"message\"",
"]",
"}",
")",
"return",
"ret"
] | https://github.com/saltstack/salt/blob/fae5bc757ad0f1716483ce7ae180b451545c2058/salt/modules/boto3_elasticsearch.py#L94-L163 |
|
armijnhemel/binaryanalysis-ng | 34c655ed71d3d022ee49c4e1271002b2ebf40001 | src/bangfilesystems.py | python | unpack_cbfs | (fileresult, scanenvironment, offset, unpackdir) | return {'status': True, 'length': romsize, 'labels': labels,
'filesandlabels': unpackedfilesandlabels, 'offset': cbfsstart} | Verify/label coreboot file system images | Verify/label coreboot file system images | [
"Verify",
"/",
"label",
"coreboot",
"file",
"system",
"images"
] | def unpack_cbfs(fileresult, scanenvironment, offset, unpackdir):
'''Verify/label coreboot file system images'''
filesize = fileresult.filesize
filename_full = scanenvironment.unpack_path(fileresult.filename)
unpackedfilesandlabels = []
labels = []
unpackingerror = {}
unpackedsize = 0
# open the file, skip the component magic
checkfile = open(filename_full, 'rb')
checkfile.seek(offset)
# what follows is a list of components
havecbfscomponents = False
# there should be one master header, which defines a
# few characteristics, such as the byte alignment. This
# one should come first to be able to read the rest.
seenmasterheader = False
# It is assumed that the first block encountered is the master header
while True:
checkbytes = checkfile.read(8)
if checkbytes != b'LARCHIVE':
break
unpackedsize += 8
# length
checkbytes = checkfile.read(4)
componentlength = int.from_bytes(checkbytes, byteorder='big')
unpackedsize += 4
# type
checkbytes = checkfile.read(4)
componenttype = int.from_bytes(checkbytes, byteorder='big')
unpackedsize += 4
# checksum
checkbytes = checkfile.read(4)
checksum = int.from_bytes(checkbytes, byteorder='big')
unpackedsize += 4
# offset
checkbytes = checkfile.read(4)
componentoffset = int.from_bytes(checkbytes, byteorder='big')
unpackedsize += 4
if offset + componentoffset + componentlength > filesize:
checkfile.close()
unpackingerror = {'offset': offset, 'fatal': False,
'reason': 'data outside of file'}
return {'status': False, 'error': unpackingerror}
# "The difference between the size of the header and offset
# is the size of the component name."
checkbytes = checkfile.read(componentoffset - 24)
if b'\x00' not in checkbytes:
checkfile.close()
unpackingerror = {'offset': offset, 'fatal': False,
'reason': 'invalid component name'}
return {'status': False, 'error': unpackingerror}
try:
componentname = checkbytes.split(b'\x00')[0].decode()
except:
checkfile.close()
unpackingerror = {'offset': offset, 'fatal': False,
'reason': 'invalid component name'}
return {'status': False, 'error': unpackingerror}
# store the current offset
curoffset = checkfile.tell()
# read the first four bytes of the payload to see
# if this is the master header
checkbytes = checkfile.read(4)
if checkbytes == b'ORBC':
if seenmasterheader:
checkfile.close()
unpackingerror = {'offset': offset, 'fatal': False,
'reason': 'only one master header allowed'}
return {'status': False, 'error': unpackingerror}
# version
checkbytes = checkfile.read(4)
masterversion = int.from_bytes(checkbytes, byteorder='big')
# romsize
checkbytes = checkfile.read(4)
romsize = int.from_bytes(checkbytes, byteorder='big')
# check if the rom size isn't larger than the actual file.
# As the master header won't be at the beginning of the file
# the check should be against size of the entire file, not
# starting at the offset, unless that is already part
# of another file (TODO).
if romsize > filesize:
checkfile.close()
unpackingerror = {'offset': offset, 'fatal': False,
'reason': 'not enough data for image'}
return {'status': False, 'error': unpackingerror}
# boot block size
checkbytes = checkfile.read(4)
bootblocksize = int.from_bytes(checkbytes, byteorder='big')
if bootblocksize > romsize:
checkfile.close()
unpackingerror = {'offset': offset, 'fatal': False,
'reason': 'invalid boot block size'}
return {'status': False, 'error': unpackingerror}
# align, always 64 bytes
checkbytes = checkfile.read(4)
align = int.from_bytes(checkbytes, byteorder='big')
if align != 64:
checkfile.close()
unpackingerror = {'offset': offset, 'fatal': False,
'reason': 'invalid alignment size'}
return {'status': False, 'error': unpackingerror}
# rom cannot be smaller than alignment
if romsize < align:
checkfile.close()
unpackingerror = {'offset': offset, 'fatal': False,
'reason': 'invalid rom size'}
return {'status': False, 'error': unpackingerror}
# offset of first block
checkbytes = checkfile.read(4)
cbfsoffset = int.from_bytes(checkbytes, byteorder='big')
if cbfsoffset > romsize:
checkfile.close()
unpackingerror = {'offset': offset, 'fatal': False,
'reason': 'offset of first block cannot be outside image'}
return {'status': False, 'error': unpackingerror}
if cbfsoffset > offset:
checkfile.close()
unpackingerror = {'offset': offset, 'fatal': False,
'reason': 'offset of first block cannot be outside image'}
return {'status': False, 'error': unpackingerror}
seenmasterheader = True
if not seenmasterheader:
checkfile.close()
unpackingerror = {'offset': offset, 'fatal': False,
'reason': 'master header not seen'}
return {'status': False, 'error': unpackingerror}
# skip the data
checkfile.seek(curoffset)
checkfile.seek(componentlength, os.SEEK_CUR)
# then read some more alignment bytes, if necessary
if (checkfile.tell() - offset) % align != 0:
padbytes = align - (checkfile.tell() - offset) % align
checkfile.seek(padbytes, os.SEEK_CUR)
if not seenmasterheader:
checkfile.close()
unpackingerror = {'offset': offset, 'fatal': False,
'reason': 'master header not seen'}
return {'status': False, 'error': unpackingerror}
if checkfile.tell() - offset != romsize - cbfsoffset:
checkfile.close()
unpackingerror = {'offset': offset, 'fatal': False,
'reason': 'invalid coreboot image'}
return {'status': False, 'error': unpackingerror}
cbfsstart = offset - cbfsoffset
if cbfsstart == 0 and romsize == filesize:
labels.append("coreboot")
checkfile.close()
return {'status': True, 'length': romsize, 'labels': labels,
'filesandlabels': unpackedfilesandlabels, 'offset': cbfsstart}
# else carve the file. It is anonymous, so just give it a name
outfile_rel = os.path.join(unpackdir, "unpacked.coreboot")
outfile_full = scanenvironment.unpack_path(outfile_rel)
outfile = open(outfile_full, 'wb')
os.sendfile(outfile.fileno(), checkfile.fileno(), cbfsstart, romsize)
outfile.close()
checkfile.close()
unpackedfilesandlabels.append((outfile_rel, ['coreboot', 'unpacked']))
return {'status': True, 'length': romsize, 'labels': labels,
'filesandlabels': unpackedfilesandlabels, 'offset': cbfsstart} | [
"def",
"unpack_cbfs",
"(",
"fileresult",
",",
"scanenvironment",
",",
"offset",
",",
"unpackdir",
")",
":",
"filesize",
"=",
"fileresult",
".",
"filesize",
"filename_full",
"=",
"scanenvironment",
".",
"unpack_path",
"(",
"fileresult",
".",
"filename",
")",
"unpackedfilesandlabels",
"=",
"[",
"]",
"labels",
"=",
"[",
"]",
"unpackingerror",
"=",
"{",
"}",
"unpackedsize",
"=",
"0",
"# open the file, skip the component magic",
"checkfile",
"=",
"open",
"(",
"filename_full",
",",
"'rb'",
")",
"checkfile",
".",
"seek",
"(",
"offset",
")",
"# what follows is a list of components",
"havecbfscomponents",
"=",
"False",
"# there should be one master header, which defines a",
"# few characteristics, such as the byte alignment. This",
"# one should come first to be able to read the rest.",
"seenmasterheader",
"=",
"False",
"# It is assumed that the first block encountered is the master header",
"while",
"True",
":",
"checkbytes",
"=",
"checkfile",
".",
"read",
"(",
"8",
")",
"if",
"checkbytes",
"!=",
"b'LARCHIVE'",
":",
"break",
"unpackedsize",
"+=",
"8",
"# length",
"checkbytes",
"=",
"checkfile",
".",
"read",
"(",
"4",
")",
"componentlength",
"=",
"int",
".",
"from_bytes",
"(",
"checkbytes",
",",
"byteorder",
"=",
"'big'",
")",
"unpackedsize",
"+=",
"4",
"# type",
"checkbytes",
"=",
"checkfile",
".",
"read",
"(",
"4",
")",
"componenttype",
"=",
"int",
".",
"from_bytes",
"(",
"checkbytes",
",",
"byteorder",
"=",
"'big'",
")",
"unpackedsize",
"+=",
"4",
"# checksum",
"checkbytes",
"=",
"checkfile",
".",
"read",
"(",
"4",
")",
"checksum",
"=",
"int",
".",
"from_bytes",
"(",
"checkbytes",
",",
"byteorder",
"=",
"'big'",
")",
"unpackedsize",
"+=",
"4",
"# offset",
"checkbytes",
"=",
"checkfile",
".",
"read",
"(",
"4",
")",
"componentoffset",
"=",
"int",
".",
"from_bytes",
"(",
"checkbytes",
",",
"byteorder",
"=",
"'big'",
")",
"unpackedsize",
"+=",
"4",
"if",
"offset",
"+",
"componentoffset",
"+",
"componentlength",
">",
"filesize",
":",
"checkfile",
".",
"close",
"(",
")",
"unpackingerror",
"=",
"{",
"'offset'",
":",
"offset",
",",
"'fatal'",
":",
"False",
",",
"'reason'",
":",
"'data outside of file'",
"}",
"return",
"{",
"'status'",
":",
"False",
",",
"'error'",
":",
"unpackingerror",
"}",
"# \"The difference between the size of the header and offset",
"# is the size of the component name.\"",
"checkbytes",
"=",
"checkfile",
".",
"read",
"(",
"componentoffset",
"-",
"24",
")",
"if",
"b'\\x00'",
"not",
"in",
"checkbytes",
":",
"checkfile",
".",
"close",
"(",
")",
"unpackingerror",
"=",
"{",
"'offset'",
":",
"offset",
",",
"'fatal'",
":",
"False",
",",
"'reason'",
":",
"'invalid component name'",
"}",
"return",
"{",
"'status'",
":",
"False",
",",
"'error'",
":",
"unpackingerror",
"}",
"try",
":",
"componentname",
"=",
"checkbytes",
".",
"split",
"(",
"b'\\x00'",
")",
"[",
"0",
"]",
".",
"decode",
"(",
")",
"except",
":",
"checkfile",
".",
"close",
"(",
")",
"unpackingerror",
"=",
"{",
"'offset'",
":",
"offset",
",",
"'fatal'",
":",
"False",
",",
"'reason'",
":",
"'invalid component name'",
"}",
"return",
"{",
"'status'",
":",
"False",
",",
"'error'",
":",
"unpackingerror",
"}",
"# store the current offset",
"curoffset",
"=",
"checkfile",
".",
"tell",
"(",
")",
"# read the first four bytes of the payload to see",
"# if this is the master header",
"checkbytes",
"=",
"checkfile",
".",
"read",
"(",
"4",
")",
"if",
"checkbytes",
"==",
"b'ORBC'",
":",
"if",
"seenmasterheader",
":",
"checkfile",
".",
"close",
"(",
")",
"unpackingerror",
"=",
"{",
"'offset'",
":",
"offset",
",",
"'fatal'",
":",
"False",
",",
"'reason'",
":",
"'only one master header allowed'",
"}",
"return",
"{",
"'status'",
":",
"False",
",",
"'error'",
":",
"unpackingerror",
"}",
"# version",
"checkbytes",
"=",
"checkfile",
".",
"read",
"(",
"4",
")",
"masterversion",
"=",
"int",
".",
"from_bytes",
"(",
"checkbytes",
",",
"byteorder",
"=",
"'big'",
")",
"# romsize",
"checkbytes",
"=",
"checkfile",
".",
"read",
"(",
"4",
")",
"romsize",
"=",
"int",
".",
"from_bytes",
"(",
"checkbytes",
",",
"byteorder",
"=",
"'big'",
")",
"# check if the rom size isn't larger than the actual file.",
"# As the master header won't be at the beginning of the file",
"# the check should be against size of the entire file, not",
"# starting at the offset, unless that is already part",
"# of another file (TODO).",
"if",
"romsize",
">",
"filesize",
":",
"checkfile",
".",
"close",
"(",
")",
"unpackingerror",
"=",
"{",
"'offset'",
":",
"offset",
",",
"'fatal'",
":",
"False",
",",
"'reason'",
":",
"'not enough data for image'",
"}",
"return",
"{",
"'status'",
":",
"False",
",",
"'error'",
":",
"unpackingerror",
"}",
"# boot block size",
"checkbytes",
"=",
"checkfile",
".",
"read",
"(",
"4",
")",
"bootblocksize",
"=",
"int",
".",
"from_bytes",
"(",
"checkbytes",
",",
"byteorder",
"=",
"'big'",
")",
"if",
"bootblocksize",
">",
"romsize",
":",
"checkfile",
".",
"close",
"(",
")",
"unpackingerror",
"=",
"{",
"'offset'",
":",
"offset",
",",
"'fatal'",
":",
"False",
",",
"'reason'",
":",
"'invalid boot block size'",
"}",
"return",
"{",
"'status'",
":",
"False",
",",
"'error'",
":",
"unpackingerror",
"}",
"# align, always 64 bytes",
"checkbytes",
"=",
"checkfile",
".",
"read",
"(",
"4",
")",
"align",
"=",
"int",
".",
"from_bytes",
"(",
"checkbytes",
",",
"byteorder",
"=",
"'big'",
")",
"if",
"align",
"!=",
"64",
":",
"checkfile",
".",
"close",
"(",
")",
"unpackingerror",
"=",
"{",
"'offset'",
":",
"offset",
",",
"'fatal'",
":",
"False",
",",
"'reason'",
":",
"'invalid alignment size'",
"}",
"return",
"{",
"'status'",
":",
"False",
",",
"'error'",
":",
"unpackingerror",
"}",
"# rom cannot be smaller than alignment",
"if",
"romsize",
"<",
"align",
":",
"checkfile",
".",
"close",
"(",
")",
"unpackingerror",
"=",
"{",
"'offset'",
":",
"offset",
",",
"'fatal'",
":",
"False",
",",
"'reason'",
":",
"'invalid rom size'",
"}",
"return",
"{",
"'status'",
":",
"False",
",",
"'error'",
":",
"unpackingerror",
"}",
"# offset of first block",
"checkbytes",
"=",
"checkfile",
".",
"read",
"(",
"4",
")",
"cbfsoffset",
"=",
"int",
".",
"from_bytes",
"(",
"checkbytes",
",",
"byteorder",
"=",
"'big'",
")",
"if",
"cbfsoffset",
">",
"romsize",
":",
"checkfile",
".",
"close",
"(",
")",
"unpackingerror",
"=",
"{",
"'offset'",
":",
"offset",
",",
"'fatal'",
":",
"False",
",",
"'reason'",
":",
"'offset of first block cannot be outside image'",
"}",
"return",
"{",
"'status'",
":",
"False",
",",
"'error'",
":",
"unpackingerror",
"}",
"if",
"cbfsoffset",
">",
"offset",
":",
"checkfile",
".",
"close",
"(",
")",
"unpackingerror",
"=",
"{",
"'offset'",
":",
"offset",
",",
"'fatal'",
":",
"False",
",",
"'reason'",
":",
"'offset of first block cannot be outside image'",
"}",
"return",
"{",
"'status'",
":",
"False",
",",
"'error'",
":",
"unpackingerror",
"}",
"seenmasterheader",
"=",
"True",
"if",
"not",
"seenmasterheader",
":",
"checkfile",
".",
"close",
"(",
")",
"unpackingerror",
"=",
"{",
"'offset'",
":",
"offset",
",",
"'fatal'",
":",
"False",
",",
"'reason'",
":",
"'master header not seen'",
"}",
"return",
"{",
"'status'",
":",
"False",
",",
"'error'",
":",
"unpackingerror",
"}",
"# skip the data",
"checkfile",
".",
"seek",
"(",
"curoffset",
")",
"checkfile",
".",
"seek",
"(",
"componentlength",
",",
"os",
".",
"SEEK_CUR",
")",
"# then read some more alignment bytes, if necessary",
"if",
"(",
"checkfile",
".",
"tell",
"(",
")",
"-",
"offset",
")",
"%",
"align",
"!=",
"0",
":",
"padbytes",
"=",
"align",
"-",
"(",
"checkfile",
".",
"tell",
"(",
")",
"-",
"offset",
")",
"%",
"align",
"checkfile",
".",
"seek",
"(",
"padbytes",
",",
"os",
".",
"SEEK_CUR",
")",
"if",
"not",
"seenmasterheader",
":",
"checkfile",
".",
"close",
"(",
")",
"unpackingerror",
"=",
"{",
"'offset'",
":",
"offset",
",",
"'fatal'",
":",
"False",
",",
"'reason'",
":",
"'master header not seen'",
"}",
"return",
"{",
"'status'",
":",
"False",
",",
"'error'",
":",
"unpackingerror",
"}",
"if",
"checkfile",
".",
"tell",
"(",
")",
"-",
"offset",
"!=",
"romsize",
"-",
"cbfsoffset",
":",
"checkfile",
".",
"close",
"(",
")",
"unpackingerror",
"=",
"{",
"'offset'",
":",
"offset",
",",
"'fatal'",
":",
"False",
",",
"'reason'",
":",
"'invalid coreboot image'",
"}",
"return",
"{",
"'status'",
":",
"False",
",",
"'error'",
":",
"unpackingerror",
"}",
"cbfsstart",
"=",
"offset",
"-",
"cbfsoffset",
"if",
"cbfsstart",
"==",
"0",
"and",
"romsize",
"==",
"filesize",
":",
"labels",
".",
"append",
"(",
"\"coreboot\"",
")",
"checkfile",
".",
"close",
"(",
")",
"return",
"{",
"'status'",
":",
"True",
",",
"'length'",
":",
"romsize",
",",
"'labels'",
":",
"labels",
",",
"'filesandlabels'",
":",
"unpackedfilesandlabels",
",",
"'offset'",
":",
"cbfsstart",
"}",
"# else carve the file. It is anonymous, so just give it a name",
"outfile_rel",
"=",
"os",
".",
"path",
".",
"join",
"(",
"unpackdir",
",",
"\"unpacked.coreboot\"",
")",
"outfile_full",
"=",
"scanenvironment",
".",
"unpack_path",
"(",
"outfile_rel",
")",
"outfile",
"=",
"open",
"(",
"outfile_full",
",",
"'wb'",
")",
"os",
".",
"sendfile",
"(",
"outfile",
".",
"fileno",
"(",
")",
",",
"checkfile",
".",
"fileno",
"(",
")",
",",
"cbfsstart",
",",
"romsize",
")",
"outfile",
".",
"close",
"(",
")",
"checkfile",
".",
"close",
"(",
")",
"unpackedfilesandlabels",
".",
"append",
"(",
"(",
"outfile_rel",
",",
"[",
"'coreboot'",
",",
"'unpacked'",
"]",
")",
")",
"return",
"{",
"'status'",
":",
"True",
",",
"'length'",
":",
"romsize",
",",
"'labels'",
":",
"labels",
",",
"'filesandlabels'",
":",
"unpackedfilesandlabels",
",",
"'offset'",
":",
"cbfsstart",
"}"
] | https://github.com/armijnhemel/binaryanalysis-ng/blob/34c655ed71d3d022ee49c4e1271002b2ebf40001/src/bangfilesystems.py#L2972-L3162 |
|
ales-tsurko/cells | 4cf7e395cd433762bea70cdc863a346f3a6fe1d0 | packaging/macos/python/lib/python3.7/asyncio/coroutines.py | python | coroutine | (func) | return wrapper | Decorator to mark coroutines.
If the coroutine is not yielded from before it is destroyed,
an error message is logged. | Decorator to mark coroutines. | [
"Decorator",
"to",
"mark",
"coroutines",
"."
] | def coroutine(func):
"""Decorator to mark coroutines.
If the coroutine is not yielded from before it is destroyed,
an error message is logged.
"""
if inspect.iscoroutinefunction(func):
# In Python 3.5 that's all we need to do for coroutines
# defined with "async def".
return func
if inspect.isgeneratorfunction(func):
coro = func
else:
@functools.wraps(func)
def coro(*args, **kw):
res = func(*args, **kw)
if (base_futures.isfuture(res) or inspect.isgenerator(res) or
isinstance(res, CoroWrapper)):
res = yield from res
else:
# If 'res' is an awaitable, run it.
try:
await_meth = res.__await__
except AttributeError:
pass
else:
if isinstance(res, collections.abc.Awaitable):
res = yield from await_meth()
return res
coro = types.coroutine(coro)
if not _DEBUG:
wrapper = coro
else:
@functools.wraps(func)
def wrapper(*args, **kwds):
w = CoroWrapper(coro(*args, **kwds), func=func)
if w._source_traceback:
del w._source_traceback[-1]
# Python < 3.5 does not implement __qualname__
# on generator objects, so we set it manually.
# We use getattr as some callables (such as
# functools.partial may lack __qualname__).
w.__name__ = getattr(func, '__name__', None)
w.__qualname__ = getattr(func, '__qualname__', None)
return w
wrapper._is_coroutine = _is_coroutine # For iscoroutinefunction().
return wrapper | [
"def",
"coroutine",
"(",
"func",
")",
":",
"if",
"inspect",
".",
"iscoroutinefunction",
"(",
"func",
")",
":",
"# In Python 3.5 that's all we need to do for coroutines",
"# defined with \"async def\".",
"return",
"func",
"if",
"inspect",
".",
"isgeneratorfunction",
"(",
"func",
")",
":",
"coro",
"=",
"func",
"else",
":",
"@",
"functools",
".",
"wraps",
"(",
"func",
")",
"def",
"coro",
"(",
"*",
"args",
",",
"*",
"*",
"kw",
")",
":",
"res",
"=",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kw",
")",
"if",
"(",
"base_futures",
".",
"isfuture",
"(",
"res",
")",
"or",
"inspect",
".",
"isgenerator",
"(",
"res",
")",
"or",
"isinstance",
"(",
"res",
",",
"CoroWrapper",
")",
")",
":",
"res",
"=",
"yield",
"from",
"res",
"else",
":",
"# If 'res' is an awaitable, run it.",
"try",
":",
"await_meth",
"=",
"res",
".",
"__await__",
"except",
"AttributeError",
":",
"pass",
"else",
":",
"if",
"isinstance",
"(",
"res",
",",
"collections",
".",
"abc",
".",
"Awaitable",
")",
":",
"res",
"=",
"yield",
"from",
"await_meth",
"(",
")",
"return",
"res",
"coro",
"=",
"types",
".",
"coroutine",
"(",
"coro",
")",
"if",
"not",
"_DEBUG",
":",
"wrapper",
"=",
"coro",
"else",
":",
"@",
"functools",
".",
"wraps",
"(",
"func",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwds",
")",
":",
"w",
"=",
"CoroWrapper",
"(",
"coro",
"(",
"*",
"args",
",",
"*",
"*",
"kwds",
")",
",",
"func",
"=",
"func",
")",
"if",
"w",
".",
"_source_traceback",
":",
"del",
"w",
".",
"_source_traceback",
"[",
"-",
"1",
"]",
"# Python < 3.5 does not implement __qualname__",
"# on generator objects, so we set it manually.",
"# We use getattr as some callables (such as",
"# functools.partial may lack __qualname__).",
"w",
".",
"__name__",
"=",
"getattr",
"(",
"func",
",",
"'__name__'",
",",
"None",
")",
"w",
".",
"__qualname__",
"=",
"getattr",
"(",
"func",
",",
"'__qualname__'",
",",
"None",
")",
"return",
"w",
"wrapper",
".",
"_is_coroutine",
"=",
"_is_coroutine",
"# For iscoroutinefunction().",
"return",
"wrapper"
] | https://github.com/ales-tsurko/cells/blob/4cf7e395cd433762bea70cdc863a346f3a6fe1d0/packaging/macos/python/lib/python3.7/asyncio/coroutines.py#L104-L153 |
|
facebookresearch/pytext | 1a4e184b233856fcfb9997d74f167cbf5bbbfb8d | pytext/metric_reporters/channel.py | python | Channel.report | (
self,
stage,
epoch,
metrics,
model_select_metric,
loss,
preds,
targets,
scores,
context,
*args,
) | Defines how to format and report data to the output channel.
Args:
stage (Stage): train, eval or test
epoch (int): current epoch
metrics (Any): all metrics
model_select_metric (double): a single numeric metric to pick best model
loss (double): average loss
preds (List[Any]): list of predictions
targets (List[Any]): list of targets
scores (List[Any]): list of scores
context (Dict[str, List[Any]]): dict of any additional context data,
each context is a list of data that maps to each example | Defines how to format and report data to the output channel. | [
"Defines",
"how",
"to",
"format",
"and",
"report",
"data",
"to",
"the",
"output",
"channel",
"."
] | def report(
self,
stage,
epoch,
metrics,
model_select_metric,
loss,
preds,
targets,
scores,
context,
*args,
):
"""
Defines how to format and report data to the output channel.
Args:
stage (Stage): train, eval or test
epoch (int): current epoch
metrics (Any): all metrics
model_select_metric (double): a single numeric metric to pick best model
loss (double): average loss
preds (List[Any]): list of predictions
targets (List[Any]): list of targets
scores (List[Any]): list of scores
context (Dict[str, List[Any]]): dict of any additional context data,
each context is a list of data that maps to each example
"""
raise NotImplementedError() | [
"def",
"report",
"(",
"self",
",",
"stage",
",",
"epoch",
",",
"metrics",
",",
"model_select_metric",
",",
"loss",
",",
"preds",
",",
"targets",
",",
"scores",
",",
"context",
",",
"*",
"args",
",",
")",
":",
"raise",
"NotImplementedError",
"(",
")"
] | https://github.com/facebookresearch/pytext/blob/1a4e184b233856fcfb9997d74f167cbf5bbbfb8d/pytext/metric_reporters/channel.py#L38-L66 |
||
dictation-toolbox/Caster | 6c57587cbba783fd5b9e0a5e49e5957c9f73c22c | castervoice/lib/merge/state/stackitems.py | python | StackItemAsynchronous.begin | (self) | here pass along a closure to the timer multiplexer | here pass along a closure to the timer multiplexer | [
"here",
"pass",
"along",
"a",
"closure",
"to",
"the",
"timer",
"multiplexer"
] | def begin(self):
'''here pass along a closure to the timer multiplexer'''
execute_context_levels = self.executeCL
context_level = self.forward[0]
repetitions = self.repetitions
count = {"value": 0}
execute = self.execute
def closure():
do_terminate = execute_context_levels(context_level)
if do_terminate:
execute(do_terminate)
elif repetitions != 0: # if not run forever
count["value"] += 1
if count["value"] == repetitions:
execute(False)
self.closure = closure
self.timer = get_current_engine().create_timer(self.closure, self.time_in_seconds)
self.closure() | [
"def",
"begin",
"(",
"self",
")",
":",
"execute_context_levels",
"=",
"self",
".",
"executeCL",
"context_level",
"=",
"self",
".",
"forward",
"[",
"0",
"]",
"repetitions",
"=",
"self",
".",
"repetitions",
"count",
"=",
"{",
"\"value\"",
":",
"0",
"}",
"execute",
"=",
"self",
".",
"execute",
"def",
"closure",
"(",
")",
":",
"do_terminate",
"=",
"execute_context_levels",
"(",
"context_level",
")",
"if",
"do_terminate",
":",
"execute",
"(",
"do_terminate",
")",
"elif",
"repetitions",
"!=",
"0",
":",
"# if not run forever",
"count",
"[",
"\"value\"",
"]",
"+=",
"1",
"if",
"count",
"[",
"\"value\"",
"]",
"==",
"repetitions",
":",
"execute",
"(",
"False",
")",
"self",
".",
"closure",
"=",
"closure",
"self",
".",
"timer",
"=",
"get_current_engine",
"(",
")",
".",
"create_timer",
"(",
"self",
".",
"closure",
",",
"self",
".",
"time_in_seconds",
")",
"self",
".",
"closure",
"(",
")"
] | https://github.com/dictation-toolbox/Caster/blob/6c57587cbba783fd5b9e0a5e49e5957c9f73c22c/castervoice/lib/merge/state/stackitems.py#L225-L245 |
||
Te-k/harpoon | 654381fff282aacc7e3c8264fd4cead40eedf48d | harpoon/commands/ip.py | python | CommandIp.ipinfo | (self, ip, dns=True) | return ipinfo | Return information on an IP address
{"asn", "asn_name", "city", "country"} | Return information on an IP address
{"asn", "asn_name", "city", "country"} | [
"Return",
"information",
"on",
"an",
"IP",
"address",
"{",
"asn",
"asn_name",
"city",
"country",
"}"
] | def ipinfo(self, ip, dns=True):
"""
Return information on an IP address
{"asn", "asn_name", "city", "country"}
"""
self.check_geoipdb()
ipinfo = {}
if dns:
ipinfo["hostname"] = ""
try:
ipinfo["hostname"] = socket.gethostbyaddr(ip)[0]
except socket.herror:
pass
try:
citydb = geoip2.database.Reader(self.geocity)
res = citydb.city(ip)
ipinfo["city"] = res.city.name
ipinfo["country"] = res.country.name
except geoip2.errors.AddressNotFoundError:
ipinfo["city"] = "Unknown"
ipinfo["country"] = "Unknown"
except FileNotFoundError:
print(
"GeoIP database not found, make sure you have correctly installed geoipupdate"
)
sys.exit(1)
asninfo = self.ip_get_asn(ip)
ipinfo["asn"] = asninfo["asn"]
ipinfo["asn_name"] = asninfo["name"]
ipinfo["specific"] = ""
try:
with open(self.specific_ips) as f:
data = f.read().split("\n")
for d in data:
if d.strip().startswith(ip):
ipinfo["specific"] = d.split(",")[1].strip()
except FileNotFoundError:
pass
# TODO: add private
asnc = CommandAsn()
res = asnc.asn_caida(ipinfo["asn"])
ipinfo["asn_type"] = res["type"]
return ipinfo | [
"def",
"ipinfo",
"(",
"self",
",",
"ip",
",",
"dns",
"=",
"True",
")",
":",
"self",
".",
"check_geoipdb",
"(",
")",
"ipinfo",
"=",
"{",
"}",
"if",
"dns",
":",
"ipinfo",
"[",
"\"hostname\"",
"]",
"=",
"\"\"",
"try",
":",
"ipinfo",
"[",
"\"hostname\"",
"]",
"=",
"socket",
".",
"gethostbyaddr",
"(",
"ip",
")",
"[",
"0",
"]",
"except",
"socket",
".",
"herror",
":",
"pass",
"try",
":",
"citydb",
"=",
"geoip2",
".",
"database",
".",
"Reader",
"(",
"self",
".",
"geocity",
")",
"res",
"=",
"citydb",
".",
"city",
"(",
"ip",
")",
"ipinfo",
"[",
"\"city\"",
"]",
"=",
"res",
".",
"city",
".",
"name",
"ipinfo",
"[",
"\"country\"",
"]",
"=",
"res",
".",
"country",
".",
"name",
"except",
"geoip2",
".",
"errors",
".",
"AddressNotFoundError",
":",
"ipinfo",
"[",
"\"city\"",
"]",
"=",
"\"Unknown\"",
"ipinfo",
"[",
"\"country\"",
"]",
"=",
"\"Unknown\"",
"except",
"FileNotFoundError",
":",
"print",
"(",
"\"GeoIP database not found, make sure you have correctly installed geoipupdate\"",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"asninfo",
"=",
"self",
".",
"ip_get_asn",
"(",
"ip",
")",
"ipinfo",
"[",
"\"asn\"",
"]",
"=",
"asninfo",
"[",
"\"asn\"",
"]",
"ipinfo",
"[",
"\"asn_name\"",
"]",
"=",
"asninfo",
"[",
"\"name\"",
"]",
"ipinfo",
"[",
"\"specific\"",
"]",
"=",
"\"\"",
"try",
":",
"with",
"open",
"(",
"self",
".",
"specific_ips",
")",
"as",
"f",
":",
"data",
"=",
"f",
".",
"read",
"(",
")",
".",
"split",
"(",
"\"\\n\"",
")",
"for",
"d",
"in",
"data",
":",
"if",
"d",
".",
"strip",
"(",
")",
".",
"startswith",
"(",
"ip",
")",
":",
"ipinfo",
"[",
"\"specific\"",
"]",
"=",
"d",
".",
"split",
"(",
"\",\"",
")",
"[",
"1",
"]",
".",
"strip",
"(",
")",
"except",
"FileNotFoundError",
":",
"pass",
"# TODO: add private",
"asnc",
"=",
"CommandAsn",
"(",
")",
"res",
"=",
"asnc",
".",
"asn_caida",
"(",
"ipinfo",
"[",
"\"asn\"",
"]",
")",
"ipinfo",
"[",
"\"asn_type\"",
"]",
"=",
"res",
"[",
"\"type\"",
"]",
"return",
"ipinfo"
] | https://github.com/Te-k/harpoon/blob/654381fff282aacc7e3c8264fd4cead40eedf48d/harpoon/commands/ip.py#L152-L195 |
|
jMetal/jMetalPy | c3a7be2e135379377e7b8d9a303cf624cc339e46 | jmetal/algorithm/multiobjective/nsgaiii.py | python | NSGAIII.get_result | (self) | return ranking.get_subfront(0) | Return only non dominated solutions. | Return only non dominated solutions. | [
"Return",
"only",
"non",
"dominated",
"solutions",
"."
] | def get_result(self):
""" Return only non dominated solutions."""
ranking = FastNonDominatedRanking(self.dominance_comparator)
ranking.compute_ranking(self.solutions, k=self.population_size)
return ranking.get_subfront(0) | [
"def",
"get_result",
"(",
"self",
")",
":",
"ranking",
"=",
"FastNonDominatedRanking",
"(",
"self",
".",
"dominance_comparator",
")",
"ranking",
".",
"compute_ranking",
"(",
"self",
".",
"solutions",
",",
"k",
"=",
"self",
".",
"population_size",
")",
"return",
"ranking",
".",
"get_subfront",
"(",
"0",
")"
] | https://github.com/jMetal/jMetalPy/blob/c3a7be2e135379377e7b8d9a303cf624cc339e46/jmetal/algorithm/multiobjective/nsgaiii.py#L351-L356 |
|
SteveDoyle2/pyNastran | eda651ac2d4883d95a34951f8a002ff94f642a1a | pyNastran/dev/bdf_vectorized/bdf.py | python | BDF.__init__ | (self, debug: Union[str, bool, None],
log: Optional[SimpleLogger]=None, mode: str='msc') | Initializes the BDF object
Parameters
----------
debug : bool/None
used to set the logger if no logger is passed in
True: logs debug/info/error messages
False: logs info/error messages
None: logs error messages
log : logging module object / None
if log is set, debug is ignored and uses the
settings the logging object has | Initializes the BDF object | [
"Initializes",
"the",
"BDF",
"object"
] | def __init__(self, debug: Union[str, bool, None],
log: Optional[SimpleLogger]=None, mode: str='msc'):
"""
Initializes the BDF object
Parameters
----------
debug : bool/None
used to set the logger if no logger is passed in
True: logs debug/info/error messages
False: logs info/error messages
None: logs error messages
log : logging module object / None
if log is set, debug is ignored and uses the
settings the logging object has
"""
AddCard.__init__(self)
CrossReference.__init__(self)
WriteMesh.__init__(self)
GetMethods.__init__(self)
assert debug in [True, False, None], 'debug=%r' % debug
self.echo = False
self.read_includes = True
# file management parameters
self.active_filenames = []
self.active_filename = None
self.include_dir = ''
self.dumplines = False
# this flag will be flipped to True someday (and then removed), but
# doesn't support 100% of cards yet. It enables a new method for card
# parsing.
#
# 80.3 seconds -> 67.2 seconds for full_bay model
# (multiple BDF passes among other things)
self._fast_add = True
log_args = {} if CPYLOG_VERSION <= '1.5.0' else {'nlevels': 2}
self.log = get_logger2(log=log, debug=debug, **log_args)
#: list of all read in cards - useful in determining if entire BDF
#: was read & really useful in debugging
self.card_count = {}
#: stores the card_count of cards that have been rejected
self.reject_count = {}
#: was an ENDDATA card found
#self.foundEndData = False
#: useful in debugging errors in input
self.debug = debug
#: flag that allows for OpenMDAO-style optimization syntax to be used
self._is_dynamic_syntax = False
#: lines that were rejected b/c they were for a card that isn't supported
self.reject_lines = []
#: cards that were created, but not processed
self.reject_cards = []
# self.__init_attributes()
#: the list of possible cards that will be parsed
self.cards_to_read = set([
'GRID', 'SPOINT', 'EPOINT', 'POINT', 'POINTAX',
'PARAM', ## params
# coords
'CORD1R', 'CORD1C', 'CORD1S',
'CORD2R', 'CORD2C', 'CORD2S',
'PELAS', 'CELAS1', 'CELAS2', 'CELAS3', 'CELAS4',
'CROD', 'PROD', 'CONROD',
'CTUBE', 'PTUBE',
'PBAR', 'PBARL', 'CBAR',
'CBEAM',
'PSHEAR', 'CSHEAR',
'CQUAD4', 'CTRIA3', 'CQUAD8', 'CTRIA6',
'PSHELL', 'PCOMP', 'PCOMPG',
'PSOLID', 'PLSOLID',
'CTETRA', 'CTETRA4', 'CTETRA10',
'CPYRAM', 'CPYRAM5', 'CPYRAM13',
'CPENTA', 'CPENTA6', 'CPENTA15',
'CHEXA', 'CHEXA8', 'CHEXA20',
'CBUSH', 'CBUSH1D', 'CBUSH2D',
#'PBUSH', 'PBUSH1D', 'PBUSH2D',
'CONM1', 'CONM2',
'PLOTEL',
'RBAR', 'RBAR1', 'RBE1', 'RBE2', 'RBE3', 'RROD', 'RSPLINE',
'MAT1', 'MAT8',
# loads
'LOAD', 'GRAV',
'FORCE', 'FORCE1', 'FORCE2',
'MOMENT', 'MOMENT1', 'MOMENT2',
'PLOAD', 'PLOAD2', 'PLOAD4', 'PLOADX1',
'TLOAD1', 'TLOAD2', 'DELAY',
'RLOAD1', 'DPHASE', #'RLOAD2',
# constraints
'SPC', 'SPCADD', 'SPC1', 'SPCD',
'MPC', 'MPCADD',
# aero cards
'AERO', ## aero
'AEROS', ## aeros
'GUST', ## gusts
'FLUTTER', ## flutters
'FLFACT', ## flfacts
'MKAERO1', 'MKAERO2', ## mkaeros
'AECOMP', ## aecomps
'AEFACT', ## aefacts
'AELINK', ## aelinks
'AELIST', ## aelists
'AEPARM', ## aeparams
'AESTAT', ## aestats
'AESURF', ## aesurf
#'AESURFS', ## aesurfs
'CAERO1', 'CAERO2', 'CAERO3', 'CAERO4', ## caeros
# 'CAERO5',
'PAERO1', 'PAERO2', 'PAERO3', ## paeros
'PAERO4', # 'PAERO5',
'MONPNT1', ## monitor_points
'SPLINE1', 'SPLINE2', 'SPLINE4', 'SPLINE5', ## splines
#'SPLINE3', 'SPLINE6', 'SPLINE7',
'TRIM', ## trims
'CSSCHD', ## csschds
'DIVERG', ## divergs
# ---- dynamic cards ---- #
'DAREA', ## dareas
'DPHASE', ## dphases
'DELAY', ## delays
'NLPARM', ## nlparms
'ROTORG', 'ROTORD', ## rotors
'NLPCI', ## nlpcis
'TSTEP', ## tsteps
'TSTEPNL', 'TSTEP1', ## tstepnls
# direct matrix input cards
'DMIG', 'DMIJ', 'DMIJI', 'DMIK', 'DMI',
# optimization cards
'DEQATN', 'DTABLE',
'DCONSTR', 'DESVAR', 'DDVAL', 'DRESP1', 'DRESP2', 'DRESP3',
'DVCREL1', 'DVCREL2',
'DVPREL1', 'DVPREL2',
'DVMREL1', 'DVMREL2',
'DOPTPRM', 'DLINK', 'DCONADD', 'DVGRID',
'SET1', 'SET3', ## sets
'ASET', 'ASET1', ## asets
'BSET', 'BSET1', ## bsets
'CSET', 'CSET1', ## csets
'QSET', 'QSET1', ## qsets
'USET', 'USET1', ## usets
## suport/suport1/se_suport
'SUPORT', 'SUPORT1', 'SESUP',
#: methods
'EIGB', 'EIGR', 'EIGRL',
#: cMethods
'EIGC', 'EIGP',
# other
'INCLUDE', # '='
'ENDDATA',
])
case_control_cards = {'FREQ', 'GUST', 'MPC', 'SPC', 'NLPARM', 'NSM',
'TEMP', 'TSTEPNL', 'INCLUDE'}
self._unique_bulk_data_cards = self.cards_to_read.difference(case_control_cards)
#: / is the delete from restart card
self.special_cards = ['DEQATN', '/']
self._make_card_parser()
if self.is_msc:
self.set_as_msc()
elif self.is_nx:
self.set_as_nx()
#elif self.is_optistruct:
#self.set_as_optistruct()
#elif self.is_radioss:
#self.set_as_radioss()
else:
msg = 'mode=%r is not supported; modes=[msc, nx]' % self._nastran_format
raise NotImplementedError(msg) | [
"def",
"__init__",
"(",
"self",
",",
"debug",
":",
"Union",
"[",
"str",
",",
"bool",
",",
"None",
"]",
",",
"log",
":",
"Optional",
"[",
"SimpleLogger",
"]",
"=",
"None",
",",
"mode",
":",
"str",
"=",
"'msc'",
")",
":",
"AddCard",
".",
"__init__",
"(",
"self",
")",
"CrossReference",
".",
"__init__",
"(",
"self",
")",
"WriteMesh",
".",
"__init__",
"(",
"self",
")",
"GetMethods",
".",
"__init__",
"(",
"self",
")",
"assert",
"debug",
"in",
"[",
"True",
",",
"False",
",",
"None",
"]",
",",
"'debug=%r'",
"%",
"debug",
"self",
".",
"echo",
"=",
"False",
"self",
".",
"read_includes",
"=",
"True",
"# file management parameters",
"self",
".",
"active_filenames",
"=",
"[",
"]",
"self",
".",
"active_filename",
"=",
"None",
"self",
".",
"include_dir",
"=",
"''",
"self",
".",
"dumplines",
"=",
"False",
"# this flag will be flipped to True someday (and then removed), but",
"# doesn't support 100% of cards yet. It enables a new method for card",
"# parsing.",
"#",
"# 80.3 seconds -> 67.2 seconds for full_bay model",
"# (multiple BDF passes among other things)",
"self",
".",
"_fast_add",
"=",
"True",
"log_args",
"=",
"{",
"}",
"if",
"CPYLOG_VERSION",
"<=",
"'1.5.0'",
"else",
"{",
"'nlevels'",
":",
"2",
"}",
"self",
".",
"log",
"=",
"get_logger2",
"(",
"log",
"=",
"log",
",",
"debug",
"=",
"debug",
",",
"*",
"*",
"log_args",
")",
"#: list of all read in cards - useful in determining if entire BDF",
"#: was read & really useful in debugging",
"self",
".",
"card_count",
"=",
"{",
"}",
"#: stores the card_count of cards that have been rejected",
"self",
".",
"reject_count",
"=",
"{",
"}",
"#: was an ENDDATA card found",
"#self.foundEndData = False",
"#: useful in debugging errors in input",
"self",
".",
"debug",
"=",
"debug",
"#: flag that allows for OpenMDAO-style optimization syntax to be used",
"self",
".",
"_is_dynamic_syntax",
"=",
"False",
"#: lines that were rejected b/c they were for a card that isn't supported",
"self",
".",
"reject_lines",
"=",
"[",
"]",
"#: cards that were created, but not processed",
"self",
".",
"reject_cards",
"=",
"[",
"]",
"# self.__init_attributes()",
"#: the list of possible cards that will be parsed",
"self",
".",
"cards_to_read",
"=",
"set",
"(",
"[",
"'GRID'",
",",
"'SPOINT'",
",",
"'EPOINT'",
",",
"'POINT'",
",",
"'POINTAX'",
",",
"'PARAM'",
",",
"## params",
"# coords",
"'CORD1R'",
",",
"'CORD1C'",
",",
"'CORD1S'",
",",
"'CORD2R'",
",",
"'CORD2C'",
",",
"'CORD2S'",
",",
"'PELAS'",
",",
"'CELAS1'",
",",
"'CELAS2'",
",",
"'CELAS3'",
",",
"'CELAS4'",
",",
"'CROD'",
",",
"'PROD'",
",",
"'CONROD'",
",",
"'CTUBE'",
",",
"'PTUBE'",
",",
"'PBAR'",
",",
"'PBARL'",
",",
"'CBAR'",
",",
"'CBEAM'",
",",
"'PSHEAR'",
",",
"'CSHEAR'",
",",
"'CQUAD4'",
",",
"'CTRIA3'",
",",
"'CQUAD8'",
",",
"'CTRIA6'",
",",
"'PSHELL'",
",",
"'PCOMP'",
",",
"'PCOMPG'",
",",
"'PSOLID'",
",",
"'PLSOLID'",
",",
"'CTETRA'",
",",
"'CTETRA4'",
",",
"'CTETRA10'",
",",
"'CPYRAM'",
",",
"'CPYRAM5'",
",",
"'CPYRAM13'",
",",
"'CPENTA'",
",",
"'CPENTA6'",
",",
"'CPENTA15'",
",",
"'CHEXA'",
",",
"'CHEXA8'",
",",
"'CHEXA20'",
",",
"'CBUSH'",
",",
"'CBUSH1D'",
",",
"'CBUSH2D'",
",",
"#'PBUSH', 'PBUSH1D', 'PBUSH2D',",
"'CONM1'",
",",
"'CONM2'",
",",
"'PLOTEL'",
",",
"'RBAR'",
",",
"'RBAR1'",
",",
"'RBE1'",
",",
"'RBE2'",
",",
"'RBE3'",
",",
"'RROD'",
",",
"'RSPLINE'",
",",
"'MAT1'",
",",
"'MAT8'",
",",
"# loads",
"'LOAD'",
",",
"'GRAV'",
",",
"'FORCE'",
",",
"'FORCE1'",
",",
"'FORCE2'",
",",
"'MOMENT'",
",",
"'MOMENT1'",
",",
"'MOMENT2'",
",",
"'PLOAD'",
",",
"'PLOAD2'",
",",
"'PLOAD4'",
",",
"'PLOADX1'",
",",
"'TLOAD1'",
",",
"'TLOAD2'",
",",
"'DELAY'",
",",
"'RLOAD1'",
",",
"'DPHASE'",
",",
"#'RLOAD2',",
"# constraints",
"'SPC'",
",",
"'SPCADD'",
",",
"'SPC1'",
",",
"'SPCD'",
",",
"'MPC'",
",",
"'MPCADD'",
",",
"# aero cards",
"'AERO'",
",",
"## aero",
"'AEROS'",
",",
"## aeros",
"'GUST'",
",",
"## gusts",
"'FLUTTER'",
",",
"## flutters",
"'FLFACT'",
",",
"## flfacts",
"'MKAERO1'",
",",
"'MKAERO2'",
",",
"## mkaeros",
"'AECOMP'",
",",
"## aecomps",
"'AEFACT'",
",",
"## aefacts",
"'AELINK'",
",",
"## aelinks",
"'AELIST'",
",",
"## aelists",
"'AEPARM'",
",",
"## aeparams",
"'AESTAT'",
",",
"## aestats",
"'AESURF'",
",",
"## aesurf",
"#'AESURFS', ## aesurfs",
"'CAERO1'",
",",
"'CAERO2'",
",",
"'CAERO3'",
",",
"'CAERO4'",
",",
"## caeros",
"# 'CAERO5',",
"'PAERO1'",
",",
"'PAERO2'",
",",
"'PAERO3'",
",",
"## paeros",
"'PAERO4'",
",",
"# 'PAERO5',",
"'MONPNT1'",
",",
"## monitor_points",
"'SPLINE1'",
",",
"'SPLINE2'",
",",
"'SPLINE4'",
",",
"'SPLINE5'",
",",
"## splines",
"#'SPLINE3', 'SPLINE6', 'SPLINE7',",
"'TRIM'",
",",
"## trims",
"'CSSCHD'",
",",
"## csschds",
"'DIVERG'",
",",
"## divergs",
"# ---- dynamic cards ---- #",
"'DAREA'",
",",
"## dareas",
"'DPHASE'",
",",
"## dphases",
"'DELAY'",
",",
"## delays",
"'NLPARM'",
",",
"## nlparms",
"'ROTORG'",
",",
"'ROTORD'",
",",
"## rotors",
"'NLPCI'",
",",
"## nlpcis",
"'TSTEP'",
",",
"## tsteps",
"'TSTEPNL'",
",",
"'TSTEP1'",
",",
"## tstepnls",
"# direct matrix input cards",
"'DMIG'",
",",
"'DMIJ'",
",",
"'DMIJI'",
",",
"'DMIK'",
",",
"'DMI'",
",",
"# optimization cards",
"'DEQATN'",
",",
"'DTABLE'",
",",
"'DCONSTR'",
",",
"'DESVAR'",
",",
"'DDVAL'",
",",
"'DRESP1'",
",",
"'DRESP2'",
",",
"'DRESP3'",
",",
"'DVCREL1'",
",",
"'DVCREL2'",
",",
"'DVPREL1'",
",",
"'DVPREL2'",
",",
"'DVMREL1'",
",",
"'DVMREL2'",
",",
"'DOPTPRM'",
",",
"'DLINK'",
",",
"'DCONADD'",
",",
"'DVGRID'",
",",
"'SET1'",
",",
"'SET3'",
",",
"## sets",
"'ASET'",
",",
"'ASET1'",
",",
"## asets",
"'BSET'",
",",
"'BSET1'",
",",
"## bsets",
"'CSET'",
",",
"'CSET1'",
",",
"## csets",
"'QSET'",
",",
"'QSET1'",
",",
"## qsets",
"'USET'",
",",
"'USET1'",
",",
"## usets",
"## suport/suport1/se_suport",
"'SUPORT'",
",",
"'SUPORT1'",
",",
"'SESUP'",
",",
"#: methods",
"'EIGB'",
",",
"'EIGR'",
",",
"'EIGRL'",
",",
"#: cMethods",
"'EIGC'",
",",
"'EIGP'",
",",
"# other",
"'INCLUDE'",
",",
"# '='",
"'ENDDATA'",
",",
"]",
")",
"case_control_cards",
"=",
"{",
"'FREQ'",
",",
"'GUST'",
",",
"'MPC'",
",",
"'SPC'",
",",
"'NLPARM'",
",",
"'NSM'",
",",
"'TEMP'",
",",
"'TSTEPNL'",
",",
"'INCLUDE'",
"}",
"self",
".",
"_unique_bulk_data_cards",
"=",
"self",
".",
"cards_to_read",
".",
"difference",
"(",
"case_control_cards",
")",
"#: / is the delete from restart card",
"self",
".",
"special_cards",
"=",
"[",
"'DEQATN'",
",",
"'/'",
"]",
"self",
".",
"_make_card_parser",
"(",
")",
"if",
"self",
".",
"is_msc",
":",
"self",
".",
"set_as_msc",
"(",
")",
"elif",
"self",
".",
"is_nx",
":",
"self",
".",
"set_as_nx",
"(",
")",
"#elif self.is_optistruct:",
"#self.set_as_optistruct()",
"#elif self.is_radioss:",
"#self.set_as_radioss()",
"else",
":",
"msg",
"=",
"'mode=%r is not supported; modes=[msc, nx]'",
"%",
"self",
".",
"_nastran_format",
"raise",
"NotImplementedError",
"(",
"msg",
")"
] | https://github.com/SteveDoyle2/pyNastran/blob/eda651ac2d4883d95a34951f8a002ff94f642a1a/pyNastran/dev/bdf_vectorized/bdf.py#L223-L421 |
||
sanic-org/sanic | 8b0eaa097cf4ef13a6b52dce24ae93fb20f73947 | sanic/http.py | python | Http.http1_response_normal | (
self, data: bytes, end_stream: bool
) | Format / keep track of non-chunked response. | Format / keep track of non-chunked response. | [
"Format",
"/",
"keep",
"track",
"of",
"non",
"-",
"chunked",
"response",
"."
] | async def http1_response_normal(
self, data: bytes, end_stream: bool
) -> None:
"""
Format / keep track of non-chunked response.
"""
bytes_left = self.response_bytes_left - len(data)
if bytes_left <= 0:
if bytes_left < 0:
raise ServerError("Response was bigger than content-length")
await self._send(data)
self.response_func = None
self.stage = Stage.IDLE
else:
if end_stream:
raise ServerError("Response was smaller than content-length")
await self._send(data)
self.response_bytes_left = bytes_left | [
"async",
"def",
"http1_response_normal",
"(",
"self",
",",
"data",
":",
"bytes",
",",
"end_stream",
":",
"bool",
")",
"->",
"None",
":",
"bytes_left",
"=",
"self",
".",
"response_bytes_left",
"-",
"len",
"(",
"data",
")",
"if",
"bytes_left",
"<=",
"0",
":",
"if",
"bytes_left",
"<",
"0",
":",
"raise",
"ServerError",
"(",
"\"Response was bigger than content-length\"",
")",
"await",
"self",
".",
"_send",
"(",
"data",
")",
"self",
".",
"response_func",
"=",
"None",
"self",
".",
"stage",
"=",
"Stage",
".",
"IDLE",
"else",
":",
"if",
"end_stream",
":",
"raise",
"ServerError",
"(",
"\"Response was smaller than content-length\"",
")",
"await",
"self",
".",
"_send",
"(",
"data",
")",
"self",
".",
"response_bytes_left",
"=",
"bytes_left"
] | https://github.com/sanic-org/sanic/blob/8b0eaa097cf4ef13a6b52dce24ae93fb20f73947/sanic/http.py#L399-L418 |
||
scanny/python-pptx | 71d1ca0b2b3b9178d64cdab565e8503a25a54e0b | pptx/chart/series.py | python | _BaseSeries.name | (self) | return name | The string label given to this series, appears as the title of the
column for this series in the Excel worksheet. It also appears as the
label for this series in the legend. | The string label given to this series, appears as the title of the
column for this series in the Excel worksheet. It also appears as the
label for this series in the legend. | [
"The",
"string",
"label",
"given",
"to",
"this",
"series",
"appears",
"as",
"the",
"title",
"of",
"the",
"column",
"for",
"this",
"series",
"in",
"the",
"Excel",
"worksheet",
".",
"It",
"also",
"appears",
"as",
"the",
"label",
"for",
"this",
"series",
"in",
"the",
"legend",
"."
] | def name(self):
"""
The string label given to this series, appears as the title of the
column for this series in the Excel worksheet. It also appears as the
label for this series in the legend.
"""
names = self._element.xpath("./c:tx//c:pt/c:v/text()")
name = names[0] if names else ""
return name | [
"def",
"name",
"(",
"self",
")",
":",
"names",
"=",
"self",
".",
"_element",
".",
"xpath",
"(",
"\"./c:tx//c:pt/c:v/text()\"",
")",
"name",
"=",
"names",
"[",
"0",
"]",
"if",
"names",
"else",
"\"\"",
"return",
"name"
] | https://github.com/scanny/python-pptx/blob/71d1ca0b2b3b9178d64cdab565e8503a25a54e0b/pptx/chart/series.py#L43-L51 |
|
Netflix/brutal | 13052ddbaf873acd9c9dac54b19a42ab7b70f7a9 | brutal/core/plugin.py | python | BotPlugin.msg | (self, msg, room=None, event=None) | [] | def msg(self, msg, room=None, event=None):
a = Action(source_bot=self.bot, source_event=event).msg(msg, room=room)
self._queue_action(a, event) | [
"def",
"msg",
"(",
"self",
",",
"msg",
",",
"room",
"=",
"None",
",",
"event",
"=",
"None",
")",
":",
"a",
"=",
"Action",
"(",
"source_bot",
"=",
"self",
".",
"bot",
",",
"source_event",
"=",
"event",
")",
".",
"msg",
"(",
"msg",
",",
"room",
"=",
"room",
")",
"self",
".",
"_queue_action",
"(",
"a",
",",
"event",
")"
] | https://github.com/Netflix/brutal/blob/13052ddbaf873acd9c9dac54b19a42ab7b70f7a9/brutal/core/plugin.py#L554-L556 |
||||
dimagi/commcare-hq | d67ff1d3b4c51fa050c19e60c3253a79d3452a39 | corehq/motech/dhis2/views.py | python | DataSetMapUpdateView.get_form_kwargs | (self) | return kwargs | [] | def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['domain'] = self.domain
return kwargs | [
"def",
"get_form_kwargs",
"(",
"self",
")",
":",
"kwargs",
"=",
"super",
"(",
")",
".",
"get_form_kwargs",
"(",
")",
"kwargs",
"[",
"'domain'",
"]",
"=",
"self",
".",
"domain",
"return",
"kwargs"
] | https://github.com/dimagi/commcare-hq/blob/d67ff1d3b4c51fa050c19e60c3253a79d3452a39/corehq/motech/dhis2/views.py#L244-L247 |
|||
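The override above follows the standard Django FormMixin pattern of threading extra constructor arguments into the form. A framework-free sketch of the same idea (class names here are illustrative):

class BaseView:
    def get_form_kwargs(self):
        return {"data": {"name": "example"}}

class DomainForm:
    def __init__(self, data=None, domain=None):
        self.data = data
        self.domain = domain  # extra argument injected by the view

class DomainView(BaseView):
    domain = "my-project"

    def get_form_kwargs(self):
        kwargs = super().get_form_kwargs()
        kwargs["domain"] = self.domain
        return kwargs

form = DomainForm(**DomainView().get_form_kwargs())
assert form.domain == "my-project"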
KalleHallden/AutoTimer | 2d954216700c4930baa154e28dbddc34609af7ce | env/lib/python2.7/site-packages/setuptools/command/develop.py | python | develop.finalize_options | (self) | [] | def finalize_options(self):
ei = self.get_finalized_command("egg_info")
if ei.broken_egg_info:
template = "Please rename %r to %r before using 'develop'"
args = ei.egg_info, ei.broken_egg_info
raise DistutilsError(template % args)
self.args = [ei.egg_name]
easy_install.finalize_options(self)
self.expand_basedirs()
self.expand_dirs()
# pick up setup-dir .egg files only: no .egg-info
self.package_index.scan(glob.glob('*.egg'))
egg_link_fn = ei.egg_name + '.egg-link'
self.egg_link = os.path.join(self.install_dir, egg_link_fn)
self.egg_base = ei.egg_base
if self.egg_path is None:
self.egg_path = os.path.abspath(ei.egg_base)
target = pkg_resources.normalize_path(self.egg_base)
egg_path = pkg_resources.normalize_path(
os.path.join(self.install_dir, self.egg_path))
if egg_path != target:
raise DistutilsOptionError(
"--egg-path must be a relative path from the install"
" directory to " + target
)
# Make a distribution for the package's source
self.dist = pkg_resources.Distribution(
target,
pkg_resources.PathMetadata(target, os.path.abspath(ei.egg_info)),
project_name=ei.egg_name
)
self.setup_path = self._resolve_setup_path(
self.egg_base,
self.install_dir,
self.egg_path,
) | [
"def",
"finalize_options",
"(",
"self",
")",
":",
"ei",
"=",
"self",
".",
"get_finalized_command",
"(",
"\"egg_info\"",
")",
"if",
"ei",
".",
"broken_egg_info",
":",
"template",
"=",
"\"Please rename %r to %r before using 'develop'\"",
"args",
"=",
"ei",
".",
"egg_info",
",",
"ei",
".",
"broken_egg_info",
"raise",
"DistutilsError",
"(",
"template",
"%",
"args",
")",
"self",
".",
"args",
"=",
"[",
"ei",
".",
"egg_name",
"]",
"easy_install",
".",
"finalize_options",
"(",
"self",
")",
"self",
".",
"expand_basedirs",
"(",
")",
"self",
".",
"expand_dirs",
"(",
")",
"# pick up setup-dir .egg files only: no .egg-info",
"self",
".",
"package_index",
".",
"scan",
"(",
"glob",
".",
"glob",
"(",
"'*.egg'",
")",
")",
"egg_link_fn",
"=",
"ei",
".",
"egg_name",
"+",
"'.egg-link'",
"self",
".",
"egg_link",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"install_dir",
",",
"egg_link_fn",
")",
"self",
".",
"egg_base",
"=",
"ei",
".",
"egg_base",
"if",
"self",
".",
"egg_path",
"is",
"None",
":",
"self",
".",
"egg_path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"ei",
".",
"egg_base",
")",
"target",
"=",
"pkg_resources",
".",
"normalize_path",
"(",
"self",
".",
"egg_base",
")",
"egg_path",
"=",
"pkg_resources",
".",
"normalize_path",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"install_dir",
",",
"self",
".",
"egg_path",
")",
")",
"if",
"egg_path",
"!=",
"target",
":",
"raise",
"DistutilsOptionError",
"(",
"\"--egg-path must be a relative path from the install\"",
"\" directory to \"",
"+",
"target",
")",
"# Make a distribution for the package's source",
"self",
".",
"dist",
"=",
"pkg_resources",
".",
"Distribution",
"(",
"target",
",",
"pkg_resources",
".",
"PathMetadata",
"(",
"target",
",",
"os",
".",
"path",
".",
"abspath",
"(",
"ei",
".",
"egg_info",
")",
")",
",",
"project_name",
"=",
"ei",
".",
"egg_name",
")",
"self",
".",
"setup_path",
"=",
"self",
".",
"_resolve_setup_path",
"(",
"self",
".",
"egg_base",
",",
"self",
".",
"install_dir",
",",
"self",
".",
"egg_path",
",",
")"
] | https://github.com/KalleHallden/AutoTimer/blob/2d954216700c4930baa154e28dbddc34609af7ce/env/lib/python2.7/site-packages/setuptools/command/develop.py#L48-L88 |
||||
vprusso/youtube_tutorials | a3f2fadfc408aafbf4505bcd5f2f4a41d670b44d | data_structures/trees/binary_trees/binary_tree.py | python | Queue.__len__ | (self) | return self.size() | [] | def __len__(self):
return self.size() | [
"def",
"__len__",
"(",
"self",
")",
":",
"return",
"self",
".",
"size",
"(",
")"
] | https://github.com/vprusso/youtube_tutorials/blob/a3f2fadfc408aafbf4505bcd5f2f4a41d670b44d/data_structures/trees/binary_trees/binary_tree.py#L36-L37 |
|||
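__len__ here simply delegates to size(), so len(queue) works. One common list-backed implementation consistent with that delegation (the tutorial's exact Queue internals are assumed, not shown in this record):

class Queue:
    def __init__(self):
        self.items = []

    def enqueue(self, item):
        self.items.insert(0, item)

    def dequeue(self):
        return self.items.pop()

    def size(self):
        return len(self.items)

    def __len__(self):
        # delegate so that len(queue) and queue.size() always agree
        return self.size()

q = Queue()
q.enqueue("a")
q.enqueue("b")
assert len(q) == q.size() == 2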
tobegit3hub/deep_image_model | 8a53edecd9e00678b278bb10f6fb4bdb1e4ee25e | java_predict_client/src/main/proto/tensorflow/python/tools/optimize_for_inference_lib.py | python | ensure_graph_is_valid | (graph_def) | Makes sure that the graph is internally consistent.
Checks basic properties of the graph def and raises an exception if there are
input references to missing nodes, duplicated names, or other logic errors.
Args:
graph_def: Definition of a graph to be checked.
Raises:
ValueError: If the graph is incorrectly constructed. | Makes sure that the graph is internally consistent. | [
"Makes",
"sure",
"that",
"the",
"graph",
"is",
"internally",
"consistent",
"."
] | def ensure_graph_is_valid(graph_def):
"""Makes sure that the graph is internally consistent.
Checks basic properties of the graph def and raises an exception if there are
input references to missing nodes, duplicated names, or other logic errors.
Args:
graph_def: Definition of a graph to be checked.
Raises:
ValueError: If the graph is incorrectly constructed.
"""
node_map = {}
for node in graph_def.node:
if node.name not in node_map.keys():
node_map[node.name] = node
else:
raise ValueError("Duplicate node names detected for ", node.name)
for node in graph_def.node:
for input_name in node.input:
input_node_name = node_name_from_input(input_name)
if input_node_name not in node_map.keys():
raise ValueError("Input for ", node.name, " not found: ", input_name) | [
"def",
"ensure_graph_is_valid",
"(",
"graph_def",
")",
":",
"node_map",
"=",
"{",
"}",
"for",
"node",
"in",
"graph_def",
".",
"node",
":",
"if",
"node",
".",
"name",
"not",
"in",
"node_map",
".",
"keys",
"(",
")",
":",
"node_map",
"[",
"node",
".",
"name",
"]",
"=",
"node",
"else",
":",
"raise",
"ValueError",
"(",
"\"Duplicate node names detected for \"",
",",
"node",
".",
"name",
")",
"for",
"node",
"in",
"graph_def",
".",
"node",
":",
"for",
"input_name",
"in",
"node",
".",
"input",
":",
"input_node_name",
"=",
"node_name_from_input",
"(",
"input_name",
")",
"if",
"input_node_name",
"not",
"in",
"node_map",
".",
"keys",
"(",
")",
":",
"raise",
"ValueError",
"(",
"\"Input for \"",
",",
"node",
".",
"name",
",",
"\" not found: \"",
",",
"input_name",
")"
] | https://github.com/tobegit3hub/deep_image_model/blob/8a53edecd9e00678b278bb10f6fb4bdb1e4ee25e/java_predict_client/src/main/proto/tensorflow/python/tools/optimize_for_inference_lib.py#L94-L116 |
||
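The same two-pass check (reject duplicate names, then reject inputs that reference missing nodes) can be demonstrated without TensorFlow; here each node is a (name, inputs) pair and the "^"/":0" stripping mimics node_name_from_input:

def validate_graph(nodes):
    node_map = {}
    for name, _ in nodes:
        if name in node_map:
            raise ValueError("Duplicate node names detected for " + name)
        node_map[name] = True
    for name, inputs in nodes:
        for input_name in inputs:
            # strip a control-dependency marker and an output-slot suffix
            base = input_name.lstrip("^").split(":")[0]
            if base not in node_map:
                raise ValueError("Input for %s not found: %s" % (name, input_name))

validate_graph([("a", []), ("b", ["a"]), ("c", ["b:0", "^a"])])  # passes silently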
gramps-project/gramps | 04d4651a43eb210192f40a9f8c2bad8ee8fa3753 | gramps/plugins/db/dbapi/dbapi.py | python | DBAPI.undo_data | (self, data, handle, obj_key) | Helper method to undo/redo the changes made | Helper method to undo/redo the changes made | [
"Helper",
"method",
"to",
"undo",
"/",
"redo",
"the",
"changes",
"made"
] | def undo_data(self, data, handle, obj_key):
"""
Helper method to undo/redo the changes made
"""
cls = KEY_TO_CLASS_MAP[obj_key]
table = cls.lower()
if data is None:
sql = "DELETE FROM %s WHERE handle = ?" % table
self.dbapi.execute(sql, [handle])
else:
if self._has_handle(obj_key, handle):
sql = "UPDATE %s SET blob_data = ? WHERE handle = ?" % table
self.dbapi.execute(sql, [pickle.dumps(data), handle])
else:
sql = "INSERT INTO %s (handle, blob_data) VALUES (?, ?)" % table
self.dbapi.execute(sql, [handle, pickle.dumps(data)])
obj = self._get_table_func(cls)["class_func"].create(data)
self._update_secondary_values(obj) | [
"def",
"undo_data",
"(",
"self",
",",
"data",
",",
"handle",
",",
"obj_key",
")",
":",
"cls",
"=",
"KEY_TO_CLASS_MAP",
"[",
"obj_key",
"]",
"table",
"=",
"cls",
".",
"lower",
"(",
")",
"if",
"data",
"is",
"None",
":",
"sql",
"=",
"\"DELETE FROM %s WHERE handle = ?\"",
"%",
"table",
"self",
".",
"dbapi",
".",
"execute",
"(",
"sql",
",",
"[",
"handle",
"]",
")",
"else",
":",
"if",
"self",
".",
"_has_handle",
"(",
"obj_key",
",",
"handle",
")",
":",
"sql",
"=",
"\"UPDATE %s SET blob_data = ? WHERE handle = ?\"",
"%",
"table",
"self",
".",
"dbapi",
".",
"execute",
"(",
"sql",
",",
"[",
"pickle",
".",
"dumps",
"(",
"data",
")",
",",
"handle",
"]",
")",
"else",
":",
"sql",
"=",
"\"INSERT INTO %s (handle, blob_data) VALUES (?, ?)\"",
"%",
"table",
"self",
".",
"dbapi",
".",
"execute",
"(",
"sql",
",",
"[",
"handle",
",",
"pickle",
".",
"dumps",
"(",
"data",
")",
"]",
")",
"obj",
"=",
"self",
".",
"_get_table_func",
"(",
"cls",
")",
"[",
"\"class_func\"",
"]",
".",
"create",
"(",
"data",
")",
"self",
".",
"_update_secondary_values",
"(",
"obj",
")"
] | https://github.com/gramps-project/gramps/blob/04d4651a43eb210192f40a9f8c2bad8ee8fa3753/gramps/plugins/db/dbapi/dbapi.py#L958-L975 |
||
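The method above is a delete-or-upsert over pickled blobs. A minimal standalone version against an in-memory SQLite table (table and column names chosen to mirror the original):

import pickle
import sqlite3

con = sqlite3.connect(":memory:")
con.execute("CREATE TABLE person (handle TEXT PRIMARY KEY, blob_data BLOB)")

def undo_data(con, data, handle, table="person"):
    if data is None:
        con.execute("DELETE FROM %s WHERE handle = ?" % table, [handle])
    elif con.execute("SELECT 1 FROM %s WHERE handle = ?" % table, [handle]).fetchone():
        con.execute("UPDATE %s SET blob_data = ? WHERE handle = ?" % table,
                    [pickle.dumps(data), handle])
    else:
        con.execute("INSERT INTO %s (handle, blob_data) VALUES (?, ?)" % table,
                    [handle, pickle.dumps(data)])

undo_data(con, ("I0001", "Doe"), "h1")     # insert
undo_data(con, ("I0001", "Smith"), "h1")   # update
undo_data(con, None, "h1")                 # delete
assert con.execute("SELECT COUNT(*) FROM person").fetchone()[0] == 0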
kerlomz/captcha_trainer | 72b0cd02c66a9b44073820098155b3278c8bde61 | optimizer/AdaBound.py | python | AdaBoundOptimizer._apply_sparse_shared | (self, grad, var, indices, scatter_add) | return control_flow_ops.group(*[var_update, m_t, v_t, vhat_t]) | [] | def _apply_sparse_shared(self, grad, var, indices, scatter_add):
if StrictVersion(tf.__version__) >= StrictVersion('1.10.0'):
graph = None if context.executing_eagerly() else ops.get_default_graph()
else:
graph = ops.get_default_graph()
beta1_power = math_ops.cast(self._get_non_slot_variable("beta1_power", graph=graph), var.dtype.base_dtype)
beta2_power = math_ops.cast(self._get_non_slot_variable("beta2_power", graph=graph), var.dtype.base_dtype)
lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
base_lr_t = math_ops.cast(self._base_lr_t, var.dtype.base_dtype)
beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)
gamma_t = math_ops.cast(self._gamma_t, var.dtype.base_dtype)
step_size = (lr_t * math_ops.sqrt(1 - beta2_power) / (1 - beta1_power))
final_lr = self._final_lr * lr_t / base_lr_t
lower_bound = final_lr * (1. - 1. / (gamma_t + 1.))
upper_bound = final_lr * (1. + 1. / (gamma_t))
# m_t = beta1 * m + (1 - beta1) * g_t
m = self.get_slot(var, "m")
m_scaled_g_values = grad * (1 - beta1_t)
m_t = state_ops.assign(m, m * beta1_t, use_locking=self._use_locking)
with ops.control_dependencies([m_t]):
m_t = scatter_add(m, indices, m_scaled_g_values)
# v_t = beta2 * v + (1 - beta2) * (g_t * g_t)
v = self.get_slot(var, "v")
v_scaled_g_values = (grad * grad) * (1 - beta2_t)
v_t = state_ops.assign(v, v * beta2_t, use_locking=self._use_locking)
with ops.control_dependencies([v_t]):
v_t = scatter_add(v, indices, v_scaled_g_values)
# amsgrad
vhat = self.get_slot(var, "vhat")
if self._amsbound:
vhat_t = state_ops.assign(vhat, math_ops.maximum(v_t, vhat))
v_sqrt = math_ops.sqrt(vhat_t)
else:
vhat_t = state_ops.assign(vhat, vhat)
v_sqrt = math_ops.sqrt(v_t)
# Compute the bounds
step_size_bound = step_size / (v_sqrt + epsilon_t)
bounded_lr = m_t * clip_by_value(step_size_bound, lower_bound, upper_bound)
var_update = state_ops.assign_sub(var, bounded_lr, use_locking=self._use_locking)
return control_flow_ops.group(*[var_update, m_t, v_t, vhat_t]) | [
"def",
"_apply_sparse_shared",
"(",
"self",
",",
"grad",
",",
"var",
",",
"indices",
",",
"scatter_add",
")",
":",
"if",
"StrictVersion",
"(",
"tf",
".",
"__version__",
")",
">=",
"StrictVersion",
"(",
"'1.10.0'",
")",
":",
"graph",
"=",
"None",
"if",
"context",
".",
"executing_eagerly",
"(",
")",
"else",
"ops",
".",
"get_default_graph",
"(",
")",
"else",
":",
"graph",
"=",
"ops",
".",
"get_default_graph",
"(",
")",
"beta1_power",
"=",
"math_ops",
".",
"cast",
"(",
"self",
".",
"_get_non_slot_variable",
"(",
"\"beta1_power\"",
",",
"graph",
"=",
"graph",
")",
",",
"var",
".",
"dtype",
".",
"base_dtype",
")",
"beta2_power",
"=",
"math_ops",
".",
"cast",
"(",
"self",
".",
"_get_non_slot_variable",
"(",
"\"beta2_power\"",
",",
"graph",
"=",
"graph",
")",
",",
"var",
".",
"dtype",
".",
"base_dtype",
")",
"lr_t",
"=",
"math_ops",
".",
"cast",
"(",
"self",
".",
"_lr_t",
",",
"var",
".",
"dtype",
".",
"base_dtype",
")",
"base_lr_t",
"=",
"math_ops",
".",
"cast",
"(",
"self",
".",
"_base_lr_t",
",",
"var",
".",
"dtype",
".",
"base_dtype",
")",
"beta1_t",
"=",
"math_ops",
".",
"cast",
"(",
"self",
".",
"_beta1_t",
",",
"var",
".",
"dtype",
".",
"base_dtype",
")",
"beta2_t",
"=",
"math_ops",
".",
"cast",
"(",
"self",
".",
"_beta2_t",
",",
"var",
".",
"dtype",
".",
"base_dtype",
")",
"epsilon_t",
"=",
"math_ops",
".",
"cast",
"(",
"self",
".",
"_epsilon_t",
",",
"var",
".",
"dtype",
".",
"base_dtype",
")",
"gamma_t",
"=",
"math_ops",
".",
"cast",
"(",
"self",
".",
"_gamma_t",
",",
"var",
".",
"dtype",
".",
"base_dtype",
")",
"step_size",
"=",
"(",
"lr_t",
"*",
"math_ops",
".",
"sqrt",
"(",
"1",
"-",
"beta2_power",
")",
"/",
"(",
"1",
"-",
"beta1_power",
")",
")",
"final_lr",
"=",
"self",
".",
"_final_lr",
"*",
"lr_t",
"/",
"base_lr_t",
"lower_bound",
"=",
"final_lr",
"*",
"(",
"1.",
"-",
"1.",
"/",
"(",
"gamma_t",
"+",
"1.",
")",
")",
"upper_bound",
"=",
"final_lr",
"*",
"(",
"1.",
"+",
"1.",
"/",
"(",
"gamma_t",
")",
")",
"# m_t = beta1 * m + (1 - beta1) * g_t",
"m",
"=",
"self",
".",
"get_slot",
"(",
"var",
",",
"\"m\"",
")",
"m_scaled_g_values",
"=",
"grad",
"*",
"(",
"1",
"-",
"beta1_t",
")",
"m_t",
"=",
"state_ops",
".",
"assign",
"(",
"m",
",",
"m",
"*",
"beta1_t",
",",
"use_locking",
"=",
"self",
".",
"_use_locking",
")",
"with",
"ops",
".",
"control_dependencies",
"(",
"[",
"m_t",
"]",
")",
":",
"m_t",
"=",
"scatter_add",
"(",
"m",
",",
"indices",
",",
"m_scaled_g_values",
")",
"# v_t = beta2 * v + (1 - beta2) * (g_t * g_t)",
"v",
"=",
"self",
".",
"get_slot",
"(",
"var",
",",
"\"v\"",
")",
"v_scaled_g_values",
"=",
"(",
"grad",
"*",
"grad",
")",
"*",
"(",
"1",
"-",
"beta2_t",
")",
"v_t",
"=",
"state_ops",
".",
"assign",
"(",
"v",
",",
"v",
"*",
"beta2_t",
",",
"use_locking",
"=",
"self",
".",
"_use_locking",
")",
"with",
"ops",
".",
"control_dependencies",
"(",
"[",
"v_t",
"]",
")",
":",
"v_t",
"=",
"scatter_add",
"(",
"v",
",",
"indices",
",",
"v_scaled_g_values",
")",
"# amsgrad",
"vhat",
"=",
"self",
".",
"get_slot",
"(",
"var",
",",
"\"vhat\"",
")",
"if",
"self",
".",
"_amsbound",
":",
"vhat_t",
"=",
"state_ops",
".",
"assign",
"(",
"vhat",
",",
"math_ops",
".",
"maximum",
"(",
"v_t",
",",
"vhat",
")",
")",
"v_sqrt",
"=",
"math_ops",
".",
"sqrt",
"(",
"vhat_t",
")",
"else",
":",
"vhat_t",
"=",
"state_ops",
".",
"assign",
"(",
"vhat",
",",
"vhat",
")",
"v_sqrt",
"=",
"math_ops",
".",
"sqrt",
"(",
"v_t",
")",
"# Compute the bounds",
"step_size_bound",
"=",
"step_size",
"/",
"(",
"v_sqrt",
"+",
"epsilon_t",
")",
"bounded_lr",
"=",
"m_t",
"*",
"clip_by_value",
"(",
"step_size_bound",
",",
"lower_bound",
",",
"upper_bound",
")",
"var_update",
"=",
"state_ops",
".",
"assign_sub",
"(",
"var",
",",
"bounded_lr",
",",
"use_locking",
"=",
"self",
".",
"_use_locking",
")",
"return",
"control_flow_ops",
".",
"group",
"(",
"*",
"[",
"var_update",
",",
"m_t",
",",
"v_t",
",",
"vhat_t",
"]",
")"
] | https://github.com/kerlomz/captcha_trainer/blob/72b0cd02c66a9b44073820098155b3278c8bde61/optimizer/AdaBound.py#L179-L227 |
|||
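The core of AdaBound is an Adam step whose effective per-parameter learning rate is clipped between bounds that tighten toward final_lr over time. A NumPy sketch of one dense update; this follows the AdaBound paper's gamma * t schedule, whereas the TensorFlow code above keeps the schedule inside its gamma_t tensor:

import numpy as np

def adabound_step(param, grad, m, v, t, lr=1e-3, base_lr=1e-3, final_lr=0.1,
                  beta1=0.9, beta2=0.999, eps=1e-8, gamma=1e-3):
    m = beta1 * m + (1 - beta1) * grad
    v = beta2 * v + (1 - beta2) * grad * grad
    step_size = lr * np.sqrt(1 - beta2 ** t) / (1 - beta1 ** t)
    flr = final_lr * lr / base_lr
    lower = flr * (1 - 1 / (gamma * t + 1))
    upper = flr * (1 + 1 / (gamma * t))
    # clip the element-wise step size, then scale by the first moment
    bounded_lr = m * np.clip(step_size / (np.sqrt(v) + eps), lower, upper)
    return param - bounded_lr, m, v

p, m, v = np.zeros(3), np.zeros(3), np.zeros(3)
p, m, v = adabound_step(p, np.array([0.1, -0.2, 0.3]), m, v, t=1)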
wmliang/pe-afl | 4036d2f41da20ff12ecac43a076de5d60ce68bd9 | pefile.py | python | PE.get_string_at_rva | (self, rva, max_length=MAX_STRING_LENGTH) | return self.get_string_from_data(0, s.get_data(rva, length=max_length)) | Get an ASCII string located at the given address. | Get an ASCII string located at the given address. | [
"Get",
"an",
"ASCII",
"string",
"located",
"at",
"the",
"given",
"address",
"."
] | def get_string_at_rva(self, rva, max_length=MAX_STRING_LENGTH):
"""Get an ASCII string located at the given address."""
if rva is None:
return None
s = self.get_section_by_rva(rva)
if not s:
return self.get_string_from_data(0, self.__data__[rva:rva+max_length])
return self.get_string_from_data(0, s.get_data(rva, length=max_length)) | [
"def",
"get_string_at_rva",
"(",
"self",
",",
"rva",
",",
"max_length",
"=",
"MAX_STRING_LENGTH",
")",
":",
"if",
"rva",
"is",
"None",
":",
"return",
"None",
"s",
"=",
"self",
".",
"get_section_by_rva",
"(",
"rva",
")",
"if",
"not",
"s",
":",
"return",
"self",
".",
"get_string_from_data",
"(",
"0",
",",
"self",
".",
"__data__",
"[",
"rva",
":",
"rva",
"+",
"max_length",
"]",
")",
"return",
"self",
".",
"get_string_from_data",
"(",
"0",
",",
"s",
".",
"get_data",
"(",
"rva",
",",
"length",
"=",
"max_length",
")",
")"
] | https://github.com/wmliang/pe-afl/blob/4036d2f41da20ff12ecac43a076de5d60ce68bd9/pefile.py#L4432-L4441 |
|
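get_string_from_data, which the method above calls, scans bytes for a NUL terminator. A simplified stand-in that captures the core behavior (pefile's real helper handles more edge cases):

def get_string_from_data(offset, data):
    # take bytes from offset up to the first NUL, or to the end if none
    end = data.find(b"\x00", offset)
    return data[offset:end] if end != -1 else data[offset:]

assert get_string_from_data(0, b"KERNEL32.dll\x00junk") == b"KERNEL32.dll"
assert get_string_from_data(2, b"..no-terminator") == b"no-terminator"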
taomujian/linbing | fe772a58f41e3b046b51a866bdb7e4655abaf51a | python/app/thirdparty/dirsearch/thirdparty/requests/packages/urllib3/poolmanager.py | python | PoolManager.urlopen | (self, method, url, redirect=True, **kw) | return self.urlopen(method, redirect_location, **kw) | Same as :meth:`urllib3.HTTPConnectionPool.urlopen`
with custom cross-host redirect logic and only sends the request-uri
portion of the ``url``.
The given ``url`` parameter must be absolute, such that an appropriate
:class:`urllib3.connectionpool.ConnectionPool` can be chosen for it. | Same as :meth:`urllib3.HTTPConnectionPool.urlopen`
with custom cross-host redirect logic and only sends the request-uri
portion of the ``url``. | [
"Same",
"as",
":",
"meth",
":",
"urllib3",
".",
"HTTPConnectionPool",
".",
"urlopen",
"with",
"custom",
"cross",
"-",
"host",
"redirect",
"logic",
"and",
"only",
"sends",
"the",
"request",
"-",
"uri",
"portion",
"of",
"the",
"url",
"."
] | def urlopen(self, method, url, redirect=True, **kw):
"""
Same as :meth:`urllib3.HTTPConnectionPool.urlopen`
with custom cross-host redirect logic and only sends the request-uri
portion of the ``url``.
The given ``url`` parameter must be absolute, such that an appropriate
:class:`urllib3.connectionpool.ConnectionPool` can be chosen for it.
"""
u = parse_url(url)
self._validate_proxy_scheme_url_selection(u.scheme)
conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme)
kw["assert_same_host"] = False
kw["redirect"] = False
if "headers" not in kw:
kw["headers"] = self.headers.copy()
if self._proxy_requires_url_absolute_form(u):
response = conn.urlopen(method, url, **kw)
else:
response = conn.urlopen(method, u.request_uri, **kw)
redirect_location = redirect and response.get_redirect_location()
if not redirect_location:
return response
# Support relative URLs for redirecting.
redirect_location = urljoin(url, redirect_location)
# RFC 7231, Section 6.4.4
if response.status == 303:
method = "GET"
retries = kw.get("retries")
if not isinstance(retries, Retry):
retries = Retry.from_int(retries, redirect=redirect)
# Strip headers marked as unsafe to forward to the redirected location.
# Check remove_headers_on_redirect to avoid a potential network call within
# conn.is_same_host() which may use socket.gethostbyname() in the future.
if retries.remove_headers_on_redirect and not conn.is_same_host(
redirect_location
):
headers = list(six.iterkeys(kw["headers"]))
for header in headers:
if header.lower() in retries.remove_headers_on_redirect:
kw["headers"].pop(header, None)
try:
retries = retries.increment(method, url, response=response, _pool=conn)
except MaxRetryError:
if retries.raise_on_redirect:
response.drain_conn()
raise
return response
kw["retries"] = retries
kw["redirect"] = redirect
log.info("Redirecting %s -> %s", url, redirect_location)
response.drain_conn()
return self.urlopen(method, redirect_location, **kw) | [
"def",
"urlopen",
"(",
"self",
",",
"method",
",",
"url",
",",
"redirect",
"=",
"True",
",",
"*",
"*",
"kw",
")",
":",
"u",
"=",
"parse_url",
"(",
"url",
")",
"self",
".",
"_validate_proxy_scheme_url_selection",
"(",
"u",
".",
"scheme",
")",
"conn",
"=",
"self",
".",
"connection_from_host",
"(",
"u",
".",
"host",
",",
"port",
"=",
"u",
".",
"port",
",",
"scheme",
"=",
"u",
".",
"scheme",
")",
"kw",
"[",
"\"assert_same_host\"",
"]",
"=",
"False",
"kw",
"[",
"\"redirect\"",
"]",
"=",
"False",
"if",
"\"headers\"",
"not",
"in",
"kw",
":",
"kw",
"[",
"\"headers\"",
"]",
"=",
"self",
".",
"headers",
".",
"copy",
"(",
")",
"if",
"self",
".",
"_proxy_requires_url_absolute_form",
"(",
"u",
")",
":",
"response",
"=",
"conn",
".",
"urlopen",
"(",
"method",
",",
"url",
",",
"*",
"*",
"kw",
")",
"else",
":",
"response",
"=",
"conn",
".",
"urlopen",
"(",
"method",
",",
"u",
".",
"request_uri",
",",
"*",
"*",
"kw",
")",
"redirect_location",
"=",
"redirect",
"and",
"response",
".",
"get_redirect_location",
"(",
")",
"if",
"not",
"redirect_location",
":",
"return",
"response",
"# Support relative URLs for redirecting.",
"redirect_location",
"=",
"urljoin",
"(",
"url",
",",
"redirect_location",
")",
"# RFC 7231, Section 6.4.4",
"if",
"response",
".",
"status",
"==",
"303",
":",
"method",
"=",
"\"GET\"",
"retries",
"=",
"kw",
".",
"get",
"(",
"\"retries\"",
")",
"if",
"not",
"isinstance",
"(",
"retries",
",",
"Retry",
")",
":",
"retries",
"=",
"Retry",
".",
"from_int",
"(",
"retries",
",",
"redirect",
"=",
"redirect",
")",
"# Strip headers marked as unsafe to forward to the redirected location.",
"# Check remove_headers_on_redirect to avoid a potential network call within",
"# conn.is_same_host() which may use socket.gethostbyname() in the future.",
"if",
"retries",
".",
"remove_headers_on_redirect",
"and",
"not",
"conn",
".",
"is_same_host",
"(",
"redirect_location",
")",
":",
"headers",
"=",
"list",
"(",
"six",
".",
"iterkeys",
"(",
"kw",
"[",
"\"headers\"",
"]",
")",
")",
"for",
"header",
"in",
"headers",
":",
"if",
"header",
".",
"lower",
"(",
")",
"in",
"retries",
".",
"remove_headers_on_redirect",
":",
"kw",
"[",
"\"headers\"",
"]",
".",
"pop",
"(",
"header",
",",
"None",
")",
"try",
":",
"retries",
"=",
"retries",
".",
"increment",
"(",
"method",
",",
"url",
",",
"response",
"=",
"response",
",",
"_pool",
"=",
"conn",
")",
"except",
"MaxRetryError",
":",
"if",
"retries",
".",
"raise_on_redirect",
":",
"response",
".",
"drain_conn",
"(",
")",
"raise",
"return",
"response",
"kw",
"[",
"\"retries\"",
"]",
"=",
"retries",
"kw",
"[",
"\"redirect\"",
"]",
"=",
"redirect",
"log",
".",
"info",
"(",
"\"Redirecting %s -> %s\"",
",",
"url",
",",
"redirect_location",
")",
"response",
".",
"drain_conn",
"(",
")",
"return",
"self",
".",
"urlopen",
"(",
"method",
",",
"redirect_location",
",",
"*",
"*",
"kw",
")"
] | https://github.com/taomujian/linbing/blob/fe772a58f41e3b046b51a866bdb7e4655abaf51a/python/app/thirdparty/dirsearch/thirdparty/requests/packages/urllib3/poolmanager.py#L352-L417 |
|
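Typical use of the manager above, assuming the standard upstream urllib3 API that this vendored copy mirrors; note the URL must be absolute, and a 303 response downgrades the follow-up request to GET:

import urllib3
from urllib3.util.retry import Retry

http = urllib3.PoolManager(retries=Retry(redirect=3))
resp = http.urlopen("GET", "http://example.com/", redirect=True)
print(resp.status)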
P1sec/pycrate | d12bbccf1df8c9c7891a26967a9d2635610ec5b8 | pycrate_core/elt.py | python | Alt.clone | (self) | return clone | Produces an independent clone of self
Args:
None
Returns:
clone (self.__class__ instance) | Produces an independent clone of self
Args:
None
Returns:
clone (self.__class__ instance) | [
"Produces",
"an",
"independent",
"clone",
"of",
"self",
"Args",
":",
"None",
"Returns",
":",
"clone",
"(",
"self",
".",
"__class__",
"instance",
")"
] | def clone(self):
"""Produces an independent clone of self
Args:
None
Returns:
clone (self.__class__ instance)
"""
kw = {
'GEN': self._GEN,
'DEFAULT': self.DEFAULT.clone(),
'sel': self._sel
}
if self._desc != self.__class__._desc:
kw['desc'] = self._desc
if self._hier != self.__class__._hier:
kw['hier'] = self._hier
if self._trans != self.__class__._trans:
kw['trans'] = self._trans
clone = self.__class__(self._name, **kw)
clone.insert(self.get_sel(), self.get_alt().clone())
return clone | [
"def",
"clone",
"(",
"self",
")",
":",
"kw",
"=",
"{",
"'GEN'",
":",
"self",
".",
"_GEN",
",",
"'DEFAULT'",
":",
"self",
".",
"DEFAULT",
".",
"clone",
"(",
")",
",",
"'sel'",
":",
"self",
".",
"_sel",
"}",
"if",
"self",
".",
"_desc",
"!=",
"self",
".",
"__class__",
".",
"_desc",
":",
"kw",
"[",
"'desc'",
"]",
"=",
"self",
".",
"_desc",
"if",
"self",
".",
"_hier",
"!=",
"self",
".",
"__class__",
".",
"_hier",
":",
"kw",
"[",
"'hier'",
"]",
"=",
"self",
".",
"_hier",
"if",
"self",
".",
"_trans",
"!=",
"self",
".",
"__class__",
".",
"_trans",
":",
"kw",
"[",
"'trans'",
"]",
"=",
"self",
".",
"_trans",
"clone",
"=",
"self",
".",
"__class__",
"(",
"self",
".",
"_name",
",",
"*",
"*",
"kw",
")",
"clone",
".",
"insert",
"(",
"self",
".",
"get_sel",
"(",
")",
",",
"self",
".",
"get_alt",
"(",
")",
".",
"clone",
"(",
")",
")",
"return",
"clone"
] | https://github.com/P1sec/pycrate/blob/d12bbccf1df8c9c7891a26967a9d2635610ec5b8/pycrate_core/elt.py#L4555-L4577 |
|
kirthevasank/nasbot | 3c745dc986be30e3721087c8fa768099032a0802 | utils/experimenters.py | python | BasicExperimenter.wrapup_experiments | (self) | Any code to wrap up the experiments goes here. | Any code to wrap up the experiments goes here. | [
"Any",
"code",
"to",
"wrap",
"up",
"the",
"experiments",
"goes",
"here",
"."
] | def wrapup_experiments(self):
""" Any code to wrap up the experiments goes here. """
# pylint: disable=no-self-use
pass | [
"def",
"wrapup_experiments",
"(",
"self",
")",
":",
"# pylint: disable=no-self-use",
"pass"
] | https://github.com/kirthevasank/nasbot/blob/3c745dc986be30e3721087c8fa768099032a0802/utils/experimenters.py#L119-L122 |
||
vmware-archive/liota | 9dde472542edd6d1ee01f66e7d78161dfa993f8f | liota/core/metric_handler.py | python | initialize | () | Initialization for metric handling:
create events priority queue, collect queue, and send queue;
spawn event check thread and send thread; and
create collection thread pool.
:return: | Initialization for metric handling:
create events priority queue, collect queue, and send queue;
spawn event check thread and send thread; and
create collection thread pool.
:return: | [
"Initialization",
"for",
"metric",
"handling",
":",
"create",
"events",
"priority",
"queue",
"collect",
"queue",
"and",
"send",
"queue",
";",
"spawn",
"event",
"check",
"thread",
"and",
"send",
"thread",
";",
"and",
"create",
"collection",
"thread",
"pool",
".",
":",
"return",
":"
] | def initialize():
"""
Initialization for metric handling:
create events priority queue, collect queue, and send queue;
spawn event check thread and send thread; and
create collection thread pool.
:return:
"""
global is_initialization_done
if is_initialization_done:
log.debug("Initialization already done")
pass
else:
log.debug("Initializing.............")
global event_ds
if event_ds is None:
event_ds = EventsPriorityQueue()
global event_checker_thread
if event_checker_thread is None:
event_checker_thread = EventCheckerThread(
name="EventCheckerThread")
global collect_queue
if collect_queue is None:
collect_queue = Queue()
global send_queue
if send_queue is None:
send_queue = Queue()
global send_thread
if send_thread is None:
send_thread = SendThread(name="SendThread")
global collect_thread_pool
collect_thread_pool_size = int(read_liota_config('CORE_CFG','collect_thread_pool_size'))
collect_thread_pool = CollectionThreadPool(collect_thread_pool_size)
is_initialization_done = True | [
"def",
"initialize",
"(",
")",
":",
"global",
"is_initialization_done",
"if",
"is_initialization_done",
":",
"log",
".",
"debug",
"(",
"\"Initialization already done\"",
")",
"pass",
"else",
":",
"log",
".",
"debug",
"(",
"\"Initializing.............\"",
")",
"global",
"event_ds",
"if",
"event_ds",
"is",
"None",
":",
"event_ds",
"=",
"EventsPriorityQueue",
"(",
")",
"global",
"event_checker_thread",
"if",
"event_checker_thread",
"is",
"None",
":",
"event_checker_thread",
"=",
"EventCheckerThread",
"(",
"name",
"=",
"\"EventCheckerThread\"",
")",
"global",
"collect_queue",
"if",
"collect_queue",
"is",
"None",
":",
"collect_queue",
"=",
"Queue",
"(",
")",
"global",
"send_queue",
"if",
"send_queue",
"is",
"None",
":",
"send_queue",
"=",
"Queue",
"(",
")",
"global",
"send_thread",
"if",
"send_thread",
"is",
"None",
":",
"send_thread",
"=",
"SendThread",
"(",
"name",
"=",
"\"SendThread\"",
")",
"global",
"collect_thread_pool",
"collect_thread_pool_size",
"=",
"int",
"(",
"read_liota_config",
"(",
"'CORE_CFG'",
",",
"'collect_thread_pool_size'",
")",
")",
"collect_thread_pool",
"=",
"CollectionThreadPool",
"(",
"collect_thread_pool_size",
")",
"is_initialization_done",
"=",
"True"
] | https://github.com/vmware-archive/liota/blob/9dde472542edd6d1ee01f66e7d78161dfa993f8f/liota/core/metric_handler.py#L300-L333 |
||
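The function above is the guard-flag idiom: module-level singletons are created once, and later calls become no-ops. A compact version of the same pattern (names are illustrative):

from queue import Queue

_initialized = False
_send_queue = None

def initialize():
    global _initialized, _send_queue
    if _initialized:
        return  # already set up; nothing to do
    _send_queue = Queue()
    _initialized = True

initialize()
first = _send_queue
initialize()  # no-op: the queue is not recreated
assert _send_queue is first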
omz/PythonistaAppTemplate | f560f93f8876d82a21d108977f90583df08d55af | PythonistaAppTemplate/PythonistaKit.framework/pylib/logging/__init__.py | python | Logger.getEffectiveLevel | (self) | return NOTSET | Get the effective level for this logger.
Loop through this logger and its parents in the logger hierarchy,
looking for a non-zero logging level. Return the first one found. | Get the effective level for this logger. | [
"Get",
"the",
"effective",
"level",
"for",
"this",
"logger",
"."
] | def getEffectiveLevel(self):
"""
Get the effective level for this logger.
Loop through this logger and its parents in the logger hierarchy,
looking for a non-zero logging level. Return the first one found.
"""
logger = self
while logger:
if logger.level:
return logger.level
logger = logger.parent
return NOTSET | [
"def",
"getEffectiveLevel",
"(",
"self",
")",
":",
"logger",
"=",
"self",
"while",
"logger",
":",
"if",
"logger",
".",
"level",
":",
"return",
"logger",
".",
"level",
"logger",
"=",
"logger",
".",
"parent",
"return",
"NOTSET"
] | https://github.com/omz/PythonistaAppTemplate/blob/f560f93f8876d82a21d108977f90583df08d55af/PythonistaAppTemplate/PythonistaKit.framework/pylib/logging/__init__.py#L1328-L1340 |
|
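The walk up the logger hierarchy is observable directly in the standard library:

import logging

logging.getLogger().setLevel(logging.WARNING)
child = logging.getLogger("app.db")
# "app.db" and "app" have no level of their own, so the walk reaches the root
assert child.getEffectiveLevel() == logging.WARNING
child.setLevel(logging.DEBUG)
assert child.getEffectiveLevel() == logging.DEBUG  # found before reaching the root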
evhub/coconut | 27a4af9dc06667870f736f20c862930001b8cbb2 | coconut/compiler/util.py | python | maybeparens | (lparen, item, rparen, prefer_parens=False) | Wrap an item in optional parentheses, only applying them if necessary. | Wrap an item in optional parentheses, only applying them if necessary. | [
"Wrap",
"an",
"item",
"in",
"optional",
"parentheses",
"only",
"applying",
"them",
"if",
"necessary",
"."
] | def maybeparens(lparen, item, rparen, prefer_parens=False):
"""Wrap an item in optional parentheses, only applying them if necessary."""
if prefer_parens:
return lparen.suppress() + item + rparen.suppress() | item
else:
return item | lparen.suppress() + item + rparen.suppress() | [
"def",
"maybeparens",
"(",
"lparen",
",",
"item",
",",
"rparen",
",",
"prefer_parens",
"=",
"False",
")",
":",
"if",
"prefer_parens",
":",
"return",
"lparen",
".",
"suppress",
"(",
")",
"+",
"item",
"+",
"rparen",
".",
"suppress",
"(",
")",
"|",
"item",
"else",
":",
"return",
"item",
"|",
"lparen",
".",
"suppress",
"(",
")",
"+",
"item",
"+",
"rparen",
".",
"suppress",
"(",
")"
] | https://github.com/evhub/coconut/blob/27a4af9dc06667870f736f20c862930001b8cbb2/coconut/compiler/util.py#L643-L648 |
||
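Coconut's grammar utilities are built on pyparsing, so the helper can be exercised with plain pyparsing objects (assuming standard pyparsing semantics for |, +, and suppress):

from pyparsing import Literal, Word, nums

def maybeparens(lparen, item, rparen, prefer_parens=False):
    """Wrap an item in optional parentheses, only applying them if necessary."""
    if prefer_parens:
        return lparen.suppress() + item + rparen.suppress() | item
    else:
        return item | lparen.suppress() + item + rparen.suppress()

number = maybeparens(Literal("("), Word(nums), Literal(")"))
assert number.parseString("42")[0] == "42"
assert number.parseString("(42)")[0] == "42"  # parens consumed but suppressed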
pypa/pipenv | b21baade71a86ab3ee1429f71fbc14d4f95fb75d | pipenv/patched/yaml3/scanner.py | python | Scanner.fetch_plain | (self) | [] | def fetch_plain(self):
# A plain scalar could be a simple key.
self.save_possible_simple_key()
# No simple keys after plain scalars. But note that `scan_plain` will
# change this flag if the scan is finished at the beginning of the
# line.
self.allow_simple_key = False
# Scan and add SCALAR. May change `allow_simple_key`.
self.tokens.append(self.scan_plain()) | [
"def",
"fetch_plain",
"(",
"self",
")",
":",
"# A plain scalar could be a simple key.",
"self",
".",
"save_possible_simple_key",
"(",
")",
"# No simple keys after plain scalars. But note that `scan_plain` will",
"# change this flag if the scan is finished at the beginning of the",
"# line.",
"self",
".",
"allow_simple_key",
"=",
"False",
"# Scan and add SCALAR. May change `allow_simple_key`.",
"self",
".",
"tokens",
".",
"append",
"(",
"self",
".",
"scan_plain",
"(",
")",
")"
] | https://github.com/pypa/pipenv/blob/b21baade71a86ab3ee1429f71fbc14d4f95fb75d/pipenv/patched/yaml3/scanner.py#L668-L679 |
||||
packetloop/packetpig | 6e101090224df219123ff5f6ab4c37524637571f | lib/scripts/impacket/impacket/dot11.py | python | Dot11.get_protectedFrame | (self) | return ((b >> 6) & 0x01) | Return 802.11 frame 'Protected' field | Return 802.11 frame 'Protected' field | [
"Return",
"802",
".",
"11",
"frame",
"Protected",
"field"
] | def get_protectedFrame(self):
"Return 802.11 frame 'Protected' field"
b = self.header.get_byte(1)
return ((b >> 6) & 0x01) | [
"def",
"get_protectedFrame",
"(",
"self",
")",
":",
"b",
"=",
"self",
".",
"header",
".",
"get_byte",
"(",
"1",
")",
"return",
"(",
"(",
"b",
">>",
"6",
")",
"&",
"0x01",
")"
] | https://github.com/packetloop/packetpig/blob/6e101090224df219123ff5f6ab4c37524637571f/lib/scripts/impacket/impacket/dot11.py#L308-L311 |
|
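The accessor is plain bit twiddling: shift the frame-control byte right, then mask. The same pattern works for any flag bit:

def get_bit(byte_value, offset):
    # shift the desired flag into the low bit, then mask everything else off
    return (byte_value >> offset) & 0x01

frame_control = 0b01000000  # bit 6 set, i.e. a protected frame
assert get_bit(frame_control, 6) == 1
assert get_bit(frame_control, 0) == 0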
log2timeline/plaso | fe2e316b8c76a0141760c0f2f181d84acb83abc2 | plaso/preprocessors/windows.py | python | WindowsServicesAndDriversPlugin._ParseKey | (self, mediator, registry_key, value_name) | Parses a Windows Registry key for a preprocessing attribute.
Args:
mediator (PreprocessMediator): mediates interactions between preprocess
plugins and other components, such as storage and knowledge base.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
value_name (str): name of the Windows Registry value or None if not
specified.
Raises:
errors.PreProcessFail: if the preprocessing fails. | Parses a Windows Registry key for a preprocessing attribute. | [
"Parses",
"a",
"Windows",
"Registry",
"key",
"for",
"a",
"preprocessing",
"attribute",
"."
] | def _ParseKey(self, mediator, registry_key, value_name):
"""Parses a Windows Registry key for a preprocessing attribute.
Args:
mediator (PreprocessMediator): mediates interactions between preprocess
plugins and other components, such as storage and knowledge base.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
value_name (str): name of the Windows Registry value or None if not
specified.
Raises:
errors.PreProcessFail: if the preprocessing fails.
"""
service_type = None
start_type = None
registry_value = registry_key.GetValueByName('Type')
if registry_value:
service_type = registry_value.GetDataAsObject()
registry_value = registry_key.GetValueByName('Start')
if registry_value:
start_type = registry_value.GetDataAsObject()
if None in (service_type, start_type):
return
service_configuration = artifacts.WindowsServiceConfigurationArtifact(
name=registry_key.name, service_type=service_type,
start_type=start_type)
registry_value = registry_key.GetValueByName('ErrorControl')
if registry_value:
service_configuration.error_control = registry_value.GetDataAsObject()
registry_value = registry_key.GetValueByName('ImagePath')
if registry_value:
service_configuration.image_path = registry_value.GetDataAsObject()
registry_value = registry_key.GetValueByName('ObjectName')
if registry_value:
service_configuration.object_name = registry_value.GetDataAsObject()
sub_registry_key = registry_key.GetSubkeyByName('Parameters')
if sub_registry_key:
registry_value = sub_registry_key.GetValueByName('ServiceDll')
if registry_value:
service_configuration.service_dll = registry_value.GetDataAsObject()
try:
mediator.AddArtifact(service_configuration)
except KeyError:
mediator.ProducePreprocessingWarning(
self.ARTIFACT_DEFINITION_NAME,
'Unable to add Windows service configuration: {0:s} artifact.'.format(
registry_value.name)) | [
"def",
"_ParseKey",
"(",
"self",
",",
"mediator",
",",
"registry_key",
",",
"value_name",
")",
":",
"service_type",
"=",
"None",
"start_type",
"=",
"None",
"registry_value",
"=",
"registry_key",
".",
"GetValueByName",
"(",
"'Type'",
")",
"if",
"registry_value",
":",
"service_type",
"=",
"registry_value",
".",
"GetDataAsObject",
"(",
")",
"registry_value",
"=",
"registry_key",
".",
"GetValueByName",
"(",
"'Start'",
")",
"if",
"registry_value",
":",
"start_type",
"=",
"registry_value",
".",
"GetDataAsObject",
"(",
")",
"if",
"None",
"in",
"(",
"service_type",
",",
"start_type",
")",
":",
"return",
"service_configuration",
"=",
"artifacts",
".",
"WindowsServiceConfigurationArtifact",
"(",
"name",
"=",
"registry_key",
".",
"name",
",",
"service_type",
"=",
"service_type",
",",
"start_type",
"=",
"start_type",
")",
"registry_value",
"=",
"registry_key",
".",
"GetValueByName",
"(",
"'ErrorControl'",
")",
"if",
"registry_value",
":",
"service_configuration",
".",
"error_control",
"=",
"registry_value",
".",
"GetDataAsObject",
"(",
")",
"registry_value",
"=",
"registry_key",
".",
"GetValueByName",
"(",
"'ImagePath'",
")",
"if",
"registry_value",
":",
"service_configuration",
".",
"image_path",
"=",
"registry_value",
".",
"GetDataAsObject",
"(",
")",
"registry_value",
"=",
"registry_key",
".",
"GetValueByName",
"(",
"'ObjectName'",
")",
"if",
"registry_value",
":",
"service_configuration",
".",
"object_name",
"=",
"registry_value",
".",
"GetDataAsObject",
"(",
")",
"sub_registry_key",
"=",
"registry_key",
".",
"GetSubkeyByName",
"(",
"'Parameters'",
")",
"if",
"sub_registry_key",
":",
"registry_value",
"=",
"sub_registry_key",
".",
"GetValueByName",
"(",
"'ServiceDll'",
")",
"if",
"registry_value",
":",
"service_configuration",
".",
"service_dll",
"=",
"registry_value",
".",
"GetDataAsObject",
"(",
")",
"try",
":",
"mediator",
".",
"AddArtifact",
"(",
"service_configuration",
")",
"except",
"KeyError",
":",
"mediator",
".",
"ProducePreprocessingWarning",
"(",
"self",
".",
"ARTIFACT_DEFINITION_NAME",
",",
"'Unable to add Windows service configuation: {0:s} artifact.'",
".",
"format",
"(",
"registry_value",
".",
"name",
")",
")"
] | https://github.com/log2timeline/plaso/blob/fe2e316b8c76a0141760c0f2f181d84acb83abc2/plaso/preprocessors/windows.py#L719-L774 |
||
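Stripped of dfwinreg, the method's control flow is: require Type and Start, then copy optional values if present. A dict-based sketch of that flow (field names follow the original; the dict stands in for a registry key):

def parse_service(name, values):
    service_type = values.get("Type")
    start_type = values.get("Start")
    if None in (service_type, start_type):
        return None  # both values are required, as in the original
    config = {"name": name, "service_type": service_type, "start_type": start_type}
    for optional in ("ErrorControl", "ImagePath", "ObjectName"):
        if optional in values:
            config[optional] = values[optional]
    return config

assert parse_service("Tcpip", {"Type": 1, "Start": 0})["service_type"] == 1
assert parse_service("Broken", {"Type": 1}) is None  # missing Start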
oracle/graalpython | 577e02da9755d916056184ec441c26e00b70145c | graalpython/lib-python/3/datetime.py | python | timedelta.__ge__ | (self, other) | [] | def __ge__(self, other):
if isinstance(other, timedelta):
return self._cmp(other) >= 0
else:
return NotImplemented | [
"def",
"__ge__",
"(",
"self",
",",
"other",
")",
":",
"if",
"isinstance",
"(",
"other",
",",
"timedelta",
")",
":",
"return",
"self",
".",
"_cmp",
"(",
"other",
")",
">=",
"0",
"else",
":",
"return",
"NotImplemented"
] | https://github.com/oracle/graalpython/blob/577e02da9755d916056184ec441c26e00b70145c/graalpython/lib-python/3/datetime.py#L750-L754 |
||||
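Returning NotImplemented (rather than raising) lets Python fall back to the reflected comparison on the other operand; the same protocol in a tiny class:

class Seconds:
    def __init__(self, n):
        self.n = n

    def __ge__(self, other):
        if isinstance(other, Seconds):
            return self.n >= other.n
        return NotImplemented  # let the other operand try its __le__

assert Seconds(5) >= Seconds(3)
try:
    Seconds(5) >= "3"
except TypeError:
    pass  # neither side could handle the comparison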
biolab/orange3 | 41685e1c7b1d1babe680113685a2d44bcc9fec0b | Orange/widgets/utils/plot/owplotgui.py | python | VariableSelectionModel.dropMimeData | (self, mime, action, row, column, parent) | return True | [] | def dropMimeData(self, mime, action, row, column, parent):
if action == Qt.IgnoreAction:
return True # pragma: no cover
if not mime.hasFormat(self.MIME_TYPE):
return False # pragma: no cover
prev_index = mime.property('item_index')
if prev_index is None:
return False
var = self[prev_index.row()]
if self.is_selected(prev_index):
self.selected_vars.remove(var)
if row < len(self) and self.is_selected(self.index(row)):
postpos = self.selected_vars.index(self[row])
self.selected_vars.insert(postpos, var)
elif row == 0 or self.is_selected(self.index(row - 1)):
self.selected_vars.append(var)
self.selection_changed.emit()
return True | [
"def",
"dropMimeData",
"(",
"self",
",",
"mime",
",",
"action",
",",
"row",
",",
"column",
",",
"parent",
")",
":",
"if",
"action",
"==",
"Qt",
".",
"IgnoreAction",
":",
"return",
"True",
"# pragma: no cover",
"if",
"not",
"mime",
".",
"hasFormat",
"(",
"self",
".",
"MIME_TYPE",
")",
":",
"return",
"False",
"# pragma: no cover",
"prev_index",
"=",
"mime",
".",
"property",
"(",
"'item_index'",
")",
"if",
"prev_index",
"is",
"None",
":",
"return",
"False",
"var",
"=",
"self",
"[",
"prev_index",
".",
"row",
"(",
")",
"]",
"if",
"self",
".",
"is_selected",
"(",
"prev_index",
")",
":",
"self",
".",
"selected_vars",
".",
"remove",
"(",
"var",
")",
"if",
"row",
"<",
"len",
"(",
"self",
")",
"and",
"self",
".",
"is_selected",
"(",
"self",
".",
"index",
"(",
"row",
")",
")",
":",
"postpos",
"=",
"self",
".",
"selected_vars",
".",
"index",
"(",
"self",
"[",
"row",
"]",
")",
"self",
".",
"selected_vars",
".",
"insert",
"(",
"postpos",
",",
"var",
")",
"elif",
"row",
"==",
"0",
"or",
"self",
".",
"is_selected",
"(",
"self",
".",
"index",
"(",
"row",
"-",
"1",
")",
")",
":",
"self",
".",
"selected_vars",
".",
"append",
"(",
"var",
")",
"self",
".",
"selection_changed",
".",
"emit",
"(",
")",
"return",
"True"
] | https://github.com/biolab/orange3/blob/41685e1c7b1d1babe680113685a2d44bcc9fec0b/Orange/widgets/utils/plot/owplotgui.py#L106-L123 |
|||
chribsen/simple-machine-learning-examples | dc94e52a4cebdc8bb959ff88b81ff8cfeca25022 | venv/lib/python2.7/site-packages/deap/benchmarks/gp.py | python | rational_polynomial | (data) | return 30. * (data[0] - 1) * (data[2] - 1) / (data[1]**2 * (data[0] - 10)) | Rational polynomial ball benchmark function.
.. list-table::
:widths: 10 50
:stub-columns: 1
* - Range
- :math:`\mathbf{x} \in [0, 2]^3`
* - Function
- :math:`f(\mathbf{x}) = \\frac{30 * (x_1 - 1) (x_3 - 1)}{x_2^2 (x_1 - 10)}` | Rational polynomial ball benchmark function.
.. list-table::
:widths: 10 50
:stub-columns: 1 | [
"Rational",
"polynomial",
"ball",
"benchmark",
"function",
".",
"..",
"list",
"-",
"table",
"::",
":",
"widths",
":",
"10",
"50",
":",
"stub",
"-",
"columns",
":",
"1"
] | def rational_polynomial(data):
"""Rational polynomial ball benchmark function.
.. list-table::
:widths: 10 50
:stub-columns: 1
* - Range
- :math:`\mathbf{x} \in [0, 2]^3`
* - Function
- :math:`f(\mathbf{x}) = \\frac{30 * (x_1 - 1) (x_3 - 1)}{x_2^2 (x_1 - 10)}`
"""
return 30. * (data[0] - 1) * (data[2] - 1) / (data[1]**2 * (data[0] - 10)) | [
"def",
"rational_polynomial",
"(",
"data",
")",
":",
"return",
"30.",
"*",
"(",
"data",
"[",
"0",
"]",
"-",
"1",
")",
"*",
"(",
"data",
"[",
"2",
"]",
"-",
"1",
")",
"/",
"(",
"data",
"[",
"1",
"]",
"**",
"2",
"*",
"(",
"data",
"[",
"0",
"]",
"-",
"10",
")",
")"
] | https://github.com/chribsen/simple-machine-learning-examples/blob/dc94e52a4cebdc8bb959ff88b81ff8cfeca25022/venv/lib/python2.7/site-packages/deap/benchmarks/gp.py#L74-L86 |
|
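Evaluating the benchmark at a sample point in its domain, as a quick arithmetic check of the formula:

def rational_polynomial(data):
    return 30. * (data[0] - 1) * (data[2] - 1) / (data[1] ** 2 * (data[0] - 10))

# 30 * (0 - 1) * (0 - 1) / (1 * (0 - 10)) = 30 / -10 = -3.0
assert rational_polynomial((0.0, 1.0, 0.0)) == -3.0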
Qiskit/qiskit-terra | b66030e3b9192efdd3eb95cf25c6545fe0a13da4 | qiskit/pulse/library/discrete.py | python | triangle | (
duration: int, amp: complex, freq: float = None, phase: float = 0, name: Optional[str] = None
) | return _sampled_triangle_pulse(duration, amp, freq, phase=phase, name=name) | r"""Generates triangle wave :class:`~qiskit.pulse.library.Waveform`.
For :math:`A=` ``amp``, :math:`T=` ``period``, and :math:`\phi=` ``phase``,
applies the `midpoint` sampling strategy to generate a discrete pulse sampled from
the continuous function:
.. math::
f(x) = A \left(-2\left|\text{sawtooth}(x, A, T, \phi)\right| + 1\right)
This is a non-sinusoidal wave with linear ramping.
Args:
duration: Duration of pulse. Must be greater than zero.
amp: Pulse amplitude. Wave range is :math:`[-` ``amp`` :math:`,` ``amp`` :math:`]`.
freq: Pulse frequency, units of 1./dt. If ``None`` defaults to 1./duration.
phase: Pulse phase.
name: Name of pulse.
Example:
.. jupyter-execute::
import matplotlib.pyplot as plt
from qiskit.pulse.library import triangle
import numpy as np
duration = 100
amp = 1
freq = 1 / duration
triangle_wave = np.real(triangle(duration, amp, freq).samples)
plt.plot(range(duration), triangle_wave) | r"""Generates triangle wave :class:`~qiskit.pulse.library.Waveform`. | [
"r",
"Generates",
"triangle",
"wave",
":",
"class",
":",
"~qiskit",
".",
"pulse",
".",
"library",
".",
"Waveform",
"."
] | def triangle(
duration: int, amp: complex, freq: float = None, phase: float = 0, name: Optional[str] = None
) -> Waveform:
r"""Generates triangle wave :class:`~qiskit.pulse.library.Waveform`.
For :math:`A=` ``amp``, :math:`T=` ``period``, and :math:`\phi=` ``phase``,
applies the `midpoint` sampling strategy to generate a discrete pulse sampled from
the continuous function:
.. math::
f(x) = A \left(-2\left|\text{sawtooth}(x, A, T, \phi)\right| + 1\right)
This is a non-sinusoidal wave with linear ramping.
Args:
duration: Duration of pulse. Must be greater than zero.
amp: Pulse amplitude. Wave range is :math:`[-` ``amp`` :math:`,` ``amp`` :math:`]`.
freq: Pulse frequency, units of 1./dt. If ``None`` defaults to 1./duration.
phase: Pulse phase.
name: Name of pulse.
Example:
.. jupyter-execute::
import matplotlib.pyplot as plt
from qiskit.pulse.library import triangle
import numpy as np
duration = 100
amp = 1
freq = 1 / duration
triangle_wave = np.real(triangle(duration, amp, freq).samples)
plt.plot(range(duration), triangle_wave)
"""
if freq is None:
freq = 1.0 / duration
return _sampled_triangle_pulse(duration, amp, freq, phase=phase, name=name) | [
"def",
"triangle",
"(",
"duration",
":",
"int",
",",
"amp",
":",
"complex",
",",
"freq",
":",
"float",
"=",
"None",
",",
"phase",
":",
"float",
"=",
"0",
",",
"name",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
")",
"->",
"Waveform",
":",
"if",
"freq",
"is",
"None",
":",
"freq",
"=",
"1.0",
"/",
"duration",
"return",
"_sampled_triangle_pulse",
"(",
"duration",
",",
"amp",
",",
"freq",
",",
"phase",
"=",
"phase",
",",
"name",
"=",
"name",
")"
] | https://github.com/Qiskit/qiskit-terra/blob/b66030e3b9192efdd3eb95cf25c6545fe0a13da4/qiskit/pulse/library/discrete.py#L145-L183 |
|
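A NumPy-only approximation of the sampled waveform, to make the midpoint sampling and the sawtooth-to-triangle construction concrete; this is a sketch built from the formula quoted in the docstring, not Qiskit's actual sampler:

import numpy as np

def sawtooth(t, amp, freq, phase=0.0):
    x = t * freq + phase / (2 * np.pi)
    return 2 * amp * (x - np.floor(0.5 + x))  # ranges over [-amp, amp]

def triangle_samples(duration, amp, freq=None, phase=0.0):
    if freq is None:
        freq = 1.0 / duration  # same default as the function above
    t = np.arange(duration) + 0.5  # midpoint of each sample interval
    return amp * (-2 * np.abs(sawtooth(t, 1.0, freq, phase)) + 1)

wave = triangle_samples(100, 1.0)
assert wave.min() >= -1.0 and wave.max() <= 1.0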
smallcorgi/Faster-RCNN_TF | d9adb24c8ffdbae3b56eb55fc629d719fee3d741 | lib/networks/factory.py | python | list_networks | () | return __sets.keys() | List all registered imdbs. | List all registered imdbs. | [
"List",
"all",
"registered",
"imdbs",
"."
] | def list_networks():
"""List all registered imdbs."""
return __sets.keys() | [
"def",
"list_networks",
"(",
")",
":",
"return",
"__sets",
".",
"keys",
"(",
")"
] | https://github.com/smallcorgi/Faster-RCNN_TF/blob/d9adb24c8ffdbae3b56eb55fc629d719fee3d741/lib/networks/factory.py#L35-L37 |
|
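list_networks exposes the keys of a module-level registry; the factory pattern in miniature (the registration helper shown here is illustrative, not part of the original module):

__sets = {}

def register_network(name, factory):
    __sets[name] = factory

def list_networks():
    """List all registered networks."""
    return __sets.keys()

register_network("VGGnet_test", lambda: "a VGGnet_test instance")
assert "VGGnet_test" in list_networks()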
CvvT/dumpDex | 92ab3b7e996194a06bf1dd5538a4954e8a5ee9c1 | python/idaapi.py | python | node_info_t.valid_text | (self, *args) | return _idaapi.node_info_t_valid_text(self, *args) | valid_text(self) -> bool | valid_text(self) -> bool | [
"valid_text",
"(",
"self",
")",
"-",
">",
"bool"
] | def valid_text(self, *args):
"""
valid_text(self) -> bool
"""
return _idaapi.node_info_t_valid_text(self, *args) | [
"def",
"valid_text",
"(",
"self",
",",
"*",
"args",
")",
":",
"return",
"_idaapi",
".",
"node_info_t_valid_text",
"(",
"self",
",",
"*",
"args",
")"
] | https://github.com/CvvT/dumpDex/blob/92ab3b7e996194a06bf1dd5538a4954e8a5ee9c1/python/idaapi.py#L50845-L50849 |
|
ifwe/digsby | f5fe00244744aa131e07f09348d10563f3d8fa99 | digsby/src/digsby/DigsbyBaseProtocol.py | python | DigsbyProtocol.session_started_notify | (self, s) | whatever needs to be done after most setup and before the callback/state change happens.
usually that will be plugins doing their own setup.
allows notify to be overridden in subclass | whatever needs to be done after most setup and before the callback/state change happens.
usually that will be plugins doing their own setup.
allows notify to be overridden in subclass | [
"whatever",
"needs",
"to",
"be",
"done",
"after",
"most",
"setup",
"and",
"before",
"the",
"callback",
"/",
"state",
"change",
"happens",
".",
"usually",
"that",
"will",
"be",
"plugins",
"doing",
"their",
"own",
"setup",
".",
"allows",
"notify",
"to",
"be",
"overridden",
"in",
"subclass"
] | def session_started_notify(self, s):
'''
whatever needs to be done after most setup and before the callback/state change happens.
usually that will be plugins doing their own setup.
allows notify to be overridden in subclass
'''
hooks.notify('digsby.jabber.session_started', self, s) | [
"def",
"session_started_notify",
"(",
"self",
",",
"s",
")",
":",
"hooks",
".",
"notify",
"(",
"'digsby.jabber.session_started'",
",",
"self",
",",
"s",
")"
] | https://github.com/ifwe/digsby/blob/f5fe00244744aa131e07f09348d10563f3d8fa99/digsby/src/digsby/DigsbyBaseProtocol.py#L288-L294 |
||
pycontribs/pyrax | a0c022981f76a4cba96a22ecc19bb52843ac4fbe | pyrax/object_storage.py | python | StorageClient._sync_folder_to_container | (self, folder_path, container, prefix, delete,
include_hidden, ignore, ignore_timestamps, object_prefix, verbose) | This is the internal method that is called recursively to handle
nested folder structures. | This is the internal method that is called recursively to handle
nested folder structures. | [
"This",
"is",
"the",
"internal",
"method",
"that",
"is",
"called",
"recursively",
"to",
"handle",
"nested",
"folder",
"structures",
"."
] | def _sync_folder_to_container(self, folder_path, container, prefix, delete,
include_hidden, ignore, ignore_timestamps, object_prefix, verbose):
"""
This is the internal method that is called recursively to handle
nested folder structures.
"""
fnames = os.listdir(folder_path)
ignore = utils.coerce_to_list(ignore)
log = logging.getLogger("pyrax")
if not include_hidden:
ignore.append(".*")
for fname in fnames:
if utils.match_pattern(fname, ignore):
self._sync_summary["ignored"] += 1
continue
pth = os.path.join(folder_path, fname)
if os.path.isdir(pth):
subprefix = fname
if prefix:
subprefix = os.path.join(prefix, subprefix)
self._sync_folder_to_container(pth, container, prefix=subprefix,
delete=delete, include_hidden=include_hidden,
ignore=ignore, ignore_timestamps=ignore_timestamps,
object_prefix=object_prefix, verbose=verbose)
continue
self._local_files.append(os.path.join(object_prefix, prefix,
fname))
local_etag = utils.get_checksum(pth)
if object_prefix:
prefix = os.path.join(object_prefix, prefix)
object_prefix = ""
fullname_with_prefix = os.path.join(prefix, fname)
try:
obj = self._remote_files[fullname_with_prefix]
obj_etag = obj.etag
except KeyError:
obj = None
obj_etag = None
if local_etag != obj_etag:
if not ignore_timestamps:
if obj:
obj_time_str = obj.last_modified[:19]
else:
obj_time_str = EARLY_DATE_STR
local_mod = datetime.datetime.utcfromtimestamp(
os.stat(pth).st_mtime)
local_mod_str = local_mod.isoformat()
if obj_time_str >= local_mod_str:
# Remote object is newer
self._sync_summary["older"] += 1
if verbose:
log.info("%s NOT UPLOADED because remote object is "
"newer", fullname_with_prefix)
log.info(" Local: %s Remote: %s" % (
local_mod_str, obj_time_str))
continue
try:
container.upload_file(pth, obj_name=fullname_with_prefix,
etag=local_etag, return_none=True)
self._sync_summary["uploaded"] += 1
if verbose:
log.info("%s UPLOADED", fullname_with_prefix)
except Exception as e:
# Record the failure, and move on
self._sync_summary["failed"] += 1
self._sync_summary["failure_reasons"].append("%s" % e)
if verbose:
log.error("%s UPLOAD FAILED. Exception: %s" %
(fullname_with_prefix, e))
else:
self._sync_summary["duplicate"] += 1
if verbose:
log.info("%s NOT UPLOADED because it already exists",
fullname_with_prefix)
if delete and not prefix:
self._delete_objects_not_in_list(container, object_prefix) | [
"def",
"_sync_folder_to_container",
"(",
"self",
",",
"folder_path",
",",
"container",
",",
"prefix",
",",
"delete",
",",
"include_hidden",
",",
"ignore",
",",
"ignore_timestamps",
",",
"object_prefix",
",",
"verbose",
")",
":",
"fnames",
"=",
"os",
".",
"listdir",
"(",
"folder_path",
")",
"ignore",
"=",
"utils",
".",
"coerce_to_list",
"(",
"ignore",
")",
"log",
"=",
"logging",
".",
"getLogger",
"(",
"\"pyrax\"",
")",
"if",
"not",
"include_hidden",
":",
"ignore",
".",
"append",
"(",
"\".*\"",
")",
"for",
"fname",
"in",
"fnames",
":",
"if",
"utils",
".",
"match_pattern",
"(",
"fname",
",",
"ignore",
")",
":",
"self",
".",
"_sync_summary",
"[",
"\"ignored\"",
"]",
"+=",
"1",
"continue",
"pth",
"=",
"os",
".",
"path",
".",
"join",
"(",
"folder_path",
",",
"fname",
")",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"pth",
")",
":",
"subprefix",
"=",
"fname",
"if",
"prefix",
":",
"subprefix",
"=",
"os",
".",
"path",
".",
"join",
"(",
"prefix",
",",
"subprefix",
")",
"self",
".",
"_sync_folder_to_container",
"(",
"pth",
",",
"container",
",",
"prefix",
"=",
"subprefix",
",",
"delete",
"=",
"delete",
",",
"include_hidden",
"=",
"include_hidden",
",",
"ignore",
"=",
"ignore",
",",
"ignore_timestamps",
"=",
"ignore_timestamps",
",",
"object_prefix",
"=",
"object_prefix",
",",
"verbose",
"=",
"verbose",
")",
"continue",
"self",
".",
"_local_files",
".",
"append",
"(",
"os",
".",
"path",
".",
"join",
"(",
"object_prefix",
",",
"prefix",
",",
"fname",
")",
")",
"local_etag",
"=",
"utils",
".",
"get_checksum",
"(",
"pth",
")",
"if",
"object_prefix",
":",
"prefix",
"=",
"os",
".",
"path",
".",
"join",
"(",
"object_prefix",
",",
"prefix",
")",
"object_prefix",
"=",
"\"\"",
"fullname_with_prefix",
"=",
"os",
".",
"path",
".",
"join",
"(",
"prefix",
",",
"fname",
")",
"try",
":",
"obj",
"=",
"self",
".",
"_remote_files",
"[",
"fullname_with_prefix",
"]",
"obj_etag",
"=",
"obj",
".",
"etag",
"except",
"KeyError",
":",
"obj",
"=",
"None",
"obj_etag",
"=",
"None",
"if",
"local_etag",
"!=",
"obj_etag",
":",
"if",
"not",
"ignore_timestamps",
":",
"if",
"obj",
":",
"obj_time_str",
"=",
"obj",
".",
"last_modified",
"[",
":",
"19",
"]",
"else",
":",
"obj_time_str",
"=",
"EARLY_DATE_STR",
"local_mod",
"=",
"datetime",
".",
"datetime",
".",
"utcfromtimestamp",
"(",
"os",
".",
"stat",
"(",
"pth",
")",
".",
"st_mtime",
")",
"local_mod_str",
"=",
"local_mod",
".",
"isoformat",
"(",
")",
"if",
"obj_time_str",
">=",
"local_mod_str",
":",
"# Remote object is newer",
"self",
".",
"_sync_summary",
"[",
"\"older\"",
"]",
"+=",
"1",
"if",
"verbose",
":",
"log",
".",
"info",
"(",
"\"%s NOT UPLOADED because remote object is \"",
"\"newer\"",
",",
"fullname_with_prefix",
")",
"log",
".",
"info",
"(",
"\" Local: %s Remote: %s\"",
"%",
"(",
"local_mod_str",
",",
"obj_time_str",
")",
")",
"continue",
"try",
":",
"container",
".",
"upload_file",
"(",
"pth",
",",
"obj_name",
"=",
"fullname_with_prefix",
",",
"etag",
"=",
"local_etag",
",",
"return_none",
"=",
"True",
")",
"self",
".",
"_sync_summary",
"[",
"\"uploaded\"",
"]",
"+=",
"1",
"if",
"verbose",
":",
"log",
".",
"info",
"(",
"\"%s UPLOADED\"",
",",
"fullname_with_prefix",
")",
"except",
"Exception",
"as",
"e",
":",
"# Record the failure, and move on",
"self",
".",
"_sync_summary",
"[",
"\"failed\"",
"]",
"+=",
"1",
"self",
".",
"_sync_summary",
"[",
"\"failure_reasons\"",
"]",
".",
"append",
"(",
"\"%s\"",
"%",
"e",
")",
"if",
"verbose",
":",
"log",
".",
"error",
"(",
"\"%s UPLOAD FAILED. Exception: %s\"",
"%",
"(",
"fullname_with_prefix",
",",
"e",
")",
")",
"else",
":",
"self",
".",
"_sync_summary",
"[",
"\"duplicate\"",
"]",
"+=",
"1",
"if",
"verbose",
":",
"log",
".",
"info",
"(",
"\"%s NOT UPLOADED because it already exists\"",
",",
"fullname_with_prefix",
")",
"if",
"delete",
"and",
"not",
"prefix",
":",
"self",
".",
"_delete_objects_not_in_list",
"(",
"container",
",",
"object_prefix",
")"
] | https://github.com/pycontribs/pyrax/blob/a0c022981f76a4cba96a22ecc19bb52843ac4fbe/pyrax/object_storage.py#L3074-L3149 |
||
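The sync decision reduces to an etag comparison: Swift object etags are normally MD5 hexdigests, so a local file needs uploading only when its MD5 differs. A sketch of that check (pyrax's utils.get_checksum plays the same role in the method above):

import hashlib

def get_checksum(path, chunk_size=65536):
    md5 = hashlib.md5()
    with open(path, "rb") as handle:
        for block in iter(lambda: handle.read(chunk_size), b""):
            md5.update(block)
    return md5.hexdigest()

def needs_upload(local_path, remote_etag):
    # upload only when local content no longer matches the stored object
    return get_checksum(local_path) != remote_etag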
ganeti/ganeti | d340a9ddd12f501bef57da421b5f9b969a4ba905 | lib/bootstrap.py | python | MajorityHealthy | (ignore_offline_nodes=False) | return (majority_healthy, nonvoting_nodes) | Check if the majority of nodes is healthy
Gather master votes from all nodes known to this node;
return True if a strict majority of nodes is reachable and
has some opinion on which node is master. Note that this will
not guarantee any node to win an election but it ensures that
a standard master-failover is still possible.
@return: tuple of (boolean, [str]); the first is if a majority of nodes are
healthy, the second is a list of the node names that are not considered
healthy. | Check if the majority of nodes is healthy | [
"Check",
"if",
"the",
"majority",
"of",
"nodes",
"is",
"healthy"
] | def MajorityHealthy(ignore_offline_nodes=False):
"""Check if the majority of nodes is healthy
Gather master votes from all nodes known to this node;
return True if a strict majority of nodes is reachable and
has some opinion on which node is master. Note that this will
not guarantee any node to win an election but it ensures that
a standard master-failover is still possible.
@return: tuple of (boolean, [str]); the first is if a majority of nodes are
healthy, the second is a list of the node names that are not considered
healthy.
"""
if ignore_offline_nodes:
node_names = ssconf.SimpleStore().GetOnlineNodeList()
else:
node_names = ssconf.SimpleStore().GetNodeList()
node_count = len(node_names)
vote_list = _GatherMasterVotes(node_names)
if not vote_list:
logging.warning(('Voting list was None; cannot determine if a majority of '
'nodes are healthy'))
return (False, node_names)
total_votes = sum([count for (node, count) in vote_list if node is not None])
majority_healthy = 2 * total_votes > node_count
# The list of nodes that did not vote is calculated to provide useful
# debugging information to the client.
voting_nodes = [node for (node, _) in vote_list]
nonvoting_nodes = [node for node in node_names if node not in voting_nodes]
logging.info("Total %d nodes, %d votes: %s", node_count, total_votes,
vote_list)
return (majority_healthy, nonvoting_nodes) | [
"def",
"MajorityHealthy",
"(",
"ignore_offline_nodes",
"=",
"False",
")",
":",
"if",
"ignore_offline_nodes",
":",
"node_names",
"=",
"ssconf",
".",
"SimpleStore",
"(",
")",
".",
"GetOnlineNodeList",
"(",
")",
"else",
":",
"node_names",
"=",
"ssconf",
".",
"SimpleStore",
"(",
")",
".",
"GetNodeList",
"(",
")",
"node_count",
"=",
"len",
"(",
"node_names",
")",
"vote_list",
"=",
"_GatherMasterVotes",
"(",
"node_names",
")",
"if",
"not",
"vote_list",
":",
"logging",
".",
"warning",
"(",
"(",
"'Voting list was None; cannot determine if a majority of '",
"'nodes are healthy'",
")",
")",
"return",
"(",
"False",
",",
"node_names",
")",
"total_votes",
"=",
"sum",
"(",
"[",
"count",
"for",
"(",
"node",
",",
"count",
")",
"in",
"vote_list",
"if",
"node",
"is",
"not",
"None",
"]",
")",
"majority_healthy",
"=",
"2",
"*",
"total_votes",
">",
"node_count",
"# The list of nodes that did not vote is calculated to provide useful",
"# debugging information to the client.",
"voting_nodes",
"=",
"[",
"node",
"for",
"(",
"node",
",",
"_",
")",
"in",
"vote_list",
"]",
"nonvoting_nodes",
"=",
"[",
"node",
"for",
"node",
"in",
"node_names",
"if",
"node",
"not",
"in",
"voting_nodes",
"]",
"logging",
".",
"info",
"(",
"\"Total %d nodes, %d votes: %s\"",
",",
"node_count",
",",
"total_votes",
",",
"vote_list",
")",
"return",
"(",
"majority_healthy",
",",
"nonvoting_nodes",
")"
] | https://github.com/ganeti/ganeti/blob/d340a9ddd12f501bef57da421b5f9b969a4ba905/lib/bootstrap.py#L1199-L1236 |
|
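A hedged usage sketch for the MajorityHealthy record above, assuming it runs on a configured ganeti node where the ssconf data is readable; this is not part of the upstream source.

    # Minimal sketch: check quorum before attempting a master failover.
    from ganeti import bootstrap

    healthy, missing = bootstrap.MajorityHealthy(ignore_offline_nodes=True)
    if not healthy:
        # `missing` lists the node names that did not vote.
        print("Quorum at risk; non-voting nodes: %s" % ", ".join(missing))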
evilhero/mylar | dbee01d7e48e8c717afa01b2de1946c5d0b956cb | lib/transmissionrpc/client.py | python | Client.change_torrent | (self, ids, timeout=None, **kwargs) | Change torrent parameters for the torrent(s) with the supplied id's. The
parameters are:
============================ ===== =============== =======================================================================================
Argument RPC Replaced by Description
============================ ===== =============== =======================================================================================
``bandwidthPriority`` 5 - Priority for this transfer.
``downloadLimit`` 5 - Set the speed limit for download in Kib/s.
``downloadLimited`` 5 - Enable download speed limiter.
``files_unwanted`` 1 - A list of file id's that shouldn't be downloaded.
``files_wanted`` 1 - A list of file id's that should be downloaded.
``honorsSessionLimits`` 5 - Enables or disables the transfer to honour the upload limit set in the session.
``location`` 1 - Local download location.
``peer_limit`` 1 - The peer limit for the torrents.
``priority_high`` 1 - A list of file id's that should have high priority.
``priority_low`` 1 - A list of file id's that should have normal priority.
``priority_normal`` 1 - A list of file id's that should have low priority.
``queuePosition`` 14 - Position of this transfer in its queue.
``seedIdleLimit`` 10 - Seed inactivity limit in minutes.
``seedIdleMode`` 10 - Seed inactivity mode. 0 = Use session limit, 1 = Use transfer limit, 2 = Disable limit.
``seedRatioLimit`` 5 - Seeding ratio.
``seedRatioMode`` 5 - Which ratio to use. 0 = Use session limit, 1 = Use transfer limit, 2 = Disable limit.
``speed_limit_down`` 1 - 5 downloadLimit Set the speed limit for download in Kib/s.
``speed_limit_down_enabled`` 1 - 5 downloadLimited Enable download speed limiter.
``speed_limit_up`` 1 - 5 uploadLimit Set the speed limit for upload in Kib/s.
``speed_limit_up_enabled`` 1 - 5 uploadLimited Enable upload speed limiter.
``trackerAdd`` 10 - Array of string with announce URLs to add.
``trackerRemove`` 10 - Array of ids of trackers to remove.
``trackerReplace`` 10 - Array of (id, url) tuples where the announce URL should be replaced.
``uploadLimit`` 5 - Set the speed limit for upload in Kib/s.
``uploadLimited`` 5 - Enable upload speed limiter.
============================ ===== =============== =======================================================================================
.. NOTE::
transmissionrpc will try to automatically fix argument errors. | Change torrent parameters for the torrent(s) with the supplied id's. The
parameters are: | [
"Change",
"torrent",
"parameters",
"for",
"the",
"torrent",
"(",
"s",
")",
"with",
"the",
"supplied",
"id",
"s",
".",
"The",
"parameters",
"are",
":"
] | def change_torrent(self, ids, timeout=None, **kwargs):
"""
Change torrent parameters for the torrent(s) with the supplied id's. The
parameters are:
============================ ===== =============== =======================================================================================
Argument RPC Replaced by Description
============================ ===== =============== =======================================================================================
``bandwidthPriority`` 5 - Priority for this transfer.
``downloadLimit`` 5 - Set the speed limit for download in Kib/s.
``downloadLimited`` 5 - Enable download speed limiter.
``files_unwanted`` 1 - A list of file id's that shouldn't be downloaded.
``files_wanted`` 1 - A list of file id's that should be downloaded.
``honorsSessionLimits`` 5 - Enables or disables the transfer to honour the upload limit set in the session.
``location`` 1 - Local download location.
``peer_limit`` 1 - The peer limit for the torrents.
``priority_high`` 1 - A list of file id's that should have high priority.
``priority_low`` 1 - A list of file id's that should have normal priority.
``priority_normal`` 1 - A list of file id's that should have low priority.
``queuePosition`` 14 - Position of this transfer in its queue.
``seedIdleLimit`` 10 - Seed inactivity limit in minutes.
``seedIdleMode`` 10 - Seed inactivity mode. 0 = Use session limit, 1 = Use transfer limit, 2 = Disable limit.
``seedRatioLimit`` 5 - Seeding ratio.
``seedRatioMode`` 5 - Which ratio to use. 0 = Use session limit, 1 = Use transfer limit, 2 = Disable limit.
``speed_limit_down`` 1 - 5 downloadLimit Set the speed limit for download in Kib/s.
``speed_limit_down_enabled`` 1 - 5 downloadLimited Enable download speed limiter.
``speed_limit_up`` 1 - 5 uploadLimit Set the speed limit for upload in Kib/s.
``speed_limit_up_enabled`` 1 - 5 uploadLimited Enable upload speed limiter.
``trackerAdd`` 10 - Array of string with announce URLs to add.
``trackerRemove`` 10 - Array of ids of trackers to remove.
``trackerReplace`` 10 - Array of (id, url) tuples where the announce URL should be replaced.
``uploadLimit`` 5 - Set the speed limit for upload in Kib/s.
``uploadLimited`` 5 - Enable upload speed limiter.
============================ ===== =============== =======================================================================================
.. NOTE::
transmissionrpc will try to automatically fix argument errors.
"""
args = {}
for key, value in iteritems(kwargs):
argument = make_rpc_name(key)
(arg, val) = argument_value_convert('torrent-set' , argument, value, self.rpc_version)
args[arg] = val
if len(args) > 0:
self._request('torrent-set', args, ids, True, timeout=timeout)
else:
ValueError("No arguments to set") | [
"def",
"change_torrent",
"(",
"self",
",",
"ids",
",",
"timeout",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"args",
"=",
"{",
"}",
"for",
"key",
",",
"value",
"in",
"iteritems",
"(",
"kwargs",
")",
":",
"argument",
"=",
"make_rpc_name",
"(",
"key",
")",
"(",
"arg",
",",
"val",
")",
"=",
"argument_value_convert",
"(",
"'torrent-set'",
",",
"argument",
",",
"value",
",",
"self",
".",
"rpc_version",
")",
"args",
"[",
"arg",
"]",
"=",
"val",
"if",
"len",
"(",
"args",
")",
">",
"0",
":",
"self",
".",
"_request",
"(",
"'torrent-set'",
",",
"args",
",",
"ids",
",",
"True",
",",
"timeout",
"=",
"timeout",
")",
"else",
":",
"ValueError",
"(",
"\"No arguments to set\"",
")"
] | https://github.com/evilhero/mylar/blob/dbee01d7e48e8c717afa01b2de1946c5d0b956cb/lib/transmissionrpc/client.py#L700-L747 |
||
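A minimal usage sketch for the change_torrent record above, assuming a reachable Transmission daemon on localhost; the torrent id and limit values are illustrative.

    import transmissionrpc

    client = transmissionrpc.Client('localhost', port=9091)
    # Keyword names follow the table in the docstring; make_rpc_name maps
    # underscores to the RPC spelling (e.g. files_unwanted -> files-unwanted).
    client.change_torrent(1, uploadLimited=True, uploadLimit=100)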
saltstack/salt | fae5bc757ad0f1716483ce7ae180b451545c2058 | salt/fileserver/minionfs.py | python | file_list | (load) | return ret | Return a list of all files on the file server in a specified environment | Return a list of all files on the file server in a specified environment | [
"Return",
"a",
"list",
"of",
"all",
"files",
"on",
"the",
"file",
"server",
"in",
"a",
"specified",
"environment"
] | def file_list(load):
"""
Return a list of all files on the file server in a specified environment
"""
if "env" in load:
# "env" is not supported; Use "saltenv".
load.pop("env")
if load["saltenv"] not in envs():
return []
mountpoint = salt.utils.url.strip_proto(__opts__["minionfs_mountpoint"])
prefix = load.get("prefix", "").strip("/")
if mountpoint and prefix.startswith(mountpoint + os.path.sep):
prefix = prefix[len(mountpoint + os.path.sep) :]
minions_cache_dir = os.path.join(__opts__["cachedir"], "minions")
minion_dirs = os.listdir(minions_cache_dir)
# If the prefix is not an empty string, then get the minion id from it. The
# minion ID will be the part before the first slash, so if there is no
# slash, this is an invalid path.
if prefix:
tgt_minion, _, prefix = prefix.partition("/")
if not prefix:
# No minion ID in path
return []
# Reassign minion_dirs so we don't unnecessarily walk every minion's
# pushed files
if tgt_minion not in minion_dirs:
log.warning(
"No files found in minionfs cache for minion ID '%s'", tgt_minion
)
return []
minion_dirs = [tgt_minion]
ret = []
for minion in minion_dirs:
if not _is_exposed(minion):
continue
minion_files_dir = os.path.join(minions_cache_dir, minion, "files")
if not os.path.isdir(minion_files_dir):
log.debug(
"minionfs: could not find files directory under %s!",
os.path.join(minions_cache_dir, minion),
)
continue
walk_dir = os.path.join(minion_files_dir, prefix)
# Do not follow links for security reasons
for root, _, files in salt.utils.path.os_walk(walk_dir, followlinks=False):
for fname in files:
# Ignore links for security reasons
if os.path.islink(os.path.join(root, fname)):
continue
relpath = os.path.relpath(os.path.join(root, fname), minion_files_dir)
if relpath.startswith("../"):
continue
rel_fn = os.path.join(mountpoint, minion, relpath)
if not salt.fileserver.is_file_ignored(__opts__, rel_fn):
ret.append(rel_fn)
return ret | [
"def",
"file_list",
"(",
"load",
")",
":",
"if",
"\"env\"",
"in",
"load",
":",
"# \"env\" is not supported; Use \"saltenv\".",
"load",
".",
"pop",
"(",
"\"env\"",
")",
"if",
"load",
"[",
"\"saltenv\"",
"]",
"not",
"in",
"envs",
"(",
")",
":",
"return",
"[",
"]",
"mountpoint",
"=",
"salt",
".",
"utils",
".",
"url",
".",
"strip_proto",
"(",
"__opts__",
"[",
"\"minionfs_mountpoint\"",
"]",
")",
"prefix",
"=",
"load",
".",
"get",
"(",
"\"prefix\"",
",",
"\"\"",
")",
".",
"strip",
"(",
"\"/\"",
")",
"if",
"mountpoint",
"and",
"prefix",
".",
"startswith",
"(",
"mountpoint",
"+",
"os",
".",
"path",
".",
"sep",
")",
":",
"prefix",
"=",
"prefix",
"[",
"len",
"(",
"mountpoint",
"+",
"os",
".",
"path",
".",
"sep",
")",
":",
"]",
"minions_cache_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"__opts__",
"[",
"\"cachedir\"",
"]",
",",
"\"minions\"",
")",
"minion_dirs",
"=",
"os",
".",
"listdir",
"(",
"minions_cache_dir",
")",
"# If the prefix is not an empty string, then get the minion id from it. The",
"# minion ID will be the part before the first slash, so if there is no",
"# slash, this is an invalid path.",
"if",
"prefix",
":",
"tgt_minion",
",",
"_",
",",
"prefix",
"=",
"prefix",
".",
"partition",
"(",
"\"/\"",
")",
"if",
"not",
"prefix",
":",
"# No minion ID in path",
"return",
"[",
"]",
"# Reassign minion_dirs so we don't unnecessarily walk every minion's",
"# pushed files",
"if",
"tgt_minion",
"not",
"in",
"minion_dirs",
":",
"log",
".",
"warning",
"(",
"\"No files found in minionfs cache for minion ID '%s'\"",
",",
"tgt_minion",
")",
"return",
"[",
"]",
"minion_dirs",
"=",
"[",
"tgt_minion",
"]",
"ret",
"=",
"[",
"]",
"for",
"minion",
"in",
"minion_dirs",
":",
"if",
"not",
"_is_exposed",
"(",
"minion",
")",
":",
"continue",
"minion_files_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"minions_cache_dir",
",",
"minion",
",",
"\"files\"",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"minion_files_dir",
")",
":",
"log",
".",
"debug",
"(",
"\"minionfs: could not find files directory under %s!\"",
",",
"os",
".",
"path",
".",
"join",
"(",
"minions_cache_dir",
",",
"minion",
")",
",",
")",
"continue",
"walk_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"minion_files_dir",
",",
"prefix",
")",
"# Do not follow links for security reasons",
"for",
"root",
",",
"_",
",",
"files",
"in",
"salt",
".",
"utils",
".",
"path",
".",
"os_walk",
"(",
"walk_dir",
",",
"followlinks",
"=",
"False",
")",
":",
"for",
"fname",
"in",
"files",
":",
"# Ignore links for security reasons",
"if",
"os",
".",
"path",
".",
"islink",
"(",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"fname",
")",
")",
":",
"continue",
"relpath",
"=",
"os",
".",
"path",
".",
"relpath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"fname",
")",
",",
"minion_files_dir",
")",
"if",
"relpath",
".",
"startswith",
"(",
"\"../\"",
")",
":",
"continue",
"rel_fn",
"=",
"os",
".",
"path",
".",
"join",
"(",
"mountpoint",
",",
"minion",
",",
"relpath",
")",
"if",
"not",
"salt",
".",
"fileserver",
".",
"is_file_ignored",
"(",
"__opts__",
",",
"rel_fn",
")",
":",
"ret",
".",
"append",
"(",
"rel_fn",
")",
"return",
"ret"
] | https://github.com/saltstack/salt/blob/fae5bc757ad0f1716483ce7ae180b451545c2058/salt/fileserver/minionfs.py#L225-L284 |
|
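An illustrative call shape for the file_list record above, assuming a master with the minionfs fileserver enabled; the saltenv, minion id, and prefix values are placeholders.

    # The load dict mirrors what the fileserver machinery passes in.
    load = {
        'saltenv': 'base',        # must be one of envs()
        'prefix': 'minion1/etc',  # minion id, then a path under its pushed files
    }
    paths = file_list(load)       # e.g. ['minion1/etc/hosts', ...]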
magenta/magenta | be6558f1a06984faff6d6949234f5fe9ad0ffdb5 | magenta/models/coconet/lib_hparams.py | python | Hyperparameters.use_softmax_loss | (self) | [] | def use_softmax_loss(self):
if not self.separate_instruments and (self.num_instruments > 1 or
self.num_instruments == 0):
return False
else:
return True | [
"def",
"use_softmax_loss",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"separate_instruments",
"and",
"(",
"self",
".",
"num_instruments",
">",
"1",
"or",
"self",
".",
"num_instruments",
"==",
"0",
")",
":",
"return",
"False",
"else",
":",
"return",
"True"
] | https://github.com/magenta/magenta/blob/be6558f1a06984faff6d6949234f5fe9ad0ffdb5/magenta/models/coconet/lib_hparams.py#L193-L198 |
||||
thinkle/gourmet | 8af29c8ded24528030e5ae2ea3461f61c1e5a575 | gourmet/recindex.py | python | RecIndex.search_entry_activate_cb | (self, *args) | [] | def search_entry_activate_cb (self, *args):
if self.rmodel._get_length_()==1:
self.rec_tree_select_rec()
elif self.srchentry.get_text():
if not self.search_as_you_type:
self.search()
GObject.idle_add(lambda *args: self.limit_search())
else:
self.limit_search() | [
"def",
"search_entry_activate_cb",
"(",
"self",
",",
"*",
"args",
")",
":",
"if",
"self",
".",
"rmodel",
".",
"_get_length_",
"(",
")",
"==",
"1",
":",
"self",
".",
"rec_tree_select_rec",
"(",
")",
"elif",
"self",
".",
"srchentry",
".",
"get_text",
"(",
")",
":",
"if",
"not",
"self",
".",
"search_as_you_type",
":",
"self",
".",
"search",
"(",
")",
"GObject",
".",
"idle_add",
"(",
"lambda",
"*",
"args",
":",
"self",
".",
"limit_search",
"(",
")",
")",
"else",
":",
"self",
".",
"limit_search",
"(",
")"
] | https://github.com/thinkle/gourmet/blob/8af29c8ded24528030e5ae2ea3461f61c1e5a575/gourmet/recindex.py#L220-L228 |
||||
lk-geimfari/mimesis | 36653b49f28719c0a2aa20fef6c6df3911811b32 | mimesis/providers/internet.py | python | Internet.ip_v4_object | (self) | return IPv4Address(
self.random.randint(0, self._MAX_IPV4),
) | Generate random :py:class:`ipaddress.IPv4Address` object.
:return: :py:class:`ipaddress.IPv4Address` object. | Generate random :py:class:`ipaddress.IPv4Address` object. | [
"Generate",
"random",
":",
"py",
":",
"class",
":",
"ipaddress",
".",
"IPv4Address",
"object",
"."
] | def ip_v4_object(self) -> IPv4Address:
"""Generate random :py:class:`ipaddress.IPv4Address` object.
:return: :py:class:`ipaddress.IPv4Address` object.
"""
return IPv4Address(
self.random.randint(0, self._MAX_IPV4),
) | [
"def",
"ip_v4_object",
"(",
"self",
")",
"->",
"IPv4Address",
":",
"return",
"IPv4Address",
"(",
"self",
".",
"random",
".",
"randint",
"(",
"0",
",",
"self",
".",
"_MAX_IPV4",
")",
",",
")"
] | https://github.com/lk-geimfari/mimesis/blob/36653b49f28719c0a2aa20fef6c6df3911811b32/mimesis/providers/internet.py#L94-L101 |
|
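A short usage sketch for the ip_v4_object record above; the provider is exposed as mimesis.Internet.

    from mimesis import Internet

    internet = Internet()
    addr = internet.ip_v4_object()     # an ipaddress.IPv4Address instance
    print(addr, addr.is_private)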
jgorset/fandjango | 3c350669a48dd226065690f10802a13b39c0beef | fandjango/models.py | python | User.graph | (self) | return GraphAPI(self.oauth_token.token) | A ``Facepy.GraphAPI`` instance initialized with the user's access token (See `Facepy`_).
.. _Facepy: http://github.com/jgorset/facepy | A ``Facepy.GraphAPI`` instance initialized with the user's access token (See `Facepy`_). | [
"A",
"Facepy",
".",
"GraphAPI",
"instance",
"initialized",
"with",
"the",
"user",
"s",
"access",
"token",
"(",
"See",
"Facepy",
"_",
")",
"."
] | def graph(self):
"""
A ``Facepy.GraphAPI`` instance initialized with the user's access token (See `Facepy`_).
.. _Facepy: http://github.com/jgorset/facepy
"""
return GraphAPI(self.oauth_token.token) | [
"def",
"graph",
"(",
"self",
")",
":",
"return",
"GraphAPI",
"(",
"self",
".",
"oauth_token",
".",
"token",
")"
] | https://github.com/jgorset/fandjango/blob/3c350669a48dd226065690f10802a13b39c0beef/fandjango/models.py#L118-L124 |
|
vlachoudis/bCNC | 67126b4894dabf6579baf47af8d0f9b7de35e6e3 | bCNC/lib/svg_elements.py | python | Viewbox.property_by_object | (self, obj) | [] | def property_by_object(self, obj):
self.x = obj.x
self.y = obj.y
self.width = obj.width
self.height = obj.height
self.preserve_aspect_ratio = obj.preserve_aspect_ratio | [
"def",
"property_by_object",
"(",
"self",
",",
"obj",
")",
":",
"self",
".",
"x",
"=",
"obj",
".",
"x",
"self",
".",
"y",
"=",
"obj",
".",
"y",
"self",
".",
"width",
"=",
"obj",
".",
"width",
"self",
".",
"height",
"=",
"obj",
".",
"height",
"self",
".",
"preserve_aspect_ratio",
"=",
"obj",
".",
"preserve_aspect_ratio"
] | https://github.com/vlachoudis/bCNC/blob/67126b4894dabf6579baf47af8d0f9b7de35e6e3/bCNC/lib/svg_elements.py#L2786-L2791 |
||||
spectacles/CodeComplice | 8ca8ee4236f72b58caa4209d2fbd5fa56bd31d62 | libs/codeintel2/css_linter.py | python | _CSSParser._parse_identifier_list | (self, classifier, separator) | [] | def _parse_identifier_list(self, classifier, separator):
while True:
tok = self._tokenizer.get_next_token()
self._check_tag_tok(tok, 9)
if not self._classifier.is_operator(tok, separator):
self._tokenizer.put_back(tok)
break
tok = self._tokenizer.get_next_token()
if not (classifier(tok) and self._lex_identifier(tok)):
self._add_result("expecting an identifier", tok)
return self._parser_putback_recover(tok) | [
"def",
"_parse_identifier_list",
"(",
"self",
",",
"classifier",
",",
"separator",
")",
":",
"while",
"True",
":",
"tok",
"=",
"self",
".",
"_tokenizer",
".",
"get_next_token",
"(",
")",
"self",
".",
"_check_tag_tok",
"(",
"tok",
",",
"9",
")",
"if",
"not",
"self",
".",
"_classifier",
".",
"is_operator",
"(",
"tok",
",",
"separator",
")",
":",
"self",
".",
"_tokenizer",
".",
"put_back",
"(",
"tok",
")",
"break",
"tok",
"=",
"self",
".",
"_tokenizer",
".",
"get_next_token",
"(",
")",
"if",
"not",
"(",
"classifier",
"(",
"tok",
")",
"and",
"self",
".",
"_lex_identifier",
"(",
"tok",
")",
")",
":",
"self",
".",
"_add_result",
"(",
"\"expecting an identifier\"",
",",
"tok",
")",
"return",
"self",
".",
"_parser_putback_recover",
"(",
"tok",
")"
] | https://github.com/spectacles/CodeComplice/blob/8ca8ee4236f72b58caa4209d2fbd5fa56bd31d62/libs/codeintel2/css_linter.py#L1515-L1525 |
||||
marinho/geraldo | 868ebdce67176d9b6205cddc92476f642c783fff | site/newsite/site-geraldo/django/core/management/__init__.py | python | ManagementUtility.execute | (self) | Given the command-line arguments, this figures out which subcommand is
being run, creates a parser appropriate to that command, and runs it. | Given the command-line arguments, this figures out which subcommand is
being run, creates a parser appropriate to that command, and runs it. | [
"Given",
"the",
"command",
"-",
"line",
"arguments",
"this",
"figures",
"out",
"which",
"subcommand",
"is",
"being",
"run",
"creates",
"a",
"parser",
"appropriate",
"to",
"that",
"command",
"and",
"runs",
"it",
"."
] | def execute(self):
"""
Given the command-line arguments, this figures out which subcommand is
being run, creates a parser appropriate to that command, and runs it.
"""
# Preprocess options to extract --settings and --pythonpath.
# These options could affect the commands that are available, so they
# must be processed early.
parser = LaxOptionParser(usage="%prog subcommand [options] [args]",
version=get_version(),
option_list=BaseCommand.option_list)
try:
options, args = parser.parse_args(self.argv)
handle_default_options(options)
except:
pass # Ignore any option errors at this point.
try:
subcommand = self.argv[1]
except IndexError:
sys.stderr.write("Type '%s help' for usage.\n" % self.prog_name)
sys.exit(1)
if subcommand == 'help':
if len(args) > 2:
self.fetch_command(args[2]).print_help(self.prog_name, args[2])
else:
parser.print_lax_help()
sys.stderr.write(self.main_help_text() + '\n')
sys.exit(1)
# Special-cases: We want 'django-admin.py --version' and
# 'django-admin.py --help' to work, for backwards compatibility.
elif self.argv[1:] == ['--version']:
# LaxOptionParser already takes care of printing the version.
pass
elif self.argv[1:] == ['--help']:
parser.print_lax_help()
sys.stderr.write(self.main_help_text() + '\n')
else:
self.fetch_command(subcommand).run_from_argv(self.argv) | [
"def",
"execute",
"(",
"self",
")",
":",
"# Preprocess options to extract --settings and --pythonpath.",
"# These options could affect the commands that are available, so they",
"# must be processed early.",
"parser",
"=",
"LaxOptionParser",
"(",
"usage",
"=",
"\"%prog subcommand [options] [args]\"",
",",
"version",
"=",
"get_version",
"(",
")",
",",
"option_list",
"=",
"BaseCommand",
".",
"option_list",
")",
"try",
":",
"options",
",",
"args",
"=",
"parser",
".",
"parse_args",
"(",
"self",
".",
"argv",
")",
"handle_default_options",
"(",
"options",
")",
"except",
":",
"pass",
"# Ignore any option errors at this point.",
"try",
":",
"subcommand",
"=",
"self",
".",
"argv",
"[",
"1",
"]",
"except",
"IndexError",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"\"Type '%s help' for usage.\\n\"",
"%",
"self",
".",
"prog_name",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"if",
"subcommand",
"==",
"'help'",
":",
"if",
"len",
"(",
"args",
")",
">",
"2",
":",
"self",
".",
"fetch_command",
"(",
"args",
"[",
"2",
"]",
")",
".",
"print_help",
"(",
"self",
".",
"prog_name",
",",
"args",
"[",
"2",
"]",
")",
"else",
":",
"parser",
".",
"print_lax_help",
"(",
")",
"sys",
".",
"stderr",
".",
"write",
"(",
"self",
".",
"main_help_text",
"(",
")",
"+",
"'\\n'",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"# Special-cases: We want 'django-admin.py --version' and",
"# 'django-admin.py --help' to work, for backwards compatibility.",
"elif",
"self",
".",
"argv",
"[",
"1",
":",
"]",
"==",
"[",
"'--version'",
"]",
":",
"# LaxOptionParser already takes care of printing the version.",
"pass",
"elif",
"self",
".",
"argv",
"[",
"1",
":",
"]",
"==",
"[",
"'--help'",
"]",
":",
"parser",
".",
"print_lax_help",
"(",
")",
"sys",
".",
"stderr",
".",
"write",
"(",
"self",
".",
"main_help_text",
"(",
")",
"+",
"'\\n'",
")",
"else",
":",
"self",
".",
"fetch_command",
"(",
"subcommand",
")",
".",
"run_from_argv",
"(",
"self",
".",
"argv",
")"
] | https://github.com/marinho/geraldo/blob/868ebdce67176d9b6205cddc92476f642c783fff/site/newsite/site-geraldo/django/core/management/__init__.py#L256-L295 |
||
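A hedged sketch of how this vendored Django ManagementUtility is typically driven (django-admin.py style), assuming the vendored package is importable; argv handling follows the parsing in the record above.

    import sys
    from django.core.management import ManagementUtility

    utility = ManagementUtility(sys.argv)
    utility.execute()   # dispatches to 'help', '--version', or a subcommand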
munki/munki | 4b778f0e5a73ed3df9eb62d93c5227efb29eebe3 | code/client/munkilib/keychain.py | python | make_client_keychain | (cert_info=None) | Builds a client cert keychain from existing client certs | Builds a client cert keychain from existing client certs | [
"Builds",
"a",
"client",
"cert",
"keychain",
"from",
"existing",
"client",
"certs"
] | def make_client_keychain(cert_info=None):
'''Builds a client cert keychain from existing client certs'''
if not cert_info:
# just grab data from Munki's preferences/defaults
cert_info = get_munki_client_cert_info()
client_cert_path = cert_info['client_cert_path']
client_key_path = cert_info['client_key_path']
site_urls = cert_info['site_urls']
if not client_cert_path:
# no client, so nothing to do
display.display_debug1(
'No client cert info provided, '
'so no client keychain will be created.')
return
else:
display.display_debug1('Client cert path: %s', client_cert_path)
display.display_debug1('Client key path: %s', client_key_path)
# to do some of the following options correctly, we need to be root
# and have root's home.
# check to see if we're root
if os.geteuid() != 0:
display.display_error(
'Can\'t make our client keychain unless we are root!')
return
# switch HOME if needed to root's home
original_home = os.environ.get('HOME')
if original_home:
os.environ['HOME'] = os.path.expanduser('~root')
keychain_pass = (
prefs.pref('KeychainPassword') or DEFAULT_KEYCHAIN_PASSWORD)
abs_keychain_path = get_keychain_path()
if os.path.exists(abs_keychain_path):
os.unlink(abs_keychain_path)
if not os.path.exists(os.path.dirname(abs_keychain_path)):
os.makedirs(os.path.dirname(abs_keychain_path), 0o700)
# create a new keychain
display.display_debug1('Creating client keychain...')
try:
output = security('create-keychain',
'-p', keychain_pass, abs_keychain_path)
if output:
display.display_debug2(output)
except SecurityError as err:
display.display_error(
'Could not create keychain %s: %s', abs_keychain_path, err)
if original_home:
# switch it back
os.environ['HOME'] = original_home
return
# Ensure the keychain is in the search path and unlocked
added_keychain = add_to_keychain_list(abs_keychain_path)
unlock_and_set_nonlocking(abs_keychain_path)
# Add client cert (and optionally key)
client_cert_file = None
combined_pem = None
if client_key_path:
# combine client cert and private key before we import
cert_data = read_file(client_cert_path)
key_data = read_file(client_key_path)
# write the combined data
combined_pem = os.path.join(osutils.tmpdir(), 'combined.pem')
if write_file(cert_data + key_data, combined_pem):
client_cert_file = combined_pem
else:
display.display_error(
'Could not combine client cert and key for import!')
else:
client_cert_file = client_cert_path
if client_cert_file:
# client_cert_file is combined_pem or client_cert_file
display.display_debug2('Importing client cert and key...')
try:
output = security(
'import', client_cert_file, '-A', '-k', abs_keychain_path)
if output:
display.display_debug2(output)
except SecurityError as err:
display.display_error(
'Could not import %s: %s', client_cert_file, err)
if combined_pem:
# we created this; we should clean it up
try:
os.unlink(combined_pem)
except (OSError, IOError):
pass
id_hash = pem_cert_sha1_digest(client_cert_path)
if not id_hash:
display.display_error(
'Cannot create keychain identity preference.')
else:
# set up identity preference(s) linking the identity (cert and key)
# to the various urls
display.display_debug1('Creating identity preferences...')
try:
output = security('default-keychain')
if output:
display.display_debug2('Default keychain is %s', output)
# One is defined, remember the path
default_keychain = [
x.strip().strip('"')
for x in output.split('\n') if x.strip()][0]
except SecurityError as err:
# error raised if there is no default
default_keychain = None
# Temporarily assign the default keychain to ours
try:
output = security(
'default-keychain', '-s', abs_keychain_path)
if output:
display.display_debug2(
'Attempting to set default keychain to %s resulted in: %s',
abs_keychain_path, output)
except SecurityError as err:
display.display_error(
'Could not set default keychain to %s failed: %s'
% (abs_keychain_path, err))
default_keychain = None
# Create the identity preferences
for url in site_urls:
try:
display.display_debug2(
'Adding identity preference for %s...' % url)
output = security(
'set-identity-preference',
'-s', url, '-Z', id_hash, abs_keychain_path)
if output:
display.display_debug2(
'security set-identity-preference output: ' + output)
except SecurityError as err:
display.display_error(
'Setting identity preference for %s failed: %s'
% (url, err))
if default_keychain:
# We originally had a different default, set it back
output = security(
'default-keychain', '-s', default_keychain)
if output:
display.display_debug2(
'Attempting to set default keychain to %s resulted in: %s',
default_keychain, output)
# we're done, clean up.
if added_keychain:
remove_from_keychain_list(abs_keychain_path)
if original_home:
# switch it back
os.environ['HOME'] = original_home
display.display_info(
'Completed creation of client keychain at %s' % abs_keychain_path) | [
"def",
"make_client_keychain",
"(",
"cert_info",
"=",
"None",
")",
":",
"if",
"not",
"cert_info",
":",
"# just grab data from Munki's preferences/defaults",
"cert_info",
"=",
"get_munki_client_cert_info",
"(",
")",
"client_cert_path",
"=",
"cert_info",
"[",
"'client_cert_path'",
"]",
"client_key_path",
"=",
"cert_info",
"[",
"'client_key_path'",
"]",
"site_urls",
"=",
"cert_info",
"[",
"'site_urls'",
"]",
"if",
"not",
"client_cert_path",
":",
"# no client, so nothing to do",
"display",
".",
"display_debug1",
"(",
"'No client cert info provided, '",
"'so no client keychain will be created.'",
")",
"return",
"else",
":",
"display",
".",
"display_debug1",
"(",
"'Client cert path: %s'",
",",
"client_cert_path",
")",
"display",
".",
"display_debug1",
"(",
"'Client key path: %s'",
",",
"client_key_path",
")",
"# to do some of the following options correctly, we need to be root",
"# and have root's home.",
"# check to see if we're root",
"if",
"os",
".",
"geteuid",
"(",
")",
"!=",
"0",
":",
"display",
".",
"display_error",
"(",
"'Can\\'t make our client keychain unless we are root!'",
")",
"return",
"# switch HOME if needed to root's home",
"original_home",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'HOME'",
")",
"if",
"original_home",
":",
"os",
".",
"environ",
"[",
"'HOME'",
"]",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"'~root'",
")",
"keychain_pass",
"=",
"(",
"prefs",
".",
"pref",
"(",
"'KeychainPassword'",
")",
"or",
"DEFAULT_KEYCHAIN_PASSWORD",
")",
"abs_keychain_path",
"=",
"get_keychain_path",
"(",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"abs_keychain_path",
")",
":",
"os",
".",
"unlink",
"(",
"abs_keychain_path",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"abs_keychain_path",
")",
")",
":",
"os",
".",
"makedirs",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"abs_keychain_path",
")",
",",
"0o700",
")",
"# create a new keychain",
"display",
".",
"display_debug1",
"(",
"'Creating client keychain...'",
")",
"try",
":",
"output",
"=",
"security",
"(",
"'create-keychain'",
",",
"'-p'",
",",
"keychain_pass",
",",
"abs_keychain_path",
")",
"if",
"output",
":",
"display",
".",
"display_debug2",
"(",
"output",
")",
"except",
"SecurityError",
"as",
"err",
":",
"display",
".",
"display_error",
"(",
"'Could not create keychain %s: %s'",
",",
"abs_keychain_path",
",",
"err",
")",
"if",
"original_home",
":",
"# switch it back",
"os",
".",
"environ",
"[",
"'HOME'",
"]",
"=",
"original_home",
"return",
"# Ensure the keychain is in the search path and unlocked",
"added_keychain",
"=",
"add_to_keychain_list",
"(",
"abs_keychain_path",
")",
"unlock_and_set_nonlocking",
"(",
"abs_keychain_path",
")",
"# Add client cert (and optionally key)",
"client_cert_file",
"=",
"None",
"combined_pem",
"=",
"None",
"if",
"client_key_path",
":",
"# combine client cert and private key before we import",
"cert_data",
"=",
"read_file",
"(",
"client_cert_path",
")",
"key_data",
"=",
"read_file",
"(",
"client_key_path",
")",
"# write the combined data",
"combined_pem",
"=",
"os",
".",
"path",
".",
"join",
"(",
"osutils",
".",
"tmpdir",
"(",
")",
",",
"'combined.pem'",
")",
"if",
"write_file",
"(",
"cert_data",
"+",
"key_data",
",",
"combined_pem",
")",
":",
"client_cert_file",
"=",
"combined_pem",
"else",
":",
"display",
".",
"display_error",
"(",
"'Could not combine client cert and key for import!'",
")",
"else",
":",
"client_cert_file",
"=",
"client_cert_path",
"if",
"client_cert_file",
":",
"# client_cert_file is combined_pem or client_cert_file",
"display",
".",
"display_debug2",
"(",
"'Importing client cert and key...'",
")",
"try",
":",
"output",
"=",
"security",
"(",
"'import'",
",",
"client_cert_file",
",",
"'-A'",
",",
"'-k'",
",",
"abs_keychain_path",
")",
"if",
"output",
":",
"display",
".",
"display_debug2",
"(",
"output",
")",
"except",
"SecurityError",
"as",
"err",
":",
"display",
".",
"display_error",
"(",
"'Could not import %s: %s'",
",",
"client_cert_file",
",",
"err",
")",
"if",
"combined_pem",
":",
"# we created this; we should clean it up",
"try",
":",
"os",
".",
"unlink",
"(",
"combined_pem",
")",
"except",
"(",
"OSError",
",",
"IOError",
")",
":",
"pass",
"id_hash",
"=",
"pem_cert_sha1_digest",
"(",
"client_cert_path",
")",
"if",
"not",
"id_hash",
":",
"display",
".",
"display_error",
"(",
"'Cannot create keychain identity preference.'",
")",
"else",
":",
"# set up identity preference(s) linking the identity (cert and key)",
"# to the various urls",
"display",
".",
"display_debug1",
"(",
"'Creating identity preferences...'",
")",
"try",
":",
"output",
"=",
"security",
"(",
"'default-keychain'",
")",
"if",
"output",
":",
"display",
".",
"display_debug2",
"(",
"'Default keychain is %s'",
",",
"output",
")",
"# One is defined, remember the path",
"default_keychain",
"=",
"[",
"x",
".",
"strip",
"(",
")",
".",
"strip",
"(",
"'\"'",
")",
"for",
"x",
"in",
"output",
".",
"split",
"(",
"'\\n'",
")",
"if",
"x",
".",
"strip",
"(",
")",
"]",
"[",
"0",
"]",
"except",
"SecurityError",
"as",
"err",
":",
"# error raised if there is no default",
"default_keychain",
"=",
"None",
"# Temporarily assign the default keychain to ours",
"try",
":",
"output",
"=",
"security",
"(",
"'default-keychain'",
",",
"'-s'",
",",
"abs_keychain_path",
")",
"if",
"output",
":",
"display",
".",
"display_debug2",
"(",
"'Attempting to set default keychain to %s resulted in: %s'",
",",
"abs_keychain_path",
",",
"output",
")",
"except",
"SecurityError",
"as",
"err",
":",
"display",
".",
"display_error",
"(",
"'Could not set default keychain to %s failed: %s'",
"%",
"(",
"abs_keychain_path",
",",
"err",
")",
")",
"default_keychain",
"=",
"None",
"# Create the identity preferences",
"for",
"url",
"in",
"site_urls",
":",
"try",
":",
"display",
".",
"display_debug2",
"(",
"'Adding identity preference for %s...'",
"%",
"url",
")",
"output",
"=",
"security",
"(",
"'set-identity-preference'",
",",
"'-s'",
",",
"url",
",",
"'-Z'",
",",
"id_hash",
",",
"abs_keychain_path",
")",
"if",
"output",
":",
"display",
".",
"display_debug2",
"(",
"'security set-identity-preference output: '",
"+",
"output",
")",
"except",
"SecurityError",
"as",
"err",
":",
"display",
".",
"display_error",
"(",
"'Setting identity preference for %s failed: %s'",
"%",
"(",
"url",
",",
"err",
")",
")",
"if",
"default_keychain",
":",
"# We originally had a different default, set it back",
"output",
"=",
"security",
"(",
"'default-keychain'",
",",
"'-s'",
",",
"default_keychain",
")",
"if",
"output",
":",
"display",
".",
"display_debug2",
"(",
"'Attempting to set default keychain to %s resulted in: %s'",
",",
"default_keychain",
",",
"output",
")",
"# we're done, clean up.",
"if",
"added_keychain",
":",
"remove_from_keychain_list",
"(",
"abs_keychain_path",
")",
"if",
"original_home",
":",
"# switch it back",
"os",
".",
"environ",
"[",
"'HOME'",
"]",
"=",
"original_home",
"display",
".",
"display_info",
"(",
"'Completed creation of client keychain at %s'",
"%",
"abs_keychain_path",
")"
] | https://github.com/munki/munki/blob/4b778f0e5a73ed3df9eb62d93c5227efb29eebe3/code/client/munkilib/keychain.py#L215-L371 |
||
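An illustrative cert_info mapping for the make_client_keychain record above, based on the keys the function reads; the paths and URL are placeholders, and the call must run as root.

    cert_info = {
        'client_cert_path': '/Library/Managed Installs/certs/client.pem',
        'client_key_path': '/Library/Managed Installs/certs/client.key',
        'site_urls': ['https://munki.example.com/repo/'],
    }
    make_client_keychain(cert_info)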
pulp/pulp | a0a28d804f997b6f81c391378aff2e4c90183df9 | server/pulp/server/managers/content/orphan.py | python | OrphanManager.generate_orphans_by_type_with_unit_keys | (content_type_id) | | Return a generator of all orphaned content units of the given content type.
Each content unit will contain the fields specified in the content type
definition's search indexes.
:param content_type_id: id of the content type
:type content_type_id: basestring
:return: generator of orphaned content units for the given content type
:rtype: generator | Return a generator of all orphaned content units of the given content type. | [
"Return",
"an",
"generator",
"of",
"all",
"orphaned",
"content",
"units",
"of",
"the",
"given",
"content",
"type",
"."
] | def generate_orphans_by_type_with_unit_keys(content_type_id):
"""
Return a generator of all orphaned content units of the given content type.
Each content unit will contain the fields specified in the content type
definition's search indexes.
:param content_type_id: id of the content type
:type content_type_id: basestring
:return: generator of orphaned content units for the given content type
:rtype: generator
"""
try:
unit_key_fields = units_controller.get_unit_key_fields_for_type(content_type_id)
except ValueError:
raise MissingResource(content_type_id=content_type_id)
fields = ['_id', '_content_type_id']
fields.extend(unit_key_fields)
for content_unit in OrphanManager.generate_orphans_by_type(content_type_id, fields):
yield content_unit | [
"def",
"generate_orphans_by_type_with_unit_keys",
"(",
"content_type_id",
")",
":",
"try",
":",
"unit_key_fields",
"=",
"units_controller",
".",
"get_unit_key_fields_for_type",
"(",
"content_type_id",
")",
"except",
"ValueError",
":",
"raise",
"MissingResource",
"(",
"content_type_id",
"=",
"content_type_id",
")",
"fields",
"=",
"[",
"'_id'",
",",
"'_content_type_id'",
"]",
"fields",
".",
"extend",
"(",
"unit_key_fields",
")",
"for",
"content_unit",
"in",
"OrphanManager",
".",
"generate_orphans_by_type",
"(",
"content_type_id",
",",
"fields",
")",
":",
"yield",
"content_unit"
] | https://github.com/pulp/pulp/blob/a0a28d804f997b6f81c391378aff2e4c90183df9/server/pulp/server/managers/content/orphan.py#L117-L137 |
||
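A minimal consumption sketch for the generator record above; 'rpm' is an example content type id, and the call assumes a running Pulp server context with its database available.

    for unit in OrphanManager.generate_orphans_by_type_with_unit_keys('rpm'):
        # Each unit carries _id, _content_type_id, and the type's unit key fields.
        print(unit['_id'], unit['_content_type_id'])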
andresriancho/w3af | cd22e5252243a87aaa6d0ddea47cf58dacfe00a9 | w3af/plugins/grep/form_autocomplete.py | python | form_autocomplete.grep | (self, request, response) | Plugin entry point, test existence of HTML auto-completable forms
containing password-type inputs. Either form's <autocomplete> attribute
is not present or is 'off'.
:param request: The HTTP request object.
:param response: The HTTP response object
:return: None, all results are saved in the kb. | Plugin entry point, test existence of HTML auto-completable forms
containing password-type inputs. Either form's <autocomplete> attribute
is not present or is 'off'. | [
"Plugin",
"entry",
"point",
"test",
"existence",
"of",
"HTML",
"auto",
"-",
"completable",
"forms",
"containing",
"password",
"-",
"type",
"inputs",
".",
"Either",
"form",
"s",
"<autocomplete",
">",
"attribute",
"is",
"not",
"present",
"or",
"is",
"off",
"."
] | def grep(self, request, response):
"""
Plugin entry point, test existence of HTML auto-completable forms
containing password-type inputs. Either form's <autocomplete> attribute
is not present or is 'off'.
:param request: The HTTP request object.
:param response: The HTTP response object
:return: None, all results are saved in the kb.
"""
if not response.is_text_or_html():
return
try:
doc_parser = parser_cache.dpc.get_document_parser_for(response)
except BaseFrameworkException:
return
for form in doc_parser.get_forms():
# Only analyze forms which have autocomplete enabled at <form>
if form.get_autocomplete() is False:
continue
for form_field_list in form.meta.itervalues():
for form_field in form_field_list:
if form_field.input_type != INPUT_TYPE_PASSWD:
continue
if not form_field.autocomplete:
continue
url = response.get_url()
desc = ('The URL: "%s" has a "<form>" element with '
'auto-complete enabled.')
desc %= url
i = Info('Auto-completable form', desc, response.id,
self.get_name())
i.add_to_highlight('autocomplete')
i.set_url(url)
i[AutoCompleteInfoSet.ITAG] = form.get_action().uri2url()
self.kb_append_uniq_group(self, 'form_autocomplete', i,
group_klass=AutoCompleteInfoSet)
break | [
"def",
"grep",
"(",
"self",
",",
"request",
",",
"response",
")",
":",
"if",
"not",
"response",
".",
"is_text_or_html",
"(",
")",
":",
"return",
"try",
":",
"doc_parser",
"=",
"parser_cache",
".",
"dpc",
".",
"get_document_parser_for",
"(",
"response",
")",
"except",
"BaseFrameworkException",
":",
"return",
"for",
"form",
"in",
"doc_parser",
".",
"get_forms",
"(",
")",
":",
"# Only analyze forms which have autocomplete enabled at <form>",
"if",
"form",
".",
"get_autocomplete",
"(",
")",
"is",
"False",
":",
"continue",
"for",
"form_field_list",
"in",
"form",
".",
"meta",
".",
"itervalues",
"(",
")",
":",
"for",
"form_field",
"in",
"form_field_list",
":",
"if",
"form_field",
".",
"input_type",
"!=",
"INPUT_TYPE_PASSWD",
":",
"continue",
"if",
"not",
"form_field",
".",
"autocomplete",
":",
"continue",
"url",
"=",
"response",
".",
"get_url",
"(",
")",
"desc",
"=",
"(",
"'The URL: \"%s\" has a \"<form>\" element with '",
"'auto-complete enabled.'",
")",
"desc",
"%=",
"url",
"i",
"=",
"Info",
"(",
"'Auto-completable form'",
",",
"desc",
",",
"response",
".",
"id",
",",
"self",
".",
"get_name",
"(",
")",
")",
"i",
".",
"add_to_highlight",
"(",
"'autocomplete'",
")",
"i",
".",
"set_url",
"(",
"url",
")",
"i",
"[",
"AutoCompleteInfoSet",
".",
"ITAG",
"]",
"=",
"form",
".",
"get_action",
"(",
")",
".",
"uri2url",
"(",
")",
"self",
".",
"kb_append_uniq_group",
"(",
"self",
",",
"'form_autocomplete'",
",",
"i",
",",
"group_klass",
"=",
"AutoCompleteInfoSet",
")",
"break"
] | https://github.com/andresriancho/w3af/blob/cd22e5252243a87aaa6d0ddea47cf58dacfe00a9/w3af/plugins/grep/form_autocomplete.py#L39-L84 |
||
aboul3la/Sublist3r | 729d649ec5370730172bf6f5314aafd68c874124 | subbrute/subbrute.py | python | trace | (*args, **kwargs) | [] | def trace(*args, **kwargs):
if verbose:
for a in args:
sys.stderr.write(str(a))
sys.stderr.write(" ")
sys.stderr.write("\n") | [
"def",
"trace",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"verbose",
":",
"for",
"a",
"in",
"args",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"str",
"(",
"a",
")",
")",
"sys",
".",
"stderr",
".",
"write",
"(",
"\" \"",
")",
"sys",
".",
"stderr",
".",
"write",
"(",
"\"\\n\"",
")"
] | https://github.com/aboul3la/Sublist3r/blob/729d649ec5370730172bf6f5314aafd68c874124/subbrute/subbrute.py#L516-L521 |
||||
mpenning/ciscoconfparse | a6a176e6ceac7c5f3e974272fa70273476ba84a3 | ciscoconfparse/models_cisco.py | python | BaseIOSIntfLine._build_abbvs | (self) | return retval | r"""Build a set of valid abbreviations (lowercased) for the interface | r"""Build a set of valid abbreviations (lowercased) for the interface | [
"r",
"Build",
"a",
"set",
"of",
"valid",
"abbreviations",
"(",
"lowercased",
")",
"for",
"the",
"interface"
] | def _build_abbvs(self):
r"""Build a set of valid abbreviations (lowercased) for the interface"""
retval = set([])
port_type_chars = self.port_type.lower()
subinterface_number = self.subinterface_number
for sep in ["", " "]:
for ii in range(1, len(port_type_chars) + 1):
retval.add(
"{0}{1}{2}".format(port_type_chars[0:ii], sep, subinterface_number)
)
return retval | [
"def",
"_build_abbvs",
"(",
"self",
")",
":",
"retval",
"=",
"set",
"(",
"[",
"]",
")",
"port_type_chars",
"=",
"self",
".",
"port_type",
".",
"lower",
"(",
")",
"subinterface_number",
"=",
"self",
".",
"subinterface_number",
"for",
"sep",
"in",
"[",
"\"\"",
",",
"\" \"",
"]",
":",
"for",
"ii",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"port_type_chars",
")",
"+",
"1",
")",
":",
"retval",
".",
"add",
"(",
"\"{0}{1}{2}\"",
".",
"format",
"(",
"port_type_chars",
"[",
"0",
":",
"ii",
"]",
",",
"sep",
",",
"subinterface_number",
")",
")",
"return",
"retval"
] | https://github.com/mpenning/ciscoconfparse/blob/a6a176e6ceac7c5f3e974272fa70273476ba84a3/ciscoconfparse/models_cisco.py#L383-L393 |
|
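A standalone sketch of the abbreviation logic in the _build_abbvs record above, lifted out of the class to show what it yields for a hypothetical interface.

    def build_abbvs(port_type, subinterface_number):
        # Every prefix of the lowercased port type, with and without a space.
        retval = set()
        chars = port_type.lower()
        for sep in ["", " "]:
            for ii in range(1, len(chars) + 1):
                retval.add("{0}{1}{2}".format(chars[0:ii], sep, subinterface_number))
        return retval

    # -> {'s1/0', 's 1/0', 'se1/0', ..., 'serial 1/0'}
    print(sorted(build_abbvs("Serial", "1/0")))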
architv/soccer-cli | 04932b677eed1fef032eaccffebabfcaeec2d48b | soccer/writers.py | python | Csv.league_scores | (self, total_data, time, show_upcoming, use_12_hour_format) | Store output of fixtures based on league and time to a CSV file | Store output of fixtures based on league and time to a CSV file | [
"Store",
"output",
"of",
"fixtures",
"based",
"on",
"league",
"and",
"time",
"to",
"a",
"CSV",
"file"
] | def league_scores(self, total_data, time, show_upcoming, use_12_hour_format):
"""Store output of fixtures based on league and time to a CSV file"""
headers = ['League', 'Home Team Name', 'Home Team Goals',
'Away Team Goals', 'Away Team Name']
result = [headers]
league = total_data['competition']['name']
result.extend([league,
score['homeTeam']['name'],
score['score']['fullTime']['homeTeam'],
score['score']['fullTime']['awayTeam'],
score['awayTeam']['name']]
for score in total_data['matches'])
self.generate_output(result) | [
"def",
"league_scores",
"(",
"self",
",",
"total_data",
",",
"time",
",",
"show_upcoming",
",",
"use_12_hour_format",
")",
":",
"headers",
"=",
"[",
"'League'",
",",
"'Home Team Name'",
",",
"'Home Team Goals'",
",",
"'Away Team Goals'",
",",
"'Away Team Name'",
"]",
"result",
"=",
"[",
"headers",
"]",
"league",
"=",
"total_data",
"[",
"'competition'",
"]",
"[",
"'name'",
"]",
"result",
".",
"extend",
"(",
"[",
"league",
",",
"score",
"[",
"'homeTeam'",
"]",
"[",
"'name'",
"]",
",",
"score",
"[",
"'score'",
"]",
"[",
"'fullTime'",
"]",
"[",
"'homeTeam'",
"]",
",",
"score",
"[",
"'score'",
"]",
"[",
"'fullTime'",
"]",
"[",
"'awayTeam'",
"]",
",",
"score",
"[",
"'awayTeam'",
"]",
"[",
"'name'",
"]",
"]",
"for",
"score",
"in",
"total_data",
"[",
"'matches'",
"]",
")",
"self",
".",
"generate_output",
"(",
"result",
")"
] | https://github.com/architv/soccer-cli/blob/04932b677eed1fef032eaccffebabfcaeec2d48b/soccer/writers.py#L273-L285 |
||
roclark/sportsipy | c19f545d3376d62ded6304b137dc69238ac620a9 | sportsipy/mlb/roster.py | python | Roster._get_id | (self, player) | return re.sub(r'\.shtml.*', '', name) | Parse the player ID.
Given a PyQuery object representing a single player on the team roster,
parse the player ID and return it as a string.
Parameters
----------
player : PyQuery object
A PyQuery object representing the player information from the
roster table.
Returns
-------
string
Returns a string of the player ID. | Parse the player ID. | [
"Parse",
"the",
"player",
"ID",
"."
] | def _get_id(self, player):
"""
Parse the player ID.
Given a PyQuery object representing a single player on the team roster,
parse the player ID and return it as a string.
Parameters
----------
player : PyQuery object
A PyQuery object representing the player information from the
roster table.
Returns
-------
string
Returns a string of the player ID.
"""
name_tag = player('td[data-stat="player"] a')
name = re.sub(r'.*/players/./', '', str(name_tag))
return re.sub(r'\.shtml.*', '', name) | [
"def",
"_get_id",
"(",
"self",
",",
"player",
")",
":",
"name_tag",
"=",
"player",
"(",
"'td[data-stat=\"player\"] a'",
")",
"name",
"=",
"re",
".",
"sub",
"(",
"r'.*/players/./'",
",",
"''",
",",
"str",
"(",
"name_tag",
")",
")",
"return",
"re",
".",
"sub",
"(",
"r'\\.shtml.*'",
",",
"''",
",",
"name",
")"
] | https://github.com/roclark/sportsipy/blob/c19f545d3376d62ded6304b137dc69238ac620a9/sportsipy/mlb/roster.py#L1516-L1536 |
|
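The two re.sub calls in the _get_id record above reduce a roster anchor tag to a player id; a hedged standalone check with a made-up tag:

    import re

    name_tag = '<a href="/players/v/verlaju01.shtml">Justin Verlander</a>'
    name = re.sub(r'.*/players/./', '', name_tag)
    player_id = re.sub(r'\.shtml.*', '', name)
    print(player_id)   # -> verlaju01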
pawamoy/aria2p | 2855c6a9a38e36278671258439f6caf59c39cfc3 | docs/gen_credits.py | python | get_credits_data | () | return {
"project_name": project_name,
"direct_dependencies": sorted(direct_dependencies),
"indirect_dependencies": sorted(indirect_dependencies),
"more_credits": "http://pawamoy.github.io/credits/",
} | Return data used to generate the credits file.
Returns:
Data required to render the credits template. | Return data used to generate the credits file. | [
"Return",
"data",
"used",
"to",
"generate",
"the",
"credits",
"file",
"."
] | def get_credits_data() -> dict:
"""Return data used to generate the credits file.
Returns:
Data required to render the credits template.
"""
project_dir = Path(__file__).parent.parent
metadata = toml.load(project_dir / "pyproject.toml")["project"]
metadata_pdm = toml.load(project_dir / "pyproject.toml")["tool"]["pdm"]
lock_data = toml.load(project_dir / "pdm.lock")
project_name = metadata["name"]
all_dependencies = chain(
metadata.get("dependencies", []),
chain(*metadata.get("optional-dependencies", {}).values()),
chain(*metadata_pdm.get("dev-dependencies", {}).values()),
)
direct_dependencies = {re.sub(r"[^\w-].*$", "", dep) for dep in all_dependencies}
direct_dependencies = {dep.lower() for dep in direct_dependencies}
indirect_dependencies = {pkg["name"].lower() for pkg in lock_data["package"]}
indirect_dependencies -= direct_dependencies
return {
"project_name": project_name,
"direct_dependencies": sorted(direct_dependencies),
"indirect_dependencies": sorted(indirect_dependencies),
"more_credits": "http://pawamoy.github.io/credits/",
} | [
"def",
"get_credits_data",
"(",
")",
"->",
"dict",
":",
"project_dir",
"=",
"Path",
"(",
"__file__",
")",
".",
"parent",
".",
"parent",
"metadata",
"=",
"toml",
".",
"load",
"(",
"project_dir",
"/",
"\"pyproject.toml\"",
")",
"[",
"\"project\"",
"]",
"metadata_pdm",
"=",
"toml",
".",
"load",
"(",
"project_dir",
"/",
"\"pyproject.toml\"",
")",
"[",
"\"tool\"",
"]",
"[",
"\"pdm\"",
"]",
"lock_data",
"=",
"toml",
".",
"load",
"(",
"project_dir",
"/",
"\"pdm.lock\"",
")",
"project_name",
"=",
"metadata",
"[",
"\"name\"",
"]",
"all_dependencies",
"=",
"chain",
"(",
"metadata",
".",
"get",
"(",
"\"dependencies\"",
",",
"[",
"]",
")",
",",
"chain",
"(",
"*",
"metadata",
".",
"get",
"(",
"\"optional-dependencies\"",
",",
"{",
"}",
")",
".",
"values",
"(",
")",
")",
",",
"chain",
"(",
"*",
"metadata_pdm",
".",
"get",
"(",
"\"dev-dependencies\"",
",",
"{",
"}",
")",
".",
"values",
"(",
")",
")",
",",
")",
"direct_dependencies",
"=",
"{",
"re",
".",
"sub",
"(",
"r\"[^\\w-].*$\"",
",",
"\"\"",
",",
"dep",
")",
"for",
"dep",
"in",
"all_dependencies",
"}",
"direct_dependencies",
"=",
"{",
"dep",
".",
"lower",
"(",
")",
"for",
"dep",
"in",
"direct_dependencies",
"}",
"indirect_dependencies",
"=",
"{",
"pkg",
"[",
"\"name\"",
"]",
".",
"lower",
"(",
")",
"for",
"pkg",
"in",
"lock_data",
"[",
"\"package\"",
"]",
"}",
"indirect_dependencies",
"-=",
"direct_dependencies",
"return",
"{",
"\"project_name\"",
":",
"project_name",
",",
"\"direct_dependencies\"",
":",
"sorted",
"(",
"direct_dependencies",
")",
",",
"\"indirect_dependencies\"",
":",
"sorted",
"(",
"indirect_dependencies",
")",
",",
"\"more_credits\"",
":",
"\"http://pawamoy.github.io/credits/\"",
",",
"}"
] | https://github.com/pawamoy/aria2p/blob/2855c6a9a38e36278671258439f6caf59c39cfc3/docs/gen_credits.py#L15-L42 |
|
m-labs/artiq | eaa1505c947c7987cdbd31c24056823c740e84e0 | artiq/coredevice/ttl.py | python | TTLInOut.input | (self) | Set the direction to input at the current position of the time
cursor.
There must be a delay of at least one RTIO clock cycle before any
other command can be issued.
This method only configures the direction at the FPGA. When using
buffered I/O interfaces, such as the Sinara TTL cards, the buffer
direction must be configured separately in the hardware. | Set the direction to input at the current position of the time
cursor. | [
"Set",
"the",
"direction",
"to",
"input",
"at",
"the",
"current",
"position",
"of",
"the",
"time",
"cursor",
"."
] | def input(self):
"""Set the direction to input at the current position of the time
cursor.
There must be a delay of at least one RTIO clock cycle before any
other command can be issued.
This method only configures the direction at the FPGA. When using
buffered I/O interfaces, such as the Sinara TTL cards, the buffer
direction must be configured separately in the hardware."""
self.set_oe(False) | [
"def",
"input",
"(",
"self",
")",
":",
"self",
".",
"set_oe",
"(",
"False",
")"
] | https://github.com/m-labs/artiq/blob/eaa1505c947c7987cdbd31c24056823c740e84e0/artiq/coredevice/ttl.py#L149-L159 |
||
maartenbreddels/ipyvolume | 71eded3085ed5f00a961c90db8b84fde13a6dee3 | ipyvolume/pylab.py | python | xyzlabel | (labelx, labely, labelz) | Set all labels at once. | Set all labels at once. | [
"Set",
"all",
"labels",
"at",
"once",
"."
] | def xyzlabel(labelx, labely, labelz):
"""Set all labels at once."""
xlabel(labelx)
ylabel(labely)
zlabel(labelz) | [
"def",
"xyzlabel",
"(",
"labelx",
",",
"labely",
",",
"labelz",
")",
":",
"xlabel",
"(",
"labelx",
")",
"ylabel",
"(",
"labely",
")",
"zlabel",
"(",
"labelz",
")"
] | https://github.com/maartenbreddels/ipyvolume/blob/71eded3085ed5f00a961c90db8b84fde13a6dee3/ipyvolume/pylab.py#L1372-L1376 |
||
ethereum/trinity | 6383280c5044feb06695ac2f7bc1100b7bcf4fe0 | trinity/sync/full/chain.py | python | FastChainSyncer.is_complete | (self) | return self._body_syncer.is_complete | [] | def is_complete(self) -> bool:
return self._body_syncer.is_complete | [
"def",
"is_complete",
"(",
"self",
")",
"->",
"bool",
":",
"return",
"self",
".",
"_body_syncer",
".",
"is_complete"
] | https://github.com/ethereum/trinity/blob/6383280c5044feb06695ac2f7bc1100b7bcf4fe0/trinity/sync/full/chain.py#L492-L493 |
|||
dylandjian/SuperGo | 27da9db1598f2a13004067d853a41cbe83cb2016 | models/mcts.py | python | Node.expand | (self, probas) | Create a child node for every non-zero move probability | Create a child node for every non-zero move probability | [
"Create",
"a",
"child",
"node",
"for",
"every",
"non",
"-",
"zero",
"move",
"probability"
] | def expand(self, probas):
""" Create a child node for every non-zero move probability """
self.childrens = [Node(parent=self, move=idx, proba=probas[idx]) \
for idx in range(probas.shape[0]) if probas[idx] > 0] | [
"def",
"expand",
"(",
"self",
",",
"probas",
")",
":",
"self",
".",
"childrens",
"=",
"[",
"Node",
"(",
"parent",
"=",
"self",
",",
"move",
"=",
"idx",
",",
"proba",
"=",
"probas",
"[",
"idx",
"]",
")",
"for",
"idx",
"in",
"range",
"(",
"probas",
".",
"shape",
"[",
"0",
"]",
")",
"if",
"probas",
"[",
"idx",
"]",
">",
"0",
"]"
] | https://github.com/dylandjian/SuperGo/blob/27da9db1598f2a13004067d853a41cbe83cb2016/models/mcts.py#L73-L77 |
||
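A hedged usage sketch for the Node.expand record above, assuming the Node constructor accepts no required arguments (the keyword defaults upstream suggest so) and a numpy policy vector over moves.

    import numpy as np

    probas = np.array([0.0, 0.6, 0.4])   # zero-probability moves get no child
    root = Node()
    root.expand(probas)
    print(len(root.childrens))           # -> 2 (children for moves 1 and 2)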
KristianOellegaard/django-hvad | b4fb1ff3674bd8309530ed5dcb95e9c3afd53a10 | hvad/manager.py | python | TranslationAwareQueryset.values | (self, *fields) | return self._filter_extra(extra_filters).values(*fields) | [] | def values(self, *fields):
fields, extra_filters = self._translate_fieldnames(fields)
return self._filter_extra(extra_filters).values(*fields) | [
"def",
"values",
"(",
"self",
",",
"*",
"fields",
")",
":",
"fields",
",",
"extra_filters",
"=",
"self",
".",
"_translate_fieldnames",
"(",
"fields",
")",
"return",
"self",
".",
"_filter_extra",
"(",
"extra_filters",
")",
".",
"values",
"(",
"*",
"fields",
")"
] | https://github.com/KristianOellegaard/django-hvad/blob/b4fb1ff3674bd8309530ed5dcb95e9c3afd53a10/hvad/manager.py#L942-L944 |
|||
bendmorris/static-python | 2e0f8c4d7ed5b359dc7d8a75b6fb37e6b6c5c473 | Lib/lib2to3/pgen2/tokenize.py | python | _get_normal_name | (orig_enc) | return orig_enc | Imitates get_normal_name in tokenizer.c. | Imitates get_normal_name in tokenizer.c. | [
"Imitates",
"get_normal_name",
"in",
"tokenizer",
".",
"c",
"."
] | def _get_normal_name(orig_enc):
"""Imitates get_normal_name in tokenizer.c."""
# Only care about the first 12 characters.
enc = orig_enc[:12].lower().replace("_", "-")
if enc == "utf-8" or enc.startswith("utf-8-"):
return "utf-8"
if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
return "iso-8859-1"
return orig_enc | [
"def",
"_get_normal_name",
"(",
"orig_enc",
")",
":",
"# Only care about the first 12 characters.",
"enc",
"=",
"orig_enc",
"[",
":",
"12",
"]",
".",
"lower",
"(",
")",
".",
"replace",
"(",
"\"_\"",
",",
"\"-\"",
")",
"if",
"enc",
"==",
"\"utf-8\"",
"or",
"enc",
".",
"startswith",
"(",
"\"utf-8-\"",
")",
":",
"return",
"\"utf-8\"",
"if",
"enc",
"in",
"(",
"\"latin-1\"",
",",
"\"iso-8859-1\"",
",",
"\"iso-latin-1\"",
")",
"or",
"enc",
".",
"startswith",
"(",
"(",
"\"latin-1-\"",
",",
"\"iso-8859-1-\"",
",",
"\"iso-latin-1-\"",
")",
")",
":",
"return",
"\"iso-8859-1\"",
"return",
"orig_enc"
] | https://github.com/bendmorris/static-python/blob/2e0f8c4d7ed5b359dc7d8a75b6fb37e6b6c5c473/Lib/lib2to3/pgen2/tokenize.py#L241-L250 |
|
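Expected behaviour of the _get_normal_name record above on a few encoding spellings; a doctest-style sketch of the normalisation.

    assert _get_normal_name("UTF-8") == "utf-8"
    assert _get_normal_name("utf_8_sig") == "utf-8"        # "utf-8-" prefix match
    assert _get_normal_name("Latin_1") == "iso-8859-1"
    assert _get_normal_name("euc-jp") == "euc-jp"          # passed through unchanged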
yt-project/yt | dc7b24f9b266703db4c843e329c6c8644d47b824 | yt/visualization/plot_container.py | python | PlotContainer.show | (self) | r"""This will send any existing plots to the IPython notebook.
If yt is being run from within an IPython session, and it is able to
determine this, this function will send any existing plots to the
notebook for display.
If yt can't determine if it's inside an IPython session, it will raise
YTNotInsideNotebook.
Examples
--------
>>> from yt.mods import SlicePlot
>>> slc = SlicePlot(
... ds, "x", [("gas", "density"), ("gas", "velocity_magnitude")]
... )
>>> slc.show() | r"""This will send any existing plots to the IPython notebook. | [
"r",
"This",
"will",
"send",
"any",
"existing",
"plots",
"to",
"the",
"IPython",
"notebook",
"."
] | def show(self):
r"""This will send any existing plots to the IPython notebook.
If yt is being run from within an IPython session, and it is able to
determine this, this function will send any existing plots to the
notebook for display.
If yt can't determine if it's inside an IPython session, it will raise
YTNotInsideNotebook.
Examples
--------
>>> from yt.mods import SlicePlot
>>> slc = SlicePlot(
... ds, "x", [("gas", "density"), ("gas", "velocity_magnitude")]
... )
>>> slc.show()
"""
interactivity = self.plots[list(self.plots.keys())[0]].interactivity
if interactivity:
for v in sorted(self.plots.values()):
v.show()
else:
if "__IPYTHON__" in dir(builtins):
from IPython.display import display
display(self)
else:
raise YTNotInsideNotebook | [
"def",
"show",
"(",
"self",
")",
":",
"interactivity",
"=",
"self",
".",
"plots",
"[",
"list",
"(",
"self",
".",
"plots",
".",
"keys",
"(",
")",
")",
"[",
"0",
"]",
"]",
".",
"interactivity",
"if",
"interactivity",
":",
"for",
"v",
"in",
"sorted",
"(",
"self",
".",
"plots",
".",
"values",
"(",
")",
")",
":",
"v",
".",
"show",
"(",
")",
"else",
":",
"if",
"\"__IPYTHON__\"",
"in",
"dir",
"(",
"builtins",
")",
":",
"from",
"IPython",
".",
"display",
"import",
"display",
"display",
"(",
"self",
")",
"else",
":",
"raise",
"YTNotInsideNotebook"
] | https://github.com/yt-project/yt/blob/dc7b24f9b266703db4c843e329c6c8644d47b824/yt/visualization/plot_container.py#L630-L660 |
||
Kkevsterrr/backdoorme | f9755ca6cec600335e681752e7a1c5c617bb5a39 | backdoors/shell/__pupy/pupy/packages/windows/x86/psutil/_pslinux.py | python | Connections.decode_address | (self, addr, family) | return (ip, port) | Accept an "ip:port" address as displayed in /proc/net/*
and convert it into a human readable form, like:
"0500000A:0016" -> ("10.0.0.5", 22)
"0000000000000000FFFF00000100007F:9E49" -> ("::ffff:127.0.0.1", 40521)
The IP address portion is a little or big endian four-byte
hexadecimal number; that is, the least significant byte is listed
first, so we need to reverse the order of the bytes to convert it
to an IP address.
The port is represented as a two-byte hexadecimal number.
Reference:
http://linuxdevcenter.com/pub/a/linux/2000/11/16/LinuxAdmin.html | Accept an "ip:port" address as displayed in /proc/net/*
and convert it into a human readable form, like: | [
"Accept",
"an",
"ip",
":",
"port",
"address",
"as",
"displayed",
"in",
"/",
"proc",
"/",
"net",
"/",
"*",
"and",
"convert",
"it",
"into",
"a",
"human",
"readable",
"form",
"like",
":"
] | def decode_address(self, addr, family):
"""Accept an "ip:port" address as displayed in /proc/net/*
and convert it into a human readable form, like:
"0500000A:0016" -> ("10.0.0.5", 22)
"0000000000000000FFFF00000100007F:9E49" -> ("::ffff:127.0.0.1", 40521)
The IP address portion is a little or big endian four-byte
hexadecimal number; that is, the least significant byte is listed
first, so we need to reverse the order of the bytes to convert it
to an IP address.
The port is represented as a two-byte hexadecimal number.
Reference:
http://linuxdevcenter.com/pub/a/linux/2000/11/16/LinuxAdmin.html
"""
ip, port = addr.split(':')
port = int(port, 16)
# this usually refers to a local socket in listen mode with
# no end-points connected
if not port:
return ()
if PY3:
ip = ip.encode('ascii')
if family == socket.AF_INET:
# see: https://github.com/giampaolo/psutil/issues/201
if sys.byteorder == 'little':
ip = socket.inet_ntop(family, base64.b16decode(ip)[::-1])
else:
ip = socket.inet_ntop(family, base64.b16decode(ip))
else: # IPv6
# old version - let's keep it, just in case...
# ip = ip.decode('hex')
# return socket.inet_ntop(socket.AF_INET6,
# ''.join(ip[i:i+4][::-1] for i in xrange(0, 16, 4)))
ip = base64.b16decode(ip)
# see: https://github.com/giampaolo/psutil/issues/201
if sys.byteorder == 'little':
ip = socket.inet_ntop(
socket.AF_INET6,
struct.pack('>4I', *struct.unpack('<4I', ip)))
else:
ip = socket.inet_ntop(
socket.AF_INET6,
struct.pack('<4I', *struct.unpack('<4I', ip)))
return (ip, port) | [
"def",
"decode_address",
"(",
"self",
",",
"addr",
",",
"family",
")",
":",
"ip",
",",
"port",
"=",
"addr",
".",
"split",
"(",
"':'",
")",
"port",
"=",
"int",
"(",
"port",
",",
"16",
")",
"# this usually refers to a local socket in listen mode with",
"# no end-points connected",
"if",
"not",
"port",
":",
"return",
"(",
")",
"if",
"PY3",
":",
"ip",
"=",
"ip",
".",
"encode",
"(",
"'ascii'",
")",
"if",
"family",
"==",
"socket",
".",
"AF_INET",
":",
"# see: https://github.com/giampaolo/psutil/issues/201",
"if",
"sys",
".",
"byteorder",
"==",
"'little'",
":",
"ip",
"=",
"socket",
".",
"inet_ntop",
"(",
"family",
",",
"base64",
".",
"b16decode",
"(",
"ip",
")",
"[",
":",
":",
"-",
"1",
"]",
")",
"else",
":",
"ip",
"=",
"socket",
".",
"inet_ntop",
"(",
"family",
",",
"base64",
".",
"b16decode",
"(",
"ip",
")",
")",
"else",
":",
"# IPv6",
"# old version - let's keep it, just in case...",
"# ip = ip.decode('hex')",
"# return socket.inet_ntop(socket.AF_INET6,",
"# ''.join(ip[i:i+4][::-1] for i in xrange(0, 16, 4)))",
"ip",
"=",
"base64",
".",
"b16decode",
"(",
"ip",
")",
"# see: https://github.com/giampaolo/psutil/issues/201",
"if",
"sys",
".",
"byteorder",
"==",
"'little'",
":",
"ip",
"=",
"socket",
".",
"inet_ntop",
"(",
"socket",
".",
"AF_INET6",
",",
"struct",
".",
"pack",
"(",
"'>4I'",
",",
"*",
"struct",
".",
"unpack",
"(",
"'<4I'",
",",
"ip",
")",
")",
")",
"else",
":",
"ip",
"=",
"socket",
".",
"inet_ntop",
"(",
"socket",
".",
"AF_INET6",
",",
"struct",
".",
"pack",
"(",
"'<4I'",
",",
"*",
"struct",
".",
"unpack",
"(",
"'<4I'",
",",
"ip",
")",
")",
")",
"return",
"(",
"ip",
",",
"port",
")"
] | https://github.com/Kkevsterrr/backdoorme/blob/f9755ca6cec600335e681752e7a1c5c617bb5a39/backdoors/shell/__pupy/pupy/packages/windows/x86/psutil/_pslinux.py#L433-L478 |
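The little-endian IPv4 branch above can be exercised on its own; a minimal sketch, assuming a little-endian host such as x86:

import base64
import socket

addr = "0500000A:0016"            # the example given in the docstring above
ip_hex, port_hex = addr.split(":")
port = int(port_hex, 16)          # two-byte hexadecimal port -> 22
# Reverse the four bytes before formatting: the kernel lists the least
# significant byte first on little-endian machines.
ip = socket.inet_ntop(socket.AF_INET, base64.b16decode(ip_hex)[::-1])
print(ip, port)                   # -> 10.0.0.5 22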
|
giampaolo/psutil | 55161bd4850986359a029f1c9a81bcf66f37afa8 | psutil/__init__.py | python | Process.resume | (self) | Resume process execution with SIGCONT pre-emptively checking
whether PID has been reused.
On Windows this has the effect of resuming all process threads. | Resume process execution with SIGCONT pre-emptively checking
whether PID has been reused.
On Windows this has the effect of resuming all process threads. | [
"Resume",
"process",
"execution",
"with",
"SIGCONT",
"pre",
"-",
"emptively",
"checking",
"whether",
"PID",
"has",
"been",
"reused",
".",
"On",
"Windows",
"this",
"has",
"the",
"effect",
"of",
"resuming",
"all",
"process",
"threads",
"."
] | def resume(self):
"""Resume process execution with SIGCONT pre-emptively checking
whether PID has been reused.
On Windows this has the effect of resuming all process threads.
"""
if POSIX:
self._send_signal(signal.SIGCONT)
else: # pragma: no cover
self._proc.resume() | [
"def",
"resume",
"(",
"self",
")",
":",
"if",
"POSIX",
":",
"self",
".",
"_send_signal",
"(",
"signal",
".",
"SIGCONT",
")",
"else",
":",
"# pragma: no cover",
"self",
".",
"_proc",
".",
"resume",
"(",
")"
] | https://github.com/giampaolo/psutil/blob/55161bd4850986359a029f1c9a81bcf66f37afa8/psutil/__init__.py#L1219-L1227 |
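Typical usage, sketched; psutil must be installed and the PID is hypothetical:

import psutil

p = psutil.Process(12345)  # hypothetical PID of a process you own
p.suspend()                # SIGSTOP on POSIX
p.resume()                 # SIGCONT on POSIX; resumes all threads on Windows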
||
StyraHem/ShellyForHASS | 902c04ab25b0a7667718eeef53bb6ad43614fe2f | custom_components/shelly/binary_sensor.py | python | ShellyBinaryInfoSensor.is_on | (self) | return self._state | Return the state of the sensor. | Return the state of the sensor. | [
"Return",
"the",
"state",
"of",
"the",
"sensor",
"."
] | def is_on(self):
"""Return the state of the sensor."""
return self._state | [
"def",
"is_on",
"(",
"self",
")",
":",
"return",
"self",
".",
"_state"
] | https://github.com/StyraHem/ShellyForHASS/blob/902c04ab25b0a7667718eeef53bb6ad43614fe2f/custom_components/shelly/binary_sensor.py#L209-L211 |
|
tomplus/kubernetes_asyncio | f028cc793e3a2c519be6a52a49fb77ff0b014c9b | kubernetes_asyncio/client/models/v2beta1_horizontal_pod_autoscaler_condition.py | python | V2beta1HorizontalPodAutoscalerCondition.__repr__ | (self) | return self.to_str() | For `print` and `pprint` | For `print` and `pprint` | [
"For",
"print",
"and",
"pprint"
] | def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str() | [
"def",
"__repr__",
"(",
"self",
")",
":",
"return",
"self",
".",
"to_str",
"(",
")"
] | https://github.com/tomplus/kubernetes_asyncio/blob/f028cc793e3a2c519be6a52a49fb77ff0b014c9b/kubernetes_asyncio/client/models/v2beta1_horizontal_pod_autoscaler_condition.py#L220-L222 |
|
holoviz/holoviews | cc6b27f01710402fdfee2aeef1507425ca78c91f | holoviews/core/spaces.py | python | DynamicMap.layout | (self, dimensions=None, **kwargs) | return self.groupby(dimensions, container_type=NdLayout, **kwargs) | Groups data by supplied dimension(s) laying the groups along
the dimension(s) out in a NdLayout.
Args:
dimensions: Dimension/str or list
Dimension or list of dimensions to group by
Returns:
layout: NdLayout
NdLayout with supplied dimensions | Groups data by supplied dimension(s) laying the groups along
the dimension(s) out in a NdLayout. | [
"Groups",
"data",
"by",
"supplied",
"dimension",
"(",
"s",
")",
"laying",
"the",
"groups",
"along",
"the",
"dimension",
"(",
"s",
")",
"out",
"in",
"a",
"NdLayout",
"."
] | def layout(self, dimensions=None, **kwargs):
"""
Groups data by supplied dimension(s) laying the groups along
the dimension(s) out in a NdLayout.
Args:
dimensions: Dimension/str or list
Dimension or list of dimensions to group by
Returns:
layout: NdLayout
NdLayout with supplied dimensions
"""
return self.groupby(dimensions, container_type=NdLayout, **kwargs) | [
"def",
"layout",
"(",
"self",
",",
"dimensions",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"groupby",
"(",
"dimensions",
",",
"container_type",
"=",
"NdLayout",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/holoviz/holoviews/blob/cc6b27f01710402fdfee2aeef1507425ca78c91f/holoviews/core/spaces.py#L1611-L1624 |
|
bisohns/search-engine-parser | ede1355a1f63398d9217b8e502fbd6c52b53bf09 | search_engine_parser/core/engines/googlescholar.py | python | Search.parse_single_result | (self, single_result, return_type=ReturnType.FULL, **kwargs) | return rdict | Parses the source code to return
:param single_result: single result found in <div class="gs_r gs_or gs_scl">
:type single_result: `bs4.element.ResultSet`
:return: parsed title, link, description, file link, result type of single result
:rtype: dict | Parses the source code to return | [
"Parses",
"the",
"source",
"code",
"to",
"return"
] | def parse_single_result(self, single_result, return_type=ReturnType.FULL, **kwargs):
"""
Parses the source code to return
:param single_result: single result found in <div class="gs_r gs_or gs_scl">
:type single_result: `bs4.element.ResultSet`
:return: parsed title, link, description, file link, result type of single result
:rtype: dict
"""
rdict = SearchItem()
r_elem = single_result.find('h3', class_='gs_rt')
if return_type in (ReturnType.FULL, ReturnType.LINK):
link_tag = r_elem.find('a')
if link_tag:
raw_link = link_tag.get('href')
else:
raw_link = ''
rdict["links"] = raw_link
if return_type in (ReturnType.FULL, return_type.DESCRIPTION):
desc = single_result.find('div', class_='gs_rs')
if desc:
desc = desc.text
else:
desc = ''
rdict["descriptions"] = desc
if return_type in (ReturnType.FULL, return_type.TITLE):
title = r_elem.text
title = re.sub(r'^[\[\w+\]]+ ', '', title)
rdict["titles"] = title
if return_type == ReturnType.FULL:
t_elem = single_result.find('span', class_='gs_ct1')
if t_elem:
result_type = t_elem.text
else:
result_type = ''
f_elem = single_result.find('div', class_='gs_or_ggsm')
if f_elem:
flink_tag = r_elem.find('a')
if flink_tag:
file_link = flink_tag.get('href')
else:
file_link = ''
else:
file_link = ''
rdict.update({
"result_types": result_type,
"files_links": file_link
})
return rdict | [
"def",
"parse_single_result",
"(",
"self",
",",
"single_result",
",",
"return_type",
"=",
"ReturnType",
".",
"FULL",
",",
"*",
"*",
"kwargs",
")",
":",
"rdict",
"=",
"SearchItem",
"(",
")",
"r_elem",
"=",
"single_result",
".",
"find",
"(",
"'h3'",
",",
"class_",
"=",
"'gs_rt'",
")",
"if",
"return_type",
"in",
"(",
"ReturnType",
".",
"FULL",
",",
"ReturnType",
".",
"LINK",
")",
":",
"link_tag",
"=",
"r_elem",
".",
"find",
"(",
"'a'",
")",
"if",
"link_tag",
":",
"raw_link",
"=",
"link_tag",
".",
"get",
"(",
"'href'",
")",
"else",
":",
"raw_link",
"=",
"''",
"rdict",
"[",
"\"links\"",
"]",
"=",
"raw_link",
"if",
"return_type",
"in",
"(",
"ReturnType",
".",
"FULL",
",",
"return_type",
".",
"DESCRIPTION",
")",
":",
"desc",
"=",
"single_result",
".",
"find",
"(",
"'div'",
",",
"class_",
"=",
"'gs_rs'",
")",
"if",
"desc",
":",
"desc",
"=",
"desc",
".",
"text",
"else",
":",
"desc",
"=",
"''",
"rdict",
"[",
"\"descriptions\"",
"]",
"=",
"desc",
"if",
"return_type",
"in",
"(",
"ReturnType",
".",
"FULL",
",",
"return_type",
".",
"TITLE",
")",
":",
"title",
"=",
"r_elem",
".",
"text",
"title",
"=",
"re",
".",
"sub",
"(",
"r'^[\\[\\w+\\]]+ '",
",",
"''",
",",
"title",
")",
"rdict",
"[",
"\"titles\"",
"]",
"=",
"title",
"if",
"return_type",
"==",
"ReturnType",
".",
"FULL",
":",
"t_elem",
"=",
"single_result",
".",
"find",
"(",
"'span'",
",",
"class_",
"=",
"'gs_ct1'",
")",
"if",
"t_elem",
":",
"result_type",
"=",
"t_elem",
".",
"text",
"else",
":",
"result_type",
"=",
"''",
"f_elem",
"=",
"single_result",
".",
"find",
"(",
"'div'",
",",
"class_",
"=",
"'gs_or_ggsm'",
")",
"if",
"f_elem",
":",
"flink_tag",
"=",
"r_elem",
".",
"find",
"(",
"'a'",
")",
"if",
"flink_tag",
":",
"file_link",
"=",
"flink_tag",
".",
"get",
"(",
"'href'",
")",
"else",
":",
"file_link",
"=",
"''",
"else",
":",
"file_link",
"=",
"''",
"rdict",
".",
"update",
"(",
"{",
"\"result_types\"",
":",
"result_type",
",",
"\"files_links\"",
":",
"file_link",
"}",
")",
"return",
"rdict"
] | https://github.com/bisohns/search-engine-parser/blob/ede1355a1f63398d9217b8e502fbd6c52b53bf09/search_engine_parser/core/engines/googlescholar.py#L34-L88 |
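A sketch of how the selectors above behave on a hand-written fragment; the HTML is invented for illustration and bs4 must be installed:

from bs4 import BeautifulSoup

html = ('<div class="gs_r"><h3 class="gs_rt"><a href="http://example.org">A Title</a></h3>'
        '<div class="gs_rs">A snippet.</div></div>')
result = BeautifulSoup(html, "html.parser").find("div", class_="gs_r")
link = result.find("h3", class_="gs_rt").find("a").get("href")  # -> http://example.org
desc = result.find("div", class_="gs_rs").text                  # -> A snippet.
print(link, desc)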
|
googleads/google-ads-python | 2a1d6062221f6aad1992a6bcca0e7e4a93d2db86 | google/ads/googleads/v7/services/services/remarketing_action_service/client.py | python | RemarketingActionServiceClient.parse_common_project_path | (path: str) | return m.groupdict() if m else {} | Parse a project path into its component segments. | Parse a project path into its component segments. | [
"Parse",
"a",
"project",
"path",
"into",
"its",
"component",
"segments",
"."
] | def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {} | [
"def",
"parse_common_project_path",
"(",
"path",
":",
"str",
")",
"->",
"Dict",
"[",
"str",
",",
"str",
"]",
":",
"m",
"=",
"re",
".",
"match",
"(",
"r\"^projects/(?P<project>.+?)$\"",
",",
"path",
")",
"return",
"m",
".",
"groupdict",
"(",
")",
"if",
"m",
"else",
"{",
"}"
] | https://github.com/googleads/google-ads-python/blob/2a1d6062221f6aad1992a6bcca0e7e4a93d2db86/google/ads/googleads/v7/services/services/remarketing_action_service/client.py#L224-L227 |
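The named-group regex is easy to check in isolation; a minimal sketch:

import re

def parse_common_project_path(path):
    m = re.match(r"^projects/(?P<project>.+?)$", path)
    return m.groupdict() if m else {}

print(parse_common_project_path("projects/my-project"))  # {'project': 'my-project'}
print(parse_common_project_path("folders/123"))          # {} -- no match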
|
DEAP/deap | 2f63dcf6aaa341b8fe5d66d99e9e003a21312fef | deap/benchmarks/binary.py | python | bin2float | (min_, max_, nbits) | return wrap | Convert a binary array into an array of float where each
float is composed of *nbits* and is between *min_* and *max_*
and return the result of the decorated function. | Convert a binary array into an array of float where each
float is composed of *nbits* and is between *min_* and *max_*
and return the result of the decorated function. | [
"Convert",
"a",
"binary",
"array",
"into",
"an",
"array",
"of",
"float",
"where",
"each",
"float",
"is",
"composed",
"of",
"*",
"nbits",
"*",
"and",
"is",
"between",
"*",
"min_",
"*",
"and",
"*",
"max_",
"*",
"and",
"return",
"the",
"result",
"of",
"the",
"decorated",
"function",
"."
] | def bin2float(min_, max_, nbits):
"""Convert a binary array into an array of float where each
float is composed of *nbits* and is between *min_* and *max_*
and return the result of the decorated function.
"""
def wrap(function):
@wraps(function)
def wrapped_function(individual, *args, **kargs):
# User must take care to make nelem an integer.
nelem = len(individual)//nbits
decoded = [0] * nelem
for i in xrange(nelem):
gene = int("".join(map(str,
individual[i*nbits:i*nbits+nbits])),
2)
div = 2**nbits - 1
temp = gene/div
decoded[i] = min_ + (temp * (max_ - min_))
return function(decoded, *args, **kargs)
return wrapped_function
return wrap | [
"def",
"bin2float",
"(",
"min_",
",",
"max_",
",",
"nbits",
")",
":",
"def",
"wrap",
"(",
"function",
")",
":",
"@",
"wraps",
"(",
"function",
")",
"def",
"wrapped_function",
"(",
"individual",
",",
"*",
"args",
",",
"*",
"*",
"kargs",
")",
":",
"# User must take care to make nelem an integer.",
"nelem",
"=",
"len",
"(",
"individual",
")",
"//",
"nbits",
"decoded",
"=",
"[",
"0",
"]",
"*",
"nelem",
"for",
"i",
"in",
"xrange",
"(",
"nelem",
")",
":",
"gene",
"=",
"int",
"(",
"\"\"",
".",
"join",
"(",
"map",
"(",
"str",
",",
"individual",
"[",
"i",
"*",
"nbits",
":",
"i",
"*",
"nbits",
"+",
"nbits",
"]",
")",
")",
",",
"2",
")",
"div",
"=",
"2",
"**",
"nbits",
"-",
"1",
"temp",
"=",
"gene",
"/",
"div",
"decoded",
"[",
"i",
"]",
"=",
"min_",
"+",
"(",
"temp",
"*",
"(",
"max_",
"-",
"min_",
")",
")",
"return",
"function",
"(",
"decoded",
",",
"*",
"args",
",",
"*",
"*",
"kargs",
")",
"return",
"wrapped_function",
"return",
"wrap"
] | https://github.com/DEAP/deap/blob/2f63dcf6aaa341b8fe5d66d99e9e003a21312fef/deap/benchmarks/binary.py#L20-L41 |
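A worked check of the decoding arithmetic, sketched in Python 3 (the original targets Python 2, so xrange becomes range and the gene/div division must stay floating-point):

min_, max_, nbits = 0.0, 1.0, 4
bits = [1, 1, 1, 1, 0, 0, 0, 0]   # two 4-bit genes: 0b1111 and 0b0000
nelem = len(bits) // nbits
decoded = []
for i in range(nelem):
    gene = int("".join(map(str, bits[i * nbits:i * nbits + nbits])), 2)
    div = 2 ** nbits - 1          # 15, the largest value a 4-bit gene can hold
    decoded.append(min_ + (gene / div) * (max_ - min_))
print(decoded)                    # [1.0, 0.0]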
|
travisgoodspeed/goodfet | 1750cc1e8588af5470385e52fa098ca7364c2863 | client/experiments.py | python | experiments.rtrSweep | (self,freq,lowID,highID, attempts = 1,duration = 1, verbose = True) | This method will sweep through the range of ids given by lowID to highID and
send a remote transmissions request (RTR) to each id and then listen for a response.
The RTR will be repeated in the given number of attempts and will sniff for the given duration
continuing to the next id.
Any messages that are sniffed will be saved to a csv file. The filename will be stored in the DATA_LOCATION folder
with a filename that is the date (YYYYMMDD)_rtr.csv. If the file already exists it will append to the end of the file
The format will follow that of L{GoodFETMCPCANCommunication.sniff} in that the columns will be as follows:
1. timestamp: as floating point number
2. error boolean: 1 if there was an error detected of packet formatting (not exhaustive check). 0 otherwise
3. comment tag: comment about experiments as String
4. duration: Length of overall sniff
5. filtering: 1 if there was filtering. 0 otherwise
6. db0: Integer
---
7. db7: Integer
@type freq: number
@param freq: The frequency at which the bus is communicating
@type lowID: integer
@param lowID: The low end of the id sweep
@type highID: integer
@param highID: The high end of the id sweep
@type attempts: integer
@param attempts: The number of times a RTR will be repeated for a given standard id
@type duration: integer
@param duration: The length of time that it will listen to the bus after sending an RTR
@type verbose: boolean
@param verbose: When true, messages will be printed out to the terminal
@rtype: None
@return: Does not return anything | This method will sweep through the range of ids given by lowID to highID and
send a remote transmissions request (RTR) to each id and then listen for a response.
The RTR will be repeated in the given number of attempts and will sniff for the given duration
continuing to the next id.
Any messages that are sniffed will be saved to a csv file. The filename will be stored in the DATA_LOCATION folder
with a filename that is the date (YYYYMMDD)_rtr.csv. If the file already exists it will append to the end of the file
The format will follow that of L{GoodFETMCPCANCommunication.sniff} in that the columns will be as follows:
1. timestamp: as floating point number
2. error boolean: 1 if there was an error detected of packet formatting (not exhaustive check). 0 otherwise
3. comment tag: comment about experiments as String
4. duration: Length of overall sniff
5. filtering: 1 if there was filtering. 0 otherwise
6. db0: Integer
---
7. db7: Integer | [
"This",
"method",
"will",
"sweep",
"through",
"the",
"range",
"of",
"ids",
"given",
"by",
"lowID",
"to",
"highID",
"and",
"send",
"a",
"remote",
"transmissions",
"request",
"(",
"RTR",
")",
"to",
"each",
"id",
"and",
"then",
"listen",
"for",
"a",
"response",
".",
"The",
"RTR",
"will",
"be",
"repeated",
"in",
"the",
"given",
"number",
"of",
"attempts",
"and",
"will",
"sniff",
"for",
"the",
"given",
"duration",
"continuing",
"to",
"the",
"next",
"id",
".",
"Any",
"messages",
"that",
"are",
"sniffed",
"will",
"be",
"saved",
"to",
"a",
"csv",
"file",
".",
"The",
"filename",
"will",
"be",
"stored",
"in",
"the",
"DATA_LOCATION",
"folder",
"with",
"a",
"filename",
"that",
"is",
"the",
"date",
"(",
"YYYYMMDD",
")",
"_rtr",
".",
"csv",
".",
"If",
"the",
"file",
"already",
"exists",
"it",
"will",
"append",
"to",
"the",
"end",
"of",
"the",
"file",
"The",
"format",
"will",
"follow",
"that",
"of",
"L",
"{",
"GoodFETMCPCANCommunication",
".",
"sniff",
"}",
"in",
"that",
"the",
"columns",
"will",
"be",
"as",
"follows",
":",
"1",
".",
"timestamp",
":",
"as",
"floating",
"point",
"number",
"2",
".",
"error",
"boolean",
":",
"1",
"if",
"there",
"was",
"an",
"error",
"detected",
"of",
"packet",
"formatting",
"(",
"not",
"exhaustive",
"check",
")",
".",
"0",
"otherwise",
"3",
".",
"comment",
"tag",
":",
"comment",
"about",
"experiments",
"as",
"String",
"4",
".",
"duration",
":",
"Length",
"of",
"overall",
"sniff",
"5",
".",
"filtering",
":",
"1",
"if",
"there",
"was",
"filtering",
".",
"0",
"otherwise",
"6",
".",
"db0",
":",
"Integer",
"---",
"7",
".",
"db7",
":",
"Integer"
] | def rtrSweep(self,freq,lowID,highID, attempts = 1,duration = 1, verbose = True):
"""
This method will sweep through the range of ids given by lowID to highID and
send a remote transmissions request (RTR) to each id and then listen for a response.
The RTR will be repeated in the given number of attempts and will sniff for the given duration
continuing to the next id.
Any messages that are sniffed will be saved to a csv file. The filename will be stored in the DATA_LOCATION folder
with a filename that is the date (YYYYMMDD)_rtr.csv. If the file already exists it will append to the end of the file
The format will follow that of L{GoodFETMCPCANCommunication.sniff} in that the columns will be as follows:
1. timestamp: as floating point number
2. error boolean: 1 if there was an error detected of packet formatting (not exhaustive check). 0 otherwise
3. comment tag: comment about experiments as String
4. duration: Length of overall sniff
5. filtering: 1 if there was filtering. 0 otherwise
6. db0: Integer
---
7. db7: Integer
@type freq: number
@param freq: The frequency at which the bus is communicating
@type lowID: integer
@param lowID: The low end of the id sweep
@type highID: integer
@param highID: The high end of the id sweep
@type attempts: integer
@param attempts: The number of times a RTR will be repeated for a given standard id
@type duration: integer
@param duration: The length of time that it will listen to the bus after sending an RTR
@type verbose: boolean
@param verbose: When true, messages will be printed out to the terminal
@rtype: None
@return: Does not return anything
"""
#set up file for writing
now = datetime.datetime.now()
datestr = now.strftime("%Y%m%d")
path = self.DATA_LOCATION+datestr+"_rtr.csv"
filename = path
outfile = open(filename,'a');
dataWriter = csv.writer(outfile,delimiter=',');
dataWriter.writerow(['# Time Error Bytes 1-13']);
dataWriter.writerow(['#' + "rtr sweep from %d to %d"%(lowID,highID)])
if( verbose):
print "started"
#self.client.serInit()
#self.spitSetup(freq)
#for each id
for i in range(lowID,highID+1, 1):
self.client.serInit()
self.spitSetup(freq) #reset the chip to try and avoid serial timeouts
#set filters
standardid = [i, i, i, i]
self.addFilter(standardid, verbose = True)
#### split SID into different areas
SIDlow = (standardid[0] & 0x07) << 5; # get SID bits 2:0, rotate them to bits 7:5
SIDhigh = (standardid[0] >> 3) & 0xFF; # get SID bits 10:3, rotate them to bits 7:0
#create RTR packet
packet = [SIDhigh, SIDlow, 0x00,0x00,0x40]
dataWriter.writerow(["#requested id %d"%i])
#self.client.poke8(0x2C,0x00); #clear the CANINTF register; we care about bits 0 and 1 (RXnIF flags) which indicate a message is being held
#clear buffer
packet1 = self.client.rxpacket();
packet2 = self.client.rxpacket();
#send in rtr request
self.client.txpacket(packet)
## listen for 2 packets. one should be the rtr we requested the other should be
## a new packet response
starttime = tT.time()
while ((time.time() - starttime) < duration): #listen for the given duration time period
packet = self.client.rxpacket()
if( packet == None):
continue
# we have sniffed a packet, save it
row = []
row.append("%f"%time.time()) #timestamp
row.append(0) #error flag (not checking)
row.append("rtrRequest_%d"%i) #comment
row.append(duration) #sniff time
row.append(1) # filtering boolean
for byte in packet:
row.append("%02x"%ord(byte));
dataWriter.writerow(row)
print self.client.packet2parsedstr(packet)
trial= 2;
# for each trial repeat
while( trial <= attempts):
print "trial: ", trial
self.client.MCPrts(TXB0=True);
starttime = time.time()
# this time we will sniff for the given amount of time to see if there is a
# time till the packets come in
while( (time.time()-starttime) < duration):
packet=self.client.rxpacket();
if( packet == None):
continue
row = []
row.append("%f"%time.time()) #timestamp
row.append(0) #error flag (not checking)
row.append("rtrRequest_%d"%i) #comment
row.append(duration) #sniff time
row.append(1) # filtering boolean
for byte in packet:
row.append("%02x"%ord(byte));
dataWriter.writerow(row)
print self.client.packet2parsedstr(packet)
trial += 1
print "sweep complete"
outfile.close() | [
"def",
"rtrSweep",
"(",
"self",
",",
"freq",
",",
"lowID",
",",
"highID",
",",
"attempts",
"=",
"1",
",",
"duration",
"=",
"1",
",",
"verbose",
"=",
"True",
")",
":",
"#set up file for writing",
"now",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
"datestr",
"=",
"now",
".",
"strftime",
"(",
"\"%Y%m%d\"",
")",
"path",
"=",
"self",
".",
"DATA_LOCATION",
"+",
"datestr",
"+",
"\"_rtr.csv\"",
"filename",
"=",
"path",
"outfile",
"=",
"open",
"(",
"filename",
",",
"'a'",
")",
"dataWriter",
"=",
"csv",
".",
"writer",
"(",
"outfile",
",",
"delimiter",
"=",
"','",
")",
"dataWriter",
".",
"writerow",
"(",
"[",
"'# Time Error Bytes 1-13'",
"]",
")",
"dataWriter",
".",
"writerow",
"(",
"[",
"'#'",
"+",
"\"rtr sweep from %d to %d\"",
"%",
"(",
"lowID",
",",
"highID",
")",
"]",
")",
"if",
"(",
"verbose",
")",
":",
"print",
"\"started\"",
"#self.client.serInit()",
"#self.spitSetup(freq)",
"#for each id",
"for",
"i",
"in",
"range",
"(",
"lowID",
",",
"highID",
"+",
"1",
",",
"1",
")",
":",
"self",
".",
"client",
".",
"serInit",
"(",
")",
"self",
".",
"spitSetup",
"(",
"freq",
")",
"#reset the chip to try and avoid serial timeouts",
"#set filters",
"standardid",
"=",
"[",
"i",
",",
"i",
",",
"i",
",",
"i",
"]",
"self",
".",
"addFilter",
"(",
"standardid",
",",
"verbose",
"=",
"True",
")",
"#### split SID into different areas",
"SIDlow",
"=",
"(",
"standardid",
"[",
"0",
"]",
"&",
"0x07",
")",
"<<",
"5",
"# get SID bits 2:0, rotate them to bits 7:5",
"SIDhigh",
"=",
"(",
"standardid",
"[",
"0",
"]",
">>",
"3",
")",
"&",
"0xFF",
"# get SID bits 10:3, rotate them to bits 7:0",
"#create RTR packet",
"packet",
"=",
"[",
"SIDhigh",
",",
"SIDlow",
",",
"0x00",
",",
"0x00",
",",
"0x40",
"]",
"dataWriter",
".",
"writerow",
"(",
"[",
"\"#requested id %d\"",
"%",
"i",
"]",
")",
"#self.client.poke8(0x2C,0x00); #clear the CANINTF register; we care about bits 0 and 1 (RXnIF flags) which indicate a message is being held ",
"#clear buffer",
"packet1",
"=",
"self",
".",
"client",
".",
"rxpacket",
"(",
")",
"packet2",
"=",
"self",
".",
"client",
".",
"rxpacket",
"(",
")",
"#send in rtr request",
"self",
".",
"client",
".",
"txpacket",
"(",
"packet",
")",
"## listen for 2 packets. one should be the rtr we requested the other should be",
"## a new packet response",
"starttime",
"=",
"tT",
".",
"time",
"(",
")",
"while",
"(",
"(",
"time",
".",
"time",
"(",
")",
"-",
"starttime",
")",
"<",
"duration",
")",
":",
"#listen for the given duration time period",
"packet",
"=",
"self",
".",
"client",
".",
"rxpacket",
"(",
")",
"if",
"(",
"packet",
"==",
"None",
")",
":",
"continue",
"# we have sniffed a packet, save it",
"row",
"=",
"[",
"]",
"row",
".",
"append",
"(",
"\"%f\"",
"%",
"time",
".",
"time",
"(",
")",
")",
"#timestamp",
"row",
".",
"append",
"(",
"0",
")",
"#error flag (not checkign)",
"row",
".",
"append",
"(",
"\"rtrRequest_%d\"",
"%",
"i",
")",
"#comment",
"row",
".",
"append",
"(",
"duration",
")",
"#sniff time",
"row",
".",
"append",
"(",
"1",
")",
"# filtering boolean",
"for",
"byte",
"in",
"packet",
":",
"row",
".",
"append",
"(",
"\"%02x\"",
"%",
"ord",
"(",
"byte",
")",
")",
"dataWriter",
".",
"writerow",
"(",
"row",
")",
"print",
"self",
".",
"client",
".",
"packet2parsedstr",
"(",
"packet",
")",
"trial",
"=",
"2",
"# for each trial repeat",
"while",
"(",
"trial",
"<=",
"attempts",
")",
":",
"print",
"\"trial: \"",
",",
"trial",
"self",
".",
"client",
".",
"MCPrts",
"(",
"TXB0",
"=",
"True",
")",
"starttime",
"=",
"time",
".",
"time",
"(",
")",
"# this time we will sniff for the given amount of time to see if there is a",
"# time till the packets come in",
"while",
"(",
"(",
"time",
".",
"time",
"(",
")",
"-",
"starttime",
")",
"<",
"duration",
")",
":",
"packet",
"=",
"self",
".",
"client",
".",
"rxpacket",
"(",
")",
"if",
"(",
"packet",
"==",
"None",
")",
":",
"continue",
"row",
"=",
"[",
"]",
"row",
".",
"append",
"(",
"\"%f\"",
"%",
"time",
".",
"time",
"(",
")",
")",
"#timestamp",
"row",
".",
"append",
"(",
"0",
")",
"#error flag (not checking)",
"row",
".",
"append",
"(",
"\"rtrRequest_%d\"",
"%",
"i",
")",
"#comment",
"row",
".",
"append",
"(",
"duration",
")",
"#sniff time",
"row",
".",
"append",
"(",
"1",
")",
"# filtering boolean",
"for",
"byte",
"in",
"packet",
":",
"row",
".",
"append",
"(",
"\"%02x\"",
"%",
"ord",
"(",
"byte",
")",
")",
"dataWriter",
".",
"writerow",
"(",
"row",
")",
"print",
"self",
".",
"client",
".",
"packet2parsedstr",
"(",
"packet",
")",
"trial",
"+=",
"1",
"print",
"\"sweep complete\"",
"outfile",
".",
"close",
"(",
")"
] | https://github.com/travisgoodspeed/goodfet/blob/1750cc1e8588af5470385e52fa098ca7364c2863/client/experiments.py#L119-L231 |
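The SID split used above is plain bit arithmetic and can be checked standalone; a minimal sketch for an 11-bit CAN standard identifier (the value is arbitrary):

sid = 0x2A5                   # arbitrary 11-bit standard identifier
sid_low = (sid & 0x07) << 5   # SID bits 2:0 rotated into bits 7:5
sid_high = (sid >> 3) & 0xFF  # SID bits 10:3 rotated into bits 7:0
print(hex(sid_high), hex(sid_low))                # -> 0x54 0xa0
assert ((sid_high << 3) | (sid_low >> 5)) == sid  # the two bytes round-trip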
||
manuelbua/gitver | 77d5a4420209a4ca00349b094eeca1f13e50d8e5 | gitver/termcolors.py | python | Terminal.set_quiet_flags | (self, quiet_stdout, quiet_stderr) | [] | def set_quiet_flags(self, quiet_stdout, quiet_stderr):
self.is_quiet = quiet_stdout
self.is_quiet_err = quiet_stderr | [
"def",
"set_quiet_flags",
"(",
"self",
",",
"quiet_stdout",
",",
"quiet_stderr",
")",
":",
"self",
".",
"is_quiet",
"=",
"quiet_stdout",
"self",
".",
"is_quiet_err",
"=",
"quiet_stderr"
] | https://github.com/manuelbua/gitver/blob/77d5a4420209a4ca00349b094eeca1f13e50d8e5/gitver/termcolors.py#L71-L73 |
||||
Dikea/Dialog-System-with-Task-Retrieval-and-Seq2seq | bf1195386daeb289889a7c1ade54036faa313e48 | task_dialog/invoice_task.py | python | invoiceReviseRule | (sentence) | | return None | dialogue strategy for invoice revise rule | dialogue strategy for invoice revise rule | [
"dialogue",
"stratege",
"for",
"invoice",
"revise",
"rule"
] | def invoiceReviseRule(sentence):
'''
dialogue strategy for invoice revise rule
'''
invoice_keyword = "|".join(["发票","报销凭证","专票"])
content_keyword = '|'.join(['组织机构x', '数字x', 'ORDERID_DIGIT'])
sign = '|'.join([ '改', '换',"重新","开错"])
if re.search(sign, sentence):
if re.search("时候|多久|多长时间",sentence):
return "亲,[数字x]个工作日即可"
if re.search("更换|换开|重新开",sentence):
return "订单一旦生成无法更改,您可以提供下抬头和税号,等您的订单完成以后帮您换开#E-s[数字x]"
if re.search("改|换",sentence):
if re.search("组织机构x",sentence):
return "这边帮您提交[组织机构x]的修改申请,请您耐心等待,待您的发票修改完成后我会短信告知您的"
if re.search("姓名x",sentence):
return "这边帮您提交的修改申请,请您耐心等待,待您的发票修改完成后我会短信告知您的"
if re.search("发票类型",sentence):
return "您好,等订单完成后,您可以申请换开发票类型"
if re.search("发票.*改.*增.*票",sentence):
return "亲爱的,您是想改成增值税发票是吗?为了更好的处理您的问题,我现在立刻为您升级至专员为您处理,我们的专员会在[数字x]小时之内联系您的,请您提供联系电话和姓名,谢谢"
if re.search("(?=.*个人)(?=.*(公司|单位))|开公司",sentence):
if re.search("[改换]为(公司|单位)",sentence):
return "您好,请将您的单位信息提供下"
if re.search("公司.*[改换].*个人",sentence):
return "您好,如果您需要将抬头修改为公司,请将您的单位信息提供下;公司抬头无法改为个人抬头的"
return "您好,请将您的单位信息提供下"
if re.search('抬头|税号', sentence):
return "亲亲亲请您提供一下要改的抬头和税号哦"
if re.search("付款人",sentence):
return "您好,这边核实到付款人是无法修改的呢"
if re.search("可以.*[嘛么吗]",sentence):
return "亲,您把公司抬头和税号发过来,我这边可以帮您记录修改呢"
#return "亲,可以的呢,亲亲提供下您需要修改的内容哈#E-s[数字x],电子发票需要修改的话要辛苦您提供一下您公司的开票信息哟谢谢"
if re.search('想|给|帮|怎么|怎样|如何|(可以|能)修改[嘛吗么]', sentence):
if re.search("打印",sentence):
return "已提交申请修改好的发票[数字x]个工作日内开出,端:我的京东—客户服务—我的发票—发票详情下载即可;端:我的—客户服务—发票服务—发票详情查看即可;微信手端:京东优选—个人中心—订单详情查看即可载即可;"
return "亲,您把公司抬头和税号发过来,我这边可以帮您记录修改呢"
#return "亲亲提供下您需要修改的内容哈#E-s[数字x],电子发票需要修改的话要辛苦您提供一下您公司的开票信息哟谢谢"
if re.search('公司|单位', sentence):
#print("公司:{}".format(sentence))
return "亲亲您提供一下需要修改的公司抬头和税号哦,【抬头:税号:您的手机联系方式:开明细还是大类】"
if not re.search("不",sentence):
return "亲,您把公司抬头和税号发过来,我这边可以帮您记录修改呢"
#return "亲,你是要修改发票么,亲亲提供下您需要修改的内容哈#E-s[数字x],电子发票需要修改的话要辛苦您提供一下您公司的开票信息哟谢谢"
else:
if re.search("组织机构x",sentence):
return "这边帮您提交[组织机构x]的修改申请,请您耐心等待,待您的发票修改完成后我会短信告知您的"
if re.search("姓名x",sentence):
return "这边帮您提交的修改申请,请您耐心等待,待您的发票修改完成后我会短信告知您的"
# if re.search("",sentence):
# return
return None | [
"def",
"invoiceReviseRule",
"(",
"sentence",
")",
":",
"invoice_keyword",
"=",
"\"|\"",
".",
"join",
"(",
"[",
"\"发票\",\"报销",
"凭",
"证\",\"专票\"])",
"",
"",
"",
"",
"content_keyword",
"=",
"'|'",
".",
"join",
"(",
"[",
"'组织机构x', '数字x',",
" ",
"ORDERID_D",
"I",
"IT'])",
"",
"",
"sign",
"=",
"'|'",
".",
"join",
"(",
"[",
"'改', ",
"'",
"',\"重新",
"\"",
",\"开错\"])",
"",
"",
"",
"",
"if",
"re",
".",
"search",
"(",
"sign",
",",
"sentence",
")",
":",
"if",
"re",
".",
"search",
"(",
"\"时候|多久|多长时间\",sentence):",
"",
"",
"",
"",
"return",
"\"亲,[数字x]个工作日即可\"",
"if",
"re",
".",
"search",
"(",
"\"更换|换开|重新开\",sentence):",
"",
"",
"",
"",
"return",
"\"订单一旦生成无法更改,您可以提供下抬头和税号,等您的订单完成以后帮您换开#E-s[数字x]\"",
"if",
"re",
".",
"search",
"(",
"\"改|换\",sen",
"t",
"ence):",
"",
"",
"if",
"re",
".",
"search",
"(",
"\"组织机构x\",sentenc",
"e",
"):",
"",
"",
"return",
"\"这边帮您提交[组织机构x]的修改申请,请您耐心等待,待您的发票修改完成后我会短信告知您的\"",
"if",
"re",
".",
"search",
"(",
"\"姓名x\",sen",
"t",
"ence):",
"",
"",
"return",
"\"这边帮您提交的修改申请,请您耐心等待,待您的发票修改完成后我会短信告知您的\"",
"if",
"re",
".",
"search",
"(",
"\"发票类型\",sentenc",
"e",
"):",
"",
"",
"return",
"\"您好,等订单完成后,您可以申请换开发票类型\"",
"if",
"re",
".",
"search",
"(",
"\"发票.*改.*增.*票\",sentence)",
":",
"",
"",
"",
"return",
"\"亲爱的,您是想改成增值税发票是吗?为了更好的处理您的问题,我现在立刻为您升级至专员为您处理,我们的专员会在[数字x]小时之内联系您的,请您提供联系电话和姓名,谢谢\"",
"if",
"re",
".",
"search",
"(",
"\"(?=.*个人)(?=.*(公司|单位))|开公司\",sentence):",
"",
"",
"",
"",
"if",
"re",
".",
"search",
"(",
"\"[改换]为(公司|单位)\",sentence):",
"",
"",
"",
"",
"return",
"\"您好,请将您的单位信息提供下\"",
"if",
"re",
".",
"search",
"(",
"\"公司.*[改换].*个人\",sentence):",
"",
"",
"",
"",
"return",
"\"您好,如果您需要将抬头修改为公司,请将您的单位信息提供下;公司抬头无法改为个人抬头的\"",
"return",
"\"您好,请将您的单位信息提供下\"",
"if",
"re",
".",
"search",
"(",
"'抬头|税号', senten",
"c",
"):",
"",
"",
"return",
"\"亲亲亲请您提供一下要改的抬头和税号哦\"",
"if",
"re",
".",
"search",
"(",
"\"付款人\",sente",
"n",
"ce):",
"",
"",
"return",
"\"您好,这边核实到付款人是无法修改的呢\"",
"if",
"re",
".",
"search",
"(",
"\"可以.*[嘛么吗]\",sentence)",
":",
"",
"",
"",
"return",
"\"亲,您把公司抬头和税号发过来,我这边可以帮您记录修改呢\"",
"#return \"亲,可以的呢,亲亲提供下您需要修改的内容哈#E-s[数字x],电子发票需要修改的话要辛苦您提供一下您公司的开票信息哟谢谢\"",
"if",
"re",
".",
"search",
"(",
"'想|给|帮|怎么|怎样|如何|(可以|能)修改[嘛吗么]', sentence):",
"",
"",
"",
"",
"if",
"re",
".",
"search",
"(",
"\"打印\",sen",
"t",
"ence):",
"",
"",
"return",
"\"已提交申请修改好的发票[数字x]个工作日内开出,端:我的京东—客户服务—我的发票—发票详情下载即可;端:我的—客户服务—发票服务—发票详情查看即可;微信手端:京东优选—个人中心—订单详情查看即可载即可;\"",
"return",
"\"亲,您把公司抬头和税号发过来,我这边可以帮您记录修改呢\" ",
"#return \"亲亲提供下您需要修改的内容哈#E-s[数字x],电子发票需要修改的话要辛苦您提供一下您公司的开票信息哟谢谢\"",
"if",
"re",
".",
"search",
"(",
"'公司|单位', senten",
"c",
"):",
"",
"",
"#print(\"公司:{}\".format(sentence))",
"return",
"\"亲亲您提供一下需要修改的公司抬头和税号哦,【抬头:税号:您的手机联系方式:开明细还是大类】\"",
"if",
"not",
"re",
".",
"search",
"(",
"\"不\",s",
"e",
"ntence):",
"",
"",
"return",
"\"亲,您把公司抬头和税号发过来,我这边可以帮您记录修改呢\"",
"#return \"亲,你是要修改发票么,亲亲提供下您需要修改的内容哈#E-s[数字x],电子发票需要修改的话要辛苦您提供一下您公司的开票信息哟谢谢\"",
"else",
":",
"if",
"re",
".",
"search",
"(",
"\"组织机构x\",sentenc",
"e",
"):",
"",
"",
"return",
"\"这边帮您提交[组织机构x]的修改申请,请您耐心等待,待您的发票修改完成后我会短信告知您的\"",
"if",
"re",
".",
"search",
"(",
"\"姓名x\",sen",
"t",
"ence):",
"",
"",
"return",
"\"这边帮您提交的修改申请,请您耐心等待,待您的发票修改完成后我会短信告知您的\"",
"# if re.search(\"\",sentence):",
"# return ",
"return",
"None"
] | https://github.com/Dikea/Dialog-System-with-Task-Retrieval-and-Seq2seq/blob/bf1195386daeb289889a7c1ade54036faa313e48/task_dialog/invoice_task.py#L158-L222 |
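The dispatch above is driven by alternation patterns built with str.join; a minimal sketch of the same idiom, with English stand-ins for the Chinese keywords:

import re

sign = "|".join(["change", "swap", "redo"])  # mirrors sign = '|'.join(['改', '换', ...])
sentence = "please change my invoice header"
if re.search(sign, sentence):
    print("matched a revision keyword")      # the first matching branch wins, as above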
|
inasafe/inasafe | 355eb2ce63f516b9c26af0c86a24f99e53f63f87 | safe/impact_function/provenance_utilities.py | python | get_analysis_question | (hazard, exposure) | return question | Construct analysis question based on hazard and exposure.
:param hazard: A hazard definition.
:type hazard: dict
:param exposure: An exposure definition.
:type exposure: dict
:returns: Analysis question based on reporting standards.
:rtype: str | Construct analysis question based on hazard and exposure. | [
"Construct",
"analysis",
"question",
"based",
"on",
"hazard",
"and",
"exposure",
"."
] | def get_analysis_question(hazard, exposure):
"""Construct analysis question based on hazard and exposure.
:param hazard: A hazard definition.
:type hazard: dict
:param exposure: An exposure definition.
:type exposure: dict
:returns: Analysis question based on reporting standards.
:rtype: str
"""
# First we look for a translated hardcoded question.
question = specific_analysis_question(hazard, exposure)
if question:
return question
if hazard == hazard_generic:
# Secondly, if the hazard is generic, we don't need the hazard.
question = tr(
'In each of the hazard zones {exposure_measure} {exposure_name} '
'might be affected?').format(
exposure_measure=exposure['measure_question'],
exposure_name=exposure['name'])
return question
# Then, we fallback on a generated string on the fly.
question = tr(
'In the event of a {hazard_name}, {exposure_measure} {exposure_name} '
'might be affected?').format(
hazard_name=hazard['name'],
exposure_measure=exposure['measure_question'],
exposure_name=exposure['name'])
return question | [
"def",
"get_analysis_question",
"(",
"hazard",
",",
"exposure",
")",
":",
"# First we look for a translated hardcoded question.",
"question",
"=",
"specific_analysis_question",
"(",
"hazard",
",",
"exposure",
")",
"if",
"question",
":",
"return",
"question",
"if",
"hazard",
"==",
"hazard_generic",
":",
"# Secondly, if the hazard is generic, we don't need the hazard.",
"question",
"=",
"tr",
"(",
"'In each of the hazard zones {exposure_measure} {exposure_name} '",
"'might be affected?'",
")",
".",
"format",
"(",
"exposure_measure",
"=",
"exposure",
"[",
"'measure_question'",
"]",
",",
"exposure_name",
"=",
"exposure",
"[",
"'name'",
"]",
")",
"return",
"question",
"# Then, we fallback on a generated string on the fly.",
"question",
"=",
"tr",
"(",
"'In the event of a {hazard_name}, {exposure_measure} {exposure_name} '",
"'might be affected?'",
")",
".",
"format",
"(",
"hazard_name",
"=",
"hazard",
"[",
"'name'",
"]",
",",
"exposure_measure",
"=",
"exposure",
"[",
"'measure_question'",
"]",
",",
"exposure_name",
"=",
"exposure",
"[",
"'name'",
"]",
")",
"return",
"question"
] | https://github.com/inasafe/inasafe/blob/355eb2ce63f516b9c26af0c86a24f99e53f63f87/safe/impact_function/provenance_utilities.py#L48-L81 |
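A sketch of the fallback template with hand-made definition dicts; the field names follow the function body above, the values are invented:

hazard = {"name": "flood"}
exposure = {"measure_question": "how many", "name": "buildings"}
question = (
    "In the event of a {hazard_name}, {exposure_measure} {exposure_name} "
    "might be affected?"
).format(
    hazard_name=hazard["name"],
    exposure_measure=exposure["measure_question"],
    exposure_name=exposure["name"],
)
print(question)  # In the event of a flood, how many buildings might be affected?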
|
AnasAboureada/Penetration-Testing-Study-Notes | 8152fd609cf818dba2f07e060738a24c56221687 | scripts/hash_check.py | python | printMinus | (message) | [] | def printMinus(message):
print "[-] " + message | [
"def",
"printMinus",
"(",
"message",
")",
":",
"print",
"\"[-] \"",
"+",
"message"
] | https://github.com/AnasAboureada/Penetration-Testing-Study-Notes/blob/8152fd609cf818dba2f07e060738a24c56221687/scripts/hash_check.py#L58-L59 |
||||
cs230-stanford/cs230-code-examples | 478e747b1c8bf57c6e2ce6b7ffd8068fe0287056 | tensorflow/vision/search_hyperparams.py | python | launch_training_job | (parent_dir, data_dir, job_name, params) | Launch training of the model with a set of hyperparameters in parent_dir/job_name
Args:
parent_dir: (string) directory containing config, weights and log
data_dir: (string) directory containing the dataset
params: (dict) containing hyperparameters | Launch training of the model with a set of hyperparameters in parent_dir/job_name | [
"Launch",
"training",
"of",
"the",
"model",
"with",
"a",
"set",
"of",
"hyperparameters",
"in",
"parent_dir",
"/",
"job_name"
] | def launch_training_job(parent_dir, data_dir, job_name, params):
"""Launch training of the model with a set of hyperparameters in parent_dir/job_name
Args:
parent_dir: (string) directory containing config, weights and log
data_dir: (string) directory containing the dataset
params: (dict) containing hyperparameters
"""
# Create a new folder in parent_dir with unique_name "job_name"
model_dir = os.path.join(parent_dir, job_name)
if not os.path.exists(model_dir):
os.makedirs(model_dir)
# Write parameters in json file
json_path = os.path.join(model_dir, 'params.json')
params.save(json_path)
# Launch training with this config
cmd = "{python} train.py --model_dir {model_dir} --data_dir {data_dir}".format(python=PYTHON,
model_dir=model_dir, data_dir=data_dir)
print(cmd)
check_call(cmd, shell=True) | [
"def",
"launch_training_job",
"(",
"parent_dir",
",",
"data_dir",
",",
"job_name",
",",
"params",
")",
":",
"# Create a new folder in parent_dir with unique_name \"job_name\"",
"model_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"parent_dir",
",",
"job_name",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"model_dir",
")",
":",
"os",
".",
"makedirs",
"(",
"model_dir",
")",
"# Write parameters in json file",
"json_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"model_dir",
",",
"'params.json'",
")",
"params",
".",
"save",
"(",
"json_path",
")",
"# Launch training with this config",
"cmd",
"=",
"\"{python} train.py --model_dir {model_dir} --data_dir {data_dir}\"",
".",
"format",
"(",
"python",
"=",
"PYTHON",
",",
"model_dir",
"=",
"model_dir",
",",
"data_dir",
"=",
"data_dir",
")",
"print",
"(",
"cmd",
")",
"check_call",
"(",
"cmd",
",",
"shell",
"=",
"True",
")"
] | https://github.com/cs230-stanford/cs230-code-examples/blob/478e747b1c8bf57c6e2ce6b7ffd8068fe0287056/tensorflow/vision/search_hyperparams.py#L19-L40 |
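Usage amounts to one call per hyperparameter setting; a hedged sketch in which Params is a minimal stand-in for the project's utils.Params helper (not shown in this record) and the directory names are assumptions:

import json

class Params(dict):  # stand-in: attribute-style assignment plus a save() method
    __setattr__ = dict.__setitem__
    def save(self, json_path):
        with open(json_path, "w") as f:
            json.dump(self, f, indent=4)

params = Params(num_epochs=10)
for lr in [1e-4, 1e-3]:
    params.learning_rate = lr
    launch_training_job("experiments", "data/64x64_SIGNS",
                        "learning_rate_{}".format(lr), params)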
||
ahmetcemturan/SFACT | 7576e29ba72b33e5058049b77b7b558875542747 | skeinforge_application/skeinforge_plugins/craft_plugins/raft.py | python | main | () | Display the raft dialog. | Display the raft dialog. | [
"Display",
"the",
"raft",
"dialog",
"."
] | def main():
'Display the raft dialog.'
if len(sys.argv) > 1:
writeOutput(' '.join(sys.argv[1 :]))
else:
settings.startMainLoopFromConstructor(getNewRepository()) | [
"def",
"main",
"(",
")",
":",
"if",
"len",
"(",
"sys",
".",
"argv",
")",
">",
"1",
":",
"writeOutput",
"(",
"' '",
".",
"join",
"(",
"sys",
".",
"argv",
"[",
"1",
":",
"]",
")",
")",
"else",
":",
"settings",
".",
"startMainLoopFromConstructor",
"(",
"getNewRepository",
"(",
")",
")"
] | https://github.com/ahmetcemturan/SFACT/blob/7576e29ba72b33e5058049b77b7b558875542747/skeinforge_application/skeinforge_plugins/craft_plugins/raft.py#L1110-L1115 |
||
sql-machine-learning/elasticdl | f7ded492965791d09d6c2b7dae13134d68af903d | elasticdl/python/common/k8s_client.py | python | Client.__init__ | (
self,
*,
image_name,
namespace,
job_name,
event_callback=None,
periodic_call_func=None,
cluster_spec="",
cluster_spec_json="",
force_use_kube_config_file=False
) | ElasticDL k8s client.
Args:
image_name: Docker image path for ElasticDL pod.
namespace: The name of the Kubernetes namespace where ElasticDL
pods will be created.
job_name: ElasticDL job name, should be unique in the namespace.
Used as pod name prefix and value for "elasticdl" label.
event_callback: If not None, an event watcher will be created and
events passed to the callback.
periodic_call_func: If not None, call this method periodically.
force_use_kube_config_file: If true, force to load the cluster
config from ~/.kube/config. Otherwise, if it's in a process
running in a K8S environment, it loads the incluster config,
if not, it loads the kube config file. | ElasticDL k8s client. | [
"ElasticDL",
"k8s",
"client",
"."
] | def __init__(
self,
*,
image_name,
namespace,
job_name,
event_callback=None,
periodic_call_func=None,
cluster_spec="",
cluster_spec_json="",
force_use_kube_config_file=False
):
"""
ElasticDL k8s client.
Args:
image_name: Docker image path for ElasticDL pod.
namespace: The name of the Kubernetes namespace where ElasticDL
pods will be created.
job_name: ElasticDL job name, should be unique in the namespace.
Used as pod name prefix and value for "elasticdl" label.
event_callback: If not None, an event watcher will be created and
events passed to the callback.
periodic_call_func: If not None, call this method periodically.
force_use_kube_config_file: If true, force to load the cluster
config from ~/.kube/config. Otherwise, if it's in a process
running in a K8S environment, it loads the incluster config,
if not, it loads the kube config file.
"""
super().__init__(
image_name=image_name,
namespace=namespace,
job_name=job_name,
cluster_spec=cluster_spec,
cluster_spec_json=cluster_spec_json,
force_use_kube_config_file=force_use_kube_config_file,
)
self._event_cb = event_callback
self._periodic_call_func = periodic_call_func | [
"def",
"__init__",
"(",
"self",
",",
"*",
",",
"image_name",
",",
"namespace",
",",
"job_name",
",",
"event_callback",
"=",
"None",
",",
"periodic_call_func",
"=",
"None",
",",
"cluster_spec",
"=",
"\"\"",
",",
"cluster_spec_json",
"=",
"\"\"",
",",
"force_use_kube_config_file",
"=",
"False",
")",
":",
"super",
"(",
")",
".",
"__init__",
"(",
"image_name",
"=",
"image_name",
",",
"namespace",
"=",
"namespace",
",",
"job_name",
"=",
"job_name",
",",
"cluster_spec",
"=",
"cluster_spec",
",",
"cluster_spec_json",
"=",
"cluster_spec_json",
",",
"force_use_kube_config_file",
"=",
"force_use_kube_config_file",
",",
")",
"self",
".",
"_event_cb",
"=",
"event_callback",
"self",
".",
"_periodic_call_func",
"=",
"periodic_call_func"
] | https://github.com/sql-machine-learning/elasticdl/blob/f7ded492965791d09d6c2b7dae13134d68af903d/elasticdl/python/common/k8s_client.py#L42-L80 |
||
oracle/graalpython | 577e02da9755d916056184ec441c26e00b70145c | graalpython/lib-python/3/email/message.py | python | Message.attach | (self, payload) | Add the given payload to the current payload.
The current payload will always be a list of objects after this method
is called. If you want to set the payload to a scalar object, use
set_payload() instead. | Add the given payload to the current payload. | [
"Add",
"the",
"given",
"payload",
"to",
"the",
"current",
"payload",
"."
] | def attach(self, payload):
"""Add the given payload to the current payload.
The current payload will always be a list of objects after this method
is called. If you want to set the payload to a scalar object, use
set_payload() instead.
"""
if self._payload is None:
self._payload = [payload]
else:
try:
self._payload.append(payload)
except AttributeError:
raise TypeError("Attach is not valid on a message with a"
" non-multipart payload") | [
"def",
"attach",
"(",
"self",
",",
"payload",
")",
":",
"if",
"self",
".",
"_payload",
"is",
"None",
":",
"self",
".",
"_payload",
"=",
"[",
"payload",
"]",
"else",
":",
"try",
":",
"self",
".",
"_payload",
".",
"append",
"(",
"payload",
")",
"except",
"AttributeError",
":",
"raise",
"TypeError",
"(",
"\"Attach is not valid on a message with a\"",
"\" non-multipart payload\"",
")"
] | https://github.com/oracle/graalpython/blob/577e02da9755d916056184ec441c26e00b70145c/graalpython/lib-python/3/email/message.py#L197-L211 |
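attach() only succeeds once the payload is a list, which MIMEMultipart sets up; a runnable standard-library sketch:

from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

msg = MIMEMultipart()
msg.attach(MIMEText("first part"))
msg.attach(MIMEText("second part"))
print(len(msg.get_payload()))  # 2 -- the payload is now a list of sub-messages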
||
scikit-learn/scikit-learn | 1d1aadd0711b87d2a11c80aad15df6f8cf156712 | sklearn/decomposition/_lda.py | python | LatentDirichletAllocation.perplexity | (self, X, sub_sampling=False) | return self._perplexity_precomp_distr(X, sub_sampling=sub_sampling) | Calculate approximate perplexity for data X.
Perplexity is defined as exp(-1. * log-likelihood per word)
.. versionchanged:: 0.19
*doc_topic_distr* argument has been deprecated and is ignored
because user no longer has access to unnormalized distribution
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Document word matrix.
sub_sampling : bool
Do sub-sampling or not.
Returns
-------
score : float
Perplexity score. | Calculate approximate perplexity for data X. | [
"Calculate",
"approximate",
"perplexity",
"for",
"data",
"X",
"."
] | def perplexity(self, X, sub_sampling=False):
"""Calculate approximate perplexity for data X.
Perplexity is defined as exp(-1. * log-likelihood per word)
.. versionchanged:: 0.19
*doc_topic_distr* argument has been deprecated and is ignored
because user no longer has access to unnormalized distribution
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Document word matrix.
sub_sampling : bool
Do sub-sampling or not.
Returns
-------
score : float
Perplexity score.
"""
check_is_fitted(self)
X = self._check_non_neg_array(
X, reset_n_features=True, whom="LatentDirichletAllocation.perplexity"
)
return self._perplexity_precomp_distr(X, sub_sampling=sub_sampling) | [
"def",
"perplexity",
"(",
"self",
",",
"X",
",",
"sub_sampling",
"=",
"False",
")",
":",
"check_is_fitted",
"(",
"self",
")",
"X",
"=",
"self",
".",
"_check_non_neg_array",
"(",
"X",
",",
"reset_n_features",
"=",
"True",
",",
"whom",
"=",
"\"LatentDirichletAllocation.perplexity\"",
")",
"return",
"self",
".",
"_perplexity_precomp_distr",
"(",
"X",
",",
"sub_sampling",
"=",
"sub_sampling",
")"
] | https://github.com/scikit-learn/scikit-learn/blob/1d1aadd0711b87d2a11c80aad15df6f8cf156712/sklearn/decomposition/_lda.py#L865-L891 |
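Typical usage, sketched with a toy count matrix; scikit-learn is required and the shapes are illustrative:

import numpy as np
from sklearn.decomposition import LatentDirichletAllocation

X = np.random.RandomState(0).randint(0, 5, size=(20, 30))  # toy document-word counts
lda = LatentDirichletAllocation(n_components=3, random_state=0).fit(X)
print(lda.perplexity(X))  # exp(-1. * log-likelihood per word); lower is better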
|
timbertson/python-readability | 7b6d93a2ced8f6ce27ab1d7c26595684f8b36114 | readability/BeautifulSoup.py | python | Tag.get | (self, key, default=None) | return self._getAttrMap().get(key, default) | Returns the value of the 'key' attribute for the tag, or
the value given for 'default' if it doesn't have that
attribute. | Returns the value of the 'key' attribute for the tag, or
the value given for 'default' if it doesn't have that
attribute. | [
"Returns",
"the",
"value",
"of",
"the",
"key",
"attribute",
"for",
"the",
"tag",
"or",
"the",
"value",
"given",
"for",
"default",
"if",
"it",
"doesn",
"t",
"have",
"that",
"attribute",
"."
] | def get(self, key, default=None):
"""Returns the value of the 'key' attribute for the tag, or
the value given for 'default' if it doesn't have that
attribute."""
return self._getAttrMap().get(key, default) | [
"def",
"get",
"(",
"self",
",",
"key",
",",
"default",
"=",
"None",
")",
":",
"return",
"self",
".",
"_getAttrMap",
"(",
")",
".",
"get",
"(",
"key",
",",
"default",
")"
] | https://github.com/timbertson/python-readability/blob/7b6d93a2ced8f6ce27ab1d7c26595684f8b36114/readability/BeautifulSoup.py#L529-L533 |
|
cheshirekow/cmake_format | eff5df1f41c665ea7cac799396042e4f406ef09a | cmakelang/format/formatter.py | python | Cursor.__getitem__ | (self, idx) | A cursor can be accessed as a two-element array. For a cursor `c`,
`c[0]` refers to the row (`x`) and `c[1]` refers to the column (`y`). | A cursor can be accessed as a two-element array. For a cursor `c`,
`c[0]` refers to the row (`x`) and `c[1]` refers to the column (`y`). | [
"A",
"cursor",
"can",
"be",
"accessed",
"as",
"a",
"two",
"-",
"element",
"array",
".",
"For",
"a",
"cursor",
"c",
"c",
"[",
"0",
"]",
"refers",
"to",
"the",
"row",
"(",
"x",
")",
"and",
"c",
"[",
"1",
"]",
"refers",
"to",
"the",
"column",
"(",
"y",
")",
"."
] | def __getitem__(self, idx):
"""
A cursor can be accessed as a two-element array. For a cursor `c`,
`c[0]` refers to the row (`x`) and `c[1]` refers to the column (`y`).
"""
if idx == 0:
return self.x
if idx == 1:
return self.y
raise IndexError('Cursor indices must be 0 or 1') | [
"def",
"__getitem__",
"(",
"self",
",",
"idx",
")",
":",
"if",
"idx",
"==",
"0",
":",
"return",
"self",
".",
"x",
"if",
"idx",
"==",
"1",
":",
"return",
"self",
".",
"y",
"raise",
"IndexError",
"(",
"'Cursor indices must be 0 or 1'",
")"
] | https://github.com/cheshirekow/cmake_format/blob/eff5df1f41c665ea7cac799396042e4f406ef09a/cmakelang/format/formatter.py#L235-L246 |
||
Tautulli/Tautulli | 2410eb33805aaac4bd1c5dad0f71e4f15afaf742 | lib/future/types/newint.py | python | newint.__new__ | (cls, x=0, base=10) | From the Py3 int docstring:
| int(x=0) -> integer
| int(x, base=10) -> integer
|
| Convert a number or string to an integer, or return 0 if no
| arguments are given. If x is a number, return x.__int__(). For
| floating point numbers, this truncates towards zero.
|
| If x is not a number or if base is given, then x must be a string,
| bytes, or bytearray instance representing an integer literal in the
| given base. The literal can be preceded by '+' or '-' and be
| surrounded by whitespace. The base defaults to 10. Valid bases are
| 0 and 2-36. Base 0 means to interpret the base from the string as an
| integer literal.
| >>> int('0b100', base=0)
| 4 | From the Py3 int docstring: | [
"From",
"the",
"Py3",
"int",
"docstring",
":"
] | def __new__(cls, x=0, base=10):
"""
From the Py3 int docstring:
| int(x=0) -> integer
| int(x, base=10) -> integer
|
| Convert a number or string to an integer, or return 0 if no
| arguments are given. If x is a number, return x.__int__(). For
| floating point numbers, this truncates towards zero.
|
| If x is not a number or if base is given, then x must be a string,
| bytes, or bytearray instance representing an integer literal in the
| given base. The literal can be preceded by '+' or '-' and be
| surrounded by whitespace. The base defaults to 10. Valid bases are
| 0 and 2-36. Base 0 means to interpret the base from the string as an
| integer literal.
| >>> int('0b100', base=0)
| 4
"""
try:
val = x.__int__()
except AttributeError:
val = x
else:
if not isint(val):
raise TypeError('__int__ returned non-int ({0})'.format(
type(val)))
if base != 10:
# Explicit base
if not (istext(val) or isbytes(val) or isinstance(val, bytearray)):
raise TypeError(
"int() can't convert non-string with explicit base")
try:
return super(newint, cls).__new__(cls, val, base)
except TypeError:
return super(newint, cls).__new__(cls, newbytes(val), base)
# After here, base is 10
try:
return super(newint, cls).__new__(cls, val)
except TypeError:
# Py2 long doesn't handle bytearray input with an explicit base, so
# handle this here.
# Py3: int(bytearray(b'10'), 2) == 2
# Py2: int(bytearray(b'10'), 2) == 2 raises TypeError
# Py2: long(bytearray(b'10'), 2) == 2 raises TypeError
try:
return super(newint, cls).__new__(cls, newbytes(val))
except:
raise TypeError("newint argument must be a string or a number,"
"not '{0}'".format(type(val))) | [
"def",
"__new__",
"(",
"cls",
",",
"x",
"=",
"0",
",",
"base",
"=",
"10",
")",
":",
"try",
":",
"val",
"=",
"x",
".",
"__int__",
"(",
")",
"except",
"AttributeError",
":",
"val",
"=",
"x",
"else",
":",
"if",
"not",
"isint",
"(",
"val",
")",
":",
"raise",
"TypeError",
"(",
"'__int__ returned non-int ({0})'",
".",
"format",
"(",
"type",
"(",
"val",
")",
")",
")",
"if",
"base",
"!=",
"10",
":",
"# Explicit base",
"if",
"not",
"(",
"istext",
"(",
"val",
")",
"or",
"isbytes",
"(",
"val",
")",
"or",
"isinstance",
"(",
"val",
",",
"bytearray",
")",
")",
":",
"raise",
"TypeError",
"(",
"\"int() can't convert non-string with explicit base\"",
")",
"try",
":",
"return",
"super",
"(",
"newint",
",",
"cls",
")",
".",
"__new__",
"(",
"cls",
",",
"val",
",",
"base",
")",
"except",
"TypeError",
":",
"return",
"super",
"(",
"newint",
",",
"cls",
")",
".",
"__new__",
"(",
"cls",
",",
"newbytes",
"(",
"val",
")",
",",
"base",
")",
"# After here, base is 10",
"try",
":",
"return",
"super",
"(",
"newint",
",",
"cls",
")",
".",
"__new__",
"(",
"cls",
",",
"val",
")",
"except",
"TypeError",
":",
"# Py2 long doesn't handle bytearray input with an explicit base, so",
"# handle this here.",
"# Py3: int(bytearray(b'10'), 2) == 2",
"# Py2: int(bytearray(b'10'), 2) == 2 raises TypeError",
"# Py2: long(bytearray(b'10'), 2) == 2 raises TypeError",
"try",
":",
"return",
"super",
"(",
"newint",
",",
"cls",
")",
".",
"__new__",
"(",
"cls",
",",
"newbytes",
"(",
"val",
")",
")",
"except",
":",
"raise",
"TypeError",
"(",
"\"newint argument must be a string or a number,\"",
"\"not '{0}'\"",
".",
"format",
"(",
"type",
"(",
"val",
")",
")",
")"
] | https://github.com/Tautulli/Tautulli/blob/2410eb33805aaac4bd1c5dad0f71e4f15afaf742/lib/future/types/newint.py#L37-L89 |
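The Python 3 semantics being backported here can be checked directly against the built-in int:

assert int("0b100", base=0) == 4      # base 0 reads the base from the literal itself
assert int(bytearray(b"10"), 2) == 2  # bytearray input with an explicit base
assert int(" -7 ") == -7              # surrounding whitespace and a sign are allowed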
||
Axelrod-Python/Axelrod | 00e18323c1b1af74df873773e44f31e1b9a299c6 | axelrod/strategies/hmm.py | python | mutate_row | (row, mutation_probability, rng) | return row | , crossover_lists_of_lists
Given a row of probabilities, randomly change each entry with probability
`mutation_probability` (a value between 0 and 1). If changing, then change
by a value randomly (uniformly) chosen from [-0.25, 0.25] bounded by 0 and
100%. | Given a row of probabilities, randomly change each entry with probability
`mutation_probability` (a value between 0 and 1). If changing, then change
by a value randomly (uniformly) chosen from [-0.25, 0.25] bounded by 0 and
100%. | [
"Given",
"a",
"row",
"of",
"probabilities",
"randomly",
"change",
"each",
"entry",
"with",
"probability",
"mutation_probability",
"(",
"a",
"value",
"between",
"0",
"and",
"1",
")",
".",
"If",
"changing",
"then",
"change",
"by",
"a",
"value",
"randomly",
"(",
"uniformly",
")",
"chosen",
"from",
"[",
"-",
"0",
".",
"25",
"0",
".",
"25",
"]",
"bounded",
"by",
"0",
"and",
"100%",
"."
] | def mutate_row(row, mutation_probability, rng):
"""
Given a row of probabilities, randomly change each entry with probability
`mutation_probability` (a value between 0 and 1). If changing, then change
by a value randomly (uniformly) chosen from [-0.25, 0.25] bounded by 0 and
100%.
"""
randoms = rng.random(len(row))
for i in range(len(row)):
if randoms[i] < mutation_probability:
ep = rng.uniform(-1, 1) / 4
row[i] += ep
if row[i] < 0:
row[i] = 0
if row[i] > 1:
row[i] = 1
return row | [
"def",
"mutate_row",
"(",
"row",
",",
"mutation_probability",
",",
"rng",
")",
":",
"randoms",
"=",
"rng",
".",
"random",
"(",
"len",
"(",
"row",
")",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"row",
")",
")",
":",
"if",
"randoms",
"[",
"i",
"]",
"<",
"mutation_probability",
":",
"ep",
"=",
"rng",
".",
"uniform",
"(",
"-",
"1",
",",
"1",
")",
"/",
"4",
"row",
"[",
"i",
"]",
"+=",
"ep",
"if",
"row",
"[",
"i",
"]",
"<",
"0",
":",
"row",
"[",
"i",
"]",
"=",
"0",
"if",
"row",
"[",
"i",
"]",
">",
"1",
":",
"row",
"[",
"i",
"]",
"=",
"1",
"return",
"row"
] | https://github.com/Axelrod-Python/Axelrod/blob/00e18323c1b1af74df873773e44f31e1b9a299c6/axelrod/strategies/hmm.py#L33-L49 |
|
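A minimal sketch of calling mutate_row; numpy's default Generator is assumed for rng, since it provides the random() and uniform() methods used above:

import numpy as np
from axelrod.strategies.hmm import mutate_row

rng = np.random.default_rng(seed=0)
row = [0.1, 0.5, 0.9]
mutated = mutate_row(row, mutation_probability=0.5, rng=rng)
# Each entry moves by at most 0.25 and is clamped to [0, 1].
assert all(0.0 <= p <= 1.0 for p in mutated)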
psf/black | 33e3bb1e4e326713f85749705179da2e31520670 | src/blib2to3/pgen2/driver.py | python | Driver.parse_file | (
self, filename: Path, encoding: Optional[Text] = None, debug: bool = False
) | Parse a file and return the syntax tree. | Parse a file and return the syntax tree. | [
"Parse",
"a",
"file",
"and",
"return",
"the",
"syntax",
"tree",
"."
] | def parse_file(
self, filename: Path, encoding: Optional[Text] = None, debug: bool = False
) -> NL:
"""Parse a file and return the syntax tree."""
with io.open(filename, "r", encoding=encoding) as stream:
return self.parse_stream(stream, debug) | [
"def",
"parse_file",
"(",
"self",
",",
"filename",
":",
"Path",
",",
"encoding",
":",
"Optional",
"[",
"Text",
"]",
"=",
"None",
",",
"debug",
":",
"bool",
"=",
"False",
")",
"->",
"NL",
":",
"with",
"io",
".",
"open",
"(",
"filename",
",",
"\"r\"",
",",
"encoding",
"=",
"encoding",
")",
"as",
"stream",
":",
"return",
"self",
".",
"parse_stream",
"(",
"stream",
",",
"debug",
")"
] | https://github.com/psf/black/blob/33e3bb1e4e326713f85749705179da2e31520670/src/blib2to3/pgen2/driver.py#L201-L206 |
||
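A hypothetical way to drive parse_file; the grammar chosen here is an assumption (black wires up its Driver with the same module):

from blib2to3 import pygram
from blib2to3.pgen2.driver import Driver

driver = Driver(pygram.python_grammar_no_print_statement)
tree = driver.parse_file("example.py", encoding="utf-8")  # path is a placeholder
print(tree)  # str() of the tree round-trips back to source text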
tkarras/progressive_growing_of_gans | 2504c3f3cb98ca58751610ad61fa1097313152bd | networks.py | python | dense | (x, fmaps, gain=np.sqrt(2), use_wscale=False) | return tf.matmul(x, w) | [] | def dense(x, fmaps, gain=np.sqrt(2), use_wscale=False):
if len(x.shape) > 2:
x = tf.reshape(x, [-1, np.prod([d.value for d in x.shape[1:]])])
w = get_weight([x.shape[1].value, fmaps], gain=gain, use_wscale=use_wscale)
w = tf.cast(w, x.dtype)
return tf.matmul(x, w) | [
"def",
"dense",
"(",
"x",
",",
"fmaps",
",",
"gain",
"=",
"np",
".",
"sqrt",
"(",
"2",
")",
",",
"use_wscale",
"=",
"False",
")",
":",
"if",
"len",
"(",
"x",
".",
"shape",
")",
">",
"2",
":",
"x",
"=",
"tf",
".",
"reshape",
"(",
"x",
",",
"[",
"-",
"1",
",",
"np",
".",
"prod",
"(",
"[",
"d",
".",
"value",
"for",
"d",
"in",
"x",
".",
"shape",
"[",
"1",
":",
"]",
"]",
")",
"]",
")",
"w",
"=",
"get_weight",
"(",
"[",
"x",
".",
"shape",
"[",
"1",
"]",
".",
"value",
",",
"fmaps",
"]",
",",
"gain",
"=",
"gain",
",",
"use_wscale",
"=",
"use_wscale",
")",
"w",
"=",
"tf",
".",
"cast",
"(",
"w",
",",
"x",
".",
"dtype",
")",
"return",
"tf",
".",
"matmul",
"(",
"x",
",",
"w",
")"
] | https://github.com/tkarras/progressive_growing_of_gans/blob/2504c3f3cb98ca58751610ad61fa1097313152bd/networks.py#L34-L39 |
|||
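A sketch of using dense() in the repo's TF 1.x graph mode; it assumes get_weight() from the same networks.py is in scope (Dimension.value only exists in the TF1 API):

import numpy as np
import tensorflow as tf  # TF 1.x

x = tf.placeholder(tf.float32, shape=[None, 8, 8, 3])
y = dense(x, fmaps=128, gain=np.sqrt(2), use_wscale=True)
# The input is flattened to [batch, 8*8*3] before the matmul, so y is [None, 128].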
eventable/vobject | 498555a553155ea9b26aace93332ae79365ecb31 | vobject/base.py | python | newFromBehavior | (name, id=None) | return obj | Given a name, return a behaviored ContentLine or Component. | Given a name, return a behaviored ContentLine or Component. | [
"Given",
"a",
"name",
"return",
"a",
"behaviored",
"ContentLine",
"or",
"Component",
"."
] | def newFromBehavior(name, id=None):
"""
Given a name, return a behaviored ContentLine or Component.
"""
name = name.upper()
behavior = getBehavior(name, id)
if behavior is None:
raise VObjectError("No behavior found named {0!s}".format(name))
if behavior.isComponent:
obj = Component(name)
else:
obj = ContentLine(name, [], '')
obj.behavior = behavior
obj.isNative = False
return obj | [
"def",
"newFromBehavior",
"(",
"name",
",",
"id",
"=",
"None",
")",
":",
"name",
"=",
"name",
".",
"upper",
"(",
")",
"behavior",
"=",
"getBehavior",
"(",
"name",
",",
"id",
")",
"if",
"behavior",
"is",
"None",
":",
"raise",
"VObjectError",
"(",
"\"No behavior found named {0!s}\"",
".",
"format",
"(",
"name",
")",
")",
"if",
"behavior",
".",
"isComponent",
":",
"obj",
"=",
"Component",
"(",
"name",
")",
"else",
":",
"obj",
"=",
"ContentLine",
"(",
"name",
",",
"[",
"]",
",",
"''",
")",
"obj",
".",
"behavior",
"=",
"behavior",
"obj",
".",
"isNative",
"=",
"False",
"return",
"obj"
] | https://github.com/eventable/vobject/blob/498555a553155ea9b26aace93332ae79365ecb31/vobject/base.py#L1200-L1214 |
|
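A minimal sketch of newFromBehavior; the standard iCalendar behaviors are registered as a side effect of importing vobject:

import vobject
from vobject.base import newFromBehavior

cal = newFromBehavior('vcalendar')   # isComponent -> behaviored Component
event = newFromBehavior('vevent')
cal.add(event)
print(cal.name)  # 'VCALENDAR' (names are upper-cased)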
line/line-bot-sdk-python | d97d488876d504ab3cb6ecfc1574ea42bd669565 | linebot/api.py | python | LineBotApi.update_rich_menu_alias | (self, rich_menu_alias_id, rich_menu_alias, timeout=None) | Call update rich menu alias API.
https://developers.line.biz/en/reference/messaging-api/#update-rich-menu-alias
:param str rich_menu_alias_id: ID of an uploaded rich menu alias.
:param rich_menu_alias: Rich menu alias object carrying the new rich menu ID.
:type rich_menu_alias: T <= :py:class:`linebot.models.rich_menu.RichMenuAlias`
:param timeout: (optional) How long to wait for the server
to send data before giving up, as a float,
or a (connect timeout, read timeout) float tuple.
Default is self.http_client.timeout
:type timeout: float | tuple(float, float)
:rtype: None | Call update rich menu alias API. | [
"Call",
"update",
"rich",
"menu",
"alias",
"API",
"."
] | def update_rich_menu_alias(self, rich_menu_alias_id, rich_menu_alias, timeout=None):
"""Call update rich menu alias API.
https://developers.line.biz/en/reference/messaging-api/#update-rich-menu-alias
:param str rich_menu_alias_id: ID of an uploaded rich menu alias.
:param rich_menu_alias: Rich menu alias object carrying the new rich menu ID.
:type rich_menu_alias: T <= :py:class:`linebot.models.rich_menu.RichMenuAlias`
:param timeout: (optional) How long to wait for the server
to send data before giving up, as a float,
or a (connect timeout, read timeout) float tuple.
Default is self.http_client.timeout
:type timeout: float | tuple(float, float)
:rtype: None
"""
self._post(
'/v2/bot/richmenu/alias/{rich_menu_id}'.format(rich_menu_id=rich_menu_alias_id),
data=rich_menu_alias.as_json_string(),
timeout=timeout
) | [
"def",
"update_rich_menu_alias",
"(",
"self",
",",
"rich_menu_alias_id",
",",
"rich_menu_alias",
",",
"timeout",
"=",
"None",
")",
":",
"self",
".",
"_post",
"(",
"'/v2/bot/richmenu/alias/{rich_menu_id}'",
".",
"format",
"(",
"rich_menu_id",
"=",
"rich_menu_alias_id",
")",
",",
"data",
"=",
"rich_menu_alias",
".",
"as_json_string",
"(",
")",
",",
"timeout",
"=",
"timeout",
")"
] | https://github.com/line/line-bot-sdk-python/blob/d97d488876d504ab3cb6ecfc1574ea42bd669565/linebot/api.py#L756-L776 |
||
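A hypothetical call to the method above; the access token and both IDs are placeholders, and the RichMenuAlias constructor is assumed from linebot.models:

from linebot import LineBotApi
from linebot.models import RichMenuAlias

line_bot_api = LineBotApi('YOUR_CHANNEL_ACCESS_TOKEN')
alias = RichMenuAlias(rich_menu_id='richmenu-0000000000000000')
line_bot_api.update_rich_menu_alias('richmenu-alias-a', alias)  # returns None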
stanford-futuredata/noscope | 6c6aa72e09280530dfdcf87871c1ac43df3b6cc3 | scripts/export-tf-graphs.py | python | main | () | [] | def main():
parser = argparse.ArgumentParser()
parser.add_argument('--model_dir', required=True, help='Models with *.h5')
parser.add_argument('--output_dir', required=True, help='Where to output the files (as *.pb)')
args = parser.parse_args()
fnames = glob.glob(os.path.join(args.model_dir, '*.h5'))
first = np.zeros( (1, 50, 50, 3) )
for keras_model_fname in fnames:
sess = tf.Session()
frozen_graph_path = os.path.splitext(os.path.split(keras_model_fname)[1])[0] + '.pb'
frozen_graph_path = os.path.join(args.output_dir, frozen_graph_path)
print 'Producing: ' + frozen_graph_path
model = get_keras_model(keras_model_fname, sess)
img1 = tf.placeholder(tf.float32, shape=(None, 50, 50, 3), name='input_img')
tf_model = model(img1)
output = tf.identity(tf_model, name='output_prob')
# Run to set weights
sess.run(output,
feed_dict = {img1: first})
tf.train.write_graph(sess.graph_def, '/tmp', 'graph-structure.pb')
saver = saver_lib.Saver()
checkpoint_path = saver.save(sess, '/tmp/vars', global_step=0)
input_graph_path = '/tmp/graph-structure.pb'
input_saver_def_path = ''
input_binary = False
input_checkpoint_path = '/tmp/vars-0'
output_node_names = 'output_prob'
restore_op_name = 'save/restore_all'
filename_tensor_name = 'save/Const:0'
clear_devices = False
initializer_nodes = ""
freeze_graph(input_graph_path, input_saver_def_path,
input_binary, input_checkpoint_path,
output_node_names, restore_op_name,
filename_tensor_name, frozen_graph_path,
clear_devices, initializer_nodes)
# Clean up
sess.close()
tf.reset_default_graph() | [
"def",
"main",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
")",
"parser",
".",
"add_argument",
"(",
"'--model_dir'",
",",
"required",
"=",
"True",
",",
"help",
"=",
"'Models with *.h5'",
")",
"parser",
".",
"add_argument",
"(",
"'--output_dir'",
",",
"required",
"=",
"True",
",",
"help",
"=",
"'Where to output the files (as *.pb)'",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"fnames",
"=",
"glob",
".",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"args",
".",
"model_dir",
",",
"'*.h5'",
")",
")",
"first",
"=",
"np",
".",
"zeros",
"(",
"(",
"1",
",",
"50",
",",
"50",
",",
"3",
")",
")",
"for",
"keras_model_fname",
"in",
"fnames",
":",
"sess",
"=",
"tf",
".",
"Session",
"(",
")",
"frozen_graph_path",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"os",
".",
"path",
".",
"split",
"(",
"keras_model_fname",
")",
"[",
"1",
"]",
")",
"[",
"0",
"]",
"+",
"'.pb'",
"frozen_graph_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"args",
".",
"output_dir",
",",
"frozen_graph_path",
")",
"print",
"'Producing: '",
"+",
"frozen_graph_path",
"model",
"=",
"get_keras_model",
"(",
"keras_model_fname",
",",
"sess",
")",
"img1",
"=",
"tf",
".",
"placeholder",
"(",
"tf",
".",
"float32",
",",
"shape",
"=",
"(",
"None",
",",
"50",
",",
"50",
",",
"3",
")",
",",
"name",
"=",
"'input_img'",
")",
"tf_model",
"=",
"model",
"(",
"img1",
")",
"output",
"=",
"tf",
".",
"identity",
"(",
"tf_model",
",",
"name",
"=",
"'output_prob'",
")",
"# Run to set weights",
"sess",
".",
"run",
"(",
"output",
",",
"feed_dict",
"=",
"{",
"img1",
":",
"first",
"}",
")",
"tf",
".",
"train",
".",
"write_graph",
"(",
"sess",
".",
"graph_def",
",",
"'/tmp'",
",",
"'graph-structure.pb'",
")",
"saver",
"=",
"saver_lib",
".",
"Saver",
"(",
")",
"checkpoint_path",
"=",
"saver",
".",
"save",
"(",
"sess",
",",
"'/tmp/vars'",
",",
"global_step",
"=",
"0",
")",
"input_graph_path",
"=",
"'/tmp/graph-structure.pb'",
"input_saver_def_path",
"=",
"''",
"input_binary",
"=",
"False",
"input_checkpoint_path",
"=",
"'/tmp/vars-0'",
"output_node_names",
"=",
"'output_prob'",
"restore_op_name",
"=",
"'save/restore_all'",
"filename_tensor_name",
"=",
"'save/Const:0'",
"clear_devices",
"=",
"False",
"initializer_nodes",
"=",
"\"\"",
"freeze_graph",
"(",
"input_graph_path",
",",
"input_saver_def_path",
",",
"input_binary",
",",
"input_checkpoint_path",
",",
"output_node_names",
",",
"restore_op_name",
",",
"filename_tensor_name",
",",
"frozen_graph_path",
",",
"clear_devices",
",",
"initializer_nodes",
")",
"# Clean up",
"sess",
".",
"close",
"(",
")",
"tf",
".",
"reset_default_graph",
"(",
")"
] | https://github.com/stanford-futuredata/noscope/blob/6c6aa72e09280530dfdcf87871c1ac43df3b6cc3/scripts/export-tf-graphs.py#L29-L73 |
||||
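The script above is invoked as python export-tf-graphs.py --model_dir <dir> --output_dir <dir>; below is a sketch of consuming one of the frozen graphs it writes (file path illustrative, TF 1.x API as in the repo):

import numpy as np
import tensorflow as tf  # TF 1.x

graph_def = tf.GraphDef()
with open('frozen/model.pb', 'rb') as f:
    graph_def.ParseFromString(f.read())
tf.import_graph_def(graph_def, name='')

with tf.Session() as sess:
    probs = sess.run('output_prob:0',
                     feed_dict={'input_img:0': np.zeros((1, 50, 50, 3))})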
attzonko/mmpy_bot | 570a18e5671ff3339d44bd21a3cbe1289c8fb848 | mmpy_bot/utils.py | python | completed_future | () | return future | Utility function to create a stub Future object that asyncio can wait for. | Utility function to create a stub Future object that asyncio can wait for. | [
"Utility",
"function",
"to",
"create",
"a",
"stub",
"Future",
"object",
"that",
"asyncio",
"can",
"wait",
"for",
"."
] | def completed_future():
"""Utility function to create a stub Future object that asyncio can wait for."""
future = asyncio.Future()
future.set_result(True)
return future | [
"def",
"completed_future",
"(",
")",
":",
"future",
"=",
"asyncio",
".",
"Future",
"(",
")",
"future",
".",
"set_result",
"(",
"True",
")",
"return",
"future"
] | https://github.com/attzonko/mmpy_bot/blob/570a18e5671ff3339d44bd21a3cbe1289c8fb848/mmpy_bot/utils.py#L9-L13 |
|
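A tiny sketch of why completed_future exists: handlers with nothing asynchronous to do can still hand asyncio something awaitable:

import asyncio
from mmpy_bot.utils import completed_future

async def demo():
    result = await completed_future()  # resolves immediately
    assert result is True

asyncio.run(demo())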
mozillazg/pypy | 2ff5cd960c075c991389f842c6d59e71cf0cb7d0 | lib-python/2.7/uuid.py | python | _ifconfig_getnode | () | Get the hardware address on Unix by running ifconfig. | Get the hardware address on Unix by running ifconfig. | [
"Get",
"the",
"hardware",
"address",
"on",
"Unix",
"by",
"running",
"ifconfig",
"."
] | def _ifconfig_getnode():
"""Get the hardware address on Unix by running ifconfig."""
# This works on Linux ('' or '-a'), Tru64 ('-av'), but not all Unixes.
keywords = ('hwaddr', 'ether', 'address:', 'lladdr')
for args in ('', '-a', '-av'):
mac = _find_mac('ifconfig', args, keywords, lambda i: i+1)
if mac:
return mac | [
"def",
"_ifconfig_getnode",
"(",
")",
":",
"# This works on Linux ('' or '-a'), Tru64 ('-av'), but not all Unixes.",
"keywords",
"=",
"(",
"'hwaddr'",
",",
"'ether'",
",",
"'address:'",
",",
"'lladdr'",
")",
"for",
"args",
"in",
"(",
"''",
",",
"'-a'",
",",
"'-av'",
")",
":",
"mac",
"=",
"_find_mac",
"(",
"'ifconfig'",
",",
"args",
",",
"keywords",
",",
"lambda",
"i",
":",
"i",
"+",
"1",
")",
"if",
"mac",
":",
"return",
"mac"
] | https://github.com/mozillazg/pypy/blob/2ff5cd960c075c991389f842c6d59e71cf0cb7d0/lib-python/2.7/uuid.py#L357-L364 |
||
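_ifconfig_getnode is private; the supported route to the same value is uuid.getnode(), which tries this and other strategies in turn:

import uuid

node = uuid.getnode()   # 48-bit integer
print('%012x' % node)   # hardware address rendered as 12 hex digits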
qutebrowser/qutebrowser | 3a2aaaacbf97f4bf0c72463f3da94ed2822a5442 | qutebrowser/config/websettings.py | python | AbstractSettings.update_for_url | (self, url: QUrl) | Update settings customized for the given tab. | Update settings customized for the given tab. | [
"Update",
"settings",
"customized",
"for",
"the",
"given",
"tab",
"."
] | def update_for_url(self, url: QUrl) -> None:
"""Update settings customized for the given tab."""
qtutils.ensure_valid(url)
for values in config.instance:
if not values.opt.supports_pattern:
continue
value = values.get_for_url(url, fallback=False)
self._update_setting(values.opt.name, value) | [
"def",
"update_for_url",
"(",
"self",
",",
"url",
":",
"QUrl",
")",
"->",
"None",
":",
"qtutils",
".",
"ensure_valid",
"(",
"url",
")",
"for",
"values",
"in",
"config",
".",
"instance",
":",
"if",
"not",
"values",
".",
"opt",
".",
"supports_pattern",
":",
"continue",
"value",
"=",
"values",
".",
"get_for_url",
"(",
"url",
",",
"fallback",
"=",
"False",
")",
"self",
".",
"_update_setting",
"(",
"values",
".",
"opt",
".",
"name",
",",
"value",
")"
] | https://github.com/qutebrowser/qutebrowser/blob/3a2aaaacbf97f4bf0c72463f3da94ed2822a5442/qutebrowser/config/websettings.py#L190-L198 |
||
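A rough sketch of the call pattern; this is qutebrowser-internal, so a concrete AbstractSettings instance (settings below) and the PyQt5 import are both assumptions:

from PyQt5.QtCore import QUrl

settings.update_for_url(QUrl('https://example.com'))  # settings: assumed instance
# Only options with supports_pattern=True are re-resolved for that URL.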
zhl2008/awd-platform | 0416b31abea29743387b10b3914581fbe8e7da5e | web_flaskbb/lib/python2.7/site-packages/redis/connection.py | python | ConnectionPool.make_connection | (self) | return self.connection_class(**self.connection_kwargs) | Create a new connection | Create a new connection | [
"Create",
"a",
"new",
"connection"
] | def make_connection(self):
"Create a new connection"
if self._created_connections >= self.max_connections:
raise ConnectionError("Too many connections")
self._created_connections += 1
return self.connection_class(**self.connection_kwargs) | [
"def",
"make_connection",
"(",
"self",
")",
":",
"if",
"self",
".",
"_created_connections",
">=",
"self",
".",
"max_connections",
":",
"raise",
"ConnectionError",
"(",
"\"Too many connections\"",
")",
"self",
".",
"_created_connections",
"+=",
"1",
"return",
"self",
".",
"connection_class",
"(",
"*",
"*",
"self",
".",
"connection_kwargs",
")"
] | https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/lib/python2.7/site-packages/redis/connection.py#L978-L983 |
|
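make_connection is normally invoked for you when a command needs a connection; a typical entry point looks like this (URL illustrative):

import redis

pool = redis.ConnectionPool.from_url('redis://localhost:6379/0', max_connections=10)
r = redis.Redis(connection_pool=pool)
r.ping()  # checks out a connection, creating one via make_connection if needed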
proycon/pynlpl | 7707f69a91caaa6cde037f0d0379f1d42500a68b | pynlpl/evaluation.py | python | ConfusionMatrix.__str__ | (self) | return o | Print Confusion Matrix in table form | Print Confusion Matrix in table form | [
"Print",
"Confusion",
"Matrix",
"in",
"table",
"form"
] | def __str__(self):
"""Print Confusion Matrix in table form"""
o = "== Confusion Matrix == (hor: goals, vert: observations)\n\n"
keys = set()
for goalkey,observationkey in self._count.keys():
keys.add(goalkey)
keys.add(observationkey)
keys = sorted( keys)
linemask = "%20s"
cells = ['']
for keyH in keys:
l = len(keyH)
if l < 4:
l = 4
elif l > 15:
l = 15
linemask += " %" + str(l) + "s"
cells.append(keyH)
linemask += "\n"
o += linemask % tuple(cells)
for keyV in keys:
linemask = "%20s"
cells = [keyV]
for keyH in keys:
l = len(keyH)
if l < 4:
l = 4
elif l > 15:
l = 15
linemask += " %" + str(l) + "d"
try:
count = self._count[(keyH, keyV)]
except:
count = 0
cells.append(count)
linemask += "\n"
o += linemask % tuple(cells)
return o | [
"def",
"__str__",
"(",
"self",
")",
":",
"o",
"=",
"\"== Confusion Matrix == (hor: goals, vert: observations)\\n\\n\"",
"keys",
"=",
"set",
"(",
")",
"for",
"goalkey",
",",
"observationkey",
"in",
"self",
".",
"_count",
".",
"keys",
"(",
")",
":",
"keys",
".",
"add",
"(",
"goalkey",
")",
"keys",
".",
"add",
"(",
"observationkey",
")",
"keys",
"=",
"sorted",
"(",
"keys",
")",
"linemask",
"=",
"\"%20s\"",
"cells",
"=",
"[",
"''",
"]",
"for",
"keyH",
"in",
"keys",
":",
"l",
"=",
"len",
"(",
"keyH",
")",
"if",
"l",
"<",
"4",
":",
"l",
"=",
"4",
"elif",
"l",
">",
"15",
":",
"l",
"=",
"15",
"linemask",
"+=",
"\" %\"",
"+",
"str",
"(",
"l",
")",
"+",
"\"s\"",
"cells",
".",
"append",
"(",
"keyH",
")",
"linemask",
"+=",
"\"\\n\"",
"o",
"+=",
"linemask",
"%",
"tuple",
"(",
"cells",
")",
"for",
"keyV",
"in",
"keys",
":",
"linemask",
"=",
"\"%20s\"",
"cells",
"=",
"[",
"keyV",
"]",
"for",
"keyH",
"in",
"keys",
":",
"l",
"=",
"len",
"(",
"keyH",
")",
"if",
"l",
"<",
"4",
":",
"l",
"=",
"4",
"elif",
"l",
">",
"15",
":",
"l",
"=",
"15",
"linemask",
"+=",
"\" %\"",
"+",
"str",
"(",
"l",
")",
"+",
"\"d\"",
"try",
":",
"count",
"=",
"self",
".",
"_count",
"[",
"(",
"keyH",
",",
"keyV",
")",
"]",
"except",
":",
"count",
"=",
"0",
"cells",
".",
"append",
"(",
"count",
")",
"linemask",
"+=",
"\"\\n\"",
"o",
"+=",
"linemask",
"%",
"tuple",
"(",
"cells",
")",
"return",
"o"
] | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/evaluation.py#L130-L172 |
|
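A sketch of building and rendering the matrix; it assumes ConfusionMatrix follows pynlpl's FrequencyList interface (count() on (goal, observation) tuples), which the _count usage above suggests:

from pynlpl.evaluation import ConfusionMatrix

cm = ConfusionMatrix()
for goal, observed in [('cat', 'cat'), ('cat', 'dog'), ('dog', 'dog')]:
    cm.count((goal, observed))
print(cm)  # goals across the top, observations down the side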
rmax/dirbot-mysql | 19b750f88279c0bbd50ccfa39709d57628057ac9 | dirbot/pipelines.py | python | MySQLStorePipeline._do_upsert | (self, conn, item, spider) | Perform an insert or update. | Perform an insert or update. | [
"Perform",
"an",
"insert",
"or",
"update",
"."
] | def _do_upsert(self, conn, item, spider):
"""Perform an insert or update."""
guid = self._get_guid(item)
now = datetime.utcnow().replace(microsecond=0).isoformat(' ')
conn.execute("""SELECT EXISTS(
SELECT 1 FROM website WHERE guid = %s
)""", (guid, ))
ret = conn.fetchone()[0]
if ret:
conn.execute("""
UPDATE website
SET name=%s, description=%s, url=%s, updated=%s
WHERE guid=%s
""", (item['name'], item['description'], item['url'], now, guid))
spider.log("Item updated in db: %s %r" % (guid, item))
else:
conn.execute("""
INSERT INTO website (guid, name, description, url, updated)
VALUES (%s, %s, %s, %s, %s)
""", (guid, item['name'], item['description'], item['url'], now))
spider.log("Item stored in db: %s %r" % (guid, item)) | [
"def",
"_do_upsert",
"(",
"self",
",",
"conn",
",",
"item",
",",
"spider",
")",
":",
"guid",
"=",
"self",
".",
"_get_guid",
"(",
"item",
")",
"now",
"=",
"datetime",
".",
"utcnow",
"(",
")",
".",
"replace",
"(",
"microsecond",
"=",
"0",
")",
".",
"isoformat",
"(",
"' '",
")",
"conn",
".",
"execute",
"(",
"\"\"\"SELECT EXISTS(\n SELECT 1 FROM website WHERE guid = %s\n )\"\"\"",
",",
"(",
"guid",
",",
")",
")",
"ret",
"=",
"conn",
".",
"fetchone",
"(",
")",
"[",
"0",
"]",
"if",
"ret",
":",
"conn",
".",
"execute",
"(",
"\"\"\"\n UPDATE website\n SET name=%s, description=%s, url=%s, updated=%s\n WHERE guid=%s\n \"\"\"",
",",
"(",
"item",
"[",
"'name'",
"]",
",",
"item",
"[",
"'description'",
"]",
",",
"item",
"[",
"'url'",
"]",
",",
"now",
",",
"guid",
")",
")",
"spider",
".",
"log",
"(",
"\"Item updated in db: %s %r\"",
"%",
"(",
"guid",
",",
"item",
")",
")",
"else",
":",
"conn",
".",
"execute",
"(",
"\"\"\"\n INSERT INTO website (guid, name, description, url, updated)\n VALUES (%s, %s, %s, %s, %s)\n \"\"\"",
",",
"(",
"guid",
",",
"item",
"[",
"'name'",
"]",
",",
"item",
"[",
"'description'",
"]",
",",
"item",
"[",
"'url'",
"]",
",",
"now",
")",
")",
"spider",
".",
"log",
"(",
"\"Item stored in db: %s %r\"",
"%",
"(",
"guid",
",",
"item",
")",
")"
] | https://github.com/rmax/dirbot-mysql/blob/19b750f88279c0bbd50ccfa39709d57628057ac9/dirbot/pipelines.py#L69-L91 |
||
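A sketch of the per-item flow; pipeline, cursor, and spider are assumed objects (in the real pipeline, twisted adbapi's runInteraction supplies the cursor):

item = {'name': 'Example', 'description': 'A demo site',
        'url': 'http://example.com/'}
pipeline._do_upsert(cursor, item, spider)
# The first call INSERTs a row keyed by the url's guid; later calls UPDATE it.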
evennia/evennia | fa79110ba6b219932f22297838e8ac72ebc0be0e | evennia/contrib/building_menu.py | python | BuildingMenu.add_choice_edit | (
self,
title="description",
key="d",
aliases=None,
attr="db.desc",
glance="\n {obj.db.desc}",
on_enter=None,
) | return self.add_choice(
title, key=key, aliases=aliases, attr=attr, glance=glance, on_enter=on_enter, text=""
) | Add a simple choice to edit a given attribute in the EvEditor.
Args:
title (str, optional): the choice's title.
key (str, optional): the choice's key.
aliases (list of str, optional): the choice's aliases.
glance (str or callable, optional): the at-a-glance description.
on_enter (callable, optional): a different callable to edit
the attribute.
Returns:
choice (Choice): the newly-created choice.
Note:
This is just a shortcut method, calling `add_choice`.
If `on_enter` is not set, use `menu_edit` which opens
an EvEditor to edit the specified attribute.
When the caller closes the editor (with :q), the menu
will be re-opened. | Add a simple choice to edit a given attribute in the EvEditor. | [
"Add",
"a",
"simple",
"choice",
"to",
"edit",
"a",
"given",
"attribute",
"in",
"the",
"EvEditor",
"."
] | def add_choice_edit(
self,
title="description",
key="d",
aliases=None,
attr="db.desc",
glance="\n {obj.db.desc}",
on_enter=None,
):
"""
Add a simple choice to edit a given attribute in the EvEditor.
Args:
title (str, optional): the choice's title.
key (str, optional): the choice's key.
aliases (list of str, optional): the choice's aliases.
glance (str or callable, optional): the at-a-glance description.
on_enter (callable, optional): a different callable to edit
the attribute.
Returns:
choice (Choice): the newly-created choice.
Note:
This is just a shortcut method, calling `add_choice`.
If `on_enter` is not set, use `menu_edit` which opens
an EvEditor to edit the specified attribute.
When the caller closes the editor (with :q), the menu
will be re-opened.
"""
on_enter = on_enter or menu_edit
return self.add_choice(
title, key=key, aliases=aliases, attr=attr, glance=glance, on_enter=on_enter, text=""
) | [
"def",
"add_choice_edit",
"(",
"self",
",",
"title",
"=",
"\"description\"",
",",
"key",
"=",
"\"d\"",
",",
"aliases",
"=",
"None",
",",
"attr",
"=",
"\"db.desc\"",
",",
"glance",
"=",
"\"\\n {obj.db.desc}\"",
",",
"on_enter",
"=",
"None",
",",
")",
":",
"on_enter",
"=",
"on_enter",
"or",
"menu_edit",
"return",
"self",
".",
"add_choice",
"(",
"title",
",",
"key",
"=",
"key",
",",
"aliases",
"=",
"aliases",
",",
"attr",
"=",
"attr",
",",
"glance",
"=",
"glance",
",",
"on_enter",
"=",
"on_enter",
",",
"text",
"=",
"\"\"",
")"
] | https://github.com/evennia/evennia/blob/fa79110ba6b219932f22297838e8ac72ebc0be0e/evennia/contrib/building_menu.py#L878-L912 |
|
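A hypothetical menu using the shortcut above; subclassing BuildingMenu and adding choices inside init() follows the contrib's usual pattern:

from evennia.contrib.building_menu import BuildingMenu

class RoomBuildingMenu(BuildingMenu):
    def init(self, room):
        # 'd' now opens an EvEditor on room.db.desc; :q reopens the menu.
        self.add_choice_edit()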
salabim/salabim | e0de846b042daf2dc71aaf43d8adc6486b57f376 | salabim_exp.py | python | weighted_percentile | (a, percentile=None, weights=None) | return out_percentiles | O(nlgn) implementation for weighted_percentile. | O(nlgn) implementation for weighted_percentile. | [
"O",
"(",
"nlgn",
")",
"implementation",
"for",
"weighted_percentile",
"."
] | def weighted_percentile(a, percentile=None, weights=None):
"""
O(nlgn) implementation for weighted_percentile.
"""
import numpy as np
a = np.array(a)
percentile = np.array(percentile) / 100.0
if weights is None:
weights = np.ones(len(a))
else:
weights = np.array(weights)
a_indsort = np.argsort(a)
a_sort = a[a_indsort]
weights_sort = weights[a_indsort]
ecdf = np.cumsum(weights_sort)
percentile_index_positions = percentile * (weights.sum() - 1) + 1
# need the 1 offset at the end due to ecdf not starting at 0
locations = np.searchsorted(ecdf, percentile_index_positions)
out_percentiles = np.zeros(len(percentile_index_positions))
for i, empiricalLocation in enumerate(locations):
# iterate across the requested percentiles
if ecdf[empiricalLocation - 1] == np.floor(percentile_index_positions[i]):
# i.e. is the percentile in between 2 separate values
uppWeight = percentile_index_positions[i] - ecdf[empiricalLocation - 1]
lowWeight = 1 - uppWeight
out_percentiles[i] = a_sort[empiricalLocation - 1] * lowWeight + a_sort[empiricalLocation] * uppWeight
else:
# i.e. the percentile is entirely in one bin
out_percentiles[i] = a_sort[empiricalLocation]
return out_percentiles | [
"def",
"weighted_percentile",
"(",
"a",
",",
"percentile",
"=",
"None",
",",
"weights",
"=",
"None",
")",
":",
"import",
"numpy",
"as",
"np",
"a",
"=",
"np",
".",
"array",
"(",
"a",
")",
"percentile",
"=",
"np",
".",
"array",
"(",
"percentile",
")",
"/",
"100.0",
"if",
"weights",
"is",
"None",
":",
"weights",
"=",
"np",
".",
"ones",
"(",
"len",
"(",
"a",
")",
")",
"else",
":",
"weights",
"=",
"np",
".",
"array",
"(",
"weights",
")",
"a_indsort",
"=",
"np",
".",
"argsort",
"(",
"a",
")",
"a_sort",
"=",
"a",
"[",
"a_indsort",
"]",
"weights_sort",
"=",
"weights",
"[",
"a_indsort",
"]",
"ecdf",
"=",
"np",
".",
"cumsum",
"(",
"weights_sort",
")",
"percentile_index_positions",
"=",
"percentile",
"*",
"(",
"weights",
".",
"sum",
"(",
")",
"-",
"1",
")",
"+",
"1",
"# need the 1 offset at the end due to ecdf not starting at 0",
"locations",
"=",
"np",
".",
"searchsorted",
"(",
"ecdf",
",",
"percentile_index_positions",
")",
"out_percentiles",
"=",
"np",
".",
"zeros",
"(",
"len",
"(",
"percentile_index_positions",
")",
")",
"for",
"i",
",",
"empiricalLocation",
"in",
"enumerate",
"(",
"locations",
")",
":",
"# iterate across the requested percentiles",
"if",
"ecdf",
"[",
"empiricalLocation",
"-",
"1",
"]",
"==",
"np",
".",
"floor",
"(",
"percentile_index_positions",
"[",
"i",
"]",
")",
":",
"# i.e. is the percentile in between 2 separate values",
"uppWeight",
"=",
"percentile_index_positions",
"[",
"i",
"]",
"-",
"ecdf",
"[",
"empiricalLocation",
"-",
"1",
"]",
"lowWeight",
"=",
"1",
"-",
"uppWeight",
"out_percentiles",
"[",
"i",
"]",
"=",
"a_sort",
"[",
"empiricalLocation",
"-",
"1",
"]",
"*",
"lowWeight",
"+",
"a_sort",
"[",
"empiricalLocation",
"]",
"*",
"uppWeight",
"else",
":",
"# i.e. the percentile is entirely in one bin",
"out_percentiles",
"[",
"i",
"]",
"=",
"a_sort",
"[",
"empiricalLocation",
"]",
"return",
"out_percentiles"
] | https://github.com/salabim/salabim/blob/e0de846b042daf2dc71aaf43d8adc6486b57f376/salabim_exp.py#L1454-L1489 |
|
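A worked example for weighted_percentile, small enough to check by hand:

a = [1, 2, 3, 4]
weights = [1, 1, 1, 3]  # the value 4 counts three times
print(weighted_percentile(a, percentile=[50], weights=weights))
# -> [3.5], the median of the expanded sample (1, 2, 3, 4, 4, 4)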
ansible/ansible-modules-extras | f216ba8e0616bc8ad8794c22d4b48e1ab18886cf | cloud/amazon/efs_facts.py | python | EFSConnection.get_security_groups | (self, **kwargs) | return iterate_all(
'SecurityGroups',
self.connection.describe_mount_target_security_groups,
**kwargs
) | Returns security groups for selected instance of EFS | Returns security groups for selected instance of EFS | [
"Returns",
"security",
"groups",
"for",
"selected",
"instance",
"of",
"EFS"
] | def get_security_groups(self, **kwargs):
"""
Returns security groups for selected instance of EFS
"""
return iterate_all(
'SecurityGroups',
self.connection.describe_mount_target_security_groups,
**kwargs
) | [
"def",
"get_security_groups",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"iterate_all",
"(",
"'SecurityGroups'",
",",
"self",
".",
"connection",
".",
"describe_mount_target_security_groups",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/ansible/ansible-modules-extras/blob/f216ba8e0616bc8ad8794c22d4b48e1ab18886cf/cloud/amazon/efs_facts.py#L248-L256 |
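A sketch of the call; the EFSConnection instance and mount target ID are placeholders, and the kwargs pass straight through to boto3's describe_mount_target_security_groups:

groups = list(conn.get_security_groups(MountTargetId='fsmt-0123456789abcdef0'))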