| nwo (string, 5-91 chars) | sha (string, 40 chars) | path (string, 5-174 chars) | language (string, 1 class: `python`) | identifier (string, 1-120 chars) | parameters (string, 0-3.15k chars) | argument_list (string, 1 class) | return_statement (string, 0-24.1k chars) | docstring (string, 0-27.3k chars) | docstring_summary (string, 0-13.8k chars) | docstring_tokens (sequence) | function (string, 22-139k chars) | function_tokens (sequence) | url (string, 87-283 chars) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
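The schema above describes one mined Python function per row (CodeSearchNet-style), pinned to a repository commit. A minimal sketch of loading and inspecting rows with this schema; the file name here is hypothetical, and the split may equally ship as Parquet rather than JSON Lines:

```python
import json

# Hypothetical export of this split: one JSON object per line, with the
# fields named in the schema header above.
with open("code_functions.jsonl", encoding="utf-8") as f:
    for line in f:
        row = json.loads(line)
        # Repo ("nwo"), pinned commit ("sha"), file path, and the function
        # itself, both as raw source and as a pre-split token sequence.
        print(row["nwo"], row["sha"][:8], row["path"])
        print(row["identifier"], row["parameters"])
        print(len(row["function_tokens"]), "tokens")
        break
```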
apple/ccs-calendarserver | 13c706b985fb728b9aab42dc0fef85aae21921c3 | twistedcaldav/notifications.py | python | NotificationResource.__init__ | (self, parent) | [] | def __init__(self, parent):
    self._parent = parent
    CalDAVResource.__init__(self) | [
"def",
"__init__",
"(",
"self",
",",
"parent",
")",
":",
"self",
".",
"_parent",
"=",
"parent",
"CalDAVResource",
".",
"__init__",
"(",
"self",
")"
] | https://github.com/apple/ccs-calendarserver/blob/13c706b985fb728b9aab42dc0fef85aae21921c3/twistedcaldav/notifications.py#L49-L51 |
||||
hzlzh/AlfredWorkflow.com | 7055f14f6922c80ea5943839eb0caff11ae57255 | Sources/Workflows/KindleBookstore/PyAl/Request/requests_cache/backends/base.py | python | BaseCache.clear | (self) | Clear cache | Clear cache | [
"Clear",
"cache"
] | def clear(self):
""" Clear cache
"""
self.responses.clear()
self.url_map.clear() | [
"def",
"clear",
"(",
"self",
")",
":",
"self",
".",
"responses",
".",
"clear",
"(",
")",
"self",
".",
"url_map",
".",
"clear",
"(",
")"
] | https://github.com/hzlzh/AlfredWorkflow.com/blob/7055f14f6922c80ea5943839eb0caff11ae57255/Sources/Workflows/KindleBookstore/PyAl/Request/requests_cache/backends/base.py#L77-L81 |
||
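The `BaseCache.clear` row above wipes both stores a backend keeps: the cached responses and the URL-to-key map. A hedged usage sketch against the upstream `requests_cache` package (the vendored PyAl copy shown above is old and its surface may differ):

```python
import requests_cache

# CachedSession wires a cache backend into a regular requests session.
session = requests_cache.CachedSession("demo_cache")
session.get("https://example.com")  # response stored in the cache
session.cache.clear()               # analogous to BaseCache.clear() above
```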
DxCx/plugin.video.9anime | 34358c2f701e5ddf19d3276926374a16f63f7b6a | resources/lib/ui/js2py/legecy_translators/nparser.py | python | isLeftHandSide | (expr=None) | return (expr.type == Syntax.Identifier) or (expr.type == Syntax.MemberExpression) | [] | def isLeftHandSide(expr=None):
    return (expr.type == Syntax.Identifier) or (expr.type == Syntax.MemberExpression) | [
"def",
"isLeftHandSide",
"(",
"expr",
"=",
"None",
")",
":",
"return",
"(",
"expr",
".",
"type",
"==",
"Syntax",
".",
"Identifier",
")",
"or",
"(",
"expr",
".",
"type",
"==",
"Syntax",
".",
"MemberExpression",
")"
] | https://github.com/DxCx/plugin.video.9anime/blob/34358c2f701e5ddf19d3276926374a16f63f7b6a/resources/lib/ui/js2py/legecy_translators/nparser.py#L1497-L1498 |
|||
urwid/urwid | e2423b5069f51d318ea1ac0f355a0efe5448f7eb | urwid/main_loop.py | python | SelectEventLoop.remove_watch_file | (self, handle) | return False | Remove an input file.
Returns True if the input file exists, False otherwise | Remove an input file. | [
"Remove",
"an",
"input",
"file",
"."
] | def remove_watch_file(self, handle):
"""
Remove an input file.
Returns True if the input file exists, False otherwise
"""
if handle in self._watch_files:
del self._watch_files[handle]
return True
return False | [
"def",
"remove_watch_file",
"(",
"self",
",",
"handle",
")",
":",
"if",
"handle",
"in",
"self",
".",
"_watch_files",
":",
"del",
"self",
".",
"_watch_files",
"[",
"handle",
"]",
"return",
"True",
"return",
"False"
] | https://github.com/urwid/urwid/blob/e2423b5069f51d318ea1ac0f355a0efe5448f7eb/urwid/main_loop.py#L741-L750 |
|
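A hedged usage sketch for the `remove_watch_file` row above: in urwid's `SelectEventLoop`, the handle returned by `watch_file` is the file descriptor itself, so removal is a dictionary delete and the boolean tells you whether anything was actually being watched:

```python
import urwid

event_loop = urwid.SelectEventLoop()

def on_readable():
    pass  # invoked by the loop when fd 0 becomes readable

handle = event_loop.watch_file(0, on_readable)  # watch stdin (fd 0)
print(event_loop.remove_watch_file(handle))     # True: it was being watched
print(event_loop.remove_watch_file(handle))     # False: already removed
```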
zzzeek/mako | 12819efda61b0c478a700670575c951b6cde7383 | mako/codegen.py | python | _GenerateRenderMethod.write_variable_declares | (self, identifiers, toplevel=False, limit=None) | write variable declarations at the top of a function.
the variable declarations are in the form of callable
definitions for defs and/or name lookup within the
function's context argument. the names declared are based
on the names that are referenced in the function body,
which don't otherwise have any explicit assignment
operation. names that are assigned within the body are
assumed to be locally-scoped variables and are not
separately declared.
for def callable definitions, if the def is a top-level
callable then a 'stub' callable is generated which wraps
the current Context into a closure. if the def is not
top-level, it is fully rendered as a local closure. | write variable declarations at the top of a function. | [
"write",
"variable",
"declarations",
"at",
"the",
"top",
"of",
"a",
"function",
"."
] | def write_variable_declares(self, identifiers, toplevel=False, limit=None):
"""write variable declarations at the top of a function.
the variable declarations are in the form of callable
definitions for defs and/or name lookup within the
function's context argument. the names declared are based
on the names that are referenced in the function body,
which don't otherwise have any explicit assignment
operation. names that are assigned within the body are
assumed to be locally-scoped variables and are not
separately declared.
for def callable definitions, if the def is a top-level
callable then a 'stub' callable is generated which wraps
the current Context into a closure. if the def is not
top-level, it is fully rendered as a local closure.
"""
# collection of all defs available to us in this scope
comp_idents = dict([(c.funcname, c) for c in identifiers.defs])
to_write = set()
# write "context.get()" for all variables we are going to
# need that arent in the namespace yet
to_write = to_write.union(identifiers.undeclared)
# write closure functions for closures that we define
# right here
to_write = to_write.union(
[c.funcname for c in identifiers.closuredefs.values()]
)
# remove identifiers that are declared in the argument
# signature of the callable
to_write = to_write.difference(identifiers.argument_declared)
# remove identifiers that we are going to assign to.
# in this way we mimic Python's behavior,
# i.e. assignment to a variable within a block
# means that variable is now a "locally declared" var,
# which cannot be referenced beforehand.
to_write = to_write.difference(identifiers.locally_declared)
if self.compiler.enable_loop:
has_loop = "loop" in to_write
to_write.discard("loop")
else:
has_loop = False
# if a limiting set was sent, constraint to those items in that list
# (this is used for the caching decorator)
if limit is not None:
to_write = to_write.intersection(limit)
if toplevel and getattr(self.compiler, "has_ns_imports", False):
self.printer.writeline("_import_ns = {}")
self.compiler.has_imports = True
for ident, ns in self.compiler.namespaces.items():
if "import" in ns.attributes:
self.printer.writeline(
"_mako_get_namespace(context, %r)."
"_populate(_import_ns, %r)"
% (
ident,
re.split(r"\s*,\s*", ns.attributes["import"]),
)
)
if has_loop:
self.printer.writeline("loop = __M_loop = runtime.LoopStack()")
for ident in to_write:
if ident in comp_idents:
comp = comp_idents[ident]
if comp.is_block:
if not comp.is_anonymous:
self.write_def_decl(comp, identifiers)
else:
self.write_inline_def(comp, identifiers, nested=True)
else:
if comp.is_root():
self.write_def_decl(comp, identifiers)
else:
self.write_inline_def(comp, identifiers, nested=True)
elif ident in self.compiler.namespaces:
self.printer.writeline(
"%s = _mako_get_namespace(context, %r)" % (ident, ident)
)
else:
if getattr(self.compiler, "has_ns_imports", False):
if self.compiler.strict_undefined:
self.printer.writelines(
"%s = _import_ns.get(%r, UNDEFINED)"
% (ident, ident),
"if %s is UNDEFINED:" % ident,
"try:",
"%s = context[%r]" % (ident, ident),
"except KeyError:",
"raise NameError(\"'%s' is not defined\")" % ident,
None,
None,
)
else:
self.printer.writeline(
"%s = _import_ns.get"
"(%r, context.get(%r, UNDEFINED))"
% (ident, ident, ident)
)
else:
if self.compiler.strict_undefined:
self.printer.writelines(
"try:",
"%s = context[%r]" % (ident, ident),
"except KeyError:",
"raise NameError(\"'%s' is not defined\")" % ident,
None,
)
else:
self.printer.writeline(
"%s = context.get(%r, UNDEFINED)" % (ident, ident)
)
self.printer.writeline("__M_writer = context.writer()") | [
"def",
"write_variable_declares",
"(",
"self",
",",
"identifiers",
",",
"toplevel",
"=",
"False",
",",
"limit",
"=",
"None",
")",
":",
"# collection of all defs available to us in this scope",
"comp_idents",
"=",
"dict",
"(",
"[",
"(",
"c",
".",
"funcname",
",",
"c",
")",
"for",
"c",
"in",
"identifiers",
".",
"defs",
"]",
")",
"to_write",
"=",
"set",
"(",
")",
"# write \"context.get()\" for all variables we are going to",
"# need that arent in the namespace yet",
"to_write",
"=",
"to_write",
".",
"union",
"(",
"identifiers",
".",
"undeclared",
")",
"# write closure functions for closures that we define",
"# right here",
"to_write",
"=",
"to_write",
".",
"union",
"(",
"[",
"c",
".",
"funcname",
"for",
"c",
"in",
"identifiers",
".",
"closuredefs",
".",
"values",
"(",
")",
"]",
")",
"# remove identifiers that are declared in the argument",
"# signature of the callable",
"to_write",
"=",
"to_write",
".",
"difference",
"(",
"identifiers",
".",
"argument_declared",
")",
"# remove identifiers that we are going to assign to.",
"# in this way we mimic Python's behavior,",
"# i.e. assignment to a variable within a block",
"# means that variable is now a \"locally declared\" var,",
"# which cannot be referenced beforehand.",
"to_write",
"=",
"to_write",
".",
"difference",
"(",
"identifiers",
".",
"locally_declared",
")",
"if",
"self",
".",
"compiler",
".",
"enable_loop",
":",
"has_loop",
"=",
"\"loop\"",
"in",
"to_write",
"to_write",
".",
"discard",
"(",
"\"loop\"",
")",
"else",
":",
"has_loop",
"=",
"False",
"# if a limiting set was sent, constraint to those items in that list",
"# (this is used for the caching decorator)",
"if",
"limit",
"is",
"not",
"None",
":",
"to_write",
"=",
"to_write",
".",
"intersection",
"(",
"limit",
")",
"if",
"toplevel",
"and",
"getattr",
"(",
"self",
".",
"compiler",
",",
"\"has_ns_imports\"",
",",
"False",
")",
":",
"self",
".",
"printer",
".",
"writeline",
"(",
"\"_import_ns = {}\"",
")",
"self",
".",
"compiler",
".",
"has_imports",
"=",
"True",
"for",
"ident",
",",
"ns",
"in",
"self",
".",
"compiler",
".",
"namespaces",
".",
"items",
"(",
")",
":",
"if",
"\"import\"",
"in",
"ns",
".",
"attributes",
":",
"self",
".",
"printer",
".",
"writeline",
"(",
"\"_mako_get_namespace(context, %r).\"",
"\"_populate(_import_ns, %r)\"",
"%",
"(",
"ident",
",",
"re",
".",
"split",
"(",
"r\"\\s*,\\s*\"",
",",
"ns",
".",
"attributes",
"[",
"\"import\"",
"]",
")",
",",
")",
")",
"if",
"has_loop",
":",
"self",
".",
"printer",
".",
"writeline",
"(",
"\"loop = __M_loop = runtime.LoopStack()\"",
")",
"for",
"ident",
"in",
"to_write",
":",
"if",
"ident",
"in",
"comp_idents",
":",
"comp",
"=",
"comp_idents",
"[",
"ident",
"]",
"if",
"comp",
".",
"is_block",
":",
"if",
"not",
"comp",
".",
"is_anonymous",
":",
"self",
".",
"write_def_decl",
"(",
"comp",
",",
"identifiers",
")",
"else",
":",
"self",
".",
"write_inline_def",
"(",
"comp",
",",
"identifiers",
",",
"nested",
"=",
"True",
")",
"else",
":",
"if",
"comp",
".",
"is_root",
"(",
")",
":",
"self",
".",
"write_def_decl",
"(",
"comp",
",",
"identifiers",
")",
"else",
":",
"self",
".",
"write_inline_def",
"(",
"comp",
",",
"identifiers",
",",
"nested",
"=",
"True",
")",
"elif",
"ident",
"in",
"self",
".",
"compiler",
".",
"namespaces",
":",
"self",
".",
"printer",
".",
"writeline",
"(",
"\"%s = _mako_get_namespace(context, %r)\"",
"%",
"(",
"ident",
",",
"ident",
")",
")",
"else",
":",
"if",
"getattr",
"(",
"self",
".",
"compiler",
",",
"\"has_ns_imports\"",
",",
"False",
")",
":",
"if",
"self",
".",
"compiler",
".",
"strict_undefined",
":",
"self",
".",
"printer",
".",
"writelines",
"(",
"\"%s = _import_ns.get(%r, UNDEFINED)\"",
"%",
"(",
"ident",
",",
"ident",
")",
",",
"\"if %s is UNDEFINED:\"",
"%",
"ident",
",",
"\"try:\"",
",",
"\"%s = context[%r]\"",
"%",
"(",
"ident",
",",
"ident",
")",
",",
"\"except KeyError:\"",
",",
"\"raise NameError(\\\"'%s' is not defined\\\")\"",
"%",
"ident",
",",
"None",
",",
"None",
",",
")",
"else",
":",
"self",
".",
"printer",
".",
"writeline",
"(",
"\"%s = _import_ns.get\"",
"\"(%r, context.get(%r, UNDEFINED))\"",
"%",
"(",
"ident",
",",
"ident",
",",
"ident",
")",
")",
"else",
":",
"if",
"self",
".",
"compiler",
".",
"strict_undefined",
":",
"self",
".",
"printer",
".",
"writelines",
"(",
"\"try:\"",
",",
"\"%s = context[%r]\"",
"%",
"(",
"ident",
",",
"ident",
")",
",",
"\"except KeyError:\"",
",",
"\"raise NameError(\\\"'%s' is not defined\\\")\"",
"%",
"ident",
",",
"None",
",",
")",
"else",
":",
"self",
".",
"printer",
".",
"writeline",
"(",
"\"%s = context.get(%r, UNDEFINED)\"",
"%",
"(",
"ident",
",",
"ident",
")",
")",
"self",
".",
"printer",
".",
"writeline",
"(",
"\"__M_writer = context.writer()\"",
")"
] | https://github.com/zzzeek/mako/blob/12819efda61b0c478a700670575c951b6cde7383/mako/codegen.py#L464-L588 |
||
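Concretely, for an undeclared template name such as `title`, the `write_variable_declares` row above emits one of the following lookups into the generated render function, depending on `strict_undefined` (illustrative output reconstructed from the `writeline` format strings in the code above):

```python
# strict_undefined = False: a missing name quietly becomes the UNDEFINED sentinel
title = context.get('title', UNDEFINED)

# strict_undefined = True: a missing name raises immediately
try:
    title = context['title']
except KeyError:
    raise NameError("'title' is not defined")

__M_writer = context.writer()
```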
ladybug-tools/honeybee-legacy | bd62af4862fe022801fb87dbc8794fdf1dff73a9 | src/Honeybee_Honeybee.py | python | hb_WriteRAD.collectResults | (self, subWorkingDir, radFileName, numOfCPUs, analysisRecipe, expectedResultFiles) | [] | def collectResults(self, subWorkingDir, radFileName, numOfCPUs, analysisRecipe, expectedResultFiles):
    if analysisRecipe.type == 2:
        # annual simulation
        runAnnualGlare = analysisRecipe.DSParameters.runAnnualGlare
        onlyAnnualGlare = analysisRecipe.DSParameters.onlyAnnualGlare
        numOfIllFiles = analysisRecipe.DSParameters.numOfIll
        annualGlareViews = analysisRecipe.DSParameters.RhinoViewsName
        DSResultFilesAddress = []
        if not (runAnnualGlare and onlyAnnualGlare):
            # read the number of .ill files
            # and the number of .dc files
            if subWorkingDir[-1] == os.sep: subWorkingDir = subWorkingDir[:-1]
            startTime = time.time()
            # check if the results are available
            files = os.listdir(subWorkingDir)
            numIll = 0
            numDc = 0
            for file in files:
                if file.EndsWith('ill'):
                    DSResultFilesAddress.append(os.path.join(subWorkingDir, file))
                    numIll += 1
                elif file.EndsWith('dc'):
                    numDc += 1
            # /2 in case of conceptual dynamic blinds in Daysim
            if numIll != numOfCPUs * numOfIllFiles or not \
                    (numDc == numOfCPUs * numOfIllFiles or
                     numDc == numOfCPUs * numOfIllFiles / 2):
                print "Can't find the results for the study"
                DSResultFilesAddress = []
        # check for results of annual glare analysis if any
        annualGlareResults = {}
        for view in annualGlareViews:
            if view not in annualGlareResults.keys():
                annualGlareResults[view] = []
        dgpFile = os.path.join(subWorkingDir, radFileName + '_0.dgp')
        if runAnnualGlare and os.path.isfile(dgpFile):
            with open(dgpFile, "r") as dgpRes:
                for line in dgpRes:
                    try:
                        hourlyRes = line.split(" ")[4:]
                        # for each view there should be a number
                        for view, res in zip(annualGlareViews, hourlyRes):
                            annualGlareResults[view].append(res.strip())
                    except:
                        pass
        return DSResultFilesAddress, annualGlareResults
    elif analysisRecipe.type == 0:
        # image-based analysis
        return expectedResultFiles
    else:
        RADResultFilesAddress = expectedResultFiles
        # grid-based analysis
        numRes = 0
        files = os.listdir(subWorkingDir)
        for file in files:
            if file.EndsWith('res'): numRes += 1
        if numRes != numOfCPUs:
            print "Cannot find the results of the study"
            RADResultFilesAddress = []
            time.sleep(1)
        return RADResultFilesAddress | [
"def",
"collectResults",
"(",
"self",
",",
"subWorkingDir",
",",
"radFileName",
",",
"numOfCPUs",
",",
"analysisRecipe",
",",
"expectedResultFiles",
")",
":",
"if",
"analysisRecipe",
".",
"type",
"==",
"2",
":",
"#annual simulation",
"runAnnualGlare",
"=",
"analysisRecipe",
".",
"DSParameters",
".",
"runAnnualGlare",
"onlyAnnualGlare",
"=",
"analysisRecipe",
".",
"DSParameters",
".",
"onlyAnnualGlare",
"numOfIllFiles",
"=",
"analysisRecipe",
".",
"DSParameters",
".",
"numOfIll",
"annualGlareViews",
"=",
"analysisRecipe",
".",
"DSParameters",
".",
"RhinoViewsName",
"DSResultFilesAddress",
"=",
"[",
"]",
"if",
"not",
"(",
"runAnnualGlare",
"and",
"onlyAnnualGlare",
")",
":",
"# read the number of .ill files",
"# and the number of .dc files",
"if",
"subWorkingDir",
"[",
"-",
"1",
"]",
"==",
"os",
".",
"sep",
":",
"subWorkingDir",
"=",
"subWorkingDir",
"[",
":",
"-",
"1",
"]",
"startTime",
"=",
"time",
".",
"time",
"(",
")",
"# check if the results are available",
"files",
"=",
"os",
".",
"listdir",
"(",
"subWorkingDir",
")",
"numIll",
"=",
"0",
"numDc",
"=",
"0",
"for",
"file",
"in",
"files",
":",
"if",
"file",
".",
"EndsWith",
"(",
"'ill'",
")",
":",
"DSResultFilesAddress",
".",
"append",
"(",
"os",
".",
"path",
".",
"join",
"(",
"subWorkingDir",
",",
"file",
")",
")",
"numIll",
"+=",
"1",
"elif",
"file",
".",
"EndsWith",
"(",
"'dc'",
")",
":",
"numDc",
"+=",
"1",
"# /2 in case of conceptual dynamic blinds in Daysim",
"if",
"numIll",
"!=",
"numOfCPUs",
"*",
"numOfIllFiles",
"or",
"not",
"(",
"numDc",
"==",
"numOfCPUs",
"*",
"numOfIllFiles",
"or",
"numDc",
"==",
"numOfCPUs",
"*",
"numOfIllFiles",
"/",
"2",
")",
":",
"print",
"\"Can't find the results for the study\"",
"DSResultFilesAddress",
"=",
"[",
"]",
"# check for results of annual glare analysis if any",
"annualGlareResults",
"=",
"{",
"}",
"for",
"view",
"in",
"annualGlareViews",
":",
"if",
"view",
"not",
"in",
"annualGlareResults",
".",
"keys",
"(",
")",
":",
"annualGlareResults",
"[",
"view",
"]",
"=",
"[",
"]",
"dgpFile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"subWorkingDir",
",",
"radFileName",
"+",
"'_0.dgp'",
")",
"if",
"runAnnualGlare",
"and",
"os",
".",
"path",
".",
"isfile",
"(",
"dgpFile",
")",
":",
"with",
"open",
"(",
"dgpFile",
",",
"\"r\"",
")",
"as",
"dgpRes",
":",
"for",
"line",
"in",
"dgpRes",
":",
"try",
":",
"hourlyRes",
"=",
"line",
".",
"split",
"(",
"\" \"",
")",
"[",
"4",
":",
"]",
"# for each view there should be a number",
"for",
"view",
",",
"res",
"in",
"zip",
"(",
"annualGlareViews",
",",
"hourlyRes",
")",
":",
"annualGlareResults",
"[",
"view",
"]",
".",
"append",
"(",
"res",
".",
"strip",
"(",
")",
")",
"except",
":",
"pass",
"return",
"DSResultFilesAddress",
",",
"annualGlareResults",
"elif",
"analysisRecipe",
".",
"type",
"==",
"0",
":",
"# image-based analysis",
"return",
"expectedResultFiles",
"else",
":",
"RADResultFilesAddress",
"=",
"expectedResultFiles",
"# grid-based analysis",
"numRes",
"=",
"0",
"files",
"=",
"os",
".",
"listdir",
"(",
"subWorkingDir",
")",
"for",
"file",
"in",
"files",
":",
"if",
"file",
".",
"EndsWith",
"(",
"'res'",
")",
":",
"numRes",
"+=",
"1",
"if",
"numRes",
"!=",
"numOfCPUs",
":",
"print",
"\"Cannot find the results of the study\"",
"RADResultFilesAddress",
"=",
"[",
"]",
"time",
".",
"sleep",
"(",
"1",
")",
"return",
"RADResultFilesAddress"
] | https://github.com/ladybug-tools/honeybee-legacy/blob/bd62af4862fe022801fb87dbc8794fdf1dff73a9/src/Honeybee_Honeybee.py#L2424-L2493 |
||||
zopefoundation/ZODB | bc13ca74bde916876c43b40f10968ba06f2de733 | src/ZODB/interfaces.py | python | IBlobStorage.temporaryDirectory | () | Return a directory that should be used for uncommitted blob data.
If Blobs use this, then commits can be performed with a simple rename. | Return a directory that should be used for uncommitted blob data. | [
"Return",
"a",
"directory",
"that",
"should",
"be",
"used",
"for",
"uncommitted",
"blob",
"data",
"."
] | def temporaryDirectory():
"""Return a directory that should be used for uncommitted blob data.
If Blobs use this, then commits can be performed with a simple rename.
""" | [
"def",
"temporaryDirectory",
"(",
")",
":"
] | https://github.com/zopefoundation/ZODB/blob/bc13ca74bde916876c43b40f10968ba06f2de733/src/ZODB/interfaces.py#L1418-L1422 |
||
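Since `IBlobStorage.temporaryDirectory` is only an interface contract, here is a hedged sketch (not ZODB's actual implementation) of how a storage might satisfy it; the point is that the temporary directory must sit on the same filesystem as the committed blobs so the final rename is atomic:

```python
import os

class ExampleBlobStorage:
    """Illustrative only; assumes self.blob_dir is the committed-blob root."""

    def __init__(self, blob_dir):
        self.blob_dir = blob_dir

    def temporaryDirectory(self):
        # Keeping temporaries beside the committed blobs lets a commit
        # publish uncommitted data with one cheap, atomic os.rename().
        tmp = os.path.join(self.blob_dir, "tmp")
        os.makedirs(tmp, exist_ok=True)
        return tmp
```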
brightmart/text_classification | ae3269dc4b0dc366c65011a6a3cca300c924d320 | a08_EntityNetwork/data_util_zhihu.py | python | create_voabulary_label | (voabulary_label='train-zhihu4-only-title-all.txt',name_scope='',use_seq2seq=False) | return vocabulary_word2index_label,vocabulary_index2word_label | [] | def create_voabulary_label(voabulary_label='train-zhihu4-only-title-all.txt',name_scope='',use_seq2seq=False):#'train-zhihu.txt'
print("create_voabulary_label_sorted.started.traning_data_path:",voabulary_label)
cache_path ='../cache_vocabulary_label_pik/'+ name_scope + "_label_voabulary.pik"
if os.path.exists(cache_path):#如果缓存文件存在,则直接读取
with open(cache_path, 'r') as data_f:
vocabulary_word2index_label, vocabulary_index2word_label=pickle.load(data_f)
return vocabulary_word2index_label, vocabulary_index2word_label
else:
zhihu_f_train = codecs.open(voabulary_label, 'r', 'utf8')
lines=zhihu_f_train.readlines()
count=0
vocabulary_word2index_label={}
vocabulary_index2word_label={}
vocabulary_label_count_dict={} #{label:count}
for i,line in enumerate(lines):
if '__label__' in line: #'__label__-2051131023989903826
label=line[line.index('__label__')+len('__label__'):].strip().replace("\n","")
if vocabulary_label_count_dict.get(label,None) is not None:
vocabulary_label_count_dict[label]=vocabulary_label_count_dict[label]+1
else:
vocabulary_label_count_dict[label]=1
list_label=sort_by_value(vocabulary_label_count_dict)
print("length of list_label:",len(list_label));#print(";list_label:",list_label)
countt=0
##########################################################################################
if use_seq2seq:#if used for seq2seq model,insert two special label(token):_GO AND _END
i_list=[0,1,2];label_special_list=[_GO,_END,_PAD]
for i,label in zip(i_list,label_special_list):
vocabulary_word2index_label[label] = i
vocabulary_index2word_label[i] = label
#########################################################################################
for i,label in enumerate(list_label):
if i<10:
count_value=vocabulary_label_count_dict[label]
print("label:",label,"count_value:",count_value)
countt=countt+count_value
indexx = i + 3 if use_seq2seq else i
vocabulary_word2index_label[label]=indexx
vocabulary_index2word_label[indexx]=label
print("count top10:",countt)
#save to file system if vocabulary of words is not exists.
if not os.path.exists(cache_path): #如果不存在写到缓存文件中
with open(cache_path, 'a') as data_f:
pickle.dump((vocabulary_word2index_label,vocabulary_index2word_label), data_f)
print("create_voabulary_label_sorted.ended.len of vocabulary_label:",len(vocabulary_index2word_label))
return vocabulary_word2index_label,vocabulary_index2word_label | [
"def",
"create_voabulary_label",
"(",
"voabulary_label",
"=",
"'train-zhihu4-only-title-all.txt'",
",",
"name_scope",
"=",
"''",
",",
"use_seq2seq",
"=",
"False",
")",
":",
"#'train-zhihu.txt'",
"print",
"(",
"\"create_voabulary_label_sorted.started.traning_data_path:\"",
",",
"voabulary_label",
")",
"cache_path",
"=",
"'../cache_vocabulary_label_pik/'",
"+",
"name_scope",
"+",
"\"_label_voabulary.pik\"",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"cache_path",
")",
":",
"#如果缓存文件存在,则直接读取",
"with",
"open",
"(",
"cache_path",
",",
"'r'",
")",
"as",
"data_f",
":",
"vocabulary_word2index_label",
",",
"vocabulary_index2word_label",
"=",
"pickle",
".",
"load",
"(",
"data_f",
")",
"return",
"vocabulary_word2index_label",
",",
"vocabulary_index2word_label",
"else",
":",
"zhihu_f_train",
"=",
"codecs",
".",
"open",
"(",
"voabulary_label",
",",
"'r'",
",",
"'utf8'",
")",
"lines",
"=",
"zhihu_f_train",
".",
"readlines",
"(",
")",
"count",
"=",
"0",
"vocabulary_word2index_label",
"=",
"{",
"}",
"vocabulary_index2word_label",
"=",
"{",
"}",
"vocabulary_label_count_dict",
"=",
"{",
"}",
"#{label:count}",
"for",
"i",
",",
"line",
"in",
"enumerate",
"(",
"lines",
")",
":",
"if",
"'__label__'",
"in",
"line",
":",
"#'__label__-2051131023989903826",
"label",
"=",
"line",
"[",
"line",
".",
"index",
"(",
"'__label__'",
")",
"+",
"len",
"(",
"'__label__'",
")",
":",
"]",
".",
"strip",
"(",
")",
".",
"replace",
"(",
"\"\\n\"",
",",
"\"\"",
")",
"if",
"vocabulary_label_count_dict",
".",
"get",
"(",
"label",
",",
"None",
")",
"is",
"not",
"None",
":",
"vocabulary_label_count_dict",
"[",
"label",
"]",
"=",
"vocabulary_label_count_dict",
"[",
"label",
"]",
"+",
"1",
"else",
":",
"vocabulary_label_count_dict",
"[",
"label",
"]",
"=",
"1",
"list_label",
"=",
"sort_by_value",
"(",
"vocabulary_label_count_dict",
")",
"print",
"(",
"\"length of list_label:\"",
",",
"len",
"(",
"list_label",
")",
")",
"#print(\";list_label:\",list_label)",
"countt",
"=",
"0",
"##########################################################################################",
"if",
"use_seq2seq",
":",
"#if used for seq2seq model,insert two special label(token):_GO AND _END",
"i_list",
"=",
"[",
"0",
",",
"1",
",",
"2",
"]",
"label_special_list",
"=",
"[",
"_GO",
",",
"_END",
",",
"_PAD",
"]",
"for",
"i",
",",
"label",
"in",
"zip",
"(",
"i_list",
",",
"label_special_list",
")",
":",
"vocabulary_word2index_label",
"[",
"label",
"]",
"=",
"i",
"vocabulary_index2word_label",
"[",
"i",
"]",
"=",
"label",
"#########################################################################################",
"for",
"i",
",",
"label",
"in",
"enumerate",
"(",
"list_label",
")",
":",
"if",
"i",
"<",
"10",
":",
"count_value",
"=",
"vocabulary_label_count_dict",
"[",
"label",
"]",
"print",
"(",
"\"label:\"",
",",
"label",
",",
"\"count_value:\"",
",",
"count_value",
")",
"countt",
"=",
"countt",
"+",
"count_value",
"indexx",
"=",
"i",
"+",
"3",
"if",
"use_seq2seq",
"else",
"i",
"vocabulary_word2index_label",
"[",
"label",
"]",
"=",
"indexx",
"vocabulary_index2word_label",
"[",
"indexx",
"]",
"=",
"label",
"print",
"(",
"\"count top10:\"",
",",
"countt",
")",
"#save to file system if vocabulary of words is not exists.",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"cache_path",
")",
":",
"#如果不存在写到缓存文件中",
"with",
"open",
"(",
"cache_path",
",",
"'a'",
")",
"as",
"data_f",
":",
"pickle",
".",
"dump",
"(",
"(",
"vocabulary_word2index_label",
",",
"vocabulary_index2word_label",
")",
",",
"data_f",
")",
"print",
"(",
"\"create_voabulary_label_sorted.ended.len of vocabulary_label:\"",
",",
"len",
"(",
"vocabulary_index2word_label",
")",
")",
"return",
"vocabulary_word2index_label",
",",
"vocabulary_index2word_label"
] | https://github.com/brightmart/text_classification/blob/ae3269dc4b0dc366c65011a6a3cca300c924d320/a08_EntityNetwork/data_util_zhihu.py#L45-L93 |
|||
IJDykeman/wangTiles | 7c1ee2095ebdf7f72bce07d94c6484915d5cae8b | experimental_code/tiles_3d/venv/lib/python2.7/site-packages/setuptools/sandbox.py | python | DirectorySandbox._remap_pair | (self, operation, src, dst, *args, **kw) | return (src,dst) | Called for path pairs like rename, link, and symlink operations | Called for path pairs like rename, link, and symlink operations | [
"Called",
"for",
"path",
"pairs",
"like",
"rename",
"link",
"and",
"symlink",
"operations"
] | def _remap_pair(self, operation, src, dst, *args, **kw):
"""Called for path pairs like rename, link, and symlink operations"""
if not self._ok(src) or not self._ok(dst):
self._violation(operation, src, dst, *args, **kw)
return (src,dst) | [
"def",
"_remap_pair",
"(",
"self",
",",
"operation",
",",
"src",
",",
"dst",
",",
"*",
"args",
",",
"*",
"*",
"kw",
")",
":",
"if",
"not",
"self",
".",
"_ok",
"(",
"src",
")",
"or",
"not",
"self",
".",
"_ok",
"(",
"dst",
")",
":",
"self",
".",
"_violation",
"(",
"operation",
",",
"src",
",",
"dst",
",",
"*",
"args",
",",
"*",
"*",
"kw",
")",
"return",
"(",
"src",
",",
"dst",
")"
] | https://github.com/IJDykeman/wangTiles/blob/7c1ee2095ebdf7f72bce07d94c6484915d5cae8b/experimental_code/tiles_3d/venv/lib/python2.7/site-packages/setuptools/sandbox.py#L265-L269 |
|
openedx/edx-platform | 68dd185a0ab45862a2a61e0f803d7e03d2be71b5 | openedx/core/djangoapps/password_policy/settings/common.py | python | plugin_settings | (settings) | Adds default settings for the password_policy app. | Adds default settings for the password_policy app. | [
"Adds",
"default",
"settings",
"for",
"the",
"password_policy",
"app",
"."
] | def plugin_settings(settings):
"""
Adds default settings for the password_policy app.
"""
# Settings for managing the rollout of password policy compliance enforcement.
settings.PASSWORD_POLICY_COMPLIANCE_ROLLOUT_CONFIG = {
# Global switch to enable/disable password policy compliance enforcement on login.
'ENFORCE_COMPLIANCE_ON_LOGIN': False,
# The date that staff users (users with is_staff permissions) will be required to be compliant with
# current password policy requirements. After this date, non-compliant users will be forced to reset their
# password before logging in.
#
# This should be a timezone-aware date string parsable by dateutils.parser.parse
# Ex: 2018-04-19 00:00:00+00:00
'STAFF_USER_COMPLIANCE_DEADLINE': None,
# The date that users with elevated privileges (users with entries in the course_access_roles table) will be
# required to be compliant with current password policy requirements. After this date, non-compliant users will
# be forced to reset their password before logging in.
#
# This should be a timezone-aware date string parsable by dateutils.parser.parse
# Ex: 2018-04-19 00:00:00+00:00
'ELEVATED_PRIVILEGE_USER_COMPLIANCE_DEADLINE': None,
# The date that all users will be required to be compliant with current password policy requirements. After
# this date, non-compliant users will be forced to reset their password before logging in.
#
# This should be a timezone-aware date string parsable by dateutils.parser.parse
# Ex: 2018-04-19 00:00:00+00:00
'GENERAL_USER_COMPLIANCE_DEADLINE': None,
} | [
"def",
"plugin_settings",
"(",
"settings",
")",
":",
"# Settings for managing the rollout of password policy compliance enforcement.",
"settings",
".",
"PASSWORD_POLICY_COMPLIANCE_ROLLOUT_CONFIG",
"=",
"{",
"# Global switch to enable/disable password policy compliance enforcement on login.",
"'ENFORCE_COMPLIANCE_ON_LOGIN'",
":",
"False",
",",
"# The date that staff users (users with is_staff permissions) will be required to be compliant with",
"# current password policy requirements. After this date, non-compliant users will be forced to reset their",
"# password before logging in.",
"#",
"# This should be a timezone-aware date string parsable by dateutils.parser.parse",
"# Ex: 2018-04-19 00:00:00+00:00",
"'STAFF_USER_COMPLIANCE_DEADLINE'",
":",
"None",
",",
"# The date that users with elevated privileges (users with entries in the course_access_roles table) will be",
"# required to be compliant with current password policy requirements. After this date, non-compliant users will",
"# be forced to reset their password before logging in.",
"#",
"# This should be a timezone-aware date string parsable by dateutils.parser.parse",
"# Ex: 2018-04-19 00:00:00+00:00",
"'ELEVATED_PRIVILEGE_USER_COMPLIANCE_DEADLINE'",
":",
"None",
",",
"# The date that all users will be required to be compliant with current password policy requirements. After",
"# this date, non-compliant users will be forced to reset their password before logging in.",
"#",
"# This should be a timezone-aware date string parsable by dateutils.parser.parse",
"# Ex: 2018-04-19 00:00:00+00:00",
"'GENERAL_USER_COMPLIANCE_DEADLINE'",
":",
"None",
",",
"}"
] | https://github.com/openedx/edx-platform/blob/68dd185a0ab45862a2a61e0f803d7e03d2be71b5/openedx/core/djangoapps/password_policy/settings/common.py#L6-L37 |
||
mozilla-services/autopush | 87e273c4581af88478d9e2658aa51d8c82a6d630 | autopush/router/fcm.py | python | FCMRouter.route_notification | (self, notification, uaid_data) | return deferToThread(self._route, notification, router_data) | Start the FCM notification routing, returns a deferred | Start the FCM notification routing, returns a deferred | [
"Start",
"the",
"FCM",
"notification",
"routing",
"returns",
"a",
"deferred"
] | def route_notification(self, notification, uaid_data):
"""Start the FCM notification routing, returns a deferred"""
router_data = uaid_data["router_data"]
# Kick the entire notification routing off to a thread
return deferToThread(self._route, notification, router_data) | [
"def",
"route_notification",
"(",
"self",
",",
"notification",
",",
"uaid_data",
")",
":",
"router_data",
"=",
"uaid_data",
"[",
"\"router_data\"",
"]",
"# Kick the entire notification routing off to a thread",
"return",
"deferToThread",
"(",
"self",
".",
"_route",
",",
"notification",
",",
"router_data",
")"
] | https://github.com/mozilla-services/autopush/blob/87e273c4581af88478d9e2658aa51d8c82a6d630/autopush/router/fcm.py#L146-L150 |
|
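The `route_notification` row above leans on Twisted's `deferToThread`, which runs a blocking call on the reactor's thread pool and hands back a `Deferred` that fires with the result. A minimal standalone sketch of that pattern (the blocking function is a stand-in, not autopush's `_route`):

```python
from twisted.internet import reactor
from twisted.internet.threads import deferToThread

def blocking_route(notification):
    # Stand-in for a synchronous HTTP round-trip to FCM.
    return {"status": 200, "notification": notification}

d = deferToThread(blocking_route, "hello")
d.addCallback(lambda result: print("routed:", result))
d.addBoth(lambda _: reactor.stop())
reactor.run()
```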
rtqichen/torchdiffeq | 5a819e471c15cac5e4ec97a0e472b1569a1a872b | torchdiffeq/_impl/rk_common.py | python | _UncheckedAssign.forward | (ctx, scratch, value, index) | return scratch | [] | def forward(ctx, scratch, value, index):
    ctx.index = index
    scratch.data[index] = value  # sneak past the version checker
    return scratch | [
"def",
"forward",
"(",
"ctx",
",",
"scratch",
",",
"value",
",",
"index",
")",
":",
"ctx",
".",
"index",
"=",
"index",
"scratch",
".",
"data",
"[",
"index",
"]",
"=",
"value",
"# sneak past the version checker",
"return",
"scratch"
] | https://github.com/rtqichen/torchdiffeq/blob/5a819e471c15cac5e4ec97a0e472b1569a1a872b/torchdiffeq/_impl/rk_common.py#L31-L34 |
|||
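The `.data` write in `_UncheckedAssign.forward` above deliberately bypasses autograd's version counter, so a preallocated scratch tensor can be filled stage by stage. A self-contained sketch of the pattern; the `backward` here is a plausible completion for illustration, not necessarily the library's exact code:

```python
import torch

class _UncheckedAssign(torch.autograd.Function):
    @staticmethod
    def forward(ctx, scratch, value, index):
        ctx.index = index
        scratch.data[index] = value  # sneak past the version checker
        return scratch

    @staticmethod
    def backward(ctx, grad_scratch):
        # Route the gradient for `value` out of the slot it was written to.
        # (Illustrative backward; an assumption, not the verified upstream code.)
        return grad_scratch, grad_scratch[ctx.index], None

scratch = torch.zeros(4, 3)                     # preallocated stage buffer
value = torch.randn(3, requires_grad=True)
out = _UncheckedAssign.apply(scratch, value, 1)  # write without tripping autograd
out.sum().backward()
print(value.grad)                                # ones(3), via the assigned slot
```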
pantsbuild/pex | 473c6ac732ed4bc338b4b20a9ec930d1d722c9b4 | pex/vendor/_vendored/pip/pip/_internal/req/req_tracker.py | python | RequirementTracker.cleanup | (self) | [] | def cleanup(self):
    # type: () -> None
    for req in set(self._entries):
        self.remove(req)
    logger.debug("Removed build tracker: %r", self._root) | [
"def",
"cleanup",
"(",
"self",
")",
":",
"# type: () -> None",
"for",
"req",
"in",
"set",
"(",
"self",
".",
"_entries",
")",
":",
"self",
".",
"remove",
"(",
"req",
")",
"logger",
".",
"debug",
"(",
"\"Removed build tracker: %r\"",
",",
"self",
".",
"_root",
")"
] | https://github.com/pantsbuild/pex/blob/473c6ac732ed4bc338b4b20a9ec930d1d722c9b4/pex/vendor/_vendored/pip/pip/_internal/req/req_tracker.py#L139-L144 |
||||
vlachoudis/bCNC | 67126b4894dabf6579baf47af8d0f9b7de35e6e3 | bCNC/FilePage.py | python | SerialFrame.ctrlChange | (self) | [] | def ctrlChange(self):
    # self.app.controller = Utils.CONTROLLER.get(self.ctrlCombo.get(), 0)
    # print("selected", self.ctrlCombo.get())
    self.app.controllerSet(self.ctrlCombo.get()) | [
"def",
"ctrlChange",
"(",
"self",
")",
":",
"#self.app.controller = Utils.CONTROLLER.get(self.ctrlCombo.get(), 0)",
"#print(\"selected\",self.ctrlCombo.get())",
"self",
".",
"app",
".",
"controllerSet",
"(",
"self",
".",
"ctrlCombo",
".",
"get",
"(",
")",
")"
] | https://github.com/vlachoudis/bCNC/blob/67126b4894dabf6579baf47af8d0f9b7de35e6e3/bCNC/FilePage.py#L319-L322 |
||||
google-research/mixmatch | 1011a1d51eaa9ca6f5dba02096a848d1fe3fc38e | third_party/vat_utils.py | python | logsoftmax | (x) | return lsm | Compute log-domain softmax of logits. | Compute log-domain softmax of logits. | [
"Compute",
"log",
"-",
"domain",
"softmax",
"of",
"logits",
"."
] | def logsoftmax(x):
"""Compute log-domain softmax of logits."""
xdev = x - tf.reduce_max(x, 1, keepdims=True)
lsm = xdev - tf.log(tf.reduce_sum(tf.exp(xdev), 1, keepdims=True))
return lsm | [
"def",
"logsoftmax",
"(",
"x",
")",
":",
"xdev",
"=",
"x",
"-",
"tf",
".",
"reduce_max",
"(",
"x",
",",
"1",
",",
"keepdims",
"=",
"True",
")",
"lsm",
"=",
"xdev",
"-",
"tf",
".",
"log",
"(",
"tf",
".",
"reduce_sum",
"(",
"tf",
".",
"exp",
"(",
"xdev",
")",
",",
"1",
",",
"keepdims",
"=",
"True",
")",
")",
"return",
"lsm"
] | https://github.com/google-research/mixmatch/blob/1011a1d51eaa9ca6f5dba02096a848d1fe3fc38e/third_party/vat_utils.py#L53-L57 |
|
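The max-subtraction in `logsoftmax` above exploits shift invariance: log_softmax(x) = (x - m) - log(sum(exp(x - m))) for any constant m, so choosing m = max(x) keeps every exp argument non-positive and avoids overflow. A quick numerical check (a sketch assuming TF 1.x, as in the source repo, with the `logsoftmax` above in scope):

```python
import tensorflow as tf  # TF 1.x session API, matching the repo

x = tf.constant([[1000.0, 1001.0, 1002.0]])
naive = tf.log(tf.exp(x) / tf.reduce_sum(tf.exp(x), 1, keepdims=True))
with tf.Session() as sess:
    print(sess.run(naive))          # [[nan nan nan]] -- exp(1000.) overflows
    print(sess.run(logsoftmax(x)))  # [[-2.4076 -1.4076 -0.4076]]
```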
blawar/nut | 2cf351400418399a70164987e28670309f6c9cb5 | nut/Usb.py | python | UsbResponse.__init__ | (self, packet) | [] | def __init__(self, packet):
    super(UsbResponse, self).__init__(None)
    self.packet = packet | [
"def",
"__init__",
"(",
"self",
",",
"packet",
")",
":",
"super",
"(",
"UsbResponse",
",",
"self",
")",
".",
"__init__",
"(",
"None",
")",
"self",
".",
"packet",
"=",
"packet"
] | https://github.com/blawar/nut/blob/2cf351400418399a70164987e28670309f6c9cb5/nut/Usb.py#L55-L57 |
||||
google-research/mixmatch | 1011a1d51eaa9ca6f5dba02096a848d1fe3fc38e | pseudo_label.py | python | PseudoLabel.model | (self, lr, wd, ema, warmup_pos, consistency_weight, threshold, **kwargs) | return EasyDict(
x=x_in, y=y_in, label=l_in, train_op=train_op, tune_op=train_bn,
classify_raw=tf.nn.softmax(classifier(x_in, training=False)), # No EMA, for debugging.
classify_op=tf.nn.softmax(classifier(x_in, getter=ema_getter, training=False))) | [] | def model(self, lr, wd, ema, warmup_pos, consistency_weight, threshold, **kwargs):
    hwc = [self.dataset.height, self.dataset.width, self.dataset.colors]
    x_in = tf.placeholder(tf.float32, [None] + hwc, 'x')
    y_in = tf.placeholder(tf.float32, [None] + hwc, 'y')
    l_in = tf.placeholder(tf.int32, [None], 'labels')
    l = tf.one_hot(l_in, self.nclass)
    wd *= lr
    warmup = tf.clip_by_value(tf.to_float(self.step) / (warmup_pos * (FLAGS.train_kimg << 10)), 0, 1)

    classifier = functools.partial(self.classifier, **kwargs)
    logits_x = classifier(x_in, training=True)
    post_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)  # Take only first call to update batch norm.
    logits_y = classifier(y_in, training=True)
    # Get the pseudo-label loss
    loss_pl = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=tf.argmax(logits_y, axis=-1), logits=logits_y
    )
    # Masks denoting which data points have high-confidence predictions
    greater_than_thresh = tf.reduce_any(
        tf.greater(tf.nn.softmax(logits_y), threshold),
        axis=-1,
        keepdims=True,
    )
    greater_than_thresh = tf.cast(greater_than_thresh, loss_pl.dtype)
    # Only enforce the loss when the model is confident
    loss_pl *= greater_than_thresh
    # Note that we also average over examples without confident outputs;
    # this is consistent with the realistic evaluation codebase
    loss_pl = tf.reduce_mean(loss_pl)

    loss = tf.nn.softmax_cross_entropy_with_logits_v2(labels=l, logits=logits_x)
    loss = tf.reduce_mean(loss)
    tf.summary.scalar('losses/xe', loss)
    tf.summary.scalar('losses/pl', loss_pl)

    ema = tf.train.ExponentialMovingAverage(decay=ema)
    ema_op = ema.apply(utils.model_vars())
    ema_getter = functools.partial(utils.getter_ema, ema)
    post_ops.append(ema_op)
    post_ops.extend([tf.assign(v, v * (1 - wd)) for v in utils.model_vars('classify') if 'kernel' in v.name])

    train_op = tf.train.AdamOptimizer(lr).minimize(loss + loss_pl * warmup * consistency_weight,
                                                   colocate_gradients_with_ops=True)
    with tf.control_dependencies([train_op]):
        train_op = tf.group(*post_ops)

    # Tuning op: only retrain batch norm.
    skip_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    classifier(x_in, training=True)
    train_bn = tf.group(*[v for v in tf.get_collection(tf.GraphKeys.UPDATE_OPS)
                          if v not in skip_ops])

    return EasyDict(
        x=x_in, y=y_in, label=l_in, train_op=train_op, tune_op=train_bn,
        classify_raw=tf.nn.softmax(classifier(x_in, training=False)),  # No EMA, for debugging.
        classify_op=tf.nn.softmax(classifier(x_in, getter=ema_getter, training=False))) | [
"def",
"model",
"(",
"self",
",",
"lr",
",",
"wd",
",",
"ema",
",",
"warmup_pos",
",",
"consistency_weight",
",",
"threshold",
",",
"*",
"*",
"kwargs",
")",
":",
"hwc",
"=",
"[",
"self",
".",
"dataset",
".",
"height",
",",
"self",
".",
"dataset",
".",
"width",
",",
"self",
".",
"dataset",
".",
"colors",
"]",
"x_in",
"=",
"tf",
".",
"placeholder",
"(",
"tf",
".",
"float32",
",",
"[",
"None",
"]",
"+",
"hwc",
",",
"'x'",
")",
"y_in",
"=",
"tf",
".",
"placeholder",
"(",
"tf",
".",
"float32",
",",
"[",
"None",
"]",
"+",
"hwc",
",",
"'y'",
")",
"l_in",
"=",
"tf",
".",
"placeholder",
"(",
"tf",
".",
"int32",
",",
"[",
"None",
"]",
",",
"'labels'",
")",
"l",
"=",
"tf",
".",
"one_hot",
"(",
"l_in",
",",
"self",
".",
"nclass",
")",
"wd",
"*=",
"lr",
"warmup",
"=",
"tf",
".",
"clip_by_value",
"(",
"tf",
".",
"to_float",
"(",
"self",
".",
"step",
")",
"/",
"(",
"warmup_pos",
"*",
"(",
"FLAGS",
".",
"train_kimg",
"<<",
"10",
")",
")",
",",
"0",
",",
"1",
")",
"classifier",
"=",
"functools",
".",
"partial",
"(",
"self",
".",
"classifier",
",",
"*",
"*",
"kwargs",
")",
"logits_x",
"=",
"classifier",
"(",
"x_in",
",",
"training",
"=",
"True",
")",
"post_ops",
"=",
"tf",
".",
"get_collection",
"(",
"tf",
".",
"GraphKeys",
".",
"UPDATE_OPS",
")",
"# Take only first call to update batch norm.",
"logits_y",
"=",
"classifier",
"(",
"y_in",
",",
"training",
"=",
"True",
")",
"# Get the pseudo-label loss",
"loss_pl",
"=",
"tf",
".",
"nn",
".",
"sparse_softmax_cross_entropy_with_logits",
"(",
"labels",
"=",
"tf",
".",
"argmax",
"(",
"logits_y",
",",
"axis",
"=",
"-",
"1",
")",
",",
"logits",
"=",
"logits_y",
")",
"# Masks denoting which data points have high-confidence predictions",
"greater_than_thresh",
"=",
"tf",
".",
"reduce_any",
"(",
"tf",
".",
"greater",
"(",
"tf",
".",
"nn",
".",
"softmax",
"(",
"logits_y",
")",
",",
"threshold",
")",
",",
"axis",
"=",
"-",
"1",
",",
"keepdims",
"=",
"True",
",",
")",
"greater_than_thresh",
"=",
"tf",
".",
"cast",
"(",
"greater_than_thresh",
",",
"loss_pl",
".",
"dtype",
")",
"# Only enforce the loss when the model is confident",
"loss_pl",
"*=",
"greater_than_thresh",
"# Note that we also average over examples without confident outputs;",
"# this is consistent with the realistic evaluation codebase",
"loss_pl",
"=",
"tf",
".",
"reduce_mean",
"(",
"loss_pl",
")",
"loss",
"=",
"tf",
".",
"nn",
".",
"softmax_cross_entropy_with_logits_v2",
"(",
"labels",
"=",
"l",
",",
"logits",
"=",
"logits_x",
")",
"loss",
"=",
"tf",
".",
"reduce_mean",
"(",
"loss",
")",
"tf",
".",
"summary",
".",
"scalar",
"(",
"'losses/xe'",
",",
"loss",
")",
"tf",
".",
"summary",
".",
"scalar",
"(",
"'losses/pl'",
",",
"loss_pl",
")",
"ema",
"=",
"tf",
".",
"train",
".",
"ExponentialMovingAverage",
"(",
"decay",
"=",
"ema",
")",
"ema_op",
"=",
"ema",
".",
"apply",
"(",
"utils",
".",
"model_vars",
"(",
")",
")",
"ema_getter",
"=",
"functools",
".",
"partial",
"(",
"utils",
".",
"getter_ema",
",",
"ema",
")",
"post_ops",
".",
"append",
"(",
"ema_op",
")",
"post_ops",
".",
"extend",
"(",
"[",
"tf",
".",
"assign",
"(",
"v",
",",
"v",
"*",
"(",
"1",
"-",
"wd",
")",
")",
"for",
"v",
"in",
"utils",
".",
"model_vars",
"(",
"'classify'",
")",
"if",
"'kernel'",
"in",
"v",
".",
"name",
"]",
")",
"train_op",
"=",
"tf",
".",
"train",
".",
"AdamOptimizer",
"(",
"lr",
")",
".",
"minimize",
"(",
"loss",
"+",
"loss_pl",
"*",
"warmup",
"*",
"consistency_weight",
",",
"colocate_gradients_with_ops",
"=",
"True",
")",
"with",
"tf",
".",
"control_dependencies",
"(",
"[",
"train_op",
"]",
")",
":",
"train_op",
"=",
"tf",
".",
"group",
"(",
"*",
"post_ops",
")",
"# Tuning op: only retrain batch norm.",
"skip_ops",
"=",
"tf",
".",
"get_collection",
"(",
"tf",
".",
"GraphKeys",
".",
"UPDATE_OPS",
")",
"classifier",
"(",
"x_in",
",",
"training",
"=",
"True",
")",
"train_bn",
"=",
"tf",
".",
"group",
"(",
"*",
"[",
"v",
"for",
"v",
"in",
"tf",
".",
"get_collection",
"(",
"tf",
".",
"GraphKeys",
".",
"UPDATE_OPS",
")",
"if",
"v",
"not",
"in",
"skip_ops",
"]",
")",
"return",
"EasyDict",
"(",
"x",
"=",
"x_in",
",",
"y",
"=",
"y_in",
",",
"label",
"=",
"l_in",
",",
"train_op",
"=",
"train_op",
",",
"tune_op",
"=",
"train_bn",
",",
"classify_raw",
"=",
"tf",
".",
"nn",
".",
"softmax",
"(",
"classifier",
"(",
"x_in",
",",
"training",
"=",
"False",
")",
")",
",",
"# No EMA, for debugging.",
"classify_op",
"=",
"tf",
".",
"nn",
".",
"softmax",
"(",
"classifier",
"(",
"x_in",
",",
"getter",
"=",
"ema_getter",
",",
"training",
"=",
"False",
")",
")",
")"
] | https://github.com/google-research/mixmatch/blob/1011a1d51eaa9ca6f5dba02096a848d1fe3fc38e/pseudo_label.py#L33-L88 |
|||
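The thresholding block in `PseudoLabel.model` above keeps a pseudo-label loss only where the model's best softmax probability clears `threshold`. A NumPy sketch of just that mask (illustrative, mirroring the `tf.greater`/`tf.reduce_any` pair in the code):

```python
import numpy as np

probs = np.array([[0.2, 0.7, 0.1],    # confident: max prob 0.7 > 0.6
                  [0.4, 0.3, 0.3]])   # unconfident: max prob 0.4
threshold = 0.6
mask = (probs > threshold).any(axis=-1, keepdims=True).astype(np.float32)
print(mask)  # [[1.] [0.]] -- only the first example contributes to loss_pl
```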
Kotti/Kotti | 771bc397698183ecba364b7b77635d5c094bbcf5 | kotti/resources.py | python | ContainerMixin.children_with_permission | (
self, request: Request, permission: str = "view"
) | return [c for c in self.children if request.has_permission(permission, c)] | Return only those children for which the user initiating the
request has the asked permission.
:param request: current request
:type request: :class:`kotti.request.Request`
:param permission: The permission for which you want the allowed
children
:type permission: str
:result: List of child nodes
:rtype: list | Return only those children for which the user initiating the
request has the asked permission. | [
"Return",
"only",
"those",
"children",
"for",
"which",
"the",
"user",
"initiating",
"the",
"request",
"has",
"the",
"asked",
"permission",
"."
] | def children_with_permission(
    self, request: Request, permission: str = "view"
) -> "List[Node]":
    """Return only those children for which the user initiating the
    request has the asked permission.

    :param request: current request
    :type request: :class:`kotti.request.Request`

    :param permission: The permission for which you want the allowed
        children
    :type permission: str

    :result: List of child nodes
    :rtype: list
    """
    return [c for c in self.children if request.has_permission(permission, c)] | [
"def",
"children_with_permission",
"(",
"self",
",",
"request",
":",
"Request",
",",
"permission",
":",
"str",
"=",
"\"view\"",
")",
"->",
"\"List[Node]\"",
":",
"return",
"[",
"c",
"for",
"c",
"in",
"self",
".",
"children",
"if",
"request",
".",
"has_permission",
"(",
"permission",
",",
"c",
")",
"]"
] | https://github.com/Kotti/Kotti/blob/771bc397698183ecba364b7b77635d5c094bbcf5/kotti/resources.py#L185-L202 |
|
pculture/mirovideoconverter3 | 27efad91845c8ae544dc27034adb0d3e18ca8f1f | mvc/widgets/cellpack.py | python | Margin.inner_rect | (self, x, y, width, height) | return (x + self.margin_left,
y + self.margin_top,
width - self.margin_width,
height - self.margin_height) | Returns the x, y, width, height of the inner
box. | Returns the x, y, width, height of the inner
box. | [
"Returns",
"the",
"x",
"y",
"width",
"height",
"of",
"the",
"inner",
"box",
"."
] | def inner_rect(self, x, y, width, height):
"""Returns the x, y, width, height of the inner
box.
"""
return (x + self.margin_left,
y + self.margin_top,
width - self.margin_width,
height - self.margin_height) | [
"def",
"inner_rect",
"(",
"self",
",",
"x",
",",
"y",
",",
"width",
",",
"height",
")",
":",
"return",
"(",
"x",
"+",
"self",
".",
"margin_left",
",",
"y",
"+",
"self",
".",
"margin_top",
",",
"width",
"-",
"self",
".",
"margin_width",
",",
"height",
"-",
"self",
".",
"margin_height",
")"
] | https://github.com/pculture/mirovideoconverter3/blob/27efad91845c8ae544dc27034adb0d3e18ca8f1f/mvc/widgets/cellpack.py#L21-L28 |
|
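A worked example for `Margin.inner_rect` above, assuming `margin_width` and `margin_height` are the summed per-axis margins (as the arithmetic implies): with margins left=10, right=10, top=5, bottom=5 and an outer box of (0, 0, 100, 40):

```python
# Plain-arithmetic sketch of inner_rect; no cellpack objects involved.
x, y, width, height = 0, 0, 100, 40
margin_left, margin_top = 10, 5
margin_width, margin_height = 10 + 10, 5 + 5   # left+right, top+bottom
inner = (x + margin_left, y + margin_top,
         width - margin_width, height - margin_height)
print(inner)  # (10, 5, 80, 30)
```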
pypa/pipenv | b21baade71a86ab3ee1429f71fbc14d4f95fb75d | pipenv/patched/notpip/_vendor/msgpack/fallback.py | python | Packer.reset | (self) | Reset internal buffer.
This method is useful only when autoreset=False. | Reset internal buffer. | [
"Reset",
"internal",
"buffer",
"."
] | def reset(self):
"""Reset internal buffer.
This method is useful only when autoreset=False.
"""
self._buffer = StringIO() | [
"def",
"reset",
"(",
"self",
")",
":",
"self",
".",
"_buffer",
"=",
"StringIO",
"(",
")"
] | https://github.com/pypa/pipenv/blob/b21baade71a86ab3ee1429f71fbc14d4f95fb75d/pipenv/patched/notpip/_vendor/msgpack/fallback.py#L1075-L1080 |
||
triaquae/triaquae | bbabf736b3ba56a0c6498e7f04e16c13b8b8f2b9 | TriAquae/models/Ubuntu_13/pyasn1/type/constraint.py | python | AbstractConstraint.getValueMap | (self) | return self._valueMap | [] | def getValueMap(self): return self._valueMap | [
"def",
"getValueMap",
"(",
"self",
")",
":",
"return",
"self",
".",
"_valueMap"
] | https://github.com/triaquae/triaquae/blob/bbabf736b3ba56a0c6498e7f04e16c13b8b8f2b9/TriAquae/models/Ubuntu_13/pyasn1/type/constraint.py#L61-L61 |
|||
renskiy/fabricio | 030b1d6e8050628fcfce78615a056e27e8e56324 | fabricio/docker/stack.py | python | Stack.images | (self) | return list(set(images.values())) | [] | def images(self):
    images = self.__get_images()
    return list(set(images.values())) | [
"def",
"images",
"(",
"self",
")",
":",
"images",
"=",
"self",
".",
"__get_images",
"(",
")",
"return",
"list",
"(",
"set",
"(",
"images",
".",
"values",
"(",
")",
")",
")"
] | https://github.com/renskiy/fabricio/blob/030b1d6e8050628fcfce78615a056e27e8e56324/fabricio/docker/stack.py#L241-L243 |
|||
ambujraj/hacktoberfest2018 | 53df2cac8b3404261131a873352ec4f2ffa3544d | MAC_changer/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/distlib/database.py | python | make_dist | (name, version, **kwargs) | return Distribution(md) | A convenience method for making a dist given just a name and version. | A convenience method for making a dist given just a name and version. | [
"A",
"convenience",
"method",
"for",
"making",
"a",
"dist",
"given",
"just",
"a",
"name",
"and",
"version",
"."
] | def make_dist(name, version, **kwargs):
"""
A convenience method for making a dist given just a name and version.
"""
summary = kwargs.pop('summary', 'Placeholder for summary')
md = Metadata(**kwargs)
md.name = name
md.version = version
md.summary = summary or 'Placeholder for summary'
return Distribution(md) | [
"def",
"make_dist",
"(",
"name",
",",
"version",
",",
"*",
"*",
"kwargs",
")",
":",
"summary",
"=",
"kwargs",
".",
"pop",
"(",
"'summary'",
",",
"'Placeholder for summary'",
")",
"md",
"=",
"Metadata",
"(",
"*",
"*",
"kwargs",
")",
"md",
".",
"name",
"=",
"name",
"md",
".",
"version",
"=",
"version",
"md",
".",
"summary",
"=",
"summary",
"or",
"'Placeholder for summary'",
"return",
"Distribution",
"(",
"md",
")"
] | https://github.com/ambujraj/hacktoberfest2018/blob/53df2cac8b3404261131a873352ec4f2ffa3544d/MAC_changer/venv/lib/python3.7/site-packages/pip-10.0.1-py3.7.egg/pip/_vendor/distlib/database.py#L1327-L1336 |
|
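A hedged usage sketch of `make_dist` (the import path assumes the standalone `distlib` package; pip vendors the same module under `pip._vendor.distlib`):

```python
from distlib.database import make_dist

dist = make_dist('example-project', '1.0.0', summary='Demo distribution')
print(dist.name, dist.version)    # example-project 1.0.0
print(dist.metadata.summary)      # Demo distribution
```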
conan-io/conan-center-index | b43225178fc9688b3dcb59847a7835b5585e5135 | recipes/boost/all/conanfile.py | python | BoostConan._with_dependency | (self, dependency) | return False | Return true when dependency is required according to the dependencies-x.y.z.yml file | Return true when dependency is required according to the dependencies-x.y.z.yml file | [
"Return",
"true",
"when",
"dependency",
"is",
"required",
"according",
"to",
"the",
"dependencies",
"-",
"x",
".",
"y",
".",
"z",
".",
"yml",
"file"
] | def _with_dependency(self, dependency):
"""
Return true when dependency is required according to the dependencies-x.y.z.yml file
"""
for name, reqs in self._dependencies["requirements"].items():
if dependency in reqs:
if not self.options.get_safe("without_{}".format(name), True):
return True
return False | [
"def",
"_with_dependency",
"(",
"self",
",",
"dependency",
")",
":",
"for",
"name",
",",
"reqs",
"in",
"self",
".",
"_dependencies",
"[",
"\"requirements\"",
"]",
".",
"items",
"(",
")",
":",
"if",
"dependency",
"in",
"reqs",
":",
"if",
"not",
"self",
".",
"options",
".",
"get_safe",
"(",
"\"without_{}\"",
".",
"format",
"(",
"name",
")",
",",
"True",
")",
":",
"return",
"True",
"return",
"False"
] | https://github.com/conan-io/conan-center-index/blob/b43225178fc9688b3dcb59847a7835b5585e5135/recipes/boost/all/conanfile.py#L450-L458 |
|
KalleHallden/AutoTimer | 2d954216700c4930baa154e28dbddc34609af7ce | env/lib/python2.7/site-packages/setuptools/_vendor/pyparsing.py | python | dictOf | ( key, value ) | return Dict( ZeroOrMore( Group ( key + value ) ) ) | Helper to easily and clearly define a dictionary by specifying the respective patterns
for the key and value. Takes care of defining the C{L{Dict}}, C{L{ZeroOrMore}}, and C{L{Group}} tokens
in the proper order. The key pattern can include delimiting markers or punctuation,
as long as they are suppressed, thereby leaving the significant key text. The value
pattern can include named results, so that the C{Dict} results can include named token
fields.
Example::
text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
print(OneOrMore(attr_expr).parseString(text).dump())
attr_label = label
attr_value = Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)
# similar to Dict, but simpler call format
result = dictOf(attr_label, attr_value).parseString(text)
print(result.dump())
print(result['shape'])
print(result.shape) # object attribute access works too
print(result.asDict())
prints::
[['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
- color: light blue
- posn: upper left
- shape: SQUARE
- texture: burlap
SQUARE
SQUARE
{'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'} | Helper to easily and clearly define a dictionary by specifying the respective patterns
for the key and value. Takes care of defining the C{L{Dict}}, C{L{ZeroOrMore}}, and C{L{Group}} tokens
in the proper order. The key pattern can include delimiting markers or punctuation,
as long as they are suppressed, thereby leaving the significant key text. The value
pattern can include named results, so that the C{Dict} results can include named token
fields. | [
"Helper",
"to",
"easily",
"and",
"clearly",
"define",
"a",
"dictionary",
"by",
"specifying",
"the",
"respective",
"patterns",
"for",
"the",
"key",
"and",
"value",
".",
"Takes",
"care",
"of",
"defining",
"the",
"C",
"{",
"L",
"{",
"Dict",
"}}",
"C",
"{",
"L",
"{",
"ZeroOrMore",
"}}",
"and",
"C",
"{",
"L",
"{",
"Group",
"}}",
"tokens",
"in",
"the",
"proper",
"order",
".",
"The",
"key",
"pattern",
"can",
"include",
"delimiting",
"markers",
"or",
"punctuation",
"as",
"long",
"as",
"they",
"are",
"suppressed",
"thereby",
"leaving",
"the",
"significant",
"key",
"text",
".",
"The",
"value",
"pattern",
"can",
"include",
"named",
"results",
"so",
"that",
"the",
"C",
"{",
"Dict",
"}",
"results",
"can",
"include",
"named",
"token",
"fields",
"."
] | def dictOf( key, value ):
"""
Helper to easily and clearly define a dictionary by specifying the respective patterns
for the key and value. Takes care of defining the C{L{Dict}}, C{L{ZeroOrMore}}, and C{L{Group}} tokens
in the proper order. The key pattern can include delimiting markers or punctuation,
as long as they are suppressed, thereby leaving the significant key text. The value
pattern can include named results, so that the C{Dict} results can include named token
fields.
Example::
text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
print(OneOrMore(attr_expr).parseString(text).dump())
attr_label = label
attr_value = Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)
# similar to Dict, but simpler call format
result = dictOf(attr_label, attr_value).parseString(text)
print(result.dump())
print(result['shape'])
print(result.shape) # object attribute access works too
print(result.asDict())
prints::
[['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
- color: light blue
- posn: upper left
- shape: SQUARE
- texture: burlap
SQUARE
SQUARE
{'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'}
"""
return Dict( ZeroOrMore( Group ( key + value ) ) ) | [
"def",
"dictOf",
"(",
"key",
",",
"value",
")",
":",
"return",
"Dict",
"(",
"ZeroOrMore",
"(",
"Group",
"(",
"key",
"+",
"value",
")",
")",
")"
] | https://github.com/KalleHallden/AutoTimer/blob/2d954216700c4930baa154e28dbddc34609af7ce/env/lib/python2.7/site-packages/setuptools/_vendor/pyparsing.py#L4646-L4679 |
|
FSecureLABS/Jandroid | e31d0dab58a2bfd6ed8e0a387172b8bd7c893436 | libs/platform-tools/platform-tools_darwin/systrace/catapult/devil/devil/android/battery_utils.py | python | BatteryUtils.__init__ | (self, device, default_timeout=_DEFAULT_TIMEOUT,
default_retries=_DEFAULT_RETRIES) | BatteryUtils constructor.
Args:
device: A DeviceUtils instance.
default_timeout: An integer containing the default number of seconds to
wait for an operation to complete if no explicit value
is provided.
default_retries: An integer containing the default number or times an
operation should be retried on failure if no explicit
value is provided.
Raises:
TypeError: If it is not passed a DeviceUtils instance. | BatteryUtils constructor. | [
"BatteryUtils",
"constructor",
"."
] | def __init__(self, device, default_timeout=_DEFAULT_TIMEOUT,
             default_retries=_DEFAULT_RETRIES):
  """BatteryUtils constructor.

  Args:
    device: A DeviceUtils instance.
    default_timeout: An integer containing the default number of seconds to
                     wait for an operation to complete if no explicit value
                     is provided.
    default_retries: An integer containing the default number or times an
                     operation should be retried on failure if no explicit
                     value is provided.

  Raises:
    TypeError: If it is not passed a DeviceUtils instance.
  """
  if not isinstance(device, device_utils.DeviceUtils):
    raise TypeError('Must be initialized with DeviceUtils object.')
  self._device = device
  self._cache = device.GetClientCache(self.__class__.__name__)
  self._default_timeout = default_timeout
  self._default_retries = default_retries | [
"def",
"__init__",
"(",
"self",
",",
"device",
",",
"default_timeout",
"=",
"_DEFAULT_TIMEOUT",
",",
"default_retries",
"=",
"_DEFAULT_RETRIES",
")",
":",
"if",
"not",
"isinstance",
"(",
"device",
",",
"device_utils",
".",
"DeviceUtils",
")",
":",
"raise",
"TypeError",
"(",
"'Must be initialized with DeviceUtils object.'",
")",
"self",
".",
"_device",
"=",
"device",
"self",
".",
"_cache",
"=",
"device",
".",
"GetClientCache",
"(",
"self",
".",
"__class__",
".",
"__name__",
")",
"self",
".",
"_default_timeout",
"=",
"default_timeout",
"self",
".",
"_default_retries",
"=",
"default_retries"
] | https://github.com/FSecureLABS/Jandroid/blob/e31d0dab58a2bfd6ed8e0a387172b8bd7c893436/libs/platform-tools/platform-tools_darwin/systrace/catapult/devil/devil/android/battery_utils.py#L182-L202 |
||
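A minimal usage sketch for the constructor above, assuming the devil package layout from this entry's path; selecting the first device via `DeviceUtils.HealthyDevices()` is an assumption about the caller's environment:

```python
# Sketch only: needs the devil package and an attached Android device.
from devil.android import battery_utils, device_utils

device = device_utils.DeviceUtils.HealthyDevices()[0]  # first attached device
battery = battery_utils.BatteryUtils(device)

# Anything that is not a DeviceUtils instance raises TypeError, per the
# isinstance guard in __init__ above.
```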
rotki/rotki | aafa446815cdd5e9477436d1b02bee7d01b398c8 | rotkehlchen/exchanges/exchange.py | python | ExchangeInterface.query_online_margin_history | (
self,
start_ts: Timestamp,
end_ts: Timestamp,
) | Queries the exchange's API for the margin positions history of the user
Should be implemented by subclasses if the exchange can return margin position history in
any form. This is only implemented for bitmex at the moment. | Queries the exchange's API for the margin positions history of the user | [
"Queries",
"the",
"exchange",
"s",
"API",
"for",
"the",
"margin",
"positions",
"history",
"of",
"the",
"user"
] | def query_online_margin_history(
self,
start_ts: Timestamp,
end_ts: Timestamp,
) -> List[MarginPosition]:
"""Queries the exchange's API for the margin positions history of the user
Should be implemented by subclasses if the exchange can return margin position history in
any form. This is only implemented for bitmex at the moment.
"""
raise NotImplementedError(
'query_online_margin_history() should only be implemented by subclasses',
) | [
"def",
"query_online_margin_history",
"(",
"self",
",",
"start_ts",
":",
"Timestamp",
",",
"end_ts",
":",
"Timestamp",
",",
")",
"->",
"List",
"[",
"MarginPosition",
"]",
":",
"raise",
"NotImplementedError",
"(",
"'query_online_margin_history() should only be implemented by subclasses'",
",",
")"
] | https://github.com/rotki/rotki/blob/aafa446815cdd5e9477436d1b02bee7d01b398c8/rotkehlchen/exchanges/exchange.py#L197-L209 |
||
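Because the method above only raises `NotImplementedError`, a hypothetical subclass sketch (`MyExchange` is not part of rotki) shows the override shape the docstring asks for; `ExchangeInterface`, `Timestamp` and `MarginPosition` are the names this module already imports:

```python
from typing import List

class MyExchange(ExchangeInterface):  # hypothetical subclass, for illustration
    def query_online_margin_history(
            self,
            start_ts: Timestamp,
            end_ts: Timestamp,
    ) -> List[MarginPosition]:
        # Query the exchange API for positions closed between the two
        # timestamps and map each raw entry into a MarginPosition.
        return []
```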
kornia/kornia | b12d6611b1c41d47b2c93675f0ea344b5314a688 | kornia/augmentation/container/video.py | python | VideoSequential.forward | ( # type: ignore
self, input: torch.Tensor, label: Optional[torch.Tensor] = None, params: Optional[List[ParamItem]] = None
) | return self.__packup_output__(output, label) | Define the video computation performed. | Define the video computation performed. | [
"Define",
"the",
"video",
"computation",
"performed",
"."
] | def forward( # type: ignore
self, input: torch.Tensor, label: Optional[torch.Tensor] = None, params: Optional[List[ParamItem]] = None
) -> Union[TensorWithTransformMat, Tuple[TensorWithTransformMat, torch.Tensor]]:
"""Define the video computation performed."""
if len(input.shape) != 5:
raise AssertionError(f"Input must be a 5-dim tensor. Got {input.shape}.")
if params is None:
params = self.forward_parameters(input.shape)
# Size of T
if self.apply_inverse_func in (InputApplyInverse, MaskApplyInverse):
frame_num: int = input.size(self._temporal_channel)
input, label = self._input_shape_convert_in(input, label, frame_num)
else:
if label is not None:
raise ValueError(f"Invalid label value. Got {label}")
batch_size: int = input.size(0)
input = input.view(-1, *input.shape[2:])
out = super().forward(input, label, params) # type: ignore
if self.return_label:
output, label = cast(Tuple[TensorWithTransformMat, torch.Tensor], out)
else:
output = cast(TensorWithTransformMat, out)
if isinstance(output, (tuple, list)):
if self.apply_inverse_func in (InputApplyInverse, MaskApplyInverse):
_out, label = self._input_shape_convert_back(output[0], label, frame_num)
output = (_out, output[1])
else:
if label is not None:
raise ValueError(f"Invalid label value. Got {label}")
output = output[0].view(batch_size, -1, *output[0].shape[1:])
else:
if self.apply_inverse_func in (InputApplyInverse, MaskApplyInverse):
output, label = self._input_shape_convert_back(output, label, frame_num)
else:
if label is not None:
raise ValueError(f"Invalid label value. Got {label}")
output = output.view(batch_size, -1, *output.shape[1:])
return self.__packup_output__(output, label) | [
"def",
"forward",
"(",
"# type: ignore",
"self",
",",
"input",
":",
"torch",
".",
"Tensor",
",",
"label",
":",
"Optional",
"[",
"torch",
".",
"Tensor",
"]",
"=",
"None",
",",
"params",
":",
"Optional",
"[",
"List",
"[",
"ParamItem",
"]",
"]",
"=",
"None",
")",
"->",
"Union",
"[",
"TensorWithTransformMat",
",",
"Tuple",
"[",
"TensorWithTransformMat",
",",
"torch",
".",
"Tensor",
"]",
"]",
":",
"if",
"len",
"(",
"input",
".",
"shape",
")",
"!=",
"5",
":",
"raise",
"AssertionError",
"(",
"f\"Input must be a 5-dim tensor. Got {input.shape}.\"",
")",
"if",
"params",
"is",
"None",
":",
"params",
"=",
"self",
".",
"forward_parameters",
"(",
"input",
".",
"shape",
")",
"# Size of T",
"if",
"self",
".",
"apply_inverse_func",
"in",
"(",
"InputApplyInverse",
",",
"MaskApplyInverse",
")",
":",
"frame_num",
":",
"int",
"=",
"input",
".",
"size",
"(",
"self",
".",
"_temporal_channel",
")",
"input",
",",
"label",
"=",
"self",
".",
"_input_shape_convert_in",
"(",
"input",
",",
"label",
",",
"frame_num",
")",
"else",
":",
"if",
"label",
"is",
"not",
"None",
":",
"raise",
"ValueError",
"(",
"f\"Invalid label value. Got {label}\"",
")",
"batch_size",
":",
"int",
"=",
"input",
".",
"size",
"(",
"0",
")",
"input",
"=",
"input",
".",
"view",
"(",
"-",
"1",
",",
"*",
"input",
".",
"shape",
"[",
"2",
":",
"]",
")",
"out",
"=",
"super",
"(",
")",
".",
"forward",
"(",
"input",
",",
"label",
",",
"params",
")",
"# type: ignore",
"if",
"self",
".",
"return_label",
":",
"output",
",",
"label",
"=",
"cast",
"(",
"Tuple",
"[",
"TensorWithTransformMat",
",",
"torch",
".",
"Tensor",
"]",
",",
"out",
")",
"else",
":",
"output",
"=",
"cast",
"(",
"TensorWithTransformMat",
",",
"out",
")",
"if",
"isinstance",
"(",
"output",
",",
"(",
"tuple",
",",
"list",
")",
")",
":",
"if",
"self",
".",
"apply_inverse_func",
"in",
"(",
"InputApplyInverse",
",",
"MaskApplyInverse",
")",
":",
"_out",
",",
"label",
"=",
"self",
".",
"_input_shape_convert_back",
"(",
"output",
"[",
"0",
"]",
",",
"label",
",",
"frame_num",
")",
"output",
"=",
"(",
"_out",
",",
"output",
"[",
"1",
"]",
")",
"else",
":",
"if",
"label",
"is",
"not",
"None",
":",
"raise",
"ValueError",
"(",
"f\"Invalid label value. Got {label}\"",
")",
"output",
"=",
"output",
"[",
"0",
"]",
".",
"view",
"(",
"batch_size",
",",
"-",
"1",
",",
"*",
"output",
"[",
"0",
"]",
".",
"shape",
"[",
"1",
":",
"]",
")",
"else",
":",
"if",
"self",
".",
"apply_inverse_func",
"in",
"(",
"InputApplyInverse",
",",
"MaskApplyInverse",
")",
":",
"output",
",",
"label",
"=",
"self",
".",
"_input_shape_convert_back",
"(",
"output",
",",
"label",
",",
"frame_num",
")",
"else",
":",
"if",
"label",
"is",
"not",
"None",
":",
"raise",
"ValueError",
"(",
"f\"Invalid label value. Got {label}\"",
")",
"output",
"=",
"output",
".",
"view",
"(",
"batch_size",
",",
"-",
"1",
",",
"*",
"output",
".",
"shape",
"[",
"1",
":",
"]",
")",
"return",
"self",
".",
"__packup_output__",
"(",
"output",
",",
"label",
")"
] | https://github.com/kornia/kornia/blob/b12d6611b1c41d47b2c93675f0ea344b5314a688/kornia/augmentation/container/video.py#L233-L275 |
|
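A usage sketch for `forward` above, assuming kornia's documented `VideoSequential` constructor and a `(B, T, C, H, W)` clip:

```python
import torch
import kornia.augmentation as K

clip = torch.rand(2, 4, 3, 32, 32)  # (B, T, C, H, W): 2 clips of 4 frames

aug = K.VideoSequential(
    K.RandomHorizontalFlip(p=1.0),
    data_format="BTCHW",
    same_on_frame=True,  # identical parameters for every frame of a clip
)
out = aug(clip)  # dispatches to forward(); output keeps shape (2, 4, 3, 32, 32)
```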
Tautulli/Tautulli | 2410eb33805aaac4bd1c5dad0f71e4f15afaf742 | lib/future/backports/http/cookies.py | python | _unquote | (mystr) | return _nulljoin(res) | [] | def _unquote(mystr):
# If there aren't any doublequotes,
# then there can't be any special characters. See RFC 2109.
if len(mystr) < 2:
return mystr
if mystr[0] != '"' or mystr[-1] != '"':
return mystr
# We have to assume that we must decode this string.
# Down to work.
# Remove the "s
mystr = mystr[1:-1]
# Check for special sequences. Examples:
# \012 --> \n
# \" --> "
#
i = 0
n = len(mystr)
res = []
while 0 <= i < n:
o_match = _OctalPatt.search(mystr, i)
q_match = _QuotePatt.search(mystr, i)
if not o_match and not q_match: # Neither matched
res.append(mystr[i:])
break
# else:
j = k = -1
if o_match:
j = o_match.start(0)
if q_match:
k = q_match.start(0)
if q_match and (not o_match or k < j): # QuotePatt matched
res.append(mystr[i:k])
res.append(mystr[k+1])
i = k + 2
else: # OctalPatt matched
res.append(mystr[i:j])
res.append(chr(int(mystr[j+1:j+4], 8)))
i = j + 4
return _nulljoin(res) | [
"def",
"_unquote",
"(",
"mystr",
")",
":",
"# If there aren't any doublequotes,",
"# then there can't be any special characters. See RFC 2109.",
"if",
"len",
"(",
"mystr",
")",
"<",
"2",
":",
"return",
"mystr",
"if",
"mystr",
"[",
"0",
"]",
"!=",
"'\"'",
"or",
"mystr",
"[",
"-",
"1",
"]",
"!=",
"'\"'",
":",
"return",
"mystr",
"# We have to assume that we must decode this string.",
"# Down to work.",
"# Remove the \"s",
"mystr",
"=",
"mystr",
"[",
"1",
":",
"-",
"1",
"]",
"# Check for special sequences. Examples:",
"# \\012 --> \\n",
"# \\\" --> \"",
"#",
"i",
"=",
"0",
"n",
"=",
"len",
"(",
"mystr",
")",
"res",
"=",
"[",
"]",
"while",
"0",
"<=",
"i",
"<",
"n",
":",
"o_match",
"=",
"_OctalPatt",
".",
"search",
"(",
"mystr",
",",
"i",
")",
"q_match",
"=",
"_QuotePatt",
".",
"search",
"(",
"mystr",
",",
"i",
")",
"if",
"not",
"o_match",
"and",
"not",
"q_match",
":",
"# Neither matched",
"res",
".",
"append",
"(",
"mystr",
"[",
"i",
":",
"]",
")",
"break",
"# else:",
"j",
"=",
"k",
"=",
"-",
"1",
"if",
"o_match",
":",
"j",
"=",
"o_match",
".",
"start",
"(",
"0",
")",
"if",
"q_match",
":",
"k",
"=",
"q_match",
".",
"start",
"(",
"0",
")",
"if",
"q_match",
"and",
"(",
"not",
"o_match",
"or",
"k",
"<",
"j",
")",
":",
"# QuotePatt matched",
"res",
".",
"append",
"(",
"mystr",
"[",
"i",
":",
"k",
"]",
")",
"res",
".",
"append",
"(",
"mystr",
"[",
"k",
"+",
"1",
"]",
")",
"i",
"=",
"k",
"+",
"2",
"else",
":",
"# OctalPatt matched",
"res",
".",
"append",
"(",
"mystr",
"[",
"i",
":",
"j",
"]",
")",
"res",
".",
"append",
"(",
"chr",
"(",
"int",
"(",
"mystr",
"[",
"j",
"+",
"1",
":",
"j",
"+",
"4",
"]",
",",
"8",
")",
")",
")",
"i",
"=",
"j",
"+",
"4",
"return",
"_nulljoin",
"(",
"res",
")"
] | https://github.com/Tautulli/Tautulli/blob/2410eb33805aaac4bd1c5dad0f71e4f15afaf742/lib/future/backports/http/cookies.py#L251-L292 |
|||
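A behavioral sketch of the helper above (it is module-private, so this only illustrates the two escape rules the comments describe: `\"` becomes a literal quote and octal escapes such as `\012` become the corresponding character):

```python
assert _unquote('abc') == 'abc'         # shorter than 2 chars or unquoted: unchanged
assert _unquote('"abc"') == 'abc'       # surrounding quotes stripped
assert _unquote('"a\\"b"') == 'a"b'     # \" -> "
assert _unquote('"a\\012b"') == 'a\nb'  # \012 (octal) -> newline
```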
lukelbd/proplot | d0bc9c0857d9295b380b8613ef9aba81d50a067c | proplot/colors.py | python | ContinuousColormap.__str__ | (self) | return type(self).__name__ + f'(name={self.name!r})' | [] | def __str__(self):
return type(self).__name__ + f'(name={self.name!r})' | [
"def",
"__str__",
"(",
"self",
")",
":",
"return",
"type",
"(",
"self",
")",
".",
"__name__",
"+",
"f'(name={self.name!r})'"
] | https://github.com/lukelbd/proplot/blob/d0bc9c0857d9295b380b8613ef9aba81d50a067c/proplot/colors.py#L907-L908 |
|||
zewelor/bt-mqtt-gateway | 5c344a00e4ee968c632942c092c864303753255c | workers/base.py | python | BaseWorker.format_discovery_id | (self, mac, *sensor_args) | return "bt-mqtt-gateway/{}".format(
self.format_discovery_topic(mac, *sensor_args)
) | [] | def format_discovery_id(self, mac, *sensor_args):
return "bt-mqtt-gateway/{}".format(
self.format_discovery_topic(mac, *sensor_args)
) | [
"def",
"format_discovery_id",
"(",
"self",
",",
"mac",
",",
"*",
"sensor_args",
")",
":",
"return",
"\"bt-mqtt-gateway/{}\"",
".",
"format",
"(",
"self",
".",
"format_discovery_topic",
"(",
"mac",
",",
"*",
"sensor_args",
")",
")"
] | https://github.com/zewelor/bt-mqtt-gateway/blob/5c344a00e4ee968c632942c092c864303753255c/workers/base.py#L29-L32 |
|||
mudpi/mudpi-core | fb206b1136f529c7197f1e6b29629ed05630d377 | mudpi/extensions/i2c/toggle.py | python | I2CToggle.init | (self) | return True | Connect to the relay over I2C | Connect to the relay over I2C | [
"Connect",
"to",
"the",
"relay",
"over",
"I2C"
] | def init(self):
""" Connect to the relay over I2C """
super().init()
if self.invert_state:
self.pin_state_on = 0x00
self.pin_state_off = 0xFF
else:
self.pin_state_on = 0xFF
self.pin_state_off = 0x00
# Prepare the relay i2c connection
self.bus = smbus2.SMBus(DEVICE_BUS)
self.bus.write_byte_data(self.address, self.register, self.pin_state_off)
# Active is used to keep track of durations
self.active = False
time.sleep(0.1)
return True | [
"def",
"init",
"(",
"self",
")",
":",
"super",
"(",
")",
".",
"init",
"(",
")",
"if",
"self",
".",
"invert_state",
":",
"self",
".",
"pin_state_on",
"=",
"0x00",
"self",
".",
"pin_state_off",
"=",
"0xFF",
"else",
":",
"self",
".",
"pin_state_on",
"=",
"0xFF",
"self",
".",
"pin_state_off",
"=",
"0x00",
"# Prepare the relay i2c connection",
"self",
".",
"bus",
"=",
"smbus2",
".",
"SMBus",
"(",
"DEVICE_BUS",
")",
"self",
".",
"bus",
".",
"write_byte_data",
"(",
"self",
".",
"address",
",",
"self",
".",
"register",
",",
"self",
".",
"pin_state_off",
")",
"# Active is used to keep track of durations",
"self",
".",
"active",
"=",
"False",
"time",
".",
"sleep",
"(",
"0.1",
")",
"return",
"True"
] | https://github.com/mudpi/mudpi-core/blob/fb206b1136f529c7197f1e6b29629ed05630d377/mudpi/extensions/i2c/toggle.py#L81-L100 |
|
apache/libcloud | 90971e17bfd7b6bb97b2489986472c531cc8e140 | libcloud/dns/drivers/durabledns.py | python | DurableDNSDriver.list_records | (self, zone) | return records | Return a list of records for the provided zone.
:param zone: Zone to list records for.
:type zone: :class:`Zone`
:return: ``list`` of :class:`Record` | Return a list of records for the provided zone. | [
"Return",
"a",
"list",
"of",
"records",
"for",
"the",
"provided",
"zone",
"."
] | def list_records(self, zone):
"""
Return a list of records for the provided zone.
:param zone: Zone to list records for.
:type zone: :class:`Zone`
:return: ``list`` of :class:`Record`
"""
schema_params = SCHEMA_BUILDER_MAP.get("list_records")
attributes = schema_params.get("attributes")
schema = api_schema_builder(
schema_params.get("urn_nid"), schema_params.get("method"), attributes
)
params = {"apiuser": self.key, "apikey": self.secret, "zonename": zone.id}
urn = list(schema)[0]
for child in urn:
key = child.tag.split(":")[2]
if key in attributes:
child.text = str(params.get(key))
req_data = tostring(schema)
action = "/services/dns/listRecords.php?"
params = {}
headers = {"SOAPAction": "urn:listRecordswsdl#listRecords"}
try:
response = self.connection.request(
action=action,
params=params,
data=req_data,
method="POST",
headers=headers,
)
except DurableDNSException as e:
if "Zone does not exist" in e.message:
raise ZoneDoesNotExistError(
zone_id=zone.id, driver=self, value=e.message
)
raise e
# listRecords method doesn't return full data in records as getRecord
# method does.
records = []
for data in response.objects:
record = self.get_record(zone.id, data.get("id"))
records.append(record)
return records | [
"def",
"list_records",
"(",
"self",
",",
"zone",
")",
":",
"schema_params",
"=",
"SCHEMA_BUILDER_MAP",
".",
"get",
"(",
"\"list_records\"",
")",
"attributes",
"=",
"schema_params",
".",
"get",
"(",
"\"attributes\"",
")",
"schema",
"=",
"api_schema_builder",
"(",
"schema_params",
".",
"get",
"(",
"\"urn_nid\"",
")",
",",
"schema_params",
".",
"get",
"(",
"\"method\"",
")",
",",
"attributes",
")",
"params",
"=",
"{",
"\"apiuser\"",
":",
"self",
".",
"key",
",",
"\"apikey\"",
":",
"self",
".",
"secret",
",",
"\"zonename\"",
":",
"zone",
".",
"id",
"}",
"urn",
"=",
"list",
"(",
"schema",
")",
"[",
"0",
"]",
"for",
"child",
"in",
"urn",
":",
"key",
"=",
"child",
".",
"tag",
".",
"split",
"(",
"\":\"",
")",
"[",
"2",
"]",
"if",
"key",
"in",
"attributes",
":",
"child",
".",
"text",
"=",
"str",
"(",
"params",
".",
"get",
"(",
"key",
")",
")",
"req_data",
"=",
"tostring",
"(",
"schema",
")",
"action",
"=",
"\"/services/dns/listRecords.php?\"",
"params",
"=",
"{",
"}",
"headers",
"=",
"{",
"\"SOAPAction\"",
":",
"\"urn:listRecordswsdl#listRecords\"",
"}",
"try",
":",
"response",
"=",
"self",
".",
"connection",
".",
"request",
"(",
"action",
"=",
"action",
",",
"params",
"=",
"params",
",",
"data",
"=",
"req_data",
",",
"method",
"=",
"\"POST\"",
",",
"headers",
"=",
"headers",
",",
")",
"except",
"DurableDNSException",
"as",
"e",
":",
"if",
"\"Zone does not exist\"",
"in",
"e",
".",
"message",
":",
"raise",
"ZoneDoesNotExistError",
"(",
"zone_id",
"=",
"zone",
".",
"id",
",",
"driver",
"=",
"self",
",",
"value",
"=",
"e",
".",
"message",
")",
"raise",
"e",
"# listRecords method doens't return full data in records as getRecord",
"# method does.",
"records",
"=",
"[",
"]",
"for",
"data",
"in",
"response",
".",
"objects",
":",
"record",
"=",
"self",
".",
"get_record",
"(",
"zone",
".",
"id",
",",
"data",
".",
"get",
"(",
"\"id\"",
")",
")",
"records",
".",
"append",
"(",
"record",
")",
"return",
"records"
] | https://github.com/apache/libcloud/blob/90971e17bfd7b6bb97b2489986472c531cc8e140/libcloud/dns/drivers/durabledns.py#L118-L164 |
|
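A usage sketch through libcloud's public driver factory; the credentials are placeholders:

```python
from libcloud.dns.providers import get_driver
from libcloud.dns.types import Provider

cls = get_driver(Provider.DURABLEDNS)
driver = cls('api-user', 'api-key')  # placeholder credentials

zone = driver.list_zones()[0]
# list_records() above fetches full data per record via get_record().
for record in driver.list_records(zone):
    print(record.id, record.type, record.data)
```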
collinsctk/PyQYT | 7af3673955f94ff1b2df2f94220cd2dab2e252af | ExtentionPackages/pysmi/parser/smi.py | python | SmiV2Parser.p_typeSMIonly | (self, p) | typeSMIonly : COUNTER32
| GAUGE32
| COUNTER64 | typeSMIonly : COUNTER32
| GAUGE32
| COUNTER64 | [
"typeSMIonly",
":",
"COUNTER32",
"|",
"GAUGE32",
"|",
"COUNTER64"
] | def p_typeSMIonly(self, p):
"""typeSMIonly : COUNTER32
| GAUGE32
| COUNTER64"""
p[0] = p[1] | [
"def",
"p_typeSMIonly",
"(",
"self",
",",
"p",
")",
":",
"p",
"[",
"0",
"]",
"=",
"p",
"[",
"1",
"]"
] | https://github.com/collinsctk/PyQYT/blob/7af3673955f94ff1b2df2f94220cd2dab2e252af/ExtentionPackages/pysmi/parser/smi.py#L274-L278 |
||
alan-turing-institute/sktime | 79cc513346b1257a6f3fa8e4ed855b5a2a7de716 | sktime/distances/lower_bounding.py | python | itakura_parallelogram | (
x: np.ndarray, y: np.ndarray, itakura_max_slope: float
) | return bounding_matrix | Create an itakura parallelogram bounding matrix.
Parameters
----------
x: np.ndarray (2d array)
First timeseries.
y: np.ndarray (2d array)
Second timeseries.
itakura_max_slope: float or int
Gradient of the slope must be between 0 and 1.
Returns
-------
np.ndarray (2d of size mxn where m is len(x) and n is len(y)).
Itakura parallelogram bounding matrix where the values inside the bound are finite
values (0s) and outside the bounds are infinity (non finite).
Raises
------
ValueError
If the itakura_max_slope is not a float or int. | Create an itakura parallelogram bounding matrix. | [
"Create",
"an",
"itakura",
"parallelogram",
"bounding",
"matrix",
"."
] | def itakura_parallelogram(
x: np.ndarray, y: np.ndarray, itakura_max_slope: float
) -> np.ndarray:
"""Create a itakura parallelogram bounding matrix.
Parameters
----------
x: np.ndarray (2d array)
First timeseries.
y: np.ndarray (2d array)
Second timeseries.
itakura_max_slope: float or int
Gradient of the slope must be between 0 and 1.
Returns
-------
np.ndarray (2d of size mxn where m is len(x) and n is len(y)).
Itakura parallelogram bounding matrix where the values inside the bound are finite
values (0s) and outside the bounds are infinity (non finite).
Raises
------
ValueError
If the itakura_max_slope is not a float or int.
"""
if itakura_max_slope < 0 or itakura_max_slope > 1:
raise ValueError("Window must be between 0 and 1")
bounding_matrix = np.full((y.shape[0], x.shape[0]), np.inf)
itakura_max_slope = math.floor(((x.shape[0] / 100) * itakura_max_slope) * 100) / 2
x_size = x.shape[0]
y_size = y.shape[0]
middle_x_upper = math.ceil(x_size / 2)
middle_x_lower = math.floor(x_size / 2)
if middle_x_lower == middle_x_upper:
middle_x_lower = middle_x_lower - 1
middle_y = math.floor(y_size / 2)
difference_from_middle_y = abs((middle_x_lower * itakura_max_slope) - middle_y)
middle_y_lower = middle_y + difference_from_middle_y
middle_y_upper = middle_y - difference_from_middle_y
x_upper_line_values = np.interp(
list(range(x_size)),
[0, middle_x_lower, middle_x_upper, x_size - 1],
[0, middle_y_upper, middle_y_upper, y_size - 1],
)
x_lower_line_values = np.interp(
list(range(x_size)),
[0, middle_x_lower, middle_x_upper, x_size - 1],
[0, middle_y_lower, middle_y_lower, y_size - 1],
)
if np.array_equal(x_upper_line_values, x_lower_line_values):
x_upper_line_values = _check_line_steps(x_upper_line_values)
bounding_matrix = create_shape_on_matrix(
bounding_matrix, x_upper_line_values, x_lower_line_values
)
return bounding_matrix | [
"def",
"itakura_parallelogram",
"(",
"x",
":",
"np",
".",
"ndarray",
",",
"y",
":",
"np",
".",
"ndarray",
",",
"itakura_max_slope",
":",
"float",
")",
"->",
"np",
".",
"ndarray",
":",
"if",
"itakura_max_slope",
"<",
"0",
"or",
"itakura_max_slope",
">",
"1",
":",
"raise",
"ValueError",
"(",
"\"Window must between 0 and 1\"",
")",
"bounding_matrix",
"=",
"np",
".",
"full",
"(",
"(",
"y",
".",
"shape",
"[",
"0",
"]",
",",
"x",
".",
"shape",
"[",
"0",
"]",
")",
",",
"np",
".",
"inf",
")",
"itakura_max_slope",
"=",
"math",
".",
"floor",
"(",
"(",
"(",
"x",
".",
"shape",
"[",
"0",
"]",
"/",
"100",
")",
"*",
"itakura_max_slope",
")",
"*",
"100",
")",
"/",
"2",
"x_size",
"=",
"x",
".",
"shape",
"[",
"0",
"]",
"y_size",
"=",
"y",
".",
"shape",
"[",
"0",
"]",
"middle_x_upper",
"=",
"math",
".",
"ceil",
"(",
"x_size",
"/",
"2",
")",
"middle_x_lower",
"=",
"math",
".",
"floor",
"(",
"x_size",
"/",
"2",
")",
"if",
"middle_x_lower",
"==",
"middle_x_upper",
":",
"middle_x_lower",
"=",
"middle_x_lower",
"-",
"1",
"middle_y",
"=",
"math",
".",
"floor",
"(",
"y_size",
"/",
"2",
")",
"difference_from_middle_y",
"=",
"abs",
"(",
"(",
"middle_x_lower",
"*",
"itakura_max_slope",
")",
"-",
"middle_y",
")",
"middle_y_lower",
"=",
"middle_y",
"+",
"difference_from_middle_y",
"middle_y_upper",
"=",
"middle_y",
"-",
"difference_from_middle_y",
"x_upper_line_values",
"=",
"np",
".",
"interp",
"(",
"list",
"(",
"range",
"(",
"x_size",
")",
")",
",",
"[",
"0",
",",
"middle_x_lower",
",",
"middle_x_upper",
",",
"x_size",
"-",
"1",
"]",
",",
"[",
"0",
",",
"middle_y_upper",
",",
"middle_y_upper",
",",
"y_size",
"-",
"1",
"]",
",",
")",
"x_lower_line_values",
"=",
"np",
".",
"interp",
"(",
"list",
"(",
"range",
"(",
"x_size",
")",
")",
",",
"[",
"0",
",",
"middle_x_lower",
",",
"middle_x_upper",
",",
"x_size",
"-",
"1",
"]",
",",
"[",
"0",
",",
"middle_y_lower",
",",
"middle_y_lower",
",",
"y_size",
"-",
"1",
"]",
",",
")",
"if",
"np",
".",
"array_equal",
"(",
"x_upper_line_values",
",",
"x_lower_line_values",
")",
":",
"x_upper_line_values",
"=",
"_check_line_steps",
"(",
"x_upper_line_values",
")",
"bounding_matrix",
"=",
"create_shape_on_matrix",
"(",
"bounding_matrix",
",",
"x_upper_line_values",
",",
"x_lower_line_values",
")",
"return",
"bounding_matrix"
] | https://github.com/alan-turing-institute/sktime/blob/79cc513346b1257a6f3fa8e4ed855b5a2a7de716/sktime/distances/lower_bounding.py#L180-L241 |
|
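A numeric sketch of the function above; the import path follows this entry's file path, and the 2-d `(n_timepoints, n_dims)` inputs follow the docstring:

```python
import numpy as np
from sktime.distances.lower_bounding import itakura_parallelogram

x = np.linspace(0.0, 1.0, 10).reshape(10, 1)
y = np.linspace(1.0, 0.0, 10).reshape(10, 1)

mask = itakura_parallelogram(x, y, itakura_max_slope=0.5)
# Finite cells (0.0) lie inside the parallelogram; np.inf marks cells that
# a DTW computation should skip.
print(np.isfinite(mask).sum(), "cells inside the band")
```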
wistbean/fxxkpython | 88e16d79d8dd37236ba6ecd0d0ff11d63143968c | vip/qyxuan/projects/venv/lib/python3.6/site-packages/pip-19.0.3-py3.6.egg/pip/_vendor/pkg_resources/__init__.py | python | _ReqExtras.markers_pass | (self, req, extras=None) | return not req.marker or any(extra_evals) | Evaluate markers for req against each extra that
demanded it.
Return False if the req has a marker and fails
evaluation. Otherwise, return True. | Evaluate markers for req against each extra that
demanded it. | [
"Evaluate",
"markers",
"for",
"req",
"against",
"each",
"extra",
"that",
"demanded",
"it",
"."
] | def markers_pass(self, req, extras=None):
"""
Evaluate markers for req against each extra that
demanded it.
Return False if the req has a marker and fails
evaluation. Otherwise, return True.
"""
extra_evals = (
req.marker.evaluate({'extra': extra})
for extra in self.get(req, ()) + (extras or (None,))
)
return not req.marker or any(extra_evals) | [
"def",
"markers_pass",
"(",
"self",
",",
"req",
",",
"extras",
"=",
"None",
")",
":",
"extra_evals",
"=",
"(",
"req",
".",
"marker",
".",
"evaluate",
"(",
"{",
"'extra'",
":",
"extra",
"}",
")",
"for",
"extra",
"in",
"self",
".",
"get",
"(",
"req",
",",
"(",
")",
")",
"+",
"(",
"extras",
"or",
"(",
"None",
",",
")",
")",
")",
"return",
"not",
"req",
".",
"marker",
"or",
"any",
"(",
"extra_evals",
")"
] | https://github.com/wistbean/fxxkpython/blob/88e16d79d8dd37236ba6ecd0d0ff11d63143968c/vip/qyxuan/projects/venv/lib/python3.6/site-packages/pip-19.0.3-py3.6.egg/pip/_vendor/pkg_resources/__init__.py#L942-L954 |
|
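The evaluation that `markers_pass` above delegates to is PEP 508 marker evaluation; a standalone sketch with the `packaging` library (which setuptools vendors) shows the `{'extra': ...}` environment override:

```python
from packaging.markers import Marker

marker = Marker('extra == "security" and python_version >= "3"')
print(marker.evaluate({'extra': 'security'}))  # True on a Python 3 interpreter
print(marker.evaluate({'extra': 'tests'}))     # False: the extra does not match
```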
ganglia/gmond_python_modules | 2f7fcab3d27926ef4a2feb1b53c09af16a43e729 | gpu/nvidia/nvidia-ml-py-3.295.00/pynvml.py | python | nvmlDeviceGetMaxPcieLinkGeneration | (handle) | return gen.value | [] | def nvmlDeviceGetMaxPcieLinkGeneration(handle):
fn = _nvmlGetFunctionPointer("nvmlDeviceGetMaxPcieLinkGeneration")
gen = c_uint()
ret = fn(handle, byref(gen))
_nvmlCheckReturn(ret)
return gen.value | [
"def",
"nvmlDeviceGetMaxPcieLinkGeneration",
"(",
"handle",
")",
":",
"fn",
"=",
"_nvmlGetFunctionPointer",
"(",
"\"nvmlDeviceGetMaxPcieLinkGeneration\"",
")",
"gen",
"=",
"c_uint",
"(",
")",
"ret",
"=",
"fn",
"(",
"handle",
",",
"byref",
"(",
"gen",
")",
")",
"_nvmlCheckReturn",
"(",
"ret",
")",
"return",
"gen",
".",
"value"
] | https://github.com/ganglia/gmond_python_modules/blob/2f7fcab3d27926ef4a2feb1b53c09af16a43e729/gpu/nvidia/nvidia-ml-py-3.295.00/pynvml.py#L879-L884 |
|||
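A usage sketch for the binding above; the init/shutdown pair and the handle lookup are the standard pynvml calls:

```python
from pynvml import (nvmlInit, nvmlShutdown, nvmlDeviceGetHandleByIndex,
                    nvmlDeviceGetMaxPcieLinkGeneration)

nvmlInit()
try:
    handle = nvmlDeviceGetHandleByIndex(0)  # first GPU
    print("Max PCIe link generation:", nvmlDeviceGetMaxPcieLinkGeneration(handle))
finally:
    nvmlShutdown()
```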
holzschu/Carnets | 44effb10ddfc6aa5c8b0687582a724ba82c6b547 | Library/lib/python3.7/site-packages/matplotlib-3.0.3-py3.7-macosx-10.9-x86_64.egg/matplotlib/table.py | python | Cell._set_text_position | (self, renderer) | Set text up so it draws in the right place.
Currently supports 'left', 'center' and 'right' | Set text up so it draws in the right place. | [
"Set",
"text",
"up",
"so",
"it",
"draws",
"in",
"the",
"right",
"place",
"."
] | def _set_text_position(self, renderer):
""" Set text up so it draws in the right place.
Currently supports 'left', 'center' and 'right'
"""
bbox = self.get_window_extent(renderer)
l, b, w, h = bbox.bounds
# draw in center vertically
self._text.set_verticalalignment('center')
y = b + (h / 2.0)
# now position horizontally
if self._loc == 'center':
self._text.set_horizontalalignment('center')
x = l + (w / 2.0)
elif self._loc == 'left':
self._text.set_horizontalalignment('left')
x = l + (w * self.PAD)
else:
self._text.set_horizontalalignment('right')
x = l + (w * (1.0 - self.PAD))
self._text.set_position((x, y)) | [
"def",
"_set_text_position",
"(",
"self",
",",
"renderer",
")",
":",
"bbox",
"=",
"self",
".",
"get_window_extent",
"(",
"renderer",
")",
"l",
",",
"b",
",",
"w",
",",
"h",
"=",
"bbox",
".",
"bounds",
"# draw in center vertically",
"self",
".",
"_text",
".",
"set_verticalalignment",
"(",
"'center'",
")",
"y",
"=",
"b",
"+",
"(",
"h",
"/",
"2.0",
")",
"# now position horizontally",
"if",
"self",
".",
"_loc",
"==",
"'center'",
":",
"self",
".",
"_text",
".",
"set_horizontalalignment",
"(",
"'center'",
")",
"x",
"=",
"l",
"+",
"(",
"w",
"/",
"2.0",
")",
"elif",
"self",
".",
"_loc",
"==",
"'left'",
":",
"self",
".",
"_text",
".",
"set_horizontalalignment",
"(",
"'left'",
")",
"x",
"=",
"l",
"+",
"(",
"w",
"*",
"self",
".",
"PAD",
")",
"else",
":",
"self",
".",
"_text",
".",
"set_horizontalalignment",
"(",
"'right'",
")",
"x",
"=",
"l",
"+",
"(",
"w",
"*",
"(",
"1.0",
"-",
"self",
".",
"PAD",
")",
")",
"self",
".",
"_text",
".",
"set_position",
"(",
"(",
"x",
",",
"y",
")",
")"
] | https://github.com/holzschu/Carnets/blob/44effb10ddfc6aa5c8b0687582a724ba82c6b547/Library/lib/python3.7/site-packages/matplotlib-3.0.3-py3.7-macosx-10.9-x86_64.egg/matplotlib/table.py#L103-L126 |
||
n1nj4sec/pupy | a5d766ea81fdfe3bc2c38c9bdaf10e9b75af3b39 | pupy/packages/posix/all/ptyshell.py | python | PtyShell.stop | (self) | [] | def stop(self):
super(PtyShell, self).stop()
self.close() | [
"def",
"stop",
"(",
"self",
")",
":",
"super",
"(",
"PtyShell",
",",
"self",
")",
".",
"stop",
"(",
")",
"self",
".",
"close",
"(",
")"
] | https://github.com/n1nj4sec/pupy/blob/a5d766ea81fdfe3bc2c38c9bdaf10e9b75af3b39/pupy/packages/posix/all/ptyshell.py#L134-L136 |
||||
benknight/hue-alfred-workflow | 4447ba61116caf4a448b50c4bfb866565d66d81e | logic/packages/png/png.py | python | Writer.write_packed | (self, outfile, rows) | return self.write_passes(outfile, rows, packed=True) | Write PNG file to `outfile`. The pixel data comes from `rows`
which should be in boxed row packed format. Each row should be
a sequence of packed bytes.
Technically, this method does work for interlaced images but it
is best avoided. For interlaced images, the rows should be
presented in the order that they appear in the file.
This method should not be used when the source image bit depth
is not one naturally supported by PNG; the bit depth should be
1, 2, 4, 8, or 16. | Write PNG file to `outfile`. The pixel data comes from `rows`
which should be in boxed row packed format. Each row should be
a sequence of packed bytes. | [
"Write",
"PNG",
"file",
"to",
"outfile",
".",
"The",
"pixel",
"data",
"comes",
"from",
"rows",
"which",
"should",
"be",
"in",
"boxed",
"row",
"packed",
"format",
".",
"Each",
"row",
"should",
"be",
"a",
"sequence",
"of",
"packed",
"bytes",
"."
] | def write_packed(self, outfile, rows):
"""
Write PNG file to `outfile`. The pixel data comes from `rows`
which should be in boxed row packed format. Each row should be
a sequence of packed bytes.
Technically, this method does work for interlaced images but it
is best avoided. For interlaced images, the rows should be
presented in the order that they appear in the file.
This method should not be used when the source image bit depth
is not one naturally supported by PNG; the bit depth should be
1, 2, 4, 8, or 16.
"""
if self.rescale:
raise Error("write_packed method not suitable for bit depth %d" %
self.rescale[0])
return self.write_passes(outfile, rows, packed=True) | [
"def",
"write_packed",
"(",
"self",
",",
"outfile",
",",
"rows",
")",
":",
"if",
"self",
".",
"rescale",
":",
"raise",
"Error",
"(",
"\"write_packed method not suitable for bit depth %d\"",
"%",
"self",
".",
"rescale",
"[",
"0",
"]",
")",
"return",
"self",
".",
"write_passes",
"(",
"outfile",
",",
"rows",
",",
"packed",
"=",
"True",
")"
] | https://github.com/benknight/hue-alfred-workflow/blob/4447ba61116caf4a448b50c4bfb866565d66d81e/logic/packages/png/png.py#L826-L844 |
|
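A sketch of `write_packed` above with a 1-bit image; at `width=8` and `bitdepth=1` each packed row is exactly one byte, and the `png.Writer` keywords follow the pypng API:

```python
import png

writer = png.Writer(width=8, height=2, greyscale=True, bitdepth=1)
rows = [b'\xaa', b'\x55']  # pre-packed rows: alternating pixel patterns

with open('checker.png', 'wb') as out:
    writer.write_packed(out, rows)
```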
fortharris/Pcode | 147962d160a834c219e12cb456abc130826468e4 | Extensions/BaseScintilla.py | python | BaseScintilla.extendSelectionToEndOfDisplayLine | (self) | Extend the selection to the end of the displayed line. | Extend the selection to the end of the displayed line. | [
"Extend",
"the",
"selection",
"to",
"the",
"end",
"of",
"the",
"displayed",
"line",
"."
] | def extendSelectionToEndOfDisplayLine(self):
"""
Extend the selection to the end of the displayed line.
"""
self.SendScintilla(QsciScintilla.SCI_LINEENDDISPLAYEXTEND) | [
"def",
"extendSelectionToEndOfDisplayLine",
"(",
"self",
")",
":",
"self",
".",
"SendScintilla",
"(",
"QsciScintilla",
".",
"SCI_LINEENDDISPLAYEXTEND",
")"
] | https://github.com/fortharris/Pcode/blob/147962d160a834c219e12cb456abc130826468e4/Extensions/BaseScintilla.py#L163-L167 |
||
openhatch/oh-mainline | ce29352a034e1223141dcc2f317030bbc3359a51 | vendor/packages/twisted/twisted/mail/pop3.py | python | IServerFactory.cap_EXPIRE | () | Return the minimum number of days messages are retained. | Return the minimum number of days messages are retained. | [
"Return",
"the",
"minimum",
"number",
"of",
"days",
"messages",
"are",
"retained",
"."
] | def cap_EXPIRE():
"""Return the minimum number of days messages are retained.""" | [
"def",
"cap_EXPIRE",
"(",
")",
":"
] | https://github.com/openhatch/oh-mainline/blob/ce29352a034e1223141dcc2f317030bbc3359a51/vendor/packages/twisted/twisted/mail/pop3.py#L842-L843 |
||
adafruit/Adafruit_Blinka | f6a653e6cc34e71c9ef7912b858de1018f08ecf8 | src/adafruit_blinka/microcontroller/generic_linux/spi.py | python | SPI.write_readinto | (
self, buffer_out, buffer_in, out_start=0, out_end=None, in_start=0, in_end=None
) | Perform a half-duplex write from buffer_out and then
read data into buffer_in | Perform a half-duplex write from buffer_out and then
read data into buffer_in | [
"Perform",
"a",
"half",
"-",
"duplex",
"write",
"from",
"buffer_out",
"and",
"then",
"read",
"data",
"into",
"buffer_in"
] | def write_readinto(
self, buffer_out, buffer_in, out_start=0, out_end=None, in_start=0, in_end=None
):
"""Perform a half-duplex write from buffer_out and then
read data into buffer_in
"""
if not buffer_out or not buffer_in:
return
if out_end is None:
out_end = len(buffer_out)
if in_end is None:
in_end = len(buffer_in)
if out_end - out_start != in_end - in_start:
raise RuntimeError("Buffer slices must be of equal length.")
try:
# self._spi.open(self._port, 0)
# self.set_no_cs()
self._spi.max_speed_hz = self.baudrate
self._spi.mode = self.mode
self._spi.bits_per_word = self.bits
data = self._spi.transfer(list(buffer_out[out_start : out_end + 1]))
for i in range((in_end - in_start)):
buffer_in[i + in_start] = data[i]
# self._spi.close()
except FileNotFoundError:
print("Could not open SPI device - check if SPI is enabled in kernel!")
raise | [
"def",
"write_readinto",
"(",
"self",
",",
"buffer_out",
",",
"buffer_in",
",",
"out_start",
"=",
"0",
",",
"out_end",
"=",
"None",
",",
"in_start",
"=",
"0",
",",
"in_end",
"=",
"None",
")",
":",
"if",
"not",
"buffer_out",
"or",
"not",
"buffer_in",
":",
"return",
"if",
"out_end",
"is",
"None",
":",
"out_end",
"=",
"len",
"(",
"buffer_out",
")",
"if",
"in_end",
"is",
"None",
":",
"in_end",
"=",
"len",
"(",
"buffer_in",
")",
"if",
"out_end",
"-",
"out_start",
"!=",
"in_end",
"-",
"in_start",
":",
"raise",
"RuntimeError",
"(",
"\"Buffer slices must be of equal length.\"",
")",
"try",
":",
"# self._spi.open(self._port, 0)",
"# self.set_no_cs()",
"self",
".",
"_spi",
".",
"max_speed_hz",
"=",
"self",
".",
"baudrate",
"self",
".",
"_spi",
".",
"mode",
"=",
"self",
".",
"mode",
"self",
".",
"_spi",
".",
"bits_per_word",
"=",
"self",
".",
"bits",
"data",
"=",
"self",
".",
"_spi",
".",
"transfer",
"(",
"list",
"(",
"buffer_out",
"[",
"out_start",
":",
"out_end",
"+",
"1",
"]",
")",
")",
"for",
"i",
"in",
"range",
"(",
"(",
"in_end",
"-",
"in_start",
")",
")",
":",
"buffer_in",
"[",
"i",
"+",
"in_start",
"]",
"=",
"data",
"[",
"i",
"]",
"# self._spi.close()",
"except",
"FileNotFoundError",
":",
"print",
"(",
"\"Could not open SPI device - check if SPI is enabled in kernel!\"",
")",
"raise"
] | https://github.com/adafruit/Adafruit_Blinka/blob/f6a653e6cc34e71c9ef7912b858de1018f08ecf8/src/adafruit_blinka/microcontroller/generic_linux/spi.py#L110-L136 |
||
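At the CircuitPython layer this method is normally reached through `busio.SPI`; a hedged sketch (pin names depend on the board):

```python
import board
import busio

spi = busio.SPI(board.SCK, board.MOSI, board.MISO)

out_buf = bytearray([0x9F, 0x00, 0x00, 0x00])  # e.g. a flash JEDEC-ID command
in_buf = bytearray(len(out_buf))               # slices must be equal length

while not spi.try_lock():
    pass
try:
    spi.configure(baudrate=1_000_000)
    spi.write_readinto(out_buf, in_buf)  # RuntimeError if lengths differ
finally:
    spi.unlock()
```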
box/box-python-sdk | e8abbb515cfe77d9533df77c807d55d6b494ceaa | boxsdk/object/folder.py | python | Folder.create_upload_session | (self, file_size: int, file_name: str) | return self.translator.translate(
session=self._session,
response_object=response,
) | Creates a new chunked upload session for uploading a new file.
:param file_size:
The size of the file in bytes that will be uploaded.
:param file_name:
The name of the file that will be uploaded.
:returns:
A :class:`UploadSession` object. | Creates a new chunked upload session for uploading a new file. | [
"Creates",
"a",
"new",
"chunked",
"upload",
"session",
"for",
"upload",
"a",
"new",
"file",
"."
] | def create_upload_session(self, file_size: int, file_name: str) -> 'UploadSession':
"""
Creates a new chunked upload session for uploading a new file.
:param file_size:
The size of the file in bytes that will be uploaded.
:param file_name:
The name of the file that will be uploaded.
:returns:
A :class:`UploadSession` object.
"""
url = f'{self.session.api_config.UPLOAD_URL}/files/upload_sessions'
body_params = {
'folder_id': self.object_id,
'file_size': file_size,
'file_name': file_name,
}
response = self._session.post(url, data=json.dumps(body_params)).json()
return self.translator.translate(
session=self._session,
response_object=response,
) | [
"def",
"create_upload_session",
"(",
"self",
",",
"file_size",
":",
"int",
",",
"file_name",
":",
"str",
")",
"->",
"'UploadSession'",
":",
"url",
"=",
"f'{self.session.api_config.UPLOAD_URL}/files/upload_sessions'",
"body_params",
"=",
"{",
"'folder_id'",
":",
"self",
".",
"object_id",
",",
"'file_size'",
":",
"file_size",
",",
"'file_name'",
":",
"file_name",
",",
"}",
"response",
"=",
"self",
".",
"_session",
".",
"post",
"(",
"url",
",",
"data",
"=",
"json",
".",
"dumps",
"(",
"body_params",
")",
")",
".",
"json",
"(",
")",
"return",
"self",
".",
"translator",
".",
"translate",
"(",
"session",
"=",
"self",
".",
"_session",
",",
"response_object",
"=",
"response",
",",
")"
] | https://github.com/box/box-python-sdk/blob/e8abbb515cfe77d9533df77c807d55d6b494ceaa/boxsdk/object/folder.py#L115-L136 |
|
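A usage sketch for the method above; an authenticated boxsdk `client` is assumed, and `'0'` is Box's root folder id:

```python
folder = client.folder(folder_id='0')
session = folder.create_upload_session(
    file_size=26_000_000,
    file_name='large_video.mp4',
)
# The returned UploadSession describes the chunking, e.g. part size and count.
print(session.id, session.part_size)
```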
eduardocereto/pyboleto | 3fab222c3a1d0b2dd4f8a0c3f788ffe8e9c61559 | pyboleto/pdf.py | python | BoletoPDF._codigoBarraI25 | (self, num, x, y) | | Prints a barcode optimized for boletos
The barcode is optimized so that its total width is always the
103mm stipulated by Febraban. | Prints a barcode optimized for boletos | [
"Prints",
"a",
"barcode",
"optimized",
"for",
"boletos"
] | def _codigoBarraI25(self, num, x, y):
"""Imprime Código de barras otimizado para boletos
O código de barras é otmizado para que o comprimeto seja sempre o
estipulado pela febraban de 103mm.
"""
# http://en.wikipedia.org/wiki/Interleaved_2_of_5
altura = 13 * mm
comprimento = 103 * mm
thin_bar = 0.254320987654 * mm # Approximate correct size
bc = I2of5(num,
barWidth=thin_bar,
ratio=3,
barHeight=altura,
bearers=0,
quiet=0,
checksum=0)
# Recalculate the thin_bar size so the barcode has the
# correct length
thin_bar = (thin_bar * comprimento) / bc.width
bc.__init__(num, barWidth=thin_bar)
bc.drawOn(self.pdf_canvas, x, y) | [
"def",
"_codigoBarraI25",
"(",
"self",
",",
"num",
",",
"x",
",",
"y",
")",
":",
"# http://en.wikipedia.org/wiki/Interleaved_2_of_5",
"altura",
"=",
"13",
"*",
"mm",
"comprimento",
"=",
"103",
"*",
"mm",
"thin_bar",
"=",
"0.254320987654",
"*",
"mm",
"# Tamanho correto aproximado",
"bc",
"=",
"I2of5",
"(",
"num",
",",
"barWidth",
"=",
"thin_bar",
",",
"ratio",
"=",
"3",
",",
"barHeight",
"=",
"altura",
",",
"bearers",
"=",
"0",
",",
"quiet",
"=",
"0",
",",
"checksum",
"=",
"0",
")",
"# Recalcula o tamanho do thin_bar para que o cod de barras tenha o",
"# comprimento correto",
"thin_bar",
"=",
"(",
"thin_bar",
"*",
"comprimento",
")",
"/",
"bc",
".",
"width",
"bc",
".",
"__init__",
"(",
"num",
",",
"barWidth",
"=",
"thin_bar",
")",
"bc",
".",
"drawOn",
"(",
"self",
".",
"pdf_canvas",
",",
"x",
",",
"y",
")"
] | https://github.com/eduardocereto/pyboleto/blob/3fab222c3a1d0b2dd4f8a0c3f788ffe8e9c61559/pyboleto/pdf.py#L849-L876 |
||
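The rescale in the method above is linear: the rendered width is proportional to the thin-bar width, so the corrected value is `thin_bar * target / measured`. A standalone sketch of that arithmetic (the measured width is hypothetical):

```python
target_width = 103.0          # mm, the width mandated by Febraban
first_guess = 0.254320987654  # mm, initial thin-bar width
measured_width = 110.0        # mm, hypothetical width rendered at first_guess

corrected = first_guess * target_width / measured_width
# Re-rendering with `corrected` gives a barcode exactly 103 mm wide, since
# total width scales linearly with the thin-bar width.
print(round(corrected, 6), "mm")
```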
kupferlauncher/kupfer | 1c1e9bcbce05a82f503f68f8b3955c20b02639b3 | waflib/TaskGen.py | python | task_gen.get_name | (self) | If the attribute ``name`` is not set on the instance,
the name is computed from the target name::
def build(bld):
x = bld(name='foo')
x.get_name() # foo
y = bld(target='bar')
y.get_name() # bar
:rtype: string
:return: name of this task generator | If the attribute ``name`` is not set on the instance,
the name is computed from the target name:: | [
"If",
"the",
"attribute",
"name",
"is",
"not",
"set",
"on",
"the",
"instance",
"the",
"name",
"is",
"computed",
"from",
"the",
"target",
"name",
"::"
] | def get_name(self):
"""
If the attribute ``name`` is not set on the instance,
the name is computed from the target name::
def build(bld):
x = bld(name='foo')
x.get_name() # foo
y = bld(target='bar')
y.get_name() # bar
:rtype: string
:return: name of this task generator
"""
try:
return self._name
except AttributeError:
if isinstance(self.target, list):
lst = [str(x) for x in self.target]
name = self._name = ','.join(lst)
else:
name = self._name = str(self.target)
return name | [
"def",
"get_name",
"(",
"self",
")",
":",
"try",
":",
"return",
"self",
".",
"_name",
"except",
"AttributeError",
":",
"if",
"isinstance",
"(",
"self",
".",
"target",
",",
"list",
")",
":",
"lst",
"=",
"[",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"self",
".",
"target",
"]",
"name",
"=",
"self",
".",
"_name",
"=",
"','",
".",
"join",
"(",
"lst",
")",
"else",
":",
"name",
"=",
"self",
".",
"_name",
"=",
"str",
"(",
"self",
".",
"target",
")",
"return",
"name"
] | https://github.com/kupferlauncher/kupfer/blob/1c1e9bcbce05a82f503f68f8b3955c20b02639b3/waflib/TaskGen.py#L119-L141 |
||
aws-samples/aws-kube-codesuite | ab4e5ce45416b83bffb947ab8d234df5437f4fca | src/kubernetes/client/models/v1_service_account.py | python | V1ServiceAccount.to_str | (self) | return pformat(self.to_dict()) | Returns the string representation of the model | Returns the string representation of the model | [
"Returns",
"the",
"string",
"representation",
"of",
"the",
"model"
] | def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict()) | [
"def",
"to_str",
"(",
"self",
")",
":",
"return",
"pformat",
"(",
"self",
".",
"to_dict",
"(",
")",
")"
] | https://github.com/aws-samples/aws-kube-codesuite/blob/ab4e5ce45416b83bffb947ab8d234df5437f4fca/src/kubernetes/client/models/v1_service_account.py#L222-L226 |
|
prody/ProDy | b24bbf58aa8fffe463c8548ae50e3955910e5b7f | prody/proteins/pdbfile.py | python | writePDBStream | (stream, atoms, csets=None, **kwargs) | Write *atoms* in PDB format to a *stream*.
:arg stream: anything that implements a :meth:`write` method (e.g. file,
buffer, stdout)
:arg renumber: whether to renumber atoms with serial indices
Default is **True**
:type renumber: bool | Write *atoms* in PDB format to a *stream*. | [
"Write",
"*",
"atoms",
"*",
"in",
"PDB",
"format",
"to",
"a",
"*",
"stream",
"*",
"."
] | def writePDBStream(stream, atoms, csets=None, **kwargs):
"""Write *atoms* in PDB format to a *stream*.
:arg stream: anything that implements a :meth:`write` method (e.g. file,
buffer, stdout)
:arg renumber: whether to renumber atoms with serial indices
Default is **True**
:type renumber: bool
"""
renumber = kwargs.get('renumber', True)
remark = str(atoms)
try:
coordsets = atoms._getCoordsets(csets)
except AttributeError:
try:
coordsets = atoms._getCoords()
except AttributeError:
raise TypeError('atoms must be an object with coordinate sets')
if coordsets is not None:
coordsets = [coordsets]
else:
if coordsets.ndim == 2:
coordsets = [coordsets]
if coordsets is None:
raise ValueError('atoms does not have any coordinate sets')
try:
acsi = atoms.getACSIndex()
except AttributeError:
try:
atoms = atoms.getAtoms()
except AttributeError:
raise TypeError('atoms must be an Atomic instance or an object '
'with `getAtoms` method')
else:
if atoms is None:
raise ValueError('atoms is not associated with an Atomic '
'instance')
try:
acsi = atoms.getACSIndex()
except AttributeError:
raise TypeError('atoms does not have a valid type')
try:
atoms.getIndex()
except AttributeError:
pass
else:
atoms = atoms.select('all')
n_atoms = atoms.numAtoms()
hybrid36 = kwargs.get('hybrid36', False)
occupancy = kwargs.get('occupancy')
if occupancy is None:
occupancies = atoms._getOccupancies()
if occupancies is None:
occupancies = np.zeros(n_atoms, float)
else:
occupancies = np.array(occupancy)
if len(occupancies) != n_atoms:
raise ValueError('len(occupancy) must be equal to number of atoms')
beta = kwargs.get('beta')
if beta is None:
bfactors = atoms._getBetas()
if bfactors is None:
bfactors = np.zeros(n_atoms, float)
else:
bfactors = np.array(beta)
if len(bfactors) != n_atoms:
raise ValueError('len(beta) must be equal to number of atoms')
atomnames = atoms.getNames()
if atomnames is None:
raise ValueError('atom names are not set')
for i, an in enumerate(atomnames):
if len(an) < 4:
atomnames[i] = ' ' + an
s_or_u = np.array(['a']).dtype.char
altlocs = atoms._getAltlocs()
if altlocs is None:
altlocs = np.zeros(n_atoms, s_or_u + '1')
resnames = atoms._getResnames()
if resnames is None:
resnames = ['UNK'] * n_atoms
chainids = atoms._getChids()
if chainids is None:
chainids = np.zeros(n_atoms, s_or_u + '1')
resnums = atoms._getResnums()
if resnums is None:
resnums = np.ones(n_atoms, int)
serials = atoms._getSerials()
if serials is None or renumber:
serials = np.arange(n_atoms, dtype=int) + 1
icodes = atoms._getIcodes()
if icodes is None:
icodes = np.zeros(n_atoms, s_or_u + '1')
hetero = ['ATOM'] * n_atoms
heteroflags = atoms._getFlags('hetatm')
if heteroflags is None:
heteroflags = atoms._getFlags('hetero')
if heteroflags is not None:
hetero = np.array(hetero, s_or_u + '6')
hetero[heteroflags] = 'HETATM'
elements = atoms._getElements()
if elements is None:
elements = np.zeros(n_atoms, s_or_u + '1')
else:
elements = np.char.rjust(elements, 2)
segments = atoms._getSegnames()
if segments is None:
segments = np.zeros(n_atoms, s_or_u + '6')
charges = atoms._getCharges()
charges2 = np.empty(n_atoms, s_or_u + '2')
if charges is not None:
for i, charge in enumerate(charges):
charges2[i] = str(abs(int(charge)))
if np.sign(charge) == -1:
charges2[i] += '-'
else:
charges2[i] += '+'
if charges2[i] == '0+':
charges2[i] = ' '
anisous = atoms._getAnisous()
if anisous is not None:
anisous = np.array(anisous * 10000, dtype=int)
# write remarks
stream.write('REMARK {0}\n'.format(remark))
# write secondary structures (if any)
secondary = kwargs.get('secondary', True)
secstrs = atoms._getSecstrs()
if secstrs is not None and secondary:
secindices = atoms._getSecindices()
secclasses = atoms._getSecclasses()
secids = atoms._getSecids()
# write helices
for i in range(1,max(secindices)+1):
torf = np.logical_and(isHelix(secstrs), secindices==i)
if torf.any():
helix_resnums = resnums[torf]
helix_chainids = chainids[torf]
helix_resnames = resnames[torf]
helix_secclasses = secclasses[torf]
helix_secids = secids[torf]
helix_icodes = icodes[torf]
L = helix_resnums[-1] - helix_resnums[0] + 1
stream.write(HELIXLINE.format(serNum=i, helixID=helix_secids[0],
initResName=helix_resnames[0], initChainID=helix_chainids[0],
initSeqNum=helix_resnums[0], initICode=helix_icodes[0],
endResName=helix_resnames[-1], endChainID=helix_chainids[-1],
endSeqNum=helix_resnums[-1], endICode=helix_icodes[-1],
helixClass=helix_secclasses[0], length=L))
# write strands
torf_all_sheets = isSheet(secstrs)
sheet_secids = secids[torf_all_sheets]
for sheet_id in np.unique(sheet_secids):
torf_strands_in_sheet = np.logical_and(torf_all_sheets, secids==sheet_id)
strand_indices = secindices[torf_strands_in_sheet]
numStrands = len(np.unique(strand_indices))
for i in np.unique(strand_indices):
torf_strand = np.logical_and(torf_strands_in_sheet, secindices==i)
strand_resnums = resnums[torf_strand]
strand_chainids = chainids[torf_strand]
strand_resnames = resnames[torf_strand]
strand_secclasses = secclasses[torf_strand]
strand_icodes = icodes[torf_strand]
stream.write(SHEETLINE.format(strand=i, sheetID=sheet_id, numStrands=numStrands,
initResName=strand_resnames[0], initChainID=strand_chainids[0],
initSeqNum=strand_resnums[0], initICode=strand_icodes[0],
endResName=strand_resnames[-1], endChainID=strand_chainids[-1],
endSeqNum=strand_resnums[-1], endICode=strand_icodes[-1],
sense=strand_secclasses[0]))
pass
# write atoms
multi = len(coordsets) > 1
write = stream.write
for m, coords in enumerate(coordsets):
if multi:
write('MODEL{0:9d}\n'.format(m+1))
if not hybrid36:
# We need to check whether serial and residue numbers become hexadecimal
reached_max_n_atom = False
reached_max_n_res = False
pdbline = PDBLINE_LT100K
anisouline = ANISOULINE_LT100K
else:
warned_hybrid36 = False
warned_5_digit = False
for i, xyz in enumerate(coords):
if hybrid36:
pdbline = PDBLINE_H36
anisouline = ANISOULINE_H36
if not warned_hybrid36:
LOGGER.warn('hybrid36 format is being used')
warned_hybrid36 = True
else:
if not (reached_max_n_atom or reached_max_n_res) and (i == MAX_N_ATOM or serials[i] > MAX_N_ATOM):
reached_max_n_atom = True
pdbline = PDBLINE_GE100K
anisouline = ANISOULINE_GE100K
LOGGER.warn('Indices are exceeding 99999 and hexadecimal format is being used for indices')
elif not (reached_max_n_atom or reached_max_n_res) and resnums[i] > MAX_N_RES:
reached_max_n_res = True
pdbline = PDBLINE_GE10K
anisouline = ANISOULINE_GE10K
LOGGER.warn('Resnums are exceeding 9999 and hexadecimal format is being used for resnums')
elif reached_max_n_atom and not reached_max_n_res and resnums[i] > MAX_N_RES:
reached_max_n_res = True
pdbline = PDBLINE_GE100K_GE10K
anisouline = ANISOULINE_GE100K_GE10K
LOGGER.warn('Resnums are exceeding 9999 and hexadecimal format is being used for indices and resnums')
elif reached_max_n_res and not reached_max_n_atom and (i == MAX_N_ATOM or serials[i] > MAX_N_ATOM):
reached_max_n_atom = True
pdbline = PDBLINE_GE100K_GE10K
anisouline = ANISOULINE_GE100K_GE10K
LOGGER.warn('Indices are exceeding 99999 and hexadecimal format is being used for indices and resnums')
if hybrid36:
serial = decToHybrid36(serials[i])
resnum = decToHybrid36(resnums[i], resnum=True)
else:
serial = serials[i]
resnum = resnums[i]
if pdbline == PDBLINE_LT100K or hybrid36:
if len(str(resnum)) == 5:
if icodes[i] == '':
icodes[i] = str(resnum)[4]
if not warned_5_digit:
LOGGER.warn('Storing 5-digit resnums using insertion codes')
warned_5_digit = True
else:
LOGGER.warn('Truncating 5-digit resnum as insertion code is busy.')
resnum = int(str(resnum)[:4])
elif len(str(resnum)) > 5:
if not warned_5_digit:
LOGGER.warn('Truncating {0}-digit resnum as too long to be '
'supported by insertion code.'.format(len(str(resnum))))
warned_5_digit = True
resnum = int(str(resnum)[:4])
else:
final_resnum = '%4x' % int(resnum)
if len(str(final_resnum)) == 5:
if icodes[i] == '':
icodes[i] = str(final_resnum)[4]
if not warned_5_digit:
LOGGER.warn('Storing 5-digit hex resnums using insertion codes')
warned_5_digit = True
else:
LOGGER.warn('Truncating 5-digit hex resnum as insertion code is busy.')
resnum = int(str(final_resnum)[:4], 16)
elif len(str(final_resnum)) > 5:
if not warned_5_digit:
LOGGER.warn('Truncating {0}-digit hex resnum ({1}) as too long to be '
'supported by insertion code.'.format(len(str(final_resnum)),
final_resnum))
warned_5_digit = True
resnum = int(str(final_resnum)[:4], 16)
write(pdbline % (hetero[i], serial,
atomnames[i], altlocs[i],
resnames[i], chainids[i], resnum,
icodes[i],
xyz[0], xyz[1], xyz[2],
occupancies[i], bfactors[i],
segments[i], elements[i], charges2[i]))
if anisous is not None:
anisou = anisous[i]
write(anisouline % ("ANISOU", serial,
atomnames[i], altlocs[i],
resnames[i], chainids[i], resnum,
icodes[i],
anisou[0], anisou[1], anisou[2],
anisou[3], anisou[4], anisou[5],
segments[i], elements[i], charges2[i]))
if atoms.getFlags('pdbter') is not None and atoms.getFlags('pdbter')[i]:
write('TER\n')
if multi:
write('ENDMDL\n')
altlocs = np.zeros(n_atoms, s_or_u + '1') | [
"def",
"writePDBStream",
"(",
"stream",
",",
"atoms",
",",
"csets",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"renumber",
"=",
"kwargs",
".",
"get",
"(",
"'renumber'",
",",
"True",
")",
"remark",
"=",
"str",
"(",
"atoms",
")",
"try",
":",
"coordsets",
"=",
"atoms",
".",
"_getCoordsets",
"(",
"csets",
")",
"except",
"AttributeError",
":",
"try",
":",
"coordsets",
"=",
"atoms",
".",
"_getCoords",
"(",
")",
"except",
"AttributeError",
":",
"raise",
"TypeError",
"(",
"'atoms must be an object with coordinate sets'",
")",
"if",
"coordsets",
"is",
"not",
"None",
":",
"coordsets",
"=",
"[",
"coordsets",
"]",
"else",
":",
"if",
"coordsets",
".",
"ndim",
"==",
"2",
":",
"coordsets",
"=",
"[",
"coordsets",
"]",
"if",
"coordsets",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'atoms does not have any coordinate sets'",
")",
"try",
":",
"acsi",
"=",
"atoms",
".",
"getACSIndex",
"(",
")",
"except",
"AttributeError",
":",
"try",
":",
"atoms",
"=",
"atoms",
".",
"getAtoms",
"(",
")",
"except",
"AttributeError",
":",
"raise",
"TypeError",
"(",
"'atoms must be an Atomic instance or an object '",
"'with `getAtoms` method'",
")",
"else",
":",
"if",
"atoms",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'atoms is not associated with an Atomic '",
"'instance'",
")",
"try",
":",
"acsi",
"=",
"atoms",
".",
"getACSIndex",
"(",
")",
"except",
"AttributeError",
":",
"raise",
"TypeError",
"(",
"'atoms does not have a valid type'",
")",
"try",
":",
"atoms",
".",
"getIndex",
"(",
")",
"except",
"AttributeError",
":",
"pass",
"else",
":",
"atoms",
"=",
"atoms",
".",
"select",
"(",
"'all'",
")",
"n_atoms",
"=",
"atoms",
".",
"numAtoms",
"(",
")",
"hybrid36",
"=",
"kwargs",
".",
"get",
"(",
"'hybrid36'",
",",
"False",
")",
"occupancy",
"=",
"kwargs",
".",
"get",
"(",
"'occupancy'",
")",
"if",
"occupancy",
"is",
"None",
":",
"occupancies",
"=",
"atoms",
".",
"_getOccupancies",
"(",
")",
"if",
"occupancies",
"is",
"None",
":",
"occupancies",
"=",
"np",
".",
"zeros",
"(",
"n_atoms",
",",
"float",
")",
"else",
":",
"occupancies",
"=",
"np",
".",
"array",
"(",
"occupancy",
")",
"if",
"len",
"(",
"occupancies",
")",
"!=",
"n_atoms",
":",
"raise",
"ValueError",
"(",
"'len(occupancy) must be equal to number of atoms'",
")",
"beta",
"=",
"kwargs",
".",
"get",
"(",
"'beta'",
")",
"if",
"beta",
"is",
"None",
":",
"bfactors",
"=",
"atoms",
".",
"_getBetas",
"(",
")",
"if",
"bfactors",
"is",
"None",
":",
"bfactors",
"=",
"np",
".",
"zeros",
"(",
"n_atoms",
",",
"float",
")",
"else",
":",
"bfactors",
"=",
"np",
".",
"array",
"(",
"beta",
")",
"if",
"len",
"(",
"bfactors",
")",
"!=",
"n_atoms",
":",
"raise",
"ValueError",
"(",
"'len(beta) must be equal to number of atoms'",
")",
"atomnames",
"=",
"atoms",
".",
"getNames",
"(",
")",
"if",
"atomnames",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'atom names are not set'",
")",
"for",
"i",
",",
"an",
"in",
"enumerate",
"(",
"atomnames",
")",
":",
"if",
"len",
"(",
"an",
")",
"<",
"4",
":",
"atomnames",
"[",
"i",
"]",
"=",
"' '",
"+",
"an",
"s_or_u",
"=",
"np",
".",
"array",
"(",
"[",
"'a'",
"]",
")",
".",
"dtype",
".",
"char",
"altlocs",
"=",
"atoms",
".",
"_getAltlocs",
"(",
")",
"if",
"altlocs",
"is",
"None",
":",
"altlocs",
"=",
"np",
".",
"zeros",
"(",
"n_atoms",
",",
"s_or_u",
"+",
"'1'",
")",
"resnames",
"=",
"atoms",
".",
"_getResnames",
"(",
")",
"if",
"resnames",
"is",
"None",
":",
"resnames",
"=",
"[",
"'UNK'",
"]",
"*",
"n_atoms",
"chainids",
"=",
"atoms",
".",
"_getChids",
"(",
")",
"if",
"chainids",
"is",
"None",
":",
"chainids",
"=",
"np",
".",
"zeros",
"(",
"n_atoms",
",",
"s_or_u",
"+",
"'1'",
")",
"resnums",
"=",
"atoms",
".",
"_getResnums",
"(",
")",
"if",
"resnums",
"is",
"None",
":",
"resnums",
"=",
"np",
".",
"ones",
"(",
"n_atoms",
",",
"int",
")",
"serials",
"=",
"atoms",
".",
"_getSerials",
"(",
")",
"if",
"serials",
"is",
"None",
"or",
"renumber",
":",
"serials",
"=",
"np",
".",
"arange",
"(",
"n_atoms",
",",
"dtype",
"=",
"int",
")",
"+",
"1",
"icodes",
"=",
"atoms",
".",
"_getIcodes",
"(",
")",
"if",
"icodes",
"is",
"None",
":",
"icodes",
"=",
"np",
".",
"zeros",
"(",
"n_atoms",
",",
"s_or_u",
"+",
"'1'",
")",
"hetero",
"=",
"[",
"'ATOM'",
"]",
"*",
"n_atoms",
"heteroflags",
"=",
"atoms",
".",
"_getFlags",
"(",
"'hetatm'",
")",
"if",
"heteroflags",
"is",
"None",
":",
"heteroflags",
"=",
"atoms",
".",
"_getFlags",
"(",
"'hetero'",
")",
"if",
"heteroflags",
"is",
"not",
"None",
":",
"hetero",
"=",
"np",
".",
"array",
"(",
"hetero",
",",
"s_or_u",
"+",
"'6'",
")",
"hetero",
"[",
"heteroflags",
"]",
"=",
"'HETATM'",
"elements",
"=",
"atoms",
".",
"_getElements",
"(",
")",
"if",
"elements",
"is",
"None",
":",
"elements",
"=",
"np",
".",
"zeros",
"(",
"n_atoms",
",",
"s_or_u",
"+",
"'1'",
")",
"else",
":",
"elements",
"=",
"np",
".",
"char",
".",
"rjust",
"(",
"elements",
",",
"2",
")",
"segments",
"=",
"atoms",
".",
"_getSegnames",
"(",
")",
"if",
"segments",
"is",
"None",
":",
"segments",
"=",
"np",
".",
"zeros",
"(",
"n_atoms",
",",
"s_or_u",
"+",
"'6'",
")",
"charges",
"=",
"atoms",
".",
"_getCharges",
"(",
")",
"charges2",
"=",
"np",
".",
"empty",
"(",
"n_atoms",
",",
"s_or_u",
"+",
"'2'",
")",
"if",
"charges",
"is",
"not",
"None",
":",
"for",
"i",
",",
"charge",
"in",
"enumerate",
"(",
"charges",
")",
":",
"charges2",
"[",
"i",
"]",
"=",
"str",
"(",
"abs",
"(",
"int",
"(",
"charge",
")",
")",
")",
"if",
"np",
".",
"sign",
"(",
"charge",
")",
"==",
"-",
"1",
":",
"charges2",
"[",
"i",
"]",
"+=",
"'-'",
"else",
":",
"charges2",
"[",
"i",
"]",
"+=",
"'+'",
"if",
"charges2",
"[",
"i",
"]",
"==",
"'0+'",
":",
"charges2",
"[",
"i",
"]",
"=",
"' '",
"anisous",
"=",
"atoms",
".",
"_getAnisous",
"(",
")",
"if",
"anisous",
"is",
"not",
"None",
":",
"anisous",
"=",
"np",
".",
"array",
"(",
"anisous",
"*",
"10000",
",",
"dtype",
"=",
"int",
")",
"# write remarks",
"stream",
".",
"write",
"(",
"'REMARK {0}\\n'",
".",
"format",
"(",
"remark",
")",
")",
"# write secondary structures (if any)",
"secondary",
"=",
"kwargs",
".",
"get",
"(",
"'secondary'",
",",
"True",
")",
"secstrs",
"=",
"atoms",
".",
"_getSecstrs",
"(",
")",
"if",
"secstrs",
"is",
"not",
"None",
"and",
"secondary",
":",
"secindices",
"=",
"atoms",
".",
"_getSecindices",
"(",
")",
"secclasses",
"=",
"atoms",
".",
"_getSecclasses",
"(",
")",
"secids",
"=",
"atoms",
".",
"_getSecids",
"(",
")",
"# write helices",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"max",
"(",
"secindices",
")",
"+",
"1",
")",
":",
"torf",
"=",
"np",
".",
"logical_and",
"(",
"isHelix",
"(",
"secstrs",
")",
",",
"secindices",
"==",
"i",
")",
"if",
"torf",
".",
"any",
"(",
")",
":",
"helix_resnums",
"=",
"resnums",
"[",
"torf",
"]",
"helix_chainids",
"=",
"chainids",
"[",
"torf",
"]",
"helix_resnames",
"=",
"resnames",
"[",
"torf",
"]",
"helix_secclasses",
"=",
"secclasses",
"[",
"torf",
"]",
"helix_secids",
"=",
"secids",
"[",
"torf",
"]",
"helix_icodes",
"=",
"icodes",
"[",
"torf",
"]",
"L",
"=",
"helix_resnums",
"[",
"-",
"1",
"]",
"-",
"helix_resnums",
"[",
"0",
"]",
"+",
"1",
"stream",
".",
"write",
"(",
"HELIXLINE",
".",
"format",
"(",
"serNum",
"=",
"i",
",",
"helixID",
"=",
"helix_secids",
"[",
"0",
"]",
",",
"initResName",
"=",
"helix_resnames",
"[",
"0",
"]",
",",
"initChainID",
"=",
"helix_chainids",
"[",
"0",
"]",
",",
"initSeqNum",
"=",
"helix_resnums",
"[",
"0",
"]",
",",
"initICode",
"=",
"helix_icodes",
"[",
"0",
"]",
",",
"endResName",
"=",
"helix_resnames",
"[",
"-",
"1",
"]",
",",
"endChainID",
"=",
"helix_chainids",
"[",
"-",
"1",
"]",
",",
"endSeqNum",
"=",
"helix_resnums",
"[",
"-",
"1",
"]",
",",
"endICode",
"=",
"helix_icodes",
"[",
"-",
"1",
"]",
",",
"helixClass",
"=",
"helix_secclasses",
"[",
"0",
"]",
",",
"length",
"=",
"L",
")",
")",
"# write strands",
"torf_all_sheets",
"=",
"isSheet",
"(",
"secstrs",
")",
"sheet_secids",
"=",
"secids",
"[",
"torf_all_sheets",
"]",
"for",
"sheet_id",
"in",
"np",
".",
"unique",
"(",
"sheet_secids",
")",
":",
"torf_strands_in_sheet",
"=",
"np",
".",
"logical_and",
"(",
"torf_all_sheets",
",",
"secids",
"==",
"sheet_id",
")",
"strand_indices",
"=",
"secindices",
"[",
"torf_strands_in_sheet",
"]",
"numStrands",
"=",
"len",
"(",
"np",
".",
"unique",
"(",
"strand_indices",
")",
")",
"for",
"i",
"in",
"np",
".",
"unique",
"(",
"strand_indices",
")",
":",
"torf_strand",
"=",
"np",
".",
"logical_and",
"(",
"torf_strands_in_sheet",
",",
"secindices",
"==",
"i",
")",
"strand_resnums",
"=",
"resnums",
"[",
"torf_strand",
"]",
"strand_chainids",
"=",
"chainids",
"[",
"torf_strand",
"]",
"strand_resnames",
"=",
"resnames",
"[",
"torf_strand",
"]",
"strand_secclasses",
"=",
"secclasses",
"[",
"torf_strand",
"]",
"strand_icodes",
"=",
"icodes",
"[",
"torf_strand",
"]",
"stream",
".",
"write",
"(",
"SHEETLINE",
".",
"format",
"(",
"strand",
"=",
"i",
",",
"sheetID",
"=",
"sheet_id",
",",
"numStrands",
"=",
"numStrands",
",",
"initResName",
"=",
"strand_resnames",
"[",
"0",
"]",
",",
"initChainID",
"=",
"strand_chainids",
"[",
"0",
"]",
",",
"initSeqNum",
"=",
"strand_resnums",
"[",
"0",
"]",
",",
"initICode",
"=",
"strand_icodes",
"[",
"0",
"]",
",",
"endResName",
"=",
"strand_resnames",
"[",
"-",
"1",
"]",
",",
"endChainID",
"=",
"strand_chainids",
"[",
"-",
"1",
"]",
",",
"endSeqNum",
"=",
"strand_resnums",
"[",
"-",
"1",
"]",
",",
"endICode",
"=",
"strand_icodes",
"[",
"-",
"1",
"]",
",",
"sense",
"=",
"strand_secclasses",
"[",
"0",
"]",
")",
")",
"pass",
"# write atoms",
"multi",
"=",
"len",
"(",
"coordsets",
")",
">",
"1",
"write",
"=",
"stream",
".",
"write",
"for",
"m",
",",
"coords",
"in",
"enumerate",
"(",
"coordsets",
")",
":",
"if",
"multi",
":",
"write",
"(",
"'MODEL{0:9d}\\n'",
".",
"format",
"(",
"m",
"+",
"1",
")",
")",
"if",
"not",
"hybrid36",
":",
"# We need to check whether serial and residue numbers become hexadecimal",
"reached_max_n_atom",
"=",
"False",
"reached_max_n_res",
"=",
"False",
"pdbline",
"=",
"PDBLINE_LT100K",
"anisouline",
"=",
"ANISOULINE_LT100K",
"else",
":",
"warned_hybrid36",
"=",
"False",
"warned_5_digit",
"=",
"False",
"for",
"i",
",",
"xyz",
"in",
"enumerate",
"(",
"coords",
")",
":",
"if",
"hybrid36",
":",
"pdbline",
"=",
"PDBLINE_H36",
"anisouline",
"=",
"ANISOULINE_H36",
"if",
"not",
"warned_hybrid36",
":",
"LOGGER",
".",
"warn",
"(",
"'hybrid36 format is being used'",
")",
"warned_hybrid36",
"=",
"True",
"else",
":",
"if",
"not",
"(",
"reached_max_n_atom",
"or",
"reached_max_n_res",
")",
"and",
"(",
"i",
"==",
"MAX_N_ATOM",
"or",
"serials",
"[",
"i",
"]",
">",
"MAX_N_ATOM",
")",
":",
"reached_max_n_atom",
"=",
"True",
"pdbline",
"=",
"PDBLINE_GE100K",
"anisouline",
"=",
"ANISOULINE_GE100K",
"LOGGER",
".",
"warn",
"(",
"'Indices are exceeding 99999 and hexadecimal format is being used for indices'",
")",
"elif",
"not",
"(",
"reached_max_n_atom",
"or",
"reached_max_n_res",
")",
"and",
"resnums",
"[",
"i",
"]",
">",
"MAX_N_RES",
":",
"reached_max_n_res",
"=",
"True",
"pdbline",
"=",
"PDBLINE_GE10K",
"anisouline",
"=",
"ANISOULINE_GE10K",
"LOGGER",
".",
"warn",
"(",
"'Resnums are exceeding 9999 and hexadecimal format is being used for resnums'",
")",
"elif",
"reached_max_n_atom",
"and",
"not",
"reached_max_n_res",
"and",
"resnums",
"[",
"i",
"]",
">",
"MAX_N_RES",
":",
"reached_max_n_res",
"=",
"True",
"pdbline",
"=",
"PDBLINE_GE100K_GE10K",
"anisouline",
"=",
"ANISOULINE_GE100K_GE10K",
"LOGGER",
".",
"warn",
"(",
"'Resnums are exceeding 9999 and hexadecimal format is being used for indices and resnums'",
")",
"elif",
"reached_max_n_res",
"and",
"not",
"reached_max_n_atom",
"and",
"(",
"i",
"==",
"MAX_N_ATOM",
"or",
"serials",
"[",
"i",
"]",
">",
"MAX_N_ATOM",
")",
":",
"reached_max_n_atom",
"=",
"True",
"pdbline",
"=",
"PDBLINE_GE100K_GE10K",
"anisouline",
"=",
"ANISOULINE_GE100K_GE10K",
"LOGGER",
".",
"warn",
"(",
"'Indices are exceeding 99999 and hexadecimal format is being used for indices and resnums'",
")",
"if",
"hybrid36",
":",
"serial",
"=",
"decToHybrid36",
"(",
"serials",
"[",
"i",
"]",
")",
"resnum",
"=",
"decToHybrid36",
"(",
"resnums",
"[",
"i",
"]",
",",
"resnum",
"=",
"True",
")",
"else",
":",
"serial",
"=",
"serials",
"[",
"i",
"]",
"resnum",
"=",
"resnums",
"[",
"i",
"]",
"if",
"pdbline",
"==",
"PDBLINE_LT100K",
"or",
"hybrid36",
":",
"if",
"len",
"(",
"str",
"(",
"resnum",
")",
")",
"==",
"5",
":",
"if",
"icodes",
"[",
"i",
"]",
"==",
"''",
":",
"icodes",
"[",
"i",
"]",
"=",
"str",
"(",
"resnum",
")",
"[",
"4",
"]",
"if",
"not",
"warned_5_digit",
":",
"LOGGER",
".",
"warn",
"(",
"'Storing 5-digit resnums using insertion codes'",
")",
"warned_5_digit",
"=",
"True",
"else",
":",
"LOGGER",
".",
"warn",
"(",
"'Truncating 5-digit resnum as insertion code is busy.'",
")",
"resnum",
"=",
"int",
"(",
"str",
"(",
"resnum",
")",
"[",
":",
"4",
"]",
")",
"elif",
"len",
"(",
"str",
"(",
"resnum",
")",
")",
">",
"5",
":",
"if",
"not",
"warned_5_digit",
":",
"LOGGER",
".",
"warn",
"(",
"'Truncating {0}-digit resnum as too long to be '",
"'supported by insertion code.'",
".",
"format",
"(",
"len",
"(",
"str",
"(",
"resnum",
")",
")",
")",
")",
"warned_5_digit",
"=",
"True",
"resnum",
"=",
"int",
"(",
"str",
"(",
"resnum",
")",
"[",
":",
"4",
"]",
")",
"else",
":",
"final_resnum",
"=",
"'%4x'",
"%",
"int",
"(",
"resnum",
")",
"if",
"len",
"(",
"str",
"(",
"final_resnum",
")",
")",
"==",
"5",
":",
"if",
"icodes",
"[",
"i",
"]",
"==",
"''",
":",
"icodes",
"[",
"i",
"]",
"=",
"str",
"(",
"final_resnum",
")",
"[",
"4",
"]",
"if",
"not",
"warned_5_digit",
":",
"LOGGER",
".",
"warn",
"(",
"'Storing 5-digit hex resnums using insertion codes'",
")",
"warned_5_digit",
"=",
"True",
"else",
":",
"LOGGER",
".",
"warn",
"(",
"'Truncating 5-digit hex resnum as insertion code is busy.'",
")",
"resnum",
"=",
"int",
"(",
"str",
"(",
"final_resnum",
")",
"[",
":",
"4",
"]",
",",
"16",
")",
"elif",
"len",
"(",
"str",
"(",
"final_resnum",
")",
")",
">",
"5",
":",
"if",
"not",
"warned_5_digit",
":",
"LOGGER",
".",
"warn",
"(",
"'Truncating {0}-digit hex resnum ({1}) as too long to be '",
"'supported by insertion code.'",
".",
"format",
"(",
"len",
"(",
"str",
"(",
"final_resnum",
")",
")",
",",
"final_resnum",
")",
")",
"warned_5_digit",
"=",
"True",
"resnum",
"=",
"int",
"(",
"str",
"(",
"final_resnum",
")",
"[",
":",
"4",
"]",
",",
"16",
")",
"write",
"(",
"pdbline",
"%",
"(",
"hetero",
"[",
"i",
"]",
",",
"serial",
",",
"atomnames",
"[",
"i",
"]",
",",
"altlocs",
"[",
"i",
"]",
",",
"resnames",
"[",
"i",
"]",
",",
"chainids",
"[",
"i",
"]",
",",
"resnum",
",",
"icodes",
"[",
"i",
"]",
",",
"xyz",
"[",
"0",
"]",
",",
"xyz",
"[",
"1",
"]",
",",
"xyz",
"[",
"2",
"]",
",",
"occupancies",
"[",
"i",
"]",
",",
"bfactors",
"[",
"i",
"]",
",",
"segments",
"[",
"i",
"]",
",",
"elements",
"[",
"i",
"]",
",",
"charges2",
"[",
"i",
"]",
")",
")",
"if",
"anisous",
"is",
"not",
"None",
":",
"anisou",
"=",
"anisous",
"[",
"i",
"]",
"write",
"(",
"anisouline",
"%",
"(",
"\"ANISOU\"",
",",
"serial",
",",
"atomnames",
"[",
"i",
"]",
",",
"altlocs",
"[",
"i",
"]",
",",
"resnames",
"[",
"i",
"]",
",",
"chainids",
"[",
"i",
"]",
",",
"resnum",
",",
"icodes",
"[",
"i",
"]",
",",
"anisou",
"[",
"0",
"]",
",",
"anisou",
"[",
"1",
"]",
",",
"anisou",
"[",
"2",
"]",
",",
"anisou",
"[",
"3",
"]",
",",
"anisou",
"[",
"4",
"]",
",",
"anisou",
"[",
"5",
"]",
",",
"segments",
"[",
"i",
"]",
",",
"elements",
"[",
"i",
"]",
",",
"charges2",
"[",
"i",
"]",
")",
")",
"if",
"atoms",
".",
"getFlags",
"(",
"'pdbter'",
")",
"is",
"not",
"None",
"and",
"atoms",
".",
"getFlags",
"(",
"'pdbter'",
")",
"[",
"i",
"]",
":",
"write",
"(",
"'TER\\n'",
")",
"if",
"multi",
":",
"write",
"(",
"'ENDMDL\\n'",
")",
"altlocs",
"=",
"np",
".",
"zeros",
"(",
"n_atoms",
",",
"s_or_u",
"+",
"'1'",
")"
] | https://github.com/prody/ProDy/blob/b24bbf58aa8fffe463c8548ae50e3955910e5b7f/prody/proteins/pdbfile.py#L1177-L1507 |
||
pymedusa/Medusa | 1405fbb6eb8ef4d20fcca24c32ddca52b11f0f38 | ext/requests_toolbelt/adapters/appengine.py | python | monkeypatch | (validate_certificate=True) | Sets up all Sessions to use AppEngineAdapter by default.
If you don't want to deal with configuring your own Sessions,
or if you use libraries that use requests directly (ie requests.post),
then you may prefer to monkeypatch and auto-configure all Sessions.
.. warning::
If ``validate_certificate`` is ``False``, certification validation will
effectively be disabled for all requests. | Sets up all Sessions to use AppEngineAdapter by default. | [
"Sets",
"up",
"all",
"Sessions",
"to",
"use",
"AppEngineAdapter",
"by",
"default",
"."
] | def monkeypatch(validate_certificate=True):
"""Sets up all Sessions to use AppEngineAdapter by default.
If you don't want to deal with configuring your own Sessions,
or if you use libraries that use requests directly (ie requests.post),
then you may prefer to monkeypatch and auto-configure all Sessions.
.. warning::
If ``validate_certificate`` is ``False``, certification validation will
effectively be disabled for all requests.
"""
_check_version()
# HACK: We should consider modifying urllib3 to support this cleanly,
# so that we can set a module-level variable in the sessions module,
# instead of overriding an imported HTTPAdapter as is done here.
adapter = AppEngineAdapter
if not validate_certificate:
adapter = InsecureAppEngineAdapter
sessions.HTTPAdapter = adapter
adapters.HTTPAdapter = adapter | [
"def",
"monkeypatch",
"(",
"validate_certificate",
"=",
"True",
")",
":",
"_check_version",
"(",
")",
"# HACK: We should consider modifying urllib3 to support this cleanly,",
"# so that we can set a module-level variable in the sessions module,",
"# instead of overriding an imported HTTPAdapter as is done here.",
"adapter",
"=",
"AppEngineAdapter",
"if",
"not",
"validate_certificate",
":",
"adapter",
"=",
"InsecureAppEngineAdapter",
"sessions",
".",
"HTTPAdapter",
"=",
"adapter",
"adapters",
".",
"HTTPAdapter",
"=",
"adapter"
] | https://github.com/pymedusa/Medusa/blob/1405fbb6eb8ef4d20fcca24c32ddca52b11f0f38/ext/requests_toolbelt/adapters/appengine.py#L175-L196 |
||
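A minimal usage sketch for the `monkeypatch` helper documented in the row above; the target URL is illustrative. After the call, module-level `requests` usage routes through `AppEngineAdapter`.

```python
import requests
from requests_toolbelt.adapters import appengine

# Swap the default HTTPAdapter for AppEngineAdapter on all Sessions.
appengine.monkeypatch()

# Plain requests calls now work inside the urlfetch-based GAE sandbox.
resp = requests.get('https://example.com')
print(resp.status_code)
```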
eliben/deep-learning-samples | d5ca86c5db664fabfb302cbbc231c50ec3d6a103 | linear-regression/multiple_linear_regression.py | python | compute_cost | (X, y, theta) | return cost.flat[0] | Compute the MSE cost of a prediction based on theta, over the whole X.
X: (k, n) each row is an input with n features (including an all-ones
column that should have been added beforehand).
y: (k, 1) observed output per input.
theta: (n, 1) regression parameters.
Note: expects y and theta to be proper column vectors. | Compute the MSE cost of a prediction based on theta, over the whole X. | [
"Compute",
"the",
"MSE",
"cost",
"of",
"a",
"prediction",
"based",
"on",
"theta",
"over",
"the",
"whole",
"X",
"."
] | def compute_cost(X, y, theta):
"""Compute the MSE cost of a prediction based on theta, over the whole X.
X: (k, n) each row is an input with n features (including an all-ones
column that should have been added beforehand).
y: (k, 1) observed output per input.
theta: (n, 1) regression parameters.
Note: expects y and theta to be proper column vectors.
"""
k = X.shape[0]
# Vectorized computation of yhat per sample.
yhat = np.dot(X, theta)
diff = yhat - y
# Vectorized computation using a dot product to compute sum of squares.
cost = np.dot(diff.T, diff) / k
# Cost is a 1x1 matrix, we need a scalar.
return cost.flat[0] | [
"def",
"compute_cost",
"(",
"X",
",",
"y",
",",
"theta",
")",
":",
"k",
"=",
"X",
".",
"shape",
"[",
"0",
"]",
"# Vectorized computation of yhat per sample.",
"yhat",
"=",
"np",
".",
"dot",
"(",
"X",
",",
"theta",
")",
"diff",
"=",
"yhat",
"-",
"y",
"# Vectorized computation using a dot product to compute sum of squares.",
"cost",
"=",
"np",
".",
"dot",
"(",
"diff",
".",
"T",
",",
"diff",
")",
"/",
"k",
"# Cost is a 1x1 matrix, we need a scalar.",
"return",
"cost",
".",
"flat",
"[",
"0",
"]"
] | https://github.com/eliben/deep-learning-samples/blob/d5ca86c5db664fabfb302cbbc231c50ec3d6a103/linear-regression/multiple_linear_regression.py#L42-L59 |
||
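A small worked example for `compute_cost` from the row above, assuming the function is in scope; parameters that fit the data exactly give an MSE of zero.

```python
import numpy as np

# k=3 samples, n=2 features; the first column is the all-ones bias column.
X = np.array([[1.0, 0.0],
              [1.0, 1.0],
              [1.0, 2.0]])
y = np.array([[1.0], [3.0], [5.0]])   # y = 1 + 2*x with no noise
theta = np.array([[1.0], [2.0]])      # exact parameters

print(compute_cost(X, y, theta))      # 0.0
```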
weechat/scripts | 99ec0e7eceefabb9efb0f11ec26d45d6e8e84335 | python/otr.py | python | IrcHTMLParser.reset | (self) | Forget all state, called from __init__ | Forget all state, called from __init__ | [
"Forget",
"all",
"state",
"called",
"from",
"__init__"
] | def reset(self):
"""Forget all state, called from __init__"""
PYVER.html_parser.HTMLParser.reset(self)
self.result = ''
self.linktarget = ''
self.linkstart = 0 | [
"def",
"reset",
"(",
"self",
")",
":",
"PYVER",
".",
"html_parser",
".",
"HTMLParser",
".",
"reset",
"(",
"self",
")",
"self",
".",
"result",
"=",
"''",
"self",
".",
"linktarget",
"=",
"''",
"self",
".",
"linkstart",
"=",
"0"
] | https://github.com/weechat/scripts/blob/99ec0e7eceefabb9efb0f11ec26d45d6e8e84335/python/otr.py#L1103-L1108 |
||
OlafenwaMoses/ImageAI | fe2d6bab3ddb1027c54abe7eb961364928869a30 | imageai/Detection/Custom/__init__.py | python | CustomObjectDetection.setModelPath | (self, detection_model_path) | 'setModelPath' is used to specify the filepath to your custom detection model
:param detection_model_path: path to the .h5 model file.
Usually is one of those under <data_directory>/models/detection_model-ex-ddd--loss-dddd.ddd.h5
:return: None | 'setModelPath' is used to specify the filepath to your custom detection model
:param detection_model_path: path to the .h5 model file.
Usually is one of those under <data_directory>/models/detection_model-ex-ddd--loss-dddd.ddd.h5
:return: None | [
"setModelPath",
"is",
"used",
"to",
"specify",
"the",
"filepath",
"to",
"your",
"custom",
"detection",
"model",
":",
"param",
"detection_model_path",
":",
"path",
"to",
"the",
".",
"h5",
"model",
"file",
".",
"Usually",
"is",
"one",
"of",
"those",
"under",
"<data_directory",
">",
"/",
"models",
"/",
"detection_model",
"-",
"ex",
"-",
"ddd",
"--",
"loss",
"-",
"dddd",
".",
"ddd",
".",
"h5",
":",
"return",
":",
"None"
] | def setModelPath(self, detection_model_path):
"""
'setModelPath' is used to specify the filepath to your custom detection model
:param detection_model_path: path to the .h5 model file.
Usually is one of those under <data_directory>/models/detection_model-ex-ddd--loss-dddd.ddd.h5
:return: None
"""
self.__model_path = detection_model_path | [
"def",
"setModelPath",
"(",
"self",
",",
"detection_model_path",
")",
":",
"self",
".",
"__model_path",
"=",
"detection_model_path"
] | https://github.com/OlafenwaMoses/ImageAI/blob/fe2d6bab3ddb1027c54abe7eb961364928869a30/imageai/Detection/Custom/__init__.py#L632-L639 |
||
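A usage sketch showing `setModelPath` in the usual ImageAI custom-detection flow; the file paths are illustrative placeholders.

```python
from imageai.Detection.Custom import CustomObjectDetection

detector = CustomObjectDetection()
detector.setModelTypeAsYOLOv3()
# Illustrative checkpoint name of the <data_directory>/models/ form above.
detector.setModelPath('models/detection_model-ex-005--loss-0004.915.h5')
detector.setJsonPath('json/detection_config.json')
detector.loadModel()
detections = detector.detectObjectsFromImage(
    input_image='input.jpg', output_image_path='output.jpg')
```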
edfungus/Crouton | ada98b3930192938a48909072b45cb84b945f875 | clients/python_clients/venv/lib/python2.7/site-packages/pkg_resources/__init__.py | python | Distribution.activate | (self, path=None) | Ensure distribution is importable on `path` (default=sys.path) | Ensure distribution is importable on `path` (default=sys.path) | [
"Ensure",
"distribution",
"is",
"importable",
"on",
"path",
"(",
"default",
"=",
"sys",
".",
"path",
")"
] | def activate(self, path=None):
"""Ensure distribution is importable on `path` (default=sys.path)"""
if path is None:
path = sys.path
self.insert_on(path)
if path is sys.path:
fixup_namespace_packages(self.location)
for pkg in self._get_metadata('namespace_packages.txt'):
if pkg in sys.modules:
declare_namespace(pkg) | [
"def",
"activate",
"(",
"self",
",",
"path",
"=",
"None",
")",
":",
"if",
"path",
"is",
"None",
":",
"path",
"=",
"sys",
".",
"path",
"self",
".",
"insert_on",
"(",
"path",
")",
"if",
"path",
"is",
"sys",
".",
"path",
":",
"fixup_namespace_packages",
"(",
"self",
".",
"location",
")",
"for",
"pkg",
"in",
"self",
".",
"_get_metadata",
"(",
"'namespace_packages.txt'",
")",
":",
"if",
"pkg",
"in",
"sys",
".",
"modules",
":",
"declare_namespace",
"(",
"pkg",
")"
] | https://github.com/edfungus/Crouton/blob/ada98b3930192938a48909072b45cb84b945f875/clients/python_clients/venv/lib/python2.7/site-packages/pkg_resources/__init__.py#L2619-L2628 |
||
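A short sketch of `Distribution.activate` from the row above, using setuptools itself as the example installed distribution.

```python
import pkg_resources

# Look up an installed distribution and make it importable on sys.path.
dist = pkg_resources.get_distribution('setuptools')
dist.activate()  # inserts dist.location and fixes up namespace packages
```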
colour-science/colour | 38782ac059e8ddd91939f3432bf06811c16667f0 | colour/utilities/data_structures.py | python | Node.data | (self, value) | Setter for the **self.data** property. | Setter for the **self.data** property. | [
"Setter",
"for",
"the",
"**",
"self",
".",
"data",
"**",
"property",
"."
] | def data(self, value):
"""
Setter for the **self.data** property.
"""
self._data = value | [
"def",
"data",
"(",
"self",
",",
"value",
")",
":",
"self",
".",
"_data",
"=",
"value"
] | https://github.com/colour-science/colour/blob/38782ac059e8ddd91939f3432bf06811c16667f0/colour/utilities/data_structures.py#L774-L779 |
||
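The setter above is one half of a standard property pair; a self-contained sketch of the pattern (a toy class, not colour's full `Node`):

```python
class Node:
    """Toy class showing the property/setter pattern used above."""

    def __init__(self, data=None):
        self._data = data

    @property
    def data(self):
        return self._data

    @data.setter
    def data(self, value):
        self._data = value

node = Node()
node.data = 42
print(node.data)  # 42
```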
kylebebak/Requester | 4a9f9f051fa5fc951a8f7ad098a328261ca2db97 | commands/request.py | python | RequesterUrlOptionsCommand.show_options | (self, url, view) | Send options request to `url` and display results in pop-up. | Send options request to `url` and display results in pop-up. | [
"Send",
"options",
"request",
"to",
"url",
"and",
"display",
"results",
"in",
"pop",
"-",
"up",
"."
] | def show_options(self, url, view):
"""Send options request to `url` and display results in pop-up.
"""
res = options(url, timeout=3)
if not res.ok:
content = '<h2>OPTIONS: {}</h2>\n<p>request failed</p>'.format(url)
else:
names = ['Allow', 'Access-Control-Allow-Methods', 'Access-Control-Max-Age']
headers = [res.headers.get(name, None) for name in names]
items = '\n'.join('<li>{}: {}</li>'.format(n, h) for n, h in zip(names, headers) if h)
content = '<h2>OPTIONS: {}</h2>\n<ul>{}</ul>'.format(url, items)
try:
json_dict = res.json()
except:
pass
else:
content = '{}\n<pre><code>{}</pre></code>'.format(
content, json.dumps(json_dict, sort_keys=True, indent=2, separators=(',', ': '))
)
view.show_popup(content, max_width=700, max_height=500) | [
"def",
"show_options",
"(",
"self",
",",
"url",
",",
"view",
")",
":",
"res",
"=",
"options",
"(",
"url",
",",
"timeout",
"=",
"3",
")",
"if",
"not",
"res",
".",
"ok",
":",
"content",
"=",
"'<h2>OPTIONS: {}</h2>\\n<p>request failed</p>'",
".",
"format",
"(",
"url",
")",
"else",
":",
"names",
"=",
"[",
"'Allow'",
",",
"'Access-Control-Allow-Methods'",
",",
"'Access-Control-Max-Age'",
"]",
"headers",
"=",
"[",
"res",
".",
"headers",
".",
"get",
"(",
"name",
",",
"None",
")",
"for",
"name",
"in",
"names",
"]",
"items",
"=",
"'\\n'",
".",
"join",
"(",
"'<li>{}: {}</li>'",
".",
"format",
"(",
"n",
",",
"h",
")",
"for",
"n",
",",
"h",
"in",
"zip",
"(",
"names",
",",
"headers",
")",
"if",
"h",
")",
"content",
"=",
"'<h2>OPTIONS: {}</h2>\\n<ul>{}</ul>'",
".",
"format",
"(",
"url",
",",
"items",
")",
"try",
":",
"json_dict",
"=",
"res",
".",
"json",
"(",
")",
"except",
":",
"pass",
"else",
":",
"content",
"=",
"'{}\\n<pre><code>{}</pre></code>'",
".",
"format",
"(",
"content",
",",
"json",
".",
"dumps",
"(",
"json_dict",
",",
"sort_keys",
"=",
"True",
",",
"indent",
"=",
"2",
",",
"separators",
"=",
"(",
"','",
",",
"': '",
")",
")",
")",
"view",
".",
"show_popup",
"(",
"content",
",",
"max_width",
"=",
"700",
",",
"max_height",
"=",
"500",
")"
] | https://github.com/kylebebak/Requester/blob/4a9f9f051fa5fc951a8f7ad098a328261ca2db97/commands/request.py#L461-L481 |
||
alanhamlett/pip-update-requirements | ce875601ef278c8ce00ad586434a978731525561 | pur/packages/pip/_vendor/urllib3/request.py | python | RequestMethods.request_encode_body | (self, method, url, fields=None, headers=None,
encode_multipart=True, multipart_boundary=None,
**urlopen_kw) | return self.urlopen(method, url, **extra_kw) | Make a request using :meth:`urlopen` with the ``fields`` encoded in
the body. This is useful for request methods like POST, PUT, PATCH, etc.
When ``encode_multipart=True`` (default), then
:meth:`urllib3.filepost.encode_multipart_formdata` is used to encode
the payload with the appropriate content type. Otherwise
:meth:`urllib.urlencode` is used with the
'application/x-www-form-urlencoded' content type.
Multipart encoding must be used when posting files, and it's reasonably
safe to use it in other times too. However, it may break request
signing, such as with OAuth.
Supports an optional ``fields`` parameter of key/value strings AND
key/filetuple. A filetuple is a (filename, data, MIME type) tuple where
the MIME type is optional. For example::
fields = {
'foo': 'bar',
'fakefile': ('foofile.txt', 'contents of foofile'),
'realfile': ('barfile.txt', open('realfile').read()),
'typedfile': ('bazfile.bin', open('bazfile').read(),
'image/jpeg'),
'nonamefile': 'contents of nonamefile field',
}
When uploading a file, providing a filename (the first parameter of the
tuple) is optional but recommended to best mimic behavior of browsers.
Note that if ``headers`` are supplied, the 'Content-Type' header will
be overwritten because it depends on the dynamic random boundary string
which is used to compose the body of the request. The random boundary
string can be explicitly set with the ``multipart_boundary`` parameter. | Make a request using :meth:`urlopen` with the ``fields`` encoded in
the body. This is useful for request methods like POST, PUT, PATCH, etc. | [
"Make",
"a",
"request",
"using",
":",
"meth",
":",
"urlopen",
"with",
"the",
"fields",
"encoded",
"in",
"the",
"body",
".",
"This",
"is",
"useful",
"for",
"request",
"methods",
"like",
"POST",
"PUT",
"PATCH",
"etc",
"."
] | def request_encode_body(self, method, url, fields=None, headers=None,
encode_multipart=True, multipart_boundary=None,
**urlopen_kw):
"""
Make a request using :meth:`urlopen` with the ``fields`` encoded in
the body. This is useful for request methods like POST, PUT, PATCH, etc.
When ``encode_multipart=True`` (default), then
:meth:`urllib3.filepost.encode_multipart_formdata` is used to encode
the payload with the appropriate content type. Otherwise
:meth:`urllib.urlencode` is used with the
'application/x-www-form-urlencoded' content type.
Multipart encoding must be used when posting files, and it's reasonably
safe to use it in other times too. However, it may break request
signing, such as with OAuth.
Supports an optional ``fields`` parameter of key/value strings AND
key/filetuple. A filetuple is a (filename, data, MIME type) tuple where
the MIME type is optional. For example::
fields = {
'foo': 'bar',
'fakefile': ('foofile.txt', 'contents of foofile'),
'realfile': ('barfile.txt', open('realfile').read()),
'typedfile': ('bazfile.bin', open('bazfile').read(),
'image/jpeg'),
'nonamefile': 'contents of nonamefile field',
}
When uploading a file, providing a filename (the first parameter of the
tuple) is optional but recommended to best mimic behavior of browsers.
Note that if ``headers`` are supplied, the 'Content-Type' header will
be overwritten because it depends on the dynamic random boundary string
which is used to compose the body of the request. The random boundary
string can be explicitly set with the ``multipart_boundary`` parameter.
"""
if headers is None:
headers = self.headers
extra_kw = {'headers': {}}
if fields:
if 'body' in urlopen_kw:
raise TypeError(
"request got values for both 'fields' and 'body', can only specify one.")
if encode_multipart:
body, content_type = encode_multipart_formdata(fields, boundary=multipart_boundary)
else:
body, content_type = urlencode(fields), 'application/x-www-form-urlencoded'
extra_kw['body'] = body
extra_kw['headers'] = {'Content-Type': content_type}
extra_kw['headers'].update(headers)
extra_kw.update(urlopen_kw)
return self.urlopen(method, url, **extra_kw) | [
"def",
"request_encode_body",
"(",
"self",
",",
"method",
",",
"url",
",",
"fields",
"=",
"None",
",",
"headers",
"=",
"None",
",",
"encode_multipart",
"=",
"True",
",",
"multipart_boundary",
"=",
"None",
",",
"*",
"*",
"urlopen_kw",
")",
":",
"if",
"headers",
"is",
"None",
":",
"headers",
"=",
"self",
".",
"headers",
"extra_kw",
"=",
"{",
"'headers'",
":",
"{",
"}",
"}",
"if",
"fields",
":",
"if",
"'body'",
"in",
"urlopen_kw",
":",
"raise",
"TypeError",
"(",
"\"request got values for both 'fields' and 'body', can only specify one.\"",
")",
"if",
"encode_multipart",
":",
"body",
",",
"content_type",
"=",
"encode_multipart_formdata",
"(",
"fields",
",",
"boundary",
"=",
"multipart_boundary",
")",
"else",
":",
"body",
",",
"content_type",
"=",
"urlencode",
"(",
"fields",
")",
",",
"'application/x-www-form-urlencoded'",
"extra_kw",
"[",
"'body'",
"]",
"=",
"body",
"extra_kw",
"[",
"'headers'",
"]",
"=",
"{",
"'Content-Type'",
":",
"content_type",
"}",
"extra_kw",
"[",
"'headers'",
"]",
".",
"update",
"(",
"headers",
")",
"extra_kw",
".",
"update",
"(",
"urlopen_kw",
")",
"return",
"self",
".",
"urlopen",
"(",
"method",
",",
"url",
",",
"*",
"*",
"extra_kw",
")"
] | https://github.com/alanhamlett/pip-update-requirements/blob/ce875601ef278c8ce00ad586434a978731525561/pur/packages/pip/_vendor/urllib3/request.py#L91-L150 |
||
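A usage sketch for `request_encode_body`; `PoolManager` mixes in `RequestMethods`, so the method is available directly. The endpoint URL is illustrative.

```python
import urllib3

http = urllib3.PoolManager()
r = http.request_encode_body(
    'POST',
    'https://httpbin.org/post',   # illustrative endpoint
    fields={
        'foo': 'bar',
        'realfile': ('report.txt', 'file contents', 'text/plain'),
    })
print(r.status)
```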
biolab/orange3 | 41685e1c7b1d1babe680113685a2d44bcc9fec0b | Orange/widgets/data/utils/pythoneditor/editor.py | python | PythonEditor.isComment | (self, line, column) | return recursive_is_type(token_type, Token.Comment) | Check if character at column is a comment | Check if character at column is a comment | [
"Check",
"if",
"character",
"at",
"column",
"is",
"a",
"comment"
] | def isComment(self, line, column):
"""Check if character at column is a comment
"""
block = self.document().findBlockByNumber(line)
# here, pygments' highlighter is implemented, so the dataobject
# that is originally defined in Qutepart isn't the same
# so I'm using pygments' parser, storing it in the data object
dataObject = block.userData()
if dataObject is None:
return False
if len(dataObject.syntax_stack) > 1:
return True
token_type = self._get_token_at(block, column)
def recursive_is_type(token, parent_token):
if token.parent is None:
return False
if token.parent is parent_token:
return True
return recursive_is_type(token.parent, parent_token)
return recursive_is_type(token_type, Token.Comment) | [
"def",
"isComment",
"(",
"self",
",",
"line",
",",
"column",
")",
":",
"block",
"=",
"self",
".",
"document",
"(",
")",
".",
"findBlockByNumber",
"(",
"line",
")",
"# here, pygments' highlighter is implemented, so the dataobject",
"# that is originally defined in Qutepart isn't the same",
"# so I'm using pygments' parser, storing it in the data object",
"dataObject",
"=",
"block",
".",
"userData",
"(",
")",
"if",
"dataObject",
"is",
"None",
":",
"return",
"False",
"if",
"len",
"(",
"dataObject",
".",
"syntax_stack",
")",
">",
"1",
":",
"return",
"True",
"token_type",
"=",
"self",
".",
"_get_token_at",
"(",
"block",
",",
"column",
")",
"def",
"recursive_is_type",
"(",
"token",
",",
"parent_token",
")",
":",
"if",
"token",
".",
"parent",
"is",
"None",
":",
"return",
"False",
"if",
"token",
".",
"parent",
"is",
"parent_token",
":",
"return",
"True",
"return",
"recursive_is_type",
"(",
"token",
".",
"parent",
",",
"parent_token",
")",
"return",
"recursive_is_type",
"(",
"token_type",
",",
"Token",
".",
"Comment",
")"
] | https://github.com/biolab/orange3/blob/41685e1c7b1d1babe680113685a2d44bcc9fec0b/Orange/widgets/data/utils/pythoneditor/editor.py#L1085-L1110 |
||
sympy/sympy | d822fcba181155b85ff2b29fe525adbafb22b448 | sympy/utilities/codegen.py | python | CCodeGen._get_header | (self) | return code_lines | Writes a common header for the generated files. | Writes a common header for the generated files. | [
"Writes",
"a",
"common",
"header",
"for",
"the",
"generated",
"files",
"."
] | def _get_header(self):
"""Writes a common header for the generated files."""
code_lines = []
code_lines.append("/" + "*"*78 + '\n')
tmp = header_comment % {"version": sympy_version,
"project": self.project}
for line in tmp.splitlines():
code_lines.append(" *%s*\n" % line.center(76))
code_lines.append(" " + "*"*78 + "/\n")
return code_lines | [
"def",
"_get_header",
"(",
"self",
")",
":",
"code_lines",
"=",
"[",
"]",
"code_lines",
".",
"append",
"(",
"\"/\"",
"+",
"\"*\"",
"*",
"78",
"+",
"'\\n'",
")",
"tmp",
"=",
"header_comment",
"%",
"{",
"\"version\"",
":",
"sympy_version",
",",
"\"project\"",
":",
"self",
".",
"project",
"}",
"for",
"line",
"in",
"tmp",
".",
"splitlines",
"(",
")",
":",
"code_lines",
".",
"append",
"(",
"\" *%s*\\n\"",
"%",
"line",
".",
"center",
"(",
"76",
")",
")",
"code_lines",
".",
"append",
"(",
"\" \"",
"+",
"\"*\"",
"*",
"78",
"+",
"\"/\\n\"",
")",
"return",
"code_lines"
] | https://github.com/sympy/sympy/blob/d822fcba181155b85ff2b29fe525adbafb22b448/sympy/utilities/codegen.py#L886-L895 |
||
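`_get_header` is an internal helper; its output appears at the top of files produced by the public `codegen` entry point. A minimal sketch:

```python
from sympy import symbols
from sympy.utilities.codegen import codegen

x, y = symbols('x y')
[(c_name, c_code), (h_name, h_code)] = codegen(('f', x + y), 'C')
print(c_code)  # the .c source begins with the /* ... */ header comment
```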
containernet/containernet | 7b2ae38d691b2ed8da2b2700b85ed03562271d01 | mininet/net.py | python | Mininet.__init__ | ( self, topo=None, switch=OVSKernelSwitch, host=Host,
controller=DefaultController, link=Link, intf=Intf,
build=True, xterms=False, cleanup=False, ipBase='10.0.0.0/8',
inNamespace=False,
autoSetMacs=False, autoStaticArp=False, autoPinCpus=False,
listenPort=None, waitConnected=False ) | Create Mininet object.
topo: Topo (topology) object or None
switch: default Switch class
host: default Host class/constructor
controller: default Controller class/constructor
link: default Link class/constructor
intf: default Intf class/constructor
ipBase: base IP address for hosts,
build: build now from topo?
xterms: if build now, spawn xterms?
cleanup: if build now, cleanup before creating?
inNamespace: spawn switches and controller in net namespaces?
autoSetMacs: set MAC addrs automatically like IP addresses?
autoStaticArp: set all-pairs static MAC addrs?
autoPinCpus: pin hosts to (real) cores (requires CPULimitedHost)?
listenPort: base listening port to open; will be incremented for
each additional switch in the net if inNamespace=False
waitConnected: wait for switches to Connect?
(False; True/None=wait indefinitely; time(s)=timed wait) | Create Mininet object.
topo: Topo (topology) object or None
switch: default Switch class
host: default Host class/constructor
controller: default Controller class/constructor
link: default Link class/constructor
intf: default Intf class/constructor
ipBase: base IP address for hosts,
build: build now from topo?
xterms: if build now, spawn xterms?
cleanup: if build now, cleanup before creating?
inNamespace: spawn switches and controller in net namespaces?
autoSetMacs: set MAC addrs automatically like IP addresses?
autoStaticArp: set all-pairs static MAC addrs?
autoPinCpus: pin hosts to (real) cores (requires CPULimitedHost)?
listenPort: base listening port to open; will be incremented for
each additional switch in the net if inNamespace=False
waitConnected: wait for switches to Connect?
(False; True/None=wait indefinitely; time(s)=timed wait) | [
"Create",
"Mininet",
"object",
".",
"topo",
":",
"Topo",
"(",
"topology",
")",
"object",
"or",
"None",
"switch",
":",
"default",
"Switch",
"class",
"host",
":",
"default",
"Host",
"class",
"/",
"constructor",
"controller",
":",
"default",
"Controller",
"class",
"/",
"constructor",
"link",
":",
"default",
"Link",
"class",
"/",
"constructor",
"intf",
":",
"default",
"Intf",
"class",
"/",
"constructor",
"ipBase",
":",
"base",
"IP",
"address",
"for",
"hosts",
"build",
":",
"build",
"now",
"from",
"topo?",
"xterms",
":",
"if",
"build",
"now",
"spawn",
"xterms?",
"cleanup",
":",
"if",
"build",
"now",
"cleanup",
"before",
"creating?",
"inNamespace",
":",
"spawn",
"switches",
"and",
"controller",
"in",
"net",
"namespaces?",
"autoSetMacs",
":",
"set",
"MAC",
"addrs",
"automatically",
"like",
"IP",
"addresses?",
"autoStaticArp",
":",
"set",
"all",
"-",
"pairs",
"static",
"MAC",
"addrs?",
"autoPinCpus",
":",
"pin",
"hosts",
"to",
"(",
"real",
")",
"cores",
"(",
"requires",
"CPULimitedHost",
")",
"?",
"listenPort",
":",
"base",
"listening",
"port",
"to",
"open",
";",
"will",
"be",
"incremented",
"for",
"each",
"additional",
"switch",
"in",
"the",
"net",
"if",
"inNamespace",
"=",
"False",
"waitConnected",
":",
"wait",
"for",
"switches",
"to",
"Connect?",
"(",
"False",
";",
"True",
"/",
"None",
"=",
"wait",
"indefinitely",
";",
"time",
"(",
"s",
")",
"=",
"timed",
"wait",
")"
] | def __init__( self, topo=None, switch=OVSKernelSwitch, host=Host,
controller=DefaultController, link=Link, intf=Intf,
build=True, xterms=False, cleanup=False, ipBase='10.0.0.0/8',
inNamespace=False,
autoSetMacs=False, autoStaticArp=False, autoPinCpus=False,
listenPort=None, waitConnected=False ):
"""Create Mininet object.
topo: Topo (topology) object or None
switch: default Switch class
host: default Host class/constructor
controller: default Controller class/constructor
link: default Link class/constructor
intf: default Intf class/constructor
ipBase: base IP address for hosts,
build: build now from topo?
xterms: if build now, spawn xterms?
cleanup: if build now, cleanup before creating?
inNamespace: spawn switches and controller in net namespaces?
autoSetMacs: set MAC addrs automatically like IP addresses?
autoStaticArp: set all-pairs static MAC addrs?
autoPinCpus: pin hosts to (real) cores (requires CPULimitedHost)?
listenPort: base listening port to open; will be incremented for
each additional switch in the net if inNamespace=False
waitConnected: wait for switches to Connect?
(False; True/None=wait indefinitely; time(s)=timed wait)"""
self.topo = topo
self.switch = switch
self.host = host
self.controller = controller
self.link = link
self.intf = intf
self.ipBase = ipBase
self.ipBaseNum, self.prefixLen = netParse( self.ipBase )
hostIP = ( 0xffffffff >> self.prefixLen ) & self.ipBaseNum
# Start for address allocation
self.nextIP = hostIP if hostIP > 0 else 1
self.inNamespace = inNamespace
self.xterms = xterms
self.cleanup = cleanup
self.autoSetMacs = autoSetMacs
self.autoStaticArp = autoStaticArp
self.autoPinCpus = autoPinCpus
self.numCores = numCores()
self.nextCore = 0 # next core for pinning hosts to CPUs
self.listenPort = listenPort
self.waitConn = waitConnected
self.hosts = []
self.switches = []
self.controllers = []
self.links = []
self.nameToNode = {} # name to Node (Host/Switch) objects
self.terms = [] # list of spawned xterm processes
Mininet.init() # Initialize Mininet if necessary
self.built = False
if topo and build:
self.build() | [
"def",
"__init__",
"(",
"self",
",",
"topo",
"=",
"None",
",",
"switch",
"=",
"OVSKernelSwitch",
",",
"host",
"=",
"Host",
",",
"controller",
"=",
"DefaultController",
",",
"link",
"=",
"Link",
",",
"intf",
"=",
"Intf",
",",
"build",
"=",
"True",
",",
"xterms",
"=",
"False",
",",
"cleanup",
"=",
"False",
",",
"ipBase",
"=",
"'10.0.0.0/8'",
",",
"inNamespace",
"=",
"False",
",",
"autoSetMacs",
"=",
"False",
",",
"autoStaticArp",
"=",
"False",
",",
"autoPinCpus",
"=",
"False",
",",
"listenPort",
"=",
"None",
",",
"waitConnected",
"=",
"False",
")",
":",
"self",
".",
"topo",
"=",
"topo",
"self",
".",
"switch",
"=",
"switch",
"self",
".",
"host",
"=",
"host",
"self",
".",
"controller",
"=",
"controller",
"self",
".",
"link",
"=",
"link",
"self",
".",
"intf",
"=",
"intf",
"self",
".",
"ipBase",
"=",
"ipBase",
"self",
".",
"ipBaseNum",
",",
"self",
".",
"prefixLen",
"=",
"netParse",
"(",
"self",
".",
"ipBase",
")",
"hostIP",
"=",
"(",
"0xffffffff",
">>",
"self",
".",
"prefixLen",
")",
"&",
"self",
".",
"ipBaseNum",
"# Start for address allocation",
"self",
".",
"nextIP",
"=",
"hostIP",
"if",
"hostIP",
">",
"0",
"else",
"1",
"self",
".",
"inNamespace",
"=",
"inNamespace",
"self",
".",
"xterms",
"=",
"xterms",
"self",
".",
"cleanup",
"=",
"cleanup",
"self",
".",
"autoSetMacs",
"=",
"autoSetMacs",
"self",
".",
"autoStaticArp",
"=",
"autoStaticArp",
"self",
".",
"autoPinCpus",
"=",
"autoPinCpus",
"self",
".",
"numCores",
"=",
"numCores",
"(",
")",
"self",
".",
"nextCore",
"=",
"0",
"# next core for pinning hosts to CPUs",
"self",
".",
"listenPort",
"=",
"listenPort",
"self",
".",
"waitConn",
"=",
"waitConnected",
"self",
".",
"hosts",
"=",
"[",
"]",
"self",
".",
"switches",
"=",
"[",
"]",
"self",
".",
"controllers",
"=",
"[",
"]",
"self",
".",
"links",
"=",
"[",
"]",
"self",
".",
"nameToNode",
"=",
"{",
"}",
"# name to Node (Host/Switch) objects",
"self",
".",
"terms",
"=",
"[",
"]",
"# list of spawned xterm processes",
"Mininet",
".",
"init",
"(",
")",
"# Initialize Mininet if necessary",
"self",
".",
"built",
"=",
"False",
"if",
"topo",
"and",
"build",
":",
"self",
".",
"build",
"(",
")"
] | https://github.com/containernet/containernet/blob/7b2ae38d691b2ed8da2b2700b85ed03562271d01/mininet/net.py#L127-L187 |
||
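A canonical usage sketch for the `Mininet` constructor above (requires root privileges to actually run):

```python
from mininet.net import Mininet
from mininet.topo import SingleSwitchTopo

# Two hosts behind one switch; topo= plus build=True (the default)
# constructs the network immediately.
net = Mininet(topo=SingleSwitchTopo(k=2))
net.start()
net.pingAll()   # connectivity check between the two hosts
net.stop()
```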
cloudera/hue | 23f02102d4547c17c32bd5ea0eb24e9eadd657a4 | desktop/core/ext-py/oauth2client-4.1.3/oauth2client/client.py | python | AssertionCredentials.__init__ | (self, assertion_type, user_agent=None,
token_uri=oauth2client.GOOGLE_TOKEN_URI,
revoke_uri=oauth2client.GOOGLE_REVOKE_URI,
**unused_kwargs) | Constructor for AssertionFlowCredentials.
Args:
assertion_type: string, assertion type that will be declared to the
auth server
user_agent: string, The HTTP User-Agent to provide for this
application.
token_uri: string, URI for token endpoint. For convenience defaults
to Google's endpoints but any OAuth 2.0 provider can be
used.
revoke_uri: string, URI for revoke endpoint. | Constructor for AssertionFlowCredentials. | [
"Constructor",
"for",
"AssertionFlowCredentials",
"."
] | def __init__(self, assertion_type, user_agent=None,
token_uri=oauth2client.GOOGLE_TOKEN_URI,
revoke_uri=oauth2client.GOOGLE_REVOKE_URI,
**unused_kwargs):
"""Constructor for AssertionFlowCredentials.
Args:
assertion_type: string, assertion type that will be declared to the
auth server
user_agent: string, The HTTP User-Agent to provide for this
application.
token_uri: string, URI for token endpoint. For convenience defaults
to Google's endpoints but any OAuth 2.0 provider can be
used.
revoke_uri: string, URI for revoke endpoint.
"""
super(AssertionCredentials, self).__init__(
None,
None,
None,
None,
None,
token_uri,
user_agent,
revoke_uri=revoke_uri)
self.assertion_type = assertion_type | [
"def",
"__init__",
"(",
"self",
",",
"assertion_type",
",",
"user_agent",
"=",
"None",
",",
"token_uri",
"=",
"oauth2client",
".",
"GOOGLE_TOKEN_URI",
",",
"revoke_uri",
"=",
"oauth2client",
".",
"GOOGLE_REVOKE_URI",
",",
"*",
"*",
"unused_kwargs",
")",
":",
"super",
"(",
"AssertionCredentials",
",",
"self",
")",
".",
"__init__",
"(",
"None",
",",
"None",
",",
"None",
",",
"None",
",",
"None",
",",
"token_uri",
",",
"user_agent",
",",
"revoke_uri",
"=",
"revoke_uri",
")",
"self",
".",
"assertion_type",
"=",
"assertion_type"
] | https://github.com/cloudera/hue/blob/23f02102d4547c17c32bd5ea0eb24e9eadd657a4/desktop/core/ext-py/oauth2client-4.1.3/oauth2client/client.py#L1456-L1481 |
||
allegroai/clearml | 5953dc6eefadcdfcc2bdbb6a0da32be58823a5af | clearml/utilities/plotlympl/mplexporter/renderers/base.py | python | Renderer.draw_path | (self, data, coordinates, pathcodes, style,
offset=None, offset_coordinates="data", mplobj=None) | Draw a path.
In matplotlib, paths are created by filled regions, histograms,
contour plots, patches, etc.
Parameters
----------
data : array_like
A shape (N, 2) array of datapoints.
coordinates : string
A string code, which should be either 'data' for data coordinates,
'figure' for figure (pixel) coordinates, or "points" for raw
point coordinates (useful in conjunction with offsets, below).
pathcodes : list
A list of single-character SVG pathcodes associated with the data.
Path codes are one of ['M', 'm', 'L', 'l', 'Q', 'q', 'T', 't',
'S', 's', 'C', 'c', 'Z', 'z']
See the SVG specification for details. Note that some path codes
consume more than one datapoint (while 'Z' consumes none), so
in general, the length of the pathcodes list will not be the same
as that of the data array.
style : dictionary
a dictionary specifying the appearance of the line.
offset : list (optional)
the (x, y) offset of the path. If not given, no offset will
be used.
offset_coordinates : string (optional)
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
mplobj : matplotlib object
the matplotlib plot element which generated this path | Draw a path. | [
"Draw",
"a",
"path",
"."
] | def draw_path(self, data, coordinates, pathcodes, style,
offset=None, offset_coordinates="data", mplobj=None):
"""
Draw a path.
In matplotlib, paths are created by filled regions, histograms,
contour plots, patches, etc.
Parameters
----------
data : array_like
A shape (N, 2) array of datapoints.
coordinates : string
A string code, which should be either 'data' for data coordinates,
'figure' for figure (pixel) coordinates, or "points" for raw
point coordinates (useful in conjunction with offsets, below).
pathcodes : list
A list of single-character SVG pathcodes associated with the data.
Path codes are one of ['M', 'm', 'L', 'l', 'Q', 'q', 'T', 't',
'S', 's', 'C', 'c', 'Z', 'z']
See the SVG specification for details. Note that some path codes
consume more than one datapoint (while 'Z' consumes none), so
in general, the length of the pathcodes list will not be the same
as that of the data array.
style : dictionary
a dictionary specifying the appearance of the line.
offset : list (optional)
the (x, y) offset of the path. If not given, no offset will
be used.
offset_coordinates : string (optional)
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
mplobj : matplotlib object
the matplotlib plot element which generated this path
"""
raise NotImplementedError() | [
"def",
"draw_path",
"(",
"self",
",",
"data",
",",
"coordinates",
",",
"pathcodes",
",",
"style",
",",
"offset",
"=",
"None",
",",
"offset_coordinates",
"=",
"\"data\"",
",",
"mplobj",
"=",
"None",
")",
":",
"raise",
"NotImplementedError",
"(",
")"
] | https://github.com/allegroai/clearml/blob/5953dc6eefadcdfcc2bdbb6a0da32be58823a5af/clearml/utilities/plotlympl/mplexporter/renderers/base.py#L341-L376 |
||
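`draw_path` raises `NotImplementedError`, so concrete renderers override it. A toy subclass sketch, assuming `Renderer` from the module in the row above is importable:

```python
from clearml.utilities.plotlympl.mplexporter.renderers.base import Renderer


class LoggingRenderer(Renderer):
    """Toy renderer that just reports each path it is asked to draw."""

    def draw_path(self, data, coordinates, pathcodes, style,
                  offset=None, offset_coordinates="data", mplobj=None):
        print("path: {} points, {} codes, coords={}".format(
            len(data), len(pathcodes), coordinates))
```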
nlpyang/PreSumm | 70b810e0f06d179022958dd35c1a3385fe87f28c | src/train_abstractive.py | python | train_abs_multi | (args) | Spawns 1 process per GPU | Spawns 1 process per GPU | [
"Spawns",
"1",
"process",
"per",
"GPU"
] | def train_abs_multi(args):
""" Spawns 1 process per GPU """
init_logger()
nb_gpu = args.world_size
mp = torch.multiprocessing.get_context('spawn')
# Create a thread to listen for errors in the child processes.
error_queue = mp.SimpleQueue()
error_handler = ErrorHandler(error_queue)
# Train with multiprocessing.
procs = []
for i in range(nb_gpu):
device_id = i
procs.append(mp.Process(target=run, args=(args,
device_id, error_queue,), daemon=True))
procs[i].start()
logger.info(" Starting process pid: %d " % procs[i].pid)
error_handler.add_child(procs[i].pid)
for p in procs:
p.join() | [
"def",
"train_abs_multi",
"(",
"args",
")",
":",
"init_logger",
"(",
")",
"nb_gpu",
"=",
"args",
".",
"world_size",
"mp",
"=",
"torch",
".",
"multiprocessing",
".",
"get_context",
"(",
"'spawn'",
")",
"# Create a thread to listen for errors in the child processes.",
"error_queue",
"=",
"mp",
".",
"SimpleQueue",
"(",
")",
"error_handler",
"=",
"ErrorHandler",
"(",
"error_queue",
")",
"# Train with multiprocessing.",
"procs",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"nb_gpu",
")",
":",
"device_id",
"=",
"i",
"procs",
".",
"append",
"(",
"mp",
".",
"Process",
"(",
"target",
"=",
"run",
",",
"args",
"=",
"(",
"args",
",",
"device_id",
",",
"error_queue",
",",
")",
",",
"daemon",
"=",
"True",
")",
")",
"procs",
"[",
"i",
"]",
".",
"start",
"(",
")",
"logger",
".",
"info",
"(",
"\" Starting process pid: %d \"",
"%",
"procs",
"[",
"i",
"]",
".",
"pid",
")",
"error_handler",
".",
"add_child",
"(",
"procs",
"[",
"i",
"]",
".",
"pid",
")",
"for",
"p",
"in",
"procs",
":",
"p",
".",
"join",
"(",
")"
] | https://github.com/nlpyang/PreSumm/blob/70b810e0f06d179022958dd35c1a3385fe87f28c/src/train_abstractive.py#L39-L60 |
||
DataBiosphere/toil | 2e148eee2114ece8dcc3ec8a83f36333266ece0d | src/toil/jobStores/googleJobStore.py | python | GoogleJobStore.update_job | (self, job) | [] | def update_job(self, job):
job.pre_update_hook()
self._write_bytes(job.jobStoreID, pickle.dumps(job, protocol=pickle.HIGHEST_PROTOCOL), update=True) | [
"def",
"update_job",
"(",
"self",
",",
"job",
")",
":",
"job",
".",
"pre_update_hook",
"(",
")",
"self",
".",
"_write_bytes",
"(",
"job",
".",
"jobStoreID",
",",
"pickle",
".",
"dumps",
"(",
"job",
",",
"protocol",
"=",
"pickle",
".",
"HIGHEST_PROTOCOL",
")",
",",
"update",
"=",
"True",
")"
] | https://github.com/DataBiosphere/toil/blob/2e148eee2114ece8dcc3ec8a83f36333266ece0d/src/toil/jobStores/googleJobStore.py#L222-L224 |
||||
Nicotine-Plus/nicotine-plus | 6583532193e132206bb2096c77c6ad1ce96c21fa | pynicotine/search.py | python | Search.process_search_request | (self, searchterm, user, searchid, direct=False) | Note: since this section is accessed every time a search request arrives,
several times a second, please keep it as optimized and memory
sparse as possible! | Note: since this section is accessed every time a search request arrives,
several times a second, please keep it as optimized and memory
sparse as possible! | [
"Note",
":",
"since",
"this",
"section",
"is",
"accessed",
"every",
"time",
"a",
"search",
"request",
"arrives",
"several",
"times",
"a",
"second",
"please",
"keep",
"it",
"as",
"optimized",
"and",
"memory",
"sparse",
"as",
"possible!"
] | def process_search_request(self, searchterm, user, searchid, direct=False):
""" Note: since this section is accessed every time a search request arrives,
several times a second, please keep it as optimized and memory
sparse as possible! """
if not searchterm:
return
if not self.config.sections["searches"]["search_results"]:
# Don't return _any_ results when this option is disabled
return
if not direct and user == self.core.login_username:
# We shouldn't send a search response if we initiated the search request,
# unless we're specifically searching our own username
return
maxresults = self.config.sections["searches"]["maxresults"]
if maxresults == 0:
return
# Remember excluded/partial words for later
excluded_words = []
partial_words = []
if '-' in searchterm or '*' in searchterm:
for word in searchterm.split():
if len(word) < 1:
continue
if word.startswith('-'):
for subword in word.translate(self.translatepunctuation).split():
excluded_words.append(subword)
elif word.startswith('*'):
for subword in word.translate(self.translatepunctuation).split():
partial_words.append(subword)
# Strip punctuation
searchterm_old = searchterm
searchterm = searchterm.lower().translate(self.translatepunctuation).strip()
if len(searchterm) < self.config.sections["searches"]["min_search_chars"]:
# Don't send search response if search term contains too few characters
return
checkuser, _reason = self.core.network_filter.check_user(user, None)
if not checkuser:
return
if checkuser == 2:
wordindex = self.share_dbs.get("buddywordindex")
else:
wordindex = self.share_dbs.get("wordindex")
if wordindex is None:
return
# Find common file matches for each word in search term
resultlist = self.create_search_result_list(searchterm, wordindex, excluded_words, partial_words)
if not resultlist:
return
if checkuser == 2:
fileindex = self.share_dbs.get("buddyfileindex")
else:
fileindex = self.share_dbs.get("fileindex")
if fileindex is None:
return
fileinfos = []
numresults = min(len(resultlist), maxresults)
for index in islice(resultlist, numresults):
fileinfo = fileindex.get(repr(index))
if fileinfo is not None:
fileinfos.append(fileinfo)
if numresults != len(fileinfos):
log.add_debug(("File index inconsistency while responding to search request "
"\"%(query)s\". %(expected_num)s results expected, but only %(total_num)s "
"results were found in database."), {
"query": searchterm_old,
"expected_num": numresults,
"total_num": len(fileinfos)
})
numresults = len(fileinfos)
if not numresults:
return
uploadspeed = self.core.transfers.upload_speed
queuesize = self.core.transfers.get_upload_queue_size()
slotsavail = self.core.transfers.allow_new_uploads()
fifoqueue = self.config.sections["transfers"]["fifoqueue"]
message = slskmessages.FileSearchResult(
None, self.core.login_username,
searchid, fileinfos, slotsavail, uploadspeed, queuesize, fifoqueue)
self.core.send_message_to_peer(user, message)
if direct:
log.add_search(
_("User %(user)s is directly searching for \"%(query)s\", returning %(num)i results"), {
'user': user,
'query': searchterm_old,
'num': numresults
})
else:
log.add_search(
_("User %(user)s is searching for \"%(query)s\", returning %(num)i results"), {
'user': user,
'query': searchterm_old,
'num': numresults
}) | [
"def",
"process_search_request",
"(",
"self",
",",
"searchterm",
",",
"user",
",",
"searchid",
",",
"direct",
"=",
"False",
")",
":",
"if",
"not",
"searchterm",
":",
"return",
"if",
"not",
"self",
".",
"config",
".",
"sections",
"[",
"\"searches\"",
"]",
"[",
"\"search_results\"",
"]",
":",
"# Don't return _any_ results when this option is disabled",
"return",
"if",
"not",
"direct",
"and",
"user",
"==",
"self",
".",
"core",
".",
"login_username",
":",
"# We shouldn't send a search response if we initiated the search request,",
"# unless we're specifically searching our own username",
"return",
"maxresults",
"=",
"self",
".",
"config",
".",
"sections",
"[",
"\"searches\"",
"]",
"[",
"\"maxresults\"",
"]",
"if",
"maxresults",
"==",
"0",
":",
"return",
"# Remember excluded/partial words for later",
"excluded_words",
"=",
"[",
"]",
"partial_words",
"=",
"[",
"]",
"if",
"'-'",
"in",
"searchterm",
"or",
"'*'",
"in",
"searchterm",
":",
"for",
"word",
"in",
"searchterm",
".",
"split",
"(",
")",
":",
"if",
"len",
"(",
"word",
")",
"<",
"1",
":",
"continue",
"if",
"word",
".",
"startswith",
"(",
"'-'",
")",
":",
"for",
"subword",
"in",
"word",
".",
"translate",
"(",
"self",
".",
"translatepunctuation",
")",
".",
"split",
"(",
")",
":",
"excluded_words",
".",
"append",
"(",
"subword",
")",
"elif",
"word",
".",
"startswith",
"(",
"'*'",
")",
":",
"for",
"subword",
"in",
"word",
".",
"translate",
"(",
"self",
".",
"translatepunctuation",
")",
".",
"split",
"(",
")",
":",
"partial_words",
".",
"append",
"(",
"subword",
")",
"# Strip punctuation",
"searchterm_old",
"=",
"searchterm",
"searchterm",
"=",
"searchterm",
".",
"lower",
"(",
")",
".",
"translate",
"(",
"self",
".",
"translatepunctuation",
")",
".",
"strip",
"(",
")",
"if",
"len",
"(",
"searchterm",
")",
"<",
"self",
".",
"config",
".",
"sections",
"[",
"\"searches\"",
"]",
"[",
"\"min_search_chars\"",
"]",
":",
"# Don't send search response if search term contains too few characters",
"return",
"checkuser",
",",
"_reason",
"=",
"self",
".",
"core",
".",
"network_filter",
".",
"check_user",
"(",
"user",
",",
"None",
")",
"if",
"not",
"checkuser",
":",
"return",
"if",
"checkuser",
"==",
"2",
":",
"wordindex",
"=",
"self",
".",
"share_dbs",
".",
"get",
"(",
"\"buddywordindex\"",
")",
"else",
":",
"wordindex",
"=",
"self",
".",
"share_dbs",
".",
"get",
"(",
"\"wordindex\"",
")",
"if",
"wordindex",
"is",
"None",
":",
"return",
"# Find common file matches for each word in search term",
"resultlist",
"=",
"self",
".",
"create_search_result_list",
"(",
"searchterm",
",",
"wordindex",
",",
"excluded_words",
",",
"partial_words",
")",
"if",
"not",
"resultlist",
":",
"return",
"if",
"checkuser",
"==",
"2",
":",
"fileindex",
"=",
"self",
".",
"share_dbs",
".",
"get",
"(",
"\"buddyfileindex\"",
")",
"else",
":",
"fileindex",
"=",
"self",
".",
"share_dbs",
".",
"get",
"(",
"\"fileindex\"",
")",
"if",
"fileindex",
"is",
"None",
":",
"return",
"fileinfos",
"=",
"[",
"]",
"numresults",
"=",
"min",
"(",
"len",
"(",
"resultlist",
")",
",",
"maxresults",
")",
"for",
"index",
"in",
"islice",
"(",
"resultlist",
",",
"numresults",
")",
":",
"fileinfo",
"=",
"fileindex",
".",
"get",
"(",
"repr",
"(",
"index",
")",
")",
"if",
"fileinfo",
"is",
"not",
"None",
":",
"fileinfos",
".",
"append",
"(",
"fileinfo",
")",
"if",
"numresults",
"!=",
"len",
"(",
"fileinfos",
")",
":",
"log",
".",
"add_debug",
"(",
"(",
"\"File index inconsistency while responding to search request \"",
"\"\\\"%(query)s\\\". %(expected_num)s results expected, but only %(total_num)s \"",
"\"results were found in database.\"",
")",
",",
"{",
"\"query\"",
":",
"searchterm_old",
",",
"\"expected_num\"",
":",
"numresults",
",",
"\"total_num\"",
":",
"len",
"(",
"fileinfos",
")",
"}",
")",
"numresults",
"=",
"len",
"(",
"fileinfos",
")",
"if",
"not",
"numresults",
":",
"return",
"uploadspeed",
"=",
"self",
".",
"core",
".",
"transfers",
".",
"upload_speed",
"queuesize",
"=",
"self",
".",
"core",
".",
"transfers",
".",
"get_upload_queue_size",
"(",
")",
"slotsavail",
"=",
"self",
".",
"core",
".",
"transfers",
".",
"allow_new_uploads",
"(",
")",
"fifoqueue",
"=",
"self",
".",
"config",
".",
"sections",
"[",
"\"transfers\"",
"]",
"[",
"\"fifoqueue\"",
"]",
"message",
"=",
"slskmessages",
".",
"FileSearchResult",
"(",
"None",
",",
"self",
".",
"core",
".",
"login_username",
",",
"searchid",
",",
"fileinfos",
",",
"slotsavail",
",",
"uploadspeed",
",",
"queuesize",
",",
"fifoqueue",
")",
"self",
".",
"core",
".",
"send_message_to_peer",
"(",
"user",
",",
"message",
")",
"if",
"direct",
":",
"log",
".",
"add_search",
"(",
"_",
"(",
"\"User %(user)s is directly searching for \\\"%(query)s\\\", returning %(num)i results\"",
")",
",",
"{",
"'user'",
":",
"user",
",",
"'query'",
":",
"searchterm_old",
",",
"'num'",
":",
"numresults",
"}",
")",
"else",
":",
"log",
".",
"add_search",
"(",
"_",
"(",
"\"User %(user)s is searching for \\\"%(query)s\\\", returning %(num)i results\"",
")",
",",
"{",
"'user'",
":",
"user",
",",
"'query'",
":",
"searchterm_old",
",",
"'num'",
":",
"numresults",
"}",
")"
] | https://github.com/Nicotine-Plus/nicotine-plus/blob/6583532193e132206bb2096c77c6ad1ce96c21fa/pynicotine/search.py#L426-L547 |
||
cisco/mindmeld | 809c36112e9ea8019fe29d54d136ca14eb4fd8db | mindmeld/components/entity_resolver.py | python | BaseEntityResolver.unload | (self) | Unloads the model from memory. This helps reduce memory requirements while
training other models. | Unloads the model from memory. This helps reduce memory requirements while
training other models. | [
"Unloads",
"the",
"model",
"from",
"memory",
".",
"This",
"helps",
"reduce",
"memory",
"requirements",
"while",
"training",
"other",
"models",
"."
] | def unload(self):
"""
Unloads the model from memory. This helps reduce memory requirements while
training other models.
"""
self._unload()
self.resolver_configurations = {}
self.ready = False | [
"def",
"unload",
"(",
"self",
")",
":",
"self",
".",
"_unload",
"(",
")",
"self",
".",
"resolver_configurations",
"=",
"{",
"}",
"self",
".",
"ready",
"=",
"False"
] | https://github.com/cisco/mindmeld/blob/809c36112e9ea8019fe29d54d136ca14eb4fd8db/mindmeld/components/entity_resolver.py#L466-L473 |
||
CLUEbenchmark/CLUE | 5bd39732734afecb490cf18a5212e692dbf2c007 | baselines/models/bert/run_classifier.py | python | input_fn_builder | (features, seq_length, is_training, drop_remainder) | return input_fn | Creates an `input_fn` closure to be passed to TPUEstimator. | Creates an `input_fn` closure to be passed to TPUEstimator. | [
"Creates",
"an",
"input_fn",
"closure",
"to",
"be",
"passed",
"to",
"TPUEstimator",
"."
] | def input_fn_builder(features, seq_length, is_training, drop_remainder):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
all_input_ids = []
all_input_mask = []
all_segment_ids = []
all_label_ids = []
for feature in features:
all_input_ids.append(feature.input_ids)
all_input_mask.append(feature.input_mask)
all_segment_ids.append(feature.segment_ids)
all_label_ids.append(feature.label_id)
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
num_examples = len(features)
# This is for demo purposes and does NOT scale to large data sets. We do
# not use Dataset.from_generator() because that uses tf.py_func which is
# not TPU compatible. The right way to load data is with TFRecordReader.
d = tf.data.Dataset.from_tensor_slices({
"input_ids":
tf.constant(
all_input_ids, shape=[num_examples, seq_length],
dtype=tf.int32),
"input_mask":
tf.constant(
all_input_mask,
shape=[num_examples, seq_length],
dtype=tf.int32),
"segment_ids":
tf.constant(
all_segment_ids,
shape=[num_examples, seq_length],
dtype=tf.int32),
"label_ids":
tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32),
})
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)
return d
return input_fn | [
"def",
"input_fn_builder",
"(",
"features",
",",
"seq_length",
",",
"is_training",
",",
"drop_remainder",
")",
":",
"all_input_ids",
"=",
"[",
"]",
"all_input_mask",
"=",
"[",
"]",
"all_segment_ids",
"=",
"[",
"]",
"all_label_ids",
"=",
"[",
"]",
"for",
"feature",
"in",
"features",
":",
"all_input_ids",
".",
"append",
"(",
"feature",
".",
"input_ids",
")",
"all_input_mask",
".",
"append",
"(",
"feature",
".",
"input_mask",
")",
"all_segment_ids",
".",
"append",
"(",
"feature",
".",
"segment_ids",
")",
"all_label_ids",
".",
"append",
"(",
"feature",
".",
"label_id",
")",
"def",
"input_fn",
"(",
"params",
")",
":",
"\"\"\"The actual input function.\"\"\"",
"batch_size",
"=",
"params",
"[",
"\"batch_size\"",
"]",
"num_examples",
"=",
"len",
"(",
"features",
")",
"# This is for demo purposes and does NOT scale to large data sets. We do",
"# not use Dataset.from_generator() because that uses tf.py_func which is",
"# not TPU compatible. The right way to load data is with TFRecordReader.",
"d",
"=",
"tf",
".",
"data",
".",
"Dataset",
".",
"from_tensor_slices",
"(",
"{",
"\"input_ids\"",
":",
"tf",
".",
"constant",
"(",
"all_input_ids",
",",
"shape",
"=",
"[",
"num_examples",
",",
"seq_length",
"]",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
",",
"\"input_mask\"",
":",
"tf",
".",
"constant",
"(",
"all_input_mask",
",",
"shape",
"=",
"[",
"num_examples",
",",
"seq_length",
"]",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
",",
"\"segment_ids\"",
":",
"tf",
".",
"constant",
"(",
"all_segment_ids",
",",
"shape",
"=",
"[",
"num_examples",
",",
"seq_length",
"]",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
",",
"\"label_ids\"",
":",
"tf",
".",
"constant",
"(",
"all_label_ids",
",",
"shape",
"=",
"[",
"num_examples",
"]",
",",
"dtype",
"=",
"tf",
".",
"int32",
")",
",",
"}",
")",
"if",
"is_training",
":",
"d",
"=",
"d",
".",
"repeat",
"(",
")",
"d",
"=",
"d",
".",
"shuffle",
"(",
"buffer_size",
"=",
"100",
")",
"d",
"=",
"d",
".",
"batch",
"(",
"batch_size",
"=",
"batch_size",
",",
"drop_remainder",
"=",
"drop_remainder",
")",
"return",
"d",
"return",
"input_fn"
] | https://github.com/CLUEbenchmark/CLUE/blob/5bd39732734afecb490cf18a5212e692dbf2c007/baselines/models/bert/run_classifier.py#L641-L690 |
|
legolas123/cv-tricks.com | df831a73a99226026fda353c65b1a11cc6f18c9a | Tensorflow-tutorials/Tensorflow-slim-run-prediction/nets/inception_v4.py | python | block_reduction_b | (inputs, scope=None, reuse=None) | Builds Reduction-B block for Inception v4 network. | Builds Reduction-B block for Inception v4 network. | [
"Builds",
"Reduction",
"-",
"B",
"block",
"for",
"Inception",
"v4",
"network",
"."
] | def block_reduction_b(inputs, scope=None, reuse=None):
"""Builds Reduction-B block for Inception v4 network."""
# By default use stride=1 and SAME padding
with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d],
stride=1, padding='SAME'):
with tf.variable_scope(scope, 'BlockReductionB', [inputs], reuse=reuse):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(inputs, 192, [1, 1], scope='Conv2d_0a_1x1')
branch_0 = slim.conv2d(branch_0, 192, [3, 3], stride=2,
padding='VALID', scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(inputs, 256, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 256, [1, 7], scope='Conv2d_0b_1x7')
branch_1 = slim.conv2d(branch_1, 320, [7, 1], scope='Conv2d_0c_7x1')
branch_1 = slim.conv2d(branch_1, 320, [3, 3], stride=2,
padding='VALID', scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.max_pool2d(inputs, [3, 3], stride=2, padding='VALID',
scope='MaxPool_1a_3x3')
return tf.concat(3, [branch_0, branch_1, branch_2]) | [
"def",
"block_reduction_b",
"(",
"inputs",
",",
"scope",
"=",
"None",
",",
"reuse",
"=",
"None",
")",
":",
"# By default use stride=1 and SAME padding",
"with",
"slim",
".",
"arg_scope",
"(",
"[",
"slim",
".",
"conv2d",
",",
"slim",
".",
"avg_pool2d",
",",
"slim",
".",
"max_pool2d",
"]",
",",
"stride",
"=",
"1",
",",
"padding",
"=",
"'SAME'",
")",
":",
"with",
"tf",
".",
"variable_scope",
"(",
"scope",
",",
"'BlockReductionB'",
",",
"[",
"inputs",
"]",
",",
"reuse",
"=",
"reuse",
")",
":",
"with",
"tf",
".",
"variable_scope",
"(",
"'Branch_0'",
")",
":",
"branch_0",
"=",
"slim",
".",
"conv2d",
"(",
"inputs",
",",
"192",
",",
"[",
"1",
",",
"1",
"]",
",",
"scope",
"=",
"'Conv2d_0a_1x1'",
")",
"branch_0",
"=",
"slim",
".",
"conv2d",
"(",
"branch_0",
",",
"192",
",",
"[",
"3",
",",
"3",
"]",
",",
"stride",
"=",
"2",
",",
"padding",
"=",
"'VALID'",
",",
"scope",
"=",
"'Conv2d_1a_3x3'",
")",
"with",
"tf",
".",
"variable_scope",
"(",
"'Branch_1'",
")",
":",
"branch_1",
"=",
"slim",
".",
"conv2d",
"(",
"inputs",
",",
"256",
",",
"[",
"1",
",",
"1",
"]",
",",
"scope",
"=",
"'Conv2d_0a_1x1'",
")",
"branch_1",
"=",
"slim",
".",
"conv2d",
"(",
"branch_1",
",",
"256",
",",
"[",
"1",
",",
"7",
"]",
",",
"scope",
"=",
"'Conv2d_0b_1x7'",
")",
"branch_1",
"=",
"slim",
".",
"conv2d",
"(",
"branch_1",
",",
"320",
",",
"[",
"7",
",",
"1",
"]",
",",
"scope",
"=",
"'Conv2d_0c_7x1'",
")",
"branch_1",
"=",
"slim",
".",
"conv2d",
"(",
"branch_1",
",",
"320",
",",
"[",
"3",
",",
"3",
"]",
",",
"stride",
"=",
"2",
",",
"padding",
"=",
"'VALID'",
",",
"scope",
"=",
"'Conv2d_1a_3x3'",
")",
"with",
"tf",
".",
"variable_scope",
"(",
"'Branch_2'",
")",
":",
"branch_2",
"=",
"slim",
".",
"max_pool2d",
"(",
"inputs",
",",
"[",
"3",
",",
"3",
"]",
",",
"stride",
"=",
"2",
",",
"padding",
"=",
"'VALID'",
",",
"scope",
"=",
"'MaxPool_1a_3x3'",
")",
"return",
"tf",
".",
"concat",
"(",
"3",
",",
"[",
"branch_0",
",",
"branch_1",
",",
"branch_2",
"]",
")"
] | https://github.com/legolas123/cv-tricks.com/blob/df831a73a99226026fda353c65b1a11cc6f18c9a/Tensorflow-tutorials/Tensorflow-slim-run-prediction/nets/inception_v4.py#L99-L118 |
||
cuthbertLab/music21 | bd30d4663e52955ed922c10fdf541419d8c67671 | music21/interval.py | python | ChromaticInterval.transposePitch | (self, p, *, inPlace=False) | Given a :class:`~music21.pitch.Pitch` object, return a new,
transposed Pitch, that is transformed
according to this ChromaticInterval.
Because :class:`~music21.interval.ChromaticInterval` objects
do not take into account diatonic spelling,
the new Pitch is simplified to the most common intervals. See
:meth:`~music21.pitch.Pitch.simplifyEnharmonic` with ``mostCommon = True``
to see the results.
>>> tritone = interval.ChromaticInterval(6)
>>> p = pitch.Pitch('E#4')
>>> p2 = tritone.transposePitch(p)
>>> p2
<music21.pitch.Pitch B4>
>>> p3 = tritone.transposePitch(p2)
>>> p3
<music21.pitch.Pitch F5>
If no octave number is given then octaves "wrap around" and thus even
after transposing upward, you could end up with a pitch that is
displayed as lower than the original:
>>> p4 = pitch.Pitch('B')
>>> p4.ps
71.0
>>> p5 = tritone.transposePitch(p4)
Since the octave on p4 was implicit, the ps here wraps around
>>> p5.ps
65.0
Afterwards, spelling of the new pitch will always be inferred.
>>> p4.spellingIsInferred
False
>>> p5.spellingIsInferred
True
Can be done inPlace as well:
>>> p = pitch.Pitch('E#4')
>>> tritone.transposePitch(p, inPlace=True)
>>> p
<music21.pitch.Pitch B4>
>>> p.spellingIsInferred
True
Changed in v.6 -- added inPlace | Given a :class:`~music21.pitch.Pitch` object, return a new,
transposed Pitch, that is transformed
according to this ChromaticInterval. | [
"Given",
"a",
":",
"class",
":",
"~music21",
".",
"pitch",
".",
"Pitch",
"object",
"return",
"a",
"new",
"transposed",
"Pitch",
"that",
"is",
"transformed",
"according",
"to",
"this",
"ChromaticInterval",
"."
] | def transposePitch(self, p, *, inPlace=False):
# noinspection PyShadowingNames
'''
Given a :class:`~music21.pitch.Pitch` object, return a new,
transposed Pitch, that is transformed
according to this ChromaticInterval.
Because :class:`~music21.interval.ChromaticInterval` objects
do not take into account diatonic spelling,
the new Pitch is simplified to the most common intervals. See
:meth:`~music21.pitch.Pitch.simplifyEnharmonic` with ``mostCommon = True``
to see the results.
>>> tritone = interval.ChromaticInterval(6)
>>> p = pitch.Pitch('E#4')
>>> p2 = tritone.transposePitch(p)
>>> p2
<music21.pitch.Pitch B4>
>>> p3 = tritone.transposePitch(p2)
>>> p3
<music21.pitch.Pitch F5>
If no octave number is given then octaves "wrap around" and thus even
after transposing upward, you could end up with a pitch that is
displayed as lower than the original:
>>> p4 = pitch.Pitch('B')
>>> p4.ps
71.0
>>> p5 = tritone.transposePitch(p4)
Since the octave on p4 was implicit, the ps here wraps around
>>> p5.ps
65.0
Afterwards, spelling of the new pitch will always be inferred.
>>> p4.spellingIsInferred
False
>>> p5.spellingIsInferred
True
Can be done inPlace as well:
>>> p = pitch.Pitch('E#4')
>>> tritone.transposePitch(p, inPlace=True)
>>> p
<music21.pitch.Pitch B4>
>>> p.spellingIsInferred
True
Changed in v.6 -- added inPlace
'''
if p.octave is None:
useImplicitOctave = True
else:
useImplicitOctave = False
pps = p.ps
if not inPlace:
newPitch = copy.deepcopy(p)
else:
newPitch = p
newPitch.ps = pps + self.semitones
if useImplicitOctave is True:
newPitch.octave = None
if not inPlace:
return newPitch | [
"def",
"transposePitch",
"(",
"self",
",",
"p",
",",
"*",
",",
"inPlace",
"=",
"False",
")",
":",
"# noinspection PyShadowingNames",
"if",
"p",
".",
"octave",
"is",
"None",
":",
"useImplicitOctave",
"=",
"True",
"else",
":",
"useImplicitOctave",
"=",
"False",
"pps",
"=",
"p",
".",
"ps",
"if",
"not",
"inPlace",
":",
"newPitch",
"=",
"copy",
".",
"deepcopy",
"(",
"p",
")",
"else",
":",
"newPitch",
"=",
"p",
"newPitch",
".",
"ps",
"=",
"pps",
"+",
"self",
".",
"semitones",
"if",
"useImplicitOctave",
"is",
"True",
":",
"newPitch",
".",
"octave",
"=",
"None",
"if",
"not",
"inPlace",
":",
"return",
"newPitch"
] | https://github.com/cuthbertLab/music21/blob/bd30d4663e52955ed922c10fdf541419d8c67671/music21/interval.py#L2396-L2466 |
||
OpenEndedGroup/Field | 4f7c8edfb01bb0ccc927b78d3c500f018a4ae37c | Contents/lib/python/wsgiref/simple_server.py | python | WSGIServer.server_bind | (self) | Override server_bind to store the server name. | Override server_bind to store the server name. | [
"Override",
"server_bind",
"to",
"store",
"the",
"server",
"name",
"."
] | def server_bind(self):
"""Override server_bind to store the server name."""
HTTPServer.server_bind(self)
self.setup_environ() | [
"def",
"server_bind",
"(",
"self",
")",
":",
"HTTPServer",
".",
"server_bind",
"(",
"self",
")",
"self",
".",
"setup_environ",
"(",
")"
] | https://github.com/OpenEndedGroup/Field/blob/4f7c8edfb01bb0ccc927b78d3c500f018a4ae37c/Contents/lib/python/wsgiref/simple_server.py#L48-L51 |
||
buckyroberts/Vataxia | 6ae68e8602df3e0544a5ca62ffa847a8a1a83a90 | v1/user_roles/views/moderator.py | python | ModeratorView.get | (request) | return Response(ModeratorSerializer(moderators, many=True).data) | List moderators | List moderators | [
"List",
"moderators"
] | def get(request):
"""
List moderators
"""
moderators = Moderator.objects.all()
return Response(ModeratorSerializer(moderators, many=True).data) | [
"def",
"get",
"(",
"request",
")",
":",
"moderators",
"=",
"Moderator",
".",
"objects",
".",
"all",
"(",
")",
"return",
"Response",
"(",
"ModeratorSerializer",
"(",
"moderators",
",",
"many",
"=",
"True",
")",
".",
"data",
")"
] | https://github.com/buckyroberts/Vataxia/blob/6ae68e8602df3e0544a5ca62ffa847a8a1a83a90/v1/user_roles/views/moderator.py#L15-L21 |
|
CGATOxford/cgat | 326aad4694bdfae8ddc194171bb5d73911243947 | CGAT/scripts/gff_decorate.py | python | decorator_counts | (intervals, start, end, contig, fasta) | return d['nval'], str(d) | compute length distribution. | compute length distribution. | [
"compute",
"length",
"distribution",
"."
] | def decorator_counts(intervals, start, end, contig, fasta):
"""compute length distribution."""
d = Stats.DistributionalParameters([x[1] - x[0] for x in intervals])
return d['nval'], str(d) | [
"def",
"decorator_counts",
"(",
"intervals",
",",
"start",
",",
"end",
",",
"contig",
",",
"fasta",
")",
":",
"d",
"=",
"Stats",
".",
"DistributionalParameters",
"(",
"[",
"x",
"[",
"1",
"]",
"-",
"x",
"[",
"0",
"]",
"for",
"x",
"in",
"intervals",
"]",
")",
"return",
"d",
"[",
"'nval'",
"]",
",",
"str",
"(",
"d",
")"
] | https://github.com/CGATOxford/cgat/blob/326aad4694bdfae8ddc194171bb5d73911243947/CGAT/scripts/gff_decorate.py#L55-L58 |
|
CGATOxford/cgat | 326aad4694bdfae8ddc194171bb5d73911243947 | obsolete/pipeline_xtev.py | python | loadExons | ( infile, outfile ) | load BED file into database | load BED file into database | [
"load",
"BED",
"file",
"into",
"database"
] | def loadExons( infile, outfile ):
'''load BED file into database '''
headers = "contig,start,end,transcript_id,score,strand"
statement = """cat %(infile)s | python ~/src/csv2db.py
--header=%(headers)s
--database=%(database)s
--table=xtev_exons
--index=contig,start
--index=transcript_id
> %(outfile)s; """
P.run() | [
"def",
"loadExons",
"(",
"infile",
",",
"outfile",
")",
":",
"headers",
"=",
"\"contig,start,end,transcript_id,score,strand\"",
"statement",
"=",
"\"\"\"cat %(infile)s | python ~/src/csv2db.py \n --header=%(headers)s\n --database=%(database)s\n --table=xtev_exons\n --index=contig,start\n --index=transcript_id\n > %(outfile)s; \"\"\"",
"P",
".",
"run",
"(",
")"
] | https://github.com/CGATOxford/cgat/blob/326aad4694bdfae8ddc194171bb5d73911243947/obsolete/pipeline_xtev.py#L158-L168 |
||
python-diamond/Diamond | 7000e16cfdf4508ed9291fc4b3800592557b2431 | src/collectors/exim/exim.py | python | EximCollector.get_default_config_help | (self) | return config_help | [] | def get_default_config_help(self):
config_help = super(EximCollector, self).get_default_config_help()
config_help.update({
'bin': 'The path to the exim binary',
'use_sudo': 'Use sudo?',
'sudo_cmd': 'Path to sudo',
'sudo_user': 'User to sudo as',
})
return config_help | [
"def",
"get_default_config_help",
"(",
"self",
")",
":",
"config_help",
"=",
"super",
"(",
"EximCollector",
",",
"self",
")",
".",
"get_default_config_help",
"(",
")",
"config_help",
".",
"update",
"(",
"{",
"'bin'",
":",
"'The path to the exim binary'",
",",
"'use_sudo'",
":",
"'Use sudo?'",
",",
"'sudo_cmd'",
":",
"'Path to sudo'",
",",
"'sudo_user'",
":",
"'User to sudo as'",
",",
"}",
")",
"return",
"config_help"
] | https://github.com/python-diamond/Diamond/blob/7000e16cfdf4508ed9291fc4b3800592557b2431/src/collectors/exim/exim.py#L20-L28 |
|||
savio-code/fern-wifi-cracker | 0da03aba988c66dfa131a45824568abb84b7704a | Fern-Wifi-Cracker/core/wpa.py | python | wpa_attack_dialog.update_progress_bar | (self) | [] | def update_progress_bar(self):
self.progressBar.setValue(self.word_number) | [
"def",
"update_progress_bar",
"(",
"self",
")",
":",
"self",
".",
"progressBar",
".",
"setValue",
"(",
"self",
".",
"word_number",
")"
] | https://github.com/savio-code/fern-wifi-cracker/blob/0da03aba988c66dfa131a45824568abb84b7704a/Fern-Wifi-Cracker/core/wpa.py#L460-L461 |
||||
facebookresearch/ClassyVision | 309d4f12431c6b4d8540010a781dc2aa25fe88e7 | classy_vision/meters/classy_meter.py | python | ClassyMeter.name | (self) | The name of the meter. | The name of the meter. | [
"The",
"name",
"of",
"the",
"meter",
"."
] | def name(self) -> str:
"""The name of the meter."""
raise NotImplementedError | [
"def",
"name",
"(",
"self",
")",
"->",
"str",
":",
"raise",
"NotImplementedError"
] | https://github.com/facebookresearch/ClassyVision/blob/309d4f12431c6b4d8540010a781dc2aa25fe88e7/classy_vision/meters/classy_meter.py#L36-L38 |
||
lifting-bits/mcsema | b7492e2ffb9f2fa64b5ef269753ba1d134bdf2f2 | tools/mcsema_disass/ida7/util.py | python | is_external_segment | (ea) | return False | Returns `True` if the segment containing `ea` looks to be solely containing
external references. | Returns `True` if the segment containing `ea` looks to be solely containing
external references. | [
"Returns",
"True",
"if",
"the",
"segment",
"containing",
"ea",
"looks",
"to",
"be",
"solely",
"containing",
"external",
"references",
"."
] | def is_external_segment(ea):
"""Returns `True` if the segment containing `ea` looks to be solely containing
external references."""
global _NOT_EXTERNAL_SEGMENTS
seg_ea = idc.get_segm_start(ea)
if seg_ea in _NOT_EXTERNAL_SEGMENTS:
return False
if seg_ea in _EXTERNAL_SEGMENTS:
return True
if is_external_segment_by_flags(ea):
_EXTERNAL_SEGMENTS.add(seg_ea)
return True
ext_types = []
seg_name = idc.get_segm_name(seg_ea).lower()
if IS_ELF:
if ".got" in seg_name or ".plt" in seg_name:
_EXTERNAL_SEGMENTS.add(seg_ea)
return True
elif IS_PE:
if ".idata" == seg_name: # Import table.
_EXTERNAL_SEGMENTS.add(seg_ea)
return True
_NOT_EXTERNAL_SEGMENTS.add(seg_ea)
return False | [
"def",
"is_external_segment",
"(",
"ea",
")",
":",
"global",
"_NOT_EXTERNAL_SEGMENTS",
"seg_ea",
"=",
"idc",
".",
"get_segm_start",
"(",
"ea",
")",
"if",
"seg_ea",
"in",
"_NOT_EXTERNAL_SEGMENTS",
":",
"return",
"False",
"if",
"seg_ea",
"in",
"_EXTERNAL_SEGMENTS",
":",
"return",
"True",
"if",
"is_external_segment_by_flags",
"(",
"ea",
")",
":",
"_EXTERNAL_SEGMENTS",
".",
"add",
"(",
"seg_ea",
")",
"return",
"True",
"ext_types",
"=",
"[",
"]",
"seg_name",
"=",
"idc",
".",
"get_segm_name",
"(",
"seg_ea",
")",
".",
"lower",
"(",
")",
"if",
"IS_ELF",
":",
"if",
"\".got\"",
"in",
"seg_name",
"or",
"\".plt\"",
"in",
"seg_name",
":",
"_EXTERNAL_SEGMENTS",
".",
"add",
"(",
"seg_ea",
")",
"return",
"True",
"elif",
"IS_PE",
":",
"if",
"\".idata\"",
"==",
"seg_name",
":",
"# Import table.",
"_EXTERNAL_SEGMENTS",
".",
"add",
"(",
"seg_ea",
")",
"return",
"True",
"_NOT_EXTERNAL_SEGMENTS",
".",
"add",
"(",
"seg_ea",
")",
"return",
"False"
] | https://github.com/lifting-bits/mcsema/blob/b7492e2ffb9f2fa64b5ef269753ba1d134bdf2f2/tools/mcsema_disass/ida7/util.py#L356-L386 |
|
NISH1001/playx | 9050f0c5f9fef7b9c9b14a7f26a055684e260d4c | playx/playlist/ytrelated.py | python | YoutubeRelatedIE._create_mix | (self) | In order to get the playlist, we need to make a request
to youtube music.
YT Music uses JS to automatically update the page URL with
the playlist ID.
This is when we extract the list ID.
Since it does all of it using JS, we can't use requests or
something similar. | In order to get the playlist, we need to make a request
to youtube music.
YT Music uses JS to automatically update the page URL with
the playlist ID.
This is when we extract the list ID. | [
"In",
"order",
"to",
"get",
"the",
"playlist",
"we",
"need",
"to",
"make",
"a",
"request",
"to",
"youtube",
"music",
".",
"YT",
"Music",
"uses",
"JS",
"to",
"automatically",
"update",
"the",
"page",
"URL",
"with",
"the",
"playlist",
"ID",
".",
"This",
"is",
"when",
"we",
"extract",
"the",
"list",
"ID",
"."
] | def _create_mix(self):
"""
In order to get the playlist, we need to make a request
to youtube music.
YT Music uses JS to automatically update the page URL with
the playlist ID.
This is when we extract the list ID.
Since it does all of it using JS, we can't use requests or
something similar.
"""
logger.info("Using YTMusic Method")
logger.debug(self.url)
driver = self._get_driver()
driver.get(self.url)
try:
WebDriverWait(driver, 10).until(
lambda driver: driver.current_url != self.url
)
except TimeoutException:
raise DownloadError("Timeout exception occurred")
# The URL should now be updated
updated_url = driver.current_url
playlist_id = updated_url.split("=")[-1]
playlist_url = "https://www.youtube.com/playlist?list={}".format(playlist_id)
self._get_playlist_data(playlist_url) | [
"def",
"_create_mix",
"(",
"self",
")",
":",
"logger",
".",
"info",
"(",
"\"Using YTMusic Method\"",
")",
"logger",
".",
"debug",
"(",
"self",
".",
"url",
")",
"driver",
"=",
"self",
".",
"_get_driver",
"(",
")",
"driver",
".",
"get",
"(",
"self",
".",
"url",
")",
"try",
":",
"WebDriverWait",
"(",
"driver",
",",
"10",
")",
".",
"until",
"(",
"lambda",
"driver",
":",
"driver",
".",
"current_url",
"!=",
"self",
".",
"url",
")",
"except",
"TimeoutException",
":",
"raise",
"DownloadError",
"(",
"\"Timeout exception occurred\"",
")",
"# The URL should now be updated",
"updated_url",
"=",
"driver",
".",
"current_url",
"playlist_id",
"=",
"updated_url",
".",
"split",
"(",
"\"=\"",
")",
"[",
"-",
"1",
"]",
"playlist_url",
"=",
"\"https://www.youtube.com/playlist?list={}\"",
".",
"format",
"(",
"playlist_id",
")",
"self",
".",
"_get_playlist_data",
"(",
"playlist_url",
")"
] | https://github.com/NISH1001/playx/blob/9050f0c5f9fef7b9c9b14a7f26a055684e260d4c/playx/playlist/ytrelated.py#L91-L118 |
||
mdiazcl/fuzzbunch-debian | 2b76c2249ade83a389ae3badb12a1bd09901fd2c | windows/Resources/Python/Core/Lib/textwrap.py | python | TextWrapper._split | (self, text) | return chunks | _split(text : string) -> [string]
Split the text to wrap into indivisible chunks. Chunks are
not quite the same as words; see _wrap_chunks() for full
details. As an example, the text
Look, goof-ball -- use the -b option!
breaks into the following chunks:
'Look,', ' ', 'goof-', 'ball', ' ', '--', ' ',
'use', ' ', 'the', ' ', '-b', ' ', 'option!'
if break_on_hyphens is True, or in:
'Look,', ' ', 'goof-ball', ' ', '--', ' ',
'use', ' ', 'the', ' ', '-b', ' ', 'option!'
otherwise. | _split(text : string) -> [string]
Split the text to wrap into indivisible chunks. Chunks are
not quite the same as words; see _wrap_chunks() for full
details. As an example, the text
Look, goof-ball -- use the -b option!
breaks into the following chunks:
'Look,', ' ', 'goof-', 'ball', ' ', '--', ' ',
'use', ' ', 'the', ' ', '-b', ' ', 'option!'
if break_on_hyphens is True, or in:
'Look,', ' ', 'goof-ball', ' ', '--', ' ',
'use', ' ', 'the', ' ', '-b', ' ', 'option!'
otherwise. | [
"_split",
"(",
"text",
":",
"string",
")",
"-",
">",
"[",
"string",
"]",
"Split",
"the",
"text",
"to",
"wrap",
"into",
"indivisible",
"chunks",
".",
"Chunks",
"are",
"not",
"quite",
"the",
"same",
"as",
"words",
";",
"see",
"_wrap_chunks",
"()",
"for",
"full",
"details",
".",
"As",
"an",
"example",
"the",
"text",
"Look",
"goof",
"-",
"ball",
"--",
"use",
"the",
"-",
"b",
"option!",
"breaks",
"into",
"the",
"following",
"chunks",
":",
"Look",
"goof",
"-",
"ball",
"--",
"use",
"the",
"-",
"b",
"option!",
"if",
"break_on_hyphens",
"is",
"True",
"or",
"in",
":",
"Look",
"goof",
"-",
"ball",
"--",
"use",
"the",
"-",
"b",
"option!",
"otherwise",
"."
] | def _split(self, text):
"""_split(text : string) -> [string]
Split the text to wrap into indivisible chunks. Chunks are
not quite the same as words; see _wrap_chunks() for full
details. As an example, the text
Look, goof-ball -- use the -b option!
breaks into the following chunks:
'Look,', ' ', 'goof-', 'ball', ' ', '--', ' ',
'use', ' ', 'the', ' ', '-b', ' ', 'option!'
if break_on_hyphens is True, or in:
'Look,', ' ', 'goof-ball', ' ', '--', ' ',
'use', ' ', 'the', ' ', '-b', ' ', 'option!'
otherwise.
"""
if isinstance(text, unicode):
if self.break_on_hyphens:
pat = self.wordsep_re_uni
else:
pat = self.wordsep_simple_re_uni
elif self.break_on_hyphens:
pat = self.wordsep_re
else:
pat = self.wordsep_simple_re
chunks = pat.split(text)
chunks = filter(None, chunks)
return chunks | [
"def",
"_split",
"(",
"self",
",",
"text",
")",
":",
"if",
"isinstance",
"(",
"text",
",",
"unicode",
")",
":",
"if",
"self",
".",
"break_on_hyphens",
":",
"pat",
"=",
"self",
".",
"wordsep_re_uni",
"else",
":",
"pat",
"=",
"self",
".",
"wordsep_simple_re_uni",
"elif",
"self",
".",
"break_on_hyphens",
":",
"pat",
"=",
"self",
".",
"wordsep_re",
"else",
":",
"pat",
"=",
"self",
".",
"wordsep_simple_re",
"chunks",
"=",
"pat",
".",
"split",
"(",
"text",
")",
"chunks",
"=",
"filter",
"(",
"None",
",",
"chunks",
")",
"return",
"chunks"
] | https://github.com/mdiazcl/fuzzbunch-debian/blob/2b76c2249ade83a389ae3badb12a1bd09901fd2c/windows/Resources/Python/Core/Lib/textwrap.py#L97-L123 |
|
zhl2008/awd-platform | 0416b31abea29743387b10b3914581fbe8e7da5e | web_flaskbb/lib/python2.7/site-packages/PIL/ImageDraw.py | python | ImageDraw.bitmap | (self, xy, bitmap, fill=None) | Draw a bitmap. | Draw a bitmap. | [
"Draw",
"a",
"bitmap",
"."
] | def bitmap(self, xy, bitmap, fill=None):
"""Draw a bitmap."""
bitmap.load()
ink, fill = self._getink(fill)
if ink is None:
ink = fill
if ink is not None:
self.draw.draw_bitmap(xy, bitmap.im, ink) | [
"def",
"bitmap",
"(",
"self",
",",
"xy",
",",
"bitmap",
",",
"fill",
"=",
"None",
")",
":",
"bitmap",
".",
"load",
"(",
")",
"ink",
",",
"fill",
"=",
"self",
".",
"_getink",
"(",
"fill",
")",
"if",
"ink",
"is",
"None",
":",
"ink",
"=",
"fill",
"if",
"ink",
"is",
"not",
"None",
":",
"self",
".",
"draw",
".",
"draw_bitmap",
"(",
"xy",
",",
"bitmap",
".",
"im",
",",
"ink",
")"
] | https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/lib/python2.7/site-packages/PIL/ImageDraw.py#L127-L134 |
||
securityclippy/elasticintel | aa08d3e9f5ab1c000128e95161139ce97ff0e334 | ingest_feed_lambda/pandas/io/common.py | python | get_filepath_or_buffer | (filepath_or_buffer, encoding=None,
compression=None) | return filepath_or_buffer, None, compression | If the filepath_or_buffer is a url, translate and return the buffer.
Otherwise passthrough.
Parameters
----------
filepath_or_buffer : a url, filepath (str, py.path.local or pathlib.Path),
or buffer
encoding : the encoding to use to decode py3 bytes, default is 'utf-8'
Returns
-------
a filepath_or_buffer, the encoding, the compression | If the filepath_or_buffer is a url, translate and return the buffer.
Otherwise passthrough. | [
"If",
"the",
"filepath_or_buffer",
"is",
"a",
"url",
"translate",
"and",
"return",
"the",
"buffer",
".",
"Otherwise",
"passthrough",
"."
] | def get_filepath_or_buffer(filepath_or_buffer, encoding=None,
compression=None):
"""
If the filepath_or_buffer is a url, translate and return the buffer.
Otherwise passthrough.
Parameters
----------
filepath_or_buffer : a url, filepath (str, py.path.local or pathlib.Path),
or buffer
encoding : the encoding to use to decode py3 bytes, default is 'utf-8'
Returns
-------
a filepath_or_buffer, the encoding, the compression
"""
filepath_or_buffer = _stringify_path(filepath_or_buffer)
if _is_url(filepath_or_buffer):
req = _urlopen(filepath_or_buffer)
content_encoding = req.headers.get('Content-Encoding', None)
if content_encoding == 'gzip':
# Override compression based on Content-Encoding header
compression = 'gzip'
reader = BytesIO(req.read())
return reader, encoding, compression
if _is_s3_url(filepath_or_buffer):
from pandas.io import s3
return s3.get_filepath_or_buffer(filepath_or_buffer,
encoding=encoding,
compression=compression)
if isinstance(filepath_or_buffer, (compat.string_types,
compat.binary_type,
mmap.mmap)):
return _expand_user(filepath_or_buffer), None, compression
if not is_file_like(filepath_or_buffer):
msg = "Invalid file path or buffer object type: {_type}"
raise ValueError(msg.format(_type=type(filepath_or_buffer)))
return filepath_or_buffer, None, compression | [
"def",
"get_filepath_or_buffer",
"(",
"filepath_or_buffer",
",",
"encoding",
"=",
"None",
",",
"compression",
"=",
"None",
")",
":",
"filepath_or_buffer",
"=",
"_stringify_path",
"(",
"filepath_or_buffer",
")",
"if",
"_is_url",
"(",
"filepath_or_buffer",
")",
":",
"req",
"=",
"_urlopen",
"(",
"filepath_or_buffer",
")",
"content_encoding",
"=",
"req",
".",
"headers",
".",
"get",
"(",
"'Content-Encoding'",
",",
"None",
")",
"if",
"content_encoding",
"==",
"'gzip'",
":",
"# Override compression based on Content-Encoding header",
"compression",
"=",
"'gzip'",
"reader",
"=",
"BytesIO",
"(",
"req",
".",
"read",
"(",
")",
")",
"return",
"reader",
",",
"encoding",
",",
"compression",
"if",
"_is_s3_url",
"(",
"filepath_or_buffer",
")",
":",
"from",
"pandas",
".",
"io",
"import",
"s3",
"return",
"s3",
".",
"get_filepath_or_buffer",
"(",
"filepath_or_buffer",
",",
"encoding",
"=",
"encoding",
",",
"compression",
"=",
"compression",
")",
"if",
"isinstance",
"(",
"filepath_or_buffer",
",",
"(",
"compat",
".",
"string_types",
",",
"compat",
".",
"binary_type",
",",
"mmap",
".",
"mmap",
")",
")",
":",
"return",
"_expand_user",
"(",
"filepath_or_buffer",
")",
",",
"None",
",",
"compression",
"if",
"not",
"is_file_like",
"(",
"filepath_or_buffer",
")",
":",
"msg",
"=",
"\"Invalid file path or buffer object type: {_type}\"",
"raise",
"ValueError",
"(",
"msg",
".",
"format",
"(",
"_type",
"=",
"type",
"(",
"filepath_or_buffer",
")",
")",
")",
"return",
"filepath_or_buffer",
",",
"None",
",",
"compression"
] | https://github.com/securityclippy/elasticintel/blob/aa08d3e9f5ab1c000128e95161139ce97ff0e334/ingest_feed_lambda/pandas/io/common.py#L171-L213 |
|
HunterMcGushion/hyperparameter_hunter | 28b1d48e01a993818510811b82a677e0a7a232b2 | hyperparameter_hunter/utils/result_utils.py | python | format_predictions | (
raw_predictions: np.array, dataset_df: pd.DataFrame, target_column: str, id_column: str = None
) | return predictions | Organize components into a pandas.DataFrame that is properly formatted and ready to save
Parameters
----------
raw_predictions: np.array
The actual predictions that were made and that should inhabit the column named
`target_column` in the result
dataset_df: pd.DataFrame
The original data provided that yielded `raw_predictions`. If `id_column` is not None, it
must be in `dataset_df`. In practice, expect this value to be one of the following:
:attr:`experiments.BaseExperiment.train_dataset`,
:attr:`experiments.BaseExperiment.holdout_dataset`, or
:attr:`experiments.BaseExperiment.test_dataset`
target_column: str
The name for the result column containing `raw_predictions`
id_column: str, or None, default=None
If not None, must be the name of a column in `dataset_df`, the contents of which will be
included as a column in the result and are assumed to be sample identifiers of some kind
Returns
-------
predictions: pd.DataFrame
Dataframe containing the formatted predictions | Organize components into a pandas.DataFrame that is properly formatted and ready to save | [
"Organize",
"components",
"into",
"a",
"pandas",
".",
"DataFrame",
"that",
"is",
"properly",
"formatted",
"and",
"ready",
"to",
"save"
] | def format_predictions(
raw_predictions: np.array, dataset_df: pd.DataFrame, target_column: str, id_column: str = None
):
"""Organize components into a pandas.DataFrame that is properly formatted and ready to save
Parameters
----------
raw_predictions: np.array
The actual predictions that were made and that should inhabit the column named
`target_column` in the result
dataset_df: pd.DataFrame
The original data provided that yielded `raw_predictions`. If `id_column` is not None, it
must be in `dataset_df`. In practice, expect this value to be one of the following:
:attr:`experiments.BaseExperiment.train_dataset`,
:attr:`experiments.BaseExperiment.holdout_dataset`, or
:attr:`experiments.BaseExperiment.test_dataset`
target_column: str
The name for the result column containing `raw_predictions`
id_column: str, or None, default=None
If not None, must be the name of a column in `dataset_df`, the contents of which will be
included as a column in the result and are assumed to be sample identifiers of some kind
Returns
-------
predictions: pd.DataFrame
Dataframe containing the formatted predictions"""
predictions = pd.DataFrame()
if id_column is not None:
predictions[id_column] = dataset_df[id_column]
predictions[target_column] = raw_predictions
predictions.reset_index(inplace=True, drop=True)
return predictions | [
"def",
"format_predictions",
"(",
"raw_predictions",
":",
"np",
".",
"array",
",",
"dataset_df",
":",
"pd",
".",
"DataFrame",
",",
"target_column",
":",
"str",
",",
"id_column",
":",
"str",
"=",
"None",
")",
":",
"predictions",
"=",
"pd",
".",
"DataFrame",
"(",
")",
"if",
"id_column",
"is",
"not",
"None",
":",
"predictions",
"[",
"id_column",
"]",
"=",
"dataset_df",
"[",
"id_column",
"]",
"predictions",
"[",
"target_column",
"]",
"=",
"raw_predictions",
"predictions",
".",
"reset_index",
"(",
"inplace",
"=",
"True",
",",
"drop",
"=",
"True",
")",
"return",
"predictions"
] | https://github.com/HunterMcGushion/hyperparameter_hunter/blob/28b1d48e01a993818510811b82a677e0a7a232b2/hyperparameter_hunter/utils/result_utils.py#L31-L62 |
|
gammapy/gammapy | 735b25cd5bbed35e2004d633621896dcd5295e8b | gammapy/estimators/map/ts.py | python | TSMapEstimator.estimate_pad_width | (self, dataset, kernel=None) | return tuple(pad_width) | Estimate pad width of the dataset
Parameters
----------
dataset : `MapDataset`
Input MapDataset.
kernel : `WcsNDMap`
Source model kernel.
Returns
-------
pad_width : tuple
Padding width | Estimate pad width of the dataset | [
"Estimate",
"pad",
"width",
"of",
"the",
"dataset"
] | def estimate_pad_width(self, dataset, kernel=None):
"""Estimate pad width of the dataset
Parameters
----------
dataset : `MapDataset`
Input MapDataset.
kernel : `WcsNDMap`
Source model kernel.
Returns
-------
pad_width : tuple
Padding width
"""
if kernel is None:
kernel = self.estimate_kernel(dataset=dataset)
geom = dataset.counts.geom.to_image()
geom_kernel = kernel.geom.to_image()
pad_width = np.array(geom_kernel.data_shape) // 2
if self.downsampling_factor and self.downsampling_factor > 1:
shape = tuple(np.array(geom.data_shape) + 2 * pad_width)
pad_width = symmetric_crop_pad_width(geom.data_shape, shape_2N(shape))[0]
return tuple(pad_width) | [
"def",
"estimate_pad_width",
"(",
"self",
",",
"dataset",
",",
"kernel",
"=",
"None",
")",
":",
"if",
"kernel",
"is",
"None",
":",
"kernel",
"=",
"self",
".",
"estimate_kernel",
"(",
"dataset",
"=",
"dataset",
")",
"geom",
"=",
"dataset",
".",
"counts",
".",
"geom",
".",
"to_image",
"(",
")",
"geom_kernel",
"=",
"kernel",
".",
"geom",
".",
"to_image",
"(",
")",
"pad_width",
"=",
"np",
".",
"array",
"(",
"geom_kernel",
".",
"data_shape",
")",
"//",
"2",
"if",
"self",
".",
"downsampling_factor",
"and",
"self",
".",
"downsampling_factor",
">",
"1",
":",
"shape",
"=",
"tuple",
"(",
"np",
".",
"array",
"(",
"geom",
".",
"data_shape",
")",
"+",
"2",
"*",
"pad_width",
")",
"pad_width",
"=",
"symmetric_crop_pad_width",
"(",
"geom",
".",
"data_shape",
",",
"shape_2N",
"(",
"shape",
")",
")",
"[",
"0",
"]",
"return",
"tuple",
"(",
"pad_width",
")"
] | https://github.com/gammapy/gammapy/blob/735b25cd5bbed35e2004d633621896dcd5295e8b/gammapy/estimators/map/ts.py#L293-L320 |
|
alexandrebarachant/Grasp-and-lift-EEG-challenge | 36fe555d523c3ca3f201e765b1b1004dc5383dd2 | preprocessing/erp.py | python | toMNE | (X, y=None) | | return raw | Transform array into MNE for epoching. | Transform array into MNE for epoching. | [
"Tranform",
"array",
"into",
"MNE",
"for",
"epoching",
"."
] | def toMNE(X, y=None):
"""Tranform array into MNE for epoching."""
ch_names = getChannelNames()
montage = read_montage('standard_1005', ch_names)
ch_type = ['eeg']*len(ch_names)
data = X.T
if y is not None:
y = y.transpose()
ch_type.extend(['stim']*6)
event_names = getEventNames()
ch_names.extend(event_names)
# concatenate event file and data
data = np.concatenate((data, y))
info = create_info(ch_names, sfreq=500.0, ch_types=ch_type,
montage=montage)
raw = RawArray(data, info, verbose=False)
return raw | [
"def",
"toMNE",
"(",
"X",
",",
"y",
"=",
"None",
")",
":",
"ch_names",
"=",
"getChannelNames",
"(",
")",
"montage",
"=",
"read_montage",
"(",
"'standard_1005'",
",",
"ch_names",
")",
"ch_type",
"=",
"[",
"'eeg'",
"]",
"*",
"len",
"(",
"ch_names",
")",
"data",
"=",
"X",
".",
"T",
"if",
"y",
"is",
"not",
"None",
":",
"y",
"=",
"y",
".",
"transpose",
"(",
")",
"ch_type",
".",
"extend",
"(",
"[",
"'stim'",
"]",
"*",
"6",
")",
"event_names",
"=",
"getEventNames",
"(",
")",
"ch_names",
".",
"extend",
"(",
"event_names",
")",
"# concatenate event file and data",
"data",
"=",
"np",
".",
"concatenate",
"(",
"(",
"data",
",",
"y",
")",
")",
"info",
"=",
"create_info",
"(",
"ch_names",
",",
"sfreq",
"=",
"500.0",
",",
"ch_types",
"=",
"ch_type",
",",
"montage",
"=",
"montage",
")",
"raw",
"=",
"RawArray",
"(",
"data",
",",
"info",
",",
"verbose",
"=",
"False",
")",
"return",
"raw"
] | https://github.com/alexandrebarachant/Grasp-and-lift-EEG-challenge/blob/36fe555d523c3ca3f201e765b1b1004dc5383dd2/preprocessing/erp.py#L22-L38 |
|
decalage2/olefile | 5ae06e937cd18afebfb49239e8f20b099605136f | olefile/olefile.py | python | get_logger | (name, level=logging.CRITICAL+1) | return logger | Create a suitable logger object for this module.
The goal is not to change settings of the root logger, to avoid getting
other modules' logs on the screen.
If a logger exists with same name, reuse it. (Else it would have duplicate
handlers and messages would be doubled.)
The level is set to CRITICAL+1 by default, to avoid any logging. | Create a suitable logger object for this module.
The goal is not to change settings of the root logger, to avoid getting
other modules' logs on the screen.
If a logger exists with same name, reuse it. (Else it would have duplicate
handlers and messages would be doubled.)
The level is set to CRITICAL+1 by default, to avoid any logging. | [
"Create",
"a",
"suitable",
"logger",
"object",
"for",
"this",
"module",
".",
"The",
"goal",
"is",
"not",
"to",
"change",
"settings",
"of",
"the",
"root",
"logger",
"to",
"avoid",
"getting",
"other",
"modules",
"logs",
"on",
"the",
"screen",
".",
"If",
"a",
"logger",
"exists",
"with",
"same",
"name",
"reuse",
"it",
".",
"(",
"Else",
"it",
"would",
"have",
"duplicate",
"handlers",
"and",
"messages",
"would",
"be",
"doubled",
".",
")",
"The",
"level",
"is",
"set",
"to",
"CRITICAL",
"+",
"1",
"by",
"default",
"to",
"avoid",
"any",
"logging",
"."
] | def get_logger(name, level=logging.CRITICAL+1):
"""
Create a suitable logger object for this module.
The goal is not to change settings of the root logger, to avoid getting
other modules' logs on the screen.
If a logger exists with same name, reuse it. (Else it would have duplicate
handlers and messages would be doubled.)
The level is set to CRITICAL+1 by default, to avoid any logging.
"""
# First, test if there is already a logger with the same name, else it
# will generate duplicate messages (due to duplicate handlers):
if name in logging.Logger.manager.loggerDict:
#NOTE: another less intrusive but more "hackish" solution would be to
# use getLogger then test if its effective level is not default.
logger = logging.getLogger(name)
# make sure level is OK:
logger.setLevel(level)
return logger
# get a new logger:
logger = logging.getLogger(name)
# only add a NullHandler for this logger, it is up to the application
# to configure its own logging:
logger.addHandler(logging.NullHandler())
logger.setLevel(level)
return logger | [
"def",
"get_logger",
"(",
"name",
",",
"level",
"=",
"logging",
".",
"CRITICAL",
"+",
"1",
")",
":",
"# First, test if there is already a logger with the same name, else it",
"# will generate duplicate messages (due to duplicate handlers):",
"if",
"name",
"in",
"logging",
".",
"Logger",
".",
"manager",
".",
"loggerDict",
":",
"#NOTE: another less intrusive but more \"hackish\" solution would be to",
"# use getLogger then test if its effective level is not default.",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"name",
")",
"# make sure level is OK:",
"logger",
".",
"setLevel",
"(",
"level",
")",
"return",
"logger",
"# get a new logger:",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"name",
")",
"# only add a NullHandler for this logger, it is up to the application",
"# to configure its own logging:",
"logger",
".",
"addHandler",
"(",
"logging",
".",
"NullHandler",
"(",
")",
")",
"logger",
".",
"setLevel",
"(",
"level",
")",
"return",
"logger"
] | https://github.com/decalage2/olefile/blob/5ae06e937cd18afebfb49239e8f20b099605136f/olefile/olefile.py#L164-L188 |
|
holzschu/Carnets | 44effb10ddfc6aa5c8b0687582a724ba82c6b547 | Library/lib/python3.7/site-packages/matplotlib-3.0.3-py3.7-macosx-10.9-x86_64.egg/matplotlib/axes/_axes.py | python | Axes.fill_between | (self, x, y1, y2=0, where=None, interpolate=False,
step=None, **kwargs) | return collection | Fill the area between two horizontal curves.
The curves are defined by the points (*x*, *y1*) and (*x*, *y2*). This
creates one or multiple polygons describing the filled area.
You may exclude some horizontal sections from filling using *where*.
By default, the edges connect the given points directly. Use *step* if
the filling should be a step function, i.e. constant in between *x*.
Parameters
----------
x : array (length N)
The x coordinates of the nodes defining the curves.
y1 : array (length N) or scalar
The y coordinates of the nodes defining the first curve.
y2 : array (length N) or scalar, optional, default: 0
The y coordinates of the nodes defining the second curve.
where : array of bool (length N), optional, default: None
Define *where* to exclude some horizontal regions from being
filled. The filled regions are defined by the coordinates
``x[where]``. More precisely, fill between ``x[i]`` and ``x[i+1]``
if ``where[i] and where[i+1]``. Note that this definition implies
that an isolated *True* value between two *False* values in
*where* will not result in filling. Both sides of the *True*
position remain unfilled due to the adjacent *False* values.
interpolate : bool, optional
This option is only relevant if *where* is used and the two curves
are crossing each other.
Semantically, *where* is often used for *y1* > *y2* or similar.
By default, the nodes of the polygon defining the filled region
will only be placed at the positions in the *x* array. Such a
polygon cannot describe the above semantics close to the
intersection. The x-sections containing the intersection are
simply clipped.
Setting *interpolate* to *True* will calculate the actual
intersection point and extend the filled region up to this point.
step : {'pre', 'post', 'mid'}, optional
Define *step* if the filling should be a step function,
i.e. constant in between *x*. The value determines where the
step will occur:
- 'pre': The y value is continued constantly to the left from
every *x* position, i.e. the interval ``(x[i-1], x[i]]`` has the
value ``y[i]``.
- 'post': The y value is continued constantly to the right from
every *x* position, i.e. the interval ``[x[i], x[i+1])`` has the
value ``y[i]``.
- 'mid': Steps occur half-way between the *x* positions.
Other Parameters
----------------
**kwargs
All other keyword arguments are passed on to `.PolyCollection`.
They control the `.Polygon` properties:
%(PolyCollection)s
Returns
-------
`.PolyCollection`
A `.PolyCollection` containing the plotted polygons.
See Also
--------
fill_betweenx : Fill between two sets of x-values.
Notes
-----
.. [notes section required to get data note injection right] | Fill the area between two horizontal curves. | [
"Fill",
"the",
"area",
"between",
"two",
"horizontal",
"curves",
"."
] | def fill_between(self, x, y1, y2=0, where=None, interpolate=False,
step=None, **kwargs):
"""
Fill the area between two horizontal curves.
The curves are defined by the points (*x*, *y1*) and (*x*, *y2*). This
creates one or multiple polygons describing the filled area.
You may exclude some horizontal sections from filling using *where*.
By default, the edges connect the given points directly. Use *step* if
the filling should be a step function, i.e. constant in between *x*.
Parameters
----------
x : array (length N)
The x coordinates of the nodes defining the curves.
y1 : array (length N) or scalar
The y coordinates of the nodes defining the first curve.
y2 : array (length N) or scalar, optional, default: 0
The y coordinates of the nodes defining the second curve.
where : array of bool (length N), optional, default: None
Define *where* to exclude some horizontal regions from being
filled. The filled regions are defined by the coordinates
``x[where]``. More precisely, fill between ``x[i]`` and ``x[i+1]``
if ``where[i] and where[i+1]``. Note that this definition implies
that an isolated *True* value between two *False* values in
*where* will not result in filling. Both sides of the *True*
position remain unfilled due to the adjacent *False* values.
interpolate : bool, optional
This option is only relevant if *where* is used and the two curves
are crossing each other.
Semantically, *where* is often used for *y1* > *y2* or similar.
By default, the nodes of the polygon defining the filled region
will only be placed at the positions in the *x* array. Such a
polygon cannot describe the above semantics close to the
intersection. The x-sections containing the intersection are
simply clipped.
Setting *interpolate* to *True* will calculate the actual
intersection point and extend the filled region up to this point.
step : {'pre', 'post', 'mid'}, optional
Define *step* if the filling should be a step function,
i.e. constant in between *x*. The value determines where the
step will occur:
- 'pre': The y value is continued constantly to the left from
every *x* position, i.e. the interval ``(x[i-1], x[i]]`` has the
value ``y[i]``.
- 'post': The y value is continued constantly to the right from
every *x* position, i.e. the interval ``[x[i], x[i+1])`` has the
value ``y[i]``.
- 'mid': Steps occur half-way between the *x* positions.
Other Parameters
----------------
**kwargs
All other keyword arguments are passed on to `.PolyCollection`.
They control the `.Polygon` properties:
%(PolyCollection)s
Returns
-------
`.PolyCollection`
A `.PolyCollection` containing the plotted polygons.
See Also
--------
fill_betweenx : Fill between two sets of x-values.
Notes
-----
.. [notes section required to get data note injection right]
"""
if not rcParams['_internal.classic_mode']:
kwargs = cbook.normalize_kwargs(
kwargs, mcoll.Collection._alias_map)
if not any(c in kwargs for c in ('color', 'facecolor')):
kwargs['facecolor'] = \
self._get_patches_for_fill.get_next_color()
# Handle united data, such as dates
self._process_unit_info(xdata=x, ydata=y1, kwargs=kwargs)
self._process_unit_info(ydata=y2)
# Convert the arrays so we can work with them
x = ma.masked_invalid(self.convert_xunits(x))
y1 = ma.masked_invalid(self.convert_yunits(y1))
y2 = ma.masked_invalid(self.convert_yunits(y2))
for name, array in [('x', x), ('y1', y1), ('y2', y2)]:
if array.ndim > 1:
raise ValueError('Input passed into argument "%r"' % name +
'is not 1-dimensional.')
if where is None:
where = True
where = where & ~functools.reduce(np.logical_or,
map(np.ma.getmask, [x, y1, y2]))
x, y1, y2 = np.broadcast_arrays(np.atleast_1d(x), y1, y2)
polys = []
for ind0, ind1 in cbook.contiguous_regions(where):
xslice = x[ind0:ind1]
y1slice = y1[ind0:ind1]
y2slice = y2[ind0:ind1]
if step is not None:
step_func = STEP_LOOKUP_MAP["steps-" + step]
xslice, y1slice, y2slice = step_func(xslice, y1slice, y2slice)
if not len(xslice):
continue
N = len(xslice)
X = np.zeros((2 * N + 2, 2), float)
if interpolate:
def get_interp_point(ind):
im1 = max(ind - 1, 0)
x_values = x[im1:ind + 1]
diff_values = y1[im1:ind + 1] - y2[im1:ind + 1]
y1_values = y1[im1:ind + 1]
if len(diff_values) == 2:
if np.ma.is_masked(diff_values[1]):
return x[im1], y1[im1]
elif np.ma.is_masked(diff_values[0]):
return x[ind], y1[ind]
diff_order = diff_values.argsort()
diff_root_x = np.interp(
0, diff_values[diff_order], x_values[diff_order])
x_order = x_values.argsort()
diff_root_y = np.interp(diff_root_x, x_values[x_order],
y1_values[x_order])
return diff_root_x, diff_root_y
start = get_interp_point(ind0)
end = get_interp_point(ind1)
else:
# the purpose of the next two lines is for when y2 is a
# scalar like 0 and we want the fill to go all the way
# down to 0 even if none of the y1 sample points do
start = xslice[0], y2slice[0]
end = xslice[-1], y2slice[-1]
X[0] = start
X[N + 1] = end
X[1:N + 1, 0] = xslice
X[1:N + 1, 1] = y1slice
X[N + 2:, 0] = xslice[::-1]
X[N + 2:, 1] = y2slice[::-1]
polys.append(X)
collection = mcoll.PolyCollection(polys, **kwargs)
# now update the datalim and autoscale
XY1 = np.array([x[where], y1[where]]).T
XY2 = np.array([x[where], y2[where]]).T
self.dataLim.update_from_data_xy(XY1, self.ignore_existing_data_limits,
updatex=True, updatey=True)
self.ignore_existing_data_limits = False
self.dataLim.update_from_data_xy(XY2, self.ignore_existing_data_limits,
updatex=False, updatey=True)
self.add_collection(collection, autolim=False)
self.autoscale_view()
return collection | [
"def",
"fill_between",
"(",
"self",
",",
"x",
",",
"y1",
",",
"y2",
"=",
"0",
",",
"where",
"=",
"None",
",",
"interpolate",
"=",
"False",
",",
"step",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"rcParams",
"[",
"'_internal.classic_mode'",
"]",
":",
"kwargs",
"=",
"cbook",
".",
"normalize_kwargs",
"(",
"kwargs",
",",
"mcoll",
".",
"Collection",
".",
"_alias_map",
")",
"if",
"not",
"any",
"(",
"c",
"in",
"kwargs",
"for",
"c",
"in",
"(",
"'color'",
",",
"'facecolor'",
")",
")",
":",
"kwargs",
"[",
"'facecolor'",
"]",
"=",
"self",
".",
"_get_patches_for_fill",
".",
"get_next_color",
"(",
")",
"# Handle united data, such as dates",
"self",
".",
"_process_unit_info",
"(",
"xdata",
"=",
"x",
",",
"ydata",
"=",
"y1",
",",
"kwargs",
"=",
"kwargs",
")",
"self",
".",
"_process_unit_info",
"(",
"ydata",
"=",
"y2",
")",
"# Convert the arrays so we can work with them",
"x",
"=",
"ma",
".",
"masked_invalid",
"(",
"self",
".",
"convert_xunits",
"(",
"x",
")",
")",
"y1",
"=",
"ma",
".",
"masked_invalid",
"(",
"self",
".",
"convert_yunits",
"(",
"y1",
")",
")",
"y2",
"=",
"ma",
".",
"masked_invalid",
"(",
"self",
".",
"convert_yunits",
"(",
"y2",
")",
")",
"for",
"name",
",",
"array",
"in",
"[",
"(",
"'x'",
",",
"x",
")",
",",
"(",
"'y1'",
",",
"y1",
")",
",",
"(",
"'y2'",
",",
"y2",
")",
"]",
":",
"if",
"array",
".",
"ndim",
">",
"1",
":",
"raise",
"ValueError",
"(",
"'Input passed into argument \"%r\"'",
"%",
"name",
"+",
"'is not 1-dimensional.'",
")",
"if",
"where",
"is",
"None",
":",
"where",
"=",
"True",
"where",
"=",
"where",
"&",
"~",
"functools",
".",
"reduce",
"(",
"np",
".",
"logical_or",
",",
"map",
"(",
"np",
".",
"ma",
".",
"getmask",
",",
"[",
"x",
",",
"y1",
",",
"y2",
"]",
")",
")",
"x",
",",
"y1",
",",
"y2",
"=",
"np",
".",
"broadcast_arrays",
"(",
"np",
".",
"atleast_1d",
"(",
"x",
")",
",",
"y1",
",",
"y2",
")",
"polys",
"=",
"[",
"]",
"for",
"ind0",
",",
"ind1",
"in",
"cbook",
".",
"contiguous_regions",
"(",
"where",
")",
":",
"xslice",
"=",
"x",
"[",
"ind0",
":",
"ind1",
"]",
"y1slice",
"=",
"y1",
"[",
"ind0",
":",
"ind1",
"]",
"y2slice",
"=",
"y2",
"[",
"ind0",
":",
"ind1",
"]",
"if",
"step",
"is",
"not",
"None",
":",
"step_func",
"=",
"STEP_LOOKUP_MAP",
"[",
"\"steps-\"",
"+",
"step",
"]",
"xslice",
",",
"y1slice",
",",
"y2slice",
"=",
"step_func",
"(",
"xslice",
",",
"y1slice",
",",
"y2slice",
")",
"if",
"not",
"len",
"(",
"xslice",
")",
":",
"continue",
"N",
"=",
"len",
"(",
"xslice",
")",
"X",
"=",
"np",
".",
"zeros",
"(",
"(",
"2",
"*",
"N",
"+",
"2",
",",
"2",
")",
",",
"float",
")",
"if",
"interpolate",
":",
"def",
"get_interp_point",
"(",
"ind",
")",
":",
"im1",
"=",
"max",
"(",
"ind",
"-",
"1",
",",
"0",
")",
"x_values",
"=",
"x",
"[",
"im1",
":",
"ind",
"+",
"1",
"]",
"diff_values",
"=",
"y1",
"[",
"im1",
":",
"ind",
"+",
"1",
"]",
"-",
"y2",
"[",
"im1",
":",
"ind",
"+",
"1",
"]",
"y1_values",
"=",
"y1",
"[",
"im1",
":",
"ind",
"+",
"1",
"]",
"if",
"len",
"(",
"diff_values",
")",
"==",
"2",
":",
"if",
"np",
".",
"ma",
".",
"is_masked",
"(",
"diff_values",
"[",
"1",
"]",
")",
":",
"return",
"x",
"[",
"im1",
"]",
",",
"y1",
"[",
"im1",
"]",
"elif",
"np",
".",
"ma",
".",
"is_masked",
"(",
"diff_values",
"[",
"0",
"]",
")",
":",
"return",
"x",
"[",
"ind",
"]",
",",
"y1",
"[",
"ind",
"]",
"diff_order",
"=",
"diff_values",
".",
"argsort",
"(",
")",
"diff_root_x",
"=",
"np",
".",
"interp",
"(",
"0",
",",
"diff_values",
"[",
"diff_order",
"]",
",",
"x_values",
"[",
"diff_order",
"]",
")",
"x_order",
"=",
"x_values",
".",
"argsort",
"(",
")",
"diff_root_y",
"=",
"np",
".",
"interp",
"(",
"diff_root_x",
",",
"x_values",
"[",
"x_order",
"]",
",",
"y1_values",
"[",
"x_order",
"]",
")",
"return",
"diff_root_x",
",",
"diff_root_y",
"start",
"=",
"get_interp_point",
"(",
"ind0",
")",
"end",
"=",
"get_interp_point",
"(",
"ind1",
")",
"else",
":",
"# the purpose of the next two lines is for when y2 is a",
"# scalar like 0 and we want the fill to go all the way",
"# down to 0 even if none of the y1 sample points do",
"start",
"=",
"xslice",
"[",
"0",
"]",
",",
"y2slice",
"[",
"0",
"]",
"end",
"=",
"xslice",
"[",
"-",
"1",
"]",
",",
"y2slice",
"[",
"-",
"1",
"]",
"X",
"[",
"0",
"]",
"=",
"start",
"X",
"[",
"N",
"+",
"1",
"]",
"=",
"end",
"X",
"[",
"1",
":",
"N",
"+",
"1",
",",
"0",
"]",
"=",
"xslice",
"X",
"[",
"1",
":",
"N",
"+",
"1",
",",
"1",
"]",
"=",
"y1slice",
"X",
"[",
"N",
"+",
"2",
":",
",",
"0",
"]",
"=",
"xslice",
"[",
":",
":",
"-",
"1",
"]",
"X",
"[",
"N",
"+",
"2",
":",
",",
"1",
"]",
"=",
"y2slice",
"[",
":",
":",
"-",
"1",
"]",
"polys",
".",
"append",
"(",
"X",
")",
"collection",
"=",
"mcoll",
".",
"PolyCollection",
"(",
"polys",
",",
"*",
"*",
"kwargs",
")",
"# now update the datalim and autoscale",
"XY1",
"=",
"np",
".",
"array",
"(",
"[",
"x",
"[",
"where",
"]",
",",
"y1",
"[",
"where",
"]",
"]",
")",
".",
"T",
"XY2",
"=",
"np",
".",
"array",
"(",
"[",
"x",
"[",
"where",
"]",
",",
"y2",
"[",
"where",
"]",
"]",
")",
".",
"T",
"self",
".",
"dataLim",
".",
"update_from_data_xy",
"(",
"XY1",
",",
"self",
".",
"ignore_existing_data_limits",
",",
"updatex",
"=",
"True",
",",
"updatey",
"=",
"True",
")",
"self",
".",
"ignore_existing_data_limits",
"=",
"False",
"self",
".",
"dataLim",
".",
"update_from_data_xy",
"(",
"XY2",
",",
"self",
".",
"ignore_existing_data_limits",
",",
"updatex",
"=",
"False",
",",
"updatey",
"=",
"True",
")",
"self",
".",
"add_collection",
"(",
"collection",
",",
"autolim",
"=",
"False",
")",
"self",
".",
"autoscale_view",
"(",
")",
"return",
"collection"
] | https://github.com/holzschu/Carnets/blob/44effb10ddfc6aa5c8b0687582a724ba82c6b547/Library/lib/python3.7/site-packages/matplotlib-3.0.3-py3.7-macosx-10.9-x86_64.egg/matplotlib/axes/_axes.py#L4952-L5130 |
|
mesalock-linux/mesapy | ed546d59a21b36feb93e2309d5c6b75aa0ad95c9 | lib-python/2.7/distutils/ccompiler.py | python | CCompiler.set_runtime_library_dirs | (self, dirs) | Set the list of directories to search for shared libraries at
runtime to 'dirs' (a list of strings). This does not affect any
standard search path that the runtime linker may search by
default. | Set the list of directories to search for shared libraries at
runtime to 'dirs' (a list of strings). This does not affect any
standard search path that the runtime linker may search by
default. | [
"Set",
"the",
"list",
"of",
"directories",
"to",
"search",
"for",
"shared",
"libraries",
"at",
"runtime",
"to",
"dirs",
"(",
"a",
"list",
"of",
"strings",
")",
".",
"This",
"does",
"not",
"affect",
"any",
"standard",
"search",
"path",
"that",
"the",
"runtime",
"linker",
"may",
"search",
"by",
"default",
"."
] | def set_runtime_library_dirs(self, dirs):
"""Set the list of directories to search for shared libraries at
runtime to 'dirs' (a list of strings). This does not affect any
standard search path that the runtime linker may search by
default.
"""
self.runtime_library_dirs = dirs[:] | [
"def",
"set_runtime_library_dirs",
"(",
"self",
",",
"dirs",
")",
":",
"self",
".",
"runtime_library_dirs",
"=",
"dirs",
"[",
":",
"]"
] | https://github.com/mesalock-linux/mesapy/blob/ed546d59a21b36feb93e2309d5c6b75aa0ad95c9/lib-python/2.7/distutils/ccompiler.py#L293-L299 |
||
TengXiaoDai/DistributedCrawling | f5c2439e6ce68dd9b49bde084d76473ff9ed4963 | Lib/site-packages/pkg_resources/__init__.py | python | _is_unpacked_egg | (path) | return (
path.lower().endswith('.egg')
) | Determine if given path appears to be an unpacked egg. | Determine if given path appears to be an unpacked egg. | [
"Determine",
"if",
"given",
"path",
"appears",
"to",
"be",
"an",
"unpacked",
"egg",
"."
] | def _is_unpacked_egg(path):
"""
Determine if given path appears to be an unpacked egg.
"""
return (
path.lower().endswith('.egg')
) | [
"def",
"_is_unpacked_egg",
"(",
"path",
")",
":",
"return",
"(",
"path",
".",
"lower",
"(",
")",
".",
"endswith",
"(",
"'.egg'",
")",
")"
] | https://github.com/TengXiaoDai/DistributedCrawling/blob/f5c2439e6ce68dd9b49bde084d76473ff9ed4963/Lib/site-packages/pkg_resources/__init__.py#L2220-L2226 |
|
pymedusa/Medusa | 1405fbb6eb8ef4d20fcca24c32ddca52b11f0f38 | ext3/bs4/__init__.py | python | BeautifulSoup.reset | (self) | Reset this object to a state as though it had never parsed any
markup. | Reset this object to a state as though it had never parsed any
markup. | [
"Reset",
"this",
"object",
"to",
"a",
"state",
"as",
"though",
"it",
"had",
"never",
"parsed",
"any",
"markup",
"."
] | def reset(self):
"""Reset this object to a state as though it had never parsed any
markup.
"""
Tag.__init__(self, self, self.builder, self.ROOT_TAG_NAME)
self.hidden = 1
self.builder.reset()
self.current_data = []
self.currentTag = None
self.tagStack = []
self.open_tag_counter = Counter()
self.preserve_whitespace_tag_stack = []
self.string_container_stack = []
self.pushTag(self) | [
"def",
"reset",
"(",
"self",
")",
":",
"Tag",
".",
"__init__",
"(",
"self",
",",
"self",
",",
"self",
".",
"builder",
",",
"self",
".",
"ROOT_TAG_NAME",
")",
"self",
".",
"hidden",
"=",
"1",
"self",
".",
"builder",
".",
"reset",
"(",
")",
"self",
".",
"current_data",
"=",
"[",
"]",
"self",
".",
"currentTag",
"=",
"None",
"self",
".",
"tagStack",
"=",
"[",
"]",
"self",
".",
"open_tag_counter",
"=",
"Counter",
"(",
")",
"self",
".",
"preserve_whitespace_tag_stack",
"=",
"[",
"]",
"self",
".",
"string_container_stack",
"=",
"[",
"]",
"self",
".",
"pushTag",
"(",
"self",
")"
] | https://github.com/pymedusa/Medusa/blob/1405fbb6eb8ef4d20fcca24c32ddca52b11f0f38/ext3/bs4/__init__.py#L440-L453 |
||
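`reset()` is normally invoked for you when new markup is fed to the parser; calling it directly simply empties the tree. A small illustrative sketch:

```python
from bs4 import BeautifulSoup

soup = BeautifulSoup("<p>hello</p>", "html.parser")
print(len(soup.contents))  # 1: the parsed <p> tag
soup.reset()               # back to the never-parsed state
print(len(soup.contents))  # 0: the tree is empty again
```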
deepgully/me | f7ad65edc2fe435310c6676bc2e322cfe5d4c8f0 | libs/sqlalchemy/events.py | python | PoolEvents.checkin | (self, dbapi_connection, connection_record) | Called when a connection returns to the pool.
Note that the connection may be closed, and may be None if the
connection has been invalidated. ``checkin`` will not be called
for detached connections. (They do not return to the pool.)
:param dbapi_connection: a DBAPI connection.
:param connection_record: the :class:`._ConnectionRecord` managing the
DBAPI connection. | Called when a connection returns to the pool. | [
"Called",
"when",
"a",
"connection",
"returns",
"to",
"the",
"pool",
"."
] | def checkin(self, dbapi_connection, connection_record):
    """Called when a connection returns to the pool.

    Note that the connection may be closed, and may be None if the
    connection has been invalidated. ``checkin`` will not be called
    for detached connections. (They do not return to the pool.)

    :param dbapi_connection: a DBAPI connection.

    :param connection_record: the :class:`._ConnectionRecord` managing the
        DBAPI connection.
    """ | [
"def",
"checkin",
"(",
"self",
",",
"dbapi_connection",
",",
"connection_record",
")",
":"
] | https://github.com/deepgully/me/blob/f7ad65edc2fe435310c6676bc2e322cfe5d4c8f0/libs/sqlalchemy/events.py#L326-L338 |
||
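This hook is wired up with SQLAlchemy's event API; the listener signature matches the parameters documented above:

```python
import logging
from sqlalchemy import create_engine, event

engine = create_engine("sqlite://")

@event.listens_for(engine, "checkin")
def on_checkin(dbapi_connection, connection_record):
    # dbapi_connection may be None if the connection was invalidated
    logging.info("connection returned to pool: %r", dbapi_connection)
```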
buke/GreenOdoo | 3d8c55d426fb41fdb3f2f5a1533cfe05983ba1df | runtime/python/lib/python2.7/site-packages/psutil-0.6.1-py2.7-linux-x86_64.egg/psutil/__init__.py | python | swap_memory | () | return _psplatform.swap_memory() | Return system swap memory statistics as a namedtuple including
the following attributes:
- total: total swap memory in bytes
- used: used swap memory in bytes
- free: free swap memory in bytes
- percent: the percentage usage
- sin: no. of bytes the system has swapped in from disk (cumulative)
- sout: no. of bytes the system has swapped out from disk (cumulative)
'sin' and 'sout' on Windows are meaningless and always set to 0. | Return system swap memory statistics as a namedtuple including
the following attributes: | [
"Return",
"system",
"swap",
"memory",
"statistics",
"as",
"a",
"namedtuple",
"including",
"the",
"following",
"attributes",
":"
] | def swap_memory():
    """Return system swap memory statistics as a namedtuple including
    the following attributes:

    - total: total swap memory in bytes
    - used: used swap memory in bytes
    - free: free swap memory in bytes
    - percent: the percentage usage
    - sin: no. of bytes the system has swapped in from disk (cumulative)
    - sout: no. of bytes the system has swapped out from disk (cumulative)

    'sin' and 'sout' on Windows are meaningless and always set to 0.
    """
    return _psplatform.swap_memory() | [
"def",
"swap_memory",
"(",
")",
":",
"return",
"_psplatform",
".",
"swap_memory",
"(",
")"
] | https://github.com/buke/GreenOdoo/blob/3d8c55d426fb41fdb3f2f5a1533cfe05983ba1df/runtime/python/lib/python2.7/site-packages/psutil-0.6.1-py2.7-linux-x86_64.egg/psutil/__init__.py#L987-L1000 |
|
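Reading the documented namedtuple fields (recall that `sin`/`sout` are always 0 on Windows):

```python
import psutil

swap = psutil.swap_memory()
print(f"total={swap.total} used={swap.used} free={swap.free}")
print(f"percent={swap.percent} sin={swap.sin} sout={swap.sout}")
```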
Azure/azure-devops-cli-extension | 11334cd55806bef0b99c3bee5a438eed71e44037 | azure-devops/azext_devops/devops_sdk/v5_1/build/build_client.py | python | BuildClient.get_project_resources | (self, project, type=None, id=None) | return self._deserialize('[DefinitionResourceReference]', self._unwrap_collection(response)) | GetProjectResources.
[Preview API]
:param str project: Project ID or project name
:param str type:
:param str id:
:rtype: [DefinitionResourceReference] | GetProjectResources.
[Preview API]
:param str project: Project ID or project name
:param str type:
:param str id:
:rtype: [DefinitionResourceReference] | [
"GetProjectResources",
".",
"[",
"Preview",
"API",
"]",
":",
"param",
"str",
"project",
":",
"Project",
"ID",
"or",
"project",
"name",
":",
"param",
"str",
"type",
":",
":",
"param",
"str",
"id",
":",
":",
"rtype",
":",
"[",
"DefinitionResourceReference",
"]"
] | def get_project_resources(self, project, type=None, id=None):
    """GetProjectResources.
    [Preview API]
    :param str project: Project ID or project name
    :param str type:
    :param str id:
    :rtype: [DefinitionResourceReference]
    """
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    query_parameters = {}
    if type is not None:
        query_parameters['type'] = self._serialize.query('type', type, 'str')
    if id is not None:
        query_parameters['id'] = self._serialize.query('id', id, 'str')
    response = self._send(http_method='GET',
                          location_id='398c85bc-81aa-4822-947c-a194a05f0fef',
                          version='5.1-preview.1',
                          route_values=route_values,
                          query_parameters=query_parameters)
    return self._deserialize('[DefinitionResourceReference]', self._unwrap_collection(response)) | [
"def",
"get_project_resources",
"(",
"self",
",",
"project",
",",
"type",
"=",
"None",
",",
"id",
"=",
"None",
")",
":",
"route_values",
"=",
"{",
"}",
"if",
"project",
"is",
"not",
"None",
":",
"route_values",
"[",
"'project'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'project'",
",",
"project",
",",
"'str'",
")",
"query_parameters",
"=",
"{",
"}",
"if",
"type",
"is",
"not",
"None",
":",
"query_parameters",
"[",
"'type'",
"]",
"=",
"self",
".",
"_serialize",
".",
"query",
"(",
"'type'",
",",
"type",
",",
"'str'",
")",
"if",
"id",
"is",
"not",
"None",
":",
"query_parameters",
"[",
"'id'",
"]",
"=",
"self",
".",
"_serialize",
".",
"query",
"(",
"'id'",
",",
"id",
",",
"'str'",
")",
"response",
"=",
"self",
".",
"_send",
"(",
"http_method",
"=",
"'GET'",
",",
"location_id",
"=",
"'398c85bc-81aa-4822-947c-a194a05f0fef'",
",",
"version",
"=",
"'5.1-preview.1'",
",",
"route_values",
"=",
"route_values",
",",
"query_parameters",
"=",
"query_parameters",
")",
"return",
"self",
".",
"_deserialize",
"(",
"'[DefinitionResourceReference]'",
",",
"self",
".",
"_unwrap_collection",
"(",
"response",
")",
")"
] | https://github.com/Azure/azure-devops-cli-extension/blob/11334cd55806bef0b99c3bee5a438eed71e44037/azure-devops/azext_devops/devops_sdk/v5_1/build/build_client.py#L226-L247 |
|
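A hedged sketch of reaching this method through the SDK; the organization URL and personal access token are placeholders, and the exact client version returned by `get_build_client()` depends on the installed SDK:

```python
from azure.devops.connection import Connection
from msrest.authentication import BasicAuthentication

connection = Connection(
    base_url="https://dev.azure.com/your-org",   # placeholder org URL
    creds=BasicAuthentication("", "your-pat"))   # placeholder PAT
build_client = connection.clients.get_build_client()
resources = build_client.get_project_resources(
    "MyProject", type="queue")                   # type/id are optional filters
for ref in resources:
    print(ref)
```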
gramps-project/gramps | 04d4651a43eb210192f40a9f8c2bad8ee8fa3753 | gramps/gui/widgets/fanchart.py | python | FanChartWidget.set_values | (self, root_person_handle, maxgen, background, childring,
flipupsidedownname, twolinename, radialtext, fontdescr,
grad_start, grad_end, filtr, alpha_filter, form, showid) | Reset the values to be used:
:param root_person_handle: person to show
:param maxgen: maximum generations to show
:param background: config setting of which background procedure to use
:type background: int
:param childring: to show the center ring with children or not
:param twolinename: uses two lines for the display of person's name
:param flipupsidedownname: flip name on the left of the fanchart
for the display of person's name
:param radialtext: try to use radial text or not
:param fontdescr: string describing the font to use
:param grad_start: colors to use for background procedure
:param grad_end: colors to use for background procedure
:param filtr: the person filter to apply to the people in the chart
:param alpha_filter: the alpha transparency value (0-1) to apply to filtered
out data
:param form: the ``FORM_`` constant for the fanchart
:param showid: to show the gramps_id or not | Reset the values to be used: | [
"Reset",
"the",
"values",
"to",
"be",
"used",
":"
] | def set_values(self, root_person_handle, maxgen, background, childring,
               flipupsidedownname, twolinename, radialtext, fontdescr,
               grad_start, grad_end, filtr, alpha_filter, form, showid):
    """
    Reset the values to be used:

    :param root_person_handle: person to show
    :param maxgen: maximum generations to show
    :param background: config setting of which background procedure to use
    :type background: int
    :param childring: to show the center ring with children or not
    :param twolinename: uses two lines for the display of person's name
    :param flipupsidedownname: flip name on the left of the fanchart
                               for the display of person's name
    :param radialtext: try to use radial text or not
    :param fontdescr: string describing the font to use
    :param grad_start: colors to use for background procedure
    :param grad_end: colors to use for background procedure
    :param filtr: the person filter to apply to the people in the chart
    :param alpha_filter: the alpha transparency value (0-1) to apply to
                         filtered out data
    :param form: the ``FORM_`` constant for the fanchart
    :param showid: to show the gramps_id or not
    """
    self.rootpersonh = root_person_handle
    self.generations = maxgen
    self.radialtext = radialtext
    self.childring = childring
    self.twolinename = twolinename
    self.flipupsidedownname = flipupsidedownname
    self.background = background
    self.fontdescr = fontdescr
    self.grad_start = grad_start
    self.grad_end = grad_end
    self.filter = filtr
    self.alpha_filter = alpha_filter
    self.form = form
    self.showid = showid | [
"def",
"set_values",
"(",
"self",
",",
"root_person_handle",
",",
"maxgen",
",",
"background",
",",
"childring",
",",
"flipupsidedownname",
",",
"twolinename",
",",
"radialtext",
",",
"fontdescr",
",",
"grad_start",
",",
"grad_end",
",",
"filtr",
",",
"alpha_filter",
",",
"form",
",",
"showid",
")",
":",
"self",
".",
"rootpersonh",
"=",
"root_person_handle",
"self",
".",
"generations",
"=",
"maxgen",
"self",
".",
"radialtext",
"=",
"radialtext",
"self",
".",
"childring",
"=",
"childring",
"self",
".",
"twolinename",
"=",
"twolinename",
"self",
".",
"flipupsidedownname",
"=",
"flipupsidedownname",
"self",
".",
"background",
"=",
"background",
"self",
".",
"fontdescr",
"=",
"fontdescr",
"self",
".",
"grad_start",
"=",
"grad_start",
"self",
".",
"grad_end",
"=",
"grad_end",
"self",
".",
"filter",
"=",
"filtr",
"self",
".",
"alpha_filter",
"=",
"alpha_filter",
"self",
".",
"form",
"=",
"form",
"self",
".",
"showid",
"=",
"showid"
] | https://github.com/gramps-project/gramps/blob/04d4651a43eb210192f40a9f8c2bad8ee8fa3753/gramps/gui/widgets/fanchart.py#L1253-L1290 |
||
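An illustrative call shape only; every value below is a placeholder standing in for what a running Gramps session would supply, and `FORM_CIRCLE` is assumed to be one of the module's ``FORM_`` constants:

```python
# Illustrative only -- all values are hypothetical placeholders.
widget.set_values(
    person_handle,            # handle of the root person from the database
    6,                        # maxgen: show six generations
    0,                        # background: config index
    True,                     # childring
    True, True, True,         # flipupsidedownname, twolinename, radialtext
    "Sans 9",                 # fontdescr
    "#0000ff", "#ff0000",     # grad_start, grad_end colors (format assumed)
    None,                     # filtr: no person filter
    0.2,                      # alpha_filter for filtered-out people
    FORM_CIRCLE,              # assumed ``FORM_`` constant
    False)                    # showid
```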
CouchPotato/CouchPotatoServer | 7260c12f72447ddb6f062367c6dfbda03ecd4e9c | libs/pkg_resources.py | python | IMetadataProvider.metadata_isdir | (name) | Is the named metadata a directory? (like ``os.path.isdir()``) | Is the named metadata a directory? (like ``os.path.isdir()``) | [
"Is",
"the",
"named",
"metadata",
"a",
"directory?",
"(",
"like",
"os",
".",
"path",
".",
"isdir",
"()",
")"
] | def metadata_isdir(name):
    """Is the named metadata a directory? (like ``os.path.isdir()``)""" | [
"def",
"metadata_isdir",
"(",
"name",
")",
":"
] | https://github.com/CouchPotato/CouchPotatoServer/blob/7260c12f72447ddb6f062367c6dfbda03ecd4e9c/libs/pkg_resources.py#L343-L344 |
||
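`Distribution` delegates unrecognized public attributes to its metadata provider, so this interface method is reachable from a distribution object; the package and metadata name below are just examples:

```python
import pkg_resources

dist = pkg_resources.get_distribution("setuptools")
if dist.has_metadata("top_level.txt"):
    print(dist.metadata_isdir("top_level.txt"))  # False: it's a file, not a dir
```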
prkumar/uplink | 3472806f68a60a93f7cb555d36365551a5411cc5 | uplink/clients/io/state.py | python | BeforeRequest.execute | (self, execution) | return execution.before_request(self._request) | [] | def execute(self, execution):
    return execution.before_request(self._request) | [
"def",
"execute",
"(",
"self",
",",
"execution",
")",
":",
"return",
"execution",
".",
"before_request",
"(",
"self",
".",
"_request",
")"
] | https://github.com/prkumar/uplink/blob/3472806f68a60a93f7cb555d36365551a5411cc5/uplink/clients/io/state.py#L35-L36 |
|||
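A self-contained sketch of the state/execution hand-off used here (names simplified; this is not uplink's real class hierarchy):

```python
class BeforeRequest:
    def __init__(self, request):
        self._request = request

    def execute(self, execution):
        # the state delegates; the execution decides what happens next
        return execution.before_request(self._request)

class LoggingExecution:
    def before_request(self, request):
        print("about to send:", request)
        return request

print(BeforeRequest({"url": "https://example.com"}).execute(LoggingExecution()))
```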
ifwe/digsby | f5fe00244744aa131e07f09348d10563f3d8fa99 | digsby/src/util/asynchttp.py | python | AsyncHTTPConnection.handle_error | (self, why=None) | Overload asyncore's exception handling | Overload asyncore's exception handling | [
"Overload",
"asyncore",
"s",
"exception",
"handling"
] | def handle_error(self, why=None):
    """
    Overload asyncore's exception handling
    """
    self.__set_state(_STATE_IDLE)
    common.socket.handle_error(self, why) | [
"def",
"handle_error",
"(",
"self",
",",
"why",
"=",
"None",
")",
":",
"self",
".",
"__set_state",
"(",
"_STATE_IDLE",
")",
"common",
".",
"socket",
".",
"handle_error",
"(",
"self",
",",
"why",
")"
] | https://github.com/ifwe/digsby/blob/f5fe00244744aa131e07f09348d10563f3d8fa99/digsby/src/util/asynchttp.py#L468-L473 |
||
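The same idea, reset internal state and then fall back to the base class, in a plain `asyncore` dispatcher (`asyncore` is deprecated in modern Python but matches this codebase's era):

```python
import asyncore

class HTTPDispatcher(asyncore.dispatcher):
    def handle_error(self):
        self.state = "idle"                      # drop back to a safe state
        asyncore.dispatcher.handle_error(self)   # default logging/close behavior
```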
spesmilo/electrum | bdbd59300fbd35b01605e66145458e5f396108e8 | electrum/lnchannel.py | python | AbstractChannel.short_id_for_GUI | (self) | return format_short_channel_id(self.short_channel_id) | [] | def short_id_for_GUI(self) -> str:
    return format_short_channel_id(self.short_channel_id) | [
"def",
"short_id_for_GUI",
"(",
"self",
")",
"->",
"str",
":",
"return",
"format_short_channel_id",
"(",
"self",
".",
"short_channel_id",
")"
] | https://github.com/spesmilo/electrum/blob/bdbd59300fbd35b01605e66145458e5f396108e8/electrum/lnchannel.py#L172-L173 |
|||
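Assumption: `format_short_channel_id` renders the BOLT-style `<block>x<tx_index>x<output>` form; this standalone stand-in mimics that convention and is not electrum's actual helper:

```python
def format_scid_like(block_height: int, tx_pos: int, output_index: int) -> str:
    return f"{block_height}x{tx_pos}x{output_index}"

print(format_scid_like(700000, 42, 1))  # -> '700000x42x1'
```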
akfamily/akshare | 590e50eece9ec067da3538c7059fd660b71f1339 | akshare/movie/movie_yien.py | python | movie_boxoffice_daily | (date: str = "20201018") | | return temp_df | Movie box office - daily box office
https://www.endata.com.cn/BoxOffice/BO/Day/index.html
:param date: only the day before the current date can be requested
:type date: str
:return: daily box office
:rtype: pandas.DataFrame | Movie box office - daily box office
https://www.endata.com.cn/BoxOffice/BO/Day/index.html
:param date: only the day before the current date can be requested
:type date: str
:return: daily box office
:rtype: pandas.DataFrame | [
"电影票房",
"-",
"单日票房",
"https",
":",
"//",
"www",
".",
"endata",
".",
"com",
".",
"cn",
"/",
"BoxOffice",
"/",
"BO",
"/",
"Day",
"/",
"index",
".",
"html",
":",
"param",
"date",
":",
"只能设置当前日期的前一天的票房数据",
":",
"type",
"date",
":",
"str",
":",
"return",
":",
"每日票房",
":",
"rtype",
":",
"pandas",
".",
"DataFrame"
] | def movie_boxoffice_daily(date: str = "20201018") -> pd.DataFrame:
    """
    Movie box office - daily box office
    https://www.endata.com.cn/BoxOffice/BO/Day/index.html
    :param date: only the day before the current date can be requested
    :type date: str
    :return: daily box office
    :rtype: pandas.DataFrame
    """
    last_date = datetime.datetime.strptime(date, "%Y%m%d") - datetime.timedelta(days=1)
    last_date = last_date.strftime("%Y%m%d")
    url = "https://www.endata.com.cn/API/GetData.ashx"
    payload = {
        "sdate": f"{date[:4]}-{date[4:6]}-{date[6:]}",
        "edate": f"{last_date[:4]}-{last_date[4:6]}-{last_date[6:]}",
        "MethodName": "BoxOffice_GetDayBoxOffice",
    }
    r = requests.post(url, data=payload)
    r.encoding = "utf8"
    data_json = json.loads(decrypt(r.text))
    temp_df = pd.DataFrame(data_json["Data"]["Table"])
    temp_df.columns = [
        "排序",
        "_",
        "影片名称",
        "_",
        "累计票房",
        "平均票价",
        "上映天数",
        "场均人次",
        "_",
        "_",
        "_",
        "_",
        "_",
        "单日票房",
        "环比变化",
        "_",
        "口碑指数",
    ]
    temp_df = temp_df[
        ["排序", "影片名称", "单日票房", "环比变化", "累计票房", "平均票价", "场均人次", "口碑指数", "上映天数"]
    ]
    return temp_df | [
"def",
"movie_boxoffice_daily",
"(",
"date",
":",
"str",
"=",
"\"20201018\"",
")",
"->",
"pd",
".",
"DataFrame",
":",
"last_date",
"=",
"datetime",
".",
"datetime",
".",
"strptime",
"(",
"date",
",",
"\"%Y%m%d\"",
")",
"-",
"datetime",
".",
"timedelta",
"(",
"days",
"=",
"1",
")",
"last_date",
"=",
"last_date",
".",
"strftime",
"(",
"\"%Y%m%d\"",
")",
"url",
"=",
"\"https://www.endata.com.cn/API/GetData.ashx\"",
"payload",
"=",
"{",
"\"sdate\"",
":",
"f\"{date[:4]}-{date[4:6]}-{date[6:]}\"",
",",
"\"edate\"",
":",
"f\"{last_date[:4]}-{last_date[4:6]}-{last_date[6:]}\"",
",",
"\"MethodName\"",
":",
"\"BoxOffice_GetDayBoxOffice\"",
",",
"}",
"r",
"=",
"requests",
".",
"post",
"(",
"url",
",",
"data",
"=",
"payload",
")",
"r",
".",
"encoding",
"=",
"\"utf8\"",
"data_json",
"=",
"json",
".",
"loads",
"(",
"decrypt",
"(",
"r",
".",
"text",
")",
")",
"temp_df",
"=",
"pd",
".",
"DataFrame",
"(",
"data_json",
"[",
"\"Data\"",
"]",
"[",
"\"Table\"",
"]",
")",
"temp_df",
".",
"columns",
"=",
"[",
"\"排序\",",
"",
"\"_\"",
",",
"\"影片名称\",",
"",
"\"_\"",
",",
"\"累计票房\",",
"",
"\"平均票价\",",
"",
"\"上映天数\",",
"",
"\"场均人次\",",
"",
"\"_\"",
",",
"\"_\"",
",",
"\"_\"",
",",
"\"_\"",
",",
"\"_\"",
",",
"\"单日票房\",",
"",
"\"环比变化\",",
"",
"\"_\"",
",",
"\"口碑指数\",",
"",
"]",
"temp_df",
"=",
"temp_df",
"[",
"[",
"\"排序\", \"影",
"片",
"称\", \"单日票房\", \"环",
"比",
"化\", \"累计票房\", \"平",
"均",
"价\", \"场均人次\", \"口",
"碑",
"数\", \"上映天数\"]",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"]",
"return",
"temp_df"
] | https://github.com/akfamily/akshare/blob/590e50eece9ec067da3538c7059fd660b71f1339/akshare/movie/movie_yien.py#L101-L144 |
|
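Typical use through the public akshare API (the date must be no later than yesterday, per the docstring):

```python
import akshare as ak

movie_boxoffice_daily_df = ak.movie_boxoffice_daily(date="20201018")
print(movie_boxoffice_daily_df.head())
```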
rhinstaller/anaconda | 63edc8680f1b05cbfe11bef28703acba808c5174 | pyanaconda/core/util.py | python | detect_virtualized_platform | () | return platform | Detect execution in a virtualized environment.
This runs systemd-detect-virt and, if the result is not 'none',
it returns an id of the detected virtualization technology.
Otherwise, it returns None.
:return: a virtualization technology identifier or None | Detect execution in a virtualized environment. | [
"Detect",
"execution",
"in",
"a",
"virtualized",
"environment",
"."
] | def detect_virtualized_platform():
    """Detect execution in a virtualized environment.

    This runs systemd-detect-virt and, if the result is not 'none',
    it returns an id of the detected virtualization technology.
    Otherwise, it returns None.

    :return: a virtualization technology identifier or None
    """
    try:
        platform = execWithCapture("systemd-detect-virt", []).strip()
    except (OSError, AttributeError):
        return None

    if platform == "none":
        return None

    return platform | [
"def",
"detect_virtualized_platform",
"(",
")",
":",
"try",
":",
"platform",
"=",
"execWithCapture",
"(",
"\"systemd-detect-virt\"",
",",
"[",
"]",
")",
".",
"strip",
"(",
")",
"except",
"(",
"OSError",
",",
"AttributeError",
")",
":",
"return",
"None",
"if",
"platform",
"==",
"\"none\"",
":",
"return",
"None",
"return",
"platform"
] | https://github.com/rhinstaller/anaconda/blob/63edc8680f1b05cbfe11bef28703acba808c5174/pyanaconda/core/util.py#L678-L696 |
|
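A standalone equivalent using `subprocess` in place of anaconda's internal `execWithCapture` helper; it assumes `systemd-detect-virt` is on `PATH`. Note that the tool exits nonzero when it prints `none`, hence `check=False`:

```python
import subprocess

def detect_virtualized_platform():
    try:
        out = subprocess.run(["systemd-detect-virt"], capture_output=True,
                             text=True, check=False).stdout.strip()
    except OSError:
        return None
    return None if out == "none" else out

print(detect_virtualized_platform())  # e.g. 'kvm', 'docker', or None
```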
makerbot/ReplicatorG | d6f2b07785a5a5f1e172fb87cb4303b17c575d5d | skein_engines/skeinforge-47/fabmetheus_utilities/geometry/geometry_tools/dictionary.py | python | Dictionary.getType | (self) | return self.__class__.__name__ | Get type. | Get type. | [
"Get",
"type",
"."
] | def getType(self):
    'Get type.'
    return self.__class__.__name__ | [
"def",
"getType",
"(",
"self",
")",
":",
"return",
"self",
".",
"__class__",
".",
"__name__"
] | https://github.com/makerbot/ReplicatorG/blob/d6f2b07785a5a5f1e172fb87cb4303b17c575d5d/skein_engines/skeinforge-47/fabmetheus_utilities/geometry/geometry_tools/dictionary.py#L154-L156 |
|
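Because `getType` reads `self.__class__.__name__` at run time, subclasses report their own names without overriding it (the `Sphere` subclass here is only an illustration):

```python
class Dictionary:
    def getType(self):
        return self.__class__.__name__

class Sphere(Dictionary):
    pass

print(Dictionary().getType())  # 'Dictionary'
print(Sphere().getType())      # 'Sphere'
```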
tensorflow/compression | 369d398be937983b3abb7c5445400a6f5d55ffc9 | tensorflow_compression/python/ops/math_ops.py | python | upper_bound | (inputs, bound, gradient="identity_if_towards",
name="upper_bound") | Same as `tf.minimum`, but with helpful gradient for `inputs > bound`.
This function behaves just like `tf.minimum`, but the behavior of the gradient
with respect to `inputs` for input values that hit the bound depends on
`gradient`:
If set to `'disconnected'`, the returned gradient is zero for values that hit
the bound. This is identical to the behavior of `tf.minimum`.
If set to `'identity'`, the gradient is unconditionally replaced with the
identity function (i.e., pretending this function does not exist).
If set to `'identity_if_towards'`, the gradient is replaced with the identity
function, but only if applying gradient descent would push the values of
`inputs` towards the bound. For gradient values that push away from the bound,
the returned gradient is still zero.
Note: In the latter two cases, no gradient is returned for `bound`.
Also, the implementation of `gradient == 'identity_if_towards'` currently
assumes that the shape of `inputs` is the same as the shape of the output. It
won't work reliably for all possible broadcasting scenarios.
Args:
inputs: Input tensor.
bound: Upper bound for the input tensor.
gradient: 'disconnected', 'identity', or 'identity_if_towards' (default).
name: Name for this op.
Returns:
`tf.minimum(inputs, bound)`
Raises:
ValueError: for invalid value of `gradient`. | Same as `tf.minimum`, but with helpful gradient for `inputs > bound`. | [
"Same",
"as",
"tf",
".",
"minimum",
"but",
"with",
"helpful",
"gradient",
"for",
"inputs",
">",
"bound",
"."
] | def upper_bound(inputs, bound, gradient="identity_if_towards",
                name="upper_bound"):
  """Same as `tf.minimum`, but with helpful gradient for `inputs > bound`.

  This function behaves just like `tf.minimum`, but the behavior of the gradient
  with respect to `inputs` for input values that hit the bound depends on
  `gradient`:

  If set to `'disconnected'`, the returned gradient is zero for values that hit
  the bound. This is identical to the behavior of `tf.minimum`.

  If set to `'identity'`, the gradient is unconditionally replaced with the
  identity function (i.e., pretending this function does not exist).

  If set to `'identity_if_towards'`, the gradient is replaced with the identity
  function, but only if applying gradient descent would push the values of
  `inputs` towards the bound. For gradient values that push away from the bound,
  the returned gradient is still zero.

  Note: In the latter two cases, no gradient is returned for `bound`.
  Also, the implementation of `gradient == 'identity_if_towards'` currently
  assumes that the shape of `inputs` is the same as the shape of the output. It
  won't work reliably for all possible broadcasting scenarios.

  Args:
    inputs: Input tensor.
    bound: Upper bound for the input tensor.
    gradient: 'disconnected', 'identity', or 'identity_if_towards' (default).
    name: Name for this op.

  Returns:
    `tf.minimum(inputs, bound)`

  Raises:
    ValueError: for invalid value of `gradient`.
  """
  with tf.name_scope(name) as scope:
    inputs = tf.convert_to_tensor(inputs, name="inputs")
    bound = tf.convert_to_tensor(bound, name="bound", dtype=inputs.dtype)

    def identity_if_towards_grad(grad):
      """Gradient if gradient == 'identity_if_towards'."""
      pass_through_if = tf.logical_or(inputs <= bound, grad > 0)
      return (tf.cast(pass_through_if, grad.dtype) * grad, None)

    def disconnected_grad(grad):
      """Gradient if gradient == 'disconnected'."""
      return (tf.cast(inputs <= bound, grad.dtype) * grad, None)

    try:
      gradient = {
          "identity_if_towards": identity_if_towards_grad,
          "identity": lambda grad: (grad, None),
          "disconnected": disconnected_grad,
      }[gradient]
    except KeyError:
      raise ValueError("Invalid value for `gradient`: '{}'.".format(gradient))

    @tf.custom_gradient
    def _upper_bound(inputs, bound):
      return tf.minimum(inputs, bound, name=scope), gradient

    return _upper_bound(inputs, bound) | [
"def",
"upper_bound",
"(",
"inputs",
",",
"bound",
",",
"gradient",
"=",
"\"identity_if_towards\"",
",",
"name",
"=",
"\"upper_bound\"",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"name",
")",
"as",
"scope",
":",
"inputs",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"inputs",
",",
"name",
"=",
"\"inputs\"",
")",
"bound",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"bound",
",",
"name",
"=",
"\"bound\"",
",",
"dtype",
"=",
"inputs",
".",
"dtype",
")",
"def",
"identity_if_towards_grad",
"(",
"grad",
")",
":",
"\"\"\"Gradient if gradient == 'identity_if_towards'.\"\"\"",
"pass_through_if",
"=",
"tf",
".",
"logical_or",
"(",
"inputs",
"<=",
"bound",
",",
"grad",
">",
"0",
")",
"return",
"(",
"tf",
".",
"cast",
"(",
"pass_through_if",
",",
"grad",
".",
"dtype",
")",
"*",
"grad",
",",
"None",
")",
"def",
"disconnected_grad",
"(",
"grad",
")",
":",
"\"\"\"Gradient if gradient == 'disconnected'.\"\"\"",
"return",
"(",
"tf",
".",
"cast",
"(",
"inputs",
"<=",
"bound",
",",
"grad",
".",
"dtype",
")",
"*",
"grad",
",",
"None",
")",
"try",
":",
"gradient",
"=",
"{",
"\"identity_if_towards\"",
":",
"identity_if_towards_grad",
",",
"\"identity\"",
":",
"lambda",
"grad",
":",
"(",
"grad",
",",
"None",
")",
",",
"\"disconnected\"",
":",
"disconnected_grad",
",",
"}",
"[",
"gradient",
"]",
"except",
"KeyError",
":",
"raise",
"ValueError",
"(",
"\"Invalid value for `gradient`: '{}'.\"",
".",
"format",
"(",
"gradient",
")",
")",
"@",
"tf",
".",
"custom_gradient",
"def",
"_upper_bound",
"(",
"inputs",
",",
"bound",
")",
":",
"return",
"tf",
".",
"minimum",
"(",
"inputs",
",",
"bound",
",",
"name",
"=",
"scope",
")",
",",
"gradient",
"return",
"_upper_bound",
"(",
"inputs",
",",
"bound",
")"
] | https://github.com/tensorflow/compression/blob/369d398be937983b3abb7c5445400a6f5d55ffc9/tensorflow_compression/python/ops/math_ops.py#L27-L89 |
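A short check of the `'identity_if_towards'` behavior with `tf.GradientTape`; this assumes the op is re-exported at package level as `tensorflow_compression.upper_bound`, as in released tensorflow-compression wheels:

```python
import tensorflow as tf
import tensorflow_compression as tfc  # assumed package-level re-export

x = tf.Variable([0.5, 2.0])           # second element exceeds the bound 1.0
with tf.GradientTape() as tape:
    y = tfc.upper_bound(x, 1.0)
    loss = tf.reduce_sum(y)           # d loss / d y = +1 everywhere
grad = tape.gradient(loss, x)
# A positive upstream gradient means descent would decrease x, i.e. move it
# toward the bound, so it passes through even where x > bound:
print(grad.numpy())                   # [1., 1.]; 'disconnected' would give [1., 0.]
```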