nwo (stringlengths 5-91) | sha (stringlengths 40-40) | path (stringlengths 5-174) | language (stringclasses 1 value) | identifier (stringlengths 1-120) | parameters (stringlengths 0-3.15k) | argument_list (stringclasses 1 value) | return_statement (stringlengths 0-24.1k) | docstring (stringlengths 0-27.3k) | docstring_summary (stringlengths 0-13.8k) | docstring_tokens (sequence) | function (stringlengths 22-139k) | function_tokens (sequence) | url (stringlengths 87-283) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
wxWidgets/Phoenix | b2199e299a6ca6d866aa6f3d0888499136ead9d6 | wx/lib/agw/aui/framemanager.py | python | AuiFloatingFrame.OnMove | (self, event) | Handles the ``wx.EVT_MOVE`` event for :class:`AuiFloatingFrame`.
:param `event`: a :class:`MoveEvent` to be processed.
.. note::
This event is not processed on wxMAC or if :class:`AuiManager` is not using the
``AUI_MGR_USE_NATIVE_MINIFRAMES`` style. | Handles the ``wx.EVT_MOVE`` event for :class:`AuiFloatingFrame`. | [
"Handles",
"the",
"wx",
".",
"EVT_MOVE",
"event",
"for",
":",
"class",
":",
"AuiFloatingFrame",
"."
] | def OnMove(self, event):
"""
Handles the ``wx.EVT_MOVE`` event for :class:`AuiFloatingFrame`.
:param `event`: a :class:`MoveEvent` to be processed.
.. note::
This event is not processed on wxMAC or if :class:`AuiManager` is not using the
``AUI_MGR_USE_NATIVE_MINIFRAMES`` style.
"""
if self._owner_mgr:
self._owner_mgr.OnFloatingPaneMoved(self._pane_window, event) | [
"def",
"OnMove",
"(",
"self",
",",
"event",
")",
":",
"if",
"self",
".",
"_owner_mgr",
":",
"self",
".",
"_owner_mgr",
".",
"OnFloatingPaneMoved",
"(",
"self",
".",
"_pane_window",
",",
"event",
")"
] | https://github.com/wxWidgets/Phoenix/blob/b2199e299a6ca6d866aa6f3d0888499136ead9d6/wx/lib/agw/aui/framemanager.py#L2975-L2989 |
||
ewels/MultiQC | 9b953261d3d684c24eef1827a5ce6718c847a5af | multiqc/modules/hicup/hicup.py | python | MultiqcModule.hicup_dedup_chart | (self) | return bargraph.plot(self.hicup_data, keys, config) | Generate the HiCUP Deduplication plot | Generate the HiCUP Deduplication plot | [
"Generate",
"the",
"HiCUP",
"Deduplication",
"plot"
] | def hicup_dedup_chart(self):
"""Generate the HiCUP Deduplication plot"""
# Specify the order of the different possible categories
keys = OrderedDict()
keys["Deduplication_Cis_Close_Uniques"] = {"color": "#2f7ed8", "name": "Unique: cis < 10Kbp"}
keys["Deduplication_Cis_Far_Uniques"] = {"color": "#0d233a", "name": "Unique: cis > 10Kbp"}
keys["Deduplication_Trans_Uniques"] = {"color": "#492970", "name": "Unique: trans"}
keys["Duplicate_Read_Pairs"] = {"color": "#f28f43", "name": "Duplicate read pairs"}
# Config for the plot
config = {
"id": "hicup_dedup_plot",
"title": "HiCUP: De-Duplication Statistics",
"ylab": "# Di-Tags",
"cpswitch_counts_label": "Number of Di-Tags",
"cpswitch_c_active": False,
}
return bargraph.plot(self.hicup_data, keys, config) | [
"def",
"hicup_dedup_chart",
"(",
"self",
")",
":",
"# Specify the order of the different possible categories",
"keys",
"=",
"OrderedDict",
"(",
")",
"keys",
"[",
"\"Deduplication_Cis_Close_Uniques\"",
"]",
"=",
"{",
"\"color\"",
":",
"\"#2f7ed8\"",
",",
"\"name\"",
":",
"\"Unique: cis < 10Kbp\"",
"}",
"keys",
"[",
"\"Deduplication_Cis_Far_Uniques\"",
"]",
"=",
"{",
"\"color\"",
":",
"\"#0d233a\"",
",",
"\"name\"",
":",
"\"Unique: cis > 10Kbp\"",
"}",
"keys",
"[",
"\"Deduplication_Trans_Uniques\"",
"]",
"=",
"{",
"\"color\"",
":",
"\"#492970\"",
",",
"\"name\"",
":",
"\"Unique: trans\"",
"}",
"keys",
"[",
"\"Duplicate_Read_Pairs\"",
"]",
"=",
"{",
"\"color\"",
":",
"\"#f28f43\"",
",",
"\"name\"",
":",
"\"Duplicate read pairs\"",
"}",
"# Config for the plot",
"config",
"=",
"{",
"\"id\"",
":",
"\"hicup_dedup_plot\"",
",",
"\"title\"",
":",
"\"HiCUP: De-Duplication Statistics\"",
",",
"\"ylab\"",
":",
"\"# Di-Tags\"",
",",
"\"cpswitch_counts_label\"",
":",
"\"Number of Di-Tags\"",
",",
"\"cpswitch_c_active\"",
":",
"False",
",",
"}",
"return",
"bargraph",
".",
"plot",
"(",
"self",
".",
"hicup_data",
",",
"keys",
",",
"config",
")"
] | https://github.com/ewels/MultiQC/blob/9b953261d3d684c24eef1827a5ce6718c847a5af/multiqc/modules/hicup/hicup.py#L256-L275 |
|
coderholic/pyradio | cd3ee2d6b369fedfd009371a59aca23ab39b020f | pyradio/simple_curses_widgets.py | python | SimpleCursesWidgetColumns.enabled | (self, value) | [] | def enabled(self, value):
self._enabled = value
if self._showed:
self.show() | [
"def",
"enabled",
"(",
"self",
",",
"value",
")",
":",
"self",
".",
"_enabled",
"=",
"value",
"if",
"self",
".",
"_showed",
":",
"self",
".",
"show",
"(",
")"
] | https://github.com/coderholic/pyradio/blob/cd3ee2d6b369fedfd009371a59aca23ab39b020f/pyradio/simple_curses_widgets.py#L639-L642 |
||||
DataDog/integrations-core | 934674b29d94b70ccc008f76ea172d0cdae05e1e | sqlserver/datadog_checks/sqlserver/activity.py | python | SqlserverActivity.collect_activity | (self) | Collects all current activity for the SQLServer intance.
:return: | Collects all current activity for the SQLServer intance.
:return: | [
"Collects",
"all",
"current",
"activity",
"for",
"the",
"SQLServer",
"intance",
".",
":",
"return",
":"
] | def collect_activity(self):
"""
Collects all current activity for the SQLServer intance.
:return:
"""
# re-use the check's conn module, but set extra_key=dbm-activity- to ensure we get our own
# raw connection. adodbapi and pyodbc modules are thread safe, but connections are not.
with self.check.connection.open_managed_default_connection(key_prefix=self._conn_key_prefix):
with self.check.connection.get_managed_cursor(key_prefix=self._conn_key_prefix) as cursor:
connections = self._get_active_connections(cursor)
rows = self._get_activity(cursor)
normalized_rows = self._normalize_queries_and_filter_rows(rows, MAX_PAYLOAD_BYTES)
event = self._create_activity_event(normalized_rows, connections)
payload = json.dumps(event, default=default_json_event_encoding)
self._check.database_monitoring_query_activity(payload)
self.check.histogram(
"dd.sqlserver.activity.collect_activity.payload_size", len(payload), **self.check.debug_stats_kwargs()
) | [
"def",
"collect_activity",
"(",
"self",
")",
":",
"# re-use the check's conn module, but set extra_key=dbm-activity- to ensure we get our own",
"# raw connection. adodbapi and pyodbc modules are thread safe, but connections are not.",
"with",
"self",
".",
"check",
".",
"connection",
".",
"open_managed_default_connection",
"(",
"key_prefix",
"=",
"self",
".",
"_conn_key_prefix",
")",
":",
"with",
"self",
".",
"check",
".",
"connection",
".",
"get_managed_cursor",
"(",
"key_prefix",
"=",
"self",
".",
"_conn_key_prefix",
")",
"as",
"cursor",
":",
"connections",
"=",
"self",
".",
"_get_active_connections",
"(",
"cursor",
")",
"rows",
"=",
"self",
".",
"_get_activity",
"(",
"cursor",
")",
"normalized_rows",
"=",
"self",
".",
"_normalize_queries_and_filter_rows",
"(",
"rows",
",",
"MAX_PAYLOAD_BYTES",
")",
"event",
"=",
"self",
".",
"_create_activity_event",
"(",
"normalized_rows",
",",
"connections",
")",
"payload",
"=",
"json",
".",
"dumps",
"(",
"event",
",",
"default",
"=",
"default_json_event_encoding",
")",
"self",
".",
"_check",
".",
"database_monitoring_query_activity",
"(",
"payload",
")",
"self",
".",
"check",
".",
"histogram",
"(",
"\"dd.sqlserver.activity.collect_activity.payload_size\"",
",",
"len",
"(",
"payload",
")",
",",
"*",
"*",
"self",
".",
"check",
".",
"debug_stats_kwargs",
"(",
")",
")"
] | https://github.com/DataDog/integrations-core/blob/934674b29d94b70ccc008f76ea172d0cdae05e1e/sqlserver/datadog_checks/sqlserver/activity.py#L198-L217 |
||
Kkevsterrr/geneva | 36d3585545d4cb3450ea0b166d8d5f20a64ed8d8 | censors/censor9.py | python | Censor9.check_forbidden | (self, packet) | return False | Checks if a packet contains forbidden words. | Checks if a packet contains forbidden words. | [
"Checks",
"if",
"a",
"packet",
"contains",
"forbidden",
"words",
"."
] | def check_forbidden(self, packet):
"""
Checks if a packet contains forbidden words.
"""
# Check if any forbidden words appear in the packet payload
for keyword in self.forbidden:
if keyword in self.get_payload(packet):
self.logger.debug("Packet triggered censor: " + layers.packet.Packet._str_packet(packet))
return True
return False | [
"def",
"check_forbidden",
"(",
"self",
",",
"packet",
")",
":",
"# Check if any forbidden words appear in the packet payload",
"for",
"keyword",
"in",
"self",
".",
"forbidden",
":",
"if",
"keyword",
"in",
"self",
".",
"get_payload",
"(",
"packet",
")",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\"Packet triggered censor: \"",
"+",
"layers",
".",
"packet",
".",
"Packet",
".",
"_str_packet",
"(",
"packet",
")",
")",
"return",
"True",
"return",
"False"
] | https://github.com/Kkevsterrr/geneva/blob/36d3585545d4cb3450ea0b166d8d5f20a64ed8d8/censors/censor9.py#L85-L94 |
|
xtiankisutsa/MARA_Framework | ac4ac88bfd38f33ae8780a606ed09ab97177c562 | tools/lobotomy/core/include/androguard/androguard/decompiler/dad/control_flow.py | python | intervals | (graph) | return interval_graph, interv_heads | Compute the intervals of the graph
Returns
interval_graph: a graph of the intervals of G
interv_heads: a dict of (header node, interval) | Compute the intervals of the graph
Returns
interval_graph: a graph of the intervals of G
interv_heads: a dict of (header node, interval) | [
"Compute",
"the",
"intervals",
"of",
"the",
"graph",
"Returns",
"interval_graph",
":",
"a",
"graph",
"of",
"the",
"intervals",
"of",
"G",
"interv_heads",
":",
"a",
"dict",
"of",
"(",
"header",
"node",
"interval",
")"
] | def intervals(graph):
'''
Compute the intervals of the graph
Returns
interval_graph: a graph of the intervals of G
interv_heads: a dict of (header node, interval)
'''
interval_graph = Graph() # graph of intervals
heads = set([graph.get_entry()]) # set of header nodes
interv_heads = {} # interv_heads[i] = interval of header i
processed = dict([(i, False) for i in graph])
edges = {}
while heads:
head = heads.pop()
if not processed[head]:
processed[head] = True
interv_heads[head] = Interval(head)
# Check if if there is a node which has all its predecessor in the
# current interval. If there is, add that node to the interval and
# repeat until all the possible nodes have been added.
change = True
while change:
change = False
for node in graph.get_rpo()[1:]:
if all(p in interv_heads[head] for p in graph.preds(node)):
change |= interv_heads[head].add_node(node)
# At this stage, a node which is not in the interval, but has one
# of its predecessor in it, is the header of another interval. So
# we add all such nodes to the header list.
for node in graph:
if node not in interv_heads[head] and node not in heads:
if any(p in interv_heads[head] for p in graph.preds(node)):
edges.setdefault(interv_heads[head], []).append(node)
heads.add(node)
interval_graph.add_node(interv_heads[head])
interv_heads[head].compute_end(graph)
# Edges is a mapping of 'Interval -> [header nodes of interval successors]'
for interval, heads in edges.items():
for head in heads:
interval_graph.add_edge(interval, interv_heads[head])
interval_graph.set_entry(graph.get_entry().interval)
graph_exit = graph.get_exit()
if graph_exit:
interval_graph.set_exit(graph_exit.interval)
return interval_graph, interv_heads | [
"def",
"intervals",
"(",
"graph",
")",
":",
"interval_graph",
"=",
"Graph",
"(",
")",
"# graph of intervals",
"heads",
"=",
"set",
"(",
"[",
"graph",
".",
"get_entry",
"(",
")",
"]",
")",
"# set of header nodes",
"interv_heads",
"=",
"{",
"}",
"# interv_heads[i] = interval of header i",
"processed",
"=",
"dict",
"(",
"[",
"(",
"i",
",",
"False",
")",
"for",
"i",
"in",
"graph",
"]",
")",
"edges",
"=",
"{",
"}",
"while",
"heads",
":",
"head",
"=",
"heads",
".",
"pop",
"(",
")",
"if",
"not",
"processed",
"[",
"head",
"]",
":",
"processed",
"[",
"head",
"]",
"=",
"True",
"interv_heads",
"[",
"head",
"]",
"=",
"Interval",
"(",
"head",
")",
"# Check if if there is a node which has all its predecessor in the",
"# current interval. If there is, add that node to the interval and",
"# repeat until all the possible nodes have been added.",
"change",
"=",
"True",
"while",
"change",
":",
"change",
"=",
"False",
"for",
"node",
"in",
"graph",
".",
"get_rpo",
"(",
")",
"[",
"1",
":",
"]",
":",
"if",
"all",
"(",
"p",
"in",
"interv_heads",
"[",
"head",
"]",
"for",
"p",
"in",
"graph",
".",
"preds",
"(",
"node",
")",
")",
":",
"change",
"|=",
"interv_heads",
"[",
"head",
"]",
".",
"add_node",
"(",
"node",
")",
"# At this stage, a node which is not in the interval, but has one",
"# of its predecessor in it, is the header of another interval. So",
"# we add all such nodes to the header list.",
"for",
"node",
"in",
"graph",
":",
"if",
"node",
"not",
"in",
"interv_heads",
"[",
"head",
"]",
"and",
"node",
"not",
"in",
"heads",
":",
"if",
"any",
"(",
"p",
"in",
"interv_heads",
"[",
"head",
"]",
"for",
"p",
"in",
"graph",
".",
"preds",
"(",
"node",
")",
")",
":",
"edges",
".",
"setdefault",
"(",
"interv_heads",
"[",
"head",
"]",
",",
"[",
"]",
")",
".",
"append",
"(",
"node",
")",
"heads",
".",
"add",
"(",
"node",
")",
"interval_graph",
".",
"add_node",
"(",
"interv_heads",
"[",
"head",
"]",
")",
"interv_heads",
"[",
"head",
"]",
".",
"compute_end",
"(",
"graph",
")",
"# Edges is a mapping of 'Interval -> [header nodes of interval successors]'",
"for",
"interval",
",",
"heads",
"in",
"edges",
".",
"items",
"(",
")",
":",
"for",
"head",
"in",
"heads",
":",
"interval_graph",
".",
"add_edge",
"(",
"interval",
",",
"interv_heads",
"[",
"head",
"]",
")",
"interval_graph",
".",
"set_entry",
"(",
"graph",
".",
"get_entry",
"(",
")",
".",
"interval",
")",
"graph_exit",
"=",
"graph",
".",
"get_exit",
"(",
")",
"if",
"graph_exit",
":",
"interval_graph",
".",
"set_exit",
"(",
"graph_exit",
".",
"interval",
")",
"return",
"interval_graph",
",",
"interv_heads"
] | https://github.com/xtiankisutsa/MARA_Framework/blob/ac4ac88bfd38f33ae8780a606ed09ab97177c562/tools/lobotomy/core/include/androguard/androguard/decompiler/dad/control_flow.py#L31-L83 |
|
smart-mobile-software/gitstack | d9fee8f414f202143eb6e620529e8e5539a2af56 | python/Lib/warnings.py | python | warnpy3k | (message, category=None, stacklevel=1) | Issue a deprecation warning for Python 3.x related changes.
Warnings are omitted unless Python is started with the -3 option. | Issue a deprecation warning for Python 3.x related changes. | [
"Issue",
"a",
"deprecation",
"warning",
"for",
"Python",
"3",
".",
"x",
"related",
"changes",
"."
] | def warnpy3k(message, category=None, stacklevel=1):
"""Issue a deprecation warning for Python 3.x related changes.
Warnings are omitted unless Python is started with the -3 option.
"""
if sys.py3kwarning:
if category is None:
category = DeprecationWarning
warn(message, category, stacklevel+1) | [
"def",
"warnpy3k",
"(",
"message",
",",
"category",
"=",
"None",
",",
"stacklevel",
"=",
"1",
")",
":",
"if",
"sys",
".",
"py3kwarning",
":",
"if",
"category",
"is",
"None",
":",
"category",
"=",
"DeprecationWarning",
"warn",
"(",
"message",
",",
"category",
",",
"stacklevel",
"+",
"1",
")"
] | https://github.com/smart-mobile-software/gitstack/blob/d9fee8f414f202143eb6e620529e8e5539a2af56/python/Lib/warnings.py#L14-L22 |
||
interpretml/interpret-community | 84d86b7514fd9812f1497329bf1c4c9fc864370e | python/interpret_community/common/explanation_utils.py | python | _should_compress_sparse_matrix | (matrix) | return nnz > num_cells / 3 | Returns whether to compress the matrix, which can be sparse or dense format depending on optimal storage.
If more than a third of the values are non-zero in the sparse matrix, we convert it to dense format.
:param matrix: The matrix to compress.
:type matrix: scipy.sparse.csr_matrix or list[scipy.sparse.csr_matrix]
:return: Whether the matrix should be compressed.
:rtype: bool | Returns whether to compress the matrix, which can be sparse or dense format depending on optimal storage. | [
"Returns",
"whether",
"to",
"compress",
"the",
"matrix",
"which",
"can",
"be",
"sparse",
"or",
"dense",
"format",
"depending",
"on",
"optimal",
"storage",
"."
] | def _should_compress_sparse_matrix(matrix):
"""Returns whether to compress the matrix, which can be sparse or dense format depending on optimal storage.
If more than a third of the values are non-zero in the sparse matrix, we convert it to dense format.
:param matrix: The matrix to compress.
:type matrix: scipy.sparse.csr_matrix or list[scipy.sparse.csr_matrix]
:return: Whether the matrix should be compressed.
:rtype: bool
"""
nnz = 0
num_cells = 0
if isinstance(matrix, list):
for class_matrix in matrix:
nnz += class_matrix.nnz
num_cells += class_matrix.shape[0] * class_matrix.shape[1]
else:
nnz += matrix.nnz
num_cells += matrix.shape[0] * matrix.shape[1]
return nnz > num_cells / 3 | [
"def",
"_should_compress_sparse_matrix",
"(",
"matrix",
")",
":",
"nnz",
"=",
"0",
"num_cells",
"=",
"0",
"if",
"isinstance",
"(",
"matrix",
",",
"list",
")",
":",
"for",
"class_matrix",
"in",
"matrix",
":",
"nnz",
"+=",
"class_matrix",
".",
"nnz",
"num_cells",
"+=",
"class_matrix",
".",
"shape",
"[",
"0",
"]",
"*",
"class_matrix",
".",
"shape",
"[",
"1",
"]",
"else",
":",
"nnz",
"+=",
"matrix",
".",
"nnz",
"num_cells",
"+=",
"matrix",
".",
"shape",
"[",
"0",
"]",
"*",
"matrix",
".",
"shape",
"[",
"1",
"]",
"return",
"nnz",
">",
"num_cells",
"/",
"3"
] | https://github.com/interpretml/interpret-community/blob/84d86b7514fd9812f1497329bf1c4c9fc864370e/python/interpret_community/common/explanation_utils.py#L68-L87 |
|
indigo-dc/udocker | 87fb41cb5bcdb211d70f2b7f067c8e33d8959a1f | udocker/helper/elfpatcher.py | python | ElfPatcher.restore_ld | (self) | return True | Restore ld.so | Restore ld.so | [
"Restore",
"ld",
".",
"so"
] | def restore_ld(self):
"""Restore ld.so"""
elf_loader = self.get_container_loader()
futil_ldso = FileUtil(self._container_ld_so_orig)
if futil_ldso.size() <= 0:
Msg().err("Error: original loader not found or empty")
return False
if not futil_ldso.copyto(elf_loader):
Msg().err("Error: in loader copy or file locked by other process")
return False
return True | [
"def",
"restore_ld",
"(",
"self",
")",
":",
"elf_loader",
"=",
"self",
".",
"get_container_loader",
"(",
")",
"futil_ldso",
"=",
"FileUtil",
"(",
"self",
".",
"_container_ld_so_orig",
")",
"if",
"futil_ldso",
".",
"size",
"(",
")",
"<=",
"0",
":",
"Msg",
"(",
")",
".",
"err",
"(",
"\"Error: original loader not found or empty\"",
")",
"return",
"False",
"if",
"not",
"futil_ldso",
".",
"copyto",
"(",
"elf_loader",
")",
":",
"Msg",
"(",
")",
".",
"err",
"(",
"\"Error: in loader copy or file locked by other process\"",
")",
"return",
"False",
"return",
"True"
] | https://github.com/indigo-dc/udocker/blob/87fb41cb5bcdb211d70f2b7f067c8e33d8959a1f/udocker/helper/elfpatcher.py#L231-L243 |
|
twitter/zktraffic | 82db04d9aafa13f694d4f5c7265069db42c0307c | zktraffic/cli/printer.py | python | DefaultPrinter.request_handler | (self, req) | [] | def request_handler(self, req):
# close requests don't have a reply, dispatch it immediately
if req.is_close:
self.write(req)
else:
self._requests_by_client[req.client].add(req)
self._seen_requests += 1 | [
"def",
"request_handler",
"(",
"self",
",",
"req",
")",
":",
"# close requests don't have a reply, dispatch it immediately",
"if",
"req",
".",
"is_close",
":",
"self",
".",
"write",
"(",
"req",
")",
"else",
":",
"self",
".",
"_requests_by_client",
"[",
"req",
".",
"client",
"]",
".",
"add",
"(",
"req",
")",
"self",
".",
"_seen_requests",
"+=",
"1"
] | https://github.com/twitter/zktraffic/blob/82db04d9aafa13f694d4f5c7265069db42c0307c/zktraffic/cli/printer.py#L215-L222 |
||||
collinsctk/PyQYT | 7af3673955f94ff1b2df2f94220cd2dab2e252af | ExtentionPackages/Crypto/Util/RFC1751.py | python | _key2bin | (s) | return ''.join(kl) | Convert a key into a string of binary digits | Convert a key into a string of binary digits | [
"Convert",
"a",
"key",
"into",
"a",
"string",
"of",
"binary",
"digits"
] | def _key2bin(s):
"Convert a key into a string of binary digits"
kl=[bord(x) for x in s]
kl=[binary[x>>4]+binary[x&15] for x in kl]
return ''.join(kl) | [
"def",
"_key2bin",
"(",
"s",
")",
":",
"kl",
"=",
"[",
"bord",
"(",
"x",
")",
"for",
"x",
"in",
"s",
"]",
"kl",
"=",
"[",
"binary",
"[",
"x",
">>",
"4",
"]",
"+",
"binary",
"[",
"x",
"&",
"15",
"]",
"for",
"x",
"in",
"kl",
"]",
"return",
"''",
".",
"join",
"(",
"kl",
")"
] | https://github.com/collinsctk/PyQYT/blob/7af3673955f94ff1b2df2f94220cd2dab2e252af/ExtentionPackages/Crypto/Util/RFC1751.py#L38-L42 |
|
nopernik/mpDNS | b17dc39e7068406df82cb3431b3042e74e520cf9 | circuits/io/process.py | python | Process.stop | (self) | [] | def stop(self):
if self.p is not None:
self.p.terminate() | [
"def",
"stop",
"(",
"self",
")",
":",
"if",
"self",
".",
"p",
"is",
"not",
"None",
":",
"self",
".",
"p",
".",
"terminate",
"(",
")"
] | https://github.com/nopernik/mpDNS/blob/b17dc39e7068406df82cb3431b3042e74e520cf9/circuits/io/process.py#L114-L116 |
||||
AppScale/gts | 46f909cf5dc5ba81faf9d81dc9af598dcf8a82a9 | AppServer/google/appengine/tools/dev_appserver_import_hook.py | python | FakeGetPlatform | () | Fake distutils.util.get_platform on OS/X. Pass-through otherwise. | Fake distutils.util.get_platform on OS/X. Pass-through otherwise. | [
"Fake",
"distutils",
".",
"util",
".",
"get_platform",
"on",
"OS",
"/",
"X",
".",
"Pass",
"-",
"through",
"otherwise",
"."
] | def FakeGetPlatform():
"""Fake distutils.util.get_platform on OS/X. Pass-through otherwise."""
if sys.platform == 'darwin':
return 'macosx-'
else:
return distutils.util.get_platform() | [
"def",
"FakeGetPlatform",
"(",
")",
":",
"if",
"sys",
".",
"platform",
"==",
"'darwin'",
":",
"return",
"'macosx-'",
"else",
":",
"return",
"distutils",
".",
"util",
".",
"get_platform",
"(",
")"
] | https://github.com/AppScale/gts/blob/46f909cf5dc5ba81faf9d81dc9af598dcf8a82a9/AppServer/google/appengine/tools/dev_appserver_import_hook.py#L123-L128 |
||
mongodb/mongo-python-driver | c760f900f2e4109a247c2ffc8ad3549362007772 | pymongo/monitoring.py | python | _EventListeners.enabled_for_server_heartbeat | (self) | return self.__enabled_for_server_heartbeat | Are any ServerHeartbeatListener instances registered? | Are any ServerHeartbeatListener instances registered? | [
"Are",
"any",
"ServerHeartbeatListener",
"instances",
"registered?"
] | def enabled_for_server_heartbeat(self):
"""Are any ServerHeartbeatListener instances registered?"""
return self.__enabled_for_server_heartbeat | [
"def",
"enabled_for_server_heartbeat",
"(",
"self",
")",
":",
"return",
"self",
".",
"__enabled_for_server_heartbeat"
] | https://github.com/mongodb/mongo-python-driver/blob/c760f900f2e4109a247c2ffc8ad3549362007772/pymongo/monitoring.py#L1305-L1307 |
|
tomplus/kubernetes_asyncio | f028cc793e3a2c519be6a52a49fb77ff0b014c9b | kubernetes_asyncio/client/models/v1_mutating_webhook.py | python | V1MutatingWebhook.timeout_seconds | (self, timeout_seconds) | Sets the timeout_seconds of this V1MutatingWebhook.
TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 10 seconds. # noqa: E501
:param timeout_seconds: The timeout_seconds of this V1MutatingWebhook. # noqa: E501
:type: int | Sets the timeout_seconds of this V1MutatingWebhook. | [
"Sets",
"the",
"timeout_seconds",
"of",
"this",
"V1MutatingWebhook",
"."
] | def timeout_seconds(self, timeout_seconds):
"""Sets the timeout_seconds of this V1MutatingWebhook.
TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 10 seconds. # noqa: E501
:param timeout_seconds: The timeout_seconds of this V1MutatingWebhook. # noqa: E501
:type: int
"""
self._timeout_seconds = timeout_seconds | [
"def",
"timeout_seconds",
"(",
"self",
",",
"timeout_seconds",
")",
":",
"self",
".",
"_timeout_seconds",
"=",
"timeout_seconds"
] | https://github.com/tomplus/kubernetes_asyncio/blob/f028cc793e3a2c519be6a52a49fb77ff0b014c9b/kubernetes_asyncio/client/models/v1_mutating_webhook.py#L345-L354 |
||
FSecureLABS/Jandroid | e31d0dab58a2bfd6ed8e0a387172b8bd7c893436 | libs/platform-tools/platform-tools_windows/systrace/catapult/common/py_vulcanize/third_party/rjsmin/bench/jsmin.py | python | JavascriptMinify._jsmin | (self) | Copy the input to the output, deleting the characters which are
insignificant to JavaScript. Comments will be removed. Tabs will be
replaced with spaces. Carriage returns will be replaced with linefeeds.
Most spaces and linefeeds will be removed. | Copy the input to the output, deleting the characters which are
insignificant to JavaScript. Comments will be removed. Tabs will be
replaced with spaces. Carriage returns will be replaced with linefeeds.
Most spaces and linefeeds will be removed. | [
"Copy",
"the",
"input",
"to",
"the",
"output",
"deleting",
"the",
"characters",
"which",
"are",
"insignificant",
"to",
"JavaScript",
".",
"Comments",
"will",
"be",
"removed",
".",
"Tabs",
"will",
"be",
"replaced",
"with",
"spaces",
".",
"Carriage",
"returns",
"will",
"be",
"replaced",
"with",
"linefeeds",
".",
"Most",
"spaces",
"and",
"linefeeds",
"will",
"be",
"removed",
"."
] | def _jsmin(self):
"""Copy the input to the output, deleting the characters which are
insignificant to JavaScript. Comments will be removed. Tabs will be
replaced with spaces. Carriage returns will be replaced with linefeeds.
Most spaces and linefeeds will be removed.
"""
self.theA = '\n'
self._action(3)
while self.theA != '\000':
if self.theA == ' ':
if isAlphanum(self.theB):
self._action(1)
else:
self._action(2)
elif self.theA == '\n':
if self.theB in ['{', '[', '(', '+', '-']:
self._action(1)
elif self.theB == ' ':
self._action(3)
else:
if isAlphanum(self.theB):
self._action(1)
else:
self._action(2)
else:
if self.theB == ' ':
if isAlphanum(self.theA):
self._action(1)
else:
self._action(3)
elif self.theB == '\n':
if self.theA in ['}', ']', ')', '+', '-', '"', '\'']:
self._action(1)
else:
if isAlphanum(self.theA):
self._action(1)
else:
self._action(3)
else:
self._action(1) | [
"def",
"_jsmin",
"(",
"self",
")",
":",
"self",
".",
"theA",
"=",
"'\\n'",
"self",
".",
"_action",
"(",
"3",
")",
"while",
"self",
".",
"theA",
"!=",
"'\\000'",
":",
"if",
"self",
".",
"theA",
"==",
"' '",
":",
"if",
"isAlphanum",
"(",
"self",
".",
"theB",
")",
":",
"self",
".",
"_action",
"(",
"1",
")",
"else",
":",
"self",
".",
"_action",
"(",
"2",
")",
"elif",
"self",
".",
"theA",
"==",
"'\\n'",
":",
"if",
"self",
".",
"theB",
"in",
"[",
"'{'",
",",
"'['",
",",
"'('",
",",
"'+'",
",",
"'-'",
"]",
":",
"self",
".",
"_action",
"(",
"1",
")",
"elif",
"self",
".",
"theB",
"==",
"' '",
":",
"self",
".",
"_action",
"(",
"3",
")",
"else",
":",
"if",
"isAlphanum",
"(",
"self",
".",
"theB",
")",
":",
"self",
".",
"_action",
"(",
"1",
")",
"else",
":",
"self",
".",
"_action",
"(",
"2",
")",
"else",
":",
"if",
"self",
".",
"theB",
"==",
"' '",
":",
"if",
"isAlphanum",
"(",
"self",
".",
"theA",
")",
":",
"self",
".",
"_action",
"(",
"1",
")",
"else",
":",
"self",
".",
"_action",
"(",
"3",
")",
"elif",
"self",
".",
"theB",
"==",
"'\\n'",
":",
"if",
"self",
".",
"theA",
"in",
"[",
"'}'",
",",
"']'",
",",
"')'",
",",
"'+'",
",",
"'-'",
",",
"'\"'",
",",
"'\\''",
"]",
":",
"self",
".",
"_action",
"(",
"1",
")",
"else",
":",
"if",
"isAlphanum",
"(",
"self",
".",
"theA",
")",
":",
"self",
".",
"_action",
"(",
"1",
")",
"else",
":",
"self",
".",
"_action",
"(",
"3",
")",
"else",
":",
"self",
".",
"_action",
"(",
"1",
")"
] | https://github.com/FSecureLABS/Jandroid/blob/e31d0dab58a2bfd6ed8e0a387172b8bd7c893436/libs/platform-tools/platform-tools_windows/systrace/catapult/common/py_vulcanize/third_party/rjsmin/bench/jsmin.py#L171-L211 |
||
linxid/Machine_Learning_Study_Path | 558e82d13237114bbb8152483977806fc0c222af | Machine Learning In Action/Chapter4-NaiveBayes/venv/Lib/site.py | python | addsitepackages | (known_paths, sys_prefix=sys.prefix, exec_prefix=sys.exec_prefix) | return None | Add site-packages (and possibly site-python) to sys.path | Add site-packages (and possibly site-python) to sys.path | [
"Add",
"site",
"-",
"packages",
"(",
"and",
"possibly",
"site",
"-",
"python",
")",
"to",
"sys",
".",
"path"
] | def addsitepackages(known_paths, sys_prefix=sys.prefix, exec_prefix=sys.exec_prefix):
"""Add site-packages (and possibly site-python) to sys.path"""
prefixes = [os.path.join(sys_prefix, "local"), sys_prefix]
if exec_prefix != sys_prefix:
prefixes.append(os.path.join(exec_prefix, "local"))
for prefix in prefixes:
if prefix:
if sys.platform in ('os2emx', 'riscos') or _is_jython:
sitedirs = [os.path.join(prefix, "Lib", "site-packages")]
elif _is_pypy:
sitedirs = [os.path.join(prefix, 'site-packages')]
elif sys.platform == 'darwin' and prefix == sys_prefix:
if prefix.startswith("/System/Library/Frameworks/"): # Apple's Python
sitedirs = [os.path.join("/Library/Python", sys.version[:3], "site-packages"),
os.path.join(prefix, "Extras", "lib", "python")]
else: # any other Python distros on OSX work this way
sitedirs = [os.path.join(prefix, "lib",
"python" + sys.version[:3], "site-packages")]
elif os.sep == '/':
sitedirs = [os.path.join(prefix,
"lib",
"python" + sys.version[:3],
"site-packages"),
os.path.join(prefix, "lib", "site-python"),
os.path.join(prefix, "python" + sys.version[:3], "lib-dynload")]
lib64_dir = os.path.join(prefix, "lib64", "python" + sys.version[:3], "site-packages")
if (os.path.exists(lib64_dir) and
os.path.realpath(lib64_dir) not in [os.path.realpath(p) for p in sitedirs]):
if _is_64bit:
sitedirs.insert(0, lib64_dir)
else:
sitedirs.append(lib64_dir)
try:
# sys.getobjects only available in --with-pydebug build
sys.getobjects
sitedirs.insert(0, os.path.join(sitedirs[0], 'debug'))
except AttributeError:
pass
# Debian-specific dist-packages directories:
sitedirs.append(os.path.join(prefix, "local/lib",
"python" + sys.version[:3],
"dist-packages"))
if sys.version[0] == '2':
sitedirs.append(os.path.join(prefix, "lib",
"python" + sys.version[:3],
"dist-packages"))
else:
sitedirs.append(os.path.join(prefix, "lib",
"python" + sys.version[0],
"dist-packages"))
sitedirs.append(os.path.join(prefix, "lib", "dist-python"))
else:
sitedirs = [prefix, os.path.join(prefix, "lib", "site-packages")]
if sys.platform == 'darwin':
# for framework builds *only* we add the standard Apple
# locations. Currently only per-user, but /Library and
# /Network/Library could be added too
if 'Python.framework' in prefix:
home = os.environ.get('HOME')
if home:
sitedirs.append(
os.path.join(home,
'Library',
'Python',
sys.version[:3],
'site-packages'))
for sitedir in sitedirs:
if os.path.isdir(sitedir):
addsitedir(sitedir, known_paths)
return None | [
"def",
"addsitepackages",
"(",
"known_paths",
",",
"sys_prefix",
"=",
"sys",
".",
"prefix",
",",
"exec_prefix",
"=",
"sys",
".",
"exec_prefix",
")",
":",
"prefixes",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"sys_prefix",
",",
"\"local\"",
")",
",",
"sys_prefix",
"]",
"if",
"exec_prefix",
"!=",
"sys_prefix",
":",
"prefixes",
".",
"append",
"(",
"os",
".",
"path",
".",
"join",
"(",
"exec_prefix",
",",
"\"local\"",
")",
")",
"for",
"prefix",
"in",
"prefixes",
":",
"if",
"prefix",
":",
"if",
"sys",
".",
"platform",
"in",
"(",
"'os2emx'",
",",
"'riscos'",
")",
"or",
"_is_jython",
":",
"sitedirs",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"prefix",
",",
"\"Lib\"",
",",
"\"site-packages\"",
")",
"]",
"elif",
"_is_pypy",
":",
"sitedirs",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"prefix",
",",
"'site-packages'",
")",
"]",
"elif",
"sys",
".",
"platform",
"==",
"'darwin'",
"and",
"prefix",
"==",
"sys_prefix",
":",
"if",
"prefix",
".",
"startswith",
"(",
"\"/System/Library/Frameworks/\"",
")",
":",
"# Apple's Python",
"sitedirs",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"\"/Library/Python\"",
",",
"sys",
".",
"version",
"[",
":",
"3",
"]",
",",
"\"site-packages\"",
")",
",",
"os",
".",
"path",
".",
"join",
"(",
"prefix",
",",
"\"Extras\"",
",",
"\"lib\"",
",",
"\"python\"",
")",
"]",
"else",
":",
"# any other Python distros on OSX work this way",
"sitedirs",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"prefix",
",",
"\"lib\"",
",",
"\"python\"",
"+",
"sys",
".",
"version",
"[",
":",
"3",
"]",
",",
"\"site-packages\"",
")",
"]",
"elif",
"os",
".",
"sep",
"==",
"'/'",
":",
"sitedirs",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"prefix",
",",
"\"lib\"",
",",
"\"python\"",
"+",
"sys",
".",
"version",
"[",
":",
"3",
"]",
",",
"\"site-packages\"",
")",
",",
"os",
".",
"path",
".",
"join",
"(",
"prefix",
",",
"\"lib\"",
",",
"\"site-python\"",
")",
",",
"os",
".",
"path",
".",
"join",
"(",
"prefix",
",",
"\"python\"",
"+",
"sys",
".",
"version",
"[",
":",
"3",
"]",
",",
"\"lib-dynload\"",
")",
"]",
"lib64_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"prefix",
",",
"\"lib64\"",
",",
"\"python\"",
"+",
"sys",
".",
"version",
"[",
":",
"3",
"]",
",",
"\"site-packages\"",
")",
"if",
"(",
"os",
".",
"path",
".",
"exists",
"(",
"lib64_dir",
")",
"and",
"os",
".",
"path",
".",
"realpath",
"(",
"lib64_dir",
")",
"not",
"in",
"[",
"os",
".",
"path",
".",
"realpath",
"(",
"p",
")",
"for",
"p",
"in",
"sitedirs",
"]",
")",
":",
"if",
"_is_64bit",
":",
"sitedirs",
".",
"insert",
"(",
"0",
",",
"lib64_dir",
")",
"else",
":",
"sitedirs",
".",
"append",
"(",
"lib64_dir",
")",
"try",
":",
"# sys.getobjects only available in --with-pydebug build",
"sys",
".",
"getobjects",
"sitedirs",
".",
"insert",
"(",
"0",
",",
"os",
".",
"path",
".",
"join",
"(",
"sitedirs",
"[",
"0",
"]",
",",
"'debug'",
")",
")",
"except",
"AttributeError",
":",
"pass",
"# Debian-specific dist-packages directories:",
"sitedirs",
".",
"append",
"(",
"os",
".",
"path",
".",
"join",
"(",
"prefix",
",",
"\"local/lib\"",
",",
"\"python\"",
"+",
"sys",
".",
"version",
"[",
":",
"3",
"]",
",",
"\"dist-packages\"",
")",
")",
"if",
"sys",
".",
"version",
"[",
"0",
"]",
"==",
"'2'",
":",
"sitedirs",
".",
"append",
"(",
"os",
".",
"path",
".",
"join",
"(",
"prefix",
",",
"\"lib\"",
",",
"\"python\"",
"+",
"sys",
".",
"version",
"[",
":",
"3",
"]",
",",
"\"dist-packages\"",
")",
")",
"else",
":",
"sitedirs",
".",
"append",
"(",
"os",
".",
"path",
".",
"join",
"(",
"prefix",
",",
"\"lib\"",
",",
"\"python\"",
"+",
"sys",
".",
"version",
"[",
"0",
"]",
",",
"\"dist-packages\"",
")",
")",
"sitedirs",
".",
"append",
"(",
"os",
".",
"path",
".",
"join",
"(",
"prefix",
",",
"\"lib\"",
",",
"\"dist-python\"",
")",
")",
"else",
":",
"sitedirs",
"=",
"[",
"prefix",
",",
"os",
".",
"path",
".",
"join",
"(",
"prefix",
",",
"\"lib\"",
",",
"\"site-packages\"",
")",
"]",
"if",
"sys",
".",
"platform",
"==",
"'darwin'",
":",
"# for framework builds *only* we add the standard Apple",
"# locations. Currently only per-user, but /Library and",
"# /Network/Library could be added too",
"if",
"'Python.framework'",
"in",
"prefix",
":",
"home",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'HOME'",
")",
"if",
"home",
":",
"sitedirs",
".",
"append",
"(",
"os",
".",
"path",
".",
"join",
"(",
"home",
",",
"'Library'",
",",
"'Python'",
",",
"sys",
".",
"version",
"[",
":",
"3",
"]",
",",
"'site-packages'",
")",
")",
"for",
"sitedir",
"in",
"sitedirs",
":",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"sitedir",
")",
":",
"addsitedir",
"(",
"sitedir",
",",
"known_paths",
")",
"return",
"None"
] | https://github.com/linxid/Machine_Learning_Study_Path/blob/558e82d13237114bbb8152483977806fc0c222af/Machine Learning In Action/Chapter4-NaiveBayes/venv/Lib/site.py#L209-L283 |
|
cogitas3d/OrtogOnBlender | 881e93f5beb2263e44c270974dd0e81deca44762 | NomePaciente.py | python | NomePacienteDynamic.execute | (self, context) | return {'FINISHED'} | [] | def execute(self, context):
NomePacienteDynamicDef(self, context)
return {'FINISHED'} | [
"def",
"execute",
"(",
"self",
",",
"context",
")",
":",
"NomePacienteDynamicDef",
"(",
"self",
",",
"context",
")",
"return",
"{",
"'FINISHED'",
"}"
] | https://github.com/cogitas3d/OrtogOnBlender/blob/881e93f5beb2263e44c270974dd0e81deca44762/NomePaciente.py#L1117-L1119 |
|||
lad1337/XDM | 0c1b7009fe00f06f102a6f67c793478f515e7efe | site-packages/cherrypy/lib/sessions.py | python | close | () | Close the session object for this request. | Close the session object for this request. | [
"Close",
"the",
"session",
"object",
"for",
"this",
"request",
"."
] | def close():
"""Close the session object for this request."""
sess = getattr(cherrypy.serving, "session", None)
if getattr(sess, "locked", False):
# If the session is still locked we release the lock
sess.release_lock() | [
"def",
"close",
"(",
")",
":",
"sess",
"=",
"getattr",
"(",
"cherrypy",
".",
"serving",
",",
"\"session\"",
",",
"None",
")",
"if",
"getattr",
"(",
"sess",
",",
"\"locked\"",
",",
"False",
")",
":",
"# If the session is still locked we release the lock",
"sess",
".",
"release_lock",
"(",
")"
] | https://github.com/lad1337/XDM/blob/0c1b7009fe00f06f102a6f67c793478f515e7efe/site-packages/cherrypy/lib/sessions.py#L701-L706 |
||
oracle/graalpython | 577e02da9755d916056184ec441c26e00b70145c | graalpython/lib-python/3/tracemalloc.py | python | _Traces.__repr__ | (self) | return "<Traces len=%s>" % len(self) | [] | def __repr__(self):
return "<Traces len=%s>" % len(self) | [
"def",
"__repr__",
"(",
"self",
")",
":",
"return",
"\"<Traces len=%s>\"",
"%",
"len",
"(",
"self",
")"
] | https://github.com/oracle/graalpython/blob/577e02da9755d916056184ec441c26e00b70145c/graalpython/lib-python/3/tracemalloc.py#L308-L309 |
|||
mrJean1/PyGeodesy | 7da5ca71aa3edb7bc49e219e0b8190686e1a7965 | pygeodesy/named.py | python | callername | (up=1, dflt=NN, source=False, underOK=False) | return dflt | Get the name of the invoking callable.
@kwarg up: Number of call stack frames up (C{int}).
@kwarg dflt: Default return value (C{any}).
@kwarg source: Include source file name and line
number (C{bool}).
@kwarg underOK: Private, internal callables are OK (C{bool}).
@return: The callable name (C{str}) or B{C{dflt}} if none found. | Get the name of the invoking callable. | [
"Get",
"the",
"name",
"of",
"the",
"invoking",
"callable",
"."
] | def callername(up=1, dflt=NN, source=False, underOK=False):
'''Get the name of the invoking callable.
@kwarg up: Number of call stack frames up (C{int}).
@kwarg dflt: Default return value (C{any}).
@kwarg source: Include source file name and line
number (C{bool}).
@kwarg underOK: Private, internal callables are OK (C{bool}).
@return: The callable name (C{str}) or B{C{dflt}} if none found.
'''
try: # see .lazily._caller3
for u in range(up, up + 32):
n, f, s = _caller3(u)
if n and (underOK or n.startswith(_DUNDER_) or
not n.startswith(_UNDER_)):
if source:
n = NN(n, _AT_, f, _COLON_, str(s))
return n
except (AttributeError, ValueError):
pass
return dflt | [
"def",
"callername",
"(",
"up",
"=",
"1",
",",
"dflt",
"=",
"NN",
",",
"source",
"=",
"False",
",",
"underOK",
"=",
"False",
")",
":",
"try",
":",
"# see .lazily._caller3",
"for",
"u",
"in",
"range",
"(",
"up",
",",
"up",
"+",
"32",
")",
":",
"n",
",",
"f",
",",
"s",
"=",
"_caller3",
"(",
"u",
")",
"if",
"n",
"and",
"(",
"underOK",
"or",
"n",
".",
"startswith",
"(",
"_DUNDER_",
")",
"or",
"not",
"n",
".",
"startswith",
"(",
"_UNDER_",
")",
")",
":",
"if",
"source",
":",
"n",
"=",
"NN",
"(",
"n",
",",
"_AT_",
",",
"f",
",",
"_COLON_",
",",
"str",
"(",
"s",
")",
")",
"return",
"n",
"except",
"(",
"AttributeError",
",",
"ValueError",
")",
":",
"pass",
"return",
"dflt"
] | https://github.com/mrJean1/PyGeodesy/blob/7da5ca71aa3edb7bc49e219e0b8190686e1a7965/pygeodesy/named.py#L1059-L1080 |
|
kozec/sc-controller | ce92c773b8b26f6404882e9209aff212c4053170 | scc/gui/menu_editor.py | python | MenuEditor.set_new_menu | (self) | Setups editor for creating new menu. | Setups editor for creating new menu. | [
"Setups",
"editor",
"for",
"creating",
"new",
"menu",
"."
] | def set_new_menu(self):
"""
Setups editor for creating new menu.
"""
self.set_title(_("New Menu"))
rbInProfile = self.builder.get_object("rbInProfile")
entName = self.builder.get_object("entName")
rbInProfile.set_active(True)
self.original_id = None
self.original_type = MenuEditor.TYPE_INTERNAL
entName.set_text("") | [
"def",
"set_new_menu",
"(",
"self",
")",
":",
"self",
".",
"set_title",
"(",
"_",
"(",
"\"New Menu\"",
")",
")",
"rbInProfile",
"=",
"self",
".",
"builder",
".",
"get_object",
"(",
"\"rbInProfile\"",
")",
"entName",
"=",
"self",
".",
"builder",
".",
"get_object",
"(",
"\"entName\"",
")",
"rbInProfile",
".",
"set_active",
"(",
"True",
")",
"self",
".",
"original_id",
"=",
"None",
"self",
".",
"original_type",
"=",
"MenuEditor",
".",
"TYPE_INTERNAL",
"entName",
".",
"set_text",
"(",
"\"\"",
")"
] | https://github.com/kozec/sc-controller/blob/ce92c773b8b26f6404882e9209aff212c4053170/scc/gui/menu_editor.py#L279-L290 |
||
gcovr/gcovr | 09e89b5287fa5a11408a208cb34aea0efd19d4f5 | gcovr/writer/cobertura.py | python | print_xml_report | (covdata, output_file, options) | produce an XML report in the Cobertura format | produce an XML report in the Cobertura format | [
"produce",
"an",
"XML",
"report",
"in",
"the",
"Cobertura",
"format"
] | def print_xml_report(covdata, output_file, options):
"""produce an XML report in the Cobertura format"""
functionTotal = 0
functionCovered = 0
branchTotal = 0
branchCovered = 0
lineTotal = 0
lineCovered = 0
for key in covdata.keys():
(total, covered, _) = covdata[key].function_coverage()
functionTotal += total
functionCovered += covered
for key in covdata.keys():
(total, covered, _) = covdata[key].branch_coverage()
branchTotal += total
branchCovered += covered
for key in covdata.keys():
(total, covered, _) = covdata[key].line_coverage()
lineTotal += total
lineCovered += covered
root = etree.Element("coverage")
root.set(
"line-rate", lineTotal == 0 and '0.0'
or str(float(lineCovered) / lineTotal)
)
root.set(
"function-rate", functionTotal == 0 and '0.0'
or str(float(functionCovered) / functionTotal)
)
root.set(
"branch-rate", branchTotal == 0 and '0.0'
or str(float(branchCovered) / branchTotal)
)
root.set(
"lines-covered", str(lineCovered)
)
root.set(
"lines-valid", str(lineTotal)
)
root.set(
"functions-covered", str(functionCovered)
)
root.set(
"functions-valid", str(functionTotal)
)
root.set(
"branches-covered", str(branchCovered)
)
root.set(
"branches-valid", str(branchTotal)
)
root.set(
"complexity", "0.0"
)
root.set(
"timestamp", str(int(options.timestamp.timestamp()))
)
root.set(
"version", "gcovr %s" % (__version__,)
)
# Generate the <sources> element: this is either the root directory
# (specified by --root), or the CWD.
# sources = doc.createElement("sources")
sources = etree.SubElement(root, "sources")
# Generate the coverage output (on a per-package basis)
# packageXml = doc.createElement("packages")
packageXml = etree.SubElement(root, "packages")
packages = {}
for f in sorted(covdata):
data = covdata[f]
filename = presentable_filename(f, root_filter=options.root_filter)
if '/' in filename:
directory, fname = filename.rsplit('/', 1)
else:
directory, fname = '', filename
package = packages.setdefault(
directory, [etree.Element("package"), {}, 0, 0, 0, 0, 0, 0]
)
c = etree.Element("class")
# The Cobertura DTD requires a methods section, which isn't
# trivial to get from gcov (so we will leave it blank)
etree.SubElement(c, "methods")
lines = etree.SubElement(c, "lines")
class_lines = 0
class_hits = 0
class_branches = 0
class_branch_hits = 0
for lineno in sorted(data.lines):
line_cov = data.lines[lineno]
if line_cov.is_covered or line_cov.is_uncovered:
class_lines += 1
else:
continue
if line_cov.is_covered:
class_hits += 1
hits = line_cov.count
L = etree.Element("line")
L.set("number", str(lineno))
L.set("hits", str(hits))
branches = line_cov.branches
if not branches:
L.set("branch", "false")
else:
b_total, b_hits, coverage = line_cov.branch_coverage()
L.set("branch", "true")
L.set(
"condition-coverage",
"{}% ({}/{})".format(int(coverage), b_hits, b_total)
)
cond = etree.Element('condition')
cond.set("number", "0")
cond.set("type", "jump")
cond.set("coverage", "{}%".format(int(coverage)))
class_branch_hits += b_hits
class_branches += float(len(branches))
conditions = etree.Element("conditions")
conditions.append(cond)
L.append(conditions)
lines.append(L)
className = fname.replace('.', '_')
c.set("name", className)
c.set("filename", filename)
c.set(
"line-rate",
str(class_hits / (1.0 * class_lines or 1.0))
)
c.set(
"branch-rate",
str(class_branch_hits / (1.0 * class_branches or 1.0))
)
c.set("complexity", "0.0")
package[1][className] = c
package[2] += class_hits
package[3] += class_lines
package[4] += class_branch_hits
package[5] += class_branches
class_functions = 0
class_function_hits = 0
for function_name in data.functions:
class_functions += 1
if data.functions[function_name].count > 0:
class_function_hits += 1
package[6] = class_function_hits
package[7] = class_functions
keys = list(packages.keys())
keys.sort()
for packageName in keys:
packageData = packages[packageName]
package = packageData[0]
packageXml.append(package)
classes = etree.SubElement(package, "classes")
classNames = list(packageData[1].keys())
classNames.sort()
for className in classNames:
classes.append(packageData[1][className])
package.set("name", packageName.replace('/', '.'))
package.set(
"line-rate", str(packageData[2] / (1.0 * packageData[3] or 1.0))
)
package.set(
"function-rate", str(packageData[6] / (1.0 * packageData[7] or 1.0))
)
package.set(
"branch-rate", str(packageData[4] / (1.0 * packageData[5] or 1.0))
)
package.set("complexity", "0.0")
# Populate the <sources> element: this is the root directory
etree.SubElement(sources, "source").text = options.root.strip()
with open_binary_for_writing(output_file, 'coverage.xml') as fh:
fh.write(
etree.tostring(root,
pretty_print=options.prettyxml,
encoding="UTF-8",
xml_declaration=True,
doctype="<!DOCTYPE coverage SYSTEM 'http://cobertura.sourceforge.net/xml/coverage-04.dtd'>")) | [
"def",
"print_xml_report",
"(",
"covdata",
",",
"output_file",
",",
"options",
")",
":",
"functionTotal",
"=",
"0",
"functionCovered",
"=",
"0",
"branchTotal",
"=",
"0",
"branchCovered",
"=",
"0",
"lineTotal",
"=",
"0",
"lineCovered",
"=",
"0",
"for",
"key",
"in",
"covdata",
".",
"keys",
"(",
")",
":",
"(",
"total",
",",
"covered",
",",
"_",
")",
"=",
"covdata",
"[",
"key",
"]",
".",
"function_coverage",
"(",
")",
"functionTotal",
"+=",
"total",
"functionCovered",
"+=",
"covered",
"for",
"key",
"in",
"covdata",
".",
"keys",
"(",
")",
":",
"(",
"total",
",",
"covered",
",",
"_",
")",
"=",
"covdata",
"[",
"key",
"]",
".",
"branch_coverage",
"(",
")",
"branchTotal",
"+=",
"total",
"branchCovered",
"+=",
"covered",
"for",
"key",
"in",
"covdata",
".",
"keys",
"(",
")",
":",
"(",
"total",
",",
"covered",
",",
"_",
")",
"=",
"covdata",
"[",
"key",
"]",
".",
"line_coverage",
"(",
")",
"lineTotal",
"+=",
"total",
"lineCovered",
"+=",
"covered",
"root",
"=",
"etree",
".",
"Element",
"(",
"\"coverage\"",
")",
"root",
".",
"set",
"(",
"\"line-rate\"",
",",
"lineTotal",
"==",
"0",
"and",
"'0.0'",
"or",
"str",
"(",
"float",
"(",
"lineCovered",
")",
"/",
"lineTotal",
")",
")",
"root",
".",
"set",
"(",
"\"function-rate\"",
",",
"functionTotal",
"==",
"0",
"and",
"'0.0'",
"or",
"str",
"(",
"float",
"(",
"functionCovered",
")",
"/",
"functionTotal",
")",
")",
"root",
".",
"set",
"(",
"\"branch-rate\"",
",",
"branchTotal",
"==",
"0",
"and",
"'0.0'",
"or",
"str",
"(",
"float",
"(",
"branchCovered",
")",
"/",
"branchTotal",
")",
")",
"root",
".",
"set",
"(",
"\"lines-covered\"",
",",
"str",
"(",
"lineCovered",
")",
")",
"root",
".",
"set",
"(",
"\"lines-valid\"",
",",
"str",
"(",
"lineTotal",
")",
")",
"root",
".",
"set",
"(",
"\"functions-covered\"",
",",
"str",
"(",
"functionCovered",
")",
")",
"root",
".",
"set",
"(",
"\"functions-valid\"",
",",
"str",
"(",
"functionTotal",
")",
")",
"root",
".",
"set",
"(",
"\"branches-covered\"",
",",
"str",
"(",
"branchCovered",
")",
")",
"root",
".",
"set",
"(",
"\"branches-valid\"",
",",
"str",
"(",
"branchTotal",
")",
")",
"root",
".",
"set",
"(",
"\"complexity\"",
",",
"\"0.0\"",
")",
"root",
".",
"set",
"(",
"\"timestamp\"",
",",
"str",
"(",
"int",
"(",
"options",
".",
"timestamp",
".",
"timestamp",
"(",
")",
")",
")",
")",
"root",
".",
"set",
"(",
"\"version\"",
",",
"\"gcovr %s\"",
"%",
"(",
"__version__",
",",
")",
")",
"# Generate the <sources> element: this is either the root directory",
"# (specified by --root), or the CWD.",
"# sources = doc.createElement(\"sources\")",
"sources",
"=",
"etree",
".",
"SubElement",
"(",
"root",
",",
"\"sources\"",
")",
"# Generate the coverage output (on a per-package basis)",
"# packageXml = doc.createElement(\"packages\")",
"packageXml",
"=",
"etree",
".",
"SubElement",
"(",
"root",
",",
"\"packages\"",
")",
"packages",
"=",
"{",
"}",
"for",
"f",
"in",
"sorted",
"(",
"covdata",
")",
":",
"data",
"=",
"covdata",
"[",
"f",
"]",
"filename",
"=",
"presentable_filename",
"(",
"f",
",",
"root_filter",
"=",
"options",
".",
"root_filter",
")",
"if",
"'/'",
"in",
"filename",
":",
"directory",
",",
"fname",
"=",
"filename",
".",
"rsplit",
"(",
"'/'",
",",
"1",
")",
"else",
":",
"directory",
",",
"fname",
"=",
"''",
",",
"filename",
"package",
"=",
"packages",
".",
"setdefault",
"(",
"directory",
",",
"[",
"etree",
".",
"Element",
"(",
"\"package\"",
")",
",",
"{",
"}",
",",
"0",
",",
"0",
",",
"0",
",",
"0",
",",
"0",
",",
"0",
"]",
")",
"c",
"=",
"etree",
".",
"Element",
"(",
"\"class\"",
")",
"# The Cobertura DTD requires a methods section, which isn't",
"# trivial to get from gcov (so we will leave it blank)",
"etree",
".",
"SubElement",
"(",
"c",
",",
"\"methods\"",
")",
"lines",
"=",
"etree",
".",
"SubElement",
"(",
"c",
",",
"\"lines\"",
")",
"class_lines",
"=",
"0",
"class_hits",
"=",
"0",
"class_branches",
"=",
"0",
"class_branch_hits",
"=",
"0",
"for",
"lineno",
"in",
"sorted",
"(",
"data",
".",
"lines",
")",
":",
"line_cov",
"=",
"data",
".",
"lines",
"[",
"lineno",
"]",
"if",
"line_cov",
".",
"is_covered",
"or",
"line_cov",
".",
"is_uncovered",
":",
"class_lines",
"+=",
"1",
"else",
":",
"continue",
"if",
"line_cov",
".",
"is_covered",
":",
"class_hits",
"+=",
"1",
"hits",
"=",
"line_cov",
".",
"count",
"L",
"=",
"etree",
".",
"Element",
"(",
"\"line\"",
")",
"L",
".",
"set",
"(",
"\"number\"",
",",
"str",
"(",
"lineno",
")",
")",
"L",
".",
"set",
"(",
"\"hits\"",
",",
"str",
"(",
"hits",
")",
")",
"branches",
"=",
"line_cov",
".",
"branches",
"if",
"not",
"branches",
":",
"L",
".",
"set",
"(",
"\"branch\"",
",",
"\"false\"",
")",
"else",
":",
"b_total",
",",
"b_hits",
",",
"coverage",
"=",
"line_cov",
".",
"branch_coverage",
"(",
")",
"L",
".",
"set",
"(",
"\"branch\"",
",",
"\"true\"",
")",
"L",
".",
"set",
"(",
"\"condition-coverage\"",
",",
"\"{}% ({}/{})\"",
".",
"format",
"(",
"int",
"(",
"coverage",
")",
",",
"b_hits",
",",
"b_total",
")",
")",
"cond",
"=",
"etree",
".",
"Element",
"(",
"'condition'",
")",
"cond",
".",
"set",
"(",
"\"number\"",
",",
"\"0\"",
")",
"cond",
".",
"set",
"(",
"\"type\"",
",",
"\"jump\"",
")",
"cond",
".",
"set",
"(",
"\"coverage\"",
",",
"\"{}%\"",
".",
"format",
"(",
"int",
"(",
"coverage",
")",
")",
")",
"class_branch_hits",
"+=",
"b_hits",
"class_branches",
"+=",
"float",
"(",
"len",
"(",
"branches",
")",
")",
"conditions",
"=",
"etree",
".",
"Element",
"(",
"\"conditions\"",
")",
"conditions",
".",
"append",
"(",
"cond",
")",
"L",
".",
"append",
"(",
"conditions",
")",
"lines",
".",
"append",
"(",
"L",
")",
"className",
"=",
"fname",
".",
"replace",
"(",
"'.'",
",",
"'_'",
")",
"c",
".",
"set",
"(",
"\"name\"",
",",
"className",
")",
"c",
".",
"set",
"(",
"\"filename\"",
",",
"filename",
")",
"c",
".",
"set",
"(",
"\"line-rate\"",
",",
"str",
"(",
"class_hits",
"/",
"(",
"1.0",
"*",
"class_lines",
"or",
"1.0",
")",
")",
")",
"c",
".",
"set",
"(",
"\"branch-rate\"",
",",
"str",
"(",
"class_branch_hits",
"/",
"(",
"1.0",
"*",
"class_branches",
"or",
"1.0",
")",
")",
")",
"c",
".",
"set",
"(",
"\"complexity\"",
",",
"\"0.0\"",
")",
"package",
"[",
"1",
"]",
"[",
"className",
"]",
"=",
"c",
"package",
"[",
"2",
"]",
"+=",
"class_hits",
"package",
"[",
"3",
"]",
"+=",
"class_lines",
"package",
"[",
"4",
"]",
"+=",
"class_branch_hits",
"package",
"[",
"5",
"]",
"+=",
"class_branches",
"class_functions",
"=",
"0",
"class_function_hits",
"=",
"0",
"for",
"function_name",
"in",
"data",
".",
"functions",
":",
"class_functions",
"+=",
"1",
"if",
"data",
".",
"functions",
"[",
"function_name",
"]",
".",
"count",
">",
"0",
":",
"class_function_hits",
"+=",
"1",
"package",
"[",
"6",
"]",
"=",
"class_function_hits",
"package",
"[",
"7",
"]",
"=",
"class_functions",
"keys",
"=",
"list",
"(",
"packages",
".",
"keys",
"(",
")",
")",
"keys",
".",
"sort",
"(",
")",
"for",
"packageName",
"in",
"keys",
":",
"packageData",
"=",
"packages",
"[",
"packageName",
"]",
"package",
"=",
"packageData",
"[",
"0",
"]",
"packageXml",
".",
"append",
"(",
"package",
")",
"classes",
"=",
"etree",
".",
"SubElement",
"(",
"package",
",",
"\"classes\"",
")",
"classNames",
"=",
"list",
"(",
"packageData",
"[",
"1",
"]",
".",
"keys",
"(",
")",
")",
"classNames",
".",
"sort",
"(",
")",
"for",
"className",
"in",
"classNames",
":",
"classes",
".",
"append",
"(",
"packageData",
"[",
"1",
"]",
"[",
"className",
"]",
")",
"package",
".",
"set",
"(",
"\"name\"",
",",
"packageName",
".",
"replace",
"(",
"'/'",
",",
"'.'",
")",
")",
"package",
".",
"set",
"(",
"\"line-rate\"",
",",
"str",
"(",
"packageData",
"[",
"2",
"]",
"/",
"(",
"1.0",
"*",
"packageData",
"[",
"3",
"]",
"or",
"1.0",
")",
")",
")",
"package",
".",
"set",
"(",
"\"function-rate\"",
",",
"str",
"(",
"packageData",
"[",
"6",
"]",
"/",
"(",
"1.0",
"*",
"packageData",
"[",
"7",
"]",
"or",
"1.0",
")",
")",
")",
"package",
".",
"set",
"(",
"\"branch-rate\"",
",",
"str",
"(",
"packageData",
"[",
"4",
"]",
"/",
"(",
"1.0",
"*",
"packageData",
"[",
"5",
"]",
"or",
"1.0",
")",
")",
")",
"package",
".",
"set",
"(",
"\"complexity\"",
",",
"\"0.0\"",
")",
"# Populate the <sources> element: this is the root directory",
"etree",
".",
"SubElement",
"(",
"sources",
",",
"\"source\"",
")",
".",
"text",
"=",
"options",
".",
"root",
".",
"strip",
"(",
")",
"with",
"open_binary_for_writing",
"(",
"output_file",
",",
"'coverage.xml'",
")",
"as",
"fh",
":",
"fh",
".",
"write",
"(",
"etree",
".",
"tostring",
"(",
"root",
",",
"pretty_print",
"=",
"options",
".",
"prettyxml",
",",
"encoding",
"=",
"\"UTF-8\"",
",",
"xml_declaration",
"=",
"True",
",",
"doctype",
"=",
"\"<!DOCTYPE coverage SYSTEM 'http://cobertura.sourceforge.net/xml/coverage-04.dtd'>\"",
")",
")"
] | https://github.com/gcovr/gcovr/blob/09e89b5287fa5a11408a208cb34aea0efd19d4f5/gcovr/writer/cobertura.py#L25-L216 |
||
bilelmoussaoui/nautilus-git | 5389549b4bcde91cf8d691e61a4a28d3428ddb3c | src/models/git.py | python | Git.get_remote_url | (self) | return url | Return remote url. | Return remote url. | [
"Return",
"remote",
"url",
"."
] | def get_remote_url(self):
"""Return remote url."""
url = execute("git config --get remote.origin.url", self.dir)
if url[0:8] == "https://":
pass
elif url[0:4] == "git@":
# Remove git@ and .git in beginning/end and replace : with /
url = url[4:-4]
url = url.replace(":", "/")
url = "https://" + url
else:
raise RuntimeWarning("No valid url found for remote origin.")
return url | [
"def",
"get_remote_url",
"(",
"self",
")",
":",
"url",
"=",
"execute",
"(",
"\"git config --get remote.origin.url\"",
",",
"self",
".",
"dir",
")",
"if",
"url",
"[",
"0",
":",
"8",
"]",
"==",
"\"https://\"",
":",
"pass",
"elif",
"url",
"[",
"0",
":",
"4",
"]",
"==",
"\"git@\"",
":",
"# Remove git@ and .git in beginning/end and replace : with /",
"url",
"=",
"url",
"[",
"4",
":",
"-",
"4",
"]",
"url",
"=",
"url",
".",
"replace",
"(",
"\":\"",
",",
"\"/\"",
")",
"url",
"=",
"\"https://\"",
"+",
"url",
"else",
":",
"raise",
"RuntimeWarning",
"(",
"\"No valid url found for remote origin.\"",
")",
"return",
"url"
] | https://github.com/bilelmoussaoui/nautilus-git/blob/5389549b4bcde91cf8d691e61a4a28d3428ddb3c/src/models/git.py#L113-L126 |
|
AppScale/gts | 46f909cf5dc5ba81faf9d81dc9af598dcf8a82a9 | AppServer/lib/django-1.2/django/middleware/transaction.py | python | TransactionMiddleware.process_request | (self, request) | Enters transaction management | Enters transaction management | [
"Enters",
"transaction",
"management"
] | def process_request(self, request):
"""Enters transaction management"""
transaction.enter_transaction_management()
transaction.managed(True) | [
"def",
"process_request",
"(",
"self",
",",
"request",
")",
":",
"transaction",
".",
"enter_transaction_management",
"(",
")",
"transaction",
".",
"managed",
"(",
"True",
")"
] | https://github.com/AppScale/gts/blob/46f909cf5dc5ba81faf9d81dc9af598dcf8a82a9/AppServer/lib/django-1.2/django/middleware/transaction.py#L10-L13 |
||
naftaliharris/tauthon | 5587ceec329b75f7caf6d65a036db61ac1bae214 | Lib/logging/__init__.py | python | Formatter.__init__ | (self, fmt=None, datefmt=None) | Initialize the formatter with specified format strings.
Initialize the formatter either with the specified format string, or a
default as described above. Allow for specialized date formatting with
the optional datefmt argument (if omitted, you get the ISO8601 format). | Initialize the formatter with specified format strings. | [
"Initialize",
"the",
"formatter",
"with",
"specified",
"format",
"strings",
"."
] | def __init__(self, fmt=None, datefmt=None):
"""
Initialize the formatter with specified format strings.
Initialize the formatter either with the specified format string, or a
default as described above. Allow for specialized date formatting with
the optional datefmt argument (if omitted, you get the ISO8601 format).
"""
if fmt:
self._fmt = fmt
else:
self._fmt = "%(message)s"
self.datefmt = datefmt | [
"def",
"__init__",
"(",
"self",
",",
"fmt",
"=",
"None",
",",
"datefmt",
"=",
"None",
")",
":",
"if",
"fmt",
":",
"self",
".",
"_fmt",
"=",
"fmt",
"else",
":",
"self",
".",
"_fmt",
"=",
"\"%(message)s\"",
"self",
".",
"datefmt",
"=",
"datefmt"
] | https://github.com/naftaliharris/tauthon/blob/5587ceec329b75f7caf6d65a036db61ac1bae214/Lib/logging/__init__.py#L391-L403 |
||
yanx27/Pointnet_Pointnet2_pytorch | e365b9f7b9c3d7d6444278d92e298e3f078794e1 | log/part_seg/pointnet2_part_seg_msg/pointnet2_utils.py | python | PointNetFeaturePropagation.forward | (self, xyz1, xyz2, points1, points2) | return new_points | Input:
xyz1: input points position data, [B, C, N]
xyz2: sampled input points position data, [B, C, S]
points1: input points data, [B, D, N]
points2: input points data, [B, D, S]
Return:
new_points: upsampled points data, [B, D', N] | Input:
xyz1: input points position data, [B, C, N]
xyz2: sampled input points position data, [B, C, S]
points1: input points data, [B, D, N]
points2: input points data, [B, D, S]
Return:
new_points: upsampled points data, [B, D', N] | [
"Input",
":",
"xyz1",
":",
"input",
"points",
"position",
"data",
"[",
"B",
"C",
"N",
"]",
"xyz2",
":",
"sampled",
"input",
"points",
"position",
"data",
"[",
"B",
"C",
"S",
"]",
"points1",
":",
"input",
"points",
"data",
"[",
"B",
"D",
"N",
"]",
"points2",
":",
"input",
"points",
"data",
"[",
"B",
"D",
"S",
"]",
"Return",
":",
"new_points",
":",
"upsampled",
"points",
"data",
"[",
"B",
"D",
"N",
"]"
] | def forward(self, xyz1, xyz2, points1, points2):
"""
Input:
xyz1: input points position data, [B, C, N]
xyz2: sampled input points position data, [B, C, S]
points1: input points data, [B, D, N]
points2: input points data, [B, D, S]
Return:
new_points: upsampled points data, [B, D', N]
"""
xyz1 = xyz1.permute(0, 2, 1)
xyz2 = xyz2.permute(0, 2, 1)
points2 = points2.permute(0, 2, 1)
B, N, C = xyz1.shape
_, S, _ = xyz2.shape
if S == 1:
interpolated_points = points2.repeat(1, N, 1)
else:
dists = square_distance(xyz1, xyz2)
dists, idx = dists.sort(dim=-1)
dists, idx = dists[:, :, :3], idx[:, :, :3] # [B, N, 3]
dist_recip = 1.0 / (dists + 1e-8)
norm = torch.sum(dist_recip, dim=2, keepdim=True)
weight = dist_recip / norm
interpolated_points = torch.sum(index_points(points2, idx) * weight.view(B, N, 3, 1), dim=2)
if points1 is not None:
points1 = points1.permute(0, 2, 1)
new_points = torch.cat([points1, interpolated_points], dim=-1)
else:
new_points = interpolated_points
new_points = new_points.permute(0, 2, 1)
for i, conv in enumerate(self.mlp_convs):
bn = self.mlp_bns[i]
new_points = F.relu(bn(conv(new_points)), inplace=True)
return new_points | [
"def",
"forward",
"(",
"self",
",",
"xyz1",
",",
"xyz2",
",",
"points1",
",",
"points2",
")",
":",
"xyz1",
"=",
"xyz1",
".",
"permute",
"(",
"0",
",",
"2",
",",
"1",
")",
"xyz2",
"=",
"xyz2",
".",
"permute",
"(",
"0",
",",
"2",
",",
"1",
")",
"points2",
"=",
"points2",
".",
"permute",
"(",
"0",
",",
"2",
",",
"1",
")",
"B",
",",
"N",
",",
"C",
"=",
"xyz1",
".",
"shape",
"_",
",",
"S",
",",
"_",
"=",
"xyz2",
".",
"shape",
"if",
"S",
"==",
"1",
":",
"interpolated_points",
"=",
"points2",
".",
"repeat",
"(",
"1",
",",
"N",
",",
"1",
")",
"else",
":",
"dists",
"=",
"square_distance",
"(",
"xyz1",
",",
"xyz2",
")",
"dists",
",",
"idx",
"=",
"dists",
".",
"sort",
"(",
"dim",
"=",
"-",
"1",
")",
"dists",
",",
"idx",
"=",
"dists",
"[",
":",
",",
":",
",",
":",
"3",
"]",
",",
"idx",
"[",
":",
",",
":",
",",
":",
"3",
"]",
"# [B, N, 3]",
"dist_recip",
"=",
"1.0",
"/",
"(",
"dists",
"+",
"1e-8",
")",
"norm",
"=",
"torch",
".",
"sum",
"(",
"dist_recip",
",",
"dim",
"=",
"2",
",",
"keepdim",
"=",
"True",
")",
"weight",
"=",
"dist_recip",
"/",
"norm",
"interpolated_points",
"=",
"torch",
".",
"sum",
"(",
"index_points",
"(",
"points2",
",",
"idx",
")",
"*",
"weight",
".",
"view",
"(",
"B",
",",
"N",
",",
"3",
",",
"1",
")",
",",
"dim",
"=",
"2",
")",
"if",
"points1",
"is",
"not",
"None",
":",
"points1",
"=",
"points1",
".",
"permute",
"(",
"0",
",",
"2",
",",
"1",
")",
"new_points",
"=",
"torch",
".",
"cat",
"(",
"[",
"points1",
",",
"interpolated_points",
"]",
",",
"dim",
"=",
"-",
"1",
")",
"else",
":",
"new_points",
"=",
"interpolated_points",
"new_points",
"=",
"new_points",
".",
"permute",
"(",
"0",
",",
"2",
",",
"1",
")",
"for",
"i",
",",
"conv",
"in",
"enumerate",
"(",
"self",
".",
"mlp_convs",
")",
":",
"bn",
"=",
"self",
".",
"mlp_bns",
"[",
"i",
"]",
"new_points",
"=",
"F",
".",
"relu",
"(",
"bn",
"(",
"conv",
"(",
"new_points",
")",
")",
",",
"inplace",
"=",
"True",
")",
"return",
"new_points"
] | https://github.com/yanx27/Pointnet_Pointnet2_pytorch/blob/e365b9f7b9c3d7d6444278d92e298e3f078794e1/log/part_seg/pointnet2_part_seg_msg/pointnet2_utils.py#L276-L315 |
|
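A toy sketch of the inverse-distance weighting step inside forward() above, for one query point and its three nearest neighbors; the distances and features are assumed values.

    import torch

    dists = torch.tensor([0.1, 0.2, 0.4])                        # 3-NN distances (assumed)
    dist_recip = 1.0 / (dists + 1e-8)                            # closer neighbors weigh more
    weight = dist_recip / dist_recip.sum()                       # normalize weights to sum to 1
    feats = torch.tensor([[1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])   # neighbor features, shape [3, D]
    interpolated = (feats * weight.unsqueeze(1)).sum(dim=0)      # weighted feature, shape [D]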
dmlc/dgl | 8d14a739bc9e446d6c92ef83eafe5782398118de | examples/mxnet/scenegraph/model/faster_rcnn.py | python | faster_rcnn_resnet101_v1d_custom | (classes, transfer=None, pretrained_base=True,
pretrained=False, **kwargs) | return net | r"""Faster RCNN model with resnet101_v1d base network on custom dataset.
Parameters
----------
classes : iterable of str
Names of custom foreground classes. `len(classes)` is the number of foreground classes.
transfer : str or None
If not `None`, will try to reuse pre-trained weights from faster RCNN networks trained
on other datasets.
pretrained_base : bool or str
Boolean value controls whether to load the default pretrained weights for model.
String value represents the hashtag for a certain version of pretrained weights.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
Returns
-------
mxnet.gluon.HybridBlock
Hybrid faster RCNN network. | r"""Faster RCNN model with resnet101_v1d base network on custom dataset. | [
"r",
"Faster",
"RCNN",
"model",
"with",
"resnet101_v1d",
"base",
"network",
"on",
"custom",
"dataset",
"."
] | def faster_rcnn_resnet101_v1d_custom(classes, transfer=None, pretrained_base=True,
pretrained=False, **kwargs):
r"""Faster RCNN model with resnet101_v1d base network on custom dataset.
Parameters
----------
classes : iterable of str
Names of custom foreground classes. `len(classes)` is the number of foreground classes.
transfer : str or None
If not `None`, will try to reuse pre-trained weights from faster RCNN networks trained
on other datasets.
pretrained_base : bool or str
Boolean value controls whether to load the default pretrained weights for model.
String value represents the hashtag for a certain version of pretrained weights.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
Returns
-------
mxnet.gluon.HybridBlock
Hybrid faster RCNN network.
"""
if pretrained:
warnings.warn("Custom models don't provide `pretrained` weights, ignored.")
if transfer is None:
from gluoncv.model_zoo.resnetv1b import resnet101_v1d
base_network = resnet101_v1d(pretrained=pretrained_base, dilated=False,
use_global_stats=True, **kwargs)
features = nn.HybridSequential()
top_features = nn.HybridSequential()
for layer in ['conv1', 'bn1', 'relu', 'maxpool', 'layer1', 'layer2', 'layer3']:
features.add(getattr(base_network, layer))
for layer in ['layer4']:
top_features.add(getattr(base_network, layer))
train_patterns = '|'.join(['.*dense', '.*rpn', '.*down(2|3|4)_conv',
'.*layers(2|3|4)_conv'])
return get_faster_rcnn(
name='resnet101_v1d', dataset='custom', pretrained=pretrained,
features=features, top_features=top_features, classes=classes,
short=600, max_size=1000, train_patterns=train_patterns,
nms_thresh=0.5, nms_topk=400, post_nms=100,
roi_mode='align', roi_size=(14, 14), strides=16, clip=4.14,
rpn_channel=1024, base_size=16, scales=(2, 4, 8, 16, 32),
ratios=(0.5, 1, 2), alloc_size=(128, 128), rpn_nms_thresh=0.7,
rpn_train_pre_nms=12000, rpn_train_post_nms=2000,
rpn_test_pre_nms=6000, rpn_test_post_nms=300, rpn_min_size=16,
num_sample=128, pos_iou_thresh=0.5, pos_ratio=0.25, max_num_gt=3000,
**kwargs)
else:
net = faster_rcnn_resnet101_v1d_coco(pretrained=True)
reuse_classes = [x for x in classes if x in net.classes]
net.reset_class(classes, reuse_weights=reuse_classes)
return net | [
"def",
"faster_rcnn_resnet101_v1d_custom",
"(",
"classes",
",",
"transfer",
"=",
"None",
",",
"pretrained_base",
"=",
"True",
",",
"pretrained",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"pretrained",
":",
"warnings",
".",
"warn",
"(",
"\"Custom models don't provide `pretrained` weights, ignored.\"",
")",
"if",
"transfer",
"is",
"None",
":",
"from",
"gluoncv",
".",
"model_zoo",
".",
"resnetv1b",
"import",
"resnet101_v1d",
"base_network",
"=",
"resnet101_v1d",
"(",
"pretrained",
"=",
"pretrained_base",
",",
"dilated",
"=",
"False",
",",
"use_global_stats",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
"features",
"=",
"nn",
".",
"HybridSequential",
"(",
")",
"top_features",
"=",
"nn",
".",
"HybridSequential",
"(",
")",
"for",
"layer",
"in",
"[",
"'conv1'",
",",
"'bn1'",
",",
"'relu'",
",",
"'maxpool'",
",",
"'layer1'",
",",
"'layer2'",
",",
"'layer3'",
"]",
":",
"features",
".",
"add",
"(",
"getattr",
"(",
"base_network",
",",
"layer",
")",
")",
"for",
"layer",
"in",
"[",
"'layer4'",
"]",
":",
"top_features",
".",
"add",
"(",
"getattr",
"(",
"base_network",
",",
"layer",
")",
")",
"train_patterns",
"=",
"'|'",
".",
"join",
"(",
"[",
"'.*dense'",
",",
"'.*rpn'",
",",
"'.*down(2|3|4)_conv'",
",",
"'.*layers(2|3|4)_conv'",
"]",
")",
"return",
"get_faster_rcnn",
"(",
"name",
"=",
"'resnet101_v1d'",
",",
"dataset",
"=",
"'custom'",
",",
"pretrained",
"=",
"pretrained",
",",
"features",
"=",
"features",
",",
"top_features",
"=",
"top_features",
",",
"classes",
"=",
"classes",
",",
"short",
"=",
"600",
",",
"max_size",
"=",
"1000",
",",
"train_patterns",
"=",
"train_patterns",
",",
"nms_thresh",
"=",
"0.5",
",",
"nms_topk",
"=",
"400",
",",
"post_nms",
"=",
"100",
",",
"roi_mode",
"=",
"'align'",
",",
"roi_size",
"=",
"(",
"14",
",",
"14",
")",
",",
"strides",
"=",
"16",
",",
"clip",
"=",
"4.14",
",",
"rpn_channel",
"=",
"1024",
",",
"base_size",
"=",
"16",
",",
"scales",
"=",
"(",
"2",
",",
"4",
",",
"8",
",",
"16",
",",
"32",
")",
",",
"ratios",
"=",
"(",
"0.5",
",",
"1",
",",
"2",
")",
",",
"alloc_size",
"=",
"(",
"128",
",",
"128",
")",
",",
"rpn_nms_thresh",
"=",
"0.7",
",",
"rpn_train_pre_nms",
"=",
"12000",
",",
"rpn_train_post_nms",
"=",
"2000",
",",
"rpn_test_pre_nms",
"=",
"6000",
",",
"rpn_test_post_nms",
"=",
"300",
",",
"rpn_min_size",
"=",
"16",
",",
"num_sample",
"=",
"128",
",",
"pos_iou_thresh",
"=",
"0.5",
",",
"pos_ratio",
"=",
"0.25",
",",
"max_num_gt",
"=",
"3000",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"net",
"=",
"faster_rcnn_resnet101_v1d_coco",
"(",
"pretrained",
"=",
"True",
")",
"reuse_classes",
"=",
"[",
"x",
"for",
"x",
"in",
"classes",
"if",
"x",
"in",
"net",
".",
"classes",
"]",
"net",
".",
"reset_class",
"(",
"classes",
",",
"reuse_weights",
"=",
"reuse_classes",
")",
"return",
"net"
] | https://github.com/dmlc/dgl/blob/8d14a739bc9e446d6c92ef83eafe5782398118de/examples/mxnet/scenegraph/model/faster_rcnn.py#L695-L749 |
|
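A hedged usage sketch for the constructor above; the import path and class list are assumptions for illustration. Per the else-branch of the function itself, passing a non-None transfer reuses COCO-pretrained weights and resets the output classes.

    from model.faster_rcnn import faster_rcnn_resnet101_v1d_custom  # assumed import path

    net = faster_rcnn_resnet101_v1d_custom(classes=['person', 'chair', 'table'],
                                           transfer='coco')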
mne-tools/mne-python | f90b303ce66a8415e64edd4605b09ac0179c1ebf | mne/datasets/utils.py | python | has_dataset | (name) | return dp.endswith(check) | Check for presence of a dataset.
Parameters
----------
name : str | dict
The dataset to check. Strings refer to one of the supported datasets
listed :ref:`here <datasets>`. A :class:`dict` can be used to check for
user-defined datasets (see the Notes section of :func:`fetch_dataset`),
and must contain keys ``dataset_name``, ``archive_name``, ``url``,
``folder_name``, ``hash``.
Returns
-------
has : bool
True if the dataset is present. | Check for presence of a dataset. | [
"Check",
"for",
"presence",
"of",
"a",
"dataset",
"."
] | def has_dataset(name):
"""Check for presence of a dataset.
Parameters
----------
name : str | dict
The dataset to check. Strings refer to one of the supported datasets
listed :ref:`here <datasets>`. A :class:`dict` can be used to check for
user-defined datasets (see the Notes section of :func:`fetch_dataset`),
and must contain keys ``dataset_name``, ``archive_name``, ``url``,
``folder_name``, ``hash``.
Returns
-------
has : bool
True if the dataset is present.
"""
from mne.datasets._fetch import fetch_dataset
if isinstance(name, dict):
dataset_name = name['dataset_name']
dataset_params = name
else:
dataset_name = 'spm' if name == 'spm_face' else name
dataset_params = MNE_DATASETS[dataset_name]
dataset_params['dataset_name'] = dataset_name
config_key = dataset_params['config_key']
# get download path for specific dataset
path = _get_path(path=None, key=config_key, name=dataset_name)
dp = fetch_dataset(dataset_params, path=path, download=False,
check_version=False)
if dataset_name.startswith('bst_'):
check = dataset_name
else:
check = MNE_DATASETS[dataset_name]['folder_name']
return dp.endswith(check) | [
"def",
"has_dataset",
"(",
"name",
")",
":",
"from",
"mne",
".",
"datasets",
".",
"_fetch",
"import",
"fetch_dataset",
"if",
"isinstance",
"(",
"name",
",",
"dict",
")",
":",
"dataset_name",
"=",
"name",
"[",
"'dataset_name'",
"]",
"dataset_params",
"=",
"name",
"else",
":",
"dataset_name",
"=",
"'spm'",
"if",
"name",
"==",
"'spm_face'",
"else",
"name",
"dataset_params",
"=",
"MNE_DATASETS",
"[",
"dataset_name",
"]",
"dataset_params",
"[",
"'dataset_name'",
"]",
"=",
"dataset_name",
"config_key",
"=",
"dataset_params",
"[",
"'config_key'",
"]",
"# get download path for specific dataset",
"path",
"=",
"_get_path",
"(",
"path",
"=",
"None",
",",
"key",
"=",
"config_key",
",",
"name",
"=",
"dataset_name",
")",
"dp",
"=",
"fetch_dataset",
"(",
"dataset_params",
",",
"path",
"=",
"path",
",",
"download",
"=",
"False",
",",
"check_version",
"=",
"False",
")",
"if",
"dataset_name",
".",
"startswith",
"(",
"'bst_'",
")",
":",
"check",
"=",
"dataset_name",
"else",
":",
"check",
"=",
"MNE_DATASETS",
"[",
"dataset_name",
"]",
"[",
"'folder_name'",
"]",
"return",
"dp",
".",
"endswith",
"(",
"check",
")"
] | https://github.com/mne-tools/mne-python/blob/f90b303ce66a8415e64edd4605b09ac0179c1ebf/mne/datasets/utils.py#L200-L238 |
|
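A usage sketch for has_dataset above; 'sample' is a real MNE dataset name, but whether the check succeeds depends on what is downloaded locally.

    from mne.datasets import has_dataset

    if not has_dataset('sample'):
        print('sample dataset is not present locally')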
replit-archive/empythoned | 977ec10ced29a3541a4973dc2b59910805695752 | cpython/Lib/rfc822.py | python | quote | (s) | return s.replace('\\', '\\\\').replace('"', '\\"') | Add quotes around a string. | Add quotes around a string. | [
"Add",
"quotes",
"around",
"a",
"string",
"."
] | def quote(s):
"""Add quotes around a string."""
return s.replace('\\', '\\\\').replace('"', '\\"') | [
"def",
"quote",
"(",
"s",
")",
":",
"return",
"s",
".",
"replace",
"(",
"'\\\\'",
",",
"'\\\\\\\\'",
")",
".",
"replace",
"(",
"'\"'",
",",
"'\\\\\"'",
")"
] | https://github.com/replit-archive/empythoned/blob/977ec10ced29a3541a4973dc2b59910805695752/cpython/Lib/rfc822.py#L482-L484 |
|
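The quote() helper above comes from the Python 2 stdlib rfc822 module; it escapes backslashes first, then double quotes.

    from rfc822 import quote  # Python 2 only

    s = 'He said "hi"'
    assert quote(s) == 'He said \\"hi\\"'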
CLUEbenchmark/CLUE | 5bd39732734afecb490cf18a5212e692dbf2c007 | baselines/models_pytorch/mrc_pytorch/run_c3.py | python | InputExample.__init__ | (self, guid, text_a, text_b=None, label=None, text_c=None) | | | Constructs an InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples. | Constructs an InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples. | [
"Constructs",
"a",
"InputExample",
".",
"Args",
":",
"guid",
":",
"Unique",
"id",
"for",
"the",
"example",
".",
"text_a",
":",
"string",
".",
"The",
"untokenized",
"text",
"of",
"the",
"first",
"sequence",
".",
"For",
"single",
"sequence",
"tasks",
"only",
"this",
"sequence",
"must",
"be",
"specified",
".",
"text_b",
":",
"(",
"Optional",
")",
"string",
".",
"The",
"untokenized",
"text",
"of",
"the",
"second",
"sequence",
".",
"Only",
"must",
"be",
"specified",
"for",
"sequence",
"pair",
"tasks",
".",
"label",
":",
"(",
"Optional",
")",
"string",
".",
"The",
"label",
"of",
"the",
"example",
".",
"This",
"should",
"be",
"specified",
"for",
"train",
"and",
"dev",
"examples",
"but",
"not",
"for",
"test",
"examples",
"."
] | def __init__(self, guid, text_a, text_b=None, label=None, text_c=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.text_c = text_c
self.label = label | [
"def",
"__init__",
"(",
"self",
",",
"guid",
",",
"text_a",
",",
"text_b",
"=",
"None",
",",
"label",
"=",
"None",
",",
"text_c",
"=",
"None",
")",
":",
"self",
".",
"guid",
"=",
"guid",
"self",
".",
"text_a",
"=",
"text_a",
"self",
".",
"text_b",
"=",
"text_b",
"self",
".",
"text_c",
"=",
"text_c",
"self",
".",
"label",
"=",
"label"
] | https://github.com/CLUEbenchmark/CLUE/blob/5bd39732734afecb490cf18a5212e692dbf2c007/baselines/models_pytorch/mrc_pytorch/run_c3.py#L53-L68 |
||
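A construction sketch for the InputExample class above; the guid, texts, and label are invented values, and the class is assumed to be imported from run_c3.

    example = InputExample(guid='train-0',
                           text_a='The quick brown fox.',
                           text_b='A fox is quick.',
                           label='0')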
kennethreitz-archive/requests3 | 69eb662703b40db58fdc6c095d0fe130c56649bb | requests3/core/_http/_sync/poolmanager.py | python | proxy_from_url | (url, **kw) | return ProxyManager(proxy_url=url, **kw) | [] | def proxy_from_url(url, **kw):
return ProxyManager(proxy_url=url, **kw) | [
"def",
"proxy_from_url",
"(",
"url",
",",
"*",
"*",
"kw",
")",
":",
"return",
"ProxyManager",
"(",
"proxy_url",
"=",
"url",
",",
"*",
"*",
"kw",
")"
] | https://github.com/kennethreitz-archive/requests3/blob/69eb662703b40db58fdc6c095d0fe130c56649bb/requests3/core/_http/_sync/poolmanager.py#L425-L426 |
|||
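A hedged usage sketch for proxy_from_url above; the vendored _http package follows the urllib3 API, and the proxy address is an assumption.

    pm = proxy_from_url('http://127.0.0.1:3128/')
    resp = pm.request('GET', 'http://example.com/')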
proycon/pynlpl | 7707f69a91caaa6cde037f0d0379f1d42500a68b | pynlpl/formats/folia.py | python | parsecommonarguments | (object, doc, annotationtype, required, allowed, **kwargs) | | return kwargs | Internal function to parse common FoLiA attributes and set up the instance accordingly. Do not invoke directly. | Internal function to parse common FoLiA attributes and set up the instance accordingly. Do not invoke directly. | [
"Internal",
"function",
"to",
"parse",
"common",
"FoLiA",
"attributes",
"and",
"sets",
"up",
"the",
"instance",
"accordingly",
".",
"Do",
"not",
"invoke",
"directly",
"."
] | def parsecommonarguments(object, doc, annotationtype, required, allowed, **kwargs):
"""Internal function to parse common FoLiA attributes and sets up the instance accordingly. Do not invoke directly."""
object.doc = doc #The FoLiA root document
if required is None:
required = tuple()
if allowed is None:
allowed = tuple()
supported = required + allowed
if 'generate_id_in' in kwargs:
try:
kwargs['id'] = kwargs['generate_id_in'].generate_id(object.__class__)
except GenerateIDException:
pass #ID could not be generated, just skip
del kwargs['generate_id_in']
if 'id' in kwargs:
if Attrib.ID not in supported:
raise ValueError("ID is not supported on " + object.__class__.__name__)
isncname(kwargs['id'])
object.id = kwargs['id']
del kwargs['id']
elif Attrib.ID in required:
raise ValueError("ID is required for " + object.__class__.__name__)
else:
object.id = None
if 'set' in kwargs:
if Attrib.CLASS not in supported and not object.SETONLY:
raise ValueError("Set is not supported on " + object.__class__.__name__)
if not kwargs['set']:
object.set ="undefined"
else:
object.set = kwargs['set']
del kwargs['set']
if object.set:
if doc and (not (annotationtype in doc.annotationdefaults) or not (object.set in doc.annotationdefaults[annotationtype])):
if object.set in doc.alias_set:
object.set = doc.alias_set[object.set]
elif doc.autodeclare:
doc.annotations.append( (annotationtype, object.set ) )
doc.annotationdefaults[annotationtype] = {object.set: {} }
else:
raise ValueError("Set '" + object.set + "' is used for " + object.__class__.__name__ + ", but has no declaration!")
elif annotationtype in doc.annotationdefaults and len(doc.annotationdefaults[annotationtype]) == 1:
object.set = list(doc.annotationdefaults[annotationtype].keys())[0]
elif object.ANNOTATIONTYPE == AnnotationType.TEXT:
object.set = "undefined" #text content needs never be declared (for backward compatibility) and is in set 'undefined'
elif Attrib.CLASS in required: #or (hasattr(object,'SETONLY') and object.SETONLY):
raise ValueError("Set is required for " + object.__class__.__name__)
if 'class' in kwargs:
if not Attrib.CLASS in supported:
raise ValueError("Class is not supported for " + object.__class__.__name__)
object.cls = kwargs['class']
del kwargs['class']
elif 'cls' in kwargs:
if not Attrib.CLASS in supported:
raise ValueError("Class is not supported on " + object.__class__.__name__)
object.cls = kwargs['cls']
del kwargs['cls']
elif Attrib.CLASS in required:
raise ValueError("Class is required for " + object.__class__.__name__)
if object.cls and not object.set:
if doc and doc.autodeclare:
if not (annotationtype, 'undefined') in doc.annotations:
doc.annotations.append( (annotationtype, 'undefined') )
doc.annotationdefaults[annotationtype] = {'undefined': {} }
object.set = 'undefined'
else:
raise ValueError("Set is required for " + object.__class__.__name__ + ". Class '" + object.cls + "' assigned without set.")
if 'annotator' in kwargs:
if not Attrib.ANNOTATOR in supported:
raise ValueError("Annotator is not supported for " + object.__class__.__name__)
object.annotator = kwargs['annotator']
del kwargs['annotator']
elif doc and annotationtype in doc.annotationdefaults and object.set in doc.annotationdefaults[annotationtype] and 'annotator' in doc.annotationdefaults[annotationtype][object.set]:
object.annotator = doc.annotationdefaults[annotationtype][object.set]['annotator']
elif Attrib.ANNOTATOR in required:
raise ValueError("Annotator is required for " + object.__class__.__name__)
if 'annotatortype' in kwargs:
if not Attrib.ANNOTATOR in supported:
raise ValueError("Annotatortype is not supported for " + object.__class__.__name__)
if kwargs['annotatortype'] == 'auto' or kwargs['annotatortype'] == AnnotatorType.AUTO:
object.annotatortype = AnnotatorType.AUTO
elif kwargs['annotatortype'] == 'manual' or kwargs['annotatortype'] == AnnotatorType.MANUAL:
object.annotatortype = AnnotatorType.MANUAL
else:
raise ValueError("annotatortype must be 'auto' or 'manual', got " + repr(kwargs['annotatortype']))
del kwargs['annotatortype']
elif doc and annotationtype in doc.annotationdefaults and object.set in doc.annotationdefaults[annotationtype] and 'annotatortype' in doc.annotationdefaults[annotationtype][object.set]:
object.annotatortype = doc.annotationdefaults[annotationtype][object.set]['annotatortype']
elif Attrib.ANNOTATOR in required:
raise ValueError("Annotatortype is required for " + object.__class__.__name__)
if 'confidence' in kwargs:
if not Attrib.CONFIDENCE in supported:
raise ValueError("Confidence is not supported")
if kwargs['confidence'] is not None:
try:
object.confidence = float(kwargs['confidence'])
assert object.confidence >= 0.0 and object.confidence <= 1.0
except:
raise ValueError("Confidence must be a floating point number between 0 and 1, got " + repr(kwargs['confidence']) )
del kwargs['confidence']
elif Attrib.CONFIDENCE in required:
raise ValueError("Confidence is required for " + object.__class__.__name__)
if 'n' in kwargs:
if not Attrib.N in supported:
raise ValueError("N is not supported for " + object.__class__.__name__)
object.n = kwargs['n']
del kwargs['n']
elif Attrib.N in required:
raise ValueError("N is required for " + object.__class__.__name__)
if 'datetime' in kwargs:
if not Attrib.DATETIME in supported:
raise ValueError("Datetime is not supported")
if isinstance(kwargs['datetime'], datetime):
object.datetime = kwargs['datetime']
else:
#try:
object.datetime = parse_datetime(kwargs['datetime'])
#except:
# raise ValueError("Unable to parse datetime: " + str(repr(kwargs['datetime'])))
del kwargs['datetime']
elif doc and annotationtype in doc.annotationdefaults and object.set in doc.annotationdefaults[annotationtype] and 'datetime' in doc.annotationdefaults[annotationtype][object.set]:
object.datetime = doc.annotationdefaults[annotationtype][object.set]['datetime']
elif Attrib.DATETIME in required:
raise ValueError("Datetime is required for " + object.__class__.__name__)
if 'src' in kwargs:
if not Attrib.SRC in supported:
raise ValueError("Source is not supported for " + object.__class__.__name__)
object.src = kwargs['src']
del kwargs['src']
elif Attrib.SRC in required:
raise ValueError("Source is required for " + object.__class__.__name__)
if 'begintime' in kwargs:
if not Attrib.BEGINTIME in supported:
raise ValueError("Begintime is not supported for " + object.__class__.__name__)
object.begintime = parsetime(kwargs['begintime'])
del kwargs['begintime']
elif Attrib.BEGINTIME in required:
raise ValueError("Begintime is required for " + object.__class__.__name__)
if 'endtime' in kwargs:
if not Attrib.ENDTIME in supported:
raise ValueError("Endtime is not supported for " + object.__class__.__name__)
object.endtime = parsetime(kwargs['endtime'])
del kwargs['endtime']
elif Attrib.ENDTIME in required:
raise ValueError("Endtime is required for " + object.__class__.__name__)
if 'speaker' in kwargs:
if not Attrib.SPEAKER in supported:
raise ValueError("Speaker is not supported for " + object.__class__.__name__)
object.speaker = kwargs['speaker']
del kwargs['speaker']
elif Attrib.SPEAKER in required:
raise ValueError("Speaker is required for " + object.__class__.__name__)
if 'auth' in kwargs:
if kwargs['auth'] in ('no','false'):
object.auth = False
else:
object.auth = bool(kwargs['auth'])
del kwargs['auth']
else:
object.auth = object.__class__.AUTH
if 'text' in kwargs:
if kwargs['text']:
object.settext(kwargs['text'])
del kwargs['text']
if 'phon' in kwargs:
if kwargs['phon']:
object.setphon(kwargs['phon'])
del kwargs['phon']
if 'textclass' in kwargs:
if not Attrib.TEXTCLASS in supported:
raise ValueError("Textclass is not supported for " + object.__class__.__name__)
object.textclass = kwargs['textclass']
del kwargs['textclass']
else:
if Attrib.TEXTCLASS in supported:
object.textclass = "current"
if 'metadata' in kwargs:
if not Attrib.METADATA in supported:
raise ValueError("Metadata is not supported for " + object.__class__.__name__)
object.metadata = kwargs['metadata']
if doc:
try:
doc.submetadata[kwargs['metadata']]
except KeyError:
raise KeyError("No such metadata defined: " + kwargs['metadata'])
del kwargs['metadata']
if object.XLINK:
if 'href' in kwargs:
object.href =kwargs['href']
del kwargs['href']
if 'xlinktype' in kwargs:
object.xlinktype = kwargs['xlinktype']
del kwargs['xlinktype']
if 'xlinkrole' in kwargs:
object.xlinkrole = kwargs['xlinkrole']
del kwargs['xlinkrole']
if 'xlinklabel' in kwargs:
object.xlinklabel = kwargs['xlinklabel']
del kwargs['xlinklabel']
if 'xlinkshow' in kwargs:
object.xlinkshow = kwargs['xlinkshow']
del kwargs['xlinklabel']
if 'xlinktitle' in kwargs:
object.xlinktitle = kwargs['xlinktitle']
del kwargs['xlinktitle']
if doc and doc.debug >= 2:
print(" @id = ", repr(object.id),file=stderr)
print(" @set = ", repr(object.set),file=stderr)
print(" @class = ", repr(object.cls),file=stderr)
print(" @annotator = ", repr(object.annotator),file=stderr)
print(" @annotatortype= ", repr(object.annotatortype),file=stderr)
print(" @confidence = ", repr(object.confidence),file=stderr)
print(" @n = ", repr(object.n),file=stderr)
print(" @datetime = ", repr(object.datetime),file=stderr)
#set index
if object.id and doc:
if object.id in doc.index:
if doc.debug >= 1: print("[PyNLPl FoLiA DEBUG] Duplicate ID not permitted:" + object.id,file=stderr)
raise DuplicateIDError("Duplicate ID not permitted: " + object.id)
else:
if doc.debug >= 1: print("[PyNLPl FoLiA DEBUG] Adding to index: " + object.id,file=stderr)
doc.index[object.id] = object
#Parse feature attributes (shortcut for feature specification for some elements)
for c in object.ACCEPTED_DATA:
if issubclass(c, Feature):
if c.SUBSET in kwargs:
if kwargs[c.SUBSET]:
object.append(c,cls=kwargs[c.SUBSET])
del kwargs[c.SUBSET]
return kwargs | [
"def",
"parsecommonarguments",
"(",
"object",
",",
"doc",
",",
"annotationtype",
",",
"required",
",",
"allowed",
",",
"*",
"*",
"kwargs",
")",
":",
"object",
".",
"doc",
"=",
"doc",
"#The FoLiA root document",
"if",
"required",
"is",
"None",
":",
"required",
"=",
"tuple",
"(",
")",
"if",
"allowed",
"is",
"None",
":",
"allowed",
"=",
"tuple",
"(",
")",
"supported",
"=",
"required",
"+",
"allowed",
"if",
"'generate_id_in'",
"in",
"kwargs",
":",
"try",
":",
"kwargs",
"[",
"'id'",
"]",
"=",
"kwargs",
"[",
"'generate_id_in'",
"]",
".",
"generate_id",
"(",
"object",
".",
"__class__",
")",
"except",
"GenerateIDException",
":",
"pass",
"#ID could not be generated, just skip",
"del",
"kwargs",
"[",
"'generate_id_in'",
"]",
"if",
"'id'",
"in",
"kwargs",
":",
"if",
"Attrib",
".",
"ID",
"not",
"in",
"supported",
":",
"raise",
"ValueError",
"(",
"\"ID is not supported on \"",
"+",
"object",
".",
"__class__",
".",
"__name__",
")",
"isncname",
"(",
"kwargs",
"[",
"'id'",
"]",
")",
"object",
".",
"id",
"=",
"kwargs",
"[",
"'id'",
"]",
"del",
"kwargs",
"[",
"'id'",
"]",
"elif",
"Attrib",
".",
"ID",
"in",
"required",
":",
"raise",
"ValueError",
"(",
"\"ID is required for \"",
"+",
"object",
".",
"__class__",
".",
"__name__",
")",
"else",
":",
"object",
".",
"id",
"=",
"None",
"if",
"'set'",
"in",
"kwargs",
":",
"if",
"Attrib",
".",
"CLASS",
"not",
"in",
"supported",
"and",
"not",
"object",
".",
"SETONLY",
":",
"raise",
"ValueError",
"(",
"\"Set is not supported on \"",
"+",
"object",
".",
"__class__",
".",
"__name__",
")",
"if",
"not",
"kwargs",
"[",
"'set'",
"]",
":",
"object",
".",
"set",
"=",
"\"undefined\"",
"else",
":",
"object",
".",
"set",
"=",
"kwargs",
"[",
"'set'",
"]",
"del",
"kwargs",
"[",
"'set'",
"]",
"if",
"object",
".",
"set",
":",
"if",
"doc",
"and",
"(",
"not",
"(",
"annotationtype",
"in",
"doc",
".",
"annotationdefaults",
")",
"or",
"not",
"(",
"object",
".",
"set",
"in",
"doc",
".",
"annotationdefaults",
"[",
"annotationtype",
"]",
")",
")",
":",
"if",
"object",
".",
"set",
"in",
"doc",
".",
"alias_set",
":",
"object",
".",
"set",
"=",
"doc",
".",
"alias_set",
"[",
"object",
".",
"set",
"]",
"elif",
"doc",
".",
"autodeclare",
":",
"doc",
".",
"annotations",
".",
"append",
"(",
"(",
"annotationtype",
",",
"object",
".",
"set",
")",
")",
"doc",
".",
"annotationdefaults",
"[",
"annotationtype",
"]",
"=",
"{",
"object",
".",
"set",
":",
"{",
"}",
"}",
"else",
":",
"raise",
"ValueError",
"(",
"\"Set '\"",
"+",
"object",
".",
"set",
"+",
"\"' is used for \"",
"+",
"object",
".",
"__class__",
".",
"__name__",
"+",
"\", but has no declaration!\"",
")",
"elif",
"annotationtype",
"in",
"doc",
".",
"annotationdefaults",
"and",
"len",
"(",
"doc",
".",
"annotationdefaults",
"[",
"annotationtype",
"]",
")",
"==",
"1",
":",
"object",
".",
"set",
"=",
"list",
"(",
"doc",
".",
"annotationdefaults",
"[",
"annotationtype",
"]",
".",
"keys",
"(",
")",
")",
"[",
"0",
"]",
"elif",
"object",
".",
"ANNOTATIONTYPE",
"==",
"AnnotationType",
".",
"TEXT",
":",
"object",
".",
"set",
"=",
"\"undefined\"",
"#text content needs never be declared (for backward compatibility) and is in set 'undefined'",
"elif",
"Attrib",
".",
"CLASS",
"in",
"required",
":",
"#or (hasattr(object,'SETONLY') and object.SETONLY):",
"raise",
"ValueError",
"(",
"\"Set is required for \"",
"+",
"object",
".",
"__class__",
".",
"__name__",
")",
"if",
"'class'",
"in",
"kwargs",
":",
"if",
"not",
"Attrib",
".",
"CLASS",
"in",
"supported",
":",
"raise",
"ValueError",
"(",
"\"Class is not supported for \"",
"+",
"object",
".",
"__class__",
".",
"__name__",
")",
"object",
".",
"cls",
"=",
"kwargs",
"[",
"'class'",
"]",
"del",
"kwargs",
"[",
"'class'",
"]",
"elif",
"'cls'",
"in",
"kwargs",
":",
"if",
"not",
"Attrib",
".",
"CLASS",
"in",
"supported",
":",
"raise",
"ValueError",
"(",
"\"Class is not supported on \"",
"+",
"object",
".",
"__class__",
".",
"__name__",
")",
"object",
".",
"cls",
"=",
"kwargs",
"[",
"'cls'",
"]",
"del",
"kwargs",
"[",
"'cls'",
"]",
"elif",
"Attrib",
".",
"CLASS",
"in",
"required",
":",
"raise",
"ValueError",
"(",
"\"Class is required for \"",
"+",
"object",
".",
"__class__",
".",
"__name__",
")",
"if",
"object",
".",
"cls",
"and",
"not",
"object",
".",
"set",
":",
"if",
"doc",
"and",
"doc",
".",
"autodeclare",
":",
"if",
"not",
"(",
"annotationtype",
",",
"'undefined'",
")",
"in",
"doc",
".",
"annotations",
":",
"doc",
".",
"annotations",
".",
"append",
"(",
"(",
"annotationtype",
",",
"'undefined'",
")",
")",
"doc",
".",
"annotationdefaults",
"[",
"annotationtype",
"]",
"=",
"{",
"'undefined'",
":",
"{",
"}",
"}",
"object",
".",
"set",
"=",
"'undefined'",
"else",
":",
"raise",
"ValueError",
"(",
"\"Set is required for \"",
"+",
"object",
".",
"__class__",
".",
"__name__",
"+",
"\". Class '\"",
"+",
"object",
".",
"cls",
"+",
"\"' assigned without set.\"",
")",
"if",
"'annotator'",
"in",
"kwargs",
":",
"if",
"not",
"Attrib",
".",
"ANNOTATOR",
"in",
"supported",
":",
"raise",
"ValueError",
"(",
"\"Annotator is not supported for \"",
"+",
"object",
".",
"__class__",
".",
"__name__",
")",
"object",
".",
"annotator",
"=",
"kwargs",
"[",
"'annotator'",
"]",
"del",
"kwargs",
"[",
"'annotator'",
"]",
"elif",
"doc",
"and",
"annotationtype",
"in",
"doc",
".",
"annotationdefaults",
"and",
"object",
".",
"set",
"in",
"doc",
".",
"annotationdefaults",
"[",
"annotationtype",
"]",
"and",
"'annotator'",
"in",
"doc",
".",
"annotationdefaults",
"[",
"annotationtype",
"]",
"[",
"object",
".",
"set",
"]",
":",
"object",
".",
"annotator",
"=",
"doc",
".",
"annotationdefaults",
"[",
"annotationtype",
"]",
"[",
"object",
".",
"set",
"]",
"[",
"'annotator'",
"]",
"elif",
"Attrib",
".",
"ANNOTATOR",
"in",
"required",
":",
"raise",
"ValueError",
"(",
"\"Annotator is required for \"",
"+",
"object",
".",
"__class__",
".",
"__name__",
")",
"if",
"'annotatortype'",
"in",
"kwargs",
":",
"if",
"not",
"Attrib",
".",
"ANNOTATOR",
"in",
"supported",
":",
"raise",
"ValueError",
"(",
"\"Annotatortype is not supported for \"",
"+",
"object",
".",
"__class__",
".",
"__name__",
")",
"if",
"kwargs",
"[",
"'annotatortype'",
"]",
"==",
"'auto'",
"or",
"kwargs",
"[",
"'annotatortype'",
"]",
"==",
"AnnotatorType",
".",
"AUTO",
":",
"object",
".",
"annotatortype",
"=",
"AnnotatorType",
".",
"AUTO",
"elif",
"kwargs",
"[",
"'annotatortype'",
"]",
"==",
"'manual'",
"or",
"kwargs",
"[",
"'annotatortype'",
"]",
"==",
"AnnotatorType",
".",
"MANUAL",
":",
"object",
".",
"annotatortype",
"=",
"AnnotatorType",
".",
"MANUAL",
"else",
":",
"raise",
"ValueError",
"(",
"\"annotatortype must be 'auto' or 'manual', got \"",
"+",
"repr",
"(",
"kwargs",
"[",
"'annotatortype'",
"]",
")",
")",
"del",
"kwargs",
"[",
"'annotatortype'",
"]",
"elif",
"doc",
"and",
"annotationtype",
"in",
"doc",
".",
"annotationdefaults",
"and",
"object",
".",
"set",
"in",
"doc",
".",
"annotationdefaults",
"[",
"annotationtype",
"]",
"and",
"'annotatortype'",
"in",
"doc",
".",
"annotationdefaults",
"[",
"annotationtype",
"]",
"[",
"object",
".",
"set",
"]",
":",
"object",
".",
"annotatortype",
"=",
"doc",
".",
"annotationdefaults",
"[",
"annotationtype",
"]",
"[",
"object",
".",
"set",
"]",
"[",
"'annotatortype'",
"]",
"elif",
"Attrib",
".",
"ANNOTATOR",
"in",
"required",
":",
"raise",
"ValueError",
"(",
"\"Annotatortype is required for \"",
"+",
"object",
".",
"__class__",
".",
"__name__",
")",
"if",
"'confidence'",
"in",
"kwargs",
":",
"if",
"not",
"Attrib",
".",
"CONFIDENCE",
"in",
"supported",
":",
"raise",
"ValueError",
"(",
"\"Confidence is not supported\"",
")",
"if",
"kwargs",
"[",
"'confidence'",
"]",
"is",
"not",
"None",
":",
"try",
":",
"object",
".",
"confidence",
"=",
"float",
"(",
"kwargs",
"[",
"'confidence'",
"]",
")",
"assert",
"object",
".",
"confidence",
">=",
"0.0",
"and",
"object",
".",
"confidence",
"<=",
"1.0",
"except",
":",
"raise",
"ValueError",
"(",
"\"Confidence must be a floating point number between 0 and 1, got \"",
"+",
"repr",
"(",
"kwargs",
"[",
"'confidence'",
"]",
")",
")",
"del",
"kwargs",
"[",
"'confidence'",
"]",
"elif",
"Attrib",
".",
"CONFIDENCE",
"in",
"required",
":",
"raise",
"ValueError",
"(",
"\"Confidence is required for \"",
"+",
"object",
".",
"__class__",
".",
"__name__",
")",
"if",
"'n'",
"in",
"kwargs",
":",
"if",
"not",
"Attrib",
".",
"N",
"in",
"supported",
":",
"raise",
"ValueError",
"(",
"\"N is not supported for \"",
"+",
"object",
".",
"__class__",
".",
"__name__",
")",
"object",
".",
"n",
"=",
"kwargs",
"[",
"'n'",
"]",
"del",
"kwargs",
"[",
"'n'",
"]",
"elif",
"Attrib",
".",
"N",
"in",
"required",
":",
"raise",
"ValueError",
"(",
"\"N is required for \"",
"+",
"object",
".",
"__class__",
".",
"__name__",
")",
"if",
"'datetime'",
"in",
"kwargs",
":",
"if",
"not",
"Attrib",
".",
"DATETIME",
"in",
"supported",
":",
"raise",
"ValueError",
"(",
"\"Datetime is not supported\"",
")",
"if",
"isinstance",
"(",
"kwargs",
"[",
"'datetime'",
"]",
",",
"datetime",
")",
":",
"object",
".",
"datetime",
"=",
"kwargs",
"[",
"'datetime'",
"]",
"else",
":",
"#try:",
"object",
".",
"datetime",
"=",
"parse_datetime",
"(",
"kwargs",
"[",
"'datetime'",
"]",
")",
"#except:",
"# raise ValueError(\"Unable to parse datetime: \" + str(repr(kwargs['datetime'])))",
"del",
"kwargs",
"[",
"'datetime'",
"]",
"elif",
"doc",
"and",
"annotationtype",
"in",
"doc",
".",
"annotationdefaults",
"and",
"object",
".",
"set",
"in",
"doc",
".",
"annotationdefaults",
"[",
"annotationtype",
"]",
"and",
"'datetime'",
"in",
"doc",
".",
"annotationdefaults",
"[",
"annotationtype",
"]",
"[",
"object",
".",
"set",
"]",
":",
"object",
".",
"datetime",
"=",
"doc",
".",
"annotationdefaults",
"[",
"annotationtype",
"]",
"[",
"object",
".",
"set",
"]",
"[",
"'datetime'",
"]",
"elif",
"Attrib",
".",
"DATETIME",
"in",
"required",
":",
"raise",
"ValueError",
"(",
"\"Datetime is required for \"",
"+",
"object",
".",
"__class__",
".",
"__name__",
")",
"if",
"'src'",
"in",
"kwargs",
":",
"if",
"not",
"Attrib",
".",
"SRC",
"in",
"supported",
":",
"raise",
"ValueError",
"(",
"\"Source is not supported for \"",
"+",
"object",
".",
"__class__",
".",
"__name__",
")",
"object",
".",
"src",
"=",
"kwargs",
"[",
"'src'",
"]",
"del",
"kwargs",
"[",
"'src'",
"]",
"elif",
"Attrib",
".",
"SRC",
"in",
"required",
":",
"raise",
"ValueError",
"(",
"\"Source is required for \"",
"+",
"object",
".",
"__class__",
".",
"__name__",
")",
"if",
"'begintime'",
"in",
"kwargs",
":",
"if",
"not",
"Attrib",
".",
"BEGINTIME",
"in",
"supported",
":",
"raise",
"ValueError",
"(",
"\"Begintime is not supported for \"",
"+",
"object",
".",
"__class__",
".",
"__name__",
")",
"object",
".",
"begintime",
"=",
"parsetime",
"(",
"kwargs",
"[",
"'begintime'",
"]",
")",
"del",
"kwargs",
"[",
"'begintime'",
"]",
"elif",
"Attrib",
".",
"BEGINTIME",
"in",
"required",
":",
"raise",
"ValueError",
"(",
"\"Begintime is required for \"",
"+",
"object",
".",
"__class__",
".",
"__name__",
")",
"if",
"'endtime'",
"in",
"kwargs",
":",
"if",
"not",
"Attrib",
".",
"ENDTIME",
"in",
"supported",
":",
"raise",
"ValueError",
"(",
"\"Endtime is not supported for \"",
"+",
"object",
".",
"__class__",
".",
"__name__",
")",
"object",
".",
"endtime",
"=",
"parsetime",
"(",
"kwargs",
"[",
"'endtime'",
"]",
")",
"del",
"kwargs",
"[",
"'endtime'",
"]",
"elif",
"Attrib",
".",
"ENDTIME",
"in",
"required",
":",
"raise",
"ValueError",
"(",
"\"Endtime is required for \"",
"+",
"object",
".",
"__class__",
".",
"__name__",
")",
"if",
"'speaker'",
"in",
"kwargs",
":",
"if",
"not",
"Attrib",
".",
"SPEAKER",
"in",
"supported",
":",
"raise",
"ValueError",
"(",
"\"Speaker is not supported for \"",
"+",
"object",
".",
"__class__",
".",
"__name__",
")",
"object",
".",
"speaker",
"=",
"kwargs",
"[",
"'speaker'",
"]",
"del",
"kwargs",
"[",
"'speaker'",
"]",
"elif",
"Attrib",
".",
"SPEAKER",
"in",
"required",
":",
"raise",
"ValueError",
"(",
"\"Speaker is required for \"",
"+",
"object",
".",
"__class__",
".",
"__name__",
")",
"if",
"'auth'",
"in",
"kwargs",
":",
"if",
"kwargs",
"[",
"'auth'",
"]",
"in",
"(",
"'no'",
",",
"'false'",
")",
":",
"object",
".",
"auth",
"=",
"False",
"else",
":",
"object",
".",
"auth",
"=",
"bool",
"(",
"kwargs",
"[",
"'auth'",
"]",
")",
"del",
"kwargs",
"[",
"'auth'",
"]",
"else",
":",
"object",
".",
"auth",
"=",
"object",
".",
"__class__",
".",
"AUTH",
"if",
"'text'",
"in",
"kwargs",
":",
"if",
"kwargs",
"[",
"'text'",
"]",
":",
"object",
".",
"settext",
"(",
"kwargs",
"[",
"'text'",
"]",
")",
"del",
"kwargs",
"[",
"'text'",
"]",
"if",
"'phon'",
"in",
"kwargs",
":",
"if",
"kwargs",
"[",
"'phon'",
"]",
":",
"object",
".",
"setphon",
"(",
"kwargs",
"[",
"'phon'",
"]",
")",
"del",
"kwargs",
"[",
"'phon'",
"]",
"if",
"'textclass'",
"in",
"kwargs",
":",
"if",
"not",
"Attrib",
".",
"TEXTCLASS",
"in",
"supported",
":",
"raise",
"ValueError",
"(",
"\"Textclass is not supported for \"",
"+",
"object",
".",
"__class__",
".",
"__name__",
")",
"object",
".",
"textclass",
"=",
"kwargs",
"[",
"'textclass'",
"]",
"del",
"kwargs",
"[",
"'textclass'",
"]",
"else",
":",
"if",
"Attrib",
".",
"TEXTCLASS",
"in",
"supported",
":",
"object",
".",
"textclass",
"=",
"\"current\"",
"if",
"'metadata'",
"in",
"kwargs",
":",
"if",
"not",
"Attrib",
".",
"METADATA",
"in",
"supported",
":",
"raise",
"ValueError",
"(",
"\"Metadata is not supported for \"",
"+",
"object",
".",
"__class__",
".",
"__name__",
")",
"object",
".",
"metadata",
"=",
"kwargs",
"[",
"'metadata'",
"]",
"if",
"doc",
":",
"try",
":",
"doc",
".",
"submetadata",
"[",
"kwargs",
"[",
"'metadata'",
"]",
"]",
"except",
"KeyError",
":",
"raise",
"KeyError",
"(",
"\"No such metadata defined: \"",
"+",
"kwargs",
"[",
"'metadata'",
"]",
")",
"del",
"kwargs",
"[",
"'metadata'",
"]",
"if",
"object",
".",
"XLINK",
":",
"if",
"'href'",
"in",
"kwargs",
":",
"object",
".",
"href",
"=",
"kwargs",
"[",
"'href'",
"]",
"del",
"kwargs",
"[",
"'href'",
"]",
"if",
"'xlinktype'",
"in",
"kwargs",
":",
"object",
".",
"xlinktype",
"=",
"kwargs",
"[",
"'xlinktype'",
"]",
"del",
"kwargs",
"[",
"'xlinktype'",
"]",
"if",
"'xlinkrole'",
"in",
"kwargs",
":",
"object",
".",
"xlinkrole",
"=",
"kwargs",
"[",
"'xlinkrole'",
"]",
"del",
"kwargs",
"[",
"'xlinkrole'",
"]",
"if",
"'xlinklabel'",
"in",
"kwargs",
":",
"object",
".",
"xlinklabel",
"=",
"kwargs",
"[",
"'xlinklabel'",
"]",
"del",
"kwargs",
"[",
"'xlinklabel'",
"]",
"if",
"'xlinkshow'",
"in",
"kwargs",
":",
"object",
".",
"xlinkshow",
"=",
"kwargs",
"[",
"'xlinkshow'",
"]",
"del",
"kwargs",
"[",
"'xlinklabel'",
"]",
"if",
"'xlinktitle'",
"in",
"kwargs",
":",
"object",
".",
"xlinktitle",
"=",
"kwargs",
"[",
"'xlinktitle'",
"]",
"del",
"kwargs",
"[",
"'xlinktitle'",
"]",
"if",
"doc",
"and",
"doc",
".",
"debug",
">=",
"2",
":",
"print",
"(",
"\" @id = \"",
",",
"repr",
"(",
"object",
".",
"id",
")",
",",
"file",
"=",
"stderr",
")",
"print",
"(",
"\" @set = \"",
",",
"repr",
"(",
"object",
".",
"set",
")",
",",
"file",
"=",
"stderr",
")",
"print",
"(",
"\" @class = \"",
",",
"repr",
"(",
"object",
".",
"cls",
")",
",",
"file",
"=",
"stderr",
")",
"print",
"(",
"\" @annotator = \"",
",",
"repr",
"(",
"object",
".",
"annotator",
")",
",",
"file",
"=",
"stderr",
")",
"print",
"(",
"\" @annotatortype= \"",
",",
"repr",
"(",
"object",
".",
"annotatortype",
")",
",",
"file",
"=",
"stderr",
")",
"print",
"(",
"\" @confidence = \"",
",",
"repr",
"(",
"object",
".",
"confidence",
")",
",",
"file",
"=",
"stderr",
")",
"print",
"(",
"\" @n = \"",
",",
"repr",
"(",
"object",
".",
"n",
")",
",",
"file",
"=",
"stderr",
")",
"print",
"(",
"\" @datetime = \"",
",",
"repr",
"(",
"object",
".",
"datetime",
")",
",",
"file",
"=",
"stderr",
")",
"#set index",
"if",
"object",
".",
"id",
"and",
"doc",
":",
"if",
"object",
".",
"id",
"in",
"doc",
".",
"index",
":",
"if",
"doc",
".",
"debug",
">=",
"1",
":",
"print",
"(",
"\"[PyNLPl FoLiA DEBUG] Duplicate ID not permitted:\"",
"+",
"object",
".",
"id",
",",
"file",
"=",
"stderr",
")",
"raise",
"DuplicateIDError",
"(",
"\"Duplicate ID not permitted: \"",
"+",
"object",
".",
"id",
")",
"else",
":",
"if",
"doc",
".",
"debug",
">=",
"1",
":",
"print",
"(",
"\"[PyNLPl FoLiA DEBUG] Adding to index: \"",
"+",
"object",
".",
"id",
",",
"file",
"=",
"stderr",
")",
"doc",
".",
"index",
"[",
"object",
".",
"id",
"]",
"=",
"object",
"#Parse feature attributes (shortcut for feature specification for some elements)",
"for",
"c",
"in",
"object",
".",
"ACCEPTED_DATA",
":",
"if",
"issubclass",
"(",
"c",
",",
"Feature",
")",
":",
"if",
"c",
".",
"SUBSET",
"in",
"kwargs",
":",
"if",
"kwargs",
"[",
"c",
".",
"SUBSET",
"]",
":",
"object",
".",
"append",
"(",
"c",
",",
"cls",
"=",
"kwargs",
"[",
"c",
".",
"SUBSET",
"]",
")",
"del",
"kwargs",
"[",
"c",
".",
"SUBSET",
"]",
"return",
"kwargs"
] | https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L234-L508 |
|
omz/PythonistaAppTemplate | f560f93f8876d82a21d108977f90583df08d55af | PythonistaAppTemplate/PythonistaKit.framework/pylib_ext/matplotlib/axes.py | python | Axes.set_axisbelow | (self, b) | Set whether the axis ticks and gridlines are above or below most
artists
ACCEPTS: [ *True* | *False* ] | Set whether the axis ticks and gridlines are above or below most
artists | [
"Set",
"whether",
"the",
"axis",
"ticks",
"and",
"gridlines",
"are",
"above",
"or",
"below",
"most",
"artists"
] | def set_axisbelow(self, b):
"""
Set whether the axis ticks and gridlines are above or below most
artists
ACCEPTS: [ *True* | *False* ]
"""
self._axisbelow = b | [
"def",
"set_axisbelow",
"(",
"self",
",",
"b",
")",
":",
"self",
".",
"_axisbelow",
"=",
"b"
] | https://github.com/omz/PythonistaAppTemplate/blob/f560f93f8876d82a21d108977f90583df08d55af/PythonistaAppTemplate/PythonistaKit.framework/pylib_ext/matplotlib/axes.py#L2134-L2141 |
||
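A usage example for set_axisbelow above: with True, the ticks and grid are drawn beneath plotted artists.

    import matplotlib.pyplot as plt

    fig, ax = plt.subplots()
    ax.set_axisbelow(True)
    ax.grid(True)
    ax.plot([0, 1, 2], [0, 1, 4])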
Komodo/KomodoEdit | 61edab75dce2bdb03943b387b0608ea36f548e8e | src/codeintel/play/core.py | python | FlexGridSizer.RemoveGrowableRow | (*args, **kwargs) | return _core.FlexGridSizer_RemoveGrowableRow(*args, **kwargs) | RemoveGrowableRow(size_t idx) | RemoveGrowableRow(size_t idx) | [
"RemoveGrowableRow",
"(",
"size_t",
"idx",
")"
] | def RemoveGrowableRow(*args, **kwargs):
"""RemoveGrowableRow(size_t idx)"""
return _core.FlexGridSizer_RemoveGrowableRow(*args, **kwargs) | [
"def",
"RemoveGrowableRow",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_core",
".",
"FlexGridSizer_RemoveGrowableRow",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/Komodo/KomodoEdit/blob/61edab75dce2bdb03943b387b0608ea36f548e8e/src/codeintel/play/core.py#L8639-L8641 |
|
KunpengLi1994/VSRN | 777ae74326fdb6abe69dbd3911d0e545322520d1 | model.py | python | EncoderImagePrecomp.init_weights | (self) | Xavier initialization for the fully connected layer | Xavier initialization for the fully connected layer | [
"Xavier",
"initialization",
"for",
"the",
"fully",
"connected",
"layer"
] | def init_weights(self):
"""Xavier initialization for the fully connected layer
"""
r = np.sqrt(6.) / np.sqrt(self.fc.in_features +
self.fc.out_features)
self.fc.weight.data.uniform_(-r, r)
self.fc.bias.data.fill_(0) | [
"def",
"init_weights",
"(",
"self",
")",
":",
"r",
"=",
"np",
".",
"sqrt",
"(",
"6.",
")",
"/",
"np",
".",
"sqrt",
"(",
"self",
".",
"fc",
".",
"in_features",
"+",
"self",
".",
"fc",
".",
"out_features",
")",
"self",
".",
"fc",
".",
"weight",
".",
"data",
".",
"uniform_",
"(",
"-",
"r",
",",
"r",
")",
"self",
".",
"fc",
".",
"bias",
".",
"data",
".",
"fill_",
"(",
"0",
")"
] | https://github.com/KunpengLi1994/VSRN/blob/777ae74326fdb6abe69dbd3911d0e545322520d1/model.py#L162-L168 |
||
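The Xavier bound used by init_weights above, computed for assumed layer sizes; weights are then drawn from Uniform(-r, r) and the bias starts at zero.

    import numpy as np

    in_features, out_features = 2048, 1024                 # assumed fc dimensions
    r = np.sqrt(6.) / np.sqrt(in_features + out_features)  # ~0.0442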
Tautulli/Tautulli | 2410eb33805aaac4bd1c5dad0f71e4f15afaf742 | lib/requests/cookies.py | python | RequestsCookieJar._find | (self, name, domain=None, path=None) | Requests uses this method internally to get cookie values.
If there are conflicting cookies, _find arbitrarily chooses one.
See _find_no_duplicates if you want an exception thrown if there are
conflicting cookies.
:param name: a string containing name of cookie
:param domain: (optional) string containing domain of cookie
:param path: (optional) string containing path of cookie
:return: cookie.value | Requests uses this method internally to get cookie values. | [
"Requests",
"uses",
"this",
"method",
"internally",
"to",
"get",
"cookie",
"values",
"."
] | def _find(self, name, domain=None, path=None):
"""Requests uses this method internally to get cookie values.
If there are conflicting cookies, _find arbitrarily chooses one.
See _find_no_duplicates if you want an exception thrown if there are
conflicting cookies.
:param name: a string containing name of cookie
:param domain: (optional) string containing domain of cookie
:param path: (optional) string containing path of cookie
:return: cookie.value
"""
for cookie in iter(self):
if cookie.name == name:
if domain is None or cookie.domain == domain:
if path is None or cookie.path == path:
return cookie.value
raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path)) | [
"def",
"_find",
"(",
"self",
",",
"name",
",",
"domain",
"=",
"None",
",",
"path",
"=",
"None",
")",
":",
"for",
"cookie",
"in",
"iter",
"(",
"self",
")",
":",
"if",
"cookie",
".",
"name",
"==",
"name",
":",
"if",
"domain",
"is",
"None",
"or",
"cookie",
".",
"domain",
"==",
"domain",
":",
"if",
"path",
"is",
"None",
"or",
"cookie",
".",
"path",
"==",
"path",
":",
"return",
"cookie",
".",
"value",
"raise",
"KeyError",
"(",
"'name=%r, domain=%r, path=%r'",
"%",
"(",
"name",
",",
"domain",
",",
"path",
")",
")"
] | https://github.com/Tautulli/Tautulli/blob/2410eb33805aaac4bd1c5dad0f71e4f15afaf742/lib/requests/cookies.py#L356-L374 |
||
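A sketch of the name/domain/path matching that _find above implements; the cookie values are invented. The public jar.get() applies the same filters (through the companion _find_no_duplicates the docstring mentions).

    from requests.cookies import RequestsCookieJar

    jar = RequestsCookieJar()
    jar.set('tasty_cookie', 'yum', domain='example.com', path='/cookies')
    assert jar.get('tasty_cookie', domain='example.com', path='/cookies') == 'yum'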
oilshell/oil | 94388e7d44a9ad879b12615f6203b38596b5a2d3 | Python-2.7.13/Tools/ccbench/ccbench.py | python | run_latency_tests | (max_threads) | [] | def run_latency_tests(max_threads):
for task in latency_tasks:
print("Background CPU task:", task.__doc__)
print()
func, args = task()
nthreads = 0
while nthreads <= max_threads:
results = run_latency_test(func, args, nthreads)
n = len(results)
# We print out milliseconds
lats = [1000 * (t2 - t1) for (t1, t2) in results]
#print(list(map(int, lats)))
avg = sum(lats) / n
dev = (sum((x - avg) ** 2 for x in lats) / n) ** 0.5
print("CPU threads=%d: %d ms. (std dev: %d ms.)" % (nthreads, avg, dev), end="")
print()
#print(" [... from %d samples]" % n)
nthreads += 1
print() | [
"def",
"run_latency_tests",
"(",
"max_threads",
")",
":",
"for",
"task",
"in",
"latency_tasks",
":",
"print",
"(",
"\"Background CPU task:\"",
",",
"task",
".",
"__doc__",
")",
"print",
"(",
")",
"func",
",",
"args",
"=",
"task",
"(",
")",
"nthreads",
"=",
"0",
"while",
"nthreads",
"<=",
"max_threads",
":",
"results",
"=",
"run_latency_test",
"(",
"func",
",",
"args",
",",
"nthreads",
")",
"n",
"=",
"len",
"(",
"results",
")",
"# We print out milliseconds",
"lats",
"=",
"[",
"1000",
"*",
"(",
"t2",
"-",
"t1",
")",
"for",
"(",
"t1",
",",
"t2",
")",
"in",
"results",
"]",
"#print(list(map(int, lats)))",
"avg",
"=",
"sum",
"(",
"lats",
")",
"/",
"n",
"dev",
"=",
"(",
"sum",
"(",
"(",
"x",
"-",
"avg",
")",
"**",
"2",
"for",
"x",
"in",
"lats",
")",
"/",
"n",
")",
"**",
"0.5",
"print",
"(",
"\"CPU threads=%d: %d ms. (std dev: %d ms.)\"",
"%",
"(",
"nthreads",
",",
"avg",
",",
"dev",
")",
",",
"end",
"=",
"\"\"",
")",
"print",
"(",
")",
"#print(\" [... from %d samples]\" % n)",
"nthreads",
"+=",
"1",
"print",
"(",
")"
] | https://github.com/oilshell/oil/blob/94388e7d44a9ad879b12615f6203b38596b5a2d3/Python-2.7.13/Tools/ccbench/ccbench.py#L380-L398 |
||||
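The statistics printed by run_latency_tests above (the mean and the population standard deviation of the millisecond latencies), reproduced on assumed samples.

    lats = [12.0, 15.0, 11.0, 14.0]                         # assumed latencies in ms
    n = len(lats)
    avg = sum(lats) / n                                     # 13.0
    dev = (sum((x - avg) ** 2 for x in lats) / n) ** 0.5    # ~1.58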
pytorch/fairseq | 1575f30dd0a9f7b3c499db0b4767aa4e9f79056c | fairseq/tasks/fairseq_task.py | python | FairseqTask.reduce_metrics | (self, logging_outputs, criterion) | Aggregate logging outputs from data parallel training. | Aggregate logging outputs from data parallel training. | [
"Aggregate",
"logging",
"outputs",
"from",
"data",
"parallel",
"training",
"."
] | def reduce_metrics(self, logging_outputs, criterion):
"""Aggregate logging outputs from data parallel training."""
# backward compatibility for tasks that override aggregate_logging_outputs
base_func = FairseqTask.aggregate_logging_outputs
self_func = getattr(self, "aggregate_logging_outputs").__func__
if self_func is not base_func:
utils.deprecation_warning(
"Tasks should implement the reduce_metrics API. "
"Falling back to deprecated aggregate_logging_outputs API."
)
agg_logging_outputs = self.aggregate_logging_outputs(
logging_outputs, criterion
)
for k, v in agg_logging_outputs.items():
metrics.log_scalar(k, v)
return
if not any("ntokens" in log for log in logging_outputs):
warnings.warn(
"ntokens not found in Criterion logging outputs, cannot log wpb or wps"
)
else:
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
metrics.log_scalar("wpb", ntokens, priority=180, round=1)
metrics.log_speed("wps", ntokens, priority=90, round=1)
if not any("nsentences" in log for log in logging_outputs):
warnings.warn(
"nsentences not found in Criterion logging outputs, cannot log bsz"
)
else:
nsentences = sum(log.get("nsentences", 0) for log in logging_outputs)
metrics.log_scalar("bsz", nsentences, priority=190, round=1)
criterion.__class__.reduce_metrics(logging_outputs) | [
"def",
"reduce_metrics",
"(",
"self",
",",
"logging_outputs",
",",
"criterion",
")",
":",
"# backward compatibility for tasks that override aggregate_logging_outputs",
"base_func",
"=",
"FairseqTask",
".",
"aggregate_logging_outputs",
"self_func",
"=",
"getattr",
"(",
"self",
",",
"\"aggregate_logging_outputs\"",
")",
".",
"__func__",
"if",
"self_func",
"is",
"not",
"base_func",
":",
"utils",
".",
"deprecation_warning",
"(",
"\"Tasks should implement the reduce_metrics API. \"",
"\"Falling back to deprecated aggregate_logging_outputs API.\"",
")",
"agg_logging_outputs",
"=",
"self",
".",
"aggregate_logging_outputs",
"(",
"logging_outputs",
",",
"criterion",
")",
"for",
"k",
",",
"v",
"in",
"agg_logging_outputs",
".",
"items",
"(",
")",
":",
"metrics",
".",
"log_scalar",
"(",
"k",
",",
"v",
")",
"return",
"if",
"not",
"any",
"(",
"\"ntokens\"",
"in",
"log",
"for",
"log",
"in",
"logging_outputs",
")",
":",
"warnings",
".",
"warn",
"(",
"\"ntokens not found in Criterion logging outputs, cannot log wpb or wps\"",
")",
"else",
":",
"ntokens",
"=",
"sum",
"(",
"log",
".",
"get",
"(",
"\"ntokens\"",
",",
"0",
")",
"for",
"log",
"in",
"logging_outputs",
")",
"metrics",
".",
"log_scalar",
"(",
"\"wpb\"",
",",
"ntokens",
",",
"priority",
"=",
"180",
",",
"round",
"=",
"1",
")",
"metrics",
".",
"log_speed",
"(",
"\"wps\"",
",",
"ntokens",
",",
"priority",
"=",
"90",
",",
"round",
"=",
"1",
")",
"if",
"not",
"any",
"(",
"\"nsentences\"",
"in",
"log",
"for",
"log",
"in",
"logging_outputs",
")",
":",
"warnings",
".",
"warn",
"(",
"\"nsentences not found in Criterion logging outputs, cannot log bsz\"",
")",
"else",
":",
"nsentences",
"=",
"sum",
"(",
"log",
".",
"get",
"(",
"\"nsentences\"",
",",
"0",
")",
"for",
"log",
"in",
"logging_outputs",
")",
"metrics",
".",
"log_scalar",
"(",
"\"bsz\"",
",",
"nsentences",
",",
"priority",
"=",
"190",
",",
"round",
"=",
"1",
")",
"criterion",
".",
"__class__",
".",
"reduce_metrics",
"(",
"logging_outputs",
")"
] | https://github.com/pytorch/fairseq/blob/1575f30dd0a9f7b3c499db0b4767aa4e9f79056c/fairseq/tasks/fairseq_task.py#L559-L593 |
||
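The token and sentence aggregation pattern from reduce_metrics above, shown on assumed logging outputs from two workers.

    logging_outputs = [{'ntokens': 512, 'nsentences': 16},
                       {'ntokens': 480, 'nsentences': 16}]
    ntokens = sum(log.get('ntokens', 0) for log in logging_outputs)        # 992
    nsentences = sum(log.get('nsentences', 0) for log in logging_outputs)  # 32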
twilio/twilio-python | 6e1e811ea57a1edfadd5161ace87397c563f6915 | twilio/rest/api/v2010/account/call/recording.py | python | RecordingContext.fetch | (self) | return RecordingInstance(
self._version,
payload,
account_sid=self._solution['account_sid'],
call_sid=self._solution['call_sid'],
sid=self._solution['sid'],
) | Fetch the RecordingInstance
:returns: The fetched RecordingInstance
:rtype: twilio.rest.api.v2010.account.call.recording.RecordingInstance | Fetch the RecordingInstance | [
"Fetch",
"the",
"RecordingInstance"
] | def fetch(self):
"""
Fetch the RecordingInstance
:returns: The fetched RecordingInstance
:rtype: twilio.rest.api.v2010.account.call.recording.RecordingInstance
"""
payload = self._version.fetch(method='GET', uri=self._uri, )
return RecordingInstance(
self._version,
payload,
account_sid=self._solution['account_sid'],
call_sid=self._solution['call_sid'],
sid=self._solution['sid'],
) | [
"def",
"fetch",
"(",
"self",
")",
":",
"payload",
"=",
"self",
".",
"_version",
".",
"fetch",
"(",
"method",
"=",
"'GET'",
",",
"uri",
"=",
"self",
".",
"_uri",
",",
")",
"return",
"RecordingInstance",
"(",
"self",
".",
"_version",
",",
"payload",
",",
"account_sid",
"=",
"self",
".",
"_solution",
"[",
"'account_sid'",
"]",
",",
"call_sid",
"=",
"self",
".",
"_solution",
"[",
"'call_sid'",
"]",
",",
"sid",
"=",
"self",
".",
"_solution",
"[",
"'sid'",
"]",
",",
")"
] | https://github.com/twilio/twilio-python/blob/6e1e811ea57a1edfadd5161ace87397c563f6915/twilio/rest/api/v2010/account/call/recording.py#L308-L323 |
|
google/upvote_py2 | 51606947641763489db31bffcb934e8112eb5a08 | upvote/gae/utils/handler_utils.py | python | UpvoteRequestHandler.RequestCounter | (self) | return None | Returns a monitoring.RequestCounter specific to this webapp2.RequestHandler.
Subclasses should override this method in order to enable monitoring of
requests made to this RequestHandler.
Returns:
A monitoring.RequestCounter to be used for tracking requests to this
RequestHandler. | Returns a monitoring.RequestCounter specific to this webapp2.RequestHandler. | [
"Returns",
"a",
"monitoring",
".",
"RequestCounter",
"specific",
"to",
"this",
"webapp2",
".",
"RequestHandler",
"."
] | def RequestCounter(self):
"""Returns a monitoring.RequestCounter specific to this webapp2.RequestHandler.
Subclasses should override this method in order to enable monitoring of
requests made to this RequestHandler.
Returns:
A monitoring.RequestCounter to be used for tracking requests to this
RequestHandler.
"""
return None | [
"def",
"RequestCounter",
"(",
"self",
")",
":",
"return",
"None"
] | https://github.com/google/upvote_py2/blob/51606947641763489db31bffcb934e8112eb5a08/upvote/gae/utils/handler_utils.py#L122-L132 |
|
Ridter/acefile | 31f90219fb560364b6f5d27f512fccfae81d04f4 | acefile.py | python | AceArchive._getmember_byname | (self, name) | return match | Return an :class:`AceMember` object corresponding to archive member
name *name*.
Raise :class:`KeyError` if *name* is not present in the archive.
If *name* occurs multiple times in the archive, then the last occurrence
is returned. | Return an :class:`AceMember` object corresponding to archive member
name *name*.
Raise :class:`KeyError` if *name* is not present in the archive.
If *name* occurs multiple times in the archive, then the last occurrence
is returned. | [
"Return",
"an",
":",
"class",
":",
"AceMember",
"object",
"corresponding",
"to",
"archive",
"member",
"name",
"*",
"name",
"*",
".",
"Raise",
":",
"class",
":",
"KeyError",
"if",
"*",
"name",
"*",
"is",
"not",
"present",
"in",
"the",
"archive",
".",
"If",
"*",
"name",
"*",
"occurs",
"multiple",
"times",
"in",
"the",
"archive",
"then",
"the",
"last",
"occurence",
"is",
"returned",
"."
] | def _getmember_byname(self, name):
"""
Return an :class:`AceMember` object corresponding to archive member
name *name*.
Raise :class:`KeyError` if *name* is not present in the archive.
If *name* occurs multiple times in the archive, then the last occurrence
is returned.
"""
match = None
for am in self.__members:
if am.filename == name:
match = am
if match == None:
raise KeyError("no member '%s' in archive" % name)
return match | [
"def",
"_getmember_byname",
"(",
"self",
",",
"name",
")",
":",
"match",
"=",
"None",
"for",
"am",
"in",
"self",
".",
"__members",
":",
"if",
"am",
".",
"filename",
"==",
"name",
":",
"match",
"=",
"am",
"if",
"match",
"==",
"None",
":",
"raise",
"KeyError",
"(",
"\"no member '%s' in archive\"",
"%",
"name",
")",
"return",
"match"
] | https://github.com/Ridter/acefile/blob/31f90219fb560364b6f5d27f512fccfae81d04f4/acefile.py#L3375-L3389 |
|
oilshell/oil | 94388e7d44a9ad879b12615f6203b38596b5a2d3 | Python-2.7.13/Lib/plat-irix6/IN.py | python | IN6_IS_ADDR_MC_GLOBAL | (p) | return | [] | def IN6_IS_ADDR_MC_GLOBAL(p): return | [
"def",
"IN6_IS_ADDR_MC_GLOBAL",
"(",
"p",
")",
":",
"return"
] | https://github.com/oilshell/oil/blob/94388e7d44a9ad879b12615f6203b38596b5a2d3/Python-2.7.13/Lib/plat-irix6/IN.py#L344-L344 |
|||
TesterlifeRaymond/doraemon | d5cb6e34bd5f2aa97273ce0c0c9303e32beaa333 | venv/lib/python3.6/site-packages/pip/_vendor/distlib/_backport/tarfile.py | python | _Stream.close | (self) | Close the _Stream object. No operation should be
done on it afterwards. | Close the _Stream object. No operation should be
done on it afterwards. | [
"Close",
"the",
"_Stream",
"object",
".",
"No",
"operation",
"should",
"be",
"done",
"on",
"it",
"afterwards",
"."
] | def close(self):
"""Close the _Stream object. No operation should be
done on it afterwards.
"""
if self.closed:
return
if self.mode == "w" and self.comptype != "tar":
self.buf += self.cmp.flush()
if self.mode == "w" and self.buf:
self.fileobj.write(self.buf)
self.buf = b""
if self.comptype == "gz":
# The native zlib crc is an unsigned 32-bit integer, but
# the Python wrapper implicitly casts that to a signed C
# long. So, on a 32-bit box self.crc may "look negative",
# while the same crc on a 64-bit box may "look positive".
# To avoid irksome warnings from the `struct` module, force
# it to look positive on all boxes.
self.fileobj.write(struct.pack("<L", self.crc & 0xffffffff))
self.fileobj.write(struct.pack("<L", self.pos & 0xffffFFFF))
if not self._extfileobj:
self.fileobj.close()
self.closed = True | [
"def",
"close",
"(",
"self",
")",
":",
"if",
"self",
".",
"closed",
":",
"return",
"if",
"self",
".",
"mode",
"==",
"\"w\"",
"and",
"self",
".",
"comptype",
"!=",
"\"tar\"",
":",
"self",
".",
"buf",
"+=",
"self",
".",
"cmp",
".",
"flush",
"(",
")",
"if",
"self",
".",
"mode",
"==",
"\"w\"",
"and",
"self",
".",
"buf",
":",
"self",
".",
"fileobj",
".",
"write",
"(",
"self",
".",
"buf",
")",
"self",
".",
"buf",
"=",
"b\"\"",
"if",
"self",
".",
"comptype",
"==",
"\"gz\"",
":",
"# The native zlib crc is an unsigned 32-bit integer, but",
"# the Python wrapper implicitly casts that to a signed C",
"# long. So, on a 32-bit box self.crc may \"look negative\",",
"# while the same crc on a 64-bit box may \"look positive\".",
"# To avoid irksome warnings from the `struct` module, force",
"# it to look positive on all boxes.",
"self",
".",
"fileobj",
".",
"write",
"(",
"struct",
".",
"pack",
"(",
"\"<L\"",
",",
"self",
".",
"crc",
"&",
"0xffffffff",
")",
")",
"self",
".",
"fileobj",
".",
"write",
"(",
"struct",
".",
"pack",
"(",
"\"<L\"",
",",
"self",
".",
"pos",
"&",
"0xffffFFFF",
")",
")",
"if",
"not",
"self",
".",
"_extfileobj",
":",
"self",
".",
"fileobj",
".",
"close",
"(",
")",
"self",
".",
"closed",
"=",
"True"
] | https://github.com/TesterlifeRaymond/doraemon/blob/d5cb6e34bd5f2aa97273ce0c0c9303e32beaa333/venv/lib/python3.6/site-packages/pip/_vendor/distlib/_backport/tarfile.py#L488-L514 |
||
soobinseo/Tacotron-pytorch | 84dbb464da9e1c9ade413ce4129d2b70caa073bc | text/cmudict.py | python | CMUDict.lookup | (self, word) | return self._entries.get(word.upper()) | Returns list of ARPAbet pronunciations of the given word. | Returns list of ARPAbet pronunciations of the given word. | [
"Returns",
"list",
"of",
"ARPAbet",
"pronunciations",
"of",
"the",
"given",
"word",
"."
] | def lookup(self, word):
'''Returns list of ARPAbet pronunciations of the given word.'''
return self._entries.get(word.upper()) | [
"def",
"lookup",
"(",
"self",
",",
"word",
")",
":",
"return",
"self",
".",
"_entries",
".",
"get",
"(",
"word",
".",
"upper",
"(",
")",
")"
] | https://github.com/soobinseo/Tacotron-pytorch/blob/84dbb464da9e1c9ade413ce4129d2b70caa073bc/text/cmudict.py#L37-L39 |
|
goruck/smart-zoneminder | e2f7bf49f22f3aa00a082bc227de64237d8e7699 | tpu-servers/detect_servers_tpu.py | python | ReadLabelFile | (file_path) | return ret | [] | def ReadLabelFile(file_path):
# Function to read labels from text files.
with open(file_path, 'r') as f:
lines = f.readlines()
ret = {}
for line in lines:
pair = line.strip().split(maxsplit=1)
ret[int(pair[0])] = pair[1].strip()
return ret | [
"def",
"ReadLabelFile",
"(",
"file_path",
")",
":",
"# Function to read labels from text files.",
"with",
"open",
"(",
"file_path",
",",
"'r'",
")",
"as",
"f",
":",
"lines",
"=",
"f",
".",
"readlines",
"(",
")",
"ret",
"=",
"{",
"}",
"for",
"line",
"in",
"lines",
":",
"pair",
"=",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
"maxsplit",
"=",
"1",
")",
"ret",
"[",
"int",
"(",
"pair",
"[",
"0",
"]",
")",
"]",
"=",
"pair",
"[",
"1",
"]",
".",
"strip",
"(",
")",
"return",
"ret"
] | https://github.com/goruck/smart-zoneminder/blob/e2f7bf49f22f3aa00a082bc227de64237d8e7699/tpu-servers/detect_servers_tpu.py#L89-L97 |
|||
projecthamster/hamster | 19d160090de30e756bdc3122ff935bdaa86e2843 | waflib/Tools/python.py | python | get_python_variables | (self, variables, imports=None) | return return_values | Spawn a new python process to dump configuration variables
:param variables: variables to print
:type variables: list of string
:param imports: one import by element
:type imports: list of string
:return: the variable values
:rtype: list of string | Spawn a new python process to dump configuration variables | [
"Spawn",
"a",
"new",
"python",
"process",
"to",
"dump",
"configuration",
"variables"
] | def get_python_variables(self, variables, imports=None):
"""
Spawn a new python process to dump configuration variables
:param variables: variables to print
:type variables: list of string
:param imports: one import by element
:type imports: list of string
:return: the variable values
:rtype: list of string
"""
if not imports:
try:
imports = self.python_imports
except AttributeError:
imports = DISTUTILS_IMP
program = list(imports) # copy
program.append('')
for v in variables:
program.append("print(repr(%s))" % v)
os_env = dict(os.environ)
try:
del os_env['MACOSX_DEPLOYMENT_TARGET'] # see comments in the OSX tool
except KeyError:
pass
try:
out = self.cmd_and_log(self.env.PYTHON + ['-c', '\n'.join(program)], env=os_env)
except Errors.WafError:
self.fatal('The distutils module is unusable: install "python-devel"?')
self.to_log(out)
return_values = []
for s in out.splitlines():
s = s.strip()
if not s:
continue
if s == 'None':
return_values.append(None)
elif (s[0] == "'" and s[-1] == "'") or (s[0] == '"' and s[-1] == '"'):
return_values.append(eval(s))
elif s[0].isdigit():
return_values.append(int(s))
else: break
return return_values | [
"def",
"get_python_variables",
"(",
"self",
",",
"variables",
",",
"imports",
"=",
"None",
")",
":",
"if",
"not",
"imports",
":",
"try",
":",
"imports",
"=",
"self",
".",
"python_imports",
"except",
"AttributeError",
":",
"imports",
"=",
"DISTUTILS_IMP",
"program",
"=",
"list",
"(",
"imports",
")",
"# copy",
"program",
".",
"append",
"(",
"''",
")",
"for",
"v",
"in",
"variables",
":",
"program",
".",
"append",
"(",
"\"print(repr(%s))\"",
"%",
"v",
")",
"os_env",
"=",
"dict",
"(",
"os",
".",
"environ",
")",
"try",
":",
"del",
"os_env",
"[",
"'MACOSX_DEPLOYMENT_TARGET'",
"]",
"# see comments in the OSX tool",
"except",
"KeyError",
":",
"pass",
"try",
":",
"out",
"=",
"self",
".",
"cmd_and_log",
"(",
"self",
".",
"env",
".",
"PYTHON",
"+",
"[",
"'-c'",
",",
"'\\n'",
".",
"join",
"(",
"program",
")",
"]",
",",
"env",
"=",
"os_env",
")",
"except",
"Errors",
".",
"WafError",
":",
"self",
".",
"fatal",
"(",
"'The distutils module is unusable: install \"python-devel\"?'",
")",
"self",
".",
"to_log",
"(",
"out",
")",
"return_values",
"=",
"[",
"]",
"for",
"s",
"in",
"out",
".",
"splitlines",
"(",
")",
":",
"s",
"=",
"s",
".",
"strip",
"(",
")",
"if",
"not",
"s",
":",
"continue",
"if",
"s",
"==",
"'None'",
":",
"return_values",
".",
"append",
"(",
"None",
")",
"elif",
"(",
"s",
"[",
"0",
"]",
"==",
"\"'\"",
"and",
"s",
"[",
"-",
"1",
"]",
"==",
"\"'\"",
")",
"or",
"(",
"s",
"[",
"0",
"]",
"==",
"'\"'",
"and",
"s",
"[",
"-",
"1",
"]",
"==",
"'\"'",
")",
":",
"return_values",
".",
"append",
"(",
"eval",
"(",
"s",
")",
")",
"elif",
"s",
"[",
"0",
"]",
".",
"isdigit",
"(",
")",
":",
"return_values",
".",
"append",
"(",
"int",
"(",
"s",
")",
")",
"else",
":",
"break",
"return",
"return_values"
] | https://github.com/projecthamster/hamster/blob/19d160090de30e756bdc3122ff935bdaa86e2843/waflib/Tools/python.py#L192-L236 |
|
cloudera/hue | 23f02102d4547c17c32bd5ea0eb24e9eadd657a4 | desktop/libs/indexer/src/indexer/controller.py | python | CollectionManagerController.update_collection | (self, name, fields) | Only create new fields | Only create new fields | [
"Only",
"create",
"new",
"fields"
] | def update_collection(self, name, fields):
"""
Only create new fields
"""
api = SolrApi(SOLR_URL.get(), self.user, SECURITY_ENABLED.get())
# Create only new fields
# Fields that already exist, do not overwrite since there is no way to do that, currently.
old_field_names = list(api.fields(name)['schema']['fields'].keys())
new_fields = [field for field in fields if field['name'] not in old_field_names]
new_fields_filtered = []
for field in new_fields:
new_field = {}
for attribute in [attribute for attribute in ALLOWED_FIELD_ATTRIBUTES if attribute in field]:
new_field[attribute] = field[attribute]
new_fields_filtered.append(new_field)
api.add_fields(name, new_fields_filtered) | [
"def",
"update_collection",
"(",
"self",
",",
"name",
",",
"fields",
")",
":",
"api",
"=",
"SolrApi",
"(",
"SOLR_URL",
".",
"get",
"(",
")",
",",
"self",
".",
"user",
",",
"SECURITY_ENABLED",
".",
"get",
"(",
")",
")",
"# Create only new fields",
"# Fields that already exist, do not overwrite since there is no way to do that, currently.",
"old_field_names",
"=",
"list",
"(",
"api",
".",
"fields",
"(",
"name",
")",
"[",
"'schema'",
"]",
"[",
"'fields'",
"]",
".",
"keys",
"(",
")",
")",
"new_fields",
"=",
"[",
"field",
"for",
"field",
"in",
"fields",
"if",
"field",
"[",
"'name'",
"]",
"not",
"in",
"old_field_names",
"]",
"new_fields_filtered",
"=",
"[",
"]",
"for",
"field",
"in",
"new_fields",
":",
"new_field",
"=",
"{",
"}",
"for",
"attribute",
"in",
"[",
"attribute",
"for",
"attribute",
"in",
"ALLOWED_FIELD_ATTRIBUTES",
"if",
"attribute",
"in",
"field",
"]",
":",
"new_field",
"[",
"attribute",
"]",
"=",
"field",
"[",
"attribute",
"]",
"new_fields_filtered",
".",
"append",
"(",
"new_field",
")",
"api",
".",
"add_fields",
"(",
"name",
",",
"new_fields_filtered",
")"
] | https://github.com/cloudera/hue/blob/23f02102d4547c17c32bd5ea0eb24e9eadd657a4/desktop/libs/indexer/src/indexer/controller.py#L227-L243 |
||
zhl2008/awd-platform | 0416b31abea29743387b10b3914581fbe8e7da5e | web_flaskbb/lib/python2.7/site-packages/pip/_vendor/distlib/database.py | python | Distribution.dev_requires | (self) | return self._get_requirements('dev_requires') | [] | def dev_requires(self):
return self._get_requirements('dev_requires') | [
"def",
"dev_requires",
"(",
"self",
")",
":",
"return",
"self",
".",
"_get_requirements",
"(",
"'dev_requires'",
")"
] | https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/lib/python2.7/site-packages/pip/_vendor/distlib/database.py#L401-L402 |
|||
steveKapturowski/tensorflow-rl | 6dc58da69bad0349a646cfc94ea9c5d1eada8351 | utils/cts.py | python | CTSNode.log_prob | (self, context, symbol) | Computes the log probability of the symbol in this subtree. | Computes the log probability of the symbol in this subtree. | [
"Computes",
"the",
"log",
"probability",
"of",
"the",
"symbol",
"in",
"this",
"subtree",
"."
] | def log_prob(self, context, symbol):
"""Computes the log probability of the symbol in this subtree."""
lp_estimator = math.log(self.estimator.prob(symbol))
if len(context) > 0:
# See update() above. More efficient is to avoid creating the
# nodes and use a default node, but we omit this for clarity.
child = self.get_child(context[-1])
lp_child = child.log_prob(context[:-1], symbol)
return self.mix_prediction(lp_estimator, lp_child)
else:
return lp_estimator | [
"def",
"log_prob",
"(",
"self",
",",
"context",
",",
"symbol",
")",
":",
"lp_estimator",
"=",
"math",
".",
"log",
"(",
"self",
".",
"estimator",
".",
"prob",
"(",
"symbol",
")",
")",
"if",
"len",
"(",
"context",
")",
">",
"0",
":",
"# See update() above. More efficient is to avoid creating the",
"# nodes and use a default node, but we omit this for clarity.",
"child",
"=",
"self",
".",
"get_child",
"(",
"context",
"[",
"-",
"1",
"]",
")",
"lp_child",
"=",
"child",
".",
"log_prob",
"(",
"context",
"[",
":",
"-",
"1",
"]",
",",
"symbol",
")",
"return",
"self",
".",
"mix_prediction",
"(",
"lp_estimator",
",",
"lp_child",
")",
"else",
":",
"return",
"lp_estimator"
] | https://github.com/steveKapturowski/tensorflow-rl/blob/6dc58da69bad0349a646cfc94ea9c5d1eada8351/utils/cts.py#L182-L195 |
||
makerbot/ReplicatorG | d6f2b07785a5a5f1e172fb87cb4303b17c575d5d | skein_engines/skeinforge-35/fabmetheus_utilities/fabmetheus_tools/interpret_plugins/csv.py | python | main | () | Display the inset dialog. | Display the inset dialog. | [
"Display",
"the",
"inset",
"dialog",
"."
] | def main():
"Display the inset dialog."
if len(sys.argv) > 1:
getCarving(' '.join(sys.argv[1 :])) | [
"def",
"main",
"(",
")",
":",
"if",
"len",
"(",
"sys",
".",
"argv",
")",
">",
"1",
":",
"getCarving",
"(",
"' '",
".",
"join",
"(",
"sys",
".",
"argv",
"[",
"1",
":",
"]",
")",
")"
] | https://github.com/makerbot/ReplicatorG/blob/d6f2b07785a5a5f1e172fb87cb4303b17c575d5d/skein_engines/skeinforge-35/fabmetheus_utilities/fabmetheus_tools/interpret_plugins/csv.py#L170-L173 |
||
CMA-ES/pycma | f6eed1ef7e747cec1ab2e5c835d6f2fd1ebc097f | cma/fitness_functions.py | python | elli | (x) | return sum(1e6**(np.arange(len(x)) / (len(x) - 1 + 1e-9)) * np.asarray(x)**2) | unbound test function, needed to test multiprocessor, as long
as the other test functions are defined within a class and
only accessible via the class instance | unbound test function, needed to test multiprocessor, as long
as the other test functions are defined within a class and
only accessible via the class instance | [
"unbound",
"test",
"function",
"needed",
"to",
"test",
"multiprocessor",
"as",
"long",
"as",
"the",
"other",
"test",
"functions",
"are",
"defined",
"within",
"a",
"class",
"and",
"only",
"accessable",
"via",
"the",
"class",
"instance"
] | def elli(x):
"""unbound test function, needed to test multiprocessor, as long
as the other test functions are defined within a class and
only accessible via the class instance"""
return sum(1e6**(np.arange(len(x)) / (len(x) - 1 + 1e-9)) * np.asarray(x)**2) | [
"def",
"elli",
"(",
"x",
")",
":",
"return",
"sum",
"(",
"1e6",
"**",
"(",
"np",
".",
"arange",
"(",
"len",
"(",
"x",
")",
")",
"/",
"(",
"len",
"(",
"x",
")",
"-",
"1",
"+",
"1e-9",
")",
")",
"*",
"np",
".",
"asarray",
"(",
"x",
")",
"**",
"2",
")"
] | https://github.com/CMA-ES/pycma/blob/f6eed1ef7e747cec1ab2e5c835d6f2fd1ebc097f/cma/fitness_functions.py#L58-L62 |
|
bashtage/linearmodels | 9256269f01ff8c5f85e65342d66149a5636661b6 | linearmodels/iv/results.py | python | _LSModelResultsBase.tstats | (self) | return Series(self._params / self.std_errors, name="tstat") | Parameter t-statistics | Parameter t-statistics | [
"Parameter",
"t",
"-",
"statistics"
] | def tstats(self) -> Series:
"""Parameter t-statistics"""
return Series(self._params / self.std_errors, name="tstat") | [
"def",
"tstats",
"(",
"self",
")",
"->",
"Series",
":",
"return",
"Series",
"(",
"self",
".",
"_params",
"/",
"self",
".",
"std_errors",
",",
"name",
"=",
"\"tstat\"",
")"
] | https://github.com/bashtage/linearmodels/blob/9256269f01ff8c5f85e65342d66149a5636661b6/linearmodels/iv/results.py#L184-L186 |
|
Pyomo/pyomo | dbd4faee151084f343b893cc2b0c04cf2b76fd92 | pyomo/contrib/pynumero/sparse/block_vector.py | python | BlockVector.ptp | (self, axis=None, out=None, keepdims=False) | return self.max()-self.min() | Peak to peak (maximum - minimum) value along a given axis. | Peak to peak (maximum - minimum) value along a given axis. | [
"Peak",
"to",
"peak",
"(",
"maximum",
"-",
"minimum",
")",
"value",
"along",
"a",
"given",
"axis",
"."
] | def ptp(self, axis=None, out=None, keepdims=False):
"""
Peak to peak (maximum - minimum) value along a given axis.
"""
assert_block_structure(self)
assert out is None, 'Out keyword not supported'
return self.max()-self.min() | [
"def",
"ptp",
"(",
"self",
",",
"axis",
"=",
"None",
",",
"out",
"=",
"None",
",",
"keepdims",
"=",
"False",
")",
":",
"assert_block_structure",
"(",
"self",
")",
"assert",
"out",
"is",
"None",
",",
"'Out keyword not supported'",
"return",
"self",
".",
"max",
"(",
")",
"-",
"self",
".",
"min",
"(",
")"
] | https://github.com/Pyomo/pyomo/blob/dbd4faee151084f343b893cc2b0c04cf2b76fd92/pyomo/contrib/pynumero/sparse/block_vector.py#L485-L491 |
|
JaniceWuo/MovieRecommend | 4c86db64ca45598917d304f535413df3bc9fea65 | movierecommend/venv1/Lib/site-packages/pip-9.0.1-py3.6.egg/pip/models/index.py | python | Index.__init__ | (self, url) | [] | def __init__(self, url):
self.url = url
self.netloc = urllib_parse.urlsplit(url).netloc
self.simple_url = self.url_to_path('simple')
self.pypi_url = self.url_to_path('pypi')
self.pip_json_url = self.url_to_path('pypi/pip/json') | [
"def",
"__init__",
"(",
"self",
",",
"url",
")",
":",
"self",
".",
"url",
"=",
"url",
"self",
".",
"netloc",
"=",
"urllib_parse",
".",
"urlsplit",
"(",
"url",
")",
".",
"netloc",
"self",
".",
"simple_url",
"=",
"self",
".",
"url_to_path",
"(",
"'simple'",
")",
"self",
".",
"pypi_url",
"=",
"self",
".",
"url_to_path",
"(",
"'pypi'",
")",
"self",
".",
"pip_json_url",
"=",
"self",
".",
"url_to_path",
"(",
"'pypi/pip/json'",
")"
] | https://github.com/JaniceWuo/MovieRecommend/blob/4c86db64ca45598917d304f535413df3bc9fea65/movierecommend/venv1/Lib/site-packages/pip-9.0.1-py3.6.egg/pip/models/index.py#L5-L10 |
||||
deepmind/mathematics_dataset | 900a0813b1997f45a4d0a8e448a652f37e4b5685 | mathematics_dataset/util/display.py | python | StringOrdinal.__init__ | (self, position) | Initializes a `StringOrdinal`.
Args:
position: An integer >= 0.
Raises:
ValueError: If `position` is non-positive or out of range. | Initializes a `StringOrdinal`. | [
"Initializes",
"a",
"StringOrdinal",
"."
] | def __init__(self, position):
"""Initializes a `StringOrdinal`.
Args:
position: An integer >= 0.
Raises:
ValueError: If `position` is non-positive or out of range.
"""
if position < 0 or position >= len(_ORDINALS):
raise ValueError('Unsupported ordinal {}.'.format(position))
self._string = _ORDINALS[position] | [
"def",
"__init__",
"(",
"self",
",",
"position",
")",
":",
"if",
"position",
"<",
"0",
"or",
"position",
">=",
"len",
"(",
"_ORDINALS",
")",
":",
"raise",
"ValueError",
"(",
"'Unsupported ordinal {}.'",
".",
"format",
"(",
"position",
")",
")",
"self",
".",
"_string",
"=",
"_ORDINALS",
"[",
"position",
"]"
] | https://github.com/deepmind/mathematics_dataset/blob/900a0813b1997f45a4d0a8e448a652f37e4b5685/mathematics_dataset/util/display.py#L324-L335 |
||
apache/tvm | 6eb4ed813ebcdcd9558f0906a1870db8302ff1e0 | python/tvm/relay/frontend/caffe.py | python | OperatorConverter.convert_tanh | (self, op) | return out | Convert TanH layer | Convert TanH layer | [
"Convert",
"TanH",
"layer"
] | def convert_tanh(self, op):
"""Convert TanH layer"""
inputs = op.bottom
in_expr = self.exp_tab.get_expr(inputs[0])
out = _op.tanh(in_expr)
return out | [
"def",
"convert_tanh",
"(",
"self",
",",
"op",
")",
":",
"inputs",
"=",
"op",
".",
"bottom",
"in_expr",
"=",
"self",
".",
"exp_tab",
".",
"get_expr",
"(",
"inputs",
"[",
"0",
"]",
")",
"out",
"=",
"_op",
".",
"tanh",
"(",
"in_expr",
")",
"return",
"out"
] | https://github.com/apache/tvm/blob/6eb4ed813ebcdcd9558f0906a1870db8302ff1e0/python/tvm/relay/frontend/caffe.py#L560-L565 |
|
buke/GreenOdoo | 3d8c55d426fb41fdb3f2f5a1533cfe05983ba1df | runtime/python/lib/python2.7/lib-tk/ttk.py | python | Treeview.see | (self, item) | Ensure that item is visible.
Sets all of item's ancestors open option to True, and scrolls
the widget if necessary so that item is within the visible
portion of the tree. | Ensure that item is visible. | [
"Ensure",
"that",
"item",
"is",
"visible",
"."
] | def see(self, item):
"""Ensure that item is visible.
Sets all of item's ancestors open option to True, and scrolls
the widget if necessary so that item is within the visible
portion of the tree."""
self.tk.call(self._w, "see", item) | [
"def",
"see",
"(",
"self",
",",
"item",
")",
":",
"self",
".",
"tk",
".",
"call",
"(",
"self",
".",
"_w",
",",
"\"see\"",
",",
"item",
")"
] | https://github.com/buke/GreenOdoo/blob/3d8c55d426fb41fdb3f2f5a1533cfe05983ba1df/runtime/python/lib/python2.7/lib-tk/ttk.py#L1382-L1388 |
||
quic/aimet | dae9bae9a77ca719aa7553fefde4768270fc3518 | TrainingExtensions/tensorflow/src/python/aimet_tensorflow/quantizer_info.py | python | QuantizerInfo.quant_scheme | (self) | return self.tensor_quantizer.getQuantScheme() | Reads the quant_scheme associated with the Quantize op
:return: quant_scheme as libpymo.QuantizationMode type | Reads the quant_scheme associated with the Quantize op
:return: quant_scheme as libpymo.QuantizationMode type | [
"Reads",
"the",
"quant_scheme",
"associated",
"with",
"the",
"Quantize",
"op",
":",
"return",
":",
"quant_scheme",
"as",
"libpymo",
".",
"QuantizationMode",
"type"
] | def quant_scheme(self) -> libpymo.QuantizationMode:
"""
Reads the quant_scheme associated with the Quantize op
:return: quant_scheme as libpymo.QuantizationMode type
"""
return self.tensor_quantizer.getQuantScheme() | [
"def",
"quant_scheme",
"(",
"self",
")",
"->",
"libpymo",
".",
"QuantizationMode",
":",
"return",
"self",
".",
"tensor_quantizer",
".",
"getQuantScheme",
"(",
")"
] | https://github.com/quic/aimet/blob/dae9bae9a77ca719aa7553fefde4768270fc3518/TrainingExtensions/tensorflow/src/python/aimet_tensorflow/quantizer_info.py#L160-L165 |
|
JaniceWuo/MovieRecommend | 4c86db64ca45598917d304f535413df3bc9fea65 | movierecommend/venv1/Lib/site-packages/django/db/models/fields/files.py | python | FileField.deconstruct | (self) | return name, path, args, kwargs | [] | def deconstruct(self):
name, path, args, kwargs = super(FileField, self).deconstruct()
if kwargs.get("max_length") == 100:
del kwargs["max_length"]
kwargs['upload_to'] = self.upload_to
if self.storage is not default_storage:
kwargs['storage'] = self.storage
return name, path, args, kwargs | [
"def",
"deconstruct",
"(",
"self",
")",
":",
"name",
",",
"path",
",",
"args",
",",
"kwargs",
"=",
"super",
"(",
"FileField",
",",
"self",
")",
".",
"deconstruct",
"(",
")",
"if",
"kwargs",
".",
"get",
"(",
"\"max_length\"",
")",
"==",
"100",
":",
"del",
"kwargs",
"[",
"\"max_length\"",
"]",
"kwargs",
"[",
"'upload_to'",
"]",
"=",
"self",
".",
"upload_to",
"if",
"self",
".",
"storage",
"is",
"not",
"default_storage",
":",
"kwargs",
"[",
"'storage'",
"]",
"=",
"self",
".",
"storage",
"return",
"name",
",",
"path",
",",
"args",
",",
"kwargs"
] | https://github.com/JaniceWuo/MovieRecommend/blob/4c86db64ca45598917d304f535413df3bc9fea65/movierecommend/venv1/Lib/site-packages/django/db/models/fields/files.py#L271-L278 |
|||
mesonbuild/meson | a22d0f9a0a787df70ce79b05d0c45de90a970048 | mesonbuild/coredata.py | python | UserUmaskOption.__init__ | (self, description: str, value: T.Any, yielding: T.Optional[bool] = None) | [] | def __init__(self, description: str, value: T.Any, yielding: T.Optional[bool] = None):
super().__init__(description, (0, 0o777, value), yielding)
self.choices = ['preserve', '0000-0777'] | [
"def",
"__init__",
"(",
"self",
",",
"description",
":",
"str",
",",
"value",
":",
"T",
".",
"Any",
",",
"yielding",
":",
"T",
".",
"Optional",
"[",
"bool",
"]",
"=",
"None",
")",
":",
"super",
"(",
")",
".",
"__init__",
"(",
"description",
",",
"(",
"0",
",",
"0o777",
",",
"value",
")",
",",
"yielding",
")",
"self",
".",
"choices",
"=",
"[",
"'preserve'",
",",
"'0000-0777'",
"]"
] | https://github.com/mesonbuild/meson/blob/a22d0f9a0a787df70ce79b05d0c45de90a970048/mesonbuild/coredata.py#L165-L167 |
||||
confluentinc/confluent-kafka-python | 2ac0d72b24b14e5246445ad9ce66ec9c8828ef4e | src/confluent_kafka/schema_registry/schema_registry_client.py | python | SchemaRegistryClient.__init__ | (self, conf) | [] | def __init__(self, conf):
self._rest_client = _RestClient(conf)
self._cache = _SchemaCache() | [
"def",
"__init__",
"(",
"self",
",",
"conf",
")",
":",
"self",
".",
"_rest_client",
"=",
"_RestClient",
"(",
"conf",
")",
"self",
".",
"_cache",
"=",
"_SchemaCache",
"(",
")"
] | https://github.com/confluentinc/confluent-kafka-python/blob/2ac0d72b24b14e5246445ad9ce66ec9c8828ef4e/src/confluent_kafka/schema_registry/schema_registry_client.py#L291-L293 |
||||
maas/maas | db2f89970c640758a51247c59bf1ec6f60cf4ab5 | src/provisioningserver/utils/network.py | python | get_default_monitored_interfaces | (interfaces: dict) | return monitored_interfaces | Return a list of interfaces that should be monitored by default.
This function takes the interface map and filters out VLANs,
bond parents, and disabled interfaces. | Return a list of interfaces that should be monitored by default. | [
"Return",
"a",
"list",
"of",
"interfaces",
"that",
"should",
"be",
"monitored",
"by",
"default",
"."
] | def get_default_monitored_interfaces(interfaces: dict) -> list:
"""Return a list of interfaces that should be monitored by default.
This function takes the interface map and filters out VLANs,
bond parents, and disabled interfaces.
"""
children_map = get_interface_children(interfaces)
monitored_interfaces = []
# By default, monitor physical interfaces (without children that are
# bonds), bond interfaces, and bridge interfaces without parents.
for ifname in interfaces:
interface = interfaces[ifname]
if not interface["enabled"]:
# Skip interfaces which are not link-up.
continue
iftype = interface.get("type", None)
if iftype == "physical":
should_monitor = True
for child in interface_children(ifname, interfaces, children_map):
if child.data["type"] == "bond":
# This interface is a bond member. Skip it, since we would
# rather just monitor the bond interface.
should_monitor = False
break
if should_monitor:
monitored_interfaces.append(ifname)
elif iftype == "bond":
monitored_interfaces.append(ifname)
elif iftype == "bridge":
# If the bridge has parents, that means a physical, bond, or
# VLAN interface on the host is a member of the bridge. (Which
# means we're already monitoring the fabric by virtue of the
# fact that we are monitoring the parent.) Only bridges that
# stand alone (are not connected to any interfaces MAAS cares
# about) should therefore be monitored. (In other words, if
# the bridge has zero parents, it is a virtual network, which
# MAAS may be managing virtual machines on.)
if len(interface["parents"]) == 0:
monitored_interfaces.append(ifname)
return monitored_interfaces | [
"def",
"get_default_monitored_interfaces",
"(",
"interfaces",
":",
"dict",
")",
"->",
"list",
":",
"children_map",
"=",
"get_interface_children",
"(",
"interfaces",
")",
"monitored_interfaces",
"=",
"[",
"]",
"# By default, monitor physical interfaces (without children that are",
"# bonds), bond interfaces, and bridge interfaces without parents.",
"for",
"ifname",
"in",
"interfaces",
":",
"interface",
"=",
"interfaces",
"[",
"ifname",
"]",
"if",
"not",
"interface",
"[",
"\"enabled\"",
"]",
":",
"# Skip interfaces which are not link-up.",
"continue",
"iftype",
"=",
"interface",
".",
"get",
"(",
"\"type\"",
",",
"None",
")",
"if",
"iftype",
"==",
"\"physical\"",
":",
"should_monitor",
"=",
"True",
"for",
"child",
"in",
"interface_children",
"(",
"ifname",
",",
"interfaces",
",",
"children_map",
")",
":",
"if",
"child",
".",
"data",
"[",
"\"type\"",
"]",
"==",
"\"bond\"",
":",
"# This interface is a bond member. Skip it, since would",
"# rather just monitor the bond interface.",
"should_monitor",
"=",
"False",
"break",
"if",
"should_monitor",
":",
"monitored_interfaces",
".",
"append",
"(",
"ifname",
")",
"elif",
"iftype",
"==",
"\"bond\"",
":",
"monitored_interfaces",
".",
"append",
"(",
"ifname",
")",
"elif",
"iftype",
"==",
"\"bridge\"",
":",
"# If the bridge has parents, that means a physical, bond, or",
"# VLAN interface on the host is a member of the bridge. (Which",
"# means we're already monitoring the fabric by virtue of the",
"# fact that we are monitoring the parent.) Only bridges that",
"# stand alone (are not connected to any interfaces MAAS cares",
"# about) should therefore be monitored. (In other words, if",
"# the bridge has zero parents, it is a virtual network, which",
"# MAAS may be managing virtual machines on.)",
"if",
"len",
"(",
"interface",
"[",
"\"parents\"",
"]",
")",
"==",
"0",
":",
"monitored_interfaces",
".",
"append",
"(",
"ifname",
")",
"return",
"monitored_interfaces"
] | https://github.com/maas/maas/blob/db2f89970c640758a51247c59bf1ec6f60cf4ab5/src/provisioningserver/utils/network.py#L1051-L1090 |
|
dimagi/commcare-hq | d67ff1d3b4c51fa050c19e60c3253a79d3452a39 | corehq/apps/data_interfaces/views.py | python | AutomaticUpdateRuleListView._format_rule | (self, rule) | return {
'id': rule.pk,
'name': rule.name,
'case_type': rule.case_type,
'active': rule.active,
'last_run': (ServerTime(rule.last_run)
.user_time(self.project_timezone)
.done()
.strftime(SERVER_DATETIME_FORMAT)) if rule.last_run else '-',
'edit_url': reverse(self.edit_url_name, args=[self.domain, rule.pk]),
'action_error': "", # must be provided because knockout template looks for it
} | [] | def _format_rule(self, rule):
return {
'id': rule.pk,
'name': rule.name,
'case_type': rule.case_type,
'active': rule.active,
'last_run': (ServerTime(rule.last_run)
.user_time(self.project_timezone)
.done()
.strftime(SERVER_DATETIME_FORMAT)) if rule.last_run else '-',
'edit_url': reverse(self.edit_url_name, args=[self.domain, rule.pk]),
'action_error': "", # must be provided because knockout template looks for it
} | [
"def",
"_format_rule",
"(",
"self",
",",
"rule",
")",
":",
"return",
"{",
"'id'",
":",
"rule",
".",
"pk",
",",
"'name'",
":",
"rule",
".",
"name",
",",
"'case_type'",
":",
"rule",
".",
"case_type",
",",
"'active'",
":",
"rule",
".",
"active",
",",
"'last_run'",
":",
"(",
"ServerTime",
"(",
"rule",
".",
"last_run",
")",
".",
"user_time",
"(",
"self",
".",
"project_timezone",
")",
".",
"done",
"(",
")",
".",
"strftime",
"(",
"SERVER_DATETIME_FORMAT",
")",
")",
"if",
"rule",
".",
"last_run",
"else",
"'-'",
",",
"'edit_url'",
":",
"reverse",
"(",
"self",
".",
"edit_url_name",
",",
"args",
"=",
"[",
"self",
".",
"domain",
",",
"rule",
".",
"pk",
"]",
")",
",",
"'action_error'",
":",
"\"\"",
",",
"# must be provided because knockout template looks for it",
"}"
] | https://github.com/dimagi/commcare-hq/blob/d67ff1d3b4c51fa050c19e60c3253a79d3452a39/corehq/apps/data_interfaces/views.py#L693-L705 |
|||
aws-samples/aws-kube-codesuite | ab4e5ce45416b83bffb947ab8d234df5437f4fca | src/requests/help.py | python | main | () | Pretty-print the bug information as JSON. | Pretty-print the bug information as JSON. | [
"Pretty",
"-",
"print",
"the",
"bug",
"information",
"as",
"JSON",
"."
] | def main():
"""Pretty-print the bug information as JSON."""
print(json.dumps(info(), sort_keys=True, indent=2)) | [
"def",
"main",
"(",
")",
":",
"print",
"(",
"json",
".",
"dumps",
"(",
"info",
"(",
")",
",",
"sort_keys",
"=",
"True",
",",
"indent",
"=",
"2",
")",
")"
] | https://github.com/aws-samples/aws-kube-codesuite/blob/ab4e5ce45416b83bffb947ab8d234df5437f4fca/src/requests/help.py#L114-L116 |
||
evennia/evennia | fa79110ba6b219932f22297838e8ac72ebc0be0e | evennia/contrib/evscaperoom/commands.py | python | CmdEvscapeRoom.parse | (self) | Parse incoming arguments for use in all child classes. | Parse incoming arguments for use in all child classes. | [
"Parse",
"incoming",
"arguments",
"for",
"use",
"in",
"all",
"child",
"classes",
"."
] | def parse(self):
"""
Parse incoming arguments for use in all child classes.
"""
caller = self.caller
self.args = self.args.strip()
# splits to either ['obj'] or e.g. ['obj', 'on', 'obj']
parts = [part.strip() for part in _RE_ARGSPLIT.split(" " + self.args, 1)]
nparts = len(parts)
self.obj1 = None
self.arg1 = None
self.prep = None
self.obj2 = None
self.arg2 = None
if nparts == 1:
self.obj1, self.arg1 = self._search(parts[0], self.obj1_search)
elif nparts == 3:
obj1, self.prep, obj2 = parts
self.obj1, self.arg1 = self._search(obj1, self.obj1_search)
self.obj2, self.arg2 = self._search(obj2, self.obj2_search)
self.room = caller.location
self.roomstate = self.room.db.state | [
"def",
"parse",
"(",
"self",
")",
":",
"caller",
"=",
"self",
".",
"caller",
"self",
".",
"args",
"=",
"self",
".",
"args",
".",
"strip",
"(",
")",
"# splits to either ['obj'] or e.g. ['obj', 'on', 'obj']",
"parts",
"=",
"[",
"part",
".",
"strip",
"(",
")",
"for",
"part",
"in",
"_RE_ARGSPLIT",
".",
"split",
"(",
"\" \"",
"+",
"self",
".",
"args",
",",
"1",
")",
"]",
"nparts",
"=",
"len",
"(",
"parts",
")",
"self",
".",
"obj1",
"=",
"None",
"self",
".",
"arg1",
"=",
"None",
"self",
".",
"prep",
"=",
"None",
"self",
".",
"obj2",
"=",
"None",
"self",
".",
"arg2",
"=",
"None",
"if",
"nparts",
"==",
"1",
":",
"self",
".",
"obj1",
",",
"self",
".",
"arg1",
"=",
"self",
".",
"_search",
"(",
"parts",
"[",
"0",
"]",
",",
"self",
".",
"obj1_search",
")",
"elif",
"nparts",
"==",
"3",
":",
"obj1",
",",
"self",
".",
"prep",
",",
"obj2",
"=",
"parts",
"self",
".",
"obj1",
",",
"self",
".",
"arg1",
"=",
"self",
".",
"_search",
"(",
"obj1",
",",
"self",
".",
"obj1_search",
")",
"self",
".",
"obj2",
",",
"self",
".",
"arg2",
"=",
"self",
".",
"_search",
"(",
"obj2",
",",
"self",
".",
"obj2_search",
")",
"self",
".",
"room",
"=",
"caller",
".",
"location",
"self",
".",
"roomstate",
"=",
"self",
".",
"room",
".",
"db",
".",
"state"
] | https://github.com/evennia/evennia/blob/fa79110ba6b219932f22297838e8ac72ebc0be0e/evennia/contrib/evscaperoom/commands.py#L165-L189 |
||
apple/ccs-calendarserver | 13c706b985fb728b9aab42dc0fef85aae21921c3 | txdav/caldav/datastore/sql_external.py | python | ManagedAttachmentExternal.__init__ | (self, managedID, size) | [] | def __init__(self, managedID, size):
self._managedID = managedID
self._size = size | [
"def",
"__init__",
"(",
"self",
",",
"managedID",
",",
"size",
")",
":",
"self",
".",
"_managedID",
"=",
"managedID",
"self",
".",
"_size",
"=",
"size"
] | https://github.com/apple/ccs-calendarserver/blob/13c706b985fb728b9aab42dc0fef85aae21921c3/txdav/caldav/datastore/sql_external.py#L252-L254 |
||||
SteveDoyle2/pyNastran | eda651ac2d4883d95a34951f8a002ff94f642a1a | pyNastran/bdf/bdf.py | python | BDF_._prepare_deform | (self, unused_card: List[str], card_obj: BDFCard, comment='') | return loads | adds a DEFORM | adds a DEFORM | [
"adds",
"a",
"DEFORM"
] | def _prepare_deform(self, unused_card: List[str], card_obj: BDFCard, comment='') -> DEFORM:
"""adds a DEFORM"""
loads = [DEFORM.add_card(card_obj, comment=comment)]
if card_obj.field(4):
loads.append(DEFORM.add_card(card_obj, 1, comment=comment))
if card_obj.field(6):
loads.append(DEFORM.add_card(card_obj, 2, comment=comment))
for loadi in loads:
self._add_methods._add_load_object(loadi)
return loads | [
"def",
"_prepare_deform",
"(",
"self",
",",
"unused_card",
":",
"List",
"[",
"str",
"]",
",",
"card_obj",
":",
"BDFCard",
",",
"comment",
"=",
"''",
")",
"->",
"DEFORM",
":",
"loads",
"=",
"[",
"DEFORM",
".",
"add_card",
"(",
"card_obj",
",",
"comment",
"=",
"comment",
")",
"]",
"if",
"card_obj",
".",
"field",
"(",
"4",
")",
":",
"loads",
".",
"append",
"(",
"DEFORM",
".",
"add_card",
"(",
"card_obj",
",",
"1",
",",
"comment",
"=",
"comment",
")",
")",
"if",
"card_obj",
".",
"field",
"(",
"6",
")",
":",
"loads",
".",
"append",
"(",
"DEFORM",
".",
"add_card",
"(",
"card_obj",
",",
"2",
",",
"comment",
"=",
"comment",
")",
")",
"for",
"loadi",
"in",
"loads",
":",
"self",
".",
"_add_methods",
".",
"_add_load_object",
"(",
"loadi",
")",
"return",
"loads"
] | https://github.com/SteveDoyle2/pyNastran/blob/eda651ac2d4883d95a34951f8a002ff94f642a1a/pyNastran/bdf/bdf.py#L2780-L2789 |
|
VITA-Group/FasterSeg | 478b0265eb9ab626cfbe503ad16d2452878b38cc | search/operations.py | python | BasicResidual_downup_2x._flops | (h, w, C_in, C_out, kernel_size=3, stride=1, dilation=1, groups=1) | return flops | [] | def _flops(h, w, C_in, C_out, kernel_size=3, stride=1, dilation=1, groups=1):
assert stride in [1, 2]
layer = BasicResidual_downup_2x(C_in, C_out, kernel_size, stride, dilation, groups, slimmable=False)
flops, params = profile(layer, inputs=(torch.randn(1, C_in, h, w),), verbose=False)
return flops | [
"def",
"_flops",
"(",
"h",
",",
"w",
",",
"C_in",
",",
"C_out",
",",
"kernel_size",
"=",
"3",
",",
"stride",
"=",
"1",
",",
"dilation",
"=",
"1",
",",
"groups",
"=",
"1",
")",
":",
"assert",
"stride",
"in",
"[",
"1",
",",
"2",
"]",
"layer",
"=",
"BasicResidual_downup_2x",
"(",
"C_in",
",",
"C_out",
",",
"kernel_size",
",",
"stride",
",",
"dilation",
",",
"groups",
",",
"slimmable",
"=",
"False",
")",
"flops",
",",
"params",
"=",
"profile",
"(",
"layer",
",",
"inputs",
"=",
"(",
"torch",
".",
"randn",
"(",
"1",
",",
"C_in",
",",
"h",
",",
"w",
")",
",",
")",
",",
"verbose",
"=",
"False",
")",
"return",
"flops"
] | https://github.com/VITA-Group/FasterSeg/blob/478b0265eb9ab626cfbe503ad16d2452878b38cc/search/operations.py#L401-L405 |
|||
kuri65536/python-for-android | 26402a08fc46b09ef94e8d7a6bbc3a54ff9d0891 | python-modules/twisted/twisted/internet/posixbase.py | python | PosixReactorBase.listenSSL | (self, port, factory, contextFactory, backlog=50, interface='') | return p | @see: twisted.internet.interfaces.IReactorSSL.listenSSL | [] | def listenSSL(self, port, factory, contextFactory, backlog=50, interface=''):
"""@see: twisted.internet.interfaces.IReactorSSL.listenSSL
"""
assert sslEnabled, "SSL support is not present"
p = ssl.Port(port, factory, contextFactory, backlog, interface, self)
p.startListening()
return p | [
"def",
"listenSSL",
"(",
"self",
",",
"port",
",",
"factory",
",",
"contextFactory",
",",
"backlog",
"=",
"50",
",",
"interface",
"=",
"''",
")",
":",
"assert",
"sslEnabled",
",",
"\"SSL support is not present\"",
"p",
"=",
"ssl",
".",
"Port",
"(",
"port",
",",
"factory",
",",
"contextFactory",
",",
"backlog",
",",
"interface",
",",
"self",
")",
"p",
".",
"startListening",
"(",
")",
"return",
"p"
] | https://github.com/kuri65536/python-for-android/blob/26402a08fc46b09ef94e8d7a6bbc3a54ff9d0891/python-modules/twisted/twisted/internet/posixbase.py#L439-L445 |
||
libertysoft3/saidit | 271c7d03adb369f82921d811360b00812e42da24 | r2/r2/lib/db/queries.py | python | user_query | (kind, user_id, sort, time) | return make_results(q) | General profile-page query. | General profile-page query. | [
"General",
"profile",
"-",
"page",
"query",
"."
] | def user_query(kind, user_id, sort, time):
"""General profile-page query."""
q = kind._query(kind.c.author_id == user_id,
kind.c._spam == (True, False),
sort = db_sort(sort))
if time != 'all':
q._filter(db_times[time])
return make_results(q) | [
"def",
"user_query",
"(",
"kind",
",",
"user_id",
",",
"sort",
",",
"time",
")",
":",
"q",
"=",
"kind",
".",
"_query",
"(",
"kind",
".",
"c",
".",
"author_id",
"==",
"user_id",
",",
"kind",
".",
"c",
".",
"_spam",
"==",
"(",
"True",
",",
"False",
")",
",",
"sort",
"=",
"db_sort",
"(",
"sort",
")",
")",
"if",
"time",
"!=",
"'all'",
":",
"q",
".",
"_filter",
"(",
"db_times",
"[",
"time",
"]",
")",
"return",
"make_results",
"(",
"q",
")"
] | https://github.com/libertysoft3/saidit/blob/271c7d03adb369f82921d811360b00812e42da24/r2/r2/lib/db/queries.py#L581-L588 |
|
TesterlifeRaymond/doraemon | d5cb6e34bd5f2aa97273ce0c0c9303e32beaa333 | venv/lib/python3.6/site-packages/pip/_vendor/pyparsing.py | python | ParserElement.leaveWhitespace | ( self ) | return self | Disables the skipping of whitespace before matching the characters in the
C{ParserElement}'s defined pattern. This is normally only used internally by
the pyparsing module, but may be needed in some whitespace-sensitive grammars. | Disables the skipping of whitespace before matching the characters in the
C{ParserElement}'s defined pattern. This is normally only used internally by
the pyparsing module, but may be needed in some whitespace-sensitive grammars. | [
"Disables",
"the",
"skipping",
"of",
"whitespace",
"before",
"matching",
"the",
"characters",
"in",
"the",
"C",
"{",
"ParserElement",
"}",
"s",
"defined",
"pattern",
".",
"This",
"is",
"normally",
"only",
"used",
"internally",
"by",
"the",
"pyparsing",
"module",
"but",
"may",
"be",
"needed",
"in",
"some",
"whitespace",
"-",
"sensitive",
"grammars",
"."
] | def leaveWhitespace( self ):
"""
Disables the skipping of whitespace before matching the characters in the
C{ParserElement}'s defined pattern. This is normally only used internally by
the pyparsing module, but may be needed in some whitespace-sensitive grammars.
"""
self.skipWhitespace = False
return self | [
"def",
"leaveWhitespace",
"(",
"self",
")",
":",
"self",
".",
"skipWhitespace",
"=",
"False",
"return",
"self"
] | https://github.com/TesterlifeRaymond/doraemon/blob/d5cb6e34bd5f2aa97273ce0c0c9303e32beaa333/venv/lib/python3.6/site-packages/pip/_vendor/pyparsing.py#L2011-L2018 |
|
chebee7i/nxpd | a0797cd0ee4f8584c9ee49bab45e63f6ed05613a | nxpd/pydot/__init__.py | python | Graph.to_string | (self) | return ''.join(graph) | Returns a string representation of the graph in dot language.
It will return the graph and all its subelements in string form. | Returns a string representation of the graph in dot language. | [
"Returns",
"a",
"string",
"representation",
"of",
"the",
"graph",
"in",
"dot",
"language",
"."
] | def to_string(self):
"""Returns a string representation of the graph in dot language.
It will return the graph and all its subelements in string form.
"""
graph = list()
if self.obj_dict.get('strict', None) is not None:
if self == self.get_parent_graph() and self.obj_dict['strict']:
graph.append('strict ')
if self.obj_dict['name'] == '':
if 'show_keyword' in self.obj_dict and self.obj_dict['show_keyword']:
graph.append('subgraph {\n')
else:
graph.append('{\n')
else:
graph.append('%s %s {\n' % (self.obj_dict['type'], self.obj_dict['name']))
for attr, value in sorted(self.obj_dict['attributes'].items(), key=itemgetter(0)):
if value is not None:
graph.append('%s=%s' % (attr, quote_if_necessary(value)))
else:
graph.append(attr)
graph.append(';\n')
edges_done = set()
edge_obj_dicts = list()
for e in self.obj_dict['edges'].values():
edge_obj_dicts.extend(e)
if edge_obj_dicts:
edge_src_set, edge_dst_set = list(zip(*[obj['points'] for obj in edge_obj_dicts]))
edge_src_set, edge_dst_set = set(edge_src_set), set(edge_dst_set)
else:
edge_src_set, edge_dst_set = set(), set()
node_obj_dicts = list()
for e in self.obj_dict['nodes'].values():
node_obj_dicts.extend(e)
sgraph_obj_dicts = list()
for sg in self.obj_dict['subgraphs'].values():
sgraph_obj_dicts.extend(sg)
obj_list = sorted([
(obj['sequence'], obj)
for obj
in (edge_obj_dicts + node_obj_dicts + sgraph_obj_dicts)
])
for idx, obj in obj_list:
if obj['type'] == 'node':
node = Node(obj_dict=obj)
if self.obj_dict.get('suppress_disconnected', False):
if (node.get_name() not in edge_src_set and
node.get_name() not in edge_dst_set):
continue
graph.append(node.to_string() + '\n')
elif obj['type'] == 'edge':
edge = Edge(obj_dict=obj)
if self.obj_dict.get('simplify', False) and edge in edges_done:
continue
graph.append(edge.to_string() + '\n')
edges_done.add(edge)
else:
sgraph = Subgraph(obj_dict=obj)
graph.append(sgraph.to_string() + '\n')
graph.append('}\n')
return ''.join(graph) | [
"def",
"to_string",
"(",
"self",
")",
":",
"graph",
"=",
"list",
"(",
")",
"if",
"self",
".",
"obj_dict",
".",
"get",
"(",
"'strict'",
",",
"None",
")",
"is",
"not",
"None",
":",
"if",
"self",
"==",
"self",
".",
"get_parent_graph",
"(",
")",
"and",
"self",
".",
"obj_dict",
"[",
"'strict'",
"]",
":",
"graph",
".",
"append",
"(",
"'strict '",
")",
"if",
"self",
".",
"obj_dict",
"[",
"'name'",
"]",
"==",
"''",
":",
"if",
"'show_keyword'",
"in",
"self",
".",
"obj_dict",
"and",
"self",
".",
"obj_dict",
"[",
"'show_keyword'",
"]",
":",
"graph",
".",
"append",
"(",
"'subgraph {\\n'",
")",
"else",
":",
"graph",
".",
"append",
"(",
"'{\\n'",
")",
"else",
":",
"graph",
".",
"append",
"(",
"'%s %s {\\n'",
"%",
"(",
"self",
".",
"obj_dict",
"[",
"'type'",
"]",
",",
"self",
".",
"obj_dict",
"[",
"'name'",
"]",
")",
")",
"for",
"attr",
",",
"value",
"in",
"sorted",
"(",
"self",
".",
"obj_dict",
"[",
"'attributes'",
"]",
".",
"items",
"(",
")",
",",
"key",
"=",
"itemgetter",
"(",
"0",
")",
")",
":",
"if",
"value",
"is",
"not",
"None",
":",
"graph",
".",
"append",
"(",
"'%s=%s'",
"%",
"(",
"attr",
",",
"quote_if_necessary",
"(",
"value",
")",
")",
")",
"else",
":",
"graph",
".",
"append",
"(",
"attr",
")",
"graph",
".",
"append",
"(",
"';\\n'",
")",
"edges_done",
"=",
"set",
"(",
")",
"edge_obj_dicts",
"=",
"list",
"(",
")",
"for",
"e",
"in",
"self",
".",
"obj_dict",
"[",
"'edges'",
"]",
".",
"values",
"(",
")",
":",
"edge_obj_dicts",
".",
"extend",
"(",
"e",
")",
"if",
"edge_obj_dicts",
":",
"edge_src_set",
",",
"edge_dst_set",
"=",
"list",
"(",
"zip",
"(",
"*",
"[",
"obj",
"[",
"'points'",
"]",
"for",
"obj",
"in",
"edge_obj_dicts",
"]",
")",
")",
"edge_src_set",
",",
"edge_dst_set",
"=",
"set",
"(",
"edge_src_set",
")",
",",
"set",
"(",
"edge_dst_set",
")",
"else",
":",
"edge_src_set",
",",
"edge_dst_set",
"=",
"set",
"(",
")",
",",
"set",
"(",
")",
"node_obj_dicts",
"=",
"list",
"(",
")",
"for",
"e",
"in",
"self",
".",
"obj_dict",
"[",
"'nodes'",
"]",
".",
"values",
"(",
")",
":",
"node_obj_dicts",
".",
"extend",
"(",
"e",
")",
"sgraph_obj_dicts",
"=",
"list",
"(",
")",
"for",
"sg",
"in",
"self",
".",
"obj_dict",
"[",
"'subgraphs'",
"]",
".",
"values",
"(",
")",
":",
"sgraph_obj_dicts",
".",
"extend",
"(",
"sg",
")",
"obj_list",
"=",
"sorted",
"(",
"[",
"(",
"obj",
"[",
"'sequence'",
"]",
",",
"obj",
")",
"for",
"obj",
"in",
"(",
"edge_obj_dicts",
"+",
"node_obj_dicts",
"+",
"sgraph_obj_dicts",
")",
"]",
")",
"for",
"idx",
",",
"obj",
"in",
"obj_list",
":",
"if",
"obj",
"[",
"'type'",
"]",
"==",
"'node'",
":",
"node",
"=",
"Node",
"(",
"obj_dict",
"=",
"obj",
")",
"if",
"self",
".",
"obj_dict",
".",
"get",
"(",
"'suppress_disconnected'",
",",
"False",
")",
":",
"if",
"(",
"node",
".",
"get_name",
"(",
")",
"not",
"in",
"edge_src_set",
"and",
"node",
".",
"get_name",
"(",
")",
"not",
"in",
"edge_dst_set",
")",
":",
"continue",
"graph",
".",
"append",
"(",
"node",
".",
"to_string",
"(",
")",
"+",
"'\\n'",
")",
"elif",
"obj",
"[",
"'type'",
"]",
"==",
"'edge'",
":",
"edge",
"=",
"Edge",
"(",
"obj_dict",
"=",
"obj",
")",
"if",
"self",
".",
"obj_dict",
".",
"get",
"(",
"'simplify'",
",",
"False",
")",
"and",
"edge",
"in",
"edges_done",
":",
"continue",
"graph",
".",
"append",
"(",
"edge",
".",
"to_string",
"(",
")",
"+",
"'\\n'",
")",
"edges_done",
".",
"add",
"(",
"edge",
")",
"else",
":",
"sgraph",
"=",
"Subgraph",
"(",
"obj_dict",
"=",
"obj",
")",
"graph",
".",
"append",
"(",
"sgraph",
".",
"to_string",
"(",
")",
"+",
"'\\n'",
")",
"graph",
".",
"append",
"(",
"'}\\n'",
")",
"return",
"''",
".",
"join",
"(",
"graph",
")"
] | https://github.com/chebee7i/nxpd/blob/a0797cd0ee4f8584c9ee49bab45e63f6ed05613a/nxpd/pydot/__init__.py#L1527-L1606 |
|
GoogleCloudPlatform/appengine-mapreduce | 2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6 | python/src/mapreduce/input_readers.py | python | LogInputReader.__init__ | (self,
start_time=None,
end_time=None,
minimum_log_level=None,
include_incomplete=False,
include_app_logs=False,
version_ids=None,
module_versions=None,
**kwargs) | Constructor.
Args:
start_time: The earliest request completion or last-update time of logs
that should be mapped over, in seconds since the Unix epoch.
end_time: The latest request completion or last-update time that logs
should be mapped over, in seconds since the Unix epoch.
minimum_log_level: An application log level which serves as a filter on
the requests mapped over--requests with no application log at or above
the specified level will be omitted, even if include_app_logs is False.
include_incomplete: Whether or not to include requests that have started
but not yet finished, as a boolean. Defaults to False.
include_app_logs: Whether or not to include application level logs in the
mapped logs, as a boolean. Defaults to False.
version_ids: A list of version ids whose logs should be read. This can not
be used with module_versions
module_versions: A list of tuples containing a module and version id
whose logs should be read. This can not be used with version_ids
**kwargs: A dictionary of keywords associated with this input reader. | Constructor. | [
"Constructor",
"."
] | def __init__(self,
start_time=None,
end_time=None,
minimum_log_level=None,
include_incomplete=False,
include_app_logs=False,
version_ids=None,
module_versions=None,
**kwargs):
"""Constructor.
Args:
start_time: The earliest request completion or last-update time of logs
that should be mapped over, in seconds since the Unix epoch.
end_time: The latest request completion or last-update time that logs
should be mapped over, in seconds since the Unix epoch.
minimum_log_level: An application log level which serves as a filter on
the requests mapped over--requests with no application log at or above
the specified level will be omitted, even if include_app_logs is False.
include_incomplete: Whether or not to include requests that have started
but not yet finished, as a boolean. Defaults to False.
include_app_logs: Whether or not to include application level logs in the
mapped logs, as a boolean. Defaults to False.
version_ids: A list of version ids whose logs should be read. This can not
be used with module_versions
module_versions: A list of tuples containing a module and version id
whose logs should be read. This can not be used with version_ids
**kwargs: A dictionary of keywords associated with this input reader.
"""
InputReader.__init__(self) # pylint: disable=non-parent-init-called
# The rule for __params is that its contents will always be suitable as
# input to logservice.fetch().
self.__params = dict(kwargs)
if start_time is not None:
self.__params[self.START_TIME_PARAM] = start_time
if end_time is not None:
self.__params[self.END_TIME_PARAM] = end_time
if minimum_log_level is not None:
self.__params[self.MINIMUM_LOG_LEVEL_PARAM] = minimum_log_level
if include_incomplete is not None:
self.__params[self.INCLUDE_INCOMPLETE_PARAM] = include_incomplete
if include_app_logs is not None:
self.__params[self.INCLUDE_APP_LOGS_PARAM] = include_app_logs
if version_ids:
self.__params[self.VERSION_IDS_PARAM] = version_ids
if module_versions:
self.__params[self.MODULE_VERSIONS_PARAM] = module_versions
# Any submitted prototype_request will be in encoded form.
if self._PROTOTYPE_REQUEST_PARAM in self.__params:
prototype_request = log_service_pb.LogReadRequest(
self.__params[self._PROTOTYPE_REQUEST_PARAM])
self.__params[self._PROTOTYPE_REQUEST_PARAM] = prototype_request | [
"def",
"__init__",
"(",
"self",
",",
"start_time",
"=",
"None",
",",
"end_time",
"=",
"None",
",",
"minimum_log_level",
"=",
"None",
",",
"include_incomplete",
"=",
"False",
",",
"include_app_logs",
"=",
"False",
",",
"version_ids",
"=",
"None",
",",
"module_versions",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"InputReader",
".",
"__init__",
"(",
"self",
")",
"# pylint: disable=non-parent-init-called",
"# The rule for __params is that its contents will always be suitable as",
"# input to logservice.fetch().",
"self",
".",
"__params",
"=",
"dict",
"(",
"kwargs",
")",
"if",
"start_time",
"is",
"not",
"None",
":",
"self",
".",
"__params",
"[",
"self",
".",
"START_TIME_PARAM",
"]",
"=",
"start_time",
"if",
"end_time",
"is",
"not",
"None",
":",
"self",
".",
"__params",
"[",
"self",
".",
"END_TIME_PARAM",
"]",
"=",
"end_time",
"if",
"minimum_log_level",
"is",
"not",
"None",
":",
"self",
".",
"__params",
"[",
"self",
".",
"MINIMUM_LOG_LEVEL_PARAM",
"]",
"=",
"minimum_log_level",
"if",
"include_incomplete",
"is",
"not",
"None",
":",
"self",
".",
"__params",
"[",
"self",
".",
"INCLUDE_INCOMPLETE_PARAM",
"]",
"=",
"include_incomplete",
"if",
"include_app_logs",
"is",
"not",
"None",
":",
"self",
".",
"__params",
"[",
"self",
".",
"INCLUDE_APP_LOGS_PARAM",
"]",
"=",
"include_app_logs",
"if",
"version_ids",
":",
"self",
".",
"__params",
"[",
"self",
".",
"VERSION_IDS_PARAM",
"]",
"=",
"version_ids",
"if",
"module_versions",
":",
"self",
".",
"__params",
"[",
"self",
".",
"MODULE_VERSIONS_PARAM",
"]",
"=",
"module_versions",
"# Any submitted prototype_request will be in encoded form.",
"if",
"self",
".",
"_PROTOTYPE_REQUEST_PARAM",
"in",
"self",
".",
"__params",
":",
"prototype_request",
"=",
"log_service_pb",
".",
"LogReadRequest",
"(",
"self",
".",
"__params",
"[",
"self",
".",
"_PROTOTYPE_REQUEST_PARAM",
"]",
")",
"self",
".",
"__params",
"[",
"self",
".",
"_PROTOTYPE_REQUEST_PARAM",
"]",
"=",
"prototype_request"
] | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/input_readers.py#L2048-L2102 |
||
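A construction sketch for the reader documented above -- not part of the source record; it assumes the App Engine MapReduce package is importable and the version id is hypothetical.

# Construction sketch; import path and version id are assumptions.
import time
from mapreduce.input_readers import LogInputReader

now = time.time()
reader = LogInputReader(
    start_time=now - 3600,   # map over the last hour of request logs
    end_time=now,
    include_app_logs=True,
    version_ids=['1'],       # hypothetical deployed version id
)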
SheffieldML/GPy | bb1bc5088671f9316bc92a46d356734e34c2d5c0 | benchmarks/regression/methods.py | python | GP_RBF._predict | (self, test_data) | return self.model.predict(test_data)[0] | [] | def _predict(self, test_data):
return self.model.predict(test_data)[0] | [
"def",
"_predict",
"(",
"self",
",",
"test_data",
")",
":",
"return",
"self",
".",
"model",
".",
"predict",
"(",
"test_data",
")",
"[",
"0",
"]"
] | https://github.com/SheffieldML/GPy/blob/bb1bc5088671f9316bc92a46d356734e34c2d5c0/benchmarks/regression/methods.py#L62-L63 |
|||
hatRiot/zarp | 2e772350a01c2aeed3f4da9685cd0cc5d6b3ecad | src/lib/scapy/as_resolvers.py | python | AS_resolver._parse_whois | (self, txt) | return asn,desc.strip() | [] | def _parse_whois(self, txt):
asn,desc = None,""
for l in txt.splitlines():
if not asn and l.startswith("origin:"):
asn = l[7:].strip()
if l.startswith("descr:"):
if desc:
desc += r"\n"
desc += l[6:].strip()
if asn is not None and desc:
break
return asn,desc.strip() | [
"def",
"_parse_whois",
"(",
"self",
",",
"txt",
")",
":",
"asn",
",",
"desc",
"=",
"None",
",",
"\"\"",
"for",
"l",
"in",
"txt",
".",
"splitlines",
"(",
")",
":",
"if",
"not",
"asn",
"and",
"l",
".",
"startswith",
"(",
"\"origin:\"",
")",
":",
"asn",
"=",
"l",
"[",
"7",
":",
"]",
".",
"strip",
"(",
")",
"if",
"l",
".",
"startswith",
"(",
"\"descr:\"",
")",
":",
"if",
"desc",
":",
"desc",
"+=",
"r\"\\n\"",
"desc",
"+=",
"l",
"[",
"6",
":",
"]",
".",
"strip",
"(",
")",
"if",
"asn",
"is",
"not",
"None",
"and",
"desc",
":",
"break",
"return",
"asn",
",",
"desc",
".",
"strip",
"(",
")"
] | https://github.com/hatRiot/zarp/blob/2e772350a01c2aeed3f4da9685cd0cc5d6b3ecad/src/lib/scapy/as_resolvers.py#L28-L39 |
|||
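A standalone sketch of the parsing rules above, run against a fabricated whois response (the AS number and description are hypothetical):

sample = """origin: AS64512
descr: Example Transit
descr: Somewhere, Earth
"""

asn, desc = None, ""
for line in sample.splitlines():
    if not asn and line.startswith("origin:"):
        asn = line[7:].strip()       # text after the "origin:" tag
    if line.startswith("descr:"):
        if desc:
            desc += r"\n"            # mirrors the literal backslash-n join above
        desc += line[6:].strip()
    if asn is not None and desc:
        break                        # stops after the first descr line, as above
print(asn, desc)                     # -> AS64512 Example Transit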
googlefonts/gftools | 8ad55dd4d7e38729524329c79f236476f1576e67 | bin/gftools-build-vf.py | python | display_args | () | Prints info about argparse flag use. | Prints info about argparse flag use. | [
"Prints",
"info",
"about",
"argparse",
"flag",
"use",
"."
] | def display_args():
"""
Prints info about argparse flag use.
"""
print("\n**** Settings:")
print(" [+] --drawbot\t\t", end="")
if args.drawbot == True:
printG(args.drawbot)
else:
printR(args.drawbot)
print(" [+] --googlefonts\t\t", end="")
if args.googlefonts is not None:
printG(args.googlefonts)
else:
printR(args.googlefonts)
print(" [+] --ttfautohint\t\t", end="")
if args.ttfautohint is not None:
printG(args.ttfautohint)
else:
printR(args.ttfautohint)
print(" [+] --fontbakery\t\t", end="")
if args.fontbakery == True:
printG(args.fontbakery)
else:
printR(args.fontbakery)
print(" [+] --static\t\t", end="")
if args.static == True:
printG(args.static)
else:
printR(args.static)
print(" [+] --fixnonhinting\t", end="")
if args.fixnonhinting == True:
printG(args.fixnonhinting)
else:
printR(args.fixnonhinting)
print(" [+] --addfont\t\t", end="")
if args.addfont == True:
printG(args.addfont)
else:
printR(args.addfont)
print(" [+] --ufosrc\t\t", end="")
if args.ufosrc == True:
printG(args.ufosrc)
else:
printR(args.ufosrc)
printG(" [!] Done")
time.sleep(8) | [
"def",
"display_args",
"(",
")",
":",
"print",
"(",
"\"\\n**** Settings:\"",
")",
"print",
"(",
"\" [+] --drawbot\\t\\t\"",
",",
"end",
"=",
"\"\"",
")",
"if",
"args",
".",
"drawbot",
"==",
"True",
":",
"printG",
"(",
"args",
".",
"drawbot",
")",
"else",
":",
"printR",
"(",
"args",
".",
"drawbot",
")",
"print",
"(",
"\" [+] --googlefonts\\t\\t\"",
",",
"end",
"=",
"\"\"",
")",
"if",
"args",
".",
"googlefonts",
"is",
"not",
"None",
":",
"printG",
"(",
"args",
".",
"googlefonts",
")",
"else",
":",
"printR",
"(",
"args",
".",
"googlefonts",
")",
"print",
"(",
"\" [+] --ttfautohint\\t\\t\"",
",",
"end",
"=",
"\"\"",
")",
"if",
"args",
".",
"ttfautohint",
"is",
"not",
"None",
":",
"printG",
"(",
"args",
".",
"ttfautohint",
")",
"else",
":",
"printR",
"(",
"args",
".",
"ttfautohint",
")",
"print",
"(",
"\" [+] --fontbakery\\t\\t\"",
",",
"end",
"=",
"\"\"",
")",
"if",
"args",
".",
"fontbakery",
"==",
"True",
":",
"printG",
"(",
"args",
".",
"fontbakery",
")",
"else",
":",
"printR",
"(",
"args",
".",
"fontbakery",
")",
"print",
"(",
"\" [+] --static\\t\\t\"",
",",
"end",
"=",
"\"\"",
")",
"if",
"args",
".",
"static",
"==",
"True",
":",
"printG",
"(",
"args",
".",
"static",
")",
"else",
":",
"printR",
"(",
"args",
".",
"static",
")",
"print",
"(",
"\" [+] --fixnonhinting\\t\"",
",",
"end",
"=",
"\"\"",
")",
"if",
"args",
".",
"fixnonhinting",
"==",
"True",
":",
"printG",
"(",
"args",
".",
"fixnonhinting",
")",
"else",
":",
"printR",
"(",
"args",
".",
"fixnonhinting",
")",
"print",
"(",
"\" [+] --addfont\\t\\t\"",
",",
"end",
"=",
"\"\"",
")",
"if",
"args",
".",
"addfont",
"==",
"True",
":",
"printG",
"(",
"args",
".",
"addfont",
")",
"else",
":",
"printR",
"(",
"args",
".",
"addfont",
")",
"print",
"(",
"\" [+] --ufosrc\\t\\t\"",
",",
"end",
"=",
"\"\"",
")",
"if",
"args",
".",
"ufosrc",
"==",
"True",
":",
"printG",
"(",
"args",
".",
"ufosrc",
")",
"else",
":",
"printR",
"(",
"args",
".",
"ufosrc",
")",
"printG",
"(",
"\" [!] Done\"",
")",
"time",
".",
"sleep",
"(",
"8",
")"
] | https://github.com/googlefonts/gftools/blob/8ad55dd4d7e38729524329c79f236476f1576e67/bin/gftools-build-vf.py#L130-L185 |
||
mesalock-linux/mesapy | ed546d59a21b36feb93e2309d5c6b75aa0ad95c9 | rpython/rtyper/lltypesystem/lltype.py | python | scoped_alloc | (T, n=None, zero=False) | return _make_scoped_allocator(T, zero)(n=n) | Returns a context manager which handles allocation and
deallocation of temporary memory. Use it in a with statement::
with scoped_alloc(Array(Signed), 1) as array:
...use array...
...it's freed now. | Returns a context manager which handles allocation and
deallocation of temporary memory. Use it in a with statement:: | [
"Returns",
"a",
"context",
"manager",
"which",
"handles",
"allocation",
"and",
"deallocation",
"of",
"temporary",
"memory",
".",
"Use",
"it",
"in",
"a",
"with",
"statement",
"::"
] | def scoped_alloc(T, n=None, zero=False):
"""Returns a context manager which handles allocation and
deallocation of temporary memory. Use it in a with statement::
with scoped_alloc(Array(Signed), 1) as array:
...use array...
...it's freed now.
"""
return _make_scoped_allocator(T, zero)(n=n) | [
"def",
"scoped_alloc",
"(",
"T",
",",
"n",
"=",
"None",
",",
"zero",
"=",
"False",
")",
":",
"return",
"_make_scoped_allocator",
"(",
"T",
",",
"zero",
")",
"(",
"n",
"=",
"n",
")"
] | https://github.com/mesalock-linux/mesapy/blob/ed546d59a21b36feb93e2309d5c6b75aa0ad95c9/rpython/rtyper/lltypesystem/lltype.py#L2288-L2296 |
|
cloudera/hue | 23f02102d4547c17c32bd5ea0eb24e9eadd657a4 | desktop/core/ext-py/py4j-0.9/src/py4j/java_gateway.py | python | JavaGateway.start_callback_server | (self, callback_server_parameters=None) | return True | Starts the callback server.
:param callback_server_parameters: parameters to use to start the
server. If not provided, it will use the gateway callback server
parameters.
:rtype: Returns True if the server was started by this call or False if
it was already started (you cannot have more than one started
callback server). | Starts the callback server. | [
"Starts",
"the",
"callback",
"server",
"."
] | def start_callback_server(self, callback_server_parameters=None):
"""Starts the callback server.
:param callback_server_parameters: parameters to use to start the
server. If not provided, it will use the gateway callback server
parameters.
:rtype: Returns True if the server was started by this call or False if
it was already started (you cannot have more than one started
callback server).
"""
if self._callback_server:
return False
if not callback_server_parameters:
callback_server_parameters = self.callback_server_parameters
self._callback_server = CallbackServer(
self.gateway_property.pool, self._gateway_client,
callback_server_parameters=callback_server_parameters)
try:
self._callback_server.start()
except Py4JNetworkError:
# Clean up ourselves before raising the exception.
self.shutdown()
self._callback_server = None
raise
return True | [
"def",
"start_callback_server",
"(",
"self",
",",
"callback_server_parameters",
"=",
"None",
")",
":",
"if",
"self",
".",
"_callback_server",
":",
"return",
"False",
"if",
"not",
"callback_server_parameters",
":",
"callback_server_parameters",
"=",
"self",
".",
"callback_server_parameters",
"self",
".",
"_callback_server",
"=",
"CallbackServer",
"(",
"self",
".",
"gateway_property",
".",
"pool",
",",
"self",
".",
"_gateway_client",
",",
"callback_server_parameters",
"=",
"callback_server_parameters",
")",
"try",
":",
"self",
".",
"_callback_server",
".",
"start",
"(",
")",
"except",
"Py4JNetworkError",
":",
"# Clean up ourselves before raising the exception.",
"self",
".",
"shutdown",
"(",
")",
"self",
".",
"_callback_server",
"=",
"None",
"raise",
"return",
"True"
] | https://github.com/cloudera/hue/blob/23f02102d4547c17c32bd5ea0eb24e9eadd657a4/desktop/core/ext-py/py4j-0.9/src/py4j/java_gateway.py#L1323-L1351 |
|
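A usage sketch for start_callback_server, assuming a JVM-side GatewayServer is already running (without one the gateway calls fail); API names as in py4j 0.9:

from py4j.java_gateway import JavaGateway, CallbackServerParameters

gateway = JavaGateway()  # assumes a GatewayServer is listening on the default port
started = gateway.start_callback_server(
    CallbackServerParameters(daemonize=True))
print(started)   # True on the first call, False if a server was already started
gateway.shutdown()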
zhl2008/awd-platform | 0416b31abea29743387b10b3914581fbe8e7da5e | web_flaskbb/lib/python2.7/site-packages/werkzeug/datastructures.py | python | _CacheControl.to_header | (self) | return dump_header(self) | Convert the stored values into a cache control header. | Convert the stored values into a cache control header. | [
"Convert",
"the",
"stored",
"values",
"into",
"a",
"cache",
"control",
"header",
"."
] | def to_header(self):
"""Convert the stored values into a cache control header."""
return dump_header(self) | [
"def",
"to_header",
"(",
"self",
")",
":",
"return",
"dump_header",
"(",
"self",
")"
] | https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/lib/python2.7/site-packages/werkzeug/datastructures.py#L1941-L1943 |
|
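A round-trip sketch using Werkzeug's response-side subclass of _CacheControl; to_header delegates to dump_header, so the result is the comma-joined directives (ordering may vary by version):

from werkzeug.datastructures import ResponseCacheControl

cc = ResponseCacheControl()
cc.max_age = 3600
cc.no_cache = True
print(cc.to_header())   # e.g. 'no-cache, max-age=3600'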
zhl2008/awd-platform | 0416b31abea29743387b10b3914581fbe8e7da5e | web_flaskbb/Python-2.7.9/Lib/aifc.py | python | Aifc_write.getcomptype | (self) | return self._comptype | [] | def getcomptype(self):
return self._comptype | [
"def",
"getcomptype",
"(",
"self",
")",
":",
"return",
"self",
".",
"_comptype"
] | https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/Python-2.7.9/Lib/aifc.py#L662-L663 |
|||
holzschu/Carnets | 44effb10ddfc6aa5c8b0687582a724ba82c6b547 | Library/lib/python3.7/site-packages/jedi/refactoring.py | python | _rename | (names, replace_str) | return dct | For both rename and inline. | For both rename and inline. | [
"For",
"both",
"rename",
"and",
"inline",
"."
] | def _rename(names, replace_str):
""" For both rename and inline. """
order = sorted(names, key=lambda x: (x.module_path, x.line, x.column),
reverse=True)
def process(path, old_lines, new_lines):
if new_lines is not None: # goto next file, save last
dct[path] = path, old_lines, new_lines
dct = {}
current_path = object()
new_lines = old_lines = None
for name in order:
if name.in_builtin_module():
continue
if current_path != name.module_path:
current_path = name.module_path
process(current_path, old_lines, new_lines)
if current_path is not None:
# None means take the source that is a normal param.
with open(current_path) as f:
source = f.read()
new_lines = split_lines(python_bytes_to_unicode(source))
old_lines = new_lines[:]
nr, indent = name.line, name.column
line = new_lines[nr - 1]
new_lines[nr - 1] = line[:indent] + replace_str + \
line[indent + len(name.name):]
process(current_path, old_lines, new_lines)
return dct | [
"def",
"_rename",
"(",
"names",
",",
"replace_str",
")",
":",
"order",
"=",
"sorted",
"(",
"names",
",",
"key",
"=",
"lambda",
"x",
":",
"(",
"x",
".",
"module_path",
",",
"x",
".",
"line",
",",
"x",
".",
"column",
")",
",",
"reverse",
"=",
"True",
")",
"def",
"process",
"(",
"path",
",",
"old_lines",
",",
"new_lines",
")",
":",
"if",
"new_lines",
"is",
"not",
"None",
":",
"# goto next file, save last",
"dct",
"[",
"path",
"]",
"=",
"path",
",",
"old_lines",
",",
"new_lines",
"dct",
"=",
"{",
"}",
"current_path",
"=",
"object",
"(",
")",
"new_lines",
"=",
"old_lines",
"=",
"None",
"for",
"name",
"in",
"order",
":",
"if",
"name",
".",
"in_builtin_module",
"(",
")",
":",
"continue",
"if",
"current_path",
"!=",
"name",
".",
"module_path",
":",
"current_path",
"=",
"name",
".",
"module_path",
"process",
"(",
"current_path",
",",
"old_lines",
",",
"new_lines",
")",
"if",
"current_path",
"is",
"not",
"None",
":",
"# None means take the source that is a normal param.",
"with",
"open",
"(",
"current_path",
")",
"as",
"f",
":",
"source",
"=",
"f",
".",
"read",
"(",
")",
"new_lines",
"=",
"split_lines",
"(",
"python_bytes_to_unicode",
"(",
"source",
")",
")",
"old_lines",
"=",
"new_lines",
"[",
":",
"]",
"nr",
",",
"indent",
"=",
"name",
".",
"line",
",",
"name",
".",
"column",
"line",
"=",
"new_lines",
"[",
"nr",
"-",
"1",
"]",
"new_lines",
"[",
"nr",
"-",
"1",
"]",
"=",
"line",
"[",
":",
"indent",
"]",
"+",
"replace_str",
"+",
"line",
"[",
"indent",
"+",
"len",
"(",
"name",
".",
"name",
")",
":",
"]",
"process",
"(",
"current_path",
",",
"old_lines",
",",
"new_lines",
")",
"return",
"dct"
] | https://github.com/holzschu/Carnets/blob/44effb10ddfc6aa5c8b0687582a724ba82c6b547/Library/lib/python3.7/site-packages/jedi/refactoring.py#L63-L95 |
|
HiKapok/tf.fashionAI | bc7d26c78e845df4eda0997494a5859cab1ec5de | net/detxt_cpn.py | python | conv2d_fixed_padding | (inputs, filters, kernel_size, strides, data_format, kernel_initializer=tf.glorot_uniform_initializer, name=None) | return tf.layers.conv2d(
inputs=inputs, filters=filters, kernel_size=kernel_size, strides=strides,
padding=('SAME' if strides == 1 else 'VALID'), use_bias=False,
kernel_initializer=kernel_initializer(),
data_format=data_format, name=name) | Strided 2-D convolution with explicit padding. | Strided 2-D convolution with explicit padding. | [
"Strided",
"2",
"-",
"D",
"convolution",
"with",
"explicit",
"padding",
"."
] | def conv2d_fixed_padding(inputs, filters, kernel_size, strides, data_format, kernel_initializer=tf.glorot_uniform_initializer, name=None):
"""Strided 2-D convolution with explicit padding."""
# The padding is consistent and is based only on `kernel_size`, not on the
# dimensions of `inputs` (as opposed to using `tf.layers.conv2d` alone).
if strides > 1:
inputs = fixed_padding(inputs, kernel_size, data_format)
return tf.layers.conv2d(
inputs=inputs, filters=filters, kernel_size=kernel_size, strides=strides,
padding=('SAME' if strides == 1 else 'VALID'), use_bias=False,
kernel_initializer=kernel_initializer(),
data_format=data_format, name=name) | [
"def",
"conv2d_fixed_padding",
"(",
"inputs",
",",
"filters",
",",
"kernel_size",
",",
"strides",
",",
"data_format",
",",
"kernel_initializer",
"=",
"tf",
".",
"glorot_uniform_initializer",
",",
"name",
"=",
"None",
")",
":",
"# The padding is consistent and is based only on `kernel_size`, not on the",
"# dimensions of `inputs` (as opposed to using `tf.layers.conv2d` alone).",
"if",
"strides",
">",
"1",
":",
"inputs",
"=",
"fixed_padding",
"(",
"inputs",
",",
"kernel_size",
",",
"data_format",
")",
"return",
"tf",
".",
"layers",
".",
"conv2d",
"(",
"inputs",
"=",
"inputs",
",",
"filters",
"=",
"filters",
",",
"kernel_size",
"=",
"kernel_size",
",",
"strides",
"=",
"strides",
",",
"padding",
"=",
"(",
"'SAME'",
"if",
"strides",
"==",
"1",
"else",
"'VALID'",
")",
",",
"use_bias",
"=",
"False",
",",
"kernel_initializer",
"=",
"kernel_initializer",
"(",
")",
",",
"data_format",
"=",
"data_format",
",",
"name",
"=",
"name",
")"
] | https://github.com/HiKapok/tf.fashionAI/blob/bc7d26c78e845df4eda0997494a5859cab1ec5de/net/detxt_cpn.py#L67-L78 |
|
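The fixed_padding helper called above is not shown in this record; below is a sketch of the usual ResNet formulation it refers to, assuming 4-D inputs: pad kernel_size - 1 pixels split across both sides, so strided convolutions become independent of the input size.

import tensorflow as tf

def fixed_padding(inputs, kernel_size, data_format):
    pad_total = kernel_size - 1
    pad_beg = pad_total // 2
    pad_end = pad_total - pad_beg
    if data_format == 'channels_first':    # NCHW: pad the two spatial dims
        return tf.pad(inputs, [[0, 0], [0, 0],
                               [pad_beg, pad_end], [pad_beg, pad_end]])
    return tf.pad(inputs, [[0, 0], [pad_beg, pad_end],   # NHWC
                           [pad_beg, pad_end], [0, 0]])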
demisto/content | 5c664a65b992ac8ca90ac3f11b1b2cdf11ee9b07 | Packs/ProofpointThreatResponse/Integrations/ProofpointThreatResponse/ProofpointThreatResponse.py | python | create_incidents_context | (incidents_list) | return context | Parses the incidents list and returns the incidents context
Args:
incidents_list (list): The incidents list to parse
Returns:
list. The context created from the incidents list | Parses the incidents list and returns the incidents context | [
"Parses",
"the",
"incidents",
"list",
"and",
"returns",
"the",
"incidents",
"context"
] | def create_incidents_context(incidents_list):
"""Parses the incidents list and returns the incidents context
Args:
incidents_list (list): The incidents list to parse
Returns:
list. The context created from the incidents list
"""
context = list(incidents_list)
for incident in context:
incident['incident_field_values'] = create_incident_field_context(incident)
if incident.get('events'):
for event in incident['events']:
event['emails'] = get_emails_context(event)
return context | [
"def",
"create_incidents_context",
"(",
"incidents_list",
")",
":",
"context",
"=",
"list",
"(",
"incidents_list",
")",
"for",
"incident",
"in",
"context",
":",
"incident",
"[",
"'incident_field_values'",
"]",
"=",
"create_incident_field_context",
"(",
"incident",
")",
"if",
"incident",
".",
"get",
"(",
"'events'",
")",
":",
"for",
"event",
"in",
"incident",
"[",
"'events'",
"]",
":",
"event",
"[",
"'emails'",
"]",
"=",
"get_emails_context",
"(",
"event",
")",
"return",
"context"
] | https://github.com/demisto/content/blob/5c664a65b992ac8ca90ac3f11b1b2cdf11ee9b07/Packs/ProofpointThreatResponse/Integrations/ProofpointThreatResponse/ProofpointThreatResponse.py#L259-L276 |
|
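A shape-only sketch of create_incidents_context; create_incident_field_context and get_emails_context live elsewhere in the integration, so trivial hypothetical stubs stand in here:

def create_incident_field_context(incident):   # stub for illustration only
    return {'severity': incident.get('severity')}

def get_emails_context(event):                 # stub for illustration only
    return [{'sender': 'user@example.com'}]

incidents = [{'severity': 'high', 'events': [{'id': 1}]}]
ctx = create_incidents_context(incidents)
# each incident gains 'incident_field_values'; each event gains 'emails'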
home-assistant/core | 265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1 | homeassistant/components/syncthru/sensor.py | python | SyncThruMainSensor.extra_state_attributes | (self) | return {
"display_text": self.syncthru.device_status_details(),
} | Show current printer display text. | Show current printer display text. | [
"Show",
"current",
"printer",
"display",
"text",
"."
] | def extra_state_attributes(self):
"""Show current printer display text."""
return {
"display_text": self.syncthru.device_status_details(),
} | [
"def",
"extra_state_attributes",
"(",
"self",
")",
":",
"return",
"{",
"\"display_text\"",
":",
"self",
".",
"syncthru",
".",
"device_status_details",
"(",
")",
",",
"}"
] | https://github.com/home-assistant/core/blob/265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1/homeassistant/components/syncthru/sensor.py#L134-L138 |
|
DamnWidget/anaconda | a9998fb362320f907d5ccbc6fcf5b62baca677c0 | commands/set_python_interpreter.py | python | AnacondaSetPythonInterpreter.save_project_data | (self, data: Dict[str, Any]) | Saves the provided data to the project settings | Saves the provided data to the project settings | [
"Saves",
"the",
"provided",
"data",
"to",
"the",
"project",
"settings"
] | def save_project_data(self, data: Dict[str, Any]) -> None:
"""Saves the provided data to the project settings"""
sublime.active_window().set_project_data(data)
sublime.status_message("Python path is set successfully") | [
"def",
"save_project_data",
"(",
"self",
",",
"data",
":",
"Dict",
"[",
"str",
",",
"Any",
"]",
")",
"->",
"None",
":",
"sublime",
".",
"active_window",
"(",
")",
".",
"set_project_data",
"(",
"data",
")",
"sublime",
".",
"status_message",
"(",
"\"Python path is set successfuly\"",
")"
] | https://github.com/DamnWidget/anaconda/blob/a9998fb362320f907d5ccbc6fcf5b62baca677c0/commands/set_python_interpreter.py#L55-L58 |
||
pymedusa/Medusa | 1405fbb6eb8ef4d20fcca24c32ddca52b11f0f38 | ext/boto/s3/bucket.py | python | Bucket.get_all_multipart_uploads | (self, headers=None, **params) | return self._get_all([('Upload', MultiPartUpload),
('CommonPrefixes', Prefix)],
'uploads', headers, **params) | A lower-level, version-aware method for listing active
MultiPart uploads for a bucket. This closely models the
actual S3 API and requires you to manually handle the paging
of results. For a higher-level method that handles the
details of paging for you, you can use the list method.
:type max_uploads: int
:param max_uploads: The maximum number of uploads to retrieve.
Default value is 1000.
:type key_marker: string
:param key_marker: Together with upload_id_marker, this
parameter specifies the multipart upload after which
listing should begin. If upload_id_marker is not
specified, only the keys lexicographically greater than
the specified key_marker will be included in the list.
If upload_id_marker is specified, any multipart uploads
for a key equal to the key_marker might also be included,
provided those multipart uploads have upload IDs
lexicographically greater than the specified
upload_id_marker.
:type upload_id_marker: string
:param upload_id_marker: Together with key-marker, specifies
the multipart upload after which listing should begin. If
key_marker is not specified, the upload_id_marker
parameter is ignored. Otherwise, any multipart uploads
for a key equal to the key_marker might be included in the
list only if they have an upload ID lexicographically
greater than the specified upload_id_marker.
:type encoding_type: string
:param encoding_type: Requests Amazon S3 to encode the response and
specifies the encoding method to use.
An object key can contain any Unicode character; however, XML 1.0
parser cannot parse some characters, such as characters with an
ASCII value from 0 to 10. For characters that are not supported in
XML 1.0, you can add this parameter to request that Amazon S3
encode the keys in the response.
Valid options: ``url``
:type delimiter: string
:param delimiter: Character you use to group keys.
All keys that contain the same string between the prefix, if
specified, and the first occurrence of the delimiter after the
prefix are grouped under a single result element, CommonPrefixes.
If you don't specify the prefix parameter, then the substring
starts at the beginning of the key. The keys that are grouped
under CommonPrefixes result element are not returned elsewhere
in the response.
:type prefix: string
:param prefix: Lists in-progress uploads only for those keys that
begin with the specified prefix. You can use prefixes to separate
a bucket into different grouping of keys. (You can think of using
prefix to make groups in the same way you'd use a folder in a
file system.)
:rtype: ResultSet
:return: The result from S3 listing the uploads requested | A lower-level, version-aware method for listing active
MultiPart uploads for a bucket. This closely models the
actual S3 API and requires you to manually handle the paging
of results. For a higher-level method that handles the
details of paging for you, you can use the list method. | [
"A",
"lower",
"-",
"level",
"version",
"-",
"aware",
"method",
"for",
"listing",
"active",
"MultiPart",
"uploads",
"for",
"a",
"bucket",
".",
"This",
"closely",
"models",
"the",
"actual",
"S3",
"API",
"and",
"requires",
"you",
"to",
"manually",
"handle",
"the",
"paging",
"of",
"results",
".",
"For",
"a",
"higher",
"-",
"level",
"method",
"that",
"handles",
"the",
"details",
"of",
"paging",
"for",
"you",
"you",
"can",
"use",
"the",
"list",
"method",
"."
] | def get_all_multipart_uploads(self, headers=None, **params):
"""
A lower-level, version-aware method for listing active
MultiPart uploads for a bucket. This closely models the
actual S3 API and requires you to manually handle the paging
of results. For a higher-level method that handles the
details of paging for you, you can use the list method.
:type max_uploads: int
:param max_uploads: The maximum number of uploads to retrieve.
Default value is 1000.
:type key_marker: string
:param key_marker: Together with upload_id_marker, this
parameter specifies the multipart upload after which
listing should begin. If upload_id_marker is not
specified, only the keys lexicographically greater than
the specified key_marker will be included in the list.
If upload_id_marker is specified, any multipart uploads
for a key equal to the key_marker might also be included,
provided those multipart uploads have upload IDs
lexicographically greater than the specified
upload_id_marker.
:type upload_id_marker: string
:param upload_id_marker: Together with key-marker, specifies
the multipart upload after which listing should begin. If
key_marker is not specified, the upload_id_marker
parameter is ignored. Otherwise, any multipart uploads
for a key equal to the key_marker might be included in the
list only if they have an upload ID lexicographically
greater than the specified upload_id_marker.
:type encoding_type: string
:param encoding_type: Requests Amazon S3 to encode the response and
specifies the encoding method to use.
An object key can contain any Unicode character; however, XML 1.0
parser cannot parse some characters, such as characters with an
ASCII value from 0 to 10. For characters that are not supported in
XML 1.0, you can add this parameter to request that Amazon S3
encode the keys in the response.
Valid options: ``url``
:type delimiter: string
:param delimiter: Character you use to group keys.
All keys that contain the same string between the prefix, if
specified, and the first occurrence of the delimiter after the
prefix are grouped under a single result element, CommonPrefixes.
If you don't specify the prefix parameter, then the substring
starts at the beginning of the key. The keys that are grouped
under CommonPrefixes result element are not returned elsewhere
in the response.
:type prefix: string
:param prefix: Lists in-progress uploads only for those keys that
begin with the specified prefix. You can use prefixes to separate
a bucket into different grouping of keys. (You can think of using
prefix to make groups in the same way you'd use a folder in a
file system.)
:rtype: ResultSet
:return: The result from S3 listing the uploads requested
"""
self.validate_kwarg_names(params, ['max_uploads', 'key_marker',
'upload_id_marker', 'encoding_type',
'delimiter', 'prefix'])
return self._get_all([('Upload', MultiPartUpload),
('CommonPrefixes', Prefix)],
'uploads', headers, **params) | [
"def",
"get_all_multipart_uploads",
"(",
"self",
",",
"headers",
"=",
"None",
",",
"*",
"*",
"params",
")",
":",
"self",
".",
"validate_kwarg_names",
"(",
"params",
",",
"[",
"'max_uploads'",
",",
"'key_marker'",
",",
"'upload_id_marker'",
",",
"'encoding_type'",
",",
"'delimiter'",
",",
"'prefix'",
"]",
")",
"return",
"self",
".",
"_get_all",
"(",
"[",
"(",
"'Upload'",
",",
"MultiPartUpload",
")",
",",
"(",
"'CommonPrefixes'",
",",
"Prefix",
")",
"]",
",",
"'uploads'",
",",
"headers",
",",
"*",
"*",
"params",
")"
] | https://github.com/pymedusa/Medusa/blob/1405fbb6eb8ef4d20fcca24c32ddca52b11f0f38/ext/boto/s3/bucket.py#L539-L611 |
|
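A usage sketch in boto 2 style; it needs live AWS credentials and an existing bucket, and the bucket name and prefix here are hypothetical:

import boto

bucket = boto.connect_s3().get_bucket('my-bucket')   # hypothetical bucket
for upload in bucket.get_all_multipart_uploads(prefix='logs/', max_uploads=100):
    print(upload.key_name, upload.id)   # in-progress uploads under logs/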
UniShared/videonotes | 803cdd97b90823fb17f50dd55999aa7d1fec6c3a | lib/apiclient/model.py | python | JsonModel.__init__ | (self, data_wrapper=False) | Construct a JsonModel.
Args:
data_wrapper: boolean, wrap requests and responses in a data wrapper | Construct a JsonModel. | [
"Construct",
"a",
"JsonModel",
"."
] | def __init__(self, data_wrapper=False):
"""Construct a JsonModel.
Args:
data_wrapper: boolean, wrap requests and responses in a data wrapper
"""
self._data_wrapper = data_wrapper | [
"def",
"__init__",
"(",
"self",
",",
"data_wrapper",
"=",
"False",
")",
":",
"self",
".",
"_data_wrapper",
"=",
"data_wrapper"
] | https://github.com/UniShared/videonotes/blob/803cdd97b90823fb17f50dd55999aa7d1fec6c3a/lib/apiclient/model.py#L248-L254 |
||
lovelylain/pyctp | fd304de4b50c4ddc31a4190b1caaeb5dec66bc5d | futures/ctp/ApiStruct.py | python | FutureSignIO.__init__ | (self, TradeCode='', BankID='', BankBranchID='', BrokerID='', BrokerBranchID='', TradeDate='', TradeTime='', BankSerial='', TradingDay='', PlateSerial=0, LastFragment=LF_Yes, SessionID=0, InstallID=0, UserID='', Digest='', CurrencyID='', DeviceID='', BrokerIDByBank='', OperNo='', RequestID=0, TID=0) | [] | def __init__(self, TradeCode='', BankID='', BankBranchID='', BrokerID='', BrokerBranchID='', TradeDate='', TradeTime='', BankSerial='', TradingDay='', PlateSerial=0, LastFragment=LF_Yes, SessionID=0, InstallID=0, UserID='', Digest='', CurrencyID='', DeviceID='', BrokerIDByBank='', OperNo='', RequestID=0, TID=0):
self.TradeCode = '' #Business function code, char[7]
self.BankID = '' #Bank code, char[4]
self.BankBranchID = 'BankBrchID' #Bank branch code, char[5]
self.BrokerID = '' #Futures broker code, char[11]
self.BrokerBranchID = 'FutureBranchID' #Futures broker branch code, char[31]
self.TradeDate = '' #Trade date, char[9]
self.TradeTime = '' #Trade time, char[9]
self.BankSerial = '' #Bank serial number, char[13]
self.TradingDay = 'TradeDate' #Trading system date, char[9]
self.PlateSerial = 'Serial' #Bank-futures platform message serial number, int
self.LastFragment = '' #Last fragment flag, char
self.SessionID = '' #Session ID, int
self.InstallID = '' #Install ID, int
self.UserID = '' #User ID, char[16]
self.Digest = '' #Digest, char[36]
self.CurrencyID = '' #Currency code, char[4]
self.DeviceID = '' #Channel flag, char[3]
self.BrokerIDByBank = 'BankCodingForFuture' #Futures company bank code, char[33]
self.OperNo = '' #Trading operator, char[17]
self.RequestID = '' #Request ID, int
self.TID = '' | [
"def",
"__init__",
"(",
"self",
",",
"TradeCode",
"=",
"''",
",",
"BankID",
"=",
"''",
",",
"BankBranchID",
"=",
"''",
",",
"BrokerID",
"=",
"''",
",",
"BrokerBranchID",
"=",
"''",
",",
"TradeDate",
"=",
"''",
",",
"TradeTime",
"=",
"''",
",",
"BankSerial",
"=",
"''",
",",
"TradingDay",
"=",
"''",
",",
"PlateSerial",
"=",
"0",
",",
"LastFragment",
"=",
"LF_Yes",
",",
"SessionID",
"=",
"0",
",",
"InstallID",
"=",
"0",
",",
"UserID",
"=",
"''",
",",
"Digest",
"=",
"''",
",",
"CurrencyID",
"=",
"''",
",",
"DeviceID",
"=",
"''",
",",
"BrokerIDByBank",
"=",
"''",
",",
"OperNo",
"=",
"''",
",",
"RequestID",
"=",
"0",
",",
"TID",
"=",
"0",
")",
":",
"self",
".",
"TradeCode",
"=",
"''",
"#业务功能码, char[7]",
"self",
".",
"BankID",
"=",
"''",
"#银行代码, char[4]",
"self",
".",
"BankBranchID",
"=",
"'BankBrchID'",
"#银行分支机构代码, char[5]",
"self",
".",
"BrokerID",
"=",
"''",
"#期商代码, char[11]",
"self",
".",
"BrokerBranchID",
"=",
"'FutureBranchID'",
"#期商分支机构代码, char[31]",
"self",
".",
"TradeDate",
"=",
"''",
"#交易日期, char[9]",
"self",
".",
"TradeTime",
"=",
"''",
"#交易时间, char[9]",
"self",
".",
"BankSerial",
"=",
"''",
"#银行流水号, char[13]",
"self",
".",
"TradingDay",
"=",
"'TradeDate'",
"#交易系统日期 , char[9]",
"self",
".",
"PlateSerial",
"=",
"'Serial'",
"#银期平台消息流水号, int",
"self",
".",
"LastFragment",
"=",
"''",
"#最后分片标志, char",
"self",
".",
"SessionID",
"=",
"''",
"#会话号, int",
"self",
".",
"InstallID",
"=",
"''",
"#安装编号, int",
"self",
".",
"UserID",
"=",
"''",
"#用户标识, char[16]",
"self",
".",
"Digest",
"=",
"''",
"#摘要, char[36]",
"self",
".",
"CurrencyID",
"=",
"''",
"#币种代码, char[4]",
"self",
".",
"DeviceID",
"=",
"''",
"#渠道标志, char[3]",
"self",
".",
"BrokerIDByBank",
"=",
"'BankCodingForFuture'",
"#期货公司银行编码, char[33]",
"self",
".",
"OperNo",
"=",
"''",
"#交易柜员, char[17]",
"self",
".",
"RequestID",
"=",
"''",
"#请求编号, int",
"self",
".",
"TID",
"=",
"''"
] | https://github.com/lovelylain/pyctp/blob/fd304de4b50c4ddc31a4190b1caaeb5dec66bc5d/futures/ctp/ApiStruct.py#L5176-L5197 |
||||
beeware/ouroboros | a29123c6fab6a807caffbb7587cf548e0c370296 | ouroboros/tkinter/ttk.py | python | Treeview.bbox | (self, item, column=None) | return self._getints(self.tk.call(self._w, "bbox", item, column)) or '' | Returns the bounding box (relative to the treeview widget's
window) of the specified item in the form x y width height.
If column is specified, returns the bounding box of that cell.
If the item is not visible (i.e., if it is a descendant of a
closed item or is scrolled offscreen), returns an empty string. | Returns the bounding box (relative to the treeview widget's
window) of the specified item in the form x y width height. | [
"Returns",
"the",
"bounding",
"box",
"(",
"relative",
"to",
"the",
"treeview",
"widget",
"s",
"window",
")",
"of",
"the",
"specified",
"item",
"in",
"the",
"form",
"x",
"y",
"width",
"height",
"."
] | def bbox(self, item, column=None):
"""Returns the bounding box (relative to the treeview widget's
window) of the specified item in the form x y width height.
If column is specified, returns the bounding box of that cell.
If the item is not visible (i.e., if it is a descendant of a
closed item or is scrolled offscreen), returns an empty string."""
return self._getints(self.tk.call(self._w, "bbox", item, column)) or '' | [
"def",
"bbox",
"(",
"self",
",",
"item",
",",
"column",
"=",
"None",
")",
":",
"return",
"self",
".",
"_getints",
"(",
"self",
".",
"tk",
".",
"call",
"(",
"self",
".",
"_w",
",",
"\"bbox\"",
",",
"item",
",",
"column",
")",
")",
"or",
"''"
] | https://github.com/beeware/ouroboros/blob/a29123c6fab6a807caffbb7587cf548e0c370296/ouroboros/tkinter/ttk.py#L1178-L1185 |
|
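A minimal sketch for Treeview.bbox; it requires a display, and the empty-string result mentioned in the docstring shows up until the item has actually been drawn:

import tkinter as tk
from tkinter import ttk

root = tk.Tk()
tree = ttk.Treeview(root, columns=('size',))
item = tree.insert('', 'end', text='demo', values=('42',))
tree.pack()
root.update()            # let the widget be mapped and laid out
print(tree.bbox(item))   # (x, y, width, height), or '' if not yet visible
root.destroy()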
bootphon/phonemizer | b79c7be58e5143cf3c7a1461ded6e873fff2bc0e | phonemizer/backend/festival/festival.py | python | FestivalBackend.executable | (cls) | return pathlib.Path(executable).resolve() | Returns the absolute path to the festival executable used as backend
The following precedence rule applies for executable lookup:
1. As specified by FestivalBackend.set_executable()
2. Or as specified by the environment variable
PHONEMIZER_FESTIVAL_EXECUTABLE
3. Or the default 'festival' binary found on the system with
`shutil.which('festival')`
Raises
------
RuntimeError if the festival executable cannot be found or if the
environment variable PHONEMIZER_FESTIVAL_EXECUTABLE is set to a
non-executable file | Returns the absolute path to the festival executable used as backend | [
"Returns",
"the",
"absolute",
"path",
"to",
"the",
"festival",
"executable",
"used",
"as",
"backend"
] | def executable(cls):
"""Returns the absolute path to the festival executable used as backend
The following precedence rule applies for executable lookup:
1. As specified by FestivalBackend.set_executable()
2. Or as specified by the environment variable
PHONEMIZER_FESTIVAL_EXECUTABLE
3. Or the default 'festival' binary found on the system with
`shutil.which('festival')`
Raises
------
RuntimeError if the festival executable cannot be found or if the
environment variable PHONEMIZER_FESTIVAL_EXECUTABLE is set to a
non-executable file
"""
if cls._FESTIVAL_EXECUTABLE:
return cls._FESTIVAL_EXECUTABLE
if 'PHONEMIZER_FESTIVAL_EXECUTABLE' in os.environ:
executable = pathlib.Path(os.environ[
'PHONEMIZER_FESTIVAL_EXECUTABLE'])
if not (
executable.is_file()
and os.access(executable, mode=os.X_OK)
):
raise RuntimeError(
f'PHONEMIZER_FESTIVAL_EXECUTABLE={executable} '
f'is not an executable file')
return executable.resolve()
executable = shutil.which('festival')
if not executable: # pragma: nocover
raise RuntimeError(
'failed to find festival executable')
return pathlib.Path(executable).resolve() | [
"def",
"executable",
"(",
"cls",
")",
":",
"if",
"cls",
".",
"_FESTIVAL_EXECUTABLE",
":",
"return",
"cls",
".",
"_FESTIVAL_EXECUTABLE",
"if",
"'PHONEMIZER_FESTIVAL_EXECUTABLE'",
"in",
"os",
".",
"environ",
":",
"executable",
"=",
"pathlib",
".",
"Path",
"(",
"os",
".",
"environ",
"[",
"'PHONEMIZER_FESTIVAL_EXECUTABLE'",
"]",
")",
"if",
"not",
"(",
"executable",
".",
"is_file",
"(",
")",
"and",
"os",
".",
"access",
"(",
"executable",
",",
"mode",
"=",
"os",
".",
"X_OK",
")",
")",
":",
"raise",
"RuntimeError",
"(",
"f'PHONEMIZER_FESTIVAL_EXECUTABLE={executable} '",
"f'is not an executable file'",
")",
"return",
"executable",
".",
"resolve",
"(",
")",
"executable",
"=",
"shutil",
".",
"which",
"(",
"'festival'",
")",
"if",
"not",
"executable",
":",
"# pragma: nocover",
"raise",
"RuntimeError",
"(",
"'failed to find festival executable'",
")",
"return",
"pathlib",
".",
"Path",
"(",
"executable",
")",
".",
"resolve",
"(",
")"
] | https://github.com/bootphon/phonemizer/blob/b79c7be58e5143cf3c7a1461ded6e873fff2bc0e/phonemizer/backend/festival/festival.py#L92-L129 |
|
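A sketch of two of the lookup paths described above; the binary path is hypothetical and must point at an existing executable file, otherwise a RuntimeError is raised:

import os
from phonemizer.backend import FestivalBackend

os.environ['PHONEMIZER_FESTIVAL_EXECUTABLE'] = '/opt/festival/bin/festival'
print(FestivalBackend.executable())   # resolved via the environment variable

del os.environ['PHONEMIZER_FESTIVAL_EXECUTABLE']
print(FestivalBackend.executable())   # falls back to shutil.which('festival')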
sahana/eden | 1696fa50e90ce967df69f66b571af45356cc18da | modules/templates/UCCE/controllers.py | python | dc_TargetName.apply_method | (self, r, **attr) | return output | Entry point for REST API
Args:
r: the S3Request
attr: controller arguments | Entry point for REST API | [
"Entry",
"point",
"for",
"REST",
"API"
] | def apply_method(self, r, **attr):
"""
Entry point for REST API
Args:
r: the S3Request
attr: controller arguments
"""
if r.name == "target":
if r.http == "POST" and r.representation == "json":
# AJAX method
# Action the request
table = r.table
target_id = r.id
if not current.auth.s3_has_permission("update", table, record_id=target_id):
r.unauthorised()
# Update Name
name = r.post_vars.get("name")
if name:
db = current.db
s3db = current.s3db
# Update Target
db(table.id == target_id).update(name = name)
# Update Template
ttable = s3db.dc_template
template = db(ttable.id == r.record.template_id).select(ttable.id,
ttable.table_id,
limitby = (0, 1)
).first()
template.update_record(name = name)
# Update Dynamic Table
# (UCCE's mobile app uses s3_table.title for the Survey name...which works since 1 Template == 1 Target, beyond UCCE this won't be possible)
dtable = s3db.s3_table
db(dtable.id == template.table_id).update(title = name)
# Message
current.response.headers["Content-Type"] = "application/json"
output = current.xml.json_message(True, 200, current.T("Survey Renamed"))
else:
r.error(400, current.T("Invalid Parameters"))
else:
r.error(415, current.ERROR.BAD_FORMAT)
else:
r.error(404, current.ERROR.BAD_RESOURCE)
return output | [
"def",
"apply_method",
"(",
"self",
",",
"r",
",",
"*",
"*",
"attr",
")",
":",
"if",
"r",
".",
"name",
"==",
"\"target\"",
":",
"if",
"r",
".",
"http",
"==",
"\"POST\"",
"and",
"r",
".",
"representation",
"==",
"\"json\"",
":",
"# AJAX method",
"# Action the request",
"table",
"=",
"r",
".",
"table",
"target_id",
"=",
"r",
".",
"id",
"if",
"not",
"current",
".",
"auth",
".",
"s3_has_permission",
"(",
"\"update\"",
",",
"table",
",",
"record_id",
"=",
"target_id",
")",
":",
"r",
".",
"unauthorised",
"(",
")",
"# Update Name",
"name",
"=",
"r",
".",
"post_vars",
".",
"get",
"(",
"\"name\"",
")",
"if",
"name",
":",
"db",
"=",
"current",
".",
"db",
"s3db",
"=",
"current",
".",
"s3db",
"# Update Target",
"db",
"(",
"table",
".",
"id",
"==",
"target_id",
")",
".",
"update",
"(",
"name",
"=",
"name",
")",
"# Update Template",
"ttable",
"=",
"s3db",
".",
"dc_template",
"template",
"=",
"db",
"(",
"ttable",
".",
"id",
"==",
"r",
".",
"record",
".",
"template_id",
")",
".",
"select",
"(",
"ttable",
".",
"id",
",",
"ttable",
".",
"table_id",
",",
"limitby",
"=",
"(",
"0",
",",
"1",
")",
")",
".",
"first",
"(",
")",
"template",
".",
"update_record",
"(",
"name",
"=",
"name",
")",
"# Update Dynamic Table",
"# (UCCE's mobile app uses s3_table.title for the Survey name...which works since 1 Template == 1 Target, beyond UCCE this won't be possible)",
"dtable",
"=",
"s3db",
".",
"s3_table",
"db",
"(",
"dtable",
".",
"id",
"==",
"template",
".",
"table_id",
")",
".",
"update",
"(",
"title",
"=",
"name",
")",
"# Message",
"current",
".",
"response",
".",
"headers",
"[",
"\"Content-Type\"",
"]",
"=",
"\"application/json\"",
"output",
"=",
"current",
".",
"xml",
".",
"json_message",
"(",
"True",
",",
"200",
",",
"current",
".",
"T",
"(",
"\"Survey Renamed\"",
")",
")",
"else",
":",
"r",
".",
"error",
"(",
"400",
",",
"current",
".",
"T",
"(",
"\"Invalid Parameters\"",
")",
")",
"else",
":",
"r",
".",
"error",
"(",
"415",
",",
"current",
".",
"ERROR",
".",
"BAD_FORMAT",
")",
"else",
":",
"r",
".",
"error",
"(",
"404",
",",
"current",
".",
"ERROR",
".",
"BAD_RESOURCE",
")",
"return",
"output"
] | https://github.com/sahana/eden/blob/1696fa50e90ce967df69f66b571af45356cc18da/modules/templates/UCCE/controllers.py#L1198-L1247 |
|
pythad/selenium_extensions | b56639994f2dd2063361bee4677e8f341e83812a | selenium_extensions/helpers.py | python | element_has_gone_stale | (element) | Checks if element has gone stale
Args:
element (selenium.webdriver.remote.webelement.WebElement): Selenium webelement to check for.
Returns:
bool: True if element has gone stale, False otherwise.
Examples:
::
from selenium_extensions.helpers import element_has_gone_stale
if element_has_gone_stale(your_element):
pass # Do something
::
from selenium_extensions.helpers import wait_for_function_truth
from selenium_extensions.helpers import element_has_gone_stale
login_btn = driver.find_element_by_class_name('login_btn')
wait_for_function_truth(element_has_gone_stale, element) | Checks if element has gone stale | [
"Checks",
"if",
"element",
"has",
"gone",
"stale"
] | def element_has_gone_stale(element):
'''Checks if element has gone stale
Args:
element (selenium.webdriver.remote.webelement.WebElement): Selenium webelement to check for.
Returns:
bool: True if element has gone stale, False otherwise.
Examples:
::
from selenium_extensions.helpers import element_has_gone_stale
if element_has_gone_stale(your_element):
pass # Do something
::
from selenium_extensions.helpers import wait_for_function_truth
from selenium_extensions.helpers import element_has_gone_stale
login_btn = driver.find_element_by_class_name('login_btn')
wait_for_function_truth(element_has_gone_stale, element)
'''
try:
# Poll the object with an arbitrary call
element.find_elements_by_id('non-existing-id')
return False
except StaleElementReferenceException:
return True | [
"def",
"element_has_gone_stale",
"(",
"element",
")",
":",
"try",
":",
"# Poll the object with an arbitrary call",
"element",
".",
"find_elements_by_id",
"(",
"'non-existing-id'",
")",
"return",
"False",
"except",
"StaleElementReferenceException",
":",
"return",
"True"
] | https://github.com/pythad/selenium_extensions/blob/b56639994f2dd2063361bee4677e8f341e83812a/selenium_extensions/helpers.py#L27-L59 |
||
holzschu/Carnets | 44effb10ddfc6aa5c8b0687582a724ba82c6b547 | Library/lib/python3.7/site-packages/pip/_vendor/pyparsing.py | python | withClass | (classname, namespace='') | return withAttribute(**{classattr : classname}) | Simplified version of :class:`withAttribute` when
matching on a div class - made difficult because ``class`` is
a reserved word in Python.
Example::
html = '''
<div>
Some text
<div class="grid">1 4 0 1 0</div>
<div class="graph">1,3 2,3 1,1</div>
<div>this <div> has no class</div>
</div>
'''
div,div_end = makeHTMLTags("div")
div_grid = div().setParseAction(withClass("grid"))
grid_expr = div_grid + SkipTo(div | div_end)("body")
for grid_header in grid_expr.searchString(html):
print(grid_header.body)
div_any_type = div().setParseAction(withClass(withAttribute.ANY_VALUE))
div_expr = div_any_type + SkipTo(div | div_end)("body")
for div_header in div_expr.searchString(html):
print(div_header.body)
prints::
1 4 0 1 0
1 4 0 1 0
1,3 2,3 1,1 | Simplified version of :class:`withAttribute` when
matching on a div class - made difficult because ``class`` is
a reserved word in Python. | [
"Simplified",
"version",
"of",
":",
"class",
":",
"withAttribute",
"when",
"matching",
"on",
"a",
"div",
"class",
"-",
"made",
"difficult",
"because",
"class",
"is",
"a",
"reserved",
"word",
"in",
"Python",
"."
] | def withClass(classname, namespace=''):
"""Simplified version of :class:`withAttribute` when
matching on a div class - made difficult because ``class`` is
a reserved word in Python.
Example::
html = '''
<div>
Some text
<div class="grid">1 4 0 1 0</div>
<div class="graph">1,3 2,3 1,1</div>
<div>this <div> has no class</div>
</div>
'''
div,div_end = makeHTMLTags("div")
div_grid = div().setParseAction(withClass("grid"))
grid_expr = div_grid + SkipTo(div | div_end)("body")
for grid_header in grid_expr.searchString(html):
print(grid_header.body)
div_any_type = div().setParseAction(withClass(withAttribute.ANY_VALUE))
div_expr = div_any_type + SkipTo(div | div_end)("body")
for div_header in div_expr.searchString(html):
print(div_header.body)
prints::
1 4 0 1 0
1 4 0 1 0
1,3 2,3 1,1
"""
classattr = "%s:class" % namespace if namespace else "class"
return withAttribute(**{classattr : classname}) | [
"def",
"withClass",
"(",
"classname",
",",
"namespace",
"=",
"''",
")",
":",
"classattr",
"=",
"\"%s:class\"",
"%",
"namespace",
"if",
"namespace",
"else",
"\"class\"",
"return",
"withAttribute",
"(",
"*",
"*",
"{",
"classattr",
":",
"classname",
"}",
")"
] | https://github.com/holzschu/Carnets/blob/44effb10ddfc6aa5c8b0687582a724ba82c6b547/Library/lib/python3.7/site-packages/pip/_vendor/pyparsing.py#L5531-L5567 |
|
nltk/nltk_contrib | c9da2c29777ca9df650740145f1f4a375ccac961 | nltk_contrib/mit/six863/semantics/featurelite.py | python | Variable.name | (self) | return self._name | @return: This variable's name.
@rtype: C{string} | [] | def name(self):
"""
@return: This variable's name.
@rtype: C{string}
"""
return self._name | [
"def",
"name",
"(",
"self",
")",
":",
"return",
"self",
".",
"_name"
] | https://github.com/nltk/nltk_contrib/blob/c9da2c29777ca9df650740145f1f4a375ccac961/nltk_contrib/mit/six863/semantics/featurelite.py#L144-L149 |
||
edisonlz/fastor | 342078a18363ac41d3c6b1ab29dbdd44fdb0b7b3 | base/site-packages/tencentcloud/mariadb/v20170312/models.py | python | ModifyDBParametersRequest.__init__ | (self) | | :param InstanceId: Instance ID, e.g. tdsql-ow728lmc.
:type InstanceId: str
:param Params: Parameter list; each element is a combination of Param and Value
:type Params: list of DBParamValue | :param InstanceId: Instance ID, e.g. tdsql-ow728lmc.
:type InstanceId: str
:param Params: Parameter list; each element is a combination of Param and Value
:type Params: list of DBParamValue | [
":",
"param",
"InstanceId",
":",
"实例",
"ID,形如:tdsql",
"-",
"ow728lmc。",
":",
"type",
"InstanceId",
":",
"str",
":",
"param",
"Params",
":",
"参数列表,每一个元素是Param和Value的组合",
":",
"type",
"Params",
":",
"list",
"of",
"DBParamValue"
] | def __init__(self):
"""
:param InstanceId: Instance ID, e.g. tdsql-ow728lmc.
:type InstanceId: str
:param Params: Parameter list; each element is a combination of Param and Value
:type Params: list of DBParamValue
"""
self.InstanceId = None
self.Params = None | [
"def",
"__init__",
"(",
"self",
")",
":",
"self",
".",
"InstanceId",
"=",
"None",
"self",
".",
"Params",
"=",
"None"
] | https://github.com/edisonlz/fastor/blob/342078a18363ac41d3c6b1ab29dbdd44fdb0b7b3/base/site-packages/tencentcloud/mariadb/v20170312/models.py#L1999-L2007 |
||
Yelp/Tron | d60b015163418bf66f638e4c12337289ad8c040a | tron/serialize/runstate/statemanager.py | python | PersistentStateManager.disabled | (self) | Temporarily disable the state manager. | Temporarily disable the state manager. | [
"Temporarily",
"disable",
"the",
"state",
"manager",
"."
] | def disabled(self):
"""Temporarily disable the state manager."""
self.enabled, prev_enabled = False, self.enabled
try:
yield
finally:
self.enabled = prev_enabled | [
"def",
"disabled",
"(",
"self",
")",
":",
"self",
".",
"enabled",
",",
"prev_enabled",
"=",
"False",
",",
"self",
".",
"enabled",
"try",
":",
"yield",
"finally",
":",
"self",
".",
"enabled",
"=",
"prev_enabled"
] | https://github.com/Yelp/Tron/blob/d60b015163418bf66f638e4c12337289ad8c040a/tron/serialize/runstate/statemanager.py#L218-L224 |
||
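A self-contained analog of the save-and-restore idiom above: the flag is flipped off for the duration of the block and restored even if the block raises:

from contextlib import contextmanager

class Manager:
    enabled = True

    @contextmanager
    def disabled(self):
        self.enabled, prev_enabled = False, self.enabled
        try:
            yield
        finally:
            self.enabled = prev_enabled   # restored on normal exit or exception

m = Manager()
with m.disabled():
    assert m.enabled is False
assert m.enabled is True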
pypa/pipenv | b21baade71a86ab3ee1429f71fbc14d4f95fb75d | pipenv/vendor/requests/adapters.py | python | HTTPAdapter.cert_verify | (self, conn, url, verify, cert) | Verify a SSL certificate. This method should not be called from user
code, and is only exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param conn: The urllib3 connection object associated with the cert.
:param url: The requested URL.
:param verify: Either a boolean, in which case it controls whether we verify
the server's TLS certificate, or a string, in which case it must be a path
to a CA bundle to use
:param cert: The SSL certificate to verify. | Verify a SSL certificate. This method should not be called from user
code, and is only exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. | [
"Verify",
"a",
"SSL",
"certificate",
".",
"This",
"method",
"should",
"not",
"be",
"called",
"from",
"user",
"code",
"and",
"is",
"only",
"exposed",
"for",
"use",
"when",
"subclassing",
"the",
":",
"class",
":",
"HTTPAdapter",
"<requests",
".",
"adapters",
".",
"HTTPAdapter",
">",
"."
] | def cert_verify(self, conn, url, verify, cert):
"""Verify a SSL certificate. This method should not be called from user
code, and is only exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param conn: The urllib3 connection object associated with the cert.
:param url: The requested URL.
:param verify: Either a boolean, in which case it controls whether we verify
the server's TLS certificate, or a string, in which case it must be a path
to a CA bundle to use
:param cert: The SSL certificate to verify.
"""
if url.lower().startswith('https') and verify:
cert_loc = None
# Allow self-specified cert location.
if verify is not True:
cert_loc = verify
if not cert_loc:
cert_loc = extract_zipped_paths(DEFAULT_CA_BUNDLE_PATH)
if not cert_loc or not os.path.exists(cert_loc):
raise IOError("Could not find a suitable TLS CA certificate bundle, "
"invalid path: {}".format(cert_loc))
conn.cert_reqs = 'CERT_REQUIRED'
if not os.path.isdir(cert_loc):
conn.ca_certs = cert_loc
else:
conn.ca_cert_dir = cert_loc
else:
conn.cert_reqs = 'CERT_NONE'
conn.ca_certs = None
conn.ca_cert_dir = None
if cert:
if not isinstance(cert, basestring):
conn.cert_file = cert[0]
conn.key_file = cert[1]
else:
conn.cert_file = cert
conn.key_file = None
if conn.cert_file and not os.path.exists(conn.cert_file):
raise IOError("Could not find the TLS certificate file, "
"invalid path: {}".format(conn.cert_file))
if conn.key_file and not os.path.exists(conn.key_file):
raise IOError("Could not find the TLS key file, "
"invalid path: {}".format(conn.key_file)) | [
"def",
"cert_verify",
"(",
"self",
",",
"conn",
",",
"url",
",",
"verify",
",",
"cert",
")",
":",
"if",
"url",
".",
"lower",
"(",
")",
".",
"startswith",
"(",
"'https'",
")",
"and",
"verify",
":",
"cert_loc",
"=",
"None",
"# Allow self-specified cert location.",
"if",
"verify",
"is",
"not",
"True",
":",
"cert_loc",
"=",
"verify",
"if",
"not",
"cert_loc",
":",
"cert_loc",
"=",
"extract_zipped_paths",
"(",
"DEFAULT_CA_BUNDLE_PATH",
")",
"if",
"not",
"cert_loc",
"or",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"cert_loc",
")",
":",
"raise",
"IOError",
"(",
"\"Could not find a suitable TLS CA certificate bundle, \"",
"\"invalid path: {}\"",
".",
"format",
"(",
"cert_loc",
")",
")",
"conn",
".",
"cert_reqs",
"=",
"'CERT_REQUIRED'",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"cert_loc",
")",
":",
"conn",
".",
"ca_certs",
"=",
"cert_loc",
"else",
":",
"conn",
".",
"ca_cert_dir",
"=",
"cert_loc",
"else",
":",
"conn",
".",
"cert_reqs",
"=",
"'CERT_NONE'",
"conn",
".",
"ca_certs",
"=",
"None",
"conn",
".",
"ca_cert_dir",
"=",
"None",
"if",
"cert",
":",
"if",
"not",
"isinstance",
"(",
"cert",
",",
"basestring",
")",
":",
"conn",
".",
"cert_file",
"=",
"cert",
"[",
"0",
"]",
"conn",
".",
"key_file",
"=",
"cert",
"[",
"1",
"]",
"else",
":",
"conn",
".",
"cert_file",
"=",
"cert",
"conn",
".",
"key_file",
"=",
"None",
"if",
"conn",
".",
"cert_file",
"and",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"conn",
".",
"cert_file",
")",
":",
"raise",
"IOError",
"(",
"\"Could not find the TLS certificate file, \"",
"\"invalid path: {}\"",
".",
"format",
"(",
"conn",
".",
"cert_file",
")",
")",
"if",
"conn",
".",
"key_file",
"and",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"conn",
".",
"key_file",
")",
":",
"raise",
"IOError",
"(",
"\"Could not find the TLS key file, \"",
"\"invalid path: {}\"",
".",
"format",
"(",
"conn",
".",
"key_file",
")",
")"
] | https://github.com/pypa/pipenv/blob/b21baade71a86ab3ee1429f71fbc14d4f95fb75d/pipenv/vendor/requests/adapters.py#L203-L253 |
||
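How verify and cert typically reach cert_verify in practice -- they flow down from the top-level requests API; the host and file paths here are hypothetical:

import requests

resp = requests.get(
    'https://internal.example.com/api',
    verify='/etc/ssl/private-ca.pem',    # CA bundle file (a directory also works)
    cert=('/etc/ssl/client.crt', '/etc/ssl/client.key'),   # client TLS pair
)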
tensorflow/models | 6b8bb0cbeb3e10415c7a87448f08adc3c484c1d3 | research/object_detection/models/keras_models/resnet_v1.py | python | resnet_v1_101 | (batchnorm_training,
batchnorm_scale=True,
default_batchnorm_momentum=0.997,
default_batchnorm_epsilon=1e-5,
weight_decay=0.0001,
conv_hyperparams=None,
min_depth=8,
depth_multiplier=1,
**kwargs) | return tf.keras.applications.resnet.ResNet101(
layers=layers_override, **kwargs) | Instantiates the Resnet101 architecture, modified for object detection.
Args:
batchnorm_training: Bool. Assigned to Batch norm layer `training` param
when constructing `freezable_batch_norm.FreezableBatchNorm` layers.
batchnorm_scale: If True, uses an explicit `gamma` multiplier to scale
the activations in the batch normalization layer.
default_batchnorm_momentum: Float. When 'conv_hyperparams' is None,
batch norm layers will be constructed using this value as the momentum.
default_batchnorm_epsilon: Float. When 'conv_hyperparams' is None,
batch norm layers will be constructed using this value as the epsilon.
weight_decay: The weight decay to use for regularizing the model.
conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for convolution ops. Optionally set to `None`
to use default resnet_v1 layer builders.
min_depth: Minimum number of filters in the convolutional layers.
depth_multiplier: The depth multiplier to modify the number of filters
in the convolutional layers.
**kwargs: Keyword arguments forwarded directly to the
`tf.keras.applications.resnet.ResNet101` method that constructs the Keras
model.
Returns:
A Keras ResnetV1-101 model instance. | Instantiates the Resnet101 architecture, modified for object detection. | [
"Instantiates",
"the",
"Resnet50",
"architecture",
"modified",
"for",
"object",
"detection",
"."
] | def resnet_v1_101(batchnorm_training,
batchnorm_scale=True,
default_batchnorm_momentum=0.997,
default_batchnorm_epsilon=1e-5,
weight_decay=0.0001,
conv_hyperparams=None,
min_depth=8,
depth_multiplier=1,
**kwargs):
"""Instantiates the Resnet50 architecture, modified for object detection.
Args:
batchnorm_training: Bool. Assigned to Batch norm layer `training` param
when constructing `freezable_batch_norm.FreezableBatchNorm` layers.
batchnorm_scale: If True, uses an explicit `gamma` multiplier to scale
the activations in the batch normalization layer.
default_batchnorm_momentum: Float. When 'conv_hyperparams' is None,
batch norm layers will be constructed using this value as the momentum.
default_batchnorm_epsilon: Float. When 'conv_hyperparams' is None,
batch norm layers will be constructed using this value as the epsilon.
weight_decay: The weight decay to use for regularizing the model.
conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for convolution ops. Optionally set to `None`
to use default resnet_v1 layer builders.
min_depth: Minimum number of filters in the convolutional layers.
depth_multiplier: The depth multiplier to modify the number of filters
in the convolutional layers.
**kwargs: Keyword arguments forwarded directly to the
`tf.keras.applications.resnet.ResNet101` method that constructs the Keras
model.
Returns:
A Keras ResnetV1-101 model instance.
"""
layers_override = _LayersOverride(
batchnorm_training,
batchnorm_scale=batchnorm_scale,
default_batchnorm_momentum=default_batchnorm_momentum,
default_batchnorm_epsilon=default_batchnorm_epsilon,
conv_hyperparams=conv_hyperparams,
weight_decay=weight_decay,
min_depth=min_depth,
depth_multiplier=depth_multiplier)
return tf.keras.applications.resnet.ResNet101(
layers=layers_override, **kwargs) | [
"def",
"resnet_v1_101",
"(",
"batchnorm_training",
",",
"batchnorm_scale",
"=",
"True",
",",
"default_batchnorm_momentum",
"=",
"0.997",
",",
"default_batchnorm_epsilon",
"=",
"1e-5",
",",
"weight_decay",
"=",
"0.0001",
",",
"conv_hyperparams",
"=",
"None",
",",
"min_depth",
"=",
"8",
",",
"depth_multiplier",
"=",
"1",
",",
"*",
"*",
"kwargs",
")",
":",
"layers_override",
"=",
"_LayersOverride",
"(",
"batchnorm_training",
",",
"batchnorm_scale",
"=",
"batchnorm_scale",
",",
"default_batchnorm_momentum",
"=",
"default_batchnorm_momentum",
",",
"default_batchnorm_epsilon",
"=",
"default_batchnorm_epsilon",
",",
"conv_hyperparams",
"=",
"conv_hyperparams",
",",
"weight_decay",
"=",
"weight_decay",
",",
"min_depth",
"=",
"min_depth",
",",
"depth_multiplier",
"=",
"depth_multiplier",
")",
"return",
"tf",
".",
"keras",
".",
"applications",
".",
"resnet",
".",
"ResNet101",
"(",
"layers",
"=",
"layers_override",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/tensorflow/models/blob/6b8bb0cbeb3e10415c7a87448f08adc3c484c1d3/research/object_detection/models/keras_models/resnet_v1.py#L307-L351 |
|
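A construction sketch, assuming the TensorFlow object_detection package is importable; the trailing keyword arguments pass straight through to tf.keras.applications.resnet.ResNet101, so the usual Keras options apply:

from object_detection.models.keras_models.resnet_v1 import resnet_v1_101

model = resnet_v1_101(
    batchnorm_training=False,
    weights=None,              # no pretrained weights
    include_top=False,
    input_shape=(224, 224, 3),
)
model.summary()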
OpenNMT/OpenNMT-py | 4815f07fcd482af9a1fe1d3b620d144197178bc5 | onmt/inputters/inputter.py | python | make_tgt | (data, vocab) | return alignment | [] | def make_tgt(data, vocab):
tgt_size = max([t.size(0) for t in data])
alignment = torch.zeros(tgt_size, len(data)).long()
for i, sent in enumerate(data):
alignment[:sent.size(0), i] = sent
return alignment | [
"def",
"make_tgt",
"(",
"data",
",",
"vocab",
")",
":",
"tgt_size",
"=",
"max",
"(",
"[",
"t",
".",
"size",
"(",
"0",
")",
"for",
"t",
"in",
"data",
"]",
")",
"alignment",
"=",
"torch",
".",
"zeros",
"(",
"tgt_size",
",",
"len",
"(",
"data",
")",
")",
".",
"long",
"(",
")",
"for",
"i",
",",
"sent",
"in",
"enumerate",
"(",
"data",
")",
":",
"alignment",
"[",
":",
"sent",
".",
"size",
"(",
"0",
")",
",",
"i",
"]",
"=",
"sent",
"return",
"alignment"
] | https://github.com/OpenNMT/OpenNMT-py/blob/4815f07fcd482af9a1fe1d3b620d144197178bc5/onmt/inputters/inputter.py#L45-L50 |
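A padding-behavior sketch for make_tgt: shorter sentences keep a zero tail, and the vocab argument is unused by this helper:

import torch

data = [torch.tensor([4, 7, 9]), torch.tensor([5, 2])]
alignment = make_tgt(data, vocab=None)
print(alignment.shape)    # torch.Size([3, 2])
print(alignment[:, 1])    # tensor([5, 2, 0]) -- zero-padded tail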