Each row of this dump is one Python function sample; its columns, with the value ranges reported by the dataset viewer, are:

| Column | Type | Values |
|---|---|---|
| `nwo` | string | repo name-with-owner, 5–91 chars |
| `sha` | string | 40-char commit hash |
| `path` | string | file path within the repo, 5–174 chars |
| `language` | string (1 class) | always `python` |
| `identifier` | string | function/method name, 1–120 chars |
| `parameters` | string | 0–3.15k chars |
| `argument_list` | string (1 class) | single value (empty in the samples shown) |
| `return_statement` | string | 0–24.1k chars |
| `docstring` | string | 0–27.3k chars |
| `docstring_summary` | string | 0–13.8k chars |
| `docstring_tokens` | sequence | tokenization of `docstring` |
| `function` | string | full function source, 22–139k chars |
| `function_tokens` | sequence | tokenization of `function` |
| `url` | string | GitHub permalink, 87–283 chars |

The `docstring_tokens` and `function_tokens` columns hold the punctuation/whitespace-split tokenizations of the `docstring` and `function` fields.
**graphcore/examples** — `_truncate_seq_pair` — `applications/tensorflow/bert/bert_data/glue.py`

Truncates a sequence pair in place to the maximum length.

```python
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
    """Truncates a sequence pair in place to the maximum length."""
    # This is a simple heuristic which will always truncate the longer sequence
    # one token at a time. This makes more sense than truncating an equal percent
    # of tokens from each, since if one sequence is very short then each token
    # that's truncated likely contains more information than a longer sequence.
    while True:
        total_length = len(tokens_a) + len(tokens_b)
        if total_length <= max_length:
            break
        if len(tokens_a) > len(tokens_b):
            tokens_a.pop()
        else:
            tokens_b.pop()
```

Source: https://github.com/graphcore/examples/blob/46d2b7687b829778369fc6328170a7b14761e5c6/applications/tensorflow/bert/bert_data/glue.py#L839-L853
"def",
"_truncate_seq_pair",
"(",
"tokens_a",
",",
"tokens_b",
",",
"max_length",
")",
":",
"# This is a simple heuristic which will always truncate the longer sequence",
"# one token at a time. This makes more sense than truncating an equal percent",
"# of tokens from each, since if one sequence is very short then each token",
"# that's truncated likely contains more information than a longer sequence.",
"while",
"True",
":",
"total_length",
"=",
"len",
"(",
"tokens_a",
")",
"+",
"len",
"(",
"tokens_b",
")",
"if",
"total_length",
"<=",
"max_length",
":",
"break",
"if",
"len",
"(",
"tokens_a",
")",
">",
"len",
"(",
"tokens_b",
")",
":",
"tokens_a",
".",
"pop",
"(",
")",
"else",
":",
"tokens_b",
".",
"pop",
"(",
")"
] | https://github.com/graphcore/examples/blob/46d2b7687b829778369fc6328170a7b14761e5c6/applications/tensorflow/bert/bert_data/glue.py#L839-L853 |
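A quick illustration of the in-place truncation (toy inputs; `_truncate_seq_pair` is the function above):

```python
# Toy lists standing in for WordPiece token sequences; the function mutates them.
tokens_a = list("ABCDEF")
tokens_b = list("123")
_truncate_seq_pair(tokens_a, tokens_b, max_length=6)
print(tokens_a, tokens_b)  # ['A', 'B', 'C'] ['1', '2', '3'] -- only the longer list shrank
```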
**shakedzy/dython** — `cramers_v` — `dython/nominal.py`

Calculates Cramer's V statistic for categorical-categorical association. This is a symmetric coefficient: V(x,y) = V(y,x).

```python
def cramers_v(x,
              y,
              bias_correction=True,
              nan_strategy=_REPLACE,
              nan_replace_value=_DEFAULT_REPLACE_VALUE):
    """
    Calculates Cramer's V statistic for categorical-categorical association.
    This is a symmetric coefficient: V(x,y) = V(y,x)
    Original function taken from: https://stackoverflow.com/a/46498792/5863503
    Wikipedia: https://en.wikipedia.org/wiki/Cram%C3%A9r%27s_V
    Parameters:
    -----------
    x : list / NumPy ndarray / Pandas Series
        A sequence of categorical measurements
    y : list / NumPy ndarray / Pandas Series
        A sequence of categorical measurements
    bias_correction : Boolean, default = True
        Use bias correction from Bergsma and Wicher,
        Journal of the Korean Statistical Society 42 (2013): 323-328.
    nan_strategy : string, default = 'replace'
        How to handle missing values: can be either 'drop' to remove samples
        with missing values, or 'replace' to replace all missing values with
        the nan_replace_value. Missing values are None and np.nan.
    nan_replace_value : any, default = 0.0
        The value used to replace missing values with. Only applicable when
        nan_strategy is set to 'replace'.
    Returns:
    --------
    float in the range of [0,1]
    """
    if nan_strategy == _REPLACE:
        x, y = replace_nan_with_value(x, y, nan_replace_value)
    elif nan_strategy == _DROP:
        x, y = remove_incomplete_samples(x, y)
    confusion_matrix = pd.crosstab(x, y)
    chi2 = ss.chi2_contingency(confusion_matrix)[0]
    n = confusion_matrix.sum().sum()
    phi2 = chi2 / n
    r, k = confusion_matrix.shape
    if bias_correction:
        phi2corr = max(0, phi2 - ((k - 1) * (r - 1)) / (n - 1))
        rcorr = r - ((r - 1) ** 2) / (n - 1)
        kcorr = k - ((k - 1) ** 2) / (n - 1)
        if min((kcorr - 1), (rcorr - 1)) == 0:
            warnings.warn(
                "Unable to calculate Cramer's V using bias correction. Consider using bias_correction=False",
                RuntimeWarning)
            return np.nan
        else:
            return np.sqrt(phi2corr / min((kcorr - 1), (rcorr - 1)))
    else:
        return np.sqrt(phi2 / min(k - 1, r - 1))
```

Source: https://github.com/shakedzy/dython/blob/7cf1581f3ca025add53150ebcba817910953fdcc/dython/nominal.py#L90-L144
"def",
"cramers_v",
"(",
"x",
",",
"y",
",",
"bias_correction",
"=",
"True",
",",
"nan_strategy",
"=",
"_REPLACE",
",",
"nan_replace_value",
"=",
"_DEFAULT_REPLACE_VALUE",
")",
":",
"if",
"nan_strategy",
"==",
"_REPLACE",
":",
"x",
",",
"y",
"=",
"replace_nan_with_value",
"(",
"x",
",",
"y",
",",
"nan_replace_value",
")",
"elif",
"nan_strategy",
"==",
"_DROP",
":",
"x",
",",
"y",
"=",
"remove_incomplete_samples",
"(",
"x",
",",
"y",
")",
"confusion_matrix",
"=",
"pd",
".",
"crosstab",
"(",
"x",
",",
"y",
")",
"chi2",
"=",
"ss",
".",
"chi2_contingency",
"(",
"confusion_matrix",
")",
"[",
"0",
"]",
"n",
"=",
"confusion_matrix",
".",
"sum",
"(",
")",
".",
"sum",
"(",
")",
"phi2",
"=",
"chi2",
"/",
"n",
"r",
",",
"k",
"=",
"confusion_matrix",
".",
"shape",
"if",
"bias_correction",
":",
"phi2corr",
"=",
"max",
"(",
"0",
",",
"phi2",
"-",
"(",
"(",
"k",
"-",
"1",
")",
"*",
"(",
"r",
"-",
"1",
")",
")",
"/",
"(",
"n",
"-",
"1",
")",
")",
"rcorr",
"=",
"r",
"-",
"(",
"(",
"r",
"-",
"1",
")",
"**",
"2",
")",
"/",
"(",
"n",
"-",
"1",
")",
"kcorr",
"=",
"k",
"-",
"(",
"(",
"k",
"-",
"1",
")",
"**",
"2",
")",
"/",
"(",
"n",
"-",
"1",
")",
"if",
"min",
"(",
"(",
"kcorr",
"-",
"1",
")",
",",
"(",
"rcorr",
"-",
"1",
")",
")",
"==",
"0",
":",
"warnings",
".",
"warn",
"(",
"\"Unable to calculate Cramer's V using bias correction. Consider using bias_correction=False\"",
",",
"RuntimeWarning",
")",
"return",
"np",
".",
"nan",
"else",
":",
"return",
"np",
".",
"sqrt",
"(",
"phi2corr",
"/",
"min",
"(",
"(",
"kcorr",
"-",
"1",
")",
",",
"(",
"rcorr",
"-",
"1",
")",
")",
")",
"else",
":",
"return",
"np",
".",
"sqrt",
"(",
"phi2",
"/",
"min",
"(",
"k",
"-",
"1",
",",
"r",
"-",
"1",
")",
")"
] | https://github.com/shakedzy/dython/blob/7cf1581f3ca025add53150ebcba817910953fdcc/dython/nominal.py#L90-L144 |
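A small usage sketch, assuming the `dython` package is installed (the module-level names `_REPLACE`, `pd`, `ss`, etc. are resolved inside `dython.nominal`):

```python
import pandas as pd
from dython.nominal import cramers_v

df = pd.DataFrame({
    "color": ["red", "red", "blue", "blue", "green", "green"],
    "size":  ["S",   "S",   "L",    "L",    "M",     "M"],
})
# The two columns determine each other exactly, so V should come out as 1.0.
print(cramers_v(df["color"], df["size"]))
```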
**naver/claf** — `ElmoLstm._lstm_forward` — `claf/tokens/elmo.py`

Runs a stacked bidirectional LSTM over a packed, batch-first sequence and returns the per-layer outputs plus final (state, memory) tuples.

```python
def _lstm_forward(
    self,
    inputs: PackedSequence,
    initial_state: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
) -> Tuple[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
    """
    Parameters
    ----------
    inputs : ``PackedSequence``, required.
        A batch first ``PackedSequence`` to run the stacked LSTM over.
    initial_state : ``Tuple[torch.Tensor, torch.Tensor]``, optional, (default = None)
        A tuple (state, memory) representing the initial hidden state and memory
        of the LSTM, with shape (num_layers, batch_size, 2 * hidden_size) and
        (num_layers, batch_size, 2 * cell_size) respectively.
    Returns
    -------
    output_sequence : ``torch.FloatTensor``
        The encoded sequence of shape (num_layers, batch_size, sequence_length, hidden_size)
    final_states: ``Tuple[torch.FloatTensor, torch.FloatTensor]``
        The per-layer final (state, memory) states of the LSTM, with shape
        (num_layers, batch_size, 2 * hidden_size) and (num_layers, batch_size, 2 * cell_size)
        respectively. The last dimension is duplicated because it contains the state/memory
        for both the forward and backward layers.
    """
    if initial_state is None:
        hidden_states: List[Optional[Tuple[torch.Tensor, torch.Tensor]]] = [None] * len(
            self.forward_layers
        )
    elif initial_state[0].size()[0] != len(self.forward_layers):
        raise ValueError(
            "Initial states were passed to forward() but the number of "
            "initial states does not match the number of layers."
        )
    else:
        hidden_states = list(zip(initial_state[0].split(1, 0), initial_state[1].split(1, 0)))
    inputs, batch_lengths = pad_packed_sequence(inputs, batch_first=True)
    forward_output_sequence = inputs
    backward_output_sequence = inputs
    final_states = []
    sequence_outputs = []
    for layer_index, state in enumerate(hidden_states):
        forward_layer = getattr(self, "forward_layer_{}".format(layer_index))
        backward_layer = getattr(self, "backward_layer_{}".format(layer_index))
        forward_cache = forward_output_sequence
        backward_cache = backward_output_sequence
        if state is not None:
            forward_hidden_state, backward_hidden_state = state[0].split(self.hidden_size, 2)
            forward_memory_state, backward_memory_state = state[1].split(self.cell_size, 2)
            forward_state = (forward_hidden_state, forward_memory_state)
            backward_state = (backward_hidden_state, backward_memory_state)
        else:
            forward_state = None
            backward_state = None
        forward_output_sequence, forward_state = forward_layer(
            forward_output_sequence, batch_lengths, forward_state
        )
        backward_output_sequence, backward_state = backward_layer(
            backward_output_sequence, batch_lengths, backward_state
        )
        # Skip connections, just adding the input to the output.
        if layer_index != 0:
            forward_output_sequence += forward_cache
            backward_output_sequence += backward_cache
        sequence_outputs.append(
            torch.cat([forward_output_sequence, backward_output_sequence], -1)
        )
        # Append the state tuples in a list, so that we can return
        # the final states for all the layers.
        final_states.append(
            (
                torch.cat([forward_state[0], backward_state[0]], -1),
                torch.cat([forward_state[1], backward_state[1]], -1),
            )
        )
    stacked_sequence_outputs: torch.FloatTensor = torch.stack(sequence_outputs)
    # Stack the hidden state and memory for each layer into 2 tensors of shape
    # (num_layers, batch_size, hidden_size) and (num_layers, batch_size, cell_size)
    # respectively.
    final_hidden_states, final_memory_states = zip(*final_states)
    final_state_tuple: Tuple[torch.FloatTensor, torch.FloatTensor] = (
        torch.cat(final_hidden_states, 0),
        torch.cat(final_memory_states, 0),
    )
    return stacked_sequence_outputs, final_state_tuple
```

Source: https://github.com/naver/claf/blob/6f45b1ecca0aa2b3bcf99e79c9cb2c915ba0bf3b/claf/tokens/elmo.py#L835-L925
"def",
"_lstm_forward",
"(",
"self",
",",
"inputs",
":",
"PackedSequence",
",",
"initial_state",
":",
"Optional",
"[",
"Tuple",
"[",
"torch",
".",
"Tensor",
",",
"torch",
".",
"Tensor",
"]",
"]",
"=",
"None",
",",
")",
"->",
"Tuple",
"[",
"torch",
".",
"Tensor",
",",
"Tuple",
"[",
"torch",
".",
"Tensor",
",",
"torch",
".",
"Tensor",
"]",
"]",
":",
"if",
"initial_state",
"is",
"None",
":",
"hidden_states",
":",
"List",
"[",
"Optional",
"[",
"Tuple",
"[",
"torch",
".",
"Tensor",
",",
"torch",
".",
"Tensor",
"]",
"]",
"]",
"=",
"[",
"None",
"]",
"*",
"len",
"(",
"self",
".",
"forward_layers",
")",
"elif",
"initial_state",
"[",
"0",
"]",
".",
"size",
"(",
")",
"[",
"0",
"]",
"!=",
"len",
"(",
"self",
".",
"forward_layers",
")",
":",
"raise",
"ValueError",
"(",
"\"Initial states were passed to forward() but the number of \"",
"\"initial states does not match the number of layers.\"",
")",
"else",
":",
"hidden_states",
"=",
"list",
"(",
"zip",
"(",
"initial_state",
"[",
"0",
"]",
".",
"split",
"(",
"1",
",",
"0",
")",
",",
"initial_state",
"[",
"1",
"]",
".",
"split",
"(",
"1",
",",
"0",
")",
")",
")",
"inputs",
",",
"batch_lengths",
"=",
"pad_packed_sequence",
"(",
"inputs",
",",
"batch_first",
"=",
"True",
")",
"forward_output_sequence",
"=",
"inputs",
"backward_output_sequence",
"=",
"inputs",
"final_states",
"=",
"[",
"]",
"sequence_outputs",
"=",
"[",
"]",
"for",
"layer_index",
",",
"state",
"in",
"enumerate",
"(",
"hidden_states",
")",
":",
"forward_layer",
"=",
"getattr",
"(",
"self",
",",
"\"forward_layer_{}\"",
".",
"format",
"(",
"layer_index",
")",
")",
"backward_layer",
"=",
"getattr",
"(",
"self",
",",
"\"backward_layer_{}\"",
".",
"format",
"(",
"layer_index",
")",
")",
"forward_cache",
"=",
"forward_output_sequence",
"backward_cache",
"=",
"backward_output_sequence",
"if",
"state",
"is",
"not",
"None",
":",
"forward_hidden_state",
",",
"backward_hidden_state",
"=",
"state",
"[",
"0",
"]",
".",
"split",
"(",
"self",
".",
"hidden_size",
",",
"2",
")",
"forward_memory_state",
",",
"backward_memory_state",
"=",
"state",
"[",
"1",
"]",
".",
"split",
"(",
"self",
".",
"cell_size",
",",
"2",
")",
"forward_state",
"=",
"(",
"forward_hidden_state",
",",
"forward_memory_state",
")",
"backward_state",
"=",
"(",
"backward_hidden_state",
",",
"backward_memory_state",
")",
"else",
":",
"forward_state",
"=",
"None",
"backward_state",
"=",
"None",
"forward_output_sequence",
",",
"forward_state",
"=",
"forward_layer",
"(",
"forward_output_sequence",
",",
"batch_lengths",
",",
"forward_state",
")",
"backward_output_sequence",
",",
"backward_state",
"=",
"backward_layer",
"(",
"backward_output_sequence",
",",
"batch_lengths",
",",
"backward_state",
")",
"# Skip connections, just adding the input to the output.",
"if",
"layer_index",
"!=",
"0",
":",
"forward_output_sequence",
"+=",
"forward_cache",
"backward_output_sequence",
"+=",
"backward_cache",
"sequence_outputs",
".",
"append",
"(",
"torch",
".",
"cat",
"(",
"[",
"forward_output_sequence",
",",
"backward_output_sequence",
"]",
",",
"-",
"1",
")",
")",
"# Append the state tuples in a list, so that we can return",
"# the final states for all the layers.",
"final_states",
".",
"append",
"(",
"(",
"torch",
".",
"cat",
"(",
"[",
"forward_state",
"[",
"0",
"]",
",",
"backward_state",
"[",
"0",
"]",
"]",
",",
"-",
"1",
")",
",",
"torch",
".",
"cat",
"(",
"[",
"forward_state",
"[",
"1",
"]",
",",
"backward_state",
"[",
"1",
"]",
"]",
",",
"-",
"1",
")",
",",
")",
")",
"stacked_sequence_outputs",
":",
"torch",
".",
"FloatTensor",
"=",
"torch",
".",
"stack",
"(",
"sequence_outputs",
")",
"# Stack the hidden state and memory for each layer into 2 tensors of shape",
"# (num_layers, batch_size, hidden_size) and (num_layers, batch_size, cell_size)",
"# respectively.",
"final_hidden_states",
",",
"final_memory_states",
"=",
"zip",
"(",
"*",
"final_states",
")",
"final_state_tuple",
":",
"Tuple",
"[",
"torch",
".",
"FloatTensor",
",",
"torch",
".",
"FloatTensor",
"]",
"=",
"(",
"torch",
".",
"cat",
"(",
"final_hidden_states",
",",
"0",
")",
",",
"torch",
".",
"cat",
"(",
"final_memory_states",
",",
"0",
")",
",",
")",
"return",
"stacked_sequence_outputs",
",",
"final_state_tuple"
] | https://github.com/naver/claf/blob/6f45b1ecca0aa2b3bcf99e79c9cb2c915ba0bf3b/claf/tokens/elmo.py#L835-L925 |
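The method begins by unpacking its `PackedSequence` input; a minimal standalone shape walk-through of that plumbing, assuming only PyTorch:

```python
import torch
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence

x = torch.randn(2, 5, 8)  # (batch, seq_len, input_size), batch-first
packed = pack_padded_sequence(x, lengths=torch.tensor([5, 3]), batch_first=True)

# This mirrors the first real statement of _lstm_forward above:
inputs, batch_lengths = pad_packed_sequence(packed, batch_first=True)
print(inputs.shape, batch_lengths)  # torch.Size([2, 5, 8]) tensor([5, 3])
```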
**pts/pdfsizeopt** — `PdfData._ConvertImageWithJbig2` — `lib/pdfsizeopt/main.py`

Converts with jbig2. Assumes image is saved to image.file_name.

```python
def _ConvertImageWithJbig2(self, image, cmd_name, cmd_pattern, obj_num,
                           color_type):
    """Converts with jbig2. Assumes image is saved to image.file_name."""
    old_image, image = image, ImageData(image)
    if color_type != 'gray':
        image.SavePng(  # Changes .file_name.
            file_name=TMP_PREFIX + 'img-%d.gray.png' % obj_num,
            do_force_gray=True)
    image.idat = self.ConvertImage(
        sourcefn=image.file_name,
        is_inverted=image.is_inverted,
        targetfn=TMP_PREFIX + 'img-%d.jbig2' % obj_num,
        cmd_pattern=cmd_pattern,
        cmd_name=cmd_name,
        do_just_read=True)[1]
    if image.file_name != old_image.file_name:
        os.remove(image.file_name)
    image.compression = 'jbig2'
    image.file_name = TMP_PREFIX + 'img-%d.jbig2' % obj_num
    return cmd_name, image
```

Source: https://github.com/pts/pdfsizeopt/blob/33ec5e5c637fc8967d6d238dfdaf8c55605efe83/lib/pdfsizeopt/main.py#L7209-L7228
"def",
"_ConvertImageWithJbig2",
"(",
"self",
",",
"image",
",",
"cmd_name",
",",
"cmd_pattern",
",",
"obj_num",
",",
"color_type",
")",
":",
"old_image",
",",
"image",
"=",
"image",
",",
"ImageData",
"(",
"image",
")",
"if",
"color_type",
"!=",
"'gray'",
":",
"image",
".",
"SavePng",
"(",
"# Changes .file_name.",
"file_name",
"=",
"TMP_PREFIX",
"+",
"'img-%d.gray.png'",
"%",
"obj_num",
",",
"do_force_gray",
"=",
"True",
")",
"image",
".",
"idat",
"=",
"self",
".",
"ConvertImage",
"(",
"sourcefn",
"=",
"image",
".",
"file_name",
",",
"is_inverted",
"=",
"image",
".",
"is_inverted",
",",
"targetfn",
"=",
"TMP_PREFIX",
"+",
"'img-%d.jbig2'",
"%",
"obj_num",
",",
"cmd_pattern",
"=",
"cmd_pattern",
",",
"cmd_name",
"=",
"cmd_name",
",",
"do_just_read",
"=",
"True",
")",
"[",
"1",
"]",
"if",
"image",
".",
"file_name",
"!=",
"old_image",
".",
"file_name",
":",
"os",
".",
"remove",
"(",
"image",
".",
"file_name",
")",
"image",
".",
"compression",
"=",
"'jbig2'",
"image",
".",
"file_name",
"=",
"TMP_PREFIX",
"+",
"'img-%d.jbig2'",
"%",
"obj_num",
"return",
"cmd_name",
",",
"image"
] | https://github.com/pts/pdfsizeopt/blob/33ec5e5c637fc8967d6d238dfdaf8c55605efe83/lib/pdfsizeopt/main.py#L7209-L7228 |
**iMoonLab/HGNN** — `hyperedge_concat` — `utils/hypergraph_utils.py`

Concatenates the hyperedge groups in `H_list` into a fused hypergraph incidence matrix.

```python
def hyperedge_concat(*H_list):
    """
    Concatenate hyperedge group in H_list
    :param H_list: Hyperedge groups which contain two or more hypergraph incidence matrix
    :return: Fused hypergraph incidence matrix
    """
    H = None
    for h in H_list:
        if h is not None and h != []:
            # for the first H appended to fused hypergraph incidence matrix
            if H is None:
                H = h
            else:
                if type(h) != list:
                    H = np.hstack((H, h))
                else:
                    tmp = []
                    for a, b in zip(H, h):
                        tmp.append(np.hstack((a, b)))
                    H = tmp
    return H
```

Source: https://github.com/iMoonLab/HGNN/blob/2d19c82084e4694ec3b2e911d8755dbd9129fd6e/utils/hypergraph_utils.py#L58-L78
"def",
"hyperedge_concat",
"(",
"*",
"H_list",
")",
":",
"H",
"=",
"None",
"for",
"h",
"in",
"H_list",
":",
"if",
"h",
"is",
"not",
"None",
"and",
"h",
"!=",
"[",
"]",
":",
"# for the first H appended to fused hypergraph incidence matrix",
"if",
"H",
"is",
"None",
":",
"H",
"=",
"h",
"else",
":",
"if",
"type",
"(",
"h",
")",
"!=",
"list",
":",
"H",
"=",
"np",
".",
"hstack",
"(",
"(",
"H",
",",
"h",
")",
")",
"else",
":",
"tmp",
"=",
"[",
"]",
"for",
"a",
",",
"b",
"in",
"zip",
"(",
"H",
",",
"h",
")",
":",
"tmp",
".",
"append",
"(",
"np",
".",
"hstack",
"(",
"(",
"a",
",",
"b",
")",
")",
")",
"H",
"=",
"tmp",
"return",
"H"
] | https://github.com/iMoonLab/HGNN/blob/2d19c82084e4694ec3b2e911d8755dbd9129fd6e/utils/hypergraph_utils.py#L58-L78 |
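A toy check of the column-wise fusion (assumes only NumPy, with `np` bound as in the source module; `hyperedge_concat` is the function above):

```python
import numpy as np

H1 = np.eye(4)        # incidence matrix: 4 nodes x 4 hyperedges
H2 = np.ones((4, 2))  # a second group: 4 nodes x 2 hyperedges
print(hyperedge_concat(H1, H2).shape)  # (4, 6): groups are stacked column-wise
```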
**IntelAI/nauta** — `define_imagenet_flags` — `applications/cli/example-python/package_examples/resnet/imagenet_main.py`

```python
def define_imagenet_flags():
    resnet_run_loop.define_resnet_flags(
        resnet_size_choices=['18', '34', '50', '101', '152', '200'])
    flags.adopt_module_key_flags(resnet_run_loop)
    flags_core.set_defaults(train_epochs=1)
```

Source: https://github.com/IntelAI/nauta/blob/bbedb114a755cf1f43b834a58fc15fb6e3a4b291/applications/cli/example-python/package_examples/resnet/imagenet_main.py#L300-L304
"def",
"define_imagenet_flags",
"(",
")",
":",
"resnet_run_loop",
".",
"define_resnet_flags",
"(",
"resnet_size_choices",
"=",
"[",
"'18'",
",",
"'34'",
",",
"'50'",
",",
"'101'",
",",
"'152'",
",",
"'200'",
"]",
")",
"flags",
".",
"adopt_module_key_flags",
"(",
"resnet_run_loop",
")",
"flags_core",
".",
"set_defaults",
"(",
"train_epochs",
"=",
"1",
")"
] | https://github.com/IntelAI/nauta/blob/bbedb114a755cf1f43b834a58fc15fb6e3a4b291/applications/cli/example-python/package_examples/resnet/imagenet_main.py#L300-L304 |
**dit/dit** — `exp_func` — `dit/math/ops.py`

Returns a base-`b` exponential function.

```python
def exp_func(b):
    """
    Returns a base-`b` exponential function.
    Parameters
    ----------
    b : positive float or 'e'
        The base of the desired exponential function.
    Returns
    -------
    exp : function
        The base-`b` exponential function. The returned function will operate
        elementwise on NumPy arrays, but note, it is not a ufunc.
    Examples
    --------
    >>> exp2 = exp_func(2)
    >>> exp2(1)
    2.0
    >>> exp3 = exp_func(3)
    >>> exp3(1)
    3.0
    Raises
    ------
    InvalidBase
        If the base is less than zero or equal to one.
    """
    from dit.utils import is_string_like
    if is_string_like(b) and b not in acceptable_base_strings:
        raise InvalidBase(msg=b)
    if b == 'linear':
        exp = lambda x: x  # pragma: no branch
    elif b == 2:
        exp = np.exp2
    elif b == 10:
        exp = lambda x: 10**x
    elif b == 'e' or np.isclose(b, np.e):
        exp = np.exp
    else:
        if b <= 0 or b == 1:
            raise InvalidBase(b)

        def exp(x, base=b):
            """
            Return `base`**`x`
            Parameters
            ----------
            x : float
                The number to exponentiate
            base : float
                The base of the exponential
            Returns
            -------
            p : float
                `base`**`x`
            """
            return base**np.asarray(x)

    return exp
```

Source: https://github.com/dit/dit/blob/2853cb13110c5a5b2fa7ad792e238e2177013da2/dit/math/ops.py#L43-L108
"def",
"exp_func",
"(",
"b",
")",
":",
"from",
"dit",
".",
"utils",
"import",
"is_string_like",
"if",
"is_string_like",
"(",
"b",
")",
"and",
"b",
"not",
"in",
"acceptable_base_strings",
":",
"raise",
"InvalidBase",
"(",
"msg",
"=",
"b",
")",
"if",
"b",
"==",
"'linear'",
":",
"exp",
"=",
"lambda",
"x",
":",
"x",
"# pragma: no branch",
"elif",
"b",
"==",
"2",
":",
"exp",
"=",
"np",
".",
"exp2",
"elif",
"b",
"==",
"10",
":",
"exp",
"=",
"lambda",
"x",
":",
"10",
"**",
"x",
"elif",
"b",
"==",
"'e'",
"or",
"np",
".",
"isclose",
"(",
"b",
",",
"np",
".",
"e",
")",
":",
"exp",
"=",
"np",
".",
"exp",
"else",
":",
"if",
"b",
"<=",
"0",
"or",
"b",
"==",
"1",
":",
"raise",
"InvalidBase",
"(",
"b",
")",
"def",
"exp",
"(",
"x",
",",
"base",
"=",
"b",
")",
":",
"\"\"\"\n Return `base`**`x`\n\n Parameters\n ----------\n x : float\n The number to exponentiate\n base : float\n The base of the exponential\n\n Returns\n -------\n p : float\n `base`**`x`\n \"\"\"",
"return",
"base",
"**",
"np",
".",
"asarray",
"(",
"x",
")",
"return",
"exp"
] | https://github.com/dit/dit/blob/2853cb13110c5a5b2fa7ad792e238e2177013da2/dit/math/ops.py#L43-L108 |
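Usage sketch, assuming `dit` is installed and the module path matches the permalink above:

```python
import numpy as np
from dit.math.ops import exp_func

exp2 = exp_func(2)
print(exp2(np.array([0, 1, 3])))  # [1. 2. 8.] -- elementwise, though not a ufunc
print(exp_func('linear')(42))     # 42: 'linear' leaves values untouched
```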
**balikasg/topicModelling** — `lda_gibbs_sampling1.inference` — `senLDA/lda_sentenceLayer.py`

The learning process: one Gibbs-sampling iteration over the data; an outer loop calls this function for as many iterations as needed.

```python
def inference(self):
    """ The learning process. Here only one iteration over the data.
    A loop will be calling this function for as many iterations as needed. """
    for m, doc in enumerate(self.docs):
        z_n, n_m_z = self.z_m_n[m], self.n_m_z[m]  # Take the topics of the sentences and the number of sentences assigned to each topic
        for sid, sentence in enumerate(doc):
            z = z_n[sid]  # Obtain the topic that was assigned to sentences
            n_m_z[z] -= 1  # Decrease the number of the sentences in the current document assigned to topic z
            self.n_z_t[z, sentence.astype(dtype=np.int32)] -= 1  # Decrease the number of the words assigned to topic z
            self.n_z[z] -= len(sentence)  # Decrease the total number of words assigned to topic z
            # Get full conditional to sample from
            p_z = self.get_full_conditional(sentence, m, z, self.n_z, self.n_m_z)
            new_z = np.random.multinomial(1, p_z).argmax()
            z_n[sid] = new_z
            n_m_z[new_z] += 1
            self.n_z_t[new_z, sentence.astype(dtype=np.int32)] += 1
            self.n_z[new_z] += len(sentence)
```

Source: https://github.com/balikasg/topicModelling/blob/ba5795eccbf23223d236c999c9cbbcd3d0c11e95/senLDA/lda_sentenceLayer.py#L113-L129
"def",
"inference",
"(",
"self",
")",
":",
"for",
"m",
",",
"doc",
"in",
"enumerate",
"(",
"self",
".",
"docs",
")",
":",
"z_n",
",",
"n_m_z",
"=",
"self",
".",
"z_m_n",
"[",
"m",
"]",
",",
"self",
".",
"n_m_z",
"[",
"m",
"]",
"#Take the topics of the sentences and the number of sentences assigned to each topic",
"for",
"sid",
",",
"sentence",
"in",
"enumerate",
"(",
"doc",
")",
":",
"z",
"=",
"z_n",
"[",
"sid",
"]",
"# Obtain the topic that was assigned to sentences",
"n_m_z",
"[",
"z",
"]",
"-=",
"1",
"# Decrease the number of the sentences in the current document assigned to topic z",
"self",
".",
"n_z_t",
"[",
"z",
",",
"sentence",
".",
"astype",
"(",
"dtype",
"=",
"np",
".",
"int32",
")",
"]",
"-=",
"1",
"#Decrease the number of the words assigned to topic z",
"self",
".",
"n_z",
"[",
"z",
"]",
"-=",
"len",
"(",
"sentence",
")",
"# Decrease the total number of words assigned to topic z",
"# Get full conditional to sample from",
"p_z",
"=",
"self",
".",
"get_full_conditional",
"(",
"sentence",
",",
"m",
",",
"z",
",",
"self",
".",
"n_z",
",",
"self",
".",
"n_m_z",
")",
"new_z",
"=",
"np",
".",
"random",
".",
"multinomial",
"(",
"1",
",",
"p_z",
")",
".",
"argmax",
"(",
")",
"z_n",
"[",
"sid",
"]",
"=",
"new_z",
"n_m_z",
"[",
"new_z",
"]",
"+=",
"1",
"self",
".",
"n_z_t",
"[",
"new_z",
",",
"sentence",
".",
"astype",
"(",
"dtype",
"=",
"np",
".",
"int32",
")",
"]",
"+=",
"1",
"self",
".",
"n_z",
"[",
"new_z",
"]",
"+=",
"len",
"(",
"sentence",
")"
] | https://github.com/balikasg/topicModelling/blob/ba5795eccbf23223d236c999c9cbbcd3d0c11e95/senLDA/lda_sentenceLayer.py#L113-L129 |
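The count bookkeeping above is the usual collapsed-Gibbs update. For word-level LDA the full conditional that `get_full_conditional` presumably generalizes (to whole sentences, by taking the word factor as a product over every word of the sentence) has the standard form

$$
P(z_i = k \mid \mathbf{z}_{-i}, \mathbf{w}) \;\propto\; \left(n_{m,k}^{-i} + \alpha\right)\frac{n_{k,w_i}^{-i} + \beta}{n_k^{-i} + V\beta},
$$

where $n_{m,k}$ counts assignments to topic $k$ in document $m$, $n_{k,w}$ counts word $w$ under topic $k$, $n_k$ is the total word count for topic $k$, $V$ is the vocabulary size, and the $-i$ superscript excludes the unit being resampled — which is exactly what the decrement/increment pairs in the code implement.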
**graphql-python/graphql-core** — `suggestion_list` — `src/graphql/pyutils/suggestion_list.py`

Get a list with suggestions for a given input.

```python
def suggestion_list(input_: str, options: Collection[str]) -> List[str]:
    """Get list with suggestions for a given input.
    Given an invalid input string and list of valid options, returns a filtered list
    of valid options sorted based on their similarity with the input.
    """
    options_by_distance = {}
    lexical_distance = LexicalDistance(input_)
    threshold = int(len(input_) * 0.4) + 1
    for option in options:
        distance = lexical_distance.measure(option, threshold)
        if distance is not None:
            options_by_distance[option] = distance
    # noinspection PyShadowingNames
    return sorted(
        options_by_distance,
        key=lambda option: (
            options_by_distance.get(option, 0),
            natural_comparison_key(option),
        ),
    )
```

Source: https://github.com/graphql-python/graphql-core/blob/9ea9c3705d6826322027d9bb539d37c5b25f7af9/src/graphql/pyutils/suggestion_list.py#L8-L30
"def",
"suggestion_list",
"(",
"input_",
":",
"str",
",",
"options",
":",
"Collection",
"[",
"str",
"]",
")",
"->",
"List",
"[",
"str",
"]",
":",
"options_by_distance",
"=",
"{",
"}",
"lexical_distance",
"=",
"LexicalDistance",
"(",
"input_",
")",
"threshold",
"=",
"int",
"(",
"len",
"(",
"input_",
")",
"*",
"0.4",
")",
"+",
"1",
"for",
"option",
"in",
"options",
":",
"distance",
"=",
"lexical_distance",
".",
"measure",
"(",
"option",
",",
"threshold",
")",
"if",
"distance",
"is",
"not",
"None",
":",
"options_by_distance",
"[",
"option",
"]",
"=",
"distance",
"# noinspection PyShadowingNames",
"return",
"sorted",
"(",
"options_by_distance",
",",
"key",
"=",
"lambda",
"option",
":",
"(",
"options_by_distance",
".",
"get",
"(",
"option",
",",
"0",
")",
",",
"natural_comparison_key",
"(",
"option",
")",
",",
")",
",",
")"
] | https://github.com/graphql-python/graphql-core/blob/9ea9c3705d6826322027d9bb539d37c5b25f7af9/src/graphql/pyutils/suggestion_list.py#L8-L30 |
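A usage sketch through graphql-core's `pyutils` namespace (assumed export; install `graphql-core`):

```python
from graphql.pyutils import suggestion_list

# Options within lexical distance int(len("cat") * 0.4) + 1 == 2 survive the cut;
# ties are broken by natural sort order.
print(suggestion_list("cat", ["cast", "dog", "car", "rat"]))  # e.g. ['car', 'cast', 'rat']
```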
**neovim/pynvim** — `encoding` — `pynvim/plugin/decorators.py`

DEPRECATED: use pynvim.decode().

```python
def encoding(encoding=True):
    """DEPRECATED: use pynvim.decode()."""
    if isinstance(encoding, str):
        encoding = True

    def dec(f):
        f._nvim_decode = encoding
        return f
    return dec
```

Source: https://github.com/neovim/pynvim/blob/71102c03efdcd6e7a1db1057a68478fc2249734d/pynvim/plugin/decorators.py#L167-L175
"def",
"encoding",
"(",
"encoding",
"=",
"True",
")",
":",
"if",
"isinstance",
"(",
"encoding",
",",
"str",
")",
":",
"encoding",
"=",
"True",
"def",
"dec",
"(",
"f",
")",
":",
"f",
".",
"_nvim_decode",
"=",
"encoding",
"return",
"f",
"return",
"dec"
] | https://github.com/neovim/pynvim/blob/71102c03efdcd6e7a1db1057a68478fc2249734d/pynvim/plugin/decorators.py#L167-L175 |
**OpenMDAO/OpenMDAO** — `multi_proc_fail_check` — `openmdao/utils/mpi.py`

Raise an AnalysisError on all procs if it is raised on one.

```python
def multi_proc_fail_check(comm):
    """
    Raise an AnalysisError on all procs if it is raised on one.
    Wrap this around code that you want to globally fail if it fails
    on any MPI process in comm. If not running under MPI, don't
    handle any exceptions.
    Parameters
    ----------
    comm : MPI communicator or None
        Communicator from the ParallelGroup that owns the calling solver.
    Yields
    ------
    None
    """
    if MPI is None:
        yield
    else:
        try:
            yield
        except AnalysisError:
            msg = traceback.format_exc()
        else:
            msg = ''
        fails = comm.allgather(msg)
        for i, f in enumerate(fails):
            if f:
                raise AnalysisError("AnalysisError raised in rank %d: traceback follows\n%s"
                                    % (i, f))
```

Source: https://github.com/OpenMDAO/OpenMDAO/blob/f47eb5485a0bb5ea5d2ae5bd6da4b94dc6b296bd/openmdao/utils/mpi.py#L158-L190
"def",
"multi_proc_fail_check",
"(",
"comm",
")",
":",
"if",
"MPI",
"is",
"None",
":",
"yield",
"else",
":",
"try",
":",
"yield",
"except",
"AnalysisError",
":",
"msg",
"=",
"traceback",
".",
"format_exc",
"(",
")",
"else",
":",
"msg",
"=",
"''",
"fails",
"=",
"comm",
".",
"allgather",
"(",
"msg",
")",
"for",
"i",
",",
"f",
"in",
"enumerate",
"(",
"fails",
")",
":",
"if",
"f",
":",
"raise",
"AnalysisError",
"(",
"\"AnalysisError raised in rank %d: traceback follows\\n%s\"",
"%",
"(",
"i",
",",
"f",
")",
")"
] | https://github.com/OpenMDAO/OpenMDAO/blob/f47eb5485a0bb5ea5d2ae5bd6da4b94dc6b296bd/openmdao/utils/mpi.py#L158-L190 |
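A standalone sketch of the same all-gather pattern (hypothetical names throughout; a fake single-rank communicator stands in for `comm`, and `contextlib.contextmanager` plays the role the OpenMDAO decorator presumably does):

```python
import traceback
from contextlib import contextmanager

class FakeComm:
    """Single-rank stand-in for an MPI communicator."""
    def allgather(self, value):
        return [value]

@contextmanager
def fail_together(comm):
    try:
        yield
        msg = ''
    except RuntimeError:
        msg = traceback.format_exc()
    fails = comm.allgather(msg)      # every rank learns about every failure
    for i, f in enumerate(fails):
        if f:                        # re-raise on *all* ranks, not just the failing one
            raise RuntimeError("failure in rank %d: traceback follows\n%s" % (i, f))

try:
    with fail_together(FakeComm()):
        raise RuntimeError("boom")
except RuntimeError as e:
    print("propagated:", str(e).splitlines()[0])
```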
**OCA/stock-logistics-warehouse** — `Product.product_qty_by_packaging` — `stock_packaging_calculator/models/product.py`

Calculate quantity by packaging.

```python
def product_qty_by_packaging(self, prod_qty, with_contained=False):
    """Calculate quantity by packaging.
    The minimal quantity is always represented by the UoM of the product.
    Limitation: fractional quantities are lost.
    :prod_qty: total qty to satisfy.
    :with_contained: include calculation of contained packagings.
        eg: 1 pallet contains 4 big boxes and 6 little boxes.
    :returns: list of dict in the form
        [{id: 1, qty: qty_per_package, name: package_name}]
        If `with_contained` is passed, each element will include
        the quantity of smaller packaging, like:
        {contained: [{id: 1, qty: 4, name: "Big box"}]}
    """
    self.ensure_one()
    return self._product_qty_by_packaging(
        self._ordered_packaging(),
        prod_qty,
        with_contained=with_contained,
    )
```

Source: https://github.com/OCA/stock-logistics-warehouse/blob/185c1b0cb9e31e3746a89ec269b4bc09c69b2411/stock_packaging_calculator/models/product.py#L50-L76
"def",
"product_qty_by_packaging",
"(",
"self",
",",
"prod_qty",
",",
"with_contained",
"=",
"False",
")",
":",
"self",
".",
"ensure_one",
"(",
")",
"return",
"self",
".",
"_product_qty_by_packaging",
"(",
"self",
".",
"_ordered_packaging",
"(",
")",
",",
"prod_qty",
",",
"with_contained",
"=",
"with_contained",
",",
")"
] | https://github.com/OCA/stock-logistics-warehouse/blob/185c1b0cb9e31e3746a89ec269b4bc09c69b2411/stock_packaging_calculator/models/product.py#L50-L76 |
**LumaPictures/pymel** — `subpackages` — `maintenance/stubs.py`

Given a module object, returns an iterator yielding (modulename, moduleobject, ispkg) for the module and all its submodules/subpackages.

```python
def subpackages(packagemod, skip_regex=None):
    """
    Given a module object, returns an iterator which yields a tuple
    (modulename, moduleobject, ispkg)
    for the given module and all its submodules/subpackages.
    """
    if hasattr(packagemod, '__path__'):
        yield packagemod.__name__, packagemod, True
        for importer, modname, ispkg in walk_packages(
                packagemod.__path__, packagemod.__name__ + '.',
                skip_regex=skip_regex):
            # if skip_regex and re.match(skip_regex, modname):
            #     print("skipping %s %s" % (modname, skip_regex))
            #     mod = None
            # else:
            if modname not in sys.modules:
                if verbose:
                    print("importing %s" % (modname,))
                try:
                    mod = importer.find_module(modname).load_module(modname)
                except Exception as e:
                    print("error importing %s: %s" % (modname, e))
                    mod = None
            else:
                mod = sys.modules[modname]
            yield modname, mod, ispkg
    else:
        yield packagemod.__name__, packagemod, False
```

Source: https://github.com/LumaPictures/pymel/blob/fa88a3f4fa18e09bb8aa9bdf4dab53d984bada72/maintenance/stubs.py#L127-L154
"def",
"subpackages",
"(",
"packagemod",
",",
"skip_regex",
"=",
"None",
")",
":",
"if",
"hasattr",
"(",
"packagemod",
",",
"'__path__'",
")",
":",
"yield",
"packagemod",
".",
"__name__",
",",
"packagemod",
",",
"True",
"for",
"importer",
",",
"modname",
",",
"ispkg",
"in",
"walk_packages",
"(",
"packagemod",
".",
"__path__",
",",
"packagemod",
".",
"__name__",
"+",
"'.'",
",",
"skip_regex",
"=",
"skip_regex",
")",
":",
"# if skip_regex and re.match(skip_regex, modname):",
"# print(\"skipping %s %s\" % (modname, skip_regex))",
"# mod = None",
"# else:",
"if",
"modname",
"not",
"in",
"sys",
".",
"modules",
":",
"if",
"verbose",
":",
"print",
"(",
"\"importing %s\"",
"%",
"(",
"modname",
",",
")",
")",
"try",
":",
"mod",
"=",
"importer",
".",
"find_module",
"(",
"modname",
")",
".",
"load_module",
"(",
"modname",
")",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"\"error importing %s: %s\"",
"%",
"(",
"modname",
",",
"e",
")",
")",
"mod",
"=",
"None",
"else",
":",
"mod",
"=",
"sys",
".",
"modules",
"[",
"modname",
"]",
"yield",
"modname",
",",
"mod",
",",
"ispkg",
"else",
":",
"yield",
"packagemod",
".",
"__name__",
",",
"packagemod",
",",
"False"
] | https://github.com/LumaPictures/pymel/blob/fa88a3f4fa18e09bb8aa9bdf4dab53d984bada72/maintenance/stubs.py#L127-L154 |
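A stdlib-only sketch of the same traversal (hypothetical `iter_subpackages`, using `pkgutil.walk_packages` in place of the repo's custom `walk_packages` helper):

```python
import importlib
import pkgutil
import sys

def iter_subpackages(packagemod):
    """Yield (modulename, moduleobject, ispkg) for a package and its descendants."""
    yield packagemod.__name__, packagemod, hasattr(packagemod, "__path__")
    if hasattr(packagemod, "__path__"):
        for _finder, modname, ispkg in pkgutil.walk_packages(
                packagemod.__path__, packagemod.__name__ + "."):
            mod = sys.modules.get(modname) or importlib.import_module(modname)
            yield modname, mod, ispkg

import json
for name, _mod, ispkg in iter_subpackages(json):
    print(name, ispkg)  # json True, then json.decoder False, json.encoder False, ...
```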
**ChunyuanLI/Optimus** — `RaceProcessor.get_dev_examples` — `code/examples/utils_multiple_choice.py`

```python
def get_dev_examples(self, data_dir):
    """See base class."""
    logger.info("LOOKING AT {} dev".format(data_dir))
    high = os.path.join(data_dir, 'dev/high')
    middle = os.path.join(data_dir, 'dev/middle')
    high = self._read_txt(high)
    middle = self._read_txt(middle)
    return self._create_examples(high + middle, 'dev')
```

Source: https://github.com/ChunyuanLI/Optimus/blob/f63f4a7ca10aea022978500a37d72dd53a37a576/code/examples/utils_multiple_choice.py#L106-L113
"def",
"get_dev_examples",
"(",
"self",
",",
"data_dir",
")",
":",
"logger",
".",
"info",
"(",
"\"LOOKING AT {} dev\"",
".",
"format",
"(",
"data_dir",
")",
")",
"high",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data_dir",
",",
"'dev/high'",
")",
"middle",
"=",
"os",
".",
"path",
".",
"join",
"(",
"data_dir",
",",
"'dev/middle'",
")",
"high",
"=",
"self",
".",
"_read_txt",
"(",
"high",
")",
"middle",
"=",
"self",
".",
"_read_txt",
"(",
"middle",
")",
"return",
"self",
".",
"_create_examples",
"(",
"high",
"+",
"middle",
",",
"'dev'",
")"
] | https://github.com/ChunyuanLI/Optimus/blob/f63f4a7ca10aea022978500a37d72dd53a37a576/code/examples/utils_multiple_choice.py#L106-L113 |
**bslatkin/dpxdt** — `superuser_required` — `dpxdt/server/auth.py`

Requires the requestor to be a super user.

```python
def superuser_required(f):
    """Requires the requestor to be a super user."""
    @functools.wraps(f)
    @login_required
    def wrapped(*args, **kwargs):
        if not (current_user.is_authenticated() and current_user.superuser):
            abort(403)
        return f(*args, **kwargs)
    return wrapped
```

Source: https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/server/auth.py#L174-L182
"def",
"superuser_required",
"(",
"f",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"f",
")",
"@",
"login_required",
"def",
"wrapped",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"(",
"current_user",
".",
"is_authenticated",
"(",
")",
"and",
"current_user",
".",
"superuser",
")",
":",
"abort",
"(",
"403",
")",
"return",
"f",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"wrapped"
] | https://github.com/bslatkin/dpxdt/blob/9f860de1731021d99253670429e5f2157e1f6297/dpxdt/server/auth.py#L174-L182 |
**aio-libs/create-aio-app** — `parse_arguments` — `create_aio_app/utils/config.py`

Parse console arguments and return a config object.

```python
def parse_arguments() -> dict:
    """
    Parse console arguments and return config object.
    """
    parser = argparse.ArgumentParser(
        prog="create-aio-app",
        description=(
            "create-aio-app - a tool that helps quickly create a basis "
            "for the microservice on aiohttp and prepare the development "
            "environment."
        )
    )
    parser.add_argument(
        "name",
        type=name_type,
        nargs='?',
        metavar="<project-name>",
        help='the name of the future project.'
    )
    parser.add_argument(
        '--redis',
        action='store_true',
        help='added redis settings to the generating project'
    )
    parser.add_argument(
        '--without-postgres',
        action='store_true',
        help='generate project without postgres settings'
    )
    parser.add_argument(
        '--uvloop',
        action='store_true',
        help='use uvloop event loop for aiohttp'
    )
    return parser.parse_args().__dict__
```

Source: https://github.com/aio-libs/create-aio-app/blob/66e6418ec33e090fa16c589a0e7d22bfceb8e75a/create_aio_app/utils/config.py#L20-L55
"def",
"parse_arguments",
"(",
")",
"->",
"dict",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"prog",
"=",
"\"create-aio-app\"",
",",
"description",
"=",
"(",
"\"create-aio-app - a tool that helps quickly create a basis \"",
"\"for the microservice on aiohttp and prepare the development \"",
"\"environment.\"",
")",
")",
"parser",
".",
"add_argument",
"(",
"\"name\"",
",",
"type",
"=",
"name_type",
",",
"nargs",
"=",
"'?'",
",",
"metavar",
"=",
"\"<project-name>\"",
",",
"help",
"=",
"'the name of the future project.'",
")",
"parser",
".",
"add_argument",
"(",
"'--redis'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"'added redis settings to the generating project'",
")",
"parser",
".",
"add_argument",
"(",
"'--without-postgres'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"'generate project without postgres settings'",
")",
"parser",
".",
"add_argument",
"(",
"'--uvloop'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"'use uvloop event loop for aiohttp'",
")",
"return",
"parser",
".",
"parse_args",
"(",
")",
".",
"__dict__"
] | https://github.com/aio-libs/create-aio-app/blob/66e6418ec33e090fa16c589a0e7d22bfceb8e75a/create_aio_app/utils/config.py#L20-L55 |
**Qiskit/qiskit-terra** — `YGate.__array__` — `qiskit/circuit/library/standard_gates/y.py`

Return a numpy.array for the Y gate.

```python
def __array__(self, dtype=None):
    """Return a numpy.array for the Y gate."""
    return numpy.array([[0, -1j], [1j, 0]], dtype=dtype)
```

Source: https://github.com/Qiskit/qiskit-terra/blob/b66030e3b9192efdd3eb95cf25c6545fe0a13da4/qiskit/circuit/library/standard_gates/y.py#L114-L116
"def",
"__array__",
"(",
"self",
",",
"dtype",
"=",
"None",
")",
":",
"return",
"numpy",
".",
"array",
"(",
"[",
"[",
"0",
",",
"-",
"1j",
"]",
",",
"[",
"1j",
",",
"0",
"]",
"]",
",",
"dtype",
"=",
"dtype",
")"
] | https://github.com/Qiskit/qiskit-terra/blob/b66030e3b9192efdd3eb95cf25c6545fe0a13da4/qiskit/circuit/library/standard_gates/y.py#L114-L116 |
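`__array__` is NumPy's conversion hook, so `numpy.asarray` on a gate instance yields its matrix. A minimal standalone sketch of the protocol (the real class lives in Qiskit; `YGateSketch` is a hypothetical stand-in):

```python
import numpy

class YGateSketch:
    """Bare-bones stand-in exposing the same __array__ hook."""
    def __array__(self, dtype=None):
        return numpy.array([[0, -1j], [1j, 0]], dtype=dtype)

y = numpy.asarray(YGateSketch())  # numpy calls __array__ behind the scenes
print(y)
print(y @ y)                      # Y·Y = I, as expected for a Pauli matrix
```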
**tp4a/teleport** — `VGenericEngine._generate` — `server/www/packages/packages-linux/x64/cffi/vengine_gen.py`

```python
def _generate(self, step_name):
    for name, tp in self._get_declarations():
        kind, realname = name.split(' ', 1)
        try:
            method = getattr(self, '_generate_gen_%s_%s' % (kind,
                                                            step_name))
        except AttributeError:
            raise VerificationError(
                "not implemented in verify(): %r" % name)
        try:
            method(tp, realname)
        except Exception as e:
            model.attach_exception_info(e, name)
            raise
```

Source: https://github.com/tp4a/teleport/blob/1fafd34f1f775d2cf80ea4af6e44468d8e0b24ad/server/www/packages/packages-linux/x64/cffi/vengine_gen.py#L99-L112
"def",
"_generate",
"(",
"self",
",",
"step_name",
")",
":",
"for",
"name",
",",
"tp",
"in",
"self",
".",
"_get_declarations",
"(",
")",
":",
"kind",
",",
"realname",
"=",
"name",
".",
"split",
"(",
"' '",
",",
"1",
")",
"try",
":",
"method",
"=",
"getattr",
"(",
"self",
",",
"'_generate_gen_%s_%s'",
"%",
"(",
"kind",
",",
"step_name",
")",
")",
"except",
"AttributeError",
":",
"raise",
"VerificationError",
"(",
"\"not implemented in verify(): %r\"",
"%",
"name",
")",
"try",
":",
"method",
"(",
"tp",
",",
"realname",
")",
"except",
"Exception",
"as",
"e",
":",
"model",
".",
"attach_exception_info",
"(",
"e",
",",
"name",
")",
"raise"
] | https://github.com/tp4a/teleport/blob/1fafd34f1f775d2cf80ea4af6e44468d8e0b24ad/server/www/packages/packages-linux/x64/cffi/vengine_gen.py#L99-L112 |
**ospalh/anki-addons** — `Real.conjugate` — `batteries/numbers.py`

Conjugate is a no-op for Reals.

```python
def conjugate(self):
    """Conjugate is a no-op for Reals."""
    return +self
```

Source: https://github.com/ospalh/anki-addons/blob/4ece13423bd541e29d9b40ebe26ca0999a6962b1/batteries/numbers.py#L263-L265
"def",
"conjugate",
"(",
"self",
")",
":",
"return",
"+",
"self"
] | https://github.com/ospalh/anki-addons/blob/4ece13423bd541e29d9b40ebe26ca0999a6962b1/batteries/numbers.py#L263-L265 |
**tomerfiliba/plumbum** — `SessionPopen.communicate` — `plumbum/machines/session.py`

Consumes the process' stdout and stderr until it terminates.

```python
def communicate(self, input=None):
    """Consumes the process' stdout and stderr until it terminates.
    :param input: An optional bytes/buffer object to send to the process over stdin
    :returns: A tuple of (stdout, stderr)
    """
    stdout = []
    stderr = []
    sources = [("1", stdout, self.stdout)]
    if not self.isatty:
        # in tty mode, stdout and stderr are unified
        sources.append(("2", stderr, self.stderr))
    i = 0
    while sources:
        if input:
            chunk = input[:1000]
            self.stdin.write(chunk)
            self.stdin.flush()
            input = input[1000:]
        i = (i + 1) % len(sources)
        name, coll, pipe = sources[i]
        try:
            line = pipe.readline()
            shell_logger.debug("%s> %r", name, line)
        except EOFError:
            shell_logger.debug("%s> Nothing returned.", name)
            self.proc.poll()
            returncode = self.proc.returncode
            stdout = b"".join(stdout).decode(self.custom_encoding, "ignore")
            stderr = b"".join(stderr).decode(self.custom_encoding, "ignore")
            argv = self.argv.decode(self.custom_encoding, "ignore").split(";")[:1]
            if returncode == 5:
                raise IncorrectLogin(
                    argv,
                    returncode,
                    stdout,
                    stderr,
                    message="Incorrect username or password provided",
                )
            elif returncode == 6:
                raise HostPublicKeyUnknown(
                    argv,
                    returncode,
                    stdout,
                    stderr,
                    message="The authenticity of the host can't be established",
                )
            elif returncode != 0:
                raise SSHCommsError(
                    argv,
                    returncode,
                    stdout,
                    stderr,
                    message="SSH communication failed",
                )
            elif name == "2":
                raise SSHCommsChannel2Error(
                    argv,
                    returncode,
                    stdout,
                    stderr,
                    message="No stderr result detected. Does the remote have Bash as the default shell?",
                )
            else:
                raise SSHCommsError(
                    argv,
                    returncode,
                    stdout,
                    stderr,
                    message="No communication channel detected. Does the remote exist?",
                )
        if not line:
            del sources[i]
        else:
            coll.append(line)
    if self.isatty:
        stdout.pop(0)  # discard first line of prompt
    try:
        self.returncode = int(stdout.pop(-1))
    except (IndexError, ValueError):
        self.returncode = "Unknown"
    self._done = True
    stdout = b"".join(stdout)
    stderr = b"".join(stderr)
    return stdout, stderr
```

Source: https://github.com/tomerfiliba/plumbum/blob/20cdda5e8bbd9f83d64b154f6b4fcd28216c63e1/plumbum/machines/session.py#L100-L186
"def",
"communicate",
"(",
"self",
",",
"input",
"=",
"None",
")",
":",
"stdout",
"=",
"[",
"]",
"stderr",
"=",
"[",
"]",
"sources",
"=",
"[",
"(",
"\"1\"",
",",
"stdout",
",",
"self",
".",
"stdout",
")",
"]",
"if",
"not",
"self",
".",
"isatty",
":",
"# in tty mode, stdout and stderr are unified",
"sources",
".",
"append",
"(",
"(",
"\"2\"",
",",
"stderr",
",",
"self",
".",
"stderr",
")",
")",
"i",
"=",
"0",
"while",
"sources",
":",
"if",
"input",
":",
"chunk",
"=",
"input",
"[",
":",
"1000",
"]",
"self",
".",
"stdin",
".",
"write",
"(",
"chunk",
")",
"self",
".",
"stdin",
".",
"flush",
"(",
")",
"input",
"=",
"input",
"[",
"1000",
":",
"]",
"i",
"=",
"(",
"i",
"+",
"1",
")",
"%",
"len",
"(",
"sources",
")",
"name",
",",
"coll",
",",
"pipe",
"=",
"sources",
"[",
"i",
"]",
"try",
":",
"line",
"=",
"pipe",
".",
"readline",
"(",
")",
"shell_logger",
".",
"debug",
"(",
"\"%s> %r\"",
",",
"name",
",",
"line",
")",
"except",
"EOFError",
":",
"shell_logger",
".",
"debug",
"(",
"\"%s> Nothing returned.\"",
",",
"name",
")",
"self",
".",
"proc",
".",
"poll",
"(",
")",
"returncode",
"=",
"self",
".",
"proc",
".",
"returncode",
"stdout",
"=",
"b\"\"",
".",
"join",
"(",
"stdout",
")",
".",
"decode",
"(",
"self",
".",
"custom_encoding",
",",
"\"ignore\"",
")",
"stderr",
"=",
"b\"\"",
".",
"join",
"(",
"stderr",
")",
".",
"decode",
"(",
"self",
".",
"custom_encoding",
",",
"\"ignore\"",
")",
"argv",
"=",
"self",
".",
"argv",
".",
"decode",
"(",
"self",
".",
"custom_encoding",
",",
"\"ignore\"",
")",
".",
"split",
"(",
"\";\"",
")",
"[",
":",
"1",
"]",
"if",
"returncode",
"==",
"5",
":",
"raise",
"IncorrectLogin",
"(",
"argv",
",",
"returncode",
",",
"stdout",
",",
"stderr",
",",
"message",
"=",
"\"Incorrect username or password provided\"",
",",
")",
"elif",
"returncode",
"==",
"6",
":",
"raise",
"HostPublicKeyUnknown",
"(",
"argv",
",",
"returncode",
",",
"stdout",
",",
"stderr",
",",
"message",
"=",
"\"The authenticity of the host can't be established\"",
",",
")",
"elif",
"returncode",
"!=",
"0",
":",
"raise",
"SSHCommsError",
"(",
"argv",
",",
"returncode",
",",
"stdout",
",",
"stderr",
",",
"message",
"=",
"\"SSH communication failed\"",
",",
")",
"elif",
"name",
"==",
"\"2\"",
":",
"raise",
"SSHCommsChannel2Error",
"(",
"argv",
",",
"returncode",
",",
"stdout",
",",
"stderr",
",",
"message",
"=",
"\"No stderr result detected. Does the remote have Bash as the default shell?\"",
",",
")",
"else",
":",
"raise",
"SSHCommsError",
"(",
"argv",
",",
"returncode",
",",
"stdout",
",",
"stderr",
",",
"message",
"=",
"\"No communication channel detected. Does the remote exist?\"",
",",
")",
"if",
"not",
"line",
":",
"del",
"sources",
"[",
"i",
"]",
"else",
":",
"coll",
".",
"append",
"(",
"line",
")",
"if",
"self",
".",
"isatty",
":",
"stdout",
".",
"pop",
"(",
"0",
")",
"# discard first line of prompt",
"try",
":",
"self",
".",
"returncode",
"=",
"int",
"(",
"stdout",
".",
"pop",
"(",
"-",
"1",
")",
")",
"except",
"(",
"IndexError",
",",
"ValueError",
")",
":",
"self",
".",
"returncode",
"=",
"\"Unknown\"",
"self",
".",
"_done",
"=",
"True",
"stdout",
"=",
"b\"\"",
".",
"join",
"(",
"stdout",
")",
"stderr",
"=",
"b\"\"",
".",
"join",
"(",
"stderr",
")",
"return",
"stdout",
",",
"stderr"
] | https://github.com/tomerfiliba/plumbum/blob/20cdda5e8bbd9f83d64b154f6b4fcd28216c63e1/plumbum/machines/session.py#L100-L186 |
**arthurdejong/python-stdnum** — `_bit_length` — `stdnum/meid.py`

Return the number of bits necessary to store the number in binary.

```python
def _bit_length(n):
    """Return the number of bits necessary to store the number in binary."""
    try:
        return n.bit_length()
    except AttributeError:  # pragma: no cover (Python 2.6 only)
        import math
        return int(math.log(n, 2)) + 1
```

Source: https://github.com/arthurdejong/python-stdnum/blob/02dec52602ae0709b940b781fc1fcebfde7340b7/stdnum/meid.py#L108-L114
"def",
"_bit_length",
"(",
"n",
")",
":",
"try",
":",
"return",
"n",
".",
"bit_length",
"(",
")",
"except",
"AttributeError",
":",
"# pragma: no cover (Python 2.6 only)",
"import",
"math",
"return",
"int",
"(",
"math",
".",
"log",
"(",
"n",
",",
"2",
")",
")",
"+",
"1"
] | https://github.com/arthurdejong/python-stdnum/blob/02dec52602ae0709b940b781fc1fcebfde7340b7/stdnum/meid.py#L108-L114 |
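A quick check of what both branches compute (the first simply defers to Python's built-in on 2.7+):

```python
import math

for n in (1, 10, 255, 256):
    print(n, n.bit_length(), int(math.log(n, 2)) + 1)
# 1 1 1 / 10 4 4 / 255 8 8 / 256 9 9 -- though the log fallback can be off by one
# for exact powers of two if the float result lands just below the true value
```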
**pyjs/pyjs** — `Base.post_order` — `pgen/lib2to3/pytree.py`

Returns a post-order iterator for the tree.

```python
def post_order(self):
    """Returns a post-order iterator for the tree.
    This must be implemented by the concrete subclass.
    """
    raise NotImplementedError
```

Source: https://github.com/pyjs/pyjs/blob/6c4a3d3a67300cd5df7f95a67ca9dcdc06950523/pgen/lib2to3/pytree.py#L89-L94
"def",
"post_order",
"(",
"self",
")",
":",
"raise",
"NotImplementedError"
] | https://github.com/pyjs/pyjs/blob/6c4a3d3a67300cd5df7f95a67ca9dcdc06950523/pgen/lib2to3/pytree.py#L89-L94 |
**lquirosd/P2PaLA** — `Arguments._build_region_types` — `utils/optparse.py`

Builds a dict mapping each region to its respective type.

```python
def _build_region_types(self):
    """build a dict of regions and their respective type"""
    reg_type = {"full_page": "TextRegion"}
    if self.opts.region_type == None:
        for reg in self.opts.regions:
            reg_type[reg] = "TextRegion"
        return reg_type
    msg = ""
    for c in self.opts.region_type:
        try:
            parent, childs = c.split(":")
            regs = childs.split(",")
            for reg in regs:
                if reg in self.opts.regions:
                    reg_type[reg] = parent
                else:
                    msg = '\nCannot assign region "{0}" to any type. {0} not defined as region'.format(
                        reg
                    )
        except:
            raise argparse.ArgumentTypeError(
                "Malformed argument {}".format(c) + msg
            )
    return reg_type
```

Source: https://github.com/lquirosd/P2PaLA/blob/254abf1f7a2a8968bbcaabda356048a32232f0ba/utils/optparse.py#L733-L756
"def",
"_build_region_types",
"(",
"self",
")",
":",
"reg_type",
"=",
"{",
"\"full_page\"",
":",
"\"TextRegion\"",
"}",
"if",
"self",
".",
"opts",
".",
"region_type",
"==",
"None",
":",
"for",
"reg",
"in",
"self",
".",
"opts",
".",
"regions",
":",
"reg_type",
"[",
"reg",
"]",
"=",
"\"TextRegion\"",
"return",
"reg_type",
"msg",
"=",
"\"\"",
"for",
"c",
"in",
"self",
".",
"opts",
".",
"region_type",
":",
"try",
":",
"parent",
",",
"childs",
"=",
"c",
".",
"split",
"(",
"\":\"",
")",
"regs",
"=",
"childs",
".",
"split",
"(",
"\",\"",
")",
"for",
"reg",
"in",
"regs",
":",
"if",
"reg",
"in",
"self",
".",
"opts",
".",
"regions",
":",
"reg_type",
"[",
"reg",
"]",
"=",
"parent",
"else",
":",
"msg",
"=",
"'\\nCannot assign region \"{0}\" to any type. {0} not defined as region'",
".",
"format",
"(",
"reg",
")",
"except",
":",
"raise",
"argparse",
".",
"ArgumentTypeError",
"(",
"\"Malformed argument {}\"",
".",
"formatt",
"(",
"c",
")",
"+",
"msg",
")",
"return",
"reg_type"
] | https://github.com/lquirosd/P2PaLA/blob/254abf1f7a2a8968bbcaabda356048a32232f0ba/utils/optparse.py#L733-L756 |
|
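A standalone sketch of the "parent:child1,child2" splitting performed by _build_region_types above (the region and type names are invented for illustration):

    # Hypothetical --region_type values; each entry maps a parent type
    # to a comma-separated list of region names.
    region_type = {"full_page": "TextRegion"}
    for c in ["TextRegion:paragraph,heading", "ImageRegion:figure"]:
        parent, childs = c.split(":")
        for reg in childs.split(","):
            region_type[reg] = parent
    print(region_type)
    # {'full_page': 'TextRegion', 'paragraph': 'TextRegion',
    #  'heading': 'TextRegion', 'figure': 'ImageRegion'}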
debian-calibre/calibre | 020fc81d3936a64b2ac51459ecb796666ab6a051 | src/calibre/utils/zipfile.py | python | ZipExtFile.read1 | (self, n) | return data | Read up to n bytes with at most one read() system call. | Read up to n bytes with at most one read() system call. | [
"Read",
"up",
"to",
"n",
"bytes",
"with",
"at",
"most",
"one",
"read",
"()",
"system",
"call",
"."
] | def read1(self, n):
"""Read up to n bytes with at most one read() system call."""
# Simplify algorithm (branching) by transforming negative n to large n.
if n < 0 or n is None:
n = self.MAX_N
# Bytes available in read buffer.
len_readbuffer = len(self._readbuffer) - self._offset
# Read from file.
if self._compress_left > 0 and n > len_readbuffer + len(self._unconsumed):
nbytes = n - len_readbuffer - len(self._unconsumed)
nbytes = max(nbytes, self.MIN_READ_SIZE)
nbytes = min(nbytes, self._compress_left)
data = self._fileobj.read(nbytes)
self._compress_left -= len(data)
if data and self._decrypter is not None:
data = b''.join(bytes(bytearray(map(self._decrypter, bytearray(data)))))
if self._compress_type == ZIP_STORED:
self._update_crc(data, eof=(self._compress_left==0))
self._readbuffer = self._readbuffer[self._offset:] + data
self._offset = 0
else:
# Prepare deflated bytes for decompression.
self._unconsumed += data
# Handle unconsumed data.
if (len(self._unconsumed) > 0 and n > len_readbuffer and
self._compress_type == ZIP_DEFLATED):
data = self._decompressor.decompress(
self._unconsumed,
max(n - len_readbuffer, self.MIN_READ_SIZE)
)
self._unconsumed = self._decompressor.unconsumed_tail
eof = len(self._unconsumed) == 0 and self._compress_left == 0
if eof:
data += self._decompressor.flush()
self._update_crc(data, eof=eof)
self._readbuffer = self._readbuffer[self._offset:] + data
self._offset = 0
# Read from buffer.
data = self._readbuffer[self._offset: self._offset + n]
self._offset += len(data)
return data | [
"def",
"read1",
"(",
"self",
",",
"n",
")",
":",
"# Simplify algorithm (branching) by transforming negative n to large n.",
"if",
"n",
"<",
"0",
"or",
"n",
"is",
"None",
":",
"n",
"=",
"self",
".",
"MAX_N",
"# Bytes available in read buffer.",
"len_readbuffer",
"=",
"len",
"(",
"self",
".",
"_readbuffer",
")",
"-",
"self",
".",
"_offset",
"# Read from file.",
"if",
"self",
".",
"_compress_left",
">",
"0",
"and",
"n",
">",
"len_readbuffer",
"+",
"len",
"(",
"self",
".",
"_unconsumed",
")",
":",
"nbytes",
"=",
"n",
"-",
"len_readbuffer",
"-",
"len",
"(",
"self",
".",
"_unconsumed",
")",
"nbytes",
"=",
"max",
"(",
"nbytes",
",",
"self",
".",
"MIN_READ_SIZE",
")",
"nbytes",
"=",
"min",
"(",
"nbytes",
",",
"self",
".",
"_compress_left",
")",
"data",
"=",
"self",
".",
"_fileobj",
".",
"read",
"(",
"nbytes",
")",
"self",
".",
"_compress_left",
"-=",
"len",
"(",
"data",
")",
"if",
"data",
"and",
"self",
".",
"_decrypter",
"is",
"not",
"None",
":",
"data",
"=",
"b''",
".",
"join",
"(",
"bytes",
"(",
"bytearray",
"(",
"map",
"(",
"self",
".",
"_decrypter",
",",
"bytearray",
"(",
"data",
")",
")",
")",
")",
")",
"if",
"self",
".",
"_compress_type",
"==",
"ZIP_STORED",
":",
"self",
".",
"_update_crc",
"(",
"data",
",",
"eof",
"=",
"(",
"self",
".",
"_compress_left",
"==",
"0",
")",
")",
"self",
".",
"_readbuffer",
"=",
"self",
".",
"_readbuffer",
"[",
"self",
".",
"_offset",
":",
"]",
"+",
"data",
"self",
".",
"_offset",
"=",
"0",
"else",
":",
"# Prepare deflated bytes for decompression.",
"self",
".",
"_unconsumed",
"+=",
"data",
"# Handle unconsumed data.",
"if",
"(",
"len",
"(",
"self",
".",
"_unconsumed",
")",
">",
"0",
"and",
"n",
">",
"len_readbuffer",
"and",
"self",
".",
"_compress_type",
"==",
"ZIP_DEFLATED",
")",
":",
"data",
"=",
"self",
".",
"_decompressor",
".",
"decompress",
"(",
"self",
".",
"_unconsumed",
",",
"max",
"(",
"n",
"-",
"len_readbuffer",
",",
"self",
".",
"MIN_READ_SIZE",
")",
")",
"self",
".",
"_unconsumed",
"=",
"self",
".",
"_decompressor",
".",
"unconsumed_tail",
"eof",
"=",
"len",
"(",
"self",
".",
"_unconsumed",
")",
"==",
"0",
"and",
"self",
".",
"_compress_left",
"==",
"0",
"if",
"eof",
":",
"data",
"+=",
"self",
".",
"_decompressor",
".",
"flush",
"(",
")",
"self",
".",
"_update_crc",
"(",
"data",
",",
"eof",
"=",
"eof",
")",
"self",
".",
"_readbuffer",
"=",
"self",
".",
"_readbuffer",
"[",
"self",
".",
"_offset",
":",
"]",
"+",
"data",
"self",
".",
"_offset",
"=",
"0",
"# Read from buffer.",
"data",
"=",
"self",
".",
"_readbuffer",
"[",
"self",
".",
"_offset",
":",
"self",
".",
"_offset",
"+",
"n",
"]",
"self",
".",
"_offset",
"+=",
"len",
"(",
"data",
")",
"return",
"data"
] | https://github.com/debian-calibre/calibre/blob/020fc81d3936a64b2ac51459ecb796666ab6a051/src/calibre/utils/zipfile.py#L647-L697 |
|
frescobaldi/frescobaldi | 301cc977fc4ba7caa3df9e4bf905212ad5d06912 | frescobaldi_app/autocomplete/documentdata.py | python | DocumentDataSource.bookcommands | (self, cursor) | return listmodel.ListModel(sorted(set(itertools.chain(
completiondata.book,
harvest.include_identifiers(cursor),
harvest.names(cursor)))), display = util.command) | Stuff inside \\book { }. | Stuff inside \\book { }. | [
"Stuff",
"inside",
"\\\\",
"book",
"{",
"}",
"."
] | def bookcommands(self, cursor):
"""Stuff inside \\book { }. """
return listmodel.ListModel(sorted(set(itertools.chain(
completiondata.book,
harvest.include_identifiers(cursor),
harvest.names(cursor)))), display = util.command) | [
"def",
"bookcommands",
"(",
"self",
",",
"cursor",
")",
":",
"return",
"listmodel",
".",
"ListModel",
"(",
"sorted",
"(",
"set",
"(",
"itertools",
".",
"chain",
"(",
"completiondata",
".",
"book",
",",
"harvest",
".",
"include_identifiers",
"(",
"cursor",
")",
",",
"harvest",
".",
"names",
"(",
"cursor",
")",
")",
")",
")",
",",
"display",
"=",
"util",
".",
"command",
")"
] | https://github.com/frescobaldi/frescobaldi/blob/301cc977fc4ba7caa3df9e4bf905212ad5d06912/frescobaldi_app/autocomplete/documentdata.py#L88-L93 |
|
pyqt/examples | 843bb982917cecb2350b5f6d7f42c9b7fb142ec1 | src/pyqt-official/painting/transformations.py | python | RenderArea.paintEvent | (self, event) | [] | def paintEvent(self, event):
painter = QPainter(self)
painter.setRenderHint(QPainter.Antialiasing)
painter.fillRect(event.rect(), QBrush(Qt.white))
painter.translate(66, 66)
painter.save()
self.transformPainter(painter)
self.drawShape(painter)
painter.restore()
self.drawOutline(painter)
self.transformPainter(painter)
self.drawCoordinates(painter) | [
"def",
"paintEvent",
"(",
"self",
",",
"event",
")",
":",
"painter",
"=",
"QPainter",
"(",
"self",
")",
"painter",
".",
"setRenderHint",
"(",
"QPainter",
".",
"Antialiasing",
")",
"painter",
".",
"fillRect",
"(",
"event",
".",
"rect",
"(",
")",
",",
"QBrush",
"(",
"Qt",
".",
"white",
")",
")",
"painter",
".",
"translate",
"(",
"66",
",",
"66",
")",
"painter",
".",
"save",
"(",
")",
"self",
".",
"transformPainter",
"(",
"painter",
")",
"self",
".",
"drawShape",
"(",
"painter",
")",
"painter",
".",
"restore",
"(",
")",
"self",
".",
"drawOutline",
"(",
"painter",
")",
"self",
".",
"transformPainter",
"(",
"painter",
")",
"self",
".",
"drawCoordinates",
"(",
"painter",
")"
] | https://github.com/pyqt/examples/blob/843bb982917cecb2350b5f6d7f42c9b7fb142ec1/src/pyqt-official/painting/transformations.py#L80-L95 |
||||
scrapy/scrapy | b04cfa48328d5d5749dca6f50fa34e0cfc664c89 | scrapy/extensions/feedexport.py | python | FeedExporter.from_crawler | (cls, crawler) | return exporter | [] | def from_crawler(cls, crawler):
exporter = cls(crawler)
crawler.signals.connect(exporter.open_spider, signals.spider_opened)
crawler.signals.connect(exporter.close_spider, signals.spider_closed)
crawler.signals.connect(exporter.item_scraped, signals.item_scraped)
return exporter | [
"def",
"from_crawler",
"(",
"cls",
",",
"crawler",
")",
":",
"exporter",
"=",
"cls",
"(",
"crawler",
")",
"crawler",
".",
"signals",
".",
"connect",
"(",
"exporter",
".",
"open_spider",
",",
"signals",
".",
"spider_opened",
")",
"crawler",
".",
"signals",
".",
"connect",
"(",
"exporter",
".",
"close_spider",
",",
"signals",
".",
"spider_closed",
")",
"crawler",
".",
"signals",
".",
"connect",
"(",
"exporter",
".",
"item_scraped",
",",
"signals",
".",
"item_scraped",
")",
"return",
"exporter"
] | https://github.com/scrapy/scrapy/blob/b04cfa48328d5d5749dca6f50fa34e0cfc664c89/scrapy/extensions/feedexport.py#L288-L293 |
|||
pulp/pulp | a0a28d804f997b6f81c391378aff2e4c90183df9 | agent/pulp/agent/gofer/pulpplugin.py | python | Conduit.update_progress | (self, report) | Send the updated progress report.
:param report: A handler progress report.
:type report: object | Send the updated progress report.
:param report: A handler progress report.
:type report: object | [
"Send",
"the",
"updated",
"progress",
"report",
".",
":",
"param",
"report",
":",
"A",
"handler",
"progress",
"report",
".",
":",
"type",
"report",
":",
"object"
] | def update_progress(self, report):
"""
Send the updated progress report.
:param report: A handler progress report.
:type report: object
"""
context = Context.current()
context.progress.details = report
context.progress.report() | [
"def",
"update_progress",
"(",
"self",
",",
"report",
")",
":",
"context",
"=",
"Context",
".",
"current",
"(",
")",
"context",
".",
"progress",
".",
"details",
"=",
"report",
"context",
".",
"progress",
".",
"report",
"(",
")"
] | https://github.com/pulp/pulp/blob/a0a28d804f997b6f81c391378aff2e4c90183df9/agent/pulp/agent/gofer/pulpplugin.py#L317-L325 |
||
BigBrotherBot/big-brother-bot | 848823c71413c86e7f1ff9584f43e08d40a7f2c0 | b3/plugins/poweradminurt/iourt42.py | python | Poweradminurt42Plugin.cmd_pajump | (self, data, client, cmd=None) | Change game type to Jump
(You can safely use the command without the 'pa' at the beginning) | Change game type to Jump
(You can safely use the command without the 'pa' at the beginning) | [
"Change",
"game",
"type",
"to",
"Jump",
"(",
"You",
"can",
"safely",
"use",
"the",
"command",
"without",
"the",
"pa",
"at",
"the",
"beginning",
")"
] | def cmd_pajump(self, data, client, cmd=None):
"""
Change game type to Jump
(You can safely use the command without the 'pa' at the beginning)
"""
self.console.setCvar('g_gametype', '9')
if client:
client.message('^7game type changed to ^4Jump')
self.set_configmode('jump') | [
"def",
"cmd_pajump",
"(",
"self",
",",
"data",
",",
"client",
",",
"cmd",
"=",
"None",
")",
":",
"self",
".",
"console",
".",
"setCvar",
"(",
"'g_gametype'",
",",
"'9'",
")",
"if",
"client",
":",
"client",
".",
"message",
"(",
"'^7game type changed to ^4Jump'",
")",
"self",
".",
"set_configmode",
"(",
"'jump'",
")"
] | https://github.com/BigBrotherBot/big-brother-bot/blob/848823c71413c86e7f1ff9584f43e08d40a7f2c0/b3/plugins/poweradminurt/iourt42.py#L194-L202 |
||
microsoft/nni | 31f11f51249660930824e888af0d4e022823285c | nni/algorithms/nas/pytorch/proxylessnas/mutator.py | python | ProxylessNasMutator.arch_disable_grad | (self) | Disable gradient of architecture weights, i.e., does not
calculate gradient for them. | Disable gradient of architecture weights, i.e., does not
calculate gradient for them. | [
"Disable",
"gradient",
"of",
"architecture",
"weights",
"i",
".",
"e",
".",
"does",
"not",
"calcuate",
"gradient",
"for",
"them",
"."
] | def arch_disable_grad(self):
"""
Disable gradient of architecture weights, i.e., does not
calculate gradient for them.
"""
for mutable in self.undedup_mutables:
mutable.registered_module.to_disable_grad() | [
"def",
"arch_disable_grad",
"(",
"self",
")",
":",
"for",
"mutable",
"in",
"self",
".",
"undedup_mutables",
":",
"mutable",
".",
"registered_module",
".",
"to_disable_grad",
"(",
")"
] | https://github.com/microsoft/nni/blob/31f11f51249660930824e888af0d4e022823285c/nni/algorithms/nas/pytorch/proxylessnas/mutator.py#L455-L461 |
||
tendenci/tendenci | 0f2c348cc0e7d41bc56f50b00ce05544b083bf1d | tendenci/apps/corporate_memberships/utils.py | python | validate_import_file | (file_path) | return all(requirements), missing_required_fields | Run import file against required fields
'name' and 'corporate_membership_type' are required fields | Run import file against required fields
'name' and 'corporate_membership_type' are required fields | [
"Run",
"import",
"file",
"against",
"required",
"fields",
"name",
"and",
"corporate_membership_type",
"are",
"required",
"fields"
] | def validate_import_file(file_path):
"""
Run import file against required fields
'name' and 'corporate_membership_type' are required fields
"""
normalize_newline(file_path)
data = csv.reader(default_storage.open(file_path, mode='rU'))
fields = next(data)
fields = [smart_str(field) for field in fields]
corp_memb_keys = [slugify(cm) for cm in fields]
required = ('name','corporate_membership_type')
requirements = [r in corp_memb_keys for r in required]
missing_required_fields = [r for r in required if r not in fields]
return all(requirements), missing_required_fields | [
"def",
"validate_import_file",
"(",
"file_path",
")",
":",
"normalize_newline",
"(",
"file_path",
")",
"data",
"=",
"csv",
".",
"reader",
"(",
"default_storage",
".",
"open",
"(",
"file_path",
",",
"mode",
"=",
"'rU'",
")",
")",
"fields",
"=",
"next",
"(",
"data",
")",
"fields",
"=",
"[",
"smart_str",
"(",
"field",
")",
"for",
"field",
"in",
"fields",
"]",
"corp_memb_keys",
"=",
"[",
"slugify",
"(",
"cm",
")",
"for",
"cm",
"in",
"fields",
"]",
"required",
"=",
"(",
"'name'",
",",
"'corporate_membership_type'",
")",
"requirements",
"=",
"[",
"r",
"in",
"corp_memb_keys",
"for",
"r",
"in",
"required",
"]",
"missing_required_fields",
"=",
"[",
"r",
"for",
"r",
"in",
"required",
"if",
"r",
"not",
"in",
"fields",
"]",
"return",
"all",
"(",
"requirements",
")",
",",
"missing_required_fields"
] | https://github.com/tendenci/tendenci/blob/0f2c348cc0e7d41bc56f50b00ce05544b083bf1d/tendenci/apps/corporate_memberships/utils.py#L480-L495 |
|
jgyates/genmon | 2cb2ed2945f55cd8c259b09ccfa9a51e23f1341e | gentemp.py | python | GenTemp.GetIDFromDeviceName | (self, device) | return "UNKNOWN_ID" | [] | def GetIDFromDeviceName(self, device):
try:
if "28-" in device or "3b-" in device:
id = device.split("/")[5]
return id
except Exception as e1:
self.LogErrorLine("Error in GetIDFromDeviceName for " + device + " : " + str(e1))
return "UNKNOWN_ID" | [
"def",
"GetIDFromDeviceName",
"(",
"self",
",",
"device",
")",
":",
"try",
":",
"if",
"\"28-\"",
"in",
"device",
"or",
"\"3b-\"",
"in",
"device",
":",
"id",
"=",
"device",
".",
"split",
"(",
"\"/\"",
")",
"[",
"5",
"]",
"return",
"id",
"except",
"Exception",
"as",
"e1",
":",
"self",
".",
"LogErrorLine",
"(",
"\"Error in GetIDFromDeviceName for \"",
"+",
"device",
"+",
"\" : \"",
"+",
"str",
"(",
"e1",
")",
")",
"return",
"\"UNKNOWN_ID\""
] | https://github.com/jgyates/genmon/blob/2cb2ed2945f55cd8c259b09ccfa9a51e23f1341e/gentemp.py#L203-L211 |
|||
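A rough illustration of the 1-Wire sysfs path that GetIDFromDeviceName above parses (the sensor ID is hypothetical):

    # The leading slash makes split("/")[0] empty, so index 5 is the
    # device directory, e.g. a DS18B20 ("28-" prefix) sensor ID.
    device = "/sys/bus/w1/devices/28-0316a4dcab12/w1_slave"
    print(device.split("/")[5])  # 28-0316a4dcab12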
modflowpy/flopy | eecd1ad193c5972093c9712e5c4b7a83284f0688 | flopy/mf6/data/mfdatascalar.py | python | MFScalar.add_one | (self) | Adds one if this is an integer scalar | Adds one if this is an integer scalar | [
"Adds",
"one",
"if",
"this",
"is",
"an",
"integer",
"scalar"
] | def add_one(self):
"""Adds one if this is an integer scalar"""
datum_type = self.structure.get_datum_type()
if datum_type == int or datum_type == np.int32:
if self._get_storage_obj().get_data() is None:
try:
self._get_storage_obj().set_data(1)
except Exception as ex:
type_, value_, traceback_ = sys.exc_info()
comment = "Could not set data to 1"
raise MFDataException(
self.structure.get_model(),
self.structure.get_package(),
self._path,
"setting data",
self.structure.name,
inspect.stack()[0][3],
type_,
value_,
traceback_,
comment,
self._simulation_data.debug,
ex,
)
else:
try:
current_val = self._get_storage_obj().get_data()
except Exception as ex:
type_, value_, traceback_ = sys.exc_info()
raise MFDataException(
self.structure.get_model(),
self.structure.get_package(),
self._path,
"getting data",
self.structure.name,
inspect.stack()[0][3],
type_,
value_,
traceback_,
None,
self._simulation_data.debug,
ex,
)
try:
self._get_storage_obj().set_data(current_val + 1)
except Exception as ex:
type_, value_, traceback_ = sys.exc_info()
comment = f'Could not increment data "{current_val}" by one.'
raise MFDataException(
self.structure.get_model(),
self.structure.get_package(),
self._path,
"setting data",
self.structure.name,
inspect.stack()[0][3],
type_,
value_,
traceback_,
comment,
self._simulation_data.debug,
ex,
)
else:
message = (
"{} of type {} does not support add one "
"operation.".format(
self._data_name, self.structure.get_datum_type()
)
)
type_, value_, traceback_ = sys.exc_info()
raise MFDataException(
self.structure.get_model(),
self.structure.get_package(),
self._path,
"adding one to scalar",
self.structure.name,
inspect.stack()[0][3],
type_,
value_,
traceback_,
message,
self._simulation_data.debug,
) | [
"def",
"add_one",
"(",
"self",
")",
":",
"datum_type",
"=",
"self",
".",
"structure",
".",
"get_datum_type",
"(",
")",
"if",
"datum_type",
"==",
"int",
"or",
"datum_type",
"==",
"np",
".",
"int32",
":",
"if",
"self",
".",
"_get_storage_obj",
"(",
")",
".",
"get_data",
"(",
")",
"is",
"None",
":",
"try",
":",
"self",
".",
"_get_storage_obj",
"(",
")",
".",
"set_data",
"(",
"1",
")",
"except",
"Exception",
"as",
"ex",
":",
"type_",
",",
"value_",
",",
"traceback_",
"=",
"sys",
".",
"exc_info",
"(",
")",
"comment",
"=",
"\"Could not set data to 1\"",
"raise",
"MFDataException",
"(",
"self",
".",
"structure",
".",
"get_model",
"(",
")",
",",
"self",
".",
"structure",
".",
"get_package",
"(",
")",
",",
"self",
".",
"_path",
",",
"\"setting data\"",
",",
"self",
".",
"structure",
".",
"name",
",",
"inspect",
".",
"stack",
"(",
")",
"[",
"0",
"]",
"[",
"3",
"]",
",",
"type_",
",",
"value_",
",",
"traceback_",
",",
"comment",
",",
"self",
".",
"_simulation_data",
".",
"debug",
",",
"ex",
",",
")",
"else",
":",
"try",
":",
"current_val",
"=",
"self",
".",
"_get_storage_obj",
"(",
")",
".",
"get_data",
"(",
")",
"except",
"Exception",
"as",
"ex",
":",
"type_",
",",
"value_",
",",
"traceback_",
"=",
"sys",
".",
"exc_info",
"(",
")",
"raise",
"MFDataException",
"(",
"self",
".",
"structure",
".",
"get_model",
"(",
")",
",",
"self",
".",
"structure",
".",
"get_package",
"(",
")",
",",
"self",
".",
"_path",
",",
"\"getting data\"",
",",
"self",
".",
"structure",
".",
"name",
",",
"inspect",
".",
"stack",
"(",
")",
"[",
"0",
"]",
"[",
"3",
"]",
",",
"type_",
",",
"value_",
",",
"traceback_",
",",
"None",
",",
"self",
".",
"_simulation_data",
".",
"debug",
",",
"ex",
",",
")",
"try",
":",
"self",
".",
"_get_storage_obj",
"(",
")",
".",
"set_data",
"(",
"current_val",
"+",
"1",
")",
"except",
"Exception",
"as",
"ex",
":",
"type_",
",",
"value_",
",",
"traceback_",
"=",
"sys",
".",
"exc_info",
"(",
")",
"comment",
"=",
"f'Could increment data \"{current_val}\" by one.'",
"raise",
"MFDataException",
"(",
"self",
".",
"structure",
".",
"get_model",
"(",
")",
",",
"self",
".",
"structure",
".",
"get_package",
"(",
")",
",",
"self",
".",
"_path",
",",
"\"setting data\"",
",",
"self",
".",
"structure",
".",
"name",
",",
"inspect",
".",
"stack",
"(",
")",
"[",
"0",
"]",
"[",
"3",
"]",
",",
"type_",
",",
"value_",
",",
"traceback_",
",",
"comment",
",",
"self",
".",
"_simulation_data",
".",
"debug",
",",
"ex",
",",
")",
"else",
":",
"message",
"=",
"(",
"\"{} of type {} does not support add one \"",
"\"operation.\"",
".",
"format",
"(",
"self",
".",
"_data_name",
",",
"self",
".",
"structure",
".",
"get_datum_type",
"(",
")",
")",
")",
"type_",
",",
"value_",
",",
"traceback_",
"=",
"sys",
".",
"exc_info",
"(",
")",
"raise",
"MFDataException",
"(",
"self",
".",
"structure",
".",
"get_model",
"(",
")",
",",
"self",
".",
"structure",
".",
"get_package",
"(",
")",
",",
"self",
".",
"_path",
",",
"\"adding one to scalar\"",
",",
"self",
".",
"structure",
".",
"name",
",",
"inspect",
".",
"stack",
"(",
")",
"[",
"0",
"]",
"[",
"3",
"]",
",",
"type_",
",",
"value_",
",",
"traceback_",
",",
"message",
",",
"self",
".",
"_simulation_data",
".",
"debug",
",",
")"
] | https://github.com/modflowpy/flopy/blob/eecd1ad193c5972093c9712e5c4b7a83284f0688/flopy/mf6/data/mfdatascalar.py#L231-L313 |
||
jython/jython3 | def4f8ec47cb7a9c799ea4c745f12badf92c5769 | lib-python/3.5.1/asyncio/events.py | python | AbstractEventLoop.create_unix_server | (self, protocol_factory, path, *,
sock=None, backlog=100, ssl=None) | A coroutine which creates a UNIX Domain Socket server.
The return value is a Server object, which can be used to stop
the service.
path is a str, representing a file system path to bind the
server socket to.
sock can optionally be specified in order to use a preexisting
socket object.
backlog is the maximum number of queued connections passed to
listen() (defaults to 100).
ssl can be set to an SSLContext to enable SSL over the
accepted connections. | A coroutine which creates a UNIX Domain Socket server. | [
"A",
"coroutine",
"which",
"creates",
"a",
"UNIX",
"Domain",
"Socket",
"server",
"."
] | def create_unix_server(self, protocol_factory, path, *,
sock=None, backlog=100, ssl=None):
"""A coroutine which creates a UNIX Domain Socket server.
The return value is a Server object, which can be used to stop
the service.
path is a str, representing a file system path to bind the
server socket to.
sock can optionally be specified in order to use a preexisting
socket object.
backlog is the maximum number of queued connections passed to
listen() (defaults to 100).
ssl can be set to an SSLContext to enable SSL over the
accepted connections.
"""
raise NotImplementedError | [
"def",
"create_unix_server",
"(",
"self",
",",
"protocol_factory",
",",
"path",
",",
"*",
",",
"sock",
"=",
"None",
",",
"backlog",
"=",
"100",
",",
"ssl",
"=",
"None",
")",
":",
"raise",
"NotImplementedError"
] | https://github.com/jython/jython3/blob/def4f8ec47cb7a9c799ea4c745f12badf92c5769/lib-python/3.5.1/asyncio/events.py#L344-L363 |
||
Dan-in-CA/SIP | 7d08d807d7730bff2b5eaaa57e743665c8b143a6 | cheroot/server.py | python | HTTPServer.bind_addr | (self, value) | Set the interface on which to listen for connections. | Set the interface on which to listen for connections. | [
"Set",
"the",
"interface",
"on",
"which",
"to",
"listen",
"for",
"connections",
"."
] | def bind_addr(self, value):
"""Set the interface on which to listen for connections."""
if isinstance(value, tuple) and value[0] in ('', None):
# Despite the socket module docs, using '' does not
# allow AI_PASSIVE to work. Passing None instead
# returns '0.0.0.0' like we want. In other words:
# host AI_PASSIVE result
# '' Y 192.168.x.y
# '' N 192.168.x.y
# None Y 0.0.0.0
# None N 127.0.0.1
# But since you can get the same effect with an explicit
# '0.0.0.0', we deny both the empty string and None as values.
raise ValueError(
"Host values of '' or None are not allowed. "
"Use '0.0.0.0' (IPv4) or '::' (IPv6) instead "
'to listen on all active interfaces.',
)
self._bind_addr = value | [
"def",
"bind_addr",
"(",
"self",
",",
"value",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"tuple",
")",
"and",
"value",
"[",
"0",
"]",
"in",
"(",
"''",
",",
"None",
")",
":",
"# Despite the socket module docs, using '' does not",
"# allow AI_PASSIVE to work. Passing None instead",
"# returns '0.0.0.0' like we want. In other words:",
"# host AI_PASSIVE result",
"# '' Y 192.168.x.y",
"# '' N 192.168.x.y",
"# None Y 0.0.0.0",
"# None N 127.0.0.1",
"# But since you can get the same effect with an explicit",
"# '0.0.0.0', we deny both the empty string and None as values.",
"raise",
"ValueError",
"(",
"\"Host values of '' or None are not allowed. \"",
"\"Use '0.0.0.0' (IPv4) or '::' (IPv6) instead \"",
"'to listen on all active interfaces.'",
",",
")",
"self",
".",
"_bind_addr",
"=",
"value"
] | https://github.com/Dan-in-CA/SIP/blob/7d08d807d7730bff2b5eaaa57e743665c8b143a6/cheroot/server.py#L1688-L1706 |
||
openstack/zaqar | 1726ac41b5369cc30e99fd652f29f5300b95d958 | zaqar/transport/wsgi/utils.py | python | get_checked_field | (document, name, value_type, default_value) | Validates and retrieves a typed field from a document.
This function attempts to look up doc[name], and raises
appropriate HTTP errors if the field is missing or not an
instance of the given type.
:param document: dict-like object
:param name: field name
:param value_type: expected value type, or '*' to accept any type
:param default_value: Default value to use if the value is missing,
or None to make the value required.
:raises HTTPBadRequest: if the field is missing or not an
instance of value_type
:returns: value obtained from doc[name] | Validates and retrieves a typed field from a document. | [
"Validates",
"and",
"retrieves",
"a",
"typed",
"field",
"from",
"a",
"document",
"."
] | def get_checked_field(document, name, value_type, default_value):
"""Validates and retrieves a typed field from a document.
This function attempts to look up doc[name], and raises
appropriate HTTP errors if the field is missing or not an
instance of the given type.
:param document: dict-like object
:param name: field name
:param value_type: expected value type, or '*' to accept any type
:param default_value: Default value to use if the value is missing,
or None to make the value required.
:raises HTTPBadRequest: if the field is missing or not an
instance of value_type
:returns: value obtained from doc[name]
"""
try:
value = document[name]
except KeyError:
if default_value is not None:
value = default_value
else:
description = _(u'Missing "{name}" field.').format(name=name)
raise errors.HTTPBadRequestBody(description)
# PERF(kgriffs): We do our own little spec thing because it is way
# faster than jsonschema.
if value_type == '*' or isinstance(value, value_type):
return value
description = _(u'The value of the "{name}" field must be a {vtype}.')
description = description.format(name=name, vtype=value_type.__name__)
raise errors.HTTPBadRequestBody(description) | [
"def",
"get_checked_field",
"(",
"document",
",",
"name",
",",
"value_type",
",",
"default_value",
")",
":",
"try",
":",
"value",
"=",
"document",
"[",
"name",
"]",
"except",
"KeyError",
":",
"if",
"default_value",
"is",
"not",
"None",
":",
"value",
"=",
"default_value",
"else",
":",
"description",
"=",
"_",
"(",
"u'Missing \"{name}\" field.'",
")",
".",
"format",
"(",
"name",
"=",
"name",
")",
"raise",
"errors",
".",
"HTTPBadRequestBody",
"(",
"description",
")",
"# PERF(kgriffs): We do our own little spec thing because it is way",
"# faster than jsonschema.",
"if",
"value_type",
"==",
"'*'",
"or",
"isinstance",
"(",
"value",
",",
"value_type",
")",
":",
"return",
"value",
"description",
"=",
"_",
"(",
"u'The value of the \"{name}\" field must be a {vtype}.'",
")",
"description",
"=",
"description",
".",
"format",
"(",
"name",
"=",
"name",
",",
"vtype",
"=",
"value_type",
".",
"__name__",
")",
"raise",
"errors",
".",
"HTTPBadRequestBody",
"(",
"description",
")"
] | https://github.com/openstack/zaqar/blob/1726ac41b5369cc30e99fd652f29f5300b95d958/zaqar/transport/wsgi/utils.py#L147-L180 |
||
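A small usage sketch for get_checked_field above (assumes the function is importable; the document contents are invented):

    doc = {"ttl": 300, "body": {"event": "created"}}
    ttl = get_checked_field(doc, "ttl", int, None)    # 300; required int
    body = get_checked_field(doc, "body", "*", None)  # any type accepted
    grade = get_checked_field(doc, "grade", int, 0)   # 0; default applied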
FederatedAI/FATE | 32540492623568ecd1afcb367360133616e02fa3 | python/federatedml/param/secure_add_example_param.py | python | SecureAddExampleParam.check | (self) | [] | def check(self):
if self.seed is not None and type(self.seed).__name__ != "int":
raise ValueError("random seed should be None or integers")
if type(self.partition).__name__ != "int" or self.partition < 1:
raise ValueError("partition should be an integer large than 0")
if type(self.data_num).__name__ != "int" or self.data_num < 1:
raise ValueError("data_num should be an integer large than 0") | [
"def",
"check",
"(",
"self",
")",
":",
"if",
"self",
".",
"seed",
"is",
"not",
"None",
"and",
"type",
"(",
"self",
".",
"seed",
")",
".",
"__name__",
"!=",
"\"int\"",
":",
"raise",
"ValueError",
"(",
"\"random seed should be None or integers\"",
")",
"if",
"type",
"(",
"self",
".",
"partition",
")",
".",
"__name__",
"!=",
"\"int\"",
"or",
"self",
".",
"partition",
"<",
"1",
":",
"raise",
"ValueError",
"(",
"\"partition should be an integer large than 0\"",
")",
"if",
"type",
"(",
"self",
".",
"data_num",
")",
".",
"__name__",
"!=",
"\"int\"",
"or",
"self",
".",
"data_num",
"<",
"1",
":",
"raise",
"ValueError",
"(",
"\"data_num should be an integer large than 0\"",
")"
] | https://github.com/FederatedAI/FATE/blob/32540492623568ecd1afcb367360133616e02fa3/python/federatedml/param/secure_add_example_param.py#L28-L36 |
||||
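A hedged sketch of the validation above (the instance is built without the real constructor, since only the three checked attributes matter here):

    p = SecureAddExampleParam.__new__(SecureAddExampleParam)
    p.seed, p.partition, p.data_num = 42, 4, 1000
    p.check()        # passes: seed is an int, partition and data_num >= 1
    p.partition = 0
    p.check()        # raises ValueError("partition should be an integer larger than 0")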
zhl2008/awd-platform | 0416b31abea29743387b10b3914581fbe8e7da5e | web_flaskbb/lib/python2.7/site-packages/whoosh/searching.py | python | Searcher.__exit__ | (self, *exc_info) | [] | def __exit__(self, *exc_info):
self.close() | [
"def",
"__exit__",
"(",
"self",
",",
"*",
"exc_info",
")",
":",
"self",
".",
"close",
"(",
")"
] | https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/lib/python2.7/site-packages/whoosh/searching.py#L161-L162 |
||||
leo-editor/leo-editor | 383d6776d135ef17d73d935a2f0ecb3ac0e99494 | leo/commands/baseCommands.py | python | BaseEditCommandsClass.getWSString | (self, s) | return ''.join([ch if ch == '\t' else ' ' for ch in s]) | Return s with all characters replaced by tab or space. | Return s with all characters replaced by tab or space. | [
"Return",
"s",
"with",
"all",
"characters",
"replaced",
"by",
"tab",
"or",
"space",
"."
] | def getWSString(self, s):
"""Return s with all characters replaced by tab or space."""
return ''.join([ch if ch == '\t' else ' ' for ch in s]) | [
"def",
"getWSString",
"(",
"self",
",",
"s",
")",
":",
"return",
"''",
".",
"join",
"(",
"[",
"ch",
"if",
"ch",
"==",
"'\\t'",
"else",
"' '",
"for",
"ch",
"in",
"s",
"]",
")"
] | https://github.com/leo-editor/leo-editor/blob/383d6776d135ef17d73d935a2f0ecb3ac0e99494/leo/commands/baseCommands.py#L82-L84 |
|
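getWSString maps every non-tab character to a space and keeps tabs, so the result occupies the same on-screen width as the original text; a standalone sketch of the same expression:

    ws = ''.join(ch if ch == '\t' else ' ' for ch in "def f():\t# x")
    assert ws == "        \t   "  # 8 spaces, tab, 3 spaces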
TesterlifeRaymond/doraemon | d5cb6e34bd5f2aa97273ce0c0c9303e32beaa333 | venv/lib/python3.6/site-packages/requests/cookies.py | python | RequestsCookieJar.copy | (self) | return new_cj | Return a copy of this RequestsCookieJar. | Return a copy of this RequestsCookieJar. | [
"Return",
"a",
"copy",
"of",
"this",
"RequestsCookieJar",
"."
] | def copy(self):
"""Return a copy of this RequestsCookieJar."""
new_cj = RequestsCookieJar()
new_cj.update(self)
return new_cj | [
"def",
"copy",
"(",
"self",
")",
":",
"new_cj",
"=",
"RequestsCookieJar",
"(",
")",
"new_cj",
".",
"update",
"(",
"self",
")",
"return",
"new_cj"
] | https://github.com/TesterlifeRaymond/doraemon/blob/d5cb6e34bd5f2aa97273ce0c0c9303e32beaa333/venv/lib/python3.6/site-packages/requests/cookies.py#L415-L419 |
|
salabim/salabim | e0de846b042daf2dc71aaf43d8adc6486b57f376 | salabim.py | python | Environment.years | (self, t) | return t * 86400 * 365 * self._time_unit | convert the given time in years to the current time unit
Parameters
----------
t : float or distribution
time in years |n|
if distribution, the distribution is sampled
Returns
-------
time in years, converted to the current time_unit : float | convert the given time in years to the current time unit | [
"convert",
"the",
"given",
"time",
"in",
"years",
"to",
"the",
"current",
"time",
"unit"
] | def years(self, t):
"""
convert the given time in years to the current time unit
Parameters
----------
t : float or distribution
time in years |n|
if distribution, the distribution is sampled
Returns
-------
time in years, converted to the current time_unit : float
"""
self._check_time_unit_na()
if callable(t):
t = t()
return t * 86400 * 365 * self._time_unit | [
"def",
"years",
"(",
"self",
",",
"t",
")",
":",
"self",
".",
"_check_time_unit_na",
"(",
")",
"if",
"callable",
"(",
"t",
")",
":",
"t",
"=",
"t",
"(",
")",
"return",
"t",
"*",
"86400",
"*",
"365",
"*",
"self",
".",
"_time_unit"
] | https://github.com/salabim/salabim/blob/e0de846b042daf2dc71aaf43d8adc6486b57f376/salabim.py#L7797-L7814 |
|
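The conversion above is plain arithmetic; a sketch assuming a time unit of one second (_time_unit == 1) and a scalar t:

    t = 1  # one year
    print(t * 86400 * 365 * 1)  # 31536000 seconds in a (non-leap) year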
beancount/beancount | cb3526a1af95b3b5be70347470c381b5a86055fe | beancount/scripts/example.py | python | date_random_seq | (date_begin, date_end, days_min, days_max) | Generate a sequence of dates with some random increase in days.
Args:
date_begin: The start date.
date_end: The end date.
days_min: The minimum number of days to advance on each iteration.
days_max: The maximum number of days to advance on each iteration.
Yields:
Instances of datetime.date. | Generate a sequence of dates with some random increase in days. | [
"Generate",
"a",
"sequence",
"of",
"dates",
"with",
"some",
"random",
"increase",
"in",
"days",
"."
] | def date_random_seq(date_begin, date_end, days_min, days_max):
"""Generate a sequence of dates with some random increase in days.
Args:
date_begin: The start date.
date_end: The end date.
days_min: The minimum number of days to advance on each iteration.
days_max: The maximum number of days to advance on each iteration.
Yields:
Instances of datetime.date.
"""
assert days_min > 0
assert days_min <= days_max
date = date_begin
while date < date_end:
nb_days_forward = random.randint(days_min, days_max)
date += datetime.timedelta(days=nb_days_forward)
if date >= date_end:
break
yield date | [
"def",
"date_random_seq",
"(",
"date_begin",
",",
"date_end",
",",
"days_min",
",",
"days_max",
")",
":",
"assert",
"days_min",
">",
"0",
"assert",
"days_min",
"<=",
"days_max",
"date",
"=",
"date_begin",
"while",
"date",
"<",
"date_end",
":",
"nb_days_forward",
"=",
"random",
".",
"randint",
"(",
"days_min",
",",
"days_max",
")",
"date",
"+=",
"datetime",
".",
"timedelta",
"(",
"days",
"=",
"nb_days_forward",
")",
"if",
"date",
">=",
"date_end",
":",
"break",
"yield",
"date"
] | https://github.com/beancount/beancount/blob/cb3526a1af95b3b5be70347470c381b5a86055fe/beancount/scripts/example.py#L210-L229 |
||
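A deterministic usage sketch for date_random_seq above (assumes the generator is in scope; the seed makes the run reproducible but the exact dates are illustrative):

    import datetime
    import random

    random.seed(0)
    begin = datetime.date(2020, 1, 1)
    end = datetime.date(2020, 2, 1)
    for d in date_random_seq(begin, end, days_min=3, days_max=10):
        print(d)  # strictly increasing dates, each 3-10 days after the last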
jborean93/pypsrp | 678df470648e9b82a1be82d6a08e056b2d3c413c | pypsrp/complex_objects.py | python | Array._build_array | (self, mae, mal) | return values | [] | def _build_array(self, mae, mal):
values = []
length = mal.pop(-1)
while True:
entry = []
for i in range(0, length):
entry.append(mae.pop(0))
values.append(entry)
if len(mae) == 0:
break
if len(mal) == 0:
values = values[0]
elif len(mal) > 1:
values = self._build_array(values, mal)
return values | [
"def",
"_build_array",
"(",
"self",
",",
"mae",
",",
"mal",
")",
":",
"values",
"=",
"[",
"]",
"length",
"=",
"mal",
".",
"pop",
"(",
"-",
"1",
")",
"while",
"True",
":",
"entry",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"length",
")",
":",
"entry",
".",
"append",
"(",
"mae",
".",
"pop",
"(",
"0",
")",
")",
"values",
".",
"append",
"(",
"entry",
")",
"if",
"len",
"(",
"mae",
")",
"==",
"0",
":",
"break",
"if",
"len",
"(",
"mal",
")",
"==",
"0",
":",
"values",
"=",
"values",
"[",
"0",
"]",
"elif",
"len",
"(",
"mal",
")",
">",
"1",
":",
"values",
"=",
"self",
".",
"_build_array",
"(",
"values",
",",
"mal",
")",
"return",
"values"
] | https://github.com/jborean93/pypsrp/blob/678df470648e9b82a1be82d6a08e056b2d3c413c/pypsrp/complex_objects.py#L1614-L1631 |
|||
TencentCloud/tencentcloud-sdk-python | 3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2 | tencentcloud/tcr/v20190924/models.py | python | PeerReplicationOption.__init__ | (self) | r"""
:param PeerRegistryUin: Uin of the instance to be synchronized
:type PeerRegistryUin: str
:param PeerRegistryToken: Permanent access Token of the instance to be synchronized
:type PeerRegistryToken: str
:param EnablePeerReplication: Whether to enable cross-main-account instance synchronization
:type EnablePeerReplication: bool | r"""
:param PeerRegistryUin: Uin of the instance to be synchronized
:type PeerRegistryUin: str
:param PeerRegistryToken: Permanent access Token of the instance to be synchronized
:type PeerRegistryToken: str
:param EnablePeerReplication: Whether to enable cross-main-account instance synchronization
:type EnablePeerReplication: bool | [
"r",
":",
"param",
"PeerRegistryUin",
":",
"待同步实例的uin",
":",
"type",
"PeerRegistryUin",
":",
"str",
":",
"param",
"PeerRegistryToken",
":",
"待同步实例的访问永久Token",
":",
"type",
"PeerRegistryToken",
":",
"str",
":",
"param",
"EnablePeerReplication",
":",
"是否开启跨主账号实例同步",
":",
"type",
"EnablePeerReplication",
":",
"bool"
] | def __init__(self):
r"""
:param PeerRegistryUin: Uin of the instance to be synchronized
:type PeerRegistryUin: str
:param PeerRegistryToken: Permanent access Token of the instance to be synchronized
:type PeerRegistryToken: str
:param EnablePeerReplication: Whether to enable cross-main-account instance synchronization
:type EnablePeerReplication: bool
"""
self.PeerRegistryUin = None
self.PeerRegistryToken = None
self.EnablePeerReplication = None | [
"def",
"__init__",
"(",
"self",
")",
":",
"self",
".",
"PeerRegistryUin",
"=",
"None",
"self",
".",
"PeerRegistryToken",
"=",
"None",
"self",
".",
"EnablePeerReplication",
"=",
"None"
] | https://github.com/TencentCloud/tencentcloud-sdk-python/blob/3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2/tencentcloud/tcr/v20190924/models.py#L5388-L5399 |
||
zhu327/doge | 60991418a0cfedc5b65d1e20cb5c11ec741bd021 | doge/rpc/server.py | python | Server.load | (self, cls: Type) | | Load the RPC methods class | Load the RPC methods class | [
"加载RPC",
"methods类"
] | def load(self, cls: Type) -> None:
"""加载RPC methods类"""
self.handler = DogeRPCServer(self.context, cls) | [
"def",
"load",
"(",
"self",
",",
"cls",
":",
"Type",
")",
"->",
"None",
":",
"self",
".",
"handler",
"=",
"DogeRPCServer",
"(",
"self",
".",
"context",
",",
"cls",
")"
] | https://github.com/zhu327/doge/blob/60991418a0cfedc5b65d1e20cb5c11ec741bd021/doge/rpc/server.py#L57-L59 |
||
openai/jukebox | 08efbbc1d4ed1a3cef96e08a931944c8b4d63bb3 | tensorboardX/tensorboardX/event_file_writer.py | python | EventsWriter.write_event | (self, event) | return self._write_serialized_event(event.SerializeToString()) | Append "event" to the file. | Append "event" to the file. | [
"Append",
"event",
"to",
"the",
"file",
"."
] | def write_event(self, event):
'''Append "event" to the file.'''
# Check if event is of type event_pb2.Event proto.
if not isinstance(event, event_pb2.Event):
raise TypeError("Expected an event_pb2.Event proto, "
" but got %s" % type(event))
return self._write_serialized_event(event.SerializeToString()) | [
"def",
"write_event",
"(",
"self",
",",
"event",
")",
":",
"# Check if event is of type event_pb2.Event proto.",
"if",
"not",
"isinstance",
"(",
"event",
",",
"event_pb2",
".",
"Event",
")",
":",
"raise",
"TypeError",
"(",
"\"Expected an event_pb2.Event proto, \"",
"\" but got %s\"",
"%",
"type",
"(",
"event",
")",
")",
"return",
"self",
".",
"_write_serialized_event",
"(",
"event",
".",
"SerializeToString",
"(",
")",
")"
] | https://github.com/openai/jukebox/blob/08efbbc1d4ed1a3cef96e08a931944c8b4d63bb3/tensorboardX/tensorboardX/event_file_writer.py#L51-L58 |
|
iGio90/Dwarf | bb3011cdffd209c7e3f5febe558053bf649ca69c | dwarf_debugger/ui/panels/panel_java_trace.py | python | JavaTracePanel.on_enumeration_start | (self) | [] | def on_enumeration_start(self):
self.class_list.clear() | [
"def",
"on_enumeration_start",
"(",
"self",
")",
":",
"self",
".",
"class_list",
".",
"clear",
"(",
")"
] | https://github.com/iGio90/Dwarf/blob/bb3011cdffd209c7e3f5febe558053bf649ca69c/dwarf_debugger/ui/panels/panel_java_trace.py#L333-L334 |
||||
numenta/nupic | b9ebedaf54f49a33de22d8d44dff7c765cdb5548 | src/nupic/encoders/base.py | python | Encoder.getDescription | (self) | **Must be overridden by subclasses.**
This returns a list of tuples, each containing (``name``, ``offset``).
The ``name`` is a string description of each sub-field, and ``offset`` is
the bit offset of the sub-field for that encoder.
For now, only the 'multi' and 'date' encoders have multiple (name, offset)
pairs. All other encoders have a single pair, where the offset is 0.
:return: list of tuples containing (name, offset) | **Must be overridden by subclasses.** | [
"**",
"Must",
"be",
"overridden",
"by",
"subclasses",
".",
"**"
] | def getDescription(self):
"""
**Must be overridden by subclasses.**
This returns a list of tuples, each containing (``name``, ``offset``).
The ``name`` is a string description of each sub-field, and ``offset`` is
the bit offset of the sub-field for that encoder.
For now, only the 'multi' and 'date' encoders have multiple (name, offset)
pairs. All other encoders have a single pair, where the offset is 0.
:return: list of tuples containing (name, offset)
"""
raise Exception("getDescription must be implemented by all subclasses") | [
"def",
"getDescription",
"(",
"self",
")",
":",
"raise",
"Exception",
"(",
"\"getDescription must be implemented by all subclasses\"",
")"
] | https://github.com/numenta/nupic/blob/b9ebedaf54f49a33de22d8d44dff7c765cdb5548/src/nupic/encoders/base.py#L381-L394 |
||
robotlearn/pyrobolearn | 9cd7c060723fda7d2779fa255ac998c2c82b8436 | pyrobolearn/tools/bridges/bridge.py | python | Bridge.interface | (self, interface) | Set the interface associated to the bridge. | Set the interface associated to the bridge. | [
"Set",
"the",
"interface",
"associated",
"to",
"the",
"bridge",
"."
] | def interface(self, interface):
"""Set the interface associated to the bridge."""
if not isinstance(interface, Interface):
raise TypeError("Expecting interface to be an instance of Interface, instead got {}".format(interface))
self._interface = interface | [
"def",
"interface",
"(",
"self",
",",
"interface",
")",
":",
"if",
"not",
"isinstance",
"(",
"interface",
",",
"Interface",
")",
":",
"raise",
"TypeError",
"(",
"\"Expecting interface to be an instance of Interface, instead got {}\"",
".",
"format",
"(",
"interface",
")",
")",
"self",
".",
"_interface",
"=",
"interface"
] | https://github.com/robotlearn/pyrobolearn/blob/9cd7c060723fda7d2779fa255ac998c2c82b8436/pyrobolearn/tools/bridges/bridge.py#L68-L72 |
||
AutodeskRoboticsLab/Mimic | 85447f0d346be66988303a6a054473d92f1ed6f4 | mimic/scripts/robotmath/transforms.py | python | vector_normalize | (v) | return v | Computes normalized vector
:param v: Vector
:return: | Computes normalized vector
:param v: Vector
:return: | [
"Computes",
"normalized",
"vector",
":",
"param",
"v",
":",
"Vector",
":",
"return",
":"
] | def vector_normalize(v):
"""
Computes normalized vector
:param v: Vector
:return:
"""
d = 0
r = range(len(v))
for i in r:
d += math.pow(v[i], 2)
d = math.sqrt(d)
for i in r:
v[i] /= d
return v | [
"def",
"vector_normalize",
"(",
"v",
")",
":",
"d",
"=",
"0",
"r",
"=",
"range",
"(",
"len",
"(",
"v",
")",
")",
"for",
"i",
"in",
"r",
":",
"d",
"+=",
"math",
".",
"pow",
"(",
"v",
"[",
"i",
"]",
",",
"2",
")",
"d",
"=",
"math",
".",
"sqrt",
"(",
"d",
")",
"for",
"i",
"in",
"r",
":",
"v",
"[",
"i",
"]",
"/=",
"d",
"return",
"v"
] | https://github.com/AutodeskRoboticsLab/Mimic/blob/85447f0d346be66988303a6a054473d92f1ed6f4/mimic/scripts/robotmath/transforms.py#L180-L193 |
|
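A quick check of vector_normalize above (assumes the function is in scope; note the input list is modified in place and also returned):

    v = [3.0, 4.0]
    print(vector_normalize(v))  # [0.6, 0.8] -- unit length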
huggingface/transformers | 623b4f7c63f60cce917677ee704d6c93ee960b4b | examples/research_projects/bertabs/run_summarization.py | python | format_summary | (translation) | return summary | Transforms the output of the `from_batch` function
into nicely formatted summaries. | Transforms the output of the `from_batch` function
into nicely formatted summaries. | [
"Transforms",
"the",
"output",
"of",
"the",
"from_batch",
"function",
"into",
"nicely",
"formatted",
"summaries",
"."
] | def format_summary(translation):
"""Transforms the output of the `from_batch` function
into nicely formatted summaries.
"""
raw_summary, _, _ = translation
summary = (
raw_summary.replace("[unused0]", "")
.replace("[unused3]", "")
.replace("[PAD]", "")
.replace("[unused1]", "")
.replace(r" +", " ")
.replace(" [unused2] ", ". ")
.replace("[unused2]", "")
.strip()
)
return summary | [
"def",
"format_summary",
"(",
"translation",
")",
":",
"raw_summary",
",",
"_",
",",
"_",
"=",
"translation",
"summary",
"=",
"(",
"raw_summary",
".",
"replace",
"(",
"\"[unused0]\"",
",",
"\"\"",
")",
".",
"replace",
"(",
"\"[unused3]\"",
",",
"\"\"",
")",
".",
"replace",
"(",
"\"[PAD]\"",
",",
"\"\"",
")",
".",
"replace",
"(",
"\"[unused1]\"",
",",
"\"\"",
")",
".",
"replace",
"(",
"r\" +\"",
",",
"\" \"",
")",
".",
"replace",
"(",
"\" [unused2] \"",
",",
"\". \"",
")",
".",
"replace",
"(",
"\"[unused2]\"",
",",
"\"\"",
")",
".",
"strip",
"(",
")",
")",
"return",
"summary"
] | https://github.com/huggingface/transformers/blob/623b4f7c63f60cce917677ee704d6c93ee960b4b/examples/research_projects/bertabs/run_summarization.py#L127-L143 |
|
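A usage sketch for format_summary above (assumes the function is in scope; the raw summary string is invented):

    raw = "the cat sat [unused2] it was happy [unused1]"
    print(format_summary((raw, None, None)))  # the cat sat. it was happy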
TencentCloud/tencentcloud-sdk-python | 3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2 | tencentcloud/gaap/v20180529/models.py | python | ModifyRealServerNameRequest.__init__ | (self) | r"""
:param RealServerName: Origin server name
:type RealServerName: str
:param RealServerId: Origin server ID
:type RealServerId: str | r"""
:param RealServerName: Origin server name
:type RealServerName: str
:param RealServerId: Origin server ID
:type RealServerId: str | [
"r",
":",
"param",
"RealServerName",
":",
"源站名称",
":",
"type",
"RealServerName",
":",
"str",
":",
"param",
"RealServerId",
":",
"源站ID",
":",
"type",
"RealServerId",
":",
"str"
] | def __init__(self):
r"""
:param RealServerName: Origin server name
:type RealServerName: str
:param RealServerId: Origin server ID
:type RealServerId: str
"""
self.RealServerName = None
self.RealServerId = None | [
"def",
"__init__",
"(",
"self",
")",
":",
"self",
".",
"RealServerName",
"=",
"None",
"self",
".",
"RealServerId",
"=",
"None"
] | https://github.com/TencentCloud/tencentcloud-sdk-python/blob/3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2/tencentcloud/gaap/v20180529/models.py#L5814-L5822 |
||
zhl2008/awd-platform | 0416b31abea29743387b10b3914581fbe8e7da5e | web_hxb2/lib/python3.5/site-packages/django/views/i18n.py | python | render_javascript_catalog | (catalog=None, plural=None) | return http.HttpResponse(template.render(context), 'text/javascript') | [] | def render_javascript_catalog(catalog=None, plural=None):
template = Engine().from_string(js_catalog_template)
def indent(s):
return s.replace('\n', '\n ')
context = Context({
'catalog_str': indent(json.dumps(
catalog, sort_keys=True, indent=2)) if catalog else None,
'formats_str': indent(json.dumps(
get_formats(), sort_keys=True, indent=2)),
'plural': plural,
})
return http.HttpResponse(template.render(context), 'text/javascript') | [
"def",
"render_javascript_catalog",
"(",
"catalog",
"=",
"None",
",",
"plural",
"=",
"None",
")",
":",
"template",
"=",
"Engine",
"(",
")",
".",
"from_string",
"(",
"js_catalog_template",
")",
"def",
"indent",
"(",
"s",
")",
":",
"return",
"s",
".",
"replace",
"(",
"'\\n'",
",",
"'\\n '",
")",
"context",
"=",
"Context",
"(",
"{",
"'catalog_str'",
":",
"indent",
"(",
"json",
".",
"dumps",
"(",
"catalog",
",",
"sort_keys",
"=",
"True",
",",
"indent",
"=",
"2",
")",
")",
"if",
"catalog",
"else",
"None",
",",
"'formats_str'",
":",
"indent",
"(",
"json",
".",
"dumps",
"(",
"get_formats",
"(",
")",
",",
"sort_keys",
"=",
"True",
",",
"indent",
"=",
"2",
")",
")",
",",
"'plural'",
":",
"plural",
",",
"}",
")",
"return",
"http",
".",
"HttpResponse",
"(",
"template",
".",
"render",
"(",
"context",
")",
",",
"'text/javascript'",
")"
] | https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_hxb2/lib/python3.5/site-packages/django/views/i18n.py#L195-L209 |
|||
caiiiac/Machine-Learning-with-Python | 1a26c4467da41ca4ebc3d5bd789ea942ef79422f | MachineLearning/venv/lib/python3.5/site-packages/sklearn/datasets/lfw.py | python | _fetch_lfw_pairs | (index_file_path, data_folder_path, slice_=None,
color=False, resize=None) | return pairs, target, np.array(['Different persons', 'Same person']) | Perform the actual data loading for the LFW pairs dataset
This operation is meant to be cached by a joblib wrapper. | Perform the actual data loading for the LFW pairs dataset | [
"Perform",
"the",
"actual",
"data",
"loading",
"for",
"the",
"LFW",
"pairs",
"dataset"
] | def _fetch_lfw_pairs(index_file_path, data_folder_path, slice_=None,
color=False, resize=None):
"""Perform the actual data loading for the LFW pairs dataset
This operation is meant to be cached by a joblib wrapper.
"""
# parse the index file to find the number of pairs to be able to allocate
# the right amount of memory before starting to decode the jpeg files
with open(index_file_path, 'rb') as index_file:
split_lines = [ln.strip().split(b('\t')) for ln in index_file]
pair_specs = [sl for sl in split_lines if len(sl) > 2]
n_pairs = len(pair_specs)
# iterating over the metadata lines for each pair to find the filename to
# decode and load in memory
target = np.zeros(n_pairs, dtype=np.int)
file_paths = list()
for i, components in enumerate(pair_specs):
if len(components) == 3:
target[i] = 1
pair = (
(components[0], int(components[1]) - 1),
(components[0], int(components[2]) - 1),
)
elif len(components) == 4:
target[i] = 0
pair = (
(components[0], int(components[1]) - 1),
(components[2], int(components[3]) - 1),
)
else:
raise ValueError("invalid line %d: %r" % (i + 1, components))
for j, (name, idx) in enumerate(pair):
try:
person_folder = join(data_folder_path, name)
except TypeError:
person_folder = join(data_folder_path, str(name, 'UTF-8'))
filenames = list(sorted(listdir(person_folder)))
file_path = join(person_folder, filenames[idx])
file_paths.append(file_path)
pairs = _load_imgs(file_paths, slice_, color, resize)
shape = list(pairs.shape)
n_faces = shape.pop(0)
shape.insert(0, 2)
shape.insert(0, n_faces // 2)
pairs.shape = shape
return pairs, target, np.array(['Different persons', 'Same person']) | [
"def",
"_fetch_lfw_pairs",
"(",
"index_file_path",
",",
"data_folder_path",
",",
"slice_",
"=",
"None",
",",
"color",
"=",
"False",
",",
"resize",
"=",
"None",
")",
":",
"# parse the index file to find the number of pairs to be able to allocate",
"# the right amount of memory before starting to decode the jpeg files",
"with",
"open",
"(",
"index_file_path",
",",
"'rb'",
")",
"as",
"index_file",
":",
"split_lines",
"=",
"[",
"ln",
".",
"strip",
"(",
")",
".",
"split",
"(",
"b",
"(",
"'\\t'",
")",
")",
"for",
"ln",
"in",
"index_file",
"]",
"pair_specs",
"=",
"[",
"sl",
"for",
"sl",
"in",
"split_lines",
"if",
"len",
"(",
"sl",
")",
">",
"2",
"]",
"n_pairs",
"=",
"len",
"(",
"pair_specs",
")",
"# iterating over the metadata lines for each pair to find the filename to",
"# decode and load in memory",
"target",
"=",
"np",
".",
"zeros",
"(",
"n_pairs",
",",
"dtype",
"=",
"np",
".",
"int",
")",
"file_paths",
"=",
"list",
"(",
")",
"for",
"i",
",",
"components",
"in",
"enumerate",
"(",
"pair_specs",
")",
":",
"if",
"len",
"(",
"components",
")",
"==",
"3",
":",
"target",
"[",
"i",
"]",
"=",
"1",
"pair",
"=",
"(",
"(",
"components",
"[",
"0",
"]",
",",
"int",
"(",
"components",
"[",
"1",
"]",
")",
"-",
"1",
")",
",",
"(",
"components",
"[",
"0",
"]",
",",
"int",
"(",
"components",
"[",
"2",
"]",
")",
"-",
"1",
")",
",",
")",
"elif",
"len",
"(",
"components",
")",
"==",
"4",
":",
"target",
"[",
"i",
"]",
"=",
"0",
"pair",
"=",
"(",
"(",
"components",
"[",
"0",
"]",
",",
"int",
"(",
"components",
"[",
"1",
"]",
")",
"-",
"1",
")",
",",
"(",
"components",
"[",
"2",
"]",
",",
"int",
"(",
"components",
"[",
"3",
"]",
")",
"-",
"1",
")",
",",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"invalid line %d: %r\"",
"%",
"(",
"i",
"+",
"1",
",",
"components",
")",
")",
"for",
"j",
",",
"(",
"name",
",",
"idx",
")",
"in",
"enumerate",
"(",
"pair",
")",
":",
"try",
":",
"person_folder",
"=",
"join",
"(",
"data_folder_path",
",",
"name",
")",
"except",
"TypeError",
":",
"person_folder",
"=",
"join",
"(",
"data_folder_path",
",",
"str",
"(",
"name",
",",
"'UTF-8'",
")",
")",
"filenames",
"=",
"list",
"(",
"sorted",
"(",
"listdir",
"(",
"person_folder",
")",
")",
")",
"file_path",
"=",
"join",
"(",
"person_folder",
",",
"filenames",
"[",
"idx",
"]",
")",
"file_paths",
".",
"append",
"(",
"file_path",
")",
"pairs",
"=",
"_load_imgs",
"(",
"file_paths",
",",
"slice_",
",",
"color",
",",
"resize",
")",
"shape",
"=",
"list",
"(",
"pairs",
".",
"shape",
")",
"n_faces",
"=",
"shape",
".",
"pop",
"(",
"0",
")",
"shape",
".",
"insert",
"(",
"0",
",",
"2",
")",
"shape",
".",
"insert",
"(",
"0",
",",
"n_faces",
"//",
"2",
")",
"pairs",
".",
"shape",
"=",
"shape",
"return",
"pairs",
",",
"target",
",",
"np",
".",
"array",
"(",
"[",
"'Different persons'",
",",
"'Same person'",
"]",
")"
] | https://github.com/caiiiac/Machine-Learning-with-Python/blob/1a26c4467da41ca4ebc3d5bd789ea942ef79422f/MachineLearning/venv/lib/python3.5/site-packages/sklearn/datasets/lfw.py#L328-L376 |
|
pgq/skytools-legacy | 8b7e6c118572a605d28b7a3403c96aeecfd0d272 | python/londiste/playback.py | python | TableState.forget | (self) | Reset all info. | Reset all info. | [
"Reset",
"all",
"info",
"."
] | def forget(self):
"""Reset all info."""
self.state = TABLE_MISSING
self.last_snapshot_tick = None
self.str_snapshot = None
self.from_snapshot = None
self.sync_tick_id = None
self.ok_batch_count = 0
self.last_tick = 0
self.table_attrs = {}
self.changed = 1
self.plugin = None
self.copy_pos = 0
self.max_parallel_copy = MAX_PARALLEL_COPY | [
"def",
"forget",
"(",
"self",
")",
":",
"self",
".",
"state",
"=",
"TABLE_MISSING",
"self",
".",
"last_snapshot_tick",
"=",
"None",
"self",
".",
"str_snapshot",
"=",
"None",
"self",
".",
"from_snapshot",
"=",
"None",
"self",
".",
"sync_tick_id",
"=",
"None",
"self",
".",
"ok_batch_count",
"=",
"0",
"self",
".",
"last_tick",
"=",
"0",
"self",
".",
"table_attrs",
"=",
"{",
"}",
"self",
".",
"changed",
"=",
"1",
"self",
".",
"plugin",
"=",
"None",
"self",
".",
"copy_pos",
"=",
"0",
"self",
".",
"max_parallel_copy",
"=",
"MAX_PARALLEL_COPY"
] | https://github.com/pgq/skytools-legacy/blob/8b7e6c118572a605d28b7a3403c96aeecfd0d272/python/londiste/playback.py#L86-L99 |
||
adamchainz/django-cors-headers | 57a5bdfef2223a2f6f973bcbae261c3055136707 | src/corsheaders/middleware.py | python | CorsMiddleware._https_referer_replace | (self, request: HttpRequest) | When https is enabled, django CSRF checking includes referer checking
which breaks when using CORS. This function updates the HTTP_REFERER
header to make sure it matches HTTP_HOST, provided that our cors logic
succeeds | When https is enabled, django CSRF checking includes referer checking
which breaks when using CORS. This function updates the HTTP_REFERER
header to make sure it matches HTTP_HOST, provided that our cors logic
succeeds | [
"When",
"https",
"is",
"enabled",
"django",
"CSRF",
"checking",
"includes",
"referer",
"checking",
"which",
"breaks",
"when",
"using",
"CORS",
".",
"This",
"function",
"updates",
"the",
"HTTP_REFERER",
"header",
"to",
"make",
"sure",
"it",
"matches",
"HTTP_HOST",
"provided",
"that",
"our",
"cors",
"logic",
"succeeds"
] | def _https_referer_replace(self, request: HttpRequest) -> None:
"""
When https is enabled, django CSRF checking includes referer checking
which breaks when using CORS. This function updates the HTTP_REFERER
header to make sure it matches HTTP_HOST, provided that our cors logic
succeeds
"""
origin = request.META.get("HTTP_ORIGIN")
if (
request.is_secure()
and origin
and "ORIGINAL_HTTP_REFERER" not in request.META
):
url = urlparse(origin)
if (
not conf.CORS_ALLOW_ALL_ORIGINS
and not self.origin_found_in_white_lists(origin, url)
):
return
try:
http_referer = request.META["HTTP_REFERER"]
http_host = "https://%s/" % request.META["HTTP_HOST"]
request.META = request.META.copy()
request.META["ORIGINAL_HTTP_REFERER"] = http_referer
request.META["HTTP_REFERER"] = http_host
except KeyError:
pass | [
"def",
"_https_referer_replace",
"(",
"self",
",",
"request",
":",
"HttpRequest",
")",
"->",
"None",
":",
"origin",
"=",
"request",
".",
"META",
".",
"get",
"(",
"\"HTTP_ORIGIN\"",
")",
"if",
"(",
"request",
".",
"is_secure",
"(",
")",
"and",
"origin",
"and",
"\"ORIGINAL_HTTP_REFERER\"",
"not",
"in",
"request",
".",
"META",
")",
":",
"url",
"=",
"urlparse",
"(",
"origin",
")",
"if",
"(",
"not",
"conf",
".",
"CORS_ALLOW_ALL_ORIGINS",
"and",
"not",
"self",
".",
"origin_found_in_white_lists",
"(",
"origin",
",",
"url",
")",
")",
":",
"return",
"try",
":",
"http_referer",
"=",
"request",
".",
"META",
"[",
"\"HTTP_REFERER\"",
"]",
"http_host",
"=",
"\"https://%s/\"",
"%",
"request",
".",
"META",
"[",
"\"HTTP_HOST\"",
"]",
"request",
".",
"META",
"=",
"request",
".",
"META",
".",
"copy",
"(",
")",
"request",
".",
"META",
"[",
"\"ORIGINAL_HTTP_REFERER\"",
"]",
"=",
"http_referer",
"request",
".",
"META",
"[",
"\"HTTP_REFERER\"",
"]",
"=",
"http_host",
"except",
"KeyError",
":",
"pass"
] | https://github.com/adamchainz/django-cors-headers/blob/57a5bdfef2223a2f6f973bcbae261c3055136707/src/corsheaders/middleware.py#L47-L76 |
||
zhl2008/awd-platform | 0416b31abea29743387b10b3914581fbe8e7da5e | web_hxb2/lib/python3.5/site-packages/django/contrib/contenttypes/fields.py | python | GenericRelation.get_content_type | (self) | return ContentType.objects.get_for_model(self.model,
for_concrete_model=self.for_concrete_model) | Return the content type associated with this field's model. | Return the content type associated with this field's model. | [
"Return",
"the",
"content",
"type",
"associated",
"with",
"this",
"field",
"s",
"model",
"."
] | def get_content_type(self):
"""
Return the content type associated with this field's model.
"""
return ContentType.objects.get_for_model(self.model,
for_concrete_model=self.for_concrete_model) | [
"def",
"get_content_type",
"(",
"self",
")",
":",
"return",
"ContentType",
".",
"objects",
".",
"get_for_model",
"(",
"self",
".",
"model",
",",
"for_concrete_model",
"=",
"self",
".",
"for_concrete_model",
")"
] | https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_hxb2/lib/python3.5/site-packages/django/contrib/contenttypes/fields.py#L430-L435 |
|
securesystemslab/zippy | ff0e84ac99442c2c55fe1d285332cfd4e185e089 | zippy/benchmarks/src/benchmarks/sympy/sympy/core/containers.py | python | Tuple.tuple_count | (self, value) | return self.args.count(value) | T.count(value) -> integer -- return number of occurrences of value | T.count(value) -> integer -- return number of occurrences of value | [
"T",
".",
"count",
"(",
"value",
")",
"-",
">",
"integer",
"--",
"return",
"number",
"of",
"occurrences",
"of",
"value"
] | def tuple_count(self, value):
"""T.count(value) -> integer -- return number of occurrences of value"""
return self.args.count(value) | [
"def",
"tuple_count",
"(",
"self",
",",
"value",
")",
":",
"return",
"self",
".",
"args",
".",
"count",
"(",
"value",
")"
] | https://github.com/securesystemslab/zippy/blob/ff0e84ac99442c2c55fe1d285332cfd4e185e089/zippy/benchmarks/src/benchmarks/sympy/sympy/core/containers.py#L104-L106 |
|
Komodo/KomodoEdit | 61edab75dce2bdb03943b387b0608ea36f548e8e | src/codeintel/play/sample.py | python | func_no_args | () | func_no_args doc | func_no_args doc | [
"func_no_args",
"doc"
] | def func_no_args():
"func_no_args doc"
pass | [
"def",
"func_no_args",
"(",
")",
":",
"pass"
] | https://github.com/Komodo/KomodoEdit/blob/61edab75dce2bdb03943b387b0608ea36f548e8e/src/codeintel/play/sample.py#L14-L16 |
||
duo-labs/py_webauthn | fe97b9841328aa84559bd2a282c07d20145845c1 | webauthn/helpers/bytes_to_base64url.py | python | bytes_to_base64url | (val: bytes) | return urlsafe_b64encode(val).decode("utf-8").replace("=", "") | Base64URL-encode the provided bytes | Base64URL-encode the provided bytes | [
"Base64URL",
"-",
"encode",
"the",
"provided",
"bytes"
] | def bytes_to_base64url(val: bytes) -> str:
"""
Base64URL-encode the provided bytes
"""
return urlsafe_b64encode(val).decode("utf-8").replace("=", "") | [
"def",
"bytes_to_base64url",
"(",
"val",
":",
"bytes",
")",
"->",
"str",
":",
"return",
"urlsafe_b64encode",
"(",
"val",
")",
".",
"decode",
"(",
"\"utf-8\"",
")",
".",
"replace",
"(",
"\"=\"",
",",
"\"\"",
")"
] | https://github.com/duo-labs/py_webauthn/blob/fe97b9841328aa84559bd2a282c07d20145845c1/webauthn/helpers/bytes_to_base64url.py#L4-L8 |
|
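A quick round trip for the helper above; the decoder shown is a hypothetical counterpart (not part of py_webauthn's record here) that restores the stripped '=' padding before calling `urlsafe_b64decode`.

```python
from base64 import urlsafe_b64encode, urlsafe_b64decode

def bytes_to_base64url(val: bytes) -> str:
    return urlsafe_b64encode(val).decode("utf-8").replace("=", "")

def base64url_to_bytes(val: str) -> bytes:
    # Hypothetical inverse: re-pad to a multiple of 4 chars before decoding.
    return urlsafe_b64decode(val + "=" * (-len(val) % 4))

raw = b"\xfa\xff\x00webauthn"
encoded = bytes_to_base64url(raw)
assert base64url_to_bytes(encoded) == raw
print(encoded)  # '-v8Ad2ViYXV0aG4' -- no '+', '/', or '=' characters
```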
jimmy201602/webterminal | d0bfe4808408c87f13b1b199b30b462d194ccd89 | elfinder/volumes/filesystem.py | python | ElfinderVolumeLocalFileSystem._join_path | (self, path1, path2) | return os.path.join(path1, path2) | Join two paths and return full path. If the latter path is
absolute, return it.
See :func:`elfinder.volumes.base.ElfinderVolumeDriver._join_path`. | Join two paths and return full path. If the latter path is
absolute, return it. | [
"Join",
"two",
"paths",
"and",
"return",
"full",
"path",
".",
"If",
"the",
"latter",
"path",
"is",
"absolute",
"return",
"it",
"."
] | def _join_path(self, path1, path2):
"""
Join two paths and return full path. If the latter path is
absolute, return it.
See :func:`elfinder.volumes.base.ElfinderVolumeDriver._join_path`.
"""
return os.path.join(path1, path2) | [
"def",
"_join_path",
"(",
"self",
",",
"path1",
",",
"path2",
")",
":",
"return",
"os",
".",
"path",
".",
"join",
"(",
"path1",
",",
"path2",
")"
] | https://github.com/jimmy201602/webterminal/blob/d0bfe4808408c87f13b1b199b30b462d194ccd89/elfinder/volumes/filesystem.py#L102-L110 |
|
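The "if the latter path is absolute, return it" behavior documented above comes for free from `os.path.join`, which discards earlier components once it sees an absolute one (POSIX semantics shown):

```python
import os.path

print(os.path.join("/srv/files", "docs/a.txt"))   # /srv/files/docs/a.txt
print(os.path.join("/srv/files", "/etc/passwd"))  # /etc/passwd  (absolute wins)
```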
Nekmo/amazon-dash | ac2b2f98282ec08036e1671fe937dfda381a911f | amazon_dash/execute.py | python | ExecuteIFTTT.get_url | (self) | return url | IFTTT Webhook url
:return: url
:rtype: str | IFTTT Webhook url | [
"IFTTT",
"Webhook",
"url"
] | def get_url(self):
"""IFTTT Webhook url
:return: url
:rtype: str
"""
if not self.data[self.execute_name]:
raise InvalidConfig(extra_body='Value for IFTTT is required on {} device. Get your key here: '
'https://ifttt.com/services/maker_webhooks/settings'.format(self.name))
if not self.data.get('event'):
raise InvalidConfig(extra_body='Event option is required for IFTTT on {} device. '
'You define the event name when creating a Webhook '
'applet'.format(self.name))
url = self.url_pattern.format(event=self.data['event'], key=self.data[self.execute_name])
return url | [
"def",
"get_url",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"data",
"[",
"self",
".",
"execute_name",
"]",
":",
"raise",
"InvalidConfig",
"(",
"extra_body",
"=",
"'Value for IFTTT is required on {} device. Get your key here: '",
"'https://ifttt.com/services/maker_webhooks/settings'",
".",
"format",
"(",
"self",
".",
"name",
")",
")",
"if",
"not",
"self",
".",
"data",
".",
"get",
"(",
"'event'",
")",
":",
"raise",
"InvalidConfig",
"(",
"extra_body",
"=",
"'Event option is required for IFTTT on {} device. '",
"'You define the event name when creating a Webhook '",
"'applet'",
".",
"format",
"(",
"self",
".",
"name",
")",
")",
"url",
"=",
"self",
".",
"url_pattern",
".",
"format",
"(",
"event",
"=",
"self",
".",
"data",
"[",
"'event'",
"]",
",",
"key",
"=",
"self",
".",
"data",
"[",
"self",
".",
"execute_name",
"]",
")",
"return",
"url"
] | https://github.com/Nekmo/amazon-dash/blob/ac2b2f98282ec08036e1671fe937dfda381a911f/amazon_dash/execute.py#L392-L406 |
|
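A standalone sketch of the same validate-then-format flow; the URL pattern matches IFTTT's documented Maker Webhooks endpoint, and the `InvalidConfig` class here is a hypothetical stand-in for amazon-dash's exception.

```python
URL_PATTERN = "https://maker.ifttt.com/trigger/{event}/with/key/{key}"

class InvalidConfig(ValueError):
    """Hypothetical stand-in for amazon_dash's InvalidConfig."""

def build_ifttt_url(key, event, name="device"):
    # Validate both required settings before formatting the webhook URL.
    if not key:
        raise InvalidConfig("Value for IFTTT is required on %s" % name)
    if not event:
        raise InvalidConfig("Event option is required for IFTTT on %s" % name)
    return URL_PATTERN.format(event=event, key=key)

print(build_ifttt_url("abc123", "dash_pressed"))
# https://maker.ifttt.com/trigger/dash_pressed/with/key/abc123
```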
inasafe/inasafe | 355eb2ce63f516b9c26af0c86a24f99e53f63f87 | safe/report/impact_report.py | python | QgsLayoutContext.save_as_raster | (self) | return self._save_as_raster | Boolean that indicates the composition will be saved as Raster.
:rtype: bool | Boolean that indicates the composition will be saved as Raster. | [
"Boolean",
"that",
"indicates",
"the",
"composition",
"will",
"be",
"saved",
"as",
"Raster",
"."
] | def save_as_raster(self):
"""Boolean that indicates the composition will be saved as Raster.
:rtype: bool
"""
return self._save_as_raster | [
"def",
"save_as_raster",
"(",
"self",
")",
":",
"return",
"self",
".",
"_save_as_raster"
] | https://github.com/inasafe/inasafe/blob/355eb2ce63f516b9c26af0c86a24f99e53f63f87/safe/report/impact_report.py#L233-L238 |
|
eegsynth/eegsynth | bb2b88be4c5758e23c3c12d0ac34c5b98896df3b | module/outputzeromq/outputzeromq.py | python | _stop | () | Stop and clean up on SystemExit, KeyboardInterrupt | Stop and clean up on SystemExit, KeyboardInterrupt | [
"Stop",
"and",
"clean",
"up",
"on",
"SystemExit",
"KeyboardInterrupt"
] | def _stop():
'''Stop and clean up on SystemExit, KeyboardInterrupt
'''
global monitor, trigger, r, context
monitor.success('Closing threads')
for thread in trigger:
thread.stop()
r.publish('OUTPUTZEROMQ_UNBLOCK', 1)
for thread in trigger:
thread.join()
context.destroy()
sys.exit() | [
"def",
"_stop",
"(",
")",
":",
"global",
"monitor",
",",
"trigger",
",",
"r",
",",
"context",
"monitor",
".",
"success",
"(",
"'Closing threads'",
")",
"for",
"thread",
"in",
"trigger",
":",
"thread",
".",
"stop",
"(",
")",
"r",
".",
"publish",
"(",
"'OUTPUTZEROMQ_UNBLOCK'",
",",
"1",
")",
"for",
"thread",
"in",
"trigger",
":",
"thread",
".",
"join",
"(",
")",
"context",
".",
"destroy",
"(",
")",
"sys",
".",
"exit",
"(",
")"
] | https://github.com/eegsynth/eegsynth/blob/bb2b88be4c5758e23c3c12d0ac34c5b98896df3b/module/outputzeromq/outputzeromq.py#L191-L202 |
||
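The shutdown order in `_stop` matters: flag every worker to stop, publish a dummy message so workers blocked on a subscribe wake up, then join. A generic sketch of that pattern with plain threads and a queue standing in for Redis pub/sub:

```python
import queue
import threading

def worker(q, stop):
    while not stop.is_set():
        q.get()               # blocks until a message (or the unblock) arrives

q, stop = queue.Queue(), threading.Event()
threads = [threading.Thread(target=worker, args=(q, stop)) for _ in range(3)]
for t in threads:
    t.start()
stop.set()                    # 1. flag the workers (thread.stop() above)
for _ in threads:
    q.put(None)               # 2. unblock anything stuck waiting, like r.publish
for t in threads:
    t.join()                  # 3. joins can no longer deadlock
print("all workers joined")
```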
cloudera/hue | 23f02102d4547c17c32bd5ea0eb24e9eadd657a4 | desktop/core/ext-py/openpyxl-2.6.4/openpyxl/worksheet/merge.py | python | MergedCellRange._get_borders | (self) | If the upper left cell of the merged cell does not yet exist, it is
created.
The upper left cell gets the border information of the bottom and right
border from the bottom right cell of the merged cell, if available. | If the upper left cell of the merged cell does not yet exist, it is
created.
The upper left cell gets the border information of the bottom and right
border from the bottom right cell of the merged cell, if available. | [
"If",
"the",
"upper",
"left",
"cell",
"of",
"the",
"merged",
"cell",
"does",
"not",
"yet",
"exist",
"it",
"is",
"created",
".",
"The",
"upper",
"left",
"cell",
"gets",
"the",
"border",
"information",
"of",
"the",
"bottom",
"and",
"right",
"border",
"from",
"the",
"bottom",
"right",
"cell",
"of",
"the",
"merged",
"cell",
"if",
"available",
"."
] | def _get_borders(self):
"""
If the upper left cell of the merged cell does not yet exist, it is
created.
The upper left cell gets the border information of the bottom and right
border from the bottom right cell of the merged cell, if available.
"""
# Top-left cell.
self.start_cell = self.ws._cells.get((self.min_row, self.min_col))
if self.start_cell is None:
self.start_cell = self.ws.cell(row=self.min_row, column=self.min_col)
# Bottom-right cell
end_cell = self.ws._cells.get((self.max_row, self.max_col))
if end_cell is not None:
self.start_cell.border += Border(right=end_cell.border.right,
bottom=end_cell.border.bottom) | [
"def",
"_get_borders",
"(",
"self",
")",
":",
"# Top-left cell.",
"self",
".",
"start_cell",
"=",
"self",
".",
"ws",
".",
"_cells",
".",
"get",
"(",
"(",
"self",
".",
"min_row",
",",
"self",
".",
"min_col",
")",
")",
"if",
"self",
".",
"start_cell",
"is",
"None",
":",
"self",
".",
"start_cell",
"=",
"self",
".",
"ws",
".",
"cell",
"(",
"row",
"=",
"self",
".",
"min_row",
",",
"column",
"=",
"self",
".",
"min_col",
")",
"# Bottom-right cell",
"end_cell",
"=",
"self",
".",
"ws",
".",
"_cells",
".",
"get",
"(",
"(",
"self",
".",
"max_row",
",",
"self",
".",
"max_col",
")",
")",
"if",
"end_cell",
"is",
"not",
"None",
":",
"self",
".",
"start_cell",
".",
"border",
"+=",
"Border",
"(",
"right",
"=",
"end_cell",
".",
"border",
".",
"right",
",",
"bottom",
"=",
"end_cell",
".",
"border",
".",
"bottom",
")"
] | https://github.com/cloudera/hue/blob/23f02102d4547c17c32bd5ea0eb24e9eadd657a4/desktop/core/ext-py/openpyxl-2.6.4/openpyxl/worksheet/merge.py#L72-L89 |
||
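To see why `_get_borders` copies the bottom/right border upward: openpyxl renders only the top-left anchor of a merged range, so styling on the bottom-right corner vanishes unless it is folded into that anchor cell. A minimal session (assuming openpyxl is installed; newer versions perform this fold automatically at merge time, which is what the method above implements):

```python
from openpyxl import Workbook
from openpyxl.styles import Border, Side

wb = Workbook()
ws = wb.active
thin = Side(style="thin")
ws["C3"].border = Border(right=thin, bottom=thin)  # style the corner first
ws.merge_cells("A1:C3")                            # only A1 is rendered now
# Fold the corner's border into the anchor, mirroring _get_borders:
ws["A1"].border += Border(right=ws["C3"].border.right,
                          bottom=ws["C3"].border.bottom)
print(ws["A1"].border.right.style, ws["A1"].border.bottom.style)  # thin thin
```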
and3rson/clay | c271cecf6b6ea6465abcdd2444171b1a565a60a3 | clay/vlc.py | python | MediaList.index_of_item | (self, p_md) | return libvlc_media_list_index_of_item(self, p_md) | Find index position of List media instance in media list.
Warning: the function will return the first matched position.
The L{lock} should be held upon entering this function.
@param p_md: media instance.
@return: position of media instance or -1 if media not found. | Find index position of List media instance in media list.
Warning: the function will return the first matched position.
The L{lock} should be held upon entering this function. | [
"Find",
"index",
"position",
"of",
"List",
"media",
"instance",
"in",
"media",
"list",
".",
"Warning",
":",
"the",
"function",
"will",
"return",
"the",
"first",
"matched",
"position",
".",
"The",
"L",
"{",
"lock",
"}",
"should",
"be",
"held",
"upon",
"entering",
"this",
"function",
"."
] | def index_of_item(self, p_md):
'''Find index position of List media instance in media list.
Warning: the function will return the first matched position.
The L{lock} should be held upon entering this function.
@param p_md: media instance.
@return: position of media instance or -1 if media not found.
'''
return libvlc_media_list_index_of_item(self, p_md) | [
"def",
"index_of_item",
"(",
"self",
",",
"p_md",
")",
":",
"return",
"libvlc_media_list_index_of_item",
"(",
"self",
",",
"p_md",
")"
] | https://github.com/and3rson/clay/blob/c271cecf6b6ea6465abcdd2444171b1a565a60a3/clay/vlc.py#L2498-L2505 |
|
conjure-up/conjure-up | d2bf8ab8e71ff01321d0e691a8d3e3833a047678 | conjureup/models/step.py | python | StepModel.get_state | (self, key, phase=None) | return app.state.get(key) or '' | Return the state data value for the given key, namespaced by the
spell, step, and optionally phase. | Return the state data value for the given key, namespaced by the
spell, step, and optionally phase. | [
"Return",
"the",
"state",
"data",
"value",
"for",
"the",
"given",
"key",
"namespaced",
"by",
"the",
"spell",
"step",
"and",
"optionally",
"phase",
"."
] | def get_state(self, key, phase=None):
"""
Return the state data value for the given key, namespaced by the
spell, step, and optionally phase.
"""
if phase is None:
key = "conjure-up.{}.{}.{}".format(app.config['spell'],
self.name,
key)
else:
key = "conjure-up.{}.{}.{}.{}".format(app.config['spell'],
self.name,
phase.value,
key)
return app.state.get(key) or '' | [
"def",
"get_state",
"(",
"self",
",",
"key",
",",
"phase",
"=",
"None",
")",
":",
"if",
"phase",
"is",
"None",
":",
"key",
"=",
"\"conjure-up.{}.{}.{}\"",
".",
"format",
"(",
"app",
".",
"config",
"[",
"'spell'",
"]",
",",
"self",
".",
"name",
",",
"key",
")",
"else",
":",
"key",
"=",
"\"conjure-up.{}.{}.{}.{}\"",
".",
"format",
"(",
"app",
".",
"config",
"[",
"'spell'",
"]",
",",
"self",
".",
"name",
",",
"phase",
".",
"value",
",",
"key",
")",
"return",
"app",
".",
"state",
".",
"get",
"(",
"key",
")",
"or",
"''"
] | https://github.com/conjure-up/conjure-up/blob/d2bf8ab8e71ff01321d0e691a8d3e3833a047678/conjureup/models/step.py#L139-L153 |
|
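The namespacing in `get_state` is plain string formatting, so the key layout can be checked without a running conjure-up. A hypothetical standalone rendition (in conjure-up, `phase` is an Enum and `phase.value` is interpolated; a plain string is used here):

```python
def state_key(spell, step, key, phase=None):
    # Mirrors StepModel.get_state's key layout.
    if phase is None:
        return "conjure-up.{}.{}.{}".format(spell, step, key)
    return "conjure-up.{}.{}.{}.{}".format(spell, step, phase, key)

print(state_key("kubernetes-core", "00_deploy", "result"))
# conjure-up.kubernetes-core.00_deploy.result
print(state_key("kubernetes-core", "00_deploy", "result", phase="after-deploy"))
# conjure-up.kubernetes-core.00_deploy.after-deploy.result
```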
Blockstream/satellite | ceb46a00e176c43a6b4170359f6948663a0616bb | blocksatcli/ip.py | python | _add_to_interfaces_d | (ifname, addr, netmask) | Create configuration file at /etc/network/interfaces.d/ | Create configuration file at /etc/network/interfaces.d/ | [
"Create",
"configuration",
"file",
"at",
"/",
"etc",
"/",
"network",
"/",
"interfaces",
".",
"d",
"/"
] | def _add_to_interfaces_d(ifname, addr, netmask):
"""Create configuration file at /etc/network/interfaces.d/"""
if_dir = "/etc/network/interfaces.d/"
cfg = ("iface {0} inet static\n"
" address {1}\n"
" netmask {2}\n").format(ifname, addr, netmask)
fname = ifname + ".conf"
path = os.path.join(if_dir, fname)
if (runner.dry):
util.fill_print("Create a file named {} at {} and add the "
"following to it:".format(fname, if_dir))
print(cfg)
return
runner.create_file(cfg, path, root=True) | [
"def",
"_add_to_interfaces_d",
"(",
"ifname",
",",
"addr",
",",
"netmask",
")",
":",
"if_dir",
"=",
"\"/etc/network/interfaces.d/\"",
"cfg",
"=",
"(",
"\"iface {0} inet static\\n\"",
"\" address {1}\\n\"",
"\" netmask {2}\\n\"",
")",
".",
"format",
"(",
"ifname",
",",
"addr",
",",
"netmask",
")",
"fname",
"=",
"ifname",
"+",
"\".conf\"",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"if_dir",
",",
"fname",
")",
"if",
"(",
"runner",
".",
"dry",
")",
":",
"util",
".",
"fill_print",
"(",
"\"Create a file named {} at {} and add the \"",
"\"following to it:\"",
".",
"format",
"(",
"fname",
",",
"if_dir",
")",
")",
"print",
"(",
"cfg",
")",
"return",
"runner",
".",
"create_file",
"(",
"cfg",
",",
"path",
",",
"root",
"=",
"True",
")"
] | https://github.com/Blockstream/satellite/blob/ceb46a00e176c43a6b4170359f6948663a0616bb/blocksatcli/ip.py#L80-L95 |
||
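The Debian-style stanza generated by `_add_to_interfaces_d` can be previewed without touching the system; this just exercises the same template (the interface name below is a hypothetical example):

```python
def interfaces_d_stanza(ifname, addr, netmask):
    # Same template the installer writes to /etc/network/interfaces.d/<ifname>.conf
    return ("iface {0} inet static\n"
            "    address {1}\n"
            "    netmask {2}\n").format(ifname, addr, netmask)

print(interfaces_d_stanza("dvb0_0", "192.168.200.2", "255.255.255.0"))
# iface dvb0_0 inet static
#     address 192.168.200.2
#     netmask 255.255.255.0
```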
inspirehep/magpie | 29dac813f3f1aed6820a667d08c7d5eb7b5051c7 | magpie/main.py | python | Magpie.fit_scaler | (self, train_dir) | return self.scaler | Fit a scaler on given data. Word vectors must be trained already.
:param train_dir: directory with '.txt' files
:return: fitted scaler object | Fit a scaler on given data. Word vectors must be trained already.
:param train_dir: directory with '.txt' files | [
"Fit",
"a",
"scaler",
"on",
"given",
"data",
".",
"Word",
"vectors",
"must",
"be",
"trained",
"already",
".",
":",
"param",
"train_dir",
":",
"directory",
"with",
".",
"txt",
"files"
] | def fit_scaler(self, train_dir):
"""
Fit a scaler on given data. Word vectors must be trained already.
:param train_dir: directory with '.txt' files
:return: fitted scaler object
"""
if not self.word2vec_model:
raise ValueError('word2vec model is not trained. ' + \
'Run train_word2vec() first.')
if self.scaler:
print('WARNING! Overwriting already fitted scaler.',
file=sys.stderr)
self.scaler = fit_scaler(train_dir, word2vec_model=self.word2vec_model)
return self.scaler | [
"def",
"fit_scaler",
"(",
"self",
",",
"train_dir",
")",
":",
"if",
"not",
"self",
".",
"word2vec_model",
":",
"raise",
"ValueError",
"(",
"'word2vec model is not trained. '",
"+",
"'Run train_word2vec() first.'",
")",
"if",
"self",
".",
"scaler",
":",
"print",
"(",
"'WARNING! Overwriting already fitted scaler.'",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"self",
".",
"scaler",
"=",
"fit_scaler",
"(",
"train_dir",
",",
"word2vec_model",
"=",
"self",
".",
"word2vec_model",
")",
"return",
"self",
".",
"scaler"
] | https://github.com/inspirehep/magpie/blob/29dac813f3f1aed6820a667d08c7d5eb7b5051c7/magpie/main.py#L256-L273 |
|
tp4a/teleport | 1fafd34f1f775d2cf80ea4af6e44468d8e0b24ad | server/www/packages/packages-windows/x86/ldap3/protocol/rfc2849.py | python | add_attributes | (attributes, all_base64) | return lines | [] | def add_attributes(attributes, all_base64):
lines = []
oc_attr = None
# objectclass first, even if this is not specified in the RFC
for attr in attributes:
if attr.lower() == 'objectclass':
for val in attributes[attr]:
lines.append(_convert_to_ldif(attr, val, all_base64))
oc_attr = attr
break
# remaining attributes
for attr in attributes:
if attr != oc_attr and attr in attributes:
for val in attributes[attr]:
lines.append(_convert_to_ldif(attr, val, all_base64))
return lines | [
"def",
"add_attributes",
"(",
"attributes",
",",
"all_base64",
")",
":",
"lines",
"=",
"[",
"]",
"oc_attr",
"=",
"None",
"# objectclass first, even if this is not specified in the RFC",
"for",
"attr",
"in",
"attributes",
":",
"if",
"attr",
".",
"lower",
"(",
")",
"==",
"'objectclass'",
":",
"for",
"val",
"in",
"attributes",
"[",
"attr",
"]",
":",
"lines",
".",
"append",
"(",
"_convert_to_ldif",
"(",
"attr",
",",
"val",
",",
"all_base64",
")",
")",
"oc_attr",
"=",
"attr",
"break",
"# remaining attributes",
"for",
"attr",
"in",
"attributes",
":",
"if",
"attr",
"!=",
"oc_attr",
"and",
"attr",
"in",
"attributes",
":",
"for",
"val",
"in",
"attributes",
"[",
"attr",
"]",
":",
"lines",
".",
"append",
"(",
"_convert_to_ldif",
"(",
"attr",
",",
"val",
",",
"all_base64",
")",
")",
"return",
"lines"
] | https://github.com/tp4a/teleport/blob/1fafd34f1f775d2cf80ea4af6e44468d8e0b24ad/server/www/packages/packages-windows/x86/ldap3/protocol/rfc2849.py#L98-L115 |
|||
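The notable design choice in `add_attributes` is emitting objectClass before the remaining attributes, even though the comment notes RFC 2849 does not require it. A self-contained sketch with a simplified stand-in for `_convert_to_ldif` (the real one also handles base64 and safe-string rules):

```python
def to_ldif(attr, val):
    # Simplified stand-in for ldap3's _convert_to_ldif (no base64 handling).
    return "%s: %s" % (attr, val)

def add_attributes(attributes):
    lines = []
    oc_attr = None
    for attr in attributes:            # objectclass first
        if attr.lower() == "objectclass":
            lines += [to_ldif(attr, v) for v in attributes[attr]]
            oc_attr = attr
            break
    for attr in attributes:            # then everything else
        if attr != oc_attr:
            lines += [to_ldif(attr, v) for v in attributes[attr]]
    return lines

entry = {"cn": ["Alice"], "objectClass": ["top", "person"], "sn": ["Liddell"]}
print("\n".join(add_attributes(entry)))
# objectClass lines come first regardless of dict insertion order
```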
tensorflow/models | 6b8bb0cbeb3e10415c7a87448f08adc3c484c1d3 | official/vision/beta/projects/yolo/dataloaders/classification_input.py | python | Parser._parse_train_image | (self, decoded_tensors) | return image | Parses image data for training. | Parses image data for training. | [
"Parses",
"image",
"data",
"for",
"training",
"."
] | def _parse_train_image(self, decoded_tensors):
"""Parses image data for training."""
image_bytes = decoded_tensors[self._image_field_key]
if self._decode_jpeg_only:
image_shape = tf.image.extract_jpeg_shape(image_bytes)
# Crops image.
cropped_image = preprocess_ops.random_crop_image_v2(
image_bytes, image_shape)
image = tf.cond(
tf.reduce_all(tf.equal(tf.shape(cropped_image), image_shape)),
lambda: preprocess_ops.center_crop_image_v2(image_bytes, image_shape),
lambda: cropped_image)
else:
# Decodes image.
image = tf.io.decode_image(image_bytes, channels=3)
image.set_shape([None, None, 3])
# Crops image.
cropped_image = preprocess_ops.random_crop_image(image)
image = tf.cond(
tf.reduce_all(tf.equal(tf.shape(cropped_image), tf.shape(image))),
lambda: preprocess_ops.center_crop_image(image),
lambda: cropped_image)
if self._aug_rand_hflip:
image = tf.image.random_flip_left_right(image)
# Resizes image.
image = tf.image.resize(
image, self._output_size, method=tf.image.ResizeMethod.BILINEAR)
image.set_shape([self._output_size[0], self._output_size[1], 3])
# Apply autoaug or randaug.
if self._augmenter is not None:
image = self._augmenter.distort(image)
# Convert image to self._dtype.
image = tf.image.convert_image_dtype(image, self._dtype)
image = image / 255.0
return image | [
"def",
"_parse_train_image",
"(",
"self",
",",
"decoded_tensors",
")",
":",
"image_bytes",
"=",
"decoded_tensors",
"[",
"self",
".",
"_image_field_key",
"]",
"if",
"self",
".",
"_decode_jpeg_only",
":",
"image_shape",
"=",
"tf",
".",
"image",
".",
"extract_jpeg_shape",
"(",
"image_bytes",
")",
"# Crops image.",
"cropped_image",
"=",
"preprocess_ops",
".",
"random_crop_image_v2",
"(",
"image_bytes",
",",
"image_shape",
")",
"image",
"=",
"tf",
".",
"cond",
"(",
"tf",
".",
"reduce_all",
"(",
"tf",
".",
"equal",
"(",
"tf",
".",
"shape",
"(",
"cropped_image",
")",
",",
"image_shape",
")",
")",
",",
"lambda",
":",
"preprocess_ops",
".",
"center_crop_image_v2",
"(",
"image_bytes",
",",
"image_shape",
")",
",",
"lambda",
":",
"cropped_image",
")",
"else",
":",
"# Decodes image.",
"image",
"=",
"tf",
".",
"io",
".",
"decode_image",
"(",
"image_bytes",
",",
"channels",
"=",
"3",
")",
"image",
".",
"set_shape",
"(",
"[",
"None",
",",
"None",
",",
"3",
"]",
")",
"# Crops image.",
"cropped_image",
"=",
"preprocess_ops",
".",
"random_crop_image",
"(",
"image",
")",
"image",
"=",
"tf",
".",
"cond",
"(",
"tf",
".",
"reduce_all",
"(",
"tf",
".",
"equal",
"(",
"tf",
".",
"shape",
"(",
"cropped_image",
")",
",",
"tf",
".",
"shape",
"(",
"image",
")",
")",
")",
",",
"lambda",
":",
"preprocess_ops",
".",
"center_crop_image",
"(",
"image",
")",
",",
"lambda",
":",
"cropped_image",
")",
"if",
"self",
".",
"_aug_rand_hflip",
":",
"image",
"=",
"tf",
".",
"image",
".",
"random_flip_left_right",
"(",
"image",
")",
"# Resizes image.",
"image",
"=",
"tf",
".",
"image",
".",
"resize",
"(",
"image",
",",
"self",
".",
"_output_size",
",",
"method",
"=",
"tf",
".",
"image",
".",
"ResizeMethod",
".",
"BILINEAR",
")",
"image",
".",
"set_shape",
"(",
"[",
"self",
".",
"_output_size",
"[",
"0",
"]",
",",
"self",
".",
"_output_size",
"[",
"1",
"]",
",",
"3",
"]",
")",
"# Apply autoaug or randaug.",
"if",
"self",
".",
"_augmenter",
"is",
"not",
"None",
":",
"image",
"=",
"self",
".",
"_augmenter",
".",
"distort",
"(",
"image",
")",
"# Convert image to self._dtype.",
"image",
"=",
"tf",
".",
"image",
".",
"convert_image_dtype",
"(",
"image",
",",
"self",
".",
"_dtype",
")",
"image",
"=",
"image",
"/",
"255.0",
"return",
"image"
] | https://github.com/tensorflow/models/blob/6b8bb0cbeb3e10415c7a87448f08adc3c484c1d3/official/vision/beta/projects/yolo/dataloaders/classification_input.py#L24-L66 |
|
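The training path above boils down to: random crop (falling back to a center crop when the random crop degenerates to the full image), optional horizontal flip, bilinear resize, then scaling to [0, 1]. A stripped-down sketch of the tail of that pipeline, assuming TensorFlow 2.x; the crop steps are omitted because `preprocess_ops` is the project's own module:

```python
import tensorflow as tf

def simplified_train_preprocess(image_bytes, output_size=(224, 224)):
    # Decode, flip, resize, and rescale, as in the tail of _parse_train_image.
    image = tf.io.decode_image(image_bytes, channels=3, expand_animations=False)
    image = tf.image.random_flip_left_right(image)
    image = tf.image.resize(image, output_size,
                            method=tf.image.ResizeMethod.BILINEAR)
    return tf.cast(image, tf.float32) / 255.0
```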
Qirky/FoxDot | 76318f9630bede48ff3994146ed644affa27bfa4 | FoxDot/lib/Effects/NewEffects.py | python | _Effect.add | (self, *args, **kwargs) | return | [] | def add(self, *args, **kwargs):
lines = ["{} = {}".format(key, value) for key, value in kwargs.items()]
self.lines.extend(lines)
return | [
"def",
"add",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"lines",
"=",
"[",
"\"{} = {}\"",
".",
"format",
"(",
"key",
",",
"value",
")",
"for",
"key",
",",
"value",
"in",
"kwargs",
".",
"items",
"(",
")",
"]",
"self",
".",
"lines",
".",
"extend",
"(",
"lines",
")",
"return"
] | https://github.com/Qirky/FoxDot/blob/76318f9630bede48ff3994146ed644affa27bfa4/FoxDot/lib/Effects/NewEffects.py#L40-L43 |
|||
DataDog/integrations-core | 934674b29d94b70ccc008f76ea172d0cdae05e1e | kubelet/datadog_checks/kubelet/prometheus.py | python | CadvisorPrometheusScraperMixin.container_network_transmit_packets_dropped_total | (self, metric, scraper_config) | [] | def container_network_transmit_packets_dropped_total(self, metric, scraper_config):
metric_name = scraper_config['namespace'] + '.network.tx_dropped'
labels = ['interface']
self._process_pod_rate(metric_name, metric, scraper_config, labels=labels) | [
"def",
"container_network_transmit_packets_dropped_total",
"(",
"self",
",",
"metric",
",",
"scraper_config",
")",
":",
"metric_name",
"=",
"scraper_config",
"[",
"'namespace'",
"]",
"+",
"'.network.tx_dropped'",
"labels",
"=",
"[",
"'interface'",
"]",
"self",
".",
"_process_pod_rate",
"(",
"metric_name",
",",
"metric",
",",
"scraper_config",
",",
"labels",
"=",
"labels",
")"
] | https://github.com/DataDog/integrations-core/blob/934674b29d94b70ccc008f76ea172d0cdae05e1e/kubelet/datadog_checks/kubelet/prometheus.py#L518-L521 |
||||
python-cmd2/cmd2 | c1f6114d52161a3b8a32d3cee1c495d79052e1fb | cmd2/cmd2.py | python | Cmd._generate_transcript | (self, history: Union[List[HistoryItem], List[str]], transcript_file: str) | Generate a transcript file from a given history of commands | Generate a transcript file from a given history of commands | [
"Generate",
"a",
"transcript",
"file",
"from",
"a",
"given",
"history",
"of",
"commands"
] | def _generate_transcript(self, history: Union[List[HistoryItem], List[str]], transcript_file: str) -> None:
"""Generate a transcript file from a given history of commands"""
self.last_result = False
# Validate the transcript file path to make sure directory exists and write access is available
transcript_path = os.path.abspath(os.path.expanduser(transcript_file))
transcript_dir = os.path.dirname(transcript_path)
if not os.path.isdir(transcript_dir) or not os.access(transcript_dir, os.W_OK):
self.perror(f"'{transcript_dir}' is not a directory or you don't have write access")
return
commands_run = 0
try:
with self.sigint_protection:
# Disable echo while we manually redirect stdout to a StringIO buffer
saved_echo = self.echo
saved_stdout = self.stdout
self.echo = False
# The problem with supporting regular expressions in transcripts
# is that they shouldn't be processed in the command, just the output.
# In addition, when we generate a transcript, any slashes in the output
# are not really intended to indicate regular expressions, so they should
# be escaped.
#
# We have to jump through some hoops here in order to catch the commands
# separately from the output and escape the slashes in the output.
transcript = ''
for history_item in history:
# build the command, complete with prompts. When we replay
# the transcript, we look for the prompts to separate
# the command from the output
first = True
command = ''
if isinstance(history_item, HistoryItem):
history_item = history_item.raw
for line in history_item.splitlines():
if first:
command += f"{self.prompt}{line}\n"
first = False
else:
command += f"{self.continuation_prompt}{line}\n"
transcript += command
# Use a StdSim object to capture output
stdsim = utils.StdSim(cast(TextIO, self.stdout))
self.stdout = cast(TextIO, stdsim)
# then run the command and let the output go into our buffer
try:
stop = self.onecmd_plus_hooks(history_item, raise_keyboard_interrupt=True)
except KeyboardInterrupt as ex:
self.perror(ex)
stop = True
commands_run += 1
# add the regex-escaped output to the transcript
transcript += stdsim.getvalue().replace('/', r'\/')
# check if we are supposed to stop
if stop:
break
finally:
with self.sigint_protection:
# Restore altered attributes to their original state
self.echo = saved_echo
self.stdout = cast(TextIO, saved_stdout)
# Check if all commands ran
if commands_run < len(history):
self.pwarning(f"Command {commands_run} triggered a stop and ended transcript generation early")
# finally, we can write the transcript out to the file
try:
with open(transcript_path, 'w') as fout:
fout.write(transcript)
except OSError as ex:
self.perror(f"Error saving transcript file '{transcript_path}': {ex}")
else:
# and let the user know what we did
if commands_run == 1:
plural = 'command and its output'
else:
plural = 'commands and their outputs'
self.pfeedback(f"{commands_run} {plural} saved to transcript file '{transcript_path}'")
self.last_result = True | [
"def",
"_generate_transcript",
"(",
"self",
",",
"history",
":",
"Union",
"[",
"List",
"[",
"HistoryItem",
"]",
",",
"List",
"[",
"str",
"]",
"]",
",",
"transcript_file",
":",
"str",
")",
"->",
"None",
":",
"self",
".",
"last_result",
"=",
"False",
"# Validate the transcript file path to make sure directory exists and write access is available",
"transcript_path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"transcript_file",
")",
")",
"transcript_dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"transcript_path",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"transcript_dir",
")",
"or",
"not",
"os",
".",
"access",
"(",
"transcript_dir",
",",
"os",
".",
"W_OK",
")",
":",
"self",
".",
"perror",
"(",
"f\"'{transcript_dir}' is not a directory or you don't have write access\"",
")",
"return",
"commands_run",
"=",
"0",
"try",
":",
"with",
"self",
".",
"sigint_protection",
":",
"# Disable echo while we manually redirect stdout to a StringIO buffer",
"saved_echo",
"=",
"self",
".",
"echo",
"saved_stdout",
"=",
"self",
".",
"stdout",
"self",
".",
"echo",
"=",
"False",
"# The problem with supporting regular expressions in transcripts",
"# is that they shouldn't be processed in the command, just the output.",
"# In addition, when we generate a transcript, any slashes in the output",
"# are not really intended to indicate regular expressions, so they should",
"# be escaped.",
"#",
"# We have to jump through some hoops here in order to catch the commands",
"# separately from the output and escape the slashes in the output.",
"transcript",
"=",
"''",
"for",
"history_item",
"in",
"history",
":",
"# build the command, complete with prompts. When we replay",
"# the transcript, we look for the prompts to separate",
"# the command from the output",
"first",
"=",
"True",
"command",
"=",
"''",
"if",
"isinstance",
"(",
"history_item",
",",
"HistoryItem",
")",
":",
"history_item",
"=",
"history_item",
".",
"raw",
"for",
"line",
"in",
"history_item",
".",
"splitlines",
"(",
")",
":",
"if",
"first",
":",
"command",
"+=",
"f\"{self.prompt}{line}\\n\"",
"first",
"=",
"False",
"else",
":",
"command",
"+=",
"f\"{self.continuation_prompt}{line}\\n\"",
"transcript",
"+=",
"command",
"# Use a StdSim object to capture output",
"stdsim",
"=",
"utils",
".",
"StdSim",
"(",
"cast",
"(",
"TextIO",
",",
"self",
".",
"stdout",
")",
")",
"self",
".",
"stdout",
"=",
"cast",
"(",
"TextIO",
",",
"stdsim",
")",
"# then run the command and let the output go into our buffer",
"try",
":",
"stop",
"=",
"self",
".",
"onecmd_plus_hooks",
"(",
"history_item",
",",
"raise_keyboard_interrupt",
"=",
"True",
")",
"except",
"KeyboardInterrupt",
"as",
"ex",
":",
"self",
".",
"perror",
"(",
"ex",
")",
"stop",
"=",
"True",
"commands_run",
"+=",
"1",
"# add the regex-escaped output to the transcript",
"transcript",
"+=",
"stdsim",
".",
"getvalue",
"(",
")",
".",
"replace",
"(",
"'/'",
",",
"r'\\/'",
")",
"# check if we are supposed to stop",
"if",
"stop",
":",
"break",
"finally",
":",
"with",
"self",
".",
"sigint_protection",
":",
"# Restore altered attributes to their original state",
"self",
".",
"echo",
"=",
"saved_echo",
"self",
".",
"stdout",
"=",
"cast",
"(",
"TextIO",
",",
"saved_stdout",
")",
"# Check if all commands ran",
"if",
"commands_run",
"<",
"len",
"(",
"history",
")",
":",
"self",
".",
"pwarning",
"(",
"f\"Command {commands_run} triggered a stop and ended transcript generation early\"",
")",
"# finally, we can write the transcript out to the file",
"try",
":",
"with",
"open",
"(",
"transcript_path",
",",
"'w'",
")",
"as",
"fout",
":",
"fout",
".",
"write",
"(",
"transcript",
")",
"except",
"OSError",
"as",
"ex",
":",
"self",
".",
"perror",
"(",
"f\"Error saving transcript file '{transcript_path}': {ex}\"",
")",
"else",
":",
"# and let the user know what we did",
"if",
"commands_run",
"==",
"1",
":",
"plural",
"=",
"'command and its output'",
"else",
":",
"plural",
"=",
"'commands and their outputs'",
"self",
".",
"pfeedback",
"(",
"f\"{commands_run} {plural} saved to transcript file '{transcript_path}'\"",
")",
"self",
".",
"last_result",
"=",
"True"
] | https://github.com/python-cmd2/cmd2/blob/c1f6114d52161a3b8a32d3cee1c495d79052e1fb/cmd2/cmd2.py#L4719-L4805 |
||
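The transcript grammar used above is simple: each command line is prefixed with the prompt (the continuation prompt for wrapped lines), and literal slashes in captured output are escaped so they are not later read back as regex delimiters. A tiny sketch of those two steps, using cmd2's usual default prompts as assumed values:

```python
def transcript_command(raw, prompt="(Cmd) ", continuation="> "):
    # Prefix the first line with the prompt, later lines with the continuation.
    lines = raw.splitlines()
    out = prompt + lines[0] + "\n"
    for line in lines[1:]:
        out += continuation + line + "\n"
    return out

print(transcript_command("alias create ls\n!ls"), end="")
# (Cmd) alias create ls
# > !ls
print("path: /usr/bin".replace("/", r"\/"))  # path: \/usr\/bin
```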
CGCookie/retopoflow | 3d8b3a47d1d661f99ab0aeb21d31370bf15de35e | addon_common/common/bezier.py | python | compute_quadratic_weights | (t) | return (t1**2, 2*t0*t1, t0**2) | [] | def compute_quadratic_weights(t):
t0, t1 = t, (1-t)
return (t1**2, 2*t0*t1, t0**2) | [
"def",
"compute_quadratic_weights",
"(",
"t",
")",
":",
"t0",
",",
"t1",
"=",
"t",
",",
"(",
"1",
"-",
"t",
")",
"return",
"(",
"t1",
"**",
"2",
",",
"2",
"*",
"t0",
"*",
"t1",
",",
"t0",
"**",
"2",
")"
] | https://github.com/CGCookie/retopoflow/blob/3d8b3a47d1d661f99ab0aeb21d31370bf15de35e/addon_common/common/bezier.py#L31-L33 |
|||
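These are the Bernstein basis weights of a quadratic Bézier curve: with t0 = t and t1 = 1-t, the returned triple is (t1², 2·t0·t1, t0²), which sums to 1 for every t. Evaluating a curve point with them:

```python
def compute_quadratic_weights(t):
    t0, t1 = t, (1 - t)
    return (t1**2, 2 * t0 * t1, t0**2)

def bezier_point(p0, p1, p2, t):
    # Weighted sum of the three control points; weights sum to 1.
    w0, w1, w2 = compute_quadratic_weights(t)
    return tuple(w0 * a + w1 * b + w2 * c for a, b, c in zip(p0, p1, p2))

print(compute_quadratic_weights(0.5))             # (0.25, 0.5, 0.25)
print(bezier_point((0, 0), (1, 2), (2, 0), 0.5))  # (1.0, 1.0)
```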
pillone/usntssearch | 24b5e5bc4b6af2589d95121c4d523dc58cb34273 | NZBmegasearch/mechanize/_headersutil.py | python | split_header_words | (header_values) | return result | r"""Parse header values into a list of lists containing key,value pairs.
The function knows how to deal with ",", ";" and "=" as well as quoted
values after "=". A list of space separated tokens are parsed as if they
were separated by ";".
If the header_values passed as argument contains multiple values, then they
are treated as if they were a single value separated by comma ",".
This means that this function is useful for parsing header fields that
follow this syntax (BNF as from the HTTP/1.1 specification, but we relax
the requirement for tokens).
headers = #header
header = (token | parameter) *( [";"] (token | parameter))
token = 1*<any CHAR except CTLs or separators>
separators = "(" | ")" | "<" | ">" | "@"
| "," | ";" | ":" | "\" | <">
| "/" | "[" | "]" | "?" | "="
| "{" | "}" | SP | HT
quoted-string = ( <"> *(qdtext | quoted-pair ) <"> )
qdtext = <any TEXT except <">>
quoted-pair = "\" CHAR
parameter = attribute "=" value
attribute = token
value = token | quoted-string
Each header is represented by a list of key/value pairs. The value for a
simple token (not part of a parameter) is None. Syntactically incorrect
headers will not necessarily be parsed as you would want.
This is easier to describe with some examples:
>>> split_header_words(['foo="bar"; port="80,81"; discard, bar=baz'])
[[('foo', 'bar'), ('port', '80,81'), ('discard', None)], [('bar', 'baz')]]
>>> split_header_words(['text/html; charset="iso-8859-1"'])
[[('text/html', None), ('charset', 'iso-8859-1')]]
>>> split_header_words([r'Basic realm="\"foo\bar\""'])
[[('Basic', None), ('realm', '"foobar"')]] | r"""Parse header values into a list of lists containing key,value pairs. | [
"r",
"Parse",
"header",
"values",
"into",
"a",
"list",
"of",
"lists",
"containing",
"key",
"value",
"pairs",
"."
] | def split_header_words(header_values):
r"""Parse header values into a list of lists containing key,value pairs.
The function knows how to deal with ",", ";" and "=" as well as quoted
values after "=". A list of space separated tokens are parsed as if they
were separated by ";".
If the header_values passed as argument contains multiple values, then they
are treated as if they were a single value separated by comma ",".
This means that this function is useful for parsing header fields that
follow this syntax (BNF as from the HTTP/1.1 specification, but we relax
the requirement for tokens).
headers = #header
header = (token | parameter) *( [";"] (token | parameter))
token = 1*<any CHAR except CTLs or separators>
separators = "(" | ")" | "<" | ">" | "@"
| "," | ";" | ":" | "\" | <">
| "/" | "[" | "]" | "?" | "="
| "{" | "}" | SP | HT
quoted-string = ( <"> *(qdtext | quoted-pair ) <"> )
qdtext = <any TEXT except <">>
quoted-pair = "\" CHAR
parameter = attribute "=" value
attribute = token
value = token | quoted-string
Each header is represented by a list of key/value pairs. The value for a
simple token (not part of a parameter) is None. Syntactically incorrect
headers will not necessarily be parsed as you would want.
This is easier to describe with some examples:
>>> split_header_words(['foo="bar"; port="80,81"; discard, bar=baz'])
[[('foo', 'bar'), ('port', '80,81'), ('discard', None)], [('bar', 'baz')]]
>>> split_header_words(['text/html; charset="iso-8859-1"'])
[[('text/html', None), ('charset', 'iso-8859-1')]]
>>> split_header_words([r'Basic realm="\"foo\bar\""'])
[[('Basic', None), ('realm', '"foobar"')]]
"""
assert type(header_values) not in STRING_TYPES
result = []
for text in header_values:
orig_text = text
pairs = []
while text:
m = token_re.search(text)
if m:
text = unmatched(m)
name = m.group(1)
m = quoted_value_re.search(text)
if m: # quoted value
text = unmatched(m)
value = m.group(1)
value = escape_re.sub(r"\1", value)
else:
m = value_re.search(text)
if m: # unquoted value
text = unmatched(m)
value = m.group(1)
value = value.rstrip()
else:
# no value, a lone token
value = None
pairs.append((name, value))
elif text.lstrip().startswith(","):
# concatenated headers, as per RFC 2616 section 4.2
text = text.lstrip()[1:]
if pairs: result.append(pairs)
pairs = []
else:
# skip junk
non_junk, nr_junk_chars = re.subn("^[=\s;]*", "", text)
assert nr_junk_chars > 0, (
"split_header_words bug: '%s', '%s', %s" %
(orig_text, text, pairs))
text = non_junk
if pairs: result.append(pairs)
return result | [
"def",
"split_header_words",
"(",
"header_values",
")",
":",
"assert",
"type",
"(",
"header_values",
")",
"not",
"in",
"STRING_TYPES",
"result",
"=",
"[",
"]",
"for",
"text",
"in",
"header_values",
":",
"orig_text",
"=",
"text",
"pairs",
"=",
"[",
"]",
"while",
"text",
":",
"m",
"=",
"token_re",
".",
"search",
"(",
"text",
")",
"if",
"m",
":",
"text",
"=",
"unmatched",
"(",
"m",
")",
"name",
"=",
"m",
".",
"group",
"(",
"1",
")",
"m",
"=",
"quoted_value_re",
".",
"search",
"(",
"text",
")",
"if",
"m",
":",
"# quoted value",
"text",
"=",
"unmatched",
"(",
"m",
")",
"value",
"=",
"m",
".",
"group",
"(",
"1",
")",
"value",
"=",
"escape_re",
".",
"sub",
"(",
"r\"\\1\"",
",",
"value",
")",
"else",
":",
"m",
"=",
"value_re",
".",
"search",
"(",
"text",
")",
"if",
"m",
":",
"# unquoted value",
"text",
"=",
"unmatched",
"(",
"m",
")",
"value",
"=",
"m",
".",
"group",
"(",
"1",
")",
"value",
"=",
"value",
".",
"rstrip",
"(",
")",
"else",
":",
"# no value, a lone token",
"value",
"=",
"None",
"pairs",
".",
"append",
"(",
"(",
"name",
",",
"value",
")",
")",
"elif",
"text",
".",
"lstrip",
"(",
")",
".",
"startswith",
"(",
"\",\"",
")",
":",
"# concatenated headers, as per RFC 2616 section 4.2",
"text",
"=",
"text",
".",
"lstrip",
"(",
")",
"[",
"1",
":",
"]",
"if",
"pairs",
":",
"result",
".",
"append",
"(",
"pairs",
")",
"pairs",
"=",
"[",
"]",
"else",
":",
"# skip junk",
"non_junk",
",",
"nr_junk_chars",
"=",
"re",
".",
"subn",
"(",
"\"^[=\\s;]*\"",
",",
"\"\"",
",",
"text",
")",
"assert",
"nr_junk_chars",
">",
"0",
",",
"(",
"\"split_header_words bug: '%s', '%s', %s\"",
"%",
"(",
"orig_text",
",",
"text",
",",
"pairs",
")",
")",
"text",
"=",
"non_junk",
"if",
"pairs",
":",
"result",
".",
"append",
"(",
"pairs",
")",
"return",
"result"
] | https://github.com/pillone/usntssearch/blob/24b5e5bc4b6af2589d95121c4d523dc58cb34273/NZBmegasearch/mechanize/_headersutil.py#L61-L144 |
|
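Beyond the doctest examples in the docstring, the parser is handy for cookie-style headers; a short usage sketch (assumes mechanize is installed, and the expected output follows the pattern of the docstring examples):

```python
from mechanize._headersutil import split_header_words

hdr = ['session=abc123; Path=/; Secure, lang=en']
print(split_header_words(hdr))
# [[('session', 'abc123'), ('Path', '/'), ('Secure', None)],
#  [('lang', 'en')]]
```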
tensorflow/tfx | b4a6b83269815ed12ba9df9e9154c7376fef2ea0 | tfx/dsl/components/base/base_component.py | python | BaseComponent._validate_component_class | (cls) | Validate that the SPEC_CLASSES property of this class is set properly. | Validate that the SPEC_CLASSES property of this class is set properly. | [
"Validate",
"that",
"the",
"SPEC_CLASSES",
"property",
"of",
"this",
"class",
"is",
"set",
"properly",
"."
] | def _validate_component_class(cls):
"""Validate that the SPEC_CLASSES property of this class is set properly."""
if not (inspect.isclass(cls.SPEC_CLASS) and
issubclass(cls.SPEC_CLASS, types.ComponentSpec)):
raise TypeError(
('Component class %s expects SPEC_CLASS property to be a subclass '
'of types.ComponentSpec; got %s instead.') % (cls, cls.SPEC_CLASS))
if not isinstance(cls.EXECUTOR_SPEC, executor_spec.ExecutorSpec):
raise TypeError((
'Component class %s expects EXECUTOR_SPEC property to be an instance '
'of ExecutorSpec; got %s instead.') % (cls, type(cls.EXECUTOR_SPEC)))
if not (inspect.isclass(cls.DRIVER_CLASS) and
issubclass(cls.DRIVER_CLASS, base_driver.BaseDriver)):
raise TypeError(
('Component class %s expects DRIVER_CLASS property to be a subclass '
'of base_driver.BaseDriver; got %s instead.') %
(cls, cls.DRIVER_CLASS)) | [
"def",
"_validate_component_class",
"(",
"cls",
")",
":",
"if",
"not",
"(",
"inspect",
".",
"isclass",
"(",
"cls",
".",
"SPEC_CLASS",
")",
"and",
"issubclass",
"(",
"cls",
".",
"SPEC_CLASS",
",",
"types",
".",
"ComponentSpec",
")",
")",
":",
"raise",
"TypeError",
"(",
"(",
"'Component class %s expects SPEC_CLASS property to be a subclass '",
"'of types.ComponentSpec; got %s instead.'",
")",
"%",
"(",
"cls",
",",
"cls",
".",
"SPEC_CLASS",
")",
")",
"if",
"not",
"isinstance",
"(",
"cls",
".",
"EXECUTOR_SPEC",
",",
"executor_spec",
".",
"ExecutorSpec",
")",
":",
"raise",
"TypeError",
"(",
"(",
"'Component class %s expects EXECUTOR_SPEC property to be an instance '",
"'of ExecutorSpec; got %s instead.'",
")",
"%",
"(",
"cls",
",",
"type",
"(",
"cls",
".",
"EXECUTOR_SPEC",
")",
")",
")",
"if",
"not",
"(",
"inspect",
".",
"isclass",
"(",
"cls",
".",
"DRIVER_CLASS",
")",
"and",
"issubclass",
"(",
"cls",
".",
"DRIVER_CLASS",
",",
"base_driver",
".",
"BaseDriver",
")",
")",
":",
"raise",
"TypeError",
"(",
"(",
"'Component class %s expects DRIVER_CLASS property to be a subclass '",
"'of base_driver.BaseDriver; got %s instead.'",
")",
"%",
"(",
"cls",
",",
"cls",
".",
"DRIVER_CLASS",
")",
")"
] | https://github.com/tensorflow/tfx/blob/b4a6b83269815ed12ba9df9e9154c7376fef2ea0/tfx/dsl/components/base/base_component.py#L111-L127 |
||
AppScale/gts | 46f909cf5dc5ba81faf9d81dc9af598dcf8a82a9 | AppServer/google/appengine/datastore/datastore_query.py | python | Cursor.to_bytes | (self) | return self.__compiled_cursor.Encode() | Serialize cursor as a byte string. | Serialize cursor as a byte string. | [
"Serialize",
"cursor",
"as",
"a",
"byte",
"string",
"."
] | def to_bytes(self):
"""Serialize cursor as a byte string."""
return self.__compiled_cursor.Encode() | [
"def",
"to_bytes",
"(",
"self",
")",
":",
"return",
"self",
".",
"__compiled_cursor",
".",
"Encode",
"(",
")"
] | https://github.com/AppScale/gts/blob/46f909cf5dc5ba81faf9d81dc9af598dcf8a82a9/AppServer/google/appengine/datastore/datastore_query.py#L1490-L1492 |
|
Tautulli/Tautulli | 2410eb33805aaac4bd1c5dad0f71e4f15afaf742 | lib/bleach/_vendor/html5lib/_inputstream.py | python | EncodingBytes.getPosition | (self) | [] | def getPosition(self):
if self._position >= len(self):
raise StopIteration
if self._position >= 0:
return self._position
else:
return None | [
"def",
"getPosition",
"(",
"self",
")",
":",
"if",
"self",
".",
"_position",
">=",
"len",
"(",
"self",
")",
":",
"raise",
"StopIteration",
"if",
"self",
".",
"_position",
">=",
"0",
":",
"return",
"self",
".",
"_position",
"else",
":",
"return",
"None"
] | https://github.com/Tautulli/Tautulli/blob/2410eb33805aaac4bd1c5dad0f71e4f15afaf742/lib/bleach/_vendor/html5lib/_inputstream.py#L619-L625 |
||||
OpenMDAO/OpenMDAO-Framework | f2e37b7de3edeaaeb2d251b375917adec059db9b | openmdao.main/src/openmdao/main/container.py | python | Container.remove_trait | (self, name) | Overrides HasTraits definition of remove_trait in order to
keep track of dynamically added traits for serialization. | Overrides HasTraits definition of remove_trait in order to
keep track of dynamically added traits for serialization. | [
"Overrides",
"HasTraits",
"definition",
"of",
"remove_trait",
"in",
"order",
"to",
"keep",
"track",
"of",
"dynamically",
"added",
"traits",
"for",
"serialization",
"."
] | def remove_trait(self, name):
"""Overrides HasTraits definition of remove_trait in order to
keep track of dynamically added traits for serialization.
"""
try:
del self._added_traits[name]
except KeyError:
pass
try:
del self._cached_traits_[name]
except (KeyError, TypeError):
pass
try:
del self._trait_metadata[name]
except KeyError:
pass
super(Container, self).remove_trait(name) | [
"def",
"remove_trait",
"(",
"self",
",",
"name",
")",
":",
"try",
":",
"del",
"self",
".",
"_added_traits",
"[",
"name",
"]",
"except",
"KeyError",
":",
"pass",
"try",
":",
"del",
"self",
".",
"_cached_traits_",
"[",
"name",
"]",
"except",
"(",
"KeyError",
",",
"TypeError",
")",
":",
"pass",
"try",
":",
"del",
"self",
".",
"_trait_metadata",
"[",
"name",
"]",
"except",
"KeyError",
":",
"pass",
"super",
"(",
"Container",
",",
"self",
")",
".",
"remove_trait",
"(",
"name",
")"
] | https://github.com/OpenMDAO/OpenMDAO-Framework/blob/f2e37b7de3edeaaeb2d251b375917adec059db9b/openmdao.main/src/openmdao/main/container.py#L512-L529 |
||
linxid/Machine_Learning_Study_Path | 558e82d13237114bbb8152483977806fc0c222af | Machine Learning In Action/Chapter8-Regression/venv/Lib/site-packages/pip-9.0.1-py3.6.egg/pip/_vendor/ipaddress.py | python | _BaseNetwork.is_private | (self) | return (self.network_address.is_private and
self.broadcast_address.is_private) | Test if this address is allocated for private networks.
Returns:
A boolean, True if the address is reserved per
iana-ipv4-special-registry or iana-ipv6-special-registry. | Test if this address is allocated for private networks. | [
"Test",
"if",
"this",
"address",
"is",
"allocated",
"for",
"private",
"networks",
"."
] | def is_private(self):
"""Test if this address is allocated for private networks.
Returns:
A boolean, True if the address is reserved per
iana-ipv4-special-registry or iana-ipv6-special-registry.
"""
return (self.network_address.is_private and
self.broadcast_address.is_private) | [
"def",
"is_private",
"(",
"self",
")",
":",
"return",
"(",
"self",
".",
"network_address",
".",
"is_private",
"and",
"self",
".",
"broadcast_address",
".",
"is_private",
")"
] | https://github.com/linxid/Machine_Learning_Study_Path/blob/558e82d13237114bbb8152483977806fc0c222af/Machine Learning In Action/Chapter8-Regression/venv/Lib/site-packages/pip-9.0.1-py3.6.egg/pip/_vendor/ipaddress.py#L1153-L1162 |
|
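The same logic ships in the stdlib `ipaddress` module, so the behavior is easy to poke at: a network is private only when both its network address and its broadcast address fall inside the registries' private ranges.

```python
import ipaddress

print(ipaddress.ip_network("10.0.0.0/8").is_private)      # True
print(ipaddress.ip_network("8.8.8.0/24").is_private)      # False
# A supernet spilling past 192.168/16 into public space is not private:
print(ipaddress.ip_network("192.168.0.0/15").is_private)  # False
```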
chainer/chainercv | 7159616642e0be7c5b3ef380b848e16b7e99355b | chainercv/evaluations/eval_instance_segmentation_coco.py | python | _create_anno | (msk, lb, sc, img_id, anno_id, ar=None, crw=None) | return anno | [] | def _create_anno(msk, lb, sc, img_id, anno_id, ar=None, crw=None):
H, W = msk.shape
if crw is None:
crw = False
msk = np.asfortranarray(msk.astype(np.uint8))
rle = mask_tools.encode(msk)
if ar is None:
# We compute dummy area to pass to pycocotools.
# Note that area dependent scores are ignored afterwards.
ar = mask_tools.area(rle)
if crw is None:
crw = False
# Rounding is done to make the result consistent with COCO.
anno = {
'image_id': img_id, 'category_id': lb,
'segmentation': rle,
'area': ar,
'id': anno_id,
'iscrowd': crw}
if sc is not None:
anno.update({'score': sc})
return anno | [
"def",
"_create_anno",
"(",
"msk",
",",
"lb",
",",
"sc",
",",
"img_id",
",",
"anno_id",
",",
"ar",
"=",
"None",
",",
"crw",
"=",
"None",
")",
":",
"H",
",",
"W",
"=",
"msk",
".",
"shape",
"if",
"crw",
"is",
"None",
":",
"crw",
"=",
"False",
"msk",
"=",
"np",
".",
"asfortranarray",
"(",
"msk",
".",
"astype",
"(",
"np",
".",
"uint8",
")",
")",
"rle",
"=",
"mask_tools",
".",
"encode",
"(",
"msk",
")",
"if",
"ar",
"is",
"None",
":",
"# We compute dummy area to pass to pycocotools.",
"# Note that area dependent scores are ignored afterwards.",
"ar",
"=",
"mask_tools",
".",
"area",
"(",
"rle",
")",
"if",
"crw",
"is",
"None",
":",
"crw",
"=",
"False",
"# Rounding is done to make the result consistent with COCO.",
"anno",
"=",
"{",
"'image_id'",
":",
"img_id",
",",
"'category_id'",
":",
"lb",
",",
"'segmentation'",
":",
"rle",
",",
"'area'",
":",
"ar",
",",
"'id'",
":",
"anno_id",
",",
"'iscrowd'",
":",
"crw",
"}",
"if",
"sc",
"is",
"not",
"None",
":",
"anno",
".",
"update",
"(",
"{",
"'score'",
":",
"sc",
"}",
")",
"return",
"anno"
] | https://github.com/chainer/chainercv/blob/7159616642e0be7c5b3ef380b848e16b7e99355b/chainercv/evaluations/eval_instance_segmentation_coco.py#L276-L297 |
|||
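The RLE plumbing in `_create_anno` is standard pycocotools: masks must be Fortran-ordered uint8 before `encode`, and `area` can be derived from the RLE. A minimal round-trip check (assumes numpy and pycocotools are installed):

```python
import numpy as np
import pycocotools.mask as mask_tools

msk = np.zeros((4, 6), dtype=np.uint8)
msk[1:3, 2:5] = 1                          # a 2x3 blob of foreground pixels
rle = mask_tools.encode(np.asfortranarray(msk))
print(mask_tools.area(rle))                # 6 -- pixel count from the RLE
print(mask_tools.decode(rle).sum())        # 6 -- decoding round-trips exactly
```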
mudpi/mudpi-core | fb206b1136f529c7197f1e6b29629ed05630d377 | mudpi/extensions/nfc/trigger.py | python | NFCTrigger._parse_data | (self, data) | return data | Get nested data if set otherwise return the data | Get nested data if set otherwise return the data | [
"Get",
"nested",
"data",
"if",
"set",
"otherwise",
"return",
"the",
"data"
] | def _parse_data(self, data):
""" Get nested data if set otherwise return the data """
if isinstance(data, dict):
return data.get('tag_id') if not self.nested_source else data.get(self.nested_source, None)
return data | [
"def",
"_parse_data",
"(",
"self",
",",
"data",
")",
":",
"if",
"isinstance",
"(",
"data",
",",
"dict",
")",
":",
"return",
"data",
".",
"get",
"(",
"'tag_id'",
")",
"if",
"not",
"self",
".",
"nested_source",
"else",
"data",
".",
"get",
"(",
"self",
".",
"nested_source",
",",
"None",
")",
"return",
"data"
] | https://github.com/mudpi/mudpi-core/blob/fb206b1136f529c7197f1e6b29629ed05630d377/mudpi/extensions/nfc/trigger.py#L104-L108 |
|
bruderstein/PythonScript | df9f7071ddf3a079e3a301b9b53a6dc78cf1208f | PythonLib/full/_pyio.py | python | TextIOBase.truncate | (self, pos=None) | Truncate size to pos, where pos is an int. | Truncate size to pos, where pos is an int. | [
"Truncate",
"size",
"to",
"pos",
"where",
"pos",
"is",
"an",
"int",
"."
] | def truncate(self, pos=None):
"""Truncate size to pos, where pos is an int."""
self._unsupported("truncate") | [
"def",
"truncate",
"(",
"self",
",",
"pos",
"=",
"None",
")",
":",
"self",
".",
"_unsupported",
"(",
"\"truncate\"",
")"
] | https://github.com/bruderstein/PythonScript/blob/df9f7071ddf3a079e3a301b9b53a6dc78cf1208f/PythonLib/full/_pyio.py#L1865-L1867 |
||
sqlmapproject/sqlmap | 3b07b70864624dff4c29dcaa8a61c78e7f9189f7 | lib/parse/banner.py | python | bannerParser | (banner) | This function calls a class to extract information from the given
DBMS banner based upon the data in XML file | This function calls a class to extract information from the given
DBMS banner based upon the data in XML file | [
"This",
"function",
"calls",
"a",
"class",
"to",
"extract",
"information",
"from",
"the",
"given",
"DBMS",
"banner",
"based",
"upon",
"the",
"data",
"in",
"XML",
"file"
] | def bannerParser(banner):
"""
This function calls a class to extract information from the given
DBMS banner based upon the data in XML file
"""
xmlfile = None
if Backend.isDbms(DBMS.MSSQL):
xmlfile = paths.MSSQL_XML
elif Backend.isDbms(DBMS.MYSQL):
xmlfile = paths.MYSQL_XML
elif Backend.isDbms(DBMS.ORACLE):
xmlfile = paths.ORACLE_XML
elif Backend.isDbms(DBMS.PGSQL):
xmlfile = paths.PGSQL_XML
if not xmlfile:
return
if Backend.isDbms(DBMS.MSSQL):
handler = MSSQLBannerHandler(banner, kb.bannerFp)
parseXmlFile(xmlfile, handler)
handler = FingerprintHandler(banner, kb.bannerFp)
parseXmlFile(paths.GENERIC_XML, handler)
else:
handler = FingerprintHandler(banner, kb.bannerFp)
parseXmlFile(xmlfile, handler)
parseXmlFile(paths.GENERIC_XML, handler) | [
"def",
"bannerParser",
"(",
"banner",
")",
":",
"xmlfile",
"=",
"None",
"if",
"Backend",
".",
"isDbms",
"(",
"DBMS",
".",
"MSSQL",
")",
":",
"xmlfile",
"=",
"paths",
".",
"MSSQL_XML",
"elif",
"Backend",
".",
"isDbms",
"(",
"DBMS",
".",
"MYSQL",
")",
":",
"xmlfile",
"=",
"paths",
".",
"MYSQL_XML",
"elif",
"Backend",
".",
"isDbms",
"(",
"DBMS",
".",
"ORACLE",
")",
":",
"xmlfile",
"=",
"paths",
".",
"ORACLE_XML",
"elif",
"Backend",
".",
"isDbms",
"(",
"DBMS",
".",
"PGSQL",
")",
":",
"xmlfile",
"=",
"paths",
".",
"PGSQL_XML",
"if",
"not",
"xmlfile",
":",
"return",
"if",
"Backend",
".",
"isDbms",
"(",
"DBMS",
".",
"MSSQL",
")",
":",
"handler",
"=",
"MSSQLBannerHandler",
"(",
"banner",
",",
"kb",
".",
"bannerFp",
")",
"parseXmlFile",
"(",
"xmlfile",
",",
"handler",
")",
"handler",
"=",
"FingerprintHandler",
"(",
"banner",
",",
"kb",
".",
"bannerFp",
")",
"parseXmlFile",
"(",
"paths",
".",
"GENERIC_XML",
",",
"handler",
")",
"else",
":",
"handler",
"=",
"FingerprintHandler",
"(",
"banner",
",",
"kb",
".",
"bannerFp",
")",
"parseXmlFile",
"(",
"xmlfile",
",",
"handler",
")",
"parseXmlFile",
"(",
"paths",
".",
"GENERIC_XML",
",",
"handler",
")"
] | https://github.com/sqlmapproject/sqlmap/blob/3b07b70864624dff4c29dcaa8a61c78e7f9189f7/lib/parse/banner.py#L86-L115 |
||
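The if/elif ladder mapping each DBMS to its fingerprint XML could equally be a dispatch table; a hedged sketch of that refactor, with hypothetical stubs standing in for sqlmap's `DBMS` enum and `paths` object:

```python
class DBMS:
    # Hypothetical stub for sqlmap's DBMS constants.
    MSSQL, MYSQL, ORACLE, PGSQL = "mssql", "mysql", "oracle", "pgsql"

XML_BY_DBMS = {
    DBMS.MSSQL: "mssql.xml",
    DBMS.MYSQL: "mysql.xml",
    DBMS.ORACLE: "oracle.xml",
    DBMS.PGSQL: "pgsql.xml",
}

def pick_banner_xml(dbms):
    # Table lookup replacing the if/elif chain; None means "no parser".
    return XML_BY_DBMS.get(dbms)

print(pick_banner_xml(DBMS.MYSQL))  # mysql.xml
print(pick_banner_xml("sqlite"))    # None
```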
CouchPotato/CouchPotatoV1 | 135b3331d1b88ef645e29b76f2d4cc4a732c9232 | library/sqlalchemy/orm/interfaces.py | python | SessionExtension.after_commit | (self, session) | | | Execute after a commit has occurred.
Note that this may not be per-flush if a longer running
transaction is ongoing. | Execute after a commit has occurred.
Note that this may not be per-flush if a longer running
transaction is ongoing. | [
"Execute",
"after",
"a",
"commit",
"has",
"occured",
".",
"Note",
"that",
"this",
"may",
"not",
"be",
"per",
"-",
"flush",
"if",
"a",
"longer",
"running",
"transaction",
"is",
"ongoing",
"."
] | def after_commit(self, session):
"""Execute after a commit has occured.
Note that this may not be per-flush if a longer running
transaction is ongoing.""" | [
"def",
"after_commit",
"(",
"self",
",",
"session",
")",
":"
] | https://github.com/CouchPotato/CouchPotatoV1/blob/135b3331d1b88ef645e29b76f2d4cc4a732c9232/library/sqlalchemy/orm/interfaces.py#L348-L352 |
||
jgagneastro/coffeegrindsize | 22661ebd21831dba4cf32bfc6ba59fe3d49f879c | App/dist/coffeegrindsize.app/Contents/Resources/lib/python3.7/scipy/sparse/linalg/_onenormest.py | python | _onenormest_core | (A, AT, t, itmax) | return est, v, w, nmults, nresamples | Compute a lower bound of the 1-norm of a sparse matrix.
Parameters
----------
A : ndarray or other linear operator
A linear operator that can produce matrix products.
AT : ndarray or other linear operator
The transpose of A.
t : int, optional
A positive parameter controlling the tradeoff between
accuracy versus time and memory usage.
itmax : int, optional
Use at most this many iterations.
Returns
-------
est : float
An underestimate of the 1-norm of the sparse matrix.
v : ndarray, optional
The vector such that ||Av||_1 == est*||v||_1.
It can be thought of as an input to the linear operator
that gives an output with particularly large norm.
w : ndarray, optional
The vector Av which has relatively large 1-norm.
It can be thought of as an output of the linear operator
that is relatively large in norm compared to the input.
nmults : int, optional
The number of matrix products that were computed.
nresamples : int, optional
The number of times a parallel column was observed,
necessitating a re-randomization of the column.
Notes
-----
This is algorithm 2.4. | Compute a lower bound of the 1-norm of a sparse matrix. | [
"Compute",
"a",
"lower",
"bound",
"of",
"the",
"1",
"-",
"norm",
"of",
"a",
"sparse",
"matrix",
"."
] | def _onenormest_core(A, AT, t, itmax):
"""
Compute a lower bound of the 1-norm of a sparse matrix.
Parameters
----------
A : ndarray or other linear operator
A linear operator that can produce matrix products.
AT : ndarray or other linear operator
The transpose of A.
t : int, optional
A positive parameter controlling the tradeoff between
accuracy versus time and memory usage.
itmax : int, optional
Use at most this many iterations.
Returns
-------
est : float
An underestimate of the 1-norm of the sparse matrix.
v : ndarray, optional
The vector such that ||Av||_1 == est*||v||_1.
It can be thought of as an input to the linear operator
that gives an output with particularly large norm.
w : ndarray, optional
The vector Av which has relatively large 1-norm.
It can be thought of as an output of the linear operator
that is relatively large in norm compared to the input.
nmults : int, optional
The number of matrix products that were computed.
nresamples : int, optional
The number of times a parallel column was observed,
necessitating a re-randomization of the column.
Notes
-----
This is algorithm 2.4.
"""
# This function is a more or less direct translation
# of Algorithm 2.4 from the Higham and Tisseur (2000) paper.
A_linear_operator = aslinearoperator(A)
AT_linear_operator = aslinearoperator(AT)
if itmax < 2:
raise ValueError('at least two iterations are required')
if t < 1:
raise ValueError('at least one column is required')
n = A.shape[0]
if t >= n:
raise ValueError('t should be smaller than the order of A')
# Track the number of big*small matrix multiplications
# and the number of resamplings.
nmults = 0
nresamples = 0
# "We now explain our choice of starting matrix. We take the first
# column of X to be the vector of 1s [...] This has the advantage that
# for a matrix with nonnegative elements the algorithm converges
# with an exact estimate on the second iteration, and such matrices
# arise in applications [...]"
X = np.ones((n, t), dtype=float)
# "The remaining columns are chosen as rand{-1,1},
# with a check for and correction of parallel columns,
# exactly as for S in the body of the algorithm."
if t > 1:
for i in range(1, t):
# These are technically initial samples, not resamples,
# so the resampling count is not incremented.
resample_column(i, X)
for i in range(t):
while column_needs_resampling(i, X):
resample_column(i, X)
nresamples += 1
# "Choose starting matrix X with columns of unit 1-norm."
X /= float(n)
# "indices of used unit vectors e_j"
ind_hist = np.zeros(0, dtype=np.intp)
est_old = 0
S = np.zeros((n, t), dtype=float)
k = 1
ind = None
while True:
Y = np.asarray(A_linear_operator.matmat(X))
nmults += 1
mags = _sum_abs_axis0(Y)
est = np.max(mags)
best_j = np.argmax(mags)
if est > est_old or k == 2:
if k >= 2:
ind_best = ind[best_j]
w = Y[:, best_j]
# (1)
if k >= 2 and est <= est_old:
est = est_old
break
est_old = est
S_old = S
if k > itmax:
break
S = sign_round_up(Y)
del Y
# (2)
if every_col_of_X_is_parallel_to_a_col_of_Y(S, S_old):
break
if t > 1:
# "Ensure that no column of S is parallel to another column of S
# or to a column of S_old by replacing columns of S by rand{-1,1}."
for i in range(t):
while column_needs_resampling(i, S, S_old):
resample_column(i, S)
nresamples += 1
del S_old
# (3)
Z = np.asarray(AT_linear_operator.matmat(S))
nmults += 1
h = _max_abs_axis1(Z)
del Z
# (4)
if k >= 2 and max(h) == h[ind_best]:
break
# "Sort h so that h_first >= ... >= h_last
# and re-order ind correspondingly."
#
# Later on, we will need at most t+len(ind_hist) largest
# entries, so drop the rest
ind = np.argsort(h)[::-1][:t+len(ind_hist)].copy()
del h
if t > 1:
# (5)
# Break if the most promising t vectors have been visited already.
if np.in1d(ind[:t], ind_hist).all():
break
# Put the most promising unvisited vectors at the front of the list
# and put the visited vectors at the end of the list.
# Preserve the order of the indices induced by the ordering of h.
seen = np.in1d(ind, ind_hist)
ind = np.concatenate((ind[~seen], ind[seen]))
for j in range(t):
X[:, j] = elementary_vector(n, ind[j])
new_ind = ind[:t][~np.in1d(ind[:t], ind_hist)]
ind_hist = np.concatenate((ind_hist, new_ind))
k += 1
v = elementary_vector(n, ind_best)
return est, v, w, nmults, nresamples | [
"def",
"_onenormest_core",
"(",
"A",
",",
"AT",
",",
"t",
",",
"itmax",
")",
":",
"# This function is a more or less direct translation",
"# of Algorithm 2.4 from the Higham and Tisseur (2000) paper.",
"A_linear_operator",
"=",
"aslinearoperator",
"(",
"A",
")",
"AT_linear_operator",
"=",
"aslinearoperator",
"(",
"AT",
")",
"if",
"itmax",
"<",
"2",
":",
"raise",
"ValueError",
"(",
"'at least two iterations are required'",
")",
"if",
"t",
"<",
"1",
":",
"raise",
"ValueError",
"(",
"'at least one column is required'",
")",
"n",
"=",
"A",
".",
"shape",
"[",
"0",
"]",
"if",
"t",
">=",
"n",
":",
"raise",
"ValueError",
"(",
"'t should be smaller than the order of A'",
")",
"# Track the number of big*small matrix multiplications",
"# and the number of resamplings.",
"nmults",
"=",
"0",
"nresamples",
"=",
"0",
"# \"We now explain our choice of starting matrix. We take the first",
"# column of X to be the vector of 1s [...] This has the advantage that",
"# for a matrix with nonnegative elements the algorithm converges",
"# with an exact estimate on the second iteration, and such matrices",
"# arise in applications [...]\"",
"X",
"=",
"np",
".",
"ones",
"(",
"(",
"n",
",",
"t",
")",
",",
"dtype",
"=",
"float",
")",
"# \"The remaining columns are chosen as rand{-1,1},",
"# with a check for and correction of parallel columns,",
"# exactly as for S in the body of the algorithm.\"",
"if",
"t",
">",
"1",
":",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"t",
")",
":",
"# These are technically initial samples, not resamples,",
"# so the resampling count is not incremented.",
"resample_column",
"(",
"i",
",",
"X",
")",
"for",
"i",
"in",
"range",
"(",
"t",
")",
":",
"while",
"column_needs_resampling",
"(",
"i",
",",
"X",
")",
":",
"resample_column",
"(",
"i",
",",
"X",
")",
"nresamples",
"+=",
"1",
"# \"Choose starting matrix X with columns of unit 1-norm.\"",
"X",
"/=",
"float",
"(",
"n",
")",
"# \"indices of used unit vectors e_j\"",
"ind_hist",
"=",
"np",
".",
"zeros",
"(",
"0",
",",
"dtype",
"=",
"np",
".",
"intp",
")",
"est_old",
"=",
"0",
"S",
"=",
"np",
".",
"zeros",
"(",
"(",
"n",
",",
"t",
")",
",",
"dtype",
"=",
"float",
")",
"k",
"=",
"1",
"ind",
"=",
"None",
"while",
"True",
":",
"Y",
"=",
"np",
".",
"asarray",
"(",
"A_linear_operator",
".",
"matmat",
"(",
"X",
")",
")",
"nmults",
"+=",
"1",
"mags",
"=",
"_sum_abs_axis0",
"(",
"Y",
")",
"est",
"=",
"np",
".",
"max",
"(",
"mags",
")",
"best_j",
"=",
"np",
".",
"argmax",
"(",
"mags",
")",
"if",
"est",
">",
"est_old",
"or",
"k",
"==",
"2",
":",
"if",
"k",
">=",
"2",
":",
"ind_best",
"=",
"ind",
"[",
"best_j",
"]",
"w",
"=",
"Y",
"[",
":",
",",
"best_j",
"]",
"# (1)",
"if",
"k",
">=",
"2",
"and",
"est",
"<=",
"est_old",
":",
"est",
"=",
"est_old",
"break",
"est_old",
"=",
"est",
"S_old",
"=",
"S",
"if",
"k",
">",
"itmax",
":",
"break",
"S",
"=",
"sign_round_up",
"(",
"Y",
")",
"del",
"Y",
"# (2)",
"if",
"every_col_of_X_is_parallel_to_a_col_of_Y",
"(",
"S",
",",
"S_old",
")",
":",
"break",
"if",
"t",
">",
"1",
":",
"# \"Ensure that no column of S is parallel to another column of S",
"# or to a column of S_old by replacing columns of S by rand{-1,1}.\"",
"for",
"i",
"in",
"range",
"(",
"t",
")",
":",
"while",
"column_needs_resampling",
"(",
"i",
",",
"S",
",",
"S_old",
")",
":",
"resample_column",
"(",
"i",
",",
"S",
")",
"nresamples",
"+=",
"1",
"del",
"S_old",
"# (3)",
"Z",
"=",
"np",
".",
"asarray",
"(",
"AT_linear_operator",
".",
"matmat",
"(",
"S",
")",
")",
"nmults",
"+=",
"1",
"h",
"=",
"_max_abs_axis1",
"(",
"Z",
")",
"del",
"Z",
"# (4)",
"if",
"k",
">=",
"2",
"and",
"max",
"(",
"h",
")",
"==",
"h",
"[",
"ind_best",
"]",
":",
"break",
"# \"Sort h so that h_first >= ... >= h_last",
"# and re-order ind correspondingly.\"",
"#",
"# Later on, we will need at most t+len(ind_hist) largest",
"# entries, so drop the rest",
"ind",
"=",
"np",
".",
"argsort",
"(",
"h",
")",
"[",
":",
":",
"-",
"1",
"]",
"[",
":",
"t",
"+",
"len",
"(",
"ind_hist",
")",
"]",
".",
"copy",
"(",
")",
"del",
"h",
"if",
"t",
">",
"1",
":",
"# (5)",
"# Break if the most promising t vectors have been visited already.",
"if",
"np",
".",
"in1d",
"(",
"ind",
"[",
":",
"t",
"]",
",",
"ind_hist",
")",
".",
"all",
"(",
")",
":",
"break",
"# Put the most promising unvisited vectors at the front of the list",
"# and put the visited vectors at the end of the list.",
"# Preserve the order of the indices induced by the ordering of h.",
"seen",
"=",
"np",
".",
"in1d",
"(",
"ind",
",",
"ind_hist",
")",
"ind",
"=",
"np",
".",
"concatenate",
"(",
"(",
"ind",
"[",
"~",
"seen",
"]",
",",
"ind",
"[",
"seen",
"]",
")",
")",
"for",
"j",
"in",
"range",
"(",
"t",
")",
":",
"X",
"[",
":",
",",
"j",
"]",
"=",
"elementary_vector",
"(",
"n",
",",
"ind",
"[",
"j",
"]",
")",
"new_ind",
"=",
"ind",
"[",
":",
"t",
"]",
"[",
"~",
"np",
".",
"in1d",
"(",
"ind",
"[",
":",
"t",
"]",
",",
"ind_hist",
")",
"]",
"ind_hist",
"=",
"np",
".",
"concatenate",
"(",
"(",
"ind_hist",
",",
"new_ind",
")",
")",
"k",
"+=",
"1",
"v",
"=",
"elementary_vector",
"(",
"n",
",",
"ind_best",
")",
"return",
"est",
",",
"v",
",",
"w",
",",
"nmults",
",",
"nresamples"
] | https://github.com/jgagneastro/coffeegrindsize/blob/22661ebd21831dba4cf32bfc6ba59fe3d49f879c/App/dist/coffeegrindsize.app/Contents/Resources/lib/python3.7/scipy/sparse/linalg/_onenormest.py#L325-L468 |
|
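The function above is the private core behind scipy's public estimator. A minimal sketch of how the algorithm is normally exercised through the public wrapper scipy.sparse.linalg.onenormest (matrix size and density below are arbitrary):

    from scipy.sparse import random as sparse_random
    from scipy.sparse.linalg import onenormest

    # Random sparse test matrix; onenormest() drives _onenormest_core internally.
    A = sparse_random(200, 200, density=0.05, random_state=0, format="csr")

    # t trades accuracy for time/memory, itmax caps the iterations,
    # mirroring the parameters documented above.
    est = onenormest(A, t=2, itmax=5)

    exact = abs(A).sum(axis=0).max()  # true 1-norm: largest column sum of |A|
    print(est, exact)                 # est is a lower bound, often exact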
openstack/ironic | b392dc19bcd29cef5a69ec00d2f18a7a19a679e5 | ironic/drivers/modules/irmc/boot.py | python | _prepare_boot_iso | (task, root_uuid) | Prepare a boot ISO to boot the node.
:param task: a TaskManager instance containing the node to act on.
:param root_uuid: the uuid of the root partition.
:raises: MissingParameterValue, if any of the required parameters are
missing.
:raises: InvalidParameterValue, if any of the parameters have invalid
value.
:raises: ImageCreationFailed, if creating boot ISO
for BIOS boot_mode failed. | Prepare a boot ISO to boot the node. | [
"Prepare",
"a",
"boot",
"ISO",
"to",
"boot",
"the",
"node",
"."
] | def _prepare_boot_iso(task, root_uuid):
"""Prepare a boot ISO to boot the node.
:param task: a TaskManager instance containing the node to act on.
:param root_uuid: the uuid of the root partition.
:raises: MissingParameterValue, if any of the required parameters are
missing.
:raises: InvalidParameterValue, if any of the parameters have invalid
value.
:raises: ImageCreationFailed, if creating boot ISO
for BIOS boot_mode failed.
"""
deploy_info = _parse_deploy_info(task.node)
driver_internal_info = task.node.driver_internal_info
# fetch boot iso
if deploy_info.get('boot_iso'):
boot_iso_href = deploy_info['boot_iso']
if _is_image_href_ordinary_file_name(boot_iso_href):
driver_internal_info['boot_iso'] = boot_iso_href
else:
boot_iso_filename = _get_iso_name(task.node, label='boot')
boot_iso_fullpathname = os.path.join(
CONF.irmc.remote_image_share_root, boot_iso_filename)
images.fetch(task.context, boot_iso_href, boot_iso_fullpathname)
driver_internal_info['boot_iso'] = boot_iso_filename
# create boot iso
else:
image_href = deploy_info['image_source']
image_props = ['kernel_id', 'ramdisk_id']
image_properties = images.get_image_properties(
task.context, image_href, image_props)
kernel_href = (task.node.instance_info.get('kernel')
or image_properties['kernel_id'])
ramdisk_href = (task.node.instance_info.get('ramdisk')
or image_properties['ramdisk_id'])
deploy_iso_href = deploy_info['deploy_iso']
boot_mode = boot_mode_utils.get_boot_mode(task.node)
kernel_params = deploy_info['kernel_append_params']
boot_iso_filename = _get_iso_name(task.node, label='boot')
boot_iso_fullpathname = os.path.join(
CONF.irmc.remote_image_share_root, boot_iso_filename)
images.create_boot_iso(task.context, boot_iso_fullpathname,
kernel_href, ramdisk_href,
deploy_iso_href=deploy_iso_href,
root_uuid=root_uuid,
kernel_params=kernel_params,
boot_mode=boot_mode)
driver_internal_info['boot_iso'] = boot_iso_filename
# save driver_internal_info['boot_iso']
task.node.driver_internal_info = driver_internal_info
task.node.save() | [
"def",
"_prepare_boot_iso",
"(",
"task",
",",
"root_uuid",
")",
":",
"deploy_info",
"=",
"_parse_deploy_info",
"(",
"task",
".",
"node",
")",
"driver_internal_info",
"=",
"task",
".",
"node",
".",
"driver_internal_info",
"# fetch boot iso",
"if",
"deploy_info",
".",
"get",
"(",
"'boot_iso'",
")",
":",
"boot_iso_href",
"=",
"deploy_info",
"[",
"'boot_iso'",
"]",
"if",
"_is_image_href_ordinary_file_name",
"(",
"boot_iso_href",
")",
":",
"driver_internal_info",
"[",
"'boot_iso'",
"]",
"=",
"boot_iso_href",
"else",
":",
"boot_iso_filename",
"=",
"_get_iso_name",
"(",
"task",
".",
"node",
",",
"label",
"=",
"'boot'",
")",
"boot_iso_fullpathname",
"=",
"os",
".",
"path",
".",
"join",
"(",
"CONF",
".",
"irmc",
".",
"remote_image_share_root",
",",
"boot_iso_filename",
")",
"images",
".",
"fetch",
"(",
"task",
".",
"context",
",",
"boot_iso_href",
",",
"boot_iso_fullpathname",
")",
"driver_internal_info",
"[",
"'boot_iso'",
"]",
"=",
"boot_iso_filename",
"# create boot iso",
"else",
":",
"image_href",
"=",
"deploy_info",
"[",
"'image_source'",
"]",
"image_props",
"=",
"[",
"'kernel_id'",
",",
"'ramdisk_id'",
"]",
"image_properties",
"=",
"images",
".",
"get_image_properties",
"(",
"task",
".",
"context",
",",
"image_href",
",",
"image_props",
")",
"kernel_href",
"=",
"(",
"task",
".",
"node",
".",
"instance_info",
".",
"get",
"(",
"'kernel'",
")",
"or",
"image_properties",
"[",
"'kernel_id'",
"]",
")",
"ramdisk_href",
"=",
"(",
"task",
".",
"node",
".",
"instance_info",
".",
"get",
"(",
"'ramdisk'",
")",
"or",
"image_properties",
"[",
"'ramdisk_id'",
"]",
")",
"deploy_iso_href",
"=",
"deploy_info",
"[",
"'deploy_iso'",
"]",
"boot_mode",
"=",
"boot_mode_utils",
".",
"get_boot_mode",
"(",
"task",
".",
"node",
")",
"kernel_params",
"=",
"deploy_info",
"[",
"'kernel_append_params'",
"]",
"boot_iso_filename",
"=",
"_get_iso_name",
"(",
"task",
".",
"node",
",",
"label",
"=",
"'boot'",
")",
"boot_iso_fullpathname",
"=",
"os",
".",
"path",
".",
"join",
"(",
"CONF",
".",
"irmc",
".",
"remote_image_share_root",
",",
"boot_iso_filename",
")",
"images",
".",
"create_boot_iso",
"(",
"task",
".",
"context",
",",
"boot_iso_fullpathname",
",",
"kernel_href",
",",
"ramdisk_href",
",",
"deploy_iso_href",
"=",
"deploy_iso_href",
",",
"root_uuid",
"=",
"root_uuid",
",",
"kernel_params",
"=",
"kernel_params",
",",
"boot_mode",
"=",
"boot_mode",
")",
"driver_internal_info",
"[",
"'boot_iso'",
"]",
"=",
"boot_iso_filename",
"# save driver_internal_info['boot_iso']",
"task",
".",
"node",
".",
"driver_internal_info",
"=",
"driver_internal_info",
"task",
".",
"node",
".",
"save",
"(",
")"
] | https://github.com/openstack/ironic/blob/b392dc19bcd29cef5a69ec00d2f18a7a19a679e5/ironic/drivers/modules/irmc/boot.py#L278-L336 |
||
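The method takes one of three paths: record a plain file name that already sits on the iRMC share, fetch a remote boot_iso onto the share, or assemble an ISO from kernel and ramdisk. A standalone sketch of just that branching; the helper name, share path, and URL test below are hypothetical stand-ins, not ironic API:

    import os

    def pick_boot_iso_action(deploy_info, share_root="/remote_image_share_root"):
        # Hypothetical condensation of _prepare_boot_iso's control flow.
        boot_iso = deploy_info.get("boot_iso")
        if boot_iso:
            if "://" not in boot_iso:            # "ordinary file name" case
                return ("reuse", boot_iso)
            return ("fetch", os.path.join(share_root, "boot.iso"))
        return ("create", os.path.join(share_root, "boot.iso"))  # kernel + ramdisk

    print(pick_boot_iso_action({"boot_iso": "boot.iso"}))           # ('reuse', 'boot.iso')
    print(pick_boot_iso_action({"boot_iso": "http://x/boot.iso"}))  # ('fetch', ...)
    print(pick_boot_iso_action({"image_source": "glance-uuid"}))    # ('create', ...)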
KazukiOnodera/Instacart | 416b6b0220d3aed62c8d323caa3ee46f4b614a72 | py_feature/utils.py | python | to_pickles | (df, path, split_size=3, inplace=True) | return | path = '../output/mydf'
write '../output/mydf/000.p'
'../output/mydf/001.p'
'../output/mydf/002.p' | path = '../output/mydf'
write '../output/mydf/000.p'
'../output/mydf/001.p'
'../output/mydf/002.p' | [
"path",
"=",
"..",
"/",
"output",
"/",
"mydf",
"write",
"..",
"/",
"output",
"/",
"mydf",
"/",
"000",
".",
"p",
"..",
"/",
"output",
"/",
"mydf",
"/",
"001",
".",
"p",
"..",
"/",
"output",
"/",
"mydf",
"/",
"002",
".",
"p"
] | def to_pickles(df, path, split_size=3, inplace=True):
"""
path = '../output/mydf'
write '../output/mydf/000.p'
'../output/mydf/001.p'
'../output/mydf/002.p'
"""
if inplace==True:
df.reset_index(drop=True, inplace=True)
else:
df = df.reset_index(drop=True)
gc.collect()
mkdir_p(path)
kf = KFold(n_splits=split_size)
for i, (train_index, val_index) in enumerate(tqdm(kf.split(df))):
df.iloc[val_index].to_pickle(f'{path}/{i:03d}.p')
return | [
"def",
"to_pickles",
"(",
"df",
",",
"path",
",",
"split_size",
"=",
"3",
",",
"inplace",
"=",
"True",
")",
":",
"if",
"inplace",
"==",
"True",
":",
"df",
".",
"reset_index",
"(",
"drop",
"=",
"True",
",",
"inplace",
"=",
"True",
")",
"else",
":",
"df",
"=",
"df",
".",
"reset_index",
"(",
"drop",
"=",
"True",
")",
"gc",
".",
"collect",
"(",
")",
"mkdir_p",
"(",
"path",
")",
"kf",
"=",
"KFold",
"(",
"n_splits",
"=",
"split_size",
")",
"for",
"i",
",",
"(",
"train_index",
",",
"val_index",
")",
"in",
"enumerate",
"(",
"tqdm",
"(",
"kf",
".",
"split",
"(",
"df",
")",
")",
")",
":",
"df",
".",
"iloc",
"[",
"val_index",
"]",
".",
"to_pickle",
"(",
"f'{path}/{i:03d}.p'",
")",
"return"
] | https://github.com/KazukiOnodera/Instacart/blob/416b6b0220d3aed62c8d323caa3ee46f4b614a72/py_feature/utils.py#L61-L80 |
|
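A round-trip sketch for the splitter above, assuming its module-level helpers (mkdir_p, KFold, tqdm) are importable alongside it. KFold without shuffling yields contiguous index ranges in order, so concatenating the sorted parts restores the frame:

    import glob
    import pandas as pd

    df = pd.DataFrame({"a": range(10)})
    to_pickles(df, "../output/mydf", split_size=3)   # writes 000.p, 001.p, 002.p

    parts = sorted(glob.glob("../output/mydf/*.p"))
    restored = pd.concat([pd.read_pickle(p) for p in parts]).reset_index(drop=True)
    assert restored.equals(df)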
IronLanguages/ironpython2 | 51fdedeeda15727717fb8268a805f71b06c0b9f1 | Src/StdLib/repackage/setuptools/setuptools/msvc.py | python | PlatformInfo.current_dir | (self, hidex86=False, x64=False) | return (
'' if (self.current_cpu == 'x86' and hidex86) else
r'\x64' if (self.current_cpu == 'amd64' and x64) else
r'\%s' % self.current_cpu
) | Current platform specific subfolder.
Parameters
----------
hidex86: bool
return '' and not '\x86' if architecture is x86.
x64: bool
return '\x64' and not '\amd64' if architecture is amd64.
Return
------
subfolder: str
'\target', or '' (see hidex86 parameter) | Current platform specific subfolder. | [
"Current",
"platform",
"specific",
"subfolder",
"."
] | def current_dir(self, hidex86=False, x64=False):
"""
Current platform specific subfolder.
Parameters
----------
hidex86: bool
return '' and not '\x86' if architecture is x86.
x64: bool
return '\x64' and not '\amd64' if architecture is amd64.
Return
------
subfolder: str
'\target', or '' (see hidex86 parameter)
"""
return (
'' if (self.current_cpu == 'x86' and hidex86) else
r'\x64' if (self.current_cpu == 'amd64' and x64) else
r'\%s' % self.current_cpu
) | [
"def",
"current_dir",
"(",
"self",
",",
"hidex86",
"=",
"False",
",",
"x64",
"=",
"False",
")",
":",
"return",
"(",
"''",
"if",
"(",
"self",
".",
"current_cpu",
"==",
"'x86'",
"and",
"hidex86",
")",
"else",
"r'\\x64'",
"if",
"(",
"self",
".",
"current_cpu",
"==",
"'amd64'",
"and",
"x64",
")",
"else",
"r'\\%s'",
"%",
"self",
".",
"current_cpu",
")"
] | https://github.com/IronLanguages/ironpython2/blob/51fdedeeda15727717fb8268a805f71b06c0b9f1/Src/StdLib/repackage/setuptools/setuptools/msvc.py#L264-L284 |
|
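The property's mapping is easiest to check on a standalone copy of its conditional expression, detached here from the registry-probing class purely for illustration:

    def current_dir(current_cpu, hidex86=False, x64=False):
        # Same expression as the property above, with the CPU passed in.
        return (
            '' if (current_cpu == 'x86' and hidex86) else
            r'\x64' if (current_cpu == 'amd64' and x64) else
            r'\%s' % current_cpu
        )

    assert current_dir('x86', hidex86=True) == ''
    assert current_dir('x86') == r'\x86'
    assert current_dir('amd64', x64=True) == r'\x64'
    assert current_dir('amd64') == r'\amd64'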
fossasia/x-mario-center | fe67afe28d995dcf4e2498e305825a4859566172 | softwarecenter/backend/channel.py | python | SoftwareChannel.__init__ | (self, channel_name, channel_origin, channel_component,
source_entry=None, installed_only=False,
channel_icon=None, channel_query=None,
channel_sort_mode=SortMethods.BY_ALPHABET) | configure the software channel object based on channel name,
origin, and component (the latter for detecting the partner
channel) | configure the software channel object based on channel name,
origin, and component (the latter for detecting the partner
channel) | [
"configure",
"the",
"software",
"channel",
"object",
"based",
"on",
"channel",
"name",
"origin",
"and",
"component",
"(",
"the",
"latter",
"for",
"detecting",
"the",
"partner",
"channel",
")"
] | def __init__(self, channel_name, channel_origin, channel_component,
source_entry=None, installed_only=False,
channel_icon=None, channel_query=None,
channel_sort_mode=SortMethods.BY_ALPHABET):
"""
configure the software channel object based on channel name,
origin, and component (the latter for detecting the partner
channel)
"""
self._channel_name = channel_name
self._channel_origin = channel_origin
self._channel_component = channel_component
self._channel_color = None
self._channel_view_id = None
self.installed_only = installed_only
self._channel_sort_mode = channel_sort_mode
# distro specific stuff
self.distro = softwarecenter.distro.get_distro()
# configure the channel
self._channel_display_name = self._get_display_name_for_channel(
channel_name, channel_origin, channel_component)
if channel_icon is None:
self._channel_icon = self._get_icon_for_channel(
channel_name, channel_origin, channel_component)
else:
self._channel_icon = channel_icon
if channel_query is None:
self._channel_query = self._get_channel_query_for_channel(
channel_name, channel_origin, channel_component)
else:
self._channel_query = channel_query
# a sources.list entry attached to the channel (this is currently
# only used for not-yet-enabled channels)
self._source_entry = source_entry
# when the channel needs to be added to the systems sources.list
self.needs_adding = False | [
"def",
"__init__",
"(",
"self",
",",
"channel_name",
",",
"channel_origin",
",",
"channel_component",
",",
"source_entry",
"=",
"None",
",",
"installed_only",
"=",
"False",
",",
"channel_icon",
"=",
"None",
",",
"channel_query",
"=",
"None",
",",
"channel_sort_mode",
"=",
"SortMethods",
".",
"BY_ALPHABET",
")",
":",
"self",
".",
"_channel_name",
"=",
"channel_name",
"self",
".",
"_channel_origin",
"=",
"channel_origin",
"self",
".",
"_channel_component",
"=",
"channel_component",
"self",
".",
"_channel_color",
"=",
"None",
"self",
".",
"_channel_view_id",
"=",
"None",
"self",
".",
"installed_only",
"=",
"installed_only",
"self",
".",
"_channel_sort_mode",
"=",
"channel_sort_mode",
"# distro specific stuff",
"self",
".",
"distro",
"=",
"softwarecenter",
".",
"distro",
".",
"get_distro",
"(",
")",
"# configure the channel",
"self",
".",
"_channel_display_name",
"=",
"self",
".",
"_get_display_name_for_channel",
"(",
"channel_name",
",",
"channel_origin",
",",
"channel_component",
")",
"if",
"channel_icon",
"is",
"None",
":",
"self",
".",
"_channel_icon",
"=",
"self",
".",
"_get_icon_for_channel",
"(",
"channel_name",
",",
"channel_origin",
",",
"channel_component",
")",
"else",
":",
"self",
".",
"_channel_icon",
"=",
"channel_icon",
"if",
"channel_query",
"is",
"None",
":",
"self",
".",
"_channel_query",
"=",
"self",
".",
"_get_channel_query_for_channel",
"(",
"channel_name",
",",
"channel_origin",
",",
"channel_component",
")",
"else",
":",
"self",
".",
"_channel_query",
"=",
"channel_query",
"# a sources.list entry attached to the channel (this is currently",
"# only used for not-yet-enabled channels)",
"self",
".",
"_source_entry",
"=",
"source_entry",
"# when the channel needs to be added to the systems sources.list",
"self",
".",
"needs_adding",
"=",
"False"
] | https://github.com/fossasia/x-mario-center/blob/fe67afe28d995dcf4e2498e305825a4859566172/softwarecenter/backend/channel.py#L140-L175 |
||
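A construction sketch; the channel strings are made-up examples, and the call assumes the softwarecenter package (with a working distro backend) is importable, since __init__ derives the display name, icon, and query whenever they are not passed explicitly:

    # Hypothetical channel values for illustration only.
    channel = SoftwareChannel(
        channel_name="partner",
        channel_origin="Canonical",
        channel_component="partner",
        installed_only=False,
    )
    # _channel_display_name, _channel_icon and _channel_query are now derived.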
beville/ComicStreamer | 62eb914652695ea41a5e1f0cfbd044cbc6854e84 | libs/rumps/rumps.py | python | Window.title | (self) | return self._alert.messageText() | [] | def title(self):
return self._alert.messageText() | [
"def",
"title",
"(",
"self",
")",
":",
"return",
"self",
".",
"_alert",
".",
"messageText",
"(",
")"
] | https://github.com/beville/ComicStreamer/blob/62eb914652695ea41a5e1f0cfbd044cbc6854e84/libs/rumps/rumps.py#L516-L517 |
|||
matrix-org/synapse | 8e57584a5859a9002759963eb546d523d2498a01 | synapse/rest/media/v1/filepath.py | python | MediaFilePaths.local_media_thumbnail_dir | (self, media_id: str) | return os.path.join(
self.base_path,
"local_thumbnails",
_validate_path_component(media_id[0:2]),
_validate_path_component(media_id[2:4]),
_validate_path_component(media_id[4:]),
) | Retrieve the local store path of thumbnails of a given media_id
Args:
media_id: The media ID to query.
Returns:
Path of local_thumbnails from media_id | Retrieve the local store path of thumbnails of a given media_id | [
"Retrieve",
"the",
"local",
"store",
"path",
"of",
"thumbnails",
"of",
"a",
"given",
"media_id"
] | def local_media_thumbnail_dir(self, media_id: str) -> str:
"""
Retrieve the local store path of thumbnails of a given media_id
Args:
media_id: The media ID to query.
Returns:
Path of local_thumbnails from media_id
"""
return os.path.join(
self.base_path,
"local_thumbnails",
_validate_path_component(media_id[0:2]),
_validate_path_component(media_id[2:4]),
_validate_path_component(media_id[4:]),
) | [
"def",
"local_media_thumbnail_dir",
"(",
"self",
",",
"media_id",
":",
"str",
")",
"->",
"str",
":",
"return",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"base_path",
",",
"\"local_thumbnails\"",
",",
"_validate_path_component",
"(",
"media_id",
"[",
"0",
":",
"2",
"]",
")",
",",
"_validate_path_component",
"(",
"media_id",
"[",
"2",
":",
"4",
"]",
")",
",",
"_validate_path_component",
"(",
"media_id",
"[",
"4",
":",
"]",
")",
",",
")"
] | https://github.com/matrix-org/synapse/blob/8e57584a5859a9002759963eb546d523d2498a01/synapse/rest/media/v1/filepath.py#L197-L212 |
|
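The fan-out scheme (two 2-character shards, then the remainder) in isolation; this sketch drops the _validate_path_component checks the real method applies to each component, and assumes POSIX path separators:

    import os

    def shard(base_path, media_id):
        # local_thumbnails/<id[0:2]>/<id[2:4]>/<id[4:]>
        return os.path.join(base_path, "local_thumbnails",
                            media_id[0:2], media_id[2:4], media_id[4:])

    assert shard("/media", "GerZNDnDZVjsOtar") == \
        "/media/local_thumbnails/Ge/rZ/NDnDZVjsOtar"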
python/cpython | e13cdca0f5224ec4e23bdd04bb3120506964bc8b | Lib/bz2.py | python | BZ2File.read1 | (self, size=-1) | return self._buffer.read1(size) | Read up to size uncompressed bytes, while trying to avoid
making multiple reads from the underlying stream. Reads up to a
buffer's worth of data if size is negative.
Returns b'' if the file is at EOF. | Read up to size uncompressed bytes, while trying to avoid
making multiple reads from the underlying stream. Reads up to a
buffer's worth of data if size is negative. | [
"Read",
"up",
"to",
"size",
"uncompressed",
"bytes",
"while",
"trying",
"to",
"avoid",
"making",
"multiple",
"reads",
"from",
"the",
"underlying",
"stream",
".",
"Reads",
"up",
"to",
"a",
"buffer",
"s",
"worth",
"of",
"data",
"if",
"size",
"is",
"negative",
"."
] | def read1(self, size=-1):
"""Read up to size uncompressed bytes, while trying to avoid
making multiple reads from the underlying stream. Reads up to a
buffer's worth of data if size is negative.
Returns b'' if the file is at EOF.
"""
self._check_can_read()
if size < 0:
size = io.DEFAULT_BUFFER_SIZE
return self._buffer.read1(size) | [
"def",
"read1",
"(",
"self",
",",
"size",
"=",
"-",
"1",
")",
":",
"self",
".",
"_check_can_read",
"(",
")",
"if",
"size",
"<",
"0",
":",
"size",
"=",
"io",
".",
"DEFAULT_BUFFER_SIZE",
"return",
"self",
".",
"_buffer",
".",
"read1",
"(",
"size",
")"
] | https://github.com/python/cpython/blob/e13cdca0f5224ec4e23bdd04bb3120506964bc8b/Lib/bz2.py#L166-L176 |
|
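A usage sketch (the scratch file name is arbitrary); read1() tries to avoid multiple reads of the underlying stream, unlike read(), which loops until it satisfies the request:

    import bz2

    with bz2.open("demo.bz2", "wb") as f:
        f.write(b"hello world " * 1000)

    with bz2.BZ2File("demo.bz2", "rb") as f:
        first = f.read1(16)   # at most 16 bytes from a single read
        rest = f.read1()      # default/negative size: up to one buffer's worth
        print(len(first), len(rest))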
TencentCloud/tencentcloud-sdk-python | 3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2 | tencentcloud/cls/v20201016/models.py | python | ExportInfo.__init__ | (self) | r"""
:param TopicId: Log topic ID
:type TopicId: str
:param ExportId: Log export task ID
:type ExportId: str
:param Query: Log export query statement
:type Query: str
:param FileName: Log export file name
:type FileName: str
:param FileSize: Log file size
:type FileSize: int
:param Order: Log export time ordering
:type Order: str
:param Format: Log export format
:type Format: str
:param Count: Number of logs to export
:type Count: int
:param Status: Log download status. Processing: export in progress; Complete: export finished; Failed: export failed; Expired: the export has expired (three-day validity).
:type Status: str
:param From: Log export start time
:type From: int
:param To: Log export end time
:type To: int
:param CosPath: Log export path
:type CosPath: str
:param CreateTime: Log export creation time
:type CreateTime: str | r"""
:param TopicId: Log topic ID
:type TopicId: str
:param ExportId: Log export task ID
:type ExportId: str
:param Query: Log export query statement
:type Query: str
:param FileName: Log export file name
:type FileName: str
:param FileSize: Log file size
:type FileSize: int
:param Order: Log export time ordering
:type Order: str
:param Format: Log export format
:type Format: str
:param Count: Number of logs to export
:type Count: int
:param Status: Log download status. Processing: export in progress; Complete: export finished; Failed: export failed; Expired: the export has expired (three-day validity).
:type Status: str
:param From: Log export start time
:type From: int
:param To: Log export end time
:type To: int
:param CosPath: Log export path
:type CosPath: str
:param CreateTime: Log export creation time
:type CreateTime: str | [
"r",
":",
"param",
"TopicId",
":",
"Log",
"topic",
"ID",
":",
"type",
"TopicId",
":",
"str",
":",
"param",
"ExportId",
":",
"Log",
"export",
"task",
"ID",
":",
"type",
"ExportId",
":",
"str",
":",
"param",
"Query",
":",
"Log",
"export",
"query",
"statement",
":",
"type",
"Query",
":",
"str",
":",
"param",
"FileName",
":",
"Log",
"export",
"file",
"name",
":",
"type",
"FileName",
":",
"str",
":",
"param",
"FileSize",
":",
"Log",
"file",
"size",
":",
"type",
"FileSize",
":",
"int",
":",
"param",
"Order",
":",
"Log",
"export",
"time",
"ordering",
":",
"type",
"Order",
":",
"str",
":",
"param",
"Format",
":",
"Log",
"export",
"format",
":",
"type",
"Format",
":",
"str",
":",
"param",
"Count",
":",
"Number",
"of",
"logs",
"to",
"export",
":",
"type",
"Count",
":",
"int",
":",
"param",
"Status",
":",
"Log",
"download",
"status",
".",
"Processing",
":",
"export",
"in",
"progress",
";",
"Complete",
":",
"export",
"finished",
";",
"Failed",
":",
"export",
"failed",
";",
"Expired",
":",
"the",
"export",
"has",
"expired",
"(",
"three",
"-",
"day",
"validity",
")",
".",
":",
"type",
"Status",
":",
"str",
":",
"param",
"From",
":",
"Log",
"export",
"start",
"time",
":",
"type",
"From",
":",
"int",
":",
"param",
"To",
":",
"Log",
"export",
"end",
"time",
":",
"type",
"To",
":",
"int",
":",
"param",
"CosPath",
":",
"Log",
"export",
"path",
":",
"type",
"CosPath",
":",
"str",
":",
"param",
"CreateTime",
":",
"Log",
"export",
"creation",
"time",
":",
"type",
"CreateTime",
":",
"str"
] | def __init__(self):
r"""
:param TopicId: Log topic ID
:type TopicId: str
:param ExportId: Log export task ID
:type ExportId: str
:param Query: Log export query statement
:type Query: str
:param FileName: Log export file name
:type FileName: str
:param FileSize: Log file size
:type FileSize: int
:param Order: Log export time ordering
:type Order: str
:param Format: Log export format
:type Format: str
:param Count: Number of logs to export
:type Count: int
:param Status: Log download status. Processing: export in progress; Complete: export finished; Failed: export failed; Expired: the export has expired (three-day validity).
:type Status: str
:param From: Log export start time
:type From: int
:param To: Log export end time
:type To: int
:param CosPath: Log export path
:type CosPath: str
:param CreateTime: Log export creation time
:type CreateTime: str
"""
self.TopicId = None
self.ExportId = None
self.Query = None
self.FileName = None
self.FileSize = None
self.Order = None
self.Format = None
self.Count = None
self.Status = None
self.From = None
self.To = None
self.CosPath = None
self.CreateTime = None | [
"def",
"__init__",
"(",
"self",
")",
":",
"self",
".",
"TopicId",
"=",
"None",
"self",
".",
"ExportId",
"=",
"None",
"self",
".",
"Query",
"=",
"None",
"self",
".",
"FileName",
"=",
"None",
"self",
".",
"FileSize",
"=",
"None",
"self",
".",
"Order",
"=",
"None",
"self",
".",
"Format",
"=",
"None",
"self",
".",
"Count",
"=",
"None",
"self",
".",
"Status",
"=",
"None",
"self",
".",
"From",
"=",
"None",
"self",
".",
"To",
"=",
"None",
"self",
".",
"CosPath",
"=",
"None",
"self",
".",
"CreateTime",
"=",
"None"
] | https://github.com/TencentCloud/tencentcloud-sdk-python/blob/3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2/tencentcloud/cls/v20201016/models.py#L3051-L3092 |
||
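The model is a plain attribute bag; in this SDK such models are normally filled by _deserialize() from an API response, but the fields can also be set directly (the values below are placeholders):

    info = ExportInfo()
    info.TopicId = "topic-0123abcd"   # placeholder ID
    info.Status = "Processing"        # Processing / Complete / Failed / Expired
    info.Count = 10000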
digidotcom/xbee-python | 0757f4be0017530c205175fbee8f9f61be9614d1 | digi/xbee/packets/common.py | python | ExplicitAddressingPacket.x16bit_dest_addr | (self, x16bit_addr) | Sets the 16-bit destination address.
Args:
x16bit_addr (:class:`.XBee16BitAddress`): the new 16-bit destination address.
.. seealso::
| :class:`.XBee16BitAddress` | Sets the 16-bit destination address. | [
"Sets",
"the",
"16",
"-",
"bit",
"destination",
"address",
"."
] | def x16bit_dest_addr(self, x16bit_addr):
"""
Sets the 16-bit destination address.
Args:
x16bit_addr (:class:`.XBee16BitAddress`): the new 16-bit destination address.
.. seealso::
| :class:`.XBee16BitAddress`
"""
self.__x16bit_addr = x16bit_addr | [
"def",
"x16bit_dest_addr",
"(",
"self",
",",
"x16bit_addr",
")",
":",
"self",
".",
"__x16bit_addr",
"=",
"x16bit_addr"
] | https://github.com/digidotcom/xbee-python/blob/0757f4be0017530c205175fbee8f9f61be9614d1/digi/xbee/packets/common.py#L2735-L2745 |
||
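Setter usage on an already-built ExplicitAddressingPacket (called packet here); this assumes the usual XBee16BitAddress constructor taking a two-byte bytearray from the same library:

    from digi.xbee.models.address import XBee16BitAddress

    # 0xFFFE is the conventional "unknown 16-bit address" placeholder.
    packet.x16bit_dest_addr = XBee16BitAddress(bytearray([0xFF, 0xFE]))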
rytilahti/python-miio | b6e53dd16fac77915426e7592e2528b78ef65190 | miio/alarmclock.py | python | AlarmClock.query | (self) | return self.send("alarm_ops", payload) | -> 192.168.0.128 data= {"id":227,"method":"alarm_ops","params":
{"operation":"query","index":0,"update_datetime":1564205198413,"req_type":"reminder"}} | -> 192.168.0.128 data= {"id":227,"method":"alarm_ops","params":
{"operation":"query","index":0,"update_datetime":1564205198413,"req_type":"reminder"}} | [
"-",
">",
"192",
".",
"168",
".",
"0",
".",
"128",
"data",
"=",
"{",
"id",
":",
"227",
"method",
":",
"alarm_ops",
"params",
":",
"{",
"operation",
":",
"query",
"index",
":",
"0",
"update_datetime",
":",
"1564205198413",
"req_type",
":",
"reminder",
"}}"
] | def query(self):
"""
-> 192.168.0.128 data= {"id":227,"method":"alarm_ops","params":
{"operation":"query","index":0,"update_datetime":1564205198413,"req_type":"reminder"}}
"""
payload = {
"operation": "query",
"index": 0,
"update_datetime": int(time.time() * 1000),
"req_type": "timer",
}
return self.send("alarm_ops", payload) | [
"def",
"query",
"(",
"self",
")",
":",
"payload",
"=",
"{",
"\"operation\"",
":",
"\"query\"",
",",
"\"index\"",
":",
"0",
",",
"\"update_datetime\"",
":",
"int",
"(",
"time",
".",
"time",
"(",
")",
"*",
"1000",
")",
",",
"\"req_type\"",
":",
"\"timer\"",
",",
"}",
"return",
"self",
".",
"send",
"(",
"\"alarm_ops\"",
",",
"payload",
")"
] | https://github.com/rytilahti/python-miio/blob/b6e53dd16fac77915426e7592e2528b78ef65190/miio/alarmclock.py#L254-L267 |
|
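Note that the captured packet in the docstring carries req_type "reminder" while the method itself sends "timer". A usage sketch (the IP and token are placeholders):

    from miio.alarmclock import AlarmClock

    clock = AlarmClock("192.168.0.128", "ffffffffffffffffffffffffffffffff")
    timers = clock.query()   # alarm_ops with operation="query", req_type="timer"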
llSourcell/AI_Artist | 3038c06c2e389b9c919c881c9a169efe2fd7810e | lib/python2.7/site-packages/pkg_resources/_vendor/packaging/specifiers.py | python | _IndividualSpecifier.__eq__ | (self, other) | return self._spec == other._spec | [] | def __eq__(self, other):
if isinstance(other, string_types):
try:
other = self.__class__(other)
except InvalidSpecifier:
return NotImplemented
elif not isinstance(other, self.__class__):
return NotImplemented
return self._spec == other._spec | [
"def",
"__eq__",
"(",
"self",
",",
"other",
")",
":",
"if",
"isinstance",
"(",
"other",
",",
"string_types",
")",
":",
"try",
":",
"other",
"=",
"self",
".",
"__class__",
"(",
"other",
")",
"except",
"InvalidSpecifier",
":",
"return",
"NotImplemented",
"elif",
"not",
"isinstance",
"(",
"other",
",",
"self",
".",
"__class__",
")",
":",
"return",
"NotImplemented",
"return",
"self",
".",
"_spec",
"==",
"other",
".",
"_spec"
] | https://github.com/llSourcell/AI_Artist/blob/3038c06c2e389b9c919c881c9a169efe2fd7810e/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/specifiers.py#L114-L123 |
|||
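The coercion path above means a specifier compares equal to its own string form; an invalid string yields NotImplemented, so == falls back to identity and evaluates False. The standalone packaging distribution, which this vendored copy tracks, behaves the same way:

    from packaging.specifiers import Specifier

    assert Specifier(">=1.0") == ">=1.0"       # string parsed, then _spec compared
    assert Specifier(">=1.0") != Specifier("<2")
    assert (Specifier(">=1.0") == "not a spec") is False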
cloudant/bigcouch | 8e9c1ec0ed1676ff152f10658f5c83a1a91fa8fe | couchjs/scons/scons-local-2.0.1/SCons/Script/SConscript.py | python | compute_exports | (exports) | return retval | Compute a dictionary of exports given one of the parameters
to the Export() function or the exports argument to SConscript(). | Compute a dictionary of exports given one of the parameters
to the Export() function or the exports argument to SConscript(). | [
"Compute",
"a",
"dictionary",
"of",
"exports",
"given",
"one",
"of",
"the",
"parameters",
"to",
"the",
"Export",
"()",
"function",
"or",
"the",
"exports",
"argument",
"to",
"SConscript",
"()",
"."
] | def compute_exports(exports):
"""Compute a dictionary of exports given one of the parameters
to the Export() function or the exports argument to SConscript()."""
loc, glob = get_calling_namespaces()
retval = {}
try:
for export in exports:
if SCons.Util.is_Dict(export):
retval.update(export)
else:
try:
retval[export] = loc[export]
except KeyError:
retval[export] = glob[export]
except KeyError, x:
raise SCons.Errors.UserError("Export of non-existent variable '%s'"%x)
return retval | [
"def",
"compute_exports",
"(",
"exports",
")",
":",
"loc",
",",
"glob",
"=",
"get_calling_namespaces",
"(",
")",
"retval",
"=",
"{",
"}",
"try",
":",
"for",
"export",
"in",
"exports",
":",
"if",
"SCons",
".",
"Util",
".",
"is_Dict",
"(",
"export",
")",
":",
"retval",
".",
"update",
"(",
"export",
")",
"else",
":",
"try",
":",
"retval",
"[",
"export",
"]",
"=",
"loc",
"[",
"export",
"]",
"except",
"KeyError",
":",
"retval",
"[",
"export",
"]",
"=",
"glob",
"[",
"export",
"]",
"except",
"KeyError",
",",
"x",
":",
"raise",
"SCons",
".",
"Errors",
".",
"UserError",
"(",
"\"Export of non-existent variable '%s'\"",
"%",
"x",
")",
"return",
"retval"
] | https://github.com/cloudant/bigcouch/blob/8e9c1ec0ed1676ff152f10658f5c83a1a91fa8fe/couchjs/scons/scons-local-2.0.1/SCons/Script/SConscript.py#L100-L119 |
|
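Because compute_exports() walks the caller's frames via get_calling_namespaces(), calling it directly only makes sense from inside SCons machinery; the practical illustration is the SConstruct-level idiom it backs:

    # In an SConstruct file (Environment/Export/SConscript are SCons builtins):
    env = Environment()
    Export("env")                 # name resolved in the caller's namespace
    Export({"mode": "release"})   # dict form is merged verbatim
    SConscript("src/SConscript", exports=["env"])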
gaasedelen/lighthouse | 7245a2d2c4e84351cd259ed81dafa4263167909a | plugins/lighthouse/metadata.py | python | DatabaseMetadata.get_closest_function | (self, address) | | | Get the function metadata for the function closest to the given address. | Get the function metadata for the function closest to the given address. | [
"Get",
"the",
"function",
"metadata",
"for",
"the",
"function",
"closest",
"to",
"the",
"given",
"address",
"."
] | def get_closest_function(self, address):
"""
Get the function metadata for the function closest to the given address.
"""
# sanity check
if not self._function_addresses:
return None
# get the closest insertion point of the given address
index = bisect.bisect_left(self._function_addresses, address)
# the given address is a min, return the first known function
if index == 0:
return self.functions[self._function_addresses[0]]
# given address is a max, return the last known function
if index == len(self._function_addresses):
return self.functions[self._function_addresses[-1]]
# select the two candidate addresses
before = self._function_addresses[index - 1]
after = self._function_addresses[index]
# return the function closest to the given address
if after - address < address - before:
return self.functions[after]
else:
return self.functions[before] | [
"def",
"get_closest_function",
"(",
"self",
",",
"address",
")",
":",
"# sanity check",
"if",
"not",
"self",
".",
"_function_addresses",
":",
"return",
"None",
"# get the closest insertion point of the given address",
"index",
"=",
"bisect",
".",
"bisect_left",
"(",
"self",
".",
"_function_addresses",
",",
"address",
")",
"# the given address is a min, return the first known function",
"if",
"index",
"==",
"0",
":",
"return",
"self",
".",
"functions",
"[",
"self",
".",
"_function_addresses",
"[",
"0",
"]",
"]",
"# given address is a max, return the last known function",
"if",
"index",
"==",
"len",
"(",
"self",
".",
"_function_addresses",
")",
":",
"return",
"self",
".",
"functions",
"[",
"self",
".",
"_function_addresses",
"[",
"-",
"1",
"]",
"]",
"# select the two candidate addresses",
"before",
"=",
"self",
".",
"_function_addresses",
"[",
"index",
"-",
"1",
"]",
"after",
"=",
"self",
".",
"_function_addresses",
"[",
"index",
"]",
"# return the function closest to the given address",
"if",
"after",
"-",
"address",
"<",
"address",
"-",
"before",
":",
"return",
"self",
".",
"functions",
"[",
"after",
"]",
"else",
":",
"return",
"self",
".",
"functions",
"[",
"before",
"]"
] | https://github.com/gaasedelen/lighthouse/blob/7245a2d2c4e84351cd259ed81dafa4263167909a/plugins/lighthouse/metadata.py#L281-L309 |
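The same three-case bisect walk on a bare address list, as a runnable sketch of the lookup above:

    import bisect

    def closest(addresses, target):
        # addresses must be sorted, as self._function_addresses is.
        if not addresses:
            return None
        i = bisect.bisect_left(addresses, target)
        if i == 0:
            return addresses[0]
        if i == len(addresses):
            return addresses[-1]
        before, after = addresses[i - 1], addresses[i]
        return after if after - target < target - before else before

    assert closest([0x100, 0x200, 0x300], 0x1F0) == 0x200   # nearer to 0x200
    assert closest([0x100, 0x200, 0x300], 0x900) == 0x300   # clamped to the last entry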